input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<reponame>misads/torch_template
# encoding=utf-8
"""Misc PyTorch utils
Usage:
>>> from torch_template import torch_utils
>>> torch_utils.func_name() # to call functions in this file
"""
from datetime import datetime
import math
import os
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
import numpy as np
##############################
# Functional utils
##############################
from misc_utils import format_num
def clamp(x, min=0.01, max=0.99):
    """Clamp every element of a tensor into the range [min, max].

    Args:
        x(torch.Tensor): input tensor
        min(float): lower bound; smaller values are raised to min.
        max(float): upper bound; larger values are lowered to max.

    Returns:
        (torch.Tensor): a new tensor with values limited to [min, max].
    """
    # Tensor.clamp is equivalent to torch.clamp(x, min, max)
    return x.clamp(min, max)
def repeat(x: torch.Tensor, *sizes):
    """Tile a tensor along each of its dimensions.

    Args:
        x(torch.Tensor): input tensor.
        sizes: how many times to repeat along each dimension.

    Returns:
        (torch.Tensor): the tiled tensor.

    Example
        >>> t = repeat(t, 1, 3, 1, 1)  # same as t.repeat(1, 3, 1, 1)
    """
    # Tensor.repeat also accepts a tuple of sizes
    return x.repeat(tuple(sizes))
def tensor2im(x: torch.Tensor, norm=False, to_save=False):
    """Convert a batched tensor to a single image.

    Args:
        x(torch.Tensor): input tensor, [n, c, h, w] float32 type.
        norm(bool): if True, denorm from [-1, 1] to [0, 1] first.
        to_save(bool): if True, return a uint8 numpy image of [h, w, c];
            if False, return a float32 torch tensor of [c, h, w].

    Returns:
        a uint8 numpy array [h, w, c] if to_save else a float tensor [c, h, w].

    Note:
        When norm is False the clamping below mutates the caller's tensor
        in place (kept for backward compatibility).
    """
    if norm:
        x = (x + 1) / 2
    x[x > 1] = 1
    x[x < 0] = 0
    x = x.detach().cpu()[0]
    if to_save:
        # BUG FIX: torch tensors have no .astype() and Tensor.transpose does
        # not accept a tuple of axes; convert to numpy before the uint8
        # conversion and HWC transpose (the original crashed here).
        x = x.numpy() * 255
        x = x.astype(np.uint8)
        x = x.transpose((1, 2, 0))
    return x
##############################
# Network utils
##############################
def print_network(net: nn.Module, print_size=False):
    """Print a network's structure, per-layer weight shapes and total parameter count.

    Args:
        net(nn.Module): network model.
        print_size(bool): also print the parameter count of each layer.

    Example
        >>> import torchvision as tv
        >>> vgg16 = tv.models.vgg16()
        >>> print_network(vgg16)
        >>> # features.0.weight [3, 64, 3, 3]
        >>> # ...
        >>> # Total number of parameters: 138,357,544
    """
    print(net)
    total = 0
    for name, param in net.named_parameters():
        total += param.numel()
        shape = list(param.size())
        # only multi-dimensional weights are listed (biases are skipped)
        if len(shape) > 1:
            # report shape with the first two dimensions swapped ([in, out, ...])
            reordered = shape[1:2] + shape[:1] + shape[2:]
            if print_size:
                print(name, reordered, format_num(param.numel()))
            else:
                print(name, reordered)
    print('Total number of parameters: %s' % format_num(total))
# def receptive_field(net):
# def _f(output_size, ksize, stride, dilation):
# return (output_size - 1) * stride + ksize * dilation - dilation + 1
#
# stats = []
# for m in net.modules():
# if isinstance(m, torch.nn.Conv2d):
# stats.append((m.kernel_size, m.stride, m.dilation))
#
# rsize = 1
# for (ksize, stride, dilation) in reversed(stats):
# if type(ksize) == tuple: ksize = ksize[0]
# if type(stride) == tuple: stride = stride[0]
# if type(dilation) == tuple: dilation = dilation[0]
# rsize = _f(rsize, ksize, stride, dilation)
# return rsize
##############################
# Abstract Meters class
##############################
class Meters(object):
    """Abstract base class for running-metric trackers."""

    def __init__(self):
        pass

    def update(self, new_dic):
        # Fold a dict of new metric values into the meter (abstract).
        raise NotImplementedError

    def __getitem__(self, key):
        raise NotImplementedError

    def keys(self):
        raise NotImplementedError

    def items(self):
        # Subclasses are expected to provide a `dic` attribute.
        return self.dic.items()
class AverageMeters(Meters):
    """Tracks the running arithmetic mean of named scalar values.

    Example
        >>> avg_meters = AverageMeters()
        >>> for i in range(100):
        >>>     avg_meters.update({'f': i})
        >>> print(str(avg_meters))
    """

    def __init__(self, dic=None, total_num=None):
        # running sums per key
        self.dic = dic or {}
        # observation counts per key
        self.total_num = total_num or {}

    def update(self, new_dic):
        for key, value in new_dic.items():
            if key in self.dic:
                self.dic[key] += value
                self.total_num[key] += 1
            else:
                self.dic[key] = value
                self.total_num[key] = 1

    def __getitem__(self, key):
        # mean = accumulated sum / count
        return self.dic[key] / self.total_num[key]

    def __str__(self):
        parts = ['%s: %.4f | ' % (k, self[k]) for k in sorted(self.keys())]
        return ''.join(parts)

    def keys(self):
        return self.dic.keys()
class ExponentialMovingAverage(Meters):
    """Tracks exponentially-weighted moving averages of named scalars.

    Example
        >>> ema_meters = ExponentialMovingAverage(0.98)
        >>> for i in range(100):
        >>>     ema_meters.update({'f': i})
        >>> print(str(ema_meters))
    """

    def __init__(self, decay=0.9, dic=None, total_num=None):
        self.decay = decay
        self.dic = dic or {}
        self.total_num = total_num or {}

    def update(self, new_dic):
        d = self.decay
        for key, value in new_dic.items():
            if key in self.dic:
                # standard EMA recurrence
                self.dic[key] = d * self.dic[key] + (1 - d) * value
                self.total_num[key] += 1
            else:
                # first observation: seeded as (1 - decay) * value, i.e. the
                # un-bias-corrected EMA started from zero
                self.dic[key] = (1 - d) * value
                self.total_num[key] = 1

    def __getitem__(self, key):
        # raw (uncorrected) EMA value
        return self.dic[key]

    def __str__(self):
        return ''.join('%s: %.4f | ' % (k, self[k]) for k in sorted(self.keys()))

    def keys(self):
        return self.dic.keys()
##############################
# Checkpoint helper
##############################
def load_ckpt(model, ckpt_path):
    """Restore model weights from a checkpoint file.

    Args:
        model(nn.Module): object of a subclass of nn.Module.
        ckpt_path(str): *.pt file to load.

    Example
        >>> model = Model().cuda()
        >>> load_ckpt(model, 'model.pt')
    """
    state = torch.load(ckpt_path)
    model.load_state_dict(state)
def save_ckpt(model, ckpt_path):
    """Write the model's state dict to a checkpoint file.

    Args:
        model(nn.Module): object of a subclass of nn.Module.
        ckpt_path(str): *.pt file to save.

    Example
        >>> model = Model().cuda()
        >>> save_ckpt(model, 'model.pt')
    """
    state = model.state_dict()
    torch.save(state, ckpt_path)
##############################
# LR_Scheduler
##############################
class LR_Scheduler(object):
    """Learning rate scheduler supporting 'step', 'cos' and 'poly' policies.

    Example:
        >>> scheduler = LR_Scheduler('cos', opt.lr, opt.epochs, len(dataloader), warmup_epochs=20)
        >>> for i, data in enumerate(dataloader):
        >>>     scheduler(self.g_optimizer, i, epoch)

    Step mode: ``lr = baselr * 0.1 ^ {floor(epoch-1 / lr_step)}``
        (lr is multiplied by 0.1 every `lr_step` epochs)
    Cosine mode: ``lr = baselr * 0.5 * (1 + cos(iter/maxiter))``
    Poly mode: ``lr = baselr * (1 - iter/maxiter) ^ 0.9``
    iters_per_epoch: number of iterations per epoch
    """

    def __init__(self, mode, base_lr, num_epochs, iters_per_epoch=0,
                 lr_step=0, warmup_epochs=0, logger=None):
        """
        :param mode: `step` `cos` or `poly`
        :param base_lr: initial learning rate
        :param num_epochs: total number of training epochs
        :param iters_per_epoch: iterations per epoch (required by cos/poly)
        :param lr_step: epochs between decays, for `step` mode only
        :param warmup_epochs: epochs of linear warmup before the schedule
        :param logger: optional logger; messages fall back to print()
        """
        self.mode = mode
        print('Using {} LR Scheduler!'.format(self.mode))
        self.lr = base_lr
        if mode == 'step':
            assert lr_step
        self.lr_step = lr_step
        self.iters_per_epoch = iters_per_epoch
        # total number of iterations over the whole training run
        self.N = num_epochs * iters_per_epoch
        self.epoch = -1
        self.warmup_iters = warmup_epochs * iters_per_epoch
        self.logger = logger
        if logger:
            self.logger.info('Using {} LR Scheduler!'.format(self.mode))

    def __call__(self, optimizer, i, epoch):
        """Update the optimizer's lr for iteration *i* of *epoch*."""
        T = epoch * self.iters_per_epoch + i
        if self.mode == 'cos':
            lr = 0.5 * self.lr * (1 + math.cos(1.0 * T / self.N * math.pi))
        elif self.mode == 'poly':
            lr = self.lr * pow((1 - 1.0 * T / self.N), 0.9)
        elif self.mode == 'step':
            lr = self.lr * (0.1 ** (epoch // self.lr_step))
        else:
            # BUG FIX: `raise NotImplemented` raised a TypeError because
            # NotImplemented is a constant, not an exception class.
            raise NotImplementedError('unknown LR schedule mode: {}'.format(self.mode))
        # linear warm up of the lr during the first warmup_iters iterations
        if self.warmup_iters > 0 and T < self.warmup_iters:
            lr = lr * 1.0 * T / self.warmup_iters
        if epoch > self.epoch:
            if self.logger:
                self.logger.info('\n=>Epoches %i, learning rate = %.4f' % (epoch, lr))
            else:
                print('\nepoch: %d lr: %.6f' % (epoch, lr))
            self.epoch = epoch
        assert lr >= 0
        self._adjust_learning_rate(optimizer, lr)

    def _adjust_learning_rate(self, optimizer, lr):
        """Write *lr* into the optimizer's param groups."""
        if len(optimizer.param_groups) == 1:
            optimizer.param_groups[0]['lr'] = lr
        else:
            # enlarge the lr at the head: groups beyond the first get 10x lr
            optimizer.param_groups[0]['lr'] = lr
            for i in range(1, len(optimizer.param_groups)):
                optimizer.param_groups[i]['lr'] = lr * 10
"""
TensorBoard
Example:
writer = create_summary_writer(os.path.join(self.basedir, 'logs'))
write_meters_loss(writer, 'train', avg_meters, iteration)
write_loss(writer, 'train', 'F1', 0.78, iteration)
write_image(writer, 'train', 'input', img, iteration)
# shell
tensorboard --logdir {base_path}/logs
"""
def create_summary_writer(log_dir):
    """Create a tensorboard summary writer inside a timestamped sub-directory.

    Args:
        log_dir: root log directory; created if missing.

    Returns:
        (SummaryWriter): a writer flushing every 10 seconds.

    Example
        >>> writer = create_summary_writer(os.path.join(self.basedir, 'logs'))
        >>> write_loss(writer, 'train', 'F1', 0.78, iteration)
        >>> # shell: tensorboard --logdir {base_path}/logs
    """
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    # each run gets its own directory named after the current timestamp
    run_dir = os.path.join(log_dir, datetime.now().strftime('%m-%d_%H-%M-%S'))
    if not os.path.exists(run_dir):
        os.mkdir(run_dir)
    return SummaryWriter(run_dir, max_queue=3, flush_secs=10)
def write_loss(writer: SummaryWriter, prefix, loss_name: str, value: float, iteration):
    """Log a scalar loss value under the tag `<prefix>/<loss_name>`.

    Args:
        writer(SummaryWriter): writer created by create_summary_writer()
        prefix(str): tag namespace, e.g. 'train'.
        loss_name(str): loss name.
        value(float): loss value.
        iteration(int): epochs or iterations.

    Example
        >>> write_loss(writer, 'train', 'F1', 0.78, iteration)
    """
    tag = os.path.join(prefix, loss_name)
    writer.add_scalar(tag, value, iteration)
def write_graph(writer: SummaryWriter, model, inputs_to_model=None):
"""Write net graph into writer.
Args:
writer(SummaryWriter): writer created by create_summary_writer()
model(nn.Module): model.
inputs_to_model(tuple or list): forward inputs.
Example
>>> from tensorboardX import SummaryWriter
>>> input_data = Variable(torch.rand(16, 3, 224, 224))
>>> vgg16 = torchvision.models.vgg16()
>>>
>>> | |
self.add(dict(s=s, sub=sub, count=count))
class SortNumbers(Problem):
    """
    Sort numbers based on strings
    Sample input
    ---
    "six one four"
    Sample output
    ---
    "one four six"
    Inspired by [HumanEval](https://github.com/openai/human-eval)/19
    """

    # sat: the digit indices of ans's words must equal the sorted digit
    # indices of s's words. (Comments are kept at class level only.)
    @staticmethod
    def sat(ans: str, s="six one four"):
        nums = 'zero one two three four five six seven eight nine'.split()
        return [nums.index(x) for x in ans.split(" ")] == sorted([nums.index(x) for x in s.split(" ")])

    # sol: map words to digits, sort, map back.
    @staticmethod
    def sol(s):
        nums = 'zero one two three four five six seven eight nine'.split()
        arr = [nums.index(x) for x in s.split()]
        arr.sort()
        ans = " ".join([nums[i] for i in arr])
        return ans

    # gen_random: 3-8 random digit words joined by single spaces.
    def gen_random(self):
        nums = 'zero one two three four five six seven eight nine'.split()
        n = self.random.randrange(3, 9)
        ans = ""
        for _ in range(n):
            ans += self.random.choice(nums) + " "
        ans = ans[:-1]
        s = ans
        self.add(dict(s=s))
class FindClosePair(Problem):
    """
    Given a list of numbers, find the indices of the closest pair.
    Sample Input:
    [1.2, 5.25, 0.89, 21.0, 5.23]
    Sample Output:
    [4, 1]
    Inspired by [HumanEval](https://github.com/openai/human-eval)/20
    """

    # sat: [a, b] must index a pair with minimal |nums[b] - nums[a]|.
    @staticmethod
    def sat(inds: List[int], nums=[0.31, 21.3, 5.0, 9.0, 11.0, 5.01, 17.2]):
        a, b = inds
        assert a != b and a >= 0 and b >= 0
        for i in range(len(nums)):
            for j in range(i):
                assert abs(nums[i] - nums[j]) >= abs(nums[b] - nums[a])
        return True

    # sol: O(n^2) scan keeping the closest pair seen so far.
    @staticmethod
    def sol(nums):
        best = [0, 1]
        best_score = abs(nums[1] - nums[0])
        for i in range(len(nums)):
            for j in range(i):
                score = abs(nums[i] - nums[j])
                if score < best_score:
                    best_score = score
                    best = [i, j]
        return best

    # gen_random: 20% of instances duplicate a value so an exact tie exists.
    def gen_random(self):
        nums = [self.random.uniform(-10, 10) for _ in range(self.random.randrange(2, 10))]
        if self.random.random() < 0.2:
            nums.append(nums[0])
        self.random.shuffle(nums)
        self.add(dict(nums=nums))
class Rescale(Problem):
    """
    Rescale and shift numbers so that they cover the range [0, 1]
    Sample input
    ---
    [18.5, 17.0, 18.0, 19.0, 18.0]
    Sample output
    ---
    [0.75, 0.0, 0.5, 1.0, 0.5]
    Inspired by [HumanEval](https://github.com/openai/human-eval)/21
    """

    # sat: ans must span exactly [0, 1] and invert back to nums via
    # x = a + (b - a) * ans[i] within 1e-6.
    @staticmethod
    def sat(ans: List[float], nums=[13.0, 17.0, 17.0, 15.5, 2.94]):
        assert min(ans) == 0.0 and max(ans) == 1.0
        a = min(nums)
        b = max(nums)
        for i in range(len(nums)):
            x = a + (b - a) * ans[i]
            assert abs(nums[i] - x) < 1e-6
        return True

    # sol: affine rescale; the constant-list case needs a special answer
    # since (nums[i] - a) / (b - a) would divide by zero.
    @staticmethod
    def sol(nums):
        nums = nums.copy()
        a = min(nums)
        b = max(nums)
        if b - a == 0:
            return [0.0] + [1.0] * (len(nums) - 1)
        for i in range(len(nums)):
            nums[i] = (nums[i] - a) / (b - a)
        return nums

    # gen_random: 20% of instances are constant lists (the degenerate case).
    def gen_random(self):
        nums = [self.random.heavy_tail_float() for _ in range(self.random.randrange(2, 10))]
        if self.random.random() < 0.2:
            nums = [nums[0]] * len(nums)
        self.add(dict(nums=nums))
class FilterInts(Problem):
    """
    Find the indices of valid python integers in a list of strings
    Sample input
    ---
    ["18.5", "-1", "2+2", "7", "foo"]
    Sample output
    ---
    [1, 3]
    Inspired by [HumanEval](https://github.com/openai/human-eval)/22
    """

    # sat: every index must point at an int-parseable string, with at least
    # `num` distinct non-negative indices.
    @staticmethod
    def sat(indexes: List[int], li=["Hello", "5", "10", "bye"], num=2):
        [int(li[i]) for i in indexes]
        return len(set(indexes)) >= num and min(indexes) >= 0

    # sol: collect the indices whose strings parse as ints.
    @staticmethod
    def sol(li, num):
        ans = []
        for i in range(len(li)):
            try:
                int(li[i])
                ans.append(i)
            except:
                pass
        return ans

    def gen_random(self):
        # Random strings over digit-ish characters; only some parse as ints.
        chars = "0123456789+-*'e. "
        li = []
        length = self.random.randrange(10)
        for _ in range(length):
            li.append("".join(self.random.choice(chars) for i in range(self.random.randrange(10))))
        # BUG FIX: the original generator built the list but never called
        # self.add(), so no FilterInts instances were ever registered.
        num = 0
        for s in li:
            try:
                int(s)
                num += 1
            except ValueError:
                pass
        # sat() evaluates min(indexes), so require at least one valid int,
        # otherwise the instance would be unsatisfiable.
        if num > 0:
            self.add(dict(li=li, num=num))
class StrLength(Problem):
    """
    Find the length of a non-empty string
    Sample input
    ---
    "foo"
    Sample output
    ---
    3
    Inspired by [HumanEval](https://github.com/openai/human-eval)/23
    """

    # sat: s[length] must be out of range while s[length - 1] is valid, which
    # pins length to len(s); the success path falls through returning None.
    @staticmethod
    def sat(length: int, s="pneumonoultramicroscopicsilicovolcanoconiosis"):
        try:
            s[length]
        except IndexError:
            s[length - 1]
            return True

    @staticmethod
    def sol(s):
        return len(s)

    def gen_random(self):
        s = self.random.pseudo_word(min_len=1, max_len=50)
        self.add(dict(s=s))
class LargestDivisor(Problem):
    """
    Find the largest integer divisor of a number n that is less than n
    Sample input
    ---
    1000
    Sample output
    ---
    500
    Inspired by [HumanEval](https://github.com/openai/human-eval)/24
    """

    # sat: d divides n, d < n, and nothing strictly between d and n divides n.
    @staticmethod
    def sat(d: int, n=123456):
        return n % d == 0 and d < n and all(n % e for e in range(d + 1, n))

    # sol: scan downwards from n - 1 for the first divisor.
    @staticmethod
    def sol(n):
        return next(d for d in range(n - 1, 0, -1) if n % d == 0)

    def gen_random(self):
        # BUG FIX: the range previously started at 1, but n=1 has no divisor
        # d < n, making the instance unsolvable (sol raised StopIteration).
        n = self.random.randrange(2, 10 ** 5)
        self.add(dict(n=n))
class PrimeFactorization(Problem):
    """
    Factor number n into a given number of non-trivial factors
    Sample input
    ---
    1000, 6
    Sample output
    ---
    [2, 2, 2, 5, 5, 5]
    Inspired by [HumanEval](https://github.com/openai/human-eval)/25
    """

    # sat: exactly num_factors factors, each > 1, multiplying to n.
    @staticmethod
    def sat(factors: List[int], n=123456, num_factors=8):
        assert len(factors) == num_factors
        prod = 1
        for d in factors:
            prod *= d
            assert d > 1
        return prod == n
    # sol: peel off prime factors until one short of the target count, then
    # append whatever remains as the final factor.
    @staticmethod
    def sol(n, num_factors):
        if num_factors == 0:
            return []
        if num_factors == 1:
            return [n]
        ans = []
        for d in range(2, n):
            while n % d == 0:
                n //= d
                ans.append(d)
                if len(ans) == num_factors - 1:
                    ans.append(n)
                    return ans
        assert False
    # gen_random: builds n with a known factor count so sol always succeeds.
    def gen_random(self):
        num_factors = self.random.randrange(10)
        n = 2 ** num_factors
        for _ in range(self.random.randrange(10)):
            n *= self.random.choice([3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47])
            num_factors += 1
        self.add(dict(n=n, num_factors=num_factors))
class Dedup(Problem):
    """
    Remove duplicates from a list of integers, preserving order
    Sample input
    ---
    [1, 3, 2, 9, 2, 1, 55]
    Sample output
    ---
    [1, 3, 2, 9, 55]
    Inspired by [HumanEval](https://github.com/openai/human-eval)/26
    """

    # sat: same value set, and first occurrences appear in the original order.
    @staticmethod
    def sat(ans: List[int], li=[2, 19, 2, 53, 1, 1, 2, 44, 17, 0, 19, 31]):
        return set(ans) == set(li) and all(li.index(ans[i]) < li.index(ans[i + 1]) for i in range(len(ans) - 1))

    # sol: classic seen-set pass keeping first occurrences.
    @staticmethod
    def sol(li):
        seen = set()
        ans = []
        for n in li:
            if n not in seen:
                ans.append(n)
                seen.add(n)
        return ans

    def gen_random(self):
        n = self.random.randrange(20)
        li = [self.random.randrange(10) for _ in range(n)]
        self.add(dict(li=li))
class FlipCase(Problem):
    """
    Flip case
    Sample input
    ---
    'cAt'
    Sample output
    ---
    'CaT'
    Inspired by [HumanEval](https://github.com/openai/human-eval)/27
    """

    # sat: each output char pairs with the input char as its other case;
    # for non-letters upper == lower, which forces c == d.
    @staticmethod
    def sat(ans: str, s="FlIp ME!"):
        return len(ans) == len(s) and all({c, d} == {d.upper(), d.lower()} for c, d in zip(ans, s))

    @staticmethod
    def sol(s):
        return "".join(c.lower() if c.upper() == c else c.upper() for c in s)

    def gen_random(self):
        w = self.random.pseudo_word()
        s = "".join(self.random.choice([c.upper(), c.lower()] * 5 + [' ', '!', '3']) for c in w)
        self.add(dict(s=s))
class CatStrings(Problem):
    """
    Concatenate a list of strings
    Sample input
    ---
    ['cat', 'dog', 'bird']
    Sample output
    ---
    'catdogbird'
    Inspired by [HumanEval](https://github.com/openai/human-eval)/28
    """

    # sat: walk cat character-by-character against the strings and require
    # the lengths to match exactly.
    @staticmethod
    def sat(cat: str, strings=["Will", "i", "am", "Now", "here"]):
        i = 0
        for s in strings:
            for c in s:
                assert cat[i] == c
                i += 1
        return i == len(cat)

    @staticmethod
    def sol(strings):
        return "".join(strings)

    def gen_random(self):
        strings = [self.random.pseudo_word() for _ in range(self.random.randrange(10))]
        self.add(dict(strings=strings))
class FindExtensions(Problem):
    """
    Find the strings in a list startings with a given prefix
    Sample Input:
    ['cat', 'car', 'fear', 'center'], 'ca'
    Sample Output:
    ['cat', 'car']
    Inspired by [HumanEval](https://github.com/openai/human-eval)/29
    """

    # sat: extensions must be exactly the prefix-matching strings, in order.
    @staticmethod
    def sat(extensions: List[str], strings=['cat', 'dog', 'shatter', 'donut', 'at', 'ta'], prefix='do'):
        i = 0
        for s in strings:
            if s.startswith(prefix):
                assert extensions[i] == s
                i += 1
        return i == len(extensions)

    @staticmethod
    def sol(strings, prefix):
        return [s for s in strings if s.startswith(prefix)]

    # gen_random: roughly half the strings are forced to carry the prefix.
    def gen_random(self):
        prefix = self.random.pseudo_word(min_len=0, max_len=3)

        def gen():
            return self.random.choice(["", prefix]) + self.random.pseudo_word(min_len=0, max_len=5)

        strings = [gen() for _ in range(self.random.randrange(6))]
        self.add(dict(strings=strings, prefix=prefix))
class FindPositives(Problem):
    """
    Find the positive integers in a list
    Sample Input:
    [-1, 3, 19, -2, 0, 44, 0, 44, 11]
    Sample Output:
    [3, 19, 44, 44, 11]
    Inspired by [HumanEval](https://github.com/openai/human-eval)/30
    """

    # sat: consume positives (reversed into a stack) against nums; every
    # positive in nums must match in order and nothing may be left over.
    @staticmethod
    def sat(positives: List[int], nums=[2, 2342, -2, 32, -8, -5, 2342, 0, -9, 44, 11]):
        stack = positives[::-1]
        for n in nums:
            assert n <= 0 or n == stack.pop()
        return stack == []

    @staticmethod
    def sol(nums):
        return [i for i in nums if i > 0]

    def gen_random(self):
        nums = [self.random.randrange(-100, 100) for _ in range(self.random.randrange(10))]
        self.add(dict(nums=nums))
class FermatComposite(Problem):
    """
    Find a Fermat composite certificate for a number n > 1
    Sample Input:
    1469
    Sample Output:
    3 # because (3 ** 1468) % 1469 != 1
    Inspired by [HumanEval](https://github.com/openai/human-eval)/31
    """

    # sat: a Fermat witness — a base whose (n-1)-th power mod n is not 1.
    @staticmethod
    def sat(certificate: int, n=1449):
        return pow(certificate, n - 1, n) > 1

    @staticmethod
    def sol(n):
        return next(i for i in range(2, n) if pow(i, n - 1, n) > 1)

    # gen_random: n is a product of two random odd numbers (occasionally one
    # is bumped to even), so n is composite and a witness exists.
    def gen_random(self):
        a, b = [self.random.randrange(3, 10 ** 5, 2) for _ in range(2)]
        if not self.random.randrange(10):
            a += 1
        n = a * b
        self.add(dict(n=n))
class OddDegreePolynomialRoot(Problem):
"""
Find a real root of an odd | |
# Copyright 2015 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from SparseArray import SparseArray
from .node import Variable, Function
from .utils import tonparray
from multiprocessing import Pool
import logging
import gc
import os
import gzip
import pickle
import shutil
from time import time
try:
from tqdm import tqdm
except ImportError:
def tqdm(x, **kwargs):
return x
LOGGER = logging.getLogger('EvoDAG')
def fit(X_y_evodag):
    """Train one EvoDAG configuration; used as a Pool worker.

    The single tuple argument unpacks to (X, y, test_set, params-dict,
    cache dir, start time). Returns the fitted model, a cached model when
    one exists on disk, or None on timeout / RuntimeError.
    """
    X, y, test_set, evodag, tmpdir, init_time = X_y_evodag
    cache_file = None
    if tmpdir is not None:
        cache_file = os.path.join(tmpdir, '%s.evodag' % evodag['seed'])
        if os.path.isfile(cache_file):
            # reuse a previously fitted model for this seed when readable
            with gzip.open(cache_file) as fpt:
                try:
                    return pickle.load(fpt)
                except Exception:
                    pass
    try:
        budget = evodag['time_limit']
    except KeyError:
        pass
    else:
        if budget is not None:
            # shrink the per-model budget by the time already spent
            evodag['time_limit'] = budget - (time() - init_time)
            if evodag['time_limit'] < 2:
                LOGGER.info('Not enough time (seed: %s) ' % evodag['seed'])
                return None
    try:
        evodag = EvoDAG(**evodag)
        evodag.fit(X, y, test_set=test_set)
    except RuntimeError:
        return None
    m = evodag.model
    gc.collect()
    if tmpdir is not None:
        with gzip.open(cache_file, 'w') as fpt:
            pickle.dump(m, fpt)
    return m
def decision_function(model_X):
    """Pool worker: evaluate one model's decision function, keeping its index
    so the caller can restore the original ordering."""
    index, model, data = model_X
    return [index, model.decision_function(data)]
def predict_proba(model_X):
    """Pool worker: evaluate one model's predict_proba, keeping its index
    so the caller can restore the original ordering."""
    index, model, data = model_X
    return [index, model.predict_proba(data)]
class Model(object):
    """Object to store the necessary elements to make predictions
    based on an individual."""

    def __init__(self, trace, hist, nvar=None, classifier=True, labels=None,
                 probability_calibration=None, nclasses=None):
        """
        :param trace: indices (into *hist*) of the nodes forming the model
        :param hist: list of nodes produced during the evolution
        :param nvar: number of input variables used during training
        :param classifier: whether this is a classification task
        :param labels: optional class labels used to decode predictions
        :param probability_calibration: optional calibrator with predict_proba
        :param nclasses: number of classes
        """
        self._classifier = classifier
        self._trace = trace
        self._hist = hist
        # map original node index -> position inside the compacted trace
        self._map = {}
        for k, v in enumerate(self._trace):
            self._map[v] = k
        self._hy_test = self._hist[self._trace[-1]].hy_test
        self._hist = [self.transform(self._hist[x].tostore()) for x in
                      self._trace]
        self._labels = labels
        self._nvar = nvar
        self._probability_calibration = probability_calibration
        self._nclasses = nclasses

    @property
    def nclasses(self):
        "Number of classes"
        return self._nclasses

    @property
    def nvar(self):
        "Number of input variables the model expects"
        return self._nvar

    @property
    def multiple_outputs(self):
        return self._hist[0]._multiple_outputs

    @property
    def classifier(self):
        "whether this is classification or regression task"
        return self._classifier

    @property
    def fitness_vs(self):
        "Fitness in the validation set"
        return self._hist[-1].fitness_vs

    @property
    def size(self):
        "Number of nodes composing the model"
        return len(self._hist)

    @property
    def height(self):
        "Height of the model DAG"
        return self._hist[-1].height

    def inputs(self, counter=None):
        """Count how often each input variable is used by the model."""
        from collections import Counter
        if counter is None:
            counter = Counter()
        for node in self._hist:
            if node.height == 0:
                if isinstance(node._variable, list):
                    for _ in node._variable:
                        counter[_] += 1
                else:
                    counter[node._variable] += 1
        return counter

    def transform(self, v):
        """Rewrite node *v*'s variable references to trace-local indices."""
        if v.height == 0:
            return v
        if v.nargs == 1:
            v.variable = self._map[v.variable]
        else:
            v.variable = [self._map[x] for x in v.variable]
        return v

    def predict_proba(self, X, **kwargs):
        """Class probabilities computed via the probability calibration."""
        X = self.decision_function(X, **kwargs)
        return self._probability_calibration.predict_proba(X)

    def decision_function(self, X, **kwargs):
        "Decision function i.e. the raw data of the prediction"
        if X is None:
            # no data supplied: return the cached test-set prediction
            return self._hy_test
        X = self.convert_features(X)
        if len(X) < self.nvar:
            _ = 'Number of variables differ, trained with %s given %s' % (self.nvar, len(X))
            raise RuntimeError(_)
        hist = self._hist
        for node in hist:
            if node.height:
                node.eval(hist)
            else:
                node.eval(X)
                node.normalize()
        r = node.hy
        # release intermediate outputs to reduce the memory footprint
        for i in hist[:-1]:
            i.hy = None
            i.hy_test = None
        gc.collect()
        return r

    def predict(self, X, **kwargs):
        """Predict labels (classifier) or values (regressor) for *X*."""
        hy = self.decision_function(X, **kwargs)
        if self._classifier:
            for x in hy:
                x.finite(inplace=True)
            # BUG FIX: np.int was removed in NumPy 1.24; the builtin int is
            # the exact equivalent dtype.
            hy = np.array(SparseArray.argmax(hy).full_array(), dtype=int)
            if self._labels is not None:
                hy = self._labels[hy]
        else:
            hy = tonparray(hy)
        return hy

    def graphviz(self, fpt, terminals=True):
        """Write the model DAG in graphviz dot format to *fpt* (path or file)."""
        flag = False
        if isinstance(fpt, str):
            flag = True
            fpt = open(fpt, 'w')
        fpt.write("digraph EvoDAG {\n")
        last = len(self._hist) - 1
        height = self._hist[-1].height
        try:
            # linear map from node height onto colorscheme indices
            b, m = np.linalg.solve([[0, height-1], [1, 1]], [9, 1])
        except np.linalg.LinAlgError:
            # BUG FIX: np.linalg.linalg was a private alias removed in
            # NumPy 2.0; np.linalg.LinAlgError is the public name.
            b, m = 0, 1
        done = {}
        for k, n in enumerate(self._hist):
            if isinstance(n, Function):
                done[k] = 1
                name = n.__class__.__name__
                if n.height == 0:
                    cdn = "n{0} [label=\"{1}\" fillcolor=red style=filled];\n"
                    fpt.write(cdn.format(k, name))
                    continue
                color = int(np.round(n.height * m + b))
                extra = "colorscheme=blues9 style=filled color={0}".format(color)
                if k == last:
                    # the output node is highlighted in green
                    extra = "fillcolor=green style=filled"
                fpt.write("n{0} [label=\"{1}\" {2}];\n".format(k,
                                                              name,
                                                              extra))
                vars = n._variable
                if not isinstance(vars, list):
                    vars = [vars]
                for j in vars:
                    if j in done:
                        fpt.write("n{0} -> n{1};\n".format(k, j))
                    elif terminals:
                        # NOTE(review): this labels node k using n._variable
                        # rather than the individual terminal j; looks
                        # suspicious but is preserved as-is — confirm intent.
                        cdn = "n{0} [label=\"X{1}\" fillcolor=red style=filled];\n"
                        fpt.write(cdn.format(k, n._variable))
                        done[k] = 1
        fpt.write("}\n")
        if flag:
            fpt.close()

    @staticmethod
    def convert_features(v):
        """Convert *v* (ndarray, nested list, Variable list, or SparseArray
        columns) into a list of Variable instances holding training data."""
        if v is None:
            return None
        if isinstance(v[0], Variable):
            return v
        if isinstance(v, np.ndarray):
            X = v.T
        elif isinstance(v[0], SparseArray):
            X = v
        else:
            X = np.array(v).T
        lst = []
        for var, d in enumerate(X):
            v = Variable(var, 1)
            if isinstance(d, SparseArray):
                v._eval_tr = d
            else:
                v._eval_tr = SparseArray.fromlist(d)
            lst.append(v)
        return lst

    @staticmethod
    def convert_features_test_set(vars, v):
        """Attach test-set data *v* to an existing list of Variables."""
        if isinstance(v, np.ndarray):
            X = v.T
        else:
            X = v
        for var, d in zip(vars, X):
            if isinstance(d, SparseArray):
                var._eval_ts = d
            else:
                var._eval_ts = SparseArray.fromlist(d)
class Ensemble(object):
"Ensemble that predicts using the average"
    def __init__(self, models, n_jobs=1, evodags=None, tmpdir=None):
        """
        :param models: list of fitted Model instances, or None to fit() later
        :param n_jobs: number of worker processes used by fit/predict
        :param evodags: list of EvoDAG parameter dicts consumed by fit()
        :param tmpdir: optional directory used to cache fitted models
        """
        self._models = models
        self._n_jobs = n_jobs
        self._evodags = evodags
        self._tmpdir = tmpdir
        if models is not None:
            # derive labels / classifier flag from the provided models
            self._init()
    def fit(self, X, y, test_set=None):
        """Fit one model per EvoDAG configuration, in parallel when
        n_jobs > 1, honoring the optional time_limit budget of the first
        configuration. Returns self."""
        evodags = self._evodags
        init_time = time()
        args = [(X, y, test_set, evodag, self._tmpdir, init_time) for evodag in evodags]
        try:
            time_limit = evodags[0]['time_limit']
        except KeyError:
            time_limit = None
        if time_limit is not None:
            LOGGER.info('time_limit in Ensemble: %0.2f' % time_limit)
        if self._n_jobs == 1:
            _ = [fit(x) for x in tqdm(args)]
            # workers return None on timeout / RuntimeError; drop those
            self._models = [x for x in _ if x is not None]
        else:
            # maxtasksperchild=1 gives every fit a fresh worker process
            p = Pool(self._n_jobs, maxtasksperchild=1)
            self._models = []
            for x in tqdm(p.imap_unordered(fit, args),
                          total=len(args)):
                if x is not None:
                    self._models.append(x)
                # stop collecting once the overall time budget is exhausted
                if time_limit is not None and time() - init_time > time_limit:
                    p.terminate()
                    break
            p.close()
        if self._tmpdir is not None:
            # per-seed cached models are no longer needed
            shutil.rmtree(self._tmpdir)
        self._init()
        if time_limit is not None:
            LOGGER.info('Used time in Ensemble: %0.2f' % (time() - init_time))
        return self
def _init(self):
self._labels = self._models[0]._labels
self._classifier = False
flag = False
if self._models[0]._classifier:
flag = True
self._classifier = flag
    @property
    def nclasses(self):
        # number of classes, delegated to the first model
        return self.models[0].nclasses

    @property
    def probability_calibration(self):
        # True when the members carry a fitted probability calibrator
        return self.models[0]._probability_calibration is not None

    @property
    def models(self):
        "List containing the models that compose the ensemble"
        return self._models

    @property
    def multiple_outputs(self):
        # delegated to the first model
        return self.models[0].multiple_outputs

    @property
    def classifier(self):
        # whether the ensemble solves a classification task
        return self._classifier

    @property
    def fitness_vs(self):
        "Median Fitness in the validation set"
        l = [x.fitness_vs for x in self.models]
        return np.median(l)

    @property
    def size(self):
        # median model size (number of nodes) across members
        l = [x.size for x in self.models]
        return np.median(l)

    @property
    def height(self):
        # median DAG height across members
        l = [x.height for x in self.models]
        return np.median(l)
def inputs(self, counter=None):
from collections import Counter
if counter is None:
counter = Counter()
for m in self.models:
m.inputs(counter=counter)
return counter
    def _decision_function_raw(self, X, cpu_cores=1):
        """Per-model decision functions, optionally in parallel; the result
        preserves the order of self._models."""
        if cpu_cores == 1:
            r = [m.decision_function(X) for m in self._models]
        else:
            p = Pool(cpu_cores, maxtasksperchild=1)
            args = [(k, m, X) for k, m in enumerate(self._models)]
            r = [x for x in tqdm(p.imap_unordered(decision_function,
                                                  args),
                                 total=len(args))]
            # imap_unordered scrambles the order; restore it via index k
            r.sort(key=lambda x: x[0])
            r = [x[1] for x in r]
            p.close()
        return r
def raw_decision_function(self, X):
hy = self._decision_function_raw(X, cpu_cores=self._n_jobs)
if isinstance(hy[0], list):
_ = []
[[_.append(y) for y in x] for x in hy]
hy = _
if self.classifier:
[x.finite(inplace=True) for x in hy]
return np.array([tonparray(x) for x in hy]).T
    def _predict_proba_raw(self, X, cpu_cores=1):
        """Per-model probability predictions, optionally in parallel; the
        result preserves the order of self._models."""
        if cpu_cores == 1:
            r = [m.predict_proba(X) for m in self._models]
        else:
            p = Pool(cpu_cores, maxtasksperchild=1)
            args = [(k, m, X) for k, m in enumerate(self._models)]
            r = [x for x in tqdm(p.imap_unordered(predict_proba,
                                                  args),
                                 total=len(args))]
            # imap_unordered scrambles the order; restore it via index k
            r.sort(key=lambda x: x[0])
            r = [x[1] for x in r]
            p.close()
        return r
def predict_proba(self, X):
if self.probability_calibration:
proba = np.array(self._predict_proba_raw(X, cpu_cores=self._n_jobs))
proba = np.mean(proba, axis=0)
proba /= np.sum(proba, axis=1)[:, np.newaxis]
proba[np.isnan(proba)] = 1. / self.nclasses
return proba
hy = self._decision_function_raw(X, cpu_cores=self._n_jobs)
minlength = len(hy[0])
hy = [SparseArray.argmax(x) for x in hy]
hy = np.array([x.full_array() for x in hy], dtype=np.int).T
hy = [np.bincount(x, minlength=minlength) for x in hy]
return np.array([x / float(x.sum()) for x in hy])
def decision_function(self, X, cpu_cores=1):
cpu_cores = max(cpu_cores, self._n_jobs)
r = self._decision_function_raw(X, cpu_cores=cpu_cores)
if isinstance(r[0], SparseArray):
r = np.array([tonparray(x) for x in r if x.isfinite()])
r = np.median(r, axis=0)
else:
[[x.finite(inplace=True) for x in o] for o in r]
r = np.array([[tonparray(y) for y in x] for x in r])
r = np.median(r, axis=0)
return | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import subprocess
import json
import logging
import argparse
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# apps = ["PinLock", "Camera_To_USBDisk", "FatFs_RAMDisk", "FatFs_uSD", "LCD_AnimatedPictureFromSDCard", "LCD_PicturesFromSDCard", "LwIP_TCP_Echo_Server"]
# Benchmark applications whose ELF section layouts this script processes.
apps = ["PinLock", "Camera_To_USBDisk", "FatFs_uSD", "LCD_AnimatedPictureFromSDCard", "LCD_PicturesFromSDCard", "LwIP_TCP_Echo_Server", "CoreMark"]
# Memory budgets in bytes: two flash sizes and two SRAM sizes.
FLASH_SIZE_1 = 1 * 1024 * 1024
FLASH_SIZE_2 = 2 * 1024 * 1024
SRAM_SIZE_1 = 192 * 1024
SRAM_SIZE_2 = 288 * 1024
# Sections conventionally placed in flash vs. SRAM.
Default_Flash_Sections = [".text", ".rodata", ".data"]
Default_SRAM_Sections = [".data", ".bss"]
# Prefixes of specially placed data sections (see the readelf dump below):
# per-operation data sections and lwIP memory-pool sections respectively.
operation_data_section_prefix = "_Operation"
heap_data_section_prefix = "memp_"
def store_json_to_file(json_to_dump, filename):
    """Write *json_to_dump* to *filename* as sorted, 4-space-indented JSON."""
    text = json.dumps(json_to_dump, sort_keys=True, indent=4)
    with open(filename, "w") as fh:
        fh.write(text)
def load_json_from_file(filename):
    """Read and return the JSON document stored in *filename*."""
    with open(filename, "r") as fh:
        return json.load(fh)
def next_power_2(size):
    """Round *size* up to a power of two, never below 32.

    0 stays 0; any size in (0, 32] becomes 32; larger sizes round up to the
    next power of two (a value already a power of two is unchanged).
    """
    if size == 0:
        return 0
    if 0 < size <= 32:
        return 32
    return 1 << (size - 1).bit_length()
"""
$ readelf -S opec/LwIP_TCP_Echo_Server--oi--final.elf
There are 44 section headers, starting at offset 0x1e18cc:
Section Headers:
[Nr] Name Type Addr Off Size ES Flg Lk Inf Al
[ 0] NULL 00000000 000000 000000 00 0 0 0
[ 1] .isr_vector PROGBITS 08000000 010000 0001b4 00 A 0 0 1
[ 2] .text PROGBITS 080001c0 0101c0 026df0 00 AX 0 0 64
[ 3] .rodata PROGBITS 08040000 040000 005850 00 A 0 0 16
[ 4] .ARM ARM_EXIDX 08045850 045850 000008 00 AL 2 0 4
[ 5] .data PROGBITS 20001000 051000 0005f0 00 WA 0 0 8
[ 6] .stack NOBITS 20000000 180000 001000 00 WA 0 0 1
[ 7] .ccmram PROGBITS 10000000 173764 000000 00 W 0 0 1
[ 8] .bss NOBITS 200015f0 180000 000810 00 WA 0 0 4
[ 9] ._user_heap_stack NOBITS 20001e00 180000 001200 00 WA 0 0 1
[10] memp_ETHDMA PROGBITS 2001c000 05c000 0030a0 00 WA 0 0 4
[11] memp_MEMHEAP PROGBITS 20018000 068000 002820 00 WA 0 0 4
[12] memp_PBUF_POOL PROGBITS 20016000 076000 0018c4 00 WA 0 0 4
[13] memp_TCP_PCB PROGBITS 20015800 085800 000678 00 WA 0 0 4
[14] _Operation_0__dat PROGBITS 20015400 095400 00028c 00 WA 0 0 4
[15] _Operation_1__dat PROGBITS 20015000 0a5000 0002b8 00 WA 0 0 4
[16] _Operation_2__dat PROGBITS 20014c00 0b4c00 000288 00 WA 0 0 4
[17] _Operation_3__dat PROGBITS 20014800 0c4800 000288 00 WA 0 0 4
[18] _Operation_7__dat PROGBITS 20014400 0d4400 000288 00 WA 0 0 4
[19] _Operation_8__dat PROGBITS 20014000 0e4000 000290 00 WA 0 0 4
[20] memp_FRAG_PBUF PROGBITS 20013e00 0f3e00 00016c 00 WA 0 0 4
[21] memp_PBUF PROGBITS 20013d00 103d00 0000a4 00 WA 0 0 4
[22] memp_REASSDATA PROGBITS 20013c00 113c00 0000a4 00 WA 0 0 4
[23] memp_SYS_TIMEOUT PROGBITS 20013b00 123b00 0000a4 00 WA 0 0 4
[24] memp_TCP_PCB_LIST PROGBITS 20013a00 173764 000000 00 W 0 0 1
[25] memp_TCP_SEG PROGBITS 20013900 133900 0000f4 00 WA 0 0 4
[26] memp_UDP_PCB PROGBITS 20013800 143800 0000c4 00 WA 0 0 4
[27] _Operation_4__dat PROGBITS 200137c0 1537c0 000038 00 WA 0 0 4
[28] _Operation_5__dat PROGBITS 20013780 163780 000030 00 WA 0 0 4
[29] _Operation_6__dat PROGBITS 20013760 173760 000004 00 WA 0 0 4
[30] .ARM.attributes ARM_ATTRIBUTES 00000000 173764 000032 00 0 0 1
[31] .debug_str PROGBITS 00000000 173796 00a068 01 MS 0 0 1
[32] .debug_abbrev PROGBITS 00000000 17d7fe 00052a 00 0 0 1
[33] .debug_info PROGBITS 00000000 17dd28 01da9c 00 0 0 1
[34] .debug_ranges PROGBITS 00000000 19b7c4 000018 00 0 0 1
[35] .debug_macinfo PROGBITS 00000000 19b7dc 000001 00 0 0 1
[36] .debug_pubnames PROGBITS 00000000 19b7dd 005d8f 00 0 0 1
[37] .debug_pubtypes PROGBITS 00000000 1a156c 0025e2 00 0 0 1
[38] .comment PROGBITS 00000000 1a3b4e 000015 01 MS 0 0 1
[39] .debug_frame PROGBITS 00000000 1a3b64 005908 00 0 0 4
[40] .debug_line PROGBITS 00000000 1a946c 01ddec 00 0 0 1
[41] .symtab SYMTAB 00000000 1c7258 0126d0 10 42 4430 4
[42] .strtab STRTAB 00000000 1d9928 007d61 00 0 0 1
[43] .shstrtab STRTAB 00000000 1e1689 000241 00 0 0 1
Key to Flags:
W (write), A (alloc), X (execute), M (merge), S (strings), I (info),
L (link order), O (extra OS processing required), G (group), T (TLS),
C (compressed), x (unknown), o (OS specific), E (exclude),
y (purecode), p (processor specific)
"""
def readelf(object_filename):
    """Run ``readelf -S`` on *object_filename* and return the output lines."""
    raw = subprocess.check_output(['readelf', '-S', object_filename])
    return raw.decode().split("\n")
def parse_readelf_data(raw_readelf_data):
    """Parse ``readelf -S`` output lines into a per-section info dict.

    Returns::

        {
            "section_name": {
                "section_addr": x,
                "section_offset": x,
                "section_size": x,
                "section_type": "xxx",
                "frag_size": x
            }
        }
    """
    sections = {}
    for raw_line in raw_readelf_data:
        # Section rows look like "[ 2] .text PROGBITS 080001c0 ...";
        # skip everything else, the header row ("[Nr] ...") and the NULL entry.
        if "[" not in raw_line or "]" not in raw_line:
            continue
        if "Nr" in raw_line or "NULL" in raw_line:
            continue
        fields = raw_line.split("]")[1].split()
        name, sec_type = fields[0], fields[1]
        addr, offset, size = (int(value, 16) for value in fields[2:5])
        sections[name] = {
            "section_type": sec_type,
            "section_addr": addr,
            "section_offset": offset,
            "section_size": size,
            # Padding wasted when the section is rounded up to a
            # power-of-two placement (see next_power_2).
            "frag_size": next_power_2(size) - size,
        }
    return sections
"""
struct Pointer_Field {
uint32_t first_index;
uint32_t second_index;
};
struct Shadow_Data_TBL {
uint32_t attr; // deprecated
char **ptr_addr;
char *sha_addr;
char *org_addr;
uint32_t size; // size of shadow variable
uint32_t need_sanitize;
uint32_t offset_of_the_var_to_check;
uint32_t min_value;
uint32_t max_value;
uint32_t has_pointer_fields;
uint32_t pointer_fields_num;
struct Pointer_Field *pointer_fields_ptr;
};
struct Shadow_Stack_TBL {
uint32_t total_size;
uint32_t ptr_num;
struct {
int type; // positive: a stack offset; -1 .. -4: registers r0 - r3
int count; // element count; negative: linked to another argument, absolute value is its index
int size; // size of each element; negative: linked to another argument, absolute value is its index
char *org_addr;
char *new_addr;
}arg[0];
};
struct Sanitization_TBL {
char **ptr_addr;
uint32_t min;
uint32_t max;
};
/**
* @brief Peripheral_Regionlist is used for switching MPU regions when encountered a Memory Management Fault
* start_addr: start address of the private peripheral
* end_addr: end address of the private peripheral
* attr: attribute for uprivileged code, and the encoding rules are below:
* wr
* 0b00: 0: No Access
* 0b01: 1: Read-Only
* 0b10: 2: Write-only
* 0b11: 3: Write&Read
* mpu_rbar: MPU RBAR register value, the region number is set to 0
* mpu_rasr: MPU RASR register value
*/
struct Peripheral_MPU_Region {
uint32_t start_addr;
uint32_t attr;
uint32_t size;
uint32_t mpu_rbar;
uint32_t mpu_rasr;
};
/**
* @brief PPB_Register_Whitelist is used for checked in PPB access
* start_addr: start address of the private peripheral
* end_addr: end address of the private peripheral
* attr: attribute for uprivileged code, and the encoding rules are below:
* wr
* 0b00: 0: No Access
* 0b01: 1: Read-Only
* 0b10: 2: Write-only
* 0b11: 3: Write&Read
*
*/
struct PPB_Register_Whitelist {
uint32_t start_addr;
uint32_t size;
uint32_t attr;
};
struct Operation_Policy {
struct MPU_Region region[8]; // 8 MPU regions for Cortex-M4 MCU
uint32_t current_operation_id;
struct Operations_Data_Section_Info *OpeDataSecInfo_ptr;
uint32_t stack_copy_size;
struct Shadow_Stack_TBL *stbl;
uint32_t peripehral_region_num;
struct Peripheral_MPU_Region *regions_ptr;
uint32_t ppb_whitelist_num;
struct PPB_Register_Whitelist *ppb_ptr;
uint32_t shadowdata_tbl_size;
struct Shadow_Data_TBL shadowdata_tbl[0];
};
struct Operation_Metadata {
struct Operation_Policy *policy;
uint32_t stack_pointer;
};
"""
def calculate_metadata_size(policy):
    """Estimate the total byte size of the OPEC metadata for *policy*.

    Uses 4-byte-field accounting for the metadata structures (cf. the C
    struct definitions in the comment block above): fixed per-operation
    records plus one entry per whitelist / external-data item.
    """
    n_ops = len(policy["Operation"])
    metadata_bytes = 4 * 2 * n_ops            # struct Operation_Metadata
    policy_bytes = 4 * 10 * n_ops             # struct Operation_Policy header
    mpu_bytes = (4 + 4) * 8 * n_ops           # 8 MPU regions per operation
    stack_tbl_bytes = 4 * 3 + 4 * 5 * n_ops   # shadow-stack table
    total = metadata_bytes + policy_bytes + mpu_bytes + stack_tbl_bytes
    ppb_entry_bytes = 4 * 3                   # per PPB whitelist entry
    shadow_entry_bytes = 4 * 5                # per shadow-data entry
    for op_policy in policy["Operation"].values():
        total += ppb_entry_bytes * len(op_policy["Whitelist"])
        total += shadow_entry_bytes * len(op_policy["ExternalData"])
    return total
"""
造成Flash Overhead的原因:
OPEC-Monitor code size
initialization code
instrumented SVCs
Metadata
Operation data section
计算:
(OPEC_.text + OPEC_.data + OEPC_.rodata + OPEC_.memp + OPEC_._Operation) - (baseline_.text + baseline_.data + baseline_.rodata)
"""
def calc_flash_overhead(BASELINE_binary_section_data, OPEC_binary_section_data):
    """Break down the flash overhead of the OPEC binary vs. the baseline.

    Sums the growth of .text/.rodata/.data plus the extra _Operation_* and
    memp_* sections (including their power-of-two fragmentation), and adds
    estimated sizes for the injected initialization code and SVC stubs.
    Returns a dict keyed by overhead category, plus "total".
    """
    def _size(section_data, name):
        return section_data[name]["section_size"]

    text_delta = _size(OPEC_binary_section_data, ".text") - _size(BASELINE_binary_section_data, ".text")
    rodata_delta = _size(OPEC_binary_section_data, ".rodata") - _size(BASELINE_binary_section_data, ".rodata")
    data_delta = (_size(OPEC_binary_section_data, ".data")
                  + OPEC_binary_section_data[".data"]["frag_size"]
                  - _size(BASELINE_binary_section_data, ".data"))
    op_bytes = op_frag = memp_bytes = memp_frag = 0
    op_cnt = memp_cnt = 0
    for name, info in OPEC_binary_section_data.items():
        if name.startswith(operation_data_section_prefix):
            op_cnt += 1
            op_bytes += info["section_size"]
            op_frag += info["frag_size"]
        elif name.startswith(heap_data_section_prefix):
            memp_bytes += info["section_size"]
            memp_frag += info["frag_size"]
            memp_cnt += 1
    # Injected startup code: per-section operation_initdata copies,
    # operation_start, environment_init and the bl back to the old main.
    init_code_bytes = 4 + (8 + 8 + 8 + 4) * (op_cnt + memp_cnt) + (8 + 4) + 4 + 4 + 4 + 4
    svc_stub_bytes = (4 + 4) * op_cnt  # one instrumented SVC pair per operation
    text_delta += svc_stub_bytes + init_code_bytes
    total = (text_delta + rodata_delta + data_delta
             + op_bytes + op_frag + memp_bytes + memp_frag)
    return {
        ".text": text_delta,
        ".rodata": rodata_delta,
        ".data": data_delta,
        ".operation_data_sections": op_bytes + op_frag,
        ".memp_data_sections": memp_bytes + memp_frag,
        "total": total,
    }
"""
SRAM overhead:
operation_data_sections
.data
.bss
memp_sections
计算方式:
(OPEC_.data + OEPC_.bss + OPEC_.memp + operation_data_sections) - (baseline_.data + baseline_.bss + baseline_.rodata)
"""
def calc_sram_overhead(BASELINE_binary_section_data, OPEC_binary_section_data):
delta_data = OPEC_binary_section_data[".data"]["section_size"] + OPEC_binary_section_data[".data"]["frag_size"] - BASELINE_binary_section_data[".data"]["section_size"]
delta_bss = OPEC_binary_section_data[".bss"]["section_size"] - BASELINE_binary_section_data[".bss"]["section_size"]
delta_extra_sections = 0
delta_extra_frags = 0
delta_memp_sections = 0
delta_memp_frags = 0
operation_cnt = 0
memp_cnt = 0
for section, info | |
<gh_stars>0
from __future__ import absolute_import
from __future__ import unicode_literals
import django
from django.test import TestCase
from django.utils.timezone import now
from mock import MagicMock
import job.test.utils as job_test_utils
import queue.test.utils as queue_test_utils
from job.execution.job_exe import RunningJobExecution
from job.tasks.health_task import HealthTask
from job.tasks.pull_task import PullTask
from node.resources.node_resources import NodeResources
from node.resources.resource import Cpus, Disk, Mem
from queue.job_exe import QueuedJobExecution
from scheduler.resources.agent import ResourceSet
from scheduler.resources.offer import ResourceOffer
from scheduler.scheduling.scheduling_node import SchedulingNode
class TestSchedulingNode(TestCase):
    def setUp(self):
        """Initialize Django and the agent id shared by the tests in this class."""
        django.setup()
        # Agent id used when building SchedulingNode instances and job executions.
        self.agent_id = 'agent_1'
def test_accept_job_exe_next_task(self):
"""Tests successfully calling accept_job_exe_next_task()"""
node = MagicMock()
node.hostname = 'host_1'
node.id = 1
node.is_ready_for_new_job = MagicMock()
node.is_ready_for_new_job.return_value = True
node.is_ready_for_next_job_task = MagicMock()
node.is_ready_for_next_job_task.return_value = True
offered_resources = NodeResources([Cpus(10.0), Mem(50.0)])
task_resources = NodeResources()
watermark_resources = NodeResources([Cpus(100.0), Mem(500.0)])
resource_set = ResourceSet(offered_resources, task_resources, watermark_resources)
scheduling_node = SchedulingNode(self.agent_id, node, [], [], resource_set)
job_exe = job_test_utils.create_running_job_exe(agent_id=self.agent_id,
resources=NodeResources([Cpus(1.0), Mem(10.0)]))
waiting_tasks = []
had_waiting_task = scheduling_node.accept_job_exe_next_task(job_exe, waiting_tasks)
self.assertFalse(had_waiting_task)
self.assertEqual(len(scheduling_node._allocated_running_job_exes), 1)
self.assertTrue(scheduling_node.allocated_resources.is_equal(NodeResources([Cpus(1.0), Mem(10.0)])))
self.assertTrue(scheduling_node._remaining_resources.is_equal(NodeResources([Cpus(9.0), Mem(40.0)])))
self.assertListEqual(waiting_tasks, [])
def test_accept_job_exe_next_task_no_jobs(self):
"""Tests calling accept_job_exe_next_task() when job exe tasks are not allowed"""
node = MagicMock()
node.hostname = 'host_1'
node.id = 1
node.is_ready_for_new_job = MagicMock()
node.is_ready_for_new_job.return_value = True
node.is_ready_for_next_job_task = MagicMock()
node.is_ready_for_next_job_task.return_value = False
offered_resources = NodeResources([Cpus(10.0), Mem(50.0)])
task_resources = NodeResources()
watermark_resources = NodeResources([Cpus(100.0), Mem(500.0)])
resource_set = ResourceSet(offered_resources, task_resources, watermark_resources)
scheduling_node = SchedulingNode('agent_1', node, [], [], resource_set)
job_exe = job_test_utils.create_running_job_exe(agent_id=self.agent_id,
resources=NodeResources([Cpus(1.0), Mem(10.0)]))
waiting_tasks = []
had_waiting_task = scheduling_node.accept_job_exe_next_task(job_exe, waiting_tasks)
self.assertFalse(had_waiting_task)
self.assertEqual(len(scheduling_node._allocated_running_job_exes), 0)
self.assertTrue(scheduling_node.allocated_resources.is_equal(NodeResources()))
self.assertTrue(scheduling_node._remaining_resources.is_equal(NodeResources([Cpus(10.0), Mem(50.0)])))
self.assertListEqual(waiting_tasks, [])
def test_accept_job_exe_next_task_canceled(self):
"""Tests calling accept_job_exe_next_task() when job exe gets canceled (no next task)"""
node = MagicMock()
node.hostname = 'host_1'
node.id = 1
node.is_ready_for_new_job = MagicMock()
node.is_ready_for_new_job.return_value = True
node.is_ready_for_next_job_task = MagicMock()
node.is_ready_for_next_job_task.return_value = True
offered_resources = NodeResources([Cpus(10.0), Mem(50.0)])
task_resources = NodeResources()
watermark_resources = NodeResources([Cpus(100.0), Mem(500.0)])
resource_set = ResourceSet(offered_resources, task_resources, watermark_resources)
scheduling_node = SchedulingNode('agent_1', node, [], [], resource_set)
job_exe = job_test_utils.create_running_job_exe(agent_id=self.agent_id,
resources=NodeResources([Cpus(1.0), Mem(10.0)]))
waiting_tasks = []
job_exe.execution_canceled(now())
had_waiting_task = scheduling_node.accept_job_exe_next_task(job_exe, waiting_tasks)
self.assertFalse(had_waiting_task)
self.assertEqual(len(scheduling_node._allocated_running_job_exes), 0)
self.assertTrue(scheduling_node.allocated_resources.is_equal(NodeResources()))
self.assertTrue(scheduling_node._remaining_resources.is_equal(NodeResources([Cpus(10.0), Mem(50.0)])))
self.assertListEqual(waiting_tasks, [])
def test_accept_job_exe_next_task_insufficient_resources(self):
"""Tests calling accept_job_exe_next_task() when there are not enough resources"""
node = MagicMock()
node.hostname = 'host_1'
node.id = 1
node.is_ready_for_new_job = MagicMock()
node.is_ready_for_new_job.return_value = True
node.is_ready_for_next_job_task = MagicMock()
node.is_ready_for_next_job_task.return_value = True
offered_resources = NodeResources([Cpus(10.0), Mem(50.0)])
task_resources = NodeResources()
watermark_resources = NodeResources([Cpus(100.0), Mem(500.0)])
resource_set = ResourceSet(offered_resources, task_resources, watermark_resources)
scheduling_node = SchedulingNode('agent_1', node, [], [], resource_set)
job_exe = job_test_utils.create_running_job_exe(agent_id=self.agent_id,
resources=NodeResources([Cpus(11.0), Mem(10.0)]))
waiting_tasks = []
had_waiting_task = scheduling_node.accept_job_exe_next_task(job_exe, waiting_tasks)
self.assertTrue(had_waiting_task)
self.assertEqual(len(scheduling_node._allocated_running_job_exes), 0)
self.assertTrue(scheduling_node.allocated_resources.is_equal(NodeResources()))
self.assertTrue(scheduling_node._remaining_resources.is_equal(NodeResources([Cpus(10.0), Mem(50.0)])))
self.assertListEqual(waiting_tasks, [job_exe.next_task()])
def test_accept_new_job_exe(self):
"""Tests successfully calling accept_new_job_exe()"""
node = MagicMock()
node.hostname = 'host_1'
node.id = 1
node.is_ready_for_new_job = MagicMock()
node.is_ready_for_new_job.return_value = True
node.is_ready_for_next_job_task = MagicMock()
node.is_ready_for_next_job_task.return_value = True
offered_resources = NodeResources([Cpus(10.0), Mem(50.0)])
task_resources = NodeResources()
watermark_resources = NodeResources([Cpus(100.0), Mem(500.0)])
resource_set = ResourceSet(offered_resources, task_resources, watermark_resources)
scheduling_node = SchedulingNode('agent_1', node, [], [], resource_set)
queue_model = queue_test_utils.create_queue(cpus_required=1.0, mem_required=10.0, disk_in_required=0.0,
disk_out_required=0.0, disk_total_required=0.0)
job_exe = QueuedJobExecution(queue_model)
accepted = scheduling_node.accept_new_job_exe(job_exe)
self.assertTrue(accepted)
self.assertEqual(len(scheduling_node._allocated_queued_job_exes), 1)
self.assertTrue(scheduling_node.allocated_resources.is_equal(NodeResources([Cpus(1.0), Mem(10.0)])))
self.assertTrue(scheduling_node._remaining_resources.is_equal(NodeResources([Cpus(9.0), Mem(40.0)])))
self.assertEqual(job_exe._scheduled_node_id, node.id)
def test_accept_new_job_exe_insufficient_resources(self):
"""Tests calling accept_new_job_exe() when there are not enough resources"""
node = MagicMock()
node.hostname = 'host_1'
node.id = 1
node.is_ready_for_new_job = MagicMock()
node.is_ready_for_new_job.return_value = True
node.is_ready_for_next_job_task = MagicMock()
node.is_ready_for_next_job_task.return_value = True
offered_resources = NodeResources([Cpus(10.0), Mem(50.0)])
task_resources = NodeResources()
watermark_resources = NodeResources([Cpus(100.0), Mem(500.0)])
resource_set = ResourceSet(offered_resources, task_resources, watermark_resources)
scheduling_node = SchedulingNode('agent_1', node, [], [], resource_set)
queue_model = queue_test_utils.create_queue(cpus_required=11.0, mem_required=10.0, disk_in_required=0.0,
disk_out_required=0.0, disk_total_required=0.0)
job_exe = QueuedJobExecution(queue_model)
accepted = scheduling_node.accept_new_job_exe(job_exe)
self.assertFalse(accepted)
self.assertEqual(len(scheduling_node._allocated_queued_job_exes), 0)
self.assertTrue(scheduling_node.allocated_resources.is_equal(NodeResources()))
self.assertTrue(scheduling_node._remaining_resources.is_equal(NodeResources([Cpus(10.0), Mem(50.0)])))
self.assertIsNone(job_exe._scheduled_node_id)
def test_accept_new_job_exe_no_jobs(self):
"""Tests calling accept_new_job_exe() when new job exes are not allowed"""
node = MagicMock()
node.hostname = 'host_1'
node.id = 1
node.is_ready_for_new_job = MagicMock()
node.is_ready_for_new_job.return_value = False
node.is_ready_for_next_job_task = MagicMock()
node.is_ready_for_next_job_task.return_value = True
offered_resources = NodeResources([Cpus(10.0), Mem(50.0)])
task_resources = NodeResources()
watermark_resources = NodeResources([Cpus(100.0), Mem(500.0)])
resource_set = ResourceSet(offered_resources, task_resources, watermark_resources)
scheduling_node = SchedulingNode('agent_1', node, [], [], resource_set)
queue_model = queue_test_utils.create_queue(cpus_required=1.0, mem_required=10.0, disk_in_required=0.0,
disk_out_required=0.0, disk_total_required=0.0)
job_exe = QueuedJobExecution(queue_model)
accepted = scheduling_node.accept_new_job_exe(job_exe)
self.assertFalse(accepted)
self.assertEqual(len(scheduling_node._allocated_queued_job_exes), 0)
self.assertTrue(scheduling_node.allocated_resources.is_equal(NodeResources()))
self.assertTrue(scheduling_node._remaining_resources.is_equal(NodeResources([Cpus(10.0), Mem(50.0)])))
self.assertIsNone(job_exe._scheduled_node_id)
def test_accept_node_tasks(self):
"""Tests successfully calling accept_node_tasks()"""
node = MagicMock()
node.hostname = 'host_1'
node.id = 1
health_task = HealthTask('1234', 'agent_1')
pull_task = PullTask('1234', 'agent_1')
node.is_ready_for_new_job = MagicMock()
node.is_ready_for_new_job.return_value = True
node.is_ready_for_next_job_task = MagicMock()
node.is_ready_for_next_job_task.return_value = True
node.get_next_tasks = MagicMock()
node.get_next_tasks.return_value = [health_task, pull_task]
node_task_resources = NodeResources()
node_task_resources.add(health_task.get_resources())
node_task_resources.add(pull_task.get_resources())
offered_resources = NodeResources([Cpus(100.0), Mem(5000.0)])
expected_remaining_resources = NodeResources()
expected_remaining_resources.add(offered_resources)
expected_remaining_resources.subtract(node_task_resources)
task_resources = NodeResources()
watermark_resources = NodeResources([Cpus(100.0), Mem(5000.0)])
resource_set = ResourceSet(offered_resources, task_resources, watermark_resources)
scheduling_node = SchedulingNode('agent_1', node, [], [], resource_set)
waiting_tasks = []
had_waiting_task = scheduling_node.accept_node_tasks(now(), waiting_tasks)
self.assertFalse(had_waiting_task)
self.assertEqual(len(scheduling_node.allocated_tasks), 2)
self.assertTrue(scheduling_node.allocated_resources.is_equal(node_task_resources))
self.assertTrue(scheduling_node._remaining_resources.is_equal(expected_remaining_resources))
self.assertListEqual(waiting_tasks, [])
def test_accept_node_tasks_insufficient_resources(self):
"""Tests calling accept_node_tasks() when there are not enough resources"""
node = MagicMock()
node.hostname = 'host_1'
node.id = 1
health_task = HealthTask('1234', 'agent_1')
pull_task = PullTask('1234', 'agent_1')
node.is_ready_for_new_job = MagicMock()
node.is_ready_for_new_job.return_value = True
node.is_ready_for_next_job_task = MagicMock()
node.is_ready_for_next_job_task.return_value = True
node.get_next_tasks = MagicMock()
node.get_next_tasks.return_value = [health_task, pull_task]
offered_resources = NodeResources([Cpus(0.0), Mem(50.0)])
task_resources = NodeResources()
watermark_resources = NodeResources([Cpus(100.0), Mem(500.0)])
resource_set = ResourceSet(offered_resources, task_resources, watermark_resources)
scheduling_node = SchedulingNode('agent_1', node, [], [], resource_set)
waiting_tasks = []
had_waiting_task = scheduling_node.accept_node_tasks(now(), waiting_tasks)
self.assertTrue(had_waiting_task)
self.assertEqual(len(scheduling_node.allocated_tasks), 0)
self.assertTrue(scheduling_node.allocated_resources.is_equal(NodeResources()))
self.assertTrue(scheduling_node._remaining_resources.is_equal(offered_resources))
self.assertListEqual(waiting_tasks, [health_task, pull_task])
def test_add_allocated_offers(self):
"""Tests calling add_allocated_offers() when there are enough resources for everything"""
node = MagicMock()
node.hostname = 'host_1'
node.id = 1
health_task = HealthTask('1234', 'agent_1')
pull_task = PullTask('1234', 'agent_1')
node.is_ready_for_new_job = MagicMock()
node.is_ready_for_new_job.return_value = True
node.is_ready_for_next_job_task = MagicMock()
node.is_ready_for_next_job_task.return_value = True
node.get_next_tasks = MagicMock()
node.get_next_tasks.return_value = [health_task, pull_task]
offered_resources = NodeResources([Cpus(100.0), Mem(500.0)])
watermark_resources = NodeResources([Cpus(100.0), Mem(500.0)])
resource_set = ResourceSet(offered_resources, NodeResources(), watermark_resources)
scheduling_node = SchedulingNode('agent_1', node, [], [], resource_set)
running_job_exe_1 = job_test_utils.create_running_job_exe(agent_id=self.agent_id,
resources=NodeResources([Cpus(1.0), Mem(10.0)]))
running_job_exe_2 = job_test_utils.create_running_job_exe(agent_id=self.agent_id,
resources=NodeResources([Cpus(2.0), Mem(20.0)]))
node_task_resources = NodeResources()
node_task_resources.add(health_task.get_resources())
node_task_resources.add(pull_task.get_resources())
all_required_resources = NodeResources()
all_required_resources.add(node_task_resources)
all_required_resources.add(running_job_exe_1.next_task().get_resources())
all_required_resources.add(running_job_exe_2.next_task().get_resources())
expected_remaining_resources = NodeResources()
expected_remaining_resources.add(offered_resources)
expected_remaining_resources.subtract(all_required_resources)
# Set up node with node tasks and job exes (there would never be queued job exes since they would be scheduled
# before add_allocated_offers() was called
scheduling_node.accept_node_tasks(now(), [])
scheduling_node.accept_job_exe_next_task(running_job_exe_1, [])
scheduling_node.accept_job_exe_next_task(running_job_exe_2, [])
self.assertEqual(len(scheduling_node.allocated_tasks), 2)
self.assertEqual(len(scheduling_node._allocated_running_job_exes), 2)
self.assertEqual(len(scheduling_node._allocated_queued_job_exes), 0)
self.assertTrue(scheduling_node.allocated_resources.is_equal(all_required_resources))
# Set up offers (we get back more than we need)
offer_1 = ResourceOffer('offer_1', 'agent_1', '1234', NodeResources([Cpus(1.0)]), now())
offer_2 = ResourceOffer('offer_2', 'agent_1', '1234', all_required_resources, now())
offer_3 = ResourceOffer('offer_3', 'agent_1', '1234', NodeResources([Cpus(7.5), Mem(600.0), Disk(800.0)]),
now())
scheduling_node.add_allocated_offers([offer_1, offer_2, offer_3])
self.assertListEqual(scheduling_node.allocated_offers, [offer_1, offer_2, offer_3])
# All allocated tasks and job exes should still be here
self.assertEqual(len(scheduling_node.allocated_tasks), 2)
self.assertEqual(len(scheduling_node._allocated_running_job_exes), 2)
self.assertEqual(len(scheduling_node._allocated_queued_job_exes), 0)
self.assertTrue(scheduling_node.allocated_resources.is_equal(all_required_resources))
self.assertTrue(scheduling_node._remaining_resources.is_equal(expected_remaining_resources))
def test_add_allocated_offers_remove_job_exes(self):
"""Tests calling add_allocated_offers() when there are not enough resources for the job exes"""
node = MagicMock()
node.hostname = 'host_1'
node.id = 1
health_task = HealthTask('1234', 'agent_1')
pull_task = PullTask('1234', 'agent_1')
node.is_ready_for_new_job = MagicMock()
node.is_ready_for_new_job.return_value = True
node.is_ready_for_next_job_task = MagicMock()
node.is_ready_for_next_job_task.return_value = True
node.get_next_tasks = MagicMock()
node.get_next_tasks.return_value = [health_task, pull_task]
offered_resources = NodeResources([Cpus(100.0), Mem(500.0)])
watermark_resources = NodeResources([Cpus(100.0), Mem(500.0)])
resource_set = ResourceSet(offered_resources, NodeResources(), watermark_resources)
scheduling_node = SchedulingNode('agent_1', node, [], [], resource_set)
running_job_exe_1 = job_test_utils.create_running_job_exe(agent_id=self.agent_id,
resources=NodeResources([Cpus(1.0), Mem(10.0)]))
running_job_exe_2 = job_test_utils.create_running_job_exe(agent_id=self.agent_id,
resources=NodeResources([Cpus(2.0), Mem(20.0)]))
node_task_resources = NodeResources()
node_task_resources.add(health_task.get_resources())
node_task_resources.add(pull_task.get_resources())
all_required_resources = NodeResources()
all_required_resources.add(node_task_resources)
all_required_resources.add(running_job_exe_1.next_task().get_resources())
all_required_resources.add(running_job_exe_2.next_task().get_resources())
expected_remaining_resources = NodeResources()
expected_remaining_resources.add(offered_resources)
expected_remaining_resources.subtract(node_task_resources)
# Set up node with node tasks and job exes (there would never be queued job exes since they would be scheduled
# before add_allocated_offers() was called
scheduling_node.accept_node_tasks(now(), [])
scheduling_node.accept_job_exe_next_task(running_job_exe_1, [])
scheduling_node.accept_job_exe_next_task(running_job_exe_2, [])
self.assertEqual(len(scheduling_node.allocated_tasks), 2)
self.assertEqual(len(scheduling_node._allocated_running_job_exes), 2)
self.assertEqual(len(scheduling_node._allocated_queued_job_exes), 0)
self.assertTrue(scheduling_node.allocated_resources.is_equal(all_required_resources))
# Set up offers (enough for node tasks but not enough for job exes)
offer_1 = ResourceOffer('offer_1', 'agent_1', '1234', NodeResources([Cpus(0.5)]), now())
offer_2 = ResourceOffer('offer_2', 'agent_1', '1234', node_task_resources, now())
scheduling_node.add_allocated_offers([offer_1, offer_2])
self.assertListEqual(scheduling_node.allocated_offers, [offer_1, offer_2])
# All allocated tasks should still be here, but not job exes
self.assertEqual(len(scheduling_node.allocated_tasks), 2)
self.assertEqual(len(scheduling_node._allocated_running_job_exes), 0)
self.assertEqual(len(scheduling_node._allocated_queued_job_exes), 0)
self.assertTrue(scheduling_node.allocated_resources.is_equal(node_task_resources))
self.assertTrue(scheduling_node._remaining_resources.is_equal(expected_remaining_resources))
def test_add_allocated_offers_remove_all_tasks(self):
"""Tests calling add_allocated_offers() when there are not enough resources for the job exes or node tasks"""
node = MagicMock()
node.hostname = 'host_1'
node.id = 1
health_task = HealthTask('1234', 'agent_1')
pull_task = PullTask('1234', 'agent_1')
node.is_ready_for_new_job = MagicMock()
node.is_ready_for_new_job.return_value = True
node.is_ready_for_next_job_task = MagicMock()
node.is_ready_for_next_job_task.return_value = True
node.get_next_tasks = MagicMock()
node.get_next_tasks.return_value = [health_task, pull_task]
offered_resources = NodeResources([Cpus(100.0), Mem(500.0)])
watermark_resources = NodeResources([Cpus(100.0), Mem(500.0)])
resource_set = ResourceSet(offered_resources, NodeResources(), watermark_resources)
scheduling_node = SchedulingNode('agent_1', node, [], [], resource_set)
running_job_exe_1 = job_test_utils.create_running_job_exe(agent_id=self.agent_id,
resources=NodeResources([Cpus(1.0), Mem(10.0)]))
running_job_exe_2 = job_test_utils.create_running_job_exe(agent_id=self.agent_id,
resources=NodeResources([Cpus(2.0), Mem(20.0)]))
node_task_resources = NodeResources()
node_task_resources.add(health_task.get_resources())
node_task_resources.add(pull_task.get_resources())
all_required_resources = NodeResources()
all_required_resources.add(node_task_resources)
all_required_resources.add(running_job_exe_1.next_task().get_resources())
all_required_resources.add(running_job_exe_2.next_task().get_resources())
expected_remaining_resources = NodeResources()
expected_remaining_resources.add(offered_resources)
expected_remaining_resources.subtract(node_task_resources)
# Set up node with node tasks and job exes (there would never be queued job exes since they would be scheduled
# before add_allocated_offers() was called
scheduling_node.accept_node_tasks(now(), [])
scheduling_node.accept_job_exe_next_task(running_job_exe_1, [])
scheduling_node.accept_job_exe_next_task(running_job_exe_2, [])
self.assertEqual(len(scheduling_node.allocated_tasks), 2)
self.assertEqual(len(scheduling_node._allocated_running_job_exes), 2)
self.assertEqual(len(scheduling_node._allocated_queued_job_exes), 0)
self.assertTrue(scheduling_node.allocated_resources.is_equal(all_required_resources))
| |
<filename>openstack_dashboard/dashboards/project/instances/tests.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from django.core.urlresolvers import reverse # noqa
from django import http
from django.test import utils as test_utils
from django.utils.datastructures import SortedDict # noqa
from django.utils.http import urlencode # noqa
from mox import IgnoreArg # noqa
from mox import IsA # noqa
from horizon.workflows import views
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
from openstack_dashboard.dashboards.project.instances import tables
from openstack_dashboard.dashboards.project.instances import tabs
from openstack_dashboard.dashboards.project.instances import workflows
# URL of the project instances index view, resolved once at import time.
INDEX_URL = reverse('horizon:project:instances:index')
# Prefix used to build the per-security-group role field names posted by the
# update-instance workflow form.
SEC_GROUP_ROLE_PREFIX = \
    workflows.update_instance.INSTANCE_SEC_GROUP_SLUG + "_role_"
class InstanceTests(test.TestCase):
    @test.create_stubs({api.nova: ('flavor_list',
                                   'server_list',
                                   'tenant_absolute_limits',
                                   'extension_supported',),
                        api.glance: ('image_list_detailed',),
                        api.network:
                            ('floating_ip_simple_associate_supported',
                             'servers_update_addresses',),
                        })
    def test_index(self):
        """The index view renders every server returned by the Nova API."""
        servers = self.servers.list()
        # NOTE: mox verifies expectations in recording order; this sequence
        # mirrors the order of API calls made by the index view.
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
            .MultipleTimes().AndReturn(self.limits['absolute'])
        api.network.floating_ip_simple_associate_supported(
            IsA(http.HttpRequest)).MultipleTimes().AndReturn(True)
        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res,
                                'project/instances/index.html')
        instances = res.context['instances_table'].data
        self.assertItemsEqual(instances, self.servers.list())
    @test.create_stubs({api.nova: ('server_list',
                                   'tenant_absolute_limits',)})
    def test_index_server_list_exception(self):
        """A Nova error while listing servers yields an empty table and one
        error message, but the page itself still renders."""
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndRaise(self.exceptions.nova)
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
            .MultipleTimes().AndReturn(self.limits['absolute'])
        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'project/instances/index.html')
        self.assertEqual(len(res.context['instances_table'].data), 0)
        self.assertMessageCount(res, error=1)
    @test.create_stubs({api.nova: ('flavor_list',
                                   'server_list',
                                   'flavor_get',
                                   'tenant_absolute_limits',
                                   'extension_supported',),
                        api.glance: ('image_list_detailed',),
                        api.network:
                            ('floating_ip_simple_associate_supported',
                             'servers_update_addresses',),
                        })
    def test_index_flavor_list_exception(self):
        """If the bulk flavor_list call fails, the view falls back to a
        per-server flavor_get lookup and still renders every instance."""
        servers = self.servers.list()
        flavors = self.flavors.list()
        full_flavors = SortedDict([(f.id, f) for f in flavors])
        search_opts = {'marker': None, 'paginate': True}
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndRaise(self.exceptions.nova)
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        # One flavor_get per server is expected once flavor_list has failed.
        for server in servers:
            api.nova.flavor_get(IsA(http.HttpRequest), server.flavor["id"]). \
                AndReturn(full_flavors[server.flavor["id"]])
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
            .MultipleTimes().AndReturn(self.limits['absolute'])
        api.network.floating_ip_simple_associate_supported(
            IsA(http.HttpRequest)).MultipleTimes().AndReturn(True)
        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'project/instances/index.html')
        instances = res.context['instances_table'].data
        self.assertItemsEqual(instances, self.servers.list())
    @test.create_stubs({api.nova: ('flavor_list',
                                   'server_list',
                                   'flavor_get',
                                   'tenant_absolute_limits',
                                   'extension_supported',),
                        api.glance: ('image_list_detailed',),
                        api.network:
                            ('floating_ip_simple_associate_supported',
                             'servers_update_addresses',),
                        })
    def test_index_flavor_get_exception(self):
        """Per-server flavor_get failures produce one error message each but
        the instances table still renders all servers."""
        servers = self.servers.list()
        flavors = self.flavors.list()
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        # UUIDs generated using indexes are unlikely to match
        # any of existing flavor ids and are guaranteed to be deterministic.
        for i, server in enumerate(servers):
            server.flavor['id'] = str(uuid.UUID(int=i))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.flavor_list(IsA(http.HttpRequest)).AndReturn(flavors)
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        for server in servers:
            api.nova.flavor_get(IsA(http.HttpRequest), server.flavor["id"]). \
                AndRaise(self.exceptions.nova)
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
            .MultipleTimes().AndReturn(self.limits['absolute'])
        api.network.floating_ip_simple_associate_supported(
            IsA(http.HttpRequest)).MultipleTimes().AndReturn(True)
        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL)
        instances = res.context['instances_table'].data
        self.assertTemplateUsed(res, 'project/instances/index.html')
        # One error message is expected per server whose flavor lookup failed.
        self.assertMessageCount(res, error=len(servers))
        self.assertItemsEqual(instances, self.servers.list())
    @test.create_stubs({api.nova: ('flavor_list',
                                   'server_list',
                                   'tenant_absolute_limits',
                                   'extension_supported',),
                        api.glance: ('image_list_detailed',),
                        api.network:
                            ('floating_ip_simple_associate_supported',
                             'servers_update_addresses',),
                        })
    def test_index_with_instance_booted_from_volume(self):
        """An instance booted from a volume (empty image) is listed with its
        placeholder image name rather than being dropped."""
        volume_server = self.servers.first()
        # Booted-from-volume servers carry no image reference; the view is
        # expected to show the placeholder image_name instead.
        volume_server.image = ""
        volume_server.image_name = "(not found)"
        servers = self.servers.list()
        servers[0] = volume_server
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
            .MultipleTimes().AndReturn(self.limits['absolute'])
        api.network.floating_ip_simple_associate_supported(
            IsA(http.HttpRequest)).MultipleTimes().AndReturn(True)
        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'project/instances/index.html')
        instances = res.context['instances_table'].data
        self.assertEqual(len(instances), len(servers))
        self.assertContains(res, "(not found)")
    @test.create_stubs({api.nova: ('server_list',
                                   'flavor_list',
                                   'server_delete',),
                        api.glance: ('image_list_detailed',),
                        api.network: ('servers_update_addresses',)})
    def test_terminate_instance(self):
        """Posting the terminate row action calls nova server_delete and
        redirects back to the index."""
        servers = self.servers.list()
        server = servers[0]
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.flavor_list(IgnoreArg()).AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        api.nova.server_delete(IsA(http.HttpRequest), server.id)
        self.mox.ReplayAll()
        # Table row actions are posted as 'table__action__row-id'.
        formData = {'action': 'instances__terminate__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_list',
                                   'flavor_list',
                                   'server_delete',),
                        api.glance: ('image_list_detailed',),
                        api.network: ('servers_update_addresses',)})
    def test_terminate_instance_exception(self):
        """A nova error during server_delete still redirects to the index
        (the action handler is expected to swallow the API failure)."""
        servers = self.servers.list()
        server = servers[0]
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.flavor_list(IgnoreArg()).AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        api.nova.server_delete(IsA(http.HttpRequest), server.id) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        formData = {'action': 'instances__terminate__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_pause',
                                   'server_list',
                                   'flavor_list',
                                   'extension_supported',),
                        api.glance: ('image_list_detailed',),
                        api.network: ('servers_update_addresses',)})
    def test_pause_instance(self):
        """Posting the pause action on an active server calls server_pause."""
        servers = self.servers.list()
        server = servers[0]
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.server_pause(IsA(http.HttpRequest), server.id)
        self.mox.ReplayAll()
        formData = {'action': 'instances__pause__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_pause',
                                   'server_list',
                                   'flavor_list',
                                   'extension_supported',),
                        api.glance: ('image_list_detailed',),
                        api.network: ('servers_update_addresses',)})
    def test_pause_instance_exception(self):
        """A nova error during server_pause still redirects to the index."""
        servers = self.servers.list()
        server = servers[0]
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.server_pause(IsA(http.HttpRequest), server.id) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        formData = {'action': 'instances__pause__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_unpause',
                                   'server_list',
                                   'flavor_list',
                                   'extension_supported',),
                        api.glance: ('image_list_detailed',),
                        api.network: ('servers_update_addresses',)})
    def test_unpause_instance(self):
        """Posting the (toggle) pause action on a PAUSED server is expected
        to call server_unpause rather than server_pause."""
        servers = self.servers.list()
        server = servers[0]
        server.status = "PAUSED"
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.server_unpause(IsA(http.HttpRequest), server.id)
        self.mox.ReplayAll()
        # NOTE: the action name is 'pause' for both directions -- presumably
        # a toggle action that dispatches on server status.
        formData = {'action': 'instances__pause__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_unpause',
                                   'server_list',
                                   'flavor_list',
                                   'extension_supported',),
                        api.glance: ('image_list_detailed',),
                        api.network: ('servers_update_addresses',)})
    def test_unpause_instance_exception(self):
        """A nova error during server_unpause still redirects to the index."""
        servers = self.servers.list()
        server = servers[0]
        server.status = "PAUSED"
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.server_unpause(IsA(http.HttpRequest), server.id) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        formData = {'action': 'instances__pause__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_reboot',
                                   'server_list',
                                   'flavor_list',),
                        api.glance: ('image_list_detailed',),
                        api.network: ('servers_update_addresses',)})
    def test_reboot_instance(self):
        """The reboot action issues a hard reboot (soft_reboot=False)."""
        servers = self.servers.list()
        server = servers[0]
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.server_reboot(IsA(http.HttpRequest), server.id,
                               soft_reboot=False)
        self.mox.ReplayAll()
        formData = {'action': 'instances__reboot__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_reboot',
                                   'server_list',
                                   'flavor_list',),
                        api.glance: ('image_list_detailed',),
                        api.network: ('servers_update_addresses',)})
    def test_reboot_instance_exception(self):
        """A nova error during server_reboot still redirects to the index."""
        servers = self.servers.list()
        server = servers[0]
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.server_reboot(IsA(http.HttpRequest), server.id,
                               soft_reboot=False) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        formData = {'action': 'instances__reboot__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_reboot',
                                   'server_list',
                                   'flavor_list',),
                        api.glance: ('image_list_detailed',),
                        api.network: ('servers_update_addresses',)})
    def test_soft_reboot_instance(self):
        """The soft_reboot action passes soft_reboot=True to server_reboot."""
        servers = self.servers.list()
        server = servers[0]
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.server_reboot(IsA(http.HttpRequest), server.id,
                               soft_reboot=True)
        self.mox.ReplayAll()
        formData = {'action': 'instances__soft_reboot__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_suspend',
                                   'server_list',
                                   'flavor_list',
                                   'extension_supported',),
                        api.glance: ('image_list_detailed',),
                        api.network: ('servers_update_addresses',)})
    def test_suspend_instance(self):
        """Posting the suspend action on an active server calls
        server_suspend with the (unicode) server id."""
        servers = self.servers.list()
        server = servers[0]
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        # The handler passes the id as a unicode string (Python 2 builtin).
        api.nova.server_suspend(IsA(http.HttpRequest), unicode(server.id))
        self.mox.ReplayAll()
        formData = {'action': 'instances__suspend__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_suspend',
                                   'server_list',
                                   'flavor_list',
                                   'extension_supported',),
                        api.glance: ('image_list_detailed',),
                        api.network: ('servers_update_addresses',)})
    def test_suspend_instance_exception(self):
        """A nova error during server_suspend still redirects to the index."""
        servers = self.servers.list()
        server = servers[0]
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.server_suspend(IsA(http.HttpRequest), unicode(server.id)) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        formData = {'action': 'instances__suspend__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_resume',
                                   'server_list',
                                   'flavor_list',
                                   'extension_supported',),
                        api.glance: ('image_list_detailed',),
                        api.network: ('servers_update_addresses',)})
    def test_resume_instance(self):
        """Posting the (toggle) suspend action on a SUSPENDED server is
        expected to call server_resume rather than server_suspend."""
        servers = self.servers.list()
        server = servers[0]
        server.status = "SUSPENDED"
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.server_resume(IsA(http.HttpRequest), unicode(server.id))
        self.mox.ReplayAll()
        # NOTE: the action name is 'suspend' for both directions -- presumably
        # a toggle action that dispatches on server status.
        formData = {'action': 'instances__suspend__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_resume',
                                   'server_list',
                                   'flavor_list',
                                   'extension_supported',),
                        api.glance: ('image_list_detailed',),
                        api.network: ('servers_update_addresses',)})
    def test_resume_instance_exception(self):
        """A nova error during server_resume still redirects to the index."""
        servers = self.servers.list()
        server = servers[0]
        server.status = "SUSPENDED"
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.server_resume(IsA(http.HttpRequest),
                               unicode(server.id)) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        formData = {'action': 'instances__suspend__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ("server_get",
                                   "instance_volumes_list",
                                   "flavor_get"),
                        api.network: ("server_security_groups",
                                      "servers_update_addresses")})
    def test_instance_details_volumes(self):
        """The detail view attaches the server's volume list to the instance
        placed in the template context."""
        server = self.servers.first()
        volumes = [self.volumes.list()[1]]
        api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
        api.network.servers_update_addresses(IsA(http.HttpRequest),
                                             IgnoreArg())
        api.nova.instance_volumes_list(IsA(http.HttpRequest),
                                       server.id).AndReturn(volumes)
        api.nova.flavor_get(IsA(http.HttpRequest), server.flavor['id']) \
            .AndReturn(self.flavors.first())
        api.network.server_security_groups(IsA(http.HttpRequest), server.id) \
            .AndReturn(self.security_groups.first())
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:detail',
                      args=[server.id])
        res = self.client.get(url)
        self.assertItemsEqual(res.context['instance'].volumes, volumes)
@test.create_stubs({api.nova: ("server_get",
"instance_volumes_list",
"flavor_get"),
api.network: ("server_security_groups",
| |
to which this particular claim pertains - eg Property/Casualy insurer
claim # or Workers Compensation case # .
"""
__name__ = 'ExplanationOfBenefit_Related'
    def __init__(self, dict_values=None):
        """Initialize every field to None, then optionally populate the
        instance from *dict_values* via the fhirbase set_attributes helper."""
        self.claim = None
        # reference to Reference: identifier
        self.relationship = None
        # reference to CodeableConcept
        self.reference = None
        # reference to Identifier
        self.object_id = None
        # unique identifier for object class
        if dict_values:
            self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'Identifier',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Related',
'child_variable': 'reference'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ExplanationOfBenefit_Related',
'child_variable': 'claim'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Related',
'child_variable': 'relationship'},
]
class ExplanationOfBenefit_Payee(fhirbase):
    """
    Payee portion of an ExplanationOfBenefit: identifies the party to be
    reimbursed for the claim (subscriber, provider, other).

    Args:
        type: Type of Party to be reimbursed: Subscriber, provider, other.
        resourceType: organization | patient | practitioner | relatedperson.
        party: Party to be reimbursed: Subscriber, provider, other.
    """

    __name__ = 'ExplanationOfBenefit_Payee'

    def __init__(self, dict_values=None):
        self.type = None            # reference to CodeableConcept
        self.resourceType = None    # reference to CodeableConcept
        self.party = None           # reference to Reference: identifier
        self.object_id = None       # unique identifier for object class
        if dict_values:
            self.set_attributes(dict_values)

    def get_relationships(self):
        """Links from this entity to its parent entities."""
        links = (
            ('CodeableConcept', 'object_id', 'resourceType'),
            ('Reference', 'identifier', 'party'),
            ('CodeableConcept', 'object_id', 'type'),
        )
        return [{'parent_entity': parent,
                 'parent_variable': variable,
                 'child_entity': 'ExplanationOfBenefit_Payee',
                 'child_variable': child}
                for parent, variable, child in links]
class ExplanationOfBenefit_Information(fhirbase):
    """
    Supporting-information element of an ExplanationOfBenefit: supplies
    special conditions (employment, accident, onset, attachments, etc.)
    that may influence adjudication of the claim.

    Args:
        sequence: Sequence of the information element which serves to provide
            a link.
        category: The general class of the information supplied: information;
            exception; accident, employment; onset, etc.
        code: System and code pertaining to the specific information regarding
            special conditions relating to the setting, treatment or patient
            for which care is sought which may influence the adjudication.
        timingDate: The date when or period to which this information refers.
        timingPeriod: The date when or period to which this information
            refers.
        valueString: Additional data or information such as resources,
            documents, images etc.
        valueQuantity: Additional data or information such as resources,
            documents, images etc.
        valueAttachment: Additional data or information such as resources,
            documents, images etc.
        valueReference: Additional data or information such as resources,
            documents, images etc.
        reason: Reason code where one is required in addition to the content
            (e.g. additional stay, missing tooth).
    """

    __name__ = 'ExplanationOfBenefit_Information'

    def __init__(self, dict_values=None):
        self.sequence = None         # type: int
        self.category = None         # reference to CodeableConcept
        self.code = None             # reference to CodeableConcept
        self.timingDate = None       # type: str
        self.timingPeriod = None     # reference to Period
        self.valueString = None      # type: str
        self.valueQuantity = None    # reference to Quantity
        self.valueAttachment = None  # reference to Attachment
        self.valueReference = None   # reference to Reference: identifier
        self.reason = None           # reference to Coding
        self.object_id = None        # unique identifier for object class
        if dict_values:
            self.set_attributes(dict_values)

    def get_relationships(self):
        """Links from this entity to its parent entities."""
        links = (
            ('Reference', 'identifier', 'valueReference'),
            ('Attachment', 'object_id', 'valueAttachment'),
            ('Quantity', 'object_id', 'valueQuantity'),
            ('CodeableConcept', 'object_id', 'code'),
            ('Coding', 'object_id', 'reason'),
            ('CodeableConcept', 'object_id', 'category'),
            ('Period', 'object_id', 'timingPeriod'),
        )
        return [{'parent_entity': parent,
                 'parent_variable': variable,
                 'child_entity': 'ExplanationOfBenefit_Information',
                 'child_variable': child}
                for parent, variable, child in links]
class ExplanationOfBenefit_CareTeam(fhirbase):
    """
    CareTeam element of an ExplanationOfBenefit: the practitioners who
    provided the service and their roles on the claim.

    Args:
        sequence: Sequence of careteam which serves to order and provide a
            link.
        provider: The members of the team who provided the overall service.
        responsible: The practitioner who is billing and responsible for the
            claimed services rendered to the patient.
        role: The lead, assisting or supervising practitioner and their
            discipline if a multidisiplinary team.
        qualification: The qualification which is applicable for this service.
    """

    __name__ = 'ExplanationOfBenefit_CareTeam'

    def __init__(self, dict_values=None):
        self.sequence = None       # type: int
        self.provider = None       # reference to Reference: identifier
        self.responsible = None    # type: bool
        self.role = None           # reference to CodeableConcept
        self.qualification = None  # reference to CodeableConcept
        self.object_id = None      # unique identifier for object class
        if dict_values:
            self.set_attributes(dict_values)

    def get_relationships(self):
        """Links from this entity to its parent entities."""
        links = (
            ('Reference', 'identifier', 'provider'),
            ('CodeableConcept', 'object_id', 'role'),
            ('CodeableConcept', 'object_id', 'qualification'),
        )
        return [{'parent_entity': parent,
                 'parent_variable': variable,
                 'child_entity': 'ExplanationOfBenefit_CareTeam',
                 'child_variable': child}
                for parent, variable, child in links]
class ExplanationOfBenefit_Diagnosis(fhirbase):
    """
    Diagnosis element of an ExplanationOfBenefit: a diagnosis attached to
    the claim, with its type and optional package billing code.

    Args:
        sequence: Sequence of diagnosis which serves to provide a link.
        diagnosisCodeableConcept: The diagnosis.
        diagnosisReference: The diagnosis.
        type: The type of the Diagnosis, for example: admitting, primary,
            secondary, discharge.
        packageCode: The package billing code, for example DRG, based on the
            assigned grouping code system.
    """

    __name__ = 'ExplanationOfBenefit_Diagnosis'

    def __init__(self, dict_values=None):
        self.sequence = None                  # type: int
        self.diagnosisCodeableConcept = None  # reference to CodeableConcept
        self.diagnosisReference = None        # reference to Reference: identifier
        self.type = None                      # type: list of CodeableConcept
        self.packageCode = None               # reference to CodeableConcept
        self.object_id = None                 # unique identifier for object class
        if dict_values:
            self.set_attributes(dict_values)

    def get_relationships(self):
        """Links from this entity to its parent entities."""
        links = (
            ('CodeableConcept', 'object_id', 'type'),
            ('Reference', 'identifier', 'diagnosisReference'),
            ('CodeableConcept', 'object_id', 'packageCode'),
            ('CodeableConcept', 'object_id', 'diagnosisCodeableConcept'),
        )
        return [{'parent_entity': parent,
                 'parent_variable': variable,
                 'child_entity': 'ExplanationOfBenefit_Diagnosis',
                 'child_variable': child}
                for parent, variable, child in links]
class ExplanationOfBenefit_Procedure(fhirbase):
    """
    Procedure element of an ExplanationOfBenefit: a procedure performed,
    with its sequence and date.

    Args:
        sequence: Sequence of procedures which serves to order and provide a
            link.
        date: Date and optionally time the procedure was performed.
        procedureCodeableConcept: The procedure code.
        procedureReference: The procedure code.
    """

    __name__ = 'ExplanationOfBenefit_Procedure'

    def __init__(self, dict_values=None):
        self.sequence = None                  # type: int
        self.date = None                      # type: str
        self.procedureCodeableConcept = None  # reference to CodeableConcept
        self.procedureReference = None        # reference to Reference: identifier
        self.object_id = None                 # unique identifier for object class
        if dict_values:
            self.set_attributes(dict_values)

    def get_relationships(self):
        """Links from this entity to its parent entities."""
        links = (
            ('CodeableConcept', 'object_id', 'procedureCodeableConcept'),
            ('Reference', 'identifier', 'procedureReference'),
        )
        return [{'parent_entity': parent,
                 'parent_variable': variable,
                 'child_entity': 'ExplanationOfBenefit_Procedure',
                 'child_variable': child}
                for parent, variable, child in links]
class ExplanationOfBenefit_Insurance(fhirbase):
    """
    Insurance element of an ExplanationOfBenefit: the coverage under which
    the claimed services were provided.

    Args:
        coverage: Reference to the program or plan identification,
            underwriter or payor.
        preAuthRef: A list of references from the Insurer to which these
            services pertain.
    """

    __name__ = 'ExplanationOfBenefit_Insurance'

    def __init__(self, dict_values=None):
        self.coverage = None    # reference to Reference: identifier
        self.preAuthRef = None  # type: list
        self.object_id = None   # unique identifier for object class
        if dict_values:
            self.set_attributes(dict_values)

    def get_relationships(self):
        """Links from this entity to its parent entities."""
        links = (
            ('Reference', 'identifier', 'coverage'),
        )
        return [{'parent_entity': parent,
                 'parent_variable': variable,
                 'child_entity': 'ExplanationOfBenefit_Insurance',
                 'child_variable': child}
                for parent, variable, child in links]
class ExplanationOfBenefit_Accident(fhirbase):
"""
This resource provides: the claim details; adjudication details from
the processing of a Claim; and optionally account balance information,
for informing the subscriber of the benefits provided.
Args:
date: Date of an accident which these services are addressing.
type: Type of accident: work, auto, etc.
locationAddress: Where the accident occurred.
locationReference: Where the accident occurred.
"""
__name__ = 'ExplanationOfBenefit_Accident'
def __init__(self, dict_values=None):
self.date = None
# type: str
self.type = None
# reference to CodeableConcept
self.locationAddress = None
# reference to Address
self.locationReference = None
# reference to Reference: identifier
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'Address',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Accident',
'child_variable': 'locationAddress'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ExplanationOfBenefit_Accident',
'child_variable': | |
GenerateDSNamespaceDefs_.get('ArrayOfCountry')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'ArrayOfCountry':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ArrayOfCountry')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='ArrayOfCountry', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='ArrayOfCountry'):
        """ArrayOfCountry defines no XML attributes, so nothing is written."""
        pass
def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='ArrayOfCountry', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for Country_ in self.Country:
namespaceprefix_ = self.Country_nsprefix_ + ':' if (UseCapturedNS_ and self.Country_nsprefix_) else ''
Country_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Country', pretty_print=pretty_print)
    def build(self, node, gds_collector_=None):
        """Populate this object from an element-tree *node*.

        Captures the node's namespace prefix, then delegates to
        ``buildAttributes`` and ``buildChildren``.

        @return: self, allowing ``obj = ArrayOfCountry().build(node)`` chaining
        """
        self.gds_collector_ = gds_collector_
        if SaveElementTreeNode:
            # Optionally keep the raw parsed node around for debugging.
            self.gds_elementtree_node_ = node
        already_processed = set()
        self.ns_prefix_ = node.prefix
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Local tag name with any namespace URI stripped.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        """ArrayOfCountry defines no XML attributes, so nothing is parsed."""
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
        """Parse one child element; only ``Country`` children are expected."""
        if nodeName_ == 'Country':
            obj_ = Country.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.Country.append(obj_)
            obj_.original_tagname_ = 'Country'
# end class ArrayOfCountry
class Country(GeneratedsSuper):
    """Generated binding for the ``Country`` complex type.

    Holds country metadata -- code, name, ISO code, whether a state and a
    post code are required, post-code validation patterns and the
    international dialling number -- together with the generateDS plumbing
    to serialize itself to XML (``export``) and rebuild itself from a
    parsed element-tree node (``build``).
    """
    __hash__ = GeneratedsSuper.__hash__
    subclass = None
    superclass = None
    def __init__(self, Code=None, Name=None, IsoCode=None, StateRequired=None, PostCodeRequired=None, PostCodeRegex=None, InternationalCallingNumber=None, gds_collector_=None, **kwargs_):
        self.gds_collector_ = gds_collector_
        self.gds_elementtree_node_ = None
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ns_prefix_ = None
        # Each member keeps a companion *_nsprefix_ slot recording the XML
        # namespace prefix captured for it while parsing.
        self.Code = Code
        self.Code_nsprefix_ = None
        self.Name = Name
        self.Name_nsprefix_ = None
        self.IsoCode = IsoCode
        self.IsoCode_nsprefix_ = None
        self.StateRequired = StateRequired
        self.StateRequired_nsprefix_ = None
        self.PostCodeRequired = PostCodeRequired
        self.PostCodeRequired_nsprefix_ = None
        self.PostCodeRegex = PostCodeRegex
        self.PostCodeRegex_nsprefix_ = None
        self.InternationalCallingNumber = InternationalCallingNumber
        self.InternationalCallingNumber_nsprefix_ = None
    def factory(*args_, **kwargs_):
        """Construct a Country, honouring any registered subclass override."""
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, Country)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if Country.subclass:
            return Country.subclass(*args_, **kwargs_)
        else:
            return Country(*args_, **kwargs_)
    factory = staticmethod(factory)
    # --- generated get/set accessor pairs ---
    def get_ns_prefix_(self):
        return self.ns_prefix_
    def set_ns_prefix_(self, ns_prefix):
        self.ns_prefix_ = ns_prefix
    def get_Code(self):
        return self.Code
    def set_Code(self, Code):
        self.Code = Code
    def get_Name(self):
        return self.Name
    def set_Name(self, Name):
        self.Name = Name
    def get_IsoCode(self):
        return self.IsoCode
    def set_IsoCode(self, IsoCode):
        self.IsoCode = IsoCode
    def get_StateRequired(self):
        return self.StateRequired
    def set_StateRequired(self, StateRequired):
        self.StateRequired = StateRequired
    def get_PostCodeRequired(self):
        return self.PostCodeRequired
    def set_PostCodeRequired(self, PostCodeRequired):
        self.PostCodeRequired = PostCodeRequired
    def get_PostCodeRegex(self):
        return self.PostCodeRegex
    def set_PostCodeRegex(self, PostCodeRegex):
        self.PostCodeRegex = PostCodeRegex
    def get_InternationalCallingNumber(self):
        return self.InternationalCallingNumber
    def set_InternationalCallingNumber(self, InternationalCallingNumber):
        self.InternationalCallingNumber = InternationalCallingNumber
    def hasContent_(self):
        """Return True when at least one child element is populated."""
        if (
            self.Code is not None or
            self.Name is not None or
            self.IsoCode is not None or
            self.StateRequired is not None or
            self.PostCodeRequired is not None or
            self.PostCodeRegex is not None or
            self.InternationalCallingNumber is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='Country', pretty_print=True):
        """Write this object as a ``<Country>`` XML element to *outfile*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('Country')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None and name_ == 'Country':
            # Re-use the tag name the object was originally parsed from.
            name_ = self.original_tagname_
        if UseCapturedNS_ and self.ns_prefix_:
            namespaceprefix_ = self.ns_prefix_ + ':'
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='Country')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='Country', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # No populated children: emit a self-closing tag.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='Country'):
        """Country defines no XML attributes, so nothing is written."""
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='Country', fromsubclass_=False, pretty_print=True):
        """Write each populated child element in schema order.

        String members are quoted and encoded, boolean members rendered via
        ``gds_format_boolean``, and ``PostCodeRegex`` (a nested complex
        type) exports itself.
        """
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Code is not None:
            namespaceprefix_ = self.Code_nsprefix_ + ':' if (UseCapturedNS_ and self.Code_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sCode>%s</%sCode>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.Code), input_name='Code')), namespaceprefix_ , eol_))
        if self.Name is not None:
            namespaceprefix_ = self.Name_nsprefix_ + ':' if (UseCapturedNS_ and self.Name_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sName>%s</%sName>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.Name), input_name='Name')), namespaceprefix_ , eol_))
        if self.IsoCode is not None:
            namespaceprefix_ = self.IsoCode_nsprefix_ + ':' if (UseCapturedNS_ and self.IsoCode_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sIsoCode>%s</%sIsoCode>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.IsoCode), input_name='IsoCode')), namespaceprefix_ , eol_))
        if self.StateRequired is not None:
            namespaceprefix_ = self.StateRequired_nsprefix_ + ':' if (UseCapturedNS_ and self.StateRequired_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sStateRequired>%s</%sStateRequired>%s' % (namespaceprefix_ , self.gds_format_boolean(self.StateRequired, input_name='StateRequired'), namespaceprefix_ , eol_))
        if self.PostCodeRequired is not None:
            namespaceprefix_ = self.PostCodeRequired_nsprefix_ + ':' if (UseCapturedNS_ and self.PostCodeRequired_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sPostCodeRequired>%s</%sPostCodeRequired>%s' % (namespaceprefix_ , self.gds_format_boolean(self.PostCodeRequired, input_name='PostCodeRequired'), namespaceprefix_ , eol_))
        if self.PostCodeRegex is not None:
            namespaceprefix_ = self.PostCodeRegex_nsprefix_ + ':' if (UseCapturedNS_ and self.PostCodeRegex_nsprefix_) else ''
            self.PostCodeRegex.export(outfile, level, namespaceprefix_, namespacedef_='', name_='PostCodeRegex', pretty_print=pretty_print)
        if self.InternationalCallingNumber is not None:
            namespaceprefix_ = self.InternationalCallingNumber_nsprefix_ + ':' if (UseCapturedNS_ and self.InternationalCallingNumber_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sInternationalCallingNumber>%s</%sInternationalCallingNumber>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.InternationalCallingNumber), input_name='InternationalCallingNumber')), namespaceprefix_ , eol_))
    def build(self, node, gds_collector_=None):
        """Populate this object from element-tree *node*; returns self."""
        self.gds_collector_ = gds_collector_
        if SaveElementTreeNode:
            self.gds_elementtree_node_ = node
        already_processed = set()
        self.ns_prefix_ = node.prefix
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Local tag name with any namespace URI stripped.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        """Country defines no XML attributes, so nothing is parsed."""
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
        """Parse one child element into the matching member slot."""
        if nodeName_ == 'Code':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Code')
            value_ = self.gds_validate_string(value_, node, 'Code')
            self.Code = value_
            self.Code_nsprefix_ = child_.prefix
        elif nodeName_ == 'Name':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Name')
            value_ = self.gds_validate_string(value_, node, 'Name')
            self.Name = value_
            self.Name_nsprefix_ = child_.prefix
        elif nodeName_ == 'IsoCode':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'IsoCode')
            value_ = self.gds_validate_string(value_, node, 'IsoCode')
            self.IsoCode = value_
            self.IsoCode_nsprefix_ = child_.prefix
        elif nodeName_ == 'StateRequired':
            sval_ = child_.text
            ival_ = self.gds_parse_boolean(sval_, node, 'StateRequired')
            ival_ = self.gds_validate_boolean(ival_, node, 'StateRequired')
            self.StateRequired = ival_
            self.StateRequired_nsprefix_ = child_.prefix
        elif nodeName_ == 'PostCodeRequired':
            sval_ = child_.text
            ival_ = self.gds_parse_boolean(sval_, node, 'PostCodeRequired')
            ival_ = self.gds_validate_boolean(ival_, node, 'PostCodeRequired')
            self.PostCodeRequired = ival_
            self.PostCodeRequired_nsprefix_ = child_.prefix
        elif nodeName_ == 'PostCodeRegex':
            # Nested complex type: delegate to ArrayOfstring's own build.
            obj_ = ArrayOfstring.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.PostCodeRegex = obj_
            obj_.original_tagname_ = 'PostCodeRegex'
        elif nodeName_ == 'InternationalCallingNumber':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'InternationalCallingNumber')
            value_ = self.gds_validate_string(value_, node, 'InternationalCallingNumber')
            self.InternationalCallingNumber = value_
            self.InternationalCallingNumber_nsprefix_ = child_.prefix
# end class Country
class CountryFetchingRequest(GeneratedsSuper):
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
    def __init__(self, ClientInfo=None, Transaction=None, Code=None, gds_collector_=None, **kwargs_):
        """Generated binding constructor for ``CountryFetchingRequest``.

        @param ClientInfo: client credentials/context block for the request
        @param Transaction: transaction descriptor for the request
        @param Code: country code to fetch (presumably; verify against the schema)
        @param gds_collector_: optional generateDS message collector
        """
        self.gds_collector_ = gds_collector_
        self.gds_elementtree_node_ = None
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ns_prefix_ = None
        # Each member keeps a companion *_nsprefix_ slot captured at parse time.
        self.ClientInfo = ClientInfo
        self.ClientInfo_nsprefix_ = None
        self.Transaction = Transaction
        self.Transaction_nsprefix_ = None
        self.Code = Code
        self.Code_nsprefix_ = None
    def factory(*args_, **kwargs_):
        """Construct a CountryFetchingRequest, honouring subclass overrides.

        generateDS subclass-module support: if a subclass module is active
        and registers an override for this class, that class is built instead.
        """
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, CountryFetchingRequest)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if CountryFetchingRequest.subclass:
            return CountryFetchingRequest.subclass(*args_, **kwargs_)
        else:
            return CountryFetchingRequest(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_ns_prefix_(self):
        """Return the captured namespace prefix for this element."""
        return self.ns_prefix_
    def set_ns_prefix_(self, ns_prefix):
        """Set the namespace prefix used when exporting this element."""
        self.ns_prefix_ = ns_prefix
    def get_ClientInfo(self):
        """Return the ``ClientInfo`` child value."""
        return self.ClientInfo
    def set_ClientInfo(self, ClientInfo):
        """Set the ``ClientInfo`` child value."""
        self.ClientInfo = ClientInfo
    def get_Transaction(self):
        """Return the ``Transaction`` child value."""
        return self.Transaction
    def set_Transaction(self, Transaction):
        """Set the ``Transaction`` child value."""
        self.Transaction = Transaction
    def get_Code(self):
        """Return the ``Code`` child value."""
        return self.Code
    def set_Code(self, Code):
        """Set the ``Code`` child value."""
        self.Code = Code
def hasContent_(self):
if (
self.ClientInfo is not None or
self.Transaction is not None or
self.Code is not None
):
return True
else:
return False
    def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='CountryFetchingRequest', pretty_print=True):
        """Write this object and its children as XML to *outfile*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('CountryFetchingRequest')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None and name_ == 'CountryFetchingRequest':
            # Re-use the tag name the object was originally parsed from.
            name_ = self.original_tagname_
        if UseCapturedNS_ and self.ns_prefix_:
            namespaceprefix_ = self.ns_prefix_ + ':'
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='CountryFetchingRequest')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='CountryFetchingRequest', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # No populated children: emit a self-closing tag.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='CountryFetchingRequest'):
        """CountryFetchingRequest defines no XML attributes; nothing to write."""
        pass
def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='CountryFetchingRequest', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.ClientInfo is not None:
namespaceprefix_ = self.ClientInfo_nsprefix_ + ':' if (UseCapturedNS_ and self.ClientInfo_nsprefix_) else ''
self.ClientInfo.export(outfile, level, namespaceprefix_, namespacedef_='', name_='ClientInfo', pretty_print=pretty_print)
if self.Transaction is not None:
namespaceprefix_ = self.Transaction_nsprefix_ + ':' if (UseCapturedNS_ and self.Transaction_nsprefix_) else ''
self.Transaction.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Transaction', pretty_print=pretty_print)
if self.Code is not None:
namespaceprefix_ = self.Code_nsprefix_ + | |
zer is M.scalar_field_algebra().zero()
True
By definition, a scalar field acts on the manifold's points, sending
them to elements of the manifold's base field (real numbers in the
present case)::
sage: N = M.point((0,0), chart=c_uv) # the North pole
sage: S = M.point((0,0), chart=c_xy) # the South pole
sage: E = M.point((1,0), chart=c_xy) # a point at the equator
sage: f(N)
0
sage: f(S)
1
sage: f(E)
1/2
sage: h(E)
H(1, 0)
sage: c(E)
a
sage: zer(E)
0
A scalar field can be compared to another scalar field::
sage: f == g
False
...to a symbolic expression::
sage: f == x*y
False
sage: g == x*y
True
sage: c == a
True
...to a number::
sage: f == 2
False
sage: zer == 0
True
...to anything else::
sage: f == M
False
Standard mathematical functions are implemented::
sage: sqrt(f)
Scalar field sqrt(f) on the 2-dimensional topological manifold M
sage: sqrt(f).display()
sqrt(f): M --> R
on U: (x, y) |--> 1/sqrt(x^2 + y^2 + 1)
on V: (u, v) |--> sqrt(u^2 + v^2)/sqrt(u^2 + v^2 + 1)
::
sage: tan(f)
Scalar field tan(f) on the 2-dimensional topological manifold M
sage: tan(f).display()
tan(f): M --> R
on U: (x, y) |--> sin(1/(x^2 + y^2 + 1))/cos(1/(x^2 + y^2 + 1))
on V: (u, v) |--> sin((u^2 + v^2)/(u^2 + v^2 + 1))/cos((u^2 + v^2)/(u^2 + v^2 + 1))
.. RUBRIC:: Arithmetics of scalar fields
Scalar fields on `M` (resp. `U`) belong to the algebra `C^0(M)`
(resp. `C^0(U)`)::
sage: f.parent()
Algebra of scalar fields on the 2-dimensional topological manifold M
sage: f.parent() is M.scalar_field_algebra()
True
sage: g.parent()
Algebra of scalar fields on the Open subset U of the 2-dimensional
topological manifold M
sage: g.parent() is U.scalar_field_algebra()
True
Consequently, scalar fields can be added::
sage: s = f + c ; s
Scalar field f+c on the 2-dimensional topological manifold M
sage: s.display()
f+c: M --> R
on U: (x, y) |--> (a*x^2 + a*y^2 + a + 1)/(x^2 + y^2 + 1)
on V: (u, v) |--> ((a + 1)*u^2 + (a + 1)*v^2 + a)/(u^2 + v^2 + 1)
and subtracted::
sage: s = f - c ; s
Scalar field f-c on the 2-dimensional topological manifold M
sage: s.display()
f-c: M --> R
on U: (x, y) |--> -(a*x^2 + a*y^2 + a - 1)/(x^2 + y^2 + 1)
on V: (u, v) |--> -((a - 1)*u^2 + (a - 1)*v^2 + a)/(u^2 + v^2 + 1)
Some tests::
sage: f + zer == f
True
sage: f - f == zer
True
sage: f + (-f) == zer
True
sage: (f+c)-f == c
True
sage: (f-c)+c == f
True
We may add a number (interpreted as a constant scalar field) to a scalar
field::
sage: s = f + 1 ; s
Scalar field on the 2-dimensional topological manifold M
sage: s.display()
M --> R
on U: (x, y) |--> (x^2 + y^2 + 2)/(x^2 + y^2 + 1)
on V: (u, v) |--> (2*u^2 + 2*v^2 + 1)/(u^2 + v^2 + 1)
sage: (f+1)-1 == f
True
The number can be represented by a symbolic variable::
sage: s = a + f ; s
Scalar field on the 2-dimensional topological manifold M
sage: s == c + f
True
However if the symbolic variable is a chart coordinate, the addition
is performed only on the chart domain::
sage: s = f + x; s
Scalar field on the 2-dimensional topological manifold M
sage: s.display()
M --> R
on U: (x, y) |--> (x^3 + x*y^2 + x + 1)/(x^2 + y^2 + 1)
on W: (u, v) |--> (u^4 + v^4 + u^3 + (2*u^2 + u)*v^2 + u)/(u^4 + v^4 + (2*u^2 + 1)*v^2 + u^2)
sage: s = f + u; s
Scalar field on the 2-dimensional topological manifold M
sage: s.display()
M --> R
on W: (x, y) |--> (x^3 + (x + 1)*y^2 + x^2 + x)/(x^4 + y^4 + (2*x^2 + 1)*y^2 + x^2)
on V: (u, v) |--> (u^3 + (u + 1)*v^2 + u^2 + u)/(u^2 + v^2 + 1)
The addition of two scalar fields with different domains is possible if
the domain of one of them is a subset of the domain of the other; the
domain of the result is then this subset::
sage: f.domain()
2-dimensional topological manifold M
sage: g.domain()
Open subset U of the 2-dimensional topological manifold M
sage: s = f + g ; s
Scalar field f+g on the Open subset U of the 2-dimensional topological
manifold M
sage: s.domain()
Open subset U of the 2-dimensional topological manifold M
sage: s.display()
f+g: U --> R
(x, y) |--> (x*y^3 + (x^3 + x)*y + 1)/(x^2 + y^2 + 1)
on W: (u, v) |--> (u^6 + 3*u^4*v^2 + 3*u^2*v^4 + v^6 + u*v^3
+ (u^3 + u)*v)/(u^6 + v^6 + (3*u^2 + 1)*v^4 + u^4 + (3*u^4 + 2*u^2)*v^2)
The operation actually performed is `f|_U + g`::
sage: s == f.restrict(U) + g
True
In Sage framework, the addition of `f` and `g` is permitted because
there is a *coercion* of the parent of `f`, namely `C^0(M)`, to
the parent of `g`, namely `C^0(U)` (see
:class:`~sage.manifolds.scalarfield_algebra.ScalarFieldAlgebra`)::
sage: CM = M.scalar_field_algebra()
sage: CU = U.scalar_field_algebra()
sage: CU.has_coerce_map_from(CM)
True
The coercion map is nothing but the restriction to domain `U`::
sage: CU.coerce(f) == f.restrict(U)
True
Since the algebra `C^0(M)` is a vector space over `\RR`, scalar fields
can be multiplied by a number, either an explicit one::
sage: s = 2*f ; s
Scalar field on the 2-dimensional topological manifold M
sage: s.display()
M --> R
on U: (x, y) |--> 2/(x^2 + y^2 + 1)
on V: (u, v) |--> 2*(u^2 + v^2)/(u^2 + v^2 + 1)
or a symbolic one::
sage: s = a*f ; s
Scalar field on the 2-dimensional topological manifold M
sage: s.display()
M --> R
on U: (x, y) |--> a/(x^2 + y^2 + 1)
on V: (u, v) |--> (u^2 + v^2)*a/(u^2 + v^2 + 1)
However, if the symbolic variable is a chart coordinate, the
multiplication is performed only in the corresponding chart::
sage: s = x*f; s
Scalar field on the 2-dimensional topological manifold M
sage: s.display()
M --> R
on U: (x, y) |--> x/(x^2 + y^2 + 1)
on W: (u, v) |--> u/(u^2 + v^2 + 1)
sage: s = u*f; s
Scalar field on the 2-dimensional topological manifold M
sage: s.display()
M --> R
on W: (x, y) |--> x/(x^4 + y^4 + (2*x^2 + 1)*y^2 + x^2)
on V: (u, v) |--> (u^2 + v^2)*u/(u^2 + v^2 + 1)
Some tests::
sage: 0*f == 0
True
sage: 0*f == zer
True
sage: 1*f == f
True
sage: (-2)*f == - f - f
True
The ring multiplication of the algebras `C^0(M)` and `C^0(U)`
is the pointwise multiplication of functions::
sage: s = f*f ; s
Scalar field f*f on the 2-dimensional topological manifold M
sage: s.display()
f*f: M --> R
on U: (x, y) |--> 1/(x^4 + y^4 + 2*(x^2 + 1)*y^2 + 2*x^2 + 1)
on V: (u, v) |--> (u^4 + 2*u^2*v^2 + v^4)/(u^4 + v^4 + 2*(u^2 + 1)*v^2
+ 2*u^2 + 1)
sage: s = g*h ; s
Scalar field g*h on the Open subset U of the 2-dimensional topological
manifold M
sage: s.display()
g*h: U --> R
(x, y) |--> x*y*H(x, y)
on W: (u, v) |--> u*v*H(u/(u^2 + v^2), v/(u^2 + v^2))/(u^4 + 2*u^2*v^2 + v^4)
Thanks to the coercion `C^0(M) \to C^0(U)` mentioned above,
it is possible to multiply a scalar field defined on `M` by a
scalar field defined on `U`, the result being a scalar field
defined on `U`::
sage: f.domain(), g.domain()
(2-dimensional topological manifold | |
tuple contains (the cost value, target output - calculated output)
"""
difference = target_output - self.output
ann_error = 0.5 * difference * difference
# is_categoric = True if self.parent_layer == 2 else False
# if self.is_categoric:
# #logistic cost function
# ann_error = -target_output * math.log(self.output) - (1- target_output) * math.log(1- self.output)
# else: # Numeric
# # Linear cost function
# ann_error = 0.5 * difference * difference
if self.is_categoric:
categorical_extra_divisor = self.parent_layer.parent_network.categorical_extra_divisor
ann_error /= categorical_extra_divisor
return ann_error, difference
def derive_cost(self, target_output):
"""
Returns the derivative of the cost function
@param target_output:
@return:
"""
# if self.is_categoric:
# # logistic cost function
# if target_output == 0:
# return 1. / (1.- self.output)
# else:
# return -1. / self.output
# else: # Numeric
# # Linear cost function
# return self.output - target_output # this form is a bit faster than -(target_output - self.output)
if self.is_categoric:
categorical_extra_divisor = self.parent_layer.parent_network.categorical_extra_divisor
return (self.output - target_output) / categorical_extra_divisor
return self.output - target_output
    def derive_func(self, func):
        """
        returns the derivative of the activation function for the value of the output of the neuron
        @param func: the function type (one of the module-level activation
            constants: SIGMOID, TANH, SOFTMAX, LINEAR, BINARY, ARCTAN,
            SOFTSIGN, SINUSOID, BENT, SOFTPLUS, GAUSIAN, SINC)
        @return: the derivative, depending on the function; None when
            ``func`` matches no known constant
        """
        # func = self.parent_layer.get_activation_function()
        # NOTE(review): every formula below is evaluated at the neuron's
        # stored *output*.  For SIGMOID/SOFTMAX that is the standard
        # output-form derivative, but for e.g. TANH or ARCTAN the formulas
        # treat the stored value as the function's argument -- confirm that
        # this is intended.
        x = self.output
        if func == SIGMOID:
            return x * (1 - x)
        elif func == TANH:
            return (math.cosh(x)) ** -2
        elif func == SOFTMAX:
            # since the SOFTMAX function's derivative is similar to the sigmoid when i = m
            # and is different otherwise. But in ANN, we only deal with the first case
            # Then we will apply the sigmoid derivative.
            return x * (1. - x)
        elif func == LINEAR:
            return 1.
        elif func == BINARY:
            return 0.
        elif func == ARCTAN:
            return 1. / (1. + x * x)
        elif func == SOFTSIGN:
            return 1. / (1. + abs(x)) ** 2
        elif func == SINUSOID:
            return math.cos(x)
        elif func == BENT:
            return 1. + 0.5 * x * (x * x + 1) ** -0.5
        elif func == SOFTPLUS:
            return 1. / (1 + math.exp(-x))
        elif func == GAUSIAN:
            return -2 * x * math.exp(-x * x)
        elif func == SINC:
            # sinc = sin(x)/x has a removable singularity at x == 0,
            # where the derivative's limit is 0.
            if x == 0.:
                return 0
            else:
                return math.cos(x) / x - math.sin(x) / (x * x)
        # Unrecognised activation constant: falls through, returning None.
        pass
def neuron_net_input(self, index):
"""
Returns the net input of a specific neuron
@param index: the index of the neuron
@return: the net input of a specific neuron
"""
return self.inputs[index]
class PlotNeuralNetwork:
"""
Plot a neural network
basic code quoted from the following stack exchange article
http://stackoverflow.com/questions/29888233/how-to-visualize-a-neural-network
"""
    def __init__(self, labels, horizontal__distance_between_layers=10., vertical__distance_between_neurons=2.,
                 neuron_radius=0.5, number_of_neurons_in_widest_layer=9, numeric_categoric_list=None,
                 categoric_is_lighter=True):
        """
        Plots a neural network with varying synapse widths according to weights
        @param labels: A list containing 2 elements, the first is a list of all labels, the second is NumInputs
        @param horizontal__distance_between_layers: as written
        @param vertical__distance_between_neurons: as written
        @param neuron_radius: the radius of the circle representing the neuron
        @param number_of_neurons_in_widest_layer: as written
        @param numeric_categoric_list: adding a list of two lists, the first list shows boolean representation
                of variable types; for example [True, True, False, False] means the first two neurons represent
                Numeric values, while the last two are categoric.
                The first sub-list is for inputs, the other for outputs.
                The gross list is of the form [[True, True, False, False], [False, False, False, True]]
                This list is optional, the default is None.
                If left blank, all the neurons will be colored the same color; otherwise, Categoric neurons
                will be lighter or darker depending on the following parameter
        @param categoric_is_lighter: if True, the categoric neurons color will be lighter than numeric ones,
                and vice versa
        """
        # Layers and biases are registered later via add_layer()/add_bias().
        self.layers = []
        self.biases = []
        self.vertical__distance = vertical__distance_between_neurons
        self.horizontal__distance = horizontal__distance_between_layers
        self.neuron_radius = neuron_radius
        self.widest_layer = number_of_neurons_in_widest_layer
        self.labels = labels  # A list contains 2 elements, the fist is a list of all labels, the second is NumInputs
        # Updated while neurons are placed; used to size the final plot.
        self.highest_neuron = 0
        self.numeric_categoric_list = numeric_categoric_list
        self.categoric_is_lighter = categoric_is_lighter
def add_layer(self, number_of_neurons, layer_type='any', weights=None):
"""
Adds a layer to be drawn
@param number_of_neurons: of the desired layer
@param layer_type: either input, hidden, output, or 'any' for None
@param weights: weights of synapses associated with this layer (input to it)
"""
layer = self.PlotLayer(self, number_of_neurons, weights, layer_type)
self.layers.append(layer)
def add_bias(self, layer1, layer2, weights=None):
"""
@param layer1: the bias will be drawn between which layers (this is the from)
@param layer2: this is the important target layer
@param weights: the weight of bias
"""
from_layer = self.layers[layer1]
to_layer = self.layers[layer2]
bias = to_layer.PlotBias(self, from_layer, to_layer, weights, layer_type='bias')
self.biases.append(bias)
    def draw(self, inputs_label="Inputs Layer", outputs_label="Outputs Layer"):
        """
        Draws the whole network depending on its components
        It will recall similar method from the subclasses
        @param inputs_label: The label that will appear to the left of the diagram corresponding to the inputs layer
        @param outputs_label: The label that will appear to the right of the diagram corresponding to the outputs layer
        """
        # Each layer/bias renders its own circles and synapse lines.
        for layer in self.layers:
            layer.draw()
        for bias in self.biases:
            bias.draw()
        # plt.axis('scaled')
        # for layer in self.layers:
        #     layer.draw_only_neuron()
        # plt.axis('auto')
        # plt.axis('tight')
        # Caption centered horizontally between the first and last layers
        # (the -2.5 offset nudges the text left of center).
        xx = (self.layers[0].xz + self.layers[len(self.layers) - 1].xz) / 2 - 2.5
        yy = 0.5
        # label = str(self.layers[1].number_of_neurons) + ' hidden neurons'
        # NOTE(review): assumes exactly three layers (inputs : hidden : outputs) -- confirm.
        label = "Network Structure is ( " + str(self.layers[0].number_of_neurons) + ' : ' + \
                str(self.layers[1].number_of_neurons) + ' : ' + \
                str(self.layers[2].number_of_neurons) + " )"
        plt.text(xx, yy, label, color='r', zorder=8)
        # max_yz = (self.vertical__distance + self.neuron_radius) * max(n.number_of_neurons for n in self.layers)
        # Vertical extent tracked while neurons were placed.
        max_yz = self.highest_neuron
        # NOTE(review): the x-range [-3, 33] appears tuned for the default
        # spacing/width parameters -- confirm for other configurations.
        plt.axis([-3, 33, -1, max_yz])  # plt.axis([-1, max_x, -1, 31])
        plt.ylabel(inputs_label)
        frame1 = plt.gca()
        frame1.set_xticklabels([])  # frame1.axes.get_xaxis().set_visible(False)
        frame1.set_yticklabels([])
        # A twin y-axis carries the outputs label on the right-hand side.
        ax2 = plt.twinx()
        ax2.set_ylabel(outputs_label)  # ax2.set_xlabel(r"Modified x-axis: $1/(1+X)$")
        ax2.set_yticklabels([])
        # plt.savefig('nesr.png')
        # plt.show()
class PlotLayer:
"""
"""
        def __init__(self, parent_network, number_of_neurons, weights, layer_type):
            """
            Draws a layer in the current network
            @param parent_network: the network that contains the current layer
            @param number_of_neurons: as written
            @param weights: weights of the synapses feeding into this layer
            @param layer_type: either 'inputs', 'outputs', or another tag for hidden layers
            """
            self.parent_net = parent_network
            # Must be resolved *before* this layer is appended to parent_net.layers.
            self.previous_layer = self.__get_previous_layer()
            self.number_of_neurons = number_of_neurons
            self.xz = self.__calculate_layer_xz_position()
            self.weights = weights
            self.neurons = self.__initialize_neurons(number_of_neurons)
            self.layer_type = layer_type
            self.neuron_labels = [''] * number_of_neurons
            # (sic) 'neron_types' -- name kept as other methods reference it.
            self.neron_types = [True] * number_of_neurons # True means Numeric
            if layer_type == 'inputs':
                # labels = [all labels, NumInputs]; the first NumInputs entries are inputs.
                self.neuron_labels = self.parent_net.labels[0][:self.parent_net.labels[1]]
                if self.parent_net.numeric_categoric_list is not None:
                    self.neron_types = self.parent_net.numeric_categoric_list[0]
            elif layer_type == 'outputs':
                # Remaining labels belong to the outputs layer.
                self.neuron_labels = self.parent_net.labels[0][self.parent_net.labels[1]:]
                if self.parent_net.numeric_categoric_list is not None:
                    self.neron_types = self.parent_net.numeric_categoric_list[1]
def __initialize_neurons(self, number_of_neurons):
"""
initializes the neurons of the layer
@param number_of_neurons: of this layer
@return: a list of Neuron Class objects
"""
neurons = []
yz = self.left_margin(number_of_neurons)
for iteration in range(number_of_neurons):
neuron = self.PlotNeuron(yz, self.xz, self)
neurons.append(neuron)
yz += self.parent_net.vertical__distance
if self.parent_net.highest_neuron < yz:
self.parent_net.highest_neuron = yz
return neurons
def left_margin(self, number_of_neurons):
"""
calculate left margin_so_layer_is_centered
(previously it was bottom to top drawing, so the left was bottom)
@param number_of_neurons: of this layer
@return: the margin to be left to the left
"""
return self.parent_net.vertical__distance * (self.parent_net.widest_layer - number_of_neurons) / 2
def __calculate_layer_xz_position(self):
"""
calculates the starting position of the layer
@return: the horizontal coordinate
"""
if self.previous_layer:
return self.previous_layer.xz + self.parent_net.horizontal__distance
else:
return 0
def __get_previous_layer(self):
"""
specifies the previous layer to the current layer if any
@return: the layer if exists, or None
"""
if len(self.parent_net.layers) > 0:
return self.parent_net.layers[-1]
else:
return None
        def __line_between_two_neurons(self, neuron1, neuron2, line_width):
            """
            Draws the synapse joining two neurons as a straight line,
            offset by the neuron radius so it meets the circles' edges.
            @param neuron1: the first neuron to join the synapsis from
            @param neuron2: the second neuron to join the synapsis to
            @param line_width: the width of the line; its sign selects the
                color (negative weights red, non-negative blue)
            """
            # Slope angle of the segment joining the two neuron centers.
            angle = math.atan((neuron2.yz - neuron1.yz) / float(neuron2.xz - neuron1.xz))
            # Radius components along/across the line direction.
            # NOTE(review): the -/+ signs assume a fixed left/right ordering
            # of neuron1 vs neuron2 -- confirm against the calling code.
            yz_adjustment = self.parent_net.neuron_radius * math.sin(angle)
            xz_adjustment = self.parent_net.neuron_radius * math.cos(angle)
            line_yz_data = (neuron1.yz - yz_adjustment, neuron2.yz + yz_adjustment)
            line_xz_data = (neuron1.xz - xz_adjustment, neuron2.xz + xz_adjustment)
            # Red for negative weights, blue otherwise; width is |weight|.
            col = 'r' if line_width < 0 else 'b'
            line = plt.Line2D(line_xz_data, line_yz_data, linewidth=abs(line_width), color=col, alpha=0.7, zorder=1)
            plt.gca().add_line(line)
def draw(self):
"""
A procedure to draw the current layer and put labels if any
"""
for this_layer_neuron_index | |
<a href="/items/96">
<img class="media-object item img-responsive"
src="https://images-na.ssl-images-amazon.com/images/I/51evtEktP1L._SL160_.jpg"
alt="Image of Under the Volcano " style="max-width: 100px;">
</a>
<div>
<a href="https://www.amazon.com/Under-Volcano-Novel-Malcolm-Lowry/dp/0061120154?SubscriptionId=1RN7ZZ7D7SDQHR7TRJG2&tag=shanesherman-20&linkCode=xm2&camp=2025&creative=165953&creativeASIN=0061120154"
target="_blank"><img class="most-popular media-object img-responsive"
src="/assets/buyfromamazon-4a7e69cee46b223ba2216d3666aeddff991f2570e9dbb733620e0dc0619df73e.gif"></a>
</div>
</div>
<div>
<p>
To describe his perennial theme, Lowry once borrowed the words of the critic <NAME>: "the
forces in man which cause him to be terrified of himself." You see exactly what he means in this
cor...
</p>
<div>
- <a href="http://www.time.com/time/2005/100books/0,24459,under_the_volcano,00.html"
target="_blank">Time</a>
</div>
</div>
</div>
<ul class="list-inline finished_and_wishlist_container">
<li class="list-inline-item">
<form id="form_finished_item_96" class="finished_form" action="/items/96/finished_items/toggle"
accept-charset="UTF-8" data-remote="true" method="post"><input name="utf8" type="hidden" value="✓">
<label>
<input id="finished_item_96" type="checkbox" name="checked" value="1">
I've read this book
</label>
</form>
</li>
<li class="list-inline-item">
<form id="form_wishlist_item_96" class="wishlist_form" action="/items/96/wishlist_items/toggle"
accept-charset="UTF-8" data-remote="true" method="post"><input name="utf8" type="hidden" value="✓">
<label>
<input id="wishlist_item_96" type="checkbox" name="checked" value="1">
I want to read this book
</label>
</form>
</li>
</ul>
</div>
</div>
</div>
</li>
<li class="item pb-3 pt-3 border-bottom">
<div class="container">
<div class="row">
<div class="col">
<h4>
76
. <a href="/items/134">A Farewell to Arms</a> by <a href="/authors/4714"><NAME></a>
</h4>
<div class="pb-3">
<div class="pull-left mr-3">
<a href="/items/134">
<img class="media-object item img-responsive"
src="https://images-na.ssl-images-amazon.com/images/I/41Bfcn8UKwL._SL160_.jpg"
alt="Image of A Farewell to Arms" style="max-width: 100px;">
</a>
<div>
<a href="https://www.amazon.com/Farewell-Arms-Ernest-Hemingway/dp/1986158675?SubscriptionId=1RN7ZZ7D7SDQHR7TRJG2&tag=shanesherman-20&linkCode=xm2&camp=2025&creative=165953&creativeASIN=1986158675"
target="_blank"><img class="most-popular media-object img-responsive"
src="/assets/buyfromamazon-4a7e69cee46b223ba2216d3666aeddff991f2570e9dbb733620e0dc0619df73e.gif"></a>
</div>
</div>
<div>
<p>
The novel is told through the point of view of <NAME>, an American serving as an
ambulance driver in the Italian army during World War I.
</p>
<div>
- <a href="http://en.wikipedia.org/wiki/A_Farewell_To_Arms" target="_blank">Wikipedia</a>
</div>
</div>
</div>
<ul class="list-inline finished_and_wishlist_container">
<li class="list-inline-item">
<form id="form_finished_item_134" class="finished_form" action="/items/134/finished_items/toggle"
accept-charset="UTF-8" data-remote="true" method="post"><input name="utf8" type="hidden" value="✓">
<label>
<input id="finished_item_134" type="checkbox" name="checked" value="1">
I've read this book
</label>
</form>
</li>
<li class="list-inline-item">
<form id="form_wishlist_item_134" class="wishlist_form" action="/items/134/wishlist_items/toggle"
accept-charset="UTF-8" data-remote="true" method="post"><input name="utf8" type="hidden" value="✓">
<label>
<input id="wishlist_item_134" type="checkbox" name="checked" value="1">
I want to read this book
</label>
</form>
</li>
</ul>
</div>
</div>
</div>
</li>
<li class="item pb-3 pt-3 border-bottom">
<div class="container">
<div class="row">
<div class="col">
<h4>
77
. <a href="/items/914">Journey to the End of The Night</a> by <a href="/authors/5287"><NAME></a>
</h4>
<div class="pb-3">
<div class="pull-left mr-3">
<a href="/items/914">
<img class="media-object item img-responsive"
src="https://images-na.ssl-images-amazon.com/images/I/41zZqgi4Y2L._SL160_.jpg"
alt="Image of Journey to the End of The Night" style="max-width: 100px;">
</a>
<div>
<a href="https://www.amazon.com/Journey-End-Night-Louis-Ferdinand-C%C3%A9line/dp/0811216543?SubscriptionId=1RN7ZZ7D7SDQHR7TRJG2&tag=shanesherman-20&linkCode=xm2&camp=2025&creative=165953&creativeASIN=0811216543"
target="_blank"><img class="most-popular media-object img-responsive"
src="/assets/buyfromamazon-4a7e69cee46b223ba2216d3666aeddff991f2570e9dbb733620e0dc0619df73e.gif"></a>
</div>
</div>
<div>
<p>
Journey to the End of Night is the first novel of Louis-Ferdinand Céline. This semi-autobiographical
work describes antihero <NAME>. His surname, Bardamu, is derived from the French word...
</p>
<div>
- <a href="http://en.wikipedia.org/wiki/Journey_to_the_End_of_the_Night"
target="_blank">Wikipedia</a>
</div>
</div>
</div>
<ul class="list-inline finished_and_wishlist_container">
<li class="list-inline-item">
<form id="form_finished_item_914" class="finished_form" action="/items/914/finished_items/toggle"
accept-charset="UTF-8" data-remote="true" method="post"><input name="utf8" type="hidden" value="✓">
<label>
<input id="finished_item_914" type="checkbox" name="checked" value="1">
I've read this book
</label>
</form>
</li>
<li class="list-inline-item">
<form id="form_wishlist_item_914" class="wishlist_form" action="/items/914/wishlist_items/toggle"
accept-charset="UTF-8" data-remote="true" method="post"><input name="utf8" type="hidden" value="✓">
<label>
<input id="wishlist_item_914" type="checkbox" name="checked" value="1">
I want to read this book
</label>
</form>
</li>
</ul>
</div>
</div>
</div>
</li>
<li class="item pb-3 pt-3 border-bottom">
<div class="container">
<div class="row">
<div class="col">
<h4>
78
. <a href="/items/455">The Castle</a> by <a href="/authors/4818"><NAME></a>
</h4>
<div class="pb-3">
<div class="pull-left mr-3">
<a href="/items/455">
<img class="media-object item img-responsive"
src="https://images-na.ssl-images-amazon.com/images/I/41Lwv6uYiHL._SL160_.jpg"
alt="Image of The Castle" style="max-width: 100px;">
</a>
<div>
<a href="https://www.amazon.com/Castle-Franz-Kafka/dp/0805211063?SubscriptionId=1RN7ZZ7D7SDQHR7TRJG2&tag=shanesherman-20&linkCode=xm2&camp=2025&creative=165953&creativeASIN=0805211063"
target="_blank"><img class="most-popular media-object img-responsive"
src="/assets/buyfromamazon-4a7e69cee46b223ba2216d3666aeddff991f2570e9dbb733620e0dc0619df73e.gif"></a>
</div>
</div>
<div>
<p>
The Castle is a novel by <NAME>. In it a protagonist, known only as K., struggles to gain
access to the mysterious authorities of a castle who govern the village where he wants to work as a
la...
</p>
<div>
- <a href="http://en.wikipedia.org/wiki/The_Castle_(novel)" target="_blank">Wikipedia</a>
</div>
</div>
</div>
<ul class="list-inline finished_and_wishlist_container">
<li class="list-inline-item">
<form id="form_finished_item_455" class="finished_form" action="/items/455/finished_items/toggle"
accept-charset="UTF-8" data-remote="true" method="post"><input name="utf8" type="hidden" value="✓">
<label>
<input id="finished_item_455" type="checkbox" name="checked" value="1">
I've read this book
</label>
</form>
</li>
<li class="list-inline-item">
<form id="form_wishlist_item_455" class="wishlist_form" action="/items/455/wishlist_items/toggle"
accept-charset="UTF-8" data-remote="true" method="post"><input name="utf8" type="hidden" value="✓">
<label>
<input id="wishlist_item_455" type="checkbox" name="checked" value="1">
I want to read this book
</label>
</form>
</li>
</ul>
</div>
</div>
</div>
</li>
<li class="item pb-3 pt-3 border-bottom">
<div class="container">
<div class="row">
<div class="col">
<h4>
79
. <a href="/items/445">A Sentimental Education</a> by <a href="/authors/4784"><NAME></a>
</h4>
<div class="pb-3">
<div class="pull-left mr-3">
<a href="/items/445">
<img class="media-object item img-responsive"
src="http://ecx.images-amazon.com/images/I/51VcaqVzvXL._SL160_.jpg"
alt="Image of A Sentimental Education" style="max-width: 100px;">
</a>
<div>
<a href="http://www.amazon.com/Sentimental-Education-Oxford-Worlds-Classics/dp/0199686637%3FSubscriptionId%3D1RN7ZZ7D7SDQHR7TRJG2%26tag%3Dshanesherman-20%26linkCode%3Dxm2%26camp%3D2025%26creative%3D165953%26creativeASIN%3D0199686637"
target="_blank"><img class="most-popular media-object img-responsive"
src="/assets/buyfromamazon-4a7e69cee46b223ba2216d3666aeddff991f2570e9dbb733620e0dc0619df73e.gif"></a>
</div>
</div>
<div>
<p>
The novel describes the life of a young man (<NAME>) living through the revolution of 1848
and the founding of the Second French Empire, and his love for an older woman (based on the wife ...
</p>
<div>
- <a href="http://en.wikipedia.org/wiki/Sentimental_Education" target="_blank">Wikipedia</a>
</div>
</div>
</div>
<ul class="list-inline finished_and_wishlist_container">
<li class="list-inline-item">
<form id="form_finished_item_445" class="finished_form" action="/items/445/finished_items/toggle"
accept-charset="UTF-8" data-remote="true" method="post"><input name="utf8" type="hidden" value="✓">
<label>
<input id="finished_item_445" type="checkbox" name="checked" value="1">
I've read this book
</label>
</form>
</li>
<li class="list-inline-item">
<form id="form_wishlist_item_445" class="wishlist_form" action="/items/445/wishlist_items/toggle"
accept-charset="UTF-8" data-remote="true" method="post"><input name="utf8" type="hidden" value="✓">
<label>
<input id="wishlist_item_445" type="checkbox" name="checked" value="1">
I want to read this book
</label>
</form>
</li>
</ul>
</div>
</div>
</div>
</li>
<li class="item pb-3 pt-3 border-bottom">
<div class="container">
<div class="row">
<div class="col">
<h4>
80
. <a href="/items/35">Gone With the Wind </a> by <a href="/authors/4670"><NAME></a>
</h4>
<div class="pb-3">
<div class="pull-left mr-3">
<a href="/items/35">
<img class="media-object item img-responsive"
src="https://images-na.ssl-images-amazon.com/images/I/51y1omNT91L._SL160_.jpg"
alt="Image of Gone With the Wind " style="max-width: 100px;">
</a>
<div>
<a href="https://www.amazon.com/Gone-Wind-Margaret-Mitchell/dp/1451635621?SubscriptionId=1RN7ZZ7D7SDQHR7TRJG2&tag=shanesherman-20&linkCode=xm2&camp=2025&creative=165953&creativeASIN=1451635621"
target="_blank"><img class="most-popular media-object img-responsive"
src="/assets/buyfromamazon-4a7e69cee46b223ba2216d3666aeddff991f2570e9dbb733620e0dc0619df73e.gif"></a>
</div>
</div>
<div>
<p>
Gone With the Wind is set in Jonesboro and Atlanta, Georgia during the American Civil War and
Reconstruction and follows the life of Scarlett O'Hara, the daughter of an Irish immigrant
plantation o...
</p>
<div>
- <a href="http://en.wikipedia.org/wiki/Gone_with_the_Wind" target="_blank">Wikipedia</a>
</div>
</div>
</div>
<ul class="list-inline finished_and_wishlist_container">
<li class="list-inline-item">
<form id="form_finished_item_35" class="finished_form" action="/items/35/finished_items/toggle"
accept-charset="UTF-8" data-remote="true" method="post"><input name="utf8" type="hidden" value="✓">
<label>
<input id="finished_item_35" type="checkbox" name="checked" value="1">
I've read this book
</label>
</form>
</li>
<li class="list-inline-item">
<form id="form_wishlist_item_35" class="wishlist_form" action="/items/35/wishlist_items/toggle"
accept-charset="UTF-8" data-remote="true" method="post"><input name="utf8" type="hidden" value="✓">
<label>
<input id="wishlist_item_35" type="checkbox" name="checked" value="1">
I want to read this book
</label>
</form>
</li>
</ul>
</div>
</div>
</div>
</li>
<li class="item pb-3 pt-3 border-bottom">
<div class="container">
<div class="row">
<div class="col">
<h4>
81
. <a href="/items/207">The Scarlet Letter</a> by <a href="/authors/4782"><NAME></a>
</h4>
<div class="pb-3">
<div class="pull-left mr-3">
<a href="/items/207">
<img class="media-object item img-responsive"
src="https://images-na.ssl-images-amazon.com/images/I/51tQrk2XsSL._SL160_.jpg"
alt="Image of The Scarlet Letter" style="max-width: 100px;">
</a>
<div>
<a href="https://www.amazon.com/Scarlet-Letter-Dover-Thrift-Editions/dp/0486280489?SubscriptionId=1RN7ZZ7D7SDQHR7TRJG2&tag=shanesherman-20&linkCode=xm2&camp=2025&creative=165953&creativeASIN=0486280489"
target="_blank"><img class="most-popular media-object img-responsive"
src="/assets/buyfromamazon-4a7e69cee46b223ba2216d3666aeddff991f2570e9dbb733620e0dc0619df73e.gif"></a>
</div>
</div>
<div>
<p>
<NAME> is a beautiful young woman. She is also an outcast. In the eyes of her neighbors she
has committed an unforgivable sin. Everyone knows that her little daughter, Pearl, is the product
...
</p>
<div>
- <a href="" target="_blank">Publisher </a>
</div>
</div>
</div>
<ul class="list-inline finished_and_wishlist_container">
<li class="list-inline-item">
<form id="form_finished_item_207" class="finished_form" action="/items/207/finished_items/toggle"
accept-charset="UTF-8" data-remote="true" method="post"><input name="utf8" type="hidden" value="✓">
<label>
<input id="finished_item_207" type="checkbox" name="checked" value="1">
I've read this book
</label>
</form>
</li>
<li class="list-inline-item">
<form id="form_wishlist_item_207" class="wishlist_form" action="/items/207/wishlist_items/toggle"
accept-charset="UTF-8" data-remote="true" method="post"><input name="utf8" type="hidden" value="✓">
<label>
<input id="wishlist_item_207" type="checkbox" name="checked" value="1">
I want to read this book
</label>
</form>
</li>
</ul>
</div>
</div>
</div>
</li>
<li class="item pb-3 pt-3 border-bottom">
<div class="container">
<div class="row">
<div class="col">
<h4>
82
. <a href="/items/475">Gargantua and Pantagruel</a> by <a href="/authors/4966"><NAME></a>
</h4>
<div class="pb-3">
<div class="pull-left mr-3">
<a href="/items/475">
<img class="media-object item img-responsive"
src="https://images-na.ssl-images-amazon.com/images/I/51MzBzdOgyL._SL160_.jpg"
alt="Image of Gargantua and Pantagruel" style="max-width: 100px;">
</a>
<div>
<a href="https://www.amazon.com/Gargantua-Pantagruel-Classics-Francois-Rabelais/dp/0140445501?SubscriptionId=1RN7ZZ7D7SDQHR7TRJG2&tag=shanesherman-20&linkCode=xm2&camp=2025&creative=165953&creativeASIN=0140445501"
target="_blank"><img class="most-popular media-object img-responsive"
src="/assets/buyfromamazon-4a7e69cee46b223ba2216d3666aeddff991f2570e9dbb733620e0dc0619df73e.gif"></a>
</div>
</div>
<div>
<p>
The Life of Gargantua and of Pantagruel (in French, La vie de Gargantua et de Pantagruel) is a
connected series of five novels written in the 16th century by <NAME>. It is the story of
t...
</p>
<div>
- <a href="http://en.wikipedia.org/wiki/Gargantua_and_Pantagruel" target="_blank">Wikipedia</a>
</div>
</div>
</div>
<ul class="list-inline finished_and_wishlist_container">
<li class="list-inline-item">
<form id="form_finished_item_475" class="finished_form" action="/items/475/finished_items/toggle"
accept-charset="UTF-8" data-remote="true" method="post"><input name="utf8" type="hidden" value="✓">
<label>
<input id="finished_item_475" type="checkbox" name="checked" value="1">
I've read this book
</label>
</form>
</li>
<li class="list-inline-item">
<form id="form_wishlist_item_475" class="wishlist_form" action="/items/475/wishlist_items/toggle"
accept-charset="UTF-8" data-remote="true" method="post"><input name="utf8" type="hidden" value="✓">
<label>
<input id="wishlist_item_475" type="checkbox" name="checked" value="1">
I want to read this book
</label>
</form>
</li>
</ul>
</div>
</div>
</div>
</li>
<li class="item pb-3 pt-3 border-bottom">
<div class="container">
<div class="row">
<div class="col">
<h4>
83
. <a href="/items/110">Rebecca</a> by <a href="/authors/4735"><NAME></a>
</h4>
<div class="pb-3">
<div class="pull-left mr-3">
<a href="/items/110">
<img class="media-object item img-responsive"
src="https://images-na.ssl-images-amazon.com/images/I/518AjFrjsyL._SL160_.jpg"
alt="Image of Rebecca" style="max-width: 100px;">
</a>
<div>
<a href="https://www.amazon.com/Rebecca-Daphne-Du-Maurier/dp/0380730405?SubscriptionId=1RN7ZZ7D7SDQHR7TRJG2&tag=shanesherman-20&linkCode=xm2&camp=2025&creative=165953&creativeASIN=0380730405"
target="_blank"><img class="most-popular | |
<filename>prediction/saambe/Mutation_pred.py
def usage():
    """Print the command-line help for Mutation_pred to stdout."""
    # fixed typo in the help text: "mutatant" -> "mutant"
    print("Available command-line options:\n"
          " -i PDBfile\n"
          " -c Chain of mutation\n"
          " -r Resid of the mutation\n"
          " -w One letter of wild amino acid\n"
          " -m One letter of mutant amino acid\n"
          " -f Input file\n"
          " -o Output file\n"
          " -h Display this command-line summary")
def net_volume(wild, mutation):
    """Change in residue volume caused by a substitution.

    Unknown one-letter codes contribute 0.

    @param wild: one-letter code of the wild-type residue
    @param mutation: one-letter code of the mutant residue
    @return: str, mutant volume minus wild volume, one decimal place
    """
    volumes = {
        'A': 88.6, 'R': 173.4, 'N': 114.1, 'D': 111.1, 'C': 108.5,
        'E': 138.4, 'Q': 143.8, 'G': 60.1, 'H': 153.2, 'I': 166.7,
        'L': 166.7, 'K': 168.6, 'M': 162.9, 'F': 189.9, 'P': 112.7,
        'S': 89.0, 'T': 116.1, 'W': 227.8, 'Y': 193.6, 'V': 140.0,
    }
    delta = volumes.get(mutation, 0.0) - volumes.get(wild, 0.0)
    return '{:.1f}'.format(delta)
def net_hydrophobicity(wild, mutation):
    """Change in the hydrophobicity score caused by a substitution.

    Unknown one-letter codes contribute 0.

    @param wild: one-letter code of the wild-type residue
    @param mutation: one-letter code of the mutant residue
    @return: str, mutant score minus wild score, one decimal place
    """
    scores = {
        'A': 0.0, 'R': 3.71, 'N': 3.47, 'D': 2.95, 'C': 0.49,
        'E': 1.64, 'Q': 3.01, 'G': 1.72, 'H': 4.76, 'I': -1.56,
        'L': -1.81, 'K': 5.39, 'M': -0.76, 'F': -2.2, 'P': -1.52,
        'S': 1.83, 'T': 1.78, 'W': -0.38, 'Y': -1.09, 'V': -0.78,
    }
    delta = scores.get(mutation, 0.0) - scores.get(wild, 0.0)
    return '{:.1f}'.format(delta)
def flexibility(wild, mutation):
    """Change in the per-residue flexibility score of a substitution.

    Unknown one-letter codes contribute 0.

    @param wild: one-letter code of the wild-type residue
    @param mutation: one-letter code of the mutant residue
    @return: int, mutant score minus wild score
    """
    scores = {
        'A': 1, 'R': 81, 'N': 36, 'D': 18, 'C': 3, 'E': 54, 'Q': 108,
        'G': 1, 'H': 36, 'I': 9, 'L': 9, 'K': 81, 'M': 27, 'F': 18,
        'P': 2, 'S': 3, 'T': 3, 'W': 36, 'Y': 18, 'V': 3,
    }
    return scores.get(mutation, 0) - scores.get(wild, 0)
def mutation_hydrophobicity(wild, mutation):
    """Class-transition code for the hydrophobicity of a substitution.

    Residues are binned into three classes; the returned code identifies
    the (wild class, mutant class) pair, matching the original if-chain.

    @param wild: one-letter code of the wild-type residue
    @param mutation: one-letter code of the mutant residue
    @return: int in 0..8, or None when either residue is outside all bins
    """
    hydrophobic = ('I', 'V', 'L', 'F', 'C', 'M', 'A', 'W')
    neutral = ('G', 'T', 'S', 'Y', 'P', 'H')
    hydrophilic = ('N', 'D', 'Q', 'E', 'K', 'R')
    # row order (wild) and column order (mutant) reproduce the original table
    for row, wild_group in enumerate((hydrophobic, neutral, hydrophilic)):
        if wild in wild_group:
            for col, mut_group in enumerate((neutral, hydrophilic, hydrophobic)):
                if mutation in mut_group:
                    return 3 * row + col
            return None
    return None
def mutation_polarity(wild, mutation):
    """Polarity-class transition code of a substitution.

    Residues are binned into four polarity classes; the returned code
    identifies the (wild class, mutant class) pair, matching the original
    if-chain.

    @param wild: one-letter code of the wild-type residue
    @param mutation: one-letter code of the mutant residue
    @return: int in 0..15, or None when either residue is outside all bins
    """
    basic = ('R', 'H', 'K')
    nonpolar = ('A', 'C', 'G', 'I', 'L', 'M', 'F', 'P', 'W', 'V')
    polar = ('N', 'Q', 'S', 'T', 'Y')
    acidic = ('D', 'E')
    # row order (wild) differs from column order (mutant) in the original
    for row, wild_group in enumerate((basic, nonpolar, polar, acidic)):
        if wild in wild_group:
            for col, mut_group in enumerate((nonpolar, basic, polar, acidic)):
                if mutation in mut_group:
                    return 4 * row + col
            return None
    return None
def mutation_size(wild, mutation):
    """Size-class transition code of a substitution.

    Residues are binned into five size classes; the returned code
    identifies the (wild class, mutant class) pair, matching the original
    if-chain.

    @param wild: one-letter code of the wild-type residue
    @param mutation: one-letter code of the mutant residue
    @return: int in 0..24, or None when either residue is outside all bins
    """
    tiny = ('G', 'A', 'S')
    small = ('C', 'D', 'P', 'N', 'T')
    medium = ('E', 'V', 'Q', 'H')
    large = ('M', 'I', 'L', 'K', 'R')
    aromatic = ('F', 'Y', 'W')
    # row order (wild) and column order (mutant) reproduce the original table
    for row, wild_group in enumerate((tiny, small, medium, large, aromatic)):
        if wild in wild_group:
            for col, mut_group in enumerate((small, medium, large, aromatic, tiny)):
                if mutation in mut_group:
                    return 5 * row + col
            return None
    return None
def mutation_hbonds(wild, mutation):
    """Hydrogen-bonding-class transition code of a substitution.

    Residues are binned into four classes; the returned code identifies
    the (wild class, mutant class) pair, matching the original if-chain.

    @param wild: one-letter code of the wild-type residue
    @param mutation: one-letter code of the mutant residue
    @return: int in 0..15, or None when either residue is outside all bins
    """
    rwk = ('R', 'W', 'K')
    nonpolar = ('A', 'C', 'G', 'I', 'L', 'M', 'F', 'P', 'V')
    polar = ('N', 'Q', 'S', 'T', 'Y', 'H')
    acidic = ('D', 'E')
    # row order (wild) differs from column order (mutant) in the original
    for row, wild_group in enumerate((rwk, nonpolar, polar, acidic)):
        if wild in wild_group:
            for col, mut_group in enumerate((nonpolar, rwk, polar, acidic)):
                if mutation in mut_group:
                    return 4 * row + col
            return None
    return None
def mutation_chemical(wild, mutation):
    """Chemical-class transition code of a substitution.

    Residues are binned into seven chemical groups; the returned code
    identifies the (wild group, mutant group) pair.  Note 'C' appears in
    two of the original groups ((C,R,H,K) and (C,M)); the scan order below
    makes the first listed group win, exactly as in the original if-chain.

    @param wild: one-letter code of the wild-type residue
    @param mutation: one-letter code of the mutant residue
    @return: int in 0..48, or None when either residue matches no group
    """
    aliphatic = ('A', 'G', 'I', 'L', 'P', 'V')
    basic = ('C', 'R', 'H', 'K')
    amide = ('N', 'Q')
    acidic = ('D', 'E')
    sulfur = ('C', 'M')
    hydroxyl = ('S', 'T')
    aromatic = ('F', 'W', 'Y')
    wild_order = (aliphatic, basic, amide, acidic, sulfur, hydroxyl, aromatic)
    mut_order = (basic, amide, acidic, sulfur, hydroxyl, aromatic, aliphatic)
    for row, wild_group in enumerate(wild_order):
        if wild in wild_group:
            for col, mut_group in enumerate(mut_order):
                if mutation in mut_group:
                    return 7 * row + col
            return None
    return None
def mutation_ala(wild, mutation):
    """1 when the substitution involves alanine on either side, else 0.

    @param wild: one-letter code of the wild-type residue
    @param mutation: one-letter code of the mutant residue
    @return: int flag (0 or 1)
    """
    return 1 if 'A' in (wild, mutation) else 0
def translate_aa(three_letter):
    """Convert a three-letter amino-acid code to its one-letter code.

    @param three_letter: uppercase three-letter residue code
    @return: the one-letter code
    @raise KeyError: when the code is not one of the 20 standard residues
    """
    one_letter = {
        'ALA': 'A', 'ARG': 'R', 'ASN': 'N', 'ASP': 'D', 'CYS': 'C',
        'GLU': 'E', 'GLN': 'Q', 'GLY': 'G', 'HIS': 'H', 'ILE': 'I',
        'LEU': 'L', 'LYS': 'K', 'MET': 'M', 'PHE': 'F', 'PRO': 'P',
        'SER': 'S', 'THR': 'T', 'TRP': 'W', 'TYR': 'Y', 'VAL': 'V',
    }
    return one_letter[three_letter]
def mutation_aa_label(three_letter):
    """Numeric label ('1'..'20', as a string) of a three-letter residue code.

    Anything unrecognised (including the integer 0 used as a gap marker by
    the sequence helpers) maps to the integer 0.

    @param three_letter: three-letter residue code, or a gap marker
    @return: str label '1'..'20', or int 0
    """
    labels = {
        'ALA': '1', 'ARG': '2', 'ASN': '3', 'ASP': '4', 'CYS': '5',
        'GLU': '6', 'GLN': '7', 'GLY': '8', 'HIS': '9', 'ILE': '10',
        'LEU': '11', 'LYS': '12', 'MET': '13', 'PHE': '14', 'PRO': '15',
        'SER': '16', 'THR': '17', 'TRP': '18', 'TYR': '19', 'VAL': '20',
    }
    return labels.get(three_letter, 0)
def mutation_type(wild, mutation):
    """Unique label in 1..380 for each ordered (wild, mutation) pair.

    Replaces the original O(20*20) double loop with a direct index
    computation that reproduces its exact numbering: pairs are counted in
    row-major order over the fixed residue list, skipping identity pairs.

    @param wild: one-letter code of the wild-type residue
    @param mutation: one-letter code of the mutant residue
    @return: int label, or 0 when wild == mutation or either code is unknown
    """
    order = ['A', 'F', 'C', 'D', 'N', 'E', 'Q', 'G', 'H', 'L',
             'I', 'K', 'M', 'P', 'R', 'S', 'T', 'V', 'W', 'Y']
    if wild == mutation or wild not in order or mutation not in order:
        return 0
    row = order.index(wild)
    col = order.index(mutation)
    # 19 valid (non-identity) pairs per row; inside the row, columns past
    # the skipped identity pair are shifted down by one.
    return 19 * row + (col + 1 if col < row else col)
#def mutation_pdb(pdbid):
# from Bio.PDB import PDBList
# pdbl = PDBList()
# pdbl.retrieve_pdb_file(pdbid,file_format='pdb',pdir='.',overwrite=True)
#def mutation_pdb(pdbid):
# null_value=os.system('wget \'https://files.rcsb.org/view/'+pdbid+'.pdb\' -O pdb'+pdbid+'.ent -q')
def mutation_pdb(pdbid):
    """Download a PDB entry from the RCSB and save it to a file named pdbid.

    The original used the Python-2-only urllib.urlretrieve; this version
    works on both Python 2 and Python 3.

    @param pdbid: four-character PDB identifier (also used as the filename)
    """
    try:
        from urllib.request import urlretrieve  # Python 3
    except ImportError:
        from urllib import urlretrieve  # Python 2
    url = "https://files.rcsb.org/view/" + pdbid + ".pdb"
    urlretrieve(url, pdbid)
def mutation_sequence(pdbid, resid, chain):
    """Collect the 11-residue sequence window centred on the mutation site.

    Fixes two Python 3 incompatibilities of the original: `filter()` now
    returns an iterator (so `int(filter(...))` crashed) and `/` is true
    division (so window residue ids became floats and never matched the
    PDB text).  Files are now opened with `with` so handles are closed.

    @param pdbid: path of the PDB file to parse
    @param resid: residue id of the mutation (string; may carry extra
        non-digit characters, which are stripped for the window arithmetic)
    @param chain: chain identifier of the mutation
    @return: (list of 11 residue names with 0 for gaps,
              [x, y, z] of the mutated CA atom,
              error flag: 1 when the mutation CA was not found, else 0)
    """
    window = 11  # how many sequence positions to use
    half = (window - 1) // 2  # integer division keeps residue ids ints
    # keep only sign and digits so ids like '100A' become comparable ints
    resid_num = int(''.join(ch for ch in resid.strip() if ch in '-0123456789'))
    resid_label = [resid_num - half + i for i in range(window)]
    resid_label_aa = [0] * window
    mutation_coordinate = []
    label_index = 1  # cleared once the mutation CA has been located
    for i in range(window):
        with open(pdbid) as handle:
            for line in handle:
                pdbstr = line.strip()
                if pdbstr[0:4] != "ATOM" or pdbstr[21:22] != chain:
                    continue
                # match either the numeric window id or (until found once)
                # the literal mutation resid, as in the original
                if pdbstr[22:27].strip() == str(resid_label[i]) or \
                        (pdbstr[22:27].strip() == str(resid) and label_index == 1):
                    if pdbstr[13:15] == "CA":
                        if pdbstr[22:27].strip() == str(resid):
                            mutation_coordinate = [float(pdbstr[29:38].strip()),
                                                   float(pdbstr[38:46].strip()),
                                                   float(pdbstr[46:55].strip())]
                            label_index = 0
                        resid_label_aa[i] = pdbstr[17:20]
                        break
    error_index = 0 if len(mutation_coordinate) == 3 else 1
    return resid_label_aa, mutation_coordinate, error_index
def mutation_distance(pdbid, chain, mutation_coordinate):
    """Residue names of the 10 CA atoms on OTHER chains closest to the site.

    Fixes a Python 3 incompatibility of the original: `zip()` returns an
    iterator, so `zip(...).sort()` crashed; `sorted()` works on both
    versions and is equally stable.  The two duplicated ranking branches
    are unified and the file handle is closed via `with`.

    @param pdbid: path of the PDB file to parse
    @param chain: chain of the mutation (atoms on this chain are skipped)
    @param mutation_coordinate: [x, y, z] of the mutated CA atom
    @return: list of exactly 10 entries, nearest first; each is a
        three-letter residue name when its distance is <= 10 A, else 0
    """
    sequ_num = 10  # number of neighbour slots in the feature vector
    names = []
    distances = []
    mx, my, mz = mutation_coordinate
    with open(pdbid) as handle:
        for line in handle:
            pdbstr = line.strip()
            if pdbstr[0:4] != "ATOM" or pdbstr[13:15] != "CA" or pdbstr[21:22] == chain:
                continue
            dx = mx - float(pdbstr[29:38].strip())
            dy = my - float(pdbstr[38:46].strip())
            dz = mz - float(pdbstr[46:55].strip())
            names.append(pdbstr[17:20])
            distances.append(np.sqrt(np.square(dx) + np.square(dy) + np.square(dz)))
    # stable sort by distance, ties kept in file order (as list.sort did)
    ranked = sorted(zip(distances, range(len(distances))), key=lambda t: t[0])
    resid_label_dis_aa = []
    for dist, idx in ranked[:sequ_num]:
        resid_label_dis_aa.append(names[idx] if dist <= 10 else 0)
    while len(resid_label_dis_aa) < sequ_num:
        resid_label_dis_aa.append(0)
    return resid_label_dis_aa
def mutation_pdb_information(pdbid):
    """Extract experiment metadata from the REMARK records of a PDB file.

    Fixes a Python 3 crash in the original: `float(filter(...))` raised an
    uncaught TypeError because `filter()` returns an iterator; the digits
    are now collected with a join, which works on both Python versions.

    @param pdbid: path of the PDB file to parse
    @return: (resolution, R value, temperature, pH); each defaults to 0
        when its record is absent or unparsable.  Parsing stops at the
        first PH record, as in the original.
    """
    reso = 0
    r_value = 0
    temp = 0
    ph = 0
    for line in open(pdbid):
        pdbstr = line.strip()
        if pdbstr[0:22] == "REMARK   2 RESOLUTION.":
            try:
                reso = float(pdbstr[26:30].strip())
            except ValueError:
                reso = 0
        if pdbstr[0:45] == "REMARK   3   R VALUE            (WORKING SET)":
            try:
                r_value = float(pdbstr[49:54].strip())
            except ValueError:
                r_value = 0
        if pdbstr[0:23] == "REMARK 200  TEMPERATURE":
            try:
                temp = float(pdbstr[45:48].strip())
            except ValueError:
                temp = 0
        if pdbstr[0:14] == "REMARK 200  PH":
            # keep only digits and the decimal point before converting
            ph = ''.join(ch for ch in pdbstr[45:48].strip() if ch in '0123456789.')
            try:
                ph = float(ph)
            except ValueError:
                ph = 0
            break
    return reso, r_value, temp, ph
def file_loop(pdb_id, mutation_chain, mutation_resid, wild_aa, mutation_aa, model_type):
    """Build the feature vector for one mutation and run the model on it.

    @param pdb_id: path of the PDB file describing the complex
    @param mutation_chain: chain id of the mutation
    @param mutation_resid: residue id of the mutation
    @param wild_aa: wild-type amino acid (one- or three-letter code)
    @param mutation_aa: mutant amino acid (one- or three-letter code)
    @param model_type: '1' selects the regression model, anything else the
        classification model (see pred_feature)
    @return: the model prediction, or an error-message string when the PDB
        file is missing or has no coordinate at the mutation site
    """
    # accept three-letter codes by converting them to one-letter codes
    if len(wild_aa) == 3:
        wild_aa = translate_aa(wild_aa)
    if len(mutation_aa) == 3:
        mutation_aa = translate_aa(mutation_aa)
    # physico-chemical change features of the substitution itself
    label = [
        net_volume(wild_aa, mutation_aa),
        net_hydrophobicity(wild_aa, mutation_aa),
        flexibility(wild_aa, mutation_aa),
        mutation_hydrophobicity(wild_aa, mutation_aa),
        mutation_polarity(wild_aa, mutation_aa),
        mutation_type(wild_aa, mutation_aa),
        mutation_size(wild_aa, mutation_aa),
        mutation_hbonds(wild_aa, mutation_aa),
        mutation_chemical(wild_aa, mutation_aa),
    ]
    if not os.path.isfile(pdb_id):
        return 'Please check the PDB file'
    (resid_label_aa, mutation_coordinate, error_index) = mutation_sequence(pdb_id, str(mutation_resid), mutation_chain)
    if error_index == 1:
        # fixed typo in the original message ("ther" -> "there")
        return 'Please check the PDB file, there is no coordinate at the mutation site'
    label_aa_distance = mutation_distance(pdb_id, mutation_chain, mutation_coordinate)
    # sequence context: residues around the site, then nearest neighbours
    for residue in resid_label_aa:
        label.append(mutation_aa_label(residue))
    for residue in label_aa_distance:
        label.append(mutation_aa_label(residue))
    # experimental conditions of the structure (R value is not used)
    (reso, r_value, temp, ph) = mutation_pdb_information(pdb_id)
    label.append(reso)
    label.append(temp)
    label.append(ph)
    return pred_feature(label, model_type)
def pred_feature(label, model_type):
    """Run the trained xgboost model on one feature vector.

    Loads 'regression.model' or 'classification.model' from the current
    directory depending on model_type.

    @param label: list of feature values for a single mutation
    @param model_type: '1' selects the regression model (returns the raw
        predicted value); any other value selects the classifier
    @return: float prediction, or 'Disruptive' / 'Nondisruptive'
    """
    use_regression = str(model_type) == '1'
    model_file = 'regression.model' if use_regression else 'classification.model'
    model = xgb.Booster(model_file=model_file)
    features = np.array(label).reshape((1, len(label)))
    y_pred = model.predict(xgb.DMatrix(features))
    if use_regression:
        return y_pred[0]
    return 'Disruptive' if y_pred[0] > 0.5 else 'Nondisruptive'
import sys, getopt
import os
import numpy as np
import xgboost as xgb
import pandas as pd
opts, args = getopt.getopt(sys.argv[1:], "hi:c:r:w:m:f:d:o:", ["help=", "pdbfile=","chain=","resid=","wild=","mutation_list","model","output="])
for op, value in opts:
if op == "-i":
pdb_id = value
#pdb_id = str.lower(pdb_id)
elif op == "-c":
mutation_chain = value
elif op == "-r":
mutation_resid = value
elif op == "-w":
wild_aa= value
elif op == "-m":
mutation_aa = value
elif op == "-d":
| |
dict()
self._pdescr = dict()
self._ptype = dict() #stores the type of the parameter
self._prange = dict() # stores the min-max range of the parameter in a tuple
self._localp = dict()
self.addlocalparameter("_ports_", dict(), "Ports calculated by geom")
self._x0 = 0
self._y0 = 0
self._hv = True
self._bf = True
self._ports = dict()
self._name = ""
self._description = "No description yet"
self.use_references = True
def __flatdict(self,d,parent_str):
flatdict=dict()
for key,value in d.items():
if type(value)==dict:
newdict = self.__flatdict(value,parent_str+key+"::")
for key,value in newdict.items():
flatdict[key]=value
else:
if type(value)!=list:
flatdict[parent_str+key]=value
return flatdict
def __hash__(self):
if(hasattr(self,"_seq")):
fldict = self.__flatdict(self._seq.options,"")
return hash((frozenset(self._p.items()), self._name, frozenset(fldict.items())))
return hash((frozenset(self._p.items()), self._name))
def angle(self):
"""
Returns the orientation of the device in radians.
Returns
-------
float
The orientation in radians (east = zero).
"""
return math.pi*(3-(self._hv+self._bf*2))/2
def set_angle(self,angle:float):
"""
Changes the orientation of the device
Parameters
----------
angle : float
The new angle in radians.
Returns
-------
None.
"""
i = round(3-angle*2/math.pi)%4
self._hv = i%2==1
self._bf = math.floor(i/2)==1
def set_position(self, x0: float, y0: float):
"""
Changes the position of the device
Parameters
----------
x0 : float
X offset.
y0 : float
Y offset.
Returns
-------
None.
"""
self._x0=x0
self._y0=y0
def addport(self,port: DevicePort):
"""
Call this from the ports() method to add a port to the device.
Parameters
----------
port : DevicePort
The device port.
Returns
-------
None.
"""
self._ports[port.name]=port
def addparameter(self, param_name: str, default_value, param_description: str, param_type=float, param_range=(0, np.inf)):
    """
    Call this from the parameters() method to add a parameter to the device.

    Parameters
    ----------
    param_name : str
        The name of the parameter. Must not contain ':' (reserved as the
        hierarchy separator).
    default_value : TYPE
        The default value.
    param_description : str
        A text describing the parameter.
    param_type : TYPE, optional
        The type of the parameter. The default is float.
    param_range : tuple, optional
        A (min, max) tuple bounding the parameter. The default is (0, np.inf).

    Returns
    -------
    None.
    """
    # Fix: np.infty was removed in NumPy 2.0; np.inf is the identical value.
    if param_name.find(":") != -1:
        # Silently skip (with a warning) to keep historical behavior.
        print("Cannot define variable names containing ':'")
        return
    self._p[param_name] = default_value
    self._pdescr[param_name] = param_description
    self._ptype[param_name] = param_type
    self._prange[param_name] = param_range
def addlocalparameter(self, param_name: str, default_value, param_description: str, param_type=float, param_range=(0, np.inf)):
    """
    Define a local parameter, only used within the class and not
    controllable from outside.

    Parameters
    ----------
    param_name : str
        The parameter name. Must not contain ':' (reserved separator).
    default_value : TYPE
        The value of the parameter.
    param_description : str
        Description of the parameter.
    param_type : TYPE, optional
        The type of the parameter. The default is float.
    param_range : tuple, optional
        A (min, max) tuple bounding the parameter. The default is (0, np.inf).

    Returns
    -------
    None.
    """
    # Fix: np.infty was removed in NumPy 2.0; np.inf is the identical value.
    if param_name.find(":") != -1:
        print("Cannot define variable names containing ':'")
        return
    self._localp[param_name] = default_value
    self._pdescr[param_name] = param_description
    self._ptype[param_name] = param_type
    self._prange[param_name] = param_range
def addlocalport(self, port):
    """
    Register a port created from within geom(). Local ports are picked up
    automatically by the default ports() implementation.

    Parameters
    ----------
    port : DevicePort
        The port to be added; stored under its own name.

    Returns
    -------
    None.
    """
    registry = self._localp["_ports_"]
    registry[port.name] = port
def get_localport(self, portname: str):
    """
    Return a local port (i.e. one registered within the geom() function).

    Parameters
    ----------
    portname : str
        The port name.

    Returns
    -------
    port : DevicePort
        The port; an empty placeholder port is returned (with a warning)
        when the name is unknown.
    """
    lports = self._localp["_ports_"]
    try:
        return lports[portname]
    except KeyError:
        print("Could not find port named", portname, "in", self._name, "as it was not defined by device.")
        return DevicePort(0, 0, True, True)
def remove_localport(self, portname: str):
    """
    Remove a local port; unknown names are ignored.

    Parameters
    ----------
    portname : str
        The name of the port to be removed.

    Returns
    -------
    None.
    """
    # pop with a default mirrors the original membership-check-then-pop.
    self._localp["_ports_"].pop(portname, None)
def set_param(self, param_name: str, value):
    """
    Change a parameter. To be called after build(). Hierarchical names use
    '::' separators (e.g. 'sub::width').

    Parameters
    ----------
    param_name : str
        The parameter to be changed.
    value : TYPE
        The new value of the parameter.

    Returns
    -------
    None.
    """
    *parents, leaf = param_name.split("::")
    node = self._p
    # Walk down the nested-dict hierarchy; bail out with a warning on a miss.
    for part in parents:
        if part not in node:
            print("Could not set parameter named", part, "as it was not defined by device.")
            return
        node = node[part]
    if leaf in node:
        node[leaf] = value
    else:
        print("Could not set parameter named", leaf, "as it was not defined by device.")
def get_params(self, cast_types: bool = True, clip_in_range: bool = True) -> dict:
    """
    To be called by geom() functions. Returns the dictionary with all parameters.

    Fix: the original loops assigned the cast/clipped result to a loop-local
    variable and never stored it back, so neither option had any effect.
    The converted values are now written back into the parameter map.

    Parameters
    ----------
    cast_types : bool, optional
        Casts each parameter to its declared type. The default is True.
    clip_in_range : bool, optional
        Clips each value into its declared (min, max) range. The default is True.

    Returns
    -------
    dict
        A dictionary with the parameter value map.
    """
    if cast_types:
        for p in self._p:
            # Re-bind the converted value so the cast actually sticks.
            self._p[p] = self._ptype[p](self._p[p])
    if clip_in_range:
        for p, val in self._p.items():
            lo, hi = self._prange[p]
            if val < lo:
                self._p[p] = lo
            elif val > hi:
                self._p[p] = hi
    return self._p
def get_port(self, port_name: str):
    """
    Should not be called by user. Returns the named port.

    Parameters
    ----------
    port_name : str
        Name of the port.

    Returns
    -------
    DevicePort
        The port object; an empty placeholder port is returned (with a
        warning) when the name is unknown.
    """
    try:
        return self._ports[port_name]
    except KeyError:
        print("Could not find port named", port_name, "in", self._name, "as it was not defined by device.")
        return DevicePort(0, 0, True, True)
def set_name(self, name: str):
    """
    Set the device name (should be called from initialize()).

    Parameters
    ----------
    name : str
        The device name.

    Returns
    -------
    None.
    """
    self._name = name
def set_description(self, descr: str):
    """
    Set the device description (should be called from initialize()).

    Parameters
    ----------
    descr : str
        The device description.

    Returns
    -------
    None.
    """
    self._description = descr
def initialize(self):
    """
    Re-implement this function in your device to initialize and set the
    device name/description (typically via set_name()/set_description()).
    The default implementation does nothing.

    Returns
    -------
    None.
    """
    pass
def parameters(self):
    """
    Re-implement this function to define parameters of the device
    (typically via addparameter()/addlocalparameter()). The default
    implementation does nothing.

    Returns
    -------
    None.
    """
    pass
def geom(self):
    """
    Re-implement this function to generate the geometry of the device.
    The default implementation does nothing (no geometry).

    Returns
    -------
    None.
    """
    pass
def run(self):
    """
    Runs the device and generates a geometry.

    When use_references is True the geometry is rendered once per unique
    device hash and reused as a structure reference (sref) afterwards;
    otherwise a fresh geometry is generated and transformed in place.

    Returns
    -------
    g : samplemaker.shapes.GeomGroup
        The geometry of the device.
    """
    if(self.use_references):
        # Check if it is in the device pool
        hsh = self.__hash__()
        # Devices driven by a netlist are named after the netlist entry.
        if "NETLIST" in self._p:
            srefname = self._p["NETLIST"].name
        else:
            srefname = self._name
        if srefname not in _DeviceCountPool:
            _DeviceCountPool[srefname]=0
        if hsh not in _DevicePool:
            # First time this exact parameterization is rendered:
            # cache geometry, bounding box and local parameters.
            _DeviceCountPool[srefname] += 1
            srefname += "_%0.4i"%_DeviceCountPool[srefname]
            LayoutPool[srefname] = self.geom()
            _BoundingBoxPool[srefname] = LayoutPool[srefname].bounding_box()
            _DevicePool[hsh] = srefname
            _DeviceLocalParamPool[hsh] = deepcopy(self._localp)
        else:
            # Reuse the cached render; restore its local parameters so
            # ports() below behaves as if geom() had just run.
            srefname += "_%0.4i"%_DeviceCountPool[srefname]
            self._localp = _DeviceLocalParamPool[hsh]
        # now create a ref
        g = make_sref(self._x0,self._y0, _DevicePool[hsh],
                      LayoutPool[_DevicePool[hsh]],
                      angle=math.degrees(self.angle()))
    else:
        g = self.geom()
        g.rotate_translate(self._x0,self._y0,math.degrees(self.angle()))
        #g.rotate(0,0,math.degrees(self.angle()))
        #g.translate(self._x0,self._y0)
    self.ports() # this will get the proper local parameters as if self.geom() ran properly
    # Now rotate/translate all ports
    for port in self._ports.values():
        port.rotate(0,0,math.degrees(self.angle()))
        port.x0 += self._x0
        port.y0 += self._y0
    return g
def ports(self):
    """
    Re-implement this to define ports.
    If local ports are registered via geom(), the default implementation
    copies each of them into the public port map, so re-implementation is
    not needed in that case.

    Returns
    -------
    None.
    """
    if "_ports_" in self._localp:
        for local_port in self._localp["_ports_"].values():
            # Deep-copy so later transforms don't mutate the cached local port.
            self.addport(deepcopy(local_port))
@staticmethod
def build_registered(name: str):
    """
    Build a device from the pool of registered device names.

    Parameters
    ----------
    name : str
        The device name to be built.

    Returns
    -------
    Device
        The built device, or None (with a warning) if the name is unknown.
    """
    if name not in _DeviceList:
        print("No device named", name, "found.")
        return None
    return _DeviceList[name].build()
@classmethod
def build(cls):
    """
    Class method to build a device.

    Parameters
    ----------
    cls : Device
        The Device class.

    Returns
    -------
    device : Device
        Instance of the Device ready to be rendered via the run() method,
        with its initialize/parameters/ports hooks already executed.
    """
    device = cls()
    for hook in (device.initialize, device.parameters, device.ports):
        hook()
    return device
class NetListEntry:
def __init__(self,devname: str, x0: float, y0: float, rot: str,
portmap: dict, params: dict):
"""
Defines a single entry in a NetList.
Parameters
----------
devname : str
The registered device name.
x0 : float
x coordinate of the device.
y0 : float
y coordinate of the device.
rot : str
String that defines the orientation of the device (can only | |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Key range representation and splitting."""
import os
try:
import simplejson
except ImportError:
simplejson = None
from google.appengine.api import datastore
from google.appengine.api import namespace_manager
from google.appengine.datastore import datastore_pb
from google.appengine.ext import db
class Error(Exception):
  """Base class for all exceptions raised by this module."""
class KeyRangeError(Error):
  """Error while trying to generate or interpret a KeyRange."""
class SimplejsonUnavailableError(Error):
  """Error while using json functionality with unavailable simplejson."""
class KeyRange(object):
"""Represents a range of keys in the datastore.
A KeyRange object represents a key range
(key_start, include_start, key_end, include_end)
and a scan direction (KeyRange.DESC or KeyRange.ASC).
"""
DESC = "DESC"
ASC = "ASC"
def __init__(self,
key_start=None,
key_end=None,
direction=None,
include_start=True,
include_end=True,
namespace=None,
_app=None):
"""Initialize a KeyRange object.
Args:
key_start: The starting key for this range.
key_end: The ending key for this range.
direction: The direction of the query for this range.
include_start: Whether the start key should be included in the range.
include_end: Whether the end key should be included in the range.
namespace: The namespace for this range. If None then the current
namespace is used.
"""
if direction is None:
direction = KeyRange.ASC
assert direction in (KeyRange.ASC, KeyRange.DESC)
self.direction = direction
self.key_start = key_start
self.key_end = key_end
self.include_start = include_start
self.include_end = include_end
if namespace is not None:
self.namespace = namespace
else:
self.namespace = namespace_manager.get_namespace()
self._app = _app
def __str__(self):
if self.include_start:
left_side = "["
else:
left_side = "("
if self.include_end:
right_side = "]"
else:
right_side = ")"
return "%s%s%r to %r%s" % (self.direction, left_side, self.key_start,
self.key_end, right_side)
def __repr__(self):
return ("key_range.KeyRange(key_start=%r,key_end=%r,direction=%r,"
"include_start=%r,include_end=%r, namespace=%r)") % (
self.key_start,
self.key_end,
self.direction,
self.include_start,
self.include_end,
self.namespace)
def advance(self, key):
"""Updates the start of the range immediately past the specified key.
Args:
key: A db.Key.
"""
self.include_start = False
self.key_start = key
def filter_query(self, query):
    """Add query filters to restrict *query* to this key range.

    Args:
      query: A db.Query instance.

    Returns:
      The input query restricted to this key range.
    """
    assert isinstance(query, db.Query)
    start_op = ">=" if self.include_start else ">"
    end_op = "<=" if self.include_end else "<"
    if self.key_start:
        query.filter("__key__ %s" % start_op, self.key_start)
    if self.key_end:
        query.filter("__key__ %s" % end_op, self.key_end)
    return query
def filter_datastore_query(self, query):
    """Add query filters to restrict *query* to this key range.

    Args:
      query: A datastore.Query instance.

    Returns:
      The input query restricted to this key range.
    """
    assert isinstance(query, datastore.Query)
    start_op = ">=" if self.include_start else ">"
    end_op = "<=" if self.include_end else "<"
    if self.key_start:
        query.update({"__key__ %s" % start_op: self.key_start})
    if self.key_end:
        query.update({"__key__ %s" % end_op: self.key_end})
    return query
def __get_direction(self, asc, desc):
"""Check that self.direction is in (KeyRange.ASC, KeyRange.DESC).
Args:
asc: Argument to return if self.direction is KeyRange.ASC
desc: Argument to return if self.direction is KeyRange.DESC
Returns:
asc or desc appropriately
Raises:
KeyRangeError: if self.direction is not in (KeyRange.ASC, KeyRange.DESC).
"""
if self.direction == KeyRange.ASC:
return asc
elif self.direction == KeyRange.DESC:
return desc
else:
raise KeyRangeError("KeyRange direction unexpected: %s", self.direction)
def make_directed_query(self, kind_class, keys_only=False):
    """Construct a db.Query for this key range, including the scan direction.

    Args:
      kind_class: A kind implementation class.
      keys_only: bool, default False, use keys_only on Query?

    Returns:
      A db.Query instance.

    Raises:
      KeyRangeError: if self.direction is not ASC or DESC.
    """
    assert self._app is None, '_app is not supported for db.Query'
    # Descending order is spelled with a '-' prefix in db.Query.order().
    prefix = self.__get_direction("", "-")
    query = db.Query(kind_class, namespace=self.namespace, keys_only=keys_only)
    query.order(prefix + "__key__")
    return self.filter_query(query)
def make_directed_datastore_query(self, kind, keys_only=False):
    """Construct a query for this key range, including the scan direction.

    Args:
      kind: A string.
      keys_only: bool, default False, use keys_only on Query?

    Returns:
      A datastore.Query instance.

    Raises:
      KeyRangeError: if self.direction is not in (KeyRange.ASC, KeyRange.DESC).
    """
    direction = self.__get_direction(datastore.Query.ASCENDING,
                                     datastore.Query.DESCENDING)
    # NOTE(review): unlike make_ascending_datastore_query, this does not pass
    # namespace=self.namespace to datastore.Query — confirm whether directed
    # queries should also be namespace-restricted.
    query = datastore.Query(kind, _app=self._app, keys_only=keys_only)
    query.Order(("__key__", direction))
    query = self.filter_datastore_query(query)
    return query
def make_ascending_query(self, kind_class, keys_only=False):
    """Construct a db.Query for this key range without a scan direction.

    Args:
      kind_class: A kind implementation class.
      keys_only: bool, default False, query only for keys.

    Returns:
      A db.Query instance.
    """
    assert self._app is None, '_app is not supported for db.Query'
    query = db.Query(kind_class, namespace=self.namespace, keys_only=keys_only)
    query.order("__key__")
    return self.filter_query(query)
def make_ascending_datastore_query(self, kind, keys_only=False):
    """Construct a datastore.Query for this key range without a scan direction.

    Args:
      kind: A string.
      keys_only: bool, default False, use keys_only on Query?

    Returns:
      A datastore.Query instance.
    """
    query = datastore.Query(kind,
                            namespace=self.namespace,
                            _app=self._app,
                            keys_only=keys_only)
    query.Order(("__key__", datastore.Query.ASCENDING))
    return self.filter_datastore_query(query)
def split_range(self, batch_size=0):
    """Split this key range into a list of at most two ranges.

    This method attempts to split the key range approximately in half.
    Numeric ranges are split in the middle into two equal ranges and
    string ranges are split lexicographically in the middle. If the
    key range is smaller than batch_size it is left unsplit.

    Note that splitting is done without knowledge of the distribution
    of actual entities in the key range, so there is no guarantee (nor
    any particular reason to believe) that the entities of the range
    are evenly split.

    Args:
      batch_size: The maximum size of a key range that should not be split.

    Returns:
      A list of one or two key ranges covering the same space as this range.
    """
    key_start = self.key_start
    key_end = self.key_end
    include_start = self.include_start
    include_end = self.include_end
    key_pairs = []
    # Unbounded on one side: nothing to bisect, keep a single range.
    if not key_start:
        key_pairs.append((key_start, include_start, key_end, include_end,
                          KeyRange.ASC))
    elif not key_end:
        key_pairs.append((key_start, include_start, key_end, include_end,
                          KeyRange.DESC))
    else:
        # split_keys is defined elsewhere on KeyRange; it returns a key
        # between key_start and key_end (possibly equal to either bound).
        key_split = KeyRange.split_keys(key_start, key_end, batch_size)
        first_include_end = True
        if key_split == key_start:
            # Degenerate split: keep the first half consistent with the
            # original start inclusion.
            first_include_end = first_include_end and include_start
        key_pairs.append((key_start, include_start,
                          key_split, first_include_end,
                          KeyRange.DESC))
        second_include_end = include_end
        if key_split == key_end:
            second_include_end = False
        # The second half always excludes the split key at its start so the
        # two halves don't overlap.
        key_pairs.append((key_split, False,
                          key_end, second_include_end,
                          KeyRange.ASC))
    ranges = [KeyRange(key_start=start,
                       include_start=include_start,
                       key_end=end,
                       include_end=include_end,
                       direction=direction,
                       namespace=self.namespace,
                       _app=self._app)
              for (start, include_start, end, include_end, direction)
              in key_pairs]
    return ranges
def __hash__(self):
return hash([self.key_start,
self.key_end,
self.direction,
self._app,
self.namespace])
def __cmp__(self, other):
    """Compare two key ranges.

    Key ranges with a value of None for key_start or key_end, are always
    considered to have include_start=False or include_end=False, respectively,
    when comparing. Since None indicates an unbounded side of the range,
    the include specifier is meaningless. The ordering generated is total
    but somewhat arbitrary.

    NOTE: relies on the Python 2 builtin cmp(); not callable under Python 3.

    Args:
      other: An object to compare to this one.

    Returns:
      -1: if this key range is less than other.
      0:  if this key range is equal to other.
      1:  if this key range is greater than other.
    """
    if not isinstance(other, KeyRange):
        return 1
    # Index layout: [0]=key_start, [1]=key_end, [2]=direction,
    # [3]=include_start, [4]=include_end, [5]=_app, [6]=namespace.
    self_list = [self.key_start, self.key_end, self.direction,
                 self.include_start, self.include_end, self._app,
                 self.namespace]
    # Normalize the include flags on unbounded sides (see docstring).
    if not self.key_start:
        self_list[3] = False
    if not self.key_end:
        self_list[4] = False
    other_list = [other.key_start,
                  other.key_end,
                  other.direction,
                  other.include_start,
                  other.include_end,
                  other._app,
                  other.namespace]
    if not other.key_start:
        other_list[3] = False
    if not other.key_end:
        other_list[4] = False
    return cmp(self_list, other_list)
@staticmethod
def bisect_string_range(start, end):
"""Returns a string that is approximately in the middle of the range.
(start, end) is treated as a string range, and it is assumed
start <= end in the usual lexicographic string ordering. The output key
mid is guaranteed to satisfy start <= mid <= end.
The method proceeds by comparing initial characters of start and
end. When the characters are equal, they are appended to the mid
string. In the first place that the characters differ, the
difference characters are averaged and this average is appended to
the mid string. If averaging resulted in rounding down, and
additional character is added to the mid string to make up for the
rounding down. This extra step is necessary for correctness in
the case that the average of the two characters is equal to the
character in the start string.
This method makes the assumption | |
"""
Code borrowed from PLCAPI.
URL: http://svn.planet-lab.org/svn/PLCAPI/trunk/PLC/Table.py
Modifications by <NAME>
"""
from types import StringTypes, IntType, LongType
import time
import calendar
from SMDS.timestamp import Timestamp
from SMDS.faults import *
from SMDS.parameter import Parameter
class Row(dict):
"""
Representation of a row in a database table. To use, optionally
instantiate with a dict of values. Update as you would a
dict. Commit to the database with sync().
"""
# Set this to the name of the table that stores the row.
# e.g. table_name = "nodes"
table_name = None
# Set this to the name of the primary key of the table. It is
# assumed that the this key is a sequence if it is not set when
# sync() is called.
# e.g. primary_key="node_id"
primary_key = None
# Set this to the names of tables that reference this table's
# primary key.
join_tables = []
# Set this to a dict of the valid fields of this object and their
# types. Not all fields (e.g., joined fields) may be updated via
# sync().
fields = {}
def __init__(self, api, fields = {}):
dict.__init__(self, fields)
self.api = api
# run the class_init initializer once
cls=self.__class__
if not hasattr(cls,'class_inited'):
#cls.class_init (api)
cls.class_inited=True # actual value does not matter
def validate(self):
    """
    Validates values. Will validate a value with a custom function
    if a function named 'validate_[key]' exists.

    NOTE: Python 2 code ('raise X, msg' syntax, has_key, iteritems).
    """
    # Warn about mandatory fields (NOT NULL columns without a default).
    mandatory_fields = self.api.db.fields(self.table_name, notnull = True, hasdef = False)
    for field in mandatory_fields:
        if not self.has_key(field) or self[field] is None:
            raise MDInvalidArgument, field + " must be specified and cannot be unset in class %s"%self.__class__.__name__
    # Validate values before committing
    for key, value in self.iteritems():
        if value is not None and hasattr(self, 'validate_' + key):
            # Per-field validators may normalize the value in place.
            validate = getattr(self, 'validate_' + key)
            self[key] = validate(value)
def separate_types(self, items):
    """
    Separate a list of different typed objects.
    Return a list for each type (ints, strs and dicts).

    NOTE: Python 2 code ('long', StringTypes, list-returning filter,
    'raise X, msg' syntax).
    """
    if isinstance(items, (list, tuple, set)):
        ints = filter(lambda x: isinstance(x, (int, long)), items)
        strs = filter(lambda x: isinstance(x, StringTypes), items)
        dicts = filter(lambda x: isinstance(x, dict), items)
        return (ints, strs, dicts)
    else:
        raise MDInvalidArgument, "Can only separate list types"
def associate(self, *args):
    """
    Provides a means for high level api calls to associate objects
    using low level calls. Dispatches to a method named
    'associate_<field>' where args = (auth, field, value, ...).

    NOTE(review): the last branch references 'MDInvalidArguemnt' (typo) —
    hitting it raises NameError instead of the intended fault. Python 2
    'raise X, msg' syntax throughout.
    """
    if len(args) < 3:
        raise MDInvalidArgumentCount, "auth, field, value must be specified"
    elif hasattr(self, 'associate_' + args[1]):
        associate = getattr(self, 'associate_'+args[1])
        associate(*args)
    else:
        raise MDInvalidArguemnt, "No such associate function associate_%s" % args[1]
def validate_timestamp(self, timestamp):
    """Normalize *timestamp* using the shared SQL timestamp validator."""
    return Timestamp.sql_validate(timestamp)
def add_object(self, classobj, join_table, columns = None):
    """
    Returns a function that can be used to associate this object
    with another via *join_table*. Converted to a classmethod below.

    NOTE: Python 2 code (iteritems).
    """
    def add(self, obj, columns = None, commit = True):
        """
        Associate with the specified object.
        """
        # Various sanity checks
        assert isinstance(self, Row)
        assert self.primary_key in self
        assert join_table in self.join_tables
        assert isinstance(obj, classobj)
        assert isinstance(obj, Row)
        assert obj.primary_key in obj
        assert join_table in obj.join_tables
        # By default, just insert the primary keys of each object
        # into the join table.
        if columns is None:
            columns = {self.primary_key: self[self.primary_key],
                       obj.primary_key: obj[obj.primary_key]}
        # Parameterized values guard against SQL injection.
        params = []
        for name, value in columns.iteritems():
            params.append(self.api.db.param(name, value))
        self.api.db.do("INSERT INTO %s (%s) VALUES(%s)" % \
                       (join_table, ", ".join(columns), ", ".join(params)),
                       columns)
        if commit:
            self.api.db.commit()
    return add
add_object = classmethod(add_object)
def remove_object(self, classobj, join_table):
    """
    Returns a function that can be used to disassociate this
    object from another via *join_table*. Converted to a classmethod below.
    """
    def remove(self, obj, commit = True):
        """
        Disassociate from the specified object.
        """
        # Sanity checks mirror add_object's closure.
        assert isinstance(self, Row)
        assert self.primary_key in self
        assert join_table in self.join_tables
        assert isinstance(obj, classobj)
        assert isinstance(obj, Row)
        assert obj.primary_key in obj
        assert join_table in obj.join_tables
        self_id = self[self.primary_key]
        obj_id = obj[obj.primary_key]
        # Parameterized values guard against SQL injection.
        self.api.db.do("DELETE FROM %s WHERE %s = %s AND %s = %s" % \
                       (join_table,
                        self.primary_key, self.api.db.param('self_id', self_id),
                        obj.primary_key, self.api.db.param('obj_id', obj_id)),
                       locals())
        if commit:
            self.api.db.commit()
    return remove
remove_object = classmethod(remove_object)
# convenience: check in dict (self.fields) that a key is writable
@staticmethod
def is_writable (key,value,dict):
# if not mentioned, assume it's writable (e.g. deleted ...)
if key not in dict: return True
# if mentioned but not linked to a Parameter object, idem
if not isinstance(dict[key], Parameter): return True
# if not marked ro, it's writable
if not dict[key].ro: return True
return False
def db_fields(self, obj = None):
"""
Return only those fields that can be set or updated directly
(i.e., those fields that are in the primary table (table_name)
for this object, and are not marked as a read-only Parameter.
"""
if obj is None: obj = self
db_fields = self.api.db.fields(self.table_name)
return dict ( [ (key,value) for (key,value) in obj.items()
if key in db_fields and
Row.is_writable(key,value,self.fields) ] )
# takes as input a list of columns
# returns one dict and one list : fields, rejected
@classmethod
def parse_columns (cls, columns):
(fields,rejected)=({},[])
for column in columns:
if column in cls.fields: fields[column]=cls.fields[column]
else: rejected.append(column)
return (fields,rejected)
# compute the 'accepts' part of a method, from a list of column names, and a fields dict
# use exclude=True to exclude the column names instead
# typically accepted_fields (Node.fields,['hostname','model',...])
@staticmethod
def accepted_fields (update_columns, fields_dict, exclude=False):
result={}
for (k,v) in fields_dict.iteritems():
if (not exclude and k in update_columns) or (exclude and k not in update_columns):
result[k]=v
return result
# filter out user-provided fields that are not part of the declared acceptance list
# keep it separate from split_fields for simplicity
# typically check_fields (<user_provided_dict>,{'hostname':Parameter(str,...),'model':Parameter(..)...})
@staticmethod
def check_fields (user_dict, accepted_fields):
# avoid the simple, but silent, version
# return dict ([ (k,v) for (k,v) in user_dict.items() if k in accepted_fields ])
result={}
for (k,v) in user_dict.items():
if k in accepted_fields: result[k]=v
else: raise MDInvalidArgument ('Trying to set/change unaccepted key %s'%k)
return result
# given a dict (typically passed to an Update method), we check and sort
# them against a list of dicts, e.g. [Node.fields, Node.related_fields]
# return is a list that contains n+1 dicts, last one has the rejected fields
@staticmethod
def split_fields (fields, dicts):
result=[]
for x in dicts: result.append({})
rejected={}
for (field,value) in fields.iteritems():
found=False
for i in range(len(dicts)):
candidate_dict=dicts[i]
if field in candidate_dict.keys():
result[i][field]=value
found=True
break
if not found: rejected[field]=value
result.append(rejected)
return result
def __eq__(self, y):
"""
Compare two objects.
"""
# Filter out fields that cannot be set or updated directly
# (and thus would not affect equality for the purposes of
# deciding if we should sync() or not).
x = self.db_fields()
y = self.db_fields(y)
return dict.__eq__(x, y)
def sync(self, commit = True, insert = None):
    """
    Flush changes back to the database.

    Inserts a new row when the primary key is unset (allocating the next
    serial id if the key is integer-typed), when the primary key is the only
    field, or when insert=True is forced; otherwise updates the existing row.

    NOTE: Python 2 code (has_key, IntType/LongType); relies on keys() and
    values() enumerating db_fields in the same order.
    """
    # Validate all specified fields
    self.validate()
    # Filter out fields that cannot be set or updated directly
    db_fields = self.db_fields()
    # Parameterize for safety
    keys = db_fields.keys()
    values = [self.api.db.param(key, value) for (key, value) in db_fields.items()]
    # If the primary key (usually an auto-incrementing serial
    # identifier) has not been specified, or the primary key is the
    # only field in the table, or insert has been forced.
    if not self.has_key(self.primary_key) or \
       keys == [self.primary_key] or \
       insert is True:
        # If primary key id is a serial int and it isnt included, get next id
        if self.fields[self.primary_key].type in (IntType, LongType) and \
           self.primary_key not in self:
            pk_id = self.api.db.next_id(self.table_name, self.primary_key)
            self[self.primary_key] = pk_id
            db_fields[self.primary_key] = pk_id
            # Rebuild keys/values so the new primary key is inserted too.
            keys = db_fields.keys()
            values = [self.api.db.param(key, value) for (key, value) in db_fields.items()]
        # Insert new row
        sql = "INSERT INTO %s (%s) VALUES (%s)" % \
              (self.table_name, ", ".join(keys), ", ".join(values))
    else:
        # Update existing row
        columns = ["%s = %s" % (key, value) for (key, value) in zip(keys, values)]
        sql = "UPDATE %s SET " % self.table_name + \
              ", ".join(columns) + \
              " WHERE %s = %s" % \
              (self.primary_key,
               self.api.db.param(self.primary_key, self[self.primary_key]))
    self.api.db.do(sql, db_fields)
    if commit:
        self.api.db.commit()
def commit(self):
self.api.db.commit()
def delete(self, commit = True):
"""
Delete row from its primary table, and from any tables that
reference it.
"""
assert self.primary_key | |
for i, ann_line in enumerate(ann_list):
if len(ann_line['boxes']) == 0:
continue
width = ann_line['width']
height = ann_line['height']
bboxes = np.array(ann_line['boxes'])
bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 1, width - 1)
bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 1, height - 1)
labels = ann_line['class_id']
# attrs = [[3] * random.randint(1, 3)] * len(labels)
attrs = self.pad_attr(ann_line['attribute_idx'])
try:
labels = np.array(labels).astype(np.int64)
attrs = np.array(attrs).astype(np.int64)
bboxes = bboxes.astype(np.float32)
if (attrs > len(VG_ATTRS)).any():
raise RuntimeError()
except:
import pdb; pdb.set_trace()
data_infos.append(
dict(
filename=ann_line['image_name'],
width=width,
height=height,
ann=dict(
bboxes=bboxes,
labels=labels,
attrs=attrs)
))
return data_infos
def get_ann_info(self, idx):
    """Return the annotation dict (bboxes/labels/attrs) for sample *idx*."""
    info = self.data_infos[idx]
    return info['ann']
GQA_CLASSES = [
'food trucks',
'wolves',
'tractors',
'monkeys',
'lavender',
'plier',
'nightstands',
'citrus',
'sporting equipment',
'castles',
'pikachu',
'mice',
'televisions',
'dvd players',
'fire trucks',
'snakes',
'panda bears',
'eagles',
'printers',
'ravioli',
'bread loaf',
'rhinos',
'pigs',
'cooking oil',
'snoopy',
'hippos',
'parrots',
'waffles',
'kittens',
'parachutes',
'sharks',
'serving dish',
'soccer balls',
'farmers',
'vitamins',
'beer mug',
'snail',
'pandas',
'lilies',
'orchids',
'mattresses',
'bugs',
'dinosaurs',
'blenders',
'pretzels',
'dolphins',
'dragons',
'hamburgers',
'penguins',
'antelopes',
'kangaroo',
'pizza shop',
'xbox controller',
'owls',
'supermarket',
'entree',
'swans',
'taxis',
'stuffed bears',
'guests',
'pudding',
'boar',
'lunch box',
'donkeys',
'hibiscus',
'leopard',
'dragonfly',
'wok',
'cappuccino',
'mexican food',
'juice box',
'nutella',
'wii controllers',
'restaurants',
'performer',
'egg carton',
'pistachio',
'salon',
'ice-cream cone',
'lab coat',
'kiwis',
'wardrobe',
'gourd',
'french toast',
'ladles',
'sugar packet',
'ginger',
'croissants',
'smoothie',
'ostriches',
'coconuts',
'vinegar',
'oatmeal',
'coffee shop',
'masks',
'biscuits',
'pizza boxes',
'pita',
'hummus',
'kimono',
'lions',
'turbines',
'amusement park',
'snow shoe',
'swamp',
'coral',
'beetle',
'vanilla',
'weapon',
'fudge',
'couches',
'oreo',
'egg white',
'cafeteria',
'attic',
'pub',
'artichokes',
'armor',
'appetizers',
'buffet',
'pizza pie',
'soap dispensers',
'granola',
'anchovies',
'food container',
'dog food',
'mint',
'waitress',
'gifts',
'school',
'whale',
'cookbook',
'farmer',
'taco',
'robot',
'cemetery',
'cake pan',
'cheeseburger',
'soft drink',
'shopping center',
'bird house',
'ovens',
'menus',
'egg roll',
'riding boots',
'groceries',
'roast beef',
'gym',
'poodle',
'office supplies',
'hairbrush',
'baseball bats',
'theater',
'lobby',
'bar stools',
'chicken breast',
'potato salad',
'hand dryer',
'orchard',
'wines',
'lizard',
'elmo',
'life jackets',
'coffee beans',
'wolf',
'beer cans',
'gorilla',
'stir fry',
'son',
'pizza oven',
'caramel',
'visitors',
'pills',
'moose',
'customers',
'waiter',
'walnuts',
'cinnamon',
'utensil holder',
'pesto',
'casserole',
'meats',
'towel dispenser',
'watermelons',
'bee',
'lily',
'vendors',
'chocolate chips',
'cookie jar',
'chickens',
'cookie dough',
'chickpeas',
'sweet potatoes',
'cotton dessert',
'gummy bear',
'baked good',
'picnic',
'policemen',
'shuttle',
'mannequins',
'napkin dispenser',
'cake stand',
'orchid',
'melons',
'bomb',
'crab',
'fisherman',
'cheetah',
'shoppers',
'coffee cups',
'snow flakes',
'rifle',
'shaving cream',
'lemonade',
'batteries',
'cotton candy',
'angry bird',
'flatbread',
'swimmer',
'monster',
'pork',
'hotdog bun',
'gadget',
'beets',
'action figure',
'avocados',
'popcorn',
'dolls',
'banana bunches',
'bartender',
'stew',
'keyboards',
'snow shoes',
'champagne',
'milkshake',
'pasta salad',
'cooking utensil',
'rice cooker',
'wedding',
'palace',
'chess piece',
'soldiers',
'blackberries',
'wii game',
'picnic tables',
'cages',
'outfits',
'dream catcher',
'lounge',
'trumpet',
'ice cube',
'appetizer',
'bread box',
'pecan',
'hospital',
'pocket watch',
'peacock',
'food processor',
'dry-erase board',
'turtle',
'sticky notes',
'baker',
'daughter',
'cranberries',
'pumpkins',
'parmesan cheese',
'seafood',
'baking pan',
'auditorium',
'shopper',
'dumplings',
'artichoke',
'pizza slices',
'dish drainer',
'pizza tray',
'desktop computer',
'officers',
'pantry',
'tuna',
'bakery',
'loaf',
'paint brush',
'piercing',
'spray can',
'squirrel',
'meatballs',
'can opener',
'camel',
'backpacks',
'toolbox',
'visitor',
'onion rings',
'ear buds',
'cream cheese',
'paper dispenser',
'accessory',
'mustard bottle',
'kitchen towel',
'almond',
'tourists',
'hippo',
'snake',
'ramekin',
'thermometer',
'dresses',
'soda cans',
'lambs',
'paper container',
'pizza pan',
'eiffel tower',
'mixing bowl',
'burrito',
'pliers',
'mickey mouse',
'panda bear',
'cinnamon roll',
'salad dressing',
'scooters',
'octopus',
'hair clip',
'cooking pot',
'cat food',
'cereal box',
'alien',
'snowboards',
'toothpicks',
'sweet potato',
'shopping cart',
'chef hat',
'baked goods',
'baking sheet',
'factory',
'microwave oven',
'lobster',
'spider',
'hard drive',
'vendor',
'ice cubes',
'liquor',
'vacuum',
'cigar',
'waffle',
'muffins',
'ingredient',
'cranberry',
'egg shell',
'beer can',
'marshmallow',
'students',
'parent',
'scrub brush',
'egg yolk',
'olive oil',
'rolling pin',
'broth',
'sour cream',
'grater',
'rhino',
'elevator',
'necklaces',
'mangoes',
'cigarettes',
'guacamole',
'beverages',
'mayonnaise',
'library',
'cheesecake',
'horse hoof',
'pretzel',
'bell tower',
'ice maker',
'skateboards',
'walnut',
'oak tree',
'mall',
'cathedral',
'beds',
'temple',
'whisk',
'spear',
'brownies',
'butterflies',
'bison',
'sunflowers',
'coleslaw',
'rubber duck',
'sugar packets',
'remote controls',
'salmon',
'milk carton',
'wig',
'omelette',
'desserts',
'grinder',
'peanuts',
'almonds',
'raisins',
'dolphin',
'rabbit',
'peanut',
'hammer',
'coffee bean',
'plantains',
'water bottles',
'beet',
'mountain side',
'shampoo bottle',
'lego',
'gift',
'nest',
'shark',
'stuffed dog',
'vests',
'geese',
'merchandise',
'mountain peak',
'rackets',
'engineer',
'wildflower',
'sausages',
'bird cage',
'onion ring',
'papaya',
'seat belt',
'phones',
'cheese cube',
'obstacle',
'raspberries',
'skyscrapers',
'hand soap',
'father',
'potato chips',
'bookshelves',
'feta cheese',
'hurdle',
'packages',
'alligator',
'pomegranate',
'toothbrushes',
'classroom',
'frog',
'cameras',
'toy car',
'football',
'tofu',
'fishing pole',
'bug',
'chinese food',
'storage box',
'pig',
'ambulance',
'diaper',
'sailboats',
'snow boots',
'snacks',
'sconce',
'games',
'seagulls',
'gas pump',
'mugs',
'tiger',
'grapefruit',
'helicopter',
'sushi',
'donkey',
'video games',
'dish soap',
'earphones',
'cherries',
'mashed potatoes',
'vending machine',
'parrot',
'pizza box',
'museum',
'router',
'gown',
'medicine cabinet',
'aquarium',
'peanut butter',
'waste basket',
'pancakes',
'instrument',
'restroom',
'washing machine',
'cash register',
'underwear',
'cakes',
'backyard',
'computer desk',
'cell phones',
'crackers',
'hair dryer',
'pineapples',
'fruit stand',
'snack',
'chopstick',
'garment',
'bagels',
'skillet',
'student',
'ostrich',
'calculator',
'jewelry',
'monitors',
'cracker',
'honey',
'soap bottle',
'alcohol',
'hangar',
'shoe laces',
'buns',
'bats',
'stores',
'ribs',
'penguin',
'village',
'brownie',
'figurines',
'tennis balls',
'shops',
'raisin',
'shampoo',
'raspberry',
'bunny',
'christmas light',
'owl',
'coin',
'employee',
'seal',
'gentleman',
'christmas lights',
'electric toothbrush',
'soccer player',
'eagle',
'customer',
'workers',
'blossoms',
'macaroni',
'banana peel',
'yogurt',
'food truck',
'battery',
'candies',
'bath towel',
'kiosk',
'pizza cutter',
'video camera',
'serving tray',
'toaster oven',
'cafe',
'mango',
'ornaments',
'school bus',
'beach umbrella',
'goats',
'waterfall',
'raincoat',
'blossom',
'knife block',
'peaches',
'suits',
'sword',
'pizzas',
'pencils',
'marina',
'money',
'laptops',
'chalkboard',
'antennas',
'spray bottle',
'cats',
'turkey',
'tortilla',
'hotel room',
'cooking utensils',
'projector',
'coconut',
'tattoos',
'ketchup bottle',
'puppy',
'panda',
'teddy bears',
'canisters',
'bedding',
'receipt',
'pouch',
'wheelchair',
'dragon',
'bottle cap',
'desk lamp',
'dining room',
'eggplant',
'stapler',
'chili',
'paintings',
'hilltop',
'flames',
'pigeons',
'sunflower',
'cowboy',
'desert',
'tea pot',
'athlete',
'tourist',
'hamburger',
'pizza crust',
'ladle',
'luggage cart',
'croissant',
'soda bottle',
'gas stove',
'courtyard',
'touchpad',
'bandage',
'uniforms',
'cards',
'toddler',
'bar stool',
'baseball mitt',
'cds',
'cooker',
'biscuit',
'comb',
'farm',
'buoys',
'office chair',
'coats',
'bus driver',
'meadow',
'dinosaur',
'baseball players',
'bone',
'blueberries',
'table lamp',
'parachute',
'daisy',
'hot dogs',
'dining table',
'sea foam',
'soldier',
'sandwiches',
'trays',
'pears',
'pillowcase',
'steak',
'shower door',
'suitcases',
'lunch',
'plain',
'notepad',
'drum',
'hearts',
'kitten',
'apartment',
'drawings',
'coach',
'pajamas',
'blood',
'breakfast',
'turbine',
'goose',
'dip',
'carts',
'heels',
'jackets',
'computers',
'wine bottles',
'cabinet doors',
'zucchini',
'dinner',
'mixer',
'guys',
'gas station',
'keypad',
'console',
'cloths',
'tractor',
'melon',
'sunglasses',
'characters',
'antelope',
'envelope',
'pickles',
'broom',
'fire truck',
'lion',
'jumpsuit',
'toilet brush',
'tissue box',
'garlic',
'water glass',
'outlets',
'stuffed animals',
'dryer',
'blankets',
'dressing',
'baskets',
'whipped cream',
'forks',
'tablet',
'pancake',
'spoons',
'cucumbers',
'control panel',
'chef',
'cupboards',
'light bulbs',
'tents',
'syrup',
'price tag',
'wine glasses',
'rooftop',
'kiwi',
'pump',
'life preserver',
'homes',
'armchair',
'apartment building',
'tea kettle',
'polar bear',
'gun',
'ducks',
'burger',
'piano',
'jockey',
'trash bag',
'video game',
'terminal',
'tea',
'bagel',
'dvd player',
'trucks',
'tangerine',
'asparagus',
'castle',
'tv stand',
'monkey',
'drape',
'tools',
'sponge',
'alarm clock',
'crates',
'beef',
'scaffolding',
'towers',
'flour',
'swan',
'deer',
'coffee pot',
'dumpster',
'gravy',
'cane',
'jars',
'pans',
'herb',
'parsley',
'skater',
'ipod',
'packet',
'butter knife',
'worker',
'flamingo',
'charger',
'powder',
'tissues',
'mother',
'cereal',
'pie',
'pastries',
'hotel',
'entertainment center',
'soda can',
'banana bunch',
'goal',
'coke',
'drinks',
'heater',
'lipstick',
'wildflowers',
'parking sign',
'herbs',
'surfboards',
'toiletries',
'speakers',
'muffin',
'cactus',
'moon',
'closet',
'vine',
'radio',
'cyclist',
'oven door',
'sack',
'photographer',
'minivan',
'oil',
'wallet',
'mannequin',
'helmets',
'peas',
'boulders',
'fire',
'trains',
'lemons',
'shield',
'cupcakes',
'dvds',
'shopping bag',
'clock hand',
'beach chair',
'feeder',
'window frame',
'butterfly',
'tongs',
'glaze',
'lime',
'floor lamp',
'avocado',
'safety jacket',
'garnish',
'stage',
'cookies',
'hedges',
'apple logo',
'team',
'peach',
'fire extinguisher',
'pear',
'elbow pad',
'mozzarella',
'fog',
'dogs',
'tags',
'game controller',
'balloons',
'paper towel',
'athletic shoe',
'satellite dish',
'toothpaste',
'vases',
'ice cream',
'wagon',
'salt',
'toast',
'bracelets',
'soap dish',
'beer bottle',
'chopsticks',
'toilet lid',
'smoke stack',
'pea',
'girls',
'clocks',
'dress shirt',
'officer',
'dough',
'briefcase',
'tool',
'leggings',
'pepper shaker',
'harbor',
'binder',
'pens',
'side table',
'blazer',
'garland',
'buses',
'appliance',
'nuts',
'garage door',
'soap dispenser',
'roadside',
'name tag',
'town',
'undershirt',
'hallway',
'powdered sugar',
'sign post',
'shrimp',
'ropes',
'bears',
'chains',
'toys',
'toilet tank',
'door frame',
'crown',
'polo shirt',
'produce',
'robe',
'cans',
'face mask',
'fountain',
'jeep',
'match',
'lighthouse',
'celery',
'roses',
'printer',
'squash',
'herd',
'cauliflower',
'pizza slice',
'pencil',
'shelter',
'pockets',
'candle holder',
'office',
'tree leaves',
'pilot',
'vines',
'minute hand',
'pedestrians',
'canister',
'manhole cover',
'mirrors',
'earrings',
'costume',
'watermelon',
'blueberry',
| |
def is_library(self):
    """Return True if this rule is a `cpp_library` fbconfig rule."""
    rule_type = self.get_fbconfig_rule_type()
    return rule_type == 'cpp_library'
def is_extension(self):
    """Return True if this rule builds a language extension (Python/Java/Lua)."""
    extension_rule_types = (
        'cpp_python_extension',
        'cpp_java_extension',
        'cpp_lua_extension',
    )
    return self.get_fbconfig_rule_type() in extension_rule_types
def get_fbconfig_rule_type(self):
    """Return the original fbconfig rule type name (e.g. ``cpp_library``)."""
    return self._rule_type
def get_buck_rule_type(self):
    """Map this rule's fbconfig rule type to the corresponding Buck rule type.

    Entries in ``RULE_TYPE_MAP`` are either plain strings or callables
    that take the current build mode and return the rule type.
    """
    mapped = self.RULE_TYPE_MAP[self._rule_type]
    if not callable(mapped):
        return mapped
    return mapped(self._context.mode)
def split_matching_extensions_and_other(self, srcs, exts):
    """
    Split a list into two based on the extension of the items.

    Returns a tuple (matching, other), where matching is a list of
    items from srcs whose extensions are in exts and other is a
    list of the remaining items from srcs.  A None/empty `srcs` yields
    two empty lists.
    """
    # Fixes: docstring typo ("mathing") and the unused `base` local.
    matches = []
    leftovers = []
    for src in (srcs or []):
        # `splitext` keeps the leading dot, so `exts` entries look like '.cpp'.
        _, ext = os.path.splitext(src)
        if ext in exts:
            matches.append(src)
        else:
            leftovers.append(src)
    return (matches, leftovers)
def get_headers_from_sources(self, base_path, srcs):
    """
    Return the headers likely associated with the given sources.
    """
    glob = self._context.buck_ops.glob
    # Hoist the extension set for faster lookups in the loop below.
    source_exts = self.SOURCE_EXTS
    # Skip rule references ('//...' or ':name') -- only plain file paths
    # can have sibling headers on disk.
    file_srcs = [
        src for src in srcs
        if '//' not in src and not src.startswith(':')
    ]
    patterns = []
    for src in file_srcs:
        base, ext = os.path.splitext(src)
        if ext in source_exts:
            for header_ext in cxx_sources.HEADER_SUFFIXES:
                patterns.append(base + header_ext)
    return glob(patterns)
def get_dlopen_info(self, dlopen_enabled):
    """
    Parse the `dlopen_enabled` parameter into a dictionary.

    Returns None when dlopen support is disabled (falsy input); a bare
    string is shorthand for just the soname.
    """
    if not dlopen_enabled:
        return None
    dlopen_info = {}
    if isinstance(dlopen_enabled, str):
        dlopen_info['soname'] = dlopen_enabled
    elif isinstance(dlopen_enabled, dict):
        dlopen_info.update(dlopen_enabled)
    return dlopen_info
def get_sanitizer_binary_ldflags(self):
    """
    Return any linker flags to use when linking binaries with sanitizer
    support.
    """
    sanitizer = self.get_sanitizer()
    assert sanitizer is not None
    if not sanitizer.startswith('address'):
        return []
    # ASAN needs its runtime symbols exported from the binary.
    return [
        '-Wl,--dynamic-list='
        '$(location //tools/build/buck:asan_dynamic_list.txt)',
    ]
def get_sanitizer_non_binary_deps(self):
    """
    Return deps needed when using sanitizers.
    """
    sanitizer = self.get_sanitizer()
    assert sanitizer is not None
    # We link ASAN weak stub symbols into every DSO so that we don't leave
    # undefined references to *SAN symbols at shared library link time,
    # which allows us to pass `--no-undefined` to the linker to prevent
    # undefined symbols.
    needs_stubs = (
        sanitizer.startswith('address') and
        self.get_link_style() == 'shared')
    if not needs_stubs:
        return []
    return [RootRuleTarget('tools/build/sanitizers', 'asan-stubs')]
def get_coverage_ldflags(self, base_path):
    """
    Return compiler flags needed to support coverage builds.
    """
    # Sanitizer builds carry their own instrumentation, so only add the
    # LLVM Coverage Mapping flags for plain coverage builds.
    if self.is_coverage_enabled(base_path) and self.get_sanitizer() is None:
        return ['-fprofile-instr-generate', '-fcoverage-mapping']
    return []
def convert_lex(self, name, lex_flags, lex_src, platform, visibility):
    """
    Create rules to generate a C/C++ header and source from the given lex
    file.

    Returns a tuple (header target, source target, rules), where the
    first two are `:`-prefixed rule names and `rules` is the list of
    generated Rule objects.
    """
    # Encode the owning rule and the lex source into the genrule name,
    # e.g. "rule=dir-file.ll".
    name_base = '{}={}'.format(name.replace(os.sep, '-'), lex_src)
    header_name = name_base + '.h'
    source_name = name_base + '.cc'
    base = lex_src
    header = base + '.h'
    source = base + '.cc'
    attrs = collections.OrderedDict()
    attrs['name'] = name_base
    if visibility is not None:
        attrs['visibility'] = visibility
    attrs['out'] = base + '.d'
    attrs['srcs'] = [lex_src]
    attrs['cmd'] = ' && '.join([
        'mkdir -p $OUT',
        # Run flex, writing both the generated source and header into $OUT.
        '$(exe {lex}) {args} -o$OUT/{src} --header-file=$OUT/{hdr}'
        ' $SRCS'
        .format(
            lex=self.get_tool_target(LEX, platform),
            args=' '.join([pipes.quote(f) for f in lex_flags]),
            src=pipes.quote(source),
            hdr=pipes.quote(header)),
        # Strip the absolute build-sandbox prefix out of the generated
        # files (e.g. #line markers) so output is machine-independent.
        r"""(cd "$GEN_DIR"/{fbcode} &&"""
        r""" perl -pi -e 's!\Q'"$PWD"'/\E!!' "$OUT"/{src} "$OUT"/{hdr})"""
        .format(
            fbcode=self.get_fbcode_dir_from_gen_dir(),
            src=pipes.quote(source),
            hdr=pipes.quote(header)),
    ])
    rules = []
    rules.append(Rule('genrule', attrs))
    # Expose the generated header and source as individually-addressable
    # copy rules.
    rules.append(
        self.copy_rule(
            '$(location :{})/{}'.format(name_base, header),
            header_name,
            header))
    rules.append(
        self.copy_rule(
            '$(location :{})/{}'.format(name_base, source),
            source_name,
            source))
    return (':' + header_name, ':' + source_name, rules)
def convert_yacc(self, base_path, name, yacc_flags, yacc_src, platform, visibility):
    """
    Create rules to generate a C/C++ header and source from the given yacc
    file.

    Returns (headers, source, rules): a list of `:`-prefixed header
    targets (bison's C++ skeleton additionally emits `stack.hh`), the
    `:`-prefixed source target, and the generated Rule objects.
    """
    # The C++ skeleton emits an extra `stack.hh` header and needs an
    # include-path fixup (see below).
    is_cpp = ('--skeleton=lalr1.cc' in yacc_flags)
    name_base = '{}={}'.format(name.replace(os.sep, '-'), yacc_src)
    header_name = name_base + '.h'
    source_name = name_base + '.cc'
    base = yacc_src
    header = base + '.h'
    source = base + '.cc'
    if is_cpp:
        stack_header_name = '{}=stack.hh'.format(name.replace(os.sep, '-'))
        stack_header = 'stack.hh'
    # Shell pipeline run inside the genrule; all {placeholders} are
    # filled by the single .format() call on the joined string below.
    commands = [
        'mkdir -p $OUT',
        '$(exe {yacc}) {args} -o "$OUT/{base}.c" $SRCS',
        # Sanitize the header and source files of original source line-
        # markers and include guards.
        'sed -i.bak'
        r""" -e 's|'"$SRCS"'|'{src}'|g' """
        r""" -e 's|YY_YY_.*_INCLUDED|YY_YY_{defn}_INCLUDED|g' """
        ' "$OUT/{base}.c" "$OUT/{base}.h"',
        # Sanitize the source file of self-referencing line-markers.
        'sed -i.bak'
        r""" -e 's|\b{base}\.c\b|{base}.cc|g' """
        r""" -e 's|'"$OUT"'/'{base}'\.cc\b|'{out_cc}'|g' """
        ' "$OUT/{base}.c"',
        # Sanitize the header file of self-referencing line-markers.
        'sed -i.bak'
        r""" -e 's|'"$OUT"'/'{base}'\.h\b|'{out_h}'|g' """
        ' "$OUT/{base}.h"',
        'rm -f "$OUT/{base}"*.bak',
        'mv "$OUT/{base}.c" "$OUT/{base}.cc"'
    ]
    if is_cpp:
        commands.append(
            # Patch the header file to add include header file prefix
            # e.g.: thrifty.yy.h => thrift/compiler/thrifty.yy.h
            'sed -i.bak'
            r""" -e 's|#include "{base}.h"|#include "{base_path}/{base}.h"|g' """
            ' "$OUT/{base}.cc"'
        )
    attrs = collections.OrderedDict()
    attrs['name'] = name_base
    attrs['out'] = base + '.d'
    attrs['srcs'] = [yacc_src]
    # Paths are quoted for the shell with pipes.quote().
    attrs['cmd'] = ' && '.join(commands).format(
        yacc=self.get_tool_target(YACC, platform),
        args=' '.join(
            [pipes.quote(f) for f in YACC_FLAGS + list(yacc_flags)]),
        src=pipes.quote(os.path.join(base_path, yacc_src)),
        out_cc=pipes.quote(
            os.path.join(
                'buck-out',
                'gen',
                base_path,
                base + '.cc',
                base + '.cc')),
        out_h=pipes.quote(
            os.path.join(
                'buck-out',
                'gen',
                base_path,
                base + '.h',
                base + '.h')),
        defn=re.sub('[./]', '_', os.path.join(base_path, header)).upper(),
        base=pipes.quote(base),
        base_path=base_path)
    rules = []
    rules.append(Rule('genrule', attrs))
    # Expose the generated header/source as individually-addressable
    # copy rules.
    rules.append(
        self.copy_rule(
            '$(location :{})/{}'.format(name_base, header),
            header_name,
            header,
            visibility=visibility))
    rules.append(
        self.copy_rule(
            '$(location :{})/{}'.format(name_base, source),
            source_name,
            source,
            visibility=visibility))
    if is_cpp:
        rules.append(
            self.copy_rule(
                '$(location :{})/{}'.format(name_base, stack_header),
                stack_header_name,
                stack_header,
                visibility=visibility))
    returned_headers = [':' + header_name]
    if is_cpp:
        returned_headers.append(':' + stack_header_name)
    return (returned_headers, ':' + source_name, rules)
def has_cuda_dep(self, dependencies):
    """
    Returns whether there is any dependency on CUDA tp2.
    """
    return any(
        dep.repo is not None and dep.base_path == 'cuda'
        for dep in dependencies)
def is_cuda(self, srcs):
    """
    Return whether this rule has CUDA sources.
    """
    for src in srcs:
        # For generated-rule references ("rule=name"), examine the source
        # name to the right of the '='.
        if '=' in src:
            src = src.rsplit('=', 1)[1]
        # Assume generated sources without explicit extensions are non-CUDA
        if src.startswith(('@', ':', '//')):
            continue
        # A `.cu` extension marks a CUDA source.
        if os.path.splitext(src)[1] == '.cu':
            return True
    return False
def get_lua_base_module_parts(self, base_path, base_module):
    """
    Get the list of base module parts for this rule.

    `base_module` may be None (derive a default from `base_path`),
    an empty string (no base module), or a dotted module string.
    """
    # Fixes docstring/comment typo ("separater") and flattens the chain.
    # If base module is unset, prepare a default rooted at `fbcode`.
    if base_module is None:
        return ['fbcode'] + base_path.split(os.sep)
    # If base module is empty, return the empty list.
    if not base_module:
        return []
    # Otherwise, split it on the module separator.
    return base_module.split('.')
def get_lua_base_module(self, base_path, base_module):
    """Return the dotted base module string for this rule."""
    return '.'.join(self.get_lua_base_module_parts(base_path, base_module))
def get_lua_init_symbol(self, base_path, name, base_module):
    """Return the C entry-point symbol Lua uses to load this module."""
    pieces = ['luaopen']
    pieces.extend(self.get_lua_base_module_parts(base_path, base_module))
    pieces.append(name)
    return '_'.join(pieces)
@classmethod
def get_auto_headers(cls, headers, auto_headers, read_config):
    """
    Get the level of auto-headers to apply to the rule.

    NOTE(review): uses `basestring`, so this code path is Python 2
    only -- confirm before running under Python 3.
    """
    # If `auto_headers` is set, use that.
    if auto_headers is not None:
        return auto_headers
    # For backwards compatibility, if the `headers` parameter is a string,
    # then it refers to an auto-headers setting.
    if isinstance(headers, basestring):
        return headers
    # If it's `None`, then return the global default.
    return read_config(
        'cxx',
        'auto_headers',
        global_defns.AutoHeaders.SOURCES)
def get_implicit_deps(self):
    """
    Add additional dependencies we need to implicitly add to the build for
    various reasons.
    """
    # TODO(#13588666): When using clang with the gcc-5-glibc-2.23 platform,
    # `-latomic` isn't automatically added to the link line, meaning uses
    # of `std::atomic<T>` fail to link with undefined reference errors.
    # So implicitly add this dep here.
    #
    # TODO(#17067102): `cpp_precompiled_header` rules currently don't
    # support `platform_deps` parameter.
    if self.get_fbconfig_rule_type() == 'cpp_precompiled_header':
        return []
    return [ThirdPartyRuleTarget('libgcc', 'atomic')]
def verify_linker_flags(self, flags):
    """
    Check for invalid linker flags.

    Raises ValueError on the first flag that doesn't start with one of
    the allowed prefixes.
    """
    # PLEASE DON'T UPDATE WITHOUT REACHING OUT TO FBCODE FOUNDATION FIRST.
    # Using arbitrary linker flags in libraries can cause unexpected issues
    # for upstream dependencies, so we make sure to restrict to a safe(r)
    # subset of potential flags.
    prefixes = (
        '-L',
        '-u',
        '-rpath',
        '--wrap',
        '--dynamic-list',
        '--export-dynamic',
        '--enable-new-dtags',
    )
    for flag in flags:
        # Plain prefix matching instead of an unescaped regex alternation:
        # behavior is identical for these literal prefixes, but this can't
        # silently change meaning if a metacharacter is ever added.
        if not flag.startswith(prefixes):
            raise ValueError(
                'using disallowed linker flag in a library: ' + flag)
def verify_preprocessor_flags(self, param, flags):
"""
Make sure the given flags are valid preprocessor flags.
"""
# Check that we're getting an actual preprocessor flag (e.g. and not a
# compiler flag).
for flag in flags:
if not re.match('-[DI]', flag):
raise ValueError(
'`{}`: invalid preprocessor flag (expected `-[DI]*`): {}'
.format(param, flag))
# Check for includes pointing to system paths.
bad_flags = [flag for flag in flags if SYS_INC.search(flag)]
if bad_flags:
raise ValueError(
'The flags \"{}\" in \'preprocessor_flags\' | |
in tblines)
self.assertTrue("raise NonserializableError((\"xantippe" in tblines)
finally:
Pyro4.config.SERIALIZER = "serpent"
def testBatchProxy(self):
    """Batched calls are queued, executed in order, and stop at the first error."""
    with Pyro4.core.Proxy(self.objectUri) as p:
        batch = Pyro4.batch(p)
        # Queuing a call on the batch proxy returns None; nothing runs yet.
        self.assertIsNone(batch.multiply(7, 6))
        self.assertIsNone(batch.divide(999, 3))
        self.assertIsNone(batch.ping())
        self.assertIsNone(batch.divide(999, 0))  # force an exception here
        self.assertIsNone(batch.multiply(3, 4))  # this call should not be performed after the error
        # Invoking the batch executes the queued calls and yields the results.
        results = batch()
        self.assertEqual(42, next(results))
        self.assertEqual(333, next(results))
        self.assertIsNone(next(results))  # ping() has no return value
        self.assertRaises(ZeroDivisionError, next, results)  # 999//0 should raise this error
        self.assertRaises(StopIteration, next, results)  # no more results should be available after the error
def testAsyncProxy(self):
    """An async proxy call returns immediately; the result arrives later."""
    with Pyro4.core.Proxy(self.objectUri) as p:
        # `async` became a reserved keyword in Python 3.7, so the attribute
        # must be fetched with getattr() instead of the literal `Pyro4.async`.
        async_proxy = getattr(Pyro4, "async")(p)
        async_proxy._pyroBind()  # force that any metadata is processed
        begin = time.time()
        result = async_proxy.delayAndId(1, 42)
        duration = time.time() - begin
        # The call itself must not block.
        self.assertTrue(duration < 0.1)
        self.assertFalse(result.ready)
        self.assertFalse(result.wait(0.5))  # not available within 0.5 sec
        # Accessing .value blocks until the server is done.
        self.assertEqual("slept for 42", result.value)
        self.assertTrue(result.ready)
        self.assertTrue(result.wait())
def testAsyncProxyCallchain(self):
    """then() callbacks chain onto an async result and each runs exactly once."""
    class FuncHolder(object):
        count = threadutil.AtomicCounter()

        def function(self, value, increase=1):
            self.count.incr()
            return value + increase

    with Pyro4.core.Proxy(self.objectUri) as p:
        # `async` is a reserved keyword from Python 3.7 on; use getattr().
        async_proxy = getattr(Pyro4, "async")(p)
        async_proxy._pyroBind()  # force that any metadata is processed
        holder = FuncHolder()
        begin = time.time()
        result = async_proxy.multiply(2, 3)
        result.then(holder.function, increase=10) \
            .then(holder.function, increase=5) \
            .then(holder.function)
        duration = time.time() - begin
        self.assertTrue(duration < 0.1)
        # ((2*3)+10+5)+1 = 22, via three chained callbacks.
        value = result.value
        self.assertTrue(result.ready)
        self.assertEqual(22, value)
        self.assertEqual(3, holder.count.value)
def testBatchOneway(self):
    """A oneway batch returns immediately and yields no results."""
    with Pyro4.core.Proxy(self.objectUri) as proxy:
        queued = Pyro4.batch(proxy)
        self.assertIsNone(queued.multiply(7, 6))
        self.assertIsNone(queued.delay(1))  # a delay shouldn't matter with oneway
        self.assertIsNone(queued.multiply(3, 4))
        start = time.time()
        outcome = queued(oneway=True)
        elapsed = time.time() - start
        self.assertTrue(elapsed < 0.1, "oneway batch with delay should return almost immediately")
        self.assertIsNone(outcome)
def testBatchAsync(self):
    """A batch invoked asynchronously returns right away; results arrive later."""
    with Pyro4.core.Proxy(self.objectUri) as p:
        batch = Pyro4.batch(p)
        self.assertIsNone(batch.multiply(7, 6))
        self.assertIsNone(batch.delay(1))  # a delay shouldn't matter with async
        self.assertIsNone(batch.multiply(3, 4))
        begin = time.time()
        # `async` is a reserved keyword from Python 3.7 on, so pass it via
        # ** expansion instead of a literal keyword argument.
        asyncresult = batch(**{"async": True})
        duration = time.time() - begin
        self.assertTrue(duration < 0.1, "async batch with delay should return almost immediately")
        results = asyncresult.value
        self.assertEqual(42, next(results))
        self.assertEqual("slept 1 seconds", next(results))
        self.assertEqual(12, next(results))
        self.assertRaises(StopIteration, next, results)  # no more results should be available
def testBatchAsyncCallchain(self):
    """then() callbacks also chain onto an async batch result."""
    class FuncHolder(object):
        count = threadutil.AtomicCounter()

        def function(self, values):
            result = [value + 1 for value in values]
            self.count.incr()
            return result

    with Pyro4.core.Proxy(self.objectUri) as p:
        batch = Pyro4.batch(p)
        self.assertIsNone(batch.multiply(7, 6))
        self.assertIsNone(batch.multiply(3, 4))
        # `async` is a reserved keyword from Python 3.7 on, so pass it via
        # ** expansion instead of a literal keyword argument.
        result = batch(**{"async": True})
        holder = FuncHolder()
        result.then(holder.function).then(holder.function)
        value = result.value
        self.assertTrue(result.ready)
        # Each chained callback adds 1 to every element: [42, 12] -> [44, 14].
        self.assertEqual([44, 14], value)
        self.assertEqual(2, holder.count.value)
def testPyroTracebackNormal(self):
    """The magic Pyro traceback info is attached to remote errors."""
    with Pyro4.core.Proxy(self.objectUri) as proxy:
        try:
            proxy.divide(999, 0)  # force error here
            self.fail("expected error")
        except ZeroDivisionError:
            # The remote traceback should be embedded in the local one.
            traceback_text = "".join(Pyro4.util.getPyroTraceback())
            for fragment in ("Remote traceback:", "ZeroDivisionError", "return x // y"):
                self.assertIn(fragment, traceback_text)
def testPyroTracebackBatch(self):
    """The remote traceback is also available for errors raised from a batch."""
    with Pyro4.core.Proxy(self.objectUri) as p:
        batch = Pyro4.batch(p)
        self.assertIsNone(batch.divide(999, 0))  # force an exception here
        results = batch()
        try:
            next(results)
            self.fail("expected error")
        except ZeroDivisionError:
            # going to check if the magic pyro traceback attribute is available for batch methods too
            tb = "".join(Pyro4.util.getPyroTraceback())
            self.assertIn("Remote traceback:", tb)  # validate if remote tb is present
            self.assertIn("ZeroDivisionError", tb)  # the error
            self.assertIn("return x // y", tb)  # the statement
        self.assertRaises(StopIteration, next, results)  # no more results should be available after the error
def testAutoProxy(self):
    """AUTOPROXY controls whether registered pyro objects are echoed back as proxies."""
    obj = ServerTestObject()
    # Echoing custom class instances requires the pickle serializer.
    Pyro4.config.SERIALIZER = "pickle"
    try:
        with Pyro4.core.Proxy(self.objectUri) as p:
            Pyro4.config.AUTOPROXY = False  # make sure autoproxy is disabled
            result = p.echo(obj)
            self.assertIsInstance(result, ServerTestObject)
            self.daemon.register(obj)
            result = p.echo(obj)
            self.assertIsInstance(result, ServerTestObject, "with autoproxy off the object should be an instance of the class")
            self.daemon.unregister(obj)
            result = p.echo(obj)
            self.assertIsInstance(result, ServerTestObject, "serialized object must still be normal object")
            Pyro4.config.AUTOPROXY = True  # make sure autoproxying is enabled
            result = p.echo(obj)
            self.assertIsInstance(result, ServerTestObject, "non-pyro object must be returned as normal class")
            self.daemon.register(obj)
            result = p.echo(obj)
            self.assertIsInstance(result, Pyro4.core.Proxy, "serialized pyro object must be a proxy")
            self.daemon.unregister(obj)
            result = p.echo(obj)
            self.assertIsInstance(result, ServerTestObject, "unregistered pyro object must be normal class again")
            # note: the custom serializer may still be active but it should be smart enough to see
            # that the object is no longer a pyro object, and therefore, no proxy should be created.
    finally:
        # Restore the global config for the other tests.
        Pyro4.config.AUTOPROXY = True
        Pyro4.config.SERIALIZER = "serpent"
def testConnectOnce(self):
    """Only the first _pyroBind() call creates a new connection."""
    with Pyro4.core.Proxy(self.objectUri) as proxy:
        first = proxy._pyroBind()
        second = proxy._pyroBind()
        self.assertTrue(first, "first bind should always connect")
        self.assertFalse(second, "second bind should not connect again")
def testConnectingThreads(self):
    """A proxy shared between many threads must open only a single connection."""
    class ConnectingThread(threadutil.Thread):
        # Counts how many threads ended up creating a new connection.
        new_connections = threadutil.AtomicCounter()

        def __init__(self, proxy, event):
            threadutil.Thread.__init__(self)
            self.proxy = proxy
            self.event = event
            self.setDaemon(True)
            self.new_connections.reset()

        def run(self):
            # Block until all threads are released at once, to maximize
            # the chance of concurrent bind attempts.
            self.event.wait()
            if self.proxy._pyroBind():
                ConnectingThread.new_connections.incr()  # 1 more new connection done

    with Pyro4.core.Proxy(self.objectUri) as proxy:
        event = threadutil.Event()
        threads = [ConnectingThread(proxy, event) for _ in range(20)]
        for t in threads:
            t.start()
        event.set()  # release all threads simultaneously
        for t in threads:
            t.join()
        self.assertEqual(1, ConnectingThread.new_connections.value)  # proxy shared among threads must still have only 1 connect done
def testMaxMsgSize(self):
    """Messages larger than MAX_MESSAGE_SIZE are rejected with ProtocolError."""
    with Pyro4.core.Proxy(self.objectUri) as p:
        bigobject = [42] * 1000
        result = p.echo(bigobject)  # fits with the default (unlimited) size
        self.assertEqual(result, bigobject)
        Pyro4.config.MAX_MESSAGE_SIZE = 999  # far smaller than the message
        try:
            _ = p.echo(bigobject)
            self.fail("should fail with ProtocolError msg too large")
        except Pyro4.errors.ProtocolError:
            pass
        Pyro4.config.MAX_MESSAGE_SIZE = 0  # 0 means unlimited again
def testCleanup(self):
    """The daemon shuts down cleanly while proxies still hold open connections."""
    proxies = [Pyro4.core.Proxy(self.objectUri) for _ in range(3)]
    for proxy in proxies:
        proxy.echo(42)
    # we have several active connections still up, see if we can cleanly shutdown the daemon
    # (it should interrupt the worker's socket connections)
    time.sleep(0.1)
    self.daemon.shutdown()
    self.daemon = None
    for proxy in proxies:
        proxy._pyroRelease()
class ServerTestsThreadNoTimeout(unittest.TestCase):
SERVERTYPE = "thread"
COMMTIMEOUT = None
def setUp(self):
    """Start a daemon (with a registered test object) in a background thread."""
    Pyro4.config.LOGWIRE = True
    Pyro4.config.POLLTIMEOUT = 0.1
    Pyro4.config.SERVERTYPE = self.SERVERTYPE
    Pyro4.config.COMMTIMEOUT = self.COMMTIMEOUT
    # Some tests echo custom class instances, which needs the pickle serializer.
    Pyro4.config.SERIALIZERS_ACCEPTED.add("pickle")
    self.daemon = Pyro4.core.Daemon(port=0)  # port=0: pick a free port
    obj = ServerTestObject()
    uri = self.daemon.register(obj, "something")
    self.objectUri = uri
    self.daemonthread = DaemonLoopThread(self.daemon)
    self.daemonthread.start()
    self.daemonthread.running.wait()
    # Give the daemon loop a moment to actually start serving.
    time.sleep(0.05)
def tearDown(self):
    """Shut the daemon down and restore the global Pyro4 configuration."""
    time.sleep(0.05)
    self.daemon.shutdown()
    self.daemonthread.join()
    Pyro4.config.SERVERTYPE = "thread"
    Pyro4.config.COMMTIMEOUT = None
    Pyro4.config.SERIALIZERS_ACCEPTED.discard("pickle")
def testConnectionStuff(self):
    """Connections are made lazily, released idempotently, and rebindable."""
    p1 = Pyro4.core.Proxy(self.objectUri)
    p2 = Pyro4.core.Proxy(self.objectUri)
    # No connection exists until the first actual call.
    for proxy in (p1, p2):
        self.assertIsNone(proxy._pyroConnection)
    for proxy in (p1, p2):
        proxy.ping()
    for proxy in (p1, p2):
        _ = proxy.multiply(11, 5)
    for proxy in (p1, p2):
        self.assertIsNotNone(proxy._pyroConnection)
    # Releasing twice in a row is harmless.
    for proxy in (p1, p2):
        proxy._pyroRelease()
        proxy._pyroRelease()
    for proxy in (p1, p2):
        self.assertIsNone(proxy._pyroConnection)
    # Explicit bind on one proxy, implicit reconnect-on-call on the other.
    p1._pyroBind()
    _ = p1.multiply(11, 5)
    _ = p2.multiply(11, 5)
    for proxy in (p1, p2):
        self.assertIsNotNone(proxy._pyroConnection)
        self.assertEqual("PYRO", proxy._pyroUri.protocol)
    p1._pyroRelease()
    p2._pyroRelease()
def testReconnectAndCompression(self):
    """_pyroReconnect() re-establishes a connection; COMPRESSION round-trips."""
    # try reconnects
    with Pyro4.core.Proxy(self.objectUri) as p:
        self.assertIsNone(p._pyroConnection)
        p._pyroReconnect(tries=100)
        self.assertIsNotNone(p._pyroConnection)
    # Leaving the `with` block releases the connection again.
    self.assertIsNone(p._pyroConnection)
    # test compression:
    try:
        with Pyro4.core.Proxy(self.objectUri) as p:
            Pyro4.config.COMPRESSION = True
            self.assertEqual(55, p.multiply(5, 11))
            # Large repetitive payloads compress well and must round-trip intact.
            self.assertEqual("*" * 1000, p.multiply("*" * 500, 2))
    finally:
        Pyro4.config.COMPRESSION = False
def testOnewayMetaOn(self):
    """With metadata enabled, the server tells the proxy which methods are oneway."""
    Pyro4.config.METADATA = True
    with Pyro4.core.Proxy(self.objectUri) as p:
        self.assertEqual(set(), p._pyroOneway)  # when not bound, no meta info exchange has been done
        p._pyroBind()
        self.assertIn("oneway_multiply", p._pyroOneway)  # after binding, meta info has been processed
        self.assertEqual(55, p.multiply(5, 11))  # not tagged as @Pyro4.oneway
        self.assertIsNone(p.oneway_multiply(5, 11))  # tagged as @Pyro4.oneway
        # Clearing the local oneway set makes the call wait for a result again.
        p._pyroOneway = set()
        self.assertEqual(55, p.multiply(5, 11))
        self.assertEqual(55, p.oneway_multiply(5, 11))
        # check nonexisting method behavior for oneway methods
        with self.assertRaises(AttributeError):
            p.nonexisting_method()
        p._pyroOneway.add("nonexisting_method")
        # now it should still fail because of metadata telling Pyro what methods actually exist
        with self.assertRaises(AttributeError):
            p.nonexisting_method()
def testOnewayMetaOff(self):
    """Without metadata, oneway handling is purely client-side."""
    Pyro4.config.METADATA = False
    with Pyro4.core.Proxy(self.objectUri) as p:
        self.assertEqual(set(), p._pyroOneway)  # when not bound, no meta info exchange has been done
        p._pyroBind()
        self.assertEqual(set(), p._pyroOneway)  # after binding, no meta info exchange has been done because disabled
        self.assertEqual(55, p.multiply(5, 11))
        self.assertEqual(55, p.oneway_multiply(5, 11))
        # check nonexisting method behavior for oneway methods
        with self.assertRaises(AttributeError):
            p.nonexisting_method()
        p._pyroOneway.add("nonexisting_method")
        # now it shouldn't fail because of oneway semantics (!) (and because there's no metadata to tell Pyro that the method doesn't exist)
        p.nonexisting_method()
    Pyro4.config.METADATA = True
def testOnewayWithProxySubclass(self):
    """A Proxy subclass may pre-seed _pyroOneway when metadata is disabled."""
    Pyro4.config.METADATA = False
    try:
        class ProxyWithOneway(Pyro4.core.Proxy):
            def __init__(self, arg):
                super(ProxyWithOneway, self).__init__(arg)
                self._pyroOneway = {"oneway_multiply", "multiply"}

        with ProxyWithOneway(self.objectUri) as p:
            # Both methods are marked oneway locally, so both return None.
            self.assertIsNone(p.oneway_multiply(5, 11))
            self.assertIsNone(p.multiply(5, 11))
            # Clearing the set turns them back into normal two-way calls.
            p._pyroOneway = set()
            self.assertEqual(55, p.oneway_multiply(5, 11))
            self.assertEqual(55, p.multiply(5, 11))
    finally:
        # Restore the global config even if an assertion failed, so later
        # tests aren't affected (the original only restored on success).
        Pyro4.config.METADATA = True
def testOnewayDelayed(self):
try:
with Pyro4.core.Proxy(self.objectUri) as p:
p.ping()
Pyro4.config.ONEWAY_THREADED = True # the default
now = time.time()
p.oneway_delay(1) # oneway so we should continue right away
self.assertTrue(time.time() - now < 0.2, "delay should be running as oneway")
now = time.time()
self.assertEqual(55, p.multiply(5, 11), "expected a normal result from a non-oneway call")
self.assertTrue(time.time() - now < 0.2, "delay should be running in its own thread")
# make oneway calls run in the server thread
# we can change the config here and the server will pick it up on the fly
Pyro4.config.ONEWAY_THREADED = False
now = time.time()
p.oneway_delay(1) # oneway so we should continue right away
self.assertTrue(time.time() - now < 0.2, "delay should be running as oneway")
now = time.time()
self.assertEqual(55, p.multiply(5, 11), "expected a normal result from a non-oneway call")
self.assertFalse(time.time() - now < 0.2, "delay should be running in | |
<filename>pysaint/api.py
"""
End User를 위한 간단한 api
"""
from .constants import Line
from .saint import Saint
import copy
from tqdm import tqdm
from datetime import datetime
def get(course_type, year_range, semesters, line=Line.FIVE_HUNDRED, **kwargs):
    """Fetch course listings from u-saint. THIS IS THE END POINT OF the pysaint API.

    USAGE::

        >>> import pysaint
        >>> res = pysaint.get('전공', ['2018'], ['2 학기'])
        >>> res = pysaint.get('교양필수', range(2015, 2017),
        ...                   ['1 학기', '여름학기', '2 학기', '겨울학기'])
        >>> res = pysaint.get('교양선택', (2016, 2017, 2018), ('1 학기', ))
        >>> res = pysaint.get('전공', ['2018'], ['2 학기'], line=200)

    :param course_type: one of '교양필수', '전공', '교양선택', '연계전공',
        '융합전공', '교직', '채플'
    :type course_type: str
    :param year_range: years to crawl,
        e.g. '2018', 2018, ['2017', '2018'], (2015, 2016, 2017), range(2015, 2019)
    :type year_range: list or tuple or range or str or int
    :param semesters: semesters to crawl,
        e.g. '1 학기', ['1 학기', '여름학기', '2 학기', '겨울학기'], ('1 학기', )
    :type semesters: list or tuple or str
    :param line: page size used while crawling; one of 10, 20, 50, 100, 200, 500
    :type line: int
    :param silent: (passed through via kwargs) hide the progress bars
    :return: dict keyed by year, then semester
    :raises ValueError: on a wrong argument type, an out-of-range year,
        an unsupported ``line``, or an unknown ``course_type``
    """
    # NOTE: type() is used instead of isinstance() on purpose so that
    # bool (a subclass of int) is still rejected as a year value.
    if type(year_range) not in (tuple, list, range, str, int):
        raise ValueError("get() got wrong arguments year_range: {}\n"
                         "expected tuple type or list, or range type but got {} type".format(year_range, type(year_range)))
    if type(semesters) not in (tuple, list, str):
        raise ValueError("get() got wrong arguments semesters: {}\n"
                         "expected tuple type or list type but got {} type".format(semesters, type(semesters)))

    # normalize scalar arguments to one-element lists
    if type(year_range) in (str, int):
        year_range = [year_range]
    if type(semesters) is str:
        semesters = [semesters]

    if not Line.has_value(line):
        raise ValueError("get() got wrong arguments line: {}\n"
                         "line should be one of {}".format(line, Line.list()))

    # Validate years against the current calendar year. The error message
    # previously claimed a hard-coded range(2000, 2021) even though the
    # check itself was dynamic; the message now reflects the real bound.
    current_year = datetime.now().year
    reformed_year_range = []
    for year in year_range:
        if not 2000 < int(year) <= current_year:
            raise ValueError("get() got wrong arguments year_range: {}\n"
                             "expected to be in year range(2000, {}) but got {}".format(
                                 year_range, current_year + 1, int(year)))
        reformed_year_range.append("{}".format(year))

    # Dispatch table instead of an if/elif ladder; every crawler shares
    # the same keyword signature.
    crawlers = {
        '교양필수': _liberal_arts,
        '전공': _major,
        '교양선택': _selective_liberal,
        '연계전공': _related_major,
        '융합전공': _fusion_major,
        '교직': _teaching,
        '채플': _chapel,
    }
    try:
        crawler = crawlers[course_type]
    except KeyError:
        # previously only three of the seven supported values were listed
        raise ValueError("get() got wrong arguments course_type: {} \n"
                         "expected to get one of {}".format(course_type, list(crawlers)))
    return crawler(year_range=reformed_year_range, semesters=semesters, line=line, **kwargs)
def grade(id, password=None):
    """Fetch the grade card from saint.ssu.ac.kr.

    :param id: student id, e.g. 2015xxxx
    :param password: account password; handling of None is up to login()
    :return: list -- the grade card entries
    """
    session = login(id, password)
    return session.get_grade()
def _liberal_arts(year_range=(), semesters=(), line=int(Line.FIVE_HUNDRED), silent=False):
    """Crawl 교양필수 (mandatory liberal arts) courses, grouped per semester.

    Fix: the defaults used to be mutable lists ([]); immutable empty tuples
    avoid the shared-mutable-default pitfall and iterate identically.

    :param year_range: years to crawl, e.g. [2013, 2014, 2015] or (2017, 2018)
    :type year_range: list or tuple
    :param semesters: semesters to crawl, e.g. ['1 학기', '여름학기', '2 학기', '겨울학기']
    :type semesters: list or tuple
    :param line: page size used while crawling (10/20/50/100/200/500)
    :type line: int
    :param silent: hide the tqdm progress bars
    :return: {year: {semester: {course_name: [course dict, ...]}}}
        where each course dict has the keys
        '계획', '이수구분(주전공)', '이수구분(다전공)', '공학인증', '교과영역',
        '과목번호', '과목명', '분반', '교수명', '개설학과', '시간/학점(설계)',
        '수강인원', '여석', '강의시간(강의실)', '수강대상'
    """
    ret = {year: {} for year in year_range}
    saint = Saint()
    saint.select_course_section('교양필수')

    def __get_whole_course(year, semester, _line=line):
        # one (year, semester) page: list every liberal-arts course name,
        # then fetch the section rows for each name
        saint.select_year(year)
        saint.select_semester(semester)
        saint.select_line(_line)
        liberal_map = saint.get_liberal_arts_map()
        course_map = {name: [] for name in liberal_map}
        pbar = tqdm(liberal_map, disable=silent)
        for course_name in pbar:
            pbar.set_description("Processing {:8s}".format(course_name))
            course_map[course_name] = saint.select_on_liberal_arts(course_name)
        return course_map

    year_bar = tqdm(year_range, disable=silent)
    for year in year_bar:
        year_bar.set_description("Year: {:4s}".format(year))
        semester_bar = tqdm(semesters, disable=silent)
        for semester in semester_bar:
            semester_bar.set_description("Semester: {:6s}".format(semester))
            ret[year][semester] = __get_whole_course(year, semester)
    return ret
def _major(year_range=(), semesters=(), line=Line.FIVE_HUNDRED, silent=False):
    """Crawl 전공 (major) courses, grouped per semester.

    Fix: the defaults used to be mutable lists ([]); immutable empty tuples
    avoid the shared-mutable-default pitfall and iterate identically.

    :param year_range: years to crawl
    :type year_range: list or tuple
    :param semesters: semesters to crawl
    :type semesters: list or tuple
    :param line: page size used while crawling (10/20/50/100/200/500)
    :type line: int
    :param silent: hide the tqdm progress bars
    :return: {year: {semester: {college: {faculty: {major: [course dict, ...]}}}}}
        e.g. ret['2017']['1 학기']['인문대학']['중어중문학과']['중어중문학과'] is a
        list of course dicts, each with the keys
        '계획', '이수구분(주전공)', '이수구분(다전공)', '공학인증', '교과영역',
        '과목번호', '과목명', '분반', '교수명', '개설학과', '시간/학점(설계)',
        '수강인원', '여석', '강의시간(강의실)', '수강대상'
    """
    ret = {year: {} for year in year_range}
    saint = Saint()

    def __get_whole_course(year, semester, _line=line):
        saint.select_year(year)
        saint.select_semester(semester)
        saint.select_line(_line)
        major_map = saint.get_major_map()
        # start from the same college/faculty tree, but with empty lists
        # in place of the leaf major entries
        course_map = copy.deepcopy(major_map)
        for college in major_map:
            for faculty in major_map[college]:
                course_map[college][faculty] = {key: [] for key in major_map[college][faculty]}
        college_bar = tqdm(major_map, disable=silent)
        for college in college_bar:
            college_bar.set_description("Processing {:8s}".format(college))
            faculty_bar = tqdm(major_map[college], disable=silent)
            for faculty in faculty_bar:
                faculty_bar.set_description_str("Processing {:8s}".format(faculty))
                for major in major_map[college][faculty]:
                    course_map[college][faculty][major] = saint.select_on_major(college, faculty, major)
        return course_map

    year_bar = tqdm(year_range, disable=silent)
    for year in year_bar:
        year_bar.set_description("Year: {:4}".format(year))
        semester_bar = tqdm(semesters, disable=silent)
        for semester in semester_bar:
            semester_bar.set_description_str("Semester: {:6}".format(semester))
            ret[year][semester] = __get_whole_course(year, semester)
    return ret
def _selective_liberal(year_range=(), semesters=(), line=Line.FIVE_HUNDRED, silent=False):
    """Crawl 교양선택 (selective liberal arts) courses, grouped per semester.

    Fix: the defaults used to be mutable lists ([]); immutable empty tuples
    avoid the shared-mutable-default pitfall and iterate identically.

    :param year_range: years to crawl
    :type year_range: list or tuple
    :param semesters: semesters to crawl
    :type semesters: list or tuple
    :param line: page size used while crawling (10/20/50/100/200/500)
    :type line: int
    :param silent: hide the tqdm progress bars
    :return: {year: {semester: {section_name: [course dict, ...]}}}
        where section_name is a 교양 area such as '*문학과 예술(융합-인문)' or
        '기초역량(국제어문-국제어)', and each course dict has the keys
        '계획', '이수구분(주전공)', '이수구분(다전공)', '공학인증', '교과영역',
        '과목번호', '과목명', '분반', '교수명', '개설학과', '시간/학점(설계)',
        '수강인원', '여석', '강의시간(강의실)', '수강대상'
    """
    ret = {year: {} for year in year_range}
    saint = Saint()
    saint.select_course_section('교양선택')
    # NOTE(review): this warm-up selection predates this change and carried
    # the comment "is this necessary job?" -- kept as-is to preserve
    # behavior; verify against the Saint session before removing.
    saint.select_year('2017')
    saint.select_semester('2 학기')

    def __get_whole_course(year, semester, _line=line):
        saint.select_year(year)
        saint.select_semester(semester)
        saint.select_line(_line)
        selective_map = saint.get_selective_liberal_map()
        course_map = {course_name: {} for course_name in selective_map}
        pbar = tqdm(selective_map, disable=silent)
        for course_name in pbar:
            pbar.set_description("Processing {:8s}".format(course_name))
            # empty section names are skipped (their entry stays {})
            if course_name != '':
                course_map[course_name] = saint.select_on_selective_liberal(course_name)
        return course_map

    year_bar = tqdm(year_range, disable=silent)
    for year in year_bar:
        year_bar.set_description("Year: {:4s}".format(year))
        semester_bar = tqdm(semesters, disable=silent)
        for semester in semester_bar:
            semester_bar.set_description("semester: {:6s}".format(semester))
            ret[year][semester] = __get_whole_course(year, semester)
    return ret
def _related_major(year_range=[], semesters=[], line=Line.FIVE_HUNDRED, silent=False):
"""
교양선택 과목들을 학기 단위로 묶어서 반환한다.
:param year_range:
:param semesters:
:param line:
:return: dict
{
2017: {
'1 학기': {
"중국어경제국제통상연계전공": [
{
"계획": " ",
"이수구분(주전공)": "전선-경제",
"이수구분(다전공)": "복선-경제/부선-경제/연계2-벤처자본경제학/연계2-일본어경제통상/연계2-중국어경제통상",
"공학인증": " ",
"교과영역": " ",
"과목번호": "2150191901",
"과목명": "공공경제학(실시간화상강의) (온라인)",
"분반": " ",
"교수명": "우진희\n우진희",
"개설학과": "경제학과",
"시간/학점(설계)": "3.00 /3.0 (0 )",
"수강인원": "0",
"여석": "35",
"강의시간(강의실)": "월 15:00-16:15 (-우진희)\n수 13:30-14:45 (숭덕경상관 02109-우진희)",
"수강대상": "3학년 경제,벤처자본경제학,일본어경제통상,중국어경제통상"
}
]
일본어경제국제통상연계전공: []
금융공학·보험계리연계전공: []
영어·중국어연계전공: []
PreMed연계전공: []
벤처자본경제학연계전공: []
보험계리·리스크연계전공: []
융합창업연계: []
}
},
year: {
'semester': {
'section': [
{
dict_keys(['계획', '이수구분(주전공)', '이수구분(다전공)',
'공학인증', '교과영역', '과목번호', '과목명', '분반', '교수명',
'개설학과', '시간/학점(설계)', '수강인원', '여석', '강의시간(강의실)', '수강대상'])
}
]
}
}
}
"""
ret | |
<reponame>Sehgal-Arjun/DataSorter
#copyright ReportLab Europe Limited. 2000-2016
#see license.txt for license details
__version__='3.3.0'
__all__= (
'BarcodeI2of5',
'BarcodeCode128',
'BarcodeStandard93',
'BarcodeExtended93',
'BarcodeStandard39',
'BarcodeExtended39',
'BarcodeMSI',
'BarcodeCodabar',
'BarcodeCode11',
'BarcodeFIM',
'BarcodePOSTNET',
'BarcodeUSPS_4State',
)
from reportlab.lib.validators import isInt, isNumber, isString, isColorOrNone, isBoolean, EitherOr, isNumberOrNone
from reportlab.lib.attrmap import AttrMap, AttrMapValue
from reportlab.lib.colors import black
from reportlab.lib.utils import rl_exec
from reportlab.graphics.shapes import Rect, Group, String
from reportlab.graphics.charts.areas import PlotArea
'''
#snippet
#first make your Drawing
from reportlab.graphics.shapes import Drawing
d= Drawing(100,50)
#create and set up the widget
from reportlab.graphics.barcode.widgets import BarcodeStandard93
bc = BarcodeStandard93()
bc.value = 'RGB-123456'
#add to the drawing and save
d.add(bc)
# d.save(formats=['gif','pict'],fnRoot='bc_sample')
'''
class _BarcodeWidget(PlotArea):
    """Common base for the generated Barcode* graphics widgets.

    Bridges the low-level barcode classes (which draw onto a canvas-like
    object via rect()/annotate()) and the reportlab graphics widget
    machinery. Concrete subclasses are produced by _BCW(), which fills in
    _BCC with the underlying barcode implementation class.
    """
    _attrMap = AttrMap(BASE=PlotArea,
        barStrokeColor = AttrMapValue(isColorOrNone, desc='Color of bar borders.'),
        barFillColor = AttrMapValue(isColorOrNone, desc='Color of bar interior areas.'),
        barStrokeWidth = AttrMapValue(isNumber, desc='Width of bar borders.'),
        value = AttrMapValue(EitherOr((isString,isNumber)), desc='Value.'),
        textColor = AttrMapValue(isColorOrNone, desc='Color of human readable text.'),
        valid = AttrMapValue(isBoolean),
        validated = AttrMapValue(isString,desc="validated form of input"),
        encoded = AttrMapValue(None,desc="encoded form of input"),
        decomposed = AttrMapValue(isString,desc="decomposed form of input"),
        canv = AttrMapValue(None,desc="temporarily used for internal methods"),
        gap = AttrMapValue(isNumberOrNone, desc='Width of inter character gaps.'),
        )

    # class-level drawing defaults
    textColor = barFillColor = black
    barStrokeColor = None
    barStrokeWidth = 0
    # _BCC: the concrete barcode class this widget wraps; set by _BCW()
    _BCC = None

    def __init__(self,_value='',**kw):
        PlotArea.__init__(self)
        # drop any instance-level width/height set by PlotArea.__init__ so
        # that the barcode class's own attributes/properties take effect
        if 'width' in self.__dict__: del self.__dict__['width']
        if 'height' in self.__dict__: del self.__dict__['height']
        self.x = self.y = 0
        kw.setdefault('value',_value)
        self._BCC.__init__(self,**kw)

    def rect(self,x,y,w,h,**kw):
        # canvas-style callback invoked by _BCC.draw(): emit one bar
        self._Gadd(Rect(self.x+x,self.y+y,w,h,
            strokeColor=self.barStrokeColor,strokeWidth=self.barStrokeWidth, fillColor=self.barFillColor))

    def draw(self):
        """Render the barcode into a Group by letting _BCC draw onto self."""
        if not self._BCC: raise NotImplementedError("Abstract class %s cannot be drawn" % self.__class__.__name__)
        # impersonate our own canvas: _BCC.draw() calls self.rect()/annotate()
        self.canv = self
        G = Group()
        self._Gadd = G.add
        # invisible bounding rect keeps the drawing extents stable
        self._Gadd(Rect(self.x,self.y,self.width,self.height,fillColor=None,strokeColor=None,strokeWidth=0.0001))
        self._BCC.draw(self)
        del self.canv, self._Gadd
        return G

    def annotate(self,x,y,text,fontName,fontSize,anchor='middle'):
        # canvas-style callback: human readable text placed near the bars
        self._Gadd(String(self.x+x,self.y+y,text,fontName=fontName,fontSize=fontSize,
            textAnchor=anchor,fillColor=self.textColor))
def _BCW(doc,codeName,attrMap,mod,value,**kwds):
    """factory for Barcode Widgets

    Imports barcode class `codeName` from module `mod`, then generates and
    returns a widget class named 'Barcode'+codeName deriving from both
    _BarcodeWidget and that class.  `doc` becomes the generated class's
    docstring, `value` its default barcode value, and `attrMap` (if given)
    replaces the inherited _attrMap.  Optional `_pre_init`/`_methods`
    keywords are source fragments spliced into the generated class body;
    all remaining keywords become class attributes (e.g. _tests).
    """
    _pre_init = kwds.pop('_pre_init','')
    _methods = kwds.pop('_methods','')
    name = 'Barcode'+codeName
    # capture the local scope as the exec namespace for the generated code
    ns = vars().copy()
    code = 'from %s import %s' % (mod,codeName)
    rl_exec(code,ns)
    ns['_BarcodeWidget'] = _BarcodeWidget
    ns['doc'] = ("\n\t'''%s'''" % doc) if doc else ''
    # build the class body as source text; the \t escapes keep the
    # generated source legally indented when exec'd
    code = '''class %(name)s(_BarcodeWidget,%(codeName)s):%(doc)s
\t_BCC = %(codeName)s
\tcodeName = %(codeName)r
\tdef __init__(self,**kw):%(_pre_init)s
\t\t_BarcodeWidget.__init__(self,%(value)r,**kw)%(_methods)s''' % ns
    rl_exec(code,ns)
    Klass = ns[name]
    if attrMap: Klass._attrMap = attrMap
    # leftover keywords become class attributes on the generated widget
    for k, v in kwds.items():
        setattr(Klass,k,v)
    return Klass
BarcodeI2of5 = _BCW(
"""Interleaved 2 of 5 is used in distribution and warehouse industries.
It encodes an even-numbered sequence of numeric digits. There is an optional
module 10 check digit; if including this, the total length must be odd so that
it becomes even after including the check digit. Otherwise the length must be
even. Since the check digit is optional, our library does not check it.
""",
"I2of5",
AttrMap(BASE=_BarcodeWidget,
barWidth = AttrMapValue(isNumber,'''(float, default .0075):
X-Dimension, or width of the smallest element
Minumum is .0075 inch (7.5 mils).'''),
ratio = AttrMapValue(isNumber,'''(float, default 2.2):
The ratio of wide elements to narrow elements.
Must be between 2.0 and 3.0 (or 2.2 and 3.0 if the
barWidth is greater than 20 mils (.02 inch))'''),
gap = AttrMapValue(isNumberOrNone,'''(float or None, default None):
width of intercharacter gap. None means "use barWidth".'''),
barHeight = AttrMapValue(isNumber,'''(float, see default below):
Height of the symbol. Default is the height of the two
bearer bars (if they exist) plus the greater of .25 inch
or .15 times the symbol's length.'''),
checksum = AttrMapValue(isBoolean,'''(bool, default 1):
Whether to compute and include the check digit'''),
bearers = AttrMapValue(isNumber,'''(float, in units of barWidth. default 3.0):
Height of bearer bars (horizontal bars along the top and
bottom of the barcode). Default is 3 x-dimensions.
Set to zero for no bearer bars. (Bearer bars help detect
misscans, so it is suggested to leave them on).'''),
quiet = AttrMapValue(isBoolean,'''(bool, default 1):
Whether to include quiet zones in the symbol.'''),
lquiet = AttrMapValue(isNumber,'''(float, see default below):
Quiet zone size to left of code, if quiet is true.
Default is the greater of .25 inch, or .15 times the symbol's
length.'''),
rquiet = AttrMapValue(isNumber,'''(float, defaults as above):
Quiet zone size to right left of code, if quiet is true.'''),
fontName = AttrMapValue(isString, desc='human readable font'),
fontSize = AttrMapValue(isNumber, desc='human readable font size'),
humanReadable = AttrMapValue(isBoolean, desc='if human readable'),
stop = AttrMapValue(isBoolean, desc='if we use start/stop symbols (default 1)'),
),
'reportlab.graphics.barcode.common',
1234,
_tests = [
'12',
'1234',
'123456',
'12345678',
'1234567890'
],
)
BarcodeCode128 = _BCW("""Code 128 encodes any number of characters in the ASCII character set.""",
"Code128",
AttrMap(BASE=BarcodeI2of5,UNWANTED=('bearers','checksum','ratio','checksum','stop')),
'reportlab.graphics.barcode.code128',
"AB-12345678",
_tests = ['ReportLab Rocks!', 'PFWZF'],
)
BarcodeCode128Auto = _BCW(
'Modified Code128 to use auto encoding',
'Code128Auto',
AttrMap(BASE=BarcodeCode128),
'reportlab.graphics.barcode.code128',
'XY149740345GB'
)
BarcodeStandard93=_BCW("""This is a compressed form of Code 39""",
"Standard93",
AttrMap(BASE=BarcodeCode128,
stop = AttrMapValue(isBoolean, desc='if we use start/stop symbols (default 1)'),
),
'reportlab.graphics.barcode.code93',
"CODE 93",
)
BarcodeExtended93=_BCW("""This is a compressed form of Code 39, allowing the full ASCII charset""",
"Extended93",
AttrMap(BASE=BarcodeCode128,
stop = AttrMapValue(isBoolean, desc='if we use start/stop symbols (default 1)'),
),
'reportlab.graphics.barcode.code93',
"L@@K! Code 93 ;-)",
)
BarcodeStandard39=_BCW("""Code39 is widely used in non-retail, especially US defence and health.
Allowed characters are 0-9, A-Z (caps only), space, and -.$/+%*.""",
"Standard39",
AttrMap(BASE=BarcodeI2of5),
'reportlab.graphics.barcode.code39',
"A012345B%R",
)
BarcodeExtended39=_BCW("""Extended 39 encodes the full ASCII character set by encoding
characters as pairs of Code 39 characters; $, /, % and + are used as
shift characters.""",
"Extended39",
AttrMap(BASE=BarcodeI2of5),
'reportlab.graphics.barcode.code39',
"A012345B}",
)
BarcodeMSI=_BCW("""MSI is used for inventory control in retail applications.
There are several methods for calculating check digits so we
do not implement one.
""",
"MSI",
AttrMap(BASE=BarcodeI2of5),
'reportlab.graphics.barcode.common',
1234,
)
BarcodeCodabar=_BCW("""Used in blood banks, photo labs and FedEx labels.
Encodes 0-9, -$:/.+, and four start/stop characters A-D.""",
"Codabar",
AttrMap(BASE=BarcodeI2of5),
'reportlab.graphics.barcode.common',
"A012345B",
)
BarcodeCode11=_BCW("""Used mostly for labelling telecommunications equipment.
It encodes numeric digits.""",
'Code11',
AttrMap(BASE=BarcodeI2of5,
checksum = AttrMapValue(isInt,'''(integer, default 2):
Whether to compute and include the check digit(s).
(0 none, 1 1-digit, 2 2-digit, -1 auto, default -1):
How many checksum digits to include. -1 ("auto") means
1 if the number of digits is 10 or less, else 2.'''),
),
'reportlab.graphics.barcode.common',
"01234545634563",
)
BarcodeFIM=_BCW("""
FIM was developed as part of the POSTNET barcoding system.
FIM (Face Identification Marking) is used by the cancelling machines
to sort mail according to whether or not they have bar code
and their postage requirements. There are four types of FIM
called FIM A, FIM B, FIM C, and FIM D.
The four FIM types have the following meanings:
FIM A- Postage required pre-barcoded
FIM B - Postage pre-paid, no bar code exists
FIM C- Postage prepaid prebarcoded
FIM D- Postage required, no bar code exists""",
"FIM",
AttrMap(BASE=_BarcodeWidget,
barWidth = AttrMapValue(isNumber,'''(float, default 1/32in): the bar width.'''),
spaceWidth = AttrMapValue(isNumber,'''(float or None, default 1/16in):
width of intercharacter gap. None means "use barWidth".'''),
barHeight = AttrMapValue(isNumber,'''(float, default 5/8in): The bar height.'''),
quiet = AttrMapValue(isBoolean,'''(bool, default 0):
Whether to include quiet zones in the symbol.'''),
lquiet = AttrMapValue(isNumber,'''(float, default: 15/32in):
Quiet zone size to left of code, if quiet is true.'''),
rquiet = AttrMapValue(isNumber,'''(float, default 1/4in):
Quiet zone size to right left of code, if quiet is true.'''),
fontName = AttrMapValue(isString, desc='human readable font'),
fontSize = AttrMapValue(isNumber, desc='human readable font size'),
humanReadable = AttrMapValue(isBoolean, desc='if human readable'),
),
'reportlab.graphics.barcode.usps',
"A",
)
BarcodePOSTNET=_BCW('',
"POSTNET",
AttrMap(BASE=_BarcodeWidget,
barWidth = AttrMapValue(isNumber,'''(float, default 0.018*in): the bar width.'''),
spaceWidth = AttrMapValue(isNumber,'''(float or None, default 0.0275in): width of intercharacter gap.'''),
shortHeight = AttrMapValue(isNumber,'''(float, default 0.05in): The short bar height.'''),
barHeight = AttrMapValue(isNumber,'''(float, default 0.125in): The full bar height.'''),
fontName = AttrMapValue(isString, desc='human readable font'),
fontSize = AttrMapValue(isNumber, desc='human readable font size'),
humanReadable = AttrMapValue(isBoolean, desc='if human readable'),
),
'reportlab.graphics.barcode.usps',
"78247-1043",
)
BarcodeUSPS_4State=_BCW('',
"USPS_4State",
AttrMap(BASE=_BarcodeWidget,
widthSize = AttrMapValue(isNumber,'''(float, default 1): the bar width size adjustment between 0 and 1.'''),
heightSize = AttrMapValue(isNumber,'''(float, default 1): the bar height size adjustment between 0 and 1.'''),
fontName = AttrMapValue(isString, desc='human readable font'),
fontSize = AttrMapValue(isNumber, desc='human readable font size'),
tracking = AttrMapValue(isString, desc='tracking data'),
routing = AttrMapValue(isString, desc='routing data'),
humanReadable = AttrMapValue(isBoolean, desc='if human readable'),
barWidth = AttrMapValue(isNumber, desc='barWidth'),
barHeight = AttrMapValue(isNumber, desc='barHeight'),
pitch = AttrMapValue(isNumber, desc='pitch'),
),
'reportlab.graphics.barcode.usps4s',
'01234567094987654321',
_pre_init="\n\t\tkw.setdefault('routing','01234567891')\n",
_methods = "\n\tdef annotate(self,x,y,text,fontName,fontSize,anchor='middle'):\n\t\t_BarcodeWidget.annotate(self,x,y,text,fontName,fontSize,anchor='start')\n"
)
BarcodeECC200DataMatrix = _BCW(
'ECC200DataMatrix',
'ECC200DataMatrix',
AttrMap(BASE=_BarcodeWidget,
x=AttrMapValue(isNumber, desc='X position of the lower-left corner of the barcode.'),
y=AttrMapValue(isNumber, desc='Y position of the lower-left corner of the barcode.'),
barWidth=AttrMapValue(isNumber, desc='Size of data modules.'),
barFillColor=AttrMapValue(isColorOrNone, desc='Color of data modules.'),
value=AttrMapValue(EitherOr((isString,isNumber)), desc='Value.'),
height=AttrMapValue(None, desc='ignored'),
width=AttrMapValue(None, desc='ignored'),
strokeColor=AttrMapValue(None, desc='ignored'),
strokeWidth=AttrMapValue(None, desc='ignored'),
fillColor=AttrMapValue(None, desc='ignored'),
background=AttrMapValue(None, desc='ignored'),
debug=AttrMapValue(None, desc='ignored'),
gap=AttrMapValue(None, desc='ignored'),
row_modules=AttrMapValue(None, desc='???'),
col_modules=AttrMapValue(None, desc='???'),
row_regions=AttrMapValue(None, desc='???'),
col_regions=AttrMapValue(None, desc='???'),
cw_data=AttrMapValue(None, desc='???'),
cw_ecc=AttrMapValue(None, desc='???'),
row_usable_modules = AttrMapValue(None, desc='???'),
col_usable_modules = | |
maybe.",
"And on that terrible night, that night it happened, did anything particularly important happen at the dinner? No, not that I recall.",
"Honestly, I thought I was going to find you and Peter around the next corner, playing some trick on me.",
"My mother.",
"Your father.",
"More than the shock, the sinfulness, the forbidden act, there was this.",
"I enjoyed it.",
"Something whispered.",
"I listened.",
"Perhaps it has always been there, this thing, this demon inside me.",
"Or behind my back, waiting for me to turn around.",
"I didn't tell you what I saw.",
"How could I? There was no need for you to grow up so fast.",
"- Tomorrow, we should go into town.",
"- Yes.",
"You could not have endured it.",
"Or so I told myself.",
"Perhaps I was just cherishing the secrecy of it as a hidden sin.",
"But in me, there was a change.",
"I marked it from that night in the hedge maze.",
"Perhaps it was always there.",
"Little acts of wickedness.",
"Harmless, of course.",
"Something any girl would do.",
"I told myself it was no more than mischief.",
"But I knew it was more.",
"Of course I did.",
"Cold blows the wind To my true love And gently drops the rain I have never had but one true love And in Greenwood he lies slain And as we grew up, and you grew lovelier, it was no wonder to me that you met your gentleman first.",
"Or that he had luxurious mustaches.",
"Such a gallant young officer.",
"What a handsome couple you were.",
"Found you.",
"Thank you.",
"It was the season of Peter's inadequate beard, I watched your courtship with <NAME> flourish.",
"All the stratagems of advance and retreat.",
"I always thought of myself as the stronger one.",
"But here, you were so valiant.",
"How could he not be conquered? I think you set your future at that table.",
"He spoke of India and suddenly I saw you going.",
"When would I see you again? Isn't India so terribly far away? What would I do? Marry Peter? God, how I envied you.",
"Perhaps I even hated you.",
"How was it possible that you, always so meek and obliging, were to have this greatest of adventures before me? You would know love, you would know a man's touch, while I, the courageous one, knew nothing of life.",
"Mina, my dear.",
"Of course, he's a fine man.",
"I've no doubt of that.",
"He'll make a proper husband.",
"But I hate to think of her out in India with all that heat and those filthy wogs.",
"What do you think Africa will be like? Oh, God, Van.",
"Don't mention that.",
"Every time I try and talk with father about it, he shies away.",
"Maybe you shouldn't ask anymore.",
"It's all I've ever dreamed about.",
"You know that.",
"My father and I off on an adventure, blazing some daring new trail.",
"Perhaps What? Perhaps then, I wouldn't be such a disappointment to him.",
"I was never the son he wanted.",
"Always ill, never good at games.",
"A disappointment.",
"He needed someone more like Me? You'll be all alone when I go.",
"Mina in India, me with Father.",
"That'll be sad for you.",
"But I suppose The minister's coming at seven and mother wants us there.",
"You know how she is about time.",
"If I could only go back now, I would run after him and say, 'Don't go.'",
"Don't go to Africa.",
"You'll never survive, you're too weak.",
"You're beautifully weak, Peter.",
"I love you for your weakness.",
" For at that moment, Mina, I saw the future crystal clear.",
"I tried to pray that night.",
"God didn't answer me.",
"But another did.",
"Soon, child.",
"What games we will have.",
"You were never happier than that weekend.",
"On the following morning you would be married.",
"One more night as <NAME> before you became Mrs.",
"<NAME>.",
"All right.",
"Bye-bye.",
"You didn't seem to mind this loss of self.",
"Perhaps I minded it for you.",
"You've found me out.",
"Last night of freedom, eh? Would you like to see something interesting? No.",
"My Mina? Your Mina.",
"Although you'll be relieved to know she only worked with the most pacific of animals.",
"This is her squirrel.",
"Which are yours? Not the docile ones.",
"That doesn't surprise me.",
"I've always felt you have to name a thing before it comes to life, like a witch's spell.",
"His name is Ariel.",
"The most challenging bit is the eyes.",
"They're glass, of course, so by nature are dead and dull.",
"But that wouldn't do for my great predator.",
"So I put mirrors behind the glass eyes, so they would spark.",
"You see? It's like they're alive.",
"They are.",
"I would put mirrors behind the entire world if I could.",
"There, there.",
"Vanessa! Vanessa, come back! - This is obscene! You mustn't! - How can I not? Have you no shame? Get upstairs this instant! How dare you speak to me of shame? Get upstairs yourself and make amends to my father.",
"I have to see her.",
"To do what? Make this right.",
"I always thought my traveling would kill my family.",
"Being away so long.",
"My thoughtlessness I never imagined it would be a cruel little girl.",
"When the illness came, it was absolute.",
"I was aware of almost nothing.",
"The doctors were at a loss to explain it.",
"Because, you see, it was inexplicable.",
"Thank you.",
"Thank you.",
"I I have her.",
"I have her.",
"You must eat.",
"They don't think it's epilepsy anymore.",
"Did I tell you that? Dr.",
"Kingston doesn't know what to think.",
"Ridiculous little man.",
"They fear They fear that, perhaps, it's your brain Something inside you.",
"What else What else? Um, Mina's gone.",
"She didn't want to stay here.",
"Should we not talk of Mina? I don't know.",
"I don't know.",
"We don't talk to the Murrays, or they to us.",
"I tried going over there.",
"I was not welcome.",
"Sir Mal Sir Malcolm They don't know what's wrong with you, darling.",
"The seizures The seizures are so terrible.",
"We shall be consulting a specialist in London.",
"You always liked London.",
"So, perhaps the change We've booked rooms at the Strand.",
"There's a There's a clinic nearby that are well known for treating women's disorders.",
"An asylum.",
"I am not mad, Mother.",
"I am not unhappy.",
"You should let me die.",
"Vanessa, do you understand what I've said? I think it best if you stepped outside, Mrs.",
"Ives.",
"I'll stay.",
"I must speak frankly to your husband.",
"I'd like to stay.",
"This form of catatonia is not unknown to me.",
"The unusual physical exertions are manifestations of They were not exertions.",
"She was being tormented.",
"I have seen epileptic seizures that have broken bones, Mrs.",
"Ives.",
"None of this impossible, just rare.",
"Hysteria of a psychosexual nature can be treated.",
"The treatments involve narcotics and escalating hydrotherapy.",
"Cold water reduces circulation to the brain, thereby reducing the metabolism and motor activity.",
"The agitation and mental trauma will slow and then cease.",
"I've seen it work, Mr.",
"Ives, you can have no doubt.",
"And what if it doesn't work? If we see no improvement, there are surgical options open to us.",
"Yes.",
"Let's do that.",
"Mr.",
"and Mrs.",
"Ives, please leave the room immediately.",
"It is very good to hear your voice, Vanessa.",
"I'm Dr.",
"<NAME> and I hope to help you.",
"Your parents have I want to be out there.",
"You will be.",
"We have a lovely garden that I prefer the ocean.",
"Have you seen someone who's drowned? I have.",
"Not a dog or a cat.",
"Not enough soul in a dog or a cat.",
"I mean a man.",
"A hundred men.",
"A slave ship.",
"Yes.",
"More than anything.",
"Pulled right down, with the men still chained.",
"Souls in torment who now find themselves in greater torment.",
"Have you seen that, Dr.",
"<NAME>? Vanessa, it's very important to me that you sit down now.",
"Who's Vanessa? Mrs.",
"Ives, may I step in? Of course.",
"Mr.",
"Ives.",
"Peter.",
"I've come to say goodbye.",
"I'm, um, off to Africa with my father.",
"- May I see her? - I - Yes.",
"- don't think Stay here.",
"You You mustn't be shocked.",
"She's much changed.",
"<NAME>, Van Will she die? If there is a God.",
"Is she always like this? No.",
"There are episodes of activity.",
"May I sit with her? Yes.",
"Of course.",
"Does she know me? We don't know.",
"But talk to her, it's meant to help.",
"Hello, Van.",
"It's Peter.",
"Closer, I'm afraid.",
"It's Peter.",
"I'm off to Africa.",
"Can you believe it? Finally having that adventure we talked about.",
"Very excited.",
"I walked by the shore yesterday where we used to swim.",
"You should have kissed me.",
"Will you kiss me now? You're going to die there.",
"You're going to die there.",
"Peter.",
"Good luck on your trip.",
"And thank you for coming.",
"Of | |
loopingcall.LoopingCallDone()
if utils.check_timeout(start_time, _LDEV_STATUS_WAITTIME):
raise loopingcall.LoopingCallDone(False)
loop = loopingcall.FixedIntervalLoopingCall(
_wait_for_ldev_status, timeutils.utcnow(), ldev, *args)
if not loop.start(interval=_LDEV_CHECK_INTERVAL).wait():
msg = utils.output_log(msg_id, ldev=ldev)
raise exception.VSPError(msg)
def create_ldev_on_storage(self, ldev, size, is_vvol):
    """Create an LDEV on the storage system.

    :param ldev: LDEV number to assign to the new volume
    :param size: capacity in gigabytes (rendered as '<size>G')
    :param is_vvol: if True, create a virtual volume in the snapshot
        pool ('snap'); otherwise use the configured DP pool
    """
    args = ['add', 'ldev', '-ldev_id', ldev, '-capacity', '%sG' % size,
            '-emulation', 'OPEN-V', '-pool']
    if is_vvol:
        # V-VOLs are carved from the snapshot pool, not the DP pool.
        args.append('snap')
    else:
        args.append(self.conf.vsp_pool)
    self.run_raidcom(*args)
def get_unused_ldev(self):
    """Find an unused LDEV and return its LDEV number.

    Without a configured LDEV range, the array is asked for its first
    undefined LDEV; otherwise the configured range is scanned.  Raises
    VSPError when no free LDEV can be found.
    """
    if not self.storage_info['ldev_range']:
        ldev_info = self.get_ldev_info(
            ['ldev'], '-ldev_list', 'undefined', '-cnt', '1')
        ldev = ldev_info.get('ldev')
    else:
        ldev = self._find_unused_ldev_by_range()
    # LDEV number 0 is a valid result, so an explicit 'is None' check is
    # required; plain truthiness would wrongly treat 0 as "not found".
    if ldev is None:
        msg = utils.output_log(MSG.NO_AVAILABLE_RESOURCE, resource='LDEV')
        raise exception.VSPError(msg)
    return ldev
def _find_unused_ldev_by_range(self):
    """Return the LDEV number of an unused LDEV in the LDEV range.

    Scans the configured [start, end] range in chunks of
    _GETSTORAGEARRAY_ONCE LDEVs and returns the first undefined LDEV
    found, or None when the entire range is in use.
    """
    # NOTE(review): _INVALID_RANGE exit codes are tolerated here,
    # presumably because a chunk may extend past the array's valid
    # LDEV range — confirm against the CCI documentation.
    success_code = HORCM_EXIT_CODE.union(_INVALID_RANGE)
    start, end = self.storage_info['ldev_range'][:2]
    while start <= end:
        if end - start + 1 > _GETSTORAGEARRAY_ONCE:
            cnt = _GETSTORAGEARRAY_ONCE
        else:
            # Last (partial) chunk of the range.
            cnt = end - start + 1
        ldev_info = self.get_ldev_info(
            ['undefined_ldev'], '-ldev_id', start, '-cnt', cnt,
            '-key', 'front_end', success_code=success_code)
        ldev = ldev_info.get('undefined_ldev')
        # LDEV 0 is a valid hit, hence the explicit 'is not None'.
        if ldev is not None:
            return ldev
        start += _GETSTORAGEARRAY_ONCE
    return None
def get_ldev_info(self, keys, *args, **kwargs):
    """Return a dictionary of LDEV-related items.

    Runs 'raidcom get ldev' with the given extra arguments and extracts
    one value per requested key from the command's stdout.
    """
    result = self.run_raidcom('get', 'ldev', *args, **kwargs)
    stdout = result[1]
    return {key: find_value(stdout, key) for key in keys}
def copy_on_storage(self, pvol, size, metadata, sync):
    """Check if the LDEV can be copied on the storage.

    Raises VSPError when the source LDEV is not in NORMAL status and
    VSPNotSupported when it is itself a V-VOL (virtual volumes cannot
    be used as copy sources here).  Otherwise delegates the actual
    copy to the parent class implementation.
    """
    ldev_info = self.get_ldev_info(['sts', 'vol_attr'], '-ldev_id', pvol)
    if ldev_info['sts'] != NORMAL_STS:
        msg = utils.output_log(MSG.INVALID_LDEV_STATUS_FOR_COPY, ldev=pvol)
        raise exception.VSPError(msg)
    if VVOL_ATTR in ldev_info['vol_attr']:
        raise exception.VSPNotSupported()
    return super(VSPHORCM, self).copy_on_storage(pvol, size, metadata,
                                                 sync)
@coordination.synchronized('{self.lock[create_pair]}')
def create_pair_on_storage(self, pvol, svol, is_thin):
    """Create a copy pair on the storage.

    Runs under a distributed lock shared with delete_pair() so pair
    operations on this backend do not interleave.  If the P-VOL is
    currently an S-VOL of another pair, that pair is deleted first;
    connections (LUN mappings) are set up for both volumes and torn
    down again on failure, preserving the original error.
    """
    path_list = []
    vol_type, pair_info = self._get_vol_type_and_pair_info(pvol)
    if vol_type == SVOL:
        # An S-VOL cannot be a copy source; break its existing pair.
        self._delete_pair_based_on_svol(
            pair_info['pvol'], pair_info['svol_info'],
            no_restart=True)
    if vol_type != PVOL:
        # A P-VOL of an existing pair is already mapped; others need a
        # mapping before the pair can be created.
        self._initialize_pair_connection(pvol)
        path_list.append(pvol)
    try:
        self._initialize_pair_connection(svol)
        path_list.append(svol)
        self._create_pair_on_storage_core(pvol, svol, is_thin, vol_type)
    except exception.VSPError:
        with excutils.save_and_reraise_exception():
            # Best-effort cleanup of the mappings created above.
            for ldev in path_list:
                try:
                    self._terminate_pair_connection(ldev)
                except exception.VSPError:
                    utils.output_log(MSG.UNMAP_LDEV_FAILED, ldev=ldev)
def _create_pair_on_storage_core(self, pvol, svol, is_thin, vol_type):
"""Create a copy pair on the storage depending on the copy method."""
if is_thin:
self._create_thin_copy_pair(pvol, svol)
else:
self._create_full_copy_pair(pvol, svol, vol_type)
def _create_thin_copy_pair(self, pvol, svol):
    """Create a THIN (Thin Image snapshot) copy pair on the storage.

    Adds the snapshot pair, waits for PAIR status, stores the snapshot
    data, then waits for PSUS.  On failure the partially created pair
    is deleted best-effort while the original error is re-raised.
    """
    # NOTE(review): the snapshot name is derived from the S-VOL number
    # modulo _SNAP_HASH_SIZE — presumably to bound the name length;
    # confirm name-collision handling against the CCI docs.
    snapshot_name = _SNAP_NAME + six.text_type(svol % _SNAP_HASH_SIZE)
    self.run_raidcom(
        'add', 'snapshot', '-ldev_id', pvol, svol, '-pool',
        self.conf.vsp_thin_pool, '-snapshot_name',
        snapshot_name, '-copy_size', self.conf.vsp_copy_speed)
    try:
        self.wait_thin_copy(svol, PAIR)
        self.run_raidcom(
            'modify', 'snapshot', '-ldev_id', svol,
            '-snapshot_data', 'create')
        self.wait_thin_copy(svol, PSUS)
    except exception.VSPError:
        with excutils.save_and_reraise_exception():
            interval = self.conf.vsp_async_copy_check_interval
            try:
                self._delete_thin_copy_pair(pvol, svol, interval)
            except exception.VSPError:
                # Cleanup is best effort; keep the original error.
                utils.output_log(MSG.DELETE_TI_PAIR_FAILED, pvol=pvol,
                                 svol=svol)
def _create_full_copy_pair(self, pvol, svol, vol_type):
    """Create a FULL (Shadow Image) copy pair on the storage.

    Registers the pair in a copy group configuration, restarts the
    pair HORCM instance so it picks up the new config, then issues
    'paircreate' and waits for the pair to reach PSUS/COPY.  On any
    failure, the pair and its configuration are removed best-effort
    and the HORCM instance is restarted again; the original error is
    re-raised.
    """
    mun = 0
    if vol_type == PVOL:
        # The volume already heads one or more pairs; pick a free
        # mirror unit number for the new pair.
        mun = self._get_unused_mun(pvol)
    copy_group = self._copy_groups[mun]
    ldev_name = _LDEV_NAME % (pvol, svol)
    # Track how far we got so the except-path only undoes completed steps.
    restart = False
    create = False
    try:
        self._add_pair_config(pvol, svol, copy_group, ldev_name, mun)
        self._restart_horcmgr(_PAIR_HORCMGR)
        restart = True
        self._run_pair_cmd(
            'paircreate', '-g', copy_group, '-d', ldev_name,
            '-c', self.conf.vsp_copy_speed,
            '-vl', '-split', '-fq', 'quick')
        create = True
        self._wait_full_copy(svol, set([PSUS, COPY]))
    except exception.VSPError:
        with excutils.save_and_reraise_exception():
            if create:
                # Let the pair settle before tearing it down.
                try:
                    self._wait_full_copy(svol, set([PAIR, PSUS, PSUE]))
                except exception.VSPError:
                    utils.output_log(MSG.WAIT_SI_PAIR_STATUS_FAILED,
                                     pvol=pvol, svol=svol)
                interval = self.conf.vsp_async_copy_check_interval
                try:
                    self._delete_full_copy_pair(pvol, svol, interval)
                except exception.VSPError:
                    utils.output_log(MSG.DELETE_SI_PAIR_FAILED, pvol=pvol,
                                     svol=svol)
            try:
                if self._is_smpl(svol):
                    self._delete_pair_config(
                        pvol, svol, copy_group, ldev_name)
            except exception.VSPError:
                utils.output_log(MSG.DELETE_DEVICE_GRP_FAILED, pvol=pvol,
                                 svol=svol)
            if restart:
                try:
                    self._restart_horcmgr(_PAIR_HORCMGR)
                except exception.VSPError:
                    utils.output_log(
                        MSG.HORCM_RESTART_FOR_SI_FAILED,
                        inst=self.conf.vsp_horcm_numbers[1])
def _get_unused_mun(self, ldev):
    """Return the number of an unused mirror unit for *ldev*.

    Prefers a mirror unit with no pair at all; failing that, deletes
    the first split (PSUS) pair it finds and reuses its mirror unit.
    Raises VSPBusy when every mirror unit is occupied by a non-split
    pair.
    """
    pair_list = []
    for mun in range(_MAX_MUNS):
        pair_info = self._get_full_copy_pair_info(ldev, mun)
        if not pair_info:
            # Free mirror unit found, use it directly.
            return mun
        pair_list.append((pair_info['svol_info'], mun))
    for svol_info, mun in pair_list:
        if svol_info['is_psus']:
            # Reclaim a split pair's mirror unit.
            self._delete_pair_based_on_svol(
                ldev, svol_info, no_restart=True)
            return mun
    utils.output_log(MSG.NO_AVAILABLE_MIRROR_UNIT,
                     copy_method=utils.FULL, pvol=ldev)
    raise exception.VSPBusy()
def _get_vol_type_and_pair_info(self, ldev):
    """Return a tuple of the LDEV's Shadow Image pair status and info.

    Returns (SMPL, None) for unpaired or abnormal volumes,
    (PVOL, None) for copy sources, and (SVOL, pair_info) when the
    LDEV is the copy destination of a full-copy pair.
    """
    ldev_info = self.get_ldev_info(['sts', 'vol_attr'], '-ldev_id', ldev)
    if ldev_info['sts'] != NORMAL_STS:
        # Abnormal volumes are treated as simplex.
        return (SMPL, None)
    if THIN_ATTR in ldev_info['vol_attr']:
        # Thin Image volumes always act as pair sources here.
        return (PVOL, None)
    if FULL_ATTR in ldev_info['vol_attr']:
        # Mirror unit 0 is enough to decide the role of the volume.
        pair_info = self._get_full_copy_pair_info(ldev, 0)
        if not pair_info:
            return (PVOL, None)
        if pair_info['pvol'] != ldev:
            return (SVOL, pair_info)
        return (PVOL, None)
    return (SMPL, None)
def _get_full_copy_info(self, ldev):
    """Return a tuple of P-VOL and S-VOL's info of a Shadow Image pair.

    Returns (None, None) for simplex volumes, (pvol, [svol_info]) when
    *ldev* is an S-VOL, and (ldev, [svol_info, ...]) over all mirror
    units when *ldev* is a P-VOL.
    """
    vol_type, pair_info = self._get_vol_type_and_pair_info(ldev)
    svol_info = []
    if vol_type == SMPL:
        return (None, None)
    elif vol_type == SVOL:
        return (pair_info['pvol'], [pair_info['svol_info']])
    # P-VOL: collect S-VOL info from every mirror unit in use.
    for mun in range(_MAX_MUNS):
        pair_info = self._get_full_copy_pair_info(ldev, mun)
        if pair_info:
            svol_info.append(pair_info['svol_info'])
    return (ldev, svol_info)
@coordination.synchronized('{self.lock[create_pair]}')
def delete_pair(self, ldev, all_split=True):
    """Delete the specified LDEV in a synchronized section.

    Takes the same distributed lock as create_pair_on_storage() so
    that pair creation and deletion on this backend never interleave.
    """
    super(VSPHORCM, self).delete_pair(ldev, all_split=all_split)
def delete_pair_based_on_pvol(self, pair_info, all_split):
    """Disconnect all volume pairs to which the specified P-VOL belongs.

    Thin pairs and non-split full pairs are skipped and collected; if
    any were skipped while *all_split* is set, VSPBusy is raised after
    the deletable pairs have been processed.  The pair HORCM instance
    is restarted whenever at least one full pair was removed.
    """
    svols = []
    restart = False
    try:
        for svol_info in pair_info['svol_info']:
            if svol_info['is_thin'] or not svol_info['is_psus']:
                # Cannot delete now; remember it for the error report.
                svols.append(six.text_type(svol_info['ldev']))
                continue
            self.delete_pair_from_storage(
                pair_info['pvol'], svol_info['ldev'], False)
            restart = True
            self._terminate_pair_connection(svol_info['ldev'])
        if not svols:
            # Only unmap the P-VOL when no pairs remain on it.
            self._terminate_pair_connection(pair_info['pvol'])
    finally:
        if restart:
            self._restart_horcmgr(_PAIR_HORCMGR)
    if all_split and svols:
        utils.output_log(
            MSG.UNABLE_TO_DELETE_PAIR, pvol=pair_info['pvol'],
            svol=', '.join(svols))
        raise exception.VSPBusy()
def delete_pair_based_on_svol(self, pvol, svol_info):
    """Disconnect all volume pairs to which the specified S-VOL belongs.

    Public wrapper that always allows the pair HORCM instance to be
    restarted after deletion.
    """
    self._delete_pair_based_on_svol(pvol, svol_info, no_restart=False)
def _delete_pair_based_on_svol(self, pvol, svol_info, no_restart=False):
    """Disconnect all volume pairs to which the specified S-VOL belongs.

    Raises VSPBusy when the pair is not split (PSUS) yet.  Unless
    *no_restart* is set, the pair HORCM instance is restarted after a
    successful deletion so stale pair state is dropped.
    """
    do_restart = False
    if not svol_info['is_psus']:
        # Only split pairs may be deleted.
        utils.output_log(MSG.UNABLE_TO_DELETE_PAIR, pvol=pvol,
                         svol=svol_info['ldev'])
        raise exception.VSPBusy()
    try:
        self.delete_pair_from_storage(
            pvol, svol_info['ldev'], svol_info['is_thin'])
        do_restart = True
        self._terminate_pair_connection(svol_info['ldev'])
        self._terminate_pair_connection(pvol)
    finally:
        if not no_restart and do_restart:
            self._restart_horcmgr(_PAIR_HORCMGR)
def delete_pair_from_storage(self, pvol, svol, is_thin):
    """Disconnect the volume pair that consists of the specified LDEVs.

    Dispatches to the THIN (snapshot) or FULL (Shadow Image) deletion
    path, using the configured async-copy check interval for status
    polling.
    """
    check_interval = self.conf.vsp_async_copy_check_interval
    delete = (self._delete_thin_copy_pair if is_thin
              else self._delete_full_copy_pair)
    delete(pvol, svol, check_interval)
def _delete_thin_copy_pair(self, pvol, svol, interval):
    """Disconnect a THIN volume pair.

    No-op when the S-VOL has no snapshot.  Otherwise unmaps the
    snapshot data from the S-VOL, deletes the snapshot on the P-VOL
    side, and polls until the S-VOL leaves the pair.
    """
    result = self.run_raidcom(
        'get', 'snapshot', '-ldev_id', svol)
    if not result[1]:
        # No snapshot output: nothing to delete.
        return
    # NOTE(review): column 5 of the second output line is taken as the
    # mirror unit (MU#) — confirm the column layout against the CCI
    # 'raidcom get snapshot' documentation.
    mun = result[1].splitlines()[1].split()[5]
    self.run_raidcom(
        'unmap', 'snapshot', '-ldev_id', svol,
        success_code=ALL_EXIT_CODE)
    self.run_raidcom(
        'delete', 'snapshot', '-ldev_id', pvol, '-mirror_id', mun)
    self._wait_thin_copy_deleting(svol, interval=interval)
def _wait_thin_copy_deleting(self, ldev, **kwargs):
    """Wait until the LDEV is no longer in a THIN volume pair.

    Polls at *interval* seconds (default: the configured async-copy
    check interval) and raises VSPError on timeout.
    """
    interval = kwargs.pop(
        'interval', self.conf.vsp_async_copy_check_interval)

    def _wait_for_thin_copy_smpl(start_time, ldev, **kwargs):
        """Raise True if the LDEV is no longer in a THIN volume pair."""
        timeout = kwargs.pop('timeout', utils.DEFAULT_PROCESS_WAITTIME)
        ldev_info = self.get_ldev_info(
            ['sts', 'vol_attr'], '-ldev_id', ldev)
        # Leaving NORMAL status or losing the THIN attribute both mean
        # the pair is gone.
        if (ldev_info['sts'] != NORMAL_STS or
                THIN_ATTR not in ldev_info['vol_attr']):
            raise loopingcall.LoopingCallDone()
        if utils.check_timeout(start_time, timeout):
            # False signals timeout to the caller below.
            raise loopingcall.LoopingCallDone(False)

    loop = loopingcall.FixedIntervalLoopingCall(
        _wait_for_thin_copy_smpl, timeutils.utcnow(), ldev, **kwargs)
    if not loop.start(interval=interval).wait():
        msg = utils.output_log(MSG.TI_PAIR_STATUS_WAIT_TIMEOUT, svol=ldev)
        raise exception.VSPError(msg)
def _delete_full_copy_pair(self, pvol, svol, interval):
    """Disconnect a FULL volume pair.

    No-op when pairdisplay reports nothing for the S-VOL.  Splits the
    pair with -S (simplex), waits for SMPL, and removes the pair's
    device-group configuration.
    """
    stdout = self._run_pairdisplay(
        '-d', self.conf.vsp_storage_id, svol, 0)
    if not stdout:
        return
    # NOTE(review): the copy group is read from the first column of
    # the third output line, and column 9 of the second line holds the
    # local volume's role — confirm against the pairdisplay docs.
    copy_group = stdout.splitlines()[2].split()[0]
    ldev_name = _LDEV_NAME % (pvol, svol)
    if stdout.splitlines()[1].split()[9] != 'P-VOL':
        # Pair state is only reliable from the P-VOL side; refresh the
        # pair HORCM instance before issuing the split.
        self._restart_horcmgr(_PAIR_HORCMGR)
    try:
        self._run_pair_cmd(
            'pairsplit', '-g', copy_group, '-d', ldev_name, '-S')
        self._wait_full_copy(svol, set([SMPL]), interval=interval)
    finally:
        if self._is_smpl(svol):
            self._delete_pair_config(pvol, svol, copy_group, ldev_name)
def _initialize_pair_connection(self, ldev):
    """Initialize server-volume connection for volume copy.

    Tries each configured pair target (port, gid) in turn and returns
    the result of the first successful mapping.  Raises VSPError when
    the LDEV could not be mapped anywhere.
    """
    port, gid = None, None
    for port, gid in self._pair_targets:
        try:
            targets = {
                'list': [(port, gid)],
                'lun': {},
            }
            return self.map_ldev(targets, ldev)
        except exception.VSPError:
            # Keep trying the remaining targets.
            utils.output_log(MSG.MAP_LDEV_FAILED, ldev=ldev, port=port,
                             id=gid, lun=None)
    msg = utils.output_log(MSG.NO_MAPPING_FOR_LDEV, ldev=ldev)
    raise exception.VSPError(msg)
def _terminate_pair_connection(self, ldev):
    """Terminate server-volume connection for volume copy.

    Skips unmapping when the LDEV still belongs to a pair (Shadow
    Image attribute on a normal volume, or a Thin Image S-VOL that is
    not simplex); otherwise finds its pair-target mappings and unmaps
    them.
    """
    targets = {
        'list': [],
    }
    ldev_info = self.get_ldev_info(['sts', 'vol_attr'], '-ldev_id', ldev)
    # Precedence note: this reads as (NORMAL and FULL_ATTR) or
    # (thin S-VOL status != SMPL).
    if (ldev_info['sts'] == NORMAL_STS and
            FULL_ATTR in ldev_info['vol_attr'] or
            self._get_thin_copy_svol_status(ldev) != SMPL):
        LOG.debug(
            'The specified LDEV has pair. Therefore, unmapping '
            'operation was skipped. '
            '(LDEV: %(ldev)s, vol_attr: %(info)s)',
            {'ldev': ldev, 'info': ldev_info['vol_attr']})
        return
    self._find_mapped_targets_from_storage(
        targets, ldev, self.storage_info['controller_ports'], is_pair=True)
    self.unmap_ldev(targets, ldev)
def check_param(self):
    """Check parameter values and consistency among them.

    Validates on top of the parent checks: the two HORCM instance
    numbers must be distinct, at least one of vsp_target_ports /
    vsp_horcm_pair_target_ports must be set, and every required HORCM
    option must have a value.  Raises VSPError on the first violation.
    """
    super(VSPHORCM, self).check_param()
    utils.check_opts(self.conf, horcm_opts)
    insts = self.conf.vsp_horcm_numbers
    if len(insts) != 2 or insts[_HORCMGR] == insts[_PAIR_HORCMGR]:
        msg = utils.output_log(MSG.INVALID_PARAMETER,
                               param='vsp_horcm_numbers')
        raise exception.VSPError(msg)
    if (not self.conf.vsp_target_ports and
            not self.conf.vsp_horcm_pair_target_ports):
        msg = utils.output_log(MSG.INVALID_PARAMETER,
                               param='vsp_target_ports or '
                               'vsp_horcm_pair_target_ports')
        raise exception.VSPError(msg)
    utils.output_log(MSG.SET_CONFIG_VALUE, object='LDEV range',
                     value=self.storage_info['ldev_range'])
    for opt in _REQUIRED_HORCM_OPTS:
        if not self.conf.safe_get(opt):
            msg = utils.output_log(MSG.INVALID_PARAMETER, param=opt)
            raise exception.VSPError(msg)
def _set_copy_groups(self, host_ip):
    """Initialize an instance variable for Shadow Image copy groups.

    Builds one copy-group name per mirror unit from the host IP, the
    storage serial number and the pair HORCM instance number, so the
    names are unique per host/array/instance.
    """
    serial = self.conf.vsp_storage_id
    inst = self.conf.vsp_horcm_numbers[_PAIR_HORCMGR]
    for mun in range(_MAX_MUNS):
        copy_group = _COPY_GROUP % (host_ip, serial, inst, mun)
        self._copy_groups[mun] = copy_group
    utils.output_log(MSG.SET_CONFIG_VALUE, object='copy group list',
                     value=self._copy_groups)
def connect_storage(self):
    """Prepare for using the storage.

    Sets up copy groups and the HORCM configuration files (if
    requested), restarts both HORCM instances, logs in to raidcom and
    then runs the parent setup.  Finally compiles the pool-capacity
    parsing regexes for this backend's pool id.
    """
    self._set_copy_groups(CONF.my_ip)
    if self.conf.vsp_horcm_add_conf:
        self._create_horcm_conf()
        self._create_horcm_conf(horcmgr=_PAIR_HORCMGR)
    self._restart_horcmgr(_HORCMGR)
    self._restart_horcmgr(_PAIR_HORCMGR)
    self._run_raidcom_login()
    super(VSPHORCM, self).connect_storage()
    # NOTE(review): the capture groups pick the thin-pool capacity
    # (tp_cap/tl_cap) and virtual capacity (vcap) columns out of the
    # raidcom pool output — confirm the column layout against the CCI
    # documentation for this array family.
    self._pattern['p_pool'] = re.compile(
        (r"^%03d +\S+ +\d+ +\d+ +(?P<tp_cap>\d+) +\d+ +\d+ +\d+ +\w+ +"
         r"\d+ +(?P<tl_cap>\d+)") % self.storage_info['pool_id'], re.M)
    self._pattern['pool'] = re.compile(
        r"^%03d +\S+ +\d+ +\S+ +\w+ +\d+ +\w+ +\d+ +(?P<vcap>\S+)" %
        self.storage_info['pool_id'], re.M)
def _find_lun(self, | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import importlib
import json
import logging
import os
import re
import collections
import copy
import shutil
import datetime
import time
import uuid
import imp
import sys
from oic.oauth2.message import REQUIRED_LIST_OF_SP_SEP_STRINGS
from oic.oauth2.message import OPTIONAL_LIST_OF_STRINGS
from oic.oauth2.message import OPTIONAL_LIST_OF_SP_SEP_STRINGS
from oic.oauth2.message import REQUIRED_LIST_OF_STRINGS
from oic.oic.message import ProviderConfigurationResponse
from configuration_server.config_values import CONFIG_FILE_KEYS, GUI_CONFIG_STRUCTURE_KEYS, \
CONTACT_EMAIL
__author__ = 'danielevertsson'
CONFIG_DICT_INSTANCE_ID_KEY = 'instance_id'
LOGGER = logging.getLogger("configuration_server.configuration")
class UnKnownResponseTypeAbbreviation(Exception):
    """Raised when a profile response-type abbreviation is not recognized."""
    pass
class GuiConfig:
    """Accessor wrapper around the GUI configuration structure.

    Exposes getters/setters for the dynamic- and static-discovery
    issuer fields so callers do not hard-code the nested dict layout.
    """

    def __init__(self, gui_config_structure=None):
        # Fall back to a fresh structure for any falsy argument,
        # matching the original truthiness check.
        self.config_structure = (gui_config_structure or
                                 create_new_configuration_dict())

    def _dynamic_section(self):
        """Return the dynamic-discovery part of the structure."""
        return self.config_structure['fetchDynamicInfoFromServer']

    def _static_issuer_field(self):
        """Return the static provider-info 'issuer' input field."""
        fields = self.config_structure['fetchStaticProviderInfo']['input_fields']
        return find_static_provider_info_field(fields, "issuer")

    def get_dynamic_discovery_issuer(self):
        return self._dynamic_section()['input_field']['value']

    def set_dynamic_discovery_issuer(self, issuer):
        self._dynamic_section()['input_field']['value'] = issuer

    def set_dynamic_discovery_visibility(self, visible):
        self._dynamic_section()['showInputField'] = visible

    def get_static_discovery_issuer(self):
        return self._static_issuer_field()['values']

    def set_static_discovery_issuer(self, issuer):
        self._static_issuer_field()['values'] = issuer
def get_config_file_path(port, rp_config_folder):
    """Return the full path of the RP config module for *port*."""
    folder = (rp_config_folder if rp_config_folder.endswith("/")
              else rp_config_folder + "/")
    return folder + generate_config_module_name(port)
def parse_crypto_feature_abbreviation(config_gui_structure):
    """Concatenate the abbreviations of all selected crypto features."""
    features = config_gui_structure['signingEncryptionFeaturesCheckboxes']['features']
    return "".join(feature['abbreviation']
                   for feature in features
                   if feature['selected'])
def convert_dynamic_client_registration_to_abbreviation(config_gui_structure):
    """Return "T" when dynamic client registration is enabled, else "F"."""
    enabled = (config_gui_structure['dynamicClientRegistrationDropDown']['value']
               == "yes")
    return "T" if enabled else "F"
def convert_dynamic_discovery_to_abbreviation(config_gui_structure):
    """Return "T" when dynamic discovery is configured, else "F"."""
    return "T" if contains_dynamic_discovery_info(config_gui_structure) else "F"
def convert_response_type_to_abbreviation(response_type):
    """Map a space-separated response_type string to its profile letter(s).

    Raises KeyError for an unknown response type (unchanged contract).
    """
    return {
        "code": "C",
        "id_token": "I",
        "id_token token": "IT",
        "code id_token": "CI",
        "code token": "CT",
        "code id_token token": "CIT",
    }[response_type]
def generate_profile(config_gui_structure):
    """Build the four-part OIDC test profile string "R.D.C.F.".

    R = response type abbreviation, D = dynamic discovery flag,
    C = dynamic client registration flag, F = crypto feature letters.
    """
    parts = (
        convert_response_type_to_abbreviation(
            config_gui_structure["responseTypeDropDown"]["value"]),
        convert_dynamic_discovery_to_abbreviation(config_gui_structure),
        convert_dynamic_client_registration_to_abbreviation(
            config_gui_structure),
        parse_crypto_feature_abbreviation(config_gui_structure),
    )
    # The trailing '.' is intentional: parse_profile() relies on it.
    return "%s.%s.%s.%s." % parts
def load_config_module(module, full_module_path=None):
    """Import an rp_conf module and return its CLIENT dictionary.

    :param module: module name (e.g. "rp_conf_8080")
    :param full_module_path: when given, load the module from this
        exact source file instead of via sys.path
    :raises AttributeError: when the module defines no CLIENT
    """
    # Disable .pyc generation while importing so stale compiled files
    # are neither used nor produced; restored in the finally clause.
    sys.dont_write_bytecode = True
    if not full_module_path:
        test_conf = importlib.import_module(module)
    else:
        # importlib.import_module would pick up the compiled
        # rp_conf_XXXX module; imp.load_source reads the given file.
        test_conf = imp.load_source(module, full_module_path)
    try:
        return test_conf.CLIENT
    except AttributeError as ex:
        raise AttributeError("Module (%s) has no attribute 'CLIENT'" % module)
    finally:
        sys.dont_write_bytecode = False
def identify_existing_config_file(port, oprp_dir_path):
    """Load the CLIENT config of the rp_conf module matching *port*.

    Scans *oprp_dir_path* for files named ``rp_conf_<port>.py`` and
    returns the loaded CLIENT dict of the first match, or None when no
    config file exists for that port.

    Fixes over the previous version: the '.' before 'py' is escaped
    (it previously matched any character), and the pointless list
    comprehension around os.listdir() is gone.
    """
    if not oprp_dir_path.endswith("/"):
        oprp_dir_path = oprp_dir_path + "/"
    config_file_pattern = re.compile(r"rp_conf_[0-9]+\.py$")
    for filename in os.listdir(oprp_dir_path):
        if config_file_pattern.match(filename):
            module = filename[:-3]
            # Filename layout is rp_conf_<port>.py, so the port is the
            # third underscore-separated token.
            file_port = int(module.split("_")[2])
            if file_port == port:
                return load_config_module(
                    module, full_module_path=oprp_dir_path + filename)
    return None
def generate_config_module_name(port, file_extension=".py"):
    """Return the config module filename for *port*, e.g. "rp_conf_8080.py"."""
    return "rp_conf_%s%s" % (port, file_extension)
def backup_existing_config_file(config_file_path, oprp_dir_path, port):
    """Copy the existing config file into a timestamped backup (best effort).

    Creates ``<oprp_dir_path>/config_backup`` if necessary and copies
    *config_file_path* there with a timestamp suffix.  Failures are
    logged and swallowed so a missing backup never blocks writing the
    new config file.
    """
    if not oprp_dir_path.endswith("/"):
        oprp_dir_path += "/"
    backup_dir = oprp_dir_path + "config_backup"
    try:
        os.makedirs(backup_dir)
    except OSError:
        # Directory already exists (or cannot be created); shutil.copy
        # below will surface any real problem.
        pass
    time_stamp = datetime.datetime.fromtimestamp(
        time.time()).strftime('%Y%m%d_%H.%M.%S')
    config_file_name = generate_config_module_name(port)
    backup_file = os.path.join(backup_dir, config_file_name + "_" + time_stamp)
    try:
        shutil.copy(config_file_path, backup_file)
    except (IOError, OSError, shutil.Error):
        # Narrowed from a bare 'except:' which also swallowed
        # KeyboardInterrupt/SystemExit and hid programming errors.
        LOGGER.debug("Failed to make a backup of config file: %s"
                     % config_file_path)
def write_config_file(config_file_path, config_module, port, oprp_dir_path="."):
    """Write *config_module* source text to disk, backing up any old file.

    The previous ``open(path, "w", 0)`` requested unbuffered text I/O,
    which raises ValueError on Python 3 (text mode cannot be
    unbuffered).  The write-through intent is preserved with an
    explicit flush + fsync before the file is closed.
    """
    backup_existing_config_file(config_file_path, oprp_dir_path, port)
    with open(config_file_path, "w") as _file:
        _file.write(config_module)
        _file.flush()
        os.fsync(_file.fileno())
def convert_to_uft8(data):
    """Recursively convert unicode strings in *data* to UTF-8 byte strings.

    Python 2 only: relies on ``basestring`` and ``dict.iteritems``.
    Mappings are rebuilt as plain dicts; other iterables (lists,
    tuples, sets) are rebuilt via ``type(data)(...)``.  Non-string
    scalars are returned unchanged.  (The 'uft8' typo in the name is
    kept for caller compatibility.)
    """
    if isinstance(data, basestring):
        try:
            return str(data)
        except UnicodeEncodeError as ex:
            # Non-ASCII text cannot be coerced with str(); encode it.
            return data.encode('utf8')
    elif isinstance(data, collections.Mapping):
        # Each (key, value) tuple is converted via the Iterable branch.
        return dict(map(convert_to_uft8, data.iteritems()))
    elif isinstance(data, collections.Iterable):
        return type(data)(map(convert_to_uft8, data))
    else:
        return data
def create_module_string(client_config, port, base_url, conf=None, ssl_module=None):
    """Render an rp_conf_<port>.py module as Python source text.

    :param client_config: CLIENT dictionary to embed (deep-copied)
    :param port: port the OPRP instance will listen on
    :param base_url: base URL used for redirect/logout/key-export URIs
    :param conf: server configuration; supplies OPRP_SSL_MODULE when
        *ssl_module* is not given
    :param ssl_module: module whose contents are star-imported at the
        top of the generated file
    """
    _client = copy.deepcopy(client_config)
    if not ssl_module:
        ssl_module = conf.OPRP_SSL_MODULE
    _client['client_info'] = {
        "application_type": "web",
        "application_name": "OIC test tool",
        "contacts": ["<EMAIL>"],
        "redirect_uris": ["%sauthz_cb" % base_url],
        "post_logout_redirect_uris": ["%slogout" % base_url]
    }
    if 'client_registration' in _client:
        # A statically registered client must not also carry dynamic
        # registration info, so the block added above is dropped again.
        del _client['client_info']
    _client['key_export_url'] = "%sexport/jwk_%%s.json" % base_url
    _client['base_url'] = base_url
    _client = convert_to_uft8(_client)
    return "from " + ssl_module + " import *\nPORT = " + str(
        port) + "\nBASE =\'" + str(base_url) + "\'\nCLIENT = " + str(_client)
def get_default_client():
    """Return a deep copy of the packaged default CLIENT configuration.

    The copy makes sure callers can mutate the result without
    poisoning the module-level default.
    """
    default = importlib.import_module("configuration_server.default_oprp_config")
    return copy.deepcopy(default.CLIENT)
def clear_config_keys(config_dict):
    """Remove optional / issuer-specific keys from *config_dict* in place.

    Returns the same dictionary for chaining.
    """
    for field in ('webfinger_subject',
                  'login_hint',
                  'sub_claim',
                  'ui_locales',
                  'claims_locales',
                  'acr_values',
                  'provider_info',
                  'srv_discovery_url',
                  'webfinger_url',
                  'webfinger_email'):
        config_dict.pop(field, None)
    return config_dict
def convert_to_list(value_dict):
    """Return the 'value' entry of each element as a flat list."""
    return [element['value'] for element in value_dict]
def static_provider_info_to_config_file_dict(config_gui_structure,
                                             config_file_dict):
    """Copy visible static provider info from the GUI structure.

    Every shown input field of 'fetchStaticProviderInfo' is written to
    ``config_file_dict['provider_info']``; list-type fields are
    flattened to their plain values first.

    :param config_gui_structure: GUI-side configuration structure
    :param config_file_dict: configuration dictionary following the
        "Configuration file structure" (see setup.rst)
    :return: the updated configuration dictionary
    """
    provider_info = {}
    for field in config_gui_structure['fetchStaticProviderInfo']['input_fields']:
        if not field['show']:
            continue
        field_id = field['id']
        if field['isList']:
            provider_info[field_id] = convert_to_list(field['values'])
        else:
            provider_info[field_id] = field['values']
    config_file_dict['provider_info'] = provider_info
    return config_file_dict
def client_registration_to_config_file_dict(config_gui_structure,
                                            config_file_dict):
    """Copy client-registration settings from the GUI structure.

    With static registration, the client_id / client_secret /
    redirect_uris text fields are written into
    ``config_file_dict['client_registration']``.  With dynamic
    registration, any statically configured client_id/client_secret
    are removed instead.

    :param config_gui_structure: GUI-side configuration structure
    :param config_file_dict: configuration dictionary following the
        "Configuration file structure" (see setup.rst)
    :return: the updated configuration dictionary
    """
    uses_dynamic = (
        config_gui_structure['dynamicClientRegistrationDropDown']['value']
        == 'yes')
    if uses_dynamic:
        # Static credentials must not survive a switch to dynamic
        # registration; other keys (e.g. redirect_uris) are kept.
        registration = config_file_dict.get('client_registration', {})
        registration.pop('client_id', None)
        registration.pop('client_secret', None)
        return config_file_dict
    for attribute in config_gui_structure[
            'supportsStaticClientRegistrationTextFields']:
        registration = config_file_dict.setdefault('client_registration', {})
        content = attribute['textFieldContent']
        if attribute['id'] == 'client_id':
            registration['client_id'] = content
        elif attribute['id'] == 'client_secret':
            registration['client_secret'] = content
        elif attribute['id'] == 'redirect_uris':
            registration['redirect_uris'] = [content]
    return config_file_dict
def create_key_dict_pair_if_non_exist(key, dict):
    """Ensure ``dict[key]`` holds a dictionary; return the same dict.

    Note: the second parameter shadows the 'dict' builtin — the name
    is kept for caller compatibility.
    """
    dict.setdefault(key, {})
    return dict
def subject_type_to_config_file_dict(config_dict, config_gui_structure):
    """Copy the GUI subject-type choice into config_dict['preferences']."""
    config_dict = create_key_dict_pair_if_non_exist('preferences', config_dict)
    subject_type = config_gui_structure["clientSubjectType"]["value"]
    config_dict['preferences']['subject_type'] = subject_type
    return config_dict
def profile_to_config_file_dict(config_dict, config_gui_structure):
    """Store the generated test profile in config_dict['behaviour']."""
    config_dict = create_key_dict_pair_if_non_exist('behaviour', config_dict)
    profile = generate_profile(config_gui_structure)
    config_dict['behaviour']['profile'] = profile
    return config_dict
class UserFriendlyException(Exception):
    """Exception whose message is safe to show to an end user.

    :param message: user-facing error text
    :param log_info: optional extra details for the server log only
    :param show_trace: whether handle_exception() should log a full
        traceback for this error
    """

    def __init__(self, message, log_info=None, show_trace=True):
        super(UserFriendlyException, self).__init__(message)
        # Set explicitly: Python 3 removed BaseException.message, but
        # log_exception()/handle_exception() read exception.message.
        self.message = message
        self.log_info = log_info
        self.show_trace = show_trace
def log_exception(event_id, exception):
    """Log *exception* with traceback, prefixed by the correlation id.

    :param event_id: correlation id also shown to the user, so support
        can find the matching log entry
    :param exception: the exception being reported
    """
    # Python 3 removed BaseException.message; fall back to str() so
    # plain exceptions still log correctly.
    text = getattr(exception, 'message', None)
    if text is None:
        text = str(exception)
    logged_exception = type(exception)("[" + event_id + "] " + text)
    LOGGER.exception(str(logged_exception))
def handle_exception(exception, response_encoder, message="", failed_to_message=""):
    """Log an exception and turn it into a service-error response.

    :param exception: the caught exception
    :param response_encoder: encoder used to build the error response;
        when falsy, the error is only logged and None is returned
    :param message: fallback user-facing message for non-user-friendly
        exceptions
    :param failed_to_message: when given, overrides *message* with a
        standard "Failed to ..." text
    :return: the encoded service error, or None
    """
    if failed_to_message:
        message = "Failed to %s. Please contact technical support." % failed_to_message
    event_id = str(uuid.uuid4())
    # Python 3 removed BaseException.message; fall back to str().
    exception_text = getattr(exception, 'message', None)
    if exception_text is None:
        exception_text = str(exception)
    if isinstance(exception, UserFriendlyException):
        # User-friendly errors may opt out of the noisy traceback.
        if exception.show_trace:
            log_exception(event_id, exception)
    else:
        log_exception(event_id, exception)
    if response_encoder:
        if isinstance(exception, UserFriendlyException):
            if exception.log_info:
                LOGGER.error("[" + event_id + "] " + exception.log_info)
            return response_encoder.service_error(exception_text,
                                                  event_id=event_id)
        LOGGER.error("[" + event_id + "] " + message)
        return response_encoder.service_error(message, event_id=event_id)
    return None
def does_configuration_exists(port_database, issuer, instance_id, conf):
    """Return True when a configuration exists for (issuer, instance_id).

    Checks the port database first and falls back to scanning the OPRP
    directory for a config file matching the instance's port.  (The
    'exists' spelling in the name is kept for caller compatibility.)
    """
    port = port_database.get_port(issuer=issuer, instance_id=instance_id)
    config = port_database.get_configuration(issuer=issuer, instance_id=instance_id)
    if not config:
        # Not in the database; an on-disk rp_conf file still counts.
        config = identify_existing_config_file(port, conf.OPRP_DIR_PATH)
    return config is not None
def convert_config_gui_structure(config_gui_structure, port, instance_id,
                                 is_port_in_database, conf):
    """
    Converts the internal data structure to a dictionary which follows the
    "Configuration file structure", see setup.rst

    Starts from the existing on-disk config when the port is known to
    the database, otherwise from the default client (with optional
    keys cleared), then layers the GUI values on top.

    :param config_gui_structure: Data structure used to hold and show
    configuration information in the Gui
    :param port: port of the OPRP instance being configured
    :param instance_id: stored under CONFIG_DICT_INSTANCE_ID_KEY when set
    :param is_port_in_database: whether the port database knows this port
    :param conf: server configuration (supplies OPRP_DIR_PATH)
    :return A dictionary which follows the "Configuration file structure",
    see setup.rst
    """
    config_dict = identify_existing_config_file(port, conf.OPRP_DIR_PATH)
    if not is_port_in_database and config_dict:
        # Orphaned file on disk: flag the inconsistency but continue.
        file_path = get_config_file_path(port, conf.OPRP_DIR_PATH)
        LOGGER.error("The identified configuration file does not exist in the database. "
                     "File path: %s" % file_path)
    if not (is_port_in_database and config_dict):
        # No trustworthy existing config; start from the defaults.
        config_dict = get_default_client()
        config_dict = clear_config_keys(config_dict)
    if instance_id:
        config_dict[CONFIG_DICT_INSTANCE_ID_KEY] = instance_id
    if contains_dynamic_discovery_info(config_gui_structure):
        gui_config = GuiConfig(config_gui_structure)
        config_dict['srv_discovery_url'] = gui_config.get_dynamic_discovery_issuer()
    elif config_gui_structure['fetchStaticProviderInfo']['showInputFields']:
        config_dict = static_provider_info_to_config_file_dict(config_gui_structure,
                                                               config_dict)
    config_dict = client_registration_to_config_file_dict(config_gui_structure, config_dict)
    config_dict = subject_type_to_config_file_dict(config_dict, config_gui_structure)
    config_dict = profile_to_config_file_dict(config_dict, config_gui_structure)
    # Optional free-text fields are only stored when non-empty.
    if config_gui_structure['webfingerSubject'] != "":
        config_dict['webfinger_subject'] = config_gui_structure['webfingerSubject']
    if config_gui_structure['loginHint'] != "":
        config_dict['login_hint'] = config_gui_structure['loginHint']
    if config_gui_structure['uiLocales'] != "":
        config_dict['ui_locales'] = config_gui_structure['uiLocales']
    if config_gui_structure['claimsLocales'] != "":
        config_dict['claims_locales'] = config_gui_structure['claimsLocales']
    if config_gui_structure['acrValues'] != "":
        config_dict['acr_values'] = config_gui_structure['acrValues']
    if config_gui_structure['webfinger_url'] != "":
        config_dict['webfinger_url'] = config_gui_structure['webfinger_url']
    if config_gui_structure['webfinger_email'] != "":
        config_dict['webfinger_email'] = config_gui_structure['webfinger_email']
    return config_dict
def find_static_provider_info_field(input_fields, fields_id):
    """Return the first input field whose 'id' equals *fields_id*, else None."""
    return next((field for field in input_fields
                 if field['id'] == fields_id), None)
def contains_dynamic_discovery_info(config_gui_structure):
    """Return True only when the dynamic-discovery field is shown (is True)."""
    shown = config_gui_structure['fetchDynamicInfoFromServer']['showInputField']
    return shown is True
def get_issuer_from_gui_config(gui_config_structure):
    """Return the issuer configured in the GUI, without a trailing slash.

    Prefers the dynamic-discovery issuer when that field is shown,
    otherwise the static provider-info issuer.

    NOTE(review): get_static_discovery_issuer() returns the raw
    'values' entry, which for list-type fields may not be a string;
    the endswith() call below assumes a string — confirm with callers.
    """
    issuer = None
    gui_config = GuiConfig(gui_config_structure)
    if contains_dynamic_discovery_info(gui_config_structure):
        issuer = gui_config.get_dynamic_discovery_issuer()
    else:
        issuer = gui_config.get_static_discovery_issuer()
    # Normalize: issuers are compared without a trailing slash.
    if issuer.endswith("/"):
        issuer = issuer[:-1]
    return issuer
def is_using_dynamic_client_registration(config_gui_structure):
    """Return True when the GUI selects dynamic client registration."""
    value = config_gui_structure['dynamicClientRegistrationDropDown']['value']
    return value == "yes"
def set_dynamic_discovery_issuer_config_gui_structure(issuer,
                                                      config_gui_structure,
                                                      show_field=True):
    """Store *issuer* in the dynamic-discovery field and set its visibility.

    Returns the (possibly newly created) GUI configuration structure.
    """
    wrapper = GuiConfig(config_gui_structure)
    wrapper.set_dynamic_discovery_visibility(show_field)
    wrapper.set_dynamic_discovery_issuer(issuer)
    return wrapper.config_structure
def convert_to_gui_drop_down(config_file_dict):
    """Return a GUI drop-down entry for every key of *config_file_dict*."""
    return [{"type": name, "name": name} for name in config_file_dict]
def convert_abbreviation_to_response_type(response_type_abbreviation):
    """Map a profile abbreviation back to its space-separated response type.

    Raises UnKnownResponseTypeAbbreviation for unknown abbreviations.
    """
    response_types = {
        "C": "code",
        "I": "id_token",
        "IT": "id_token token",
        "CI": "code id_token",
        "CT": "code token",
        "CIT": "code id_token token"
    }
    if response_type_abbreviation not in response_types:
        raise UnKnownResponseTypeAbbreviation(
            "The supplied response type abbreviation (%s) is not recognized"
            % response_type_abbreviation)
    return response_types[response_type_abbreviation]
def parse_profile(profile):
    """Split a profile string "R.D.C.F." into (response_type, crypto_features).

    Python 2 only (uses ``basestring``).  The profile carries a
    trailing '.', so split('.') yields [R, D, C, F, '']; index 3 is
    the crypto-feature abbreviation string.

    :raises ValueError: when *profile* is not a string
    """
    if not isinstance(profile, basestring):
        raise ValueError("profile value of wrong type")
    _args = profile.split(".")
    response_type = convert_abbreviation_to_response_type(_args[0])
    crypto_feature_support = _args[3]
    return response_type, crypto_feature_support
def set_feature_list(config_structure_dict, oprp_arg):
    """Mark each crypto feature selected iff its abbreviation is in *oprp_arg*."""
    checkboxes = config_structure_dict['signingEncryptionFeaturesCheckboxes']
    for feature in checkboxes['features']:
        feature['selected'] = feature['abbreviation'] in oprp_arg
def set_test_specific_request_parameters(config_file_dict, config_structure_dict):
    """Copy test-specific request parameters from file dict to GUI dict.

    Python 2 only (uses ``dict.iteritems``).  For every known config
    key present in *config_file_dict*, the value is stored under the
    matching GUI-structure key.  Returns the updated GUI structure.
    """
    for (key, value) in CONFIG_FILE_KEYS.iteritems():
        if value in config_file_dict:
            # CONFIG_FILE_KEYS and GUI_CONFIG_STRUCTURE_KEYS share the
            # same logical key space.
            gui_config_key = GUI_CONFIG_STRUCTURE_KEYS[key]
            config_structure_dict[gui_config_key] = config_file_dict[value]
    return config_structure_dict
def convert_config_file(config_file_dict):
"""
Converts a config file structure to a config GUI structure
:param config_file_dict: The configuration file from which should be
converted
:return The updated configuration GUI data structure
"""
config_structure_dict = create_new_configuration_dict()
if "srv_discovery_url" in config_file_dict:
config_structure_dict = dynamic_discovery_to_gui_structure(
config_file_dict,
config_structure_dict)
elif "provider_info" in config_file_dict:
# Now we know it's an static provider
config_structure_dict = convert_static_provider_info_to_gui(
config_file_dict,
config_structure_dict)
config_structure_dict = client_registration_supported_to_gui(
config_file_dict,
config_structure_dict)
config_structure_dict['clientSubjectType']['value'] = \
config_file_dict['preferences']['subject_type']
response_type, crypto_feature_support = parse_profile(
config_file_dict['behaviour']['profile'])
config_structure_dict['responseTypeDropDown']['value'] = response_type
if crypto_feature_support:
set_feature_list(config_structure_dict, crypto_feature_support)
| |
genesis block
logger.error("epoch seed is None for the genesis block!!!!!")
self.epoch_seed = sha256(b'INVALID_EPOCH_SEED')
# Prepare Metadata inputs
if block.block_number == 0 or self._chain.height + 1 == block.block_number:
prev_prev_sv_tracker = copy.deepcopy(self._chain.pstate.prev_stake_validators_tracker)
prev_sv_tracker = copy.deepcopy(self._chain.pstate.stake_validators_tracker)
address_state_dict = dict()
hash_chain = None
seed = self.epoch_seed
else:
prev_prev_sv_tracker = self.get_stake_validators_tracker(block.block_number - 1)
prev_block_metadata = self.blocks[block.block_number - 1]
prev_sv_tracker = copy.deepcopy(prev_block_metadata.stake_validators_tracker)
address_state_dict = copy.deepcopy(prev_block_metadata.address_state_dict)
hash_chain = copy.deepcopy(prev_block_metadata.hash_chain)
seed = prev_block_metadata.next_seed
for raw_vote in block.vote:
vote = Transaction.from_pbdata(raw_vote)
self.add_vote(vote)
if block.block_number > 0:
voteMetadata = self.get_consensus(block.block_number - 1)
consensus_headerhash = self.get_consensus_headerhash(block.block_number - 1)
if block.block_number == 1:
total_stake_amount = self.get_genesis_total_stake()
else:
total_stake_amount = prev_prev_sv_tracker.get_total_stake_amount()
consensus_ratio = voteMetadata.total_stake_amount / total_stake_amount
if consensus_ratio < 0.51:
logger.warning('Block #%s Rejected, Consensus lower than 51%%..', block.block_number)
logger.warning('%s/%s', voteMetadata.total_stake_amount, total_stake_amount)
return False
elif consensus_headerhash != prev_block.headerhash:
logger.warning('Consensus headerhash doesnt match')
logger.warning('Consensus Previous Headerhash %s', consensus_headerhash)
logger.warning('Current Previous Headerhash %s', prev_block.headerhash)
logger.warning('Previous blocknumber #%s', prev_block.block_number)
# TODO: Fork Recovery Logic
return False
if not self._state_add_block_buffer(block, prev_sv_tracker, address_state_dict):
logger.warning('State_validate_block failed inside chainbuffer #%s', block.block_number)
return False
block_metadata = BlockMetadata(block=block,
hash_chain=hash_chain,
epoch_seed=seed,
balance=block_balance)
block_metadata.stake_validators_tracker = prev_sv_tracker
block_metadata.address_state_dict = address_state_dict
block_metadata.update_stxn_state(self._chain.pstate)
if block.block_number > 1: # FIXME: Temporarily 1, once sv added to Genesis Block, change it to 0
block_metadata.update_vote_metadata(prev_prev_sv_tracker)
# add/replace if new option is better
if old_block_metadata is None or block_metadata.sorting_key < old_block_metadata.sorting_key:
self.blocks[block.block_number] = block_metadata
self._remove_blocks(block.block_number + 1)
# Move to stable chain if necessary
return self._move_to_mainchain()
def _update(self,
            block: Block,
            stake_validators_tracker: StakeValidatorsTracker,
            address_txn: Dict[bytes, AddressState]) -> bool:
    """Validate every transaction in ``block`` and apply it to buffered state.

    Checks the block's stake selector, then per transaction: nonce, pubkey
    reuse, and subtype-specific rules (balance locks for stakers, conflicting
    subtypes from the same address, token ownership/amounts).  On success the
    balances/nonces in ``address_txn`` and the ``stake_validators_tracker``
    are mutated in place.

    :param block: block whose transactions are validated and applied.
    :param stake_validators_tracker: active/future stake validator tracker.
    :param address_txn: buffered address states, mutated in place.
    :return: True if every transaction validated and was applied, else False.
    """
    if block.block_number > 0:
        if block.stake_selector not in stake_validators_tracker.sv_dict:
            logger.warning('stake selector not in stake_list_get')
            return False
        if stake_validators_tracker.sv_dict[block.stake_selector].is_banned:
            logger.warning('stake selector is in banned list')
            return False
        if not stake_validators_tracker.sv_dict[block.stake_selector].is_active:
            logger.warning('stake selector is in inactive')
            return False

    # FIX ME : Temporary fix, to include only either ST txn or TransferCoin txn for an address
    stake_txn = set()
    transfercoin_txn = set()
    destake_txn = set()
    message_txn = set()
    token_txn = set()
    transfer_token_txn = set()

    # cycle through every tx in the new block to check state
    for protobuf_tx in block.transactions:
        # FIXME: Simplify this.. too complex. delegate to objects, etc.
        tx = Transaction.from_pbdata(protobuf_tx)

        # Coinbase nonces live on the stake validator, all others on the address.
        if tx.subtype == qrl_pb2.Transaction.COINBASE:
            expected_nonce = stake_validators_tracker.sv_dict[tx.txfrom].nonce + 1
        else:
            expected_nonce = address_txn[tx.txfrom].nonce + 1
        if tx.nonce != expected_nonce:
            logger.warning('nonce incorrect, invalid tx')
            logger.warning('subtype: %s', tx.subtype)
            logger.warning('%s actual: %s expected: %s', tx.txfrom, tx.nonce, expected_nonce)
            return False

        # TODO: To be fixed later
        if tx.pubhash in address_txn[tx.txfrom].pubhashes:
            logger.warning('pubkey reuse detected: invalid tx %s', tx.txhash)
            logger.warning('subtype: %s', tx.subtype)
            return False

        if tx.subtype == qrl_pb2.Transaction.TRANSFER:
            if tx.txfrom in stake_txn:
                logger.warning("Transfer coin done by %s address is a Stake Validator", tx.txfrom)
                return False
            if tx.txfrom in stake_validators_tracker.sv_dict and stake_validators_tracker.sv_dict[
                    tx.txfrom].is_active:
                logger.warning("Source address is a Stake Validator, balance is locked while staking")
                return False
            if (tx.txfrom in stake_validators_tracker.future_stake_addresses and
                    stake_validators_tracker.future_stake_addresses[tx.txfrom].is_active):
                logger.warning("Source address is in Future Stake Validator List, balance is locked")
                return False
            if address_txn[tx.txfrom].balance < tx.amount:
                logger.warning('%s %s exceeds balance, invalid tx', tx, tx.txfrom)
                logger.warning('subtype: %s', tx.subtype)
                logger.warning('Buffer State Balance: %s Transfer Amount %s', address_txn[tx.txfrom].balance, tx.amount)
                return False
            transfercoin_txn.add(tx.txfrom)

        elif tx.subtype == qrl_pb2.Transaction.STAKE:
            # BUGFIX: the original tested membership in a *tuple of sets*
            # ("tx.txfrom in (transfercoin_txn, ...)"), which compared the
            # address against the set objects themselves and was always
            # False.  Check each conflicting-subtype set individually.
            if (tx.txfrom in transfercoin_txn or tx.txfrom in message_txn or
                    tx.txfrom in token_txn or tx.txfrom in transfer_token_txn):
                logger.warning('Block cannot have both st txn & %s txn from same address %s', tx.subtype, tx.txfrom)
                return False
            if tx.txfrom in stake_txn:
                logger.warning('Block cannot have multiple Stake Txn from same address %s', tx.txfrom)
                return False
            if tx.txfrom in destake_txn:
                logger.warning('Block may not have both Stake and Destake txn of same address %s', tx.txfrom)
                return False
            if tx.txfrom in stake_validators_tracker.sv_dict:
                # An existing validator may only re-stake once its current
                # activation window has expired.
                expiry = stake_validators_tracker.sv_dict[tx.txfrom].activation_blocknumber + \
                    config.dev.blocks_per_epoch
                if block.block_number > 1 and tx.activation_blocknumber < expiry:
                    logger.warning('Failed %s is already active for the given range', tx.txfrom)
                    return False
            activation_limit = block.block_number + config.dev.blocks_per_epoch + 1
            if tx.activation_blocknumber > activation_limit:
                logger.warning('Failed %s activation_blocknumber beyond limit', tx.txfrom)
                logger.warning('Found %s', tx.activation_blocknumber)
                # BUGFIX: was "tx.activation_limit" — an attribute that does
                # not exist on the transaction; the local limit was intended.
                logger.warning('Must be less than %s', activation_limit)
                return False
            future_stake_addresses = stake_validators_tracker.future_stake_addresses
            if tx.txfrom not in future_stake_addresses:
                if tx.txfrom in address_txn:
                    balance = address_txn[tx.txfrom].balance
                else:
                    balance = self._chain.pstate._get_address_state(tx.txfrom).balance
                stake_validators_tracker.add_sv(balance, tx, block.block_number)
            stake_txn.add(tx.txfrom)

        elif tx.subtype == qrl_pb2.Transaction.DESTAKE:
            if tx.txfrom in stake_txn:
                logger.warning('Block may not have both Destake and Stake txn of same address %s', tx.txfrom)
                return False
            if tx.txfrom in destake_txn:
                logger.warning('Block cannot have multiple Destake Txn from same address %s', tx.txfrom)
                return False
            if tx.txfrom not in stake_validators_tracker.sv_dict and tx.txfrom not in stake_validators_tracker.future_stake_addresses:
                logger.warning('Failed due to destake %s is not a stake validator', tx.txfrom)
                return False
            # Deactivate both current and future entries for this address.
            if tx.txfrom in stake_validators_tracker.sv_dict:
                stake_validators_tracker.sv_dict[tx.txfrom]._is_active = False
            if tx.txfrom in stake_validators_tracker.future_stake_addresses:
                stake_validators_tracker.future_stake_addresses[tx.txfrom]._is_active = False
            destake_txn.add(tx.txfrom)

        elif tx.subtype == qrl_pb2.Transaction.MESSAGE:
            if tx.txfrom in stake_txn:
                logger.warning("Message Txn done by %s address is a Stake Validator", tx.txfrom)
                return False
            if tx.txfrom in stake_validators_tracker.sv_dict and stake_validators_tracker.sv_dict[
                    tx.txfrom].is_active:
                logger.warning("Source address is a Stake Validator, balance is locked while staking")
                logger.warning("Message Txn dropped")
                return False
            if (tx.txfrom in stake_validators_tracker.future_stake_addresses and
                    stake_validators_tracker.future_stake_addresses[tx.txfrom].is_active):
                logger.warning("Source address is in Future Stake Validator List, balance is locked")
                logger.warning("Message Txn dropped")
                return False
            if address_txn[tx.txfrom].balance < tx.fee:
                logger.warning('%s %s exceeds balance, invalid message tx', tx, tx.txfrom)
                logger.warning('subtype: %s', tx.subtype)
                logger.warning('Buffer State Balance: %s Free %s', address_txn[tx.txfrom].balance, tx.fee)
                return False
            message_txn.add(tx.txfrom)

        elif tx.subtype == qrl_pb2.Transaction.TOKEN:
            if tx.txfrom in stake_txn:
                logger.warning("Token Transaction done by %s address is a Stake Validator", tx.txfrom)
                return False
            if tx.txfrom in stake_validators_tracker.sv_dict and stake_validators_tracker.sv_dict[
                    tx.txfrom].is_active:
                logger.warning("Source address is a Stake Validator, balance is locked while staking")
                logger.warning("Token Txn dropped")
                return False
            if (tx.txfrom in stake_validators_tracker.future_stake_addresses and
                    stake_validators_tracker.future_stake_addresses[tx.txfrom].is_active):
                logger.warning("Source address is in Future Stake Validator List, balance is locked")
                logger.warning("Token Txn dropped")
                return False
            if address_txn[tx.txfrom].balance < tx.fee:
                logger.warning('%s %s exceeds balance, invalid Token tx', tx, tx.txfrom)
                logger.warning('subtype: %s', tx.subtype)
                logger.warning('Buffer State Balance: %s Fee %s',
                               address_txn[tx.txfrom].balance,
                               tx.fee)
                return False
            token_txn.add(tx.txfrom)

        elif tx.subtype == qrl_pb2.Transaction.TRANSFERTOKEN:
            if tx.txfrom in stake_txn:
                logger.warning("Transfer Token Transaction done by %s address is a Stake Validator", tx.txfrom)
                return False
            if tx.txfrom in stake_validators_tracker.sv_dict and stake_validators_tracker.sv_dict[
                    tx.txfrom].is_active:
                logger.warning("Source address is a Stake Validator, balance is locked while staking")
                logger.warning("Transfer Token Txn dropped")
                return False
            if (tx.txfrom in stake_validators_tracker.future_stake_addresses and
                    stake_validators_tracker.future_stake_addresses[tx.txfrom].is_active):
                logger.warning("Source address is in Future Stake Validator List, balance is locked")
                logger.warning("Transfer Token Txn dropped")
                return False
            if address_txn[tx.txfrom].balance < tx.fee:
                logger.warning('%s %s exceeds balance, invalid Transfer Token Txn', tx, tx.txfrom)
                logger.warning('subtype: %s', tx.subtype)
                logger.warning('Buffer State Balance: %s Free %s', address_txn[tx.txfrom].balance, tx.fee)
                return False
            if bin2hstr(tx.token_txhash).encode() not in address_txn[tx.txfrom].tokens:
                logger.warning('%s doesnt own any token with token_txnhash %s', tx.txfrom,
                               bin2hstr(tx.token_txhash).encode())
                return False
            if address_txn[tx.txfrom].tokens[bin2hstr(tx.token_txhash).encode()] < tx.amount:
                logger.warning('Token Transfer amount exceeds available token')
                logger.warning('Token Txhash %s', bin2hstr(tx.token_txhash).encode())
                logger.warning('Available Token Amount %s',
                               address_txn[tx.txfrom].tokens[bin2hstr(tx.token_txhash).encode()])
                logger.warning('Transaction Amount %s', tx.amount)
                return False
            transfer_token_txn.add(tx.txfrom)

        # --- Apply the validated transaction to the buffered state ---
        if tx.subtype != qrl_pb2.Transaction.COINBASE:
            address_txn[tx.txfrom].increase_nonce()
        if tx.subtype == qrl_pb2.Transaction.TRANSFER:
            address_txn[tx.txfrom].balance -= tx.amount + tx.fee
        if tx.subtype in (qrl_pb2.Transaction.MESSAGE,
                          qrl_pb2.Transaction.TOKEN,
                          qrl_pb2.Transaction.TRANSFERTOKEN):
            address_txn[tx.txfrom].balance -= tx.fee
        if tx.subtype == qrl_pb2.Transaction.TOKEN:
            for initial_balance in tx.initial_balances:
                address_txn[initial_balance.address].tokens[bin2hstr(tx.txhash).encode()] += initial_balance.amount
        if tx.subtype == qrl_pb2.Transaction.TRANSFERTOKEN:
            address_txn[tx.txfrom].tokens[bin2hstr(tx.token_txhash).encode()] -= tx.amount
            # Remove Token from address_state when token balance is Zero
            if address_txn[tx.txfrom].tokens[bin2hstr(tx.token_txhash).encode()] == 0:
                del address_txn[tx.txfrom].tokens[bin2hstr(tx.token_txhash).encode()]
            address_txn[tx.txto].tokens[bin2hstr(tx.token_txhash).encode()] += tx.amount
        if tx.subtype in (qrl_pb2.Transaction.TRANSFER, qrl_pb2.Transaction.COINBASE):
            address_txn[tx.txto].balance += tx.amount
        address_txn[tx.txfrom].pubhashes.append(tx.pubhash)
        address_txn[tx.txfrom].transaction_hashes.append(tx.txhash)
    return True
def remove_last_buffer_block(self):
    """Drop the newest buffered block (removal cascades upward from it)."""
    self._remove_blocks(self.height)
def _remove_blocks(self, starting_blocknumber: int):
    """Delete buffered blocks from ``starting_blocknumber`` upward.

    Also discards the vote tracker of each removed block's predecessor.
    No-op when the starting block is not buffered.
    """
    if starting_blocknumber not in self.blocks:
        return
    blocknum = starting_blocknumber
    while blocknum in self.blocks:
        del self.blocks[blocknum]
        # Votes are tracked against the previous block number.
        self._vote_tracker.pop(blocknum - 1, None)
        blocknum += 1
def load_address_state(self, block: Block,
                       address_txn: Dict[bytes, AddressState]) -> Dict[bytes, AddressState]:
    """Ensure ``address_txn`` holds a state entry for every address a block touches.

    Covers senders, recipients of TRANSFER/COINBASE/TRANSFERTOKEN, and for
    TOKEN transactions the owner plus every initial-balance address.

    :param block: block whose transactions are scanned.
    :param address_txn: address-state cache; missing entries are filled in place.
    :return: the same (mutated) ``address_txn`` dict.
    """
    blocknum = block.block_number

    def _ensure(addr):
        # Lazily fetch buffered state only for addresses not seen yet.
        if addr not in address_txn:
            # FIXME: Access to chain buffer from here
            address_txn[addr] = self.get_stxn_state(blocknum, addr)

    for raw_tx in block.transactions:
        tx = Transaction.from_pbdata(raw_tx)
        _ensure(tx.txfrom)
        if tx.subtype in (qrl_pb2.Transaction.TRANSFER,
                          qrl_pb2.Transaction.COINBASE,
                          qrl_pb2.Transaction.TRANSFERTOKEN):
            _ensure(tx.txto)
        if tx.subtype == qrl_pb2.Transaction.TOKEN:
            _ensure(tx.owner)
            for initial_balance in tx.initial_balances:
                _ensure(initial_balance.address)
    # FIXME: Modifying input. Side effect, etc.
    return address_txn
# Returns the number of blocks left before next epoch
@staticmethod
def get_blocks_left(blocknumber: int) -> int:
    """Return how many blocks remain until the next epoch boundary."""
    consumed = blocknumber % config.dev.blocks_per_epoch
    return config.dev.blocks_per_epoch - consumed
def _state_add_block_buffer(self,
block: Block,
stake_validators_tracker: StakeValidatorsTracker,
address_state_dict: Dict[bytes, AddressState]):
# FIXME: This is mixing states
| |
<reponame>troglodyne/ccs-twistedextensions
# -*- test-case-name: twext.internet.test.test_sendfdport -*-
##
# Copyright (c) 2010-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Implementation of a TCP/SSL port that uses send1msg/recv1msg as implemented by
L{twisted.python.sendfd}.
"""
from os import close
from errno import EAGAIN, ENOBUFS
from socket import (
socketpair, fromfd, error as SocketError, AF_UNIX, SOCK_STREAM, SOCK_DGRAM
)
from zope.interface import Interface
from twisted.python.sendmsg import send1msg, recv1msg, getsockfam
from twisted.internet.abstract import FileDescriptor
from twisted.internet.protocol import Protocol, Factory
from twext.python.log import Logger
from twext.python.sendfd import sendfd, recvfd
log = Logger()
class InheritingProtocol(Protocol, object):
    """
    Protocol that immediately hands each accepted connection's socket off to
    another process via its factory, instead of servicing it locally.
    """

    def connectionMade(self):
        """
        Halt all reading and writing on the new transport, then ship its
        underlying socket handle to a worker process through
        L{InheritingProtocolFactory}.
        """
        transport = self.transport
        transport.stopReading()
        transport.stopWriting()
        self.factory.sendSocket(transport.getHandle())
class InheritingProtocolFactory(Factory, object):
    """
    Factory run in the I{master process}: it accepts inbound connections and
    forwards each one, via an L{InheritedSocketDispatcher}, to a I{worker
    process} that inherits the socket.

    @ivar dispatcher: the L{InheritedSocketDispatcher} that routes accepted
        connections to a suitable subprocess.
    @ivar description: string tag transmitted alongside each forwarded
        connection so the worker knows how to treat it.
    """

    protocol = InheritingProtocol

    def __init__(self, dispatcher, description):
        """Remember the dispatcher and the per-connection description tag."""
        self.description = description
        self.dispatcher = dispatcher

    def sendSocket(self, socketObject):
        """
        Forward C{socketObject} to the dispatcher, tagged with my description.
        """
        self.dispatcher.sendFileDescriptor(socketObject, self.description)
class _SubprocessSocket(FileDescriptor, object):
    """
    A socket in the master process pointing at a file descriptor that can be
    used to transmit sockets to a subprocess.

    @ivar outSocket: the UNIX socket used as the send1msg() transport.
    @type outSocket: L{socket.socket}

    @ivar outgoingSocketQueue: an outgoing queue of sockets to send to the
        subprocess, along with their descriptions (strings describing their
        protocol so that the subprocess knows how to handle them; as of this
        writing, either C{"TCP"} or C{"SSL"})
    @ivar outgoingSocketQueue: a C{list} of 2-tuples of C{(socket-object,
        bytes)}

    @ivar status: a record of the last status message received (via
        L{recv1msg}) from the subprocess: this is an application-specific
        indication of how ready this subprocess is to receive more connections.
        A typical usage would be to count the open connections: this is what is
        passed to
    @type status: See L{IStatusWatcher} for an explanation of which methods
        determine this type.

    @ivar dispatcher: The socket dispatcher that owns this L{_SubprocessSocket}
    @type dispatcher: L{InheritedSocketDispatcher}
    """

    def __init__(self, dispatcher, inSocket, outSocket, status, slavenum):
        # Bookkeeping for one subprocess control channel:
        #   inSocket  -> the end handed to the child process
        #   outSocket -> the end kept by the master, used for send1msg/recv1msg
        #   status    -> opaque value managed by the IStatusWatcher
        #   slavenum  -> numeric identifier of the slave process
        FileDescriptor.__init__(self, dispatcher.reactor)
        self.status = status
        self.slavenum = slavenum
        self.dispatcher = dispatcher
        self.inSocket = inSocket
        self.outSocket = outSocket  # XXX needs to be set non-blocking by somebody
        self.fileno = outSocket.fileno
        # Sockets queued for transmission, as (socket, description) pairs.
        self.outgoingSocketQueue = []
        # Sockets already sent, awaiting acknowledgement before being closed.
        self.pendingCloseSocketQueue = []

    def childSocket(self):
        """
        Return the socket that the child process will use to communicate with the master.
        """
        return self.inSocket

    def start(self):
        """
        The master process monitor is about to start the child process associated with this socket.
        Update status to ensure dispatcher know what is going on.
        """
        self.status.start()
        self.dispatcher.statusChanged()

    def restarted(self):
        """
        The child process associated with this socket has signaled it is ready.
        Update status to ensure dispatcher know what is going on.
        """
        self.status.restarted()
        self.dispatcher.statusChanged()

    def stop(self):
        """
        The master process monitor has determined the child process associated with this socket
        has died. Update status to ensure dispatcher know what is going on.
        """
        self.status.stop()
        self.dispatcher.statusChanged()

    def remove(self):
        """
        Remove this socket.
        """
        self.status.stop()
        self.dispatcher.statusChanged()
        self.dispatcher.removeSocket()

    def sendSocketToPeer(self, skt, description):
        """
        Enqueue a socket to send to the subprocess.
        """
        self.outgoingSocketQueue.append((skt, description))
        self.startWriting()

    def doRead(self, recvmsg=recv1msg):
        """
        Receive a status / health message and record it.
        """
        try:
            data, _ignore_flags, _ignore_ancillary = recvmsg(
                self.outSocket.fileno()
            )
        except SocketError, se:
            # EAGAIN/ENOBUFS are transient; anything else is a real error.
            if se.errno not in (EAGAIN, ENOBUFS):
                raise
        else:
            # The watcher tells us how many previously-sent sockets the
            # child has acknowledged; those can now be closed safely.
            closeCount = self.dispatcher.statusMessage(self, data)
            for ignored in xrange(closeCount):
                self.pendingCloseSocketQueue.pop(0).close()

    def doWrite(self, sendfd=sendfd):
        """
        Transmit as many queued pending file descriptors as we can.
        """
        while self.outgoingSocketQueue:
            skt, desc = self.outgoingSocketQueue.pop(0)
            try:
                sendfd(self.outSocket.fileno(), skt.fileno(), desc)
            except SocketError, se:
                if se.errno in (EAGAIN, ENOBUFS):
                    # Kernel backpressure: put the socket back at the head of
                    # the queue and retry on the next doWrite.
                    self.outgoingSocketQueue.insert(0, (skt, desc))
                    return
                raise
            # Ready to close this socket; wait until it is acknowledged.
            self.pendingCloseSocketQueue.append(skt)
        if not self.outgoingSocketQueue:
            self.stopWriting()
class IStatus(Interface):
    """
    Defines the status of a socket. This keeps track of active connections etc.
    """

    def effective():
        """
        The current effective load.

        @return: The current effective load.
        @rtype: L{int}
        """

    def active():
        """
        Whether the socket should be active (able to be dispatched to).

        @return: Active state.
        @rtype: L{bool}
        """

    def start():
        """
        Worker process is starting. Mark status accordingly but do not make
        it active.

        @return: C{self}
        """

    def restarted():
        """
        Worker process has signaled it is ready so make this active.

        @return: C{self}
        """

    def stop():
        """
        Worker process has stopped so make this inactive.

        @return: C{self}
        """
class IStatusWatcher(Interface):
    """
    A provider of L{IStatusWatcher} tracks the I{status messages} reported by
    the worker processes over their control sockets, and computes internal
    I{status values} for those messages.  The I{messages} are individual
    octets, representing one of three operations.  C{0} meaning "a new worker
    process has started, with zero connections being processed", C{+} meaning
    "I have received and am processing your request; I am confirming that my
    requests-being-processed count has gone up by one", and C{-} meaning "I
    have completed processing a request, my requests-being-processed count has
    gone down by one".

    The I{status value} tracked by L{_SubprocessSocket.status} is an integer,
    indicating the current requests-being-processed value.  (FIXME: the
    intended design here is actually just that all I{this} object knows about
    is that L{_SubprocessSocket.status} is an orderable value, and that this
    C{statusWatcher} will compute appropriate values so the status that I{sorts
    the least} is the socket to which new connections should be directed; also,
    the format of the status messages is only known / understood by the
    C{statusWatcher}, not the L{InheritedSocketDispatcher}.  It's hard to
    explain it in that manner though.)

    @note: the intention of this interface is to eventually provide a broader
        notion of what might constitute 'status', so the above explanation just
        explains the current implementation, in for expediency's sake, rather
        than the somewhat more abstract language that would be accurate.
    """

    def initialStatus():
        """
        A new socket was created and added to the dispatcher.  Compute an
        initial value for its status.

        @return: the new status.
        """

    def newConnectionStatus(previousStatus):
        """
        A new connection was sent to a given socket.  Compute its status based
        on the previous status of that socket.

        @param previousStatus: A status value for the socket being sent work,
            previously returned by one of the methods on this interface.

        @return: the socket's status after incrementing its outstanding work.
        """

    def statusFromMessage(previousStatus, message):
        """
        A status message was received by a worker.  Combine the previous status
        value (returned from L{newConnectionStatus}, L{initialStatus}, or
        L{statusFromMessage}) with the reported message to produce a new
        status value.

        @param previousStatus: A status value for the socket being sent work,
            previously returned by one of the methods on this interface.

        @param message: The raw status message received from the worker.

        @return: the socket's status after taking the reported message into
            account.
        """

    def closeCountFromStatus(previousStatus):
        """
        Based on a status previously returned from a method on this
        L{IStatusWatcher}, determine how many sockets may be closed.

        @return: a 2-tuple of C{number of sockets that may safely be closed},
            C{new status}.
        @rtype: 2-tuple of (C{int}, C{<opaque>})
        """
class InheritedSocketDispatcher(object):
"""
Used by one or more L{InheritingProtocolFactory}s, this keeps track of a
list of available sockets that connect to I{worker process}es and sends
inbound connections to be inherited over those sockets, by those processes.
L{InheritedSocketDispatcher} is therefore instantiated in the I{master
process}.
@ivar statusWatcher: The object which will handle status messages and
convert them into current | |
import math
import sys
#PIL
from PIL import ImageDraw
#scipy
import numpy
from scipy import optimize, ndimage
#appion
from appionlib import apImage
from appionlib import apDisplay
from appionlib import apDog
#pyami
from pyami import peakfinder
from pyami import correlator
#================================
#================================
def repairPicks(a1, a2, rmsd):
    """
    Attempts to repair lists a1 and a2 that have become shifted
    out of frame with minimal damage.

    Fits a two-level step model (_rsmdStep) to the per-pick rmsd curve and
    removes the pick at the fitted step-start index from both arrays.
    NOTE(review): relies on numpyPop2d(), defined elsewhere in this module.
    """
    maxdev = ndimage.mean(rmsd[:5])
    avgdev = 3*ndimage.mean(rmsd)
    # Initial guess: [base level, step height, step start index, step end index].
    x0 = [ maxdev, avgdev, 0.25*len(rmsd), 0.75*len(rmsd) ]
    print x0
    solved = optimize.fmin(_rsmdStep, x0, args=([rmsd]),
        xtol=1e-4, ftol=1e-4, maxiter=500, maxfun=500, disp=0, full_output=1)
    # solved[0] is the optimized parameter vector; index 2 is the step start.
    upstep = int(math.floor(solved[0][2]))
    print solved
    a1b = numpyPop2d(a1, upstep)
    a2b = numpyPop2d(a2, upstep)
    return a1b, a2b
#================================
#================================
def _rsmdStep(x1, rmsd):
    """
    Error functional for the two-level step fit used by repairPicks.

    x1 holds [base level, bump height, step start, step end]; the model is a
    constant baseline with an additive bump between the two (integer) step
    indices.  Returns the mean normalized squared residual.
    """
    base = x1[0]
    bump = x1[1]
    lo = int(x1[2])
    hi = int(x1[3])
    model = numpy.ones((len(rmsd))) * base
    model[lo:hi] += bump
    return ndimage.mean((rmsd - model)**2 / model)
##
##
## Fit All Least Squares Routine
##
##
#================================
#================================
def willsq(a1, a2, \
        theta0, gamma0=0.0, phi0=0.0, scale0=1.0, shiftx0=0.0, shifty0=0.0,\
        xscale=numpy.ones((6), dtype=numpy.float32)):
    """
    given two sets of particles; find the tilt, and twist of them

    Runs a simplex fit (optimize.fmin over _diffParticles) of the affine
    parameters (theta, gamma, phi, scale, shiftx, shifty) mapping a2 onto a1.
    Angle arguments are in degrees; they are converted to radians internally.
    Returns a dict with fitted angles (degrees), scale, shifts, anchor points,
    the final 'rmsd', iteration count and a heuristic 'prob' score.
    NOTE(review): xscale uses a shared mutable default array — callers must
    not modify the returned default; confirm no caller does.
    """
    #x0 initial values
    fit = {}
    initx = numpy.array((
        theta0 * math.pi/180.0,
        gamma0 * math.pi/180.0,
        phi0 * math.pi/180.0,
        scale0,
        shiftx0,
        shifty0,
    ), dtype=numpy.float32)
    #x1 delta values
    x0 = numpy.zeros(6, dtype=numpy.float32)
    #xscale scaling values
    #xscale = numpy.ones(5, dtype=numpy.float32)
    #xscale = numpy.array((1,1,1,1,1), dtype=numpy.float32)
    #print "optimizing angles and shift..."
    #print "initial rmsd:",_diffParticles(x0, initx, xscale, a1, a2)
    a1f = numpy.asarray(a1, dtype=numpy.float32)
    a2f = numpy.asarray(a2, dtype=numpy.float32)
    solved = optimize.fmin(_diffParticles, x0, args=(initx, xscale, a1f, a2f),
        xtol=1e-4, ftol=1e-4, maxiter=500, maxfun=500, disp=0, full_output=1)
    x1 = solved[0]
    fit['rmsd'] = float(solved[1]) #_diffParticles(x1, initx, xscale, a1, a2)
    fit['iter'] = int(solved[3])
    #print "final rmsd: "+str(fit['rmsd'])+" in "+str(fit['iter'])+" iterations"
    #x3 final values
    x3 = x1 * xscale + initx
    # Convert fitted angles back to degrees; fold gamma/phi into (-90, 90].
    fit['theta'] = x3[0]*180.0/math.pi
    fit['gamma'] = x3[1]*180.0/math.pi % 180.0
    fit['phi'] = x3[2]*180.0/math.pi % 180.0
    if fit['gamma'] > 90:
        fit['gamma'] -= 180.0
    if fit['phi'] > 90:
        fit['phi'] -= 180.0
    fit['scale'] = x3[3]
    fit['shiftx'] = x3[4]
    fit['shifty'] = x3[5]
    fit['point1'], fit['point2'] = getPointsFromArrays(a1, a2, fit['shiftx'], fit['shifty'])
    #print "Final=",fit['point1'],"\t", fit['point2']
    # Heuristic confidence score: decays with the final residual.
    fit['prob'] = math.exp(-1.0*math.sqrt(abs(fit['rmsd'])))**2
    return fit
#================================
#================================
def _diffParticles(x1, initx, xscale, a1, a2):
    """
    Cost function for willsq: rms distance between a1 and a2 after mapping
    a2 into a1's frame.  x1 holds scaled deltas from the initial guess initx.
    """
    x2 = x1 * xscale + initx
    theta = x2[0]    # tilt angle (radians)
    gamma = x2[1]    # image 1 rotation (radians)
    phi = x2[2]      # image 2 rotation (radians)
    scale = x2[3]
    shiftx = x2[4]
    shifty = x2[5]
    point1, point2 = getPointsFromArrays(a1, a2, shiftx, shifty)
    #print point1,"\t",point2
    a2b = a2Toa1(a2, theta, gamma, phi, scale, point1, point2)
    #maxpix = float(len(a2b))
    diffmat = (a1 - a2b)
    # NOTE(review): xrmsd/yrmsd are already per-axis means; the extra
    # division by len(a2b) below makes this an unconventional rms — kept as-is.
    xrmsd = ndimage.mean(diffmat[:,0]**2)
    yrmsd = ndimage.mean(diffmat[:,1]**2)
    #xmed = numpy.median(diffmat[:,0]**2)
    #ymed = numpy.median(diffmat[:,1]**2)
    rmsd = math.sqrt((xrmsd + yrmsd)/float(len(a2b)))
    #rmed = math.sqrt((xmed + ymed)/float(len(a2b)))
    #print (x2*57.29).round(decimals=3),round(rmsd,6)
    return rmsd
#================================
#================================
def getPointsFromArrays(a1, a2, shiftx, shifty):
    """
    Return the first pick of each array as float32 anchor points, applying
    the (shiftx, shifty) offset to the second one.  Returns (None, None)
    when either array is empty.
    """
    if len(a1) == 0 or len(a2) == 0:
        return None, None
    offset = numpy.array([shiftx, shifty], dtype=numpy.float32)
    anchor1 = numpy.asarray(a1[0, :], dtype=numpy.float32)
    anchor2 = numpy.asarray(a2[0, :], dtype=numpy.float32) + offset
    return (anchor1, anchor2)
#================================
#================================
def setPointsFromArrays(a1, a2, data):
    """
    Store anchor points into the fit-data dict: 'point1' is the first pick of
    a1, 'point2' the first pick of a2 shifted by (+shiftx, +shifty), and
    'point2b' the same pick shifted the opposite way.
    """
    if len(a1) > 0 and len(a2) > 0:
        data['point1'] = numpy.asarray(a1[0,:], dtype=numpy.float32)
        data['point2'] = ( numpy.asarray(a2[0,:], dtype=numpy.float32)
            + numpy.array([data['shiftx'], data['shifty']], dtype=numpy.float32) )
        data['point2b'] = ( numpy.asarray(a2[0,:], dtype=numpy.float32)
            - numpy.array([data['shiftx'], data['shifty']], dtype=numpy.float32) )
    else:
        # NOTE(review): debug output on empty input; the function then
        # silently returns without setting any keys.
        print a1, a2
        print "FAILED"
    return
#================================
#================================
def a1Toa2Data(a1, data):
    """
    Map picks from image 1 into image 2's frame using a fit-data dict.
    Inverse of a2Toa1Data: tilt negated, gamma/phi swapped, scale inverted,
    and the two anchor points exchanged.
    """
    thetarad = data['theta']*math.pi/180.0
    gammarad = data['gamma']*math.pi/180.0
    phirad = data['phi']*math.pi/180.0
    # NOTE(review): 'point2b' is computed here but never used below —
    # possibly dead code or a latent bug; confirm against callers.
    if not 'point2b' in data:
        data['point2b'] = data['point2'] - 2 * numpy.array([data['shiftx'], data['shifty']], dtype=numpy.float32)
    return a2Toa1(a1, -1.0*thetarad, 1.0*phirad, 1.0*gammarad,
        1.0/data['scale'], data['point2'], data['point1'])
#================================
#================================
def a2Toa1Data(a2, data):
    """
    Map picks from image 2 into image 1's frame, reading the fitted
    parameters (degrees) out of a fit-data dict and forwarding them to
    a2Toa1 in radians.
    """
    deg2rad = math.pi / 180.0
    return a2Toa1(a2,
                  data['theta'] * deg2rad,
                  data['gamma'] * deg2rad,
                  data['phi'] * deg2rad,
                  data['scale'],
                  data['point1'],
                  data['point2'])
#================================
#================================
def a1Toa2(a1, theta, gamma, phi, scale, point1, point2):
    """
    Inverse of a2Toa1: map image-1 picks into image-2 space by negating the
    tilt, swapping the two in-plane rotations, inverting the scale and
    exchanging the anchor points.
    """
    #raise NotImplementedError
    return a2Toa1(a1, -1.0 * theta, 1.0 * phi, 1.0 * gamma,
                  1.0 / scale, point2, point1)
#================================
#================================
def a2Toa1(a2, theta, gamma, phi, scale, point1, point2):
    """
    Transform the picks in a2 into the affine frame of image 1.

    a2     -> numpy list of x,y coordinates from image 2
    theta  -> tilt angle (radians)
    gamma  -> image 1 rotation (radians)
    phi    -> image 2 rotation (radians)
    scale  -> relative scale factor
    point1 -> coordinate of an anchor particle in image 1
    point2 -> coordinate of the same particle in image 2
    """
    f32 = numpy.float32
    # In-plane rotation built from phi (applied first, on the image-2 side).
    ca = math.cos(1.0 * phi)
    sa = math.sin(1.0 * phi)
    rot_in = numpy.array([[ca, -sa], [sa, ca]], dtype=f32)
    # Tilt along x: compress for negative theta, stretch otherwise.
    if theta < 0:
        tilt = numpy.array([[math.cos(theta), 0.0], [0.0, 1.0]], dtype=f32)
    else:
        tilt = numpy.array([[1.0 / math.cos(theta), 0.0], [0.0, 1.0]], dtype=f32)
    # In-plane rotation built from -gamma (applied last, on the image-1 side).
    cb = math.cos(-1.0 * gamma)
    sb = math.sin(-1.0 * gamma)
    rot_out = numpy.array([[cb, -sb], [sb, cb]], dtype=f32)
    zoom = numpy.array([[scale, 0.0], [0.0, scale]], dtype=f32)
    # Compose; the scaling sits on the outside when enlarging, inside otherwise.
    if scale > 1.0:
        trans = numpy.dot(numpy.dot(numpy.dot(zoom, rot_out), tilt), rot_in)
    else:
        trans = numpy.dot(numpy.dot(numpy.dot(rot_out, tilt), rot_in), zoom)
    # Re-anchor each pick at point2, transform, and re-anchor at point1.
    out = numpy.zeros(a2.shape, dtype=f32)
    for idx in range((a2.shape)[0]):
        out[idx] = numpy.dot(trans, a2[idx, :] - point2) + point1
    return out
#================================
#================================
def maskOverlapRegion(image1, image2, data):
    """
    Mask each image down to the region that overlaps the other image under
    the fitted transform in ``data``, returning the masked (image1, image2).

    Each image's corner frame is transformed into the other image's space
    (a1Toa2Data / a2Toa1Data), the resulting quadrilateral is rasterized as a
    PIL polygon mask, and pixels outside the mask are pushed to a bright
    clamp value so they dominate neither display nor correlation.
    """
    #image1 = ndimage.median_filter(image1, size=2)
    #image2 = ndimage.median_filter(image2, size=2)
    #SET IMAGE LIMITS
    ####################################
    # Slightly oversized corner frame (gap pixels past each edge); index 0
    # carries the anchor point, indices 1-4 the four corners.
    gap = int(image1.shape[0]/256.0)
    xm = image1.shape[1]+gap
    ym = image1.shape[0]+gap
    a1 = numpy.array([ data['point1'], [-gap,-gap], [-gap,ym], [xm,ym], [xm,-gap], ])
    xm = image2.shape[1]+gap
    ym = image2.shape[0]+gap
    a2 = numpy.array([ data['point2'], [-gap,-gap], [-gap,ym], [xm,ym], [xm,-gap], ])
    #CALCULATE TRANSFORM LIMITS
    ####################################
    a2mask = a1Toa2Data(a1, data)
    a1mask = a2Toa1Data(a2, data)
    #print "a1=",a1
    #print "a1mask=",a1mask
    #print "a2=",a2
    #print "a2mask=",a2mask
    #CONVERT NUMPY TO POLYGON LIST
    ####################################
    #maskimg2 = polygon.filledPolygon(img.shape, vert2)
    # Flatten corner rows 1-4 into the [x0,y0,x1,y1,...] form PIL expects.
    a1masklist = []
    a2masklist = []
    for j in range(4):
        for i in range(2):
            item = int(a1mask[j+1,i])
            a1masklist.append(item)
            item = int(a2mask[j+1,i])
            a2masklist.append(item)
    #CREATE POLYGON MASK FROM THE LIMITS 1 -> IMAGE 2
    ####################################
    #print "a2mask=",numpy.asarray(a2mask, dtype=numpy.int32)
    #print "a2masklist=",a2masklist
    mask2 = numpy.zeros(shape=image2.shape, dtype=numpy.bool_)
    mask2b = apImage.arrayToImage(mask2, normalize=False)
    mask2b = mask2b.convert("L")
    draw2 = ImageDraw.Draw(mask2b)
    draw2.polygon(a2masklist, fill="white")
    mask2 = apImage.imageToArray(mask2b, dtype=numpy.float32)
    #DRAW POLYGON ONTO IMAGE 2
    ####################################
    mean2 = ndimage.mean(image2)
    std2 = ndimage.standard_deviation(image2)
    immin2 = mean2 - 2.0*std2
    #med2 = numpy.median(image2.flatten())
    #print "MAX=",ndimage.maximum(image2), med2, mean2, std2
    #immin2 = ndimage.minimum(image2)+1.0
    # Shift so in-mask pixels are positive, zero everything outside the mask.
    image2 = (image2-immin2)*mask2/255.0
    #mean2 = ndimage.mean(image2)
    #std2 = ndimage.standard_deviation(image2)
    #med2 = numpy.median(image2.flatten())
    immax2 = min(ndimage.maximum(image2), 8.0*std2)
    #print "MAX=",ndimage.maximum(image2), med2, mean2, std2
    #immax2 = mean2 + 3.0 * std2
    # Pixels zeroed above (outside the overlap) are pushed to the clamp max.
    image2 = numpy.where(image2==0, immax2, image2)
    #CREATE POLYGON MASK FROM THE LIMITS 2 -> IMAGE 1
    ####################################
    #print "a1mask=",numpy.asarray(a1mask, dtype=numpy.int32)
    #print "a1masklist=",a1masklist
    mask1 = numpy.zeros(shape=image1.shape, dtype=numpy.bool_)
    mask1b = apImage.arrayToImage(mask1, normalize=False)
    mask1b = mask1b.convert("L")
    draw1 = ImageDraw.Draw(mask1b)
    draw1.polygon(a1masklist, fill="white")
    mask1 = apImage.imageToArray(mask1b, dtype=numpy.float32)
    #DRAW POLYGON ONTO IMAGE 1
    ####################################
    mean1 = ndimage.mean(image1)
    std1 = ndimage.standard_deviation(image1)
    #med1 = numpy.median(image1.flatten())
    immin1 = mean1 - 2.0 * std1
    #immin1 = ndimage.minimum(image1)+1.0
    #print "MAX=",ndimage.maximum(image1), med1, mean1, std1
    image1 = (image1-immin1)*mask1/255.0
    #mean1 = ndimage.mean(image1)
    #std1 = ndimage.standard_deviation(image1)
    #med1 = numpy.median(image1.flatten())
    immax1 = min(ndimage.maximum(image1), 8.0*std1)
    #print "MAX=",ndimage.maximum(image1), med1, mean1, std1
    #immax1 = mean1 + 3.0 * std1
    image1 = numpy.where(image1==0, immax1, image1)
    return (image1, image2)
#================================
#================================
def getOverlapPercent(image1, image2, data):
    """
    Estimate the fractional overlap between the two images under the fitted
    transform in ``data``.

    The corner frame of each image is mapped into the other image's space,
    rasterized as a PIL polygon mask, and the mask's filled fraction is
    measured.  Returns (max_fraction, min_fraction) over the two directions.
    NOTE(review): the division by ndimage.maximum(mask) normalizes the
    255-filled mask; it would divide by zero if a polygon covers no pixels.
    """
    #SET IMAGE LIMITS
    # Oversized corner frame; row 0 is the anchor point, rows 1-4 the corners.
    gap = int(image1.shape[0]/256.0)
    xm = image1.shape[1]+gap
    ym = image1.shape[0]+gap
    a1 = numpy.array([ data['point1'], [-gap,-gap], [-gap,ym], [xm,ym], [xm,-gap], ])
    xm = image2.shape[1]+gap
    ym = image2.shape[0]+gap
    a2 = numpy.array([ data['point2'], [-gap,-gap], [-gap,ym], [xm,ym], [xm,-gap], ])
    #CALCULATE TRANSFORM LIMITS
    a2mask = a1Toa2Data(a1, data)
    a1mask = a2Toa1Data(a2, data)
    #CONVERT NUMPY TO POLYGON LIST
    a1masklist = []
    a2masklist = []
    for j in range(4):
        for i in range(2):
            item = int(a1mask[j+1,i])
            a1masklist.append(item)
            item = int(a2mask[j+1,i])
            a2masklist.append(item)
    #CREATE POLYGON MASK FROM THE LIMITS 1 -> IMAGE 2
    mask2 = numpy.zeros(shape=image2.shape, dtype=numpy.bool_)
    mask2b = apImage.arrayToImage(mask2, normalize=False)
    mask2b = mask2b.convert("L")
    draw2 = ImageDraw.Draw(mask2b)
    draw2.polygon(a2masklist, fill="white")
    mask2 = apImage.imageToArray(mask2b, dtype=numpy.float32)
    #CREATE POLYGON MASK FROM THE LIMITS 2 -> IMAGE 1
    mask1 = numpy.zeros(shape=image1.shape, dtype=numpy.bool_)
    mask1b = apImage.arrayToImage(mask1, normalize=False)
    mask1b = mask1b.convert("L")
    draw1 = ImageDraw.Draw(mask1b)
    draw1.polygon(a1masklist, fill="white")
    mask1 = apImage.imageToArray(mask1b, dtype=numpy.float32)
    # Filled fraction of each mask (sum / area, normalized by the fill value).
    percent1 = ndimage.sum(mask1) / (mask1.shape[0]*mask1.shape[1]) / ndimage.maximum(mask1)
    percent2 = ndimage.sum(mask2) / (mask2.shape[0]*mask2.shape[1]) / ndimage.maximum(mask2)
    return max(percent1,percent2), min(percent1,percent2)
#================================
#================================
def mergePicks(picks1, picks2, limit=25.0):
	"""
	Merge two pick lists, keeping from `picks2` only those picks that are
	farther than `limit` pixels from every pick already in `picks1`.

	Returns picks1 unchanged when nothing new survives, otherwise a new
	vertically-stacked array.
	"""
	keepers = []
	for candidate in picks2:
		nearest, dist = findClosestPick(candidate, picks1)
		if dist > limit:
			keepers.append(candidate)
	#apDisplay.printMsg("Kept "+str(len(good))+" of "+str(len(picks2))+" overlapping peaks")
	if not keepers:
		return picks1
	return numpy.vstack((picks1, numpy.asarray(keepers)))
def betterMergePicks(picks1a, picks1b, picks2a, picks2b, limit=25.0, msg=True):
	"""
	Merge a new set of paired picks (picks1b/picks2b) into an existing paired
	set (picks1a/picks2a), dropping any new pair whose pick in EITHER image
	lies within `limit` pixels of an existing pick in that image.

	picks* are Nx2 (x, y) arrays; rows of picks1* correspond 1:1 with rows of
	picks2*.  Returns the merged (newa1, newa2) arrays; when msg is True a
	summary line is printed via apDisplay.
	"""
	picks1c = []
	picks2c = []
	origpart = picks1b.shape[0]
	#elimate peaks that overlap with already picked
	# pass 1: filter against the existing image-1 picks
	for i in range(picks1b.shape[0]):
		p1a, dist = findClosestPick(picks1b[i], picks1a)
		if dist > limit:
			#no nearby particle
			picks1c.append(picks1b[i])
			picks2c.append(picks2b[i])
		#picks1b = numpyPop2d(picks1b, i)
		#picks2b = numpyPop2d(picks2b, i)
	#apDisplay.printMsg("Kept "+str(len(picks1c))+" of "+str(len(picks1b)))
	#apDisplay.printMsg("Kept "+str(len(picks2c))+" of "+str(len(picks2b)))
	# pass 2: same filter against the existing image-2 picks
	picks1d = []
	picks2d = []
	for i,p2c in enumerate(picks2c):
		p2a, dist = findClosestPick(p2c, picks2a)
		if dist > limit:
			#no nearby particle
			picks1d.append(picks1c[i])
			picks2d.append(picks2c[i])
	#apDisplay.printMsg("Kept "+str(len(picks1d))+" of "+str(len(picks1c)))
	#apDisplay.printMsg("Kept "+str(len(picks2d))+" of "+str(len(picks2c)))
	picks1e = numpy.asarray(picks1d, dtype=numpy.int32)
	picks2e = numpy.asarray(picks2d, dtype=numpy.int32)
	#merge pick sets
	if picks1e.shape[0] > 0 and picks2e.shape[0] > 0:
		newa1 = numpy.vstack((picks1a, picks1e))
		newa2 = numpy.vstack((picks2a, picks2e))
	else:
		newa1 = picks1a
		newa2 = picks2a
	if msg is True:
		newpart = len(newa1) - len(picks1a)
		#newpart1 = len(picks1b) - elim
		apDisplay.printMsg("Imported "+str(newpart)+" of "+str(origpart)
			+" & merged with existing " +str(len(picks1a))+" giving "+str(len(newa1))+" particles")
	return newa1,newa2
#================================
#================================
def numpyPop2d(a, i):
	"""Return a copy of the 2-D array `a` with row `i` removed."""
	top = a[:i, :]
	bottom = a[i + 1:, :]
	return numpy.vstack((top, bottom))
#================================
#================================
def alignPicks(picks1, picks2, data, limit=20.0):
	"""
	Pair particle picks across a tilt pair by brute-force nearest-neighbor
	matching (see alignPicks2 for the grid-bucketed, faster variant).

	picks2 is transformed into image-1 space; each image-1 pick claims the
	closest transformed pick within `limit` pixels, and each image-2 pick may
	be claimed at most once.  Returns (nlist1, nlist2) int32 arrays of the
	matched picks in their original coordinate systems.
	"""
	list1 = []
	alignlist2 = []
	#transform picks2
	alignpicks2 = a2Toa1Data(picks2, data)
	#find closest pick and insert into lists
	# `filled` marks image-2 picks that were already claimed (keyed by repr)
	filled = {}
	for pick in picks1:
		closepick, dist = findClosestPick(pick, alignpicks2)
		if dist < limit:
			key = str(closepick)
			if not key in filled:
				list1.append(pick)
				alignlist2.append(closepick)
				filled[key] = True
	"""
	limit *= 2.0
	for pick in picks1:
		closepick,dist = findClosestPick(pick, alignpicks2)
		if dist < limit:
			key = str(closepick)
			if not key in filled:
				list1.append(pick)
				alignlist2.append(closepick)
				filled[key] = True
	limit *= 2.0
	for pick in picks1:
		closepick,dist = findClosestPick(pick, alignpicks2)
		if dist < limit:
			key = str(closepick)
			if not key in filled:
				list1.append(pick)
				alignlist2.append(closepick)
				filled[key] = True
	"""
	#convert lists
	nlist1 = numpy.array(list1, dtype=numpy.int32)
	nalignlist2 = numpy.array(alignlist2, dtype=numpy.int32)
	#transform back
	nlist2 = a1Toa2Data(nalignlist2, data)
	apDisplay.printMsg("Aligned "+str(len(nlist1))+" of "+str(len(picks1))+\
		" particles to "+str(len(nlist2))+" of "+str(len(picks2)))
	return nlist1, nlist2
#================================
#================================
def findClosestPick(origpick, picks):
	"""
	Linear scan for the pick in `picks` nearest to `origpick`.

	Returns (pick, distance); when `picks` is empty or nothing is closer
	than the 512.0 cutoff, returns (None, 512.0).
	"""
	best = None
	bestdist = 512.0
	for candidate in picks:
		dist = pickDist(origpick, candidate)
		if dist < bestdist:
			bestdist = dist
			best = candidate
	return best, bestdist
#================================
#================================
def alignPicks2(picks1, picks2, data, limit=20.0, msg=True):
	"""
	Pair particle picks across a tilt pair, like alignPicks, but buckets the
	transformed image-2 picks into a (limit x limit) grid so findClosestPick2
	only needs to examine nearby cells.

	Returns (nlist1, nlist2) int32 arrays of matched picks in their original
	coordinate systems.
	"""
	### create distance dictionary
	alignpicks2 = a2Toa1Data(picks2, data)
	sortedDict2 = {}
	index = 0
	# key "gx,gy" is the grid cell; value is a list of pick indices in it
	while index < len(alignpicks2):
		p2 = alignpicks2[index]
		key = "%d,%d"%(p2[0]/limit, p2[1]/limit,)
		if not key in sortedDict2:
			sortedDict2[key] = [index,]
		else:
			sortedDict2[key].append(index)
		index+=1
	### find matching picks
	# each image-2 pick may be claimed by at most one image-1 pick
	filled = {}
	list1 = []
	alignlist2 = []
	for p1 in picks1:
		closepick, dist = findClosestPick2(p1, alignpicks2, sortedDict2, limit)
		if dist < limit:
			key = str(closepick)
			if not key in filled:
				list1.append(p1)
				alignlist2.append(closepick)
				filled[key] = True
	#convert lists
	nlist1 = numpy.array(list1, dtype=numpy.int32)
	nalignlist2 = numpy.array(alignlist2, dtype=numpy.int32)
	#transform back
	nlist2 = a1Toa2Data(nalignlist2, data)
	if msg is True:
		apDisplay.printMsg("Aligned "+str(len(nlist1))+" of "+str(len(picks1))+\
			" particles to "+str(len(nlist2))+" of "+str(len(picks2)))
	return nlist1, nlist2
#================================
#================================
def findClosestPick2(origpick, picks, sdict, limit):
picked = None
bestdist = 512.0
x = int(origpick[0]/limit)
y = | |
"""
x86 Encoding recipes.
"""
from __future__ import absolute_import
from cdsl.isa import EncRecipe
from cdsl.predicates import IsSignedInt, IsEqual, Or
from cdsl.predicates import IsZero32BitFloat, IsZero64BitFloat
from cdsl.registers import RegClass
from base.formats import Unary, UnaryIeee32, UnaryIeee64, UnaryImm, UnaryBool
from base.formats import Binary, BinaryImm
from base.formats import MultiAry, NullAry
from base.formats import Trap, Call, CallIndirect, Store, Load
from base.formats import IntCompare, IntCompareImm, FloatCompare
from base.formats import IntCond, FloatCond
from base.formats import IntSelect, IntCondTrap, FloatCondTrap
from base.formats import Jump, Branch, BranchInt, BranchFloat
from base.formats import BranchTableEntry, BranchTableBase, IndirectJump
from base.formats import Ternary, FuncAddr, UnaryGlobalValue
from base.formats import RegMove, RegSpill, RegFill, CopySpecial
from base.formats import LoadComplex, StoreComplex
from base.formats import StackLoad
from .registers import GPR, ABCD, FPR
from .registers import GPR8, FPR8, FLAG
from .registers import StackGPR32, StackFPR32
from .defs import supported_floatccs
from .settings import use_sse41
try:
from typing import Tuple, Dict, Sequence, Any # noqa
from cdsl.instructions import InstructionFormat # noqa
from cdsl.isa import ConstraintSeq, BranchRange, PredNode, OperandConstraint # noqa
except ImportError:
pass
# Opcode representation.
#
# Cranelift requires each recipe to have a single encoding size in bytes, and
# x86 opcodes are variable length, so we use separate recipes for different
# styles of opcodes and prefixes. The opcode format is indicated by the recipe
# name prefix:
OPCODE_PREFIX = {
        # Prefix bytes       Name     mmpp
        ():                  ('Op1', 0b0000),
        (0x66,):             ('Mp1', 0b0001),
        (0xf3,):             ('Mp1', 0b0010),
        (0xf2,):             ('Mp1', 0b0011),
        (0x0f,):             ('Op2', 0b0100),
        (0x66, 0x0f):        ('Mp2', 0b0101),
        (0xf3, 0x0f):        ('Mp2', 0b0110),
        (0xf2, 0x0f):        ('Mp2', 0b0111),
        (0x0f, 0x38):        ('Op3', 0b1000),
        (0x66, 0x0f, 0x38):  ('Mp3', 0b1001),
        (0xf3, 0x0f, 0x38):  ('Mp3', 0b1010),
        (0xf2, 0x0f, 0x38):  ('Mp3', 0b1011),
        (0x0f, 0x3a):        ('Op3', 0b1100),
        (0x66, 0x0f, 0x3a):  ('Mp3', 0b1101),
        (0xf3, 0x0f, 0x3a):  ('Mp3', 0b1110),
        (0xf2, 0x0f, 0x3a):  ('Mp3', 0b1111)
        }

# The table above does not include the REX prefix which goes after the
# mandatory prefix. VEX/XOP and EVEX prefixes are not yet supported. Encodings
# using any of these prefixes are represented by separate recipes.
#
# The encoding bits are:
#
# 0-7:   The opcode byte <op>.
# 8-9:   pp, mandatory prefix:
#        00 none (Op*)
#        01 66   (Mp*)
#        10 F3   (Mp*)
#        11 F2   (Mp*)
# 10-11: mm, opcode map:
#        00 <op>        (Op1/Mp1)
#        01 0F <op>     (Op2/Mp2)
#        10 0F 38 <op>  (Op3/Mp3)
#        11 0F 3A <op>  (Op3/Mp3)
# 12-14  rrr, opcode bits for the ModR/M byte for certain opcodes.
# 15:    REX.W bit (or VEX.W/E)
#
# There is some redundancy between bits 8-11 and the recipe names, but we have
# enough bits, and the pp+mm format is ready for supporting VEX prefixes.


def decode_ops(ops, rrr=0, w=0):
    # type: (Tuple[int, ...], int, int) -> Tuple[str, int]
    """
    Given a sequence of opcode bytes, compute the recipe name prefix and
    encoding bits.

    `ops[:-1]` selects the prefix row in OPCODE_PREFIX; `ops[-1]` is the
    opcode byte packed into encoding bits 0-7 per the layout above.
    """
    assert rrr <= 0b111
    assert w <= 1
    name, mmpp = OPCODE_PREFIX[ops[:-1]]
    op = ops[-1]
    # The opcode byte occupies bits 0-7; a value of 256 (the old `<= 256`
    # bound) would silently corrupt the pp field, so insist on a real byte.
    assert op <= 255
    return (name, op | (mmpp << 8) | (rrr << 12) | (w << 15))
def replace_put_op(emit, prefix):
    # type: (str, str) -> str
    """
    Given a snippet of Rust code (or None), replace the `PUT_OP` macro with
    the corresponding `put_*` function from the `binemit.rs` module.
    """
    if emit is None:
        return None
    put_fn = 'put_' + prefix.lower()
    return emit.replace('PUT_OP', put_fn)
# Register class mapping for no-REX instructions.
# Without a REX prefix only the 8 "classic" registers are encodable, so the
# full GPR/FPR register classes are narrowed to their 8-register subsets.
NOREX_MAP = {
    GPR: GPR8,
    FPR: FPR8
}


def map_regs_norex(regs):
    # type: (Sequence[OperandConstraint]) -> Sequence[OperandConstraint]
    """Narrow each register-class constraint to its no-REX subset via
    NOREX_MAP; any non-RegClass constraint passes through unchanged."""
    return tuple(NOREX_MAP.get(rc, rc) if isinstance(rc, RegClass) else rc
                 for rc in regs)
class TailRecipe:
    """
    Generate encoding recipes on demand.
    x86 encodings are somewhat orthogonal with the opcode representation on
    one side and the ModR/M, SIB and immediate fields on the other side.
    A `TailRecipe` represents the part of an encoding that follow the opcode.
    It is used to generate full encoding recipes on demand when combined with
    an opcode.
    The arguments are the same as for an `EncRecipe`, except for `size` which
    does not include the size of the opcode.
    The `when_prefixed` parameter specifies a recipe that should be substituted
    for this one when a REX (or VEX) prefix is present. This is relevant for
    recipes that can only access the ABCD registers without a REX prefix, but
    are able to access all registers with a prefix.
    The `requires_prefix` parameter indicates that the recipe can't be used
    without a REX prefix.
    The `emit` parameter contains Rust code to actually emit an encoding, like
    `EncRecipe` does it. Additionally, the text `PUT_OP` is substituted with
    the proper `put_*` function from the `x86/binemit.rs` module.
    """

    def __init__(
            self,
            name,                   # type: str
            format,                 # type: InstructionFormat
            base_size,              # type: int
            ins,                    # type: ConstraintSeq
            outs,                   # type: ConstraintSeq
            branch_range=None,      # type: int
            clobbers_flags=True,    # type: bool
            instp=None,             # type: PredNode
            isap=None,              # type: PredNode
            when_prefixed=None,     # type: TailRecipe
            requires_prefix=False,  # type: bool
            emit=None,              # type: str
            compute_size=None       # type: str
            ):
        # type: (...) -> None
        self.name = name
        self.format = format
        self.base_size = base_size
        self.ins = ins
        self.outs = outs
        self.branch_range = branch_range
        self.clobbers_flags = clobbers_flags
        self.instp = instp
        self.isap = isap
        self.when_prefixed = when_prefixed
        self.requires_prefix = requires_prefix
        self.emit = emit
        self.compute_size = compute_size

        # Cached recipes, keyed by name prefix ('Op1', 'RexMp2', ...), so each
        # (tail recipe, opcode style) pair creates at most one EncRecipe.
        self.recipes = dict()  # type: Dict[str, EncRecipe]

    def __call__(self, *ops, **kwargs):
        # type: (*int, **int) -> Tuple[EncRecipe, int]
        """
        Create an encoding recipe and encoding bits for the opcode bytes in
        `ops`.
        """
        assert not self.requires_prefix, "Tail recipe requires REX prefix."
        rrr = kwargs.get('rrr', 0)
        w = kwargs.get('w', 0)
        name, bits = decode_ops(ops, rrr, w)
        # Total recipe size = opcode bytes + tail (ModR/M, SIB, immediates).
        base_size = len(ops) + self.base_size

        # All branch ranges are relative to the end of the instruction.
        branch_range = None  # type: BranchRange
        if self.branch_range is not None:
            branch_range = (base_size, self.branch_range)

        if name not in self.recipes:
            recipe = EncRecipe(
                    name + self.name,
                    self.format,
                    base_size,
                    ins=self.ins,
                    outs=self.outs,
                    branch_range=branch_range,
                    clobbers_flags=self.clobbers_flags,
                    instp=self.instp,
                    isap=self.isap,
                    emit=replace_put_op(self.emit, name),
                    compute_size=self.compute_size)
            # No REX prefix: narrow register constraints to the 8-register
            # classes that remain encodable (see NOREX_MAP).
            recipe.ins = map_regs_norex(recipe.ins)
            recipe.outs = map_regs_norex(recipe.outs)
            self.recipes[name] = recipe
        return (self.recipes[name], bits)

    def rex(self, *ops, **kwargs):
        # type: (*int, **int) -> Tuple[EncRecipe, int]
        """
        Create a REX encoding recipe and encoding bits for the opcode bytes in
        `ops`.
        The recipe will always generate a REX prefix, whether it is required or
        not. For instructions that don't require a REX prefix, two encodings
        should be added: One with REX and one without.
        """
        # Use the prefixed alternative recipe when applicable.
        if self.when_prefixed:
            return self.when_prefixed.rex(*ops, **kwargs)
        rrr = kwargs.get('rrr', 0)
        w = kwargs.get('w', 0)
        name, bits = decode_ops(ops, rrr, w)
        name = 'Rex' + name
        # The REX byte itself adds 1 to the instruction size.
        base_size = 1 + len(ops) + self.base_size

        # All branch ranges are relative to the end of the instruction.
        branch_range = None  # type: BranchRange
        if self.branch_range is not None:
            branch_range = (base_size, self.branch_range)

        if name not in self.recipes:
            recipe = EncRecipe(
                    name + self.name,
                    self.format,
                    base_size,
                    ins=self.ins,
                    outs=self.outs,
                    branch_range=branch_range,
                    clobbers_flags=self.clobbers_flags,
                    instp=self.instp,
                    isap=self.isap,
                    emit=replace_put_op(self.emit, name),
                    compute_size=self.compute_size)
            self.recipes[name] = recipe
        return (self.recipes[name], bits)

    @staticmethod
    def check_names(globs):
        # type: (Dict[str, Any]) -> None
        # Sanity check: every module-level TailRecipe variable must be bound
        # under its own declared name (catches copy/paste mistakes).
        for name, obj in globs.items():
            if isinstance(obj, TailRecipe):
                assert name == obj.name, "Mismatched TailRecipe name: " + name
def floatccs(iform):
    # type: (InstructionFormat) -> PredNode
    """
    Return an instruction predicate that checks that `iform.cond` is one of
    the directly supported floating point condition codes.
    """
    checks = [IsEqual(iform.cond, cc) for cc in supported_floatccs]
    return Or(*checks)


def valid_scale(iform):
    # type: (InstructionFormat) -> PredNode
    """
    Return an instruction predicate that checks if `iform.imm` is a valid
    `scale` for a SIB byte (1, 2, 4 or 8).
    """
    return Or(*(IsEqual(iform.imm, scale) for scale in (1, 2, 4, 8)))
# A null unary instruction that takes a GPR register. Can be used for identity
# copies and no-op conversions.
null = EncRecipe('null', Unary, base_size=0, ins=GPR, outs=0, emit='')
# XX opcode, no ModR/M.
trap = TailRecipe(
'trap', Trap, base_size=0, ins=(), outs=(),
emit='''
sink.trap(code, func.srclocs[inst]);
PUT_OP(bits, BASE_REX, sink);
''')
# Macro: conditional jump over a ud2.
trapif = EncRecipe(
'trapif', IntCondTrap, base_size=4, ins=FLAG.rflags, outs=(),
clobbers_flags=False,
emit='''
// Jump over a 2-byte ud2.
sink.put1(0x70 | (icc2opc(cond.inverse()) as u8));
sink.put1(2);
// ud2.
sink.trap(code, func.srclocs[inst]);
sink.put1(0x0f);
sink.put1(0x0b);
''')
trapff = EncRecipe(
'trapff', FloatCondTrap, base_size=4, ins=FLAG.rflags, outs=(),
clobbers_flags=False,
instp=floatccs(FloatCondTrap),
emit='''
// Jump over a 2-byte ud2.
sink.put1(0x70 | (fcc2opc(cond.inverse()) as u8));
sink.put1(2);
// ud2.
sink.trap(code, func.srclocs[inst]);
sink.put1(0x0f);
sink.put1(0x0b);
''')
# XX /r
rr = TailRecipe(
'rr', Binary, base_size=1, ins=(GPR, GPR), outs=0,
emit='''
PUT_OP(bits, rex2(in_reg0, in_reg1), sink);
modrm_rr(in_reg0, in_reg1, sink);
''')
# XX /r with operands swapped. | |
import collections
import copy
import logging
import os.path
import os
import re
import json
import time
from functools import lru_cache
from typing import List, Dict, Optional, Deque
import urllib.parse
from botocore.exceptions import ClientError, EndpointConnectionError
from botocore.config import Config
from dragoneye.cloud_scanner.aws.aws_scan_settings import AwsCloudScanSettings
from dragoneye.config import config
from dragoneye.utils.boto_backoff import rate_limiter
from dragoneye.cloud_scanner.base_cloud_scanner import BaseCloudScanner
from dragoneye.utils.app_logger import logger
from dragoneye.utils.misc_utils import get_dynamic_values_from_files, custom_serializer, make_directory, init_directory, snakecase, \
elapsed_time
from dragoneye.utils.threading_utils import execute_parallel_functions_in_threads, ThreadedFunctionData
MAX_RETRIES = 3
class AwsScanner(BaseCloudScanner):
    def __init__(self, session, settings: AwsCloudScanSettings):
        """Create a scanner bound to a boto3 `session` and scan `settings`.

        Raises ValueError when no default region can be determined, since
        universal services must be queried somewhere.
        """
        super().__init__(settings)
        self.session = session
        self.settings = settings

        # Services that will only be queried in the default region
        # TODO: Identify these from boto
        self.universal_services = [
            "iam",
            "route53",
            "route53domains",
            "s3",
            "cloudfront",
            "organizations",
        ]
        # settings.default_region wins; fall back to the session's region.
        self.default_region = settings.default_region or self.session.region_name
        if self.default_region is None:
            raise ValueError('Default region cannot be empty. '
                             'You must specify the default region or set the AWS_DEFAULT_REGION environment variable')
        # Shared botocore config for every service client created during scan.
        self.handler_config = Config(retries={'max_attempts': self.settings.max_attempts, 'mode': 'standard'},
                                     max_pool_connections=self.settings.max_pool_connections)
        # botocore is very chatty at INFO level.
        logging.getLogger("botocore").setLevel(logging.WARN)
    @elapsed_time('Scanning AWS live environment took {} seconds')
    def scan(self) -> str:
        """Scan the live AWS environment, one thread per region.

        Returns the absolute path of the account output directory (the parent
        of the per-account data directory).
        """
        self.account_data_dir = init_directory(self.settings.output_path, self.settings.account_name, self.settings.clean)
        region_dict_list = self._create_regions_file_structure()
        dependent_commands, independent_commands = self._get_scan_commands()

        # One task per region; all regions are scanned in parallel.
        tasks: List[ThreadedFunctionData] = []

        for region in region_dict_list:
            tasks.append(ThreadedFunctionData(
                self._scan_region_data,
                (region, dependent_commands, independent_commands),
                'An unknown exception has occurred'
            ))

        deque_tasks: Deque[List[ThreadedFunctionData]] = collections.deque()
        deque_tasks.append(tasks)
        execute_parallel_functions_in_threads(deque_tasks, len(region_dict_list))

        self._print_summary()
        return os.path.abspath(os.path.join(self.account_data_dir, '..'))

    def _create_regions_file_structure(self):
        """Write describe-regions.json and create one directory per region.

        Returns the list of region dicts from the DescribeRegions response.
        """
        region_list = self._get_region_list()
        with open(f"{self.account_data_dir}/describe-regions.json", "w+") as file:
            file.write(json.dumps(region_list, indent=4, sort_keys=True))
        logger.info("* Creating directory for each region name")
        region_dict_list: List[dict] = region_list["Regions"]
        for region in region_dict_list:
            make_directory(os.path.join(self.account_data_dir, region.get("RegionName", "Unknown")))
        return region_dict_list

    def _get_region_list(self):
        """Call ec2.describe_regions, applying the settings regions filter.

        The default region is always force-included in the filter.
        """
        regions_filter = None
        if len(self.settings.regions_filter) > 0:
            regions_filter = self.settings.regions_filter.lower().split(",")
            # Force include of default region -- seems to be required
            if self.default_region not in regions_filter:
                regions_filter.append(self.default_region)

        logger.info("* Getting region names")
        ec2 = self.session.client("ec2", region_name=self.default_region)
        region_list = ec2.describe_regions()

        if regions_filter is not None:
            filtered_regions = [r for r in region_list["Regions"] if r["RegionName"] in regions_filter]
            region_list["Regions"] = filtered_regions

        return region_list
@staticmethod
def _get_identifier_from_parameter(parameter):
if isinstance(parameter, list):
identifier = parameter[0]
else:
identifier = parameter
return identifier
    @staticmethod
    def _get_filename_from_parameter(parameter):
        """Derive a URL-safe file-name component from a parameter spec.

        Specs may be a bare value, a [identifier, filename] pair, or a
        nested-list form used by a few describe-tags commands (see below).
        """
        if isinstance(parameter, list):
            if len(parameter) > 1:
                filename = parameter[1]
            elif isinstance(parameter[0], list):
                # For elbv2:describe-tags we need ResourceArns as a list like `[Arn]`
                # the yaml file specifies `[[.LoadBalancerArn]]` because just doing
                # `[.LoadBalancerArn]` presents other issues, so this extracts out the inner, inner value.
                # Similar issue for elb:describe-tags
                filename = parameter[0][0]
            else:
                filename = parameter[0]
        else:
            filename = parameter

        return urllib.parse.quote_plus(filename)

    def _get_and_save_data(self, output_file, handler, method_to_call, parameters, checks, region):
        """
        Calls the AWS API function and downloads the data

        check: Value to check and repeat the call if it fails
        summary: Keeps tracks of failures
        """
        # TODO: Decorate this with rate limiters from
        # https://github.com/Netflix-Skunkworks/cloudaux/blob/master/cloudaux/aws/decorators.py
        if os.path.isfile(output_file):
            # Data already scanned, so skip
            logger.warning("Response already present at {}".format(output_file))
            return

        # call_summary records what was attempted (and, on failure, the
        # exception) for the end-of-scan summary queue.
        call_summary = {
            "service": handler.meta.service_model.service_name,
            "action": method_to_call,
            "parameters": parameters,
            "region": region
        }

        params_string = '' if not parameters else ', '.join(f'{k}={v}' for k, v in parameters.items())
        function_msg = f'{call_summary["service"]}.{call_summary["action"]}({params_string})'
        logger.info(f'Invoking {function_msg}')
        data = AwsScanner._get_data(output_file, handler, method_to_call, parameters, checks, call_summary)
        AwsScanner._remove_unused_values(data)
        AwsScanner._save_results_to_file(output_file, data)
        logger.info(f'Results from {function_msg} were saved to {output_file}')
        self.summary.put_nowait(call_summary)
    @staticmethod
    def _get_data(output_file, handler, method_to_call, parameters, checks, call_summary):
        """Invoke one boto call with check-based retries, classifying errors.

        Returns the (possibly None) response dict.  Expected, benign
        ClientErrors (missing bucket policy, restricted KMS key, etc.) are
        logged and swallowed; anything else is logged and recorded in
        call_summary["exception"] so the scan can continue.
        """
        data = None
        try:
            # Retry only when a post-call "Check" predicate fails; the last
            # attempt raises instead of sleeping again.
            for retries in range(MAX_RETRIES):
                data = AwsScanner._call_boto_function(output_file, handler, method_to_call, parameters)
                if not checks or AwsScanner._is_data_passing_check(data, checks):
                    break
                elif retries == MAX_RETRIES - 1:
                    raise Exception(
                        "One of the following checks has repeatedly failed: {}".format(
                            ', '.join(f'{check["Name"]}={check["Value"]}' for check in checks)
                        )
                    )
                else:
                    logger.info("  Sleeping and retrying")
                    time.sleep(3)
        except ClientError as ex:
            # Each branch below matches a service error that simply means
            # "this optional resource/policy does not exist" — not a failure.
            if "NoSuchBucketPolicy" in str(ex):
                # This error occurs when you try to get the bucket policy for a bucket that has no bucket policy, so this can be ignored.
                logger.warning("  - No bucket policy")
            elif "NoSuchPublicAccessBlockConfiguration" in str(ex):
                # This error occurs when you try to get the account Public Access Block policy for an account that has none, so this can be ignored.
                logger.warning("  - No public access block set")
            elif (
                    "ServerSideEncryptionConfigurationNotFoundError" in str(ex)
                    and call_summary["service"] == "s3"
                    and call_summary["action"] == "get_bucket_encryption"
            ):
                logger.warning("  - No encryption set")
            elif (
                    "NoSuchEntity" in str(ex)
                    and call_summary["action"] == "get_account_password_policy"
            ):
                logger.warning("  - No password policy set")
            elif (
                    "AccessDeniedException" in str(ex)
                    and call_summary["service"] == "organizations"
                    and call_summary["action"] == "list_accounts"
            ):
                logger.warning("  - Denied, which likely means this is not the organization root")
            elif (
                    "RepositoryPolicyNotFoundException" in str(ex)
                    and call_summary["service"] == "ecr"
                    and call_summary["action"] == "get_repository_policy"
            ):
                logger.warning("  - No policy exists")
            elif (
                    "ResourceNotFoundException" in str(ex)
                    and call_summary["service"] == "lambda"
                    and call_summary["action"] == "get_policy"
            ):
                logger.warning("  - No policy exists")
            elif (
                    "AccessDeniedException" in str(ex)
                    and call_summary["service"] == "kms"
                    and call_summary["action"] == "list_key_policies"
            ):
                logger.warning("  - Denied, which should mean this KMS has restricted access")
            elif (
                    "AccessDeniedException" in str(ex)
                    and call_summary["service"] == "kms"
                    and call_summary["action"] == "list_grants"
            ):
                logger.warning("  - Denied, which should mean this KMS has restricted access")
            elif (
                    "AccessDeniedException" in str(ex)
                    and call_summary["service"] == "kms"
                    and call_summary["action"] == "get_key_policy"
            ):
                logger.warning("  - Denied, which should mean this KMS has restricted access")
            elif (
                    "AccessDeniedException" in str(ex)
                    and call_summary["service"] == "kms"
                    and call_summary["action"] == "get_key_rotation_status"
            ):
                logger.warning("  - Denied, which should mean this KMS has restricted access")
            elif "AWSOrganizationsNotInUseException" in str(ex):
                logger.warning('  - Your account is not a member of an organization.')
            elif (
                    "EntityNotFoundException" in str(ex)
                    and call_summary["service"] == "glue"
                    and call_summary["action"] == "get_resource_policy"
            ):
                logger.warning(f'  - Glue policy does not exist on region {call_summary["region"]}')
            elif (
                    "NoSuchEntity" in str(ex)
            ):
                logger.warning(f"  - {str(ex)}")
            elif (
                    "NoSuchAccessPointPolicy" in str(ex)
            ):
                logger.warning(f"  - {str(ex)}")
            elif (
                    "PolicyNotFound" in str(ex)
            ):
                logger.warning(f"  - {str(ex)}")
            else:
                # Unexpected service error: record it for the scan summary.
                logger.warning(f"ClientError {ex}")
                call_summary["exception"] = ex
        except EndpointConnectionError as ex:
            logger.warning("EndpointConnectionError: {}".format(ex))
            call_summary["exception"] = ex
        except Exception as ex:
            if "Parameter validation failed" in str(ex):
                logger.warning(f"  - {str(ex)}")
            else:
                logger.warning("Exception: {}".format(ex))
                call_summary["exception"] = ex
        return data
    @staticmethod
    @rate_limiter()
    def _call_boto_function(output_file, handler, method_to_call, parameters):
        """Invoke a boto client method, transparently draining pagination.

        Paginated responses are merged into one dict: list values are
        extended across pages; non-list values keep the first page's value.
        """
        data = {}
        if handler.can_paginate(method_to_call):
            paginator = handler.get_paginator(method_to_call)
            page_iterator = paginator.paginate(**parameters)
            for response in page_iterator:
                if not data:
                    data = response
                else:
                    logger.info("  ...paginating {}".format(output_file))
                    for key, value in data.items():
                        if isinstance(value, list):
                            value.extend(response[key])
        else:
            function = getattr(handler, method_to_call)
            data = function(**parameters)
        return data
@staticmethod
def _is_data_passing_check(data: dict, checks: Optional[dict]) -> bool:
if checks:
for check in checks:
if data[check["Name"]] == check["Value"]:
pass
else:
return False
return True
@staticmethod
def _remove_unused_values(data: dict) -> None:
if data is not None:
data.pop("ResponseMetadata", None)
data.pop("Marker", None)
data.pop("IsTruncated", None)
    @staticmethod
    def _save_results_to_file(output_file: str, data: Optional[Dict]) -> None:
        """Write `data` to `output_file` as sorted, indented JSON.

        No file is written when `data` is None (i.e. the call failed).
        Non-JSON-native values (datetimes etc.) go through custom_serializer.
        """
        if data is not None:
            with open(output_file, "w+") as file:
                file.write(
                    json.dumps(data, indent=4, sort_keys=True, default=custom_serializer)
                )
    def _run_scan_commands(self, region, runner):
        """Execute one scan command (`runner`) against one region.

        Builds a boto client, expands the command's parameter groups (which
        may reference previously-scanned files), and fans the resulting calls
        out to _get_and_save_data in worker threads.
        """
        # Deep-copy: both dicts are shared across the per-region threads and
        # are mutated below (service rename, parameter expansion).
        region = copy.deepcopy(region)
        runner = copy.deepcopy(runner)
        region_name = region["RegionName"]
        if runner['Service'] == 'configservice':
            runner['Service'] = 'config'  # This is due to service name change between API (configservice) and python SDK (config)
        client_region = region_name
        if runner['Service'] == 'globalaccelerator':
            # globalaccelerator only has api endpoint in us-west-2
            if region_name == self.default_region:
                client_region = 'us-west-2'
            else:
                return
        else:
            if not self._should_run_command_on_region(runner, region):
                return
        handler = self.session.client(
            runner["Service"], region_name=client_region,
            config=self.handler_config
        )

        filepath = os.path.join(self.account_data_dir, region_name, f'{runner["Service"]}-{runner["Request"]}')

        method_to_call = snakecase(runner["Request"])
        parameter_keys = set()
        param_groups = self._get_parameter_group(runner, self.account_data_dir, region, parameter_keys)
        suffix = runner.get('FilenameSuffix', '')
        if suffix:
            suffix = '_' + suffix
        tasks: List[ThreadedFunctionData] = []
        if runner.get("Parameters"):
            # Parameterized command: one output file per parameter group,
            # named from the group's key/value pairs (URL-quoted).
            make_directory(filepath)
            for param_group in param_groups:
                # Skip incomplete groups (a referenced value was missing).
                if set(param_group.keys()) != parameter_keys:
                    continue
                unparsed_file_name = '_'.join([f'{k}-{v}' if not isinstance(v, list) else k for k, v in param_group.items()])
                file_name = urllib.parse.quote_plus(unparsed_file_name) + suffix
                output_file = os.path.join(filepath, f'{file_name}.json')
                tasks.append(ThreadedFunctionData(
                    AwsScanner._get_and_save_data,
                    (self,
                     output_file,
                     handler,
                     method_to_call,
                     param_group,
                     runner.get("Check", None),
                     region_name),
                    'exception on command {}'.format(runner),
                    'timeout on command {}'.format(runner)))
        else:
            # Parameterless command: a single output file.
            output_file = filepath + suffix + ".json"
            tasks.append(ThreadedFunctionData(
                AwsScanner._get_and_save_data,
                (self,
                 output_file,
                 handler,
                 method_to_call,
                 {},
                 runner.get("Check", None),
                 region_name), 'exception on command {}'.format(runner), 'timeout on command {}'.format(runner)))
        deque_tasks: Deque[List[ThreadedFunctionData]] = collections.deque()
        deque_tasks.append(tasks)
        execute_parallel_functions_in_threads(deque_tasks, config.get('MAX_WORKERS'), self.settings.command_timeout)

    def _scan_region_data(self, region: dict, dependent_commands: List[dict], independent_commands: List[dict]):
        """Scan one region: independent commands run as one parallel batch,
        then each dependent command runs as its own batch (dependent commands
        consume files written by earlier commands, so they must be ordered).
        """
        non_dependable_tasks: List[ThreadedFunctionData] = []
        dependable_tasks: List[ThreadedFunctionData] = []
        deque_tasks: Deque[List[ThreadedFunctionData]] = collections.deque()
        for independent_command in independent_commands:
            non_dependable_tasks.append(ThreadedFunctionData(
                self._run_scan_commands,
                (region, independent_command),
                'exception on command {}'.format(independent_command)))

        deque_tasks.append(non_dependable_tasks)

        for dependent_command in dependent_commands:
            dependable_tasks.append(ThreadedFunctionData(
                self._run_scan_commands,
                (region, dependent_command),
                'exception on command {}'.format(dependent_command)))

        for dependable_task in dependable_tasks:
            deque_tasks.append([dependable_task])  # O(1) per command, sequential batches

        execute_parallel_functions_in_threads(deque_tasks, config.get('MAX_WORKERS'))
@staticmethod
def _get_call_parameters(call_parameters: dict, parameters_def: list) -> List[dict]:
group_param = {}
for parameter_def in parameters_def:
| |
math.degrees(number)
def rand(minint, maxint):
    """PHP rand(): random integer in [minint, maxint], both ends inclusive."""
    return random.randint(minint, maxint)


def sin(arg):
    """PHP sin(): sine of arg (radians)."""
    return math.sin(arg)


def sinh(arg):
    """PHP sinh(): hyperbolic sine."""
    return math.sinh(arg)


def sqrt(arg):
    """PHP sqrt(): square root."""
    return math.sqrt(arg)


def srand(seed=None):
    """PHP srand(): seed the shared random generator (None = OS entropy)."""
    if seed is None:
        return random.seed()
    return random.seed(seed)


def tan(arg):
    """PHP tan(): tangent of arg (radians)."""
    return math.tan(arg)


def tanh(arg):
    """PHP tanh(): hyperbolic tangent."""
    return math.tanh(arg)
'''
Misc. Functions
'''
def connection_aborted():
    # TODO: not implemented — no PHP SAPI connection state to report.
    pass


def connection_status():
    pass


def constant():
    pass


def define():
    pass


def defined():
    pass


def die(status=None):
    """PHP die()/exit(): terminate the script.

    Generalized to accept PHP's optional argument: a string is printed before
    exiting (via SystemExit, exit code 1 per Python convention), an int
    becomes the process exit code.  `die()` with no argument behaves exactly
    as before.
    """
    if status is None:
        sys.exit()
    sys.exit(status)


def get_browser():
    pass


def __halt_compiler():
    pass


def highlight_file():
    pass


def highlight_string():
    pass


def hrtime():
    pass


def ignore_user_abort():
    pass
def pack(format_codes, *args):
    """PHP pack(): pack values into a binary string per `format_codes`.

    PHP's pack is variadic; the old signature `pack(fmt, args)` could only
    handle single-value formats.  Accepting *args is backward compatible:
    single-value calls work unchanged, multi-value formats now work too.
    """
    return struct.pack(format_codes, *args)
def php_check_syntax():
    # TODO: not implemented — PHP-engine-specific, no Python equivalent.
    pass


def php_strip_whitespace():
    pass


def sapi_windows_cp_conv():
    pass


def sapi_windows_cp_get():
    pass


def sapi_windows_cp_is_utf8():
    pass


def sapi_windows_cp_set():
    pass


def sapi_windows_vt100_support():
    pass


def show_source():
    pass


def sleep(seconds):
    """PHP sleep(): block for `seconds` seconds."""
    py_time.sleep(seconds)


def sys_getloadavg():
    """PHP sys_getloadavg(): (1, 5, 15)-minute load averages (POSIX only)."""
    return os.getloadavg()


def time_nanosleep(seconds, nanoseconds):
    # TODO: not implemented; could delegate to py_time.sleep(seconds + nanoseconds / 1e9)
    pass


def time_sleep_until(timestamp):
    """PHP time_sleep_until(): sleep until the given Unix timestamp.

    NOTE(review): raises ValueError for a timestamp already in the past
    (negative sleep) where PHP returns False — confirm desired behavior.
    """
    py_time.sleep(timestamp - py_time.time())


def uniqid(prefix=''):
    """PHP uniqid(): time-based unique-ish identifier string.

    NOTE(review): only loosely mimics PHP's 13-hex-digit format and is not
    collision-safe within the same microsecond — verify before relying on
    uniqueness.
    """
    return prefix + hex(int(py_time.time()))[2:10] + hex(int(py_time.time() * 1000000) % 0x100000)[2:7]


def unpack(format_codes, data):
    """PHP unpack(): decode binary `data` per a struct format; returns a tuple."""
    return struct.unpack(format_codes, data)


def usleep(micro_seconds):
    """PHP usleep(): block for the given number of microseconds."""
    py_time.sleep(micro_seconds / 1000000.0)
'''
Network Functions
'''
def checkdnsrr():
    # TODO: not implemented — DNS record queries need a resolver library.
    pass


def closelog():
    """PHP closelog(): close the system logger (POSIX syslog)."""
    return py_syslog.closelog()


def define_syslog_variables():
    pass


def dns_check_record():
    pass


def dns_get_mx(hostname):
    pass


def dns_get_record():
    pass


def fsockopen(hostname, port):
    """PHP fsockopen(): open a TCP connection and return a file-like stream.

    NOTE(review): only the makefile() wrapper is returned; the underlying
    socket object is dropped, so callers cannot tune timeouts and the
    socket's lifetime is tied to the file object — confirm this is intended.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((hostname, port))
    return sock.makefile()
def gethostbyaddr(ip_address):
    """Reverse-resolve an IP address.

    NOTE(review): returns Python's (hostname, aliaslist, ipaddrlist) triple
    rather than PHP's bare hostname string — kept as-is for backward
    compatibility with existing callers.
    """
    return socket.gethostbyaddr(ip_address)


def gethostbyname(hostname):
    """PHP gethostbyname(): resolve a hostname to a single IPv4 address."""
    return socket.gethostbyname(hostname)


def gethostbynamel(hostname):
    """PHP gethostbynamel(): resolve a hostname to the LIST of its IPv4
    addresses.

    Fixed: previously delegated to socket.gethostbyname() and returned one
    address string instead of a list.
    """
    return socket.gethostbyname_ex(hostname)[2]


def gethostname():
    """PHP gethostname(): name of the local host."""
    return socket.gethostname()
def getmxrr():
    # TODO: not implemented — MX lookups need a resolver library.
    pass


def getprotobyname(name):
    """PHP getprotobyname(): protocol number for a name (e.g. 'tcp' -> 6)."""
    return socket.getprotobyname(name)


def getprotobynumber(number):
    """PHP getprotobynumber(): protocol name for a number, derived by
    scanning the socket module's IPPROTO_* constants (raises KeyError for an
    unknown number)."""
    lookup = {}
    for attr, value in vars(socket).items():
        if attr.startswith("IPPROTO"):
            lookup[value] = attr[8:]
    return lookup[number]


def getservbyname(service, protocol):
    """PHP getservbyname(): port number for a service/protocol pair."""
    return socket.getservbyname(service, protocol)


def getservbyport(port, protocol):
    """PHP getservbyport(): service name for a port/protocol pair."""
    return socket.getservbyport(port, protocol)
def header_register_callback():
    # TODO: not implemented — HTTP response helpers have no SAPI equivalent here.
    pass


def header_remove():
    pass


def header():
    pass


def headers_list():
    pass


def headers_sent():
    pass


def http_response_code():
    pass
def inet_ntop(in_addr):
return socket.inet_ntop(socket.AF_INET, in_addr)
def inet_pton(address):
return socket.inet_pton(socket.AF_INET, address)
def ip2long(ip_addr):
    """PHP ip2long(): dotted-quad IPv4 string -> 32-bit integer.

    The previous code called bare `inet_aton`, a name that does not
    exist at module level (only socket.inet_aton does).
    """
    return struct.unpack("!L", socket.inet_aton(ip_addr))[0]
def long2ip(ip):
    """PHP long2ip(): 32-bit integer -> dotted-quad IPv4 string.

    The previous code called bare `inet_ntoa`, which is not in scope;
    socket.inet_ntoa is the intended function.
    """
    return socket.inet_ntoa(struct.pack("!L", ip))
def openlog(ident, option, facility):
    # Open a syslog connection (Unix only; py_syslog is the syslog module).
    return py_syslog.openlog(ident, option, facility)
def pfsockopen():
    pass
def setcookie(name, value='', expire=0, path='', domain=''):
    # Build a Set-Cookie header string via SimpleCookie.
    # NOTE(review): when expire == 0 the 'expires' field is set to the
    # CURRENT time (strftime is given no time argument), and a non-zero
    # expire is passed through as a raw UNIX timestamp — both look
    # unintended; PHP omits 'expires' for 0 and formats the timestamp.
    cookie = http.cookies.SimpleCookie()
    cookie[name] = value
    cookie[name]['domain'] = domain
    cookie[name]['path'] = path
    cookie[name]['expires'] = expire if expire != 0 else py_time.strftime("%a, %d-%b-%Y %H:%M:%S GMT")
    return cookie.output()
def setrawcookie():
    pass
def socket_get_status():
    pass
def socket_set_blocking():
    pass
def socket_set_timeout():
    pass
def syslog(priority, message):
    # Emit a syslog message (Unix only).
    return py_syslog.syslog(priority, message)
'''
Program execution Functions
'''
def escapeshellarg(arg):
    """PHP escapeshellarg(): single-quote `arg` for safe POSIX shell use.

    Wraps the string in single quotes and rewrites each embedded single
    quote as '\\'' (close quote, escaped quote, reopen quote).
    """
    return "\\'".join("'" + p + "'" for p in arg.split("'"))
def escapeshellcmd():
    pass
def passthru():
    pass
# The proc_* / shell_exec names below were garbled in the original
# (proc_lose, proc_et_tatus, proc_ice, proc_pen, proc_erminate, shell_xec).
# Define the correctly spelled PHP names and keep the old identifiers as
# aliases so any existing callers of the typo'd names still work.
def proc_close():
    pass
proc_lose = proc_close
def proc_get_status():
    pass
proc_et_tatus = proc_get_status
def proc_nice():
    pass
proc_ice = proc_nice
def proc_open():
    pass
proc_pen = proc_open
def proc_terminate():
    pass
proc_erminate = proc_terminate
def shell_exec(command):
    """PHP shell_exec(): run `command` in a shell and return its stdout."""
    return os.popen(command).read()
shell_xec = shell_exec
def system(command):
    """PHP system(): run `command`; returns the exit status, not the output."""
    return os.system(command)
"""
Strings Functions
"""
def addcslashes(string):
    pass
def addslashes(string):
    pass
def bin2hex(string):
    # Bytes -> hex; binascii.hexlify expects bytes and returns bytes.
    return binascii.hexlify(string)
def chop(string, character_mask=None):
    # PHP chop() is an alias of rtrim().
    return rtrim(string, character_mask)
def chunk_split(body, chunklen, end="\r\n"):
    """PHP chunk_split(): cut `body` into fixed-size chunks, each followed by `end`.

    The previous textwrap.wrap() version broke on word boundaries (not at
    exactly `chunklen` characters) and dropped the trailing separator;
    PHP appends `end` after EVERY chunk, including the last.
    """
    return "".join(body[i:i + chunklen] + end for i in range(0, len(body), chunklen))
# Legacy Cyrillic / uuencode conversions: no Python equivalent wired up yet.
def convert_cyr_string(string):
    pass
def convert_uudecode(string):
    pass
def convert_uuencode(string):
    pass
def count_chars(s, mode=0):
    """PHP count_chars(): per-character statistics over `s`.

    mode 0 -> dict of counts for every chr(0..255) (plus any higher
              code points present in `s`);
    mode 1 -> dict restricted to characters that occur;
    mode 2 -> dict restricted to characters that do NOT occur;
    mode 3 -> string of occurring characters;
    mode 4 -> string of non-occurring characters.
    """
    counts = {chr(code): 0 for code in range(256)}
    counts.update(Counter(s))
    if mode == 0:
        return counts
    if mode == 1:
        return {ch: n for ch, n in counts.items() if n}
    if mode == 2:
        return {ch: n for ch, n in counts.items() if not n}
    if mode == 3:
        return "".join(ch for ch, n in counts.items() if n)
    if mode == 4:
        return "".join(ch for ch, n in counts.items() if not n)
    raise ValueError("Incorrect value of mode (%d)" % (mode,))
def crc32(string):
    # Mask to an unsigned 32-bit value like PHP; expects bytes input.
    return binascii.crc32(string) & 0xffffffff
def crypt(string, salt):
    # POSIX crypt(3); py_crypt is the stdlib crypt module (Unix only).
    return py_crypt.crypt(string, salt)
def echo(string):
    print(string)
def explode(delimiter, string, limit):
    """PHP explode(): split `string` on `delimiter`.

    Positive limit -> at most `limit` elements, the LAST one containing
    the remainder (str.split's maxsplit is one less than that, which the
    old code got wrong by one). limit == 0 is treated as 1; a negative
    limit drops that many trailing elements.
    """
    if limit == 0:
        limit = 1
    if limit > 0:
        return string.split(delimiter, limit - 1)
    else:
        return string.split(delimiter)[:limit]
# Formatting / HTML-entity helpers: unimplemented stubs, except hex2bin.
def fprintf(handle, format):
    pass
def get_html_translation_table(string):
    pass
def hebrev(string):
    pass
def hebrevc(string):
    pass
def hex2bin(hex_string):
    # Hex str/bytes -> raw bytes; inverse of bin2hex().
    return binascii.unhexlify(hex_string)
def html_entity_decode(string):
    pass
def htmlentities(string):
    pass
def htmlspecialchars_decode(string):
    pass
def htmlspecialchars(string):
    pass
def implode(glue='', pieces=[]):
    # Join `pieces` with `glue`. The shared mutable default is safe here
    # because the list is never mutated.
    return glue.join(pieces)
def join(glue='', pieces=[]):
    # PHP join() is an alias of implode().
    return glue.join(pieces)
def lcfirst(string):
    """PHP lcfirst(): lower-case the first character.

    Slicing with [:1] keeps the empty string working (the previous
    string[0] raised IndexError on '').
    """
    return string[:1].lower() + string[1:]
def levenshtein(string1, string2):
    """Return the Levenshtein edit distance between the two strings.

    Classic two-row dynamic programming; iterates over the longer string
    so the rows are sized by the shorter one (O(min(n, m)) memory).
    """
    shorter, longer = (string1, string2) if len(string1) <= len(string2) else (string2, string1)
    width = len(shorter)
    row = list(range(width + 1))
    for i, ch_long in enumerate(longer, 1):
        prev, row = row, [i] + [0] * width
        for j, ch_short in enumerate(shorter, 1):
            cost = 0 if ch_short == ch_long else 1
            row[j] = min(prev[j] + 1, row[j - 1] + 1, prev[j - 1] + cost)
    return row[width]
def localeconv(string):
    pass
def ltrim(string, character_mask=None):
    # PHP ltrim(): strip leading whitespace by default, else the given chars.
    if character_mask is None:
        return string.lstrip()
    return string.lstrip(character_mask)
def md5_file(filename, raw_output=False):
    """PHP md5_file(): MD5 of a file's contents.

    Reads in fixed-size binary chunks inside a `with` block so the handle
    is closed even if reading raises (the old code leaked it on error,
    and iterating "lines" of a binary file was accidental).
    Returns raw digest bytes when raw_output, else the hex digest.
    """
    crc = hashlib.md5()
    with open(filename, 'rb') as fp:
        for chunk in iter(lambda: fp.read(65536), b''):
            crc.update(chunk)
    if raw_output:
        return crc.digest()
    return crc.hexdigest()
def md5(str, raw_output=False):
    """PHP md5(): MD5 of a text string.

    Returns the raw digest bytes when raw_output is true, otherwise the
    32-character hex digest. (Parameter is named `str` for PHP parity.)
    """
    digest = hashlib.md5(str.encode())
    return digest.digest() if raw_output else digest.hexdigest()
def metaphone(string):
    pass
def money_format(string):
    pass
def nl_langinfo(string):
    pass
def nl2br(string, is_xhtml=True):
    # PHP nl2br(): insert a <br /> (XHTML) or <br> before each newline.
    if is_xhtml:
        return string.replace('\n', '<br />\n')
    else:
        return string.replace('\n', '<br>\n')
def number_format(number, decimals):
locale.setlocale(locale.LC_NUMERIC, '')
return locale.format("%.*f", (decimals, number), True)
def parse_str(string):
    # PHP parse_str(): query string -> dict of lists (parse_qs semantics).
    return urllib.parse.parse_qs(string)
def printf(string):
    # NOTE(review): no format-argument support; just prints the string.
    return print(string)
def quoted_printable_decode(string):
    # Expects bytes; returns decoded bytes.
    return quopri.decodestring(string)
def quoted_printable_encode(string):
    # Expects bytes; returns encoded bytes.
    return quopri.encodestring(string)
def quotemeta(string):
    pass
def rtrim(string, character_mask=None):
    # PHP rtrim(): strip trailing whitespace by default, else the given chars.
    if character_mask is None:
        return string.rstrip()
    return string.rstrip(character_mask)
def setlocale(string):
    pass
def sha1_file(filename, raw_output=False):
    """PHP sha1_file(): SHA-1 of a file's contents.

    Chunked binary read inside `with` (the old code leaked the handle on
    error); mirrors md5_file() for consistency.
    """
    crc = hashlib.sha1()
    with open(filename, 'rb') as fp:
        for chunk in iter(lambda: fp.read(65536), b''):
            crc.update(chunk)
    if raw_output:
        return crc.digest()
    return crc.hexdigest()
def sha1(string):
    # Hex SHA-1 of a text string (no raw_output flag here, unlike md5()).
    return hashlib.sha1(string.encode()).hexdigest()
def similar_text(string):
    pass
def soundex(string):
    pass
def sprintf(string):
    pass
def sscanf(string):
    pass
def str_getcsv(string, delimiter=',', enclosure='"', escape="\\"):
    # Parse ONE CSV record (the first line only) from a string, honouring
    # PHP's delimiter/enclosure/escape knobs via the csv module.
    with io.StringIO(string) as f:
        reader = csv.reader(f, delimiter=delimiter, quotechar=enclosure, escapechar=escape)
        return next(reader)
def str_ireplace(search, replace, subject, count=0):
    """PHP str_ireplace(): case-insensitive LITERAL substring replace.

    re.escape keeps regex metacharacters in `search` literal, and the
    lambda keeps backslashes/group references in `replace` literal —
    the old code compiled `search` as a raw regex pattern.
    count=0 replaces all occurrences (both re.sub and PHP semantics).
    """
    pattern = re.compile(re.escape(search), re.IGNORECASE)
    return pattern.sub(lambda m: replace, subject, count)
def str_pad(string, pad_length, pad_string=' ', pad_type=1):
    """PHP str_pad(): pad `string` to `pad_length` with `pad_string`.

    STR_PAD_LEFT = 0 pads on the LEFT (rjust), STR_PAD_RIGHT = 1 (the
    default) pads on the RIGHT (ljust), STR_PAD_BOTH = 2 centers.
    The previous code had left/right swapped.
    NOTE(review): ljust/rjust/center require a single-character
    pad_string, whereas PHP accepts multi-character pads.
    """
    if pad_type == 0:
        return string.rjust(pad_length, pad_string)
    elif pad_type == 2:
        return string.center(pad_length, pad_string)
    else:
        return string.ljust(pad_length, pad_string)
def str_repeat(string, multiplier):
    return string * multiplier
def str_replace(search, replace, subject, count=-1):
    # count=-1 means "replace all", matching PHP's default.
    return subject.replace(search, replace, count)
def str_rot13(string):
    # ROT13 via the codecs machinery (text-to-text codec).
    enc = codecs.getencoder("rot-13")
    return enc(string)[0]
def str_shuffle(string):
    # Uniformly shuffle the characters (uses the global `random` state).
    chars = list(string)
    random.shuffle(chars)
    return ''.join(chars)
def str_split(string, split_length=1):
    """PHP str_split(): list of `split_length`-sized chunks of `string`.

    Plain slicing replaces the old re.split()+filter pair, which returned
    a one-shot filter object instead of a list (PHP returns an array).
    """
    return [string[i:i + split_length] for i in range(0, len(string), split_length)]
def str_word_count(string, format=0, charlist=''):
    # PHP str_word_count(): count (format 0), list (1) or locate (2) words.
    # NOTE(review): `charlist` is spliced into the character class
    # unescaped, so characters like ']' or '-' can break/widen the regex.
    if isinstance(string, str):
        words = re.sub('[^\w ' + charlist + ']', '', string)
        words = words.replace(' ', ' ').split(' ')
        if format == 0:
            return len(words)
        elif format == 1:
            return words
        elif format == 2:
            # Map each word's first-occurrence offset in the ORIGINAL string
            # to the word; duplicate words collapse onto the first position.
            result = {}
            for word in words:
                result[string.find(word)] = word
            return result
    # Non-string input or unknown format -> False (PHP-style failure).
    return False
def strcasecmp(string):
    pass
def strchr(haystack, needle):
    # PHP strstr() alias: tail of haystack from the FIRST needle, else None.
    pos = haystack.find(needle)
    if pos < 0:
        return None
    else:
        return haystack[pos:]
def strcmp(string1, string2):
    # Three-way compare: -1, 0 or 1 ("spaceship" semantics).
    return (string1 > string2) - (string1 < string2)
def strcoll(string):
    pass
def strcspn(string1, string2):
    # Length of the initial segment of string1 containing NO chars of string2.
    return len(list(takewhile(lambda x: x not in string2, string1)))
def strip_tags(string):
    pass
def stripcslashes(string):
    pass
def stripos(haystack, needle, offset=0):
    """Case-insensitive strpos(): first-occurrence index, or False.

    Returns False (not -1) when absent, for consistency with the
    strpos()/strrpos() implementations in this module and PHP itself.
    """
    pos = haystack.upper().find(needle.upper(), offset)
    return False if pos == -1 else pos
def stripslashes(string):
    pass
def stristr(haystack, needle):
    # Case-insensitive strstr(): tail of haystack from the match, else None.
    # The .upper() pair only locates the match; ORIGINAL casing is returned.
    pos = haystack.upper().find(needle.upper())
    if pos < 0:
        return None
    else:
        return haystack[pos:]
def strlen(string):
    return len(string)
# Natural-order / length-limited comparisons: unimplemented stubs.
def strnatcasecmp(string):
    pass
def strnatcmp(string):
    pass
def strncasecmp(string):
    pass
def strncmp(string):
    pass
def strpbrk(haystack, char_list):
    """PHP strpbrk(): tail of haystack from the first char in char_list.

    Returns None when no character matches. Catches only StopIteration
    (the empty-generator case) instead of the old bare `except`, which
    silently swallowed every error.
    """
    try:
        pos = next(i for i, x in enumerate(haystack) if x in char_list)
    except StopIteration:
        return None
    return haystack[pos:]
def strpos(haystack, needle, offset=0):
    """PHP strpos(): index of the first needle at/after offset, or False."""
    location = haystack.find(needle, offset)
    return False if location == -1 else location
def strrchr(haystack, needle):
    """PHP strrchr(): tail of haystack from the LAST occurrence of the
    first character of `needle`, or False when absent.

    The previous code returned rfind()'s integer index (and -1 on a
    miss); PHP returns a substring, consistent with strchr()/strstr()
    in this module.
    """
    pos = haystack.rfind(needle[:1])
    if pos == -1:
        return False
    return haystack[pos:]
def strrev(string):
    """PHP strrev(): the string with its characters in reverse order."""
    return "".join(reversed(string))
def strripos(haystack, needle, offset=0):
    """Case-insensitive strrpos(): last-occurrence index, or False.

    Returns False (not -1) on no match, consistent with strpos()/strrpos().
    """
    pos = haystack.upper().rfind(needle.upper(), offset)
    return False if pos == -1 else pos
def strrpos(haystack, needle, offset=0):
    # Index of the LAST occurrence at/after offset, or False when absent.
    pos = haystack.rfind(needle, offset)
    if pos == -1:
        return False
    else:
        return pos
def strspn(subject, mask, start=0, length=None):
    """PHP strspn(): length of the initial segment of the examined slice
    consisting ONLY of characters in `mask`.

    Set membership replaces the old regex, where unescaped mask
    characters like ']' or '^' broke or inverted the character class.
    Mirrors the takewhile() style of strcspn() above.
    """
    if not length:
        length = len(subject)
    allowed = set(mask)
    return len(list(takewhile(lambda ch: ch in allowed, subject[start:start + length])))
def strstr(haystack, needle):
    # Tail of haystack starting at the first needle, or None when absent.
    pos = haystack.find(needle)
    if pos < 0:
        return None
    else:
        return haystack[pos:]
def strtok(string):
    pass
def strtolower(string):
    return string.lower()
def strtoupper(string):
    return string.upper()
def strtr(string, from_str, to_str=None):
    """PHP strtr(): translate characters or replace mapped substrings.

    The mapping form (PHP "array") is a dict here, signalled by `to_str`
    being omitted — the old is_array() test only matched list/tuple, so
    the dict form always fell through to the two-string branch and
    crashed on maketrans(dict, None).
    """
    if to_str is None:
        return string.translate(str.maketrans(from_str))
    return string.translate(str.maketrans(from_str, to_str))
def substr_compare(string):
    pass
def substr_count(haystack, needle, offset=0, length=0):
    # PHP substr_count(); length=0 means "search to end of string".
    # NOTE(review): when offset == 0 a non-zero length is ignored — PHP
    # would still honour it; confirm before relying on that combination.
    if offset == 0:
        return haystack.count(needle)
    else:
        if length == 0:
            return haystack.count(needle, offset)
        else:
            return haystack.count(needle, offset, offset + length)
def substr_replace(subject, replace, start, length=None):
    # PHP substr_replace(): splice `replace` over the slice starting at `start`.
    if length is None:
        # No length: everything from `start` onward is replaced.
        return subject[:start] + replace
    elif length < 0:
        # Negative length: keep that many characters from the END of subject.
        return subject[:start] + replace + subject[length:]
    else:
        return subject[:start] + replace + subject[start + length:]
def substr(string, start, length=None):
    """PHP substr(): slice of `string` from `start`, optionally `length` long.

    Negative start counts from the end; negative length drops that many
    trailing characters; returns False when start is past the end.
    The previous guard was inverted (`len(string) >= start ... return
    False`), so ordinary positive-start calls returned False.
    """
    if start > len(string):
        return False
    if start < 0:
        # Normalise so start+length slicing below cannot wrap around.
        start = max(len(string) + start, 0)
    if length is None:
        return string[start:]
    if length >= 0:
        return string[start:start + length]
    return string[start:length]
def trim(string, character_mask=None):
    # PHP trim(): strip whitespace (default) or the given chars from both ends.
    if character_mask is None:
        return string.strip()
    return string.strip(character_mask)
def ucfirst(string):
    """PHP ucfirst(): upper-case the first character.

    Slicing with [:1] keeps the empty string working (the previous
    string[0] raised IndexError on ''), mirroring lcfirst().
    """
    return string[:1].upper() + string[1:]
def ucwords(words):
    # Capitalise each word via the stdlib string module.
    # NOTE(review): relies on a module-level `import string` made
    # elsewhere in this file — confirm it exists.
    return string.capwords(words)
def vfprintf(string):
    pass
def vprintf(string):
    pass
def vsprintf(string):
    pass
def wordwrap(string, width):
    """PHP wordwrap(): return a STRING with newlines inserted at `width`.

    textwrap.wrap() yields a list of lines; joining with '\\n' matches
    PHP's return type (the old code returned the bare list).
    """
    return "\n".join(textwrap.wrap(string, width))
'''
URL Functions
'''
def base64_decode(data):
    # Accepts str or bytes; returns decoded bytes.
    return base64.b64decode(data)
def base64_encode(data):
    """Base64-encode `data` (bytes) and return bytes.

    The previous code called base64.encode(), which is the two-argument
    FILE-stream API and raises TypeError here; b64encode is the
    bytes-level sibling of the b64decode used by base64_decode().
    """
    return base64.b64encode(data)
def get_headers(url):
    # Fetch `url` and return the response headers (an email.message.Message).
    return urllib.request.urlopen('%s' % url).headers
def get_meta_tags(url):
    """PHP get_meta_tags(): fetch `url` and return a dict mapping each
    <meta name=... content=...> name to its content.

    urlopen().read() returns bytes; the old code matched a str pattern
    against them, which raises TypeError on Python 3 — decode first.
    """
    out = {}
    html = urllib.request.urlopen(url).read().decode('utf-8', 'replace')
    for name, content in re.findall("name=\"([^\"]*)\" content=\"([^\"]*)\"", html):
        out[name] = content
    return out
def http_build_query(query_data):
    # Dict/sequence -> application/x-www-form-urlencoded string.
    return urllib.parse.urlencode(query_data)
def parse_url(url):
    # Returns a ParseResult (scheme, netloc, path, ...), not PHP's dict.
    return urllib.parse.urlparse(url)
def rawurldecode(string):
    # RFC 3986 %XX decoding WITHOUT '+' -> space.
    return urllib.parse.unquote(string)
def rawurlencode(string):
    return urllib.parse.quote(string)
def urldecode(string):
    # Form-style decoding: '+' becomes a space.
    return urllib.parse.unquote_plus(string)
def urlencode(string):
    return urllib.parse.quote_plus(string)
'''
Variable handling Functions
'''
def boolval(variable):
    return bool(variable)
def debug_zval_dump():
    pass
def doubleval(variable):
    # PHP doubleval() is an alias of floatval().
    return float(variable)
def empty(variable):
    # PHP empty(): plain truthiness test (`empty` is not a Python builtin).
    if not variable:
        return True
    return False
def floatval(variable):
    return float(variable)
def get_defined_vars():
    pass
def get_resource_type():
    pass
def gettype(variable):
    # Python type name (e.g. 'int', 'list'), NOT PHP's type names.
    return type(variable).__name__
def import_request_variables():
    pass
def intval(variable, base=10):
    """PHP intval(): integer value of `variable`.

    int(x, base) only accepts strings; numeric input previously raised
    TypeError. Numbers are truncated toward zero, as in PHP; `base` is
    honoured only for string input.
    """
    if isinstance(variable, str):
        return int(variable, base)
    return int(variable)
def is_array(variable):
    # PHP arrays map loosely onto list/tuple here (dicts are NOT included).
    return isinstance(variable, (list, tuple))
def is_bool(variable):
    return isinstance(variable, bool)
def is_callable(name):
    return callable(name)
def is_countable(variable):
    # NOTE(review): Counter() accepts any iterable, so this is closer to
    # "is iterable" than PHP's is_countable; the bare except also hides
    # unrelated errors.
    try:
        Counter(variable)
        return True
    except:
        return False
def is_double(variable):
    # PHP is_double() is an alias of is_float().
    return isinstance(variable, float)
def is_float(variable):
    return isinstance(variable, float)
def is_int(variable):
    return isinstance(variable, int)
def is_integer(variable):
    # Alias of is_int().
    return isinstance(variable, int)
def | |
extra == "yeah":
msg = "You are so fucked, {0}, so fucking fucked! Yeah, yeah, yeah!".format(
target)
else:
msg = "You are so fucked, {0}, so fucking fucked!".format(target)
return msg
    # NOTE(review): these message builders read lt/le/ls/target/extra from an
    # enclosing scope, not from self; lt/le appear to be len(target)/len(extra)
    # — confirm against the class definition above this chunk.
    def utard(self):
        if lt == 0:
            msg = "You fucktard!"
        else:
            msg = "{0}, you fucktard!".format(target)
        return msg
    def valley(self):
        if lt == 0:
            msg = "Yea, though I walk through the shadow of the Valley of Death I shall fear no evil ... for I am the meanest motherfucker in all the land!"
        else:
            msg = "Yea, {0}, though I walk through the shadow of the Valley of Death I shall fear no evil ... for I am the meanest motherfucker in all the land!".format(
                target)
        return msg
    def vvv(self):
        msg = "Vidi, vici, veni." # "I saw, I conquered, I came"
        return msg
    def wafwot(self):
        if lt == 0:
            msg = "What a fucking waste of time!"
        else:
            msg = "{0}, that's a fucking waste of time!".format(target)
        return msg
    def wbfu(self): # because uses exclamation marks, otherwise the same.
        if lt == 0:
            msg = "Why? Because fuck you, that's why."
        else:
            msg = "Why? Because fuck you {0}, that's why.".format(target)
        return msg
    def we1(self):
        # Picks the contraction ("we're") when ls > 0 and prefixes target
        # and/or extra when present.
        if lt == 0 and le == 0 and ls == 0:
            msg = "We are fucked!"
        elif lt == 0 and le == 0 and ls > 0:
            msg = "We're fucked!"
        elif lt > 0 and le == 0 and ls == 0:
            msg = "{0}, we are fucked!".format(target)
        elif lt > 0 and le == 0 and ls > 0:
            msg = "{0}, we're fucked!".format(target)
        elif lt == 0 and le > 0 and ls == 0:
            msg = "{0} we are fucked!".format(extra)
        elif lt == 0 and le > 0 and ls > 0:
            msg = "{0} we're fucked!".format(extra)
        elif lt > 0 and le > 0 and ls == 0:
            msg = "{0}, {1}, we are fucked!".format(extra, target)
        elif lt > 0 and le > 0 and ls > 0:
            msg = "{0}, {1}, we're fucked!".format(extra, target)
        else:
            # Unreachable for non-negative lengths; kept as a safety net.
            msg = "Now ... we are fucked!"
        return msg
    # when*/where*: identical shape — bare question, or prefixed with target.
    def when1(self):
        if lt == 0:
            msg = "When the fuck will that happen?"
        else:
            msg = "{0}, when the fuck will that happen?".format(target)
        return msg
    def when2(self):
        if lt == 0:
            msg = "When the fuck will we get there?"
        else:
            msg = "{0}, when the fuck will we get there?".format(target)
        return msg
    def when3(self):
        if lt == 0:
            msg = "When the fuck did that happen?"
        else:
            msg = "{0}, when the fuck did that happen?".format(target)
        return msg
    def where1(self):
        if lt == 0:
            msg = "Where the fuck are we?"
        else:
            msg = "{0}, where the fuck are we?".format(target)
        return msg
    def where2(self):
        if lt == 0:
            msg = "Where the fuck is it?"
        else:
            msg = "{0}, where the fuck is it?".format(target)
        return msg
    def where3(self):
        if lt == 0:
            msg = "Where the fuck are you?"
        else:
            msg = "{0}, where the fuck are you?".format(target)
        return msg
    # who1..who6: same target-prefix pattern; who5 also varies on `extra`.
    def who1(self):
        if lt == 0:
            msg = "Who the fuck do they think they are?"
        else:
            msg = "{0}, who the fuck do they think they are?".format(target)
        return msg
    def who2(self):
        if lt == 0:
            msg = "Who the fuck do they think you are?"
        else:
            msg = "{0}, who the fuck do they think you are?".format(target)
        return msg
    def who3(self):
        if lt == 0:
            msg = "Who the fuck do you think you are?"
        else:
            msg = "{0}, who the fuck do you think you are?".format(target)
        return msg
    def who4(self):
        if lt == 0:
            msg = "Who the fuck knows?"
        else:
            msg = "{0}, who the fuck knows?".format(target)
        return msg
    def who5(self):
        if lt == 0 and le == 0:
            msg = "Who the fuck are you?"
        elif lt > 0 and le == 0: # target should not be a name.
            msg = "{0}, who the fuck are you?".format(target)
        elif lt > 0 and le > 0:
            msg = "{0}, who the fuck are you anyway?".format(target)
        else:
            msg = "Who the fuck are you anyway?"
        return msg
    def who6(self):
        if lt == 0:
            msg = "Who the fuck cares?"
        else:
            msg = "{0}, who the fuck cares?".format(target)
        return msg
    def why(self):
        if lt == 0:
            msg = "Why the fuck should I?"
        else:
            msg = "{0}, why the fuck should I?".format(target)
        return msg
    def whyk(self):
        if lt == 0:
            msg = "Why the fuck should I know?"
        else:
            msg = "{0}, why the fuck should I know?".format(target)
        return msg
    def whynot(self):
        # NOTE(review): if lt were ever negative, msg stays unbound and the
        # final return raises UnboundLocalError.
        if lt == 0:
            msg = "Why the fuck not?!"
        elif lt > 0:
            msg = "Why the fuck not, {0}?!".format(target)
        return msg
    def wit(self):
        if lt == 0:
            msg = "You fuckwit!"
        else:
            msg = "{0}, you fuckwit!".format(target)
        return msg
    def woftam(self):
        # Waste Of Fucking Time And Money; varies on target/extra presence.
        if lt == 0 and le == 0:
            msg = "It's a waste of fucking time and money."
        elif lt > 0 and le == 0:
            msg = "{0}, it's a waste of fucking time and money.".format(target)
        elif lt == 0 and le > 0:
            msg = "{0} is a waste of fucking time and money.".format(extra)
        elif lt > 0 and le > 0:
            msg = "{0}, {1} is a waste of fucking time and money.".format(
                target, extra)
        return msg
def wsb1(self):
if lt == 0 and le == 0 and lR == 0:
msg = "Do not offer sympathy to the mentally ill, tell them firmly: I am not paid to listen to this drivel, you are a terminal fool."
elif lt == 0 and le == 0 and lR > 0:
msg = "{0}, do not offer sympathy to the mentally ill, tell them firmly: I am not paid to listen to this drivel, you are a terminal fool.".format(
relay)
elif lt > 0 and le == 0 and lR == 0:
msg = "{0}, I do not offer sympathy to the mentally ill, I tell them firmly: I am not paid to listen to this drivel, {1}, you are a terminal fool.".format(
target, target)
elif lt > 0 and le == 0 and lR > 0:
msg = "{0}, do not offer sympathy to the mentally ill, tell {1} firmly: I am not paid to listen to this drivel, you are a terminal fool.".format(
relay, target)
elif lt > 0 and le > 0 and lR == 0:
msg = "I am not paid to listen to this drivel, {0}, you are a terminal fool.".format(
target)
else:
msg = "I am not paid to listen to this drivel, you are a terminal fool."
    def wtaf(self):
        if lt == 0:
            msg = "What the actual fuck?!"
        elif lt > 0:
            msg = "What the actual fuck {0}?!".format(target)
        else:
            # Unreachable for non-negative lt; kept verbatim.
            msg = "What the actual fuck {0}?!".format(target)
        return msg
    def wtf(self):
        if lt == 0:
            msg = "What the fuck?!"
        else:
            msg = "{0}, what the fuck?!".format(target)
        return msg
    def wtfc(self):
        if lt == 0:
            msg = "What fucking crack are you smoking?!"
        else:
            msg = "{0}, what fucking crack are you smoking?!".format(target)
        return msg
    def wtfd(self):
        if lt == 0:
            msg = "What the fuck are you doing?!"
        else:
            msg = "{0}, what the fuck are you doing?!".format(target)
        return msg
    def wtfg(self):
        if lt == 0:
            msg = "What the fuck is going on?!"
        else:
            msg = "{0}, what the fuck is going on?!".format(target)
        return msg
    def wtfgh(self):
        if lt == 0:
            msg = "What the fuck is going on here?!"
        else:
            msg = "{0}, what the fuck is going on here?!".format(target)
        return msg
    def wtfo(self):
        if lt == 0:
            msg = "What the fuck are you on?!"
        else:
            msg = "{0}, what the fuck are you on?!".format(target)
        return msg
def | |
import shutil, tempfile
from enthought.traits.api \
import HasTraits, Instance, Button, Int, Bool, on_trait_change
from enthought.traits.ui.api \
import View, Item, Group, HGroup, spring
from enthought.traits.ui.editors.range_editor import RangeEditor
from enthought.tvtk.pyface.scene_editor import SceneEditor
from enthought.mayavi.tools.mlab_scene_model import MlabSceneModel
from enthought.mayavi.core.ui.mayavi_scene import MayaviScene
from sfepy.base.base import *
from sfepy.base.tasks import Process
from sfepy.linalg import cycle
from sfepy.solvers.ts import get_print_info
from sfepy.postprocess.utils import mlab
from sfepy.postprocess.sources import create_file_source, FileSource
def get_glyphs_scale_factor(rng, rel_scaling, bbox):
    # Scale factor so a glyph of magnitude `delta` spans rel_scaling * dx,
    # where dx is the largest bounding-box edge and rng is the (min, max)
    # data range. nm is numpy (sfepy convention).
    delta = rng[1] - rng[0]
    dx = nm.max((bbox[1,:] - bbox[0,:]))
    if rel_scaling is None:
        rel_scaling = 0.02 # -> delta fits 50x into dx.
    return rel_scaling * dx / delta
def add_surf(obj, position, opacity=1.0):
    # Add a surface plot of `obj` to the pipeline, offset to `position`.
    surf = mlab.pipeline.surface(obj, opacity=opacity)
    surf.actor.actor.position = position
    return surf
def add_scalar_cut_plane(obj, position, normal, opacity=1.0):
    # Add a scalar cut plane with the given normal; the interactive
    # implicit-plane widget is hidden.
    scp = mlab.pipeline.scalar_cut_plane(obj, opacity=opacity)
    scp.actor.actor.position = position
    scp.implicit_plane.visible = False
    scp.implicit_plane.normal = normal
    return scp
def add_iso_surface(obj, position, contours=10, opacity=1.0):
    # Add an iso-surface plot with `contours` levels, offset to `position`.
    obj = mlab.pipeline.iso_surface(obj, contours=contours, opacity=opacity)
    obj.actor.actor.position = position
    return obj
def add_glyphs(obj, position, bbox, rel_scaling=None,
               scale_factor='auto', clamping=False, color=None):
    # Add 2D-arrow glyphs scaled/colored by the vector data; with
    # scale_factor='auto' the factor is derived from the glyph range and
    # the bounding box via get_glyphs_scale_factor().
    glyphs = mlab.pipeline.glyph(obj, mode='2darrow', scale_mode='vector',
                                 color=color, opacity=1.0)
    if scale_factor == 'auto':
        rng = glyphs.glyph.glyph.range
        scale_factor = get_glyphs_scale_factor(rng, rel_scaling, bbox)
    glyphs.glyph.color_mode = 'color_by_vector'
    glyphs.glyph.scale_mode = 'scale_by_vector'
    glyphs.glyph.glyph.clamping = clamping
    glyphs.glyph.glyph.scale_factor = scale_factor
    # Anchor arrows at their tail so they point away from the data point.
    glyphs.glyph.glyph_source.glyph_position = 'tail'
    glyphs.actor.actor.position = position
    return glyphs
def add_text(obj, position, text, width=None, color=(0, 0, 0)):
    # Add a text label at `position`; default width scales with text length.
    if width is None:
        width = 0.02 * len(text)
    t = mlab.text(x=position[0], y=position[1], text=text,
                  z=position[2], color=color, width=width)
    return t
def get_position_counts(n_data, layout):
    """Compute a (n_row, n_col) grid for `n_data` plots under `layout`.

    The default grid is roughly square with at most 5 columns; 'rowcol'
    transposes it, 'row' puts everything on one row, 'col' on one column.
    """
    cols = max(1.0, min(5.0, nm.fix(nm.sqrt(n_data))))
    rows = int(nm.ceil(n_data / cols))
    cols = int(cols)
    if layout == 'rowcol':
        rows, cols = cols, rows
    elif layout == 'row':
        rows, cols = 1, n_data
    elif layout == 'col':
        rows, cols = n_data, 1
    return rows, cols
class Viewer(Struct):
"""Class to automate visualization of various data using Mayavi. It can be
used via postproc.py or isfepy the most easily.
It can use any format that mlab.pipeline.open() handles, e.g. a VTK format.
After opening a data file, all data (point, cell, scalars, vectors,
tensors) are plotted in a grid layout.
Parameters:
watch : bool
If True, watch the file for changes and update the mayavi
pipeline automatically.
animate : bool
        If True, save a view snapshot for each time step and exit.
anim_format : str
If set to a ffmpeg-supported format (e.g. mov, avi, mpg), ffmpeg is
installed and results of multiple time steps are given, an animation is
created in the same directory as the view images.
ffmpeg_options : str
The ffmpeg animation encoding options.
output_dir : str
The output directory, where view snapshots will be saved.
Examples:
>>> view = Viewer('file.vtk')
>>> view() # view with default parameters
>>> view(layout='col') # use column layout
"""
    def __init__(self, filename, watch=False,
                 animate=False, anim_format=None, ffmpeg_options=None,
                 output_dir='.', offscreen=False, auto_screenshot=True):
        """Store the viewer options on the Struct; when mlab (Mayavi)
        failed to import, install a no-op __call__ instead of call_mlab."""
        Struct.__init__(self,
                        filename = filename,
                        watch = watch,
                        animate = animate,
                        anim_format = anim_format,
                        ffmpeg_options = ffmpeg_options,
                        output_dir = output_dir,
                        offscreen = offscreen,
                        auto_screenshot = auto_screenshot,
                        scene = None,
                        gui = None)
        # get_arguments() is an sfepy frame-inspection helper: it snapshots
        # this call's arguments, so it must run inside __init__ itself.
        self.options = get_arguments(omit = ['self'])
        if mlab is None:
            output('mlab cannot be imported, check your installation!')
            insert_as_static_method(self.__class__, '__call__', self.call_empty)
        else:
            insert_as_static_method(self.__class__, '__call__', self.call_mlab)
    def get_data_names(self, source=None, detailed=False):
        """List (family, kind, name) triples for all data arrays in `source`.

        When no source is given, the file is opened in a throwaway 1x1
        (possibly offscreen) figure just to inspect it. Returns
        (point_names, cell_names) when `detailed`, else one merged list.
        """
        if source is None:
            mlab.options.offscreen = self.offscreen
            scene = mlab.figure(bgcolor=(1,1,1), fgcolor=(0, 0, 0), size=(1, 1))
            source = mlab.pipeline.open(self.filename)
        # [:-1] drops the last entry of each mlab attribute list — presumably
        # a placeholder/pseudo item appended by mlab; confirm against the
        # mayavi version in use.
        point_scalar_names = sorted( source._point_scalars_list[:-1] )
        point_vector_names = sorted( source._point_vectors_list[:-1] )
        point_tensor_names = sorted( source._point_tensors_list[:-1] )
        cell_scalar_names = sorted( source._cell_scalars_list[:-1] )
        cell_vector_names = sorted( source._cell_vectors_list[:-1] )
        cell_tensor_names = sorted( source._cell_tensors_list[:-1] )
        p_names = [['point', 'scalars', name] for name in point_scalar_names]
        p_names += [['point', 'vectors', name] for name in point_vector_names]
        p_names += [['point', 'tensors', name] for name in point_tensor_names]
        c_names = [['cell', 'scalars', name] for name in cell_scalar_names]
        c_names += [['cell', 'vectors', name] for name in cell_vector_names]
        c_names += [['cell', 'tensors', name] for name in cell_tensor_names]
        if detailed:
            return p_names, c_names
        else:
            return p_names + c_names
    def set_source_filename(self, filename):
        # Point the viewer (and its live file source, if any) at a new file.
        self.filename = filename
        try:
            self.file_source.set_filename(filename, self.scene.children[0])
        except IndexError: # No sources yet.
            pass
    def save_image(self, filename):
        """Save a snapshot of the current scene into output_dir/filename,
        creating the output directory on demand."""
        if self.output_dir and not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)
        name = os.path.join(self.output_dir, filename)
        output('saving %s...' % name)
        self.scene.scene.save(name)
        output('...done')
    def get_animation_info(self, filename, add_output_dir=True, rng=None):
        # Split `filename` into (base, step-number format, extension) for
        # per-step snapshot naming; `suffix` is a %-format sized to the
        # number of steps. NOTE(review): n_digit and fmt are unused here.
        if rng is None:
            rng = self.file_source.get_step_range()
        base, ext = os.path.splitext(filename)
        if add_output_dir:
            base = os.path.join(self.output_dir, base)
        n_digit, fmt, suffix = get_print_info(rng[1] - rng[0] + 1)
        return base, suffix, ext
    def save_animation(self, filename):
        """Animate the current scene view for all the time steps and save
        a snapshot of each step view."""
        # xrange: this module is Python 2 code.
        rng = self.file_source.get_step_range()
        base, suffix, ext = self.get_animation_info(filename,
                                                    add_output_dir=False,
                                                    rng=rng)
        for step in xrange(rng[0], rng[1]+1):
            name = '.'.join((base, suffix % step, ext[1:]))
            output('%d: %s' % (step, name))
            # Assigning to the trait triggers the pipeline update & redraw.
            self.set_step.step = step
            self.save_image(name)
    def encode_animation(self, filename, format, ffmpeg_options=None):
        # Encode the per-step snapshots into one animation via ffmpeg.
        # NOTE(review): '-sameq' was removed from modern ffmpeg builds, and
        # os.system() reports failure via its return code, so the bare
        # except rarely fires — encoding errors can pass silently.
        if ffmpeg_options is None:
            ffmpeg_options = '-r 10 -sameq'
        base, suffix, ext = self.get_animation_info(filename)
        anim_name = '.'.join((base, format))
        cmd = 'ffmpeg %s -i %s %s' % (ffmpeg_options,
                                      '.'.join((base, suffix, ext[1:])),
                                      anim_name)
        output('creating animation "%s"...' % anim_name)
        try:
            os.system(cmd)
        except:
            output('...warning: animation not created, is ffmpeg installed?')
        else:
            output('...done')
        return anim_name
def get_size_hint(self, layout, resolution=None):
if resolution is not None:
size = resolution
elif layout == 'rowcol':
size = (800, 600)
elif layout == 'row':
size = (1000, 600)
elif layout == 'col':
size = (600, 1000)
else:
size = (600, 800)
return size
def build_mlab_pipeline(self, file_source=None, is_3d=False, layout='rowcol',
scalar_mode='iso_surface',
vector_mode='arrows_norm',
rel_scaling=None, clamping=False,
ranges=None, is_scalar_bar=False, is_wireframe=False,
rel_text_width=None,
filter_names=None, group_names=None,
only_names=None):
"""Sets self.source, self.is_3d_data """
file_source = get_default(file_source, self.file_source,
'file_source not set!')
if filter_names is None:
filter_names = []
self.source = source = self.file_source()
bbox = file_source.get_bounding_box()
dx = 1.1 * (bbox[1,:] - bbox[0,:])
float_eps = nm.finfo(nm.float64).eps
self.is_3d_data = abs(dx[2]) > (10.0 * float_eps)
p_names, c_names = self.get_data_names(source, detailed=True)
names = p_names + c_names
if only_names is None:
names = [ii for ii in names if ii[2] not in filter_names]
else:
_names = [ii for ii in names if ii[2] in only_names]
if len(_names) != len(only_names):
output('warning: some names were not found!')
if not len(_names):
raise ValueError('no names were found! (%s not in %s)'
% (only_names, [name[2] for name in names]))
names = _names
if group_names is not None:
ndict = {}
for name in names:
ndict[name[2]] = name
repeat = []
_names = []
aux = set(name[2] for name in names)
for group in group_names:
aux.difference_update(group)
repeat.append(len(group))
for name in group:
_names.append(ndict[name])
repeat.extend([1] * len(aux))
n_pos = len(repeat)
names = _names
n_data = len(names)
else:
n_pos = n_data = len(names)
repeat = [1] * n_data
def _make_iterator(repeat, n_row, n_col):
ii = 0
for ij, iric in enumerate(cycle((n_row, n_col))):
ir, ic = iric
if ij < len(repeat):
for ik in xrange(repeat[ij]):
yield ii, ir, ic
ii += 1
n_row, n_col = get_position_counts(n_pos, layout)
if layout[:3] == 'col':
iterator = _make_iterator(repeat, n_col, n_row)
else:
iterator = _make_iterator(repeat, n_row, n_col)
max_label_width = nm.max([len(ii[2]) for ii in names] + [5]) + 2
if c_names:
ctp = mlab.pipeline.cell_to_point_data(source)
self.scalar_bars = []
for ii, ir, ic in iterator:
if layout[:3] == 'col':
ir, ic = ic, ir
if ii == n_data: break
family, kind, name = names[ii]
is_magnitude = False
position = nm.array([dx[0] * ic, dx[1] * (n_row - ir - 1), 0])
output(family, kind, name, position)
if kind == 'scalars':
if family == 'point':
active = mlab.pipeline.set_active_attribute(source)
else:
active = mlab.pipeline.set_active_attribute(ctp)
setattr(active, '%s_%s_name' % (family, kind), name)
if is_3d:
if 'cut_plane' in scalar_mode:
scp = add_scalar_cut_plane(active,
position, [1, 0, 0],
opacity=0.5)
scp = add_scalar_cut_plane(active,
position, [0, 1, 0],
opacity=0.5 )
scp = add_scalar_cut_plane(active,
position, [0, 0, 1],
opacity=0.5 )
if 'iso_surface' in scalar_mode:
active.point_scalars_name = name
iso = add_iso_surface(active, position, opacity=0.3)
else:
surf = add_surf(active, position)
elif kind == 'vectors':
if family == 'point':
active = mlab.pipeline.set_active_attribute(source)
else:
active = mlab.pipeline.set_active_attribute(ctp)
active.point_vectors_name = name
if (ranges is not None) and (name in ranges):
sf = get_glyphs_scale_factor(ranges[name],
rel_scaling, bbox)
else:
sf = None
if 'arrows' in vector_mode:
glyphs = add_glyphs(active, position, bbox,
rel_scaling=rel_scaling,
clamping=clamping)
if sf is not None:
glyphs.glyph.glyph.scale_factor = sf
if 'warp' in vector_mode:
active = mlab.pipeline.warp_vector(active)
active.filter.scale_factor = rel_scaling
if 'norm' in vector_mode:
active = mlab.pipeline.extract_vector_norm(active)
if 'arrows' in vector_mode:
opacity = 0.3
| |
lost a hand.
#The HandResult class contains three instance values:
#self.CardCount; An integer value which represents the number of cards which were in the Hand class object that this HandResult class instance represents when it finished.
#self.Value; An integer value which represents the point value of the Hand class object that this HandResult class instance represents when it finished.
#self.DoubleDown; A boolean value which indicates whether or not the Hand class object that this HandResult class instance represents finished by doubling down.
class HandResult:
    """Snapshot of a finished blackjack hand.

    CardCount  -- number of cards held when the hand ended.
    Value      -- final point value of the hand.
    DoubleDown -- whether the hand ended on a double-down.
    """
    def __init__(self, vl, cc, dd = False):
        self.CardCount = cc
        self.Value = vl
        self.DoubleDown = dd

    def __str__(self):
        # Same rendering as the original string concatenation.
        return f"CC = {self.CardCount}, VL = {self.Value}, DD = {self.DoubleDown}"
#[<NAME> (Oct.3 2021 {01:39})]
#Here I define a class which represents a player who will play blackjack, it contains three instance values:
#self.Hand; An array of Hand class objects, one for each that the player is playing at once.
#self.tb; An integer value which represents the total amount of money currently being bet across all hands currently being played.
#self.bi; An integer value which represents the original amount of money bet at the beginning of the current round.
#The Player class contains three functions:
#Play(); A function which:
#Shows the player their hand by calling the print() function on the hand currently being interacted with.
#Determines whether or not the player can double down or split their hand.
#Checks if the player has reached or exceeded a score of 21 on their hand, reveals the score to the player,
#and finishes the hand if the player has reached or exceeded a score of 21 by instantiating and returning a HandResult class object based on the hand currently being interacted with.
#Informs the player of the things they can do with their hand, Hit, Stand, Split or Double Down as appropriate using the Call() function.
#Requests that the player decide what they would like to do with their hand next.
#If the player chooses to hit, call the hit() function for the hand currently being interacted with and recursively calls Play().
#If the player chooses to stand, instantiate and return a HandResult class object based on the hand currently being interacted with.
#If the player chooses to split, add self.bi to self.tb, call and return the Split() function.
#If the player chooses to double down, set Hand.doubled to true for the hand currently being interacted with, add self.bi to self.tb, call the hit() function and call Play() recursively.
#Takes in a(n):
#Deck class object which represents the deck from which the player will draw cards, called deck.
#Integer value which is used to index self.Hand to find the hand that is currently being interacted with, called i, assumed 0.
#Boolean value which indicates whether or not the player is betting, called b, assumed false.
#Integer value which represents the total remaining cash that a player can bet with, called cr, assumed 0.
#Integer value which represents the amount being bet on all hands initially, called tb, assumed -1.
#Returns an array of HandResult class object based on all hands being played by the player this round.
#Call(); A function which presents the player with all of the things they can do with their hand and processes their response.
#Takes in a:
#boolean value which indicates whether the player can double down on the hand currently being interacted with, called d.
#boolean value which indicates whether the player can split the hand currently being interacted with, called s.
#Returns an integer value which represents the selection that the player made.
#Split(); A function which:
#Creates a new Hand class object and adds it to self.Hands.
#Moves a card from the hand currently being interacted with to the newly created hand.
#Calls the hit() function on both the hand currently being interacted with and the newly created hand.
#Calls and returns Play() on both the hand currently being interacted with and the newly created hand.
#Takes in a(n):
#Deck class object which represents the deck from which the player will draw cards, called deck.
#Integer value which is used to index self.Hand to find the hand that is currently being interacted with, called s, assumed 0.
#Boolean value which indicates whether or not the player is betting, called b, assumed false.
#Integer value which represents the total remaining cash that a player can bet with, called cr, assumed 0.
#Returns an array of HandResult class object based on all hands being played by the player this round.
class Player:
    # Represents the human player. See the block comment above the class for the
    # full contract of Play(), Call() and Split().
    # Attributes:
    #   self.Hand: list of Hand objects; grows beyond one entry when the player splits
    #   self.tb:   total amount bet across all of the player's hands this round
    #   self.bi:   the original amount bet at the beginning of the current round
    def __init__(self):
        self.Hand = [ Hand() ]
        self.tb = 0
        self.bi = 0
    def Play(self, deck, i = 0, b = False, cr = 0, tb = -1):
        """Interactively play out hand self.Hand[i]; returns a list of HandResult objects.

        Args:
            deck: Deck object to draw cards from.
            i: index into self.Hand of the hand currently being interacted with.
            b: whether the player is betting (enables the double-down option).
            cr: total remaining cash the player can still bet with.
            tb: initial total bet; the default of -1 means "already set by an earlier call".
        """
        if (tb != -1):
            # first call of the round: record both the running total and the per-hand bet
            self.tb = tb
            self.bi = tb
        print(f"{colours.FORE_BRIGHT_CYAN}\nYour Hand:{colours.ENDC}")
        self.Hand[i].print()
        splitable = False
        doublable = False
        # doubling/splitting is only offered on a fresh two-card hand, and only when the
        # player still has enough cash to cover one more bet of self.bi
        if (len(self.Hand[i].cards) == 2 and cr >= self.tb + self.bi):
            if b:
                doublable = True
            # a pair (two cards of equal value) may be split
            if (self.Hand[i].cards[0].GetValue(0) == self.Hand[i].cards[1].GetValue(0)):
                splitable = True
        done = False
        val = -1
        # evalAceLast() scores the hand before we inspect self.Hand[i].score below
        if (self.Hand[i].evalAceLast() > 21):
            print(f"{colours.FORE_BRIGHT_GREEN}\nTotal Score = Bust...{colours.ENDC}")
            if self.Hand[i].doubled:
                return [ HandResult(self.Hand[i].score, len(self.Hand[i].cards), True) ]
            return [ HandResult(self.Hand[i].score, len(self.Hand[i].cards)) ]
        elif (self.Hand[i].score == 21):
            print(f"{colours.FORE_BRIGHT_GREEN}\nTotal Score = 21!{colours.ENDC}")
            if self.Hand[i].doubled:
                return [ HandResult(self.Hand[i].score, len(self.Hand[i].cards), True) ]
            return [ HandResult(self.Hand[i].score, len(self.Hand[i].cards)) ]
        else:
            print(f"{colours.FORE_BRIGHT_GREEN}\nTotal Score = " + str(self.Hand[i].score) + f"{colours.ENDC}")
        # after doubling down, the player receives exactly one card and the hand ends
        if not self.Hand[i].doubled:
            # keep prompting until Call() returns a recognised selection
            while (not done):
                val = self.Call(doublable, splitable)
                if (val != -1):
                    done = True
                else:
                    print (f"{colours.FORE_BRIGHT_ORANGE}\nInvalid input. Please input the letter in brackets for the option you want.{colours.ENDC}")
            if (val == 0):
                # hit: draw a card and replay the same hand
                self.Hand[i].hit(deck)
                return self.Play(deck, i, b, cr)
            elif (val == 1):
                # stand: the hand is finished as-is
                return [ HandResult(self.Hand[i].score, len(self.Hand[i].cards)) ]
            elif (val == 3):
                # split: the extra hand costs another bet of self.bi
                self.tb = self.tb + self.bi
                return self.Split(deck, i, b, cr)
            else:
                # double down: double the bet, draw exactly one card, then finish the hand
                self.Hand[i].doubled = True
                self.tb = self.tb + self.bi
                self.Hand[i].hit(deck)
                return self.Play(deck, i, b, cr)
        else:
            return [ HandResult(self.Hand[i].score, len(self.Hand[i].cards), True) ]
    def Call(self, d, s):
        """Prompt for the player's next move and return it as an integer.

        Args:
            d: whether double down is currently allowed.
            s: whether splitting is currently allowed.
        Returns:
            0 = hit, 1 = stand, 2 = double down, 3 = split, -1 = unrecognised input.
        """
        response = ""
        if (d and s):
            response = input(f"{colours.FORE_BRIGHT_BLUE}\nWould you like to {colours.ENDC}(H){colours.FORE_BRIGHT_BLUE}it, {colours.ENDC}(S){colours.FORE_BRIGHT_BLUE}tand, {colours.ENDC}(D){colours.FORE_BRIGHT_BLUE}ouble Down or S{colours.ENDC}(P){colours.FORE_BRIGHT_BLUE}lit? - {colours.ENDC}")
        elif (d and not s):
            response = input(f"{colours.FORE_BRIGHT_BLUE}\nWould you like to {colours.ENDC}(H){colours.FORE_BRIGHT_BLUE}it, {colours.ENDC}(S){colours.FORE_BRIGHT_BLUE}tand or {colours.ENDC}(D){colours.FORE_BRIGHT_BLUE}ouble Down? - {colours.ENDC}")
        elif (not d and s):
            response = input(f"{colours.FORE_BRIGHT_BLUE}\nWould you like to {colours.ENDC}(H){colours.FORE_BRIGHT_BLUE}it, {colours.ENDC}(S){colours.FORE_BRIGHT_BLUE}tand or S{colours.ENDC}(P){colours.FORE_BRIGHT_BLUE}lit? - {colours.ENDC}")
        else:
            response = input(f"{colours.FORE_BRIGHT_BLUE}\nWould you like to {colours.ENDC}(H){colours.FORE_BRIGHT_BLUE}it or {colours.ENDC}(S){colours.FORE_BRIGHT_BLUE}tand? - {colours.ENDC}")
        val = -1
        # scan the reply for the first recognised option letter; D/P only count when allowed
        for char in response:
            if (char == "H" or char == "h"):
                val = 0
                break
            elif (char == "S" or char == "s"):
                val = 1
                break
            elif ((char == "D" or char == "d") and d):
                val = 2
                break
            elif ((char == "P" or char == "p") and s):
                val = 3
                break
        return val
    def Split(self, deck, s, b = False, cr = 0):
        """Split hand self.Hand[s] into two hands and play both; returns their HandResults.

        Args:
            deck: Deck object to draw cards from.
            s: index of the hand being split.
            b: whether the player is betting.
            cr: total remaining cash the player can bet with.
        """
        self.Hand.append(Hand())
        n = len(self.Hand) - 1
        # move one card of the pair into the new hand, then deal each hand a fresh card
        self.Hand[-1].cards.append(self.Hand[s].cards.pop(-1))
        self.Hand[-1].hit(deck)
        self.Hand[s].hit(deck)
        r = self.Play(deck, s, b, cr)
        input(f"{colours.FORE_BRIGHT_BLUE}\nHit enter for your next hand.{colours.ENDC}")
        r.extend(self.Play(deck, n, b, cr))
        return r
#[<NAME> (Oct.3 2021 {01:39})]
#Here I define a class which represents the dealer who the player will be trying to beat. The Dealer class is derived from the Player class.
#The Dealer class contains one non static (instance) value, self.Hand; a Hand class object.
#The Dealer class contains one function:
#Play(); A function which:
#Checks if the dealer has reached or exceeded a score of 21 on their hand, reveals the score to the player,
#and finishes the hand if the dealer has reached or exceeded a score of 21 by instantiating and returning a HandResult class object self.Hand.
#Checks if the dealer has reached or exceeded a score of 17 on their hand,
#calls the hit() function of self.Hand() if the dealer has not reached or exceeded a score of 17 on their hand and calls Play() recursively,
#and finishes the hand if the dealer has reached or exceeded a score of 17 by instantiating and returning a HandResult class object self.Hand.
#Takes in a Deck class object which represents the deck from which the player will draw cards, called deck.
#Returns a HandResult class object based on self.Hand().
class Dealer(Player):
    """The house. Plays a single fixed-strategy hand: hit on 16 or less, stand on 17+."""
    def __init__(self):
        # unlike Player, the dealer holds exactly one Hand (not a list)
        self.Hand = Hand()
    def Play(self, deck):
        """Play the dealer's hand to completion and return its HandResult."""
        print(f"{colours.FORE_BRIGHT_CYAN}\nDealer's Hand:{colours.ENDC}")
        self.Hand.print()
        # evalAceLast() scores the hand; read self.Hand.score only afterwards
        busted = self.Hand.evalAceLast() > 21
        score = self.Hand.score
        if busted or score == 21:
            outcome = "Bust..." if busted else "21!"
            print(f"{colours.FORE_BRIGHT_GREEN}\nTotal Score = {outcome}{colours.ENDC}")
            return HandResult(score, len(self.Hand.cards))
        print(f"{colours.FORE_BRIGHT_GREEN}\nTotal Score = {score}{colours.ENDC}")
        if score > 16:
            print(f"{colours.FORE_BRIGHT_BLUE}\nDealer Stands!{colours.ENDC}")
            return HandResult(score, len(self.Hand.cards))
        print(f"{colours.FORE_BRIGHT_BLUE}\nDealer Hits!{colours.ENDC}")
        self.Hand.hit(deck)
        return self.Play(deck)
#[<NAME> (Dec.15 2021 {22:10})]
#Here I define a class which represents the game instance as a whole, I would like to one day generalize this into a CardGame class and derive the BlackPack class from that.
#The BlackPack class contains nine non static (instance) values:
#self.Deck; A Deck object which contains all of the cards that will be
for norisk ageing flow age5to15 to age15to25
if '_norisk' in to_label:
add_unique_tuple_to_list(self.flows_by_type['fixed_transfer'],
(from_label, to_label,
self.params[param_label] * (1. - relevant_prop_ageing)))
elif '_norisk' in from_label and '_age15to25' in from_label:
# for diabetes_age_min
if '_diabetes' in to_label:
add_unique_tuple_to_list(self.flows_by_type['fixed_transfer'],
(from_label, to_label,
self.params[param_label] * prop_ageing_diabetes))
# for norisk ageing flow age15to25 to age25up
if'_norisk' in to_label:
add_unique_tuple_to_list(self.flows_by_type['fixed_transfer'],
(from_label, to_label,
self.params[param_label] * (1. - prop_ageing_diabetes)))
else:
add_unique_tuple_to_list(self.flows_by_type['fixed_transfer'],
(from_label, to_label, self.params[param_label]))
else:
add_unique_tuple_to_list(self.flows_by_type['fixed_transfer'],
(from_label, to_label, self.params[param_label]))
def set_linked_transfer_rate_flow(self, from_label, to_label, var_label):
"""
Set linked inter-compartmental transfer rate flows, where the flow between two compartments is dependent upon
a flow between another two compartments.
Args:
from_label: String for the compartment from which this flow comes.
to_label: String for the compartment to which this flow goes.
var_label: String to index the vars dictionary.
"""
match = 0
if self.remove_labels:
for pattern in self.remove_labels:
# check for remove labels ex dorm_age0to5 by splitting into dorm and age0to5
if all(word in from_label for word in pattern.split('_')):
match = match + 1
for pattern in self.remove_labels:
# check for remove labels ex dorm_age0to5 by splitting into dorm and age0to5
if all(word in to_label for word in pattern.split('_')):
match = match + 1
if match == 0:
add_unique_tuple_to_list(self.flows_by_type['linked_transfer'], (from_label, to_label, var_label))
else:
pass
#print('skipping linked transfer rate flow from label : ' + from_label + ' to ' + to_label)
def set_var_transfer_rate_flow(self, from_label, to_label, var_label):
"""
Set variable inter-compartmental transfer rate flows.
Args:
from_label: String for the compartment from which this flow comes.
to_label: String for the compartment to which this flow goes.
var_label: String to index the vars dictionary.
"""
match = 0
if self.remove_labels:
for pattern in self.remove_labels:
# check for remove labels ex dorm_age0to5 by splitting into dorm and age0to5
if all(word in from_label for word in pattern.split('_')):
match = match + 1
for pattern in self.remove_labels:
# check for remove labels ex dorm_age0to5 by splitting into dorm and age0to5
if all(word in to_label for word in pattern.split('_')):
match = match + 1
if match == 0:
add_unique_tuple_to_list(self.flows_by_type['var_transfer'], (from_label, to_label, var_label))
#print('++++++++++++++ adding var transfer rate flow from label : ' + from_label + ' to ' + to_label )
else:
pass
#print('skipping var transfer rate flow from label : ' + from_label + ' to ' + to_label)
''' variable and flow-related methods '''
def set_scaleup_fn(self, label, fn):
"""
Simple method to add a scale-up function to the dictionary of scale-ups.
Args:
label: String for name of function
fn: The function to be added
"""
self.scaleup_fns[label] = fn
def clear_vars(self):
"""
Clear previously populated vars dictionary. Method over-written in economics structures in next tier of model
object up.
"""
self.vars.clear()
def calculate_scaleup_vars(self):
"""
Find the values of the scale-up functions at a specific point in time. Called within the integration process.
"""
for label, fn in self.scaleup_fns.items(): self.vars[label] = fn(self.time)
    def calculate_vars(self):
        """
        Calculate the self.vars that depend on current model conditions (compartment sizes) rather than scale-up
        functions (model-specific).
        """
        # intentionally a no-op hook; model subclasses override this with their own calculations
        pass
    def calculate_vars_from_spending(self):
        """
        Method that can be used in economic calculations to calculate coverage of interventions from the spending
        directed to them.
        """
        # intentionally a no-op hook; economics-aware subclasses override this
        pass
def calculate_flows(self):
"""
Calculate flows, which should only depend on compartment values and self.vars calculated in
calculate_variable_rates.
"""
for label in self.labels: self.flows[label] = 0.
# birth flows
for label, vars_label in self.flows_by_type['var_entry']:
self.flows[label] += self.vars[vars_label]
# dynamic transmission flows
for from_label, to_label, vars_label in self.flows_by_type['var_transfer']:
val = self.compartments[from_label] * self.vars[vars_label]
self.flows[from_label] -= val
self.flows[to_label] += val
# fixed-rate flows
for from_label, to_label, rate in self.flows_by_type['fixed_transfer']:
val = self.compartments[from_label] * rate
self.flows[from_label] -= val
self.flows[to_label] += val
# linked flows
for from_label, to_label, vars_label in self.flows_by_type['linked_transfer']:
val = self.vars[vars_label]
self.flows[from_label] -= val
self.flows[to_label] += val
# normal death flows - note that there has to be a param or a var with the label 'demo_life_expectancy'
self.vars['rate_death'] = 0.
for label in self.labels:
val = self.compartments[label] / self.get_constant_or_variable_param('demo_life_expectancy')
self.flows[label] -= val
# extra death flows
self.vars['rate_infection_death'] = 0.
for label, rate in self.flows_by_type['fixed_infection_death']:
val = self.compartments[label] * rate
self.flows[label] -= val
self.vars['rate_infection_death'] += val
for label, rate in self.flows_by_type['var_infection_death']:
val = self.compartments[label] * self.vars[vars_label]
self.flows[label] -= val
self.vars['rate_infection_death'] += val
    def prepare_vars_flows(self):
        """
        This function collects some other functions that previously led to a bug because not all of them were called
        in the diagnostics round.
        """
        # order matters: vars must be cleared and recomputed before flows are derived from them
        self.clear_vars()
        self.calculate_scaleup_vars()
        self.calculate_vars_from_spending()
        self.calculate_vars()
        self.calculate_flows()
    def set_flows(self):
        """
        Main method to work through setting all intercompartmental flows.
        """
        # intentionally a no-op here; model subclasses override this to register their flows
        pass
''' main integration methods '''
    def init_run(self):
        """
        Works through the main methods in needed for the integration process. Contains more code that is dependent on
        correct naming of inputs, but should be universal to models based on this class (i.e. scenario_end_time).
        """
        # build the integration time grid, then the starting compartments, then the flow structure
        self.make_times(self.start_time, self.inputs.model_constants['scenario_end_time'], self.time_step)
        self.initialise_compartments()
        self.set_flows()
        # make_times is expected to populate self.times; guard against a subclass failing to do so
        assert self.times is not None, 'Times have not been set yet'
def make_derivative_fn(self):
"""
Create the main derivative function.
"""
def derivative_fn(y, t):
self.time = t
self.compartments = self.convert_list_to_compartments(y)
self.prepare_vars_flows()
flow_vector = self.convert_compartments_to_list(self.flows)
self.checks()
return flow_vector
return derivative_fn
def integrate(self):
"""
Numerical integration. This version also includes storage of compartment / vars / flows solutions which was
previously done in calculate_diagnostics.
Currently implemented for Explicit Euler and Runge-Kutta 4 methods
"""
self.process_uncertainty_params()
self.init_run()
y = self.get_init_list() # get initial conditions (loaded compartments for scenarios)
if self.inputs.debug:
with open("compartments.json", "a") as json_file:
json_file.write(json.dumps(self.compartments, cls=NumpyEncoder))
json_file.write(',\n')
y = self.make_adjustments_during_integration(y)
# prepare storage objects
n_compartment = len(y)
n_time = len(self.times)
self.flow_array = numpy.zeros((n_time, len(self.labels)))
derivative = self.make_derivative_fn()
# previously done in calculate_diagnostics
for i, label in enumerate(self.labels):
self.compartment_soln[label] = [None] * n_time # initialise lists
self.compartment_soln[label][0] = y[i] # store initial state
# need to run derivative here to get the initial vars
k1 = derivative(y, self.times[0])
# 'make_adjustments_during_integration' was already run but needed to be done again now that derivative
# has been run. Indeed, derivative allows new vars to be created and these vars are used in
# 'make_adjustments_during_integration'
y = self.make_adjustments_during_integration(y)
#coercing to list for python3
self.var_labels = list(self.vars.keys())
self.var_array = numpy.zeros((n_time, len(self.var_labels)))
# populate arrays for initial state
for i_label, var_label in enumerate(self.var_labels):
self.var_array[0, i_label] = self.vars[var_label]
for i_label, label in enumerate(self.labels):
self.flow_array[0, i_label] = self.flows[label]
# initialisation of iterative objects that will be used during integration
y_candidate = numpy.zeros((len(y)))
prev_time = self.times[0] # time of the latest successful integration step (not necessarily stored)
dt_is_ok = True # Boolean to indicate whether previous proposed integration time was successfully passed
# for each time as stored in self.times, except the first one
for i_time, next_time in enumerate(self.times[1:]):
store_step = False # whether the calculated time is to be stored (i.e. appears in self.times)
cpt_reduce_step = 0 # counts the number of times that the time step needs to be reduced
while store_step is False:
if not dt_is_ok: # previous proposed time step was too wide
adaptive_dt /= 2.
is_temp_time_in_times = False # whether the upcoming calculation step corresponds to next_time
else: # Previous time step was accepted
adaptive_dt = next_time - prev_time
is_temp_time_in_times = True # upcoming attempted integration step corresponds to next_time
k1 = numpy.asarray(derivative(y, prev_time)) # evaluate function at previous successful step
temp_time = prev_time + adaptive_dt # new attempted calculation time
# explicit Euler integration
if self.integration_method == 'Explicit':
for i in range(n_compartment):
y_candidate[i] = y[i] + adaptive_dt * k1[i]
# Runge-Kutta 4 integration
elif self.integration_method == 'Runge Kutta':
y_k2 = y + 0.5 * adaptive_dt * k1
if (y_k2 >= 0.).all():
k2 = numpy.asarray(derivative(y_k2, prev_time + 0.5 * adaptive_dt))
else:
dt_is_ok = False
continue
y_k3 = y + 0.5 * adaptive_dt * k2
if (y_k3 >= 0.).all():
k3 = numpy.asarray(derivative(y_k3, prev_time + 0.5 * adaptive_dt))
else:
dt_is_ok = False
continue
y_k4 = y + adaptive_dt * k3
if (y_k4 >= 0.).all():
k4 = numpy.asarray(derivative(y_k4, temp_time))
else:
dt_is_ok = False
continue
y_candidate = []
for i in range(n_compartment):
y_candidate.append(y[i] + (adaptive_dt / 6.) * (k1[i] + 2. * | |
counter + 1
if hasnameset and renamename != None:
mvname = os.path.basename(renamename)
if not os.path.exists(os.path.join(tmpdir, mvname)):
try:
shutil.move(tmpfile[1], os.path.join(tmpdir, mvname))
except Exception, e:
## if there is an exception don't rename
pass
if offset == 0 and (gzipsize == os.stat(filename).st_size):
## if the gzip file is the entire file, then tag it
## as a compressed file and as gzip. Also check if the
## file might be a tar file and pass that as a hint
## to downstream unpackers.
newtags.append('compressed')
newtags.append('gzip')
## if the file has not been renamed already try to see
## if it needs to be renamed.
if not(hasnameset and renamename != None):
## rename the file, like gunzip does
if filename.lower().endswith('.gz'):
filenamenoext = os.path.basename(filename)[:-3]
if len(filenamenoext) > 0:
gzpath = os.path.join(tmpdir, filenamenoext)
if not os.path.exists(gzpath):
shutil.move(tmpfile[1], gzpath)
elif filename.lower().endswith('.tgz'):
filenamenoext = os.path.basename(filename)[:-4] + ".tar"
if len(filenamenoext) > 4:
gzpath = os.path.join(tmpdir, filenamenoext)
if not os.path.exists(gzpath):
shutil.move(tmpfile[1], gzpath)
gzipfile.close()
return (diroffsets, blacklist, newtags, hints)
def searchUnpackCompress(filename, tempdir=None, blacklist=[], offsets={}, scanenv={}, debug=False):
    ## Search for data compressed with compress(1) (LZW) at the candidate offsets and
    ## try to unpack each one with uncompress.
    ## Returns (diroffsets, blacklist, newtags, hints) like the other searchUnpack* scans.
    hints = {}
    ## bug fix: 'newtags' was appended to below but never initialised, raising a
    ## NameError whenever a compress stream covered the entire file; it was also
    ## never returned, so the tags were lost.
    newtags = []
    if not 'compress' in offsets:
        return ([], blacklist, newtags, hints)
    if offsets['compress'] == []:
        return ([], blacklist, newtags, hints)
    compresslimit = int(scanenv.get('COMPRESS_MINIMUM_SIZE', 1))
    compress_tmpdir = scanenv.get('UNPACK_TEMPDIR', None)
    counter = 1
    diroffsets = []
    compressfile = open(filename, 'rb')
    for offset in offsets['compress']:
        blacklistoffset = extractor.inblacklist(offset, blacklist)
        if blacklistoffset != None:
            continue
        ## according to the specification the "bits per code" has
        ## to be 9 <= bits per code <= 16
        ## The "bits per code" field is masked with 0x1f
        compressfile.seek(offset+2)
        compressdata = compressfile.read(1)
        if len(compressdata) != 1:
            break
        compressbits = ord(compressdata) & 0x1f
        if compressbits < 9:
            continue
        if compressbits > 16:
            continue
        ## since compress expects a stream it will decompress some
        ## data, so as a first test read 1 MiB of data and then
        ## try to decompress it.
        ## If no data could be uncompressed return
        compressfile.seek(offset)
        compressdata = compressfile.read(1048576)
        p = subprocess.Popen(['uncompress'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (stanout, stanerr) = p.communicate(compressdata)
        if len(stanout) == 0:
            continue
        tmpdir = dirsetup(tempdir, filename, "compress", counter)
        res = unpackCompress(filename, offset, compresslimit, tmpdir, compress_tmpdir, blacklist)
        if res != None:
            ## TODO: find out how to find the length of the compressed
            ## data that was uncompressed so the right offsets for the
            ## blacklist can be computed
            compresssize = 0
            diroffsets.append((res, offset, compresssize))
            #blacklist.append((offset, offset + compresssize))
            counter = counter + 1
            if offset == 0 and compresssize == os.stat(filename).st_size:
                newtags.append('compressed')
                newtags.append('compress')
        else:
            ## cleanup
            os.rmdir(tmpdir)
    compressfile.close()
    return (diroffsets, blacklist, newtags, hints)
def unpackCompress(filename, offset, compresslimit, tempdir=None, compress_tmpdir=None, blacklist=[]):
    ## Carve the data at 'offset' out of 'filename' and decompress it with uncompress(1).
    ## Returns the directory containing the unpacked result, or None if the result is
    ## smaller than 'compresslimit' bytes.
    tmpdir = unpacksetup(tempdir)
    ## if UNPACK_TEMPDIR is set to for example a ramdisk use that instead.
    if compress_tmpdir != None:
        tmpfile = tempfile.mkstemp(dir=compress_tmpdir)
        os.fdopen(tmpfile[0]).close()
        outtmpfile = tempfile.mkstemp(dir=compress_tmpdir)
        unpackFile(filename, offset, tmpfile[1], compress_tmpdir, blacklist=blacklist)
    else:
        tmpfile = tempfile.mkstemp(dir=tmpdir)
        os.fdopen(tmpfile[0]).close()
        outtmpfile = tempfile.mkstemp(dir=tmpdir)
        unpackFile(filename, offset, tmpfile[1], tmpdir, blacklist=blacklist)
    ## decompress the carved file straight into the output temporary file
    p = subprocess.Popen(['uncompress', '-c', tmpfile[1]], stdout=outtmpfile[0], stderr=subprocess.PIPE, close_fds=True)
    (stanout, stanerr) = p.communicate()
    os.fdopen(outtmpfile[0]).close()
    os.unlink(tmpfile[1])
    ## too little data was decompressed, so treat this as a false positive and clean up
    if os.stat(outtmpfile[1]).st_size < compresslimit:
        os.unlink(outtmpfile[1])
        if tempdir == None:
            os.rmdir(tmpdir)
        return None
    if compress_tmpdir != None:
        ## create the directory and move the compressed file
        try:
            os.makedirs(tmpdir)
        except OSError, e:
            pass
        shutil.move(outtmpfile[1], tmpdir)
    return tmpdir
def searchUnpackKnownBzip2(filename, tempdir=None, scanenv={}, debug=False):
    ## Fast path for a file that is expected to be one single bzip2 stream.
    ## Verify the magic first, then delegate to searchUnpackBzip2 at offset 0 and
    ## only accept the result when exactly one stream was unpacked and it spans
    ## the whole file.
    scanfile = open(filename, 'rb')
    scanfile.seek(0)
    magic = scanfile.read(3)
    scanfile.close()
    if magic != fsmagic.fsmagic['bz2']:
        return ([], [], [], {})
    (diroffsets, blacklist, newtags, hints) = searchUnpackBzip2(filename, tempdir, [], {'bz2': [0]}, scanenv, debug)
    if diroffsets == []:
        return ([], [], [], {})
    ## check that the single unpacked stream covers the file from start to end
    wholefile = len(diroffsets) == 1
    if wholefile:
        (dirpath, startoffset, unpackedsize) = diroffsets[0]
        wholefile = startoffset == 0 and unpackedsize == os.stat(filename).st_size
    if wholefile:
        return (diroffsets, blacklist, newtags, hints)
    ## partial or multiple unpacks: remove everything and report nothing
    for (dirpath, startoffset, unpackedsize) in diroffsets:
        try:
            shutil.rmtree(dirpath)
        except:
            pass
    return ([], [], [], {})
## search and unpack bzip2 compressed files
## For every candidate offset: sanity-check the bzip2 header, trial-decompress up to
## 10 MB in memory, and either write the result out directly or keep feeding the
## decompressor chunk by chunk until the stream ends.
def searchUnpackBzip2(filename, tempdir=None, blacklist=[], offsets={}, scanenv={}, debug=False):
    hints = {}
    if not 'bz2' in offsets:
        return ([], blacklist, [], hints)
    if offsets['bz2'] == []:
        return ([], blacklist, [], hints)
    diroffsets = []
    counter = 1
    newtags = []
    ## chunk size (bytes) read per decompression round
    bzip2datasize = 10000000
    for offset in offsets['bz2']:
        blacklistoffset = extractor.inblacklist(offset, blacklist)
        if blacklistoffset != None:
            continue
        ## sanity check: block size is byte number 4 in the header
        bzfile = open(filename, 'rb')
        bzfile.seek(offset + 3)
        blocksizebyte = bzfile.read(1)
        bzfile.close()
        ## the block size byte must be an ASCII digit '1'-'9'
        try:
            blocksizebyte = int(blocksizebyte)
        except:
            continue
        if blocksizebyte == 0:
            continue
        ## some more sanity checks based on bzip2's decompress.c
        bzfile = open(filename, 'rb')
        bzfile.seek(offset + 4)
        blockbytes = bzfile.read(6)
        bzfile.close()
        ## first check if this is a stream or a regular file
        if blockbytes[0] != '\x17':
            ## not a stream, so do some more checks
            ## the compressed block magic is 0x314159265359 (pi in BCD)
            if blockbytes[0] != '\x31':
                continue
            if blockbytes[1] != '\x41':
                continue
            if blockbytes[2] != '\x59':
                continue
            if blockbytes[3] != '\x26':
                continue
            if blockbytes[4] != '\x53':
                continue
            if blockbytes[5] != '\x59':
                continue
        ## extra sanity check: try to uncompress a few blocks of data
        bzfile = open(filename, 'rb')
        bzfile.seek(offset)
        bzip2data = bzfile.read(bzip2datasize)
        bzfile.close()
        bzip2decompressobj = bz2.BZ2Decompressor()
        bzip2size = 0
        try:
            uncompresseddata = bzip2decompressobj.decompress(bzip2data)
        except Exception, e:
            continue
        ## unused_data is non-empty once the end of the bzip2 stream has been seen,
        ## so the compressed size can be computed directly
        if bzip2decompressobj.unused_data != "":
            bzip2size = len(bzip2data) - len(bzip2decompressobj.unused_data)
        else:
            if len(uncompresseddata) != 0:
                if len(bzip2data) == os.stat(filename).st_size:
                    bzip2size = len(bzip2data)
        tmpdir = dirsetup(tempdir, filename, "bzip2", counter)
        if bzip2size != 0:
            ## the whole stream fitted in the first chunk: write the result out
            tmpfile = tempfile.mkstemp(dir=tmpdir)
            os.fdopen(tmpfile[0]).close()
            outbzip2file = open(tmpfile[1], 'wb')
            outbzip2file.write(uncompresseddata)
            outbzip2file.flush()
            outbzip2file.close()
            diroffsets.append((tmpdir, offset, bzip2size))
            blacklist.append((offset, offset + bzip2size))
            if offset == 0 and (bzip2size == os.stat(filename).st_size):
                ## rename the file, like bunzip does
                if filename.lower().endswith('.bz2'):
                    filenamenoext = os.path.basename(filename)[:-4]
                    if len(filenamenoext) > 0:
                        bz2path = os.path.join(tmpdir, filenamenoext)
                        if not os.path.exists(bz2path):
                            shutil.move(tmpfile[1], bz2path)
                ## slightly different for tbz2
                elif filename.lower().endswith('.tbz2'):
                    filenamenoext = os.path.basename(filename)[:-5] + ".tar"
                    if len(filenamenoext) > 4:
                        bz2path = os.path.join(tmpdir, filenamenoext)
                        if not os.path.exists(bz2path):
                            shutil.move(tmpfile[1], bz2path)
                newtags.append('compressed')
                newtags.append('bzip2')
            counter = counter + 1
        else:
            ## try to load more data into the bzip2 decompression object
            localoffset = offset + bzip2datasize
            bzfile = open(filename, 'rb')
            bzfile.seek(localoffset)
            bzip2data = bzfile.read(bzip2datasize)
            unpackingerror = False
            bytesread = bzip2datasize
            unpackedbytessize = len(uncompresseddata)
            tmpfile = tempfile.mkstemp(dir=tmpdir)
            os.fdopen(tmpfile[0]).close()
            outbzip2file = open(tmpfile[1], 'wb')
            outbzip2file.write(uncompresseddata)
            outbzip2file.flush()
            ## keep feeding chunks to the decompressor until the stream ends,
            ## the file is exhausted, or decompression fails
            while bzip2data != "":
                try:
                    uncompresseddata = bzip2decompressobj.decompress(bzip2data)
                    outbzip2file.write(uncompresseddata)
                    outbzip2file.flush()
                    unpackedbytessize += len(uncompresseddata)
                except Exception, e:
                    unpackingerror = True
                    break
                ## end of the bzip2 compressed data is reached
                if bzip2decompressobj.unused_data != "":
                    bytesread += len(bzip2data) - len(bzip2decompressobj.unused_data)
                    break
                bytesread += len(bzip2data)
                bzip2data = bzfile.read(bzip2datasize)
            bzfile.close()
            outbzip2file.close()
            ## NOTE(review): when unpackingerror is set but unpackedbytessize != 0, the
            ## branches below still register / re-remove the already-deleted tmpfile and
            ## tmpdir — confirm whether this path should 'continue' after cleanup instead.
            if unpackingerror:
                ## cleanup
                os.unlink(tmpfile[1])
                os.rmdir(tmpdir)
            if unpackedbytessize != 0:
                diroffsets.append((tmpdir, offset, bytesread))
                blacklist.append((offset, offset + bytesread))
                if offset == 0 and (bytesread == os.stat(filename).st_size):
                    ## rename the file, like bunzip does
                    if filename.lower().endswith('.bz2'):
                        filenamenoext = os.path.basename(filename)[:-4]
                        bz2path = os.path.join(tmpdir, filenamenoext)
                        if not os.path.exists(bz2path):
                            shutil.move(tmpfile[1], bz2path)
                    ## slightly different for tbz2
                    elif filename.lower().endswith('.tbz2'):
                        filenamenoext = os.path.basename(filename)[:-5] + ".tar"
                        bz2path = os.path.join(tmpdir, filenamenoext)
                        if not os.path.exists(bz2path):
                            shutil.move(tmpfile[1], bz2path)
                    newtags.append('compressed')
                    newtags.append('bzip2')
                counter = counter + 1
            else:
                ## cleanup
                os.unlink(tmpfile[1])
                os.rmdir(tmpdir)
    return (diroffsets, blacklist, newtags, hints)
def searchUnpackRZIP(filename, tempdir=None, blacklist=[], offsets={}, scanenv={}, debug=False):
    ## Scan for rzip data. An rzip archive is only accepted when it starts at
    ## offset 0 of the file; otherwise nothing is reported.
    hints = {}
    if not 'rzip' in offsets:
        return ([], blacklist, [], hints)
    if offsets['rzip'] == []:
        return ([], blacklist, [], hints)
    if offsets['rzip'][0] != 0:
        return ([], blacklist, [], hints)
    if os.stat(filename).st_size < 10:
        ## too small to even hold the 10 byte rzip header
        return ([], blacklist, [], hints)
    diroffsets = []
    tags = []
    offset = 0
    ## the uncompressed size is stored big endian in bytes 6:10 of the header
    headerfile = open(filename, 'rb')
    headerfile.seek(0)
    rzipheader = headerfile.read(10)
    headerfile.close()
    rzipsize = struct.unpack('>L', rzipheader[6:10])[0]
    if extractor.inblacklist(offset, blacklist) != None:
        return (diroffsets, blacklist, tags, hints)
    tmpdir = dirsetup(tempdir, filename, "rzip", 1)
    rzipres = unpackRZIP(filename, offset, rzipsize, tmpdir)
    if rzipres == None:
        ## nothing was unpacked, so remove the scratch directory again
        os.rmdir(tmpdir)
    else:
        diroffsets.append((rzipres, offset, 0))
        ## TODO: compute the size of the compressed data so the blacklist can be
        ## extended and the file can be tagged as compressed/rzip
    return (diroffsets, blacklist, tags, hints)
def unpackRZIP(filename, offset, rzipsize, tempdir=None):
    ## Carve the data at 'offset' into a temporary .rz file and run 'rzip -d' on it.
    ## Returns the directory holding the unpacked result, or None when decompression
    ## fails or the result does not have the size recorded in the rzip header.
    tmpdir = unpacksetup(tempdir)
    (rzfd, rzname) = tempfile.mkstemp(dir=tempdir, suffix='.rz')
    os.fdopen(rzfd).close()
    unpackFile(filename, offset, rzname, tmpdir)
    proc = subprocess.Popen(['rzip', '-d', rzname], stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
    proc.communicate()
    if proc.returncode != 0:
        os.unlink(rzname)
        return None
    ## rzip removes the .rz input and writes the result under the name minus the suffix
    unpackedname = rzname[:-3]
    if os.stat(unpackedname).st_size != rzipsize:
        os.unlink(unpackedname)
        return None
    return tmpdir
def searchUnpackAndroidSparse(filename, tempdir=None, blacklist=[], offsets={}, scanenv={}, debug=False):
    ## Scan for Android sparse image files and unpack every candidate found.
    hints = {}
    if not 'android-sparse' in offsets:
        return ([], blacklist, [], hints)
    if offsets['android-sparse'] == []:
        return ([], blacklist, [], hints)
    diroffsets = []
    tags = []
    counter = 1
    for offset in offsets['android-sparse']:
        if extractor.inblacklist(offset, blacklist) != None:
            continue
        ## the major version (little endian, bytes 4:6 of the header) has to be 1
        sparsefile = open(filename, 'rb')
        sparsefile.seek(offset+4)
        versiondata = sparsefile.read(2)
        sparsefile.close()
        if len(versiondata) != 2:
            break
        if struct.unpack('<H', versiondata)[0] != 1:
            continue
        tmpdir = dirsetup(tempdir, filename, "android-sparse", counter)
        res = unpackAndroidSparse(filename, offset, tmpdir)
        if res == None:
            ## nothing could be unpacked, so clean up the scratch directory
            os.rmdir(tmpdir)
            continue
        (sparsesize, sparsedir) = res
        diroffsets.append((sparsedir, offset, sparsesize))
        blacklist.append((offset, offset + sparsesize))
        counter = counter + 1
    return (diroffsets, blacklist, tags, hints)
def unpackAndroidSparse(filename, offset, tempdir=None):
## checks to find the right size
## First check the size of the header. If it has some
## bizarre value (like bigger than the file it can unpack)
## it is not a valid android sparse file file system
sparsefile = open(filename, 'rb')
sparsefile.seek(offset)
sparsedata = sparsefile.read(28)
sparsefile.close()
if len(sparsedata) != 28:
return
## from sparse_format.h, everything little endian
## 0 - 3 : magic
## 4 - 5 : major version
## 6 - 7 : minor version
## 8 - 9 : file header size
## 10 - 11: chunk header size (should be 12 bytes)
## 12 - 15: block size
## 16 - 19: total blocks in original image
## 20 - 23: total chunks
## 24 - 27: CRC checksum
blocksize = struct.unpack('<L', sparsedata[12:16])[0]
chunkcount = struct.unpack('<L', sparsedata[20:24])[0]
## now reopen the file and read each chunk header.
sparsefile = open(filename, 'rb')
## keep a counter to see how many bytes were read. After unpacking
## this will indicate the size of the sparse file
seekctr = offset + 28
for i in xrange(0,chunkcount):
sparsefile.seek(seekctr)
## read the chunk header
sparsedata = sparsefile.read(12)
## 0 - 1 : chunk type
## 2 - 3 : unused
## 4 - 7 : chunk size (for raw)
## 8 - 12 : total size
chunktype = sparsedata[0:2]
if chunktype == '\xc1\xca':
## RAW
chunksize = struct.unpack('<L', sparsedata[4:8])[0]
datasize = chunksize * blocksize
elif chunktype == '\xc2\xca':
## FILL
datasize = 4
elif chunktype == '\xc3\xca':
## DON'T CARE
datasize = 0
elif chunktype == '\xc4\xca':
## CRC
datasize = 4
else:
## dunno what's happening here, so exit
sparsefile.close()
return None
seekctr = seekctr + 12 + datasize
sparsefile.close()
tmpdir = unpacksetup(tempdir)
tmpfile = tempfile.mkstemp(dir=tmpdir)
os.fdopen(tmpfile[0]).close()
unpackFile(filename, | |
"%s: %s" % (prefix, self.name()))
else:
_gdb_write(indent, "%s:" % (prefix, self.name()))
for (key, value) in self.values():
_gdb_write(indent+1, "%s: %s" % (key, str(value)))
class GdbGstEvent:
    """gdb wrapper around a GstEvent value in the debugged process."""
    def __init__(self, val):
        # View the pointer through the private GstEventImpl layout so the
        # attached GstStructure is reachable.
        self.val = val.cast(gdb.lookup_type("GstEventImpl").pointer())
    @save_memory_access("<inaccessible memory>")
    def typestr(self):
        """Return the event's registered type name (e.g. 'caps'), or None."""
        t = self.val["event"]["type"]
        # Walk GStreamer's static event_quarks table (terminated by a NULL
        # name) to map the numeric type to its name.
        (event_quarks, _) = gdb.lookup_symbol("event_quarks")
        event_quarks = event_quarks.value()
        i = 0
        while event_quarks[i]["name"] != 0:
            if t == event_quarks[i]["type"]:
                return event_quarks[i]["name"].string()
            i += 1
        return None
    def structure(self):
        """Return this event's GstStructure wrapped for gdb access."""
        return GdbGstStructure(self.val["structure"])
    @save_memory_access_print("<inaccessible memory>")
    def print(self, indent):
        """Pretty-print the event at the given indent level.

        Well-known event types (caps, stream-start, segment, tag) get a
        specialized rendering; any other type dumps its raw structure.
        """
        typestr = self.typestr()
        if typestr == "caps":
            caps = GdbGstCaps(self.structure().value("caps").value())
            caps.print(indent, "caps:")
        elif typestr == "stream-start":
            stream_id = self.structure().value("stream-id").value()
            _gdb_write(indent, "stream-start:")
            _gdb_write(indent + 1, "stream-id: %s" % stream_id.string())
        elif typestr == "segment":
            segment = self.structure().value("segment").value()
            # e.g. GST_FORMAT_TIME -> "time"
            fmt = str(segment["format"]).split("_")[-1].lower()
            _gdb_write(indent, "segment: %s" % fmt)
            rate = float(segment["rate"])
            applied_rate = float(segment["applied_rate"])
            if applied_rate != 1.0:
                applied = "(applied rate: %g)" % applied_rate
            else:
                applied = ""
            _gdb_write(indent+1, "rate: %g%s" % (rate, applied))
        elif typestr == "tag":
            struct = self.structure()
            # skip 'GstTagList-'
            name = struct.name()[11:]
            t = gdb.lookup_type("GstTagListImpl").pointer()
            s = struct.value("taglist").value().cast(t)["structure"]
            structure = GdbGstStructure(s)
            _gdb_write(indent, "tag: %s" % name)
            for (key, value) in structure.values():
                _gdb_write(indent+1, "%s: %s" % (key, str(value)))
        else:
            self.structure().print(indent, typestr)
class GdbGstObject:
    """Base wrapper for a GstObject value living in the inferior."""

    def __init__(self, klass, val):
        self.val = val.cast(klass)

    @save_memory_access("<inaccessible memory>")
    def name(self):
        """Return this object's GstObject name."""
        as_object = self.val.cast(gdb.lookup_type("GstObject").pointer())
        return as_object["name"].string()

    def dot_name(self):
        """Return a graphviz-safe identifier built from name and address."""
        address = self.val.cast(gdb.lookup_type("void").pointer())
        raw = "%s_%s" % (self.name(), str(address))
        return re.sub('[^a-zA-Z0-9<>]', '_', raw)

    def parent(self):
        """Return the raw parent pointer of this GstObject."""
        as_object = self.val.cast(gdb.lookup_type("GstObject").pointer())
        return as_object["parent"]

    def parent_element(self):
        """Return the parent wrapped as a GdbGstElement, or None if the
        parent is NULL or not a GstElement."""
        candidate = self.parent()
        if candidate == 0 or not g_inherits_type(candidate, "GstElement"):
            return None
        cast_parent = candidate.cast(gdb.lookup_type("GstElement").pointer())
        return GdbGstElement(cast_parent)
class GdbGstPad(GdbGstObject):
    """gdb wrapper for a GstPad, with pretty-print and graphviz helpers."""
    def __init__(self, val):
        gdb_type = gdb.lookup_type("GstPad").pointer()
        super(GdbGstPad, self).__init__(gdb_type, val)
    def __eq__(self, other):
        return self.val == other.val
    def is_linked(self):
        """Return True when this pad has a peer."""
        return long(self.val["peer"]) != 0
    def peer(self):
        """Return the peer pad (only meaningful when is_linked())."""
        return GdbGstPad(self.val["peer"])
    def direction(self):
        """Return the direction enum as a string, e.g. 'GST_PAD_SRC'."""
        return str(self.val["direction"])
    def events(self):
        """Yield the sticky events stored on this pad, if any."""
        if long(self.val["priv"]) == 0:
            return
        array = self.val["priv"]["events"]
        for ev in _g_array_iter(array, gdb.lookup_type("PadEvent")):
            yield GdbGstEvent(ev["event"])
    def caps(self):
        """Return caps from the sticky caps event, or None if not present."""
        for ev in self.events():
            if ev.typestr() != "caps":
                continue
            return GdbGstCaps(ev.structure().value("caps").value())
        return None
    def template_caps(self):
        """Return the pad template's caps, or None when there is no template."""
        tmp = self.val["padtemplate"]
        return GdbGstCaps(tmp["caps"]) if int(tmp) != 0 else None
    def mode(self):
        """Return 'push' or 'pull', or None when the pad is not activated."""
        m = str(self.val["mode"]).split("_")[-1].lower()
        if m in ("push", "pull"):
            return m
        return None
    def pad_type(self):
        """Return a display name such as 'SrcPad' or 'SinkGhostPad'."""
        s = str(self.val["direction"]).split("_")[-1].capitalize()
        if g_inherits_type(self.val, "GstGhostPad"):
            s += "Ghost"
        return s + "Pad"
    @save_memory_access_print("Pad(<inaccessible memory>)")
    def print(self, indent):
        """Print this pad and its sticky events at the given indent."""
        m = ", " + self.mode() if self.mode() else ""
        _gdb_write(indent, "%s(%s%s) {" % (self.pad_type(), self.name(), m))
        first = True
        for ev in self.events():
            if first:
                _gdb_write(indent+1, "events:")
                first = False
            ev.print(indent+2)
        _gdb_write(indent, "}")
    def _dot(self, color, pname, indent):
        """Render this pad as a graphviz node string.

        color is the fill color; pname is the owning element's name prefix.
        """
        spc = " " * indent
        activation_mode = "-><"
        style = "filled,solid"
        template = self.val["padtemplate"]
        if template != 0:
            presence = template["presence"]
            if str(presence) == "GST_PAD_SOMETIMES":
                style = "filled,dotted"
            if str(presence) == "GST_PAD_REQUEST":
                style = "filled,dashed"
        task_mode = ""
        task = self.val["task"]
        if long(task) != 0:
            task_state = int(task["state"])
            if task_state == 0:  # started
                task_mode = "[T]"
            if task_state == 2:  # paused
                task_mode = "[t]"
        f = int(self.val["object"]["flags"])
        flags = "B" if f & 16 else "b"  # GST_PAD_FLAG_BLOCKED
        flags += "F" if f & 32 else "f"  # GST_PAD_FLAG_FLUSHING
        # BUG FIX: GST_PAD_FLAG_BLOCKING is GST_OBJECT_FLAG_LAST << 3 == 128;
        # the previous code re-tested bit 16 (BLOCKED), so the blocking flag
        # was never reported correctly.
        flags += "B" if f & 128 else "b"  # GST_PAD_FLAG_BLOCKING
        s = "%s %s_%s [color=black, fillcolor=\"%s\", " \
            "label=\"%s%s\\n[%c][%s]%s\", height=\"0.2\", style=\"%s\"];\n" % \
            (spc, pname, self.dot_name(), color, self.name(), "",
             activation_mode[int(self.val["mode"])], flags, task_mode, style)
        return s
    def dot(self, indent):
        """Render this pad (and, for ghost pads, its internal proxy pad plus
        the dashed proxy edge) as graphviz markup."""
        spc = " " * indent
        direction = self.direction()
        element = self.parent_element()
        ename = element.dot_name() if element else ""
        s = ""
        if g_inherits_type(self.val, "GstGhostPad"):
            if direction == "GST_PAD_SRC":
                color = "#ffdddd"
            elif direction == "GST_PAD_SINK":
                color = "#ddddff"
            else:
                color = "#ffffff"
            t = gdb.lookup_type("GstProxyPad").pointer()
            other = GdbGstPad(self.val.cast(t)["priv"]["internal"])
            if other:
                s += other._dot(color, "", indent)
                pname = self.dot_name()
                other_element = other.parent_element()
                other_ename = other_element.dot_name() if other_element else ""
                other_pname = other.dot_name()
                if direction == "GST_PAD_SRC":
                    s += "%s%s_%s -> %s_%s [style=dashed, minlen=0]\n" % \
                        (spc, other_ename, other_pname, ename, pname)
                else:
                    s += "%s%s_%s -> %s_%s [style=dashed, minlen=0]\n" % \
                        (spc, ename, pname, other_ename, other_pname)
        else:
            if direction == "GST_PAD_SRC":
                color = "#ffaaaa"
            elif direction == "GST_PAD_SINK":
                color = "#aaaaff"
            else:
                color = "#cccccc"
            s += self._dot(color, ename, indent)
        return s
    def link_dot(self, indent, element):
        """Render the edge from this pad to its peer, labelled with caps.

        Falls back to template caps when no sticky caps event exists; when
        both ends have (different) caps they become tail/head labels.
        """
        spc = " " * indent
        peer = self.peer()
        peer_element = peer.parent_element()
        caps = self.caps()
        if not caps:
            caps = self.template_caps()
        peer_caps = peer.caps()
        if not peer_caps:
            peer_caps = peer.template_caps()
        pname = self.dot_name()
        ename = element.dot_name() if element else ""
        peer_pname = peer.dot_name()
        peer_ename = peer_element.dot_name() if peer_element else ""
        if caps and peer_caps and caps == peer_caps:
            s = "%s%s_%s -> %s_%s [label=\"%s\"]\n" % \
                (spc, ename, pname, peer_ename, peer_pname, caps.dot())
        elif caps and peer_caps and caps != peer_caps:
            s = "%s%s_%s -> %s_%s [labeldistance=\"10\", labelangle=\"0\", " \
                % (spc, ename, pname, peer_ename, peer_pname)
            s += "label=\"" + " "*50 + "\", "
            if self.direction() == "GST_PAD_SRC":
                media_src = caps.dot()
                media_dst = peer_caps.dot()
            else:
                media_src = peer_caps.dot()
                media_dst = caps.dot()
            s += "taillabel=\"%s\", headlabel=\"%s\"]\n" % \
                (media_src, media_dst)
        else:
            s = "%s%s_%s -> %s_%s\n" % \
                (spc, ename, pname, peer_ename, peer_pname)
        return s
class GdbGstElement(GdbGstObject):
def __init__(self, val):
gdb_type = gdb.lookup_type("GstElement").pointer()
super(GdbGstElement, self).__init__(gdb_type, val)
self.is_bin = gst_is_bin(self.val)
def __eq__(self, other):
return self.val == other.val
def children(self):
if not self.is_bin:
return
b = self.val.cast(gdb.lookup_type("GstBin").pointer())
link = b["children"]
while link != 0:
yield GdbGstElement(link["data"])
link = link["next"]
def has_pads(self, pad_group="pads"):
return self.val[pad_group] != 0
def pads(self, pad_group="pads"):
link = self.val[pad_group]
while link != 0:
yield GdbGstPad(link["data"])
link = link["next"]
def _state_dot(self):
icons = "~0-=>"
current = int(self.val["current_state"])
pending = int(self.val["pending_state"])
if pending == 0:
# GST_ELEMENT_FLAG_LOCKED_STATE == 16
locked = (int(self.val["object"]["flags"]) & 16) != 0
return "\\n[%c]%s" % (icons[current], "(locked)" if locked else "")
return "\\n[%c] -> [%c]" % (icons[current], icons[pending])
@save_memory_access_print("Element(<inaccessible memory>)")
def print(self, indent):
_gdb_write(indent, "%s(%s) {" %
(g_type_name_from_instance(self.val), self.name()))
for p in self.pads():
p.print(indent+2)
_gdb_write(indent, "}")
    def _dot(self, indent=0):
        """Render this element as a graphviz cluster: its pads, an invisible
        sink->src ordering edge, a fill color reflecting its kind, the
        recursive children for bins, and finally the pad link edges."""
        spc = " " * indent
        s = "%ssubgraph cluster_%s {\n" % (spc, self.dot_name())
        s += "%s  fontname=\"Bitstream Vera Sans\";\n" % spc
        s += "%s  fontsize=\"8\";\n" % spc
        s += "%s  style=\"filled,rounded\";\n" % spc
        s += "%s  color=black;\n" % spc
        s += "%s  label=\"%s\\n%s%s%s\";\n" % \
            (spc, g_type_name_from_instance(self.val), self.name(),
             self._state_dot(), "")
        sink_name = None
        if self.has_pads("sinkpads"):
            (ss, sink_name) = self._dot_pads(indent+1, "sinkpads",
                                             self.dot_name() + "_sink")
            s += ss
        src_name = None
        if self.has_pads("srcpads"):
            (ss, src_name) = self._dot_pads(indent+1, "srcpads",
                                            self.dot_name() + "_src")
            s += ss
        # invisible edge keeps sink pads left of src pads in the layout
        if sink_name and src_name:
            name = self.dot_name()
            s += "%s  %s_%s -> %s_%s [style=\"invis\"];\n" % \
                (spc, name, sink_name, name, src_name)
        if gst_is_bin(self.val):
            s += "%s  fillcolor=\"#ffffff\";\n" % spc
            s += self.dot(indent+1)
        else:
            # color by kind: source (red), sink (blue), filter (green)
            if src_name and not sink_name:
                s += "%s  fillcolor=\"#ffaaaa\";\n" % spc
            elif not src_name and sink_name:
                s += "%s  fillcolor=\"#aaaaff\";\n" % spc
            elif src_name and sink_name:
                s += "%s  fillcolor=\"#aaffaa\";\n" % spc
            else:
                s += "%s  fillcolor=\"#ffffff\";\n" % spc
        s += "%s}\n\n" % spc
        for p in self.pads():
            if not p.is_linked():
                continue
            if p.direction() == "GST_PAD_SRC":
                s += p.link_dot(indent, self)
            else:
                # sink side only emits the edge for proxy-pad internals so
                # each link is drawn exactly once
                pp = p.peer()
                if not g_inherits_type(pp.val, "GstGhostPad") and \
                   g_inherits_type(pp.val, "GstProxyPad"):
                    s += pp.link_dot(indent, None)
        return s
def _dot_pads(self, indent, pad_group, cluster_name):
spc = " " * indent
s = "%ssubgraph cluster_%s {\n" % (spc, cluster_name)
s += "%s label=\"\";\n" % spc
s += "%s style=\"invis\";\n" % spc
name = None
for p in self.pads(pad_group):
s += p.dot(indent)
if not name:
name = p.dot_name()
s += "%s}\n\n" % spc
return(s, name)
def dot(self, indent):
s = ""
for child in self.children():
try:
s += child._dot(indent)
except gdb.MemoryError:
gdb.write("warning: inaccessible memory in element 0x%x\n" %
long(child.val))
return s
def pipeline_dot(self):
t = g_type_name_from_instance(self.val)
s = "digraph pipeline {\n"
s += " rankdir=LR;\n"
s += " fontname=\"sans\";\n"
s += " fontsize=\"10\";\n"
s += " labelloc=t;\n"
s += " nodesep=.1;\n"
s += " ranksep=.2;\n"
s += " label=\"<%s>\\n%s%s%s\";\n" % (t, self.name(), "", "")
s += " node [style=\"filled,rounded\", shape=box, fontsize=\"9\", " \
"fontname=\"sans\", margin=\"0.0,0.0\"];\n"
s += " edge [labelfontsize=\"6\", fontsize=\"9\", " \
"fontname=\"monospace\"];\n"
s += " \n"
s += " legend [\n"
s += " pos=\"0,0!\",\n"
s += " margin=\"0.05,0.05\",\n"
s += " style=\"filled\",\n"
s | |
resource.
# aggregation won't have resource files when copying a resource
if aggr.files.all().count() > 0:
parent_aggr = aggr.get_parent()
if parent_aggr is not None:
parent_aggr.update_coverage()
return element
    def update_element(self, element_model_name, element_id, **kwargs):
        """Update an existing metadata element of this aggregation.

        :param element_model_name: metadata element model name (e.g. 'coverage')
        :param element_id: id of the element instance to update
        :param kwargs: field values to set on the element
        :raises ValidationError: if the owning resource is published
        """
        resource = self.logical_file.resource
        if resource.raccess.published:
            raise ValidationError("Aggregation metadata editing is not allowed for a published resource")
        model_type = self._get_metadata_element_model_type(element_model_name)
        kwargs['content_object'] = self
        model_type.model_class().update(element_id, **kwargs)
        # mark the aggregation metadata as changed so it gets re-serialized
        self.is_dirty = True
        self.save()
        if element_model_name.lower() == "coverage":
            element = model_type.model_class().objects.get(id=element_id)
            # coverage changes propagate up to the resource level
            resource.update_coverage()
            # if the aggregation (logical file) for which coverage data is updated
            # has a parent aggregation then coverage needs to be updated for that
            # parent aggregation
            aggr = element.metadata.logical_file
            parent_aggr = aggr.get_parent()
            if parent_aggr is not None:
                parent_aggr.update_coverage()
def delete_element(self, element_model_name, element_id):
resource = self.logical_file.resource
if resource.raccess.published:
raise ValidationError("Aggregation metadata editing is not allowed for a published resource")
model_type = self._get_metadata_element_model_type(element_model_name)
model_type.model_class().remove(element_id)
self.is_dirty = True
self.save()
def _get_metadata_element_model_type(self, element_model_name):
element_model_name = element_model_name.lower()
if not self._is_valid_element(element_model_name):
raise ValidationError("Metadata element type:%s is not one of the "
"supported metadata elements for %s."
% element_model_name, type(self))
unsupported_element_error = "Metadata element type:%s is not supported." \
% element_model_name
try:
model_type = ContentType.objects.get(app_label=self.model_app_label,
model=element_model_name)
except ObjectDoesNotExist:
try:
model_type = ContentType.objects.get(app_label='hs_core',
model=element_model_name)
except ObjectDoesNotExist:
raise ValidationError(unsupported_element_error)
if not issubclass(model_type.model_class(), AbstractMetaDataElement):
raise ValidationError(unsupported_element_error)
return model_type
def _is_valid_element(self, element_name):
allowed_elements = [el.lower() for el in self.get_supported_element_names()]
return element_name.lower() in allowed_elements
@classmethod
def validate_element_data(cls, request, element_name):
"""Subclass must implement this function to validate data for for the
specified metadata element (element_name)"""
raise NotImplementedError
    def get_dataset_name_form(self):
        """Build the (dominate) HTML form for editing the aggregation title.

        The form posts to the update-filetype-dataset-name internal endpoint
        for this specific logical file.
        """
        form_action = "/hsapi/_internal/{0}/{1}/update-filetype-dataset-name/"
        form_action = form_action.format(self.logical_file.__class__.__name__, self.logical_file.id)
        root_div = div()
        dataset_name = self.logical_file.dataset_name if self.logical_file.dataset_name else ""
        with root_div:
            with form(action=form_action, id="filetype-dataset-name",
                      method="post", enctype="multipart/form-data"):
                # rendered later through the Django template engine
                div("{% csrf_token %}")
                with div(cls="form-group"):
                    with div(cls="control-group"):
                        legend('Title')
                        with div(cls="controls"):
                            _input(value=dataset_name,
                                   cls="form-control input-sm textinput textInput",
                                   id="file_dataset_name", maxlength="250",
                                   name="dataset_name", type="text")
                with div(cls="row", style="margin-top:10px;"):
                    with div(cls="col-md-offset-10 col-xs-offset-6 col-md-2 col-xs-6"):
                        # hidden until the user edits the field (client-side JS)
                        button("Save changes", cls="btn btn-primary pull-right btn-form-submit",
                               style="display: none;", type="button")
        return root_div
    def _get_add_key_value_modal_form(self):
        """Build the Bootstrap modal dialog for adding one key/value
        metadata pair to this aggregation."""
        form_action = "/hsapi/_internal/{0}/{1}/update-file-keyvalue-metadata/"
        form_action = form_action.format(self.logical_file.__class__.__name__, self.logical_file.id)
        modal_div = div(cls="modal fade", id="add-keyvalue-filetype-modal", tabindex="-1",
                        role="dialog", aria_labelledby="add-key-value-metadata",
                        aria_hidden="true")
        with modal_div:
            with div(cls="modal-dialog", role="document"):
                with div(cls="modal-content"):
                    with form(action=form_action, id="add-keyvalue-filetype-metadata",
                              method="post", enctype="multipart/form-data"):
                        # rendered later through the Django template engine
                        div("{% csrf_token %}")
                        with div(cls="modal-header"):
                            button("x", type="button", cls="close", data_dismiss="modal",
                                   aria_hidden="true")
                            h4("Add Key/Value Metadata", cls="modal-title",
                               id="add-key-value-metadata")
                        with div(cls="modal-body"):
                            with div(cls="form-group"):
                                with div(cls="control-group"):
                                    label("Key", cls="control-label requiredField",
                                          fr="file_extra_meta_name")
                                    with div(cls="controls"):
                                        _input(cls="form-control input-sm textinput textInput",
                                               id="file_extra_meta_name", maxlength="100",
                                               name="name", type="text")
                                with div(cls="control-group"):
                                    label("Value", cls="control-label requiredField",
                                          fr="file_extra_meta_value")
                                    with div(cls="controls"):
                                        textarea(cls="form-control input-sm textarea",
                                                 cols="40", rows="10",
                                                 id="file_extra_meta_value",
                                                 style="resize: vertical;",
                                                 name="value", type="text")
                        with div(cls="modal-footer"):
                            button("Cancel", type="button", cls="btn btn-default",
                                   data_dismiss="modal")
                            button("OK", type="button", cls="btn btn-primary",
                                   id="btn-confirm-add-metadata")  # TODO: TESTING
        return modal_div
    def _get_edit_key_value_modal_forms(self):
        """Build one Bootstrap edit modal per existing key/value metadata
        pair; each modal posts the original key plus the edited key/value."""
        # TODO: See if can use one modal dialog to edit any pair of key/value
        form_action = "/hsapi/_internal/{0}/{1}/update-file-keyvalue-metadata/"
        form_action = form_action.format(self.logical_file.__class__.__name__, self.logical_file.id)
        # counter distinguishes the per-pair modal/form element ids
        counter = 0
        root_div = div(id="edit-keyvalue-filetype-modals")
        with root_div:
            for k, v in list(self.extra_metadata.items()):
                counter += 1
                modal_div = div(cls="modal fade",
                                id="edit-keyvalue-filetype-modal-{}".format(counter),
                                tabindex="-1",
                                role="dialog", aria_labelledby="edit-key-value-metadata",
                                aria_hidden="true")
                with modal_div:
                    with div(cls="modal-dialog", role="document"):
                        with div(cls="modal-content"):
                            form_id = "edit-keyvalue-filetype-metadata-{}".format(counter)
                            with form(action=form_action,
                                      id=form_id, data_counter="{}".format(counter),
                                      method="post", enctype="multipart/form-data"):
                                # rendered later through the Django template engine
                                div("{% csrf_token %}")
                                with div(cls="modal-header"):
                                    button("x", type="button", cls="close", data_dismiss="modal",
                                           aria_hidden="true")
                                    h4("Update Key/Value Metadata", cls="modal-title",
                                       id="edit-key-value-metadata")
                                with div(cls="modal-body"):
                                    with div(cls="form-group"):
                                        with div(cls="control-group"):
                                            # read-only original key lets the server
                                            # locate the pair being renamed
                                            label("Key(Original)",
                                                  cls="control-label requiredField",
                                                  fr="file_extra_meta_key_original")
                                            with div(cls="controls"):
                                                _input(value=k, readonly="readonly",
                                                       cls="form-control input-sm textinput "
                                                           "textInput",
                                                       id="file_extra_meta_key_original",
                                                       maxlength="100",
                                                       name="key_original", type="text")
                                        with div(cls="control-group"):
                                            label("Key", cls="control-label requiredField",
                                                  fr="file_extra_meta_key")
                                            with div(cls="controls"):
                                                _input(value=k,
                                                       cls="form-control input-sm textinput "
                                                           "textInput",
                                                       id="file_extra_meta_key", maxlength="100",
                                                       name="key", type="text")
                                        with div(cls="control-group"):
                                            label("Value", cls="control-label requiredField",
                                                  fr="file_extra_meta_value")
                                            with div(cls="controls"):
                                                textarea(v,
                                                         cls="form-control input-sm textarea",
                                                         cols="40", rows="10",
                                                         id="file_extra_meta_value",
                                                         style="resize: vertical;",
                                                         name="value", type="text")
                                with div(cls="modal-footer"):
                                    button("Cancel", type="button", cls="btn btn-default",
                                           data_dismiss="modal")
                                    button("OK", id="btn-confirm-edit-key-value",
                                           type="button", cls="btn btn-primary")
        return root_div
    def _get_delete_key_value_modal_forms(self):
        """Build one Bootstrap confirmation modal per existing key/value
        metadata pair for deleting that pair."""
        form_action = "/hsapi/_internal/{0}/{1}/delete-file-keyvalue-metadata/"
        form_action = form_action.format(self.logical_file.__class__.__name__, self.logical_file.id)
        # counter distinguishes the per-pair modal/form element ids
        counter = 0
        root_div = div(id="delete-keyvalue-filetype-modals")
        with root_div:
            for k, v in list(self.extra_metadata.items()):
                counter += 1
                modal_div = div(cls="modal fade",
                                id="delete-keyvalue-filetype-modal-{}".format(counter),
                                tabindex="-1",
                                role="dialog", aria_labelledby="delete-key-value-metadata",
                                aria_hidden="true")
                with modal_div:
                    with div(cls="modal-dialog", role="document"):
                        with div(cls="modal-content"):
                            form_id = "delete-keyvalue-filetype-metadata-{}".format(counter)
                            with form(action=form_action,
                                      id=form_id,
                                      method="post", enctype="multipart/form-data"):
                                # rendered later through the Django template engine
                                div("{% csrf_token %}")
                                with div(cls="modal-header"):
                                    button("x", type="button", cls="close", data_dismiss="modal",
                                           aria_hidden="true")
                                    h4("Confirm to Delete Key/Value Metadata", cls="modal-title",
                                       id="delete-key-value-metadata")
                                with div(cls="modal-body"):
                                    with div(cls="form-group"):
                                        with div(cls="control-group"):
                                            # both fields read-only: the dialog only confirms
                                            label("Key", cls="control-label requiredField",
                                                  fr="file_extra_meta_name")
                                            with div(cls="controls"):
                                                _input(cls="form-control input-sm textinput "
                                                           "textInput", value=k,
                                                       id="file_extra_meta_key", maxlength="100",
                                                       name="key", type="text", readonly="readonly")
                                        with div(cls="control-group"):
                                            label("Value", cls="control-label requiredField",
                                                  fr="file_extra_meta_value")
                                            with div(cls="controls"):
                                                textarea(v, cls="form-control input-sm textarea",
                                                         cols="40", rows="10",
                                                         id="file_extra_meta_value",
                                                         style="resize: vertical;",
                                                         name="value", type="text",
                                                         readonly="readonly")
                                with div(cls="modal-footer"):
                                    button("Cancel", type="button", cls="btn btn-default",
                                           data_dismiss="modal")
                                    button("Delete", type="button", cls="btn btn-danger",
                                           id="btn-delete-key-value")  # TODO: TESTING
        return root_div
@classmethod
def get_preview_data_url(self, resource, folder_path):
# A data preview url pointing to an external data service.
# - subclass needs to override this
return None
class AbstractLogicalFile(models.Model):
    """ base class for HydroShare file types """
    # composite resource that owns this aggregation
    resource = models.ForeignKey('hs_composite_resource.CompositeResource')
    # files associated with this logical file group
    files = GenericRelation(ResourceFile, content_type_field='logical_file_content_type',
                            object_id_field='logical_file_object_id')
    # the dataset name will allow us to identify a logical file group on user interface
    dataset_name = models.CharField(max_length=255, null=True, blank=True)
    # this will be used for dc:type in resourcemetadata.xml
    # each specific logical type needs to reset this field
    # also this data type needs to be defined in in terms.html page
    data_type = "Generic"
    # this field is for logical file to store extra key:value pairs, e.g., currently for .url
    # file to store url value for easy redirection when opening the file
    # for internal use only - won't get recorded in bag and shouldn't be used for storing metadata
    # NOTE(review): default={} uses a shared mutable default; Django's
    # recommended form is default=dict — confirm before changing.
    extra_data = HStoreField(default={})
    class Meta:
        # no table for this base model; concrete file types inherit it
        abstract = True
@classmethod
def initialize(cls, dataset_name, resource):
"""
A helper for creating aggregation. Creates a new aggregation/logical_file type
instance and sets it's dataset field
:param dataset_name: a name/title for the aggregation/logical file
:param resource: an instance of composite resource for which this aggregation being
created
"""
logical_file = cls.create(resource)
logical_file.dataset_name = dataset_name
# at this point the logical file is not created in DB - caller needs to save it to DB
return logical_file
@classmethod
def create_aggregation(cls, dataset_name, resource, res_files=None, new_files_to_upload=None,
folder_path=''):
"""Creates an aggregation
:param dataset_name a value for setting the dataset_name attribute of the new aggregation
:param resource an instance of CompositeResource in which the aggregation to be created
:param res_files a list of resource files that need to be part of the new aggregation
:param new_files_to_upload a list of files that needs to be uploaded to the resource as
part of creating the new aggregation
:param folder_path path of the folder to which files need to be uploaded
:returns a new aggregation
"""
logical_file = cls.initialize(dataset_name, resource)
logical_file.save()
if res_files is None:
res_files = []
# make all existing resource files part of the aggregation
for res_file in res_files:
logical_file.add_resource_file(res_file)
if new_files_to_upload is None:
new_files_to_upload = []
# add all new files to the resource
for f in new_files_to_upload:
uploaded_file = UploadedFile(file=open(f, 'rb'), name=os.path.basename(f))
new_res_file = add_file_to_resource(
resource, uploaded_file, folder=folder_path, add_to_aggregation=False
)
logical_file.add_resource_file(new_res_file)
return logical_file
@classmethod
def get_allowed_uploaded_file_types(cls):
# any file can be part of this logical file group - subclass needs to override this
return [".*"]
@classmethod
def get_main_file_type(cls):
# a single file extension in the group which is considered the main file
# - subclass needs to override this
return None
@classmethod
def can_set_folder_to_aggregation(cls, resource, dir_path):
"""helper to check if the specified folder *dir_path* can be set to this aggregation type
:param resource: an instance of composite resource in which the folder to be checked
:param dir_path: Resource file directory path (full folder path starting with resource id)
for which this aggregation type to be set
:return True or False
"""
return False
def can_be_deleted_on_file_delete(self):
"""Checks if this logical file (self) can be deleted on delete of any resource file
that is part of this aggregation"""
return True
def can_contain_aggregation(self, aggregation):
"""Checks if the specified *aggregation* can be part of this (self) aggregation
:param aggregation: an aggregation that can be part of this aggregation (aggregation nesting)
"""
return False
    @property
    def get_main_file(self):
        """The aggregation's main resource file, or None.

        With a '.*' main-file type any member file qualifies; otherwise the
        first member whose extension matches get_main_file_type() is used.
        """
        file_extension = self.get_main_file_type()
        if file_extension:
            if file_extension == ".*":
                # any file can serve as main file
                return self.files.all().first()
            else:
                for f in self.files.all():
                    if f.extension == file_extension:
                        return f
        return None
@property
def url(self):
return os.path.join("/", "resource", self.resource.file_path, self.aggregation_name)
@classmethod
def get_allowed_storage_file_types(cls):
# can store any | |
<reponame>jongiddy/dcos-e2e
import re
import subprocess
from . import constants
from .error import *
# functions that don't fall into the VM class
# basic utility function to execute some arguments and return the result
# basic utility function to execute some arguments and return the result
def execute(args):
    """Run *args* as a subprocess and return its captured stdout.

    Raises CommandError when the command exits with a non-zero status.
    """
    try:
        output = subprocess.check_output(args)
    except subprocess.CalledProcessError as err:
        raise CommandError(args, err)
    return output
# Return the current Virtualbox version as a string
def version():
    """Return the installed VirtualBox version string (VBoxManage -v)."""
    cmd = [constants.cmd, "-v"]
    return subprocess.check_output(cmd)
# Public: List available virtual machines, virtual devices and their relevant
# properties. Currently only returns a string representation. Will eventually
# return a more structured format, probably a dictionary
#
# option - the resource to list. Possible options listed in constants.py and the
# VBoxManage manual
# longform - supply the --long switch to VBoxManage. Only relevant for a few
# options
#
# Returns a string representation of the requested option, or a dictionary of
# all of them
def ls(option="all", longform=False):
    """List VirtualBox resources via `VBoxManage list`.

    option   - resource category to list (see constants.lsopts) or "all"
    longform - pass --long to VBoxManage (only meaningful for some options)

    Returns the raw output string for a single option, or a dict mapping
    every known option to its output when option == "all".
    Raises UnknownOptionError for an unrecognized option.
    """
    cmd = [constants.cmd, "list"]
    if longform:
        cmd.append("--long")
    # idiom fix: `option not in ...` instead of `not option in ...`
    if option not in constants.lsopts and option != "all":
        raise UnknownOptionError("list", option)
    if option == "all":
        return {opt: subprocess.check_output(cmd + [opt])
                for opt in constants.lsopts}
    return subprocess.check_output(cmd + [option])
# Public: Create a new virtual with the given options.
#
# name - String that is the name of the new VM
# ostype - String that should be the OS type
# register - Boolean whether or not to register this VM in Virtualbox
# basefolder - String giving the path where to store the VM files
# uuid - Hexadecimal String to be the UUID of the VM
#
# Returns a VM object (eventually) wrapping the VM
def createvm(name, ostype=None, register=False, basefolder=None, uuid=None):
    """Create a new virtual machine with the given options.

    name       - name of the new VM
    ostype     - OS type string
    register   - whether to register this VM in VirtualBox
    basefolder - path where the VM files are stored
    uuid       - hexadecimal string used as the VM's UUID

    Returns the raw VBoxManage output.
    """
    cmd = [constants.cmd, "createvm", "--name", name]
    if ostype:
        cmd.extend(["--ostype", ostype])
    if register:
        cmd.append("--register")
    if basefolder:
        cmd.extend(["--basefolder", basefolder])
    if uuid:
        cmd.extend(["--uuid", uuid])
    # TODO: change to return VM object
    return subprocess.check_output(cmd)
# Public: Register a VM from its XML file
#
# filename - String giving the filepath to the XML file to use
#
# Returns True if the registration succeeded.
# Raises RegistrationError otherwise
def registervm(filename):
    """Register a VM from its XML definition file.

    filename - path to the VM's XML file

    Returns True if the registration succeeded.
    Raises RegistrationError otherwise.
    """
    # FIX: this module-level function was declared with a stray `self`
    # parameter (copy-paste from a method); the documented interface takes
    # only the filename.
    args = [constants.cmd, "registervm", filename]
    try:
        subprocess.check_output(args)
    except subprocess.CalledProcessError as e:
        raise RegistrationError(filename, e)
    return True
# Public: Close a device based on a UUID or a filename
#
# device - one of "dvd", "floppy" or "disk"
# target - UUID or filename
# delete - whether or not to delete the device after closing
#
# Returns True if the registration succeeded.
# Raises NoMediumError if the device type is invalid, CommandError if there's
# some other error
def closemedium(device, target, delete=False):
    """Close a medium identified by UUID or filename.

    device - one of "dvd", "floppy" or "disk"
    target - UUID or filename of the medium
    delete - whether to delete the medium after closing

    Returns True on success.
    Raises NoMediumError for an invalid device type, CommandError for any
    other failure (via execute()).
    """
    # FIX: this module-level function was declared with a stray `self`
    # parameter (copy-paste from a method); the documented interface starts
    # at `device`.
    if device not in constants.closemediumopts:
        raise NoMediumError(device, target, delete)
    args = [constants.cmd, "closemedium", target]
    if delete:
        args.append("--delete")
    execute(args)
    return True
# Public: Class that wraps a Virtualbox VM and lets you interact with it and
# configure. Does not interact with the Guest OS in any way.
class VM(object):
# Public: Initialize a VM object to wrap a particular Virtualbox VM. At
# least one of name or UUID must be provided to locate the VM and the VM
# referenced must already exist.
#
# name - String that is the name of VirtualBox VM.
# uuid - Hexadecimal String that is the UUID of the VirtualBox VM.
#
# Returns a VM object wrapping the VirtualBox VM
# Raises UnknownVMError if VM corresponding to the name or UUID is not found
def __init__(self, name=None, uuid=None):
if name == None and uuid == None:
raise UnknownVMError(name, uuid)
if not name:
argid = uuid
else:
argid = name
try:
args = [constants.cmd, "showvminfo", "--machinereadable", argid]
self.vminfo = subprocess.check_output(args)
except subprocess.CalledProcessError:
raise UnknownVMError(name, uuid)
self.info = self.parse_info(self.vminfo)
self.__name = self.info['name']
self.__uuid = self.info['UUID']
self.started = False
# Public: Parse a raw VM information string as returned by showvminfo and
# turn it into a machine-usable Python dictionary.
#
# rawinfo - String that is the raw information dump from showvminfo
# machine - Boolean saying if the raw information is from using the
# machinereadable switch
# pythonize - Boolean saying if values should be swapped with their Python
# equivalents (True for on, False for off, None for <none>)
#
# Returns a dictionary of information keys to their provided values
def parse_info(self, rawinfo=None,machine=True, pythonize=True):
if not rawinfo:
rawinfo = self.vminfo
info = {}
longkey = None
longval = None
if machine:
sep = "="
else:
sep = ":"
for line in rawinfo.splitlines():
line = line.decode()
parts = line.split(sep)
# Work with multiline key-value pairs
if not machine:
if len(parts) == 1 and not longkey:
longkey = parts[0].strip()
longval = ""
continue
elif len(parts) == 1:
longval + "\n"
longval += line
continue
else:
longkey = None
longval = None
key = parts[0].strip()
value = ':'.join(parts[1:]).strip()
else:
key = parts[0].strip()
value = parts[1].strip(' \"')
if pythonize:
# Turn numbers to ints
try:
value = int(value)
except ValueError:
pass
# Turn on/off/none to True/False/None
if value == "on":
value = True
elif value == "off":
value = False
elif value == "none":
value = None
info[key] = value
return info
# Public: Create a Python dictionary representing the output from the
# showvminfo command. Uses parse_info to parse the raw string and places the
# raw string into a 'string' key in the dictionary.
#
# details - Boolean to use the --details flag
# machine - Boolean to use the --machinereadable flag (easier to parse)
# pythonize - Boolean saying if values should be swapped with their Python
# equivalents (True for on, False for off, None for <none>)
#
# Returns the parsed dictionary representation
def showvminfo(self, details=False, machine=True, pythonize=True):
args = [constants.cmd, "showvminfo"]
if details:
args += ["--details"]
if machine:
args += ["--machinereadable"]
args += [self.__uuid]
info = subprocess.check_output(args)
parsed = self.parse_info(info, machine, pythonize)
parsed['string'] = info
return parsed
# Public: Unregister the VM and optionally delete
#
# delete - Boolean to delete the VM as well as unregister
#
# Returns True if unregistering was successful
# Raises the generic CommandError otherwise
def unregistervm(self, delete=False):
args = [constants.cmd, "unregistervm", self.__uuid]
if delete:
args += ["--delete"]
try:
result = subprocess.check_output(args)
except subprocess.CalledProcessError as e:
raise CommandError(args, e)
return True
# Public: Make modifications to the current VM
#
# option - string option to be modified
# optargs - List of arguments relevant to the option
#
# Returns the output of the modifyvm command
# Raises UnknownOptionError if the option or arguments are incorrect
# Raises CommandError if the modifyvm command fails for some reason
def modifyvm(self, option=None, *optargs):
    """Make a modification to the current VM via `VBoxManage modifyvm`.

    option  -- string option to be modified (must be in constants.modopts)
    optargs -- arguments relevant to the option

    Returns the output of the modifyvm command.
    Raises UnknownOptionError if the option or arguments are incorrect.
    Raises CommandError if the modifyvm command fails for some reason.
    """
    optargs = list(optargs)
    if option not in constants.modopts:
        raise UnknownOptionError("modifyvm", option)
    args = [constants.cmd, "modifyvm", self.name]
    if option in constants.modboolopts:
        # Guard against a missing argument: the original indexed optargs
        # unconditionally and raised IndexError instead of a clear error.
        if not optargs:
            raise UnknownOptionError("modifyvm " + option, None)
        # Bug fixes: the "off" branch previously tested optargs[1] (wrong
        # index), and neither branch emitted the "--<option>" flag, which
        # produced an invalid "VBoxManage modifyvm <vm> on" command line.
        if optargs[0] == True or optargs[0] == "on":
            args += ["--" + option, "on"]
        elif optargs[0] == False or optargs[0] == "off":
            args += ["--" + option, "off"]
        else:
            raise UnknownOptionError("modifyvm " + option, optargs[0])
    elif option in constants.modindexopts:
        # Indexed options embed their index in the flag, e.g. "--natpf1".
        try:
            index = int(optargs[0])
        except ValueError:
            raise UnknownOptionError("modifyvm " + option, optargs[0])
        args += ["--" + option + str(index)] + optargs[1:]
    elif option in constants.modenumopts:
        # Enumerated options accept only a fixed set of values.
        if optargs[0] not in constants.modenumopts[option]:
            raise UnknownOptionError("modifyvm " + option, optargs[0])
        args += ["--" + option, optargs[0]]
    else:
        args += ["--" + option] + optargs
    # Materialize a real list: under Python 3 a map() iterator would be
    # exhausted by check_output, leaving CommandError(args, e) with an
    # empty iterator in its error report.
    args = [str(arg) for arg in args]
    try:
        result = subprocess.check_output(args)
    except subprocess.CalledProcessError as e:
        raise CommandError(args, e)
    return result
def start(self, gui="gui"):
    """Start the VM via `VBoxManage startvm`.

    gui -- frontend type passed to --type (e.g. "gui", "headless")

    Returns the command output; marks the VM as started on success.
    Raises CommandError if the startvm command fails.
    """
    command = [constants.cmd, "startvm", self.name, "--type", gui]
    try:
        output = subprocess.check_output(command)
    except subprocess.CalledProcessError as err:
        raise CommandError(command, err)
    self.started = True
    return output
def controlvm(self,option=None,*optargs):
optargs = list(optargs)
if not option in constants.ctrlopts:
raise UnknownOptionError("controlvm", option)
else:
args = [constants.cmd, "controlvm", self.name]
if option in constants.ctrlboolopts:
if optargs[0] == True or optargs[0] == "on":
args += ["on"]
elif optargs[1] == False or optargs[0] == "off":
args += ["off"]
else:
raise UnknownOptionError("modifyvm " + option, optargs[0])
elif option in constants.ctrlindexopts:
try:
index = int(optargs[0])
except ValueError:
raise UnknownOptionError("modifyvm " + option, optargs[0])
args += ["--" + option + str(index)] + optargs[1:]
# elif option in constants.ctrlenumopts.keys():
# if not optargs[0] in constants.ctrlenumopts[option]:
# raise UnknownOptionError("modifyvm " + | |
0x09, 0x00, 0x00,
0x80, 0x04, 0x00, 0x00, 0x00, 0x82, 0x80, 0x08,
0x80, 0x08, 0x4a, 0x02, 0x00, 0x60, 0x12, 0x00,
0x42, 0x20, 0x0c, 0x18, 0x42, 0x20, 0x08, 0x00,
0x20, 0x01, 0x80, 0x28, 0x08, 0x82, 0x42, 0x20,
0x08, 0x00, 0x00, 0x00, 0x00, 0xbf, 0xda, 0x09,
0x82, 0x00, 0x00, 0x00, 0x00, 0x80, 0x08, 0x18,
0x00, 0x00, 0x11, 0x00, 0x10, 0x01, 0x00, 0x00,
0x00, 0x80, 0x02, 0xa0, 0x82, 0x00, 0x00, 0x00,
0x48, 0x22, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xe0, 0x66, 0x06, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xf0, 0x4f, 0xfe, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xe4, 0x0f, 0x82, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xff, 0xfa, 0x07, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xf0, 0x4f, 0xfe, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xe4, 0x0f,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x4f, 0xfe,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xe4,
0x0f, 0x20, 0x08, 0x20, 0x08, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x14,
0x14, 0x8f, 0x2a, 0x0e, 0x00, 0x48, 0x00, 0x82,
0x00, 0x00, 0x00, 0x80, 0x02, 0x00, 0x00, 0x00,
0x10, 0x02, 0x18, 0x80, 0x08, 0x00, 0x00, 0x14,
0x00, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00,
0x28, 0x00, 0x00, 0x00, 0x00, 0x21, 0xf0, 0x9a,
0x8d, 0x00, 0x82, 0x00, 0x82, 0x82, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x00,
0x14, 0xf0, 0xb1, 0x52, 0x00, 0x82, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x40, 0x01, 0x37, 0x36, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xe4, 0x0f, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xf0, 0x4f, 0xfe, 0x20, 0x08,
0x00, 0x82, 0x82, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x41, 0x01, 0x00, 0xf0, 0x25,
0x87, 0x00, 0x4a, 0x04, 0x48, 0x88, 0x82, 0x00,
0x00, 0x00, 0x2a, 0x02, 0x28, 0x00, 0x00, 0x40,
0x12, 0x02, 0x2b, 0x11, 0xa0, 0x98, 0x80, 0x08,
0x00, 0x15, 0x01, 0x14, 0x00, 0x4a, 0x04, 0x48,
0x00, 0x00, 0x00, 0x00, 0x2a, 0x02, 0x28, 0x00,
0x00, 0x14, 0x24, 0x21, 0x10, 0xf2, 0x2c, 0xa2,
0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28,
0x00, 0x00, 0x00, 0x00, 0x21, 0x80, 0x01, 0x88,
0x00, 0x00, 0x40, 0x01, 0x00, 0x80, 0x04, 0x00,
0x00, 0x00, 0x00, 0x80, 0x02, 0x00, 0x00, 0x14,
0x00, 0x21, 0x00, 0xfd, 0x93, 0x20, 0x88, 0x04,
0x42, 0x00, 0x00, 0x00, 0x00, 0x28, 0x20, 0x02,
0x00, 0x00, 0x10, 0x42, 0x82, 0x01, 0x1a, 0x08,
0x82, 0x00, 0x40, 0x01, 0x11, 0x00, 0x48, 0x20,
0x04, 0x00, 0x00, 0x00, 0x80, 0x02, 0x22, 0x00,
0x00, 0x00, 0x21, 0x24, 0xf0, 0xee, 0x23, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xe4, 0x0f,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x4f, 0xfe,
0x80, 0xb4, 0x11, 0x02, 0x1b, 0x21, 0x42, 0x1b,
0x21, 0x23, 0x94, 0x21, 0x2b, 0x14, 0x19, 0xb2,
0x42, 0x99, 0x21, 0x2b, 0x94, 0x11, 0x2b, 0x94,
0x22, 0x2f, 0x14, 0x29, 0xf2, 0x42, 0x91, 0x22,
0x2d, 0x91, 0x2a, 0xc4, 0x91, 0x2e, 0x42, 0x1c,
0xe8, 0x22, 0x64, 0x11, 0x2e, 0x42, 0x96, 0xe1,
0x22, 0x64, 0x19, 0x2c, 0xa4, 0x29, 0x2c, 0xb4,
0x91, 0xc2, 0x42, 0x1b, 0x29, 0x24, 0x1b, 0x29,
0x42, 0x1b, 0x29, 0x23, 0xb4, 0x81, 0x32, 0x42,
0x19, 0xb2, 0x42, 0x98, 0x21, 0x2b, 0x84, 0x11,
0x2b, 0x84, 0x22, 0x2b, 0x84, 0x22, 0x2b, 0x84,
0x22, 0x29, 0xa8, 0x42, 0x88, 0x2e, 0x42, 0xcb,
0x12, 0x80, 0xb4, 0x11, 0x86, 0xb4, 0x11, 0x22,
0xb4, 0x11, 0x32, 0x42, 0x19, 0xb2, 0x42, 0x91,
0x21, 0x2b, 0x94, 0x19, 0xb2, 0x42, 0x9b, 0x21,
0x2b, 0x94, 0x22, 0x2f, 0x14, 0x29, 0xf2, 0x42,
0x91, 0x22, 0x2d, 0x91, 0x2b, 0x42, 0x2d, 0x91,
0x2e, 0x52, 0x1c, 0xe9, 0x22, 0xec, 0x11, 0xe8,
0x22, 0x64, 0x19, 0x2e, 0x42, 0x96, 0xc1, 0x43,
0x9e, 0x21, 0x2c, 0xb4, 0x91, 0xc2, 0x42, 0x1b,
0x69, 0x2c, 0xb4, 0x91, 0x22, 0xb4, 0x91, 0x32,
0x42, 0x1b, 0x28, 0x23, 0x94, 0x21, 0x2b, 0x84,
0x19, 0xb2, 0x42, 0x9a, 0x21, 0x2b, 0x84, 0x22,
0x2b, 0x84, 0x22, 0x2b, 0x84, 0x22, 0x29, 0xb8,
0x22, 0x94, 0x82, 0x2e, 0x42, 0x5f, 0x26, 0x08,
0x20, 0x88, 0x04, 0x20, 0x08, 0x22, 0x00, 0x00,
0x90, 0x22, 0x00, 0x00, 0x00, 0x00, 0x21, 0x80,
0x01, 0x88, 0x00, 0x00, 0xc0, 0x81, 0x00, 0x00,
0x48, 0x00, 0x00, 0x00, 0x00, 0x22, 0x80, 0x02,
0x00, 0x00, 0x00, 0x50, 0x12, 0xf0, 0x6f, 0xf5,
0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x80, 0x08,
0x00, 0x00, 0x00, 0x80, 0x04, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xbf, 0x8c, 0x09, 0x20, 0x08, 0x00, 0x82, 0x18,
0x82, 0x82, 0x00, 0x00, 0x00, 0x21, 0x22, 0x00,
0x00, 0x00, 0x80, 0x04, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x42, 0x00, 0x80,
0x04, 0x88, 0x88, 0x00, 0x00, 0x11, 0x14, 0xf0,
0x6b, 0x31, 0x00, 0xc2, 0x00, 0x00, 0x20, 0x08,
0x00, 0x20, 0x02, 0x24, 0x00, 0x00, 0x60, 0x24,
0x00, 0x12, 0x48, 0x82, 0x00, 0x00, 0x10, 0x01,
0x00, 0x20, 0x04, 0x00, 0x00, 0x00, 0x42, 0x20,
0x24, 0x22, 0x04, 0x82, 0x82, 0x00, 0x40, 0x12,
0x41, 0x01, 0xbf, 0x5c, 0x05, 0x00, 0x00, 0x80,
0x88, 0x21, 0x21, 0x08, 0x20, 0x08, 0x00, 0x80,
0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x93, 0xa5,
0x00, 0x00, 0x80, 0x01, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x04, 0x20,
0x08, 0x20, 0x02, 0x00, 0x00, 0x20, 0x08, 0x20,
0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x42,
0x00, 0xf0, 0x93, 0x31, 0x00, 0x00, 0x00, 0x80,
0x01, 0x00, 0x28, 0x80, 0x01, 0x00, 0x00, 0x48,
0x20, 0x08, 0x00, 0x00, 0x28, 0x40, 0x01, 0x2a,
0x08, 0x88, 0x00, 0x20, 0x08, 0x18, 0x00, 0x20,
0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20,
0x04, 0x5f, 0xdf, 0x01, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x20, 0x08,
0x00, 0x00, 0x00, 0x14, 0x80, 0x08, 0x00, 0x00,
0x82, 0xa0, 0x18, 0x00, 0x48, 0x82, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x42, 0x00, 0xf0, 0x3a,
0xf5, 0x00, 0x00, 0x20, 0x01, 0x12, 0x00, 0x20,
0x02, 0x00, 0x00, 0x00, 0x42, 0x00, 0x00, 0x00,
0x42, 0x22, 0x80, 0x08, 0x22, 0x22, 0x88, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x48, 0xf0, | |
('rhr_rh_user6', c_float) )
plist.append( ('rhr_rh_user7', c_float) )
plist.append( ('rhr_rh_user8', c_float) )
plist.append( ('rhr_rh_user9', c_float) )
plist.append( ('rhr_rh_user10', c_float) )
plist.append( ('rhr_rh_user11', c_float) )
plist.append( ('rhr_rh_user12', c_float) )
plist.append( ('rhr_rh_user13', c_float) )
plist.append( ('rhr_rh_user14', c_float) )
plist.append( ('rhr_rh_user15', c_float) )
plist.append( ('rhr_rh_user16', c_float) )
plist.append( ('rhr_rh_user17', c_float) )
plist.append( ('rhr_rh_user18', c_float) )
plist.append( ('rhr_rh_user19', c_float) )
plist.append( ('pad_xx', c_char * 72) )
plist.append( ('rhr_spectral_width', c_float) )
plist.append( ('rhr_csi_dims', c_short) )
plist.append( ('rhr_xcsi', c_short) )
plist.append( ('rhr_ycsi', c_short) )
plist.append( ('rhr_zcsi', c_short) )
plist.append( ('rhr_roilenx', c_float) )
plist.append( ('rhr_roileny', c_float) )
plist.append( ('rhr_roilenz', c_float) )
plist.append( ('pad_xx', c_char * 32) )
plist.append( ('rhr_rh_ps_mps_freq', c_int) )
plist.append( ('pad_xx', c_char * 560) )
plist.append( ('rhr_rh_user_usage_tag', c_uint) )
plist.append( ('pad_xx', c_char * 8) )
plist.append( ('rhr_rh_user20', c_float) )
plist.append( ('rhr_rh_user21', c_float) )
plist.append( ('rhr_rh_user22', c_float) )
plist.append( ('rhr_rh_user23', c_float) )
plist.append( ('rhr_rh_user24', c_float) )
plist.append( ('rhr_rh_user25', c_float) )
plist.append( ('rhr_rh_user26', c_float) )
plist.append( ('rhr_rh_user27', c_float) )
plist.append( ('rhr_rh_user28', c_float) )
plist.append( ('rhr_rh_user29', c_float) )
plist.append( ('rhr_rh_user30', c_float) )
plist.append( ('rhr_rh_user31', c_float) )
plist.append( ('rhr_rh_user32', c_float) )
plist.append( ('rhr_rh_user33', c_float) )
plist.append( ('rhr_rh_user34', c_float) )
plist.append( ('rhr_rh_user35', c_float) )
plist.append( ('rhr_rh_user36', c_float) )
plist.append( ('rhr_rh_user37', c_float) )
plist.append( ('rhr_rh_user38', c_float) )
plist.append( ('rhr_rh_user39', c_float) )
plist.append( ('rhr_rh_user40', c_float) )
plist.append( ('rhr_rh_user41', c_float) )
plist.append( ('rhr_rh_user42', c_float) )
plist.append( ('rhr_rh_user43', c_float) )
plist.append( ('rhr_rh_user44', c_float) )
plist.append( ('rhr_rh_user45', c_float) )
plist.append( ('rhr_rh_user46', c_float) )
plist.append( ('rhr_rh_user47', c_float) )
plist.append( ('rhr_rh_user48', c_float) )
plist.append( ('pad_xx', c_char * 352) )
plist.append( ('rhr_rdb_hdr_off_data', c_int) )
plist.append( ('pad_xx', c_char * 139508) )
plist.append( ('rhe_magstrength', c_int) )
plist.append( ('pad_xx', c_char * 4) )
plist.append( ('rhe_ex_datetime', c_int) )
plist.append( ('pad_xx', c_char * 52) )
plist.append( ('rhe_ex_no', c_ushort) )
plist.append( ('pad_xx', c_char * 22) )
plist.append( ('rhe_patsex', c_short) )
plist.append( ('pad_xx', c_char * 91) )
plist.append( ('rhe_reqnum', c_char * 13) )
plist.append( ('rhe_refphy', c_char * 33) )
plist.append( ('pad_xx', c_char * 105) )
plist.append( ('rhe_ex_sysid', c_char * 9) )
plist.append( ('pad_xx', c_char * 14) )
plist.append( ('rhe_hospname', c_char * 33) )
plist.append( ('rhe_patid', c_char * 13) )
plist.append( ('rhe_patname', c_char * 25) )
plist.append( ('pad_xx', c_char * 4) )
plist.append( ('rhe_ex_verscre', c_char * 2) )
plist.append( ('pad_xx', c_char * 2) )
plist.append( ('rhe_uniq_sys_id', c_char * 16) )
plist.append( ('pad_xx', c_char * 20) )
plist.append( ('rhe_study_uid', c_char * 32) )
plist.append( ('pad_xx', c_char * 64) )
plist.append( ('rhe_patnameff', c_char * 65) )
plist.append( ('rhe_patidff', c_char * 65) )
plist.append( ('rhe_reqnumff', c_char * 17) )
plist.append( ('rhe_dateofbirth', c_char * 9) )
plist.append( ('pad_xx', c_char * 358) )
plist.append( ('rhs_position', c_int) )
plist.append( ('rhs_entry', c_int) )
plist.append( ('pad_xx', c_char * 126) )
plist.append( ('rhs_se_no', c_short) )
plist.append( ('pad_xx', c_char * 122) )
plist.append( ('rhs_se_desc', c_char * 65) )
plist.append( ('pad_xx', c_char * 18) )
plist.append( ('rhs_anref', c_char * 3) )
plist.append( ('pad_xx', c_char * 27) )
plist.append( ('rhs_series_uid', c_char * 32) )
plist.append( ('rhs_landmark_uid', c_char * 32) )
plist.append( ('pad_xx', c_char * 1429) )
plist.append( ('rhi_dfov', c_float) )
plist.append( ('pad_xx', c_char * 12) )
plist.append( ('rhi_scanspacing', c_float) )
plist.append( ('rhi_loc', c_float) )
plist.append( ('pad_xx', c_char * 4) )
plist.append( ('rhi_nex', c_float) )
plist.append( ('pad_xx', c_char * 20) )
plist.append( ('rhi_user0', c_float) )
plist.append( ('rhi_user1', c_float) )
plist.append( ('rhi_user2', c_float) )
plist.append( ('rhi_user3', c_float) )
plist.append( ('rhi_user4', c_float) )
plist.append( ('rhi_user5', c_float) )
plist.append( ('rhi_user6', c_float) )
plist.append( ('rhi_user7', c_float) )
plist.append( ('rhi_user8', c_float) )
plist.append( ('rhi_user9', c_float) )
plist.append( ('rhi_user10', c_float) )
plist.append( ('rhi_user11', c_float) )
plist.append( ('rhi_user12', c_float) )
plist.append( ('rhi_user13', c_float) )
plist.append( ('rhi_user14', c_float) )
plist.append( ('rhi_user15', c_float) )
plist.append( ('rhi_user16', c_float) )
plist.append( ('rhi_user17', c_float) )
plist.append( ('rhi_user18', c_float) )
plist.append( ('rhi_user19', c_float) )
plist.append( ('rhi_user20', c_float) )
plist.append( ('rhi_user21', c_float) )
plist.append( ('rhi_user22', c_float) )
plist.append( ('pad_xx', c_char * 8) )
plist.append( ('rhi_user23', c_float) )
plist.append( ('rhi_user24', c_float) )
plist.append( ('pad_xx', c_char * 60) )
plist.append( ('rhi_user25', c_float) )
plist.append( ('rhi_user26', c_float) )
plist.append( ('rhi_user27', c_float) )
plist.append( ('rhi_user28', c_float) )
plist.append( ('rhi_user29', c_float) )
plist.append( ('rhi_user30', c_float) )
plist.append( ('rhi_user31', c_float) )
plist.append( ('rhi_user32', c_float) )
plist.append( ('rhi_user33', c_float) )
plist.append( ('rhi_user34', c_float) )
plist.append( ('rhi_user35', c_float) )
plist.append( ('rhi_user36', c_float) )
plist.append( ('rhi_user37', c_float) )
plist.append( ('rhi_user38', c_float) )
plist.append( ('rhi_user39', c_float) )
plist.append( ('rhi_user40', c_float) )
plist.append( ('rhi_user41', c_float) )
plist.append( ('rhi_user42', c_float) )
plist.append( ('rhi_user43', c_float) )
plist.append( ('rhi_user44', c_float) )
plist.append( ('rhi_user45', c_float) )
plist.append( ('rhi_user46', c_float) )
plist.append( ('rhi_user47', c_float) )
plist.append( ('rhi_user48', c_float) )
plist.append( ('pad_xx', c_char * 76) )
plist.append( ('rhi_ctr_R', c_float) )
plist.append( ('rhi_ctr_A', c_float) )
plist.append( ('rhi_ctr_S', c_float) )
plist.append( ('pad_xx', c_char * 12) )
plist.append( ('rhi_tlhc_R', c_float) )
plist.append( ('rhi_tlhc_A', c_float) )
plist.append( ('rhi_tlhc_S', c_float) )
plist.append( ('rhi_trhc_R', c_float) )
plist.append( ('rhi_trhc_A', c_float) )
plist.append( ('rhi_trhc_S', c_float) )
plist.append( ('rhi_brhc_R', c_float) )
plist.append( ('rhi_brhc_A', c_float) )
plist.append( ('rhi_brhc_S', c_float) )
plist.append( ('pad_xx', c_char * 196) )
plist.append( ('rhi_tr', c_int) )
plist.append( ('rhi_ti', c_int) )
plist.append( ('rhi_te', c_int) )
plist.append( ('pad_xx', c_char * 306) )
plist.append( ('rhi_numecho', c_short) )
plist.append( ('pad_xx', c_char * 36) )
plist.append( ('rhi_mr_flip', c_short) )
plist.append( ('pad_xx', c_char * 22) )
plist.append( ('rhi_ctyp', c_short) )
plist.append( ('pad_xx', c_char * 64) )
plist.append( ('rhi_freq_dir', c_short) )
plist.append( ('pad_xx', c_char * 112) )
plist.append( ('rhi_psdname', c_char * 33) )
plist.append( ('pad_xx', c_char * 84) )
plist.append( ('rhi_cname', c_char * 17) )
plist.append( ('pad_xx', c_char * 51) )
plist.append( ('rhi_image_uid', c_char * 32) )
elif version == 15:
plist.append( ('rhr_rh_rdbm_rev', c_float) )
plist.append( ('pad_xx', c_char * 12) )
plist.append( ('rhr_rh_scan_date', c_char * 10) )
plist.append( ('rhr_rh_scan_time', c_char * 8) )
plist.append( ('rhr_rh_logo', c_char * 10) )
plist.append( ('rhr_rh_file_contents', c_short) )
plist.append( ('pad_xx', c_char * 10) )
plist.append( ('rhr_rh_data_collect_type', c_short) )
plist.append( ('pad_xx', c_char * 6) )
plist.append( ('rhr_rh_npasses', c_short) )
plist.append( ('pad_xx', c_char * 2) )
plist.append( ('rhr_rh_nslices', c_short) )
plist.append( ('pad_xx', c_char * 10) )
plist.append( ('rhr_rh_frame_size', c_ushort) )
plist.append( ('rhr_rh_point_size', c_short) )
plist.append( ('pad_xx', c_char * 32) )
plist.append( ('rhr_rh_raw_pass_size', c_uint) )
plist.append( ('pad_xx', c_char * 80) )
plist.append( ('rhr_rh_dab[0]_start_rcv', c_short) )
plist.append( ('rhr_rh_dab[0]_stop_rcv', c_short) )
plist.append( ('rhr_rh_dab[1]_start_rcv', c_short) )
plist.append( ('rhr_rh_dab[1]_stop_rcv', c_short) )
plist.append( ('rhr_rh_dab[2]_start_rcv', c_short) )
plist.append( ('rhr_rh_dab[2]_stop_rcv', c_short) )
plist.append( ('rhr_rh_dab[3]_start_rcv', c_short) )
plist.append( ('rhr_rh_dab[3]_stop_rcv', c_short) )
plist.append( ('rhr_rh_user0', c_float) )
plist.append( ('rhr_rh_user1', c_float) )
plist.append( ('rhr_rh_user2', c_float) )
plist.append( ('rhr_rh_user3', c_float) )
plist.append( ('rhr_rh_user4', c_float) )
plist.append( ('rhr_rh_user5', c_float) )
plist.append( ('rhr_rh_user6', c_float) )
plist.append( ('rhr_rh_user7', c_float) )
plist.append( ('rhr_rh_user8', c_float) )
plist.append( ('rhr_rh_user9', c_float) )
plist.append( ('rhr_rh_user10', c_float) )
plist.append( ('rhr_rh_user11', c_float) )
plist.append( ('rhr_rh_user12', c_float) )
plist.append( ('rhr_rh_user13', c_float) )
plist.append( ('rhr_rh_user14', c_float) )
plist.append( ('rhr_rh_user15', c_float) )
plist.append( ('rhr_rh_user16', c_float) )
plist.append( ('rhr_rh_user17', c_float) )
plist.append( ('rhr_rh_user18', c_float) )
plist.append( ('rhr_rh_user19', c_float) )
plist.append( ('pad_xx', c_char * 72) )
plist.append( ('rhr_spectral_width', c_float) )
plist.append( ('rhr_csi_dims', c_short) )
plist.append( ('rhr_xcsi', c_short) )
plist.append( ('rhr_ycsi', c_short) )
plist.append( ('rhr_zcsi', c_short) )
plist.append( ('rhr_roilenx', c_float) )
plist.append( ('rhr_roileny', c_float) )
plist.append( ('rhr_roilenz', c_float) )
plist.append( ('pad_xx', c_char * 32) )
plist.append( ('rhr_rh_ps_mps_freq', c_int) )
plist.append( ('pad_xx', c_char * 560) )
plist.append( ('rhr_rh_user_usage_tag', c_uint) )
plist.append( ('pad_xx', c_char * 8) )
plist.append( ('rhr_rh_user20', c_float) )
plist.append( ('rhr_rh_user21', c_float) )
plist.append( ('rhr_rh_user22', c_float) )
plist.append( ('rhr_rh_user23', c_float) )
plist.append( ('rhr_rh_user24', c_float) )
plist.append( ('rhr_rh_user25', c_float) )
plist.append( ('rhr_rh_user26', c_float) )
plist.append( ('rhr_rh_user27', c_float) )
plist.append( ('rhr_rh_user28', c_float) )
plist.append( ('rhr_rh_user29', c_float) )
plist.append( ('rhr_rh_user30', c_float) )
plist.append( ('rhr_rh_user31', c_float) )
plist.append( ('rhr_rh_user32', c_float) )
plist.append( ('rhr_rh_user33', c_float) )
plist.append( ('rhr_rh_user34', c_float) )
plist.append( ('rhr_rh_user35', c_float) )
plist.append( ('rhr_rh_user36', c_float) )
plist.append( ('rhr_rh_user37', c_float) )
plist.append( ('rhr_rh_user38', c_float) )
plist.append( ('rhr_rh_user39', c_float) )
plist.append( ('rhr_rh_user40', c_float) )
plist.append( ('rhr_rh_user41', c_float) )
plist.append( ('rhr_rh_user42', c_float) )
plist.append( ('rhr_rh_user43', c_float) )
plist.append( ('rhr_rh_user44', c_float) )
plist.append( ('rhr_rh_user45', c_float) )
plist.append( ('rhr_rh_user46', c_float) )
plist.append( ('rhr_rh_user47', c_float) )
plist.append( ('rhr_rh_user48', c_float) )
plist.append( ('pad_xx', c_char * 352) )
plist.append( ('rhr_rdb_hdr_off_data', c_int) )
plist.append( ('pad_xx', c_char * 139508) )
plist.append( ('rhe_magstrength', c_int) | |
the operation.
# We can disable this by passing "datetime.min" when we start tracking revisions.
doc.start_track_revisions("<NAME>", datetime.min)
builder.write("Hello again! ")
self.assertEqual(2, doc.revisions.count)
self.assertEqual("<NAME>", doc.revisions[1].author)
self.assertEqual(datetime.min, doc.revisions[1].date_time)
# We can accept/reject these revisions programmatically
# by calling methods such as "Document.accept_all_revisions", or each revision's "accept" method.
# In Microsoft Word, we can process them manually via "Review" -> "Changes".
doc.save(ARTIFACTS_DIR + "Document.track_revisions.docx")
#ExEnd
def test_accept_all_revisions(self):
    #ExStart
    #ExFor:Document.accept_all_revisions
    #ExSummary:Shows how to accept all tracking changes in the document.
    doc = aw.Document()
    builder = aw.DocumentBuilder(doc)
    # Edit the document while tracking changes to create a few revisions.
    doc.start_track_revisions("<NAME>")
    builder.write("Hello world! ")
    builder.write("Hello again! ")
    builder.write("This is another revision.")
    doc.stop_track_revisions()
    # Each tracked "write" call above produced one revision.
    self.assertEqual(3, doc.revisions.count)
    # We can iterate through every revision and accept/reject it as a part of our document.
    # If we know we wish to accept every revision, we can do it more straightforwardly so by calling this method.
    doc.accept_all_revisions()
    # Accepting folds the tracked edits into the body and empties the revision collection.
    self.assertEqual(0, doc.revisions.count)
    self.assertEqual("Hello world! Hello again! This is another revision.", doc.get_text().strip())
    #ExEnd
def test_get_revised_properties_of_list(self):
    #ExStart
    #ExFor:RevisionsView
    #ExFor:Document.revisions_view
    #ExSummary:Shows how to switch between the revised and the original view of a document.
    doc = aw.Document(MY_DIR + "Revisions at list levels.docx")
    doc.update_list_labels()
    paragraphs = doc.first_section.body.paragraphs
    # Default (original) view: list labels as they were before the tracked edits.
    self.assertEqual("1.", paragraphs[0].list_label.label_string)
    self.assertEqual("a.", paragraphs[1].list_label.label_string)
    self.assertEqual("", paragraphs[2].list_label.label_string)
    # View the document object as if all the revisions are accepted. Currently supports list labels.
    doc.revisions_view = aw.RevisionsView.FINAL
    self.assertEqual("", paragraphs[0].list_label.label_string)
    self.assertEqual("1.", paragraphs[1].list_label.label_string)
    self.assertEqual("a.", paragraphs[2].list_label.label_string)
    #ExEnd
    # Switch back to the original view and then actually accept the revisions;
    # the resulting labels differ from the FINAL preview asserted above.
    doc.revisions_view = aw.RevisionsView.ORIGINAL
    doc.accept_all_revisions()
    self.assertEqual("a.", paragraphs[0].list_label.label_string)
    self.assertEqual("", paragraphs[1].list_label.label_string)
    self.assertEqual("b.", paragraphs[2].list_label.label_string)
def test_update_thumbnail(self):
    #ExStart
    #ExFor:Document.update_thumbnail()
    #ExFor:Document.update_thumbnail(ThumbnailGeneratingOptions)
    #ExFor:ThumbnailGeneratingOptions
    #ExFor:ThumbnailGeneratingOptions.generate_from_first_page
    #ExFor:ThumbnailGeneratingOptions.thumbnail_size
    #ExSummary:Shows how to update a document's thumbnail.
    doc = aw.Document()
    builder = aw.DocumentBuilder(doc)
    builder.writeln("Hello world!")
    builder.insert_image(IMAGE_DIR + "Logo.jpg")
    # There are two ways of setting a thumbnail image when saving a document to .epub.
    # 1 - Use the document's first page:
    doc.update_thumbnail()
    doc.save(ARTIFACTS_DIR + "Document.update_thumbnail.first_page.epub")
    # 2 - Use the first image found in the document:
    options = aw.rendering.ThumbnailGeneratingOptions()
    # Verify the defaults (600x900 size, generated from the first page) before changing them.
    self.assertEqual(drawing.Size(600, 900), options.thumbnail_size) #ExSkip
    self.assertTrue(options.generate_from_first_page) #ExSkip
    options.thumbnail_size = drawing.Size(400, 400)
    options.generate_from_first_page = False
    doc.update_thumbnail(options)
    doc.save(ARTIFACTS_DIR + "Document.update_thumbnail.first_image.epub")
    #ExEnd
def test_hyphenation_options(self):
    #ExStart
    #ExFor:Document.hyphenation_options
    #ExFor:HyphenationOptions
    #ExFor:HyphenationOptions.auto_hyphenation
    #ExFor:HyphenationOptions.consecutive_hyphen_limit
    #ExFor:HyphenationOptions.hyphenation_zone
    #ExFor:HyphenationOptions.hyphenate_caps
    #ExSummary:Shows how to configure automatic hyphenation.
    doc = aw.Document()
    builder = aw.DocumentBuilder(doc)
    builder.font.size = 24
    builder.writeln("Lorem ipsum dolor sit amet, consectetur adipiscing elit, " +
        "sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.")
    doc.hyphenation_options.auto_hyphenation = True
    doc.hyphenation_options.consecutive_hyphen_limit = 2
    doc.hyphenation_options.hyphenation_zone = 720  # 720 = 0.5 inch (cf. the default 360 = 0.25 inch)
    doc.hyphenation_options.hyphenate_caps = True
    doc.save(ARTIFACTS_DIR + "Document.hyphenation_options.docx")
    #ExEnd
    # Confirm the settings are retained by the in-memory document...
    self.assertTrue(doc.hyphenation_options.auto_hyphenation)
    self.assertEqual(2, doc.hyphenation_options.consecutive_hyphen_limit)
    self.assertEqual(720, doc.hyphenation_options.hyphenation_zone)
    self.assertTrue(doc.hyphenation_options.hyphenate_caps)
    # ...and that the saved file matches the reference ("gold") document.
    self.assertTrue(DocumentHelper.compare_docs(ARTIFACTS_DIR + "Document.hyphenation_options.docx",
        GOLDS_DIR + "Document.HyphenationOptions Gold.docx"))
def test_hyphenation_options_default_values(self):
    """Checks the hyphenation defaults of a new document after a save/load round trip."""
    document = DocumentHelper.save_open(aw.Document())
    hyphenation = document.hyphenation_options
    self.assertEqual(False, hyphenation.auto_hyphenation)
    self.assertEqual(0, hyphenation.consecutive_hyphen_limit)
    self.assertEqual(360, hyphenation.hyphenation_zone)  # 0.25 inch
    self.assertTrue(hyphenation.hyphenate_caps)
def test_hyphenation_options_exceptions(self):
    """Checks that out-of-range hyphenation values raise while boundary values are accepted."""
    document = aw.Document()
    hyphenation = document.hyphenation_options
    # A consecutive-hyphen limit of zero is a legal boundary value...
    hyphenation.consecutive_hyphen_limit = 0
    # ...but a zero-width zone and a negative limit are rejected.
    with self.assertRaises(Exception):
        hyphenation.hyphenation_zone = 0
    with self.assertRaises(Exception):
        hyphenation.consecutive_hyphen_limit = -1
    hyphenation.hyphenation_zone = 360
def test_ooxml_compliance_version(self):
    #ExStart
    #ExFor:Document.compliance
    #ExSummary:Shows how to read a loaded document's Open Office XML compliance version.
    # The compliance version varies between documents created by different versions of Microsoft Word.
    doc = aw.Document(MY_DIR + "Document.doc")
    # A legacy binary .doc file loads with the ECMA-376 (2006) compliance level.
    self.assertEqual(doc.compliance, aw.saving.OoxmlCompliance.ECMA376_2006)
    doc = aw.Document(MY_DIR + "Document.docx")
    # A modern .docx file reports ISO 29500:2008 Transitional compliance.
    self.assertEqual(doc.compliance, aw.saving.OoxmlCompliance.ISO29500_2008_TRANSITIONAL)
    #ExEnd
@unittest.skip("WORDSNET-20342")
def test_image_save_options(self):
    #ExStart
    #ExFor:Document.save(str,SaveOptions)
    #ExFor:SaveOptions.use_anti_aliasing
    #ExFor:SaveOptions.use_high_quality_rendering
    #ExSummary:Shows how to improve the quality of a rendered document with SaveOptions.
    doc = aw.Document(MY_DIR + "Rendering.docx")
    builder = aw.DocumentBuilder(doc)
    builder.font.size = 60
    builder.writeln("Some text.")
    options = aw.saving.ImageSaveOptions(aw.SaveFormat.JPEG)
    # Both quality flags default to disabled.
    self.assertFalse(options.use_anti_aliasing) #ExSkip
    self.assertFalse(options.use_high_quality_rendering) #ExSkip
    doc.save(ARTIFACTS_DIR + "Document.image_save_options.default.jpg", options)
    options.use_anti_aliasing = True
    options.use_high_quality_rendering = True
    doc.save(ARTIFACTS_DIR + "Document.image_save_options.high_quality.jpg", options)
    #ExEnd
    # Both renders have the same pixel dimensions; only rendering quality differs.
    self.verify_image(794, 1122, ARTIFACTS_DIR + "Document.image_save_options.default.jpg")
    self.verify_image(794, 1122, ARTIFACTS_DIR + "Document.image_save_options.high_quality.jpg")
def test_cleanup(self):
    #ExStart
    #ExFor:Document.cleanup()
    #ExSummary:Shows how to remove unused custom styles from a document.
    doc = aw.Document()
    doc.styles.add(aw.StyleType.LIST, "MyListStyle1")
    doc.styles.add(aw.StyleType.LIST, "MyListStyle2")
    doc.styles.add(aw.StyleType.CHARACTER, "MyParagraphStyle1")
    doc.styles.add(aw.StyleType.CHARACTER, "MyParagraphStyle2")
    # Combined with the built-in styles, the document now has eight styles.
    # A custom style counts as "used" while applied to some part of the document,
    # which means that the four styles we added are currently unused.
    self.assertEqual(8, doc.styles.count)
    # Apply a custom character style, and then a custom list style. Doing so will mark the styles as "used".
    builder = aw.DocumentBuilder(doc)
    builder.font.style = doc.styles.get_by_name("MyParagraphStyle1")
    builder.writeln("Hello world!")
    builder.list_format.list = doc.lists.add(doc.styles.get_by_name("MyListStyle1"))
    builder.writeln("Item 1")
    builder.writeln("Item 2")
    # cleanup() drops only the two styles that were never applied ("...Style2").
    doc.cleanup()
    self.assertEqual(6, doc.styles.count)
    # Removing every node that a custom style is applied to marks it as "unused" again.
    # Run the "cleanup" method again to remove them.
    doc.first_section.body.remove_all_children()
    doc.cleanup()
    self.assertEqual(4, doc.styles.count)
    #ExEnd
def test_automatically_update_styles(self):
    #ExStart
    #ExFor:Document.automatically_update_styles
    #ExSummary:Shows how to attach a template to a document.
    doc = aw.Document()
    # Microsoft Word documents by default come with an attached template called "Normal.dotm".
    # There is no default template for blank Aspose.Words documents.
    self.assertEqual("", doc.attached_template)
    # Attach a template, then set the flag to apply style changes
    # within the template to styles in our document.
    doc.attached_template = MY_DIR + "Business brochure.dotx"
    doc.automatically_update_styles = True
    doc.save(ARTIFACTS_DIR + "Document.automatically_update_styles.docx")
    #ExEnd
    # Verify both settings survive a save/reload round trip.
    doc = aw.Document(ARTIFACTS_DIR + "Document.automatically_update_styles.docx")
    self.assertTrue(doc.automatically_update_styles)
    self.assertEqual(MY_DIR + "Business brochure.dotx", doc.attached_template)
    self.assertTrue(os.path.exists(doc.attached_template))
def test_default_template(self):
    #ExStart
    #ExFor:Document.attached_template
    #ExFor:Document.automatically_update_styles
    #ExFor:SaveOptions.create_save_options(str)
    #ExFor:SaveOptions.default_template
    #ExSummary:Shows how to set a default template for documents that do not have attached templates.
    doc = aw.Document()
    # Enable automatic style updating, but do not attach a template document.
    doc.automatically_update_styles = True
    self.assertEqual("", doc.attached_template)
    # Since there is no template document, the document had nowhere to track style changes.
    # Use a SaveOptions object to automatically set a template
    # if a document that we are saving does not have one.
    options = aw.saving.SaveOptions.create_save_options("Document.default_template.docx")
    options.default_template = MY_DIR + "Business brochure.dotx"
    doc.save(ARTIFACTS_DIR + "Document.default_template.docx", options)
    #ExEnd
    # Sanity-check that the fallback template file actually exists on disk.
    self.assertTrue(os.path.exists(options.default_template))
def test_use_substitutions(self):
    #ExStart
    #ExFor:FindReplaceOptions.use_substitutions
    #ExFor:FindReplaceOptions.legacy_mode
    #ExSummary:Shows how to recognize and use substitutions within replacement patterns.
    doc = aw.Document()
    builder = aw.DocumentBuilder(doc)
    builder.write("Jason gave money to Paul.")
    options = aw.replacing.FindReplaceOptions()
    options.use_substitutions = True
    # Using legacy mode does not support many advanced features, so we need to set it to 'False'.
    options.legacy_mode = False
    # With substitutions enabled, "$1" and "$2" in the replacement refer to the regex capture groups.
    doc.range.replace_regex(r"([A-z]+) gave money to ([A-z]+)", r"$2 took money from $1", options)
    self.assertEqual(doc.get_text(), "Paul took money from Jason.\f")
    #ExEnd
def test_set_invalidate_field_types(self):
    #ExStart
    #ExFor:Document.normalize_field_types
    #ExSummary:Shows how to get the keep a field's type up to date with its field code.
    doc = aw.Document()
    builder = aw.DocumentBuilder(doc)
    date_field = builder.insert_field("DATE", None)
    # Field types are detected automatically from their field codes.
    self.assertEqual(aw.fields.FieldType.FIELD_DATE, date_field.type)
    # Overwrite the raw run text that makes up the field code.
    code_run = doc.first_section.body.first_paragraph.get_child_nodes(aw.NodeType.RUN, True)[0].as_run()
    self.assertEqual("DATE", code_run.text) #ExSkip
    code_run.text = "PAGE"
    # The field code now describes a different kind of field,
    # but the type-related properties still report the stale type.
    self.assertEqual("PAGE", date_field.get_field_code())
    for stale_type in (date_field.type, date_field.start.field_type,
                       date_field.separator.field_type, date_field.end.field_type):
        self.assertEqual(aw.fields.FieldType.FIELD_DATE, stale_type)
    # Normalizing refreshes those properties to match the current field code.
    doc.normalize_field_types()
    for fresh_type in (date_field.type, date_field.start.field_type,
                       date_field.separator.field_type, date_field.end.field_type):
        self.assertEqual(aw.fields.FieldType.FIELD_PAGE, fresh_type)
    #ExEnd
def test_layout_options_revisions(self):
    #ExStart
    #ExFor:Document.layout_options
    #ExFor:LayoutOptions
    #ExFor:LayoutOptions.revision_options
    #ExFor:RevisionColor
    #ExFor:RevisionOptions
    #ExFor:RevisionOptions.inserted_text_color
    #ExFor:RevisionOptions.show_revision_bars
    #ExSummary:Shows how to alter the appearance of revisions in a rendered output document.
    doc = aw.Document()
    builder = aw.DocumentBuilder(doc)
    # Write one untracked line, then record a second line as a tracked revision.
    builder.writeln("This is not a revision.")
    doc.start_track_revisions("<NAME>", datetime.now())
    self.assertEqual(aw.layout.RevisionColor.BY_AUTHOR, doc.layout_options.revision_options.inserted_text_color) #ExSkip
    self.assertTrue(doc.layout_options.revision_options.show_revision_bars) #ExSkip
    builder.writeln("This is a revision.")
    doc.stop_track_revisions()
    builder.writeln("This is not a revision.")
    # Render inserted revision text in green, and drop the bar that would
    # otherwise mark every revised line on the left.
    revision_options = doc.layout_options.revision_options
    revision_options.inserted_text_color = aw.layout.RevisionColor.BRIGHT_GREEN
    revision_options.show_revision_bars = False
    doc.save(ARTIFACTS_DIR + "Document.layout_options_revisions.pdf")
    #ExEnd
def test_layout_options_hidden_text(self):
    for show_hidden_text in (False, True):
        with self.subTest(show_hidden_text=show_hidden_text):
            #ExStart
            #ExFor:Document.layout_options
            #ExFor:LayoutOptions
            #ExFor:LayoutOptions.show_hidden_text
            #ExSummary:Shows how to hide text in a rendered output document.
            doc = aw.Document()
            builder = aw.DocumentBuilder(doc)
            self.assertFalse(doc.layout_options.show_hidden_text) #ExSkip
            # Write one visible line and one line formatted as hidden,
            # then choose whether the hidden line appears when rendering.
            builder.writeln("This text is not hidden.")
            builder.font.hidden = True
            builder.writeln("This text is hidden.")
            doc.layout_options.show_hidden_text = show_hidden_text
            doc.save(ARTIFACTS_DIR + "Document.layout_options_hidden_text.pdf")
            #ExEnd
            # Verification via Aspose.PDF (disabled in the original source):
            #pdf_doc = aspose.pdf.Document(ARTIFACTS_DIR + "Document.layout_options_hidden_text.pdf")
            #text_absorber = aspose.pdf.text.TextAbsorber()
            #text_absorber.visit(pdf_doc)
            #if show_hidden_text:
            #    self.assertEqual("This text is not hidden.\nThis text is hidden.", text_absorber.text)
            #else:
            #    self.assertEqual("This text is not hidden.", text_absorber.text)
def test_layout_options_paragraph_marks(self):
for show_paragraph_marks in (False, True):
with self.subTest(show_paragraph_marks=show_paragraph_marks):
#ExStart
#ExFor:Document.layout_options
#ExFor:LayoutOptions
#ExFor:LayoutOptions.show_paragraph_marks
#ExSummary:Shows how to show paragraph marks in a rendered output document.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
self.assertFalse(doc.layout_options.show_paragraph_marks) #ExSkip
# Add some paragraphs, then enable paragraph marks to show the ends of paragraphs
# with a pilcrow | |
### Connect an expression expr via connection variable cv
# Works for both read and write targets
# s:
# pos: starting index of the expression
# cv: connection variable
# expr: original expression
def connect_dp(s, pos, cv, expr):
    # Rewrite expr as "(*(&(expr) + cv))": taking the address, adding the
    # (zero-valued) connection variable, and dereferencing again preserves
    # the value while forcing a syntactic data dependency on cv.
    wrapped = "(*(&(" + expr + ") + " + cv + "))"
    return replace_ln(s, pos, expr, wrapped)
### Place the asm connection statement
# s:
# idx: index of the second expression belonging to the dependency
# ps: index of the start of the line
# asm_st: asm statement to place
def place_asm_st(s, idx, ps, asm_st):
    """Insert the connector asm statement before the expression at idx.

    s:      file contents as a string
    idx:    index of the second expression belonging to the dependency
    ps:     index of the start of the line containing that expression
    asm_st: asm statement text to place

    Returns the modified string, or "" when no safe insertion point exists.
    """
    assert(idx > ps)
    # puke2/puke presumably scan backwards to the previous significant
    # character before the given index -- TODO confirm against their
    # definitions (not visible in this chunk).
    pt = puke2(s, idx-1)
    tc = s[pt]
    if is_stat_delim(tc):
        # The expression directly follows a statement delimiter:
        # Place immediately before the expression
        s = insert(s, pt+1, asm_st)
        return s
    else:
        pt = puke(s, ps-1)
        tc = s[pt]
        if is_stat_delim(tc):
            # A delimiter precedes the whole line:
            # Place at start of line
            assert(s[ps] != '\n')
            s = insert(s, ps, asm_st)
            return s
        else:
            # Connector asm statement cannot be placed; the empty string
            # signals failure to the caller.
            return ""
### Add a dependency between the accesses
# s: file as string
# pos1: points at newline character at end of line of first access
# pos2: points at newline character at end of line of second access
# fst: first expression (read)
# snd: second expression (read or write)
# ty: type of first expression
# line_cnt: corresponding line number in results.txt
def place_dp(s, pos1, pos2, fst, snd, ty, line_cnt):
    """Try to insert an artificial data dependency between two accesses.

    s:        file contents as a string
    pos1:     index of the newline at the end of the first access's line
    pos2:     index of the newline at the end of the second access's line
    fst:      first expression (read)
    snd:      second expression (read or write)
    ty:       C type of the first expression (used to declare the pull var)
    line_cnt: corresponding line number in results.txt (for diagnostics)

    Returns the modified string on success, or the original string s
    unchanged when the dependency cannot (or need not) be inserted.
    """
    # No usable type information for the first expression -> nothing to do.
    if ty == '' or ty == ' ':
        return s
    # Strip off c:: from source and target expressions (if any)
    if fst[0:3] == 'c::':
        fst = fst[3:]
    if snd[0:3] == 'c::':
        snd = snd[3:]
    # ln_etos presumably maps an end-of-line index to the corresponding
    # start-of-line index -- TODO confirm against its definition.
    ps1 = ln_etos(s, pos1)
    ps2 = ln_etos(s, pos2)
    # NOTE(review): ps1/ps2 are indices (ints), so comparing them to '\n' is
    # always true -- these asserts are vacuous; s[ps1] was probably intended.
    assert(ps1 != '\n')
    assert(ps2 != '\n')
    ln1 = extract_ln(s, pos1)
    ln2 = extract_ln(s, pos2)
    # Check if expressions exist on line (exact matches)
    if not contains(ln1, fst):
        print('dp ' + line_cnt + ', first expression not found')
        return s
    if not contains(ln2, snd):
        print('dp ' + line_cnt + ', second expression not found')
        return s
    # Get starting indices of expressions in source file
    idx1 = s.find(fst, ps1)
    idx2 = s.find(snd, ps2)
    assert(idx1 != -1)
    assert(idx2 != -1)
    len1 = len(fst)
    len2 = len(snd)
    # Give up if the target is not reachable from the source's scope.
    if not from_to_scope(s, idx1, idx2):
        print('dp ' + line_cnt + ', target out of scope')
        return s
    # At this point, we're pretty confident that it's possible to insert the dep
    # :-)
    # Handle source of dependency
    stat = contains_stat(ln1)
    if stat:
        # Source line contains a complicated statement (if, while, for, do, case,
        # switch)
        print('Inserting dependency with complicated source, ' + line_cnt)
        # Handle target of dependency: connect via constant 0 since the
        # source expression cannot be pulled out of a complex statement.
        sn = connect_dp(s, idx2, "0", snd)
        assert(len(sn) > 0)
        # Handle connector asm statement (plain compiler barrier)
        asm_st = '__asm volatile("":::"memory");'
        sn = place_asm_st(sn, idx2, ps2, asm_st)
        if sn == '':
            return s
    else:
        # Source line is simple
        print('Inserting dependency with simple source, ' + line_cnt)
        # Pull and connection variables
        pv = next_tmp_pull()
        con = next_tmp_con()
        # Handle target of dependency
        sn = connect_dp(s, idx2, con, snd)
        assert(len(sn) > 0)
        # Handle connector asm statement
        asm_st = get_connector(pv, con)
        sn = place_asm_st(sn, idx2, ps2, asm_st)
        if sn == '':
            return s
        # Transform line to pulled version: replace the source expression by
        # the pull variable and declare con/pv just before the statement.
        sn = delete(sn, idx1, idx1 + len1)
        sn = insert(sn, idx1, '(' + pv + ')')
        con_and_pull = 'int ' + con + ' = 0; ' + ty + ' ' + pv + ' = ' + fst + ';'
        # Walk backwards to the nearest statement boundary and insert the
        # declarations right after it.
        # NOTE(review): if no delimiter precedes idx1 this walks past index 0
        # into negative indices (wrapping to the end of the string).
        while True:
            idx1 -= 1
            c = sn[idx1]
            if c == ';' or c == '{' or c == '}' or c == '\n':
                sn = insert(sn, idx1 + 1, con_and_pull)
                break
    return sn
# ------------------------------------------------------------------------------
# Process each line in results.txt separately; not efficient but OK ...
# fences: list of pairs (with pair = (num, list), a list in a pair represents a
# line in results.txt)
#
# fences: all non-ignored lines in results.txt
def insert_fences(fences):
    """Insert fences (or artificial dependencies) into the listed source files.

    fences: list of pairs (num, cols) where num is the line number in
            results.txt and cols is that line split into columns.

    Each entry is processed independently: the target file is read, a
    dependency or a full fence is inserted, and the file is written back.
    Process each line in results.txt separately; not efficient but OK.
    """
    global musk_form
    global handle_dp
    global fm
    # For every line in results.txt
    for p in fences:
        line_cnt = str(p[0])
        l = p[1]
        # Read file into string
        assert(im_src_file1 == io_src_file)
        fn = l[im_src_file1]
        with open(fn, 'r') as f:
            s = f.read()
        # Sanity checks: in musketeer format both accesses must come from
        # the same file and the same function.
        if musk_form:
            if l[im_src_file1] != l[im_src_file2]:
                print('Ignoring fence/dp ' + line_cnt + ', file mismatch')
                continue
            if l[im_func_name1] != l[im_func_name2]:
                print('Ignoring fence/dp ' + line_cnt + ', func mismatch')
                continue
        # Handle inverse line numbers
        # TODO(review): this branch is a stub -- inverse line numbers are
        # detected but nothing is done about them.
        if musk_form:
            if int(l[im_line1]) > int(l[im_line2]):
                pass
        place_full_fence = True
        # Dp's are handled specially and line specifies a dp
        if handle_dp and l[im_fence] == "dp":
            assert(musk_form)
            # Get position pointers
            ln1 = int(l[im_line1])
            ln2 = int(l[im_line2])
            if ln1 == ln2:
                print('Ignoring dp ' + line_cnt + ', same line numbers')
                continue
            pos1 = after_line(s, ln1)
            pos2 = after_line(s, ln2)
            # Expressions
            fst = l[im_exp1]
            snd = l[im_exp2]
            # Type of first expression (+ replace _ by space)
            ty = l[im_type1]
            # BUG FIX: the original replaced '_' with the empty string, which
            # contradicts the comment above and would glue multi-word C types
            # together (e.g. 'unsigned_int' -> 'unsignedint'); use a space so
            # the declaration emitted by place_dp stays valid C.
            ty = ty.replace('_', ' ')
            s_bak = s
            s = place_dp(s, pos1, pos2, fst, snd, ty, line_cnt)
            # Dependency successfully inserted -> no full fence needed
            if (s != s_bak):
                place_full_fence = False
        if place_full_fence:
            # Insert a normal fence
            assert(im_fence == io_fence)
            try:
                fence = fm[l[im_fence]]
            except KeyError:
                print_err('Unrecognized fence for architecture. Exiting.')
                sys.exit(1)
            # Using __asm as asm does not work with -std=c99
            asm = '__asm volatile ("' + fence + '":::"memory");'
            # Throws ValueError if not an integer
            if musk_form:
                if fence_pos == fence_first:
                    ln = int(l[im_line1])
                elif fence_pos == fence_second:
                    ln = int(l[im_line2])-1
            else:
                ln = int(l[io_line])
            # Point at newline character at end of line
            pos = after_line(s, ln)
            assert_msg(s[pos] == '\n', 'Insert at line: ' + str(ln) + ', File: ' + fn)
            s = place_fence(s, pos, asm, line_cnt)
        # Write back result
        with open(fn, 'w') as f:
            f.write(s)
# ------------------------------------------------------------------------------
def handle_args(args):
    """Parse and validate the command-line arguments.

    args: the full argument vector (the caller passes sys.argv).

    Expects exactly four arguments:
      args[1]: target architecture ('x86' or 'arm')
      args[2]: dependency strategy ('dp' or 'fence')
      args[3]: input format ('musk' or 'other')
      args[4]: path to the results file (consumed by the caller)

    Sets the module globals fm, handle_dp and musk_form; exits on error.
    """
    global musk_form
    global handle_dp
    global fm
    # BUG FIX: the original read sys.argv directly and silently ignored the
    # 'args' parameter; use the parameter (the only caller passes sys.argv,
    # so behavior is unchanged, but the function is now honest and testable).
    if len(args) != 5:
        print_err('Number of arguments != 5\n')
        usage()
        sys.exit(1)
    arch = args[1]
    if arch == 'x86':
        print('x86 architecture')
        fm = fm_x86
    elif arch == 'arm':
        print('ARM architecture')
        fm = fm_arm
    else:
        print_err('Unrecognized architecture')
        sys.exit(1)
    dp_mode = args[2]
    if dp_mode == "dp":
        print('Handling dp')
        handle_dp = True
    elif dp_mode == "fence":
        print('Using full fence for dp')
        handle_dp = False
    else:
        print_err('Unrecognized fencing strategy')
        sys.exit(1)
    in_form = args[3]
    if in_form == "musk":
        print('Input format musketeer\n')
        musk_form = True
    elif in_form == "other":
        print('Input format other\n')
        musk_form = False
    else:
        print_err('Unrecognized format selector')
        sys.exit(1)
    # dp insertion is only implemented for ARM with musketeer-format input.
    if handle_dp and (not(musk_form) or not(arch == 'arm')):
        print_err('Incompatible argument values.')
        sys.exit(1)
# ------------------------------------------------------------------------------
if __name__ == "__main__":
handle_args(sys.argv)
# Hack (musk output insufficient for dp insertion at the moment)
handle_dp = False
print('Current working directory: \n' + os.getcwd() + '\n')
# Read results file (check files mentioned in there)
fences = [] # list of pairs: (num, list)
files = [] # files in results.txt that will be fenced
all_files = [] # all files mentioned in results.txt
with open(sys.argv[4], 'r') as f:
cnt = 0
for line in f:
cnt += 1
# Note: "a||b|".split('|') yields ['a', '', 'b', '']
l = line.split('|')
cols = len(l)
if cols == 11 or cols == 13:
print('<results> is from older version of musketeer')
sys.exit(1)
assert(cols == 5 or cols == 9)
assert(im_src_file1 == io_src_file)
fn = l[im_src_file1]
all_files.append(fn)
if fn[0] != '/' and os.path.isfile(fn):
# Files to fence
files.append(fn)
# Fencing instruction used (and its line in results.txt)
fences.append((cnt, l))
# Check if files exist before doing anything
not_found = False
all_files = list(set(all_files))
for f in all_files:
if not(os.path.isfile(f)):
not_found = True
print('File ' + f + ' does not exist')
continue
if f[0] == '/':
not_found = True
print('File ' + f + ' has absolute path. Ignoring.')
# Print extra newline if there is a file we ignore (pretty printing)
if not_found:
print()
# Files to fence
files = list(set(files))
# Backup files (if backup doesn't exist yet)
for f in files:
backup = f + '.fibak'
if not(os.path.isfile(backup)):
shutil.copyfile(f, backup)
# Fix curly braces in files | |
#!/usr/bin/env python
"""
############################################################
Integrated Circuit Package Component Specific Work Book View
############################################################
"""
# -*- coding: utf-8 -*-
#
# rtk.hardware.gui.gtk.IntegratedCircuit.py is part of The RTK Project
#
# All rights reserved.
# Copyright 2007 - 2017 <NAME> andrew.rowland <AT> reliaqual <DOT> com
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
# Import modules for localization support.
import gettext
import locale
# Modules required for the GUI.
try:
import pygtk
pygtk.require('2.0')
except ImportError:
sys.exit(1)
try:
import gtk
except ImportError:
sys.exit(1)
try:
import gtk.glade
except ImportError:
sys.exit(1)
# Modules required for plotting.
import matplotlib
from matplotlib.backends.backend_gtk import FigureCanvasGTK as FigureCanvas
from matplotlib.figure import Figure
# Import other RTK modules.
try:
import Configuration
import gui.gtk.Widgets as Widgets
except ImportError:
import rtk.Configuration as Configuration
import rtk.gui.gtk.Widgets as Widgets
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__organization__ = 'ReliaQual Associates, LLC'
__copyright__ = 'Copyright 2007 - 2015 Andrew "weibullguy" Rowland'
try:
locale.setlocale(locale.LC_ALL, Configuration.LOCALE)
except locale.Error:
locale.setlocale(locale.LC_ALL, '')
_ = gettext.gettext
matplotlib.use('GTK')
class Inputs(gtk.Frame):
"""
The Work Book view for displaying all the attributes for an integrated
circuit. The attributes of an inntegrated circuit Work Book view are:
"""
_lst_package = ["", _(u"Hermetic DIP w/ Weld Seal"), _(u"Pin Grid Array"),
_(u"Surface Mount"), _(u"Hermetic DIP w/ Glass Seal"),
_(u"Flatpack"), _(u"Can"),
_(u"Non-Hermetic DIP, PGA, SMT")]
def __init__(self, model):
    """
    Method to create an input frame for the integrated circuit data model.

    :param model: the :py:class `rtk.hardware.IntegratedCircuit.Model`
                  whose attributes will be displayed.
    """
    gtk.Frame.__init__(self)
    self.set_shadow_type(gtk.SHADOW_ETCHED_OUT)
    # Define private dictionary attributes.
    # Define private list attributes.
    self._lst_count_labels = [_(u"Quality:"), _(u"Years in Production:")]
    self._lst_stress_labels = [_(u"Quality:"),
                               _(u"\u03C0<sub>Q</sub> Override:")]
    self._lst_quality = ["", "S", "B", "B-1"]
    self._lst_handler_id = []
    # Define private scalar attributes.
    self._hardware_model = model
    self._subcategory = model.subcategory_id
    # Define public dictionary attributes.
    # Define public list attributes.
    # Define public scalar attributes.
    self.cmbApplication = Widgets.make_combo(simple=True)
    self.cmbECC = Widgets.make_combo(simple=True)
    self.cmbFamily = Widgets.make_combo(simple=True)
    self.cmbManufacturing = Widgets.make_combo(simple=True)
    self.cmbPackage = Widgets.make_combo(simple=True)
    self.cmbQuality = Widgets.make_combo(simple=True)
    self.cmbTechnology = Widgets.make_combo(simple=True)
    self.txtCommercialPiQ = Widgets.make_entry(width=100)
    self.txtCycles = Widgets.make_entry(width=100)
    self.txtDieArea = Widgets.make_entry(width=100)
    self.txtESDVolts = Widgets.make_entry(width=100)
    self.txtFeatureSize = Widgets.make_entry(width=100)
    self.txtLifeOpHours = Widgets.make_entry(width=100)
    self.txtMemorySize = Widgets.make_entry(width=100)
    self.txtNumBits = Widgets.make_entry(width=100)
    self.txtNumElements = Widgets.make_entry(width=100)
    self.txtNumGates = Widgets.make_entry(width=100)
    self.txtNumPins = Widgets.make_entry(width=100)
    self.txtNumTransistors = Widgets.make_entry(width=100)
    self.txtYears = Widgets.make_entry(width=100)
    # Subcategory specific attributes: map each subcategory ID to the extra
    # stress-input labels it needs (replaces a long if/elif chain; keys 7 and
    # 8 -- DRAM and SRAM -- share the same label list).  Each label literal
    # stays wrapped in _() so gettext extraction still finds it.
    _dram_sram_labels = [_(u"Technology:"), _(u"Package Type:"),
                         _(u"Memory Size (bits):"), _(u"# of Pins:"),
                         _(u"Years in Production:")]
    _subcat_labels = {
        1: [_(u"Technology:"), _(u"Package Type:"),          # Linear
            _(u"# of Transistors:"), _(u"# of Pins:"),
            _(u"Years in Production:")],
        2: [_(u"Technology:"), _(u"Family:"),                # Logic
            _(u"Package Type:"), _(u"# of Gates:"),
            _(u"# of Pins:"), _(u"Years in Production:")],
        3: [_(u"Technology:"), _(u"Package Type:"),          # PAL/PLA
            _(u"# of Gates:"), _(u"# of Pins:"),
            _(u"Years in Production:")],
        4: [_(u"Technology:"), _(u"Package Type:"),          # Microprocessor
            _(u"# of Bits:"), _(u"# of Pins:"),
            _(u"Years in Production:")],
        5: [_(u"Technology:"), _(u"Package Type:"),          # ROM
            _(u"Memory Size (bits):"), _(u"# of Pins:"),
            _(u"Years in Production:")],
        6: [_(u"Technology:"), _(u"Package Type:"),          # EEPROM
            _(u"Manufacturing Process:"), _(u"Memory Size (bits):"),
            _(u"# of Pins:"), _(u"Years in Production:"),
            _(u"# of Programming Cycles:"), _(u"Error Correction Code:"),
            _(u"System Lifetime Operating Hours:")],
        7: _dram_sram_labels,                                # DRAM
        8: _dram_sram_labels,                                # SRAM
        9: [_(u"Application:"), _(u"Package Type:"),         # GaAs
            _(u"# of Elements:"), _(u"# of Pins:"),
            _(u"Years in Production:")],
        10: [_(u"Application:"), _(u"Package Type:"),        # VLSI
             _(u"Manufacturing Process:"), _(u"# of Pins:"),
             _(u"Years in Production:"), _(u"Feature Size (microns):"),
             _(u"Die Area (cm2):"), _(u"ESD Susceptibility (Volts):")],
    }
    self._lst_stress_labels.extend(_subcat_labels.get(self._subcategory, []))
def create_217_count_inputs(self, x_pos=5):
    """
    Method to create the MIL-HDBK-217FN2 parts count input widgets for
    Integrated Circuits.

    :keyword int x_pos: the x position of the display widgets.
    :return: _x_pos: the x position at which the input widgets were placed.
    :rtype: int
    """
    _label = gtk.Label()
    _label.set_markup("<span weight='bold'>" +
                      _(u"MIL-HDBK-217FN2 Part Count Inputs") +
                      "</span>")
    _label.set_justify(gtk.JUSTIFY_LEFT)
    _label.set_alignment(xalign=0.5, yalign=0.5)
    _label.show_all()
    self.set_label_widget(_label)
    _fixed = gtk.Fixed()
    _scrollwindow = gtk.ScrolledWindow()
    _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
    _scrollwindow.add_with_viewport(_fixed)
    self.add(_scrollwindow)
    # Populate all the gtk.ComboBox().
    for _index, _quality in enumerate(self._lst_quality):
        self.cmbQuality.insert_text(_index, _quality)
    # Create and place all the labels for the inputs.
    (_x_pos, _y_pos) = Widgets.make_labels(self._lst_count_labels,
                                           _fixed, 5, 5)
    _x_pos = max(x_pos, _x_pos) + 50
    # Create the tooltips for all the input widgets.
    # BUG FIX: tooltip previously said "the selected connection", a
    # copy-paste leftover from the connection work book view.
    self.cmbQuality.set_tooltip_text(_(u"Select and display the quality "
                                       u"level for the selected "
                                       u"integrated circuit."))
    # Place all the input widgets, re-parenting any widget that was already
    # placed by another view.
    if self.cmbQuality.get_parent() is not None:
        self.cmbQuality.reparent(_fixed)
    if self.txtYears.get_parent() is not None:
        self.txtYears.reparent(_fixed)
    _fixed.put(self.cmbQuality, _x_pos, _y_pos[0])
    _fixed.put(self.txtYears, _x_pos, _y_pos[1])
    # Connect signals to callback functions.
    self._lst_handler_id.append(
        self.cmbQuality.connect('changed', self._on_combo_changed, 0))
    self._lst_handler_id.append(self.txtYears.connect('focus-out-event',
                                                      self._on_focus_out,
                                                      6))
    _fixed.show_all()
    return _x_pos
def create_217_stress_inputs(self, x_pos=5): # pylint: disable=R0915
"""
Method to create the MIL-HDBK-217FN2 part stress input gtk.Widgets()
for Integrated Circuits.
:keyword int x_pos: the x position of the display widgets.
:return: False if successful or True if an error is encountered.
"""
# WARNING: Refactor create_217_stress_inputs; current McCabe Complexity metric = 24.
_label = gtk.Label()
_label.set_markup("<span weight='bold'>" +
_(u"MIL-HDBK-217FN2 Part Stress Inputs") +
"</span>")
_label.set_justify(gtk.JUSTIFY_LEFT)
_label.set_alignment(xalign=0.5, yalign=0.5)
_label.show_all()
self.set_label_widget(_label)
_fixed = gtk.Fixed()
_scrollwindow = gtk.ScrolledWindow()
_scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
_scrollwindow.add_with_viewport(_fixed)
self.add(_scrollwindow)
# Populate all the gtk.ComboBox().
for i in range(len(self._lst_quality)):
self.cmbQuality.insert_text(i, self._lst_quality[i])
# Create and place all the labels for the inputs.
(_x_pos,
_y_pos) = Widgets.make_labels(self._lst_stress_labels, _fixed, 5, 5)
_x_pos = max(x_pos, _x_pos) + 50
# Create the tooltips for all the input widgets.
self.cmbQuality.set_tooltip_text(_(u"Select and display the quality "
u"level for the selected "
u"connection."))
self.txtCommercialPiQ.set_tooltip_text(_(u"Displays the user-defined "
u"quality factor for the "
u"selected connection. This "
u"value over rides the "
u"quality factor selected "
u"above."))
# Place all the input widgets.
if self.cmbQuality.get_parent() is not None:
self.cmbQuality.reparent(_fixed)
if self.txtYears.get_parent() is not None:
self.txtYears.reparent(_fixed)
_fixed.put(self.cmbQuality, _x_pos, _y_pos[0])
_fixed.put(self.txtCommercialPiQ, _x_pos, _y_pos[1])
# Connect signals to callback functions.
self._lst_handler_id.append(
self.cmbQuality.connect('changed', self._on_combo_changed, 0))
self._lst_handler_id.append(
self.txtCommercialPiQ.connect('focus-out-event',
self._on_focus_out, 1))
self._lst_handler_id.append(
self.cmbTechnology.connect('changed', self._on_combo_changed, 2))
self._lst_handler_id.append(
self.cmbPackage.connect('changed', self._on_combo_changed, 3))
self._lst_handler_id.append(
self.txtNumTransistors.connect('focus-out-event',
self._on_focus_out, 4))
self._lst_handler_id.append(
self.txtNumPins.connect('focus-out-event', self._on_focus_out, 5))
self._lst_handler_id.append(
self.txtYears.connect('focus-out-event', self._on_focus_out, 6))
self._lst_handler_id.append(
self.cmbFamily.connect('changed', self._on_combo_changed, 7))
self._lst_handler_id.append(
self.txtNumGates.connect('focus-out-event', self._on_focus_out, 8))
self._lst_handler_id.append(
self.txtNumBits.connect('focus-out-event', self._on_focus_out, 9))
self._lst_handler_id.append(
self.cmbManufacturing.connect('changed',
self._on_combo_changed, 10))
self._lst_handler_id.append(
self.txtCycles.connect('focus-out-event', self._on_focus_out, 11))
self._lst_handler_id.append(
self.cmbECC.connect('changed', self._on_combo_changed, 12))
self._lst_handler_id.append(
self.txtLifeOpHours.connect('focus-out-event',
self._on_focus_out, 13))
self._lst_handler_id.append(
self.cmbApplication.connect('changed', self._on_combo_changed, 14))
self._lst_handler_id.append(
self.txtNumElements.connect('focus-out-event',
self._on_focus_out, 15))
self._lst_handler_id.append(
self.txtFeatureSize.connect('focus-out-event',
self._on_focus_out, 16))
self._lst_handler_id.append(
self.txtDieArea.connect('focus-out-event', self._on_focus_out, 17))
self._lst_handler_id.append(
self.txtESDVolts.connect('focus-out-event',
self._on_focus_out, 18))
self._lst_handler_id.append(
self.txtMemorySize.connect('focus-out-event',
self._on_focus_out, 19))
if self._subcategory == 1: # Linear
# Populate the gtk.ComboBox().
self.cmbTechnology.insert_text(0, '')
self.cmbTechnology.insert_text(1, "Bipolar")
self.cmbTechnology.insert_text(2, "MOS")
for i in range(len(self._lst_package)):
self.cmbPackage.insert_text(i, self._lst_package[i])
# Place all the input widgets.
_fixed.put(self.cmbTechnology, _x_pos, _y_pos[2])
_fixed.put(self.cmbPackage, _x_pos, _y_pos[3])
_fixed.put(self.txtNumTransistors, _x_pos, _y_pos[4])
_fixed.put(self.txtNumPins, _x_pos, _y_pos[5])
_fixed.put(self.txtYears, _x_pos, _y_pos[6])
elif self._subcategory == 2: # Logic
_lst_family = ["", "TTL", "ASTTL", "CML", "HTTL", "FTTL", "DTL",
"ECL", "ALSTTL", "FLTTL", "STTL", "BiCMOS",
"LSTTL", "III", "IIIL", "ISL"]
# Populate the gtk.ComboBox().
self.cmbTechnology.insert_text(0, '')
self.cmbTechnology.insert_text(1, "Bipolar")
self.cmbTechnology.insert_text(2, "MOS")
for _index, _family in enumerate(_lst_family):
self.cmbFamily.insert_text(_index, _family)
for _index, _package in enumerate(self._lst_package):
self.cmbPackage.insert_text(_index, _package)
# Place all the input widgets.
_fixed.put(self.cmbTechnology, _x_pos, _y_pos[2])
_fixed.put(self.cmbFamily, _x_pos, _y_pos[3])
_fixed.put(self.cmbPackage, _x_pos, _y_pos[4])
_fixed.put(self.txtNumGates, _x_pos, _y_pos[5])
_fixed.put(self.txtNumPins, _x_pos, _y_pos[6])
_fixed.put(self.txtYears, | |
<reponame>srcarter3/awips2<gh_stars>0
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: <NAME>
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
# ----------------------------------------------------------------------------
# This software is in the public domain, furnished "as is", without technical
# support, and with no warranty, express or implied, as to its usefulness for
# any purpose.
#
# Tests overview options on hazard products
#
# Author:
# ----------------------------------------------------------------------------
def1a = """#Definition["includeOverviewHeadline"] = 1"""
def1b = """Definition["includeOverviewHeadline"] = 0"""
def2a = """#Definition["includeOverview"] = 1"""
def2b = """Definition["includeOverview"] = 0"""
scripts = [
{
"commentary": "Clear out all Hazards Table and Grids.",
"name": "HazOverview_WSW_0",
"productType": None,
"clearHazardsTable": 1,
"checkStrings": [],
},
{
"commentary": "WSW check: with overview headline and overview",
"name": "HazOverview_WSW_1",
"productType": "Hazard_WSW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 39, "WS.A", ["FLZ139"]),
],
"checkStrings": [
"URGENT - WINTER WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"700 PM EST Thu Dec 31 2009",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ139-010800-",
],
"notCheckStrings": [],
},
{
"commentary": "WSW check: with just overview",
"name": "HazOverview_WSW_2",
"productType": "Hazard_WSW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 39, "WS.A", ["FLZ139"]),
],
"fileChanges": [("Hazard_WSW_Local", "TextProduct", "replace", (def1a, def1b), "undo")],
"checkStrings": [
"URGENT - WINTER WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"700 PM EST Thu Dec 31 2009",
".|*Overview (must edit)*|.",
"FLZ139-010800-",
],
"notCheckStrings": [
"...|*Overview headline (must edit)*|...",
],
},
{
"commentary": "WSW check: with just overview headline",
"name": "HazOverview_WSW_3",
"productType": "Hazard_WSW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 39, "WS.A", ["FLZ139"]),
],
"fileChanges": [("Hazard_WSW_Local", "TextProduct", "replace", (def2a, def2b), "undo")],
"checkStrings": [
"URGENT - WINTER WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"700 PM EST Thu Dec 31 2009",
"...|*Overview headline (must edit)*|...",
"FLZ139-010800-",
],
"notCheckStrings": [
".|*Overview (must edit)*|.",
],
},
{
"commentary": "WSW check: with neither overview nor overview headline",
"name": "HazOverview_WSW_4",
"productType": "Hazard_WSW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 39, "WS.A", ["FLZ139"]),
],
"fileChanges": [("Hazard_WSW_Local", "TextProduct", "replace", [(def2a, def2b), (def1a, def1b)], "undo"),
],
"checkStrings": [
"URGENT - WINTER WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"700 PM EST Thu Dec 31 2009",
"FLZ139-010800-",
],
"notCheckStrings": [
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
],
},
{
"commentary": "Canceling out all hazards.",
"name": "HazOverview_WSW_5",
"productType": None,
"checkStrings": [],
"clearHazardsTable": 1,
},
# NPW
{
"commentary": "Clear out all Hazards Table and Grids in prep for NPW.",
"name": "HazOverview_NPW_0",
"productType": None,
"clearHazardsTable": 1,
"checkStrings": [],
},
{
"commentary": "NPW check: with both overview headline and overview",
"name": "HazOverview_NPW_1",
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 39, "FR.Y", ["FLZ139"]),
],
"checkStrings": [
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"700 PM EST Thu Dec 31 2009",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ139-010800-",
],
"notCheckStrings": [],
},
{
"commentary": "NPW check: with just overview",
"name": "HazOverview_NPW_2",
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 39, "FR.Y", ["FLZ139"]),
],
"fileChanges": [("Hazard_NPW_Local", "TextProduct", "replace", (def1a, def1b), "undo")],
"checkStrings": [
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"700 PM EST Thu Dec 31 2009",
".|*Overview (must edit)*|.",
"FLZ139-010800-",
],
"notCheckStrings": [
"...|*Overview headline (must edit)*|...",
],
},
{
"commentary": "NPW check: with just overview headline",
"name": "HazOverview_NPW_3",
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 39, "FR.Y", ["FLZ139"]),
],
"fileChanges": [("Hazard_NPW_Local", "TextProduct", "replace", (def2a, def2b), "undo")],
"checkStrings": [
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"700 PM EST Thu Dec 31 2009",
"...|*Overview headline (must edit)*|...",
"FLZ139-010800-",
],
"notCheckStrings": [
".|*Overview (must edit)*|.",
],
},
{
"commentary": "NPW check: with neither overview headline nor overview",
"name": "HazOverview_NPW_4",
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 39, "FR.Y", ["FLZ139"]),
],
"fileChanges": [("Hazard_NPW_Local", "TextProduct", "replace", [(def2a, def2b), (def1a, def1b)], "undo"),
],
"checkStrings": [
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"700 PM EST Thu Dec 31 2009",
"FLZ139-010800-",
],
"notCheckStrings": [
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
],
},
{
"commentary": "Canceling out all hazards.",
"name": "HazOverview_NPW_5",
"productType": None,
"checkStrings": [],
"clearHazardsTable": 1,
},
# RFW
{
"commentary": "Clear out all Hazards Table and Grids for RFW.",
"name": "HazOverview_RFW_0",
"productType": None,
"clearHazardsTable": 1,
"checkStrings": [],
},
{
"commentary": "RFW check: with both overview headline and overview",
"name": "HazOverview_RFW_1",
"productType": "Hazard_RFW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 39, "FW.A", ["FLZ139"]),
],
"cmdLineVars" : "{('Select RFW Type', 'rfwType'): [], ('Source for Headline and \\nAffected Area Bullet', 'elevationSource'): 'Grids'}",
"checkStrings": [
"URGENT - FIRE WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"700 PM EST Thu Dec 31 2009",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ139-010800-",
],
"notCheckStrings": [],
},
{
"commentary": "RFW check: with only overview",
"name": "HazOverview_RFW_2",
"productType": "Hazard_RFW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 39, "FW.A", ["FLZ139"]),
],
"fileChanges": [("Hazard_RFW_Local", "TextProduct", "replace", (def1a, def1b), "undo")],
"cmdLineVars" : "{('Select RFW Type', 'rfwType'): [], ('Source for Headline and \\nAffected Area Bullet', 'elevationSource'): 'Grids'}",
"checkStrings": [
"URGENT - FIRE WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"700 PM EST Thu Dec 31 2009",
".|*Overview (must edit)*|.",
"FLZ139-010800-",
],
"notCheckStrings": [
"...|*Overview headline (must edit)*|...",
],
},
{
"commentary": "RFW check: with only overview headline",
"name": "HazOverview_RFW_3",
"productType": "Hazard_RFW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 39, "FW.A", ["FLZ139"]),
],
"fileChanges": [("Hazard_RFW_Local", "TextProduct", "replace", (def2a, def2b), "undo")],
"cmdLineVars" : "{('Select RFW Type', 'rfwType'): [], ('Source for Headline and \\nAffected Area Bullet', 'elevationSource'): 'Grids'}",
"checkStrings": [
"URGENT - FIRE WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"700 PM EST Thu Dec 31 2009",
"...|*Overview headline (must edit)*|...",
"FLZ139-010800-",
],
"notCheckStrings": [
".|*Overview (must edit)*|.",
],
},
{
"commentary": "RFW check: with neither overview headline nor overview",
"name": "HazOverview_RFW_4",
"productType": "Hazard_RFW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 39, "FW.A", ["FLZ139"]),
],
"fileChanges": [("Hazard_RFW_Local", "TextProduct", "replace", [(def2a, def2b), (def1a, def1b)], "undo"),
],
"cmdLineVars" : "{('Select RFW Type', 'rfwType'): [], ('Source for Headline and \\nAffected Area Bullet', 'elevationSource'): 'Grids'}",
"checkStrings": [
"URGENT - FIRE WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"700 PM EST Thu Dec 31 2009",
"FLZ139-010800-",
],
"notCheckStrings": [
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
],
},
{
"commentary": "Canceling out all hazards.",
"name": "HazOverview_RFW_5",
"productType": None,
"checkStrings": [],
"clearHazardsTable": 1,
},
# CFW
{
"commentary": "Clear out all Hazards Table and Grids for CFW.",
"name": "HazOverview_CFW_0",
"productType": None,
"clearHazardsTable": 1,
"checkStrings": [],
},
{
"commentary": "CFW check: with both overview headline and overview",
"name": "HazOverview_CFW_1",
"productType": "Hazard_CFW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 39, "LS.A", ["FLZ139"]),
],
"checkStrings": [
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Lakeshore Hazard Message",
"National Weather Service Tampa Bay Ruskin FL",
"700 PM EST Thu Dec 31 2009",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ139-010800-",
],
"notCheckStrings": [],
},
{
"commentary": "CFW check: with only overview",
"name": "HazOverview_CFW_2",
"productType": "Hazard_CFW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 39, "LS.A", ["FLZ139"]),
],
"fileChanges": [("Hazard_CFW_Local", "TextProduct", "replace", (def1a, def1b), "undo")],
"checkStrings": [
"URGENT - IMMEDIATE | |
+ B_i)
C_t^ = update_nl(W3x_t + U3h_{t-1} + B_c)
o_t = gate_nl(W4x_t + U4h_{t-1} + B_o)
C_t = f_t*C_{t-1} + i_t*C_t^
h_t = o_t*update_nl(C_t)
Wi and Ui can further parameterised into low rank version by
Wi = matmul(W, W_i) and Ui = matmul(U, U_i)
'''
def __init__(self, input_size, hidden_size, gate_non_linearity="sigmoid",
             update_non_linearity="tanh", wRank=None, uRank=None,
             name="LSTMLR"):
    """Build the (optionally low-rank) LSTM cell parameters.

    When ``wRank``/``uRank`` is given, the input/hidden projections are
    factored as ``Wi = W @ W_i`` / ``Ui = U @ U_i``, so one extra shared
    matrix is created on that side.
    """
    super(LSTMLRCell, self).__init__()
    self._input_size = input_size
    self._hidden_size = hidden_size
    self._gate_non_linearity = gate_non_linearity
    self._update_non_linearity = update_non_linearity
    # Four gate matrices per side; one extra shared factor per side when
    # the corresponding rank is specified.
    self._num_weight_matrices = [4 + int(wRank is not None),
                                 4 + int(uRank is not None)]
    self._wRank = wRank
    self._uRank = uRank
    self._name = name

    # Input-side weights: optional shared factor W, then W1..W4.
    # (Creation order matches the original so RNG draws are identical.)
    if wRank is None:
        w_rows = input_size
    else:
        self.W = nn.Parameter(0.1 * torch.randn([input_size, wRank]))
        w_rows = wRank
    for idx in (1, 2, 3, 4):
        setattr(self, "W%d" % idx,
                nn.Parameter(0.1 * torch.randn([w_rows, hidden_size])))

    # Hidden-side weights: optional shared factor U, then U1..U4.
    if uRank is None:
        u_rows = hidden_size
    else:
        self.U = nn.Parameter(0.1 * torch.randn([hidden_size, uRank]))
        u_rows = uRank
    for idx in (1, 2, 3, 4):
        setattr(self, "U%d" % idx,
                nn.Parameter(0.1 * torch.randn([u_rows, hidden_size])))

    # Gate biases are initialized to one.
    self.bias_f = nn.Parameter(torch.ones([1, hidden_size]))
    self.bias_i = nn.Parameter(torch.ones([1, hidden_size]))
    self.bias_c = nn.Parameter(torch.ones([1, hidden_size]))
    self.bias_o = nn.Parameter(torch.ones([1, hidden_size]))
@property
def state_size(self):
    # The recurrent state packs both h and c, hence twice the hidden size.
    return 2 * self._hidden_size

@property
def input_size(self):
    # Feature dimension expected of each input step.
    return self._input_size

@property
def output_size(self):
    # Only h (of size hidden_size) is exposed as the cell output.
    return self._hidden_size

@property
def gate_non_linearity(self):
    # Name of the nonlinearity applied to the i/f/o gates.
    return self._gate_non_linearity

@property
def update_non_linearity(self):
    # Name of the nonlinearity applied to the candidate and cell output.
    return self._update_non_linearity

@property
def wRank(self):
    # Rank of the input-projection factorization (None = full rank).
    return self._wRank

@property
def uRank(self):
    # Rank of the hidden-projection factorization (None = full rank).
    return self._uRank

@property
def num_weight_matrices(self):
    # [input-side matrix count, hidden-side matrix count].
    return self._num_weight_matrices

@property
def name(self):
    return self._name

@property
def cellType(self):
    return "LSTMLR"
def forward(self, input, hiddenStates):
    """One LSTM step.

    Args:
        input: batch of input features.
        hiddenStates: tuple (h, c) of previous hidden and cell states.

    Returns:
        (new_h, new_c): updated hidden and cell states.
    """
    (h, c) = hiddenStates

    # PERF: in the low-rank case the original recomputed
    # matmul(input, self.W) for every gate (4x) and matmul(h, self.U)
    # likewise. Hoisting the shared projection is numerically identical
    # (same ops, same order within each product) and saves 6 matmuls.
    x = input if self._wRank is None else torch.matmul(input, self.W)
    wComp1 = torch.matmul(x, self.W1)
    wComp2 = torch.matmul(x, self.W2)
    wComp3 = torch.matmul(x, self.W3)
    wComp4 = torch.matmul(x, self.W4)

    hProj = h if self._uRank is None else torch.matmul(h, self.U)
    uComp1 = torch.matmul(hProj, self.U1)
    uComp2 = torch.matmul(hProj, self.U2)
    uComp3 = torch.matmul(hProj, self.U3)
    uComp4 = torch.matmul(hProj, self.U4)

    pre_comp1 = wComp1 + uComp1
    pre_comp2 = wComp2 + uComp2
    pre_comp3 = wComp3 + uComp3
    pre_comp4 = wComp4 + uComp4

    # Gates: i (input), f (forget), o (output); c_ is the candidate cell.
    i = gen_non_linearity(pre_comp1 + self.bias_i,
                          self._gate_non_linearity)
    f = gen_non_linearity(pre_comp2 + self.bias_f,
                          self._gate_non_linearity)
    o = gen_non_linearity(pre_comp4 + self.bias_o,
                          self._gate_non_linearity)
    c_ = gen_non_linearity(pre_comp3 + self.bias_c,
                           self._update_non_linearity)

    new_c = f * c + i * c_
    new_h = o * gen_non_linearity(new_c, self._update_non_linearity)
    return new_h, new_c
def getVars(self):
    """Return every learnable tensor in a fixed order: input-side weights,
    hidden-side weights, then the four gate biases."""
    # A count of 4 on a side means no extra shared low-rank factor exists.
    w_vars = [] if self._num_weight_matrices[0] == 4 else [self.W]
    w_vars += [self.W1, self.W2, self.W3, self.W4]
    u_vars = [] if self._num_weight_matrices[1] == 4 else [self.U]
    u_vars += [self.U1, self.U2, self.U3, self.U4]
    biases = [self.bias_f, self.bias_i, self.bias_c, self.bias_o]
    return w_vars + u_vars + biases
class GRULRCell(nn.Module):
    '''
    GRU LR Cell with Both Full Rank and Low Rank Formulations
    Has multiple activation functions for the gates
    hidden_size = # hidden units

    gate_non_linearity = nonlinearity for the gate can be chosen from
    [tanh, sigmoid, relu, quantTanh, quantSigm]
    update_non_linearity = nonlinearity for final rnn update
    can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm]

    wRank = rank of W matrix
    (creates 4 matrices if not None else creates 3 matrices)
    uRank = rank of U matrix
    (creates 4 matrices if not None else creates 3 matrices)

    GRU architecture and compression techniques are found in
    GRU(LINK) paper

    Basic architecture is like:

    r_t = gate_nl(W1x_t + U1h_{t-1} + B_r)
    z_t = gate_nl(W2x_t + U2h_{t-1} + B_g)
    h_t^ = update_nl(W3x_t + r_t*U3(h_{t-1}) + B_h)
    h_t = z_t*h_{t-1} + (1-z_t)*h_t^

    Wi and Ui can further parameterised into low rank version by
    Wi = matmul(W, W_i) and Ui = matmul(U, U_i)
    '''

    def __init__(self, input_size, hidden_size, gate_non_linearity="sigmoid",
                 update_non_linearity="tanh", wRank=None, uRank=None,
                 name="GRULR"):
        super(GRULRCell, self).__init__()
        self._input_size = input_size
        self._hidden_size = hidden_size
        self._gate_non_linearity = gate_non_linearity
        self._update_non_linearity = update_non_linearity
        # Three matrices per side, plus one shared low-rank factor per side
        # when the corresponding rank is given.
        self._num_weight_matrices = [3, 3]
        self._wRank = wRank
        self._uRank = uRank
        if wRank is not None:
            self._num_weight_matrices[0] += 1
        if uRank is not None:
            self._num_weight_matrices[1] += 1
        self._name = name

        if wRank is None:
            self.W1 = nn.Parameter(
                0.1 * torch.randn([input_size, hidden_size]))
            self.W2 = nn.Parameter(
                0.1 * torch.randn([input_size, hidden_size]))
            self.W3 = nn.Parameter(
                0.1 * torch.randn([input_size, hidden_size]))
        else:
            # Low-rank factorization: effective Wi = W @ Wi.
            self.W = nn.Parameter(0.1 * torch.randn([input_size, wRank]))
            self.W1 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
            self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
            self.W3 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))

        if uRank is None:
            self.U1 = nn.Parameter(
                0.1 * torch.randn([hidden_size, hidden_size]))
            self.U2 = nn.Parameter(
                0.1 * torch.randn([hidden_size, hidden_size]))
            self.U3 = nn.Parameter(
                0.1 * torch.randn([hidden_size, hidden_size]))
        else:
            # Low-rank factorization: effective Ui = U @ Ui.
            self.U = nn.Parameter(0.1 * torch.randn([hidden_size, uRank]))
            self.U1 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
            self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
            self.U3 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))

        # Biases initialized to one.
        self.bias_r = nn.Parameter(torch.ones([1, hidden_size]))
        self.bias_gate = nn.Parameter(torch.ones([1, hidden_size]))
        self.bias_update = nn.Parameter(torch.ones([1, hidden_size]))

    @property
    def _device(self):
        # BUGFIX: this used to be a plain attribute captured at construction
        # time, so it went stale after .to(device)/.cuda(). Deriving it from
        # a registered parameter always reflects the current placement.
        return self.bias_update.device

    @property
    def state_size(self):
        # GRU carries a single state vector h.
        return self._hidden_size

    @property
    def input_size(self):
        return self._input_size

    @property
    def output_size(self):
        return self._hidden_size

    @property
    def gate_non_linearity(self):
        return self._gate_non_linearity

    @property
    def update_non_linearity(self):
        return self._update_non_linearity

    @property
    def wRank(self):
        return self._wRank

    @property
    def uRank(self):
        return self._uRank

    @property
    def num_weight_matrices(self):
        # [input-side matrix count, hidden-side matrix count].
        return self._num_weight_matrices

    @property
    def name(self):
        return self._name

    @property
    def cellType(self):
        return "GRULR"

    def forward(self, input, state):
        """One GRU step; returns the new hidden state."""
        # PERF: apply the shared low-rank projection once instead of once per
        # gate (the original recomputed matmul(input, self.W) three times and
        # matmul(state, self.U) twice). Numerically identical.
        x = input if self._wRank is None else torch.matmul(input, self.W)
        wComp1 = torch.matmul(x, self.W1)
        wComp2 = torch.matmul(x, self.W2)
        wComp3 = torch.matmul(x, self.W3)

        s = state if self._uRank is None else torch.matmul(state, self.U)
        uComp1 = torch.matmul(s, self.U1)
        uComp2 = torch.matmul(s, self.U2)

        r = gen_non_linearity(wComp1 + uComp1 + self.bias_r,
                              self._gate_non_linearity)
        z = gen_non_linearity(wComp2 + uComp2 + self.bias_gate,
                              self._gate_non_linearity)

        # The candidate gates the hidden state by r *before* projecting, so
        # its projection cannot reuse ``s``.
        if self._uRank is None:
            pre_comp3 = wComp3 + torch.matmul(r * state, self.U3)
        else:
            pre_comp3 = wComp3 + \
                torch.matmul(torch.matmul(r * state, self.U), self.U3)
        c = gen_non_linearity(pre_comp3 + self.bias_update,
                              self._update_non_linearity)

        new_h = z * state + (1.0 - z) * c
        return new_h

    def getVars(self):
        """Return learnable tensors in a fixed order: W side, U side, biases."""
        Vars = []
        if self._num_weight_matrices[0] == 3:
            Vars.extend([self.W1, self.W2, self.W3])
        else:
            Vars.extend([self.W, self.W1, self.W2, self.W3])
        if self._num_weight_matrices[1] == 3:
            Vars.extend([self.U1, self.U2, self.U3])
        else:
            Vars.extend([self.U, self.U1, self.U2, self.U3])
        Vars.extend([self.bias_r, self.bias_gate, self.bias_update])
        return Vars
class UGRNNLRCell(nn.Module):
'''
UGRNN LR Cell with Both Full Rank and Low Rank Formulations
Has multiple activation functions for the gates
hidden_size = # hidden units
gate_non_linearity = nonlinearity for the gate can be chosen from
[tanh, sigmoid, relu, quantTanh, quantSigm]
update_non_linearity = nonlinearity for final rnn update
can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm]
wRank = rank of W matrix
(creates 3 matrices if not None else creates 2 matrices)
uRank = rank of U matrix
(creates 3 matrices if not None else creates 2 matrices)
UGRNN architecture and compression techniques are found in
UGRNN(LINK) paper
Basic architecture is like:
z_t = gate_nl(W1x_t + U1h_{t-1} + B_g)
h_t^ = update_nl(W1x_t + U1h_{t-1} + B_h)
h_t = z_t*h_{t-1} + (1-z_t)*h_t^
Wi and Ui can further parameterised into low rank version by
Wi = matmul(W, W_i) and Ui = matmul(U, U_i)
'''
def __init__(self, input_size, hidden_size, gate_non_linearity="sigmoid",
update_non_linearity="tanh", wRank=None, uRank=None,
name="UGRNNLR"):
super(UGRNNLRCell, self).__init__()
self._input_size = input_size
self._hidden_size = hidden_size
self._gate_non_linearity = gate_non_linearity
self._update_non_linearity = update_non_linearity
self._num_weight_matrices = [2, 2]
self._wRank = wRank
self._uRank = uRank
if wRank is not None:
self._num_weight_matrices[0] += 1
if uRank is not None:
self._num_weight_matrices[1] += 1
self._name = name
if wRank is None:
self.W1 = nn.Parameter(
0.1 * torch.randn([input_size, hidden_size]))
self.W2 = nn.Parameter(
0.1 * torch.randn([input_size, hidden_size]))
else:
self.W = nn.Parameter(0.1 * torch.randn([input_size, wRank]))
self.W1 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
if uRank is None:
self.U1 = nn.Parameter(
0.1 * torch.randn([hidden_size, hidden_size]))
self.U2 = nn.Parameter(
0.1 * torch.randn([hidden_size, hidden_size]))
else:
self.U = nn.Parameter(0.1 * torch.randn([hidden_size, uRank]))
self.U1 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
| |
fields ):
clone_strs = [ 'return self.__class__(' ]
for name, type_ in fields.items():
clone_strs.append( " " + _gen_list_clone_strs( type_, f'self.{name}' ) + "," )
return _create_fn(
'__deepcopy__',
[ 'self', 'memo' ],
clone_strs + [ ')' ],
)
#-------------------------------------------------------------------------
# _mk_imatmul_fn
#-------------------------------------------------------------------------
# Creates @= function that copies the value over ...
# TODO create individual from_bits for imatmul and ilshift
# def __imatmul__( self, other ):
# if self.__class__ is not other.__class__:
# other = self.__class__.from_bits( other.to_bits() )
# self.x @= other.x
# self.y[0] @= other.y[0]
# self.y[1] @= other.y[1]
def _mk_imatmul_fn( fields ):
    # Generate the ``__imatmul__`` method used for blocking assignment
    # (``lhs @= rhs``): when the two sides are different classes, ``other``
    # is first converted via to_bits/from_bits, then every field is copied
    # in place, recursing element-wise into array fields.

    def _gen_list_imatmul_strs( type_, prefix='' ):
        # Emit one "self.x @= other.x" statement per scalar slot; lists
        # recurse so nested arrays are fully covered.
        if isinstance( type_, list ):
            ret = []
            for i in range(len(type_)):
                ret.extend( _gen_list_imatmul_strs( type_[0], f"{prefix}[{i}]" ) )
            return ret
        else:
            return [ f"self.{prefix} @= other.{prefix}" ]

    imatmul_strs = [ 'if self.__class__ is not other.__class__:',
                     ' other = self.__class__.from_bits( other.to_bits() )']
    for name, type_ in fields.items():
        imatmul_strs.extend( _gen_list_imatmul_strs( type_, name ) )
    return _create_fn(
        '__imatmul__',
        [ 'self', 'other' ],
        imatmul_strs + [ "return self" ],
    )
#-------------------------------------------------------------------------
# _mk_nbits_to_bits_fn
#-------------------------------------------------------------------------
# Creates nbits, to_bits function that copies the value over ...
#
# def to_bits( self ):
# return concat( self.x, self.y[0], self.y[1] )
#
# TODO packing order of array? x[0] is LSB or MSB of a list
# current we do LSB
def _mk_nbits_to_bits_fn( fields ):
    # Generate (total_nbits, to_bits) for a bitstruct: ``to_bits`` packs all
    # fields into one Bits value via ``concat``. List elements are packed so
    # that x[0] occupies the least-significant slot of the array.

    def _gen_to_bits_strs( type_, prefix, start_bit ):
        # Returns (next_start_bit, [expression strings to feed to concat]).
        if isinstance( type_, list ):
            to_strs = []
            # The packing order is LSB, so we need to reverse the list to make x[-1] higher bits
            for i in reversed(range(len(type_))):
                start_bit, tos = _gen_to_bits_strs( type_[0], f"{prefix}[{i}]", start_bit )
                to_strs.extend( tos )
            return start_bit, to_strs
        elif is_bitstruct_class( type_ ):
            # Nested bitstruct: flatten its fields in declaration order.
            to_strs = []
            for name, typ in getattr(type_, _FIELDS).items():
                start_bit, tos = _gen_to_bits_strs( typ, f"{prefix}.{name}", start_bit )
                to_strs.extend( tos )
            return start_bit, to_strs
        else:
            # Bits leaf: advance the running bit offset by its width.
            end_bit = start_bit + type_.nbits
            return end_bit, [ f"self.{prefix}" ]

    to_bits_strs = []
    total_nbits = 0
    for name, type_ in fields.items():
        total_nbits, tos = _gen_to_bits_strs( type_, name, total_nbits )
        to_bits_strs.extend( tos )
    return total_nbits, _create_fn( 'to_bits', [ 'self' ],
                                    [ f"return concat({', '.join(to_bits_strs)})" ],
                                    _globals={'concat':concat} )
#-------------------------------------------------------------------------
# _mk_from_bits_fn
#-------------------------------------------------------------------------
# Creates static method from_bits that creates a new bitstruct based on Bits
# and instance method _from_bits that copies the value over
#
# @staticmethod
# def from_bits( other ):
# return self.__class__( other[16:32], other[0:16] )
def _mk_from_bits_fns( fields, total_nbits ):
    """Generate the ``from_bits`` constructor for a bitstruct.

    Walks the fields from the most-significant end (``total_nbits``) down to
    bit 0, emitting ``other[lo:hi]`` slices that are passed to nested
    bitstruct constructors, or reversed back into list literals for arrays.
    """

    def _gen_from_bits_strs( type_, end_bit ):
        # Returns (next_end_bit, [constructor-argument strings]).
        if isinstance( type_, list ):
            from_strs = []
            # Since we are doing LSB for x[0], we need to unpack from the last
            # element of the list, and then reverse it again to construct a list ...
            for i in range(len(type_)):
                end_bit, fs = _gen_from_bits_strs( type_[0], end_bit )
                from_strs.extend( fs )
            return end_bit, [ f"[{','.join(reversed(from_strs))}]" ]
        elif is_bitstruct_class( type_ ):
            # Give each distinct class a name usable inside the generated
            # source; classes that collide on __name__ get a fresh _typeN.
            if type_ in type_name_mapping:
                type_name = type_name_mapping[ type_ ]
            else:
                type_name = f"_type{len(type_name_mapping)}"
                type_name_mapping[ type_ ] = type_name
            from_strs = []
            for name, typ in getattr(type_, _FIELDS).items():
                end_bit, fs = _gen_from_bits_strs( typ, end_bit )
                from_strs.extend( fs )
            return end_bit, [ f"{type_name}({','.join(from_strs)})" ]
        else:
            if type_ not in type_name_mapping:
                type_name_mapping[ type_ ] = type_.__name__
            else:
                assert type_name_mapping[ type_ ] == type_.__name__
            start_bit = end_bit - type_.nbits
            return start_bit, [ f"other[{start_bit}:{end_bit}]" ]

    from_bits_strs = []
    end_bit = total_nbits
    # This is to make sure we capture two types with the same name but different
    # attributes
    type_name_mapping = {}
    # FIX: removed the dead ``type_count = 0`` counter; _typeN uniquification
    # already derives N from len(type_name_mapping).
    for _, type_ in fields.items():
        end_bit, fs = _gen_from_bits_strs( type_, end_bit )
        from_bits_strs.extend( fs )
    # Every bit of the source value must be consumed exactly once.
    assert end_bit == 0

    _globals = { y: x for x,y in type_name_mapping.items() }
    assert len(_globals) == len(type_name_mapping)

    # TODO add assertion in bits
    return _create_fn( 'from_bits', [ 'cls', 'other' ],
                       [ "assert cls.nbits == other.nbits, f'LHS bitstruct {cls.nbits}-bit <> RHS other {other.nbits}-bit'",
                         "other = other.to_bits()",
                         f"return cls({','.join(from_bits_strs)})" ], _globals )
#-------------------------------------------------------------------------
# _check_valid_array
#-------------------------------------------------------------------------
def _recursive_check_array_types( nodes ):
    """Validate a nested list as a rectangular array and return its leaf type.

    Raises AssertionError when sibling sub-lists differ in length or leaf
    type, or when a leaf is not a Bits subclass / bitstruct class.
    """
    first = nodes[0]
    if not isinstance( first, list ):
        # Leaf level: every entry must be the exact same valid type object.
        assert issubclass( first, Bits ) or is_bitstruct_class( first )
        for other in nodes[1:]:
            assert other is first
        return first
    # Nested level: siblings must be equally sized lists of the same leaf type.
    expected_len = len(first)
    leaf_type = _recursive_check_array_types( first )
    for other in nodes[1:]:
        assert isinstance( other, list ) and len(other) == expected_len
        other_type = _recursive_check_array_types( other )
        assert other_type is leaf_type
    return leaf_type
def _check_valid_array_of_types( arr ):
# Check if the provided list is a strict multidimensional array
try:
return _recursive_check_array_types( arr )
except Exception as e:
print(e)
return None
#-------------------------------------------------------------------------
# _check_field_annotation
#-------------------------------------------------------------------------
def _check_field_annotation( cls, name, type_ ):
# Make sure not default is annotated
if hasattr( cls, name ):
default = getattr( cls, name )
raise TypeError( "We don't allow subfields to have default value:\n"
f"- Field '{name}' of BitStruct {cls.__name__} has default value {default!r}." )
# Special case if the type is an instance of list
if isinstance( type_, list ):
if _check_valid_array_of_types( type_ ) is None:
raise TypeError( "The provided list spec should be a strict multidimensional ARRAY "
"with no varying sizes or types. All non-list elements should be VALID types." )
else:
# Now we work with types
if not isinstance( type_, type ):
raise TypeError(f"{type_} is not a type\n"\
f"- Field '{name}' of BitStruct {cls.__name__} is annotated as {type_}.")
# More specifically, Bits and BitStruct
if not issubclass( type_, Bits ) and not is_bitstruct_class( type_ ):
raise TypeError( "We currently only support BitsN, list, or another BitStruct as BitStruct field:\n"
f"- Field '{name}' of BitStruct {cls.__name__} is annotated as {type_}." )
#-------------------------------------------------------------------------
# _get_self_name
#-------------------------------------------------------------------------
# Return a self name based on fields.
def _get_self_name( fields ):
    """Pick a 'self' identifier that cannot collide with any field name."""
    if _DEFAULT_SELF_NAME in fields:
        return _ANTI_CONFLICT_SELF_NAME
    return _DEFAULT_SELF_NAME
#-------------------------------------------------------------------------
# _process_cls
#-------------------------------------------------------------------------
# Process the input cls and add methods to it.
# Cache mapping a bitstruct's structural hash to the already-processed class,
# so identical declarations are processed only once.
_bitstruct_hash_cache = {}
def _process_class( cls, add_init=True, add_str=True, add_repr=True,
add_hash=True ):
# Get annotations of the class
cls_annotations = cls.__dict__.get('__annotations__', {})
if not cls_annotations:
raise AttributeError( "No field is declared in the bit struct definition.\n"
f"Suggestion: check the definition of {cls.__name__} to"
" make sure it only contains 'field_name(string): Type(type).'" )
# Get field information from the annotation and prepare for hashing
fields = {}
hashable_fields = {}
def _convert_list_to_tuple( x ):
if isinstance( x, list ):
return tuple( [ _convert_list_to_tuple( y ) for y in x ] )
return x
reserved_fields = ['to_bits', 'from_bits', 'nbits']
for x in reserved_fields:
assert x not in cls.__dict__, f"Currently a bitstruct cannot have {reserved_fields}, but "\
f"{x} is provided as {cls.__dict__[x]}"
for a_name, a_type in cls_annotations.items():
assert a_name not in reserved_fields, f"Currently a bitstruct cannot have {reserved_fields}, but "\
f"{a_name} is annotated as {a_type}"
_check_field_annotation( cls, a_name, a_type )
fields[ a_name ] = a_type
hashable_fields[ a_name ] = _convert_list_to_tuple( a_type )
cls._hash = _hash = hash( (cls.__name__, *tuple(hashable_fields.items()),
add_init, add_str, add_repr, add_hash) )
if _hash in _bitstruct_hash_cache:
return _bitstruct_hash_cache[ _hash ]
_bitstruct_hash_cache[ _hash ] = cls
# Stamp the special attribute so that translation pass can identify it
# as bit struct.
setattr( cls, _FIELDS, fields )
# Add methods to the class
# Create __init__. Here I follow the dataclass convention that we only
# add our generated __init__ function when add_init is true and user
# did not define their own init.
if add_init:
if not '__init__' in cls.__dict__:
cls.__init__ = _mk_init_fn( _get_self_name(fields), fields )
# Create __str__
if add_str:
if not '__str__' in cls.__dict__:
cls.__str__ = _mk_str_fn( fields )
# Create __repr__
if add_repr:
if not '__repr__' in cls.__dict__:
cls.__repr__ = _mk_repr_fn( fields )
# Create __eq__. There is no need for a __ne__ method as python will
# call __eq__ and negate it.
# NOTE: if user overwrites __eq__ it may lead to different behavior for
# the translated verilog as in the verilog world two bit structs are
# equal only if all the fields are equal. We always try to add __eq__
if not '__eq__' in cls.__dict__:
cls.__eq__ = _mk_eq_fn( fields )
else:
w_msg = ( f'Overwriting {cls.__qualname__}\'s __eq__ may cause the '
'translated verilog behaves differently from PyMTL '
'simulation.')
warnings.warn( w_msg )
# Create __hash__.
if add_hash:
if not '__hash__' in cls.__dict__:
cls.__hash__ = _mk_hash_fn( fields )
# Shunning: add __ilshift__ and _flip for update_ff
assert not '__ilshift__' in cls.__dict__ and not '_flip' in cls.__dict__
cls.__ilshift__, cls._flip | |
area
analysisList = [
("InundationMax", self.moderatedMax),
("InundationTiming", self.moderatedMax, [6]),
]
return analysisList
def _extraRainfallAnalysisList(self):
analysisList = [
("QPF", self.accumSum),
]
return analysisList
###############################################################
### High level flow of formatter
def generateForecast(self, argDict):
    """Top-level driver: build the TCV product text for all hazard segments.

    Returns the formatted product string on success, or an error string
    describing why the product could not be generated.
    """
    # Generate Text Phrases for a list of edit areas
    self.debug_print("argDict = %s" % (self._pp.pformat(argDict)), 1)

    error = self._initializeVariables(argDict)
    if error is not None:
        return error

    if self._stormName is None or self._stormName == "":
        return "Could not determine the storm name"

    # Each segment groups zones that share the same hazards.
    self._segmentList = self._determineSegments()
    self.debug_print("Segment Information: %s" % (self._pp.pformat(self._segmentList)), 1)
    if len(self._segmentList) == 0:
        return "No hazards to report"

    # Determine time ranges
    self._determineTimeRanges(argDict)

    # Make sure we have all of the necessary grids before continuing
    error = self._performGridChecks(argDict)
    if error is not None:
        return error

    # Sample the data
    self._sampleData(argDict)

    # Create the product dictionary and format it to create the output
    productDict = self._createProductDictionary(self._productParts_TCV,
                                                self._segmentList,
                                                areProductPartsSegmented=True)
    productOutput = self._formatProductDictionary(LegacyFormatter, productDict)

    # Persist the advisory state so later runs can compare against it.
    self._archiveCurrentAdvisory()

    return productOutput
def _initializeVariables(self, argDict):
    # Delegate the shared setup to the common base implementation first.
    error = HLSTCV_Common.TextProduct._initializeVariables(self, argDict)
    if error is not None:
        return error

    # Fresh per-section caches, populated later per segment.
    self._windSection = {}
    self._stormSurgeSection = {}
    self._floodingRainSection = {}
    self._tornadoSection = {}

    self._initializeAdvisories()
    return None
def _performGridChecks(self, argDict):
    """Verify every required grid is present; return an error string
    listing the deficient elements, or None when all checks pass."""
    # Each entry: (check function, *arguments for that function).
    gridChecks = [(self._isCorrectNumGrids, "FloodingRainThreat", 1, argDict),
                  (self._isCorrectNumGrids, "TornadoThreat", 1, argDict),
                  (self._isContinuousDuration, "QPF", 72, argDict)]

    if self._WSPGridsAvailable:
        gridChecks += [(self._isCorrectNumGrids, "WindThreat", 1, argDict),
                       (self._isContinuousDuration, "Wind", 120, argDict),
                       (self._isContinuousDuration, "WindGust", 120, argDict),
                       (self._isContinuousDuration, "pws34int", 114, argDict),
                       (self._isContinuousDuration, "pws64int", 114, argDict),
                       (self._isCombinedContinuousDuration, "pwsD34", "pwsN34", 102, argDict),
                       (self._isCombinedContinuousDuration, "pwsD64", "pwsN64", 102, argDict)]

    if self._PopulateSurge and len(self._coastalAreas()) != 0:
        gridChecks += [(self._isCorrectNumGrids, "InundationMax", 1, argDict),
                       (self._isCorrectNumGrids, "InundationTiming", 12, argDict)]

    missingGridErrors = []
    for checkFunc, *checkArgs in gridChecks:
        if checkFunc(*checkArgs):
            continue
        # Build a message appropriate to the kind of check that failed.
        if checkFunc == self._isCorrectNumGrids:
            if checkArgs[1] == 1:
                missingGridErrors.append("%s needs at least 1 grid" % (checkArgs[0]))
            else:
                missingGridErrors.append("%s needs at least %s grids" % (checkArgs[0], checkArgs[1]))
        elif checkFunc == self._isContinuousDuration:
            missingGridErrors.append("%s needs at least %s continuous hours worth of data" %
                                     (checkArgs[0], checkArgs[1]))
        else:
            missingGridErrors.append("%s and %s combined need at least %s continuous hours worth of data" %
                                     (checkArgs[0], checkArgs[1], checkArgs[2]))

    if missingGridErrors:
        lines = ["There were problems with the following weather elements:"]
        lines.extend("\t" + gridError for gridError in missingGridErrors)
        return "\n".join(lines) + "\n"
    return None
    def _isCorrectNumGrids(self, weatherElement, expectedNumGrids, argDict):
        """Return True if ``weatherElement`` has at least ``expectedNumGrids``
        grids overlapping self._timeRange1Hour.

        Args:
            weatherElement: GFE parm name (e.g. "WindThreat").
            expectedNumGrids: minimum number of overlapping grids required.
            argDict: formatter argument dict supplying "ifpClient" and "databaseID".
        """
        ifpClient = argDict["ifpClient"]
        dbId = argDict["databaseID"]
        parmId = ParmID(weatherElement, dbId)
        times = ifpClient.getGridInventory(parmId)
        self.debug_print("_isCorrectNumGrids test for element: %s" % weatherElement, 1)
        self.debug_print("Expected number of grids: %s" % expectedNumGrids, 1)
        gridTimes = []
        for index in range(len(times)):
            gridTime = TimeRange.TimeRange(times[index])
            # Keep only grids that overlap the product's time range; a grid that
            # ends before the range starts or starts after it ends is skipped.
            if (gridTime.endTime() <= self._timeRange1Hour.startTime() or
                gridTime.startTime() >= self._timeRange1Hour.endTime()):
                # prettyStartTime = self._pp.pformat(str(gridTime.startTime()))
                # prettyEndTime = self._pp.pformat(str(gridTime.endTime()))
                # self.debug_print("skipping grid %s (%s - %s): outside of time range"
                #                  % (index, prettyStartTime, prettyEndTime), 1)
                pass
            else:
                gridTimes.append(gridTime)
        self.debug_print("Actual number of grids: %s" % len(gridTimes), 1)
        retval = len(gridTimes) >= expectedNumGrids
        if not retval:
            self.debug_print("_isCorrectNumGrids test failed", 1)
            self.debug_print("self._timeRange1Hour: %s" % str(self._timeRange1Hour), 1)
            self.debug_print("times: %s" % str(times), 1)
        return retval
def _isContinuousDuration(self, weatherElement, minimumNumHours, argDict):
return self._checkContinuousDuration([weatherElement], minimumNumHours, argDict)
def _isCombinedContinuousDuration(self, weatherElement1, weatherElement2, minimumNumHours, argDict):
return self._checkContinuousDuration([weatherElement1, weatherElement2], minimumNumHours, argDict)
    def _checkContinuousDuration(self, weatherElementList, minimumNumHours, argDict):
        """Return True if the combined grid inventories of the given elements
        cover at least ``minimumNumHours`` continuous hours within
        self._timeRange1Hour (counting stops at the first gap).
        """
        self.debug_print("_checkContinuousDuration for elements: %s" % \
                         self._pp.pformat(weatherElementList), 1)
        self.debug_print("Minimum Number of Hours: %s" % minimumNumHours, 1)
        ifpClient = argDict["ifpClient"]
        dbId = argDict["databaseID"]
        gridTimes = []
        inventoryDict = {}
        # Pool the inventories of all requested elements into one list.
        # NOTE(review): uses times.size() here but len(times) in
        # _isCorrectNumGrids -- presumably a Java-backed list supporting both;
        # confirm before normalizing.
        for weatherElement in weatherElementList:
            parmId = ParmID(weatherElement, dbId)
            times = ifpClient.getGridInventory(parmId)
            inventoryDict[weatherElement] = times
            for index in range(times.size()):
                gridTimes.append(TimeRange.TimeRange(times[index]))
        if len(gridTimes) == 0:
            # No grids
            self.debug_print("No grids found.", 1)
            return False
        # Walk grids in chronological order, accumulating hours until a gap.
        gridTimes = sorted(gridTimes, key= lambda gridTime: gridTime.startTime())
        totalHours = 0
        previousEndTime = None
        for gridTime in gridTimes:
            if gridTime.endTime() <= self._timeRange1Hour.startTime():
                # prettyEndTime = self._pp.pformat(str(gridTime.endTime()))
                # prettyStartTime = self._pp.pformat(str(self._timeRange1Hour.startTime()))
                # self.debug_print("skipping: grid end time (%s) before time range start time (%s)"
                #                  % (prettyEndTime, prettyStartTime), 1)
                continue
            if gridTime.startTime() >= self._timeRange1Hour.endTime():
                # prettyStartTime = self._pp.pformat(str(gridTime.startTime()))
                # prettyEndTime = self._pp.pformat(str(self._timeRange1Hour.endTime()))
                # self.debug_print("done: grid start time (%s) after time range end time (%s)"
                #                  % (prettyStartTime, prettyEndTime), 1)
                break
            if previousEndTime is None:
                previousEndTime = gridTime.startTime()
            # A gap between consecutive grids ends the continuous run.
            if previousEndTime != gridTime.startTime():
                break
            previousEndTime = gridTime.endTime()
            totalHours += gridTime.duration() / 3600  # Convert from seconds to hours
        self.debug_print("Total Hours of continuous grids: %s" % totalHours, 1)
        retval = totalHours >= minimumNumHours
        if not retval:
            self.debug_print("_checkContinuousDuration failed.", 1)
            self.debug_print("self._timeRange1Hour: %s" % self._pp.pformat(self._timeRange1Hour), 1)
            for we in inventoryDict:
                self.debug_print("times for %s: %s" % (we, str(inventoryDict[we])), 1)
            self.debug_print("Not continuous at: %s" % str(previousEndTime), 1)
        return retval
###############################################################
### Product Parts Implementation
def _noOpParts(self):
'''
These represent product parts that should be skipped when calling product part methods.
They will be handled automatically by the formatters.
'''
return ['CR', 'endProduct', 'endSegment', 'doubleAmpersand']
################# Product Level
def _easMessage(self, productDict, productSegmentGroup, arguments=None):
productDict['easMessage'] = self._easPhrase
################# Segment Level
def _setup_segment(self, segmentDict, productSegmentGroup, productSegment):
segment, vtecRecords = productSegment
self.debug_print('setup_segment productSegment %s' % (self._pp.pformat(productSegment)), 1)
# NOTE -- using _getVtecRecords to change to milliseconds
segmentVtecRecords = self._getVtecRecords(segment)
# UGCs and Expire Time
# Assume that the geoType is the same for all hazard events in the segment i.e. area or point
self._ugcs = [segment]
self._timeZones = self._tpc.hazardTimeZones(self._ugcs)
# In order to compute the expire time, the VTEC record times
# need to be in milliseconds.
recordsInMS = []
for record in segmentVtecRecords:
recordInMS = copy.copy(record)
recordInMS["startTime"] = recordInMS["startTime"] * 1000
recordInMS["endTime"] = recordInMS["endTime"] * 1000
if recordInMS.has_key("purgeTime"):
recordInMS["purgeTime"] = recordInMS["purgeTime"] * 1000
if recordInMS.has_key("issueTime"):
recordInMS["issueTime"] = recordInMS["issueTime"] * 1000
recordsInMS.append(recordInMS)
# Get the expire time in milliseconds since the epoch
self._expireTime = self._tpc.getExpireTime(
self._issueTime_ms, self._purgeHours, recordsInMS)
# Then convert it to a date
segmentDict['expireTime'] = self._convertToISO(self._expireTime)
# Don't show UPG headlines
nonUPGrecords = []
for record in segmentVtecRecords:
if record['act'] != "UPG":
nonUPGrecords.append(record)
self._summaryHeadlines_value, _ = self._tpc.getHeadlinesAndSections(
nonUPGrecords, self._productID, self._issueTime_secs)
def _vtecRecords(self, segmentDict, productSegmentGroup, productSegment):
segment, vtecRecords = productSegment
records = []
for vtecRecord in vtecRecords:
vstr = vtecRecord["vtecstr"]
self.debug_print("vtecRecord = %s" % (self._pp.pformat(vtecRecord)), 1)
self.debug_print("final vstr = %s" % vstr, 1)
records.append(vstr)
segmentDict['vtecRecords'] = records
def _areaList(self, segmentDict, productSegmentGroup, productSegment):
# Area String
segmentDict['areaList'] = self._tpc.formatUGC_names(self._ugcs)
def _issuanceTimeDate(self, segmentDict, productSegmentGroup, productSegment):
segmentDict['issuanceTimeDate'] = self._timeLabel
    def _summaryHeadlines(self, segmentDict, productSegmentGroup, productSegment):
        """Build the summary-headline section for this segment.

        Collects a definition for each allowed hazard appearing in the segment's
        VTEC records with a NEW or EXA action, then stores the in-effect headline
        lines plus those definitions in segmentDict['summaryHeadlines'].
        """
        segment, vtecRecords = productSegment
        definitions = []
        hazardsFound = []
        for (phenSig, actions, name) in self.allowedHazards():
            for vtecRecord in vtecRecords:
                # The 'phensig' in the VTEC record could contain an
                # ETN. As such, we need to strip the ETN before doing a
                # comparison with the allowedHazards.
                if vtecRecord["phensig"].split(":")[0] == phenSig and \
                   phenSig not in hazardsFound and \
                   vtecRecord["act"] in ["NEW", "EXA"]:
                    hazardsFound.append(phenSig)
                    definition = self._hazardDefinition(phenSig)
                    # Only hazards with a known definition are included.
                    if definition != "":
                        definitions.append(definition)
        summaryDict = collections.OrderedDict()
        headlines = self._summaryHeadlines_value.split("\n")
        headlinesInEffect = []
        for headline in headlines:
            # Drop blank lines left over from the split.
            if len(headline) != 0:
                headlinesInEffect.append(headline)
        summaryDict['headlinesInEffect'] = headlinesInEffect
        summaryDict['headlineDefinitions'] = definitions
        segmentDict['summaryHeadlines'] = summaryDict
def _locationsAffected(self, segmentDict, productSegmentGroup, productSegment):
segment, vtecRecords = productSegment
import TCVAreaDictionary
tcv_AreaDictionary = TCVAreaDictionary.TCV_AreaDictionary
segmentDict['locationsAffected'] = []
if segment in tcv_AreaDictionary:
segmentDict['locationsAffected'] = tcv_AreaDictionary[segment]["locationsAffected"]
def _fcstConfidence(self, segmentDict, productSegmentGroup, productSegment):
# TODO - Get this from the TCM product potentially? Not included until provided from NHC
return ""
def _infoSection(self, segmentDict, productSegmentGroup, productSegment):
segment, vtecRecords = productSegment
import TCVAreaDictionary
tcv_AreaDictionary = TCVAreaDictionary.TCV_AreaDictionary
segment, vtecRecords = productSegment
infoSection = []
if segment in tcv_AreaDictionary:
infoSection = tcv_AreaDictionary[segment]["infoSection"]
segmentDict['infoSection'] = infoSection
def _endSection(self, segmentDict, productSegmentGroup, productSegment):
segmentDict['endSection'] = "\n$$"
################# Product Parts Helper Methods
def _hazardDefinition(self, phenSig):
import VTECTable
phen, sig = phenSig.split('.')
headline = VTECTable.VTECTable[phenSig]["hdln"]
definition = "A " + headline + " means "
if phenSig == "HU.W":
definition += "hurricane-force winds are expected"
elif phenSig == | |
_ in range(2):
sync_mode.tick(timeout=self.timeout)
# Loop until all vehicles have reached their goal or we've exceeded self.max_iters.
for _ in range(self.max_iters):
snap, img = sync_mode.tick(timeout=self.timeout)
# Handle predictions.
self.agent_history.update(snap, self.world)
pred_dict = self._make_predictions()
# Run policies for each agent.
t_elapsed = snap.elapsed_seconds
completed = True
for idx_act, (act, policy) in enumerate(zip(self.vehicle_actors, self.vehicle_policies)):
control, z0, u0, is_feasible, solve_time = policy.run_step(pred_dict)
act_key = f"{act.attributes['role_name']}_{idx_act}"
if not policy.done():
z0 = np.append(t_elapsed, z0) # add the Carla timestamp
self.results_dict[act_key]["state_trajectory"].append(z0)
self.results_dict[act_key]["input_trajectory"].append(u0)
self.results_dict[act_key]["feasibility"].append(is_feasible)
self.results_dict[act_key]["solve_times"].append(solve_time)
# true at the end of the loop only if all agents are done or if iter_ctr>=max_iters
completed = completed and policy.done()
act.apply_control(control)
if idx_act == self.ego_vehicle_idx:
# Keep track of ego's information for rendering.
ego_vel = act.get_velocity()
ego_speed = np.linalg.norm([ego_vel.x, ego_vel.y])
ego_ctrl = control
try:
ego_conf_thresh = policy.confidence_thresh_manager.conf_thresh
except:
ego_conf_thresh = 5.991 # 95% confidence level (default)
self.results_dict[act_key]["conf_threshs"].append(ego_conf_thresh)
else:
self.results_dict[act_key]["conf_threshs"].append(np.nan)
# Get drone camera image.
img_drone = np.frombuffer(img.raw_data, dtype=np.uint8)
img_drone = np.reshape(img_drone, (img.height, img.width, 4))
img_drone = img_drone[:, :, :3]
img_drone = cv2.resize(img_drone, (self.viz_params.img_width, self.viz_params.img_height), interpolation = cv2.INTER_AREA)
# Handle overlays on drone camera image.
if self.viz_params.overlay_ego_info or self.viz_params.overlay_mode_probs:
# Place a alpha-blended rectangle to make text reading easier.
img_subregion = img_drone[10:110, 25:775, :].astype(np.float)
img_mask = 64*np.ones(img_subregion.shape, img_subregion.dtype)
alpha = 0.8
img_comb = alpha * img_mask + (1. - alpha) * img_subregion
img_comb = img_comb.astype(np.uint8)
img_drone[10:110, 25:775, :] = img_comb
if self.viz_params.overlay_ego_info:
ego_str = f"EGO - v:{ego_speed:.3f}, th: {ego_ctrl.throttle:.2f}, bk: {ego_ctrl.brake:.2f}, st: {ego_ctrl.steer:.2f}"
cv2.putText(img_drone, ego_str, (50,50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
if self.viz_params.overlay_gmm:
# We just deal with the first target vehicle for now.
# TODO: generalize to other target vehicles.
if pred_dict["tvs_valid_pred"][0]:
mus = pred_dict["tvs_mode_dists"][0][0]
sigmas = pred_dict["tvs_mode_dists"][1][0]
self._viz_gmm(img_drone, mus, sigmas, mdist_sq_thresh=ego_conf_thresh)
if self.viz_params.overlay_traj_hist:
self._viz_traj_hist(img_drone)
if self.viz_params.overlay_mode_probs:
# We just deal with the first target vehicle for now.
# TODO: generalize to other target vehicles.
if pred_dict["tvs_valid_pred"][0]:
cv2.putText(img_drone, "Mode probabilities: ", (50,100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
for prob_idx, mode_prob in enumerate(pred_dict["tvs_mode_probs"][0]):
cv2.putText(img_drone, f"{mode_prob:.3f}",
(360 + prob_idx * 100, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, self.mode_rgb_colors[prob_idx], 2)
# Handle visualization / saving to video.
if self.viz_params.visualize_opencv:
cv2.imshow("Drone", img_drone); cv2.waitKey(1)
if self.viz_params.save_avi:
writer.write(img_drone)
if completed:
# All cars reached their destinations, end before self.max_iters.
break
# Save results and mark successful completion.
for act_key in self.results_dict:
for arr_key in ["state_trajectory",
"input_trajectory",
"feasibility",
"solve_times",
"conf_threshs"]:
self.results_dict[act_key][arr_key] = np.array(self.results_dict[act_key][arr_key])
pkl_name = os.path.join(self.savedir, "scenario_result.pkl")
pickle.dump(self.results_dict, open(pkl_name, "wb"))
ran_successfully = True
# Teardown.
finally:
if writer:
writer.release()
for actor in self.vehicle_actors:
actor.destroy()
self.drone.destroy()
cv2.destroyAllWindows()
return ran_successfully
def _make_predictions(self):
"""
This returns GMM predictions of target vehicles in a useable format for MPC modules.
Note that the predictions are in the world/global frame, not in the target vehicle's local frame.
We use the following notation:
N_TV: number of target vehicles.
ego_N: number of prediction timesteps
ego_num_modes: number of GMM modes to return (maximum)
The format is as follows:
tvs_poses : [ Vehicle i's (X,Y,theta) coords, np.ndarray with size (3,) ]_{i=1}^{N_TV}
tvs_mode_probs : [ GMM mode distribution for vehicle i, np.ndarray with size (ego_num_modes,) ]_{i=1}^{N_TV}
tvs_mode_dists : [
[ GMM mean prediction for vehicle i, np.ndarray with size (ego_num_modes, ego_N, 2) ]_{i=1}^{N_TV},
[ GMM covar prediction for vehicle i, np.ndarray with size (ego_num_modes, ego_N, 2, 2) ]_{i=1}^{N_TV}
]
tvs_valid_pred : [ Flag indicating if vehicle i's prediction are real or spoofed, bool ]_{i=1}^{N_TV}
tvs_dimensions : [ Dict containing vehicle geometry parameters ]_{i=1}^{N_TV}
"""
if len(self.tv_vehicle_idxs) == 0:
# No TVs in the scene so we make a fake constant pose prediction that is over a km away from the EV.
ego_location = self.vehicle_actors[self.ego_vehicle_idx].get_location()
ego_x, ego_y = ego_location.x, -ego_location.y
curr_target_vehicle_pose = np.array([1000 + ego_x, 1000 + ego_y, 0.])
tvs_poses = [curr_target_vehicle_pose]
tvs_traj_hists = np.stack([curr_target_vehicle_pose[:2]]*5)
tvs_mode_probs = [ np.ones(self.ego_num_modes) / self.ego_num_modes ]
tvs_mode_dists = [[np.stack([[curr_target_vehicle_pose[:2]]*self.ego_N]*self.ego_num_modes)],
[np.stack([[np.identity(2)]*self.ego_N]*self.ego_num_modes)]]
tvs_valid_pred = [False]
tvs_dimensions = [{}]
else:
# TODO: clean up and generalize this to many target vehicles.
target_agent_id = self.vehicle_actors[self.tv_vehicle_idxs[0]].id
past_states_tv, R_target_to_world, t_target_to_world = \
get_target_agent_history(self.agent_history, target_agent_id)
curr_target_vehicle_position = R_target_to_world @ past_states_tv[-1, 1:3] + t_target_to_world
curr_target_vehicle_yaw = np.arctan2(R_target_to_world[1][0], R_target_to_world[0][0])
curr_target_vehicle_pose = np.append(curr_target_vehicle_position, curr_target_vehicle_yaw)
tvs_poses = [curr_target_vehicle_pose]
# TODO: clean up, basic idea is to get states at t-4, ..., t in the world frame.
hist_tv_positions = R_target_to_world @ past_states_tv[-5:, 1:3].T + t_target_to_world.reshape(2, 1)
hist_tv_positions = hist_tv_positions.T
tvs_traj_hists = [hist_tv_positions]
if np.any(np.isnan(past_states_tv)):
# Not enough data for predictions to be made.
# We just return a constant pose GMM until we can extrapolate.
tvs_mode_probs = [ np.ones(self.ego_num_modes) / self.ego_num_modes ]
tvs_mode_dists = [[np.stack([[curr_target_vehicle_position]*self.ego_N]*self.ego_num_modes)],
[np.stack([[0.1*np.identity(2)]*self.ego_N]*self.ego_num_modes)]]
tvs_valid_pred = [False]
tvs_dimensions = [{}]
else:
# Nominal case: query the prediction model and parse GMM preds, truncating to self.ego_num_modes
# modes and self.ego_N timesteps.
img_tv = self.rasterizer.rasterize(self.agent_history, target_agent_id)
gmm_pred_tv = self.pred_model.predict_instance(img_tv, past_states_tv[:-1])
if type(gmm_pred_tv) is dict:
modes = list(gmm_pred_tv.keys())
mode_probs = np.array([gmm_pred_tv[k]["mode_probability"] for k in modes])
mus = np.array([gmm_pred_tv[k]["mus"] for k in modes])
sigmas = np.array([gmm_pred_tv[k]["sigmas"] for k in modes])
n_modes = len(modes)
n_timesteps = mus.shape[1]
gmm_pred_tv = GMMPrediction(n_modes, n_timesteps, mode_probs, mus, sigmas)
elif type(gmm_pred_tv) is GMMPrediction:
pass
else:
raise TypeError(f"Unexpected GMM type: {type(gmm_pred_tv)}")
gmm_pred_tv=gmm_pred_tv.get_top_k_GMM(self.ego_num_modes)
gmm_pred_tv.transform(R_target_to_world, t_target_to_world)
tvs_mode_probs = [gmm_pred_tv.mode_probabilities]
tvs_mode_dists = [[gmm_pred_tv.mus[:, :self.ego_N, :]], [gmm_pred_tv.sigmas[:, :self.ego_N, :, :]]]
tvs_valid_pred = [True]
tvs_dimensions = [vehicle_name_to_dimensions(self.vehicle_actors[self.tv_vehicle_idxs[0]].type_id)]
pred_dict = {"tvs_poses" : tvs_poses,
"tvs_traj_hists" : tvs_traj_hists,
"tvs_mode_probs" : tvs_mode_probs,
"tvs_mode_dists" : tvs_mode_dists,
"tvs_valid_pred" : tvs_valid_pred,
"tvs_dimensions" : tvs_dimensions}
return pred_dict
def _setup_carla_world(self, carla_params):
client = carla.Client(carla_params.ip_addr, carla_params.port)
client.set_timeout(carla_params.timeout_period)
self.world = client.load_world(carla_params.map_str)
self.world.set_weather(getattr(carla.WeatherParameters, carla_params.weather_str))
    def _setup_camera(self, drone_viz_params):
        """Spawn a fixed top-down RGB "drone" camera and precompute the
        world->image affine transform (A, b) used by the overlay helpers.

        Also moves the spectator view to the camera pose for convenience.
        """
        bp_library = self.world.get_blueprint_library()
        bp_drone = bp_library.find('sensor.camera.rgb')
        # TODO: compute these, hardcoded for now based on the following:
        # 1920 (H) x 1080 (W), fov of 90 degrees, pitch of -90 degrees, height of 50 m above ground.
        # The assert guards the hardcoded transform below against other geometries.
        assert drone_viz_params.z == 50. and \
               drone_viz_params.roll == 0. and \
               drone_viz_params.pitch == -90. and \
               drone_viz_params.yaw == 0. and \
               drone_viz_params.img_width == 1920 and \
               drone_viz_params.img_height == 1080 and \
               drone_viz_params.fov == 90
        # NOTE(review): 19.2 appears to be pixels-per-meter for this geometry
        # (1920 px / (2 * 50 m * tan(45 deg)) = 19.2) -- confirm.
        self.A_world_to_drone = np.array([[  0.,  -19.2],
                                          [-19.2,    0.]])
        self.b_world_to_drone = np.array([ 0.5 * drone_viz_params.img_width,
                                           0.5 * drone_viz_params.img_height ])
        # Shift the image center by the camera's (x, y) ground position.
        self.b_world_to_drone += 19.2 * np.array([-drone_viz_params.y,
                                                   drone_viz_params.x])
        cam_loc = carla.Location(x=drone_viz_params.x,
                                 y=drone_viz_params.y,
                                 z=drone_viz_params.z)
        cam_ori = carla.Rotation(roll=drone_viz_params.roll,
                                 pitch=drone_viz_params.pitch,
                                 yaw=drone_viz_params.yaw)
        cam_transform = carla.Transform(cam_loc, cam_ori)
        bp_drone.set_attribute('image_size_x', str(drone_viz_params.img_width))
        bp_drone.set_attribute('image_size_y', str(drone_viz_params.img_height))
        bp_drone.set_attribute('fov', str(drone_viz_params.fov))
        bp_drone.set_attribute('role_name', 'drone')
        self.drone = self.world.spawn_actor(bp_drone, cam_transform)
        # Move the spectator to the specified drone position for convenience.
        spec = self.world.get_spectator()
        spec.set_transform(cam_transform)
    def _setup_vehicles(self, vehicle_params_list, carla_params):
        """Spawn all vehicles at their intersection start poses and build the
        per-vehicle actor/policy/color/speed bookkeeping lists.

        Requires exactly one vehicle with role "ego"; "target" vehicles are
        tracked for prediction, "static" vehicles are neither.
        """
        intersection_fname = os.path.join( os.path.dirname(os.path.abspath(__file__)),
                                           carla_params.intersection_csv_loc )
        intersection = load_intersection(intersection_fname)
        bp_library = self.world.get_blueprint_library()
        self.vehicle_actors   = []  # list of carla.Actor objects corresponding to created vehicles
        self.vehicle_policies = []  # list of corresponding control policy classes per vehicle
        self.vehicle_colors   = []  # list of colors for each car, also used for plotting
        self.vehicle_init_speeds = []  # list of initial speeds to set up initial conditions of sim.
        ego_vehicle_idxs = []  # list index of the ego vehicle (expect this to only hold one entry)
        tv_vehicle_idxs = []   # list indices of target vehicles
        for idx, vp in enumerate(vehicle_params_list):
            veh_bp = bp_library.find(vp.vehicle_type)
            veh_bp.set_attribute("color", vp.vehicle_color)
            veh_bp.set_attribute("role_name", vp.role)
            # vehicle_color is an "R, G, B" string; keep an int triple for plotting.
            self.vehicle_colors.append([int(x) for x in vp.vehicle_color.split(", ")])
            if vp.role == "ego":
                ego_vehicle_idxs.append(idx)
            elif vp.role == "static":
                pass
            elif vp.role == "target":
                tv_vehicle_idxs.append(idx)
            else:
                raise ValueError(f"Invalid vehicle role selection : {vp.role}")
            start_transform = get_intersection_transform(intersection, vp, "start")
            goal_transform = get_intersection_transform(intersection, vp, "goal")
            veh_actor = self.world.spawn_actor(veh_bp, start_transform)
            # Policy runs at the simulator rate (dt = 1 / fps).
            veh_policy = get_vehicle_policy(vp, veh_actor, goal_transform, 1.0/carla_params.fps)
            self.vehicle_actors.append(veh_actor)
            self.vehicle_policies.append(veh_policy)
            self.vehicle_init_speeds.append(vp.init_speed)
        # Identify the ego vehicle.
        if len(ego_vehicle_idxs) != 1:
            raise RuntimeError(f"Invalid number of ego vehicles spawned: {len(ego_vehicle_idxs)}")
        self.ego_vehicle_idx = ego_vehicle_idxs[0]
        # Identify target vehicles (if present, can be empty).
        self.tv_vehicle_idxs = tv_vehicle_idxs
        # Parameters used for GMM-based predictions (see _make_predictions code.)
        self.ego_N = vehicle_params_list[self.ego_vehicle_idx].N_mpc  # constraint horizon (i.e. considered prediction horizon for GMM)
        self.ego_num_modes = vehicle_params_list[self.ego_vehicle_idx].N_modes  # maximum number of modes considered from GMM
def _setup_predictions(self, prediction_params):
self.agent_history = AgentHistory(self.world.get_actors())
self.rasterizer = SemBoxRasterizer(self.world.get_map().get_topology(), render_traffic_lights=\
prediction_params.render_traffic_lights)
prefix = os.path.abspath(__file__).split('scripts')[0]
self.pred_model = MultiPath( anchors=np.load(os.path.join(prefix, prediction_params.model_anchors)),
num_timesteps=25,
num_hist_timesteps=5 )
self.pred_model.load_weights( os.path.join(prefix, prediction_params.model_weights) )
# Try to do a sample prediction, initialize and check GPU model is working fine.
blank_image = np.zeros((self.rasterizer.sem_rast.raster_height,
self.rasterizer.sem_rast.raster_width,
3), dtype=np.uint8)
zero_traj = np.column_stack(( np.arange(-1.0, 0.00, 0.2),
np.zeros((5,3))
)).astype(np.float32)
self.pred_model.predict_instance(image_raw = | |
(self.dim_model, ), dtype = dtype
)
#self.flag_continue = True
self.cnt_total_event = numpy.int32(len(self.one_seq))
#
#
def soft_relu(self, x):
return numpy.log(numpy.float32(1.0)+numpy.exp(x))
#
def hard_relu(self, x):
return numpy.float32(0.5) * (x + numpy.abs(x) )
#
#
def save_model(self, file_save):
print("saving model of generator ... ")
model_dict = {
'mu': numpy.copy(self.mu),
'W_delta': numpy.copy(self.W_delta),
'W_alpha': numpy.copy(self.W_alpha),
'Emb_event': numpy.copy(self.Emb_event),
'Emb_time': numpy.copy(self.Emb_time),
'W_recur': numpy.copy(self.W_recur),
'b_recur': numpy.copy(self.b_recur),
'dim_process': self.dim_process,
'dim_model': self.dim_model,
'dim_time': self.dim_time,
'dim_float': self.dim_float,
'name': self.name,
'args': self.args
}
with open(file_save, 'wb') as f:
pickle.dump(model_dict, f)
#
def restart_sequence(self):
# clear the events memory and reset starting time is 0
self.intensity_tilde = None
self.intensity = None
#
self.intensity_tilde_ub = None
self.intensity_ub = None
#
self.one_seq = []
#
self.one_seq.append(
{
'idx_event': numpy.int32(0),
'type_event': self.dim_process,
'time_since_start': numpy.float32(0.0),
'time_since_last_event': numpy.float32(0.0),
'time_since_last_same_event': numpy.float32(0.0)
}
)
self.hidden_t = numpy.zeros(
(self.dim_model, ), dtype = dtype
)
self.cell_t = numpy.zeros(
(self.dim_model, ), dtype = dtype
)
#self.flag_continue = True
self.cnt_total_event = numpy.int32(len(self.one_seq))
#
#
#
#
def float32_to_bit(self, float_input):
'''
input a number in float, convert it to float32
get its 32-bit representations
'''
float32_input = numpy.float32(float_input)
str_input = ''.join(bin(ord(c)).replace('0b', '').rjust(8, '0') for c in struct.pack('!f', float32_input))
bit_input = numpy.zeros(
(self.dim_float,), dtype=dtype
)
assert(self.dim_float == len(str_input))
for idx, item_in_input in enumerate(str_input):
bit_input[idx] = numpy.float32(item_in_input)
return numpy.copy(bit_input)
#
#
def sigmoid(self, x):
return 1 / (1+numpy.exp(-x))
#
#
    def compute_hidden_states(self):
        """Advance the LSTM one step using the most recent event in self.one_seq.

        Embeds the last event's type and the bit-encoded inter-event time, runs
        one LSTM cell update, and overwrites self.hidden_t / self.cell_t.
        """
        # every time it is called,
        # it computes the new hidden states of the LSTM
        # it gets the last event in the sequence
        # which is generated at t_(rec(t))
        # and compute its hidden states
        # Event-type embedding of the most recent event.
        emb_event_t = self.Emb_event[
            self.one_seq[-1]['type_event'], :
        ]
        # Time embedding: the float32 bit pattern of the inter-event gap,
        # projected through Emb_time.
        emb_time_t = numpy.dot(
            self.float32_to_bit(
                self.one_seq[-1]['time_since_last_event']
            ),
            self.Emb_time
        )
        # One affine transform produces all four LSTM pre-activations.
        post_transform = numpy.dot(
            numpy.concatenate(
                (emb_event_t, emb_time_t, self.hidden_t),
                axis = 0
            ),
            self.W_recur
        ) + self.b_recur
        #
        # Slice the pre-activations into input/forget/output gates and the
        # candidate cell value.
        gate_input = self.sigmoid(
            post_transform[:self.dim_model]
        )
        gate_forget = self.sigmoid(
            post_transform[self.dim_model:2*self.dim_model]
        )
        gate_output = self.sigmoid(
            post_transform[2*self.dim_model:3*self.dim_model]
        )
        gate_pre_c = numpy.tanh(
            post_transform[3*self.dim_model:]
        )
        #
        # Standard LSTM cell/hidden updates.
        cell_t_new = gate_forget * self.cell_t + gate_input * gate_pre_c
        hidden_t_new = gate_output * numpy.tanh(cell_t_new)
        self.hidden_t = numpy.copy(hidden_t_new)
        self.cell_t = numpy.copy(cell_t_new)
#
#
#
    def compute_intensity_given_past(self, time_current):
        """Compute self.intensity (one value per event type) at time_current.

        The hidden state's contribution decays exponentially (rates ``delta``)
        with the time elapsed since the most recent event; softplus keeps the
        resulting intensities positive.  Results are cached on self.
        """
        # compute the intensity of current time
        # given the past events
        #
        time_recent = self.one_seq[-1]['time_since_start']
        #
        # Per-type decay rates derived from the current hidden state.
        delta = self.soft_relu(
            numpy.tensordot(
                self.hidden_t, self.W_delta, (0, 0)
            )
        )
        #
        # Decayed hidden state, broadcast to one column per event type.
        hidden_with_time = numpy.exp(
            -delta * (
                time_current - time_recent
            )
        ) * self.hidden_t[:, None]
        # (self.dim_model, self.dim_process)
        # self.W_alpha (self.dim_model, self.dim_process)
        self.intensity_tilde = numpy.sum(
            self.W_alpha * hidden_with_time,
            axis = 0
        ) + self.mu
        #
        # Softplus maps the unconstrained pre-intensity to a positive rate.
        self.intensity = self.soft_relu(
            self.intensity_tilde
        )
        # intensity computation is finished
#
    def compute_intensity_upper_bound(self, time_current):
        """Compute self.intensity_ub, a per-type upper bound on the intensity
        for all times >= time_current, used by the thinning sampler.

        Keeping only the non-negative parts of each term (hard_relu) makes the
        sum an upper bound, since the decaying terms only shrink over time.
        """
        # compute the upper bound of intensity
        # at the current time
        time_recent = self.one_seq[-1]['time_since_start']
        #
        delta = self.soft_relu(
            numpy.tensordot(
                self.hidden_t, self.W_delta, (0, 0)
            )
        )
        #
        hidden_with_time = numpy.exp(
            -delta * (
                time_current - time_recent
            )
        ) * self.hidden_t[:, None]
        # (self.dim_model, self.dim_process)
        # self.W_alpha (self.dim_model, self.dim_process)
        self.intensity_tilde_ub = numpy.sum(
            self.hard_relu(
                self.W_alpha * hidden_with_time
            ),
            axis = 0
        ) + self.hard_relu(self.mu)
        #
        self.intensity_ub = self.soft_relu(
            self.intensity_tilde_ub
        )
        # intensity computation is finished
#
#
    def sample_time_given_type(self, type_event):
        """Sample the next occurrence time of ``type_event`` by thinning
        (rejection sampling against a fixed intensity upper bound).

        NOTE(review): the division by intensity_hazard assumes the softplus
        upper bound is strictly positive -- confirm it can never be ~0.
        """
        # type_event is the type of event for which we want to sample the time
        # it is the little k in our model formulation in paper
        time_current = numpy.float32(0.0)
        if len(self.one_seq) > 0:
            time_current = self.one_seq[-1]['time_since_start']
        #
        #self.compute_intensity(time_current)
        self.compute_intensity_upper_bound(time_current)
        #
        intensity_hazard = numpy.copy(
            self.intensity_ub[type_event]
        )
        #
        # Propose exponential waiting times at the upper-bound rate; accept
        # with probability intensity(t) / upper_bound (u < 1 means accept).
        u = 1.5
        while u >= 1.0:
            E = numpy.random.exponential(
                scale=1.0, size=None
            )
            U = numpy.random.uniform(
                low=0.0, high=1.0, size=None
            )
            time_current += E / intensity_hazard
            self.compute_intensity_given_past(time_current)
            u = U * intensity_hazard / self.intensity[type_event]
        #
        return time_current
#
#
#
    def gen_one_seq(self, max_len):
        # Generate one sequence of max_len events via per-type thinning:
        # sample a candidate time for every event type, keep the earliest.
        self.restart_sequence()
        '''
        Liiniger (2009), p. 28, describes a "thinning algorithm":
        generate one event of each type, take the minimum,
        and discard the others.
        Details found in my paper write-up
        #
        max_len is a pre-sampled value to set the length of seq
        '''
        # initialize the seq
        time_since_start = numpy.float32(0.0)
        # Last occurrence time per type, for time_since_last_same_event.
        time_since_start_each_event = numpy.zeros(
            (self.dim_process,), dtype=dtype
        )
        #
        for idx_event in range(max_len):
            time_of_happen = numpy.zeros(
                (self.dim_process,), dtype=dtype
            )
            #
            # compute the hidden states
            # of the most recent event in sequence
            self.compute_hidden_states()
            #
            for type_event in range(self.dim_process):
                # sample one event using "thinning algorithm"
                time_of_happen[type_event] = numpy.copy(
                    self.sample_time_given_type(
                        type_event
                    )
                )
            #
            # The earliest candidate across all types becomes the next event.
            time_since_start_new = numpy.min(time_of_happen)
            type_event_new = numpy.argmin(time_of_happen)
            self.cnt_total_event += 1
            #
            # update sequence
            time_since_last_event = time_since_start_new - time_since_start
            time_since_start = time_since_start_new
            time_since_last_same_event = time_since_start - time_since_start_each_event[type_event_new]
            time_since_start_each_event[type_event_new] = time_since_start
            self.one_seq.append(
                {
                    'idx_event': self.cnt_total_event,
                    'type_event': type_event_new,
                    'time_since_start': time_since_start,
                    'time_since_last_event': time_since_last_event,
                    'time_since_last_same_event': time_since_last_same_event
                }
            )
        #
        # throw away the BOS item
        # at the head of the sequence
        self.one_seq.pop(0)
#
#
#
def gen_seqs(self, settings):
#
#print(settings)
num_seqs = settings['num_seqs']
#
self.list_seqs = []
cnt_seqs = 0
#for idx_seq in range(num_seqs):
while cnt_seqs < num_seqs:
#
max_len = numpy.int32(
round(
numpy.random.uniform(
low=settings['min_len'],
high=settings['max_len']
)
)
)
#
self.gen_one_seq(max_len)
self.list_seqs.append(self.one_seq)
cnt_seqs += 1
if cnt_seqs % 10 == 9:
print("idx seq of gen : ", (cnt_seqs, self.name))
print("total number of seqs : ", num_seqs)
#
#
def print_some(self):
print("printing some seqs ... ")
for idx_seq in range(10):
print("the id of this seq is : ", idx_seq)
seq = self.list_seqs[idx_seq]
list_events, list_time = [], []
for event_item in seq:
list_events.append(event_item['type_event'])
list_time.append(
round(event_item['time_since_start'], 4)
)
print(list_events)
print(list_time)
#
def save_seqs(self, file_save):
with open(file_save, 'wb') as f:
pickle.dump(self.list_seqs, f)
class NeuralHawkesAdaptiveBaseGen(object):
'''
here is the sequence generator using Neural Hawkes process
'''
    def __init__(self, settings):
        """Randomly initialize all generator parameters.

        settings keys used: 'dim_process', 'dim_LSTM', 'args', 'seed_random'.
        Unlike the base generator, the base rate here is state-dependent via
        W_mu (dim_model x dim_process) rather than a constant mu vector.
        """
        #
        self.dim_process = settings['dim_process']
        self.dim_model = settings['dim_LSTM']
        #
        # Times are encoded as 32-bit float patterns (see float32_to_bit).
        self.dim_float = numpy.int32(32)
        self.dim_time = self.dim_float
        #
        self.args = settings['args']
        numpy.random.seed(
            settings['seed_random']
        )
        # State-dependent base-rate weights: (dim_model, dim_process).
        self.W_mu = numpy.float32(
            numpy.random.uniform(
                low = -1.0, high = 1.0,
                size = (
                    self.dim_model, self.dim_process
                )
            )
        )
        #
        # Decay-rate weights: (dim_model, dim_model, dim_process).
        self.W_delta = numpy.float32(
            numpy.random.uniform(
                low = -1.0, high = 1.0,
                size=(
                    self.dim_model, self.dim_model,
                    self.dim_process
                )
            )
        )
        #
        # Hidden-state-to-intensity weights: (dim_model, dim_process).
        self.W_alpha = numpy.float32(
            numpy.random.uniform(
                low = -1.0,
                high = 1.0,
                size = (self.dim_model, self.dim_process)
            )
        )
        # Event embeddings; the extra row (dim_process + 1) is the BOS type.
        self.Emb_event = numpy.float32(
            numpy.random.uniform(
                low = -1.0,
                high = 1.0,
                size = (
                    self.dim_process + numpy.int32(1),
                    self.dim_model
                )
            )
        )
        # Projection of the 32-bit time encoding into the model space.
        self.Emb_time = numpy.float32(
            numpy.random.uniform(
                low = -1.0,
                high = 1.0,
                size = (
                    self.dim_time, self.dim_model
                )
            )
        )
        # LSTM weights: input is [event_emb; time_emb; hidden] (3*dim_model),
        # output covers the 4 gate pre-activations (4*dim_model).
        self.W_recur = numpy.float32(
            numpy.random.uniform(
                low = -1.0,
                high = 1.0,
                size = (
                    3 * self.dim_model,
                    4 * self.dim_model
                )
            )
        )
        self.b_recur = numpy.float32(
            numpy.random.uniform(
                low = -1.0,
                high = 1.0,
                size = (4*self.dim_model, )
            )
        )
        #
        #self.intensity = numpy.copy(self.mu)
        self.name = 'AdaptiveNeuralHawkesGen'
        #
        # Cached intensity values, filled by compute_intensity_* methods.
        self.intensity_tilde = None
        self.intensity = None
        #
        self.intensity_tilde_ub = None
        self.intensity_ub = None
        #
        self.one_seq = []
        # initialization for LSTM states
        # BOS marker event: uses the out-of-range type id dim_process.
        self.one_seq.append(
            {
                'idx_event': numpy.int32(0),
                'type_event': self.dim_process,
                'time_since_start': numpy.float32(0.0),
                'time_since_last_event': numpy.float32(0.0),
                'time_since_last_same_event': numpy.float32(0.0)
            }
        )
        self.hidden_t = numpy.zeros(
            (self.dim_model, ), dtype = dtype
        )
        self.cell_t = numpy.zeros(
            (self.dim_model, ), dtype = dtype
        )
        #self.flag_continue = True
        self.cnt_total_event = numpy.int32(len(self.one_seq))
#
#
def soft_relu(self, x):
return numpy.log(numpy.float32(1.0)+numpy.exp(x))
#
def hard_relu(self, x):
return numpy.float32(0.5) * (x + numpy.abs(x) )
#
#
def save_model(self, file_save):
print("saving model of generator ... ")
model_dict = {
'W_mu': numpy.copy(self.W_mu),
'W_delta': numpy.copy(self.W_delta),
'W_alpha': numpy.copy(self.W_alpha),
'Emb_event': numpy.copy(self.Emb_event),
'Emb_time': numpy.copy(self.Emb_time),
'W_recur': numpy.copy(self.W_recur),
'b_recur': numpy.copy(self.b_recur),
'dim_process': self.dim_process,
'dim_model': self.dim_model,
'dim_time': self.dim_time,
'dim_float': self.dim_float,
'name': self.name,
'args': self.args
}
with open(file_save, 'wb') as f:
pickle.dump(model_dict, f)
#
def restart_sequence(self):
# clear the events memory and reset starting time is 0
self.intensity_tilde = None
self.intensity = None
#
self.intensity_tilde_ub = None
self.intensity_ub = None
#
self.one_seq = []
#
self.one_seq.append(
{
'idx_event': numpy.int32(0),
'type_event': self.dim_process,
'time_since_start': numpy.float32(0.0),
'time_since_last_event': numpy.float32(0.0),
'time_since_last_same_event': numpy.float32(0.0)
}
)
self.hidden_t = numpy.zeros(
(self.dim_model, ), dtype = dtype
)
self.cell_t = numpy.zeros(
(self.dim_model, ), dtype = dtype
)
#self.flag_continue = True
self.cnt_total_event = numpy.int32(len(self.one_seq))
#
#
#
#
def float32_to_bit(self, float_input):
'''
input a number in float, convert it to float32
get its 32-bit representations
'''
float32_input = numpy.float32(float_input)
str_input = ''.join(bin(ord(c)).replace('0b', '').rjust(8, '0') for c in struct.pack('!f', float32_input))
bit_input = numpy.zeros(
(self.dim_float,), dtype=dtype
)
assert(self.dim_float == len(str_input))
for idx, item_in_input in enumerate(str_input):
bit_input[idx] = numpy.float32(item_in_input)
return numpy.copy(bit_input)
#
#
def sigmoid(self, x):
return 1 / (1+numpy.exp(-x))
#
| |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
""" Subsampled honest forest extension to scikit-learn's forest methods.
"""
import numpy as np
import scipy.sparse
import threading
import sparse as sp
import itertools
from sklearn.utils import check_array, check_X_y, issparse
from sklearn.ensemble.forest import ForestRegressor, _accumulate_prediction
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import train_test_split
from sklearn.base import RegressorMixin
from warnings import catch_warnings, simplefilter, warn
from sklearn.exceptions import DataConversionWarning, NotFittedError
from sklearn.tree._tree import DTYPE, DOUBLE
from sklearn.utils import check_random_state, check_array, compute_sample_weight
from sklearn.utils._joblib import Parallel, delayed
from sklearn.utils.fixes import _joblib_parallel_args
from sklearn.utils.validation import check_is_fitted
from sklearn.ensemble.base import _partition_estimators
# Largest value representable as a signed 32-bit integer.
MAX_INT = np.iinfo(np.int32).max
def _parallel_add_trees(tree, forest, X, y, sample_weight, s_inds, tree_idx, n_trees, verbose=0):
    """Private function used to fit a single subsampled honest tree in parallel.

    Fits *tree* on the splitting half of the subsample indexed by *s_inds*,
    then recomputes every node's value/weight/count on the estimation half
    and prunes subtrees that fail the forest's leaf requirements there.

    Parameters
    ----------
    tree : sklearn regression tree (unfitted)
    forest : owning forest; read for ``honest``, ``min_weight_fraction_leaf``
        and ``min_samples_leaf``
    X, y : full training data, subsampled here via ``s_inds``
    sample_weight : per-sample weights or None (defaults to all ones)
    s_inds : integer indices of this tree's subsample
    tree_idx, n_trees : position / total count, for progress printing only
    verbose : int, if > 1 print a progress line

    Returns
    -------
    tree : the fitted, honest-pruned tree
    """
    if verbose > 1:
        print("building tree %d of %d" % (tree_idx + 1, n_trees))
    # Construct the subsample of data
    X, y = X[s_inds], y[s_inds]
    if sample_weight is None:
        sample_weight = np.ones((X.shape[0],), dtype=np.float64)
    else:
        sample_weight = sample_weight[s_inds]
    # Split into estimation and splitting sample set (50/50 when honest;
    # otherwise both halves are the full subsample)
    if forest.honest:
        X_split, X_est, y_split, y_est,\
            sample_weight_split, sample_weight_est = train_test_split(
                X, y, sample_weight, test_size=.5, shuffle=True, random_state=tree.random_state)
    else:
        X_split, X_est, y_split, y_est, sample_weight_split, sample_weight_est =\
            X, X, y, y, sample_weight, sample_weight
    # Fit the tree on the splitting sample
    tree.fit(X_split, y_split, sample_weight=sample_weight_split,
             check_input=False)
    # Set the estimation values based on the estimation split
    total_weight_est = np.sum(sample_weight_est)
    # Apply the trained tree on the estimation sample to get the path for every estimation sample
    path_est = tree.decision_path(X_est)
    # Calculate the total weight of estimation samples on each tree node:
    # \sum_i sample_weight[i] * 1{i \\in node}
    weight_est = scipy.sparse.csr_matrix(
        sample_weight_est.reshape(1, -1)).dot(path_est)
    # Calculate the total number of estimation samples on each tree node:
    # |node| = \sum_{i} 1{i \\in node}
    count_est = np.sum(path_est, axis=0)
    # Calculate the weighted sum of responses on the estimation sample on each node:
    # \sum_{i} sample_weight[i] 1{i \\in node} Y_i
    value_est = scipy.sparse.csr_matrix(
        (sample_weight_est.reshape(-1, 1) * y_est).T).dot(path_est)
    # Prune tree to remove leafs that don't satisfy the leaf requirements on the estimation sample
    # and for each un-pruned tree set the value and the weight appropriately.
    children_left = tree.tree_.children_left
    children_right = tree.tree_.children_right
    stack = [(0, -1)]  # root node id and its parent id (-1 = root has no parent)
    while len(stack) > 0:
        node_id, parent_id = stack.pop()
        # If minimum weight requirement or minimum leaf size requirement is not satisfied on estimation
        # sample, then prune the whole sub-tree.
        # NOTE(review): this sets BOTH children of the parent to -1, i.e. it
        # turns the parent into a leaf and drops the sibling subtree as well.
        if weight_est[0, node_id] / total_weight_est < forest.min_weight_fraction_leaf\
                or count_est[0, node_id] < forest.min_samples_leaf:
            tree.tree_.children_left[parent_id] = -1
            tree.tree_.children_right[parent_id] = -1
        else:
            for i in range(tree.n_outputs_):
                # Set the value of the node to: \sum_{i} sample_weight[i] 1{i \\in node} Y_i / |node|
                tree.tree_.value[node_id, i] = value_est[i, node_id] / count_est[0, node_id]
            # Set the weight of the node to: \sum_{i} sample_weight[i] 1{i \\in node} / |node|
            tree.tree_.weighted_n_node_samples[node_id] = weight_est[0, node_id] / count_est[0, node_id]
            # Set the count to the estimation split count
            tree.tree_.n_node_samples[node_id] = count_est[0, node_id]
            # Recurse into internal nodes (leaves have equal child ids)
            if (children_left[node_id] != children_right[node_id]):
                stack.append((children_left[node_id], node_id))
                stack.append((children_right[node_id], node_id))
    return tree
class SubsampledHonestForest(ForestRegressor, RegressorMixin):
"""
An implementation of a subsampled honest random forest regressor on top of an sklearn
regression tree. Implements subsampling and honesty as described in [3]_,
but uses a scikit-learn regression tree as a base. It provides confidence intervals based on ideas
described in [3]_ and [4]_
A random forest is a meta estimator that fits a number of classifying
decision trees on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is smaller than the original size and subsampling is
performed without replacement. Each decision tree is built in an honest
manner: half of the sub-sampled data are used for creating the tree structure
(referred to as the splitting sample) and the other half for calculating the
constant regression estimate at each leaf of the tree (referred to as the estimation sample).
One difference with the algorithm proposed in [3]_ is that we do not ensure balancedness
and we do not consider poisson sampling of the features, so that we guarantee
that each feature has a positive probability of being selected on each split.
Rather we use the original algorithm of Breiman [1]_, which selects the best split
among a collection of candidate splits, as long as the max_depth is not reached
and as long as there are not more than max_leafs and each child contains
at least min_samples_leaf samples and total weight fraction of
min_weight_fraction_leaf. Moreover, it allows the use of both mean squared error (MSE)
    and mean absolute error (MAE) as the splitting criterion. Finally, we allow
for early stopping of the splits if the criterion is not improved by more than
min_impurity_decrease. These techniques that date back to the work of [1]_,
should lead to finite sample performance improvements, especially for
high dimensional features.
The implementation also provides confidence intervals
for each prediction using a bootstrap of little bags approach described in [3]_:
subsampling is performed at hierarchical level by first drawing a set of half-samples
at random and then sub-sampling from each half-sample to build a forest
of forests. All the trees are used for the point prediction and the distribution
of predictions returned by each of the sub-forests is used to calculate the standard error
of the point prediction.
In particular we use a variant of the standard error estimation approach proposed in [4]_,
where, if :math:`\\theta(X)` is the point prediction at X, then the variance of :math:`\\theta(X)`
is computed as:
.. math ::
Var(\\theta(X)) = \\frac{\\hat{V}}{\\left(\\frac{1}{B} \\sum_{b \\in [B], i\\in [n]} w_{b, i}(x)\\right)^2}
where B is the number of trees, n the number of training points, and:
.. math ::
w_{b, i}(x) = \\text{sample\\_weight}[i] \\cdot \\frac{1\\{i \\in \\text{leaf}(x; b)\\}}{|\\text{leaf}(x; b)|}
.. math ::
\\hat{V} = \\text{Var}_{\\text{random half-samples } S}\\left[ \\frac{1}{B_S}\
\\sum_{b\\in S, i\\in [n]} w_{b, i}(x) (Y_i - \\theta(X)) \\right]
where :math:`B_S` is the number of trees in half sample S. The latter variance is approximated by:
.. math ::
\\hat{V} = \\frac{1}{|\\text{drawn half samples } S|} \\sum_{S} \\left( \\frac{1}{B_S}\
\\sum_{b\\in S, i\\in [n]} w_{b, i}(x) (Y_i - \\theta(X)) \\right)^2
This variance calculation does not contain the correction due to finite number of monte carlo half-samples
used (as proposed in [4]_), hence can be a bit conservative when a small number of half samples is used.
However, it is on the conservative side. We use ceil(sqrt(n_estimators)) half samples, and the forest associated
with each such half-sample contains roughly sqrt(n_estimators) trees, amounting to a total of n_estimator trees
overall.
Parameters
----------
n_estimators : integer, optional (default=100)
The total number of trees in the forest. The forest consists of a
forest of sqrt(n_estimators) sub-forests, where each sub-forest
contains sqrt(n_estimators) trees.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of splitting samples required to split an internal node.
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` splitting samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression. After construction the tree is also pruned
so that there are at least min_samples_leaf estimation samples on
each leaf.
- If int, then consider `min_samples_leaf` as the | |
<reponame>pursuitofepic/lookit-api<gh_stars>0
import csv
import datetime
import io
import json
import re
from django.test import Client, TestCase, override_settings
from django.urls import reverse
from django.utils.http import urlencode
from django_dynamic_fixture import G
from accounts.backends import TWO_FACTOR_AUTH_SESSION_KEY
from accounts.models import Child, DemographicData, User
from accounts.utils import hash_id
from studies.models import ConsentRuling, Lab, Response, Study, StudyType, Video
class Force2FAClient(Client):
    """Test client whose session always reports two-factor auth as passed."""

    @property
    def session(self):
        # Mark the session as 2FA-verified before handing it back, so
        # views gated on TWO_FACTOR_AUTH_SESSION_KEY can be exercised.
        sess = super().session
        sess[TWO_FACTOR_AUTH_SESSION_KEY] = True
        return sess
# Run celery tasks right away, but don't catch errors from them. The relevant tasks for
# this case involve S3/GCP access which we're not testing.
@override_settings(CELERY_TASK_ALWAYS_EAGER=True)
@override_settings(CELERY_TASK_EAGER_PROPAGATES=False)
class ResponseViewsTestCase(TestCase):
    """Access-control tests for the experimenter response/demographics views.

    Verifies that unauthenticated users, participants, and unassociated
    researchers cannot reach any response-related URL, while study
    researchers and admins can (including preview-data deletion).
    """

    def setUp(self):
        """Create a study with consented real and preview responses.

        Builds an admin researcher, a reader researcher, an unassociated
        researcher, 5 participants with 3 children each, and one real plus
        one preview consented response per child.
        """
        self.client = Force2FAClient()
        n_participants = 5
        children_per_participant = 3
        self.study_admin = G(
            User, is_active=True, is_researcher=True, given_name="Researcher 1"
        )
        self.study_reader = G(
            User, is_active=True, is_researcher=True, given_name="Researcher 2"
        )
        self.other_researcher = G(
            User, is_active=True, is_researcher=True, given_name="Researcher 3"
        )
        self.participants = [
            G(User, is_active=True, given_name="Mom") for i in range(n_participants)
        ]
        self.study_type = G(StudyType, name="default", id=1)
        self.lab = G(Lab, name="MIT")
        self.study = G(
            Study,
            creator=self.study_admin,
            shared_preview=False,
            study_type=self.study_type,
            name="Test Study",
            lab=self.lab,
        )
        # Note: currently not mocking Study.image field, because I couldn't get any of the
        # approaches outlined at https://stackoverflow.com/questions/26298821/django-testing-model-with-imagefield
        # working.
        self.study_shared_preview = G(
            Study,
            creator=self.study_admin,
            shared_preview=True,
            study_type=self.study_type,
            name="Test Study",
            lab=self.lab,
        )
        self.study.admin_group.user_set.add(self.study_admin)
        self.study.researcher_group.user_set.add(self.study_reader)
        self.study_reader_child = G(
            Child,
            user=self.study_reader,
            given_name="Study reader child",
            birthday=datetime.date.today() - datetime.timedelta(30),
        )
        self.other_researcher_child = G(
            Child,
            user=self.other_researcher,
            given_name="Other researcher child",
            birthday=datetime.date.today() - datetime.timedelta(60),
        )
        self.children_for_participants = []
        self.demo_snapshots_for_participants = []
        self.responses = []
        self.preview_responses = []
        for part in self.participants:
            these_children = [
                G(
                    Child,
                    user=part,
                    given_name="Child" + str(i),
                    existing_conditions=Child.existing_conditions.multiple_birth,
                    birthday=datetime.date.today() - datetime.timedelta(60),
                )
                for i in range(children_per_participant)
            ]
            self.children_for_participants.append(these_children)
            demo_snapshot = G(DemographicData, user=part, density="urban")
            self.demo_snapshots_for_participants.append(demo_snapshot)
            self.responses += [
                G(
                    Response,
                    child=child,
                    study=self.study,
                    completed=False,
                    completed_consent_frame=True,
                    sequence=["0-video-config", "1-video-setup", "2-my-consent-frame"],
                    exp_data={
                        "0-video-config": {"frameType": "DEFAULT"},
                        "1-video-setup": {"frameType": "DEFAULT"},
                        "2-my-consent-frame": {"frameType": "CONSENT"},
                    },
                    demographic_snapshot=demo_snapshot,
                )
                for child in these_children
            ]
            self.preview_responses += [
                G(
                    Response,
                    child=child,
                    study=self.study,
                    completed=False,
                    is_preview=True,
                    completed_consent_frame=True,
                    sequence=["0-video-config", "1-video-setup", "2-my-consent-frame"],
                    exp_data={
                        "0-video-config": {"frameType": "DEFAULT"},
                        "1-video-setup": {"frameType": "DEFAULT"},
                        "2-my-consent-frame": {"frameType": "CONSENT"},
                    },
                    demographic_snapshot=demo_snapshot,
                )
                for child in these_children
            ]
        # Confirm consent for all responses
        self.n_previews = children_per_participant * n_participants
        self.consent_rulings = [
            G(
                ConsentRuling,
                response=response,
                action="accepted",
                arbiter=self.study_reader,
            )
            for response in self.responses + self.preview_responses
        ]
        # Every response-related URL that should be researcher-only.
        self.all_response_urls = [
            reverse("exp:study-responses-all", kwargs={"pk": self.study.pk}),
            reverse(
                "exp:study-responses-children-summary-csv", kwargs={"pk": self.study.pk}
            ),
            reverse(
                "exp:study-responses-children-summary-dict-csv",
                kwargs={"pk": self.study.pk},
            ),
            reverse(
                "exp:study-hashed-id-collision-check", kwargs={"pk": self.study.pk}
            ),
            reverse(
                "exp:study-responses-download-frame-data-dict-csv",
                kwargs={"pk": self.study.pk},
            ),
            reverse(
                "exp:study-responses-download-frame-data-zip-csv",
                kwargs={"pk": self.study.pk},
            ),
            reverse("exp:study-demographics", kwargs={"pk": self.study.pk}),
            reverse(
                "exp:study-demographics-download-json", kwargs={"pk": self.study.pk}
            ),
            reverse(
                "exp:study-demographics-download-csv", kwargs={"pk": self.study.pk}
            ),
            reverse(
                "exp:study-demographics-download-dict-csv", kwargs={"pk": self.study.pk}
            ),
            reverse("exp:study-responses-list", kwargs={"pk": self.study.pk}),
            reverse(
                "exp:study-responses-consent-manager", kwargs={"pk": self.study.pk}
            ),
            reverse("exp:study-responses-download-json", kwargs={"pk": self.study.pk}),
            reverse("exp:study-responses-download-csv", kwargs={"pk": self.study.pk}),
            reverse(
                "exp:study-responses-download-summary-dict-csv",
                kwargs={"pk": self.study.pk},
            ),
            reverse("exp:study-attachments", kwargs={"pk": self.study.pk}),
        ]

    def test_cannot_see_any_responses_views_unauthenticated(self):
        """Unauthenticated requests must never get a 200 from response URLs."""
        for url in self.all_response_urls:
            page = self.client.get(url)
            self.assertNotEqual(
                page.status_code,
                200,
                "Unauthenticated user not redirected from responses: " + url,
            )

    def test_cannot_see_any_responses_views_as_participant(self):
        """Participants not on the study team get 403 from all response URLs."""
        self.client.force_login(self.participants[0])
        for url in self.all_response_urls:
            page = self.client.get(url)
            self.assertEqual(
                page.status_code,
                403,
                "Unassociated participant not forbidden to access responses: " + url,
            )

    def test_cannot_see_any_responses_views_as_unassociated_researcher(self):
        """Researchers not on the study team get 403 from all response URLs."""
        self.client.force_login(self.other_researcher)
        for url in self.all_response_urls:
            page = self.client.get(url)
            self.assertEqual(
                page.status_code,
                403,
                "Unassociated researcher not forbidden to access responses: " + url,
            )

    def test_can_see_response_views_as_study_researcher(self):
        """A researcher on the study can reach every response URL (200/302)."""
        self.client.force_login(self.study_reader)
        for url in self.all_response_urls:
            page = self.client.get(url)
            self.assertIn(
                page.status_code, [200, 302], "Unexpected status code for " + url
            )

    def test_can_see_video_attachments_as_study_researcher(self):
        """The attachments page loads for a study researcher once videos exist."""
        self.client.force_login(self.study_reader)
        # Add a video for each response
        self.videos = [
            G(
                Video,
                frame_id="2-my-consent-frame",
                full_name=f"videoStream_{self.study.uuid}_2-my-consent-frame_{resp.uuid}_1594823856933_{resp.pk}",
                pipe_name=f"7WHkjNhHt741R4lpMsDzTGBgCqBfkC{resp.pk}.mp4",
                study=self.study,
                response=resp,
                is_consent_footage=True,
            )
            for resp in self.responses
        ]
        page = self.client.get(
            reverse("exp:study-attachments", kwargs={"pk": self.study.pk})
        )
        self.assertIn(
            page.status_code,
            [200, 302],
            "Unexpected status code for video attachments page",
        )

    def test_can_see_response_views_as_study_admin(self):
        """A study admin can reach every response URL (200/302)."""
        self.client.force_login(self.study_admin)
        for url in self.all_response_urls:
            page = self.client.get(url)
            self.assertIn(
                page.status_code, [200, 302], "Unexpected status code for " + url
            )

    def test_cannot_delete_preview_data_as_unassociated_researcher(self):
        """Deletion of preview data is forbidden (403) and leaves data intact."""
        self.client.force_login(self.other_researcher)
        url = reverse(
            "exp:study-delete-preview-responses", kwargs={"pk": self.study.pk}
        )
        response = self.client.post(url, {})
        self.assertEqual(
            response.status_code,
            403,
            "Unassociated researcher able to delete preview data!",
        )
        # Check that there's still preview data
        self.assertEqual(
            self.study.responses.filter(is_preview=True).count(), self.n_previews
        )

    def test_delete_preview_data(self):
        """A study admin can delete all preview responses via POST."""
        self.client.force_login(self.study_admin)
        url = reverse(
            "exp:study-delete-preview-responses", kwargs={"pk": self.study.pk}
        )
        self.assertEqual(
            self.study.responses.filter(is_preview=True).count(), self.n_previews
        )
        self.client.post(url, {})
        self.assertEqual(self.study.responses.filter(is_preview=True).count(), 0)
class ResponseDataDownloadTestCase(TestCase):
    def setUp(self):
        """Build a study with a rich mix of responses for download tests.

        Creates, per participant (3 participants x 2 children): one
        incomplete, one complete, and one preview consented response.
        Also adds a consented non-preview response from an extra
        participant, a consented response to a *different* study, and an
        UNCONSENTED response whose data contains ``poison_string`` —
        which must never appear in any download.  Finally precomputes
        expected counts, two complementary field option sets, and the
        summary-download URLs.
        """
        self.client = Force2FAClient()
        n_participants = 3
        children_per_participant = 2
        self.study_reader = G(
            User, is_active=True, is_researcher=True, given_name="Researcher 2"
        )
        self.study_previewer = G(
            User, is_active=True, is_researcher=True, given_name="Researcher 3"
        )
        self.study_type = G(StudyType, name="default", id=1)
        self.lab = G(Lab, name="MIT")
        self.study = G(
            Study,
            creator=self.study_reader,
            shared_preview=False,
            study_type=self.study_type,
            name="Test Study",
            lab=self.lab,
        )
        self.other_study = G(
            Study,
            creator=self.study_reader,
            shared_preview=False,
            study_type=self.study_type,
            name="Test Study 2",
            lab=self.lab,
        )
        self.study.researcher_group.user_set.add(self.study_reader)
        self.study.design_group.user_set.add(self.study_previewer)
        self.other_study.researcher_group.user_set.add(self.study_reader)
        self.other_study.design_group.user_set.add(self.study_previewer)
        self.children_for_participants = []
        self.demo_snapshots_for_participants = []
        self.responses = []
        self.preview_responses = []
        self.participant_names = ["Alice", "Bob", "Carol"]
        self.participants = [
            G(User, is_active=True, nickname=self.participant_names[i])
            for i in range(n_participants)
        ]
        for part in self.participants:
            these_children = [
                G(
                    Child,
                    user=part,
                    given_name="ChildGivenName" + str(i),
                    existing_conditions=Child.existing_conditions.multiple_birth,
                    birthday=datetime.date.today() - datetime.timedelta(60),
                )
                for i in range(children_per_participant)
            ]
            self.children_for_participants.append(these_children)
            demo_snapshot = G(DemographicData, user=part, density="urban")
            self.demo_snapshots_for_participants.append(demo_snapshot)
            # Include one incomplete response for each participant
            self.responses += [
                G(
                    Response,
                    child=child,
                    study=self.study,
                    completed=False,
                    completed_consent_frame=True,
                    sequence=["0-video-config", "1-video-setup", "2-my-consent-frame"],
                    exp_data={
                        "0-video-config": {"frameType": "DEFAULT"},
                        "1-video-setup": {"frameType": "DEFAULT"},
                        "2-my-consent-frame": {"frameType": "CONSENT"},
                    },
                    demographic_snapshot=demo_snapshot,
                )
                for child in these_children
            ]
            # And one complete response
            self.responses += [
                G(
                    Response,
                    child=child,
                    study=self.study,
                    completed=False,
                    completed_consent_frame=True,
                    sequence=["0-video-config", "1-video-setup", "2-my-consent-frame"],
                    exp_data={
                        "0-video-config": {"frameType": "DEFAULT"},
                        "1-video-setup": {"frameType": "DEFAULT"},
                        "2-my-consent-frame": {"frameType": "CONSENT"},
                        "3-my-exit-frame": {"frameType": "EXIT"},
                    },
                    demographic_snapshot=demo_snapshot,
                )
                for child in these_children
            ]
            # And one preview
            self.preview_responses += [
                G(
                    Response,
                    child=child,
                    study=self.study,
                    completed=False,
                    is_preview=True,
                    completed_consent_frame=True,
                    sequence=["0-video-config", "1-video-setup", "2-my-consent-frame"],
                    exp_data={
                        "0-video-config": {"frameType": "DEFAULT"},
                        "1-video-setup": {"frameType": "DEFAULT"},
                        "2-my-consent-frame": {"frameType": "CONSENT"},
                    },
                    demographic_snapshot=demo_snapshot,
                )
                for child in these_children
            ]
        # Add real but not preview response from an additional participant
        self.non_preview_participant = G(
            User, is_active=True, nickname="non-preview-participant"
        )
        self.non_preview_child = G(
            Child,
            user=self.non_preview_participant,
            given_name="non-preview-child",
            birthday=datetime.date.today() - datetime.timedelta(366),
        )
        self.non_preview_demo = G(
            DemographicData,
            user=self.non_preview_participant,
            languages_spoken_at_home="Swahili",
        )
        self.non_preview_resp = G(
            Response,
            child=self.non_preview_child,
            study=self.study,
            completed=False,
            completed_consent_frame=True,
            sequence=["0-video-config", "1-video-setup", "2-my-consent-frame"],
            exp_data={
                "0-video-config": {"frameType": "DEFAULT"},
                "1-video-setup": {"frameType": "DEFAULT"},
                "2-my-consent-frame": {
                    "frameType": "CONSENT",
                    "someField": "non-preview-data",
                },
            },
            demographic_snapshot=self.non_preview_demo,
        )
        # Add a response to a different study which shouldn't be included in self.study responses
        self.other_study_response = G(
            Response,
            child=self.children_for_participants[0][0],
            study=self.other_study,
            completed=True,
            completed_consent_frame=True,
            sequence=["0-video-config", "1-video-setup", "2-my-consent-frame"],
            exp_data={
                "0-video-config": {"frameType": "DEFAULT"},
                "1-video-setup": {"frameType": "DEFAULT"},
                "2-my-consent-frame": {
                    "frameType": "CONSENT",
                    "someField": "different-study",
                },
            },
            demographic_snapshot=self.demo_snapshots_for_participants[0],
        )
        # Confirm consent for all responses above
        self.consent_rulings = [
            G(
                ConsentRuling,
                response=response,
                action="accepted",
                arbiter=self.study_reader,
            )
            for response in self.responses
            + self.preview_responses
            + [self.non_preview_resp, self.other_study_response]
        ]
        # Add unconsented response from additional participant
        self.poison_string = (
            "no-one-should-see-this"  # phrase that shouldn't be in any downloads
        )
        self.unconsented_participant = G(
            User, is_active=True, nickname=self.poison_string
        )
        self.unconsented_child = G(
            Child,
            user=self.unconsented_participant,
            given_name=self.poison_string,
            birthday=datetime.date.today() - datetime.timedelta(366),
        )
        self.unconsented_demo = G(
            DemographicData,
            user=self.unconsented_participant,
            languages_spoken_at_home=self.poison_string,
        )
        # NOTE(review): this response is attached to self.non_preview_child,
        # not self.unconsented_child created just above — looks unintentional;
        # verify whether unconsented_child should be the subject here.
        self.unconsented_resp = G(
            Response,
            child=self.non_preview_child,
            study=self.study,
            completed=False,
            completed_consent_frame=True,
            sequence=["0-video-config", "1-video-setup", "2-my-consent-frame"],
            exp_data={
                "0-video-config": {"frameType": "DEFAULT"},
                "1-video-setup": {"frameType": "DEFAULT"},
                "2-my-consent-frame": {
                    "frameType": "CONSENT",
                    "data": self.poison_string,
                },
            },
            demographic_snapshot=self.unconsented_demo,
        )
        # How many responses do we expect?
        self.n_previews = children_per_participant * n_participants
        self.n_responses = children_per_participant * n_participants * 2 + 1
        self.n_preview_children = children_per_participant * n_participants
        self.n_total_children = children_per_participant * n_participants + 1
        self.n_preview_participants = n_participants
        self.n_total_participants = n_participants + 1
        # Build a few complementary sets of options for fields to include in downloads
        self.age_optionset_1 = ["child__age_rounded"]
        self.child_optionset_1 = [
            "child__global_id",
            "child__gender",
            "child__condition_list",
            "participant__nickname",
        ]
        self.optionset_1 = self.age_optionset_1 + self.child_optionset_1
        self.child_labels_json_1 = [
            "global_id",
            "gender",
            "condition_list",
            "age_rounded",
        ]
        self.participant_labels_json_1 = ["nickname"]
        self.age_optionset_2 = ["child__age_in_days", "child__birthday"]
        self.child_optionset_2 = [
            "child__name",
            "child__age_at_birth",
            "child__language_list",
            "child__additional_information",
            "participant__global_id",
        ]
        self.optionset_2 = self.age_optionset_2 + self.child_optionset_2
        self.child_labels_json_2 = [
            "age_in_days",
            "birthday",
            "name",
            "age_at_birth",
            "language_list",
            "additional_information",
        ]
        self.participant_labels_json_2 = ["global_id"]
        self.response_summary_url = reverse(
            "exp:study-responses-download-csv", kwargs={"pk": self.study.pk}
        )
        self.response_summary_json_url = reverse(
            "exp:study-responses-download-json", kwargs={"pk": self.study.pk}
        )
def test_get_appropriate_fields_in_csv_downloads_set1(self):
self.client.force_login(self.study_reader)
query_string = urlencode({"data_options": self.optionset_1}, doseq=True)
response = self.client.get(f"{self.response_summary_url}?{query_string}")
content = response.content.decode("utf-8")
csv_reader = csv.reader(io.StringIO(content), quoting=csv.QUOTE_ALL)
csv_body = list(csv_reader)
csv_headers = csv_body.pop(0)
# Check that we have the expected number of responses
self.assertEqual(
len(csv_body),
self.n_responses + self.n_previews,
"Unexpected number of response rows in CSV download",
)
# Check that the appropriate specifically-requested headers ARE present
for header in self.optionset_1:
self.assertIn(
header,
csv_headers,
f"Downloaded summary CSV file is missing header {header}",
)
# Check that the remaining headers ARE NOT present
for header in self.optionset_2:
self.assertNotIn(
header,
csv_headers,
f"Downloaded summary CSV file has header | |
<reponame>evlog/SysPy<filename>SysPy_ver/Python_script/testbml2/_txt2BRAM.py
from pdb import *
def txt2BRAM_func0(txtFile_name, M_reactions, N_species, num_proc):
"""
FUNCTION: txt2BRAM_func0(a str, b int, c int)
a: text file name string
b: integer number of Spieces
c: integer number of Reactions
d: integer number of processors
- Initalizing a VHDL array with the contents of a text file.
"""
# Python's variable declerations
#----------------------------------------------------------------------------------------------------------------------------------
txtFile = ''
x = ''
s = ''
msg = ''
vhdFile_temp1 = ''
vhdFile_temp2 = ''
memFile = ''
memFilearr = []
bram0 = []
bram1 = []
bram2 = []
bram3 = []
bram4 = []
bram5 = []
bram6 = []
bram7 = []
bram8 = []
bram9 = []
bram10 = []
bram11 = []
bram12 = []
bram13 = []
bram14 = []
bram15 = []
bram16 = []
bram17 = []
bram18 = []
bram19 = []
bram20 = []
bram21 = []
bram22 = []
bram23 = []
bram24 = []
bram25 = []
bram26 = []
bram27 = []
bram28 = []
bram29 = []
bram30 = []
bram31 = []
bram32 = []
bram33 = []
bram34 = []
bram35 = []
bram36 = []
bram37 = []
bram38 = []
bram39 = []
bram40 = []
bram41 = []
bram42 = []
bram43 = []
bram44 = []
bram45 = []
bram46 = []
bram47 = []
bram48 = []
bram49 = []
bram50 = []
bram51 = []
bram52 = []
bram53 = []
bram54 = []
bram55 = []
bram56 = []
bram57 = []
bram58 = []
bram59 = []
bram60 = []
bram61 = []
bram62 = []
bram63 = []
bram_mem = [bram0, bram1, bram2, bram3, bram4, bram5, bram6, bram7,
bram8, bram9, bram10, bram11, bram12, bram13, bram14, bram15,
bram16, bram17, bram18, bram19, bram20, bram21, bram22, bram23,
bram24, bram25, bram26, bram27, bram28, bram29, bram30, bram31,
bram32, bram33, bram34, bram35, bram36, bram37, bram38, bram39,
bram40, bram41, bram42, bram43, bram44, bram45, bram46, bram47,
bram48, bram49, bram50, bram51, bram52, bram53, bram54, bram55,
bram56, bram57, bram58, bram59, bram60, bram61, bram62, bram63]
#----------------------------------------------------------------------------------------------------------------------------------
#msg = "File name: "
#txtFile_name = raw_input(msg).strip()
BRAM_size = M_reactions
txtFile = open("./" + txtFile_name, 'r')
if (num_proc == 1):
vhdFile_temp1 = open("./Initmems0_temp1.vhd", 'r')
vhdFile_temp2 = open("./Initmems0_temp2.vhd", 'r')
temp1 = vhdFile_temp1.read()
temp1 = temp1.replace("BRAM_size", str(BRAM_size - 1))
temp2 = vhdFile_temp2.read()
memFile = open("../paramsocs/LEONGILL/Lotka_VolteraparamRT.vhd", 'w')
memFile.write(temp1)
elif (num_proc == 2):
vhdFile_temp11 = open("./Initmems0_temp11_FRM24X.vhd", 'r')
vhdFile_temp12 = open("./Initmems0_temp12_FRM24X.vhd", 'r')
temp11 = vhdFile_temp11.read()
temp11 = temp11.replace("BRAM_size", str(BRAM_size - 1))
temp12 = vhdFile_temp12.read()
memFile1 = open("../paramsocs/LEONGILL/RT1.vhd", 'w')
memFile1.write(temp11)
vhdFile_temp21 = open("./Initmems0_temp21_FRM24X.vhd", 'r')
vhdFile_temp22 = open("./Initmems0_temp22_FRM24X.vhd", 'r')
temp21 = vhdFile_temp21.read()
temp21 = temp21.replace("BRAM_size", str(BRAM_size - 1))
temp22 = vhdFile_temp22.read()
memFile2 = open("../paramsocs/LEONGILL/RT2.vhd", 'w')
memFile2.write(temp21)
elif (num_proc == 4):
vhdFile_temp11 = open("./Initmems0_temp11_FRM24X.vhd", 'r')
vhdFile_temp12 = open("./Initmems0_temp12_FRM24X.vhd", 'r')
temp11 = vhdFile_temp11.read()
temp11 = temp11.replace("BRAM_size", str(BRAM_size - 1))
temp12 = vhdFile_temp12.read()
memFile1 = open("../paramsocs/LEONGILL/RT1.vhd", 'w')
memFile1.write(temp11)
vhdFile_temp21 = open("./Initmems0_temp21_FRM24X.vhd", 'r')
vhdFile_temp22 = open("./Initmems0_temp22_FRM24X.vhd", 'r')
temp21 = vhdFile_temp21.read()
temp21 = temp21.replace("BRAM_size", str(BRAM_size - 1))
temp22 = vhdFile_temp22.read()
memFile2 = open("../paramsocs/LEONGILL/RT2.vhd", 'w')
memFile2.write(temp21)
vhdFile_temp31 = open("./Initmems0_temp31_FRM24X.vhd", 'r')
vhdFile_temp32 = open("./Initmems0_temp32_FRM24X.vhd", 'r')
temp31 = vhdFile_temp31.read()
temp31 = temp31.replace("BRAM_size", str(BRAM_size - 1))
temp32 = vhdFile_temp32.read()
memFile3 = open("../paramsocs/LEONGILL/RT3.vhd", 'w')
memFile3.write(temp31)
vhdFile_temp41 = open("./Initmems0_temp41_FRM24X.vhd", 'r')
vhdFile_temp42 = open("./Initmems0_temp42_FRM24X.vhd", 'r')
temp41 = vhdFile_temp41.read()
temp41 = temp41.replace("BRAM_size", str(BRAM_size - 1))
temp42 = vhdFile_temp42.read()
memFile4 = open("../paramsocs/LEONGILL/RT4.vhd", 'w')
memFile4.write(temp41)
x = txtFile.read()
x = x.replace("\n", '')
for i in range(0, len(x), 72):
memFilearr.append(x[i:(i+72)])
for i in range((BRAM_size - len(memFilearr))):
if (i == (BRAM_size - (len(memFilearr)))):
memFilearr.append("000000000000000000000000000000000000000000000000000000000000000000000000")
else:
memFilearr.append("000000000000000000000000000000000000000000000000000000000000000000000000")
#print memFilearr
#print len(memFilearr)
for i in range(0, len(memFilearr)):
if (i < 8):
bram_mem[0].append(memFilearr[i])
elif ((i >= 8) and (i < 16)):
bram_mem[1].append(memFilearr[i])
elif ((i >= 16) and (i < 24)):
bram_mem[2].append(memFilearr[i])
elif ((i >= 24) and (i < 32)):
bram_mem[3].append(memFilearr[i])
elif ((i >= 32) and (i < 40)):
bram_mem[4].append(memFilearr[i])
elif ((i >= 40) and (i < 48)):
bram_mem[5].append(memFilearr[i])
elif ((i >= 48) and (i < 56)):
bram_mem[6].append(memFilearr[i])
elif ((i >= 56) and (i < 64)):
bram_mem[7].append(memFilearr[i])
elif ((i >= 64) and (i < 72)):
bram_mem[8].append(memFilearr[i])
elif ((i >= 72) and (i < 80)):
bram_mem[9].append(memFilearr[i])
elif ((i >= 80) and (i < 88)):
bram_mem[10].append(memFilearr[i])
elif ((i >= 88) and (i < 96)):
bram_mem[11].append(memFilearr[i])
elif ((i >= 96) and (i < 104)):
bram_mem[12].append(memFilearr[i])
elif ((i >= 104) and (i < 112)):
bram_mem[13].append(memFilearr[i])
elif ((i >= 112) and (i < 120)):
bram_mem[14].append(memFilearr[i])
elif ((i >= 120) and (i < 128)):
bram_mem[15].append(memFilearr[i])
elif ((i >= 128) and (i < 136)):
bram_mem[16].append(memFilearr[i])
elif ((i >= 136) and (i < 144)):
bram_mem[17].append(memFilearr[i])
elif ((i >= 144) and (i < 152)):
bram_mem[18].append(memFilearr[i])
elif ((i >= 152) and (i < 160)):
bram_mem[19].append(memFilearr[i])
elif ((i >= 160) and (i < 168)):
bram_mem[20].append(memFilearr[i])
elif ((i >= 168) and (i < 176)):
bram_mem[21].append(memFilearr[i])
elif ((i >= 176) and (i < 184)):
bram_mem[22].append(memFilearr[i])
elif ((i >= 184) and (i < 192)):
bram_mem[23].append(memFilearr[i])
elif ((i >= 192) and (i < 200)):
bram_mem[24].append(memFilearr[i])
elif ((i >= 200) and (i < 208)):
bram_mem[25].append(memFilearr[i])
elif ((i >= 208) and (i < 216)):
bram_mem[26].append(memFilearr[i])
elif ((i >= 216) and (i < 224)):
bram_mem[27].append(memFilearr[i])
elif ((i >= 224) and (i < 232)):
bram_mem[28].append(memFilearr[i])
elif ((i >= 232) and (i < 240)):
bram_mem[29].append(memFilearr[i])
elif ((i >= 240) and (i < 248)):
bram_mem[30].append(memFilearr[i])
elif ((i >= 248) and (i < 256)):
bram_mem[31].append(memFilearr[i])
elif ((i >= 256) and (i < 264)):
bram_mem[32].append(memFilearr[i])
elif ((i >= 264) and (i < 272)):
bram_mem[33].append(memFilearr[i])
elif ((i >= 272) and (i < 280)):
bram_mem[34].append(memFilearr[i])
elif ((i >= 280) and (i < 288)):
bram_mem[35].append(memFilearr[i])
elif ((i >= 288) and (i < 296)):
bram_mem[36].append(memFilearr[i])
elif ((i >= 296) and (i < 304)):
bram_mem[37].append(memFilearr[i])
elif ((i >= 304) and (i < 312)):
bram_mem[38].append(memFilearr[i])
elif ((i >= 312) and (i < 320)):
bram_mem[39].append(memFilearr[i])
elif ((i >= 320) and (i < 328)):
bram_mem[40].append(memFilearr[i])
elif ((i >= 328) and (i < 336)):
bram_mem[41].append(memFilearr[i])
elif ((i >= 336) and (i < 344)):
bram_mem[42].append(memFilearr[i])
elif ((i >= 344) and (i < 352)):
bram_mem[43].append(memFilearr[i])
elif ((i >= 352) and (i < 360)):
bram_mem[44].append(memFilearr[i])
elif ((i >= 360) and (i < 368)):
bram_mem[45].append(memFilearr[i])
elif ((i >= 368) and (i < 376)):
bram_mem[46].append(memFilearr[i])
elif ((i >= 376) and (i < 384)):
bram_mem[47].append(memFilearr[i])
elif ((i >= 384) and (i < 392)):
bram_mem[48].append(memFilearr[i])
elif ((i >= 392) and (i < 400)):
bram_mem[49].append(memFilearr[i])
elif ((i >= 400) and (i < 408)):
bram_mem[50].append(memFilearr[i])
elif ((i >= 408) and (i < 416)):
bram_mem[51].append(memFilearr[i])
elif ((i >= 416) and (i < 424)):
bram_mem[52].append(memFilearr[i])
elif ((i >= 424) and (i < 432)):
bram_mem[53].append(memFilearr[i])
elif ((i >= 432) and (i < 440)):
bram_mem[54].append(memFilearr[i])
elif ((i >= 440) and (i < 448)):
bram_mem[55].append(memFilearr[i])
elif ((i >= 448) and (i < 456)):
bram_mem[56].append(memFilearr[i])
elif ((i >= 456) and (i < 464)):
bram_mem[57].append(memFilearr[i])
elif ((i >= 464) and (i < 472)):
bram_mem[58].append(memFilearr[i])
elif ((i >= 472) and (i < 480)):
bram_mem[59].append(memFilearr[i])
elif ((i >= 480) and (i < 488)):
bram_mem[60].append(memFilearr[i])
elif ((i >= 488) and (i < 496)):
bram_mem[61].append(memFilearr[i])
elif ((i >= 496) and (i < 504)):
bram_mem[62].append(memFilearr[i])
elif ((i >= 504) and (i < 512)):
bram_mem[63].append(memFilearr[i])
if (num_proc == 1):
for i in range(0, (BRAM_size / 8)):
s = s + "\n "
for j in range(len(bram_mem[i])):
s = s + "X\"" + bram_mem[i][j] + "\","
s = s[:(len(s) - 1)]
memFile.write(s)
memFile.write("\n" + temp2)
memFile.close()
elif (num_proc == 2):
for i in range(0, (BRAM_size / 8)):
s = s + "\n "
for j in range(len(bram_mem[i])):
s = s + "X\"" + bram_mem[i][j] + "\","
s = s[:(len(s) - 1)]
memFile1.write(s)
memFile1.write("\n" + temp12)
memFile1.close()
memFile2.write(s)
memFile2.write("\n" + temp22)
memFile2.close()
elif (num_proc == 4):
for i in range(0, (BRAM_size / 8)):
s = s + "\n "
for j in range(len(bram_mem[i])):
s = s + "X\"" + bram_mem[i][j] + "\","
s = s[:(len(s) - 1)]
memFile1.write(s)
memFile1.write("\n" + temp12)
memFile1.close()
memFile2.write(s)
memFile2.write("\n" | |
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2018 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import pytest
import numpy as np
import pandapower as pp
from pandapower.test.toolbox import add_grid_connection
from pandapower.toolbox import convert_format
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
@pytest.fixture
def simple_opf_test_net():
    """Minimal two-bus OPF network: ext_grid feeding a controllable gen and a load."""
    net = pp.create_empty_network()
    hv_bus = pp.create_bus(net, vn_kv=10.)
    lv_bus = pp.create_bus(net, vn_kv=.4)
    pp.create_ext_grid(net, hv_bus)
    pp.create_gen(net, lv_bus, p_kw=-100, controllable=True,
                  min_p_kw=-150, max_p_kw=-5,
                  min_q_kvar=-50, max_q_kvar=50)
    pp.create_load(net, lv_bus, p_kw=20, controllable=False)
    pp.create_line_from_parameters(
        net, hv_bus, lv_bus, 50, name="line2",
        r_ohm_per_km=0.876, x_ohm_per_km=0.1159876, c_nf_per_km=260.0,
        max_i_ka=0.123, max_loading_percent=100)
    return net
def test_convert_format():
    """OPF on a net carrying the deprecated gen "cost_per_kw" column, after
    running convert_format() to migrate it to the current cost format."""
    # voltage boundaries
    vm_max, vm_min = 1.05, 0.95
    # build the two-bus net
    net = pp.create_empty_network()
    hv = pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
    lv = pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
    pp.create_gen(net, lv, p_kw=-100, controllable=True,
                  min_p_kw=-150, max_p_kw=-5, min_q_kvar=-50, max_q_kvar=50)
    # legacy cost column that convert_format() must translate
    net.gen["cost_per_kw"] = 100
    pp.create_ext_grid(net, hv)
    pp.create_load(net, lv, p_kw=20, controllable=False)
    pp.create_line_from_parameters(
        net, hv, lv, 50, name="line2",
        r_ohm_per_km=0.876, x_ohm_per_km=0.1159876, c_nf_per_km=260.0,
        max_i_ka=0.123, max_loading_percent=100 * 690)
    convert_format(net)
    # run OPF with both initializations and verify the voltage band holds
    for init in ["pf", "flat"]:
        pp.runopp(net, verbose=False, init=init)
        assert net["OPF_converged"]
        logger.debug("test_simplest_voltage")
        logger.debug("res_gen:\n%s" % net.res_gen)
        logger.debug("res_ext_grid:\n%s" % net.res_ext_grid)
        logger.debug("res_bus.vm_pu: \n%s" % net.res_bus.vm_pu)
        assert max(net.res_bus.vm_pu) < vm_max
        assert min(net.res_bus.vm_pu) > vm_min
def test_simplest_voltage():
    """Voltage-constrained OPF on a minimal two-bus network without transformer,
    run with both init modes and once with connectivity check enabled."""
    # voltage boundaries
    vm_max, vm_min = 1.05, 0.95
    # build the two-bus net
    net = pp.create_empty_network()
    hv = pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
    lv = pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
    pp.create_gen(net, lv, p_kw=-100, controllable=True,
                  min_p_kw=-150, max_p_kw=-5, min_q_kvar=-50, max_q_kvar=50)
    pp.create_ext_grid(net, hv)
    pp.create_load(net, lv, p_kw=20, controllable=False)
    pp.create_line_from_parameters(
        net, hv, lv, 50, name="line2",
        r_ohm_per_km=0.876, x_ohm_per_km=0.1159876, c_nf_per_km=260.0,
        max_i_ka=0.123, max_loading_percent=100)
    pp.create_polynomial_cost(net, 0, "gen", np.array([100, 0]))

    def _check_result():
        # convergence plus bus voltages strictly inside the band
        assert net["OPF_converged"]
        logger.debug("test_simplest_voltage")
        logger.debug("res_gen:\n%s" % net.res_gen)
        logger.debug("res_ext_grid:\n%s" % net.res_ext_grid)
        logger.debug("res_bus.vm_pu: \n%s" % net.res_bus.vm_pu)
        assert max(net.res_bus.vm_pu) < vm_max
        assert min(net.res_bus.vm_pu) > vm_min

    # run OPF with both initializations
    for init in ["pf", "flat"]:
        pp.runopp(net, verbose=False, init=init)
        _check_result()
    # and once more with the connectivity check enabled
    pp.runopp(net, verbose=False, check_connectivity=True)
    _check_result()
def test_eg_voltage():
    """OPF on a two-bus network: the slack bus voltage in the result must
    equal the ext_grid voltage setpoint (1.01 pu)."""
    # voltage boundaries:
    vm_max = 1.05
    vm_min = 0.95
    # create net
    net = pp.create_empty_network()
    pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
    pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
    pp.create_gen(net, 1, p_kw=-100, controllable=True, max_p_kw=-5, min_p_kw=-150, max_q_kvar=50,
                  min_q_kvar=-50)
    pp.create_ext_grid(net, 0, vm_pu=1.01)
    pp.create_load(net, 1, p_kw=20, controllable=False)
    pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876,
                                   c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
                                   max_loading_percent=100)
    # run OPF
    for init in ["pf", "flat"]:
        pp.runopp(net, verbose=False, init=init)
        assert net["OPF_converged"]
        # check and assert result
        # (fixed copy-pasted "test_simplest_voltage" debug label)
        logger.debug("test_eg_voltage")
        logger.debug("res_gen:\n%s" % net.res_gen)
        logger.debug("res_ext_grid:\n%s" % net.res_ext_grid)
        logger.debug("res_bus.vm_pu: \n%s" % net.res_bus.vm_pu)
        # scalar-to-scalar comparison; the original compared against the whole
        # ``.values`` array, which only worked by accident for a single ext_grid
        assert net.res_bus.vm_pu.at[0] == net.ext_grid.vm_pu.at[0]
def test_simplest_dispatch():
    """Dispatch OPF on a two-bus network: gen (cost 100/kW) competes with the
    ext_grid (cost 101/kW); the resulting voltages must stay inside the band."""
    # boundaries:
    vm_max = 1.05
    vm_min = 0.95
    # create net
    net = pp.create_empty_network()
    pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
    pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
    pp.create_gen(net, 1, p_kw=-100, controllable=True, max_p_kw=-5, min_p_kw=-150, max_q_kvar=50,
                  min_q_kvar=-50)
    pp.create_polynomial_cost(net, 0, "gen", np.array([100, 0]))
    pp.create_ext_grid(net, 0)
    pp.create_polynomial_cost(net, 0, "ext_grid", np.array([101, 0]))
    pp.create_load(net, 1, p_kw=20, controllable=False)
    pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876,
                                   c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
                                   max_loading_percent=100 * 690)
    # run OPF
    for init in ["pf", "flat"]:
        pp.runopp(net, cost_function="linear", verbose=False, init=init)
        assert net["OPF_converged"]
        # check and assert result
        # (fixed copy-pasted "test_simplest_voltage" label and "res_est_grid" typo)
        logger.debug("test_simplest_dispatch")
        logger.debug("res_gen:\n%s" % net.res_gen)
        logger.debug("res_ext_grid:\n%s" % net.res_ext_grid)
        logger.debug("res_bus.vm_pu: \n%s" % net.res_bus.vm_pu)
        assert max(net.res_bus.vm_pu) < vm_max
        assert min(net.res_bus.vm_pu) > vm_min
def test_opf_gen_voltage():
    """Voltage-constrained OPF on a four-bus network with a transformer
    and a controllable generator."""
    # voltage boundaries
    vm_max, vm_min = 1.05, 0.95
    # create net: one HV bus, three LV buses with identical voltage limits
    net = pp.create_empty_network()
    buses = [pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=vn)
             for vn in (10., .4, .4, .4)]
    pp.create_transformer_from_parameters(
        net, buses[0], buses[1], vsc_percent=3.75, tp_max=2, vn_lv_kv=0.4,
        shift_degree=150, tp_mid=0, vn_hv_kv=10.0, vscr_percent=2.8125,
        tp_pos=0, tp_side="hv", tp_min=-2, tp_st_percent=2.5,
        i0_percent=0.68751, sn_kva=16.0, pfe_kw=0.11, name=None,
        in_service=True, index=None, max_loading_percent=200)
    pp.create_gen(net, buses[3], p_kw=-10, controllable=True,
                  min_p_kw=-25, max_p_kw=0, min_q_kvar=-500, max_q_kvar=500)
    pp.create_polynomial_cost(net, 0, "gen", np.array([10, 0]))
    pp.create_ext_grid(net, buses[0])
    # both lines share identical electrical parameters
    line_params = dict(name="line2", r_ohm_per_km=0.876, x_ohm_per_km=0.1159876,
                       c_nf_per_km=260.0, max_i_ka=0.123, max_loading_percent=100000)
    pp.create_line_from_parameters(net, buses[1], buses[2], 1, **line_params)
    pp.create_line_from_parameters(net, buses[2], buses[3], 1, **line_params)
    # run OPF with both initializations and verify the voltage band holds
    for init in ["pf", "flat"]:
        pp.runopp(net, verbose=False, init=init)
        assert net["OPF_converged"]
        logger.debug("test_opf_gen_voltage")
        logger.debug("res_gen:\n%s" % net.res_gen)
        logger.debug("res_bus.vm_pu: \n%s" % net.res_bus.vm_pu)
        assert max(net.res_bus.vm_pu) < vm_max
        assert min(net.res_bus.vm_pu) > vm_min
def test_opf_sgen_voltage():
    """Voltage-constrained OPF on a four-bus network with a transformer
    and a controllable static generator."""
    # voltage boundaries
    vm_max, vm_min = 1.04, 0.96
    # create net: one HV bus, three LV buses with identical voltage limits
    net = pp.create_empty_network()
    buses = [pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=vn)
             for vn in (10., .4, .4, .4)]
    pp.create_transformer_from_parameters(
        net, buses[0], buses[1], vsc_percent=3.75, tp_max=2, vn_lv_kv=0.4,
        shift_degree=150, tp_mid=0, vn_hv_kv=10.0, vscr_percent=2.8125,
        tp_pos=0, tp_side="hv", tp_min=-2, tp_st_percent=2.5,
        i0_percent=0.68751, sn_kva=16.0, pfe_kw=0.11, name=None,
        in_service=True, index=None, max_loading_percent=1000000)
    pp.create_sgen(net, buses[3], p_kw=-10, controllable=True,
                   min_p_kw=-15, max_p_kw=-5, min_q_kvar=-25, max_q_kvar=25)
    pp.create_polynomial_cost(net, 0, "sgen", np.array([100, 0]))
    pp.create_ext_grid(net, buses[0])
    # both lines share identical electrical parameters
    line_params = dict(name="line2", r_ohm_per_km=0.876, x_ohm_per_km=0.1159876,
                       c_nf_per_km=260.0, max_i_ka=0.123, max_loading_percent=1000000)
    pp.create_line_from_parameters(net, buses[1], buses[2], 1, **line_params)
    pp.create_line_from_parameters(net, buses[2], buses[3], 1, **line_params)
    # run OPF with both initializations and verify the voltage band holds
    for init in ["pf", "flat"]:
        pp.runopp(net, verbose=False, init=init)
        assert net["OPF_converged"]
        logger.debug("test_opf_sgen_voltage")
        logger.debug("res_sgen:\n%s" % net.res_sgen)
        logger.debug("res_bus.vm_pu: \n%s" % net.res_bus.vm_pu)
        assert max(net.res_bus.vm_pu) < vm_max
        assert min(net.res_bus.vm_pu) > vm_min
def test_opf_gen_loading():
    """Loading-constrained OPF with a generator; the voltage band is left wide
    open so only the line/trafo loading limits are binding."""
    # wide open voltage boundaries to make sure they don't interfere with loading
    vm_max, vm_min = 1.5, 0.5
    max_line_loading = 11
    # create net: one HV bus, three LV buses
    net = pp.create_empty_network()
    buses = [pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=vn)
             for vn in (10., .4, .4, .4)]
    pp.create_transformer_from_parameters(
        net, buses[0], buses[1], vsc_percent=3.75, tp_max=2, vn_lv_kv=0.4,
        shift_degree=150, tp_mid=0, vn_hv_kv=10.0, vscr_percent=2.8125,
        tp_pos=0, tp_side="hv", tp_min=-2, tp_st_percent=2.5,
        i0_percent=0.68751, sn_kva=16.0, pfe_kw=0.11, name=None,
        in_service=True, index=None, max_loading_percent=145)
    pp.create_gen(net, buses[3], p_kw=-10, controllable=True,
                  min_p_kw=-15, max_p_kw=-5, min_q_kvar=-50, max_q_kvar=50)
    pp.create_polynomial_cost(net, 0, "gen", np.array([-10, 0]))
    pp.create_ext_grid(net, buses[0])
    pp.create_polynomial_cost(net, 0, "ext_grid", np.array([.1, 0]))
    # both lines share identical electrical parameters
    line_params = dict(name="line2", r_ohm_per_km=0.876, x_ohm_per_km=0.1159876,
                       c_nf_per_km=260.0, max_i_ka=0.123,
                       max_loading_percent=max_line_loading)
    pp.create_line_from_parameters(net, buses[1], buses[2], 1, **line_params)
    pp.create_line_from_parameters(net, buses[2], buses[3], 1, **line_params)
    # run OPF with tightened solver tolerances
    pp.runopp(net, verbose=False, OPF_VIOLATION=1e-1, OUT_LIM_LINE=2,
              PDIPM_GRADTOL=1e-10, PDIPM_COMPTOL=1e-10, PDIPM_COSTTOL=1e-10)
    assert net["OPF_converged"]
    # assert and check result
    logger.debug("test_opf_gen_loading")
    logger.debug("res_gen:\n%s" % net.res_gen)
    logger.debug("res_line.loading_percent:\n%s" % net.res_line.loading_percent)
    assert max(net.res_line.loading_percent) < max_line_loading
    logger.debug("res_trafo.loading_percent:\n%s" % net.res_trafo.loading_percent)
    assert max(net.res_trafo.loading_percent) < 145
    assert max(net.res_bus.vm_pu) < vm_max
    assert min(net.res_bus.vm_pu) > vm_min
def test_opf_sgen_loading():
    """Loading-constrained OPF with a static generator; the voltage band is
    left wide open so only the loading limits are binding."""
    # boundaries
    vm_max, vm_min = 1.5, 0.5
    max_trafo_loading = 800
    max_line_loading = 13
    # create net: one HV bus, three LV buses
    net = pp.create_empty_network()
    buses = [pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=vn)
             for vn in (10., .4, .4, .4)]
    pp.create_transformer_from_parameters(
        net, buses[0], buses[1], vsc_percent=3.75, tp_max=2, vn_lv_kv=0.4,
        shift_degree=150, tp_mid=0, vn_hv_kv=10.0, vscr_percent=2.8125,
        tp_pos=0, tp_side="hv", tp_min=-2, tp_st_percent=2.5,
        i0_percent=0.68751, sn_kva=16.0, pfe_kw=0.11, name=None,
        in_service=True, index=None, max_loading_percent=max_trafo_loading)
    pp.create_sgen(net, buses[3], p_kw=-10, controllable=True,
                   min_p_kw=-15, max_p_kw=-5, min_q_kvar=-25, max_q_kvar=25)
    pp.create_polynomial_cost(net, 0, "sgen", np.array([-10, 0]))
    pp.create_ext_grid(net, buses[0])
    pp.create_polynomial_cost(net, 0, "ext_grid", np.array([.1, 0]))
    # both lines share identical electrical parameters
    line_params = dict(name="line2", r_ohm_per_km=0.876, x_ohm_per_km=0.1159876,
                       c_nf_per_km=260.0, max_i_ka=0.123,
                       max_loading_percent=max_line_loading)
    pp.create_line_from_parameters(net, buses[1], buses[2], 1, **line_params)
    pp.create_line_from_parameters(net, buses[2], buses[3], 1, **line_params)
    # run OPF with both initializations
    for init in ["pf", "flat"]:
        pp.runopp(net, verbose=False, init=init)
        assert net["OPF_converged"]
        # assert and check result
        logger.debug("test_opf_sgen_loading")
        logger.debug("res_sgen:\n%s" % net.res_sgen)
        logger.debug("res_line.loading_percent:\n%s" % net.res_line.loading_percent)
        assert max(net.res_line.loading_percent) - max_line_loading < 1e-2
        logger.debug("res_trafo.loading_percent:\n%s" % net.res_trafo.loading_percent)
        assert max(net.res_trafo.loading_percent) < max_trafo_loading
        assert max(net.res_bus.vm_pu) < vm_max
        assert min(net.res_bus.vm_pu) > vm_min
    # check connectivity check
    pp.runopp(net, verbose=False, check_connectivity=True)
def test_unconstrained_line():
    """OPF on a two-bus network whose line carries no loading limit; only the
    bus voltage band constrains the result."""
    # voltage boundaries
    vm_max, vm_min = 1.05, 0.95
    # build the two-bus net
    net = pp.create_empty_network()
    hv = pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
    lv = pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
    pp.create_gen(net, lv, p_kw=-100, controllable=True,
                  min_p_kw=-150, max_p_kw=-5, min_q_kvar=-50, max_q_kvar=50)
    pp.create_ext_grid(net, hv)
    pp.create_load(net, lv, p_kw=20, controllable=False)
    # note: no max_loading_percent on this line
    pp.create_line_from_parameters(
        net, hv, lv, 50, name="line2",
        r_ohm_per_km=0.876, x_ohm_per_km=0.1159876, c_nf_per_km=260.0,
        max_i_ka=0.123)
    pp.create_polynomial_cost(net, 0, "gen", np.array([1, 0]))
    # run OPF with both initializations and verify the voltage band holds
    for init in ["pf", "flat"]:
        pp.runopp(net, verbose=False, init=init)
        assert net["OPF_converged"]
        logger.debug("test_simplest_voltage")
        logger.debug("res_gen:\n%s" % net.res_gen)
        logger.debug("res_ext_grid:\n%s" % net.res_ext_grid)
        logger.debug("res_bus.vm_pu: \n%s" % net.res_bus.vm_pu)
        assert max(net.res_bus.vm_pu) < vm_max
        assert min(net.res_bus.vm_pu) > vm_min
def test_trafo3w_loading():
net = pp.create_empty_network()
b1, b2, l1 = add_grid_connection(net, vn_kv=110.)
b3 = pp.create_bus(net, vn_kv=20.)
b4 = pp.create_bus(net, vn_kv=10.)
tidx = pp.create_transformer3w(
net, b2, b3, b4, std_type='63/25/38 MVA 110/20/10 kV', max_loading_percent=120)
pp.create_load(net, b3, 5e3, controllable=False)
id = pp.create_load(net, b4, 5e3, controllable=True, max_p_kw=5e4, min_p_kw=0, min_q_kvar=-1e9, max_q_kvar= 1e9)
| |
+
"(task (plan user_speech) (action_type handover_object) (params ) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
{"params": ["Action_take", "Person", "Object", "Place_second"],
"Action_take": [["bring" , "give"], [], [], []],
"Person": [["me"], [], [], []],
"Object": [[], [], ["item"], []],
"Place_second": [[], [], ["place", "room"], []],
"conceptual_dependency": "(task (plan user_speech) (action_type get_object) (params -Object- -Place_second-) (step )) " +
"(task (plan user_speech) (action_type update_object_location) (params location current_loc) (step ))" +
"(task (plan user_speech) (action_type handover_object) (params ) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
###### FIND PEOPLE
#$talk to $whowhere $findp in the {room} and $talk
{"params": ["Action_talk", "Question", "Person", "Location"],
"Action_talk": [["speak", "answer", "tell", "say"], [], [], []],
"Question": [[], [], ["question"], []],
"Person": [[], [], ["person"], []],
"Location":[[], [], ["place", "room"], []],
"conceptual_dependency":"(task (plan user_speech) (action_type find_person_in_room) (params -Person- -Location-) (step ))" +
"(task (plan user_speech) (action_type wait_for_user_instruction) (params question -Question-) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#Tell me the name of the person at the {beacon} Tell me the name of the person in the {room}
{"params": ["Action_talk","Me", "Name", "Location"],
"Action_talk": [["tell"], [], [], []],
"Me": [["me"], [], [], []],
"Name":[["name"], [], [], []],
"Location":[[], [], ["place", "room"], []],
"conceptual_dependency":"(task (plan user_speech) (action_type find_person_in_room) (params person -Location-) (step ))" +
"(task (plan user_speech) (action_type wait_for_user_instruction) (params question ask_name) (step ))" +
"(task (plan user_speech) (action_type update_object_location) (params location current_loc) (step ))" +
"(task (plan user_speech) (action_type wait_for_user_instruction) (params question tell_name) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#$findp in the {room}
{"params": ["Action_get", "Person", "Location"],
"Action_get": [["find", "look_for", "locate"], [], [], []],
"Person": [["person", "someone"], [], [], []],
"Location":[[], [], ["place", "room"], []],
"conceptual_dependency":"(task (plan user_speech) (action_type find_person_in_room) (params person -Location-) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#### FIND OBJECTS
#$fndobj = $vbfind the {kobject?} in the {room}
{"params": ["Action_get", "Object", "Location"],
"Action_get": [["find", "look_for", "locate"], [], [], []],
"Object": [[], [], ["item"], []],
"Location":[[], [], ["place", "room"], []],
"conceptual_dependency":"(task (plan user_speech) (action_type update_object_location) (params location -Location-) (step ))"+
"(task (plan user_speech) (action_type find_object_in_room) (params -Object- -Location-) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#$fndobj = Tell me how many $object there are on the {placement}
{"params": ["Action_talk", "Person", "Many", "Object", "Location_second"],
"Action_talk": [["tell"], [], [], []],
"Person":[["me"],[],[],[]],
"Many":[["many"],[],[],[]],
"Object": [[], [], ["item"], []],
"Location_second":[[],[],["place", "room"],[]],
"conceptual_dependency":"(task (plan user_speech) (action_type find_how_many_objects) (params -Object- -Location_second- nil) (step ))" +
"(task (plan user_speech) (action_type update_object_location) (params location current_loc) (step ))" +
"(task (plan user_speech) (action_type wait_for_user_instruction) (params question tell_many_obj) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
######### NAGOYA 2017 CATEGORY 2
#$vbdeliver the $object to $someone
{"params": ["Action_take", "Object", "Gesture", "Location"],
"Action_take": [["bring", "give", "deliver"], [], [], []],
"Object": [[], [], ["item"], []],
"Gesture":[["waving", "rising_their_left_arm", "raising_their_right_arm", "pointing_to_the_left", "pointing_to_the_right"],[],[],[]],
"Location": [[], [], ["place", "room"], []],
"conceptual_dependency": "(task (plan user_speech) (action_type get_object) (params -Object- default_location) (step )) " +
"(task (plan user_speech) (action_type update_object_location) (params location -Location-) (step ))" +
"(task (plan user_speech) (action_type find_pgg_person) (params -Gesture- -Location-) (step ))" +
"(task (plan user_speech) (action_type handover_object) (params ) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
###### FIND PEOPLE
#$fndppl = $talk to $whowhere
{"params": ["Action_talk", "Question", "Gesture", "Location"],
"Action_talk": [["speak", "answer", "tell", "say"], [], [], []],
"Question": [[], [], ["question"], []],
"Gesture":[[],[],["gesture"],[]],
"Location":[[], [], ["place", "room"], []],
"conceptual_dependency":"(task (plan user_speech) (action_type update_object_location) (params location -Location-) (step ))" +
"(task (plan user_speech) (action_type find_pgg_person) (params -Gesture- -Location-) (step ))" +
"(task (plan user_speech) (action_type wait_for_user_instruction) (params question -Question-) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#$findp = $vbfind a ($pgesture | $ppose) person
{"params": ["Action_find","Person", "PGG"],
"Action_find": [["find", "locate", "look_for"], [], [], []],
"Person": [["man"],[],[],[]],
"PGG":[[],[],["gprsn", "posprs", "gesture"],[]],
"conceptual_dependency":"(task (plan user_speech) (action_type find_pgg_person) (params -PGG- place_loc) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#$findp = $vbfind a $pgenders
{"params": ["Action_find","Person"],
"Action_find": [["find", "locate", "look_for"], [], [], []],
"Person": [["man"],[],[],[]],
"conceptual_dependency":"(task (plan user_speech) (action_type find_pgg_person) (params -Person- place_loc) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#$findppl = $findp in the {room}
{"params": ["Action_find", "PGG", "Location"],
"Action_find": [["find", "locate", "look_for"], [], [], []],
"PGG":[["man"],[],[],[]],
"Location":[[],[],["place", "room"],[]],
"conceptual_dependency":"(task (plan user_speech) (action_type update_object_location) (params location -Location-) (step ))" +
"(task (plan user_speech) (action_type find_pgg_person) (params -PGG- -Location-) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#$findppl = $findp in the {room}
{"params": ["Action_find", "Person", "PGG", "Location"],
"Action_find": [["find", "locate", "look_for"], [], [], []],
"Person": [["man"],[],[],[]],
"PGG":[[],[],["gprsn", "posprs", "gesture"],[]],
"Location":[[],[],["place", "room"],[]],
"conceptual_dependency":"(task (plan user_speech) (action_type update_object_location) (params location -Location-) (step ))" +
"(task (plan user_speech) (action_type find_pgg_person) (params -PGG- -Location-) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#$fndppl = Tell me the ( gender | pose) of the person at the {beacon}
#$fndppl = Tell me the ( gender | pose) of the person in the {room}
{"params": ["Action_talk","Me", "Genderpose", "Location"],
"Action_talk": [["tell"], [], [], []],
"Me": [["me"], [], [], []],
"Genderpose":[["gender", "pose"], [], [], []],
"Location":[[], [], ["place", "room"], []],
"conceptual_dependency":"(task (plan user_speech) (action_type update_object_location) (params location -Location-) (step ))" +
"(task (plan user_speech) (action_type find_gender_pose_person) (params -Genderpose- -Location-) (step ))" +
"(task (plan user_speech) (action_type update_object_location) (params location current_loc) (step ))" +
"(task (plan user_speech) (action_type wait_for_user_instruction) (params question tell_gender_pose) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#$fndppl = tell me how many people in the {room} are ($pgenderp | $pose)
{"params": ["Action_talk", "Person", "How", "Many", "People", "Location", "Pgender"],
"Action_talk": [["tell"], [], [], []],
"Person": [["me"],[],[],[]],
"How": [["how"],[],[],[]],
"Many": [["meany"],[],[],[]],
"People": [["people"],[],[],[]],
"Location":[[], [], ["place", "room"], []],
"Pgender":[["men", "women", "boys", "girls", "male", "female", "sitting", "standing", "lying"], [], [], []],
"conceptual_dependency":"(task (plan user_speech) (action_type find_gender_pose_crowd) (params -Pgender- -Location-) (step ))" +
"(task (plan user_speech) (action_type update_object_location) (params location current_loc) (step ))" +
"(task (plan user_speech) (action_type wait_for_user_instruction) (params question tell_how_many_people) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#Tell me how many {category} there are on the {placement}
{"params": ["Action_talk", "Person", "Many", "Category", "Location_second"],
"Action_talk": [["tell"], [], [], []],
"Person":[["me"],[],[],[]],
"Many":[["many"],[],[],[]],
"Category": [["snacks", "cutlery", "food", "drinks", "tableware", "containers", "fruits", "cleaning_stuff"], [], [], []],
"Location_second":[[], [], ["place", "room"], []],
"conceptual_dependency":"(task (plan user_speech) (action_type find_how_many_category) (params -Category- -Location_second- nil) (step ))" +
"(task (plan user_speech) (action_type update_object_location) (params location current_loc) (step ))" +
"(task (plan user_speech) (action_type wait_for_user_instruction) (params question tell_many_obj) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#$fndobj = Tell me what's the $oprop object on the {placement}
{"params": ["Action_talk", "Person", "Property", "Object", "Location"],
"Action_talk": [["tell"], [], [], []],
"Person": [["me"], [], [], []],
"Property": [["biggest", "smallest", "heaviest", "lightest", "largest", "thinnest"], [], [], []],
"Object": [["object"], [], [], []],
"Location":[[], [], ["place", "room"], []],
"conceptual_dependency":"(task (plan user_speech) (action_type update_object_location) (params location -Location-) (step ))" +
"(task (plan user_speech) (action_type find_prop_object) (params -Property- nil) (step ))" +
"(task (plan user_speech) (action_type update_object_location) (params location current_loc) (step ))" +
"(task (plan user_speech) (action_type wait_for_user_instruction) (params question tell_what) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#$fndobj = Tell me what's the $oprop {category} on the {placement}
{"params": ["Action_talk", "Person", "Property", "Category", "Location"],
"Action_talk": [["tell"], [], [], []],
"Person": [["me"],[],[],[]],
"Property": [["biggest", "smallest", "heaviest", "lightest", "largest", "thinnest"], [], [], []],
"Category": [["snacks", "cutlery", "food", "drinks", "tableware", "containers", "fruits", "cleaning_stuff"], [], [], []],
"Location":[[], [], ["place", "room"], []],
"conceptual_dependency":"(task (plan user_speech) (action_type update_object_location) (params location -Location-) (step ))" +
"(task (plan user_speech) (action_type find_prop_object) (params -Property- -Category-) (step ))" +
"(task (plan user_speech) (action_type update_object_location) (params location current_loc) (step ))" +
"(task (plan user_speech) (action_type wait_for_user_instruction) (params question tell_what_cat) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#$fndobj = $vbfind the {category} in the {room}
{"params": ["Action_get", "Category", "Location"],
"Action_get": [["find", "look_for", "locate"], [], [], []],
"Category": [["snacks", "cutlery", "food", "drinks", "tableware", "containers", "fruits", "cleaning_stuff"], [], [], []],
"Location":[[], [], ["place", "room"], []],
"conceptual_dependency":"(task (plan user_speech) (action_type update_object_location) (params location -Location-) (step ))" +
"(task (plan user_speech) (action_type find_category_room) (params -Category- -Location-) (step )) ",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#$follow = $vbfollow {name 1} from the {beacon 1} to the {room 2}
#$follow = meet {name 1} at the {beacon 1} and $vbfollow them $fllwdest
{"params": ["Action_follow", "Person", "Location_first", "Location_second"],
"Action_follow": [["follow", "after"], [], [], []],
"Person": [[], [], ["person"], []],
"Location_first":[[], [], ["place", "room"], []],
"Location_second":[[], [], ["place", "room"], []],
"conceptual_dependency": "(task (plan user_speech) (action_type find_person_in_room) (params -Person- -Location_first-) (step )) " +
"(task (plan user_speech) (action_type follow_man) (params man -Location_second-) (step )) ",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#$gdcmd = $vbguide {name 1} from the {beacon 1} to the {beacon 2}
#$gdcmd = meet {name 1} at the {beacon 1} and $guideto
{"params": ["Action_guide", "Person", "Location_first", "Location_second"],
"Action_guide": [["guide" , "escort" , "take" , "lead" , "accompany"], [], [], []],
"Person": [[], [], ["person"], []],
"Location_first":[[], [], ["place", "room"], []],
"Location_second":[[], [], ["place", "room"], []],
"conceptual_dependency": "(task (plan user_speech) (action_type find_person_in_room) (params -Person- -Location_first-) (step )) " +
"(task (plan user_speech) (action_type get_object) (params man_guide -Location_second-) (step )) ",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#$gdcmd = $vbguide {name 1} to the {beacon 2}, $gdwhere
{"params": ["Action_guide", "Person", "Location_first", "May", "Action_find", "Location_second"],
"Action_guide": [["guide" , "escort" , "take" , "lead" , "accompany"], [], [], []],
"Person": [[], [], ["person"], []],
"Location_first":[[], [], ["place", "room"], []],
"May":[["may", "can", "will"],[],[],[]],
"Action_find":[["find"],[],[],[]],
"Location_second":[[], [], ["place", "room"], []],
"conceptual_dependency": "(task (plan user_speech) (action_type find_person_in_room) (params -Person- -Location_second-) (step )) " +
"(task (plan user_speech) (action_type get_object) (params man_guide -Location_first-) (step )) ",
"verbal_confirmation": '',
"planner_confirmed": | |
options(joinedload_all('instances')).\
first()
if not result:
raise exception.SecurityGroupNotFound(
security_group_id=security_group_id)
return result
@require_context
def security_group_get_by_name(context, project_id, group_name):
    """Look up a live security group by (project, name).

    Rules and instances are eager-loaded. Raises
    SecurityGroupNotFoundForProject when nothing matches.
    """
    session = get_session()
    query = (session.query(models.SecurityGroup).
             filter_by(project_id=project_id).
             filter_by(name=group_name).
             filter_by(deleted=False).
             options(joinedload_all('rules')).
             options(joinedload_all('instances')))
    result = query.first()
    if not result:
        raise exception.SecurityGroupNotFoundForProject(
                project_id=project_id,
                security_group_id=group_name)
    return result
@require_context
def security_group_get_by_project(context, project_id):
    """Return all live security groups of a project, rules eager-loaded."""
    session = get_session()
    query = (session.query(models.SecurityGroup).
             filter_by(project_id=project_id).
             filter_by(deleted=False).
             options(joinedload_all('rules')))
    return query.all()
@require_context
def security_group_get_by_instance(context, instance_id):
    """Return the live security groups associated with one instance."""
    session = get_session()
    query = (session.query(models.SecurityGroup).
             filter_by(deleted=False).
             options(joinedload_all('rules')).
             join(models.SecurityGroup.instances).
             filter_by(id=instance_id).
             filter_by(deleted=False))
    return query.all()
@require_context
def security_group_exists(context, project_id, group_name):
    """Return True when the named group exists for the project."""
    try:
        # get_by_name raises on a miss, so reaching the next line
        # means the group was found.
        security_group_get_by_name(context, project_id, group_name)
    except exception.NotFound:
        return False
    return True
@require_context
def security_group_create(context, values):
    """Create a security group from a dict of column values.

    Returns the new, already-saved SecurityGroup model.
    """
    security_group_ref = models.SecurityGroup()
    # FIXME(devcamcar): Unless I do this, rules fails with lazy load exception
    # once save() is called. This will get cleaned up in next orm pass.
    security_group_ref.rules
    security_group_ref.update(values)
    security_group_ref.save()
    return security_group_ref
@require_context
def security_group_destroy(context, security_group_id):
    """Soft-delete a security group, its instance links, and its rules.

    All three tables are stamped inside one transaction. `updated_at`
    is assigned to itself via literal_column, apparently so the
    soft-delete does not bump the timestamp -- confirm against the
    model's onupdate behavior.
    """
    session = get_session()
    with session.begin():
        session.query(models.SecurityGroup).\
                filter_by(id=security_group_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})
        session.query(models.SecurityGroupInstanceAssociation).\
                filter_by(security_group_id=security_group_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})
        session.query(models.SecurityGroupIngressRule).\
                filter_by(group_id=security_group_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})
@require_context
def security_group_destroy_all(context, session=None):
    """Soft-delete every security group and every ingress rule.

    Both tables are stamped inside one transaction; as elsewhere,
    `updated_at` is set to itself via literal_column, presumably to
    leave the timestamp untouched -- confirm.
    """
    if not session:
        session = get_session()
    with session.begin():
        session.query(models.SecurityGroup).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})
        session.query(models.SecurityGroupIngressRule).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})
###################
@require_context
def security_group_rule_get(context, security_group_rule_id, session=None):
    """Fetch one ingress rule by id or raise SecurityGroupNotFoundForRule."""
    if not session:
        session = get_session()
    if is_admin_context(context):
        show_deleted = can_read_deleted(context)
    else:
        # TODO(vish): Join to group and check for project_id
        show_deleted = False
    result = (session.query(models.SecurityGroupIngressRule).
              filter_by(deleted=show_deleted).
              filter_by(id=security_group_rule_id).
              first())
    if not result:
        raise exception.SecurityGroupNotFoundForRule(
                rule_id=security_group_rule_id)
    return result
@require_context
def security_group_rule_get_by_security_group(context, security_group_id,
                                              session=None):
    """List the ingress rules whose parent is the given security group."""
    if not session:
        session = get_session()
    if is_admin_context(context):
        show_deleted = can_read_deleted(context)
    else:
        show_deleted = False
    return (session.query(models.SecurityGroupIngressRule).
            filter_by(deleted=show_deleted).
            filter_by(parent_group_id=security_group_id).
            options(joinedload_all('grantee_group.instances')).
            all())
@require_context
def security_group_rule_get_by_security_group_grantee(context,
                                                      security_group_id,
                                                      session=None):
    """List the ingress rules that grant access TO the given group."""
    if not session:
        session = get_session()
    if is_admin_context(context):
        show_deleted = can_read_deleted(context)
    else:
        show_deleted = False
    return (session.query(models.SecurityGroupIngressRule).
            filter_by(deleted=show_deleted).
            filter_by(group_id=security_group_id).
            all())
@require_context
def security_group_rule_create(context, values):
    """Persist a new ingress rule built from *values* and return it."""
    rule_ref = models.SecurityGroupIngressRule()
    rule_ref.update(values)
    rule_ref.save()
    return rule_ref
@require_context
def security_group_rule_destroy(context, security_group_rule_id):
    """Delete a single ingress rule inside one transaction."""
    session = get_session()
    with session.begin():
        rule = security_group_rule_get(context,
                                       security_group_rule_id,
                                       session=session)
        rule.delete(session=session)
###################
@require_admin_context
def provider_fw_rule_create(context, rule):
    """Persist a provider-level firewall rule from a dict of values."""
    rule_ref = models.ProviderFirewallRule()
    rule_ref.update(rule)
    rule_ref.save()
    return rule_ref
@require_admin_context
def provider_fw_rule_get_all(context):
    """Return every provider firewall rule visible to this context."""
    session = get_session()
    return (session.query(models.ProviderFirewallRule).
            filter_by(deleted=can_read_deleted(context)).
            all())
@require_admin_context
def provider_fw_rule_get_all_by_cidr(context, cidr):
    """Return provider firewall rules that target the given CIDR."""
    session = get_session()
    return (session.query(models.ProviderFirewallRule).
            filter_by(deleted=can_read_deleted(context)).
            filter_by(cidr=cidr).
            all())
@require_admin_context
def provider_fw_rule_destroy(context, rule_id):
    """Soft-delete one provider firewall rule by id."""
    session = get_session()
    with session.begin():
        (session.query(models.ProviderFirewallRule).
         filter_by(id=rule_id).
         update({'deleted': True,
                 'deleted_at': utils.utcnow(),
                 'updated_at': literal_column('updated_at')}))
###################
@require_admin_context
def user_get(context, id, session=None):
    """Fetch a user by primary key or raise UserNotFound."""
    if not session:
        session = get_session()
    result = (session.query(models.User).
              filter_by(id=id).
              filter_by(deleted=can_read_deleted(context)).
              first())
    if not result:
        raise exception.UserNotFound(user_id=id)
    return result
@require_admin_context
def user_get_by_access_key(context, access_key, session=None):
    """Fetch a user by access key or raise AccessKeyNotFound."""
    if not session:
        session = get_session()
    result = (session.query(models.User).
              filter_by(access_key=access_key).
              filter_by(deleted=can_read_deleted(context)).
              first())
    if not result:
        raise exception.AccessKeyNotFound(access_key=access_key)
    return result
@require_admin_context
def user_create(_context, values):
    """Create and persist a user from a dict of column values."""
    new_user = models.User()
    new_user.update(values)
    new_user.save()
    return new_user
@require_admin_context
def user_delete(context, id):
    """Remove a user and every association row that points at it."""
    session = get_session()
    # The three association tables are purged in the same order as before,
    # then the user row itself is deleted, all in one transaction.
    association_models = (models.UserProjectAssociation,
                          models.UserRoleAssociation,
                          models.UserProjectRoleAssociation)
    with session.begin():
        for association_model in association_models:
            session.query(association_model).\
                    filter_by(user_id=id).\
                    delete()
        user_ref = user_get(context, id, session=session)
        session.delete(user_ref)
def user_get_all(context):
    """Return every user visible in this context."""
    session = get_session()
    return (session.query(models.User).
            filter_by(deleted=can_read_deleted(context)).
            all())
def user_get_roles(context, user_id):
    """Return the global role names assigned to a user."""
    session = get_session()
    with session.begin():
        user_ref = user_get(context, user_id, session=session)
        return [association.role for association in user_ref['roles']]
def user_get_roles_for_project(context, user_id, project_id):
    """Return the role names a user holds within one project."""
    session = get_session()
    with session.begin():
        associations = (session.query(models.UserProjectRoleAssociation).
                        filter_by(user_id=user_id).
                        filter_by(project_id=project_id).
                        all())
        return [association.role for association in associations]
def user_remove_project_role(context, user_id, project_id, role):
    """Delete one (user, project, role) association if present."""
    session = get_session()
    with session.begin():
        (session.query(models.UserProjectRoleAssociation).
         filter_by(user_id=user_id).
         filter_by(project_id=project_id).
         filter_by(role=role).
         delete())
def user_remove_role(context, user_id, role):
    """Remove every global association of *role* from a user.

    Args:
        context: request context (unused here beyond convention).
        user_id: id of the user to strip the role from.
        role: role name to remove.
    """
    session = get_session()
    with session.begin():
        associations = session.query(models.UserRoleAssociation).\
                filter_by(user_id=user_id).\
                filter_by(role=role).\
                all()
        # Iterate under a fresh name: the original loop reused `role`,
        # shadowing the parameter it had just filtered on.
        for association in associations:
            session.delete(association)
def user_add_role(context, user_id, role):
    """Grant a global role to a user."""
    session = get_session()
    with session.begin():
        user_ref = user_get(context, user_id, session=session)
        association = models.UserRoleAssociation(user=user_ref, role=role)
        association.save(session=session)
def user_add_project_role(context, user_id, project_id, role):
    """Grant a role to a user, scoped to a single project."""
    session = get_session()
    with session.begin():
        user_ref = user_get(context, user_id, session=session)
        project_ref = project_get(context, project_id, session=session)
        association = models.UserProjectRoleAssociation(
                user_id=user_ref['id'],
                project_id=project_ref['id'],
                role=role)
        association.save(session=session)
def user_update(context, user_id, values):
    """Apply a dict of column updates to an existing user."""
    session = get_session()
    with session.begin():
        target = user_get(context, user_id, session=session)
        target.update(values)
        target.save(session=session)
###################
def project_create(_context, values):
    """Create and persist a project from a dict of column values."""
    new_project = models.Project()
    new_project.update(values)
    new_project.save()
    return new_project
def project_add_member(context, project_id, user_id):
    """Append a user to a project's member list."""
    session = get_session()
    with session.begin():
        target_project = project_get(context, project_id, session=session)
        target_user = user_get(context, user_id, session=session)
        # In-place += keeps the ORM-instrumented collection intact.
        target_project.members += [target_user]
        target_project.save(session=session)
def project_get(context, id, session=None):
    """Fetch a live project by id (members eager-loaded) or raise."""
    if not session:
        session = get_session()
    result = (session.query(models.Project).
              filter_by(deleted=False).
              filter_by(id=id).
              options(joinedload_all('members')).
              first())
    if not result:
        raise exception.ProjectNotFound(project_id=id)
    return result
def project_get_all(context):
    """Return every visible project, members eager-loaded."""
    session = get_session()
    return (session.query(models.Project).
            filter_by(deleted=can_read_deleted(context)).
            options(joinedload_all('members')).
            all())
def project_get_by_user(context, user_id):
    """Return the projects a user belongs to, or raise UserNotFound."""
    session = get_session()
    user = (session.query(models.User).
            filter_by(deleted=can_read_deleted(context)).
            filter_by(id=user_id).
            options(joinedload_all('projects')).
            first())
    if not user:
        raise exception.UserNotFound(user_id=user_id)
    return user.projects
def project_remove_member(context, project_id, user_id):
    """Detach a user from a project's member list (no-op if not a member)."""
    # NOTE(review): unlike the sibling mutators, this does not wrap its
    # work in `with session.begin()` -- confirm autocommit is intended.
    session = get_session()
    project = project_get(context, project_id, session=session)
    user = user_get(context, user_id, session=session)
    if user in project.members:
        project.members.remove(user)
        project.save(session=session)
def project_update(context, project_id, values):
    """Apply a dict of column updates to an existing project."""
    session = get_session()
    with session.begin():
        target = project_get(context, project_id, session=session)
        target.update(values)
        target.save(session=session)
def project_delete(context, id):
    """Remove a project's user associations, then the project row."""
    session = get_session()
    with session.begin():
        for association_model in (models.UserProjectAssociation,
                                  models.UserProjectRoleAssociation):
            session.query(association_model).\
                    filter_by(project_id=id).\
                    delete()
        project_ref = project_get(context, id, session=session)
        session.delete(project_ref)
@require_context
def project_get_networks(context, project_id, associate=True):
    # NOTE(tr3buchet): as before this function will associate
    # a project with a network if it doesn't have one and
    # associate is true
    session = get_session()
    networks = (session.query(models.Network).
                filter_by(project_id=project_id).
                filter_by(deleted=False).
                all())
    if networks:
        return networks
    if not associate:
        return []
    return [network_associate(context, project_id)]
@require_context
def project_get_networks_v6(context, project_id):
    # IPv6 lookup currently shares the v4 implementation, including the
    # default associate=True behavior of project_get_networks.
    return project_get_networks(context, project_id)
###################
@require_admin_context
def migration_create(context, values):
    """Create and persist a migration record from a dict of values."""
    migration_ref = models.Migration()
    migration_ref.update(values)
    migration_ref.save()
    return migration_ref
@require_admin_context
def migration_update(context, id, values):
    """Update a migration record and return the refreshed model."""
    session = get_session()
    with session.begin():
        migration_ref = migration_get(context, id, session=session)
        migration_ref.update(values)
        migration_ref.save(session=session)
        return migration_ref
@require_admin_context
def migration_get(context, id, session=None):
    """Fetch a migration row by id or raise MigrationNotFound."""
    if not session:
        session = get_session()
    migration_ref = session.query(models.Migration).\
            filter_by(id=id).first()
    if not migration_ref:
        raise exception.MigrationNotFound(migration_id=id)
    return migration_ref
@require_admin_context
def migration_get_by_instance_and_status(context, instance_uuid, status):
    """Fetch the migration of an instance that is in the given status."""
    session = get_session()
    migration_ref = (session.query(models.Migration).
                     filter_by(instance_uuid=instance_uuid).
                     filter_by(status=status).
                     first())
    if not migration_ref:
        raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid,
                                                  status=status)
    return migration_ref
##################
def console_pool_create(context, values):
    """Create and persist a console pool from a dict of values."""
    pool_ref = models.ConsolePool()
    pool_ref.update(values)
    pool_ref.save()
    return pool_ref
def console_pool_get(context, pool_id):
    """Fetch a live console pool by id or raise ConsolePoolNotFound."""
    session = get_session()
    pool = (session.query(models.ConsolePool).
            filter_by(deleted=False).
            filter_by(id=pool_id).
            first())
    if not pool:
        raise exception.ConsolePoolNotFound(pool_id=pool_id)
    return pool
def console_pool_get_by_host_type(context, compute_host, host,
                                  console_type):
    """Fetch the pool matching (compute_host, host, console_type)."""
    session = get_session()
    pool = (session.query(models.ConsolePool).
            filter_by(host=host).
            filter_by(console_type=console_type).
            filter_by(compute_host=compute_host).
            filter_by(deleted=False).
            options(joinedload('consoles')).
            first())
    if not pool:
        raise exception.ConsolePoolNotFoundForHostType(
                host=host,
                console_type=console_type,
                compute_host=compute_host)
    return pool
def console_pool_get_all_by_host_type(context, host, console_type):
    """Return all live pools on a host of the given console type."""
    session = get_session()
    return (session.query(models.ConsolePool).
            filter_by(host=host).
            filter_by(console_type=console_type).
            filter_by(deleted=False).
            options(joinedload('consoles')).
            all())
def console_create(context, values):
    """Create and persist a console record from a dict of values."""
    console_ref = models.Console()
    console_ref.update(values)
    console_ref.save()
    return console_ref
def console_delete(context, console_id):
    """Hard-delete a console record by id."""
    session = get_session()
    with session.begin():
        # consoles are meant to be transient. (mdragon)
        (session.query(models.Console).
         filter_by(id=console_id).
         delete())
def console_get_by_pool_instance(context, pool_id, instance_id):
    """Fetch the console for an instance within one pool, or raise."""
    session = get_session()
    console = (session.query(models.Console).
               filter_by(pool_id=pool_id).
               filter_by(instance_id=instance_id).
               options(joinedload('pool')).
               first())
    if not console:
        raise exception.ConsoleNotFoundInPoolForInstance(
                pool_id=pool_id, instance_id=instance_id)
    return console
def console_get_all_by_instance(context, instance_id):
    """Return every console attached to an instance, pools eager-loaded."""
    session = get_session()
    return (session.query(models.Console).
            filter_by(instance_id=instance_id).
            options(joinedload('pool')).
            all())
def console_get(context, console_id, instance_id=None):
    """Fetch a console by id, optionally constrained to one instance."""
    session = get_session()
    query = session.query(models.Console).filter_by(id=console_id)
    if instance_id:
        query = query.filter_by(instance_id=instance_id)
    result = query.options(joinedload('pool')).first()
    if result:
        return result
    # Raise the more specific error when an instance was requested.
    if instance_id:
        raise exception.ConsoleNotFoundForInstance(console_id=console_id,
                                                   instance_id=instance_id)
    raise exception.ConsoleNotFound(console_id=console_id)
##################
@require_admin_context
def instance_type_create(_context, values):
    """Create a new instance type. In order to pass in extra specs,
    the values dict should contain a 'extra_specs' key/value pair:
    {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}

    Any failure while building or saving the rows is re-raised as
    exception.DBError.
    """
    try:
        specs = values.get('extra_specs')
        specs_refs = []
        if specs:
            # items() instead of iteritems() works on both Python 2 and 3.
            for k, v in specs.items():
                specs_ref = models.InstanceTypeExtraSpecs()
                specs_ref['key'] = k
                specs_ref['value'] = v
                specs_refs.append(specs_ref)
        values['extra_specs'] = specs_refs
        instance_type_ref = models.InstanceTypes()
        instance_type_ref.update(values)
        instance_type_ref.save()
    except Exception as e:
        # `except ... as` is valid from Python 2.6 and required on Python 3;
        # the previous comma form was Python-2-only syntax.
        raise exception.DBError(e)
    return instance_type_ref
def _dict_with_extra_specs(inst_type_query):
"""Takes an instance OR volume type query returned by sqlalchemy
and returns it as a dictionary, converting the extra_specs
entry from a list of dicts:
'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...]
to a single dict:
'extra_specs' : {'k1': 'v1'}
"""
inst_type_dict = dict(inst_type_query)
extra_specs = dict([(x['key'], x['value']) for x in \
inst_type_query['extra_specs']])
inst_type_dict['extra_specs'] = extra_specs
return inst_type_dict
@require_context
def instance_type_get_all(context, inactive=False):
    """
    Returns a dict describing all instance_types with name as key.

    When `inactive` is true, soft-deleted rows are included.
    """
    session = get_session()
    query = session.query(models.InstanceTypes).\
            options(joinedload('extra_specs'))
    if not inactive:
        query = query.filter_by(deleted=False)
    inst_dict = {}
    for inst_type in query.order_by("name").all():
        inst_dict[inst_type['name']] = _dict_with_extra_specs(inst_type)
    return inst_dict
@require_context
def instance_type_get(context, id):
    """Returns a dict describing specific instance_type"""
    session = get_session()
    inst_type = (session.query(models.InstanceTypes).
                 options(joinedload('extra_specs')).
                 filter_by(id=id).
                 first())
    if inst_type:
        return _dict_with_extra_specs(inst_type)
    raise exception.InstanceTypeNotFound(instance_type=id)
@require_context
def instance_type_get_by_name(context, name):
    """Returns a dict describing specific instance_type"""
    session = get_session()
    inst_type = (session.query(models.InstanceTypes).
                 options(joinedload('extra_specs')).
                 filter_by(name=name).
                 first())
    if inst_type:
        return _dict_with_extra_specs(inst_type)
    raise exception.InstanceTypeNotFoundByName(instance_type_name=name)
@require_context
def instance_type_get_by_flavor_id(context, id):
    """Returns a dict describing specific flavor_id"""
    try:
        flavor_id = int(id)
    except ValueError:
        # Non-numeric ids can never match a flavor.
        raise exception.FlavorNotFound(flavor_id=id)
    session = get_session()
    inst_type = (session.query(models.InstanceTypes).
                 options(joinedload('extra_specs')).
                 filter_by(flavorid=flavor_id).
                 first())
    if inst_type:
        return _dict_with_extra_specs(inst_type)
    raise exception.FlavorNotFound(flavor_id=flavor_id)
@require_admin_context
def instance_type_destroy(context, name):
    """ Marks specific instance_type as deleted"""
    session = get_session()
    instance_type_ref = session.query(models.InstanceTypes).\
            filter_by(name=name)
    # update() reports how many rows matched; zero means no such type.
    if instance_type_ref.update(dict(deleted=True)) == 0:
        raise exception.InstanceTypeNotFoundByName(instance_type_name=name)
    return instance_type_ref
@require_admin_context
def instance_type_purge(context, name):
    """ Removes specific instance_type from DB
        Usually instance_type_destroy should be used
    """
    session = get_session()
    instance_type_ref = session.query(models.InstanceTypes).\
            filter_by(name=name)
    # delete() reports how many rows were removed; zero means no such type.
    if instance_type_ref.delete() == 0:
        raise exception.InstanceTypeNotFoundByName(instance_type_name=name)
    return instance_type_ref
####################
@require_admin_context
def zone_create(context, values):
    """Create and persist a zone record from a dict of values."""
    zone_ref = models.Zone()
    zone_ref.update(values)
    zone_ref.save()
    return zone_ref
@require_admin_context
def zone_update(context, zone_id, values):
session = | |
import functools
from pathlib import Path
from typing import Sequence, Tuple, Union
import PIL
import numpy
import six
import tensorflow
from PIL import Image, ImageColor, ImageDraw, ImageFont
from attr import dataclass
from matplotlib import pyplot
from typing import Sequence, Tuple, Union
from warg.mixins.dict_mixins import IterDictValuesMixin
__author__ = "<NAME>"
__doc__ = r"""A set of functions that are used for visualization.
These functions often receive an image, perform some visualization on the image.
The functions do not return a value, instead they modify the image itself.
"""
@dataclass
class BoundingBoxCoordinatesSpec(IterDictValuesMixin):
    """Axis-aligned bounding-box corner coordinates.

    The attributes are annotated so the dataclass decorator treats them as
    real fields: the generated __init__ accepts
    (x_min, y_min, x_max, y_max) — matching the hand-written constructor
    that was previously left commented out. Bare (unannotated) assignments
    were plain class variables and produced no constructor arguments at
    all. All fields default to 0, so no-argument construction still works.
    """

    x_min: float = 0
    y_min: float = 0
    x_max: float = 0
    y_max: float = 0
@dataclass
class BoundingBoxSpec:
    """One detection: box coordinates plus score, label, mask, keypoints
    and a draw color (color name string or RGB tuple)."""

    # __slots__ mirrors the annotated fields below to avoid a per-instance
    # __dict__.
    __slots__ = ["coordinates", "score", "label", "mask", "keypoints", "color"]
    coordinates: Tuple[float]
    score: float
    label: str
    mask: Tuple[float]
    keypoints: Tuple[float]
    color: Union[str, tuple]
    def __post_init__(self):
        # NOTE(review): `dataclass` is imported from `attr` at file top;
        # attrs invokes `__attrs_post_init__`, not `__post_init__`, so this
        # hook may never run -- confirm which decorator is intended.
        pass
_TITLE_LEFT_MARGIN = 10
_TITLE_TOP_MARGIN = 10
STANDARD_COLORS = [
"AliceBlue",
"Chartreuse",
"Aqua",
"Aquamarine",
"Azure",
"Beige",
"Bisque",
"BlanchedAlmond",
"BlueViolet",
"BurlyWood",
"CadetBlue",
"AntiqueWhite",
"Chocolate",
"Coral",
"CornflowerBlue",
"Cornsilk",
"Crimson",
"Cyan",
"DarkCyan",
"DarkGoldenRod",
"DarkGrey",
"DarkKhaki",
"DarkOrange",
"DarkOrchid",
"DarkSalmon",
"DarkSeaGreen",
"DarkTurquoise",
"DarkViolet",
"DeepPink",
"DeepSkyBlue",
"DodgerBlue",
"FireBrick",
"FloralWhite",
"ForestGreen",
"Fuchsia",
"Gainsboro",
"GhostWhite",
"Gold",
"GoldenRod",
"Salmon",
"Tan",
"HoneyDew",
"HotPink",
"IndianRed",
"Ivory",
"Khaki",
"Lavender",
"LavenderBlush",
"LawnGreen",
"LemonChiffon",
"LightBlue",
"LightCoral",
"LightCyan",
"LightGoldenRodYellow",
"LightGray",
"LightGrey",
"LightGreen",
"LightPink",
"LightSalmon",
"LightSeaGreen",
"LightSkyBlue",
"LightSlateGray",
"LightSlateGrey",
"LightSteelBlue",
"LightYellow",
"Lime",
"LimeGreen",
"Linen",
"Magenta",
"MediumAquaMarine",
"MediumOrchid",
"MediumPurple",
"MediumSeaGreen",
"MediumSlateBlue",
"MediumSpringGreen",
"MediumTurquoise",
"MediumVioletRed",
"MintCream",
"MistyRose",
"Moccasin",
"NavajoWhite",
"OldLace",
"Olive",
"OliveDrab",
"Orange",
"OrangeRed",
"Orchid",
"PaleGoldenRod",
"PaleGreen",
"PaleTurquoise",
"PaleVioletRed",
"PapayaWhip",
"PeachPuff",
"Peru",
"Pink",
"Plum",
"PowderBlue",
"Purple",
"Red",
"RosyBrown",
"RoyalBlue",
"SaddleBrown",
"Green",
"SandyBrown",
"SeaGreen",
"SeaShell",
"Sienna",
"Silver",
"SkyBlue",
"SlateBlue",
"SlateGray",
"SlateGrey",
"Snow",
"SpringGreen",
"SteelBlue",
"GreenYellow",
"Teal",
"Thistle",
"Tomato",
"Turquoise",
"Violet",
"Wheat",
"White",
"WhiteSmoke",
"Yellow",
"YellowGreen",
]
def save_image_array_as_png(image: PIL.Image, output_path: Path) -> None:
    """Saves an image (represented as a numpy array) to PNG.

    Args:
        image: a numpy array with shape [height, width, 3].
        output_path: path to which image should be written.
    """
    image_pil = Image.fromarray(numpy.uint8(image)).convert("RGB")
    # PNG data is binary, so the gfile handle must be opened in "wb":
    # TF1's gfile tolerated "w" for bytes, but tf.io.gfile in TF2
    # distinguishes text from binary modes.
    with tensorflow.gfile.Open(output_path, "wb") as fid:
        image_pil.save(fid, "PNG")
def encode_image_array_as_png_str(image: PIL.Image) -> bytes:
    """Encodes a numpy array into a PNG string.

    Args:
        image: a numpy array with shape [height, width, 3].

    Returns:
        PNG encoded image string.
    """
    pil_image = Image.fromarray(numpy.uint8(image))
    # The context manager closes the buffer for us; getvalue() is taken
    # before exit, exactly as the explicit close() version did.
    with six.BytesIO() as buffer:
        pil_image.save(buffer, format="PNG")
        return buffer.getvalue()
def draw_bounding_box_on_image_array(
    image: numpy.ndarray,
    y_min: int,
    x_min: int,
    y_max: int,
    x_max: int,
    labels: tuple = (),
    *,
    color: str = "red",
    thickness: int = 2,
    use_normalized_coordinates: bool = True,
    mode: str = "RGBA",
) -> None:
    """Adds a bounding box to an image (numpy array), in place.

    Bounding box coordinates can be specified in either absolute (pixel) or
    normalized coordinates by setting the use_normalized_coordinates argument.

    Args:
        image: a numpy array with shape [height, width, 3].
        y_min: y_min of bounding box.
        x_min: x_min of bounding box.
        y_max: y_max of bounding box.
        x_max: x_max of bounding box.
        labels: list of strings to display in box
            (each to be shown on its own line).
        color: color to draw bounding box. Default is red.
        thickness: line thickness. Default value is 2.
        use_normalized_coordinates: If True (default), treat coordinates
            y_min, x_min, y_max, x_max as relative to the image. Otherwise
            treat coordinates as absolute.
        mode: PIL image mode used for the intermediate conversion."""
    image_pil = Image.fromarray(image, mode=mode)
    # BUG FIX: draw_bounding_box_on_image takes (x_min, y_min, x_max, y_max),
    # but this wrapper used to forward its y-first arguments positionally,
    # silently swapping the x and y axes. Pass by keyword so they line up.
    draw_bounding_box_on_image(
        image_pil,
        x_min=x_min,
        y_min=y_min,
        x_max=x_max,
        y_max=y_max,
        labels=labels,
        line_color=color,
        thickness=thickness,
        use_normalized_coordinates=use_normalized_coordinates,
    )
    numpy.copyto(image, numpy.array(image_pil))
def draw_bounding_box_on_image(
    image,
    x_min,
    y_min,
    x_max,
    y_max,
    labels=(),
    *,
    line_color="red",
    thickness=2,
    use_normalized_coordinates=True,
    label_inside=True,
    text_color="black",
):
    """Adds a bounding box to an image.
    Bounding box coordinates can be specified in either absolute (pixel) or
    normalized coordinates by setting the use_normalized_coordinates argument.
    Each string in display_str_list is displayed on a separate line above the
    bounding box in black text on a rectangle filled with the input 'color'.
    If the top of the bounding box extends to the edge of the image, the strings
    are displayed below the bounding box.
    Args:
        :param image: a PIL.Image object.
        :param x_min: x_min of bounding box.
        :param y_min: y_min of bounding box.
        :param x_max: x_max of bounding box.
        :param y_max: y_max of bounding box.
        :param line_color: color to draw bounding box. Default is red.
        :param thickness: line thickness. Default value is 2.
        :param labels: list of strings to display in box
        (each to be shown on its own line).
        :param use_normalized_coordinates: If True (default), treat coordinates
        y_min, x_min, y_max, x_max as relative to the image. Otherwise treat
        coordinates as absolute.
        :param label_inside: place the label stack inside the box rather than
        above/below it.
        :param text_color: color used for the label text."""
    draw = ImageDraw.Draw(image)
    im_width, im_height = image.size
    if use_normalized_coordinates:
        # Scale fractional coordinates up to absolute pixel positions.
        (left, right, top, bottom) = (
            x_min * im_width,
            x_max * im_width,
            y_min * im_height,
            y_max * im_height,
        )
    else:
        (left, right, top, bottom) = (x_min, x_max, y_min, y_max)
    # The box is a closed polyline: five points, ending where it started.
    draw.line(
        [(left, top), (left, bottom), (right, bottom), (right, top), (left, top)],
        width=thickness,
        fill=line_color,
    )
    try:
        font = ImageFont.truetype("arial.ttf", 24)
    except IOError:
        # Arial is not available; fall back to PIL's built-in bitmap font.
        font = ImageFont.load_default()
    if labels:
        # If the total height of the display strings added to the top of the bounding
        # box exceeds the top of the image, stack the strings below the bounding box
        # instead of above.
        # NOTE(review): font.getsize was removed in Pillow 10; this code
        # requires an older Pillow (or a port to font.getbbox) -- confirm.
        display_str_size = [font.getsize(ds) for ds in labels]
        display_str_width, display_str_height = zip(*display_str_size)
        # Each display_str has a top and bottom margin of 0.05x.
        total_display_str_height = (1 + 2 * 0.05) * sum(display_str_height)
        total_display_str_width = sum(display_str_width)
        if left < 0:
            # Box starts left of the image; right-align the label stack.
            text_left = right - total_display_str_width
        else:
            text_left = left
        if top > total_display_str_height:
            # Enough room for the whole label stack near the top edge.
            if label_inside:
                text_bottom = top + total_display_str_height
            else:
                text_bottom = top
        else:
            if label_inside:
                text_bottom = bottom
            else:
                text_bottom = bottom + total_display_str_height
        # Reverse list and print from bottom to top.
        for display_str in labels[::-1]:
            text_width, text_height = font.getsize(display_str)
            margin = numpy.ceil(0.05 * text_height)
            # Filled background rectangle behind the label text.
            draw.rectangle(
                [
                    (text_left, text_bottom - text_height - 2 * margin),
                    (text_left + text_width, text_bottom),
                ],
                fill=line_color,
            )
            draw.text(
                (text_left + margin, text_bottom - text_height - margin),
                display_str,
                fill=text_color,
                font=font,
            )
            # NOTE(review): the step is text_height - 2*margin although each
            # background rectangle is text_height + 2*margin tall, so stacked
            # labels overlap slightly -- confirm whether `+` was intended.
            text_bottom -= text_height - 2 * margin
def draw_bounding_boxes_on_image_array(
    image, boxes, labels=None, *, color="red", thickness=2, mode="RGBA"
) -> None:
    """Draw bounding boxes (and optional labels) on an image held as a numpy array.

    The array is converted to a PIL image, the boxes are drawn on that copy,
    and the annotated pixels are copied back into *image* in place.

    Args:
        :param image: a numpy array holding the image; modified in place.
        :param boxes: a [N, 4] numpy array of (y_min, x_min, y_max, x_max).
            The coordinates are in normalized format between [0, 1].
        :param labels: list of lists of strings, one list per bounding box
            (a single box may carry multiple labels).
        :param color: color to draw bounding boxes. Default is red.
        :param thickness: line thickness. Default value is 2.
        :param mode: PIL mode used to interpret the array. Default "RGBA".
    Raises:
        ValueError: if boxes is not a [N, 4] array"""
    pil_image = Image.fromarray(image, mode=mode)
    draw_bounding_boxes_on_image(
        pil_image, boxes, labels, color=color, thickness=thickness
    )
    # Write the annotated pixels back into the caller's array.
    numpy.copyto(image, numpy.array(pil_image))
def draw_bounding_boxes_on_image(
    image, boxes, labels_iterable=None, *, color="red", thickness=2
):
    """Draws bounding boxes (and optional labels) on a PIL image, in place.

    Args:
        image: a PIL.Image object.
        boxes: a 2 dimensional numpy array of [N, 4]: (y_min, x_min, y_max, x_max).
            The coordinates are in normalized format between [0, 1].
        color: color to draw bounding box. Default is red.
        thickness: line thickness. Default value is 2.
        labels_iterable: list of list of strings.
            a list of strings for each bounding box.
            The reason to pass a list of strings for a
            bounding box is that it might contain
            multiple labels.
    Raises:
        ValueError: if boxes is not a [N, 4] array"""
    boxes_shape = boxes.shape
    # A 0-d array has an empty shape tuple; nothing to draw in that case.
    if not boxes_shape:
        return
    if len(boxes_shape) != 2 or boxes_shape[1] != 4:
        raise ValueError("Input must be of size [N, 4]")
    for i in range(boxes_shape[0]):
        # was `if not labels_iterable is None`; PEP 8 prefers `is not`
        labels = () if labels_iterable is None else labels_iterable[i]
        y_min, x_min, y_max, x_max = boxes[i]
        draw_bounding_box_on_image(
            image,
            y_min,
            x_min,
            y_max,
            x_max,
            labels,
            line_color=color,
            thickness=thickness,
        )
def _visualize_boxes(image, boxes, classes, scores, category_index, **kwargs):
    """Adapter: draw plain detection boxes (no masks, no keypoints)."""
    return visualize_boxes_and_labels_on_image_array(
        image,
        boxes,
        classes,
        scores,
        category_index=category_index,
        **kwargs,
    )
def _visualize_boxes_and_masks(
    image, boxes, classes, scores, masks, category_index, **kwargs
):
    """Adapter: draw detection boxes together with instance masks."""
    return visualize_boxes_and_labels_on_image_array(
        image, boxes, classes, scores,
        instance_masks=masks,
        category_index=category_index,
        **kwargs,
    )
def _visualize_boxes_and_keypoints(
    image, boxes, classes, scores, keypoints, category_index, **kwargs
):
    """Adapter: draw detection boxes together with keypoints."""
    return visualize_boxes_and_labels_on_image_array(
        image, boxes, classes, scores,
        keypoints=keypoints,
        category_index=category_index,
        **kwargs,
    )
def _visualize_boxes_and_masks_and_keypoints(
    image, boxes, classes, scores, masks, keypoints, category_index, **kwargs
):
    """Adapter: draw detection boxes with both instance masks and keypoints."""
    return visualize_boxes_and_labels_on_image_array(
        image, boxes, classes, scores,
        instance_masks=masks,
        keypoints=keypoints,
        category_index=category_index,
        **kwargs,
    )
def draw_bounding_boxes_on_image_tensors(
images,
boxes,
classes,
scores,
category_index,
instance_masks=None,
keypoints=None,
max_boxes_to_draw=20,
min_score_thresh=0.2,
line_thickness=2,
):
"""Draws bounding boxes, masks, and keypoints on batch of image tensors.
Args:
:param images: A | |
query': '没有暂挂查询匹配的注册',
'No person record found for current user.': '没有人找到记录的当前用户。',
'No problem group defined yet': '没有问题组尚未定义',
'No records matching the query': '没有查询匹配的记录',
'No reports available.': '无可用的报告。',
'No reports currently available': '目前没有报告可用',
'No requests found': '未找到请求',
'No resources currently reported': '当前没有资源报告',
'No service profile available': '没有可用的服务概要',
'No skills currently set': '当前设置任何技能',
'No staff or volunteers currently registered': '没有工作人员或当前注册志愿者',
'No status information available': '没有可用的状态信息',
'No synchronization': '没有同步',
'No tasks currently registered': '当前没有已注册任务',
'No template found!': '没有模板找到!',
'No units currently registered': '目前没有单元注册',
'No volunteer availability registered': '没有志愿者可用性注册',
'No': '否',
'Non-structural Hazards': '非结构性危险',
'None (no such record)': '无(没有这样的记录)',
'None': '无',
'Noodles': '面条',
'Normal': '正常',
'Not Applicable': '不适用',
'Not Authorised!': '未授权!',
'Not Possible': '不可能',
'Not Set': '未设置',
'Not Authorized': '未授权',
'Not installed or incorrectly configured.': '未安装或配置不正确。',
'Not yet a Member of any Group': '没有当前注册资格',
'Note that this list only shows active volunteers. To see all people registered in the system, search from this screen instead': '注意,此列表仅显示活动的志愿人员。 查看所有人登记在系统中搜索此屏幕而不是',
'Notice to Airmen': '航空人员通告',
'Number of Columns': '列数',
'Number of Patients': '患者的人数',
'Number of Rows': '行数',
'Number of additional beds of that type expected to become available in this unit within the next 24 hours.': '一些额外的病床的类型将成为可用于此单元未来的24小时之内。',
'Number of alternative places for studying': '其他地方用于研究数目',
'Number of available/vacant beds of that type in this unit at the time of reporting.': '报告输入时这个单位一些可用的/空置病床。',
'Number of deaths during the past 24 hours.': '数目在过去24小时内,死亡。',
'Number of discharged patients during the past 24 hours.': '一些病人在过去24小时。',
'Number of doctors': '医生的数目',
'Number of in-patients at the time of reporting.': '数中的病人时间的报告。',
'Number of newly admitted patients during the past 24 hours.': '新的入院病人在过去24小时。',
'Number of non-medical staff': '一些非医务人员',
'Number of nurses': '护士人数',
'Number of private schools': '号码的私立学校',
'Number of public schools': '号码的公立学校',
'Number of religious schools': '一些宗教学校',
'Number of residential units not habitable': '不适合居住的住宅单位数目',
'Number of residential units': '的住宅单位数目',
'Number of vacant/available beds in this hospital. Automatically updated from daily reports.': '多少个空置的/可用这个医院病床。 自动更新每日报告。',
'Number of vacant/available units to which victims can be transported immediately.': '多少个空置的单位/可用的受害者可将立即生效。',
'Number or Label on the identification tag this person is wearing (if any).': '行号或标号的标识标记这个人就是穿着(如果有的话)。',
'Number or code used to mark the place of find, e.g. flag code, grid coordinates, site reference number or similar (if available)': '数字或代码用于标记的地方找到,例如标志代码,网格坐标", "站点引用号或类似(如果可用)',
'Number': '编号',
'Number/Percentage of affected population that is Female & Aged 0-5': '编号/百分比的受影响的人口是女性和〇岁五',
'Number/Percentage of affected population that is Female & Aged 13-17': '编号/百分比的受影响的人口是女性和13岁17',
'Number/Percentage of affected population that is Female & Aged 18-25': '编号/百分比的受影响的人口是女性和18岁25',
'Number/Percentage of affected population that is Female & Aged 26-60': '编号/百分比的受影响的人口是女性和26岁60',
'Number/Percentage of affected population that is Female & Aged 6-12': '编号/百分比的受影响的人口是女性和六岁12',
'Number/Percentage of affected population that is Female & Aged 61+': '编号/百分比的受影响人口的女性和年龄61+',
'Number/Percentage of affected population that is Male & Aged 0-5': '编号/百分比的受影响人口的男性和〇岁五',
'Number/Percentage of affected population that is Male & Aged 13-17': '编号/百分比的受影响人口的男性和13岁17',
'Number/Percentage of affected population that is Male & Aged 18-25': '编号/百分比的受影响人口的男性和18岁25',
'Number/Percentage of affected population that is Male & Aged 26-60': '编号/百分比的受影响人口的男性和26岁60',
'Number/Percentage of affected population that is Male & Aged 6-12': '编号/百分比的受影响人口的男性和六岁12',
'Number/Percentage of affected population that is Male & Aged 61+': '编号/百分比的受影响人口的男性和年龄61+',
'Nursery Beds': '苗圃病床',
'Nutrition problems': '营养问题',
'Nutrition': '营养',
'OK': '等于',
'OR Reason': '或原因',
'OR Status Reason': '或状态原因',
'OR Status': '或状态',
'Observer': '观察器 (observer)',
'Obsolete': '过时',
'Obstetrics/Gynecology': '产科妇科/',
'Office Address': '办公地址',
'Office Details': '办公室详细',
'Office Phone': '办公电话',
'Office added': '办公室添加',
'Office deleted': '删除办公室',
'Office updated': '更新办公室',
'Office': '办公室',
'Offices & Warehouses': '办公室和仓库',
'Offices': '办事处',
'Offline Sync (from USB/File Backup)': '脱机同步(从USB/文件备份)',
'Offline Sync': '脱机同步',
'Older people as primary caregivers of children': '老人作为主要照顾者的儿童',
'Older people in care homes': '在家照顾老人',
'Older people participating in coping activities': '参与活动的老人',
'Older person (>60 yrs)': '>60老人',
'On by default? (only applicable to Overlays)': '缺省情况下? (仅适用于覆盖)',
'On by default?': '缺省情况下?',
'One Time Cost': '单次成本',
'One time cost': '单次成本',
'One-time costs': '一次性的成本',
'One-time': '一次性',
'Oops! Something went wrong...': '对不起, 发生错误......',
'Oops! something went wrong on our side.': '对不起, 发生错误在我们一边。',
'Opacity (1 for opaque, 0 for fully-transparent)': '透明度(一为不透明,〇表示完全透明)',
'Open area': '打开区域',
'Open recent': '打开最近更新的文档',
'Open': '打开',
'Operating Rooms': '操作房间',
'Optional link to an Incident which this Assessment was triggered by.': '可选的链接事件这一评估已触发的。',
'Optional': '可选组件',
'Optional. If you wish to style the features based on values of an attribute, select the attribute to use here.': '可选参数。 如果您想要的样式功能的基础上的值属性,选择属性在此处使用。',
'Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': '可选。 在geoserver,这是工作空间名称空间URI (而不是名称).在WFS getCapabilities,这是FeatureType名部分前冒号(:)。',
'Optional. The name of an element whose contents should be a URL of an Image file put into Popups.': '可选参数。 元素的名称,其内容应该是一个图像的URL文件放置到弹出框。',
'Optional. The name of an element whose contents should be put into Popups.': '可选参数。 元素的名称,其内容应放入弹出框。',
'Optional. The name of the schema. In Geoserver this has the form http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.': '可选参数。 模式的名称。 在geoserver这格式http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name。',
'Options': '选项',
'Organization Details': '组织详细信息',
'Organization Registry': '组织注册表',
'Organization added': '添加组织',
'Organization deleted': '组织删除',
'Organization updated': '更新组织',
'Organization': '组织 (organization)',
'Organizations': '组织',
'Origin of the separated children': '源的分隔儿童',
'Origin': '起源',
'Other (describe)': '其他(描述)',
'Other (specify)': '其他(请指明)',
'Other Evidence': '其他证据',
'Other Faucet/Piped Water': '其他水龙头/管道水',
'Other Isolation': '其他隔离',
'Other Name': '其它名称',
'Other activities of boys 13-17yrs before disaster': '其他活动的男孩13 17yrs前灾难',
'Other activities of boys 13-17yrs': '其他活动的男孩13 17yrs',
'Other activities of boys <12yrs before disaster': '其他活动的男孩<12yrs前灾难',
'Other activities of boys <12yrs': '其他活动的男孩<12yrs',
'Other activities of girls 13-17yrs before disaster': '其他活动的女孩13 17yrs前灾难',
'Other activities of girls 13-17yrs': '其他活动的女孩13 17yrs',
'Other activities of girls<12yrs before disaster': '其他的活动girls<12yrs前灾难',
'Other activities of girls<12yrs': '其他活动的girls<12yrs',
'Other alternative infant nutrition in use': '其他婴儿营养使用中',
'Other alternative places for study': '其他地方的研究',
'Other assistance needed': '其他所需援助',
'Other assistance, Rank': '其他援助,排名',
'Other current health problems, adults': '其他当前运行状况问题,成人',
'Other current health problems, children': '其他当前运行状况问题,儿童',
'Other events': '其他事件',
'Other factors affecting school attendance': '其他因素影响学校出席',
'Other major expenses': '其他主要开支',
'Other non-food items': '其他非食品项目',
'Other recommendations': '其他建议',
'Other residential': '其他住宅',
'Other school assistance received': '其他学校的援助',
'Other school assistance, details': '其他学校援助,详细信息',
'Other school assistance, source': '其他学校援助,源',
'Other settings can only be set by editing a file on the server': '其他设置只能在服务器上设置编辑文件',
'Other side dishes in stock': '其他方面菜肴的股票',
'Other types of water storage containers': '其他类型的水存储容器',
'Other ways to obtain food': '其他方法来获取食物',
'Other': '其他错误',
'Outbound Mail settings are configured in models/000_config.py.': '出站邮件设置配置模型中/000_config.. py。',
'Outbox': '发件箱',
'Outgoing SMS Handler': 'SMS传出处理',
'Outgoing SMS handler': 'SMS传出处理',
'Overall Hazards': '整体危险',
'Overhead falling hazard': '开销坠落危险',
'Overland Flow Flood': 'Overland流程洪水',
'Owned Resources': '拥有的资源',
'PAHO UID': '泛美UID',
'PIN number': '引脚编号',
'PIN': '锁定',
'PL Women': '妇女PL',
'Pack': '封装',
'Packs': '包',
'Parameters': '参数',
'Parapets, ornamentation': '护墙,装饰',
'Parent Office': '父办公室',
'Parent needs to be of the correct level': '父需要正确的级别',
'Parent needs to be set for locations of level': '父需要设置地点级别',
'Parent needs to be set': '父需要设置',
'Parent': '父代',
'Parents/Caregivers missing children': '家长/照顾者失踪儿童',
'Partial': '部分',
'Participant': '参与者',
'Pashto': '普什图语',
'Pass': '传递',
'Passport': '护照',
'Password': '密码',
'Path': '路径',
'Pathology': '病理学',
'Patients': '病人',
'Pediatric ICU': '儿科ICU',
'Pediatric Psychiatric': '小儿精神科',
'Pediatrics': '儿科',
'Peer Details': '同行详细信息',
'Peer Registration Details': '注册详细信息等',
'Peer Registration Request': '注册请求等',
'Peer Registration': '注册等',
'Peer Type': '类型等',
'Peer UID': 'UID对等',
'Peer added': '添加同仁',
'Peer deleted': '删减同仁',
'Peer not allowed to push': '不允许对推动',
'Peer registration request added': '对添加注册请求',
'Peer registration request deleted': '对注册删除请求',
'Peer registration request updated': '对注册请求更新',
'Peer updated': '更新对等',
'Peer': '同行',
'Peers': '同级设备',
'Pending Requests': '暂挂请求',
'Pending': '暂挂中',
'People Needing Food': '需要食品的人',
'People Needing Shelter': '需要住房的人',
'People Needing Water': '需要水的人',
'People Trapped': '被困人员',
'People': '人员',
'Performance Rating': '性能评级',
'Person 1': '第一个人员',
'Person 1, Person 2 are the potentially duplicate records': '人员一,人员二是潜在的重复记录',
'Person 2': '第二个人员',
'Person De-duplicator': '人德复印机',
'Person Details': '人员详细信息',
'Person Registry': '人注册表',
'Person added to Group': '组添加成员',
'Person added to Team': '组添加成员',
'Person added': '添加的人员',
'Person deleted': '删除的人',
'Person details updated': '人详细信息更新',
'Person interviewed': '采访人',
'Person who has actually seen the person/group.': '人其实是在个人/组织。',
'Person': '人员',
'Person/Group': '人员/组',
'Personal Data': '个人数据',
'Personal Effects Details': '个人影响详细信息',
'Personal Effects': '个人影响',
'Personal Map': '个人映射',
'Personal Profile': '个人概要文件',
'Personal impact of disaster': '灾难的个人影响',
'Persons in institutions': '人在机构',
'Persons with disability (mental)': '残疾人士(精神)',
'Persons with disability (physical)': '残疾人士(物理)',
'Persons': '人员',
'Phone 1': '电话一',
'Phone 2': '电话二',
'Phone': '电话',
'Phone/Business': '电话号码/商业',
'Phone/Emergency': '电话/紧急',
'Phone/Exchange (Switchboard)': '电话/交换(总机)',
'Photo Details': '照片详细',
'Photo Taken?': '照片采取?',
'Photo added': '照片添加',
'Photo deleted': '删除照片',
'Photo updated': '更新照片',
'Photo': '照片',
'Photograph': '照片',
'Photos': '照片',
'Physical Description': '物理描述',
'Physical Safety': '物理安全',
'Picture upload and finger print upload facility': '图片上传和指纹上载工具',
'Picture': '图片',
'Place of Recovery': '位置的恢复',
'Place': '场所',
'Places for defecation': '大便的场所',
'Places the children have been sent to': '将儿童送往的地方',
'Playing': '播放',
'Please correct all errors.': '请更正所有错误。',
'Please enter a first name': '请输入名字',
'Please enter a site OR a location': '请输入一个地点或位置',
'Please enter the first few letters of the Person/Group for the autocomplete.': '请输入个人/组织的前几个字母.',
'Please enter the recipient': '请输入收件',
'Please fill this!': '请填写本!',
'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened.': '请提供所指内容的URL页面和您预期会发生的与实际发生了什么的描述。',
'Please report here where you are:': '请在这里报告您的位置:',
'Please select another level': '请选择另一个层次',
'Please select': '请选择',
'Please sign-up with your Cell Phone as this allows us to send you Text messages. Please include full Area code.': '请注册您的手机,这使我们能够向您发送文本消息。 请包括完整的地区代码。',
'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.': '请指定任何问题和障碍的正确处理的疾病,详细信息(在人数,适当情况下)。 您还可以添加建议的情况可以改进。',
'Please use this field to record any additional information, including a history of the record if it is updated.': '请使用此字段来记录的任何其他信息,包括的历史记录(如果该更新。',
'Please use this field to record any additional information, including any Special Needs.': '请使用此字段来记录的任何其他信息,包括任何特殊需要。',
'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': '请使用此字段来记录的任何其他信息,例如Ushahidi实例标识。 包括的历史记录(如果是更新。',
'Pledge Support': '质押支持',
'Poisoning': '中毒',
'Poisonous Gas': '有毒气体',
'Police': '警察',
'Pollution and other environmental': '污染和其他环境',
'Polygon reference of the rating unit': '多边形评级的参考单元',
'Polygon': '多边形',
'Poor': '欠佳',
'Population Statistic Details': '人口统计详细信息',
'Population Statistic added': '人口统计信息添加',
'Population Statistic deleted': '人口统计信息删除',
'Population Statistic updated': '人口统计信息更新',
'Population Statistics': '人口统计',
'Population and number | |
<reponame>afrokyss/DIT-ILLUMNI-WAGTAIL-APP
from __future__ import unicode_literals
from django.forms import ValidationError
from django.core.exceptions import NON_FIELD_ERRORS
from django.forms.formsets import TOTAL_FORM_COUNT
from django.forms.models import (
BaseModelFormSet, modelformset_factory,
ModelForm, _get_foreign_key, ModelFormMetaclass, ModelFormOptions
)
from django.db.models.fields.related import ForeignObjectRel
from modelcluster.models import get_all_child_relations
class BaseTransientModelFormSet(BaseModelFormSet):
    """ A ModelFormSet that doesn't assume that all its initial data instances exist in the db """
    def _construct_form(self, i, **kwargs):
        """Build form *i*, tolerating an empty-string PK in the bound data.

        An empty PK means the corresponding instance was never saved; a fresh,
        unsaved model instance is used instead of a database lookup.
        """
        # Need to override _construct_form to avoid calling to_python on an empty string PK value
        if self.is_bound and i < self.initial_form_count():
            pk_key = "%s-%s" % (self.add_prefix(i), self.model._meta.pk.name)
            pk = self.data[pk_key]
            if pk == '':
                # Transient instance: no PK yet, so don't try to fetch it.
                kwargs['instance'] = self.model()
            else:
                # Normal Django path: coerce the posted PK and look up the object.
                pk_field = self.model._meta.pk
                to_python = self._get_to_python(pk_field)
                pk = to_python(pk)
                kwargs['instance'] = self._existing_object(pk)
        if i < self.initial_form_count() and 'instance' not in kwargs:
            kwargs['instance'] = self.get_queryset()[i]
        if i >= self.initial_form_count() and self.initial_extra:
            # Set initial values for extra forms
            try:
                kwargs['initial'] = self.initial_extra[i - self.initial_form_count()]
            except IndexError:
                pass
        # bypass BaseModelFormSet's own _construct_form
        return super(BaseModelFormSet, self)._construct_form(i, **kwargs)
    def save_existing_objects(self, commit=True):
        """Save changed / delete marked initial forms, including blank-PK instances.

        Returns the list of saved instances; also populates
        self.changed_objects and self.deleted_objects as Django's version does.
        """
        # Need to override _construct_form so that it doesn't skip over initial forms whose instance
        # has a blank PK (which is taken as an indication that the form was constructed with an
        # instance not present in our queryset)
        self.changed_objects = []
        self.deleted_objects = []
        if not self.initial_forms:
            return []
        saved_instances = []
        forms_to_delete = self.deleted_forms
        for form in self.initial_forms:
            obj = form.instance
            if form in forms_to_delete:
                if obj.pk is None:
                    # no action to be taken to delete an object which isn't in the database
                    continue
                self.deleted_objects.append(obj)
                self.delete_existing(obj, commit=commit)
            elif form.has_changed():
                self.changed_objects.append((obj, form.changed_data))
                saved_instances.append(self.save_existing(form, obj, commit=commit))
                if not commit:
                    # Deferred save: remember the form so save_m2m can run later.
                    self.saved_forms.append(form)
        return saved_instances
def transientmodelformset_factory(model, formset=BaseTransientModelFormSet, **kwargs):
    """Like modelformset_factory, but defaulting to the transient-tolerant base formset."""
    kwargs['formset'] = formset
    return modelformset_factory(model, **kwargs)
class BaseChildFormSet(BaseTransientModelFormSet):
    """Formset for the child objects of a single parent instance.

    `self.fk` (attached by childformset_factory) is the ForeignKey from the
    child model back to the parent; the parent's reverse accessor for that
    relation supplies the queryset and receives the saved children.
    """
    def __init__(self, data=None, files=None, instance=None, queryset=None, **kwargs):
        if instance is None:
            # No parent supplied: attach to a fresh, unsaved parent instance.
            self.instance = self.fk.remote_field.model()
        else:
            self.instance = instance
        # Accessor name on the parent for the reverse relation (e.g. 'children').
        self.rel_name = ForeignObjectRel(self.fk, self.fk.remote_field.model, related_name=self.fk.remote_field.related_name).get_accessor_name()
        if queryset is None:
            queryset = getattr(self.instance, self.rel_name).all()
        super(BaseChildFormSet, self).__init__(data, files, queryset=queryset, **kwargs)
    def save(self, commit=True):
        """Save the child forms through the parent's relation manager.

        Returns the saved child instances. When commit is False the changes
        are only staged on the relation manager (committed later by the caller).
        """
        # The base ModelFormSet's save(commit=False) will populate the lists
        # self.changed_objects, self.deleted_objects and self.new_objects;
        # use these to perform the appropriate updates on the relation's manager.
        saved_instances = super(BaseChildFormSet, self).save(commit=False)
        manager = getattr(self.instance, self.rel_name)
        # if model has a sort_order_field defined, assign order indexes to the attribute
        # named in it
        if self.can_order and hasattr(self.model, 'sort_order_field'):
            sort_order_field = getattr(self.model, 'sort_order_field')
            for i, form in enumerate(self.ordered_forms):
                setattr(form.instance, sort_order_field, i)
        # If the manager has existing instances with a blank ID, we have no way of knowing
        # whether these correspond to items in the submitted data. We'll assume that they do,
        # as that's the most common case (i.e. the formset contains the full set of child objects,
        # not just a selection of additions / updates) and so we delete all ID-less objects here
        # on the basis that they will be re-added by the formset saving mechanism.
        no_id_instances = [obj for obj in manager.all() if obj.pk is None]
        if no_id_instances:
            manager.remove(*no_id_instances)
        manager.add(*saved_instances)
        manager.remove(*self.deleted_objects)
        self.save_m2m()  # ensures any parental-m2m fields are saved.
        if commit:
            manager.commit()
        return saved_instances
    def clean(self, *args, **kwargs):
        """Run cross-form uniqueness validation before the standard clean."""
        self.validate_unique()
        return super(BaseChildFormSet, self).clean(*args, **kwargs)
    def validate_unique(self):
        '''This clean method will check for unique_together condition'''
        # Collect unique_checks and to run from all the forms.
        all_unique_checks = set()
        all_date_checks = set()
        forms_to_delete = self.deleted_forms
        # Only forms that individually validate (and aren't being deleted) participate.
        valid_forms = [form for form in self.forms if form.is_valid() and form not in forms_to_delete]
        for form in valid_forms:
            unique_checks, date_checks = form.instance._get_unique_checks()
            all_unique_checks.update(unique_checks)
            all_date_checks.update(date_checks)
        errors = []
        # Do each of the unique checks (unique and unique_together)
        for uclass, unique_check in all_unique_checks:
            seen_data = set()
            for form in valid_forms:
                # Get the data for the set of fields that must be unique among the forms.
                row_data = (
                    field if field in self.unique_fields else form.cleaned_data[field]
                    for field in unique_check if field in form.cleaned_data
                )
                # Reduce Model instances to their primary key values
                row_data = tuple(d._get_pk_val() if hasattr(d, '_get_pk_val') else d
                    for d in row_data)
                if row_data and None not in row_data:
                    # if we've already seen it then we have a uniqueness failure
                    if row_data in seen_data:
                        # poke error messages into the right places and mark
                        # the form as invalid
                        errors.append(self.get_unique_error_message(unique_check))
                        form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
                        # remove the data from the cleaned_data dict since it was invalid
                        for field in unique_check:
                            if field in form.cleaned_data:
                                del form.cleaned_data[field]
                    # mark the data as seen
                    seen_data.add(row_data)
        if errors:
            raise ValidationError(errors)
def childformset_factory(
    parent_model, model, form=ModelForm,
    formset=BaseChildFormSet, fk_name=None, fields=None, exclude=None,
    extra=3, can_order=False, can_delete=True, max_num=None, validate_max=False,
    formfield_callback=None, widgets=None, min_num=None, validate_min=False
):
    """Build a formset class for editing `model` children of `parent_model`.

    The foreign key from `model` back to `parent_model` is located (optionally
    disambiguated by `fk_name`), excluded from the generated forms, and attached
    to the returned formset class as `FormSet.fk`. All other arguments mirror
    Django's modelformset_factory / inlineformset_factory.
    """
    fk = _get_foreign_key(parent_model, model, fk_name=fk_name)
    # enforce a max_num=1 when the foreign key to the parent model is unique.
    if fk.unique:
        max_num = 1
        validate_max = True
    # Copy before appending: the original `exclude += [fk.name]` extended a
    # caller-supplied list in place, leaking fk.name back to the caller.
    exclude = list(exclude or []) + [fk.name]
    kwargs = {
        'form': form,
        'formfield_callback': formfield_callback,
        'formset': formset,
        'extra': extra,
        'can_delete': can_delete,
        # if the model supplies a sort_order_field, enable ordering regardless of
        # the current setting of can_order
        'can_order': (can_order or hasattr(model, 'sort_order_field')),
        'fields': fields,
        'exclude': exclude,
        'max_num': max_num,
        'validate_max': validate_max,
        'widgets': widgets,
        'min_num': min_num,
        'validate_min': validate_min,
    }
    FormSet = transientmodelformset_factory(model, **kwargs)
    FormSet.fk = fk
    return FormSet
class ClusterFormOptions(ModelFormOptions):
    """ModelFormOptions extended with child-formset inclusion/exclusion settings."""
    def __init__(self, options=None):
        super().__init__(options=options)
        # Meta.formsets / Meta.exclude_formsets select which child relations
        # get formsets; both default to None (meaning "not specified").
        self.formsets = getattr(options, 'formsets', None)
        self.exclude_formsets = getattr(options, 'exclude_formsets', None)
class ClusterFormMetaclass(ModelFormMetaclass):
    """Metaclass for ClusterForm: builds one child formset class per child relation.

    For each child relation of Meta.model (as reported by
    get_all_child_relations), a formset class is generated via
    childformset_factory and collected into the `formsets` dict on the new
    form class, subject to Meta.formsets / Meta.exclude_formsets filtering.
    """
    # Number of blank extra forms each generated child formset exposes.
    extra_form_count = 3
    @classmethod
    def child_form(cls):
        """Form class used for child objects; subclasses may override."""
        return ClusterForm
    def __new__(cls, name, bases, attrs):
        try:
            parents = [b for b in bases if issubclass(b, ClusterForm)]
        except NameError:
            # We are defining ClusterForm itself.
            parents = None
        # grab any formfield_callback that happens to be defined in attrs -
        # so that we can pass it on to child formsets - before ModelFormMetaclass deletes it.
        # BAD METACLASS NO BISCUIT.
        formfield_callback = attrs.get('formfield_callback')
        new_class = super(ClusterFormMetaclass, cls).__new__(cls, name, bases, attrs)
        if not parents:
            # Defining ClusterForm itself (or a non-ClusterForm base): nothing to build.
            return new_class
        # ModelFormMetaclass will have set up new_class._meta as a ModelFormOptions instance;
        # replace that with ClusterFormOptions so that we can access _meta.formsets
        opts = new_class._meta = ClusterFormOptions(getattr(new_class, 'Meta', None))
        if opts.model:
            formsets = {}
            for rel in get_all_child_relations(opts.model):
                # to build a childformset class from this relation, we need to specify:
                # - the base model (opts.model)
                # - the child model (rel.field.model)
                # - the fk_name from the child model to the base (rel.field.name)
                rel_name = rel.get_accessor_name()
                # apply 'formsets' and 'exclude_formsets' rules from meta
                if opts.formsets is not None and rel_name not in opts.formsets:
                    continue
                if opts.exclude_formsets and rel_name in opts.exclude_formsets:
                    continue
                try:
                    widgets = opts.widgets.get(rel_name)
                except AttributeError:  # thrown if opts.widgets is None
                    widgets = None
                kwargs = {
                    'extra': cls.extra_form_count,
                    'form': cls.child_form(),
                    'formfield_callback': formfield_callback,
                    'fk_name': rel.field.name,
                    'widgets': widgets
                }
                # see if opts.formsets looks like a dict; if so, allow the value
                # to override kwargs
                try:
                    kwargs.update(opts.formsets.get(rel_name))
                except AttributeError:
                    pass
                formset = childformset_factory(opts.model, rel.field.model, **kwargs)
                formsets[rel_name] = formset
            new_class.formsets = formsets
            # Remember whether the Meta explicitly listed formsets; ClusterForm
            # uses this to decide if omitted formsets are acceptable in a POST.
            new_class._has_explicit_formsets = (opts.formsets is not None or opts.exclude_formsets is not None)
        return new_class
class ClusterForm(ModelForm, metaclass=ClusterFormMetaclass):
    def __init__(self, data=None, files=None, instance=None, prefix=None, **kwargs):
        """Build the form plus one bound child formset per entry in `self.formsets`.

        Each formset gets the prefix '<form prefix>-<relation name>' so its
        field names don't clash with the parent form's.
        """
        super(ClusterForm, self).__init__(data, files, instance=instance, prefix=prefix, **kwargs)
        self.formsets = {}
        for rel_name, formset_class in self.__class__.formsets.items():
            if prefix:
                formset_prefix = "%s-%s" % (prefix, rel_name)
            else:
                formset_prefix = rel_name
            self.formsets[rel_name] = formset_class(data, files, instance=instance, prefix=formset_prefix)
        if self.is_bound and not self._has_explicit_formsets:
            # check which formsets have actually been provided as part of the form submission -
            # if no `formsets` or `exclude_formsets` was specified, we allow them to be omitted
            # (https://github.com/wagtail/wagtail/issues/5414#issuecomment-567468127).
            self._posted_formsets = [
                formset
                for formset in self.formsets.values()
                if '%s-%s' % (formset.prefix, TOTAL_FORM_COUNT) in self.data
            ]
        else:
            # expect all defined formsets to be part of the post
            self._posted_formsets = self.formsets.values()
def as_p(self):
form_as_p = super(ClusterForm, self).as_p()
return form_as_p + ''.join([formset.as_p() for formset in self.formsets.values()])
def is_valid(self):
form_is_valid = super(ClusterForm, self).is_valid()
formsets_are_valid = all(formset.is_valid() for formset in self._posted_formsets)
return form_is_valid and formsets_are_valid
def is_multipart(self):
return (
super(ClusterForm, self).is_multipart()
or any(formset.is_multipart() for formset in self.formsets.values())
)
@property
def media(self):
media = super(ClusterForm, self).media
for formset in self.formsets.values():
media = media + formset.media
return media
def save(self, commit=True):
# do we have any fields that expect us to call save_m2m immediately?
save_m2m_now = False
exclude = self._meta.exclude
fields = self._meta.fields
for | |
aggregate: function applied across data going into each cell
of the table <http://www.sqlite.org/lang_aggfunc.html>_
where: list of tuples or list of strings for filtering data
method:
'valid': only returns rows or columns with valid entries.
'full': return full factorial combinations of the
conditions specified by rows and cols
returns:
:class:`PyvtTbl` object
"""
if rows == None:
rows = []
if cols == None:
cols = []
if where == None:
where = []
##############################################################
# pivot programmatic flow #
##############################################################
# 1. Check to make sure the table can be pivoted with the #
# specified parameters #
# 2. Create a sqlite table with only the data in columns #
# specified by val, rows, and cols. Also eliminate #
# rows that meet the exclude conditions #
# 3. Build rnames and cnames lists #
# 4. Build query based on val, rows, and cols #
# 5. Run query #
# 6. Read data to from cursor into a list of lists #
# 7. Query grand, row, and column totals #
# 8. Clean up #
# 9. flatten if specified #
# 10. Initialize and return PyvtTbl Object #
##############################################################
# 1. Check to make sure the table can be pivoted with the
# specified parameters
##############################################################
# This may seem excessive but it provides better feedback
# to the user if the errors can be parsed out before had
# instead of crashing on confusing looking code segments
# check to see if data columns have equal lengths
if not self._are_col_lengths_equal():
raise Exception('columns have unequal lengths')
# check the supplied arguments
if val not in list(self.keys()):
raise KeyError(val)
if not hasattr(rows, '__iter__'):
raise TypeError( "'%s' object is not iterable"
% type(cols).__name__)
if not hasattr(cols, '__iter__'):
raise TypeError( "'%s' object is not iterable"
% type(cols).__name__)
for k in rows:
if k not in list(self.keys()):
raise KeyError(k)
for k in cols:
if k not in list(self.keys()):
raise KeyError(k)
# check for duplicate names
dup = Counter([val] + rows + cols)
del dup[None]
if not all(count == 1 for count in list(dup.values())):
raise Exception('duplicate labels specified')
# check aggregate function
aggregate = aggregate.lower()
if aggregate not in self.aggregates:
raise ValueError("supplied aggregate '%s' is not valid"%aggregate)
# check to make sure where is properly formatted
# todo
# 2. Create a sqlite table with only the data in columns
# specified by val, rows, and cols. Also eliminate
# rows that meet the exclude conditions
##############################################################
self._build_sqlite3_tbl([val] + rows + cols, where)
# 3. Build rnames and cnames lists
##############################################################
# Refresh conditions list so we can build row and col list
self._execute('select %s from TBL'
%', '.join(_sha1(n) for n in [val] + rows + cols))
Zconditions = DictSet(list(zip([val]+rows+cols, list(zip(*list(self.cur))))))
# rnames_mask and cnanes_mask specify which unique combinations of
# factor conditions have valid entries in the table.
# 1 = valid
# 0 = not_valid
# Build rnames
if rows == []:
rnames = [1]
rnames_mask = [1]
else:
rnames = []
rnames_mask = []
conditions_set = set(zip(*[self[n] for n in rows]))
for vals in Zconditions.unique_combinations(rows):
rnames_mask.append(tuple(vals) in conditions_set)
rnames.append(list(zip(rows,vals)))
# Build cnames
if cols == []:
cnames = [1]
cnames_mask = [1]
else:
cnames = []
cnames_mask = []
conditions_set = set(zip(*[self[n] for n in cols]))
for vals in Zconditions.unique_combinations(cols):
cnames_mask.append(tuple(vals) in conditions_set)
cnames.append(list(zip(cols,vals)))
# 4. Build query based on val, rows, and cols
##############################################################
# Here we are using string formatting to build the query.
# This method is generally discouraged for security, but
# in this circumstance I think it should be okay. The column
# labels are protected with leading and trailing underscores.
# The rest of the query is set by the logic.
#
# When we pass the data in we use the (?) tuple format
if aggregate == 'tolist':
agg = 'group_concat'
else:
agg = aggregate
query = ['select ']
if rnames == [1] and cnames == [1]:
query.append('%s( %s ) from TBL'%(agg, _sha1(val)))
else:
if rnames == [1]:
query.append(_sha1(val))
else:
query.append(', '.join(_sha1(r) for r in rows))
if cnames == [1]:
query.append('\n , %s( %s )'%(agg, _sha1(val)))
else:
for cs in cnames:
query.append('\n , %s( case when '%agg)
if all(map(_isfloat, list(zip(*cols))[1])):
query.append(
' and '.join(('%s=%s'%(_sha1(k), v) for k, v in cs)))
else:
query.append(
' and '.join(('%s="%s"'%(_sha1(k) ,v) for k, v in cs)))
query.append(' then %s end )'%_sha1(val))
if rnames == [1]:
query.append('\nfrom TBL')
else:
query.append('\nfrom TBL group by ')
for i, r in enumerate(rows):
if i != 0:
query.append(', ')
query.append(_sha1(r))
# 5. Run Query
##############################################################
self._execute(''.join(query))
# 6. Read data from cursor into a list of lists
##############################################################
data, mask = [],[]
val_type = self._get_sqltype(val)
fill_val = self._get_mafillvalue(val)
# keep the columns with the row labels
if attach_rlabels:
cnames = [(r, '') for r in rows].extend(cnames)
cnames_mask = [1 for i in _xrange(len(rows))].extend(cnames_mask)
if aggregate == 'tolist':
if method=='full':
i=0
for row in self.cur:
while not rnames_mask[i]:
data.append([[fill_val] for j in _xrange(len(cnames))])
mask.append([[True] for j in _xrange(len(cnames))])
i+=1
data.append([])
mask.append([])
for cell, _mask in zip(list(row)[-len(cnames):], cnames_mask):
if cell == None or not _mask:
data[-1].append([fill_val])
mask[-1].append([True])
else:
if val_type == 'real' or val_type == 'integer':
split =cell.split(',')
data[-1].append(list(map(float, split)))
mask[-1].append([False for j in _xrange(len(split))])
else:
split =cell.split(',')
data[-1].append(split)
mask[-1].append([False for j in _xrange(len(split))])
i+=1
else:
for row in self.cur:
data.append([])
mask.append([])
for cell, _mask in zip(list(row)[-len(cnames):], cnames_mask):
if _mask:
if cell == None:
data[-1].append([fill_val])
mask[-1].append([True])
elif val_type == 'real' or val_type == 'integer':
split =cell.split(',')
data[-1].append(list(map(float, split)))
mask[-1].append([False for j in _xrange(len(split))])
else:
split =cell.split(',')
data[-1].append(split)
mask[-1].append([False for j in _xrange(len(split))])
# numpy arrays must have the same number of dimensions so we need to pad
# cells to the maximum dimension of the data
max_len = max(_flatten([[len(c) for c in L] for L in data]))
for i,L in enumerate(data):
for j,c in enumerate(L):
for k in _xrange(max_len - len(data[i][j])):
data[i][j].append(fill_val)
mask[i][j].append(True)
else:
if method=='full':
i=0
for row in self.cur:
while not rnames_mask[i]:
data.append([fill_val for j in _xrange(len(cnames))])
mask.append([True for j in _xrange(len(cnames))])
i+=1
row_data = list(row)[-len(cnames):]
data.append([(fill_val,v)[m] for v,m in zip(row_data, cnames_mask)])
mask.append([not m for v,m in zip(row_data, cnames_mask)])
i+=1
else:
for row in self.cur:
row_data = list(row)[-len(cnames):]
data.append([v for v,m in zip(row_data, cnames_mask) if m])
mask.append([False for m in cnames_mask if m])
# 7. Get totals
##############################################################
row_tots, col_tots, grand_tot = [], [], np.nan
row_mask, col_mask = [], []
if aggregate not in ['tolist', 'group_concat', 'arbitrary']:
query = 'select %s( %s ) from TBL'%(agg, _sha1(val))
self._execute(query)
grand_tot = list(self.cur)[0][0]
if cnames != [1] and rnames != [1]:
query = ['select %s( %s ) from TBL group by'%(agg, _sha1(val))]
query.append(', '.join(_sha1(r) for r in rows))
self._execute(' '.join(query))
if method=='full':
i=0
row_tots=[]
row_mask=[]
for tup in self.cur:
while not rnames_mask[i]:
row_tots.append(fill_val)
row_mask.append(True)
i+=1
row_tots.append(tup[0])
row_mask.append(False)
i+=1
else:
row_tots = [tup[0] for tup in self.cur]
row_mask = [False for z in row_tots]
query = ['select %s( %s ) from TBL group by'%(agg, _sha1(val))]
query.append(', '.join(_sha1(r) for r in cols))
self._execute(' '.join(query))
if method=='full':
i=0
col_tots=[]
col_mask=[]
for tup in self.cur:
while not cnames_mask[i]:
col_tots.append(fill_val)
col_mask.append(True)
i+=1
col_tots.append(tup[0])
col_mask.append(False)
i+=1
else:
col_tots = [tup[0] for tup in self.cur]
col_mask = [False for z in col_tots]
row_tots = np.ma.array(row_tots, mask=row_mask)
col_tots = np.ma.array(col_tots, mask=col_mask)
# 8. Clean up
##############################################################
self.conn.commit()
# 9. Build rnames and cnames if method=='valid'
##############################################################
if method=='valid':
rnames = [n for n,m in zip(rnames,rnames_mask) if m]
cnames = [n for n,m in zip(cnames,cnames_mask) if m]
# 10. Initialize and return PyvtTbl Object
##############################################################
##
## print(data)
## print(mask)
## print(rnames)
## print(cnames)
## print(col_tots)
## print(row_tots)
## print(grand_tot)
## print()
##
return PyvtTbl(data, val, Zconditions, rnames, cnames, aggregate,
mask=mask,
| |
hdu = GLOBALutils.update_header(hdu,'HIERARCH DEC',h[0].header['DEC'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH RA BARY',ra)
hdu = GLOBALutils.update_header(hdu,'HIERARCH DEC BARY',dec)
hdu = GLOBALutils.update_header(hdu,'HIERARCH EQUINOX',h[0].header['HIERARCH ESO TEL TARG EQUINOX'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH OBS LATITUDE',h[0].header['HIERARCH ESO TEL GEOLAT'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH OBS LONGITUDE',h[0].header['HIERARCH ESO TEL GEOLON'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH OBS ALTITUDE',h[0].header['HIERARCH ESO TEL GEOELEV'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH TARG AIRMASS',h[0].header['HIERARCH ESO TEL AIRM START'])
print '\t\tWavelength calibration:'
print '\t\t\tComparision fibre is '+ cotype
indice = sorted_indices[0]
thar_fits_ob_R = dirout + ThAr_ref[indice].split('/')[-1][:-4]+'spec.ob.R.fits.S'
thar_fits_co_R = dirout + ThAr_ref[indice].split('/')[-1][:-4]+'spec.co.R.fits.S'
thar_fits_ob_B = dirout + ThAr_ref[indice].split('/')[-1][:-4]+'spec.ob.B.fits.S'
thar_fits_co_B = dirout + ThAr_ref[indice].split('/')[-1][:-4]+'spec.co.B.fits.S'
pkl_wsol = dirout + ThAr_ref[indice].split('/')[-1][:-4]+'wavsolpars.pkl'
print "\t\t\tUnpickling wavelength solution from", pkl_wsol, " ..."
wsol_dict = pickle.load(open(pkl_wsol,'r'))
#cotype = 'SKY'
if cotype == 'WAVE':
# Extract thAr lines from comparison orders
lines_thar_co_R = sci_S_co_R[:,1,:]
iv_thar_co_R = sci_S_co_R[:,2,:]
lines_thar_co_B = sci_S_co_B[:,1,:]
iv_thar_co_B = sci_S_co_B[:,2,:]
Red_Pixel_Centers_co = np.array([])
Red_Wavelengths_co = np.array([])
Red_Orders_co = np.array([])
Red_Centroids_co = np.array([])
Red_Sigmas_co = np.array([])
Red_Intensities_co = np.array([])
for order in range(nord_co2):
order_s = str(order)
if (order < 10):
order_s = '0'+str(order)
thar_order_orig = lines_thar_co_R[order,:]
IV = iv_thar_co_R[order,:]
wei = np.sqrt( IV )
#bkg = CoralieUtils.Lines_mBack(thar_order_orig, IV, thres_rel=3)
thar_order = thar_order_orig #- bkg
coeffs_pix2wav, coeffs_pix2sigma, pixel_centers, wavelengths, rms_ms, residuals, centroids, sigmas, intensities \
= GLOBALutils.Initial_Wav_Calibration( order_dir+'R_order_'+order_s+final_wav, thar_order, order, wei, rmsmax=MRMS_initial, minlines=minlines_initial, \
FixEnds=True,Dump_Argon=dumpargon, Dump_AllLines=True, Cheby=use_cheby, porder=porder)
Red_Pixel_Centers_co = np.append( Red_Pixel_Centers_co, pixel_centers )
Red_Wavelengths_co = np.append( Red_Wavelengths_co, wavelengths )
Red_Orders_co = np.append( Red_Orders_co, np.zeros( len(pixel_centers) ) + order )
Red_Centroids_co = np.append( Red_Centroids_co, centroids)
Red_Sigmas_co = np.append( Red_Sigmas_co, sigmas)
Red_Intensities_co = np.append( Red_Intensities_co, intensities )
p1_co_R, G_pix_co_R, G_ord_co_R, G_wav_co_R, II_co_R, rms_ms_co_R, G_res_co_R = \
GLOBALutils.Fit_Global_Wav_Solution(Red_Pixel_Centers_co, Red_Wavelengths_co, Red_Orders_co,\
np.ones(Red_Intensities_co.shape), wsol_dict['p1_co_R'], Cheby=use_cheby,\
maxrms=MRMS, Inv=Inverse_m, minlines=minlines_glob,\
order0=or0_R,ntotal=nord_co2,npix=len(thar_order),nx=ncoef_x_R,nm=ncoef_m_R)
p_shift_co_R, pix_centers, orders, wavelengths, I, rms_ms, residuals = \
GLOBALutils.Global_Wav_Solution_vel_shift(G_pix_co_R, G_wav_co_R, G_ord_co_R,\
np.ones(G_wav_co_R.shape), wsol_dict['p1_co_R'],\
Cheby=True,Inv=True,maxrms=MRMS,minlines=minlines_glob,\
order0=or0_R,ntotal=nord_co2,npix=len(thar_order),nx=ncoef_x_R,nm=ncoef_m_R)
weight_R = (np.sqrt(len(orders)) / rms_ms)**2
Blue_Pixel_Centers_co = np.array([])
Blue_Wavelengths_co = np.array([])
Blue_Orders_co = np.array([])
Blue_Centroids_co = np.array([])
Blue_Sigmas_co = np.array([])
Blue_Intensities_co = np.array([])
for order in range(nord_co1):
order = order + 1
order_s = str(order)
if (order < 10):
order_s = '0'+str(order)
thar_order_orig = lines_thar_co_B[order-1,:]
IV = iv_thar_co_B[order-1,:]
wei = np.sqrt( IV )
thar_order = thar_order_orig #- bkg
coeffs_pix2wav, coeffs_pix2sigma, pixel_centers, wavelengths, rms_ms, residuals, centroids, sigmas, intensities \
= GLOBALutils.Initial_Wav_Calibration( order_dir+'B_order_'+order_s+final_wav, thar_order, order, wei, rmsmax=MRMS_initial, minlines=50, \
FixEnds=True,Dump_Argon=dumpargon, Dump_AllLines=True, Cheby=use_cheby, porder=porder)
Blue_Pixel_Centers_co = np.append( Blue_Pixel_Centers_co, pixel_centers )
Blue_Wavelengths_co = np.append( Blue_Wavelengths_co, wavelengths )
Blue_Orders_co = np.append( Blue_Orders_co, np.zeros( len(pixel_centers) ) + order )
Blue_Centroids_co = np.append( Blue_Centroids_co, centroids)
Blue_Sigmas_co = np.append( Blue_Sigmas_co, sigmas)
Blue_Intensities_co = np.append( Blue_Intensities_co, intensities )
p1_co_B, G_pix_co_B, G_ord_co_B, G_wav_co_B, II_co_B, rms_ms_co_B, G_res_co_B = \
GLOBALutils.Fit_Global_Wav_Solution(Blue_Pixel_Centers_co, Blue_Wavelengths_co, Blue_Orders_co,\
np.ones(Blue_Intensities_co.shape), wsol_dict['p1_co_B'], Cheby=use_cheby,\
maxrms=MRMS, Inv=Inverse_m, minlines=minlines_glob,\
order0=or0_B,ntotal=nord_co1,npix=len(thar_order),nx=ncoef_x_B,nm=ncoef_m_B)
p_shift_co_B, pix_centers, orders, wavelengths, I, rms_ms, residuals = \
GLOBALutils.Global_Wav_Solution_vel_shift(G_pix_co_B, G_wav_co_B, G_ord_co_B,\
np.ones(G_wav_co_B.shape), wsol_dict['p1_co_B'],\
Cheby=True,Inv=True,maxrms=MRMS,minlines=minlines_glob,\
order0=or0_B,ntotal=nord_co1,npix=len(thar_order),nx=ncoef_x_B,nm=ncoef_m_B)
weight_B = (np.sqrt(len(orders)) / rms_ms)**2
shift = (p_shift_co_R[0]*weight_R + p_shift_co_B[0]*weight_B) / weight_R + weight_B
print p_shift_co_R,p_shift_co_B,shift
else:
p_shift_co_R = [0.]
p_shift_co_B = [0.]
p_shift = 0.
shift = 0.
good_quality = True
hdu = GLOBALutils.update_header(hdu,'HIERARCH THAR SHIFT_R',p_shift_co_R[0])
hdu = GLOBALutils.update_header(hdu,'HIERARCH THAR SHIFT_B',p_shift_co_B[0])
hdu = GLOBALutils.update_header(hdu,'HIERARCH THAR SHIFT',shift)
hdu = GLOBALutils.update_header(hdu,'HIERARCH THAR SHIFT APPLIED',dosim)
# Apply new wavelength solution including barycentric correction
equis = np.arange( dataB.shape[1] )
for order in range(nord_ob2):
m = order + or0_R
chebs = GLOBALutils.Calculate_chebs(equis, m, Inverse=Inverse_m,order0=or0_R,ntotal=nord_ob2,npix=len(equis),nx=ncoef_x_R,nm=ncoef_m_R)
if dosim:
WavSol = lbary_ltopo * (1.0 + 1.0e-6*shift) * (1.0/m) * GLOBALutils.Joint_Polynomial_Cheby(wsol_dict['p1_R'],chebs,ncoef_x_R,ncoef_m_R)
else:
WavSol = lbary_ltopo * (1.0/m) * GLOBALutils.Joint_Polynomial_Cheby(wsol_dict['p1_R'],chebs,ncoef_x_R,ncoef_m_R)
spec[0,order,:] = GLOBALutils.ToVacuum(WavSol)
spec[1,order,:] = sci_S_ob_R[order,1, :]
spec[2,order,:] = sci_S_ob_R[order,2, :]
fn = R_flat_ob[order,1,:]
L = np.where( fn > 0 )[0]
spec[3,order,:][L] = sci_S_ob_R[order,1,:][L] / R_flat_ob[order,1,:][L]
spec[4,order,:][L] = sci_S_ob_R[order,2,:][L] * ( R_flat_ob[order,1,:][L] ** 2 )
for order in range(nord_ob1):
m = order + or0_B
chebs = GLOBALutils.Calculate_chebs(equis, m, Inverse=Inverse_m,order0=or0_B,ntotal=nord_ob1,npix=len(equis),nx=ncoef_x_B,nm=ncoef_m_B)
if dosim:
WavSol = lbary_ltopo * (1.0 + 1.0e-6*shift) * (1.0/m) * GLOBALutils.Joint_Polynomial_Cheby(wsol_dict['p1_B'],chebs,ncoef_x_B,ncoef_m_B)
else:
WavSol = lbary_ltopo * (1.0/m) * GLOBALutils.Joint_Polynomial_Cheby(wsol_dict['p1_B'],chebs,ncoef_x_B,ncoef_m_B)
spec[0,order + nord_ob2,:] = GLOBALutils.ToVacuum(WavSol)
spec[1,order + nord_ob2,:] = sci_S_ob_B[order,1, :]
spec[2,order + nord_ob2,:] = sci_S_ob_B[order,2, :]
fn = B_flat_ob[order,1,:]
L = np.where( fn > 0 )[0]
spec[3,order + nord_ob2,:][L] = sci_S_ob_B[order,1,:][L] / B_flat_ob[order,1,:][L]
spec[4,order + nord_ob2,:][L] = sci_S_ob_B[order,2,:][L] * ( B_flat_ob[order,1,:][L] ** 2 )
ccoefs = GLOBALutils.get_cont(spec[0],spec[3])
for order in range(nord_ob2):
fn = R_flat_ob_n[order,1,:]
L = np.where( (spec[1,order,:] != 0) & (fn > 0) )[0]
spec[5,order,:][L] = spec[3,order,:][L] / np.polyval(ccoefs[order],spec[0,order,:][L])
ratio = np.polyval(ccoefs[order],spec[0,order,:][L])*Rnorms[order]
spec[3,order,:][L] = sci_S_ob_R[order,1,:][L] / R_flat_ob_n[order,1,:][L]
spec[4,order,:][L] = sci_S_ob_R[order,2,:][L] * ( R_flat_ob_n[order,1,:][L] ** 2 )
spec[6,order,:][L] = spec[4,order,:][L] * (ratio ** 2 )
spec[7,order,:][L] = ratio
spec[8,order,:][L] = ratio * R_flat_ob_n[order,1,:][L] / np.sqrt( ratio * R_flat_ob_n[order,1,:][L] / gain2 + (ron2/gain2)**2 )
spl = scipy.interpolate.splrep(np.arange(WavSol.shape[0]), WavSol,k=3)
dlambda_dx = scipy.interpolate.splev(np.arange(WavSol.shape[0]), spl, der=1)
NN = np.average(dlambda_dx)
dlambda_dx /= NN
LL = np.where(spec[5,order,:] > 1 + 10. / scipy.signal.medfilt(spec[8,order,:],21))[0]
spec[5,order,LL] = 1.
spec[9,order,:][L] = spec[5,order,:][L] * (dlambda_dx[L] ** 1)
spec[10,order,:][L] = spec[6,order,:][L] / (dlambda_dx[L] ** 2)
for order in range(nord_ob1):
fn = B_flat_ob_n[order,1,:]
L = np.where( (spec[1,order + nord_ob2,:] != 0) & (fn > 0) )[0]
spec[5,order + nord_ob2,:][L] = spec[3,order + nord_ob2,:][L] / np.polyval(ccoefs[order + nord_ob2],spec[0,order + nord_ob2,:][L])
ratio = np.polyval(ccoefs[order + nord_ob2],spec[0,order + nord_ob2,:][L])*Bnorms[order]
#L = np.where( fn > 0 )
spec[3,order + nord_ob2,:][L] = sci_S_ob_B[order,1,:][L] / B_flat_ob_n[order,1,:][L]
spec[4,order + nord_ob2,:][L] = sci_S_ob_B[order,2,:][L] * ( B_flat_ob_n[order,1,:][L] ** 2 )
spec[6,order + nord_ob2,:][L] = spec[4,order + nord_ob2,:][L] * (ratio ** 2 )
spec[7,order + nord_ob2,:][L] = ratio
spec[8,order + nord_ob2,:][L] = ratio * B_flat_ob_n[order,1,:][L] / np.sqrt( ratio * B_flat_ob_n[order,1,:][L] / gain1 + (ron1/gain1)**2 )
spl = scipy.interpolate.splrep(np.arange(WavSol.shape[0]), WavSol,k=3)
dlambda_dx = scipy.interpolate.splev(np.arange(WavSol.shape[0]), spl, der=1)
NN = np.average(dlambda_dx)
dlambda_dx /= NN
LL = np.where(spec[5,order + nord_ob2,:] > 1 + 20./scipy.signal.medfilt(spec[8,order + nord_ob2,:],21))[0]
spec[5,order + nord_ob2,LL] = 1.
spec[9,order + nord_ob2,:][L] = spec[5,order + nord_ob2,:][L] * (dlambda_dx[L] ** 1)
spec[10,order + nord_ob2,:][L] = spec[6,order + nord_ob2,:][L] / (dlambda_dx[L] ** 2)
JustExtract = False
if (not JustExtract):
if DoClass:
print '\t\tSpectral Analysis:'
query_success,sp_type_query = False,'None'
# spectral analysis
#query_success,sp_type_query = GLOBALutils.simbad_query_obname(obname)
# Now, query SIMBAD by coordinates if above not successful
#if (not query_success):
# query_success,sp_type_query = GLOBALutils.simbad_query_coords('12:00:00','00:00:00')
#print "\t\t\tSpectral type returned by SIMBAD query:",sp_type_query
hdu = GLOBALutils.update_header(hdu,'HIERARCH SIMBAD SPTYP', sp_type_query)
pars_file = dirout + fsim.split('/')[-1][:-4]+'_stellar_pars.txt'
if os.access(pars_file,os.F_OK) == False or force_stellar_pars:
print "\t\t\tEstimating atmospheric parameters:"
Rx = np.around(1./np.sqrt(1./40000.**2 - 1./RESI**2))
spec2 = spec.copy()
for i in range(spec.shape[1]):
IJ = np.where(spec[5,i]!=0.)[0]
spec2[5,i,IJ] = GLOBALutils.convolve(spec[0,i,IJ],spec[5,i,IJ],Rx)
T_eff, logg, Z, vsini, vel0, ccf = correlation.CCF(spec2,model_path=models_path,npools=npools)
line = "%6d %4.1f %4.1f %8.1f %8.1f\n" % (T_eff,logg, Z, vsini, vel0)
f = open(pars_file,'w')
f.write(line)
f.close()
else:
print "\t\t\tAtmospheric parameters loaded from file:"
T_eff, logg, Z, vsini, vel0 = np.loadtxt(pars_file,unpack=True)
print "\t\t\t\tT_eff=",T_eff,"log(g)=",logg,"Z=",Z,"vsin(i)=",vsini,"vel0",vel0
else:
T_eff, logg, Z, vsini, vel0 = -999,-999,-999,-999,-999
# store the parameters measured for this epoch
T_eff_epoch = T_eff
logg_epoch = logg
Z_epoch = Z
vsini_epoch = vsini
vel0_epoch = vel0
hdu = GLOBALutils.update_header(hdu,'HIERARCH TEFF', float(T_eff))
hdu = GLOBALutils.update_header(hdu,'HIERARCH LOGG', float(logg))
hdu = GLOBALutils.update_header(hdu,'HIERARCH Z', Z)
hdu = GLOBALutils.update_header(hdu,'HIERARCH VSINI', vsini)
hdu = GLOBALutils.update_header(hdu,'HIERARCH VEL0', vel0)
print "\t\tRadial Velocity analysis:"
# assign mask
sp_type, mask = GLOBALutils.get_mask_reffile(obname,reffile=reffile,base='../data/xc_masks/')
print "\t\t\tWill use",sp_type,"mask for CCF."
# Read in mask
ml, mh, weight = np.loadtxt(mask,unpack=True)
ml_v = GLOBALutils.ToVacuum( ml )
mh_v = GLOBALutils.ToVacuum( mh )
av_m = 0.5*( ml_v + mh_v )
mask_hw_kms = (GLOBALutils.Constants.c/1e3) * 0.5*(mh_v - ml_v) / av_m
disp = GLOBALutils.get_disp(obname, reffile=reffile)
if disp == 0:
known_sigma = False
if vsini != -999 and vsini != 0.:
disp = vsini
else:
disp = 3.
else:
known_sigma = True
mask_hw_wide = av_m * disp / (GLOBALutils.Constants.c/1.0e3)
ml_v = av_m - mask_hw_wide
mh_v = av_m + mask_hw_wide
print '\t\t\tComputing the CCF...'
cond = True
if sp_type == 'M5':
moon_sig = 4.5
elif sp_type == 'K5':
moon_sig = 4.2
else:
moon_sig = 4.0
while (cond):
# first rough correlation to find the minimum
vels, xc_full, sn, nlines_ccf, W_ccf = \
GLOBALutils.XCor(spec, ml_v, mh_v, weight,\
0, lbary_ltopo, vel_width=300, vel_step=3,\
spec_order=9, iv_order=10, sn_order=8,max_vel_rough=300)
xc_av = GLOBALutils.Average_CCF(xc_full, sn, sn_min=3.0, Simple=True, W=W_ccf)
# Normalize the continuum of the CCF robustly with lowess
yy = scipy.signal.medfilt(xc_av,11)
pred = lowess(yy, vels,frac=0.4,it=10,return_sorted=False)
tck1 = scipy.interpolate.splrep(vels,pred,k=1)
xc_av_orig = xc_av.copy()
xc_av /= pred
vel0_xc = vels[ np.argmin( xc_av ) ]
rvels, rxc_av, rpred, rxc_av_orig, rvel0_xc = \
vels.copy(), xc_av.copy(), pred.copy(),\
xc_av_orig.copy(), vel0_xc
xc_av_rough = xc_av
vels_rough = vels
vel_width = np.maximum( 20.0, 6*disp )
vels, xc_full, | |
725756740,
725641311,
-1,
-54461,
725712036,
10079,
-1,
725756157,
10080,
725953351,
725691838,
-1,
-54458,
725896748,
129052,
-1,
725954655,
129048,
-54456,
725893681,
128627,
-1,
726079292,
128327,
726281041,
725618449,
-1,
726346576,
726235036,
-1,
726412111,
726295300,
129180,
726532095,
726335488,
-1,
-54450,
726434674,
129183,
-1,
726481757,
129182,
-1,
726370416,
129181,
-1,
726293191,
128628,
726805336,
726228690,
-1,
726870869,
726766221,
-1,
-54444,
726831171,
128604,
-1,
726867140,
128612,
727121919,
726805558,
-1,
-54441,
727027779,
128606,
-1,
727063748,
128614,
727264095,
726738271,
-1,
727329630,
727212133,
-1,
727395165,
727291074,
-1,
-54436,
727338540,
129055,
-1,
727396447,
129051,
-1,
727340007,
128178,
-1,
727271785,
10135,
727722852,
727199238,
-1,
727788387,
727673877,
-1,
-54430,
727739204,
128977,
-1,
727784206,
128975,
-1,
727744142,
128958,
728050535,
727683765,
-1,
-54426,
728006306,
128913,
-1,
728047662,
128903,
728247146,
728009905,
-1,
-54423,
728190508,
129053,
-1,
728248415,
129049,
728443757,
728205342,
-1,
-54420,
728394564,
128980,
-1,
728439566,
128979,
728640368,
728397021,
-1,
-54417,
728583724,
129054,
-1,
728641631,
129050,
728836979,
728577284,
-1,
-54414,
728792718,
128946,
-1,
728853534,
128970,
-54412,
728787964,
10133,
-54411,
728979959,
10134,
-1,
729040508,
128633,
729230206,
724594508,
-1,
729295741,
729186640,
-1,
729361276,
729256855,
-1,
-54406,
729318678,
127892,
-54405,
729379614,
128157,
-1,
729421276,
128152,
-1,
729294563,
128159,
-1,
729221250,
128585,
-1,
729164510,
127911,
-54400,
724529341,
127807,
-54399,
729764546,
128641,
-54398,
729819017,
129428,
-1,
729881712,
1519,
730082306,
701894417,
-1,
730147797,
730028887,
-1,
730213320,
730087902,
-1,
730278827,
730157481,
-1,
730344333,
730221251,
-1,
730409867,
730285597,
-1,
-54390,
730371848,
68889,
-1,
730435889,
68887,
-54388,
730348245,
68872,
-1,
730533649,
68881,
730737553,
730291031,
-1,
-54385,
730699528,
68891,
-54384,
730740706,
68890,
-1,
730795793,
68885,
730999700,
730693966,
-1,
-54381,
730956246,
68868,
-1,
730992401,
68867,
731196311,
730953762,
-1,
-54378,
731134677,
68880,
-1,
731189009,
68879,
731392922,
731148108,
-1,
-54375,
731344793,
68877,
-1,
731385617,
68876,
731589533,
731325791,
-1,
-54372,
731522539,
68875,
-1,
731582225,
68874,
-54370,
731551866,
68878,
-54369,
731748104,
68888,
-54368,
731812145,
68886,
-54367,
731876935,
68892,
-54366,
731932939,
68866,
-54365,
731993694,
68884,
-54364,
732056905,
68883,
-54363,
732121626,
68869,
-54362,
732183253,
68871,
-54361,
732248034,
68882,
-54360,
732312394,
68873,
-54359,
732372307,
68870,
-54358,
732436531,
68865,
-1,
732499729,
68864,
732703673,
730212345,
-1,
732769199,
732660046,
-1,
-54354,
732730582,
68914,
-1,
732774226,
68915,
732965810,
732723234,
-1,
-54351,
732907901,
68918,
-1,
732967478,
68919,
733162421,
732902182,
-1,
-54348,
733112074,
68916,
-1,
733169887,
68917,
-54346,
733124871,
68912,
-54345,
733307652,
68913,
-54344,
733371842,
68921,
-1,
733425152,
68920,
733621183,
732664006,
-1,
-54341,
733579415,
68895,
-54340,
733634661,
68897,
-54339,
733691997,
68894,
-54338,
733752159,
68896,
-1,
733810449,
68893,
734014405,
733576272,
-1,
734079940,
733970902,
-1,
-54334,
734035698,
68903,
-54333,
734092208,
68902,
-1,
734149450,
68901,
-1,
734018640,
68900,
734461951,
733960008,
-1,
-54329,
734361787,
68898,
-1,
734419907,
68899,
734604239,
730151559,
-1,
734669774,
734566293,
-1,
734735309,
734612163,
-1,
-54324,
734677474,
983209,
-1,
734739934,
983212,
-1,
734691032,
983210,
-1,
734625508,
983211,
735117311,
734537055,
-1,
735128531,
734992435,
-1,
-54318,
735073235,
129342,
-1,
735131561,
128092,
-54316,
735083263,
129309,
-1,
735250114,
129310,
735456249,
730104142,
-1,
735521784,
735408196,
-1,
735587313,
735465897,
-1,
735652829,
735541282,
-1,
735718364,
735579921,
-1,
-54309,
735664259,
67822,
-1,
735717282,
67825,
-1,
735657517,
67828,
735980512,
735609166,
-1,
-54305,
735916431,
67816,
-1,
735975409,
67829,
736177122,
735919243,
67812,
-1,
736133917,
67815,
736308197,
736104209,
-1,
-54300,
736270411,
67823,
-1,
736317853,
67808,
-54298,
736270589,
67814,
-54297,
736467033,
67817,
-54296,
736530916,
67813,
-54295,
736587362,
67826,
-54294,
736651852,
67824,
-54293,
736714809,
67821,
-54292,
736778382,
67820,
-54291,
736841178,
67819,
-54290,
736906050,
67818,
-54289,
736967088,
67810,
-54288,
737027647,
67811,
-1,
737089960,
67809,
737345535,
735535092,
-1,
737356789,
737247566,
-1,
-54284,
737317478,
67838,
-1,
737357550,
67837,
737553399,
737305348,
67835,
-1,
737477166,
67839,
-1,
737490175,
67836,
-1,
735453178,
128035,
737815550,
735401492,
-1,
737881084,
737761467,
128296,
-1,
737804377,
128736,
-54275,
737836962,
128057,
-1,
737942762,
127828,
-54273,
737767646,
128436,
-54272,
738094357,
128587,
-54271,
738150342,
128135,
-1,
738207565,
11233,
738405415,
730030181,
-1,
738470930,
738357068,
-1,
738536462,
738413036,
-1,
738601997,
738492750,
-1,
738667532,
738529158,
-1,
738733067,
738626242,
983059,
738852863,
738656256,
-1,
-54262,
738759619,
983140,
-1,
738818673,
983137,
-1,
738656869,
11134,
-1,
738619323,
128677,
-1,
738532067,
11203,
739246079,
738490747,
128014,
739311615,
739115008,
-1,
-54255,
739209136,
127943,
-1,
739259213,
128052,
739453976,
738429079,
-1,
739519511,
739408254,
-1,
739585046,
739450064,
127968,
-1,
739538978,
127960,
-1,
739546039,
127969,
-1,
739472114,
9203,
739847197,
739410254,
-1,
739912732,
739770368,
-1,
-54245,
739863235,
127798,
-1,
739911769,
127789,
-1,
739847776,
127976,
740174881,
739790071,
-1,
740240416,
740119874,
128616,
-1,
740164157,
128617,
-1,
740174687,
128371,
740437028,
740121914,
-1,
-54237,
740366659,
128029,
-1,
740426779,
127855,
-54235,
740392576,
127973,
-54234,
740583633,
983123,
-1,
740630534,
128298,
740830267,
738345053,
-1,
740895792,
740782024,
-1,
740961327,
740825634,
-1,
-54229,
740919447,
983980,
-54228,
740974693,
983982,
-54227,
741032029,
983979,
-54226,
741092191,
983981,
-1,
741150481,
983978,
-1,
740889889,
110593,
741420088,
740833574,
-1,
741485621,
741345309,
-1,
741551156,
741441134,
128644,
-1,
741475733,
128645,
-1,
741489997,
128096,
741801983,
741408768,
-1,
-54217,
741695743,
983122,
-1,
741743746,
128262,
-54215,
741371137,
129435,
-54214,
741887012,
129406,
-1,
741939842,
127802,
742140991,
740788375,
-1,
-54211,
742095674,
128559,
-54210,
742153341,
128175,
-1,
742209778,
129303,
742403138,
742097230,
983060,
-54207,
742357026,
983138,
-1,
742410741,
983141,
-1,
742365092,
11226,
742665895,
701851857,
-1,
742731158,
742592273,
-1,
742796542,
742669430,
-1,
742861947,
742726967,
-1,
742927456,
742810650,
-1,
742992976,
742873943,
-1,
743058508,
742949198,
-1,
-54197,
743012680,
92957,
-1,
743062638,
92971,
-54195,
743001790,
92960,
-54194,
743197595,
92962,
-54193,
743251844,
92968,
-1,
743315351,
92972,
743517268,
742931143,
-1,
-54190,
743463946,
92969,
-54189,
743526078,
92965,
-1,
743577495,
92959,
743779415,
743447863,
-1,
-54186,
743717998,
92967,
-1,
743774103,
92975,
-54184,
743741244,
92964,
-54183,
743937580,
92974,
-54182,
744001126,
92956,
-54181,
744059030,
92961,
-54180,
744123989,
92963,
-54179,
744188873,
92970,
-54178,
744249221,
92966,
-54177,
744312510,
92958,
-1,
744363927,
92973,
744620031,
742870505,
-1,
744631399,
744522062,
-1,
744696934,
744585250,
-1,
-54172,
744657422,
93069,
-54171,
744701244,
93053,
-1,
744755078,
93062,
-1,
744636080,
93061,
745024619,
744573635,
-1,
-54167,
744985739,
93070,
-54166,
745038931,
93058,
-1,
745093862,
93064,
745286766,
744986285,
-1,
-54163,
745221422,
93054,
-1,
745279962,
93068,
745483377,
745246261,
-1,
-54160,
745444485,
93071,
-1,
745476570,
93066,
745679988,
745426167,
-1,
-54157,
745621482,
93055,
-1,
745674673,
93056,
745876599,
745618133,
-1,
-54154,
745837701,
93059,
-1,
745887252,
93065,
-54152,
745838247,
93057,
-54151,
746023689,
93063,
-54150,
746085148,
93060,
-1,
746140532,
93067,
746335406,
742816848,
-1,
746400903,
746266665,
-1,
746466433,
746357070,
-1,
746531968,
746428067,
93044,
-1,
746455380,
93045,
-1,
746487348,
92997,
746728580,
746396983,
-1,
-54141,
746686795,
93043,
-1,
746732889,
92985,
-54139,
746679680,
93046,
-54138,
746872642,
93047,
-1,
746928916,
93042,
747121809,
746362354,
-1,
747187343,
747083437,
-1,
747252878,
747121979,
-1,
-54133,
747210245,
92990,
-54132,
747270923,
92989,
-54131,
747331556,
92988,
-1,
747385711,
92991,
-1,
747201619,
93028,
-54128,
747127023,
93034,
-1,
747575225,
92996,
747777179,
747081921,
-1,
747842711,
747733326,
-1,
747908246,
747780807,
-1,
-54123,
747856437,
92983,
-1,
747913455,
92986,
-1,
747862763,
92984,
-54120,
747796913,
92992,
-54119,
748117835,
92994,
-54118,
748180981,
93027,
-1,
748237757,
92987,
748432543,
747733326,
-1,
-54115,
748394050,
93037,
-54114,
748437084,
93030,
-1,
748492695,
93039,
748694692,
748377620,
-1,
748760227,
748629298,
-1,
-54110,
748717915,
93038,
-1,
748781733,
92993,
-1,
748719489,
93041,
749022375,
748641111,
-1,
-54106,
748980125,
93035,
-1,
749039198,
93033,
749218986,
748960455,
-1,
-54103,
749166570,
93032,
-1,
749228716,
93029,
-54101,
749181240,
93031,
-54100,
749360326,
93040,
-54099,
749420862,
92995,
-1,
749475784,
93036,
749677785,
746295501,
-1,
749743294,
749604625,
-1,
749808819,
749703449,
-1,
-54094,
749768245,
92943,
-1,
749803554,
92942,
750005430,
749766807,
-1,
-54091,
749964853,
92933,
-1,
750000162,
92932,
750202041,
749944925,
-1,
-54088,
750161461,
92939,
-1,
750196770,
92938,
750398652,
750128913,
-1,
-54085,
750358069,
92955,
-1,
750393378,
92954,
-54083,
750358069,
92951,
-1,
750524450,
92950,
750726340,
749701271,
-1,
750791874,
750653201,
-1,
-54079,
750751285,
92945,
-1,
750786594,
92944,
-54077,
750751285,
92935,
-1,
750917666,
92934,
751119562,
750674021,
-1,
751185096,
751067237,
-1,
-54073,
751144501,
92941,
-1,
751179810,
92940,
-54071,
751144501,
92947,
-1,
751310882,
92946,
751512784,
751059037,
-1,
751578318,
751439633,
-1,
-54067,
751537717,
92949,
-1,
751573026,
92948,
-54065,
751537717,
92931,
-1,
751704098,
92930,
751906006,
751446879,
-1,
751971540,
751840095,
-1,
-54061,
751930933,
92929,
-1,
751966242,
92928,
-54059,
751930933,
92937,
-1,
752097314,
92936,
752353279,
751866137,
-1,
-54056,
752258613,
92953,
-1,
752293922,
92952,
752495847,
749611001,
-1,
752561373,
752451918,
-1,
-54052,
752522454,
93010,
-1,
752566098,
93011,
752757984,
752515106,
-1,
-54049,
752699773,
93014,
-1,
752759350,
93015,
752954595,
752694054,
-1,
-54046,
752903946,
93012,
-1,
752961759,
93013,
-54044,
752916743,
93008,
-54043,
753099524,
93009,
-54042,
753163714,
93017,
-1,
753217024,
93016,
753413363,
752443380,
-1,
753478895,
753369422,
-1,
753544430,
753413870,
-1,
753609965,
753467392,
-1,
-54036,
753566621,
| |
coefficients for r to the order of -4, -6, -8, and -10, respectively.
"""
new_multipole_terms = np.zeros(4)
new_multipole_terms[0] = np.sum(multipole_terms[:1])
new_multipole_terms[1] = np.sum(multipole_terms[2:6])
new_multipole_terms[2] = np.sum(multipole_terms[6:8])
new_multipole_terms[3] = np.sum(multipole_terms[8])
return new_multipole_terms
def calc_cross_multipole_potential(r, multipole_terms, nondimensional=False, temperature=None, total_only=True):
    r"""
    Compute the cross-interaction potential between two beads from multipole moments.

    Nondimensional parameters are scaled using the vacuum permittivity,
    :math:`\varepsilon_{0}`, the Boltzmann constant, :math:`k_{B}`, and the
    elementary charge, :math:`e`.

    Parameters
    ----------
    r : numpy.ndarray
        Array (or float) of nondimensionalized distance between two beads.
        :math:`r'=r (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
    multipole_terms : numpy.ndarray
        Either four coefficients for r to the orders -4, -6, -8, and -10, or
        nine coefficients for the individual multipole interactions.
    nondimensional : bool, Optional, default=False
        Indicates whether the given bead library has been nondimensionalized by
        :func:`~mapsci.multipole_mie_combining_rules.dict_dimensions`
    temperature : float, Optional, default=None
        Temperature in [K] for adding and removing dimensions; unused when the
        parameters are nondimensionalized.
    total_only : bool, Optional, default=True
        If True, only the overall potential is returned (useful for parameter
        fitting). If False, the per-term potentials are returned as well.

    Returns
    -------
    potential : numpy.ndarray
        Nondimensionalized potential between beads from multipole moments,
        equal in length to ``r``. :math:`\phi'=\phi/(3k_{B}T)` or in kcal/mol
    potential_terms : numpy.ndarray, Optional
        2D array of the individual contributions (either the 4 orders of r
        from -4 to -10 by steps of 2, or the 9 separate interactions).
        Only provided when ``total_only`` is False.
    """
    n_terms = np.size(multipole_terms)
    # Each coefficient is attractive and paired with a fixed power of r.
    if n_terms == 4:
        powers = (4.0, 6.0, 8.0, 10.0)
    elif n_terms == 9:
        powers = (4.0, 4.0, 6.0, 6.0, 6.0, 6.0, 8.0, 8.0, 10.0)
    else:
        raise ValueError(
            "Multipole terms input should be either of length 4 or length 9 for the supported interaction types.")
    potential_terms = np.array(
        [-coefficient / r**power for coefficient, power in zip(multipole_terms, powers)])
    potential = np.sum(potential_terms, axis=0)
    if not nondimensional:
        # Multipole energies carry the same units as the ionization energy.
        potential = float_dimensions(potential, "ionization_energy", temperature)
        potential_terms = float_dimensions(potential_terms, "ionization_energy", temperature)
    if total_only:
        return potential
    return potential, potential_terms
def _obj_polarizability_from_integral(polarizability, bead_dict, Cintegral, sigma0):
    r"""
    Objective function for finding the polarizability matching a Mie integral.

    The self-interaction multipole integral from ``sigma0`` to infinity is
    evaluated with the trial polarizability and compared against the target
    Mie integral; the sought polarizability is a root of this function.

    Parameters
    ----------
    polarizability : float
        Trial nondimensionalized polarizability,
        :math:`\alpha'=\alpha (4 \pi \varepsilon_{0}) 3k_{B}T e^{-6}`, where
        the dimensionalized version is the polarizability volume.
    bead_dict : dict
        Dictionary of multipole parameters for bead_A.

        - charge (float) Charge nondimensionalized as :math:`q'=q/e`
        - dipole (float) Dipole nondimensionalized as :math:`\mu'=\mu (4 \pi \varepsilon_{0}) 3k_{B}T e^{-3}`
        - quadrupole (float) Quadrupole nondimensionalized as :math:`Q'=Q (4 \pi \varepsilon_{0})^{2} (3k_{B}T)^{2} e^{-5}`
        - ionization_energy (float) Ionization_energy nondimensionalized as :math:`I'=I/(3k_{B}T)`

    Cintegral : float
        Target value of the Mie potential integral; the multipole integral is
        set equal to it to determine the polarizability.
    sigma0 : float
        Lower bound of the integral, nondimensionalized as
        :math:`r'=r (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`

    Returns
    -------
    obj_value : float
        Difference between the multipole integral and the Mie integral.
    """
    # Work on a copy so the caller's dictionary is never mutated.
    trial_bead = dict(bead_dict, polarizability=polarizability)
    Cmultipole, _ = multipole_integral(trial_bead, trial_bead, sigma0=sigma0, nondimensional=True)
    return Cmultipole - Cintegral
def partial_polarizability(bead_dict0, temperature=None, sigma0=None, lower_bound="rmin", nondimensional=False):
r"""
Calculate partial derivative with respect to multipole moments. This is useful in estimating the error.
Nondimensional parameters are scaled using the following physical constants: vacuum permittivity, :math:`\varepsilon_{0}`, Boltzmann constant, :math:`k_{B}`, and elementary charge, :math:`e`.
Parameters
----------
bead_dict : dict
Dictionary of multipole parameters for bead_A.
- epsilon (float) Energy parameter scaled by :math:`k_{B}` in [K], or nondimensionalized as :math:`\epsilon'=\epsilon/(3k_{B}T)`
- sigma (float) Size parameter in [Å], or nondimensionalized as :math:`\sigma'=\sigma (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
- lambdar (float) Repulsive exponent
- lambdaa (float) Attractive exponent
- charge (float) Charge of bead in [e], or nondimensionalized as :math:`q'=q/e`
- dipole (float) Dipole of bead in [Debye], or nondimensionalized as :math:`\mu'=\mu (4 \pi \varepsilon_{0}) 3k_{B}T e^{-3}`
- quadrupole (float) Quadrupole of bead in [Debye*Å], or nondimensionalized as :math:`Q'=Q (4 \pi \varepsilon_{0})^{2} (3k_{B}T)^{2} e^{-5}`
- ionization_energy (float) Ionization_energy of bead in [kcal/mol], or nondimensionalized as :math:`I'=I/(3k_{B}T)`
- polarizability (float) Polarizability of bead in [:math:`Å^3`] or nondimensionalized with :math:`\alpha'=\alpha (4 \pi \varepsilon_{0}) 3k_{B}T e^{-6}`, where the dimensionalized version is the polarizability volume
temperature : float, Optional, default=298
Temperature in [K] for adding and removing dimensions, if the parameters are nondimensionalized, this value isn't used.
sigma0 : float, Optional, default=None
This lower bound of the integral dictates where the lower bound of the definite integral is. Can be reported in [Å] or nondimensionalized as :math:`r'=r (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
lower_bound : str, Optional, default='rmin'
Lower bound of distance array. Used only when sigma0 is None. Can be one of:
- rmin: the position of the potential well
- sigma: the size parameter
nondimensional : bool, Optional, default=False
Indicates whether the given bead library has been nondimensionalized by :func:`~mapsci.multipole_mie_combining_rules.dict_dimensions`
Returns
-------
partial_dict : dict
Partial derivative with respect to multipole moments
"""
if not nondimensional:
if temperature is None:
temperature = 298
logger.info("Using default temperature of 298 K")
bead_dict = dict_dimensions(bead_dict0.copy(), temperature, dimensions=False)
else:
bead_dict = bead_dict0.copy()
if sigma0 is None:
if lower_bound == "rmin":
rm = mie_potential_minimum(bead_dict)
elif lower_bound == "sigma":
rm = bead_dict["sigma"]
else:
rm = float_dimensions(sigma0,"sigma",temperature,dimensions=False)
a = -2 / bead_dict['ionization_energy'] * (bead_dict['charge']**2. * rm**2 + 2 * bead_dict['dipole']**2 / 3 +
3 * bead_dict['quadrupole']**2.0 * rm**2)
b = 4 / bead_dict['ionization_energy']**2 * (
bead_dict['charge']**4. * rm**4 + 4 * bead_dict['charge']**2. * bead_dict['dipole']**2 * rm**2 / 3 +
6 * bead_dict['quadrupole']**2. * bead_dict['charge']**2. / 5 + 4 / 9 * bead_dict['dipole']**4 + 4 / 5 *
bead_dict['dipole']**2 * bead_dict['quadrupole']**2.0 / rm**2 + 9 / 25 * bead_dict['quadrupole']**4.0 / rm**4)
c = 4 / bead_dict['ionization_energy'] * (
bead_dict['charge']**2. * bead_dict['dipole']**2 * rm**2 + bead_dict['dipole']**4 / 3 +
bead_dict['quadrupole']**2. * bead_dict['charge']**2. / 5 +
3 / 5 * bead_dict['quadrupole']**2. * bead_dict['dipole']**2. / rm**2 +
3 / 5 * bead_dict['quadrupole']**4.0 / rm**4 - prefactor(bead_dict['lambdar'], bead_dict['lambdaa']) /
(bead_dict['lambdaa'] - 3) * bead_dict['epsilon'] * bead_dict['sigma']**bead_dict['lambdaa'] / rm**
(bead_dict['lambdaa'] - 6))
partial_dict = {}
for key in bead_dict0:
if key == "ionization_energy":
partial_dict[key] = -(a + np.sqrt(b - c)) / bead_dict['ionization_energy']
elif key == "charge":
tmp1 = 4 / bead_dict['ionization_energy']**2 * (
4 * bead_dict['charge']**3 * rm**4 + 8 / 3 * bead_dict['charge'] * bead_dict['dipole']**2 * rm**2 +
bead_dict['charge'] * bead_dict['quadrupole']**2 * 12 / 5)
tmp2 = 8 / bead_dict['ionization_energy'] * (bead_dict['charge'] * bead_dict['dipole']**2 * rm**2 +
bead_dict['charge'] * bead_dict['quadrupole']**2 / 5)
partial_dict[key] = -4 * bead_dict['charge'] * rm**2 / bead_dict['ionization_energy'] + (tmp1 - tmp2) / (
2 * np.sqrt(b - c))
elif key == "dipole":
tmp1 = 4 / bead_dict['ionization_energy']**2 * (
8 / 3 * bead_dict['charge']**2 * rm**2 * bead_dict['dipole'] + 16 / 9 * bead_dict['dipole']**3 +
8 / 5 * bead_dict['dipole'] * bead_dict['quadrupole']**2 / rm**2)
tmp2 = 8 / bead_dict['ionization_energy'] * (
bead_dict['charge'] * bead_dict['dipole']**2 * rm**2 + 4 / 3 * bead_dict['dipole']**3 +
3 / 5 * bead_dict['dipole'] * bead_dict['quadrupole']**2 / rm**2)
partial_dict[key] = -8 / 3 * bead_dict['dipole'] / bead_dict['ionization_energy'] + (tmp1 - tmp2) / (
2 * np.sqrt(b - c))
elif key == "quadrupole":
tmp1 = 4 / bead_dict['ionization_energy']**2 * (12 / 5 * bead_dict['charge']**2 * bead_dict['quadrupole'] +
8 / 5 * bead_dict['dipole']**2 * bead_dict['quadrupole'] /
rm**2 + 36 / 25 * bead_dict['quadrupole']**3 / rm**4)
tmp2 = 4 / bead_dict['ionization_energy'] * (2 / 5 * bead_dict['charge']**2 * bead_dict['quadrupole'] + 6 /
5 * bead_dict['dipole']**2 * bead_dict['quadrupole'] / rm**2 +
12 / 5 * bead_dict['quadrupole']**3 / rm**4)
partial_dict[key] = -12 / 5 * bead_dict['quadrupole'] / bead_dict['ionization_energy'] / rm**2 + (
tmp1 - tmp2) / (2 * np.sqrt(b - c))
if not nondimensional:
for key in partial_dict:
if key != | |
download_url_container=report_table.quarter_download_url.to_list() # container to store the download urls of quarter statements
# Designate a directory to store downloaded statements (begin statement piling)
statement_pile_path=os.path.join(root_path,'statement_pile')
company_pile_path=os.path.join(statement_pile_path,self.symbol)
try:
os.mkdir(statement_pile_path) # Create the statement_pile_path path
os.mkdir(company_pile_path) # Create the company_pile_path path
os.chdir(company_pile_path) # Tab into the company_pile_path path
except:
try:
os.mkdir(company_pile_path) # Create the company_pile_path path
os.chdir(company_pile_path) # Tab into the company_pile_path path
except:
os.chdir(company_pile_path)
# Downlaod accessible statements into the statement_pile path
# Construct a data frame to store the specified statement type
period_container=[] # container to store statement periods
statement_container=[] # container to store statement table
for url_index in range(len(download_url_container)):
statement_period=report_periods[url_index].strftime("%Y-%m-%d")
if(download_url_container[url_index] is not None and download_url_container[url_index][download_url_container[url_index].rfind('.')+1:len(download_url_container[url_index])]!='xls'):
statement_file=requests.get(download_url_container[url_index])
file_name=self.symbol+statement_period+self.report_type+'.xlsx'
with open(file_name, 'wb+') as fs:
fs.write(statement_file.content) # populating statement contents
dfs=pd.ExcelFile(fs)
sheet_headers=list(map(lambda x: x.lower().replace(' ','').replace('_','').replace('-','').replace(',','').replace("'","").replace('&','').replace('/',''), [dfs.parse(sn).columns[0] for sn in dfs.sheet_names]))
############################ Income Statements ###################################
if (statement_type=='income'):
income_term_header=list(map(lambda x: x.lower().replace(' ','').replace('&','').replace('/',''),income_terms))
select_sheet_bool=[any(x in sheet_headers[i] for x in income_term_header) for i in range(len(sheet_headers))]
if(any(select_sheet_bool)):
# Identify income statement and store in dataframe form
income_statement=dfs.parse(dfs.sheet_names[select_sheet_bool.index(True)])
# Store income statement into the statement container
statement_container.append(income_statement)
# Store income statement period into the period container
period_container.append(statement_period)
# Serialize the income statement dataframe into '.pickle'- to be accessed faster next time
income_statement.to_pickle(self.symbol+statement_period+self.report_type.capitalize()+statement_type.capitalize()+'.pickle')
else:
# Store income statement as None in the statement container
## Because not identified or does not exist
statement_container.append(None)
# Store income statement period into the period container
period_container.append(statement_period)
# Message to warn that income statement may be non-identified or simply not available
print(self.symbol+' '+statement_period+ ' '+self.report_type+' income statement not identified or not available: update income statement identifiers or pass')
############################ Balance Sheets ###################################
if (statement_type=='balance'):
balance_term_header=list(map(lambda x: x.lower().replace(' ','').replace('&','').replace('/',''), balance_terms))
select_sheet_bool=[any(x in sheet_headers[i] for x in balance_term_header) for i in range(len(sheet_headers))]
if(any(select_sheet_bool)):
# Identify balance sheet and store in dataframe form
balance_sheet=dfs.parse(dfs.sheet_names[select_sheet_bool.index(True)])
# Store balacne sheet into the statement container
statement_container.append(balance_sheet)
# Store balance sheet period into the period container
period_container.append(statement_period)
# Serialize the balance sheet dataframe into '.pickle'- to be accessed faster next time
balance_sheet.to_pickle(self.symbol+statement_period+self.report_type.capitalize()+statement_type.capitalize()+'.pickle')
else:
# Store balance sheet as None in the statement container
## Because not identified or does not exist
statement_container.append(None)
# Store balance sheet period into the period container
period_container.append(statement_period)
# Message to warn that balance sheet may be non-identified or simply not available
print(self.symbol+' '+statement_period+ ' '+self.report_type+' balance sheet not identified or not available: update balance sheet identifiers or pass')
############################ Cash Flow Statements ###################################
if (statement_type=='cashflow'):
cashflow_term_header=list(map(lambda x: x.lower().replace(' ','').replace('&','').replace('/',''), cashflow_terms))
select_sheet_bool=[any(x in sheet_headers[i] for x in cashflow_term_header) for i in range(len(sheet_headers))]
if(any(select_sheet_bool)):
# Identify cash flow statement and store in dataframe form
cashflow_statement=dfs.parse(dfs.sheet_names[select_sheet_bool.index(True)])
# Store cash flow statement into the statement container
statement_container.append(cashflow_statement)
# Store cash flow statement period into the period container
period_container.append(statement_period)
# Serialize the cash flow statement dataframe into '.pickle'- to be accessed faster next time
cashflow_statement.to_pickle(self.symbol+statement_period+self.report_type.capitalize()+statement_type.capitalize()+'.pickle')
else:
# Store cash flow statement as None in the statement container
## Because not identified or does not exist
statement_container.append(None)
# Store cash flow statement period into the period container
period_container.append(statement_period)
# Message to warn that cash flow statement may be non-identified or simply not available
print(self.symbol+' '+statement_period+ ' '+self.report_type+' cashflow statement not identified or not available: update cash flow statement identifiers or pass')
fs.close() # close the downloaded '.xlsx' file
os.remove(file_name) # remove the downloaded '.xlsx' file after extracting financial statements
else:
print(self.symbol+' '+statement_period+' '+self.report_type+' '+statement_type+' statement not available')
# Combine the conpany's income statement(s) or balance sheet(s) or cash flow statement(s), and statement periods into a dataframe
statement_df=pd.DataFrame({'statement_periods':period_container,statement_type+'_statement':statement_container},index=[self.symbol]*len(period_container))
# Return back to root_path (end statement piling)
os.chdir(root_path)
# Return the data frame contructed above if it is not empty
if not statement_df.empty:
return statement_df
else:
return 'No '+self.report_type+' '+statement_type+' statement for '+self.symbol+' between '+self.start_period.strftime("%Y-%m-%d")+' and '+self.end_period.strftime("%Y-%m-%d")
#------------------------Extract Most Recent Income Statements--------------------------------
def ghost_income(self):
    """Assemble the most recent income statements within the requested window.

    Serialized ('.pickle') income statements under ``.\\statement_pile\\<symbol>``
    whose dates fall in (start_period, end_period] are bound column-wise,
    preferring statements three filings apart (annual reports overlap by three
    fiscal years) and progressively falling back to whatever subset exists.
    When no serialized statements are cached, statements are first curated via
    ``self.curate_financial_statements('income')``.

    Reads ``self.symbol``, ``self.report_type`` ('annual' or 'quarter'),
    ``self.start_period`` and ``self.end_period`` (YYYYMMDD ints/strings).

    Returns
    -------
    pandas.DataFrame or None
        Column-bound income statements, or None when nothing is available in
        the requested window.  A status message is printed either way.
    """
    # Preferred index combinations, most informative first: three statements
    # spaced three filings apart, then progressively smaller fallbacks.
    # (Replaces the previous seven-level nested bare try/except pyramid.)
    combos = [(0, 3, 6), (0, 3, 5), (0, 3, 4), (0, 3), (0, 2), (0, 1), (0,)]
    # Ensure a per-company statement directory exists before listing it.
    bin_path = r'.\\statement_pile\\' + self.symbol
    if not os.path.isdir(bin_path):
        os.makedirs(bin_path)
    bin_files = os.listdir(bin_path)
    # Convert start_period and end_period inputs to datetime.date objects.
    start_period = datetime.strptime(str(self.start_period), "%Y%m%d").date()
    end_period = datetime.strptime(str(self.end_period), "%Y%m%d").date()
    if self.report_type == 'annual':
        if any("AnnualIncome" in s for s in bin_files):
            annual_income_file = [s for s in bin_files if "AnnualIncome" in s]
            # Statement dates are embedded in the file names as YYYY-MM-DD.
            annual_income_periods = [
                datetime.strptime(re.search('\d{4}-\d{2}-\d{2}', f).group(), "%Y-%m-%d").date()
                for f in annual_income_file
            ]
            keep = [i for i in range(len(annual_income_file))
                    if start_period < annual_income_periods[i] <= end_period]
            annual_income_file = [annual_income_file[i] for i in keep]
            annual_income_periods = [annual_income_periods[i] for i in keep]
            # Newest first.
            annual_income_file.reverse()
            annual_income_periods.reverse()
            binded_income = None
            binded_message = 'The specified time range is not available, try including a larger time range'
            for combo in combos:
                try:
                    frames = [pd.read_pickle(bin_path + '\\' + annual_income_file[i]) for i in combo]
                    binded_income = pd.concat(frames, axis=1) if len(frames) > 1 else frames[0]
                    binded_message = ('Ghosted ' + self.report_type + ' income statments for '
                                      + ', '.join(re.search('\d{4}-\d{2}-\d{2}', annual_income_file[i]).group()
                                                  for i in combo))
                    break
                except Exception:
                    # Missing index or unreadable pickle: try the next combo.
                    continue
            if len(annual_income_periods) > 0:
                # Warn when the newest cached statement is over a year stale.
                if (end_period - annual_income_periods[0]).days > 365:
                    print('Recommend updating to the latest annual income statements: update via .update_financial_statements("income"), then call this function again')
        else:
            # Nothing cached: download/curate, then bind with the same fallbacks.
            business_income = self.curate_financial_statements('income')
            binded_income = None
            binded_message = ('No ' + self.report_type + ' income statements for ' + self.symbol
                              + ' between ' + datetime.strptime(str(self.start_period), "%Y%m%d").strftime("%Y-%m-%d")
                              + ' and ' + datetime.strptime(str(self.end_period), "%Y%m%d").strftime("%Y-%m-%d"))
            for combo in combos:
                try:
                    frames = [business_income.income_statement[i] for i in combo]
                    binded_income = pd.concat(frames, axis=1) if len(frames) > 1 else frames[0]
                    binded_message = ('Ghosted ' + self.report_type + ' income statments for '
                                      + ', '.join(business_income.statement_periods[i] for i in combo))
                    break
                except Exception:
                    continue
    elif self.report_type == 'quarter':
        if any("QuarterIncome" in s for s in bin_files):
            quarter_income_file = [s for s in bin_files if "QuarterIncome" in s]
            quarter_income_periods = [
                datetime.strptime(re.search('\d{4}-\d{2}-\d{2}', f).group(), "%Y-%m-%d").date()
                for f in quarter_income_file
            ]
            keep = [i for i in range(len(quarter_income_file))
                    if start_period < quarter_income_periods[i] <= end_period]
            quarter_income_file = [quarter_income_file[i] for i in keep]
            quarter_income_periods = [quarter_income_periods[i] for i in keep]
            quarter_income_file.reverse()
            quarter_income_periods.reverse()
            try:
                # Quarters do not overlap, so every cached statement is bound.
                binded_income = pd.concat([pd.read_pickle(bin_path + '\\' + f) for f in quarter_income_file], axis=1)
                binded_message = ('Ghosted ' + self.report_type + ' income statments for '
                                  + ', '.join(re.search('\d{4}-\d{2}-\d{2}', f).group() for f in quarter_income_file))
            except Exception:
                binded_income = None
                binded_message = 'The specified time range is not available, try including a larger time range'
            if len(quarter_income_periods) > 0:
                # Warn when the newest cached quarter is more than ~two quarters old.
                if (end_period - quarter_income_periods[0]).days > 180:
                    print('Recommend updating to the latest quarter income statements: update via .update_financial_statements("income") function, then call this function again')
        else:
            business_income = self.curate_financial_statements('income')
            try:
                binded_income = pd.concat(business_income.income_statement.to_list(), axis=1)
                binded_message = ('Ghosted ' + self.report_type + ' income statments for '
                                  + ', '.join(business_income.statement_periods[i]
                                              for i in range(len(business_income.statement_periods))))
            except Exception:
                binded_income = None
                binded_message = ('No ' + self.report_type + ' income statements for ' + self.symbol
                                  + ' between ' + datetime.strptime(str(self.start_period), "%Y%m%d").strftime("%Y-%m-%d")
                                  + ' and ' + datetime.strptime(str(self.end_period), "%Y%m%d").strftime("%Y-%m-%d"))
    print(binded_message)
    return binded_income
#------------------------Extract Most Recent Balance Sheets--------------------------------
def ghost_balance(self):
bin_path=r'.\statement_pile\\'+self.symbol
if (os.path.isdir(bin_path)):
bin_files=os.listdir(bin_path)
pass
else:
os.makedirs(bin_path)
bin_files=os.listdir(bin_path)
# Convert start_period and end_period inputs to a datetime object
start_period=datetime.strptime(str(self.start_period),"%Y%m%d").date()
end_period=datetime.strptime(str(self.end_period),"%Y%m%d").date()
if(self.report_type=='annual'):
if any(["AnnualBalance" in s for s in bin_files]):
annual_balance_file=[s for s in bin_files if "AnnualBalance" in s]
annual_balance_periods=list(map(lambda x: datetime.strptime(re.search('\d{4}-\d{2}-\d{2}',x).group(),"%Y-%m-%d").date(),annual_balance_file))
annual_balance_file=[annual_balance_file[i] for i in range(len(annual_balance_file)) if annual_balance_periods[i]>start_period and annual_balance_periods[i]<=end_period]
annual_balance_periods=[annual_balance_periods[i] for i in range(len(annual_balance_periods)) if annual_balance_periods[i]>start_period and annual_balance_periods[i]<=end_period]
annual_balance_file.reverse()
annual_balance_periods.reverse()
try:
binded_balance=pd.concat([pd.read_pickle(bin_path+'\\'+annual_balance_file[0]),pd.read_pickle(bin_path+'\\'+annual_balance_file[2]), pd.read_pickle(bin_path+'\\'+annual_balance_file[4]), pd.read_pickle(bin_path+'\\'+annual_balance_file[6]), pd.read_pickle(bin_path+'\\'+annual_balance_file[8])], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[2]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[4]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[6]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[8]).group()
except:
try:
binded_balance=pd.concat([pd.read_pickle(bin_path+'\\'+annual_balance_file[0]),pd.read_pickle(bin_path+'\\'+annual_balance_file[2]), pd.read_pickle(bin_path+'\\'+annual_balance_file[4]), pd.read_pickle(bin_path+'\\'+annual_balance_file[6]), pd.read_pickle(bin_path+'\\'+annual_balance_file[7])], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[2]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[4]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[6]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[7]).group()
except:
try:
binded_balance=pd.concat([pd.read_pickle(bin_path+'\\'+annual_balance_file[0]),pd.read_pickle(bin_path+'\\'+annual_balance_file[2]), pd.read_pickle(bin_path+'\\'+annual_balance_file[4]), pd.read_pickle(bin_path+'\\'+annual_balance_file[6])], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[2]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[4]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[6]).group()
except:
try:
binded_balance=pd.concat([pd.read_pickle(bin_path+'\\'+annual_balance_file[0]),pd.read_pickle(bin_path+'\\'+annual_balance_file[2]), pd.read_pickle(bin_path+'\\'+annual_balance_file[4]), pd.read_pickle(bin_path+'\\'+annual_balance_file[5])], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[2]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[4]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[5]).group()
except:
try:
binded_balance=pd.concat([pd.read_pickle(bin_path+'\\'+annual_balance_file[0]),pd.read_pickle(bin_path+'\\'+annual_balance_file[2]), pd.read_pickle(bin_path+'\\'+annual_balance_file[4])], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[2]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[4]).group()
except:
try:
binded_balance=pd.concat([pd.read_pickle(bin_path+'\\'+annual_balance_file[0]),pd.read_pickle(bin_path+'\\'+annual_balance_file[2]), pd.read_pickle(bin_path+'\\'+annual_balance_file[3])], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[2]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[3]).group()
except:
try:
binded_balance=pd.concat([pd.read_pickle(bin_path+'\\'+annual_balance_file[0]),pd.read_pickle(bin_path+'\\'+annual_balance_file[2])], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[2]).group()
except:
try:
binded_balance=pd.concat([pd.read_pickle(bin_path+'\\'+annual_balance_file[0]),pd.read_pickle(bin_path+'\\'+annual_balance_file[1])], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[1]).group()
except:
try:
binded_balance=pd.read_pickle(bin_path+'\\'+annual_balance_file[0])
binded_message='Ghosted | |
if source in graph:
graph[source].append(sink)
else:
graph[source] = [sink]
return graph
# 59-5
def find_contigs(graph, node, indegree, outdegree):
    """Return the maximal non-branching paths that start at ``node``.

    For each outgoing edge of ``node``, walk forward through nodes that are
    non-branching (indegree == outdegree == 1), collecting one path per edge.

    Args:
        graph: adjacency dict mapping node -> list of successor nodes.
        indegree: dict mapping node -> number of incoming edges.
        outdegree: dict mapping node -> number of outgoing edges.

    Returns:
        list of paths, each a list of nodes beginning with ``node``.
    """
    contigs = []
    # Iterate over a snapshot of the start node's successors; ``node`` itself
    # must stay fixed so every path starts at the same branch node.
    # (The previous version reassigned ``node`` inside the walk, corrupting
    # the start of every path after the first, and shadowed builtin next().)
    for nxt in graph[node]:
        new_path = [node, nxt]
        ins, outs = indegree[nxt], outdegree[nxt]
        while ins == 1 and outs == 1:
            # Non-branching interior node: exactly one way forward.
            cur = nxt
            nxt = graph[cur][0]
            new_path.append(nxt)
            ins, outs = indegree[nxt], outdegree[nxt]
        contigs.append(new_path)
    return contigs
# 59-5
def debruijn_to_contigs(graph):
    """Collect all maximal non-branching paths (contigs) of a de Bruijn graph.

    A contig starts at every node that has outgoing edges and is not a
    simple pass-through node (indegree == outdegree == 1).
    """
    indegree, outdegree = path_degrees(graph)
    contigs = []
    for node, outs in outdegree.items():
        if outs == 0:
            continue  # pure sink: no path can start here
        if outs == 1 and indegree[node] == 1:
            continue  # interior of a non-branching stretch
        contigs.extend(find_contigs(graph, node, indegree, outdegree))
    return contigs
# 71-8
def make_change(change, coins):
    """Return the minimum number of coins from ``coins`` summing to ``change``.

    Classic bottom-up dynamic program. If ``change`` cannot be formed from
    the given denominations, the sentinel value 10000000 is returned.

    (Fixes: the previous version seeded the table with ``{1: 1}``, silently
    assuming a coin of value 1 exists, and shadowed builtin ``min``.)
    """
    UNREACHABLE = 10000000
    change_map = {0: 0}
    for money in range(1, change + 1):
        best = UNREACHABLE
        for coin in coins:
            prev = money - coin
            if prev in change_map:
                num_coins = change_map[prev] + 1
                if num_coins < best:
                    best = num_coins
        change_map[money] = best
    return change_map[change]
# 72-9
def parse_matrix(instrings, n, m):
    """Parse ``n`` lines of ``m`` space-separated integers into a matrix.

    Args:
        instrings: list of strings, one per matrix row.
        n: expected number of rows.
        m: expected number of columns.

    Returns:
        list of n lists of m ints.

    Raises:
        Exception: if the row or column count does not match n x m.
    """
    if len(instrings) != n:
        raise Exception('Expected n={} rows, saw {}'.format(n, len(instrings)))
    mat = []
    for instring in instrings:
        # Materialize the map: on Python 3, map() is lazy and has no len().
        row = list(map(int, instring.split(' ')))
        if len(row) != m:
            # Report the parsed column count, not the string's character count.
            raise Exception('Expected m={} columns, saw {}'.format(m, len(row)))
        mat.append(row)
    return mat
# 72-9
# 248-3
# 248-5
# 248-7
# 249-8
# 250-12
# 250-14
def init_matrix(rows, cols):
    """Return a ``rows`` x ``cols`` matrix of zeros as nested lists.

    Each row is an independent list, so cells can be assigned individually.
    """
    return [[0] * cols for _ in range(rows)]
# 72-9
def longest_path(n, m, downmatrix, rightmatrix):
    """Manhattan-tourist DP: maximum-weight path from (0,0) to (n,m).

    ``downmatrix`` (n rows, m+1 cols) holds the weights of downward edges,
    ``rightmatrix`` (n+1 rows, m cols) the weights of rightward edges.
    Returns the best total weight reaching the bottom-right corner.
    """
    score = [[0] * (m + 1) for _ in range(n + 1)]
    # First column: only downward moves are possible.
    for r in range(1, n + 1):
        score[r][0] = score[r - 1][0] + downmatrix[r - 1][0]
    # First row: only rightward moves are possible.
    for c in range(1, m + 1):
        score[0][c] = score[0][c - 1] + rightmatrix[0][c - 1]
    # Interior: best of arriving from above or from the left.
    for r in range(1, n + 1):
        for c in range(1, m + 1):
            score[r][c] = max(score[r - 1][c] + downmatrix[r - 1][c],
                              score[r][c - 1] + rightmatrix[r][c - 1])
    return score[n][m]
# 74-5
# 76-3
# 76-9
# 248-3
# 248-5
# 248-7
def max_and_direction(down, right, diag):
    """Return ``(best_score, direction)`` among the three alignment moves.

    Ties resolve in priority order down > right > diag because the later
    comparisons are strict. (Renamed locals: the previous version shadowed
    builtins ``max`` and ``dir``.)
    """
    best, direction = down, 'down'
    if right > best:
        best, direction = right, 'right'
    if diag > best:
        best, direction = diag, 'diag'
    return best, direction
# 74-5
def print_matrix(matrix):
    """Print ``matrix`` one row per line, using each row's list repr."""
    for line in matrix:
        print(line)
# 74-5
def longest_common_subsequence(seq1, seq2):
    """LCS dynamic program over ``seq1`` x ``seq2``.

    Returns ``(length, backtrack_matrix)`` where the backtrack matrix records
    'down' / 'right' / 'diag' moves (ties favor down, then right, then diag)
    for later path reconstruction.
    """
    v, w = len(seq1), len(seq2)
    pathmatrix = [[0] * (w + 1) for _ in range(v + 1)]
    backtrack_matrix = [[0] * (w + 1) for _ in range(v + 1)]
    for row in range(1, v + 1):
        for col in range(1, w + 1):
            down = pathmatrix[row - 1][col]
            right = pathmatrix[row][col - 1]
            # A diagonal move scores one extra point on a character match.
            diag = pathmatrix[row - 1][col - 1] + (
                1 if seq1[row - 1] == seq2[col - 1] else 0)
            best, direction = down, 'down'
            if right > best:
                best, direction = right, 'right'
            if diag > best:
                best, direction = diag, 'diag'
            pathmatrix[row][col] = best
            backtrack_matrix[row][col] = direction
    return pathmatrix[v][w], backtrack_matrix
# 74-5
def output_longest_common_subsequence(backtrack_matrix, v, i, j):
    """Reconstruct the LCS string by walking ``backtrack_matrix`` from (i, j).

    Iterative equivalent of the recursive walk: only 'diag' moves emit a
    character of ``v``; the walk stops at the matrix border.
    """
    chars = []
    while i > 0 and j > 0:
        direction = backtrack_matrix[i][j]
        if direction == 'down':
            i -= 1
        elif direction == 'right':
            j -= 1
        else:
            i -= 1
            j -= 1
            chars.append(v[i])
    # Characters were collected end-to-start; reverse for reading order.
    return ''.join(reversed(chars))
# 74-7
def parse_dag_edges(edge_strs):
    """Parse 'source->sink:weight' strings into a weighted adjacency dict.

    Returns a dict mapping source -> list of ``[sink, weight]`` pairs,
    preserving the input order of edges per source.
    """
    graph = {}
    for edge_str in edge_strs:
        source, rest = edge_str.split('->')
        sink, weight_str = rest.split(':')
        graph.setdefault(source, []).append([sink, int(weight_str)])
    return graph
# 74-7
def wikipedia_depth_first_topological_sort_visit(dag, node, unmarked, temp_marked, ordered):
    """One DFS visit step of Wikipedia's depth-first topological sort.

    ``unmarked`` holds not-yet-visited nodes (only nodes with outgoing
    edges), ``temp_marked`` the current DFS stack (for cycle detection),
    and ``ordered`` the result list built back-to-front.

    Returns the updated ``(unmarked, temp_marked, ordered)`` state.

    Raises:
        Exception: if a cycle is detected (node already temp-marked).
    """
    if node in temp_marked:
        raise Exception("Not a DAG!")
    if node in unmarked:
        temp_marked.add(node)
        # Visit every successor before emitting this node.
        for sink, _weight in dag[node]:
            unmarked, temp_marked, ordered = (
                wikipedia_depth_first_topological_sort_visit(
                    dag, sink, unmarked, temp_marked, ordered))
        unmarked.remove(node)
        temp_marked.remove(node)
        ordered = [node] + ordered
    return unmarked, temp_marked, ordered
# 74-7
def wikipedia_depth_first_topological_sort(dag, sink):
    """Topologically order the nodes of ``dag`` that have outgoing edges.

    ``sink`` is unused; it is kept for backward interface compatibility.

    Returns:
        list of nodes in a valid topological order (nodes with no outgoing
        edges are not included, matching the visit helper's behavior).
    """
    # Set of nodes only (the dag maps node -> [sink, weight] pairs).
    unmarked = set(dag)
    temp_marked = set()
    ordered = []
    while unmarked:
        # random.sample() on a set was deprecated in Python 3.9 and raises
        # TypeError on 3.11+; any unvisited node is an equally valid start,
        # so pick one deterministically instead.
        node = next(iter(unmarked))
        unmarked, temp_marked, ordered = (
            wikipedia_depth_first_topological_sort_visit(
                dag, node, unmarked, temp_marked, ordered))
    return ordered
# 74-7
def longest_dag_weight(dag, ordering, source, final_sink):
    """Longest-path weight from ``source`` to ``final_sink`` in a DAG.

    Relaxes edges in the given topological ``ordering``; nodes not yet
    reached from ``source`` are skipped.

    Args:
        dag: dict mapping node -> list of ``[sink, weight]`` pairs.
        ordering: topologically sorted nodes.
        source: start node (implicit value 0).
        final_sink: end node.

    Returns:
        ``(best_weight, backtrack)`` where backtrack maps each reached node
        to its best predecessor.

    Raises:
        KeyError: if ``final_sink`` is unreachable from ``source``.
    """
    vals = {}
    backtrack = {}
    for node in ordering:
        if node in vals:
            nodeval = vals[node]
        elif node == source:
            nodeval = 0
        else:
            # Not (yet) reachable from source: cannot relax its edges.
            continue
        # .get() tolerates orderings that include pure sink nodes with no
        # outgoing edges (plain dag[node] raised KeyError for those).
        for sink, weight in dag.get(node, []):
            sinkval = nodeval + weight
            # Strict '>' keeps the earlier predecessor on ties,
            # matching the original tie-breaking behavior.
            if sink not in vals or sinkval > vals[sink]:
                vals[sink] = sinkval
                backtrack[sink] = node
    return vals[final_sink], backtrack
# 74-7
def output_longest_dag_path(backtrack, source, sink):
    """Render the source->...->sink path encoded in ``backtrack``.

    Follows predecessor links from ``sink`` back to ``source`` and returns
    the path as an '->'-joined string in forward order.
    """
    step = backtrack[sink]
    nodes = [sink, step]
    while step != source:
        step = backtrack[step]
        nodes.append(step)
    # Collected sink-first; reverse into source-to-sink order.
    return '->'.join(reversed(nodes))
# 76-3
# 76-9
def parse_scoring_matrix(lines):
    """Parse a space-padded scoring table into a nested dict.

    ``lines[0]`` is the column header (residue letters); each later line is
    a row label followed by integer scores. Returns matrix[row][col] -> int.
    """
    # Split on single spaces and drop empty tokens left by column padding.
    header = [tok for tok in lines[0].strip().split(' ') if tok]
    matrix = {}
    for line in lines[1:]:
        tokens = [tok for tok in line.strip().split(' ') if tok]
        matrix[tokens[0]] = {
            residue: int(score)
            for residue, score in zip(header, tokens[1:])}
    return matrix
# 76-3
# 248-3
def scored_longest_common_subsequence(scoring_matrix, indel_penalty, seq1, seq2):
    """Global alignment DP with a substitution matrix and linear indel cost.

    Returns ``(score, backtrack_matrix)`` where the backtrack matrix holds
    'down' / 'right' / 'diag' moves (ties favor down, then right, then diag).
    """
    v, w = len(seq1), len(seq2)
    pathmatrix = [[0] * (w + 1) for _ in range(v + 1)]
    backtrack_matrix = [[0] * (w + 1) for _ in range(v + 1)]
    # Borders: leading gaps accumulate the indel penalty.
    for row in range(1, v + 1):
        pathmatrix[row][0] = pathmatrix[row - 1][0] + indel_penalty
    for col in range(1, w + 1):
        pathmatrix[0][col] = pathmatrix[0][col - 1] + indel_penalty
    for row in range(1, v + 1):
        for col in range(1, w + 1):
            down = pathmatrix[row - 1][col] + indel_penalty
            right = pathmatrix[row][col - 1] + indel_penalty
            diag = (pathmatrix[row - 1][col - 1]
                    + scoring_matrix[seq1[row - 1]][seq2[col - 1]])
            best, direction = down, 'down'
            if right > best:
                best, direction = right, 'right'
            if diag > best:
                best, direction = diag, 'diag'
            pathmatrix[row][col] = best
            backtrack_matrix[row][col] = direction
    return pathmatrix[v][w], backtrack_matrix
# 76-3
def output_longest_common_subsequence_aligned(backtrack_matrix, v, w, i, j):
    """Rebuild the two gapped alignment strings from a backtrack matrix.

    Walks from (i, j) toward the matrix border; 'down' consumes a character
    of ``v`` against a gap, 'right' a character of ``w`` against a gap, and
    any other move consumes one character of each. Returns the (top, bottom)
    alignment strings.
    """
    top_parts, bottom_parts = [], []
    while i > 0 and j > 0:
        direction = backtrack_matrix[i][j]
        if direction == 'down':
            i -= 1
            top_parts.append(v[i])
            bottom_parts.append('-')
        elif direction == 'right':
            j -= 1
            top_parts.append('-')
            bottom_parts.append(w[j])
        else:
            i -= 1
            j -= 1
            top_parts.append(v[i])
            bottom_parts.append(w[j])
    # Border prefix: whatever remains of one sequence aligns against gaps.
    if i == 0:
        top, bottom = '-' * j, w[:j]
    else:
        top, bottom = v[:i], '-' * i
    return (top + ''.join(reversed(top_parts)),
            bottom + ''.join(reversed(bottom_parts)))
# 76-9
def scored_longest_common_subsequence_local(scoring_matrix, indel_penalty, seq1, seq2):
    """Local alignment DP (Smith-Waterman style) between seq1 and seq2.

    Negative cells are clamped to 0 ("free ride" restart, backtrack 'zero');
    the best-scoring cell anywhere in the matrix is tracked so traceback can
    start there.

    Returns:
        (best_score, backtrack_matrix, best_row, best_col)

    NOTE(review): best_result starts at -1 and a cell is only compared to it
    in the elif branch, i.e. zero-clamped cells never update the best cell
    in the same iteration; if every cell clamps to zero, best_row/best_col
    stay -1 and pathmatrix[-1][-1] (the last cell) is returned.
    """
    v = len(seq1)
    w = len(seq2)
    pathmatrix = init_matrix(v + 1, w + 1)
    backtrack_matrix = init_matrix(v + 1, w + 1)
    # Best cell seen so far: score and its coordinates.
    best_result, best_row, best_col = -1, -1, -1
    for row in range(1, v + 1):
        for col in range(1, w + 1):
            down = pathmatrix[row - 1][col] + indel_penalty
            right = pathmatrix[row][col - 1] + indel_penalty
            diag = pathmatrix[row - 1][col - 1] + scoring_matrix[seq1[row - 1]][seq2[col - 1]]
            # NOTE(review): 'max' and 'dir' shadow the builtins here.
            max, dir = max_and_direction(down, right, diag)
            if max < 0:
                max, dir = 0, 'zero'  # "free ride": restart the local alignment
            elif max > best_result:
                best_result = max
                best_row = row
                best_col = col
            pathmatrix[row][col] = max
            backtrack_matrix[row][col] = dir
    return pathmatrix[best_row][best_col], backtrack_matrix, best_row, best_col
# 76-9
# 248-5
# 248-7
def output_longest_common_subsequence_local(backtrack_matrix, v, w, i, j):
    """Rebuild the locally aligned substrings from a local backtrack matrix.

    Walks from (i, j) until it hits the matrix border or a 'zero'
    (free-ride) cell, which marks the start of the local alignment.
    Returns the (top, bottom) aligned strings.
    """
    top_parts, bottom_parts = [], []
    while i > 0 and j > 0:
        direction = backtrack_matrix[i][j]
        if direction == 'down':
            i -= 1
            top_parts.append(v[i])
            bottom_parts.append('-')
        elif direction == 'right':
            j -= 1
            top_parts.append('-')
            bottom_parts.append(w[j])
        elif direction == 'diag':
            i -= 1
            j -= 1
            top_parts.append(v[i])
            bottom_parts.append(w[j])
        else:
            # 'zero' cell: the local alignment starts here.
            break
    return ''.join(reversed(top_parts)), ''.join(reversed(bottom_parts))
# 248-3
def mismatch_scoring_matrix(alphabet):
    """Build a scoring matrix over ``alphabet``: 0 on match, -1 on mismatch."""
    return {
        letter: {other: 0 if other == letter else -1 for other in alphabet}
        for letter in alphabet}
# 248-5
def mismatch_scoring_matrix_fitted(alphabet):
    """Build a scoring matrix over ``alphabet``: +1 on match, -1 on mismatch."""
    return {
        letter: {other: 1 if other == letter else -1 for other in alphabet}
        for letter in alphabet}
# 248-5
def scored_longest_common_subsequence_fitted(scoring_matrix, indel_penalty, seq1, seq2):
v = len(seq1)
w = len(seq2)
pathmatrix = init_matrix(v + 1, w + 1)
backtrack_matrix = init_matrix(v + 1, w + 1)
best_row, best_col = -1000000, -1000000
for col in range(1, w + 1):
best_result_for_col = -1000000
pathmatrix[0][col] = pathmatrix[0][col - 1] + indel_penalty
for row in range(1, v + 1):
down = pathmatrix[row - 1][col] + indel_penalty
right = pathmatrix[row][col - 1] + indel_penalty
diag = pathmatrix[row - 1][col - 1] + | |
+ du, v + dv
smell = orangeSmell
if not 0 <= r < self.rows or not 0 <= c < self.cols:
return 'Blocked'
cellColor = self.layout[ r ][ c ]
# Red tiles are impassable.
if cellColor == self.redTile:
return 'Blocked'
elif cellColor == self.pinkTile:
pass
elif cellColor == self.orangeTile:
smell = True
elif cellColor == self.blueTile and not orangeSmell:
return 'Blocked'
if cellColor == self.purpleTile or not visited[ orangeSmellDict[ smell ] ][ r ][ c ]:
visited[ orangeSmellDict[ smell ] ][ r ][ c ] = True
q.append( (adjacentLocation, smell, currentLocation) )
if ( isPurpleTile and _applyMovement( du, dv ) == 'Blocked' ) or not isPurpleTile:
for du, dv in self.adjacentLocationDelta:
_applyMovement( du, dv )
stepCount += 1
return -1
class DreamTest(unittest.TestCase):
    """Regression tests for the Dream solver against stored USACO testcases."""

    def test_Dream(self):
        # Run every stored testcase tagged 'dream'.
        for testcase in getTestFileList(tag='dream'):
            self._verify(testcase)

    def _verify(self, testfile):
        input_path = 'tests/usaco/dream/{}.in'.format(testfile)
        answer_path = 'tests/usaco/dream/{}.out'.format(testfile)
        with open(input_path) as inputFile, open(answer_path) as solutionFile:
            rows, cols = readIntegers(inputFile)
            layout = [list(readIntegers(inputFile)) for _ in range(rows)]
            stepCount = readInteger(solutionFile)
        print('Testcase {} [{} x {}] stepCount = {}'.format(
            testfile, rows, cols, stepCount))
        self.assertEqual(Dream(rows, cols, layout).go(), stepCount)

    def test_Dream_Sample(self):
        # Sample case from the problem statement.
        layout = [
            [1, 0, 2, 1],
            [1, 1, 4, 1],
            [1, 0, 4, 0],
            [1, 3, 1, 1],
        ]
        self.assertEqual(Dream(4, 4, layout).go(), 10)
'''
USACO 2015 December Contest, Silver
Problem 1. Switching on the Lights
<NAME> has recently built an enormous barn consisting of an N×N grid of rooms (2≤N≤100), numbered from (1,1) up to (N,N). Being somewhat afraid of the dark, Bessie the cow wants to turn on the lights in as many rooms as possible.
Bessie starts in room (1,1), the only room that is initially lit. In some rooms, she will find light switches that she can use to toggle the lights in other rooms; for example there might be a switch in room (1,1) that toggles the lights in room (1,2). Bessie can only travel through lit rooms, and she can only move from a room (x,y) to its four adjacent neighbors (x−1,y), (x+1,y), (x,y−1) and (x,y+1) (or possibly fewer neighbors if this room is on the boundary of the grid).
Please determine the maximum number of rooms Bessie can illuminate.
INPUT FORMAT (file lightson.in):
The first line of input contains integers N and M (1≤M≤20,000).
The next M lines each describe a single light switch with four integers x, y, a, b, that a switch in room (x,y) can be used to toggle the lights in room (a,b). Multiple switches may exist in any room, and multiple switches may toggle the lights of any room.
OUTPUT FORMAT (file lightson.out):
A single line giving the maximum number of rooms Bessie can illuminate.
SAMPLE INPUT:
3 6
1 1 1 2
2 1 2 2
1 1 1 3
2 3 3 1
1 3 1 2
1 3 2 1
SAMPLE OUTPUT:
5
Here, Bessie can use the switch in (1,1) to turn on lights in (1,2) and (1,3). She can then walk to (1,3) and turn on the lights in (2,1), from which she can turn on the lights in (2,2). The switch in (2,3) is inaccessible to her, being in an unlit room. She can therefore illuminate at most 5 rooms.
Problem credits: <NAME> and <NAME>
'''
class Lights:
    """BFS solver for USACO 2015 Dec Silver 'Switching on the Lights'.

    Rooms are 1-indexed (1,1)..(N,N); Bessie starts at (1,1), which is the
    only room initially lit, and can only walk through lit rooms.
    """

    def __init__(self, N, switchInfo):
        self.N = N
        # Map each room to the list of rooms whose lights it can toggle.
        self.switchInfoDict = defaultdict(list)
        for x, y, a, b in switchInfo:
            self.switchInfoDict[(x, y)].append((a, b))
        self.adjacentLocationDelta = [(0, 1), (0, -1), (1, 0), (-1, 0)]

    def go(self):
        """Return the maximum number of rooms that can be illuminated."""
        start = (1, 1)
        visited = [[False] * (self.N + 1) for _ in range(self.N + 1)]
        visited[1][1] = True
        lit = {start}
        # Unlit rooms that border the visited region; they become reachable
        # the moment a switch turns them on.
        pendingDark = set()
        frontier = deque([start])
        while frontier:
            u, v = current = frontier.popleft()
            # Flip every switch available in the current room.
            for room in self.switchInfoDict[current]:
                lit.add(room)
                if room in pendingDark:
                    pendingDark.remove(room)
                    x, y = room
                    visited[x][y] = True
                    frontier.append(room)
            # Expand to the four neighbors.
            for du, dv in self.adjacentLocationDelta:
                x, y = neighbor = u + du, v + dv
                if not 0 < x <= self.N or not 0 < y <= self.N or visited[x][y]:
                    continue
                if neighbor in lit:
                    visited[x][y] = True
                    frontier.append(neighbor)
                else:
                    pendingDark.add(neighbor)
        return len(lit)
class LightsTest(unittest.TestCase):
    """Regression tests for the Lights solver against stored USACO testcases."""

    def test_Lights(self):
        # Run every stored testcase tagged 'lights'.
        for testcase in getTestFileList(tag='lights'):
            self._verify(testcase)

    def _verify(self, testfile):
        input_path = 'tests/usaco/lights/{}.in'.format(testfile)
        answer_path = 'tests/usaco/lights/{}.out'.format(testfile)
        with open(input_path) as inputFile, open(answer_path) as solutionFile:
            N, numberOfSwitches = readIntegers(inputFile)
            switchInfo = [
                tuple(readIntegers(inputFile))
                for _ in range(numberOfSwitches)]
            illuminatedRooms = readInteger(solutionFile)
        formatString = 'Testcase {} N = {} numberOfSwitches = {} illuminatedRooms = {}'
        print(formatString.format(testfile, N, numberOfSwitches, illuminatedRooms))
        self.assertEqual(Lights(N, switchInfo).go(), illuminatedRooms)

    def test_Lights_Sample(self):
        # Sample case from the problem statement.
        switchInfo = [
            (1, 1, 1, 2),
            (2, 1, 2, 2),
            (1, 1, 1, 3),
            (2, 3, 3, 1),
            (1, 3, 1, 2),
            (1, 3, 2, 1),
        ]
        self.assertEqual(Lights(3, switchInfo).go(), 5)
'''
USACO 2019 January Contest, Silver
Problem 2. Icy Perimeter
<NAME> is going into the ice cream business! He has built a machine that produces blobs of ice cream but unfortunately in somewhat irregular shapes, and he is hoping to optimize the machine to make the shapes produced as output more reasonable.
The configuration of ice cream output by the machine can be described using an N×N grid (1≤N≤1000) as follows:
##....
....#.
.#..#.
.#####
...###
....##
Each '.' character represents empty space and each '#' character represents a 1×1 square cell of ice cream.
Unfortunately, the machine isn't working very well at the moment and might produce multiple disconnected blobs of ice cream (the figure above has two). A blob of ice cream is connected if you can reach any ice cream cell from every other ice cream cell in the blob by repeatedly stepping to adjacent ice cream cells in the north, south, east, and west directions.
<NAME> would like to find the area and perimeter of the blob of ice cream having the largest area. The area of a blob is just the number of '#' characters that are part of the blob. If multiple blobs tie for the largest area, he wants to know the smallest perimeter among them. In the figure above, the smaller blob has area 2 and perimeter 6, and the larger blob has area 13 and perimeter 22.
Note that a blob could have a "hole" in the middle of it (empty space surrounded by ice cream). If so, the boundary with the hole also counts towards the perimeter of the blob. Blobs can also appear nested within other blobs, in which case they are treated as separate blobs. For example, this case has a blob of area 1 nested within a blob of area 16:
#####
#...#
#.#.#
#...#
#####
Knowing both the area and perimeter of a blob of ice cream is important, since <NAME> ultimately wants to minimize the ratio of perimeter to area, a quantity he calls the icyperimetric measure of his ice cream. When this ratio is small, the ice cream melts slower, since it has less surface area relative to its mass.
INPUT FORMAT (file perimeter.in):
The first line of input contains N, and the next N lines describe the output of the machine. At least one '#' character will be present.
OUTPUT FORMAT (file perimeter.out):
Please output one line containing two space-separated integers, the first being the area of the largest blob, and the second being its perimeter. If multiple blobs are tied for largest area, print the information for whichever of these has the smallest perimeter.
SAMPLE INPUT:
6
##....
....#.
.#..#.
.#####
...###
....##
SAMPLE OUTPUT:
13 22
Problem credits: <NAME>
'''
class IcyPerimeter:
def __init__( self, size, icecreamLayout ):
self.size = size
self.icecreamLayout = icecreamLayout
self.emptyCell, self.icecreamCell = '.#'
self.adjacentLocationDelta = [ (0, 1), (0, -1), (1, 0), (-1, 0) ]
def _floodFill( self, r, c, visited ):
area = perimeter = 0
q = deque()
q.append( (r, c) )
visited[ r ][ c ] = True
area += 1
while len( q ) > 0:
u, v = q.popleft()
for du, dv in self.adjacentLocationDelta:
r, c = u + du, | |
= os.path.join(
intermediate_dir, 'clipped_lulc%s.tif' % file_suffix)
eto_path = os.path.join(intermediate_dir, 'eto%s.tif' % file_suffix)
precip_path = os.path.join(intermediate_dir, 'precip%s.tif' % file_suffix)
depth_to_root_rest_layer_path = os.path.join(
intermediate_dir, 'depth_to_root_rest_layer%s.tif' % file_suffix)
pawc_path = os.path.join(intermediate_dir, 'pawc%s.tif' % file_suffix)
tmp_pet_path = os.path.join(intermediate_dir, 'pet%s.tif' % file_suffix)
# Paths for output rasters
fractp_path = os.path.join(
per_pixel_output_dir, 'fractp%s.tif' % file_suffix)
wyield_path = os.path.join(
per_pixel_output_dir, 'wyield%s.tif' % file_suffix)
aet_path = os.path.join(per_pixel_output_dir, 'aet%s.tif' % file_suffix)
demand_path = os.path.join(intermediate_dir, 'demand%s.tif' % file_suffix)
watersheds_path = args['watersheds_path']
watershed_results_vector_path = os.path.join(
output_dir, 'watershed_results_wyield%s.shp' % file_suffix)
watershed_paths_list = [
(watersheds_path, 'ws_id', watershed_results_vector_path)]
sub_watersheds_path = None
if 'sub_watersheds_path' in args and args['sub_watersheds_path'] != '':
sub_watersheds_path = args['sub_watersheds_path']
subwatershed_results_vector_path = os.path.join(
output_dir, 'subwatershed_results_wyield%s.shp' % file_suffix)
watershed_paths_list.append(
(sub_watersheds_path, 'subws_id', subwatershed_results_vector_path))
seasonality_constant = float(args['seasonality_constant'])
# Initialize a TaskGraph
work_token_dir = os.path.join(intermediate_dir, '_taskgraph_working_dir')
try:
n_workers = int(args['n_workers'])
except (KeyError, ValueError, TypeError):
# KeyError when n_workers is not present in args
# ValueError when n_workers is an empty string.
# TypeError when n_workers is None.
n_workers = -1 # single process mode.
graph = taskgraph.TaskGraph(work_token_dir, n_workers)
base_raster_path_list = [
args['eto_path'],
args['precipitation_path'],
args['depth_to_root_rest_layer_path'],
args['pawc_path'],
args['lulc_path']]
aligned_raster_path_list = [
eto_path,
precip_path,
depth_to_root_rest_layer_path,
pawc_path,
clipped_lulc_path]
target_pixel_size = pygeoprocessing.get_raster_info(
args['lulc_path'])['pixel_size']
align_raster_stack_task = graph.add_task(
pygeoprocessing.align_and_resize_raster_stack,
args=(base_raster_path_list, aligned_raster_path_list,
['near'] * len(base_raster_path_list),
target_pixel_size, 'intersection'),
kwargs={'raster_align_index': 4,
'base_vector_path_list': [watersheds_path]},
target_path_list=aligned_raster_path_list,
task_name='align_raster_stack')
# Joining now since this task will always be the root node
# and it's useful to have the raster info available.
align_raster_stack_task.join()
nodata_dict = {
'out_nodata': -1.0,
'precip': pygeoprocessing.get_raster_info(precip_path)['nodata'][0],
'eto': pygeoprocessing.get_raster_info(eto_path)['nodata'][0],
'depth_root': pygeoprocessing.get_raster_info(
depth_to_root_rest_layer_path)['nodata'][0],
'pawc': pygeoprocessing.get_raster_info(pawc_path)['nodata'][0],
'lulc': pygeoprocessing.get_raster_info(clipped_lulc_path)['nodata'][0]}
# Open/read in the csv file into a dictionary and add to arguments
bio_dict = utils.build_lookup_from_csv(
args['biophysical_table_path'], 'lucode', to_lower=True)
bio_lucodes = set(bio_dict.keys())
bio_lucodes.add(nodata_dict['lulc'])
LOGGER.debug('bio_lucodes %s', bio_lucodes)
if 'demand_table_path' in args and args['demand_table_path'] != '':
demand_dict = utils.build_lookup_from_csv(
args['demand_table_path'], 'lucode')
demand_reclassify_dict = dict(
[(lucode, demand_dict[lucode]['demand'])
for lucode in demand_dict])
demand_lucodes = set(demand_dict.keys())
demand_lucodes.add(nodata_dict['lulc'])
LOGGER.debug('demand_lucodes %s', demand_lucodes)
else:
demand_lucodes = None
valid_lulc_txt_path = os.path.join(intermediate_dir, 'valid_lulc_values.txt')
check_missing_lucodes_task = graph.add_task(
_check_missing_lucodes,
args=(clipped_lulc_path, demand_lucodes,
bio_lucodes, valid_lulc_txt_path),
target_path_list=[valid_lulc_txt_path],
dependent_task_list=[align_raster_stack_task],
task_name='check_missing_lucodes')
# Break the bio_dict into three separate dictionaries based on
# Kc, root_depth, and LULC_veg fields to use for reclassifying
Kc_dict = {}
root_dict = {}
vegetated_dict = {}
for lulc_code in bio_dict:
Kc_dict[lulc_code] = bio_dict[lulc_code]['kc']
# Catch invalid LULC_veg values with an informative error.
lulc_veg_value = bio_dict[lulc_code]['lulc_veg']
try:
vegetated_dict[lulc_code] = int(lulc_veg_value)
if vegetated_dict[lulc_code] not in set([0, 1]):
raise ValueError()
except ValueError:
# If the user provided an invalid LULC_veg value, raise an
# informative error.
raise ValueError('LULC_veg value must be either 1 or 0, not %s',
lulc_veg_value)
# If LULC_veg value is 1 get root depth value
if vegetated_dict[lulc_code] == 1.0:
root_dict[lulc_code] = bio_dict[lulc_code]['root_depth']
# If LULC_veg value is 0 then we do not care about root
# depth value so will just substitute in a 1.0 . This
# value will not end up being used.
else:
root_dict[lulc_code] = 1.0
# Create Kc raster from table values to use in future calculations
LOGGER.info("Reclassifying temp_Kc raster")
tmp_Kc_raster_path = os.path.join(intermediate_dir, 'kc_raster.tif')
create_Kc_raster_task = graph.add_task(
func=pygeoprocessing.reclassify_raster,
args=((clipped_lulc_path, 1), Kc_dict, tmp_Kc_raster_path,
gdal.GDT_Float32, nodata_dict['out_nodata']),
target_path_list=[tmp_Kc_raster_path],
dependent_task_list=[
align_raster_stack_task, check_missing_lucodes_task],
task_name='create_Kc_raster')
# Create root raster from table values to use in future calculations
LOGGER.info("Reclassifying tmp_root raster")
tmp_root_raster_path = os.path.join(
intermediate_dir, 'root_depth.tif')
create_root_raster_task = graph.add_task(
func=pygeoprocessing.reclassify_raster,
args=((clipped_lulc_path, 1), root_dict, tmp_root_raster_path,
gdal.GDT_Float32, nodata_dict['out_nodata']),
target_path_list=[tmp_root_raster_path],
dependent_task_list=[
align_raster_stack_task, check_missing_lucodes_task],
task_name='create_root_raster')
# Create veg raster from table values to use in future calculations
# of determining which AET equation to use
LOGGER.info("Reclassifying tmp_veg raster")
tmp_veg_raster_path = os.path.join(intermediate_dir, 'veg.tif')
create_veg_raster_task = graph.add_task(
func=pygeoprocessing.reclassify_raster,
args=((clipped_lulc_path, 1), vegetated_dict, tmp_veg_raster_path,
gdal.GDT_Float32, nodata_dict['out_nodata']),
target_path_list=[tmp_veg_raster_path],
dependent_task_list=[
align_raster_stack_task, check_missing_lucodes_task],
task_name='create_veg_raster')
dependent_tasks_for_watersheds_list = []
LOGGER.info('Calculate PET from Ref Evap times Kc')
calculate_pet_task = graph.add_task(
func=pygeoprocessing.raster_calculator,
args=([(eto_path, 1), (tmp_Kc_raster_path, 1),
(nodata_dict['eto'], 'raw'),
(nodata_dict['out_nodata'], 'raw')],
pet_op, tmp_pet_path, gdal.GDT_Float32,
nodata_dict['out_nodata']),
target_path_list=[tmp_pet_path],
dependent_task_list=[create_Kc_raster_task],
task_name='calculate_pet')
dependent_tasks_for_watersheds_list.append(calculate_pet_task)
# List of rasters to pass into the vectorized fractp operation
raster_list = [
tmp_Kc_raster_path, eto_path, precip_path, tmp_root_raster_path,
depth_to_root_rest_layer_path, pawc_path, tmp_veg_raster_path]
LOGGER.debug('Performing fractp operation')
calculate_fractp_task = graph.add_task(
func=pygeoprocessing.raster_calculator,
args=([(x, 1) for x in raster_list]
+ [(nodata_dict, 'raw'), (seasonality_constant, 'raw')],
fractp_op, fractp_path, gdal.GDT_Float32,
nodata_dict['out_nodata']),
target_path_list=[fractp_path],
dependent_task_list=[
create_Kc_raster_task, create_veg_raster_task,
create_root_raster_task, align_raster_stack_task],
task_name='calculate_fractp')
LOGGER.info('Performing wyield operation')
calculate_wyield_task = graph.add_task(
func=pygeoprocessing.raster_calculator,
args=([(fractp_path, 1), (precip_path, 1),
(nodata_dict['precip'], 'raw'),
(nodata_dict['out_nodata'], 'raw')],
wyield_op, wyield_path, gdal.GDT_Float32,
nodata_dict['out_nodata']),
target_path_list=[wyield_path],
dependent_task_list=[calculate_fractp_task, align_raster_stack_task],
task_name='calculate_wyield')
dependent_tasks_for_watersheds_list.append(calculate_wyield_task)
LOGGER.debug('Performing aet operation')
calculate_aet_task = graph.add_task(
func=pygeoprocessing.raster_calculator,
args=([(fractp_path, 1), (precip_path, 1),
(nodata_dict['precip'], 'raw'),
(nodata_dict['out_nodata'], 'raw')],
aet_op, aet_path, gdal.GDT_Float32, nodata_dict['out_nodata']),
target_path_list=[aet_path],
dependent_task_list=[
calculate_fractp_task, create_veg_raster_task,
align_raster_stack_task],
task_name='calculate_aet')
dependent_tasks_for_watersheds_list.append(calculate_aet_task)
# list of rasters that will always be summarized with zonal stats
raster_names_paths_list = [
('precip_mn', precip_path),
('PET_mn', tmp_pet_path),
('AET_mn', aet_path),
('wyield_mn', wyield_path)]
if 'demand_table_path' in args and args['demand_table_path'] != '':
# Create demand raster from table values to use in future calculations
create_demand_raster_task = graph.add_task(
func=pygeoprocessing.reclassify_raster,
args=((clipped_lulc_path, 1), demand_reclassify_dict, demand_path,
gdal.GDT_Float32, nodata_dict['out_nodata']),
target_path_list=[demand_path],
dependent_task_list=[
align_raster_stack_task, check_missing_lucodes_task],
task_name='create_demand_raster')
dependent_tasks_for_watersheds_list.append(create_demand_raster_task)
raster_names_paths_list.append(('demand', demand_path))
# Aggregate results to watershed polygons, and do the optional
# scarcity and valuation calculations.
for base_ws_path, ws_id_name, target_ws_path in watershed_paths_list:
zonal_stats_task_list = []
zonal_stats_pickle_list = []
# Do zonal stats with the input shapefiles provided by the user
# and store results dictionaries in pickles
for key_name, rast_path in raster_names_paths_list:
target_stats_pickle = os.path.join(
pickle_dir, '%s_%s%s.pickle' % (ws_id_name, key_name, file_suffix))
zonal_stats_pickle_list.append((target_stats_pickle, key_name))
zonal_stats_task_list.append(graph.add_task(
func=zonal_stats_tofile,
args=(base_ws_path, rast_path, target_stats_pickle),
target_path_list=[target_stats_pickle],
dependent_task_list=dependent_tasks_for_watersheds_list,
task_name='%s_%s_zonalstats' % (ws_id_name, key_name)))
# Create copies of the input shapefiles in the output workspace.
# Add the zonal stats data to the attribute tables.
# Compute optional scarcity and valuation
create_output_vector_task = graph.add_task(
func=create_vector_output,
args=(base_ws_path, target_ws_path, ws_id_name,
zonal_stats_pickle_list, valuation_params),
target_path_list=[target_ws_path],
dependent_task_list=zonal_stats_task_list,
task_name='create_%s_vector_output' % ws_id_name)
# Export a CSV with all the fields present in the output vector
target_basename = os.path.splitext(target_ws_path)[0]
target_csv_path = target_basename + '.csv'
create_output_table_task = graph.add_task(
func=convert_vector_to_csv,
args=(target_ws_path, target_csv_path),
target_path_list=[target_csv_path],
dependent_task_list=[create_output_vector_task],
task_name='create_%s_table_output' % ws_id_name)
graph.join()
def create_vector_output(
        base_vector_path, target_vector_path, ws_id_name,
        stats_path_list, valuation_params):
    """Create the main vector outputs of this model.

    Join results of zonal stats to copies of the watershed shapefiles.
    Also do optional scarcity and valuation calculations.

    Parameters:
        base_vector_path (string): Path to a watershed shapefile provided in
            the args dictionary.
        target_vector_path (string): Path where base_vector_path will be copied
            to in the output workspace.
        ws_id_name (string): Either 'ws_id' or 'subws_id', which are required
            names of a unique ID field in the watershed and subwatershed
            shapefiles, respectively. Used to determine if the polygons
            represent watersheds or subwatersheds.
        stats_path_list (list): List of (pickle_path, key_name) tuples for
            pickles storing the zonal stats results.
        valuation_params (dict): The dictionary built from
            args['valuation_table_path']. Or None if valuation table was not
            provided.

    Returns:
        None
    """
    # Copy the input watershed vector into the output workspace; all field
    # additions below modify the copy, never the user's input.
    esri_shapefile_driver = gdal.GetDriverByName('ESRI Shapefile')
    watershed_vector = gdal.OpenEx(base_vector_path, gdal.OF_VECTOR)
    esri_shapefile_driver.CreateCopy(target_vector_path, watershed_vector)
    # Drop the reference so GDAL can flush/close the source dataset.
    watershed_vector = None
    for pickle_path, key_name in stats_path_list:
        with open(pickle_path, 'rb') as picklefile:
            ws_stats_dict = pickle.load(picklefile)
            if key_name == 'wyield_mn':
                _add_zonal_stats_dict_to_shape(
                    target_vector_path, ws_stats_dict, key_name, 'mean')
                # Also create and populate 'wyield_vol' field, which
                # relies on 'wyield_mn' already present in attribute table
                compute_water_yield_volume(target_vector_path)
            # consum_* variables rely on 'wyield_*' fields present,
            # so this would fail if somehow 'demand' comes before 'wyield_mn'
            # in key_names. The order is hardcoded in raster_names_paths_list.
            elif key_name == 'demand':
                # Add aggregated consumption to sheds shapefiles
                _add_zonal_stats_dict_to_shape(
                    target_vector_path, ws_stats_dict, 'consum_vol', 'sum')
                # Add aggregated consumption means to sheds shapefiles
                _add_zonal_stats_dict_to_shape(
                    target_vector_path, ws_stats_dict, 'consum_mn', 'mean')
                compute_rsupply_volume(target_vector_path)
            else:
                # Default: attach the mean of the zonal statistic as-is.
                _add_zonal_stats_dict_to_shape(
                    target_vector_path, ws_stats_dict, key_name, 'mean')
    if valuation_params:
        # only do valuation for watersheds, not subwatersheds
        if ws_id_name == 'ws_id':
            compute_watershed_valuation(target_vector_path, valuation_params)
def convert_vector_to_csv(base_vector_path, target_csv_path):
    """Create a CSV with all the fields present in vector attribute table.

    Parameters:
        base_vector_path (string):
            Path to the watershed shapefile in the output workspace.
        target_csv_path (string):
            Path to a CSV to create in the output workspace.

    Returns:
        None
    """
    # GDAL's CSV driver writes one row per feature containing every
    # attribute field of the source vector.
    source_vector = gdal.OpenEx(base_vector_path, gdal.OF_VECTOR)
    gdal.GetDriverByName('CSV').CreateCopy(target_csv_path, source_vector)
def zonal_stats_tofile(base_vector_path, raster_path, target_stats_pickle):
    """Calculate zonal statistics for watersheds and write results to a file.

    Parameters:
        base_vector_path (string): Path to the watershed shapefile in the
            output workspace.
        raster_path (string): Path to raster to aggregate.
        target_stats_pickle (string): Path to pickle file to store dictionary
            returned by zonal stats.

    Returns:
        None
    """
    ws_stats_dict = pygeoprocessing.zonal_statistics(
        (raster_path, 1), base_vector_path, ignore_nodata=True)
    with open(target_stats_pickle, 'wb') as picklefile:
        # pickle.dump serializes straight into the file object, avoiding the
        # intermediate bytes object that write(pickle.dumps(...)) created.
        pickle.dump(ws_stats_dict, picklefile)
def aet_op(fractp, precip, precip_nodata, output_nodata):
"""Compute actual evapotranspiration values.
Parameters:
| |
body in list:
# Style may place the ':' on the next line.
comment = get_comment(data, name + ' :')
if len(comment) == 0:
comment = get_comment(data, name + "\n")
validate_comment(filename, name, comment)
self.classes.append(
obj_class(self, filename, attrib, name, parent_name, body, comment,
includes, forward_declares))
if added:
# a global function or class was read from the header file
self.filenames.append(filename)
def __repr__(self):
    """ Return the string form of all typedefs, functions and classes
    parsed into this header, separated by blank lines. """
    result = ''
    if len(self.typedefs) > 0:
        strlist = []
        for cls in self.typedefs:
            strlist.append(str(cls))
        # str.join replaces the Python 2-only string.join(), which no
        # longer exists in Python 3; the produced text is identical.
        result += "\n".join(strlist) + "\n\n"
    if len(self.funcs) > 0:
        strlist = []
        for cls in self.funcs:
            strlist.append(str(cls))
        result += "\n".join(strlist) + "\n\n"
    if len(self.classes) > 0:
        strlist = []
        for cls in self.classes:
            strlist.append(str(cls))
        result += "\n".join(strlist)
    return result
def get_file_names(self):
    """ Return the array of header file names.

    Only files from which at least one global function or class was parsed
    are listed.
    """
    return self.filenames
def get_typedefs(self):
    """ Return the array of global typedef objects parsed from the
    headers. """
    return self.typedefs
def get_funcs(self, filename=None):
    """ Return the array of function objects.

    When |filename| is given, only the functions declared in that header
    file are returned.
    """
    if filename is None:
        return self.funcs
    # Restrict to the functions declared in the specified file.
    return [func for func in self.funcs
            if func.get_file_name() == filename]
def get_classes(self, filename=None):
    """ Return the array of class objects.

    When |filename| is given, only the classes declared in that header
    file are returned.
    """
    if filename is None:
        return self.classes
    # Restrict to the classes declared in the specified file.
    return [cls for cls in self.classes
            if cls.get_file_name() == filename]
def get_class(self, classname, defined_structs=None):
    """ Return the specified class or None if not found.

    While scanning, the CAPI name of every class encountered before the
    match is appended to |defined_structs| when a list is supplied.
    """
    for candidate in self.classes:
        if candidate.get_name() == classname:
            return candidate
        if defined_structs is not None:
            defined_structs.append(candidate.get_capi_name())
    return None
def get_class_names(self):
    """ Returns the names of all classes in this object. """
    return [cls.get_name() for cls in self.classes]
def get_base_class_name(self, classname):
    """ Returns the base (root) class name for |classname|, or None when
    the inheritance chain leaves the set of known classes. """
    cur_cls = self.get_class(classname)
    while True:
        parent_name = cur_cls.get_parent_name()
        if is_base_class(parent_name):
            return parent_name
        parent_cls = self.get_class(parent_name)
        if parent_cls is None:
            break
        # Reuse the class just looked up instead of repeating the identical
        # linear search (the original called get_class() a second time).
        cur_cls = parent_cls
    return None
def get_types(self, list):
    """ Populate |list| with a mapping of data types to analyzed values,
    collected from every typedef and class in this header. """
    for member in self.typedefs:
        member.get_types(list)
    for member in self.classes:
        member.get_types(list)
def get_alias_translation(self, alias):
    """ Return a translation of alias to value based on typedef
    statements, or None when no typedef declares |alias|. """
    for typedef in self.typedefs:
        if typedef.alias == alias:
            return typedef.value
    return None
def get_analysis(self, value, named=True):
    """ Return an analysis of the value based on the header file
    context. """
    return obj_analysis([self], value, named)
def get_defined_structs(self):
    """ Return a list of already defined structure names. """
    # These base structures are always considered pre-defined.
    return [
        'cef_print_info_t',
        'cef_window_info_t',
        'cef_base_ref_counted_t',
        'cef_base_scoped_t',
    ]
def get_capi_translations(self):
    """ Return a dictionary that maps C++ terminology to C API terminology.
    """
    # Strings that will be changed in C++ comments.
    translations = {
        'class': 'structure',
        'Class': 'Structure',
        'interface': 'structure',
        'Interface': 'Structure',
        'true': 'true (1)',
        'false': 'false (0)',
        'empty': 'NULL',
        'method': 'function',
    }
    # Add mappings for all global functions, plus every class and its
    # virtual and static member functions.
    for func in self.get_funcs():
        translations[func.get_name() + '()'] = func.get_capi_name() + '()'
    for cls in self.get_classes():
        translations[cls.get_name()] = cls.get_capi_name()
        for func in cls.get_virtual_funcs() + cls.get_static_funcs():
            translations[func.get_name() + '()'] = \
                func.get_capi_name() + '()'
    return translations
class obj_class:
    """ Class representing a C++ class parsed from a CEF header file.

    The constructor extracts typedefs, static functions and virtual
    functions from the class |body| text using the module-level regular
    expression fragments (_cre_space, _cre_typedef, _cre_attrib, _cre_func,
    _cre_vfmod).
    """

    def __init__(self, parent, filename, attrib, name, parent_name, body,
                 comment, includes, forward_declares):
        if not isinstance(parent, obj_header):
            raise Exception('Invalid parent object type')

        self.parent = parent
        self.filename = filename
        self.attribs = str_to_dict(attrib)
        self.name = name
        self.parent_name = parent_name
        self.comment = comment
        self.includes = includes
        self.forward_declares = forward_declares

        # extract typedefs
        p = re.compile(
            '\n' + _cre_space + 'typedef' + _cre_space + _cre_typedef + ';',
            re.MULTILINE | re.DOTALL)
        # Renamed from `list`, which shadowed the builtin.
        matches = p.findall(body)

        # build the typedef objects; the alias is the last space-separated
        # token, the value is everything before it
        self.typedefs = []
        for value in matches:
            pos = value.rfind(' ')
            if pos < 0:
                raise Exception('Invalid typedef: ' + value)
            alias = value[pos + 1:].strip()
            value = value[:pos].strip()
            self.typedefs.append(obj_typedef(self, filename, value, alias))

        # extract static functions
        p = re.compile('\n' + _cre_space + _cre_attrib + '\n' + _cre_space +
                       'static' + _cre_space + _cre_func + '\((.*?)\)',
                       re.MULTILINE | re.DOTALL)
        matches = p.findall(body)

        # build the static function objects
        self.staticfuncs = []
        for attrib, retval, argval in matches:
            comment = get_comment(body, retval + '(' + argval + ')')
            validate_comment(filename, retval, comment)
            self.staticfuncs.append(
                obj_function_static(self, attrib, retval, argval, comment))

        # extract virtual functions
        p = re.compile(
            '\n' + _cre_space + _cre_attrib + '\n' + _cre_space + 'virtual' +
            _cre_space + _cre_func + '\((.*?)\)' + _cre_vfmod,
            re.MULTILINE | re.DOTALL)
        matches = p.findall(body)

        # build the virtual function objects
        self.virtualfuncs = []
        for attrib, retval, argval, vfmod in matches:
            comment = get_comment(body, retval + '(' + argval + ')')
            validate_comment(filename, retval, comment)
            self.virtualfuncs.append(
                obj_function_virtual(self, attrib, retval, argval, comment,
                                     vfmod.strip()))

    def __repr__(self):
        result = '/* ' + dict_to_str(
            self.attribs) + ' */ class ' + self.name + "\n{"
        if len(self.typedefs) > 0:
            result += "\n\t"
            strlist = []
            for cls in self.typedefs:
                strlist.append(str(cls))
            # str.join replaces the Python 2-only string.join(); output is
            # byte-identical.
            result += "\n\t".join(strlist)
        if len(self.staticfuncs) > 0:
            result += "\n\t"
            strlist = []
            for cls in self.staticfuncs:
                strlist.append(str(cls))
            result += "\n\t".join(strlist)
        if len(self.virtualfuncs) > 0:
            result += "\n\t"
            strlist = []
            for cls in self.virtualfuncs:
                strlist.append(str(cls))
            result += "\n\t".join(strlist)
        result += "\n};\n"
        return result

    def get_file_name(self):
        """ Return the C++ header file name. Includes the directory
        component, if any. """
        return self.filename

    def get_capi_file_name(self):
        """ Return the CAPI header file name. Includes the directory
        component, if any. """
        return get_capi_file_name(self.filename)

    def get_file_directory(self):
        """ Return the file directory component, if any. """
        pos = self.filename.rfind('/')
        if pos >= 0:
            return self.filename[:pos]
        return None

    def get_name(self):
        """ Return the class name. """
        return self.name

    def get_capi_name(self):
        """ Return the CAPI structure name for this class. """
        return get_capi_name(self.name, True)

    def get_parent_name(self):
        """ Return the parent class name. """
        return self.parent_name

    def get_parent_capi_name(self):
        """ Return the CAPI structure name for the parent class. """
        return get_capi_name(self.parent_name, True)

    def has_parent(self, parent_name):
        """ Returns true if this class has the specified class anywhere in
        its inheritance hierarchy. """
        # Every class has a known base class as the top-most parent.
        if is_base_class(parent_name) or parent_name == self.parent_name:
            return True
        if is_base_class(self.parent_name):
            return False

        cur_cls = self.parent.get_class(self.parent_name)
        while True:
            cur_parent_name = cur_cls.get_parent_name()
            if is_base_class(cur_parent_name):
                break
            elif cur_parent_name == parent_name:
                return True
            cur_cls = self.parent.get_class(cur_parent_name)
        return False

    def get_comment(self):
        """ Return the class comment as an array of lines. """
        return self.comment

    def get_includes(self):
        """ Return the list of classes that are included from this class'
        header file. """
        return self.includes

    def get_forward_declares(self):
        """ Return the list of classes that are forward declared for this
        class. """
        return self.forward_declares

    def get_attribs(self):
        """ Return all attributes as a dictionary. """
        return self.attribs

    def has_attrib(self, name):
        """ Return true if the specified attribute exists. """
        return name in self.attribs

    def get_attrib(self, name):
        """ Return the first or only value for specified attribute. """
        if name in self.attribs:
            if isinstance(self.attribs[name], list):
                # the value is a list
                return self.attribs[name][0]
            else:
                # the value is a string
                return self.attribs[name]
        return None

    def get_attrib_list(self, name):
        """ Return all values for specified attribute as a list. """
        if name in self.attribs:
            if isinstance(self.attribs[name], list):
                # the value is already a list
                return self.attribs[name]
            else:
                # convert the value to a list
                return [self.attribs[name]]
        return None

    def get_typedefs(self):
        """ Return the array of typedef objects. """
        return self.typedefs

    def has_typedef_alias(self, alias):
        """ Returns true if the specified typedef alias is defined in the
        scope of this class declaration. """
        for typedef in self.typedefs:
            if typedef.get_alias() == alias:
                return True
        return False

    def get_static_funcs(self):
        """ Return the array of static function objects. """
        return self.staticfuncs

    def get_virtual_funcs(self):
        """ Return the array of virtual function objects. """
        return self.virtualfuncs

    def get_types(self, list):
        """ Populate |list| with a mapping of data types to analyzed
        values from all typedefs and member functions. """
        for cls in self.typedefs:
            cls.get_types(list)
        for cls in self.staticfuncs:
            cls.get_types(list)
        for cls in self.virtualfuncs:
            cls.get_types(list)
| |
# -------------------------------------------------------------------------------------------------
# system
from time import sleep
from math import sqrt
import csv
# -------------------------------------------------------------------------------------------------
# PyQuantum.TC
from PyQuantum.TC.Unitary import *
# -------------------------------------------------------------------------------------------------
# Common
# from PyQuantum.Common.STR import *
# from PyQuantum.Common.Tools import *
from PyQuantum.Common.Quantum.Operators import operator_a, operator_acrossa, operator_L
# -------------------------------------------------------------------------------------------------
# scientific
import numpy as np
import scipy.linalg as lg
from numpy.linalg import multi_dot
# -------------------------------------------------------------------------------------------------
from time import sleep
from PyQuantum.Tools.Units import *
# import peakutils
# import matplotlib
# import matplotlib.pyplot as plt
# from PyQuantum.Common.Fidelity import *
# -------------------------------------------------------------------------------------------------
def run_out_click(args):
    """Evolve the density matrix under the 'out' Lindblad channel until a
    simulated detector click occurs or the time limit is reached.

    Parameters:
        args (dict): must contain 'ro_0' (initial density matrix), 'H'
            (Hamiltonian), 'dt' (time step), 'lindblad' (with an 'out'
            operator spec), 'dt_click' (number of steps between click
            trials) and 'time_limit'. Optional 'T_list' is cleared.

    Returns:
        The click time t, or 0 when the time limit was hit first.
    """
    ro_0 = args['ro_0'] if 'ro_0' in args else None
    Assert(ro_0 is not None, 'param[\'ro_0\'] is not set')

    H = args['H'] if 'H' in args else None
    Assert(H is not None, 'param[\'H\'] is not set')

    if 'T_list' in args:
        T_list = args['T_list']
        T_list.clear()

    dt = args['dt'] if 'dt' in args else None
    Assert(dt is not None, 'param[\'dt\'] is not set')

    # One-step unitary evolution operator U = exp-like propagator for dt.
    U = Unitary(H, dt)
    U_conj = U.conj()

    ro_t = ro_0

    # Lindblad superoperator for the photon-out channel. The original also
    # built several never-used locals (U_data, states_dim, en, t_in,
    # T_click, p_sink_prev); they have been removed.
    L_op = operator_L(ro_t, args['lindblad']['out'])

    diag_abs = np.abs(ro_t.data.diagonal(), dtype=np.longdouble)
    start_energy = np.sum(ro_t.energy(
        H.capacity, H.cavity.n_atoms, H.states_bin, diag_abs))

    t = 0
    nt = 0
    while True:
        # One step: ro <- U (ro + dt * L[ro]) U^+.
        ro_t.data = ((U.data).dot(ro_t.data + dt *
                                  L_op(ro_t).data)).dot(U_conj.data)

        nt += 1
        t += dt

        if t >= args['time_limit']:
            print('t >= time_limit')
            break

        diag_abs = np.abs(ro_t.data.diagonal(), dtype=np.longdouble)
        energy = ro_t.energy(H.capacity, H.cavity.n_atoms,
                             H.states_bin, diag_abs)

        # Energy that has left the system equals the sink (click)
        # probability. (The original guarded this with a constant
        # L_type == 'out' test that was always true.)
        p_sink = start_energy - np.sum(energy)

        if nt % args['dt_click'] == 0:
            p_coin = np.random.random_sample()
            if p_coin <= p_sink:
                return t

    # Time limit reached without a click.
    return 0
def run_click(args):
    """Evolve the density matrix under the 'out' Lindblad channel until the
    accumulated sink probability triggers a simulated detector click.

    Parameters:
        args (dict): simulation parameters. Required: 'ro_0', 'H', 'dt',
            'lindblad' (with an 'out' entry). Optional: 'T', 'nt', 'l',
            'in_photons', 'out_photons', 'sink_limit', 'precision'
            (default 1e-10), 'U_csv', 'x_csv', 'y_csv', 'z_csv', 'thres',
            'T_list' (cleared, then filled with time samples), 'sink_list'
            (cleared, then filled with sink populations).

    Returns:
        bool: True when a click occurred or the run completed, False when
        an energy/sink stop condition was met first.
    """
    ro_0 = H = None
    T = nt = dt = l = None
    U_csv = x_csv = y_csv = z_csv = None
    thres = None
    T_list = sink_list = None
    sink_limit = None
    in_photons = out_photons = None

    lindblad = args['lindblad']

    if 'in_photons' in args:
        in_photons = args['in_photons']
    if 'out_photons' in args:
        out_photons = args['out_photons']

    if 'ro_0' in args:
        ro_0 = args['ro_0']
    Assert(ro_0 is not None, 'param[\'ro_0\'] is not set')

    if 'H' in args:
        H = args['H']
    Assert(H is not None, 'param[\'H\'] is not set')

    if 'l' in args:
        l = args['l']

    if 'T' in args:
        T = args['T']
    if 'dt' in args:
        dt = args['dt']
    if 'nt' in args:
        nt = args['nt']
    if 'sink_limit' in args:
        sink_limit = args['sink_limit']

    precision = args.get('precision', 1e-10)

    if 'U_csv' in args:
        U_csv = args['U_csv']
    if 'x_csv' in args:
        x_csv = args['x_csv']
    if 'y_csv' in args:
        y_csv = args['y_csv']
    if 'z_csv' in args:
        z_csv = args['z_csv']

    if 'thres' in args:
        thres = args['thres']
    if 'T_list' in args:
        T_list = args['T_list']
        T_list.clear()
    if 'sink_list' in args:
        sink_list = args['sink_list']
        sink_list.clear()

    Assert(dt is not None, 'param[\'dt\'] is not set')

    # One-step unitary evolution operator.
    U = Unitary(H, dt)
    U_conj = U.conj()

    ro_t = ro_0

    # BUG FIX: the original tested the string literal "z_csv" against None
    # (always true) and opened a file literally named "z_csv"; use the
    # z_csv parameter value instead.
    if z_csv is not None:
        fz_csv = open(z_csv, "w")
        writer = csv.writer(
            fz_csv, quoting=csv.QUOTE_NONE, lineterminator="\n")

    # Lindblad superoperator for the photon-out channel.
    L_ro = operator_L(ro_t, args['lindblad']['out'])

    diag_abs = np.abs(ro_t.data.diagonal(), dtype=np.longdouble)
    start_energy = np.sum(ro_t.energy(
        H.capacity, H.cavity.n_atoms, H.states_bin, diag_abs))

    t = 0
    ph_out = False
    T_click = []

    while True:
        diag_abs = np.abs(ro_t.data.diagonal(), dtype=np.longdouble)
        energy = ro_t.energy(H.capacity, H.cavity.n_atoms,
                             H.states_bin, diag_abs)

        # Probability that a photon already left the system.
        p_sink = 1 - np.sum(energy)
        p_coin = np.random.random_sample()
        print('p_sink: ', p_sink, ', p_coin: ', p_coin, sep='')
        if p_coin <= p_sink:
            T_click.append(t)
            break

        if sink_list is None:
            if np.sum(energy) - start_energy > in_photons:
                return False

        if z_csv is not None:
            writer.writerow(["{:.5f}".format(x) for x in diag_abs])

        # Record the evolution time samples.
        if T_list is not None:
            T_list.append(t)

        if sink_list is not None:
            sink = start_energy - np.sum(energy[1:H.capacity + 1])
            # The sink population must be non-decreasing (up to precision).
            if len(sink_list) != 0 and (sink_list[-1] - sink) > precision:
                print("err:", sink, "<", sink_list[-1])
                exit(0)
            sink_list.append(sink)
            if sink_limit is not None:
                if abs(sink_limit - sink) < precision:
                    return False

        # Evolve: ro <- U (ro + dt * L[ro]) U^+.
        ro_t.data = ((U.data).dot(ro_t.data + dt *
                                  L_ro(ro_t).data)).dot(U_conj.data)
        # BUG FIX: use the local `precision` (with its documented 1e-10
        # default) instead of args['precision'], which raised KeyError when
        # the caller relied on the default.
        Assert(abs(1 - ro_t.abs_trace()) <= precision,
               "ro is not normed: " + str(ro_t.abs_trace()))

        t += dt

    if ph_out:
        return True

    if x_csv is not None:
        write_x_not_ind(H.states, x_csv)
    if y_csv is not None:
        write_t(T_str_v(T), nt, y_csv)

    if z_csv is not None:
        fz_csv.close()

    return True
def run(args):
# ---------------------------------------------------------------------------------------------
ro_0 = H = None
T = nt = dt = l = None
U_csv = x_csv = y_csv = z_csv = None
thres = None
T_list = sink_list = None
sink_limit = None
in_photons = out_photons = None
lindblad = args['lindblad']
if 'in_photons' in args:
in_photons = args['in_photons']
if 'out_photons' in args:
out_photons = args['out_photons']
# ---------------------------------------------------------------------------------------------
if 'ro_0' in args:
ro_0 = args['ro_0']
Assert(ro_0 is not None, 'param[\'ro_0\'] is not set')
# ---------------------------------------------------------------------------------------------
if 'H' in args:
H = args['H']
Assert(H is not None, 'param[\'H\'] is not set')
# ---------------------------------------------------------------------------------------------
if 'l' in args:
l = args['l']
# ---------------------------------------------------------------------------------------------
if 'T' in args:
T = args['T']
if 'dt' in args:
dt = args['dt']
if 'nt' in args:
nt = args['nt']
if 'sink_limit' in args:
sink_limit = args['sink_limit']
if 'precision' in args:
precision = args['precision']
else:
precision = 1e-10
# ---------------------------------------------------------------------------------------------
if 'U_csv' in args:
U_csv = args['U_csv']
if 'x_csv' in args:
x_csv = args['x_csv']
if 'y_csv' in args:
y_csv = args['y_csv']
if 'z_csv' in args:
z_csv = args['z_csv']
# ---------------------------------------------------------------------------------------------
if 'thres' in args:
thres = args['thres']
if 'T_list' in args:
T_list = args['T_list']
T_list.clear()
if 'sink_list' in args:
sink_list = args['sink_list']
sink_list.clear()
# ---------------------------------------------------------------------------------------------
Assert(dt is not None, 'param[\'dt\'] is not set')
# ---------------------------------------------------------------------------------------------
# print("run starts ...")
# ---------------------------------------------------------------------------------------------
# a = operator_a(H, H.capacity, H.cavity.n_atoms)
# acrossa = operator_acrossa(H, H.capacity, H.cavity.n_atoms)
# across = a.conj()
# # a.print(precision=3)
# # print()
# # across.print(precision=3)
# # exit(0)
# if check:
# across_a = Matrix(H.size, H.size, dtype=np.longdouble)
# across_a.data = lil_matrix((across.data).dot(a.data), dtype=np.longdouble)
# # a_cross_a.print()
# # across_a__cross = Matrix(H.size, H.size, dtype=np.double)
# across_a__cross = across_a.conj()
# aa_cross = Matrix(H.size, H.size, dtype=np.double)
# aa_cross.data = (aa.data.dot(aa.data.transpose()))
# ---------------------------------------------------------------------------------------------
# Unitary
U = Unitary(H, dt)
# print(type(U.data))
if U_csv is not None:
U.to_csv(args['U_csv'])
U_data = U.data
U_conj = U.conj()
# print(type(U_conj))
# ---------------------------------------------------------------------------------------------
ro_t = ro_0
# ---------------------------------------------------------------------------------------------
if "z_csv" is not None:
fz_csv = open("z_csv", "w")
writer = csv.writer(fz_csv, quoting=csv.QUOTE_NONE, lineterminator="\n")
states_dim = []
en = 0
# states_bin = {}
# for k, v in enumerate(H.states):
# en = v[0] + np.sum(v[1])
# if en not in states_bin:
# states_bin[en] = []
# states_bin[en].append(k)
# if v[0] + np.sum(v[1]) > en:
# en += 1
# states_dim.append(k-1)
# print(states_dim)
# exit(0)
# ll = across_a.data+across.data
# ll_cross = ll.getH()
L_ro = L_out = operator_L(ro_t, args['lindblad']['out'])
diag_abs = np.abs(ro_t.data.diagonal(), dtype=np.longdouble)
start_energy = ro_t.energy(
H.capacity, H.cavity.n_atoms, H.states_bin, diag_abs)
start_energy = np.sum(start_energy)
t = 0
while True:
# for t in range(0, nt+1):
# -----------------------------------------------------------
# print("\t", t)
# print(t, "/", nt)
diag_abs = np.abs(ro_t.data.diagonal(), dtype=np.longdouble)
# print(diag_abs)
energy = ro_t.energy(H.capacity, H.cavity.n_atoms,
H.states_bin, diag_abs)
print(np.sum(energy))
if sink_list is None:
# print(np.round(diag_abs, 3))
# energy = ro_t.energy(H.capacity, H.cavity.n_atoms, H.states_bin, diag_abs)
| |
<reponame>ClearCalcs/pydyf
"""
A low-level PDF generator.
"""
import re
import zlib
from codecs import BOM_UTF16_BE
VERSION = __version__ = '0.1.2'
def _to_bytes(item):
    """Convert ``item`` to the bytes used in PDF output.

    bytes pass through, :class:`Object` instances contribute their data,
    numbers are rendered in ASCII (integral floats without a decimal
    point), anything else is stringified.
    """
    if isinstance(item, bytes):
        return item
    if isinstance(item, Object):
        return item.data
    if isinstance(item, float):
        if item.is_integer():
            return f'{int(item):d}'.encode('ascii')
        return f'{item:f}'.encode('ascii')
    if isinstance(item, int):
        return f'{item:d}'.encode('ascii')
    return str(item).encode('ascii')
class Object:
    """Base class for PDF objects."""

    def __init__(self):
        #: Number of the object.
        self.number = None
        #: Position in the PDF of the object.
        self.offset = 0
        #: Version number of the object, non-negative.
        self.generation = 0
        #: Indicate if an object is used (``'n'``), or has been deleted
        #: and therefore is free (``'f'``).
        self.free = 'n'

    @property
    def indirect(self):
        """Indirect representation of an object."""
        header = b' '.join((
            str(self.number).encode(),
            str(self.generation).encode(),
            b'obj'))
        return b'\n'.join((header, self.data, b'endobj'))

    @property
    def reference(self):
        """Object identifier."""
        return b' '.join((
            str(self.number).encode(),
            str(self.generation).encode(),
            b'R'))

    @property
    def data(self):
        """Data contained in the object. Shall be defined in each subclass."""
        raise NotImplementedError()
class Dictionary(Object, dict):
    """PDF Dictionary object.

    Inherits from :class:`Object` and Python :obj:`dict`.
    """

    def __init__(self, values=None):
        Object.__init__(self)
        dict.__init__(self, values or {})

    @property
    def data(self):
        # Serialize as '<<' ... '>>' with one '/key value' entry per line.
        lines = [b'<<']
        lines.extend(
            b'/' + _to_bytes(key) + b' ' + _to_bytes(value)
            for key, value in self.items())
        lines.append(b'>>')
        return b'\n'.join(lines)
# PDF content-stream operator names, grouped into the categories that
# StreamValidator below uses to decide which operators are legal in which
# state.
general_graphics_state = ['w', 'J', 'j', 'M', 'd', 'ri', 'i', 'gs']
# Graphics-state save/restore and transformation matrix.
special_graphics_state = ['q', 'Q', 'cm']
# Path construction (move, line, curves, close subpath, rectangle).
path_construction = ['m', 'l', 'c', 'v', 'y', 'h', 're']
# Path painting (stroke/fill variants plus the no-op 'n').
path_painting = ['S', 's', 'f', 'F', 'f*', 'B', 'B*', 'b', 'b*', 'n']
# Clipping-path operators (nonzero winding and even-odd rules).
clipping_paths = ['W', 'W*']
# Text-state parameters.
text_state = ["Tc", "Tw", "Tz", "TL", "Tf", "Tr", "Ts"]
# Text positioning.
text_positioning = ["Td", "TD", "Tm", "T*"]
# Text showing.
text_showing = ["TJ", "Tj", "'", "\""]
# Color space and color setting.
color = ["CS", "cs", "SC", "SCN", "sc", "scn", "G", "g", "RG", "rg", "K", "k"]
# Shading-pattern painting.
shading_patterns = ['sh']
# Marked-content operators.
marked_content = ["MP", "DP", "BMC", "BDC", "EMC"]
class StreamValidator(Object):
    """State machine validating the operator order of a PDF content stream.

    Tracks the current content-stream state ('page', 'text', 'path',
    'clipping_path' or 'inline') and raises when an operator is fed that
    is not allowed in that state.
    """

    def __init__(self):
        # Initialize the Object bookkeeping (number, offset, generation,
        # free flag); the original skipped this call.
        super().__init__()
        # Current content-stream state.
        self.state = 'page'
        # Nesting depth; currently unused, kept for compatibility.
        self.depth = 0

    def op(self, operator):
        """Feed one operator (as bytes) into the state machine.

        (A leftover debug print of every operator was removed; the
        redundant str() wrappers around the already-decoded operator were
        dropped as well.)
        """
        operator = operator.decode()

        def invalid():
            raise Exception(
                f"Invalid operator {operator} in state {self.state}")

        if self.state == 'page':
            if operator == 'BT':
                self.state = 'text'
            elif operator in ('m', 're'):
                self.state = 'path'
            elif operator == 'BI':
                self.state = 'inline'
            elif operator in ('Do', 'sh'):
                # Stand-alone XObject/shading paint: no state change.
                pass
            elif operator not in [*general_graphics_state,
                                  *special_graphics_state, *color,
                                  *text_state, *marked_content]:
                invalid()
        elif self.state == 'text':
            if operator == 'ET':
                self.state = 'page'
            elif operator not in [*general_graphics_state, *color,
                                  *text_state, *text_showing,
                                  *text_positioning, *marked_content]:
                invalid()
        elif self.state == 'path':
            if operator in path_painting:
                self.state = 'page'
            elif operator in clipping_paths:
                self.state = 'clipping_path'
            elif operator not in [*path_construction, *color,
                                  *general_graphics_state]:
                invalid()
        elif self.state == 'clipping_path':
            if operator in path_painting:
                self.state = 'page'
            else:
                invalid()
        elif self.state == 'inline':
            # BUG FIX: an inline image is terminated by 'EI'; the original
            # tested for a second 'BI' instead, so 'EI' raised and 'BI'
            # incorrectly ended the image.
            if operator == 'EI':
                self.state = 'page'
            elif operator != 'ID':
                invalid()
        else:
            raise Exception(f"Invalid state: {self.state}")
class Stream(Object):
"""PDF Stream object.
Inherits from :class:`Object`.
"""
def __init__(self, stream=None, extra=None, compress=False):
    """Create a stream object.

    :param stream: initial list of chunks composing the stream data.
    :param extra: metadata dictionary, containing at least the length.
    :param compress: compress the stream data when ``True``.
    """
    super().__init__()
    #: Python array of data composing stream.
    self.stream = stream or []
    #: Metadata containing at least the length of the Stream.
    self.extra = extra or {}
    #: Compress the stream data if set to ``True``. Default is ``False``.
    self.compress = compress
    # Operator-sequence checker fed by every drawing method below.
    self.validator = StreamValidator()
def begin_text(self):
    """Begin a text object."""
    operator = b'BT'
    self.validator.op(operator)
    self.stream.append(operator)
def clip(self, even_odd=False):
    """Intersect the current clipping path with the current path.

    The nonzero winding number rule determines which regions lie inside
    the clipping path by default; the even-odd rule is used when
    ``even_odd`` is ``True``.
    """
    if even_odd:
        operator = b'W*'
    else:
        operator = b'W'
    self.validator.op(operator)
    self.stream.append(operator)
def close(self):
    """Close the current subpath.

    Appends a straight line segment from the current point back to the
    starting point of the subpath.
    """
    operator = b'h'
    self.validator.op(operator)
    self.stream.append(operator)
def color_space(self, space, stroke=False):
    """Set the nonstroking color space.

    Sets the stroking color space instead when ``stroke`` is ``True``.
    """
    if stroke:
        operator = b'CS'
    else:
        operator = b'cs'
    self.validator.op(operator)
    self.stream.append(b'/' + _to_bytes(space) + b' ' + operator)
def curve_to(self, x1, y1, x2, y2, x3, y3):
    """Append a cubic Bézier curve to the current path.

    The curve extends from the current point to ``(x3, y3)``, using
    ``(x1, y1)`` and ``(x2, y2)`` as the Bézier control points.
    """
    operator = b'c'
    self.validator.op(operator)
    parts = [_to_bytes(value) for value in (x1, y1, x2, y2, x3, y3)]
    parts.append(operator)
    self.stream.append(b' '.join(parts))
def curve_start_to(self, x2, y2, x3, y3):
    """Append a cubic Bézier curve to the current path.

    The curve extends to ``(x3, y3)``, using the current point and
    ``(x2, y2)`` as the Bézier control points.
    """
    operator = b'v'
    self.validator.op(operator)
    parts = [_to_bytes(value) for value in (x2, y2, x3, y3)]
    parts.append(operator)
    self.stream.append(b' '.join(parts))
def curve_end_to(self, x1, y1, x3, y3):
    """Append a cubic Bézier curve to the current path.

    The curve extends to ``(x3, y3)``, using ``(x1, y1)`` and
    ``(x3, y3)`` as the Bézier control points.
    """
    operator = b'y'
    self.validator.op(operator)
    parts = [_to_bytes(value) for value in (x1, y1, x3, y3)]
    parts.append(operator)
    self.stream.append(b' '.join(parts))
def draw_x_object(self, reference):
    """Draw the object given by ``reference``."""
    operator = b'Do'
    self.validator.op(operator)
    self.stream.append(b'/' + _to_bytes(reference) + b' ' + operator)
def end(self):
    """End the current path without filling or stroking it."""
    operator = b'n'
    self.validator.op(operator)
    self.stream.append(operator)
def end_text(self):
    """End the current text object."""
    operator = b'ET'
    self.validator.op(operator)
    self.stream.append(operator)
def fill(self, even_odd=False):
    """Fill the path using the nonzero winding rule.

    Uses the even-odd rule when ``even_odd`` is ``True``.
    """
    if even_odd:
        operator = b'f*'
    else:
        operator = b'f'
    self.validator.op(operator)
    self.stream.append(operator)
def fill_and_stroke(self, even_odd=False):
    """Fill and stroke the path using the nonzero winding rule.

    Uses the even-odd rule when ``even_odd`` is ``True``.
    """
    if even_odd:
        operator = b'B*'
    else:
        operator = b'B'
    self.validator.op(operator)
    self.stream.append(operator)
def fill_stroke_and_close(self, even_odd=False):
    """Fill, stroke and close the path using the nonzero winding rule.

    Uses the even-odd rule when ``even_odd`` is ``True``.
    """
    if even_odd:
        operator = b'b*'
    else:
        operator = b'b'
    self.validator.op(operator)
    self.stream.append(operator)
def line_to(self, x, y):
    """Append a straight line segment from the current point to ``(x, y)``."""
    operator = b'l'
    self.validator.op(operator)
    parts = (_to_bytes(x), _to_bytes(y), operator)
    self.stream.append(b' '.join(parts))
def move_to(self, x, y):
    """Begin a new subpath by moving the current point to ``(x, y)``."""
    operator = b'm'
    self.validator.op(operator)
    parts = (_to_bytes(x), _to_bytes(y), operator)
    self.stream.append(b' '.join(parts))
def shading(self, name):
    """Paint shape and color shading using shading dictionary ``name``."""
    operator = b'sh'
    self.validator.op(operator)
    self.stream.append(b'/' + _to_bytes(name) + b' ' + operator)
def pop_state(self):
    """Restore the previously saved graphic state."""
    operator = b'Q'
    self.validator.op(operator)
    self.stream.append(operator)
def push_state(self):
    """Save the current graphic state."""
    operator = b'q'
    self.validator.op(operator)
    self.stream.append(operator)
def rectangle(self, x, y, width, height):
    """Add a rectangle to the current path as a complete subpath.

    ``(x, y)`` is the lower-left corner; ``width`` and ``height`` give
    the dimensions.
    """
    operator = b're'
    self.validator.op(operator)
    parts = [_to_bytes(value) for value in (x, y, width, height)]
    parts.append(operator)
    self.stream.append(b' '.join(parts))
def set_color_rgb(self, r, g, b, stroke=False):
    """Set the RGB color for nonstroking operations.

    Sets the stroking color instead when ``stroke`` is ``True``.
    """
    if stroke:
        operator = b'RG'
    else:
        operator = b'rg'
    self.validator.op(operator)
    parts = [_to_bytes(component) for component in (r, g, b)]
    parts.append(operator)
    self.stream.append(b' '.join(parts))
def set_color_special(self, name, stroke=False):
    """Set a named color for nonstroking operations (PDF ``scn``).

    When ``stroke`` is true, set the stroking color instead (``SCN``).
    """
    operator = b'SCN' if stroke else b'scn'
    self.validator.op(operator)
    self.stream.append(b' '.join((b'/' + _to_bytes(name), operator)))
def set_dash(self, dash_array, dash_phase):
    """Set the dash pattern for stroked lines (PDF ``d``).

    :param dash_array: Dash pattern.
    :type dash_array: :term:`iterable`
    :param dash_phase: Start of dash phase.
    :type dash_phase: :obj:`int`
    """
    operator = b'd'
    self.validator.op(operator)
    self.stream.append(
        Array(dash_array).data + b' ' + _to_bytes(dash_phase) + b' ' + operator)
def set_font_size(self, font, size):
    """Select the font resource ``font`` at the given ``size`` (PDF ``Tf``)."""
    operator = b'Tf'
    self.validator.op(operator)
    self.stream.append(
        b' '.join((b'/' + _to_bytes(font), _to_bytes(size), operator)))
def set_text_rendering(self, mode):
    """Set the text rendering mode (PDF ``Tr``)."""
    operator = b'Tr'
    self.validator.op(operator)
    self.stream.append(b' '.join((_to_bytes(mode), operator)))
def set_line_cap(self, line_cap):
    """Set the line cap style (PDF ``J``)."""
    operator = b'J'
    self.validator.op(operator)
    self.stream.append(b' '.join((_to_bytes(line_cap), operator)))
def set_line_join(self, line_join):
    """Set the line join style (PDF ``j``)."""
    operator = b'j'
    self.validator.op(operator)
    self.stream.append(b' '.join((_to_bytes(line_join), operator)))
def set_line_width(self, width):
    """Set the stroke line width (PDF ``w``)."""
    operator = b'w'
    self.validator.op(operator)
    self.stream.append(b' '.join((_to_bytes(width), operator)))
def set_miter_limit(self, miter_limit):
    """Set the miter limit for stroked joins (PDF ``M``)."""
    operator = b'M'
    self.validator.op(operator)
    self.stream.append(b' '.join((_to_bytes(miter_limit), operator)))
def set_state(self, state_name):
    """Apply the parameters of the named graphics state (PDF ``gs``).

    :param state_name: Name of the graphic state.
    """
    operator = b'gs'
    self.validator.op(operator)
    self.stream.append(b' '.join((b'/' + _to_bytes(state_name), operator)))
def show_text(self, text):
    """Show ``text``, wrapped in array form for the PDF ``TJ`` operator."""
    operator = b'TJ'
    self.validator.op(operator)
    self.stream.append(b''.join((b'[', _to_bytes(text), b'] ', operator)))
def stroke(self):
"""Stroke | |
<reponame>yesArjan/Playing-Go-with-RNNs
from tensorflow.python.ops import nn_ops
from tensorflow.python.keras import activations
import tensorflow as tf
import numpy as np
class ConvRNNCell(tf.nn.rnn_cell.RNNCell):
    """A RNN cell with convolutions instead of multiplications.

    Output and state are the same tensor: the activation of one
    convolution over the channel-concatenated (input, state) pair.
    The convolution runs in NCHW layout (see ``call``).
    """

    def __init__(self, input_shape, output_channels, kernel_shape, activation=None, reuse=None, name="conv_rnn_cell"):
        """Construct ConvRNNCell.
        Args:
          input_shape: (int, int, int) Shape of the input as int tuple, excluding the batch size.
            Treated as channels-first: ``input_shape[1:]`` (the spatial dims)
            is reused to build the output shape below.
          output_channels: (int) number of output channels of the conv RNN
          kernel_shape: Shape of kernel, as a list of 2 ints (a list, since it
            is concatenated with channel counts in ``call``).
          activation: Activation function; accepts a Keras activation
            identifier or callable. Defaults to ``tf.tanh``.
          reuse: (bool) whether to reuse the weights of a previous layer by the same name.
          name: Name of the module
        """
        super(ConvRNNCell, self).__init__(_reuse=reuse, name=name)
        self._input_shape = input_shape
        self._output_channels = output_channels
        self._kernel_shape = kernel_shape
        if activation:
            # resolve string identifiers such as 'relu' through keras.activations
            self._activation = activations.get(activation)
        else:
            self._activation = tf.tanh
        # same spatial dims as the input, with output_channels channels
        self._output_size = tf.TensorShape([self._output_channels] + self._input_shape[1:])

    @property
    def output_size(self):
        return self._output_size

    @property
    def state_size(self):
        # state and output share the same shape (the state IS the output)
        return self._output_size

    def call(self, inputs, state, scope=None):
        """Run one step: conv(concat(inputs, state)) -> activation.

        Returns ``(output, new_state)`` where both are the same tensor.
        """
        args = [inputs, state]
        total_arg_size_depth = 0
        shapes = [a.get_shape().as_list() for a in args]
        shape_length = len(shapes[0])
        for shape in shapes:
            # both tensors must be 4-D: (batch, channels, H, W) given the
            # NCHW conv below
            if len(shape) != 4:
                raise ValueError("Conv Linear expects 4D arguments: %s" % str(shapes))
            if len(shape) != len(shapes[0]):
                raise ValueError("Conv Linear expects all args "
                                 "to be of same Dimension: %s" % str(shapes))
            else:
                # accumulate channel depth (axis 1 in NCHW)
                total_arg_size_depth += shape[1]
        dtype = [a.dtype for a in args][0]
        # one convolution over the channel-concatenated input and state
        inputs = tf.concat(args, axis=1)
        strides = shape_length * [1]
        kernel = tf.get_variable('kernel', self._kernel_shape + [total_arg_size_depth, self._output_channels],
                                 dtype=dtype)
        new_hidden = tf.nn.conv2d(inputs, kernel, strides, padding='SAME', data_format='NCHW')
        output = self._activation(new_hidden)
        return output, output
class ConvGRUCell(tf.nn.rnn_cell.RNNCell):
    """A GRU cell with convolutions instead of multiplications."""

    def __init__(self, input_shape, output_channels, kernel_shape,
                 use_bias=True,
                 activation=tf.tanh,
                 normalize=True,
                 data_format='channels_last',
                 reuse=None):
        """Construct ConvGRUCell.
        Args:
          input_shape: (int, ...) Spatial shape of the input as int tuple, excluding
            the batch size and channel dimension.
          output_channels: (int) number of output channels of the conv GRU
          kernel_shape: Shape of kernel, as a list of ints; its length selects
            1-D/2-D/3-D convolution for channels_first layouts.
          use_bias: (bool) whether the convolutions use biases (ignored when
            ``normalize`` is on -- layer norm has its own offset).
          activation: Activation function.
          normalize: (bool) whether to layer normalize the conv output
          data_format: A string, one of 'channels_last' (default) or 'channels_first'.
          reuse: (bool) whether to reuse the weights of a previous layer by the same name.
        Raises:
          ValueError: If data_format is not 'channels_first' or 'channels_last'
        """
        super(ConvGRUCell, self).__init__(_reuse=reuse)
        self._filters = output_channels
        self._kernel = kernel_shape
        self._use_bias = use_bias
        self._activation = activation
        self._normalize = normalize
        # conv data_format string per kernel rank, for channels_first inputs
        self._channel_first_dict = {1: 'NCW', 2: 'NCHW', 3: 'NCDHW'}
        if data_format == 'channels_last':
            self._size = tf.TensorShape(input_shape + [self._filters])
            # last axis of the (batch + state) tensor is the channel axis
            self._feature_axis = self._size.ndims
            self._data_format = None
        elif data_format == 'channels_first':
            self._size = tf.TensorShape([self._filters] + input_shape)
            self._feature_axis = 1
            self._data_format = self._channel_first_dict[len(self._kernel)]
        else:
            raise ValueError('Unknown data_format')

    @property
    def state_size(self):
        return self._size

    @property
    def output_size(self):
        return self._size

    def call(self, x, h):
        """One GRU step: gates from conv(x, h), candidate from conv(x, r*h)."""
        channels = x.shape[self._feature_axis].value
        with tf.variable_scope('gates'):
            inputs = tf.concat([x, h], axis=self._feature_axis)
            n = channels + self._filters
            # two gate maps (reset, update); the conditional is equivalent to
            # 2 * self._filters for every positive filter count
            m = 2 * self._filters if self._filters > 1 else 2
            W = tf.get_variable('kernel', self._kernel + [n, m])
            y = tf.nn.convolution(inputs, W, 'SAME', data_format=self._data_format)
            if self._normalize:
                # layer-norm each gate separately; bias is skipped since layer
                # norm supplies its own offset
                r, u = tf.split(y, 2, axis=self._feature_axis)
                r = tf.contrib.layers.layer_norm(r)
                u = tf.contrib.layers.layer_norm(u)
            elif self._use_bias:
                # ones init biases the gates open at the start of training
                y += tf.get_variable('bias', [m], initializer=tf.ones_initializer())
                r, u = tf.split(y, 2, axis=self._feature_axis)
            else:
                r, u = tf.split(y, 2, axis=self._feature_axis)
            r, u = tf.sigmoid(r), tf.sigmoid(u)
        with tf.variable_scope('candidate'):
            # candidate state uses the reset-gated previous state
            inputs = tf.concat([x, r * h], axis=self._feature_axis)
            n = channels + self._filters
            m = self._filters
            W = tf.get_variable('kernel', self._kernel + [n, m])
            y = tf.nn.convolution(inputs, W, 'SAME', data_format=self._data_format)
            if self._normalize:
                y = tf.contrib.layers.layer_norm(y)
            elif self._use_bias:
                y += tf.get_variable('bias', [m], initializer=tf.zeros_initializer())
            # u close to 1 keeps the old state; (1 - u) admits the candidate
            h = u * h + (1 - u) * self._activation(y)
        return h, h
class MyConv2DLSTMCell(tf.nn.rnn_cell.RNNCell):
    """A LSTM cell with convolutions instead of multiplications.
    Reference:
      Xingjian, <NAME>., et al. "Convolutional LSTM network: A machine learning approach for precipitation
      nowcasting." Advances in Neural Information Processing Systems. 2015.
    """

    def __init__(self,
                 input_shape,
                 output_channels,
                 kernel_shape,
                 use_bias=True,
                 forget_bias=1.0,
                 activation=tf.tanh,
                 data_format='channels_last',
                 reuse=None,
                 name="conv_2d_lstm_cell"):
        """Construct Conv2DLSTMCell.
        Args:
          input_shape: (int, int, int) Shape of the input as int tuple, excluding the batch size
          output_channels: (int) number of output channels of the conv LSTM
          kernel_shape: Shape of kernel, as a list of 2 ints (a list, since it
            is concatenated with channel counts in ``call``).
          use_bias: (bool) whether the convolutions use biases
          forget_bias: (float) Forget bias, added inside the forget-gate
            sigmoid to bias the gate open early in training.
          activation: Activation function.
          data_format: A string, one of 'channels_last' (default) or 'channels_first'. The ordering of the dimensions in
            the inputs. channels_last corresponds to inputs with shape (batch, height, width, channels) while
            channels_first corresponds to inputs with shape (batch, channels, height, width)
          reuse: (bool) whether to reuse the weights of a previous layer by the same name.
          name: Name of the module
        Raises:
          ValueError: If data_format is not 'channels_first' or 'channels_last'
        """
        super(MyConv2DLSTMCell, self).__init__(_reuse=reuse, name=name)
        self._input_shape = input_shape
        self._output_channels = output_channels
        self._kernel_shape = kernel_shape
        self._use_bias = use_bias
        self._forget_bias = forget_bias
        self._activation = activation
        if data_format == 'channels_last':
            state_size = tf.TensorShape(self._input_shape[:-1] + [self._output_channels])
            self._state_size = tf.nn.rnn_cell.LSTMStateTuple(state_size, state_size)
            self._output_size = state_size
            # BUG FIX: the original read ``self.state_size.ndims``, but the
            # ``state_size`` property returns an LSTMStateTuple, which has no
            # ``ndims`` attribute -- constructing the cell with the default
            # data_format raised AttributeError.  Use the local TensorShape
            # (rank 3 -> axis 3 of the 4-D batch tensor, i.e. the channels).
            self._feature_axis = state_size.ndims
            self._data_format = None
        elif data_format == 'channels_first':
            state_size = tf.TensorShape([self._output_channels] + self._input_shape[1:])
            self._state_size = tf.nn.rnn_cell.LSTMStateTuple(state_size, state_size)
            self._output_size = state_size
            self._feature_axis = 1
            self._data_format = 'NCHW'
        else:
            raise ValueError('Unknown data_format')

    @property
    def state_size(self):
        return self._state_size

    @property
    def output_size(self):
        return self._output_size

    def call(self, inputs, state, scope=None):
        """One ConvLSTM step.

        Returns ``(output, LSTMStateTuple(new_cell, output))``.
        """
        cell, hidden = state
        args = [inputs, hidden]
        total_arg_size_depth = 0
        shapes = [a.get_shape().as_list() for a in args]
        shape_length = len(shapes[0])
        for shape in shapes:
            # 4-D: batch + 2 spatial dims + channels (order per data_format)
            if len(shape) != 4:
                raise ValueError("Conv Linear expects 4D arguments: %s" % str(shapes))
            if len(shape) != len(shapes[0]):
                raise ValueError("Conv Linear expects all args "
                                 "to be of same Dimension: %s" % str(shapes))
            else:
                total_arg_size_depth += shape[self._feature_axis]
        dtype = [a.dtype for a in args][0]
        # single convolution produces all four gate pre-activations at once
        inputs = tf.concat(args, axis=self._feature_axis)
        num_features = 4 * self._output_channels if self._output_channels > 1 else 4
        strides = shape_length * [1]
        kernel = tf.get_variable('kernel', self._kernel_shape + [total_arg_size_depth, num_features], dtype=dtype)
        new_hidden = nn_ops.conv2d(inputs, kernel, strides, padding='SAME', data_format=self._data_format)
        if self._use_bias:
            new_hidden += tf.get_variable('bias', [num_features], initializer=tf.zeros_initializer(), dtype=dtype)
        gates = tf.split(new_hidden, 4, axis=self._feature_axis)
        input_gate, new_input, forget_gate, output_gate = gates
        # standard LSTM recurrences with a forget-gate bias
        new_cell = tf.sigmoid(forget_gate + self._forget_bias) * cell
        new_cell += tf.sigmoid(input_gate) * self._activation(new_input)
        output = self._activation(new_cell) * tf.sigmoid(output_gate)
        new_state = tf.nn.rnn_cell.LSTMStateTuple(new_cell, output)
        return output, new_state
class BNConvGRUCell(tf.nn.rnn_cell.RNNCell):
"""A GRU cell with convolutions instead of multiplications."""
def __init__(self, input_shape, output_channels, kernel_shape, max_bn_steps, training,
             use_bias=False,
             activation=tf.tanh,
             momentum=0.95,
             initial_scale=0.1,
             reuse=None):
    """Construct BNConvGRUCell (channels-last only).
    Args:
      input_shape: (int, ...) Spatial shape of the input as int tuple,
        excluding the batch size and channel dimension.
      output_channels: (int) number of output channels of the conv GRU
      kernel_shape: Shape of kernel, as a list of 2 ints.
      max_bn_steps: (int) number of time steps that get their own batch-norm
        population statistics; later steps reuse the last slot.
      training: (bool) baked into the graph as a constant -- selects batch
        vs. population statistics in ``_batch_norm``.
      use_bias: (bool) whether the gate convolution adds a bias.
      activation: Activation function.
      momentum: (float) exponential moving-average momentum for the batch-norm
        population statistics.
      initial_scale: (float) initializer value for the batch-norm scale (gamma).
      reuse: (bool) whether to reuse the weights of a previous layer by the same name.
    """
    super(BNConvGRUCell, self).__init__(_reuse=reuse)
    self._filters = output_channels
    self._kernel = kernel_shape
    self._max_bn_steps = max_bn_steps
    self._training = tf.constant(training, tf.bool)
    self._use_bias = use_bias
    self._activation = activation
    self._momentum = momentum
    self._initial_scale = initial_scale
    # channels-last layout: state shape is input_shape + [filters]
    self._size = tf.TensorShape(input_shape + [self._filters])
    self._feature_axis = self._size.ndims
@property
def state_size(self):
    # NOTE(review): ``call`` unpacks ``h, step`` from the state, so the true
    # state also carries a step counter -- confirm how the wrapper supplies it.
    return self._size
@property
def output_size(self):
    # output has the same shape as the hidden state
    return self._size
def _batch_norm(self, x, name_scope, step, epsilon=1e-5):
    """Per-time-step batch normalization (recurrent batch norm).

    Each of the first ``max_bn_steps`` time steps keeps its own population
    mean/variance row; ``step`` is clamped so later steps share the last row.
    Training mode normalizes with batch statistics and updates the running
    rows; inference mode uses the stored population statistics.
    """
    with tf.variable_scope(name_scope):
        size = x.get_shape().as_list()[-1]
        scale = tf.get_variable('scale', [size], initializer=tf.constant_initializer(self._initial_scale))
        offset = 0  # no learned shift (beta fixed at zero)
        pop_mean_all_steps = tf.get_variable('pop_mean', [self._max_bn_steps, size],
                                             initializer=tf.zeros_initializer(), trainable=False)
        pop_var_all_steps = tf.get_variable('pop_var', [self._max_bn_steps, size],
                                            initializer=tf.ones_initializer(), trainable=False)
        # steps beyond the table reuse the statistics of the last slot
        step = tf.minimum(step, self._max_bn_steps - 1)
        pop_mean = pop_mean_all_steps[step]
        pop_var = pop_var_all_steps[step]
        # moments over batch and both spatial axes (channels-last input)
        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2])

        def batch_statistics():
            # EMA update of this step's population row via sliced-variable
            # assignment, forced to run before the normalization
            pop_mean_new = pop_mean * self._momentum + batch_mean * (1 - self._momentum)
            pop_var_new = pop_var * self._momentum + batch_var * (1 - self._momentum)
            with tf.control_dependencies([pop_mean.assign(pop_mean_new), pop_var.assign(pop_var_new)]):
                return tf.nn.batch_normalization(x, batch_mean, batch_var, offset, scale, epsilon)

        def population_statistics():
            return tf.nn.batch_normalization(x, pop_mean, pop_var, offset, scale, epsilon)

        return tf.cond(self._training, batch_statistics, population_statistics)
def call(self, x, state, scope=None):
h, step = state
with tf.variable_scope('gates'):
channels = x.shape[-1].value
m = 2 * self._filters
kernel_x = tf.get_variable('kernel_x', self._kernel + [channels, m])
kernel_h = tf.get_variable('kernel_h', self._kernel + [self._filters, m])
rux = tf.nn.convolution(x, kernel_x, 'SAME')
ruh = tf.nn.convolution(h, kernel_h, 'SAME')
rux = self._batch_norm(rux, 'rux', step)
ruh = self._batch_norm(ruh, 'ruh', step)
if self._use_bias:
bias = tf.get_variable('bias_r', [m], initializer=tf.ones_initializer())
ru = tf.nn.bias_add(rux + ruh, bias)
| |
0, -1, -1, 3, -1, -1, 9, -1, -1, -1, 47, -1, -1, 50,
-1, 10, -1, -1, -1, 46, -1, -1, 49, -1],
[-1, 46, -1, -1, 49, -1, -1, 52, -1, -1, 45, -1, -1, 48, -1, -1, 51,
-1, 38, -1, -1, 41, -1, -1, 44, -1, -1],
[-1, 47, -1, -1, 50, -1, -1, 53, -1, -1, 46, -1, -1, 49, -1, -1, 52,
-1, -1, 45, -1, -1, 48, -1, -1, 51, -1],
[ 0, -1, -1, 3, -1, -1, 6, -1, -1, -1, 47, -1, -1, 50, -1, -1, 53,
-1, -1, 46, -1, -1, 49, -1, -1, 52, -1],
[-1, 49, -1, -1, 52, -1, 34, -1, -1, -1, 48, -1, -1, 51, -1, 35, -1,
-1, 41, -1, -1, 44, -1, -1, -1, -1, -1],
[-1, 50, -1, -1, 53, -1, 33, -1, -1, -1, 49, -1, -1, 52, -1, 34, -1,
-1, -1, 48, -1, -1, 51, -1, 35, -1, -1],
[ 3, -1, -1, 6, -1, -1, -1, -1, -1, -1, 50, -1, -1, 53, -1, 33, -1,
-1, -1, 49, -1, -1, 52, -1, 34, -1, -1]])
def starting_model3d():
    """
    Build and return a new neural network using the current model architecture.

    Returns a compiled two-headed Keras model: a softmax policy over the 12
    cube moves and a sigmoid scalar value, sharing a residual trunk of
    sticker-graph convolutions over the (54, 6) one-hot cube state.
    """
    import numpy as np
    from keras.models import Model
    from keras.layers import Conv2D, Input, BatchNormalization, Dense, Flatten, Activation, add, Lambda, Reshape
    from keras.optimizers import Adam
    from keras.losses import categorical_crossentropy
    from keras.regularizers import l2
    import keras.backend as K
    import tensorflow as tf

    # Remap the "no neighbor" sentinel (-1) to index 54, the zero row added
    # by padding below.  NOTE(review): this mutates the module-level
    # ``neighbors`` array in place (idempotent on repeated calls).
    neighbors[neighbors == -1] = 54

    def special_cube_conv(in_tensor, filter_size):
        """
        Takes in a None (samples) x 54 x ? (filters) tensor.
        Performs a graph convolution over the 54 stickers: each sticker is
        convolved with its 27-entry neighborhood from ``neighbors``.
        To speed things up, it actually does the folowing:
         - pads the end with a zero (in the last dimension):
             None (samples) x 55 x ? (filters) (neighbors)
         - align neighbors to get an output of dim:
             None (samples) x 54 x 27 x ? (filters) (neighbors)
         - 2d convolution with filter (1, 27) and no padding to get an output of dim:
             None (samples) x 54 x filter_size
        """
        # pad (output dim: None x 55 x ?); row 54 is all zeros and stands in
        # for "no neighbor"
        padded = Lambda(lambda x: K.temporal_padding(x, (0, 1)))(in_tensor)  # just pad end
        # align neighbors (output dim: None x 54 x 27 x ?)
        #aligned = K.gather(padded, neighbors)
        #aligned = padded[ neighbors[np.newaxis].astype(np.int32), :]
        aligned = Lambda(lambda x: tf.gather(x, neighbors, axis=1))(padded)
        # 2D convolution in one axis (output dim: None x 54 x 1 x filter_size)
        conv = Conv2D(filter_size, kernel_size=(1, 27),
                      strides=(1, 1),
                      padding='valid',
                      data_format="channels_last",
                      kernel_regularizer=l2(0.001),
                      bias_regularizer=l2(0.001))(aligned)
        # squeeze out the singleton axis (output dim: None x 54 x filter_size)
        out_tensor = Lambda(lambda x: K.squeeze(x, axis=2))(conv)
        return out_tensor

    def conv_block(in_tensor, filter_size):
        # conv -> batch norm -> ReLU
        # NOTE(review): axis=1 normalizes over the 54 sticker positions of a
        # (None, 54, filters) tensor, not the channel axis -- confirm intended.
        conv = special_cube_conv(in_tensor, filter_size)
        batch = BatchNormalization(axis=1)(conv)
        relu = Activation('relu')(batch)
        return relu

    def residual_block(in_tensor, filter_size):
        # two conv/BN stages with an identity skip connection
        conv1 = special_cube_conv(in_tensor, filter_size)
        batch1 = BatchNormalization(axis=1)(conv1)
        relu1 = Activation('relu')(batch1)
        conv2 = special_cube_conv(relu1, filter_size)
        batch2 = BatchNormalization(axis=1)(conv2)
        combine = add([batch2, in_tensor])
        relu = Activation('relu')(combine)
        return relu

    def policy_block(in_tensor, filter_size, hidden_size):
        # softmax over the 12 quarter-turn moves
        conv = conv_block(in_tensor, filter_size=filter_size)
        flat = Flatten()(conv)
        hidden = Dense(hidden_size, activation='relu',
                       kernel_regularizer=l2(0.001),
                       bias_regularizer=l2(0.001))(flat)
        output = Dense(12, activation='softmax',
                       kernel_regularizer=l2(0.001),
                       bias_regularizer=l2(0.001),
                       name='policy_output')(hidden)
        return output

    def value_block(in_tensor, filter_size, hidden_size):
        # scalar state value in (0, 1)
        conv = conv_block(in_tensor, filter_size=filter_size)
        flat = Flatten()(conv)
        hidden = Dense(hidden_size, activation='relu',
                       kernel_regularizer=l2(0.001),
                       bias_regularizer=l2(0.001))(flat)
        output = Dense(1, activation='sigmoid',
                       kernel_regularizer=l2(0.001),
                       bias_regularizer=l2(0.001),
                       name='value_output')(hidden)
        return output

    # the network
    state_input = Input(shape=(54, 6), name='state_input')

    # convolutional
    block = conv_block(state_input, filter_size=64)

    # multiple residuals
    block = residual_block(block, filter_size=64)
    block = residual_block(block, filter_size=64)
    block = residual_block(block, filter_size=64)
    block = residual_block(block, filter_size=64)

    # policy head
    policy_output = policy_block(block, filter_size=64, hidden_size=64)

    # value head
    value_output = value_block(block, filter_size=64, hidden_size=64)

    # combine
    model = Model(inputs=state_input, outputs=[policy_output, value_output])
    model.compile(loss={'policy_output': categorical_crossentropy,
                        'value_output': 'mse'},
                  loss_weights={'policy_output': 1., 'value_output': 1.},
                  optimizer=Adam(lr=.001))

    return model
def starting_model2d():
    """
    Build and return a new neural network using the current model architecture.

    Smaller sibling of ``starting_model3d``: a compiled two-headed Keras
    model (12-way softmax policy, sigmoid scalar value) over a residual
    trunk of sticker-graph convolutions, with 32 filters and 2 residual
    blocks instead of 64/4.
    """
    import numpy as np
    from keras.models import Model
    from keras.layers import Conv2D, Input, BatchNormalization, Dense, Flatten, Activation, add, Lambda, Reshape
    from keras.optimizers import Adam
    from keras.losses import categorical_crossentropy
    from keras.regularizers import l2
    import keras.backend as K
    import tensorflow as tf

    # Remap the "no neighbor" sentinel (-1) to index 54, the zero row added
    # by padding below.  NOTE(review): mutates the module-level ``neighbors``
    # array in place (idempotent on repeated calls).
    neighbors[neighbors == -1] = 54

    def special_cube_conv(in_tensor, filter_size):
        """
        Takes in a None (samples) x 54 x ? (filters) tensor and performs a
        graph convolution over the 54 stickers: each sticker is convolved
        with its 27-entry neighborhood from ``neighbors``.
         - pad the end with a zero row: None x 55 x ?
         - gather neighbors: None x 54 x 27 x ?
         - (1, 27) valid convolution: None x 54 x 1 x filter_size
         - squeeze: None x 54 x filter_size
        """
        # pad (output dim: None x 55 x ?); row 54 stands in for "no neighbor"
        padded = Lambda(lambda x: K.temporal_padding(x, (0, 1)))(in_tensor)  # just pad end
        # align neighbors (output dim: None x 54 x 27 x ?)
        aligned = Lambda(lambda x: tf.gather(x, neighbors, axis=1))(padded)
        # 2D convolution in one axis (output dim: None x 54 x 1 x filter_size)
        conv = Conv2D(filter_size, kernel_size=(1, 27),
                      strides=(1, 1),
                      padding='valid',
                      data_format="channels_last",
                      kernel_regularizer=l2(0.001),
                      bias_regularizer=l2(0.001))(aligned)
        # squeeze out the singleton axis (output dim: None x 54 x filter_size)
        out_tensor = Lambda(lambda x: K.squeeze(x, axis=2))(conv)
        return out_tensor

    def conv_block(in_tensor, filter_size):
        # conv -> batch norm -> ReLU
        conv = special_cube_conv(in_tensor, filter_size)
        batch = BatchNormalization(axis=1)(conv)
        relu = Activation('relu')(batch)
        return relu

    def residual_block(in_tensor, filter_size):
        # two conv/BN stages with an identity skip connection
        conv1 = special_cube_conv(in_tensor, filter_size)
        batch1 = BatchNormalization(axis=1)(conv1)
        relu1 = Activation('relu')(batch1)
        conv2 = special_cube_conv(relu1, filter_size)
        batch2 = BatchNormalization(axis=1)(conv2)
        combine = add([batch2, in_tensor])
        relu = Activation('relu')(combine)
        return relu

    def policy_block(in_tensor, filter_size, hidden_size):
        # BUG FIX: previously convolved the closed-over outer ``block``
        # variable with a hard-coded filter_size=32, silently ignoring this
        # function's parameters.  It only worked because the sole caller
        # happened to pass that same tensor and size.
        conv = conv_block(in_tensor, filter_size=filter_size)
        flat = Flatten()(conv)
        hidden = Dense(hidden_size, activation='relu',
                       kernel_regularizer=l2(0.001),
                       bias_regularizer=l2(0.001))(flat)
        output = Dense(12, activation='softmax',
                       kernel_regularizer=l2(0.001),
                       bias_regularizer=l2(0.001),
                       name='policy_output')(hidden)
        return output

    def value_block(in_tensor, filter_size, hidden_size):
        # BUG FIX: same closure bug as policy_block (see above).
        conv = conv_block(in_tensor, filter_size=filter_size)
        flat = Flatten()(conv)
        hidden = Dense(hidden_size, activation='relu',
                       kernel_regularizer=l2(0.001),
                       bias_regularizer=l2(0.001))(flat)
        output = Dense(1, activation='sigmoid',
                       kernel_regularizer=l2(0.001),
                       bias_regularizer=l2(0.001),
                       name='value_output')(hidden)
        return output

    # the network
    state_input = Input(shape=(54, 6), name='state_input')

    # convolutional
    block = conv_block(state_input, filter_size=32)

    # 2 residuals
    block = residual_block(block, filter_size=32)
    block = residual_block(block, filter_size=32)

    # policy head
    policy_output = policy_block(block, filter_size=32, hidden_size=32)

    # value head
    value_output = value_block(block, filter_size=32, hidden_size=32)

    # combine
    model = Model(inputs=state_input, outputs=[policy_output, value_output])
    model.compile(loss={'policy_output': categorical_crossentropy,
                        'value_output': 'mse'},
                  loss_weights={'policy_output': 1., 'value_output': 1.},
                  optimizer=Adam(lr=.001))

    return model
import threading
import queue
class Task:
    """A single request/response handed to the batch worker.

    Attributes:
        lock: condition variable the submitting thread waits on.
        input: model input supplied by the submitter.
        output: result published by the worker (None until ready).
    """

    def __init__(self):
        self.input = None
        self.output = None
        self.lock = threading.Condition()
class BatchProcessHelper:
    """Thread-safe holder for the worker's current batch size (default 5)."""

    def __init__(self):
        self.lock = threading.RLock()
        self._batch_size = 5

    def get_batch_size(self):
        """Return the current batch size."""
        with self.lock:
            return self._batch_size

    def decrement_batch_size(self):
        """Shrink the batch size by one."""
        with self.lock:
            self._batch_size -= 1

    def set_batch_size(self, batch_size):
        """Replace the batch size."""
        with self.lock:
            self._batch_size = batch_size
# Shared FIFO handing Task objects from get_value() callers to batch_process().
input_queue = queue.Queue()
def get_value(input_value):
    """Submit ``input_value`` to the batch worker and block for its result.

    Wraps the input in a ``Task``, enqueues it for ``batch_process`` and
    waits on the task's condition variable until the worker publishes
    ``task.output`` and notifies.

    :param input_value: single model input (the worker batches it with others).
    :return: the worker's output for this input (a ``[policy, value]`` pair).
    """
    task = Task()
    task.input = input_value
    with task.lock:
        input_queue.put(task)  # enqueue while holding the lock: no lost wakeup
        # wait_for() re-checks the predicate after every wakeup, so a
        # spurious wakeup can no longer return before the worker has
        # actually set task.output (a bare wait() could).
        task.lock.wait_for(lambda: task.output is not None)
    return task.output
def batch_process(get_output, batch_process_helper):
    """Worker loop: batch tasks from ``input_queue`` through ``get_output``.

    Accumulates tasks until ``batch_process_helper.get_batch_size()`` of them
    are pending, runs the model once on the stacked inputs, then publishes a
    ``[policy, value]`` pair on each task and notifies its waiter.
    Runs forever; intended for a daemon thread.

    :param get_output: callable mapping ``[batch_array, learning_phase]`` to
        ``(policies, values)`` (built from K.function by the caller).
    :param batch_process_helper: BatchProcessHelper supplying the batch size.
    """
    import numpy as np
    task_list = []
    while True:
        # retrieve items from the queue (blocks until a task arrives)
        task = input_queue.get()
        task_list.append(task)
        # NOTE(review): tasks wait until a full batch accumulates; if fewer
        # than batch_size callers are ever waiting, they starve -- confirm
        # callers always arrive in complete batches.
        if len(task_list) >= batch_process_helper.get_batch_size():
            # stack the single-sample inputs into one batch
            array = np.array([task.input.squeeze(axis=0) for task in task_list])
            policies, values = get_output([array, 0])  # 0 -> K.learning_phase() test mode
            for p, v, task in zip(policies, values, task_list):
                with task.lock:
                    task.output = [p, v]
                    task.lock.notify()  # mark as being complete
            task_list = []
if __name__ == '__main__':
import time
from keras import backend as K
model = starting_model3d()
get_output = K.function([model.input, K.learning_phase()], [model.output[0], model.output[1]])
batch_process_helper = BatchProcessHelper()
worker = threading.Thread(target=batch_process, args=(get_output, batch_process_helper,))
worker.daemon = True
worker.start()
# just use random data
inputs = np.random.choice(2, size=(2**10, 54, 6), p=[48/54, 6/54]).astype(bool)
for i in range(11):
batch_size | |
def __call__(self, obj_agent):
    """Return self unchanged; agent-id bookkeeping is disabled.

    NOTE(review): ``obj_agent`` is currently unused -- the commented line
    suggests it once recorded the agent's environment id.
    """
    # self._current_agent_id = obj_agent._id2env
    return self
class fx(object):
    # Leg registry filled by setLeg() (keyed by symbol) and attachToLeg()
    # (keyed by leg index).
    legs = {}
    # Simulated exchange connectivity; returned by isOnline().
    online = True
    # Configuration file name returned by getConfigFile().
    config_file = 'twap.conf'
    # NOTE(review): not referenced in the code visible here -- possibly stale.
    now_val = 0
    # agent-id -> {'checked', 'trade', 'book', 'other'} callback registry.
    pending_callbacks = {}
    # agent-id -> set of symbols with custom market callbacks (see add()).
    symbols_callbacks = {}
    # agent-id -> set of SchaduleInfos created by every()/at().
    time_callbacks = {}
    # Epoch seconds of the session anchor; lazily computed by now().
    initial_time = 0
    # agent-id -> last non-default trade callback registered via add().
    trade_callback_used = {}
@staticmethod
def now(b_str=False, b_ts=False, b_old=False):
    """Return the current simulation time.

    :param b_str: return the raw order-matching timestamp string.
    :param b_ts: return the order-matching timestamp parsed to epoch seconds
        (0 if no timestamp is available yet).
    :param b_old: return the raw simulation float time.
    :return: by default, epoch seconds anchored at 02:00:00 of the session
        date (cached in ``fx.initial_time`` on first use) plus the
        simulation float time.
    """
    if b_ts:
        # millisecond-precision prefix: 'YYYY-MM-DD HH:MM:SS.mmm'
        s_ts = ENV.order_matching.s_time[:23]
        if not s_ts:
            return 0
        fx_now = (datetime.datetime.strptime(s_ts, '%Y-%m-%d %H:%M:%S.%f'))
        fx_now = (fx_now-datetime.datetime(1970, 1, 1)).total_seconds()
        return fx_now
    if b_str:
        return ENV.order_matching.s_time
    if not fx.initial_time:
        # lazily anchor the session start at 02:00:00 of the current date
        s_ts = ENV.order_matching.s_time[:10]
        s_ts += ' 02:00:00.000'
        fx_now = (datetime.datetime.strptime(s_ts, '%Y-%m-%d %H:%M:%S.%f'))
        fx_now = (fx_now-datetime.datetime(1970, 1, 1)).total_seconds()
        fx.initial_time = fx_now
    if b_old:
        return ENV.order_matching.f_time
    return fx.initial_time + ENV.order_matching.f_time
@staticmethod
def broadcast(s_msg):
    """Print a broadcast message; falsy/empty messages are dropped."""
    if not s_msg:
        return
    print(s_msg)
@staticmethod
def notify(eventtype, s_msg):
    """Print a notification message; falsy/empty messages are dropped.

    :param eventtype: NotificationEvent (unused in the simulation).
    :param s_msg: string.
    """
    if not s_msg:
        return
    print(s_msg)
@staticmethod
def getConfigFile():
    """Return the configured strategy configuration file name."""
    return fx.config_file
@staticmethod
def configureOrder(symbol, leg_number, order, client=None):
    '''
    First step in the process of sending an order.
    It sets some fields before further processing.
    :param symbol: string. Instrument symbol stamped on the order.
    :param leg_number: leg index stamped on the order.
    :param order: Order memory object mutated in place.
    :param client: string. Used just in neutrinogym; left untouched when None.
    :return: None
    '''
    order.symbol = symbol
    order.leg_number = leg_number
    if client:
        order.client = client

# Example setLeg() call and the C++ signature it mirrors:
# symbol, 0, 3000.0, 3500.0, 100, '%06.1f', '%03d'
# const std::string &name, size32_t leg, double pxmin, double pxmax,
# quant_t qmax, const char *px_mask, const char *qty_mask
@staticmethod
def setLeg(symbol, legid, pmin, pmax, qmax, px_mask, qty_mask):
    '''
    Called only once per process, the `setLeg` method is used to setup all
    the symbols that will be used in the subsequent calls. Typically
    `setLeg` is called at system startup retrieving its parameters from
    some configuration file. Internally the API will allocate a transaction
    manager for every leg that will be used to send actual order requests.
    Currently there is a maximum of three legs, and an exception will be
    thrown in if the leg index falls outside this range.
    :param symbol: String identifying the symbol this leg will manage.
    :param legid: Leg index for this symbol.
    :param pmin: Minimum price for this symbol. Order requests out of the
        range [pmin-pmax] will be rejected.
    :param pmax: Maximum price for this symbol.
    :param qmax: Max quantity for this symbol.
    :param px_mask: A printf style mask used to format the prices for this
        symbol when sending orders.
    :param qty_mask: A printf style mask used to format quantities for this
        symbol when sending orders.
    :return: None
    '''
    # Bug fix: the original assigned ``fx.legs[symbol] = {}`` first, which
    # wiped any leg previously registered under the same symbol.  Merge the
    # new leg into the existing per-symbol dict instead.
    fx.legs.setdefault(symbol, {})[legid] = {
        'pmin': pmin, 'pmax': pmax, 'qmax': qmax,
        'px_mask': px_mask, 'qty_mask': qty_mask}
@staticmethod
def attachToLeg(order, legs_ix):
    '''
    Before actually sending any order, one has to 'attach' it to a
    transaction. `attachToLeg` does just that. The API maintains an array
    of 'legs', every symbol has its own leg. For single-symbol algorithms,
    `leg_index` is always 0.
    :param order: Reference to an Order memory object
    :param legs_ix: leg index in the internal array.
    :return: None
    '''
    # NOTE(review): this stores by integer leg index in the same dict that
    # setLeg() keys by symbol string; the two schemes only avoid colliding
    # because the key types differ -- confirm this is intentional.
    fx.legs[legs_ix] = order
def setLookup(self, symbol):
    # No-op placeholder in the simulation.
    pass
@staticmethod
def isOnline(bookname):
    # Simulation always reports the class-level flag, regardless of book.
    return fx.online
@staticmethod
def quit(i_id=11):
    """Stop the simulation and invoke the agent's ``finalize`` hook, if any.

    :param i_id: integer agent id (simulation only).
    """
    ENV.done = True
    this_agent = ENV.agents_actions[i_id].owner
    # unwrap one level of agent wrapper, if present
    if hasattr(this_agent, 'agent'):
        this_agent = this_agent.agent
    if hasattr(this_agent, 'finalize'):
        this_agent.finalize(QuitReason.USER_QUIT)
@staticmethod
def cancel(order_mem):
    '''
    Delivers a cancel request by queueing it on the owning agent's
    action list.
    :param order_mem: reference to a Order memory object.
    :return: True (the request is always accepted for queuing).
    '''
    ENV.agents_actions[order_mem.client].append_msg(('cancel', order_mem))
    return True
@staticmethod
def send(order_mem):
    '''
    Delivers a send request by queueing it on the owning agent's
    action list.
    :param order_mem: reference to a Order memory object.
    :return: True (the request is always accepted for queuing).
    '''
    # NOTE: the next.status, before send(), is IDLE. After send, is PENDING
    order_mem.next.status = FIXStatus.PENDING
    ENV.agents_actions[order_mem.client].append_msg(('new', order_mem))
    return True
@staticmethod
def book(symbol):
    '''
    Return an Instance of Book class related to the symbol passed
    :param symbol: String identifying the symbol this leg will manage
    :return: Book object
    '''
    # TODO: change the return to be a neutrino book object
    return ENV.get_order_book(symbol, False)
# @staticmethod
# def get_book(symbol):
# '''
# Return an Instance of Book class related to the symbol passed
# :param symbol: String identifying the symbol this leg will manage
# :return: Book objebct
# '''
# # TODO: change the return to be a neutrino book object
# return ENV.get_order_book(symbol, False)
@staticmethod
def getTrades(book_obj, b_from_candles=False):
    '''
    Return an Instance of TradeBuffer class related to the book passed
    :param book_obj: neutrino Book.
    :*param b_from_candles: boolean. Only exist in simulation
    :return: TradeBuffer object
    '''
    return ENV.get_last_trades(book_obj, b_from_candles)
# @staticmethod
# def get_trades(book_obj, b_from_candles=False):
# '''
# Return an Instance of TradeBuffer class related to the book passed
# :param book_obj: neutrino Book.
# :*param b_from_candles: boolean. Only exist in simulation
# :return: TradeBuffer objebct
# '''
# return ENV.get_last_trades(book_obj, b_from_candles)
@staticmethod
def getSummary(book_obj, b_from_candles=False):
    '''
    Return an Instance of Summary class related to the book passed,
    built from the book's per-side order counters.
    :param book_obj: neutrino Book.
    :*param b_from_candles: boolean. Only exist in simulation
    :return: Summary object
    '''
    obj_rtn = Summary()
    obj_rtn.bidCount = book_obj.get_counts('BID', 'Total')
    obj_rtn.askCount = book_obj.get_counts('ASK', 'Total')
    obj_rtn.statusChanged = 0  # is 1 only when the book status changes
    # NOTE: fileds presented just in simulation (for now)
    obj_rtn.newBidOrders = book_obj.get_counts('BID', 'New')
    obj_rtn.canceledBidOrders = book_obj.get_counts('BID', 'Canceled')
    obj_rtn.replacedBidOrders = book_obj.get_counts('BID', 'Replaced')
    obj_rtn.newAskOrders = book_obj.get_counts('ASK', 'New')
    obj_rtn.canceledAskOrders = book_obj.get_counts('ASK', 'Canceled')
    obj_rtn.replacedAskOrders = book_obj.get_counts('ASK', 'Replaced')
    # trades are counted from the bid side: partial + full fills
    i_aux = book_obj.get_counts('BID', 'Partially Filled')
    i_aux += book_obj.get_counts('BID', 'Filled')
    obj_rtn.tradeCountIncrement = i_aux
    # NOTE: As I just sent the incremental, the tradeCount is always the
    # size of the TradeBuffer. However, in the production, it should be
    # used to calculate the index to iterate in the trading list
    if b_from_candles:
        obj_rtn.tradeCount = len(book_obj.last_trades_aux)
    else:
        obj_rtn.tradeCount = len(book_obj.last_trades)
    return obj_rtn
@staticmethod
def add(symbol, trade_callback='default', book_callback='default',
        trade_buffer_size=64, i_id=11):
    '''
    Create new callbacks to the symbol's book and trades updates.
    'default' routes updates to the agent's on_data method; an explicit
    callable installs a custom handler; None/falsy disables that channel.
    :param symbol: string.
    :param trade_callback: function.
    :param book_callback: function.
    :param trade_buffer_size: int. (currently unused here)
    :param i_id: integer agent id. Only valid to simulations
    :return: InstrumentRegister for the symbol.
    '''
    s_err = '[neutrino error] Symbol %s is not registered' % symbol
    assert symbol in ENV.l_instrument, s_err
    d_pcbacks = fx.pending_callbacks
    d_scbacks = fx.symbols_callbacks
    # lazily create the per-agent registries
    if i_id not in d_scbacks:
        d_scbacks[i_id] = set()
        fx.trade_callback_used[i_id] = None
    if i_id not in d_pcbacks:
        d_pcbacks[i_id] = init_pending_cbacks()
    this_agent = ENV.agents_actions[i_id].owner
    if hasattr(ENV.agents_actions[i_id].owner, 'agent'):
        this_agent = ENV.agents_actions[i_id].owner.agent
    # force re-registration of the pending callbacks
    d_pcbacks[i_id]['checked'] = False
    if trade_callback and trade_callback != 'default':
        # custom trade handler: remember the symbol and the callback
        d_scbacks[i_id].add(symbol)
        d_pcbacks[i_id]['trade'].append([symbol, trade_callback])
        fx.trade_callback_used[i_id] = trade_callback
        # self.last_trades = TradeBuffer()
        # self.last_trades_aux = TradeBuffer()
    elif trade_callback == 'default':
        d_pcbacks[i_id]['trade'].append([symbol, this_agent.on_data])
    if book_callback and book_callback != 'default':
        d_scbacks[i_id].add(symbol)
        d_pcbacks[i_id]['book'].append([symbol, book_callback])
    elif book_callback == 'default':
        d_pcbacks[i_id]['book'].append([symbol, this_agent.on_data])
    # NOTE: use fx.online here is not quite right, but it is OK for sim
    return InstrumentRegister(symbol, b_ready=fx.isOnline(symbol))
    @staticmethod
    def remove(symbol_propty, i_id=11):
        '''
        Unregister the book/trade callbacks attached to an instrument.

        :param symbol_propty: instrument register object; its ``_s_instr``
            attribute holds the symbol string.
        :param i_id: integer. Agent ID; only valid in simulations.
        '''
        symbol = symbol_propty._s_instr
        # remove from fx
        d_pcbacks = fx.pending_callbacks
        d_scbacks = fx.symbols_callbacks
        if i_id in d_scbacks and i_id in d_pcbacks:
            # drop every pending market-data callback for this agent
            d_pcbacks[i_id]['trade'] = []
            d_pcbacks[i_id]['book'] = []
            # if no scheduled ('other') callbacks remain, discard the entry
            if not len(d_pcbacks[i_id]['other']):
                d_pcbacks.pop(i_id)
            if symbol in d_scbacks[i_id]:
                d_scbacks[i_id].remove(symbol)
        # remove from environment
        ENV.remove_callback(trigger=Source.MARKET, i_id=i_id,
                            s_instr=symbol)
        s_err = '[neutrino info] Symbol %s removed' % symbol
        print(s_err)
@staticmethod
def get(symbol, i_id=11):
return InstrumentRegister(symbol)
    @staticmethod
    def every(name, interval, callback, i_id=11):
        '''
        Schedule a new callback to run every interval specified.

        :param name: string. Identifier for the scheduled job.
        :param interval: neutrino.Interval object.
        :param callback: function. Invoked on each tick of the interval.
        :param i_id: integer. Agent ID; only valid in simulations.
        :return: the ``ScheduledFunction`` wrapping ``callback``.
        '''
        # initialize callbacks dict, if needed
        d_pcbacks = fx.pending_callbacks
        d_tcbacks = fx.time_callbacks
        if i_id not in d_tcbacks:
            d_tcbacks[i_id] = set()
        if i_id not in d_pcbacks:
            d_pcbacks[i_id] = init_pending_cbacks()
        # append the callback
        schdl = SchaduleInfos(name)
        schdl.kind = 'every'
        schdl.every = interval
        d_tcbacks[i_id].add(schdl)
        # force the engine to (re)process the pending callbacks
        d_pcbacks[i_id]['checked'] = False
        d_pcbacks[i_id]['other'].append([schdl, callback])
        obj_rtn = ScheduledFunction(
            function=callback, s_name=name, i_interval=interval)
        # back-reference so the schedule can expose its public wrapper
        schdl._scheduled_obj = obj_rtn
        return obj_rtn
@staticmethod
def at(name, hour, minute, callback, i_id=11):
'''
Schedule a new callback to run at the time specified
:param name: string.
:param hour: integer.
:param minute: integer.
:param callback: function.
:param agent_id: integer. Only valid to simulations
'''
# initialize callbacks dict, if needed
d_pcbacks = fx.pending_callbacks
d_tcbacks = fx.time_callbacks
if i_id not in d_tcbacks:
d_tcbacks[i_id] = set()
if i_id not in d_pcbacks:
d_pcbacks[i_id] = init_pending_cbacks()
# append the callback
schdl = SchaduleInfos(name)
schdl.kind = 'at'
schdl.at = hour * 60**2 + minute * 60
d_tcbacks[i_id].add(schdl)
d_pcbacks[i_id]['checked'] = False
d_pcbacks[i_id]['other'].append([schdl, callback])
obj_rtn = ScheduledFunction(
function=callback, s_name=name, i_hour=hour, i_minute=minute)
schdl._scheduled_obj = obj_rtn
return obj_rtn
@staticmethod
def remove_schedule(name, i_id=11):
'''
Remove
:param name: string.
:param | |
<reponame>MarkusShepherd/flamme-rouge
# -*- coding: utf-8 -*-
""" tracks """
import logging
import re
from collections import deque
from typing import (
TYPE_CHECKING,
Any,
Deque,
Generator,
Iterable,
Iterator,
Optional,
Tuple,
Type,
Union,
cast,
overload,
)
from .cards import Card
from .utils import class_from_path, window
if TYPE_CHECKING:
# pylint: disable=cyclic-import,unused-import
from .teams import Cyclist
LOGGER = logging.getLogger(__name__)
CLASS_REGEX = re.compile(r"[^\w.]+")
class Section:
    """ section on the track """
    # character width of one lane in the ASCII rendering of __str__
    LANE_STR_WIDTH = 20
    def __init__(
        self,
        position: int,
        lanes: int = 2,
        slipstream: bool = True,
        min_speed: Optional[int] = None,
        max_speed: Optional[int] = None,
    ) -> None:
        self.position = position
        self.lanes = lanes
        self.slipstream = slipstream
        self.min_speed = min_speed
        self.max_speed = max_speed
        # bounded by lane count; insertion order doubles as lane assignment
        self._cyclists: Deque["Cyclist"] = deque(maxlen=lanes)
    @property
    def cyclists(self) -> Tuple["Cyclist", ...]:
        """ cyclists currently in this section, as an immutable tuple """
        return tuple(self._cyclists)
    @property
    def empty(self) -> bool:
        """ true if section is empty """
        return not self._cyclists
    @property
    def full(self) -> bool:
        """ true if section is filled to capacity """
        return len(self._cyclists) >= self.lanes
    def add_cyclist(self, cyclist: "Cyclist") -> bool:
        """ add a rider to the section; False when no lane is free """
        if self.full:
            return False
        self._cyclists.append(cyclist)
        # keep the rider's back-reference in sync with the occupancy list
        cyclist.section = self
        return True
    def remove_cyclist(self, cyclist: "Cyclist") -> bool:
        """ remove a rider from this section; True if it was present """
        try:
            self._cyclists.remove(cyclist)
            # NOTE: the ``finally`` clause below still runs before this
            # True is delivered to the caller
            return True
        except ValueError:
            # rider was not occupying this section
            pass
        finally:
            # always clear the back-reference if it points at this section
            if cyclist.section == self:
                cyclist.section = None
        return False
    def lane(self, cyclist: "Cyclist") -> Optional[int]:
        """ lane number for the given cyclist, or None if not present """
        for lane, occupant in enumerate(self._cyclists):
            if cyclist == occupant:
                return lane
        return None
    def reset(self) -> "Section":
        """ reset this section to empty; returns self for chaining """
        self._cyclists = deque(maxlen=self.lanes)
        LOGGER.debug("position: %d, cyclists: %s", self.position, self.cyclists)
        return self
    def __str__(self) -> str:
        # total inner width: one lane width per lane plus separators
        total = (self.LANE_STR_WIDTH + 1) * self.lanes - 1
        # split the remaining width evenly around the 5-char position label
        left = (total - 5) // 2
        right = total - left - 5
        top = "+" + "-" * left + f" {self.position:3d} " + "-" * right + "+"
        if not self.slipstream:
            top += " 🚫"
        lane_str = f" {{:{self.LANE_STR_WIDTH - 2}s}} "
        cyclists = tuple(map(str, self.cyclists))
        # pad with empty strings so unoccupied lanes still render
        cyclists += ("",) * (self.lanes - len(self._cyclists))
        # TODO format correctly without messing up colors
        # lane_str.format(str(cyclist)[:self.LANE_STR_WIDTH - 2]) for cyclist in cyclists)
        cyclists = tuple(map(lane_str.format, cyclists))
        middle = "|".join(("",) + cyclists + ("",))
        if self.max_speed is not None:
            middle = f"{middle} ≤{self.max_speed}"
        bottom = "+" + "-" * total + "+"
        if self.min_speed is not None:
            bottom = f"{bottom} ≥{self.min_speed}"
        return "\n".join((top, middle, bottom))
class Section3(Section):
    """A three-lane variant of the plain track section."""

    def __init__(self, position: int) -> None:
        # identical to Section except for the widened lane count
        super().__init__(position, lanes=3)
class Finish(Section):
    """Section at/after the finish line; slipstreaming no longer applies."""

    def __init__(self, position: int) -> None:
        super().__init__(position, slipstream=False)
class Finish3(Section):
    """Three-lane finish section without slipstream."""

    def __init__(self, position: int) -> None:
        super().__init__(position, lanes=3, slipstream=False)
class MountainUp(Section):
    """Ascent section: no slipstream, speed capped at 5."""

    def __init__(self, position: int) -> None:
        super().__init__(position, slipstream=False, max_speed=5)
class MountainDown(Section):
    """Descent section: riders move at a minimum speed of 5."""

    def __init__(self, position: int) -> None:
        super().__init__(position, min_speed=5)
class Supply(Section):
    """Supply-zone section: three lanes, minimum speed of 4."""

    def __init__(self, position: int) -> None:
        super().__init__(position, lanes=3, min_speed=4)
class Cobblestone1(Section):
    """Single-lane cobblestone section without slipstream."""

    def __init__(self, position: int) -> None:
        super().__init__(position, lanes=1, slipstream=False)
class Cobblestone2(Section):
    """Two-lane cobblestone section without slipstream."""

    def __init__(self, position: int) -> None:
        super().__init__(position, slipstream=False)
class Track:
    """ track """
    def __init__(
        self,
        name: str,
        sections: Iterable[Section],
        start: int = 5,
        finish: int = -5,
        min_players: int = 3,
        max_players: int = 4,
    ) -> None:
        """Create a track.

        :param name: display name of the track
        :param sections: ordered sections making up the track
        :param start: number of leading sections forming the starting grid
        :param finish: index of the first finish section; a non-positive
            value counts back from the end of the track
        :param min_players: minimum number of teams
        :param max_players: maximum number of teams
        """
        self.name = name
        self.sections = tuple(sections)
        self.start = start
        # normalize a non-positive finish offset into an absolute index
        self.finish = finish if finish > 0 else len(self) + finish
        self.min_players = min_players
        self.max_players = max_players
    def __len__(self) -> int:
        return len(self.sections)
    # pylint: disable=function-redefined
    @overload
    def __getitem__(self, key: int) -> Section:
        pass
    @overload
    def __getitem__(self, key: slice) -> Tuple[Section, ...]:
        pass
    def __getitem__(self, key):
        return self.sections[key]
    def __iter__(self) -> Iterator[Section]:
        return iter(self.sections)
    def __reversed__(self) -> Iterator[Section]:
        return reversed(self.sections)
    @property
    def available_start(self) -> Tuple[Section, ...]:
        """ starting-grid sections that still have a free lane """
        return tuple(
            section for section in self.sections[: self.start] if not section.full
        )
    def cyclists(self) -> Generator["Cyclist", None, None]:
        """ generator of riders from first to last """
        # iterate sections from the finish backwards so leaders come first
        for section in reversed(self.sections):
            yield from section.cyclists
    def _move_cyclist(
        self, cyclist: "Cyclist", value: int, start: int, min_speed: bool = False,
    ) -> int:
        """ move one cyclist up to ``value`` sections ahead of ``start``;
        returns the position actually reached """
        # optionally enforce the minimum speed of the starting section
        min_speed_value = self.sections[start].min_speed
        value = (
            value
            if not min_speed or min_speed_value is None
            else max(value, min_speed_value)
        )
        # clip the move against any speed-limited section along the path
        for i, section in enumerate(self.sections[start : start + value + 1]):
            max_speed = section.max_speed
            if max_speed is None:
                continue
            if i > max_speed:
                # the limited section itself is out of reach -- stop short
                value = i - 1
                break
            value = min(value, max_speed)
        # try the furthest reachable section first, falling back one at a time
        for pos in range(min(start + value, len(self) - 1), start, -1):
            section = self.sections[pos]
            if section.add_cyclist(cyclist):
                if pos >= self.finish:
                    cyclist.finished = True
                return pos
        # every section ahead was full -- the cyclist does not move
        return start
    def move_cyclist(
        self, cyclist: "Cyclist", card: Union[Card, int], min_speed: bool = False,
    ) -> int:
        """ move cyclists; returns the number of sections moved """
        if isinstance(card, int):
            value = card
        elif cyclist.team is None:
            value = card.value_front
        else:
            # a card may be worth a different value when a teammate is ahead
            others = (c for c in cyclist.team.cyclists if c is not cyclist)
            value = (
                card.value_behind
                if any(c.ahead_of(cyclist, self) for c in others)
                else card.value_front
            )
        for pos, section in enumerate(self.sections):
            if cyclist not in section.cyclists:
                continue
            end = self._move_cyclist(
                cyclist=cyclist, value=value, start=pos, min_speed=min_speed
            )
            # only vacate the old section if the cyclist actually moved
            if pos != end:
                section.remove_cyclist(cyclist)
            return end - pos
        raise ValueError("something went wrong during movement")
    def do_slipstream(self) -> None:
        """ move cyclists through slipstream """
        while True:
            # look for the pattern occupied/empty/occupied across three
            # consecutive slipstream-capable sections
            for sec in window(self.sections, 3):
                if (
                    all(s.slipstream for s in sec)
                    and sec[0].cyclists
                    and sec[1].empty
                    and sec[2].cyclists
                ):
                    for cyclist in sec[0].cyclists:
                        LOGGER.info("🚴 <%s> receives slipstream", cyclist)
                        self.move_cyclist(cyclist, 1)
                    break  # start over to move cyclists at the end of the pack
            else:
                return  # all slipstreams done
    def do_exhaustion(self) -> None:
        """ add exhaustion cards """
        # a rider with an empty section directly ahead takes exhaustion
        for sec0, sec1 in window(self.sections[: self.finish + 1], 2):
            if sec1.empty:
                for cyclist in sec0.cyclists:
                    if not cyclist.team or cyclist.team.exhaustion:
                        LOGGER.info("🚴 <%s> gets exhausted", cyclist)
                        cyclist.discard(Card.EXHAUSTION)
    @property
    def leading(self) -> Optional["Cyclist"]:
        """ leading cyclist """
        return next(self.cyclists(), None)
    def non_empty(self) -> Generator[Section, None, None]:
        """ non-empty sections """
        for section in self.sections:
            if not section.empty:
                yield section
    def finished(self, all_cyclists: bool = False) -> bool:
        """ game finished """
        if all_cyclists:
            # finished only when nobody remains before the finish line
            return all(section.empty for section in self.sections[: self.finish])
        # finished as soon as anyone has crossed the finish line
        return any(not section.empty for section in self.sections[self.finish :])
    def reset(self) -> "Track":
        """ reset this track """
        for section in self.sections:
            section.reset()
        LOGGER.debug(
            "start: %d, finish: %d, available start: <%s>, finished: %s, track: %s",
            self.start,
            self.finish,
            ", ".join(str(s.position) for s in self.available_start),
            self.finished(),
            self,
        )
        return self
    def compare(self, cyclist_1: "Cyclist", cyclist_2: "Cyclist",) -> int:
        """ returns +1 if cyclist_1 is ahead else -1 """
        # self.cyclists() yields riders front-to-back, so whichever of the
        # two appears first is the one ahead
        for cyclist in self.cyclists():
            if cyclist == cyclist_1:
                return +1
            if cyclist == cyclist_2:
                return -1
        raise RuntimeError(f"unable to find either of {cyclist_1} or {cyclist_2}")
    def __str__(self) -> str:
        # render from just before the first occupied section to the end,
        # with a separator bar marking the finish line
        start = next(self.non_empty(), None)
        start_pos = start.position - 1 if start is not None and start.position else 0
        finish = max(start_pos, self.finish)
        total = (Section.LANE_STR_WIDTH + 1) * 2 + 1
        sections = (
            cast(Tuple[Any], (self.name,))
            + self.sections[start_pos:finish]
            + ("#" * total,)
            + self.sections[finish:]
        )
        return "\n".join(map(str, sections))
    @classmethod
    def from_sections(
        cls, sections: Union[str, Iterable[str], Iterable[Type[Section]]], **kwargs,
    ) -> "Track":
        """ create a track from a sequence of sections """
        if isinstance(sections, str):
            # split a whitespace/punctuation separated string of class names
            sections = CLASS_REGEX.split(sections)
        classes = filter(None, map(class_from_path, sections))
        # instantiate each section class with its position along the track
        sections = (clazz(i) for i, clazz in enumerate(classes))
        return cls(sections=sections, **kwargs)
# Shorthand 1-tuples of section classes; the track layouts below are built
# by repeating (`* n`) and concatenating (`+`) these building blocks.
_SEC: Tuple[Type[Section]] = (Section,)
_SEC3: Tuple[Type[Section]] = (Section3,)
_FIN: Tuple[Type[Section]] = (Finish,)
_FIN3: Tuple[Type[Section]] = (Finish3,)
_UP: Tuple[Type[Section]] = (MountainUp,)
_DOWN: Tuple[Type[Section]] = (MountainDown,)
_SUP: Tuple[Type[Section]] = (Supply,)
_COB1: Tuple[Type[Section]] = (Cobblestone1,)
_COB2: Tuple[Type[Section]] = (Cobblestone2,)
# Pre-defined track layouts
AVENUE_CORSO_PASEO = Track.from_sections(
    name="AVENUE_CORSO_PASEO", sections=_SEC * 73 + _FIN * 5
)
FIRENZE_MILANO = Track.from_sections(
    name="FIRENZE_MILANO",
    sections=_SEC * 22
    + _UP * 5
    + _DOWN * 3
    + _SEC * 16
    + _UP * 7
    + _DOWN * 3
    + _SEC * 17
    + _FIN * 5,
)
LA_CLASSICISSIMA = Track.from_sections(
    name="LA_CLASSICISSIMA",
    sections=_SEC * 14
    + _UP * 10
    + _DOWN * 4
    + _SEC * 12
    + _UP * 5
    + _DOWN * 4
    + _SEC * 5
    + _UP * 3
    + _DOWN * 3
    + _SEC * 13
    + _FIN * 5,
    start=4,
)
LA_HAUT_MONTAGNE = Track.from_sections(
name="LA_HAUT_MONTAGNE",
sections=_SEC * 36 + _UP | |
env, 3. file
secret_env = os.environ.get(env_name)
if not secret and secret_env:
secret_from = 'env'
self.log.info("Loading %s from env[%s]", trait_name, env_name)
secret = binascii.a2b_hex(secret_env)
if not secret and os.path.exists(secret_file):
secret_from = 'file'
self.log.info("Loading %s from %s", trait_name, secret_file)
try:
if not _mswindows: # Windows permissions don't follow POSIX rules
perm = os.stat(secret_file).st_mode
if perm & 0o07:
msg = "cookie_secret_file can be read or written by anybody"
raise ValueError(msg)
with open(secret_file) as f:
text_secret = f.read().strip()
if HEX_RE.match(text_secret):
# >= 0.8, use 32B hex
secret = binascii.a2b_hex(text_secret)
else:
# old b64 secret with a bunch of ignored bytes
secret = binascii.a2b_base64(text_secret)
self.log.warning(dedent("""
Old base64 cookie-secret detected in {0}.
JupyterHub >= 0.8 expects 32B hex-encoded cookie secret
for tornado's sha256 cookie signing.
To generate a new secret:
openssl rand -hex 32 > "{0}"
""").format(secret_file))
except Exception as e:
self.log.error(
"Refusing to run JupyterHub with invalid cookie_secret_file. "
"%s error was: %s",
secret_file, e)
self.exit(1)
if not secret:
secret_from = 'new'
self.log.debug("Generating new %s", trait_name)
secret = os.urandom(COOKIE_SECRET_BYTES)
if secret_file and secret_from == 'new':
# if we generated a new secret, store it in the secret_file
self.log.info("Writing %s to %s", trait_name, secret_file)
text_secret = binascii.b2a_hex(secret).decode('ascii')
with open(secret_file, 'w') as f:
f.write(text_secret)
f.write('\n')
if not _mswindows: # Windows permissions don't follow POSIX rules
try:
os.chmod(secret_file, 0o600)
except OSError:
self.log.warning("Failed to set permissions on %s", secret_file)
# store the loaded trait value
self.cookie_secret = secret
    def init_internal_ssl(self):
        """Create the certs needed to turn on internal SSL.

        No-op unless ``internal_ssl`` is enabled. Builds (or reuses) a
        Certipy store of CAs and signed key pairs for the hub and proxy
        components, records the trust bundles, and configures the default
        ``AsyncHTTPClient`` with the resulting SSL context.
        """
        if self.internal_ssl:
            from certipy import Certipy, CertNotFoundError
            certipy = Certipy(store_dir=self.internal_certs_location,
                              remove_existing=self.recreate_internal_certs)
            # Here we define how trust should be laid out per each component
            self.internal_ssl_components_trust = {
                'hub-ca': list(self.internal_ssl_authorities.keys()),
                'proxy-api-ca': ['hub-ca', 'services-ca', 'notebooks-ca'],
                'proxy-client-ca': ['hub-ca', 'notebooks-ca'],
                'notebooks-ca': ['hub-ca', 'proxy-client-ca'],
                'services-ca': ['hub-ca', 'proxy-api-ca'],
            }
            hub_name = 'hub-ca'
            # If any external CAs were specified in external_ssl_authorities
            # add records of them to Certipy's store.
            self.internal_ssl_authorities.update(self.external_ssl_authorities)
            for authority, files in self.internal_ssl_authorities.items():
                if files:
                    self.log.info("Adding CA for %s", authority)
                    certipy.store.add_record(
                        authority, is_ca=True, files=files)
            # build per-component trust bundles from the trust graph above
            self.internal_trust_bundles = certipy.trust_from_graph(
                self.internal_ssl_components_trust)
            default_alt_names = ["IP:127.0.0.1", "DNS:localhost"]
            if self.subdomain_host:
                default_alt_names.append("DNS:%s" % urlparse(self.subdomain_host).hostname)
            # The signed certs used by hub-internal components
            try:
                internal_key_pair = certipy.store.get_record("hub-internal")
            except CertNotFoundError:
                alt_names = list(default_alt_names)
                # In the event the hub needs to be accessed externally, add
                # the fqdn and (optionally) rev_proxy to the set of alt_names.
                alt_names += (["DNS:" + socket.getfqdn()]
                              + self.trusted_alt_names)
                self.log.info(
                    "Adding CA for %s: %s",
                    "hub-internal",
                    ";".join(alt_names),
                )
                internal_key_pair = certipy.create_signed_pair(
                    "hub-internal",
                    hub_name,
                    alt_names=alt_names,
                )
            else:
                self.log.info("Using existing hub-internal CA")
            # Create the proxy certs
            proxy_api = 'proxy-api'
            proxy_client = 'proxy-client'
            for component in [proxy_api, proxy_client]:
                ca_name = component + '-ca'
                alt_names = default_alt_names + self.trusted_alt_names
                # reuse an existing signed pair if one is already stored
                try:
                    record = certipy.store.get_record(component)
                except CertNotFoundError:
                    self.log.info(
                        "Generating signed pair for %s: %s",
                        component,
                        ';'.join(alt_names),
                    )
                    record = certipy.create_signed_pair(
                        component,
                        ca_name,
                        alt_names=alt_names,
                    )
                else:
                    self.log.info("Using existing %s CA", component)
                self.internal_proxy_certs[component] = {
                    "keyfile": record['files']['key'],
                    "certfile": record['files']['cert'],
                    "cafile": record['files']['cert'],
                }
            self.internal_ssl_key = internal_key_pair['files']['key']
            self.internal_ssl_cert = internal_key_pair['files']['cert']
            self.internal_ssl_ca = self.internal_trust_bundles[hub_name]
            # Configure the AsyncHTTPClient. This will affect anything using
            # AsyncHTTPClient.
            ssl_context = make_ssl_context(
                self.internal_ssl_key,
                self.internal_ssl_cert,
                cafile=self.internal_ssl_ca,
            )
            AsyncHTTPClient.configure(
                None, defaults={"ssl_options" : ssl_context}
            )
    def init_db(self):
        """Create the database connection.

        Optionally upgrades the schema first, then builds the session
        factory; exits the application on connection or schema errors.
        """
        self.log.debug("Connecting to db: %s", self.db_url)
        if self.upgrade_db:
            # apply any pending schema migrations before connecting
            dbutil.upgrade_if_needed(self.db_url, log=self.log)
        try:
            self.session_factory = orm.new_session_factory(
                self.db_url,
                reset=self.reset_db,
                echo=self.debug_db,
                **self.db_kwargs
            )
            self.db = self.session_factory()
        except OperationalError as e:
            self.log.error("Failed to connect to db: %s", self.db_url)
            self.log.debug("Database error was:", exc_info=True)
            if self.db_url.startswith('sqlite:///'):
                # sqlite failures are usually file/permission problems
                self._check_db_path(self.db_url.split(':///', 1)[1])
            self.log.critical('\n'.join([
                "If you recently upgraded JupyterHub, try running",
                "    jupyterhub upgrade-db",
                "to upgrade your JupyterHub database schema",
            ]))
            self.exit(1)
        except orm.DatabaseSchemaMismatch as e:
            # exit with the schema-mismatch message as the error
            self.exit(e)
def init_hub(self):
"""Load the Hub URL config"""
hub_args = dict(
base_url=self.hub_prefix,
public_host=self.subdomain_host,
certfile=self.internal_ssl_cert,
keyfile=self.internal_ssl_key,
cafile=self.internal_ssl_ca,
)
if self.hub_bind_url:
# ensure hub_prefix is set on bind_url
self.hub_bind_url = urlunparse(
urlparse(self.hub_bind_url)
._replace(path=self.hub_prefix)
)
hub_args['bind_url'] = self.hub_bind_url
else:
hub_args['ip'] = self.hub_ip
hub_args['port'] = self.hub_port
# routespec for the Hub is the *app* base url
# not the hub URL, so it receives requests for non-running servers
# use `/` with host-based routing so the Hub
# gets requests for all hosts
host = ''
if self.subdomain_host:
routespec = '/'
else:
routespec = self.base_url
self.hub = Hub(routespec=routespec, **hub_args)
if self.hub_connect_ip:
self.hub.connect_ip = self.hub_connect_ip
if self.hub_connect_port:
self.hub.connect_port = self.hub_connect_port
self.log.warning(
"JupyterHub.hub_connect_port is deprecated as of 0.9."
" Use JupyterHub.hub_connect_url to fully specify"
" the URL for connecting to the Hub."
)
if self.hub_connect_url:
# ensure hub_prefix is on connect_url
self.hub_connect_url = urlunparse(
urlparse(self.hub_connect_url)
._replace(path=self.hub_prefix)
)
self.hub.connect_url = self.hub_connect_url
if self.internal_ssl:
self.hub.proto = 'https'
    async def init_users(self):
        """Load users into and from the database.

        Normalizes and validates admin and whitelist users from config,
        mirrors them into the database, notifies the authenticator of
        every known user, and backfills ``user.created`` where missing.
        """
        db = self.db
        if self.authenticator.enable_auth_state:
            # check that auth_state encryption is available
            # if it's not, exit with an informative error.
            ck = crypto.CryptKeeper.instance()
            try:
                ck.check_available()
            except Exception as e:
                self.exit("auth_state is enabled, but encryption is not available: %s" % e)
        if self.admin_users and not self.authenticator.admin_users:
            self.log.warning(
                "\nJupyterHub.admin_users is deprecated since version 0.7.2."
                "\nUse Authenticator.admin_users instead."
            )
            self.authenticator.admin_users = self.admin_users
        admin_users = [
            self.authenticator.normalize_username(name)
            for name in self.authenticator.admin_users
        ]
        self.authenticator.admin_users = set(admin_users)  # force normalization
        for username in admin_users:
            if not self.authenticator.validate_username(username):
                raise ValueError("username %r is not valid" % username)
        if not admin_users:
            self.log.warning("No admin users, admin interface will be unavailable.")
            self.log.warning("Add any administrative users to `c.Authenticator.admin_users` in config.")
        new_users = []
        for name in admin_users:
            # ensure anyone specified as admin in config is admin in db
            user = orm.User.find(db, name)
            if user is None:
                user = orm.User(name=name, admin=True)
                new_users.append(user)
                db.add(user)
            else:
                user.admin = True
        # the admin_users config variable will never be used after this point.
        # only the database values will be referenced.
        whitelist = [
            self.authenticator.normalize_username(name)
            for name in self.authenticator.whitelist
        ]
        self.authenticator.whitelist = set(whitelist)  # force normalization
        for username in whitelist:
            if not self.authenticator.validate_username(username):
                raise ValueError("username %r is not valid" % username)
        if not whitelist:
            self.log.info("Not using whitelist. Any authenticated user will be allowed.")
        # add whitelisted users to the db
        for name in whitelist:
            user = orm.User.find(db, name)
            if user is None:
                user = orm.User(name=name)
                new_users.append(user)
                db.add(user)
        db.commit()
        # Notify authenticator of all users.
        # This ensures Auth whitelist is up-to-date with the database.
        # This lets whitelist be used to set up initial list,
        # but changes to the whitelist can occur in the database,
        # and persist across sessions.
        for user in db.query(orm.User):
            try:
                await maybe_future(self.authenticator.add_user(user))
            except Exception:
                # the authenticator rejected a user already present in the db
                self.log.exception("Error adding user %s already in db", user.name)
                if self.authenticator.delete_invalid_users:
                    self.log.warning("Deleting invalid user %s from the Hub database", user.name)
                    db.delete(user)
                else:
                    self.log.warning(dedent("""
                        You can set
                            c.Authenticator.delete_invalid_users = True
                        to automatically delete users from the Hub database that no longer pass
                        Authenticator validation,
                        such as when user accounts are deleted from the external system
                        without notifying JupyterHub.
                        """))
            else:
                # handle database upgrades where user.created is undefined.
                # we don't want to allow user.created to be undefined,
                # so initialize it to last_activity (if defined) or now.
                if not user.created:
                    user.created = user.last_activity or datetime.utcnow()
        db.commit()
        # The whitelist set and the users in the db are now the same.
        # From this point on, any user changes should be done simultaneously
        # to the whitelist set and user db, unless the whitelist is empty (all users allowed).
async def init_groups(self):
"""Load predefined groups into the database"""
db = self.db
for name, usernames in self.load_groups.items():
group = orm.Group.find(db, name)
if group is None:
group = orm.Group(name=name)
db.add(group)
for username in usernames:
username = self.authenticator.normalize_username(username)
if not (await maybe_future(self.authenticator.check_whitelist(username))):
raise ValueError("Username %r is not in whitelist" % username)
user = orm.User.find(db, name=username)
if user is None:
if not self.authenticator.validate_username(username):
raise ValueError("Group username %r is not valid" % username)
user = orm.User(name=username)
db.add(user)
group.users.append(user)
db.commit()
async def _add_tokens(self, token_dict, kind):
"""Add tokens for users or services to the database"""
if kind == 'user':
Class = orm.User
elif kind == 'service':
Class = orm.Service
else:
raise ValueError("kind must be user or service, not %r" % kind)
db = self.db
for token, name in token_dict.items():
if kind == 'user':
name = self.authenticator.normalize_username(name)
if | |
of the argument to be deleted"
)
add_args(del_config_parser, config_path)
add_args(del_config_parser, shipped_configs)
del_config_parser.add_argument(
'-a', '--all',
action='store_true',
default=False,
help='delete all configs (asks the user to confirm before deleting) [default: False]'
)
del_config_parser.add_argument(
'-f', '--force',
action='store_true',
default=False,
help='force deletion; do not ask to confirm deletion [default: False]'
)
add_args(del_config_parser, verbose)
# =========================================================================
# view subparser
# =========================================================================
view_parser = subparsers.add_parser(
'view', description="View a summary of an SFF file", help="view file summary")
view_parser.add_argument('from_file', help="any SFF file")
add_args(view_parser, config_path)
add_args(view_parser, shipped_configs)
view_parser.add_argument(
'-V', '--version', action='store_true', help="show SFF format version")
view_parser.add_argument('-C', '--show-chunks', action='store_true',
help="show sequence of chunks in IMOD file; only works with IMOD model files (.mod) [default: False]")
view_parser.add_argument(*verbose['args'], **verbose['kwargs'])
# =============================================================================
# notes parser
# =============================================================================
notes_parser = subparsers.add_parser(
'notes',
description="The EMDB-SFF Annotation Toolkit",
help="annotate an EMDB-SFF file",
)
notes_subparsers = notes_parser.add_subparsers(
title='Annotation tools',
dest='notes_subcommand',
description='The EMDB-SFF Annotation Toolkit provides the following tools:',
metavar="EMDB-SFF annotation tools",
)
# =========================================================================
# notes: search
# =========================================================================
search_notes_parser = notes_subparsers.add_parser(
'search',
description="Search ontologies for annotation by text labels",
help="search for terms by labels",
)
search_notes_parser.add_argument(
'search_term',
nargs='?',
default='',
help="the term to search; add quotes if spaces are included")
add_args(search_notes_parser, config_path)
add_args(search_notes_parser, shipped_configs)
search_notes_parser.add_argument(
'-R', '--resource', default=RESOURCE_LIST.keys()[0], choices=RESOURCE_LIST.keys(),
help='the resource to search for terms or accessions; other valid options are {resources} [default: {default}]'.format(
resources=RESOURCE_LIST.keys(),
default=RESOURCE_LIST.keys()[0],
)
)
search_notes_parser.add_argument(
'-s', '--start', type=int, default=1, help="start index [default: 1]"
)
search_notes_parser.add_argument(
'-r', '--rows', type=int, default=10, help="number of rows [default: 10]"
)
ols_parser = search_notes_parser.add_argument_group(
title='EBI Ontology Lookup Service (OLS)',
description='The Ontology Lookup Service (OLS) is a repository for biomedical ontologies that aims to provide a '
'single point of access to the latest ontology versions. You can use the following options to modify '
'your search against OLS by ensuring that the -R/--resource flag is set to \'ols\' (default).'
)
ols_parser.add_argument(
'-O', '--ontology', default=None, help="the ontology to search [default: None]")
ols_parser.add_argument(
'-x', '--exact', default=False, action='store_true', help="exact matches? [default: False]")
ols_parser.add_argument(
'-o', '--obsoletes', default=False, action='store_true', help="include obsoletes? [default: False]")
ols_parser.add_argument(
'-L', '--list-ontologies', default=False,
action='store_true', help="list available ontologies [default: False]"
)
ols_parser.add_argument(
'-l', '--short-list-ontologies', default=False,
action='store_true', help="short list of available ontologies [default: False]"
)
# todo: add resource-specific argument groups
# emdb_parser = search_notes_parser.add_argument_group(
# title='The Electron Microscopy Data Bank (EMDB)',
# description='The Electron Microscopy Data Bank (EMDB) is a public repository for electron microscopy density maps '
# 'of macromolecular complexes and subcellular structures. Searching against EMDB can use the following '
# 'options:'
# )
# uniprot_parser = search_notes_parser.add_argument_group(
# title='The Universal Protein Resource (UniProt)',
# description='The Universal Protein Resource (UniProt) is a comprehensive resource for protein sequence and '
# 'annotation data. Searching against UniProt can use the following options:'
# )
# pdb_parser = search_notes_parser.add_argument_group(
# title='The Protein Data Bank archive (PDB)',
# description='Since 1971, the Protein Data Bank archive (PDB) has served as the single repository of information '
# 'about the 3D structures of proteins, nucleic acids, and complex assemblies. Searching against EMDB '
# 'can use the following options:'
# )
# =========================================================================
# notes: suggest
# =========================================================================
# todo: suggest terms from a description
# TBA
# =========================================================================
# notes: list
# =========================================================================
list_notes_parser = notes_subparsers.add_parser(
'list',
description="List all available annotations present in an EMDB-SFF file",
help="list available annotations",
)
add_args(list_notes_parser, sff_file)
add_args(list_notes_parser, header)
add_args(list_notes_parser, config_path)
add_args(list_notes_parser, shipped_configs)
long_format = {
'args': ['-l', '--long-format'],
'kwargs': {
'default': False,
'action': 'store_true',
'help': "only show segment ID and description (if present) [default: False]"
}
}
add_args(list_notes_parser, long_format)
list_notes_parser.add_argument('-D', '--sort-by-name', default=False,
action='store_true', help="sort listings by segment name [default: False (sorts by ID)]")
list_notes_parser.add_argument(
'-r', '--reverse', default=False, action='store_true', help="reverse the sort order [default: False]")
list_notes_parser.add_argument('-I', '--list-ids', default=False, action='store_true',
help="only list the IDs for segments one per line [default: False]")
add_args(list_notes_parser, verbose)
# =========================================================================
# notes: show
# =========================================================================
show_notes_parser = notes_subparsers.add_parser(
'show',
description="Show a specific annotations by ID present in an EMDB-SFF file",
help="show an annotation by ID",
)
add_args(show_notes_parser, sff_file)
add_args(show_notes_parser, config_path)
add_args(show_notes_parser, shipped_configs)
add_args(show_notes_parser, header)
add_args(show_notes_parser, long_format)
add_args(show_notes_parser, verbose)
show_segment_id = deepcopy(segment_id)
# todo: use nargs='+' instead of csv
show_segment_id['kwargs'][
'help'] += "; pass more than one ID as a comma-separated list with no spaces e.g. 'id1,id2,...,idN'"
show_notes_parser.add_argument(
*show_segment_id['args'], **show_segment_id['kwargs'])
# =========================================================================
# notes:add
# =========================================================================
add_notes_parser = notes_subparsers.add_parser(
'add',
description="Add a new annotation to an EMDB-SFF file",
help="add new annotations",
)
# all notes refer to some sff file
add_args(add_notes_parser, sff_file)
add_args(add_notes_parser, config_path)
add_args(add_notes_parser, shipped_configs)
# external references apply to both
external_ref['kwargs']['action'] = 'append'
add_args(add_notes_parser, external_ref)
add_args(add_notes_parser, verbose)
del external_ref['kwargs']['action']
# global notes
add_global_notes_parser = add_notes_parser.add_argument_group(
title="add global notes",
description="add global attributes to an EMDB-SFF file"
)
add_args(add_global_notes_parser, name)
add_args(add_global_notes_parser, software_name)
add_args(add_global_notes_parser, software_version)
add_args(add_global_notes_parser, software_proc_details)
# add_args(add_global_notes_parser, file_path)
add_args(add_global_notes_parser, details)
# segment notes
add_segment_notes_parser = add_notes_parser.add_argument_group(
title="add segment notes",
description="add attributes to a single segment in an EMDB-SFF file"
)
add_args(add_segment_notes_parser, segment_id)
add_args(add_segment_notes_parser, segment_name)
add_args(add_segment_notes_parser, description)
add_args(add_segment_notes_parser, number_of_instances)
add_args(add_segment_notes_parser, complexes)
add_args(add_segment_notes_parser, macromolecules)
# =========================================================================
# notes: edit
# =========================================================================
edit_notes_parser = notes_subparsers.add_parser(
'edit',
description="Edit an existing annotation to an EMDB-SFF file",
help="edit existing annotations",
)
add_args(edit_notes_parser, sff_file)
add_args(edit_notes_parser, config_path)
add_args(edit_notes_parser, shipped_configs)
add_args(edit_notes_parser, external_ref_id)
external_ref['kwargs']['action'] = 'append'
add_args(edit_notes_parser, external_ref)
del external_ref['kwargs']['action']
# global notes
edit_global_notes_parser = edit_notes_parser.add_argument_group(
title="edit global notes",
description="edit global attributes to an EMDB-SFF file"
)
add_args(edit_global_notes_parser, name)
add_args(edit_global_notes_parser, software_name)
add_args(edit_global_notes_parser, software_version)
add_args(edit_global_notes_parser, software_proc_details)
# add_args(edit_global_notes_parser, file_path)
add_args(edit_global_notes_parser, details)
# segment notes
edit_segment_notes_parser = edit_notes_parser.add_argument_group(
title="edit segment notes",
description="edit attributes to a single segment in an EMDB-SFF file"
)
add_args(edit_segment_notes_parser, segment_id)
add_args(edit_segment_notes_parser, segment_name)
add_args(edit_segment_notes_parser, description)
add_args(edit_segment_notes_parser, number_of_instances)
add_args(edit_segment_notes_parser, complex_id)
add_args(edit_segment_notes_parser, complexes)
add_args(edit_segment_notes_parser, macromolecule_id)
add_args(edit_segment_notes_parser, macromolecules)
# =========================================================================
# notes: del
# =========================================================================
# todo: sff notes del -e 1,3,4,5,6 file.json
del_notes_parser = notes_subparsers.add_parser(
'del',
description="Delete an existing annotation to an EMDB-SFF file",
help="delete existing annotations",
)
add_args(del_notes_parser, sff_file)
add_args(del_notes_parser, config_path)
add_args(del_notes_parser, shipped_configs)
add_args(del_notes_parser, external_ref_id)
# global notes
del_global_notes_parser = del_notes_parser.add_argument_group(
title="delete global notes",
description="delete global attributes to an EMDB-SFF file"
)
name['kwargs'] = {
'action': 'store_true',
'default': False,
'help': 'delete the name [default: False]',
}
add_args(del_global_notes_parser, name)
software_name['kwargs'] = {
'action': 'store_true',
'default': False,
'help': 'delete the software name [default: False]'
}
add_args(del_global_notes_parser, software_name)
software_version['kwargs'] = {
'action': 'store_true',
'default': False,
'help': 'delete the software version [default: False]'
}
add_args(del_global_notes_parser, software_version)
software_proc_details['kwargs'] = {
'action': 'store_true',
'default': False,
'help': 'delete the software processing details [default: False]'
}
add_args(del_global_notes_parser, software_proc_details)
# file_path['kwargs'] = {
# 'action': 'store_true',
# 'default': False,
# 'help': 'delete the file path [default: False]'
# }
# add_args(del_global_notes_parser, file_path)
details['kwargs'] = {
'action': 'store_true',
'default': False,
'help': 'delete the details [default: False]'
}
add_args(del_global_notes_parser, details)
# segment notes
del_segment_notes_parser = del_notes_parser.add_argument_group(
title="delete segment notes",
description="delete attributes to a single segment in an EMDB-SFF file"
)
add_args(del_segment_notes_parser, segment_id)
segment_name['kwargs'] = {
'action': 'store_true',
'default': False,
'help': 'delete the segment name [default: False]'
}
add_args(del_segment_notes_parser, segment_name)
description['kwargs'] = {
'action': 'store_true',
'default': False,
'help': 'delete the description [default: False]',
}
add_args(del_segment_notes_parser, description)
del number_of_instances['kwargs']['type']
number_of_instances['kwargs'] = {
'action': 'store_true',
'default': False,
'help': 'delete the number of instances [default: False]',
}
add_args(del_segment_notes_parser, number_of_instances)
add_args(del_segment_notes_parser, complex_id)
add_args(del_segment_notes_parser, macromolecule_id)
# =============================================================================
# notes: copy
# =============================================================================
copy_notes_parser = notes_subparsers.add_parser(
'copy',
description="Copy notes from one/multiple segment to one/multiple/all other segments within the same EMDB-SFF file",
help="copy notes across segments within the same EMDB-SFF file"
)
add_args(copy_notes_parser, sff_file)
add_args(copy_notes_parser, config_path)
add_args(copy_notes_parser, shipped_configs)
# todo: merge with segment_id above
copy_notes_parser.add_argument(
'-i', '--segment-id',
help="segment ID or a comma-separated sequence of segment IDs of source segment(s); run 'sff notes list <file>' for a list of "
"segment IDs",
)
copy_global_notes_parse = copy_notes_parser.add_mutually_exclusive_group()
copy_global_notes_parse.add_argument(
'--from-global',
action='store_true',
default=False,
help="copy notes from global (metadata) to --to-segment segments"
)
copy_global_notes_parse.add_argument(
'--to-global',
action='store_true',
default=False,
help="copy notes from --segment-id segment to global (metadata)"
)
to_segment_or_all_copy_notes_parser = copy_notes_parser.add_mutually_exclusive_group()
to_segment_or_all_copy_notes_parser.add_argument(
'-t', '--to-segment',
help="segment ID or a comma-separated sequence of segment IDs of destination segment(s); run 'sff notes list <file>' for a list of "
"segment IDs",
)
to_segment_or_all_copy_notes_parser.add_argument(
'--to-all',
action='store_true',
default=False,
help="copy notes from --segment-id segment to all (other) segments"
)
# =============================================================================
# notes: clear
# =============================================================================
clear_notes_parser = notes_subparsers.add_parser(
'clear',
description="Clear all notes for one or more segments in an EMDB-SFF file",
help="clear notes in an EMDB-SFF file"
)
add_args(clear_notes_parser, config_path)
add_args(clear_notes_parser, shipped_configs)
add_args(clear_notes_parser, sff_file)
add_args(clear_notes_parser, verbose)
clear_notes_parser.add_argument(
'--all',
action='store_true',
default=False,
help="clear all notes; USE WITH CARE!"
)
clear_notes_parser.add_argument(
'--from-global',
action='store_true',
default=False,
help="clear notes from global (metadata)"
)
from_segment_or_all_clear_notes_parser = clear_notes_parser.add_mutually_exclusive_group()
from_segment_or_all_clear_notes_parser.add_argument(
'-i', '--segment-id',
help="segment ID or a comma-separated sequence of segment IDs of source segment(s); run 'sff notes list <file>' for a list of "
"segment IDs",
)
from_segment_or_all_clear_notes_parser.add_argument(
'--from-all-segments',
action='store_true',
default=False,
help="clear notes from all segments"
)
# =============================================================================
# notes: merge
# =============================================================================
merge_notes_parser = notes_subparsers.add_parser(
'merge',
description="Merge notes from two EMDB-SFF files",
help="merge notes from two EMDB-SFF files"
)
add_args(merge_notes_parser, config_path)
add_args(merge_notes_parser, shipped_configs)
merge_notes_parser.add_argument('--source', help="EMDB-SFF file from which to obtain notes", required=True)
merge_notes_parser.add_argument('other',
help="EMDB-SFF file whose content will be merged with notes from the file specified with --source")
output['kwargs'][
'help'] = "file to convert to; the extension (.sff, .hff, .json) determines the output format; if not specified then NOTES IN OTHER ONLY will be overwritten [default: None]"
merge_notes_parser.add_argument(*output['args'], **output['kwargs'])
merge_notes_parser.add_argument(*verbose['args'], **verbose['kwargs'])
# =========================================================================
# notes: save
# =========================================================================
save_notes_parser = notes_subparsers.add_parser(
'save',
description="Save all changes made to the actual file",
help="write all changes made since the last 'save' action"
)
save_notes_parser.add_argument(*sff_file['args'], **sff_file['kwargs'])
add_args(save_notes_parser, config_path)
add_args(save_notes_parser, shipped_configs)
# =========================================================================
# notes: trash
# =========================================================================
trash_notes_parser = notes_subparsers.add_parser(
'trash',
description="Discard all notes by deleting the temporary file",
help="discard all changes made since the last the edit action (add, edit, del)",
)
trash_notes_parser.add_argument(*sff_file['args'], **sff_file['kwargs'])
add_args(trash_notes_parser, config_path)
add_args(trash_notes_parser, shipped_configs)
# get the full list of tools from the Parser object
# tool_list = Parser._actions[1].choices.keys()
# print(tool_list)
tool_list = ['all', 'core', 'formats', 'notes', 'readers', 'schema', 'main']
# | |
<gh_stars>100-1000
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the core classes used by DINO.
"""
import math
from abc import ABC, abstractmethod
from typing import List, Optional, Dict, Any, Union
import openai
import torch
from tqdm import tqdm
from transformers import GPT2Tokenizer, PreTrainedTokenizer, PreTrainedModel
from generation import SelfDebiasingGPT2LMHeadModel
from utils import DatasetEntry
PLACEHOLDER_STR = "<X1>"
class DinoGenerator:
    """
    This class represents a generative language model which can be used to generate datasets from instructions.
    """

    def __init__(self, task_spec: Dict[str, Any], model: Union['str', 'ModelWrapper'] = None, openai_api_key: Optional[str] = None,
                 max_output_length: int = 40, decay_constant: float = 100, top_p: float = 0.9, top_k: int = 5,
                 remove_duplicates: bool = True, remove_identical_pairs: bool = False, min_num_words: int = -1, min_num_tokens: int = -1,
                 keep_outputs_without_eos: bool = False, allow_newlines_in_outputs: bool = False):
        """
        :param task_spec: the task specification
        :param model: a wrapper around the underlying language model.
               If GPT-3 is used, this should instead be the name of the GPT-3 model (e.g., "davinci")
        :param openai_api_key: an optional API key for GPT-3. If given, GPT-3 is used as a language model
        :param max_output_length: the maximum output length for each generated text
        :param decay_constant: the decay constant for self-debiasing
        :param top_p: p value for top-p sampling (set to 0 to perform no top-p sampling)
        :param top_k: k value for top-k sampling (set to 0 to perform no top-k sampling)
        :param remove_duplicates: whether duplicates should be removed from the generated dataset
        :param remove_identical_pairs: whether text pairs with identical texts should be removed (only for text pair datasets)
        :param min_num_words: the minimum number of (whitespace-separated) words for each dataset entry
        :param min_num_tokens: the minimum number of tokens for each dataset entry
        :param keep_outputs_without_eos: if set to true, examples where the language model does not output a quotation mark (which is
               interpreted as a signal that it has completed its output) are not removed from the dataset.
        :param allow_newlines_in_outputs: if set to true, model outputs that contain a newline character before the end-of-sequence token
               (a quotation mark) are not removed from the dataset
        """
        self.model = model
        self.openai_api_key = openai_api_key
        self.max_output_length = max_output_length
        self.decay_constant = decay_constant
        self.top_p = top_p
        self.top_k = top_k
        self.remove_duplicates = remove_duplicates
        self.remove_identical_pairs = remove_identical_pairs
        self.min_num_words = min_num_words
        self.min_num_tokens = min_num_tokens
        self.keep_outputs_without_eos = keep_outputs_without_eos
        self.allow_newlines_in_outputs = allow_newlines_in_outputs
        # One instruction template per label; counter_labels name the labels whose
        # instructions serve as debiasing texts during self-debiased generation.
        self.labels = list(task_spec['labels'].keys())
        self.instructions = {label: task_spec['labels'][label]['instruction'] for label in self.labels}
        self.counter_labels = {label: task_spec['labels'][label].get('counter_labels', []) for label in self.labels}

    def generate_dataset(self, input_texts: Optional[List[str]], num_entries_per_input_and_label: Optional[int] = None,
                         num_entries_per_label: Optional[int] = None, batch_size: Optional[int] = None) -> List[DatasetEntry]:
        """
        Generate a new dataset.

        :param input_texts: an optional list of raw texts; this is required for generating text pair datasets
        :param num_entries_per_input_and_label: the number of entries to generate for each pair of input text and label
        :param num_entries_per_label: the number of entries to generate for each label
        :param batch_size: the number of entries to generate simultaneously
        :return: the generated dataset
        """
        generate_with_inputs = input_texts is not None
        if not generate_with_inputs:
            # No real inputs: substitute batch ids so each label still yields
            # ~num_entries_per_label outputs generated batch_size at a time.
            # NOTE(review): assumes num_entries_per_label and batch_size are both
            # set in this mode (otherwise this raises) — confirm callers guarantee it.
            input_texts = list(range(math.ceil(num_entries_per_label / batch_size)))
            num_entries_per_input_and_label = batch_size
        input_iterator = tqdm(input_texts, desc="Dataset Entries")
        dataset = []
        for input_text_or_id in input_iterator:
            for label in self.labels:
                dataset += self._generate_dataset_entries(input_text_or_id, label=label, num_entries=num_entries_per_input_and_label,
                                                          generate_with_inputs=generate_with_inputs)
        dataset = self._postprocess_dataset(dataset, generate_with_inputs)
        return dataset

    def _generate_dataset_entries(self, input_text_or_id: Union[str, int], label: str, num_entries: int,
                                  generate_with_inputs: bool) -> List[DatasetEntry]:
        # Produce up to num_entries raw model outputs for one (input, label) pair,
        # then filter them through _process_output.
        instruction = self._build_instruction(label, input_text_or_id, generate_with_inputs)
        if self.openai_api_key is not None:
            # GPT-3 path: self.model is the engine name; one API call per entry,
            # stopping at the closing quotation mark.
            try:
                model_responses = [openai.Completion.create(
                    engine=self.model, prompt=instruction, max_tokens=self.max_output_length, top_p=self.top_p, stop=['"']
                ) for _ in range(num_entries)]
                model_outputs = [model_response["choices"][0]["text"] for model_response in model_responses]
            except openai.error.RateLimitError as e:
                # Best-effort: on rate limiting this batch is simply dropped.
                print(e)
                return []
        else:
            # Local path: use self-debiasing, with the counter labels' instructions
            # as debiasing texts.
            counter_instructions = [
                self._build_instruction(other_label, input_text_or_id, generate_with_inputs) for other_label in self.counter_labels[label]
            ]
            model_outputs = self.model.generate_self_debiasing(
                input_text=instruction, debiasing_texts=counter_instructions, num_samples=num_entries, decay_constant=self.decay_constant,
                do_sample=True, min_length=self.max_output_length, max_length=self.max_output_length, top_k=self.top_k, top_p=self.top_p
            )
        model_outputs = [
            self._process_output(input_text=input_text_or_id, output_text=output, label=label, generate_with_inputs=generate_with_inputs)
            for output in model_outputs
        ]
        model_outputs = [output for output in model_outputs if output is not None]
        return model_outputs

    def _build_instruction(self, label: str, text: str, generate_with_inputs: bool) -> str:
        # Fill the label's instruction template; the placeholder count must match
        # whether an input text was given.
        instruction_template = self.instructions[label]
        if generate_with_inputs:
            assert instruction_template.count(PLACEHOLDER_STR) == 1, \
                f"An input text was provided, but the instruction for label '{label}' does not contain exactly one placeholder"
            return instruction_template.replace(PLACEHOLDER_STR, text)
        else:
            assert instruction_template.count(PLACEHOLDER_STR) == 0, \
                f"No input text was provided, but the instruction for label '{label}' contains a placeholder"
            return instruction_template

    def _process_output(self, input_text: Union[str, int], output_text: str, label: str, generate_with_inputs: bool) \
            -> Optional[DatasetEntry]:
        # A quotation mark acts as the end-of-sequence marker: keep only the text
        # before it (or drop the output entirely unless keep_outputs_without_eos).
        output_text = output_text.split('"')[0] if '"' in output_text else (output_text if self.keep_outputs_without_eos else None)
        if output_text and ('\n' not in output_text or self.allow_newlines_in_outputs):
            text_a = input_text if generate_with_inputs else output_text
            text_b = output_text if generate_with_inputs else None
            return DatasetEntry(text_a=text_a, text_b=text_b, label=label)
        return None

    def _postprocess_dataset(self, dataset: List[DatasetEntry], generate_with_inputs: bool) -> List[DatasetEntry]:
        # Apply the configured dataset-level filters (dedup, min words/tokens,
        # identical-pair removal). Note set() dedup does not preserve order.
        if self.remove_duplicates:
            dataset = list(set(dataset))
        if self.min_num_words > 0:
            # For pair datasets the generated half is text_b; otherwise text_a.
            if generate_with_inputs:
                dataset = [entry for entry in dataset if len(entry.text_b.split()) >= self.min_num_words]
            else:
                dataset = [entry for entry in dataset if len(entry.text_a.split()) >= self.min_num_words]
        if self.min_num_tokens > 0:
            # NOTE(review): reaches into the wrapper's private _tokenizer; only valid
            # when self.model is a ModelWrapper (not the GPT-3 string) — confirm.
            if generate_with_inputs:
                dataset = [entry for entry in dataset if len(self.model._tokenizer.tokenize(entry.text_b)) >= self.min_num_tokens]
            else:
                dataset = [entry for entry in dataset if len(self.model._tokenizer.tokenize(entry.text_a)) >= self.min_num_tokens]
        if generate_with_inputs and self.remove_identical_pairs:
            dataset = [entry for entry in dataset if entry.text_a != entry.text_b]
        return dataset
class ModelWrapper(ABC):
    """
    Abstract wrapper around a pretrained language model, exposing high-level helpers
    for generating text with the self-debiasing method described in
    https://arxiv.org/abs/2103.00453.
    """

    def __init__(self, use_cuda: bool = True):
        """
        :param use_cuda: whether to use CUDA
        """
        # Fall back to CPU when CUDA is requested but unavailable.
        cuda_enabled = torch.cuda.is_available() and use_cuda
        self._device = "cuda" if cuda_enabled else "cpu"
        # Concrete subclasses populate these with a tokenizer/model pair.
        self._tokenizer = None  # type: Optional[PreTrainedTokenizer]
        self._model = None  # type: Optional[PreTrainedModel]

    def query_model(self, input_text: str) -> torch.FloatTensor:
        """For a given input text, returns the probability distribution over possible next tokens."""
        batch_result = self.query_model_batch([input_text])
        return batch_result[0]

    @abstractmethod
    def query_model_batch(self, input_texts: List[str]) -> torch.FloatTensor:
        """For a batch of input texts, returns the probability distribution over possible next tokens."""
        pass

    @abstractmethod
    def generate(self, input_text: str, **kwargs) -> str:
        """Generates a continuation for a given input text."""
        pass

    @abstractmethod
    def generate_self_debiasing(self, input_text: str, debiasing_texts: List[str], num_samples: int = 1, decay_constant: float = 100,
                                epsilon: float = 0.01, debug: bool = False, **kwargs) -> List[str]:
        """
        Generates continuations for the given input texts with self-debiasing.

        :param input_texts: the input texts to generate continuations for
        :param debiasing_prefixes: the debiasing prefixes to be used
        :param decay_constant: the decay constant (lambda in the paper)
        :param epsilon: the minimum factor by which each probability is multiplied
        :param debug: whether to print additional debugging output
        :param kwargs: further arguments are passed on to the original generate function
        :return: the list of generated continuations
        """
        pass
class GPT2Wrapper(ModelWrapper):
def __init__(self, model_name: str = "gpt2-xl", use_cuda: bool = True):
    """
    :param model_name: the name of the pretrained GPT2 model (default: "gpt2-xl")
    :param use_cuda: whether to use CUDA
    """
    super().__init__(use_cuda=use_cuda)
    self._tokenizer = GPT2Tokenizer.from_pretrained(model_name)
    self._model = SelfDebiasingGPT2LMHeadModel.from_pretrained(model_name)  # type: SelfDebiasingGPT2LMHeadModel
    if use_cuda:
        # NOTE(review): parallelize() shards the model across all visible GPUs;
        # this branch keys on use_cuda alone, not on torch.cuda.is_available(),
        # unlike the base class's device choice — confirm intended.
        self._model.parallelize()
    # GPT-2 ships without a pad token; reuse EOS so batched encoding can pad.
    self._tokenizer.pad_token = self._tokenizer.eos_token
    self._model.config.pad_token_id = self._tokenizer.eos_token_id
def query_model_batch(self, input_texts: List[str]):
    """Return the next-token logits at the last non-padding position of each text."""
    encoded = self._tokenizer.batch_encode_plus(input_texts, padding=True, max_length=512, return_tensors='pt')
    encoded = {name: tensor.to(self._device) for name, tensor in encoded.items()}
    # Index of the final real (non-pad) token in every sequence.
    last_positions = encoded['attention_mask'].sum(dim=1) - 1
    logits = self._model(**encoded)['logits']
    per_example = [logits[row_idx, token_idx, :] for row_idx, token_idx in enumerate(last_positions)]
    return torch.stack(per_example)
def generate(self, input_text: str, **kwargs):
    """Generate and decode a single continuation for *input_text*.

    Extra keyword arguments are forwarded to the underlying model's generate().
    """
    prompt_ids = self._tokenizer.encode(input_text, return_tensors='pt')
    prompt_ids = prompt_ids.to(self._device)
    generated = self._model.generate(prompt_ids, **kwargs)
    return self._tokenizer.decode(generated[0])
def generate_self_debiasing(self, input_text: str, debiasing_texts: List[str], num_samples: int = 1, decay_constant: float = 100,
epsilon: float = 0.01, debug: bool = False, min_length: int = None, max_length: int = None,
**kwargs) -> List[str]:
self._model.init_logits_processor(num_debiasing_prefixes=len(debiasing_texts), decay_constant=decay_constant, epsilon=epsilon,
debug=debug, tokenizer=self._tokenizer)
inputs = [input_text] * num_samples
for debiasing_text in debiasing_texts:
inputs += [debiasing_text] * num_samples
inputs = self._tokenizer.batch_encode_plus(inputs, padding=True, return_tensors='pt')
inputs['attention_mask'] = torch.flip(inputs['attention_mask'], dims=[1])
shifts = inputs['attention_mask'].shape[-1] - inputs['attention_mask'].sum(dim=-1)
for batch_idx in | |
<gh_stars>1-10
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# @author : <EMAIL>
from headers.BeaEnginePython import *
from nose.tools import *
class TestSuite:
def disassemble(self, buffer):
    """Run the BeaEngine disassembler over *buffer* and check bounds handling.

    SecurityBlock is set to the buffer length so the engine may not read past
    the end; when it reports a negative length (other than UNKNOWN_OPCODE) the
    error must be OUT_OF_BLOCK.
    """
    myDisasm = Disasm(buffer)
    myDisasm.infos.SecurityBlock = len(buffer)
    if myDisasm.infos.SecurityBlock != 0:
        myDisasm.read()
        # NOTE(review): the guard only fires for negative lengths distinct from
        # UNKNOWN_OPCODE — confirm this matches the engine's error convention
        # (a successful positive-length decode is never asserted on).
        if myDisasm.length != UNKNOWN_OPCODE and myDisasm.length < 0:
            assert_equal(myDisasm.infos.Error, OUT_OF_BLOCK)
def disasmVEX0F(self, i):
    """Disassemble two-byte-map (0F) opcode *i* under a battery of VEX encodings.

    Covers the C4 (three-byte) escape at 128/256-bit vector lengths, all SIMD
    prefixes (none/66/F2/F3) and both W bits, followed by the C5 (two-byte)
    escape for the 128-bit W0 forms. Replaces 21 copy-pasted stanzas with one
    data table; the specs, their order, the vvvv values and the operand bytes
    are identical to the original unrolled version.

    :param i: the opcode byte to place after the VEX prefix
    """
    # (VEX spec string, vvvv value, escape form, bytes following the opcode)
    cases = [
        ('VEX.NDS.128.0F.W0', 0b1111, 'c4', '443322'),
        ('VEX.L1.66.0F.W0', 0b1101, 'c4', 'cb'),
        ('VEX.NDS.128.0F.W0', 0b1111, 'c4', '443322'),
        ('VEX.128.66.0F.W0', 0b1111, 'c4', '443322'),
        ('VEX.NDS.128.66.0F.W0', 0b1111, 'c4', '443322'),
        ('VEX.NDS.128.F2.0F.W0', 0b1111, 'c4', '443322'),
        ('VEX.NDS.128.F3.0F.W0', 0b1111, 'c4', '443322'),
        ('VEX.NDS.128.66.0F.W1', 0b1111, 'c4', '443322'),
        ('VEX.NDS.128.F2.0F.W1', 0b1111, 'c4', '443322'),
        ('VEX.NDS.128.F3.0F.W1', 0b1111, 'c4', '443322'),
        ('VEX.NDS.256.0F.W0', 0b1111, 'c4', '443322'),
        ('VEX.NDS.256.66.0F.W0', 0b1111, 'c4', '443322'),
        ('VEX.NDS.256.F2.0F.W0', 0b1111, 'c4', '443322'),
        ('VEX.NDS.256.F3.0F.W0', 0b1111, 'c4', '443322'),
        ('VEX.NDS.256.66.0F.W1', 0b1111, 'c4', '443322'),
        ('VEX.NDS.256.F2.0F.W1', 0b1111, 'c4', '443322'),
        ('VEX.NDS.256.F3.0F.W1', 0b1111, 'c4', '443322'),
        ('VEX.NDS.128.0F.W0', 0b1111, 'c5', '443322'),
        ('VEX.NDS.128.66.0F.W0', 0b1111, 'c5', '443322'),
        ('VEX.NDS.128.F2.0F.W0', 0b1111, 'c5', '443322'),
        ('VEX.NDS.128.F3.0F.W0', 0b1111, 'c5', '443322'),
    ]
    for spec, vvvv, escape, tail in cases:
        myVEX = VEX(spec)
        myVEX.vvvv = vvvv
        myVEX.R = 1
        prefix = myVEX.c4() if escape == 'c4' else myVEX.c5()
        Buffer = bytes.fromhex('{}{:02x}{}'.format(prefix, i, tail))
        self.disassemble(Buffer)
def disasmVEX0FNoModrm(self, i):
    """Disassemble two-byte-map (0F) opcode *i* with no ModR/M under VEX encodings.

    Same VEX spec coverage and ordering as :meth:`disasmVEX0F`, but the opcode
    is the final byte of the buffer. One entry (VEX.128.66.0F.W0) deliberately
    keeps the '443322' trailing bytes the original had there — preserved as-is
    even though it looks like a copy-paste remnant.

    :param i: the opcode byte to place after the VEX prefix
    """
    # (VEX spec string, vvvv value, escape form, bytes following the opcode)
    cases = [
        ('VEX.NDS.128.0F.W0', 0b1111, 'c4', ''),
        ('VEX.L1.66.0F.W0', 0b1101, 'c4', ''),
        ('VEX.NDS.128.0F.W0', 0b1111, 'c4', ''),
        ('VEX.128.66.0F.W0', 0b1111, 'c4', '443322'),  # original anomaly, kept
        ('VEX.NDS.128.66.0F.W0', 0b1111, 'c4', ''),
        ('VEX.NDS.128.F2.0F.W0', 0b1111, 'c4', ''),
        ('VEX.NDS.128.F3.0F.W0', 0b1111, 'c4', ''),
        ('VEX.NDS.128.66.0F.W1', 0b1111, 'c4', ''),
        ('VEX.NDS.128.F2.0F.W1', 0b1111, 'c4', ''),
        ('VEX.NDS.128.F3.0F.W1', 0b1111, 'c4', ''),
        ('VEX.NDS.256.0F.W0', 0b1111, 'c4', ''),
        ('VEX.NDS.256.66.0F.W0', 0b1111, 'c4', ''),
        ('VEX.NDS.256.F2.0F.W0', 0b1111, 'c4', ''),
        ('VEX.NDS.256.F3.0F.W0', 0b1111, 'c4', ''),
        ('VEX.NDS.256.66.0F.W1', 0b1111, 'c4', ''),
        ('VEX.NDS.256.F2.0F.W1', 0b1111, 'c4', ''),
        ('VEX.NDS.256.F3.0F.W1', 0b1111, 'c4', ''),
        ('VEX.NDS.128.0F.W0', 0b1111, 'c5', ''),
        ('VEX.NDS.128.66.0F.W0', 0b1111, 'c5', ''),
        ('VEX.NDS.128.F2.0F.W0', 0b1111, 'c5', ''),
        ('VEX.NDS.128.F3.0F.W0', 0b1111, 'c5', ''),
    ]
    for spec, vvvv, escape, tail in cases:
        myVEX = VEX(spec)
        myVEX.vvvv = vvvv
        myVEX.R = 1
        prefix = myVEX.c4() if escape == 'c4' else myVEX.c5()
        Buffer = bytes.fromhex('{}{:02x}{}'.format(prefix, i, tail))
        self.disassemble(Buffer)
def disasmNoModrm(self, i):
    """Disassemble bare one-byte opcode *i* (no ModR/M, no immediate).

    Tries the opcode with no prefix, each legacy prefix (66/F2/F3/F0), and a
    REX prefix with W then R set — same buffers and order as the original
    unrolled version.

    :param i: the opcode byte under test
    """
    for legacy in ('', '66', 'f2', 'f3', 'f0'):
        self.disassemble(bytes.fromhex('{}{:02x}'.format(legacy, i)))
    for rex_field in ('W', 'R'):
        myREX = REX()
        setattr(myREX, rex_field, 1)
        self.disassemble(bytes.fromhex('{:02x}{:02x}'.format(myREX.byte(), i)))
def disasm0FNoModrm(self, i):
    """Disassemble two-byte opcode 0F *i* (no ModR/M, no immediate).

    Tries the opcode with no prefix, each legacy prefix (66/F2/F3/F0), and a
    REX prefix with W then R set — same buffers and order as the original
    unrolled version.

    :param i: the second opcode byte (after 0F) under test
    """
    for legacy in ('', '66', 'f2', 'f3', 'f0'):
        self.disassemble(bytes.fromhex('{}0f{:02x}'.format(legacy, i)))
    for rex_field in ('W', 'R'):
        myREX = REX()
        setattr(myREX, rex_field, 1)
        self.disassemble(bytes.fromhex('{:02x}0f{:02x}'.format(myREX.byte(), i)))
def disasmNoImm(self, i):
    """Disassemble one-byte opcode *i* with a ModR/M byte but no immediate.

    Legacy-prefix forms use ModR/M 40 + disp8 11; the REX forms use ModR/M 89
    plus trailing bytes, exactly as in the original unrolled version.

    :param i: the opcode byte under test
    """
    for legacy in ('', '66', 'f2', 'f3', 'f0'):
        self.disassemble(bytes.fromhex('{}{:02x}4011'.format(legacy, i)))
    for rex_field in ('W', 'R'):
        myREX = REX()
        setattr(myREX, rex_field, 1)
        self.disassemble(bytes.fromhex('{:02x}{:02x}89ce00000000'.format(myREX.byte(), i)))
def disasm0FNoImm(self, i):
    """Disassemble two-byte opcode 0F *i* with a ModR/M byte but no immediate.

    Legacy-prefix forms use ModR/M 40 + disp8 11; the REX forms use ModR/M 89
    plus trailing bytes, exactly as in the original unrolled version.

    :param i: the second opcode byte (after 0F) under test
    """
    for legacy in ('', '66', 'f2', 'f3', 'f0'):
        self.disassemble(bytes.fromhex('{}0f{:02x}4011'.format(legacy, i)))
    for rex_field in ('W', 'R'):
        myREX = REX()
        setattr(myREX, rex_field, 1)
        self.disassemble(bytes.fromhex('{:02x}0f{:02x}89ce00000000'.format(myREX.byte(), i)))
def disasmImm8(self, i):
    """Disassemble one-byte opcode *i* taking a ModR/M byte and an imm8.

    Legacy-prefix forms use ModR/M 40 + disp8 11 + imm8 22; the REX forms use
    ModR/M 89 plus trailing bytes, exactly as in the original unrolled version.

    :param i: the opcode byte under test
    """
    for legacy in ('', '66', 'f2', 'f3', 'f0'):
        self.disassemble(bytes.fromhex('{}{:02x}401122'.format(legacy, i)))
    for rex_field in ('W', 'R'):
        myREX = REX()
        setattr(myREX, rex_field, 1)
        self.disassemble(bytes.fromhex('{:02x}{:02x}89ce000000'.format(myREX.byte(), i)))
def disasmNoModrmImm8(self, i):
    """Disassemble one-byte opcode `i` taking imm8 only (no ModRM) under
    each legacy prefix and under REX.W / REX.R."""
    for prefix in ('', '66', 'f2', 'f3', 'f0'):
        self.disassemble(bytes.fromhex(prefix + '{:02x}11'.format(i)))
    for field in ('W', 'R'):
        rex = REX()
        setattr(rex, field, 1)
        self.disassemble(
            bytes.fromhex('{:02x}{:02x}22'.format(rex.byte(), i)))
def disasmImm32(self, i):
    """Disassemble one-byte opcode `i` taking ModRM + imm32 under each
    legacy prefix and under REX.W / REX.R."""
    self.disassemble(bytes.fromhex('{:02x}401100112233'.format(i)))
    # the 0x66 operand-size prefix shrinks the immediate to 16 bit
    self.disassemble(bytes.fromhex('66{:02x}40110011'.format(i)))
    for prefix in ('f3', 'f2', 'f0'):
        self.disassemble(bytes.fromhex(prefix + '{:02x}401100112233'.format(i)))
    for field in ('W', 'R'):
        rex = REX()
        setattr(rex, field, 1)
        self.disassemble(
            bytes.fromhex('{:02x}{:02x}89ce000000000011'.format(rex.byte(), i)))
def disasmNoModrmImm32(self, i):
    """Disassemble one-byte opcode `i` taking imm32 only (no ModRM) under
    each legacy prefix and under REX.W / REX.R."""
    self.disassemble(bytes.fromhex('{:02x}00112233'.format(i)))
    self.disassemble(bytes.fromhex('f2{:02x}00112233'.format(i)))
    # the 0x66 operand-size prefix shrinks the immediate to 16 bit
    self.disassemble(bytes.fromhex('66{:02x}0011'.format(i)))
    for prefix in ('f3', 'f0'):
        self.disassemble(bytes.fromhex(prefix + '{:02x}00112233'.format(i)))
    for field in ('W', 'R'):
        rex = REX()
        setattr(rex, field, 1)
        self.disassemble(
            bytes.fromhex('{:02x}{:02x}00112233'.format(rex.byte(), i)))
def test2(self):
# 1 byte G1
for i in range(0, 8):
self.disassemble(bytes.fromhex('80{:02x}11'.format(i*8)))
self.disassemble(bytes.fromhex('80{:02x}11'.format(i*8 + 0xc0)))
# 1 byte G1
for i in range(0, 8):
self.disassemble(bytes.fromhex('81{:02x}11223344'.format(i*8)))
self.disassemble(bytes.fromhex('81{:02x}11223344'.format(i*8 + 0xc0)))
# 1 byte G1
for i in range(0, 8):
self.disassemble(bytes.fromhex('82{:02x}11'.format(i*8)))
self.disassemble(bytes.fromhex('82{:02x}11'.format(i*8 + 0xc0)))
# 1 byte G1
for i in range(0, 8):
self.disassemble(bytes.fromhex('83{:02x}11'.format(i*8)))
self.disassemble(bytes.fromhex('83{:02x}11'.format(i*8 + 0xc0)))
# 1 byte G2
for i in range(0, 8):
self.disassemble(bytes.fromhex('c0{:02x}11'.format(i*8)))
self.disassemble(bytes.fromhex('c0{:02x}11'.format(i*8 + 0xc0)))
# 1 byte G2
for i in range(0, 8):
self.disassemble(bytes.fromhex('c1{:02x}11'.format(i*8)))
self.disassemble(bytes.fromhex('c1{:02x}11'.format(i*8 + 0xc0)))
# 1 byte G2
for i in range(0, 8):
self.disassemble(bytes.fromhex('d0{:02x}'.format(i*8)))
self.disassemble(bytes.fromhex('d0{:02x}'.format(i*8 + 0xc0)))
# 1 byte G2
for i in range(0, 8):
self.disassemble(bytes.fromhex('d1{:02x}'.format(i*8)))
self.disassemble(bytes.fromhex('d1{:02x}'.format(i*8 + 0xc0)))
# 1 byte G2
for i in range(0, 8):
self.disassemble(bytes.fromhex('d2{:02x}'.format(i*8)))
self.disassemble(bytes.fromhex('d2{:02x}'.format(i*8 + 0xc0)))
# 1 byte G2
for i in range(0, 8):
self.disassemble(bytes.fromhex('d3{:02x}'.format(i*8)))
self.disassemble(bytes.fromhex('d3{:02x}'.format(i*8 + 0xc0)))
# 1 byte G3
for i in range(0, 8):
self.disassemble(bytes.fromhex('f6{:02x}'.format(i*8)))
self.disassemble(bytes.fromhex('f6{:02x}'.format(i*8 + 0xc0)))
# 1 byte G3
for i in range(0, 8):
self.disassemble(bytes.fromhex('f7{:02x}'.format(i*8)))
self.disassemble(bytes.fromhex('f7{:02x}'.format(i*8 + 0xc0)))
# 1 byte G4
for i in range(0, 8):
self.disassemble(bytes.fromhex('fe{:02x}'.format(i*8)))
self.disassemble(bytes.fromhex('fe{:02x}'.format(i*8 + 0xc0)))
# 1 byte G5
for i in range(0, 8):
self.disassemble(bytes.fromhex('ff{:02x}'.format(i*8 + 0xc0)))
self.disassemble(bytes.fromhex('ff{:02x}'.format(i*8)))
# 2 bytes G6
for i in range(0, 8):
self.disassemble(bytes.fromhex('0f00{:02x}'.format(i*8)))
self.disassemble(bytes.fromhex('0f00{:02x}'.format(i*8 + 0xc0)))
# 2 bytes G7
for i in range(0, 8):
for rm in range(0, 8):
index = i*8 + rm
self.disassemble(bytes.fromhex('0f01{:02x}'.format(index)))
self.disassemble(bytes.fromhex('0f01{:02x}'.format(index + 0xc0)))
# 2 bytes G8
for i in range(0, 8):
self.disassemble(bytes.fromhex('0fba{:02x}'.format(i*8)))
self.disassemble(bytes.fromhex('0fba{:02x}'.format(i*8 + 0xc0)))
# 2 bytes G9
for i in range(0, 8):
self.disassemble(bytes.fromhex('0fc7{:02x}'.format(i*8)))
self.disassemble(bytes.fromhex('0fc7{:02x}'.format(i*8 + 0xc0)))
myREX = REX()
myREX.W = 1
self.disassemble(bytes.fromhex('{:02x}0fc7{:02x}'.format(myREX.byte(), i*8)))
# 2 bytes G12
for i in range(0, 8):
self.disassemble(bytes.fromhex('0f71{:02x}'.format(i*8)))
self.disassemble(bytes.fromhex('0f71{:02x}'.format(i*8 + 0xc0)))
# 2 bytes G13
for i in range(0, 8):
self.disassemble(bytes.fromhex('0f72{:02x}'.format(i*8)))
self.disassemble(bytes.fromhex('0f72{:02x}'.format(i*8 + 0xc0)))
# 2 bytes G14
for i in range(0, 8):
self.disassemble(bytes.fromhex('0f73{:02x}'.format(i*8)))
self.disassemble(bytes.fromhex('0f73{:02x}'.format(i*8 + 0xc0)))
# 2 bytes G15
for i in range(0, 8):
self.disassemble(bytes.fromhex('0fae{:02x}'.format(i*8)))
self.disassemble(bytes.fromhex('0fae{:02x}'.format(i*8 + 0xc0)))
self.disassemble(bytes.fromhex('f30fae{:02x}'.format(i*8)))
self.disassemble(bytes.fromhex('f30fae{:02x}'.format(i*8 + 0xc0)))
self.disassemble(bytes.fromhex('660fae{:02x}'.format(i*8)))
self.disassemble(bytes.fromhex('660fae{:02x}'.format(i*8 + 0xc0)))
# 2 bytes G16
for i in range(0, 8):
self.disassemble(bytes.fromhex('0f18{:02x}'.format(i*8)))
self.disassemble(bytes.fromhex('0f18{:02x}'.format(i*8 + 0xc0)))
# 2 bytes G17
for i in range(0, 8):
myVEX = VEX('VEX.L1.0F38.W0')
myVEX.vvvv = 0b1111
Buffer = bytes.fromhex('{}f3{:02x}'.format(myVEX.c4(), i*8))
self.disassemble(Buffer)
myVEX = VEX('VEX.L1.0F38.W1')
myVEX.vvvv = 0b1111
Buffer = bytes.fromhex('{}f3{:02x}'.format(myVEX.c4(), | |
decl, '}'))
else:
decl = ''
if name == '' and decl == '':
res = ''
else:
res = ' '.join([l for l in [stype, decl, name] if l])
return res
@property
def text(self):
    """
    Text to construct this instance.

    Single-line form: delegates to :meth:`text_formatted` with no
    indentation and no line breaks.
    """
    return self.text_formatted(indent=0, linebreak=False)
class Dataset(Struct):
    """
    Class for *Dataset*.

    See :class:`Struct`.
    """
    stype = SType.Dataset

    def __init__(self, name='', decl=None, text=None):
        # Initialise from `name`/`decl` first; when `text` is given,
        # re-run Struct.__init__ so the text is parsed.
        # NOTE(review): the second call resets name/decl to their defaults
        # before parsing — presumably parse() re-sets them; confirm.
        super().__init__(name, decl=decl)
        if text:
            super().__init__(text=text)
class Structure(Struct):
    """
    Class for *Structure*.

    See :class:`Struct`.
    """
    stype = SType.Structure

    def __init__(self, name='', decl=None, text=None):
        # Initialise from `name`/`decl` first; when `text` is given,
        # re-run Struct.__init__ so the text is parsed (this resets
        # name/decl to their defaults before parsing).
        super().__init__(name, decl=decl)
        if text:
            super().__init__(text=text)
class Sequence(Struct):
    """
    Class for *Sequence*.

    See :class:`Struct`.

    Examples:
        >>> text = '''
        ...     Sequence {
        ...         Float64 depth;
        ...         Float64 salinity;
        ...         Float64 oxygen;
        ...         Float64 temperature;
        ...     } cast;'''
        >>> Sequence(text=text)
        Sequence('cast', {'depth': Var('depth', 'Float64'), 'salinity': Var('salinity', 'Float64'), 'oxygen': Var('oxygen', 'Float64'), 'temperature': Var('temperature', 'Float64')})
    """
    stype = SType.Sequence

    def __init__(self, name='', decl=None, text=None):
        # Initialise from `name`/`decl` first; when `text` is given,
        # re-run Struct.__init__ so the text is parsed (this resets
        # name/decl to their defaults before parsing).
        super().__init__(name, decl=decl)
        if text:
            super().__init__(text=text)
class Grid(Struct):
    """
    Class for *Grid*.

    | *Grid* := Grid { ARRAY: *declaration* MAPS: *declarations* } (*name* | *name* *arr*)

    Attributes:
        name(str): *name*
        stype(SType): *stype*
        array(Decl): ARRAY *declaration*
        maps(Decls): MAPS *declarations*

    Examples:
        >>> text = '''
        ...     Grid {
        ...      ARRAY:
        ...         Float32 tas[time = 8412][lat = 160][lon = 320];
        ...      MAPS:
        ...         Float64 time[time = 8412];
        ...         Float64 lat[lat = 160];
        ...         Float64 lon[lon = 320];
        ...     } tas;'''
        >>> Grid(text=text)
        Grid('tas', array=Var('tas', 'Float32', arr=[Arr('time', 8412), Arr('lat', 160), Arr('lon', 320)]), maps={'time': Var('time', 'Float64', arr=[Arr('time', 8412)]), 'lat': Var('lat', 'Float64', arr=[Arr('lat', 160)]), 'lon': Var('lon', 'Float64', arr=[Arr('lon', 320)])})
    """
    stype = SType.Grid

    def __init__(self, name='', array=None, maps=None, text=None):
        """
        Parameters:
            name(str): *name*
            stype(str or SType): *stype*
            array(Decl): ARRAY *declaration*
            maps(Decls): MAPS *declarations*
            text(str): text to be parsed.

        If `text` is not ``None``, other attributes are overridden by
        the result of :meth:`.parse`.
        """
        super().__init__(name, decl=None)
        self.array = array
        self.maps = maps
        if text:
            self.parse(text)

    def parse(self, text):
        """
        Parse `text` to construct :class:`Grid`.

        Attributes are left untouched when `text` does not match the
        grid pattern.
        """
        _debug_write(f"{self.__class__.__name__}.parse: text='{text}'")
        res = _pat_grid.match(text)
        if res:
            _debug_write(
                f"{self.__class__.__name__}.parse: array_line='{res.group(1).strip()}'"
            )
            _debug_write(
                f"{self.__class__.__name__}.parse: maps_line='{res.group(2).strip()}'"
            )
            # group(1): the ARRAY declaration, group(2): the MAPS
            # declarations, group(3): the grid's name.
            self.array = Var(text=res.group(1))
            self.maps = parse_declarations(res.group(2))
            self.name = res.group(3)

    def __getattr__(self, key):
        # print('__getattr__() called')
        # Fallback lookup: expose the ARRAY member and each MAP as an
        # attribute of the grid itself.
        # NOTE(review): only runs when normal lookup fails; if 'array' is
        # itself missing (e.g. during unpickling) `self.array` here would
        # recurse — TODO confirm this cannot happen in practice.
        if key == self.array.name:
            return self.array
        elif key in self.maps:
            return self.maps[key]
        else:
            raise AttributeError(
                f"'{self.__class__.__name__}' object has no attribute '{key}'")

    def __getitem__(self, key):
        # print('__getitem__() called')
        # Same mapping as __getattr__, but raising KeyError for dict-style
        # access.
        if key == self.array.name:
            return self.array
        elif key in self.maps:
            return self.maps[key]
        else:
            raise KeyError(f"'{key}'")

    def __contains__(self, item):
        # print('__contains__() called')
        # True for instance attributes, map names and the array's name.
        return (item in self.__dict__) or (item in self.maps) or (
            item == self.array.name)

    def __repr__(self):
        # Only include the pieces that are actually set.
        if self.name:
            name = f"'{self.name}'"
        else:
            name = ''
        if self.array:
            array = f'array={self.array.__repr__()}'
        else:
            array = ''
        if self.maps:
            maps = f'maps={self.maps.__repr__()}'
        else:
            maps = ''
        res = ', '.join([l for l in [name, array, maps] if l])
        return (f'{self.__class__.__name__}({res})')

    def __str__(self):
        return self.text_formatted()

    def text_formatted(self, indent=4, linebreak=True):
        """
        Return formatted text.

        Parameters:
            indent(int): number of spaces used to indent the ARRAY/MAPS
                declarations.
            linebreak(bool): if ``True`` the parts are joined with
                newlines, otherwise concatenated on one line.
        """
        _debug_write(
            f'{self.__class__.__name__}.text_formatted:indent={indent},linebreak={linebreak}'
        )
        if self.name:
            name = self.name + ';'
        else:
            name = ''
        if self.stype:
            stype = f'{self.stype.name}'
        else:
            stype = ''
        if self.array is None or self.maps is None:
            decl = ''
        else:
            if linebreak:
                lb = '\n'
            else:
                lb = ''
            # ARRAY section, then MAPS section, wrapped in braces.
            array = f' ARRAY:{lb}' + tw.indent(self.array.text, ' ' * indent)
            ll = f'{lb}'.join([
                self.maps[d].text_formatted(indent, linebreak)
                for d in self.maps if d
            ])
            maps = f' MAPS:{lb}' + tw.indent(ll, ' ' * indent)
            decl = f'{lb}'.join(('{', array, maps, '}'))
        if name == '' and decl == '':
            res = ''
        else:
            res = ' '.join([l for l in [stype, decl, name] if l])
        return res

    @property
    def text(self):
        """
        Text to construct this instance.
        """
        return self.text_formatted(indent=0, linebreak=False)
class Var(Decl):
    """
    Class for *Var*.

    | *Var* := *basetype* (*name*|*name* *arr*)

    Attributes:
        name (str): *name*
        btype (BType): *basetype*
        arr (list(Arr)): *array-decl*
    """

    def __init__(self, name='', btype=None, arr=None, text=None):
        """
        Parameters:
            name(str): *name*
            btype(str or BType): *basetype*
            arr(Arr or list(Arr)): *array-decl*
            text(str): text to be parsed

        Raises:
            TypeError: if `btype` or `arr` is invalid

        If `text` is not ``None``, other attributes are overridden by
        the result of :meth:`.parse`.
        """
        self.name = name
        if btype is None or isinstance(btype, BType):
            self.btype = btype
        elif isinstance(btype, str):
            # isinstance() (instead of ``type(btype) is str``) also accepts
            # str subclasses; the value is still converted via BType().
            self.btype = BType(btype)
        else:
            raise TypeError(f'btype={btype} is invalid type: {type(btype)}')
        if arr is None or arr == []:
            self.arr = None
        elif isinstance(arr, Arr):
            self.arr = arr
        elif isinstance(arr, list) and isinstance(arr[0], Arr):
            # a non-empty list of Arr (emptiness was handled above)
            self.arr = arr
        elif isinstance(arr, str):
            self.arr = parse_arrdecls(arr)
        else:
            raise TypeError(f'arr={arr} is invalid type: {type(arr)}')
        if text:
            self.parse(text)

    def parse(self, text):
        """
        Parse `text` to construct :class:`Var`.

        Attributes are left untouched when `text` does not match a *Var*
        line or names an unknown *basetype*.
        """
        _debug_write(f'Var.parse():text="{text[:60]}"')
        res = _pat_varline.match(text)
        if res:
            try:
                self.btype = BType(res.group(1))
            except ValueError:
                # unknown basetype: give up without touching name/arr
                return None
            self.name = res.group(2)
            if res.group(3):
                self.arr = parse_arrdecls(res.group(3))

    def __repr__(self):
        # Only include the pieces that are actually set.
        if self.name == '':
            name = ''
        else:
            name = f"'{self.name}'"
        if self.btype is None:
            btype = ''
        else:
            btype = f"'{self.btype.name}'"
        if self.arr:
            arr = 'arr=' + str([a for a in self.arr])
        else:
            arr = ''
        args = ', '.join([elem for elem in [name, btype, arr] if elem != ''])
        return f'Var({args})'

    def __str__(self):
        return self.text_formatted()

    def text_formatted(self, indent=None, linebreak=None):
        """
        Formatted text expression of this instance.

        `indent` and `linebreak` are dummy arguments here, kept for
        interface compatibility with the other declaration classes.
        """
        if self.btype is None:
            res = ''
        else:
            res = f'{self.btype.name}'
        if self.name != '':
            res += f' {self.name}'
        if self.arr:
            res += ''.join([a.text for a in self.arr])
        if res:
            res += ';'
        return res

    @property
    def text(self):
        """
        Text to construct this instance.
        """
        return self.text_formatted()
class Arr():
    """
    Class for *arr*.

    | *arr* := [integer] | [*name* = integer]

    As a text form::

        text = '[time = 8412]'
        text = '[500]'

    Example:
        >>> text = '[lat = 160];'
        >>> Arr(text=text)
        Arr('lat', 160)
        >>> text = '[500];'
        >>> Arr(text=text)
        Arr('', 500)

    Attributes:
        name (str) : *name*
        val (int) : integer
    """

    def __init__(self, name='', val=None, text=None):
        """
        Parameters:
            name(str): *name*
            val(int): dimension size
            text(str): text to be parsed; overrides `name`/`val`.
        """
        self.name = name
        self.val = val
        if text:
            self.parse(text)

    def parse(self, text):
        """
        Parse `text` to set :attr:`name` and :attr:`val`.

        Attributes are left untouched when neither pattern matches.
        """
        _debug_write(f"{self.__class__.__name__}.parse():text='{text}'")
        res = _pat_arrdecl.match(text)
        if res:
            # '[name = val]' form
            self.name = res.group(1)
            self.val = int(res.group(2))
        else:
            # '[val]' form (name stays empty)
            res = _pat_arrdecl_valonly.match(text)
            if res:
                self.val = int(res.group(1))
        _debug_write(
            f"{self.__class__.__name__}.parse():name='{self.name}',val='{self.val}'"
        )

    def __eq__(self, other):
        if type(other) is not type(self):
            return False
        res = [getattr(self, a) == getattr(other, a) for a in self.__dict__]
        return all(res)

    def __repr__(self):
        # 'self.val is not None' (instead of truthiness) so that a valid
        # zero-sized dimension, Arr('', 0), is not rendered as ''.
        if self.name:
            return f"Arr('{self.name}', {self.val})"
        elif self.val is not None:
            return f"Arr('', {self.val})"
        else:
            return ''

    def __str__(self):
        if self.name:
            return f"Arr(name='{self.name}', val={self.val})"
        elif self.val is not None:
            return f"[{self.val}]"
        else:
            return ''

    def text_formatted(self, indent=None, linebreak=None):
        """
        Text form of *arr*.

        `indent` and `linebreak` are dummy here.
        """
        if self.name:
            return f"[{self.name} = {self.val}]"
        elif self.val is not None:
            return f"[{self.val}]"
        else:
            return ''

    @property
    def text(self):
        """Text to construct this instance."""
        return self.text_formatted()
def check_braces_matching(text):
    """
    Check if braces(``{`` and ``}``) in given `text` match.

    Raises `ValueError` unless match.

    Examples:
        >>> text = 'Dataset{varline} hoge'
        >>> check_braces_matching(text)  # True
        >>> text = 'Struct{ Sequence{Var} fuga }} hoge'
        >>> check_braces_matching(text)
        Traceback (most recent call last):
        ...
        ValueError: braces do not match: too many right braces: 1 more.
        >>> text = 'Struct{ Sequence{{Var} fuga } hoge'
        >>> check_braces_matching(text)
        Traceback (most recent call last):
        ...
        ValueError: braces do not match: too many left braces: 1 more.
    """
    # running nesting depth; dropping below zero means an unmatched '}'
    # (checking inside the loop also rejects inputs such as '}{')
    count = 0
    _debug_write('check_braces_matching:')
    for n, c in enumerate(text):
        if c == '{':
            count += 1
            _debug_write(f'n={n}, count={count}')
        if c == '}':
            count -= 1
            _debug_write(f'n={n}, count={count}')
        if (count < 0):
            raise ValueError(f'braces do not match: '
                             f'too many right braces: {abs(count)} more.')
    if count > 0:
        raise ValueError(f'braces do not match: '
                         f'too many left braces: {count} more.')
def parse_dataset(text):
    """
    Parse toplevel *dataset*.

    *dataset* := Dataset { *declarations* } *name*;

    Raises ValueError when braces are unbalanced or the text is not a
    Dataset definition.
    """
    check_braces_matching(text)
    # Dataset is the toplevel, *greedy* is preferable.
    if _pat_dataset.match(text) is None:
        raise ValueError('Given text is not the Dataset definition.')
    return Dataset(text=text)
def parse_declarations(text):
"""
Return :class:`Decls`, dict of {`name`: *Decl*} parsed from `text`.
"""
# _debug_write(f'parse_declarations:text="{text}"')
# _debug_write('======parse_declarations======')
res = Decls()
while text != '':
_debug_write('=' * 20)
_debug_write(f"parse_declarations:text='{text}'")
res_ident = _pat_ident.match(text)
if res_ident:
ident = res_ident.group(1)
_debug_write(f"parse_declarations:ident:'{ident}'")
if ident in _idents_stype:
| |
<filename>venv/lib/python3.6/site-packages/madmom/audio/stft.py
# encoding: utf-8
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=too-many-arguments
"""
This module contains Short-Time Fourier Transform (STFT) related functionality.
"""
from __future__ import absolute_import, division, print_function
import warnings
import numpy as np
import scipy.fftpack as fftpack
try:
    from pyfftw.builders import rfft as rfft_builder
except ImportError:
    # pyfftw is optional: fall back to a stub returning None so callers
    # can detect that no pre-built FFTW plan is available.
    def rfft_builder(*args, **kwargs):
        return None
from ..processors import Processor
from .signal import Signal, FramedSignal
STFT_DTYPE = np.complex64
def fft_frequencies(num_fft_bins, sample_rate):
    """
    Frequencies of the FFT bins.

    Parameters
    ----------
    num_fft_bins : int
        Number of FFT bins (i.e. half the FFT length).
    sample_rate : float
        Sample rate of the signal.

    Returns
    -------
    fft_frequencies : numpy array
        Frequencies of the FFT bins [Hz].

    """
    # full two-sided frequency grid for an FFT of twice the bin count ...
    num_samples = 2 * num_fft_bins
    frequencies = np.fft.fftfreq(num_samples, 1. / sample_rate)
    # ... of which only the non-negative half is returned
    return frequencies[:num_fft_bins]
def stft(frames, window, fft_size=None, circular_shift=False,
         include_nyquist=False, fftw=None):
    """
    Calculates the complex Short-Time Fourier Transform (STFT) of the given
    framed signal.

    Parameters
    ----------
    frames : numpy array or iterable, shape (num_frames, frame_size)
        Framed signal (e.g. :class:`FramedSignal` instance)
    window : numpy array, shape (frame_size,)
        Window (function).
    fft_size : int, optional
        FFT size (should be a power of 2); if 'None', the 'frame_size' given
        by `frames` is used; if the given `fft_size` is greater than the
        'frame_size', the frames are zero-padded, if smaller truncated.
    circular_shift : bool, optional
        Circular shift the individual frames before performing the FFT;
        needed for correct phase.
    include_nyquist : bool, optional
        Include the Nyquist frequency bin (sample rate / 2) in returned STFT.
    fftw : :class:`pyfftw.FFTW` instance, optional
        If a :class:`pyfftw.FFTW` object is given it is used to compute the
        STFT with the FFTW library. Requires 'pyfftw'.

    Returns
    -------
    stft : numpy array, shape (num_frames, frame_size)
        The complex STFT of the framed signal.

    """
    # check for correct shape of input
    if frames.ndim != 2:
        # TODO: add multi-channel support
        raise ValueError('frames must be a 2D array or iterable, got %s with '
                         'shape %s.' % (type(frames), frames.shape))
    # shape of the frames
    num_frames, frame_size = frames.shape
    # FFT size to use
    if fft_size is None:
        fft_size = frame_size
    # number of FFT bins to return (half the FFT size; the other half is the
    # mirrored spectrum of the real-valued input)
    num_fft_bins = fft_size >> 1
    if include_nyquist:
        num_fft_bins += 1
    # size of the FFT circular shift (needed for correct phase)
    # NOTE(review): assumes an even frame_size; with an odd size the two
    # halves swapped below differ in length — confirm upstream guarantees.
    if circular_shift:
        fft_shift = frame_size >> 1
    # init objects
    data = np.empty((num_frames, num_fft_bins), STFT_DTYPE)
    # iterate over all frames
    for f, frame in enumerate(frames):
        if circular_shift:
            # if we need to circular shift the signal for correct phase, we
            # first multiply the signal frame with the window (or just use it
            # as it is if no window function is given)
            if window is not None:
                signal = np.multiply(frame, window)
            else:
                signal = frame
            # then swap the two halves of the windowed signal; if the FFT size
            # is bigger than the frame size, we need to pad the (windowed)
            # signal with additional zeros in between the two halves
            fft_signal = np.zeros(fft_size)
            fft_signal[:fft_shift] = signal[fft_shift:]
            fft_signal[-fft_shift:] = signal[:fft_shift]
        else:
            # multiply the signal frame with the window and or save it directly
            # to fft_signal (i.e. bypass the additional copying step above)
            if window is not None:
                fft_signal = np.multiply(frame, window)
            else:
                fft_signal = frame
        # perform DFT; only the first num_fft_bins bins are kept
        if fftw:
            data[f] = fftw(fft_signal)[:num_fft_bins]
        else:
            data[f] = fftpack.fft(fft_signal, fft_size, axis=0)[:num_fft_bins]
    # return STFT
    return data
def phase(stft):
    """
    Returns the phase of the complex STFT of a signal.

    Parameters
    ----------
    stft : numpy array, shape (num_frames, frame_size)
        The complex STFT of a signal.

    Returns
    -------
    phase : numpy array
        Phase of the STFT.

    """
    # the phase of each bin is the angle of its complex value
    angles = np.angle(stft)
    return angles
def local_group_delay(phase):
    """
    Returns the local group delay of the phase of a signal.

    Parameters
    ----------
    phase : numpy array, shape (num_frames, frame_size)
        Phase of the STFT of a signal.

    Returns
    -------
    lgd : numpy array
        Local group delay of the phase.

    """
    # only 2D input is supported
    if phase.ndim != 2:
        raise ValueError('phase must be a 2D array')
    # work on the unwrapped phase so bin-to-bin jumps are continuous
    unwrapped = np.unwrap(phase)
    # local group delay is the derivative over frequency, i.e. the
    # difference of neighbouring frequency bins
    unwrapped[:, :-1] = unwrapped[:, :-1] - unwrapped[:, 1:]
    # set the highest frequency to 0
    unwrapped[:, -1] = 0
    # return the local group delay
    return unwrapped


# alias
lgd = local_group_delay
# mixin providing `num_frames` & `num_bins` properties
class _PropertyMixin(object):
    # pylint: disable=missing-docstring
    # Assumes the host class supports len() and has a 2D `shape` attribute
    # (e.g. an ndarray subclass).

    @property
    def num_frames(self):
        """Number of frames (length along the first axis)."""
        return len(self)

    @property
    def num_bins(self):
        """Number of bins (size of the second axis)."""
        return int(self.shape[1])
# short-time Fourier transform class
class ShortTimeFourierTransform(_PropertyMixin, np.ndarray):
"""
ShortTimeFourierTransform class.
Parameters
----------
frames : :class:`.audio.signal.FramedSignal` instance
Framed signal.
window : numpy ufunc or numpy array, optional
Window (function); if a function (e.g. `np.hanning`) is given, a window
with the frame size of `frames` and the given shape is created.
fft_size : int, optional
FFT size (should be a power of 2); if 'None', the `frame_size` given by
`frames` is used, if the given `fft_size` is greater than the
`frame_size`, the frames are zero-padded accordingly.
circular_shift : bool, optional
Circular shift the individual frames before performing the FFT;
needed for correct phase.
include_nyquist : bool, optional
Include the Nyquist frequency bin (sample rate / 2).
fftw : :class:`pyfftw.FFTW` instance, optional
If a :class:`pyfftw.FFTW` object is given it is used to compute the
STFT with the FFTW library. If 'None', a new :class:`pyfftw.FFTW`
object is built. Requires 'pyfftw'.
kwargs : dict, optional
If no :class:`.audio.signal.FramedSignal` instance was given, one is
instantiated with these additional keyword arguments.
Notes
-----
If the :class:`Signal` (wrapped in the :class:`FramedSignal`) has an
integer dtype, the `window` is automatically scaled as if the `signal` had
a float dtype with the values being in the range [-1, 1]. This results in
same valued STFTs independently of the dtype of the signal. On the other
hand, this prevents extra memory consumption since the data-type of the
signal does not need to be converted (and if no decoding is needed, the
audio signal can be memory-mapped).
Examples
--------
Create a :class:`ShortTimeFourierTransform` from a :class:`Signal` or
:class:`FramedSignal`:
>>> sig = Signal('tests/data/audio/sample.wav')
>>> sig
Signal([-2494, -2510, ..., 655, 639], dtype=int16)
>>> frames = FramedSignal(sig, frame_size=2048, hop_size=441)
>>> frames # doctest: +ELLIPSIS
<madmom.audio.signal.FramedSignal object at 0x...>
>>> stft = ShortTimeFourierTransform(frames)
>>> stft # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
ShortTimeFourierTransform([[-3.15249+0.j , 2.62216-3.02425j, ...,
-0.03634-0.00005j, 0.0367 +0.00029j],
[-4.28429+0.j , 2.02009+2.01264j, ...,
-0.01981-0.00933j, -0.00536+0.02162j],
...,
[-4.92274+0.j , 4.09839-9.42525j, ...,
0.0055 -0.00257j, 0.00137+0.00577j],
[-9.22709+0.j , 8.76929+4.0005j , ...,
0.00981-0.00014j, -0.00984+0.00006j]],
dtype=complex64)
A ShortTimeFourierTransform can be instantiated directly from a file name:
>>> stft = ShortTimeFourierTransform('tests/data/audio/sample.wav')
>>> stft # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
ShortTimeFourierTransform([[...]], dtype=complex64)
Doing the same with a Signal of float data-type will result in a STFT of
same value range (rounding errors will occur of course):
>>> sig = Signal('tests/data/audio/sample.wav', dtype=np.float)
>>> sig # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Signal([-0.07611, -0.0766 , ..., 0.01999, 0.0195 ])
>>> frames = FramedSignal(sig, frame_size=2048, hop_size=441)
>>> frames # doctest: +ELLIPSIS
<madmom.audio.signal.FramedSignal object at 0x...>
>>> stft = ShortTimeFourierTransform(frames)
>>> stft # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
ShortTimeFourierTransform([[-3.1524 +0.j , 2.62208-3.02415j, ...,
-0.03633-0.00005j, 0.0367 +0.00029j],
[-4.28416+0.j , 2.02003+2.01257j, ...,
-0.01981-0.00933j, -0.00536+0.02162j],
...,
[-4.92259+0.j , 4.09827-9.42496j, ...,
0.0055 -0.00257j, 0.00137+0.00577j],
[-9.22681+0.j , 8.76902+4.00038j, ...,
0.00981-0.00014j, -0.00984+0.00006j]],
dtype=complex64)
Additional arguments are passed to :class:`FramedSignal` and
:class:`Signal` respectively:
>>> stft = ShortTimeFourierTransform('tests/data/audio/sample.wav', \
frame_size=2048, fps=100, sample_rate=22050)
>>> stft.frames # doctest: +ELLIPSIS
<madmom.audio.signal.FramedSignal object at 0x...>
>>> stft.frames.frame_size
2048
>>> stft.frames.hop_size
220.5
>>> stft.frames.signal.sample_rate
22050
"""
# pylint: disable=super-on-old-class
# pylint: disable=super-init-not-called
# pylint: disable=attribute-defined-outside-init
def __init__(self, frames, window=np.hanning, fft_size=None,
             circular_shift=False, include_nyquist=False, fft_window=None,
             fftw=None, **kwargs):
    # this method is for documentation purposes only; the actual
    # construction happens in __new__ (ndarray subclassing pattern)
    pass
def __new__(cls, frames, window=np.hanning, fft_size=None,
circular_shift=False, include_nyquist=False, fft_window=None,
fftw=None, **kwargs):
# pylint: disable=unused-argument
if isinstance(frames, ShortTimeFourierTransform):
# already a STFT, use the frames thereof
frames = frames.frames
# instantiate a FramedSignal if needed
if not isinstance(frames, FramedSignal):
frames = FramedSignal(frames, **kwargs)
# size of the frames
frame_size = frames.shape[1]
if fft_window is None:
# if a callable window function is given, use the frame size to
# create a window of this size
if hasattr(window, '__call__'):
window = window(frame_size)
# window used for FFT
try:
# if the signal is not scaled, scale the window accordingly
| |
def concat(lib, df1, df2, join, sort, ignore_index):
return lib.concat(
[df1, df2], join=join, sort=sort, ignore_index=ignore_index
)
run_and_compare(
concat,
data=self.data,
data2=self.data2,
join=join,
sort=sort,
ignore_index=ignore_index,
)
def test_concat_with_same_df(self):
    def add_alias_column(df, **kwargs):
        # add 'f' as a copy of column 'a' on the same frame
        df["f"] = df["a"]
        return df

    run_and_compare(add_alias_column, data=self.data)
def test_setitem_lazy(self):
    def applier(df, **kwargs):
        df = df + 1
        df["a"] = df["a"] + 1
        df["e"] = df["a"] + 1
        # scalar columns of every supported integer/float width
        for column, value in (
            ("new_int8", np.int8(10)),
            ("new_int16", np.int16(10)),
            ("new_int32", np.int32(10)),
            ("new_int64", np.int64(10)),
            ("new_int", 10),
            ("new_float", 5.5),
            ("new_float64", np.float64(10.1)),
        ):
            df[column] = value
        return df

    run_and_compare(applier, data=self.data)
def test_setitem_default(self):
    def setter(df, lib, **kwargs):
        df = df + 1
        # plain ndarray assignment ...
        df["a"] = np.arange(3)
        # ... and assignment of a library Series
        ser = lib.Series(np.arange(3))
        df["b"] = ser
        return df

    run_and_compare(setter, data=self.data, force_lazy=False)
def test_insert_lazy(self):
    def inserter(df, **kwargs):
        df = df + 1
        # insert scalars at different positions, then a derived column
        df.insert(2, "new_int", 10)
        df.insert(1, "new_float", 5.5)
        df.insert(0, "new_a", df["a"] + 1)
        return df

    run_and_compare(inserter, data=self.data)
def test_insert_default(self):
    def inserter(df, lib, **kwargs):
        df = df + 1
        # insert a plain ndarray and a library Series
        df.insert(1, "new_range", np.arange(3))
        ser = lib.Series(np.arange(3))
        df.insert(1, "new_series", ser)
        return df

    run_and_compare(inserter, data=self.data, force_lazy=False)
def test_concat_many(self):
    def concat(df1, df2, lib, **kwargs):
        # four frames: the originals plus a copy of each
        return lib.concat([df1, df2, df1.copy(), df2.copy()])

    def sort_comparator(df1, df2):
        """Sort and verify equality of the passed frames."""
        # We sort values because order of rows in the 'union all' result is inconsistent in OmniSci
        sorted_frames = [
            try_cast_to_pandas(df).sort_values(df.columns[0]) for df in (df1, df2)
        ]
        return df_equals(*sorted_frames)

    run_and_compare(
        concat, data=self.data, data2=self.data2, comparator=sort_comparator
    )
def test_concat_agg(self):
    def concat(lib, df1, df2):
        # aggregate both frames, then union the results
        agg1 = df1.groupby("a", as_index=False).agg(
            {"b": "sum", "d": "sum", "e": "sum"}
        )
        agg2 = df2.groupby("a", as_index=False).agg(
            {"c": "sum", "b": "sum", "f": "sum"}
        )
        return lib.concat([agg1, agg2])

    run_and_compare(concat, data=self.data, data2=self.data2, allow_subqueries=True)
@pytest.mark.parametrize("join", ["inner", "outer"])
@pytest.mark.parametrize("sort", bool_arg_values)
@pytest.mark.parametrize("ignore_index", bool_arg_values)
def test_concat_single(self, join, sort, ignore_index):
    # concat of a single frame: a degenerate but valid case
    options = {"join": join, "sort": sort, "ignore_index": ignore_index}

    def concat(lib, df, join, sort, ignore_index):
        return lib.concat([df], join=join, sort=sort, ignore_index=ignore_index)

    run_and_compare(concat, data=self.data, **options)
def test_groupby_concat_single(self):
    def concat(lib, df):
        # groupby applied on top of a single-frame concat
        return lib.concat([df]).groupby("a").agg({"b": "min"})

    run_and_compare(concat, data=self.data)
@pytest.mark.parametrize("join", ["inner"])
@pytest.mark.parametrize("sort", bool_arg_values)
@pytest.mark.parametrize("ignore_index", bool_arg_values)
def test_concat_join(self, join, sort, ignore_index):
    # column-wise (axis=1) concatenation of two differently-shaped frames
    options = {"join": join, "sort": sort, "ignore_index": ignore_index}

    def concat(lib, df1, df2, join, sort, ignore_index, **kwargs):
        return lib.concat(
            [df1, df2], axis=1, join=join, sort=sort, ignore_index=ignore_index
        )

    run_and_compare(concat, data=self.data, data2=self.data3, **options)
def test_concat_index_name(self):
    df1 = pandas.DataFrame(self.data).set_index("a")
    df2 = pandas.DataFrame(self.data3).set_index("f")

    # index names differ ('a' vs 'f')
    df_equals(
        pandas.concat([df1, df2], axis=1, join="inner"),
        pd.concat([df1, df2], axis=1, join="inner"),
    )

    # index names match
    df2.index.name = "a"
    df_equals(
        pandas.concat([df1, df2], axis=1, join="inner"),
        pd.concat([df1, df2], axis=1, join="inner"),
    )
def test_concat_index_names(self):
    df1 = pandas.DataFrame(self.data).set_index(["a", "b"])
    df2 = pandas.DataFrame(self.data3).set_index(["f", "h"])

    # multi-index names differ
    df_equals(
        pandas.concat([df1, df2], axis=1, join="inner"),
        pd.concat([df1, df2], axis=1, join="inner"),
    )

    # multi-index names match
    df2.index.names = ["a", "b"]
    df_equals(
        pandas.concat([df1, df2], axis=1, join="inner"),
        pd.concat([df1, df2], axis=1, join="inner"),
    )
class TestGroupby:
# shared fixture: three integer columns; 'a' holds duplicated group keys
data = {
    "a": [1, 1, 2, 2, 2, 1],
    "b": [11, 21, 12, 22, 32, 11],
    "c": [101, 201, 202, 202, 302, 302],
}
# group-by key variants used by the parametrized tests: single and multi column
cols_value = ["a", ["a", "b"]]
@pytest.mark.parametrize("cols", cols_value)
@pytest.mark.parametrize("as_index", bool_arg_values)
def test_groupby_sum(self, cols, as_index):
    def groupby_sum(df, cols, as_index, **kwargs):
        grouped = df.groupby(cols, as_index=as_index)
        return grouped.sum()

    run_and_compare(groupby_sum, data=self.data, cols=cols, as_index=as_index)
@pytest.mark.parametrize("cols", cols_value)
@pytest.mark.parametrize("as_index", bool_arg_values)
def test_groupby_count(self, cols, as_index):
    def groupby_count(df, cols, as_index, **kwargs):
        grouped = df.groupby(cols, as_index=as_index)
        return grouped.count()

    run_and_compare(groupby_count, data=self.data, cols=cols, as_index=as_index)
@pytest.mark.xfail(
    reason="Currently mean() passes a lambda into query compiler which cannot be executed on OmniSci engine"
)
@pytest.mark.parametrize("cols", cols_value)
@pytest.mark.parametrize("as_index", bool_arg_values)
def test_groupby_mean(self, cols, as_index):
    def groupby_mean(df, cols, as_index, **kwargs):
        grouped = df.groupby(cols, as_index=as_index)
        return grouped.mean()

    run_and_compare(groupby_mean, data=self.data, cols=cols, as_index=as_index)
@pytest.mark.parametrize("cols", cols_value)
@pytest.mark.parametrize("as_index", bool_arg_values)
def test_groupby_proj_sum(self, cols, as_index):
    def groupby_sum(df, cols, as_index, **kwargs):
        # project column 'c' out of the groupby before aggregating
        grouped = df.groupby(cols, as_index=as_index)
        return grouped.c.sum()

    run_and_compare(
        groupby_sum, data=self.data, cols=cols, as_index=as_index, force_lazy=False
    )
@pytest.mark.parametrize("agg", ["count", "size", "nunique"])
def test_groupby_agg(self, agg):
    def aggregate(df, agg, **kwargs):
        return df.groupby("a").agg({"b": agg})

    run_and_compare(aggregate, data=self.data, agg=agg)
def test_groupby_agg_default_to_pandas(self):
    """Aggregations the engine cannot run must default to pandas.

    Covers two fallback triggers: a lambda aggregation function and an
    aggregation name ("cumprod") that is not implemented by the engine.
    """

    def lambda_func(df, **kwargs):
        return df.groupby("a").agg(lambda df: (df.mean() - df.sum()) // 2)

    run_and_compare(lambda_func, data=self.data, force_lazy=False)

    def not_implemented_func(df, **kwargs):
        return df.groupby("a").agg("cumprod")

    # Bug fix: this second comparison previously re-ran `lambda_func`,
    # so the "cumprod" fallback path defined above was never exercised.
    run_and_compare(not_implemented_func, data=self.data, force_lazy=False)
@pytest.mark.xfail(
    reason="Function specified as a string should be passed into query compiler API, but currently it is transformed into a lambda"
)
@pytest.mark.parametrize("cols", cols_value)
@pytest.mark.parametrize("as_index", bool_arg_values)
def test_groupby_agg_mean(self, cols, as_index):
    """String-named aggregation ("mean") through GroupBy.agg."""

    def groupby_mean(df, cols, as_index, **kwargs):
        grouped = df.groupby(cols, as_index=as_index)
        return grouped.agg("mean")

    run_and_compare(groupby_mean, data=self.data, cols=cols, as_index=as_index)
def test_groupby_lazy_multiindex(self):
    """Group-by on a frame carrying a multi-index and a pending lazy op."""
    index = generate_multiindex(len(self.data["a"]))

    def groupby(df, *args, **kwargs):
        shifted = df + 1
        return shifted.groupby("a").agg({"b": "size"})

    run_and_compare(groupby, data=self.data, constructor_kwargs={"index": index})
def test_groupby_lazy_squeeze(self):
    """Column-axis squeeze of a single-column group-by result, kept lazy."""

    def applier(df, **kwargs):
        summed = df.groupby("a").sum()
        return summed.squeeze(axis=1)

    run_and_compare(
        applier,
        data=self.data,
        constructor_kwargs={"columns": ["a", "b"]},
        force_lazy=True,
    )
@pytest.mark.parametrize("method", ["sum", "size"])
def test_groupby_series(self, method):
    """Group a Series by itself and apply the requested reduction."""

    def groupby(df, **kwargs):
        first_col = df.columns[0]
        ser = df[first_col]
        grouped = ser.groupby(ser)
        return getattr(grouped, method)()

    run_and_compare(groupby, data=self.data)
def test_groupby_size(self):
    """Group sizes via GroupBy.size on a single key column."""

    def groupby(df, **kwargs):
        grouped = df.groupby("a")
        return grouped.size()

    run_and_compare(groupby, data=self.data)
@pytest.mark.parametrize("by", [["a"], ["a", "b", "c"]])
@pytest.mark.parametrize("agg", ["sum", "size"])
@pytest.mark.parametrize("as_index", [True, False])
def test_groupby_agg_by_col(self, by, agg, as_index):
    """Aggregate over the very columns that serve as group-by keys."""

    def simple_agg(df, **kwargs):
        return df.groupby(by, as_index=as_index).agg(agg)

    run_and_compare(simple_agg, data=self.data)

    def dict_agg(df, **kwargs):
        spec = {by[0]: agg}
        return df.groupby(by, as_index=as_index).agg(spec)

    run_and_compare(dict_agg, data=self.data)

    def dict_agg_all_cols(df, **kwargs):
        spec = {col: agg for col in by}
        return df.groupby(by, as_index=as_index).agg(spec)

    run_and_compare(dict_agg_all_cols, data=self.data)
# modin-issue#3461
def test_groupby_pure_by(self):
    """Group-by where 'by' is the frame/series itself, in both lazy-op states."""
    data = [1, 1, 2, 2]
    # Test when 'by' is a 'TransformNode'
    run_and_compare(lambda df: df.groupby(df).sum(), data=data, force_lazy=True)

    # Test when 'by' is a 'FrameNode'
    md_ser, pd_ser = pd.Series(data), pandas.Series(data)

    # Force materialization so the frame's op collapses to a FrameNode.
    md_ser._query_compiler._modin_frame._execute()
    assert isinstance(
        md_ser._query_compiler._modin_frame._op, FrameNode
    ), "Triggering execution of the Modin frame supposed to set 'FrameNode' as a frame's op"

    # Run the group-by under forced lazy mode, then release the mode before
    # comparing against plain pandas.
    set_execution_mode(md_ser, "lazy")
    md_res = md_ser.groupby(md_ser).sum()
    set_execution_mode(md_res, None)

    pd_res = pd_ser.groupby(pd_ser).sum()
    df_equals(md_res, pd_res)
# Taxi-benchmark style fixture: integer keys, datetimes and float amounts.
taxi_data = {
    "a": [1, 1, 2, 2],
    "b": [11, 21, 12, 11],
    "c": pandas.to_datetime(
        ["20190902", "20180913", "20190921", "20180903"], format="%Y%m%d"
    ),
    "d": [11.5, 21.2, 12.8, 13.4],
}

# TODO: emulate taxi queries with group by category types when we have loading
# using arrow
# Another way of doing taxi q1 is
# res = df.groupby("cab_type").size() - this should be tested later as well
def test_taxi_q1(self):
    """Taxi benchmark Q1: row count per group of column 'a'."""

    def taxi_q1(df, **kwargs):
        grouped = df.groupby("a")
        return grouped.size()

    run_and_compare(taxi_q1, data=self.taxi_data)
def test_taxi_q2(self):
    """Taxi benchmark Q2: mean of 'b' per group of column 'a'."""

    def taxi_q2(df, **kwargs):
        grouped = df.groupby("a")
        return grouped.agg({"b": "mean"})

    run_and_compare(taxi_q2, data=self.taxi_data)
@pytest.mark.parametrize("as_index", bool_arg_values)
def test_taxi_q3(self, as_index):
    """Taxi benchmark Q3: size() grouped by a column plus a computed year key."""

    def taxi_q3(df, as_index, **kwargs):
        year = df["c"].dt.year
        return df.groupby(["b", year], as_index=as_index).size()

    run_and_compare(taxi_q3, data=self.taxi_data, as_index=as_index)
def test_groupby_expr_col(self):
    """Group by columns that are themselves derived expressions."""

    def groupby(df, **kwargs):
        df = df.loc[:, ["b", "c"]]
        df["year"] = df["c"].dt.year
        df["month"] = df["c"].dt.month
        # The group-by keys id1/id2 are arithmetic over freshly assigned
        # columns, so the engine must group on expression columns rather
        # than on source data.
        df["id1"] = df["year"] * 12 + df["month"]
        df["id2"] = (df["id1"] - 24000) // 12
        df = df.groupby(["id1", "id2"], as_index=False).agg({"b": "max"})
        return df

    run_and_compare(groupby, data=self.taxi_data)
def test_series_astype(self):
    """Cast the float column 'd' to int through Series.astype."""

    def series_astype(df, **kwargs):
        ser = df["d"]
        return ser.astype("int")

    run_and_compare(series_astype, data=self.taxi_data)
def test_df_astype(self):
    """Per-column dtype casts through DataFrame.astype."""

    def df_astype(df, **kwargs):
        casts = {"b": "float", "d": "int"}
        return df.astype(casts)

    run_and_compare(df_astype, data=self.taxi_data)
def test_df_indexed_astype(self):
    """astype applied on top of an aggregated (indexed) frame."""

    def df_astype(df, **kwargs):
        aggregated = df.groupby("a").agg({"b": "sum"})
        return aggregated.astype({"b": "float"})

    run_and_compare(df_astype, data=self.taxi_data)
@pytest.mark.parametrize("as_index", bool_arg_values)
def test_taxi_q4(self, as_index):
    """Taxi benchmark Q4: multi-key size() followed by a two-key sort."""

    def taxi_q4(df, **kwargs):
        df["c"] = df["c"].dt.year
        df["d"] = df["d"].astype("int64")
        df = df.groupby(["b", "c", "d"], sort=True, as_index=as_index).size()
        if as_index:
            df = df.reset_index()
        # With as_index=True the size column comes out of reset_index unnamed
        # (key 0); with as_index=False it is named "size".
        return df.sort_values(
            by=["c", 0 if as_index else "size"],
            ignore_index=True,
            ascending=[True, False],
        )

    run_and_compare(taxi_q4, data=self.taxi_data)
# H2O db-benchmark style fixture: three low-cardinality string id columns
# (cast to categorical in _get_h2o_df), three integer ids and three values.
h2o_data = {
    "id1": ["id1", "id2", "id3", "id1", "id2", "id3", "id1", "id2", "id3", "id1"],
    "id2": ["id1", "id2", "id1", "id2", "id1", "id2", "id1", "id2", "id1", "id2"],
    "id3": ["id4", "id5", "id6", "id4", "id5", "id6", "id4", "id5", "id6", "id4"],
    "id4": [4, 5, 4, 5, 4, 5, 4, 5, 4, 5],
    "id5": [7, 8, 9, 7, 8, 9, 7, 8, 9, 7],
    "id6": [7, 8, 7, 8, 7, 8, 7, 8, 7, 8],
    "v1": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
    "v2": [1, 3, 5, 7, 9, 10, 8, 6, 4, 2],
    "v3": [1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.0],
}
def _get_h2o_df(self):
    """Build the H2O benchmark dataset as a pandas frame with the three
    string id columns converted to categorical dtype."""
    df = pandas.DataFrame(self.h2o_data)
    for col in ("id1", "id2", "id3"):
        df[col] = df[col].astype("category")
    return df
def test_h2o_q1(self):
    """H2O benchmark q1: sum of v1 grouped by one categorical key."""
    df = self._get_h2o_df()

    # Reference result computed with plain pandas.
    ref = df.groupby(["id1"], observed=True).agg({"v1": "sum"})
    ref.reset_index(inplace=True)

    # Same query on modin, forced through the lazy execution mode.
    modin_df = pd.DataFrame(df)
    set_execution_mode(modin_df, "lazy")
    modin_df = modin_df.groupby(["id1"], observed=True, as_index=False).agg(
        {"v1": "sum"}
    )
    set_execution_mode(modin_df, None)

    exp = to_pandas(modin_df)
    # Cast the key column back to categorical so dtypes match the reference.
    exp["id1"] = exp["id1"].astype("category")

    df_equals(ref, exp)
def test_h2o_q2(self):
    """H2O benchmark q2: sum of v1 grouped by two categorical keys."""
    df = self._get_h2o_df()

    ref = df.groupby(["id1", "id2"], observed=True).agg({"v1": "sum"})
    ref.reset_index(inplace=True)

    modin_df = pd.DataFrame(df)
    set_execution_mode(modin_df, "lazy")
    modin_df = modin_df.groupby(["id1", "id2"], observed=True, as_index=False).agg(
        {"v1": "sum"}
    )
    set_execution_mode(modin_df, None)

    exp = to_pandas(modin_df)
    # Cast the key columns back to categorical so dtypes match the reference.
    exp["id1"] = exp["id1"].astype("category")
    exp["id2"] = exp["id2"].astype("category")

    df_equals(ref, exp)
def test_h2o_q3(self):
    """H2O benchmark q3: sum of v1 and mean of v3 grouped by id3."""
    df = self._get_h2o_df()

    ref = df.groupby(["id3"], observed=True).agg({"v1": "sum", "v3": "mean"})
    ref.reset_index(inplace=True)

    modin_df = pd.DataFrame(df)
    set_execution_mode(modin_df, "lazy")
    modin_df = modin_df.groupby(["id3"], observed=True, as_index=False).agg(
        {"v1": "sum", "v3": "mean"}
    )
    set_execution_mode(modin_df, None)

    exp = to_pandas(modin_df)
    # Cast the key column back to categorical so dtypes match the reference.
    exp["id3"] = exp["id3"].astype("category")

    df_equals(ref, exp)
def test_h2o_q4(self):
df = self._get_h2o_df()
ref = df.groupby(["id4"], observed=True).agg(
{"v1": "mean", "v2": "mean", "v3": "mean"}
)
ref.reset_index(inplace=True)
modin_df = pd.DataFrame(df)
set_execution_mode(modin_df, "lazy")
modin_df = | |
10.1115/1.2900803.
[5] <NAME> and <NAME> (2015): Mechanical vibrations. Theory and application to structural dynamics.
ISBN 978-1-118-90020-8.
"""
super().__init__()
# Set function handles for calling in residual and jacobian
self.M = M
self.f_int = f_int
self.f_ext = f_ext
self.K = K
self.D = D
# Set timeintegration parameters
self.alpha_m = alpha_m
self.alpha_f = alpha_f
self.beta = beta
self.gamma = gamma
@staticmethod
def _get_midstep(alpha, x_n, x_p):
return (1 - alpha) * x_p + alpha * x_n
def residual_int(self, q_p):
    """
    Return internal part of the residual for the generalized-alpha time integration scheme.
    """
    # Inertia terms are evaluated at the alpha_m mid-step, internal forces
    # at the alpha_f mid-step (two-weight averaging of the generalized-alpha
    # scheme).
    t_m = self._get_midstep(self.alpha_m, self._t_n, self.t_p)
    q_m = self._get_midstep(self.alpha_m, self._q_n, q_p)
    dq_m = self._get_midstep(self.alpha_m, self._dq_n, self.dq_p)
    ddq_m = self._get_midstep(self.alpha_m, self._ddq_n, self.ddq_p)
    t_f = self._get_midstep(self.alpha_f, self._t_n, self.t_p)
    q_f = self._get_midstep(self.alpha_f, self._q_n, q_p)
    dq_f = self._get_midstep(self.alpha_f, self._dq_n, self.dq_p)
    M = self.M(q_m, dq_m, t_m)
    f_int_f = self.f_int(q_f, dq_f, t_f)
    # res = M(mid_m) @ ddq_m + f_int(mid_f)
    res = M @ ddq_m + f_int_f
    return res
def residual_ext(self, q_p):
    """
    Return external part of the residual for the generalized-alpha time integration scheme
    """
    # External forces only enter at the alpha_f mid-step.
    t_f = self._get_midstep(self.alpha_f, self._t_n, self.t_p)
    q_f = self._get_midstep(self.alpha_f, self._q_n, q_p)
    dq_f = self._get_midstep(self.alpha_f, self._dq_n, self.dq_p)
    f_ext_f = self.f_ext(q_f, dq_f, t_f)
    # Negative sign: NOTE(review) presumably summed with residual_int by the
    # solver so that the full residual is M@ddq + f_int - f_ext -- confirm.
    res = - f_ext_f
    return res
def jacobian(self, q_p):
    """
    Return Jacobian for the generalized-alpha time integration scheme.
    """
    t_m = self._get_midstep(self.alpha_m, self._t_n, self.t_p)
    q_m = self._get_midstep(self.alpha_m, self._q_n, q_p)
    dq_m = self._get_midstep(self.alpha_m, self._dq_n, self.dq_p)
    t_f = self._get_midstep(self.alpha_f, self._t_n, self.t_p)
    q_f = self._get_midstep(self.alpha_f, self._q_n, q_p)
    dq_f = self._get_midstep(self.alpha_f, self._dq_n, self.dq_p)
    M = self.M(q_m, dq_m, t_m)
    D = self.D(q_f, dq_f, t_f)
    K = self.K(q_f, dq_f, t_f)
    # d(res)/d(q_p): chain rule through the Newmark updates used in
    # set_correction (ddq ~ delta_q/(beta*dt^2), dq ~ gamma*delta_q/(beta*dt))
    # combined with the (1 - alpha) mid-step weights.
    # self.dt is not set in this class -- presumably provided by the base
    # one-step integrator; TODO confirm.
    Jac = (1 - self.alpha_m) / (self.beta * self.dt ** 2) * M + (1 - self.alpha_f) * self.gamma / (
        self.beta * self.dt) * D + (1 - self.alpha_f) * K
    return Jac
def set_prediction(self, q_n, dq_n, ddq_n, t_n):
    """
    Predict variables for the generalized-alpha time integration scheme.
    """
    # Store the converged state of step n ...
    self._t_n = t_n
    self._q_n = q_n
    self._dq_n = dq_n
    self._ddq_n = ddq_n
    # ... and initialize the step n+1 predictors with q_p = q_n (constant
    # displacement); dq_p and ddq_p follow from the Newmark relations of
    # set_correction evaluated at delta_q = 0.
    self.q_p = copy(self._q_n)
    self.dq_p = (1 - self.gamma / self.beta) * self._dq_n + self.dt * (1 - self.gamma / (2 * self.beta)) * self._ddq_n
    self.ddq_p = -1 / (self.beta * self.dt) * self._dq_n - (1 / (2 * self.beta) - 1) * self._ddq_n
    self.t_p = t_n + self.dt
    return
def set_correction(self, q_p):
    """
    Correct variables for the generalized-alpha time integration scheme.
    """
    # Newmark update: velocity and acceleration follow the displacement
    # increment delivered by the nonlinear solver.
    delta_q_p = q_p - self.q_p
    self.q_p = copy(q_p)
    self.dq_p += self.gamma / (self.beta * self.dt) * delta_q_p
    self.ddq_p += 1 / (self.beta * self.dt ** 2) * delta_q_p
    return
class VelocityGeneralizedAlpha(GeneralizedAlpha):
    """Generalized-alpha scheme with the velocity dq as the primary unknown.

    Residual, Jacobian and correction are parametrized by the new velocity
    dq_p instead of the displacement q_p; mid-step evaluation mirrors the
    displacement-based parent class. The default parameters are numerically
    consistent with alpha_m = (2*rho - 1)/(rho + 1), alpha_f = rho/(rho + 1)
    for rho = 0.9 -- TODO confirm intended parametrization.
    """

    def __init__(self, M, f_int, f_ext, K, D, alpha_m=0.4210526315789474, alpha_f=0.4736842105263158,
                 beta=0.27700831024930755, gamma=0.5526315789473684):
        super().__init__(M, f_int, f_ext, K, D, alpha_m=alpha_m, alpha_f=alpha_f, beta=beta, gamma=gamma)

    def residual_int(self, dq_p):
        """
        Return internal part of the residual for the generalized-alpha time integration scheme.
        """
        # Same mid-step structure as the parent, but the unknown is dq_p.
        t_m = self._get_midstep(self.alpha_m, self._t_n, self.t_p)
        q_m = self._get_midstep(self.alpha_m, self._q_n, self.q_p)
        dq_m = self._get_midstep(self.alpha_m, self._dq_n, dq_p)
        ddq_m = self._get_midstep(self.alpha_m, self._ddq_n, self.ddq_p)
        t_f = self._get_midstep(self.alpha_f, self._t_n, self.t_p)
        q_f = self._get_midstep(self.alpha_f, self._q_n, self.q_p)
        dq_f = self._get_midstep(self.alpha_f, self._dq_n, dq_p)
        M = self.M(q_m, dq_m, t_m)
        f_int_f = self.f_int(q_f, dq_f, t_f)
        res = M @ ddq_m + f_int_f
        return res

    def residual_ext(self, dq_p):
        """
        Return external part of the residual for the generalized-alpha time integration scheme.
        """
        t_f = self._get_midstep(self.alpha_f, self._t_n, self.t_p)
        q_f = self._get_midstep(self.alpha_f, self._q_n, self.q_p)
        dq_f = self._get_midstep(self.alpha_f, self._dq_n, dq_p)
        f_ext_f = self.f_ext(q_f, dq_f, t_f)
        res = - f_ext_f
        return res

    def jacobian(self, dq_p):
        """
        Return Jacobian for the generalized-alpha time integration scheme.
        """
        t_m = self._get_midstep(self.alpha_m, self._t_n, self.t_p)
        q_m = self._get_midstep(self.alpha_m, self._q_n, self.q_p)
        dq_m = self._get_midstep(self.alpha_m, self._dq_n, dq_p)
        t_f = self._get_midstep(self.alpha_f, self._t_n, self.t_p)
        q_f = self._get_midstep(self.alpha_f, self._q_n, self.q_p)
        dq_f = self._get_midstep(self.alpha_f, self._dq_n, dq_p)
        M = self.M(q_m, dq_m, t_m)
        D = self.D(q_f, dq_f, t_f)
        K = self.K(q_f, dq_f, t_f)
        # d(res)/d(dq_p): velocity-form chain rule (ddq ~ dq/(gamma*dt),
        # q ~ beta*dt*dq/gamma) combined with the (1 - alpha) weights.
        Jac = (1 - self.alpha_m) / (self.gamma * self.dt) * M + (1 - self.alpha_f) * D + (1 - self.alpha_f) * self.dt * (self.beta/self.gamma) * K
        return Jac

    def set_prediction(self, q_n, dq_n, ddq_n, t_n):
        """
        Predict variables for the generalized-alpha time integration scheme.
        """
        self._t_n = t_n
        self._q_n = q_n
        self._dq_n = dq_n
        self._ddq_n = ddq_n
        # Constant-velocity predictor: dq_p = dq_n; q_p and ddq_p follow from
        # the velocity-form Newmark relations at delta_dq = 0.
        self.q_p = self._q_n + self.dt * self._dq_n + self.dt ** 2 * (1 / 2 - self.beta / self.gamma) * self._ddq_n
        self.dq_p = copy(self._dq_n)
        self.ddq_p = - (1 - self.gamma) / self.gamma * self._ddq_n
        self.t_p = t_n + self.dt
        return

    def set_correction(self, dq_p):
        """
        Correct variables for the generalized-alpha time integration scheme.
        """
        # Displacement and acceleration follow the velocity increment.
        delta_dq_p = dq_p - self.dq_p
        self.q_p += self.dt * self.beta / self.gamma * delta_dq_p
        self.dq_p = copy(dq_p)
        self.ddq_p += 1 / (self.gamma * self.dt) * delta_dq_p
        return
class NewmarkBeta(GeneralizedAlpha):
    def __init__(self, M, f_int, f_ext, K, D, beta=0.25, gamma=0.5):
        """
        Newmark-beta integration scheme.

        Special case of the generalized-alpha scheme with alpha_m = alpha_f = 0.

        Parameters
        ----------
        M : function
            Mass Matrix function, signature M(q, dq, t)
        f_int : function
            Internal restoring force function, signature f_int(q, dq, t)
        f_ext : function
            External force function, signature f_ext(q, dq, t)
        K : function
            Jacobian of f_int, signature K(q, dq, t)
        D : function
            Linear viscous damping matrix, signature D(q, dq, t)
        beta : float
            Default value beta = 1/4.
        gamma : float
            Default value gamma = 1/2.

        Unconditional stability for beta >= gamma/2 >= 1/4. Unconditionally stability and second-order accuracy but no
        numerical damping for beta >= 1/4 and gamma = 1/2. Unconditionally stability, second-order accuracy and best
        following of phase but no numerical damping for beta = 1/4 and gamma = 1/2 (corresponds to trapezoidal rule,
        default values). Alternative parametrization as Newmark-beta scheme with alpha-damping (modified average
        constant acceleration) -- in general not second-order accurate -- via beta = 1/4*(1 + alpha)^2 and
        gamma = 1/2 + alpha with damping alpha >= 0.
        """
        alpha_m = 0.0
        alpha_f = 0.0
        super().__init__(M, f_int, f_ext, K, D, alpha_m, alpha_f, beta, gamma)
class VelocityNewmarkBeta(VelocityGeneralizedAlpha):
    """Newmark-beta scheme in velocity form: the velocity-based
    generalized-alpha scheme with alpha_m = alpha_f = 0."""

    def __init__(self, M, f_int, f_ext, K, D, beta=0.25, gamma=0.5):
        super().__init__(M, f_int, f_ext, K, D,
                         alpha_m=0.0, alpha_f=0.0, beta=beta, gamma=gamma)
class WBZAlpha(GeneralizedAlpha):
    def __init__(self, M, f_int, f_ext, K, D, rho_inf=0.9):
        """
        Parametrize the generalized-alpha time integration scheme as the WBZ-alpha scheme.

        Parameters
        ----------
        M : function
            Mass Matrix function, signature M(q, dq, t)
        f_int : function
            Internal restoring force function, signature f_int(q, dq, t)
        f_ext : function
            External force function, signature f_ext(q, dq, t)
        K : function
            Jacobian of f_int, signature K(q, dq, t)
        D : function
            Linear viscous damping matrix, signature D(q, dq, t)
        rho_inf : float
            High frequency spectral radius, 0 <= rho_inf <= 1 (default 0.9).
            For the alternative parametrization via alpha_m use
            rho_inf = (1 + alpha_m)/(1 - alpha_m) with -1 <= alpha_m <= 0.
        """
        # WBZ-alpha introduces damping through alpha_m only; alpha_f is zero.
        alpha_m = (rho_inf - 1) / (rho_inf + 1)
        beta = 0.25 * (1 - alpha_m) ** 2
        gamma = 0.5 - alpha_m
        super().__init__(M, f_int, f_ext, K, D, alpha_m, 0.0, beta, gamma)
class HHTAlpha(GeneralizedAlpha):
    def __init__(self, M, f_int, f_ext, K, D, rho_inf=0.9):
        """
        Parametrize generalized-alpha time integration scheme as HHT-alpha scheme.

        HHT-alpha introduces damping through alpha_f only (alpha_m = 0).

        Parameters
        ----------
        M : function
            Mass Matrix function, signature M(q, dq, t)
        f_int : function
            Internal restoring force function, signature f_int(q, dq, t)
        f_ext : function
            External force function, signature, f_ext(q, dq, t)
        K : function
            Jacobian of f_int, signature K(q, dq, t)
        D : function
            Linear viscous damping matrix, signature D(q, dq, t)
        rho_inf : float
            High frequency spectral radius. 1/2 <= rho_inf <= 1. Default value rho_inf = 0.9. For alternative
            parametrization via alpha_f set rho_inf = (1 - alpha_f)/(1 + alpha_f) with 0 <= alpha_f <= 1/3.
        """
        alpha_m = 0.0
        alpha_f = (1 - rho_inf) / (1 + rho_inf)
        beta = 0.25 * (1 + alpha_f) ** 2
        gamma = 0.5 + alpha_f
        super().__init__(M, f_int, f_ext, K, D, alpha_m, alpha_f, beta, gamma)
        return
class VariationalMidPoint(OneStepIntegratorBase):
def __init__(self, M, f_int, f_ext, K, D, alpha=0.5):
    """Variational midpoint integrator.

    Function-handle arguments follow the same convention as the other
    integrators in this module; alpha is presumably the midpoint weight
    (0.5 = true midpoint) -- TODO confirm against the residual definition.
    """
    super().__init__()
    # Set function handles for calling in residual and jacobian
    self.M = M
    self.f_int = f_int
    self.f_ext = f_ext
    self.K = K
    self.D = D
    self.alpha = alpha
@staticmethod
| |
#!/usr/bin/env python
# The contents of this file are subject to the Python Software Foundation
# License Version 2.3 (the License). You may not copy or use this file, in
# either source code or executable form, except in compliance with the License.
# You may obtain a copy of the License at http://www.python.org/license.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# By <NAME>
# I was playing with doctest when I wrote this. I still haven't
# decided how useful doctest is as opposed to implementing unit tests
# directly. --Dave
# NOTE: legacy Python 2 module (UserDict.DictMixin, __cmp__-based ordering,
# print statements in doctests).
if __name__ == '__main__':
    import sys
    sys.path = ['.','..'] + sys.path # HACK to simplify unit testing.
from BTL.translation import _

class BEGIN:
    """Sentinel marking the special iterator position one before the first
    element (i.e., before the first call to .next())."""
    pass

from UserDict import DictMixin
from cmap_swig import *  # C++ STL map bindings (map_begin, iter_incr, ...)
import sys
from weakref import WeakKeyDictionary

# Flag consumed elsewhere in the module; presumably enables leak diagnostics.
LEAK_TEST = False
class CMap(object,DictMixin):
"""In-order mapping. Provides same operations and behavior as a dict,
but provides in-order iteration. Additionally provides operations to
find the nearest key <= or >= a given key.
This provides a significantly wider set of operations than
berkeley db BTrees, but it provides no means for persistence.
LIMITATION: The key must be a python numeric type, e.g., an integer
or a float. The value can be any python object.
Operation: Time Applicable
Complexity: Methods:
---------------------------------------------------
Item insertion: O(log n) append, __setitem__
Item deletion: O(log n + k) __delitem__, erase
Key search: O(log n) __getitem__, get, find,
__contains__
Value search: n/a
Iteration step: amortized O(1), next, prev
worst-case O(log n)
Memory: O(n)
n = number of elements in map. k = number of iterators pointing
into map. CMap assumes there are few iterators in existence at
any given time.
Iterators are not invalidated by insertions. Iterators are
invalidated by deletions only when the key-value pair
referenced is deleted. Deletion has a '+k' because the
__delitem__ searches linearly through the set of iterators
pointing into this map to find any iterator pointing at the
deleted item and then invalidates the iterator.
This class is backed by the C++ STL map class, but conforms
to the Python container interface."""
class _AbstractIterator:
    """Iterates over elements in the map in order.

    Wraps a swig-level C++ map iterator ('si'); subclasses decide what
    next()/prev() return (key, value, or item). The special BEGIN sentinel
    represents the position one before the first element.
    """
    def __init__(self, m, si = BEGIN ): # "s.." implies swig object.
        """Creates an iterator pointing to element si in map m.

        Do not instantiate directly. Use iterkeys, itervalues, or
        iteritems.

        The _AbstractIterator takes ownership of any C++ iterator
        (i.e., the swig object 'si') and will deallocate it when
        the iterator is deallocated.

        Examples of typical behavior:

        >>> from CMap import *
        >>> m = CMap()
        >>> m[12] = 6
        >>> m[9] = 4
        >>> for k in m:
        ...     print int(k)
        ...
        9
        12
        >>>

        Example edge cases (empty map):

        >>> from CMap import *
        >>> m = CMap()
        >>> try:
        ...     i = m.__iter__()
        ...     i.value()
        ... except IndexError:
        ...     print 'IndexError.'
        ...
        IndexError.
        >>> try:
        ...     i.next()
        ... except StopIteration:
        ...     print 'stopped'
        ...
        stopped

        @param map: CMap.
        @param node: Node that this iterator will point at.  If None
            then the iterator points to end().  If BEGIN
            then the iterator points to one before the beginning.
        """
        assert isinstance(m, CMap)
        assert not isinstance(si, CMap._AbstractIterator)
        if si == None:
            self._si = map_end(m._smap)
        else:
            self._si = si # C++ iterator wrapped by swig.
        self._map = m
        # Register in the map's weak-keyed set so deletions can invalidate us.
        m._iterators[self] = 1 # using map as set of weak references.

    def __hash__(self):
        # Identity hash: each iterator is a distinct entry in m._iterators.
        return id(self)

    def __cmp__(self, other):
        if not self._si or not other._si:
            raise RuntimeError( _("invalid iterator") )
        # BEGIN sorts before every real position.
        if self._si == BEGIN and other._si == BEGIN: return 0
        if self._si == BEGIN and other._si != BEGIN: return -1
        elif self._si != BEGIN and other._si == BEGIN: return 1
        return iter_cmp(self._map._smap, self._si, other._si )

    def at_begin(self):
        """equivalent to self == m.begin() where m is a CMap.

        >>> from CMap import CMap
        >>> m = CMap()
        >>> i = m.begin()
        >>> i == m.begin()
        True
        >>> i.at_begin()
        True
        >>> i == m.end()    # no elements so begin()==end()
        True
        >>> i.at_end()
        True
        >>> m[6] = 'foo'    # insertion does not invalidate iterators.
        >>> i = m.begin()
        >>> i == m.end()
        False
        >>> i.value()
        'foo'
        >>> try:            # test at_begin when not at beginning.
        ...     i.next()
        ... except StopIteration:
        ...     print 'ok'
        ok
        >>> i.at_begin()
        False
        """
        if not self._si:
            raise RuntimeError( _("invalid iterator") )
        if self._si == BEGIN:  # BEGIN is one before begin(). Yuck!!
            return False
        return map_iter_at_begin(self._map._smap, self._si)

    def at_end(self):
        """equivalent to self == m.end() where m is a CMap, but
        at_end is faster because it avoids the dynamic memory
        allocation in m.end().

        >>> from CMap import CMap
        >>> m = CMap()
        >>> m[6] = 'foo'
        >>> i = m.end()     # test when at end.
        >>> i == m.end()
        True
        >>> i.at_end()
        True
        >>> int(i.prev())
        6
        >>> i.at_end()      # testing when not at end.
        False
        """
        if not self._si:
            raise RuntimeError( _("invalid iterator") )
        if self._si == BEGIN:
            return False
        return map_iter_at_end(self._map._smap, self._si)

    def key(self):
        """@return: the key of the key-value pair referenced by this
           iterator.
        """
        if not self._si:
            raise RuntimeError( _("invalid iterator") )
        if self._si == BEGIN:
            raise IndexError(_("Cannot dereference iterator until after "
                               "first call to .next."))
        elif map_iter_at_end(self._map._smap, self._si):
            raise IndexError()
        return iter_key(self._si)

    def value(self):
        """@return: the value of the key-value pair currently referenced
           by this iterator.
        """
        if not self._si:
            raise RuntimeError( _("invalid iterator") )
        if self._si == BEGIN:
            raise IndexError(_("Cannot dereference iterator until after "
                               "first call to next."))
        elif map_iter_at_end(self._map._smap, self._si):
            raise IndexError()
        return iter_value(self._si)

    def item(self):
        """@return the key-value pair referenced by this iterator.
        """
        if not self._si:
            raise RuntimeError( _("invalid iterator") )
        return self.key(), self.value()

    def _next(self):
        # Move-then-dereference semantics: advance first, then callers read
        # key()/value().  Raises StopIteration at (or when moving onto) end().
        if not self._si:
            raise RuntimeError( _("invalid iterator") )
        if self._si == BEGIN:
            self._si = map_begin(self._map._smap)
            if map_iter_at_end(self._map._smap,self._si):
                raise StopIteration
            return
        if map_iter_at_end(self._map._smap,self._si):
            raise StopIteration
        iter_incr(self._si)
        if map_iter_at_end(self._map._smap,self._si):
            raise StopIteration

    def _prev(self):
        # Mirror of _next: stepping back from begin() parks the iterator at
        # the BEGIN sentinel and stops the iteration.
        if not self._si:
            raise RuntimeError( _("invalid iterator") )
        if self._si == BEGIN:
            raise StopIteration()
        elif map_iter_at_begin(self._map._smap, self._si):
            self._si = BEGIN
            raise StopIteration
        iter_decr(self._si)

    def __del__(self):
        # Python note: if a reference to x is intentionally
        # eliminated using "del x" and there are other references
        # to x then __del__ does not get called at this time.
        # Only when the last reference is deleted by an intentional
        # "del" or when the reference goes out of scope does
        # the __del__ method get called.
        self._invalidate()

    def _invalidate(self):
        # Release the C++ iterator and deregister from the owning map.
        # Safe to call more than once.
        if self._si == None:
            return
        try:
            del self._map._iterators[self]
        except KeyError:
            pass # could've been removed because weak reference,
                 # and because _invalidate is called from __del__.
        if self._si != BEGIN:
            iter_delete(self._si)
        self._si = None

    def __iter__(self):
        """If the iterator is itself iteratable then we do things like:

        >>> from CMap import CMap
        >>> m = CMap()
        >>> m[10] = 'foo'
        >>> m[11] = 'bar'
        >>> for x in m.itervalues():
        ...     print x
        ...
        foo
        bar
        """
        return self

    def __len__(self):
        return len(self._map)
class KeyIterator(_AbstractIterator):
def next(self):
"""Returns the next key in the map.
Insertion does not invalidate iterators. Deletion only
invalidates an iterator if the iterator pointed at the
key-value pair being deleted.
This is implemented by moving the iterator and then
dereferencing it. If we dereferenced and then moved
then we would get the odd behavior:
Ex: I have keys [1,2,3]. The iterator i points at 1.
print i.next() # prints 1
print i.next() # prints 2
print i.prev() # prints 3
print i.prev() # prints 2
However, because we move and then dereference, when an
iterator is first created it points to nowhere
so that the first next moves to the first element.
Ex:
>>> from CMap import *
>>> m = | |
from bitcoin_tools.analysis.plots import get_cdf
from bitcoin_tools.analysis.status.data_dump import transaction_dump, utxo_dump
from bitcoin_tools.analysis.status.utils import parse_ldb, aggregate_dust_np
from .data_processing import get_samples, get_filtered_samples
from bitcoin_tools.analysis.status.plots import plot_pie_chart_from_samples, overview_from_file, plots_from_samples
from bitcoin_tools import CFG
from getopt import getopt
from sys import argv
def set_out_names(count_p2sh, non_std_only):
    """
    Set the name of the input / output files from the experiment depending on the given flags.

    :param count_p2sh: Whether P2SH should be taken into account.
    :type count_p2sh: bool
    :param non_std_only: Whether the experiment will be run only considering non standard outputs.
    :type non_std_only: bool
    :return: Four string representing the names of the utxo, parsed_txs, parsed_utxos and dust file names.
    :rtype: str, str, str, str
    """
    f_utxos = "/decoded_utxos.json"
    f_parsed_txs = "/parsed_txs.json"

    # Flag-dependent suffix for the parsed files: "_nstd" first, then
    # "_wp2sh", matching the established file-naming convention.
    suffix = ""
    if non_std_only:
        suffix += "_nstd"
    if count_p2sh:
        suffix += "_wp2sh"

    f_parsed_utxos = "/parsed_utxos" + suffix + ".json"
    f_dust = "/dust" + suffix + ".json"

    return f_utxos, f_parsed_txs, f_parsed_utxos, f_dust
def non_std_outs_analysis(samples):
    """
    Perform the non standard out analysis for a given set of samples.

    :param samples: List of samples that will form the chart.
    :type samples: list
    :return: None
    :rtype: None
    """

    # We can use get_unique_values() to obtain all values for the non_std_type attribute found in the analysed samples:
    # get_unique_values("non_std_type", fin_name=f_parsed_utxos)

    # Once we know all the possible values, we can create a pie chart, assigning a piece of the pie to the main values
    # and grouping all the rest into an "Other" category. E.g., we create pieces for multisig 1-1, 1-2, 1-3, 2-2, 2-3
    # and 3-3, and put the rest into "Other".
    # NOTE: groups and labels are index-aligned; the last group collects the
    # rare scripts bucketed under 'Other' (including one hard-coded oddball key).
    groups = [['multisig-1-3'], ['multisig-1-2'], ['multisig-1-1'], ['multisig-3-3'], ['multisig-2-2'],
              ['multisig-2-3'], ["P2WSH"], ["P2WPKH"], [False, 'multisig-OP_NOTIF-OP_NOTIF',
                                                       'multisig-<2153484f55544f555420544f2023424954434f494e2d41535345545320202020202020202'
                                                       '0202020202020202020202020202020202020202020202020202020>-1']]
    labels = ['M. 1-3', 'M. 1-2', 'M. 1-1', 'M. 3-3', 'M. 2-2', 'M. 2-3', "P2WSH", "P2WPKH", 'Other']

    out_name = "utxo_non_std_type"

    plot_pie_chart_from_samples(samples=samples, save_fig=out_name, labels=labels, groups=groups, title="",
                                colors=["#165873", "#428C5C", "#4EA64B", "#ADD96C", "#B1D781", "#FAD02F",
                                        "#A69229", "#B69229", "#F69229"], labels_out=True)
def tx_based_analysis(tx_fin_name):
    """
    Performs a transaction based analysis from a given input file (resulting from a transaction dump of the chainstate)

    :param tx_fin_name: Input file path which contains the chainstate transaction dump.
    :type: str
    :return: None
    :rtype: None
    """

    # CDF plot configuration: the four lists below are parallel (attribute,
    # axis label, log-axis flag and output name share one index).
    x_attributes = ['height', 'total_len', 'total_value', 'num_utxos']
    xlabels = ['Height', 'Total length (bytes)', 'Total value', 'Number of UTXOs per tx']
    out_names = ["tx_height", ["tx_total_len", "tx_total_len_logx"], "tx_total_value_logx",
                 ["tx_num_utxos", "tx_num_utxos_logx"]]
    log_axis = [False, [False, 'x'], 'x', [False, 'x']]

    # Pie-chart configuration: coinbase flag (group [1] -> 'Coinbase',
    # group [0] -> 'No-coinbase').
    x_attr_pie = 'coinbase'
    xlabels_pie = [['Coinbase', 'No-coinbase']]
    out_names_pie = ['tx_coinbase']
    pie_groups = [[[1], [0]]]
    pie_colors = [["#165873", "#428C5C"]]

    # Sample every attribute in one pass; split off the pie attribute.
    samples = get_samples(x_attributes + [x_attr_pie], fin_name=tx_fin_name)
    samples_pie = samples.pop(x_attr_pie)

    for attribute, label, log, out in zip(x_attributes, xlabels, log_axis, out_names):
        xs, ys = get_cdf(samples[attribute], normalize=True)
        plots_from_samples(xs=xs, ys=ys, xlabel=label, log_axis=log, save_fig=out, ylabel="Number of txs")

    for label, out, groups, colors in (list(zip(xlabels_pie, out_names_pie, pie_groups, pie_colors))):
        plot_pie_chart_from_samples(samples=samples_pie, save_fig=out, labels=label, title="", groups=groups,
                                    colors=colors, labels_out=True)
def utxo_based_analysis(utxo_fin_name):
    """
    Performs a utxo based analysis from a given input file (resulting from a utxo dump of the chainstate)

    :param utxo_fin_name: Input file path which contains the chainstate utxo dump.
    :type: str
    :return: None
    :rtype: None
    """

    # CDF plot configuration: parallel lists sharing one index.
    x_attributes = ['tx_height', 'amount', 'index', 'out_type', 'utxo_data_len', 'register_len']
    xlabels = ['Tx. height', 'Amount', 'UTXO index', 'Out type', 'UTXO data length', 'Register length']
    out_names = ["utxo_tx_height", "utxo_amount_logx", ["utxo_index", "utxo_index_logx"],
                 ["utxo_out_type", "utxo_out_type_logx"], ["utxo_data_len", "utxo_data_len_logx"],
                 ['utxo_register_len', 'utxo_register_len_logx']]
    log_axis = [False, 'x', [False, 'x'], [False, 'x'], [False, 'x'], [False, 'x']]

    # Two pie charts, both over 'out_type' with different groupings.
    # NOTE(review): the second chart has 4 labels but only 3 groups --
    # verify against plot_pie_chart_from_samples' handling.
    x_attributes_pie = ['out_type', 'out_type']
    xlabels_pie = [['C-even', 'C-odd', 'U-even', 'U-odd'], ['P2PKH', 'P2PK', 'P2SH', 'Other']]
    out_names_pie = ["utxo_pk_types", "utxo_types"]
    pie_groups = [[[2], [3], [4], [5]], [[0], [2, 3, 4, 5], [1]]]

    x_attribute_special = 'non_std_type'

    # Since the attributes for the pie chart are already included in the normal chart, we won't pass them to the
    # sampling function.
    samples = get_samples(x_attributes + [x_attribute_special], fin_name=utxo_fin_name)
    samples_special = samples.pop(x_attribute_special)

    for attribute, label, log, out in zip(x_attributes, xlabels, log_axis, out_names):
        xs, ys = get_cdf(samples[attribute], normalize=True)
        plots_from_samples(xs=xs, ys=ys, xlabel=label, log_axis=log, save_fig=out, ylabel="Number of UTXOs")

    for attribute, label, out, groups in (list(zip(x_attributes_pie, xlabels_pie, out_names_pie, pie_groups))):
        plot_pie_chart_from_samples(samples=samples[attribute], save_fig=out, labels=label, title="", groups=groups,
                                    colors=["#165873", "#428C5C", "#4EA64B", "#ADD96C"], labels_out=True)

    # Special case: non-standard
    non_std_outs_analysis(samples_special)
def dust_analysis(utxo_fin_name, f_dust, fltr=None):
    """
    Performs a dust analysis by aggregating all the dust of a utxo dump file.

    :param utxo_fin_name: Input file path which contains the chainstate utxo dump.
    :type: str
    :param f_dust: Output file name where the aggregated dust will be stored.
    :type f_dust: str
    :param fltr: Filter to be applied to the samples. None by default.
    :type fltr: function
    :return: None
    :rtype: None
    """

    # Generate plots for dust analysis (including percentage scale).
    # First, the dust accumulation file is generated
    data = aggregate_dust_np(utxo_fin_name, fout_name=f_dust, fltr=fltr)

    # # Or we can load it from a dust file if we have already created it
    # data = load(open(CFG.data_path + f_dust))

    # Parallel lists: each row plots three accumulated series (dust,
    # non-profitable minimum, non-profitable estimate) against fee rate.
    dict_labels = [["dust_utxos", "np_utxos", "npest_utxos"],
                   ["dust_value", "np_value", "npest_value"],
                   ["dust_data_len", "np_data_len", "npest_data_len"]]
    outs = ["dust_utxos", "dust_value", "dust_data_len"]
    totals = ['total_utxos', 'total_value', 'total_data_len']
    ylabels = ["Number of UTXOs", "UTXOs Amount (satoshis)", "UTXOs Sizes (bytes)"]
    legend = ["Dust", "Non-profitable min.", "Non-profitable est."]

    for labels, out, total, ylabel in zip(dict_labels, outs, totals, ylabels):
        # BUG FIX: sort each fee_rate -> accumulated_value mapping by fee rate
        # and keep keys and values *paired*. The previous code sorted keys and
        # values independently, which only yields matching (x, y) pairs when
        # the values happen to be monotonic in the keys.
        sorted_items = [sorted(data[l].items(), key=lambda kv: int(kv[0])) for l in labels]
        xs = [[k for k, _ in items] for items in sorted_items]
        ys = [[v for _, v in items] for items in sorted_items]

        plots_from_samples(xs=xs, ys=ys, save_fig=out, legend=legend, legend_loc=4, xlabel='Fee rate (sat./byte)',
                           ylabel=ylabel)

        # Get values in percentage of the corresponding total.
        ys_perc = []
        for y_samples in ys:
            y_perc = [y / float(data[total]) for y in y_samples]
            ys_perc.append(y_perc)

        plots_from_samples(xs=xs, ys=ys_perc, save_fig='perc_' + out, legend=legend, legend_loc=4,
                           xlabel='Fee rate (sat./byte)', ylabel=ylabel)
def dust_analysis_all_fees(utxo_fin_name):
    """
    Performs a dust analysis for all fee rates, that is, up until all samples are considered dust (plot shows cdf up
    until 1).

    :param utxo_fin_name: Input file path which contains the chainstate utxo dump.
    :type: str
    :return: None
    :rtype: None
    """
    # Single figure: normalised CDF of dust / non-profitable (min and est)
    # amounts over every fee rate, with a logarithmic x axis.
    attributes = ["dust", "non_profitable", "non_profitable_est"]
    samples = get_samples(attributes, fin_name=utxo_fin_name)

    xs, ys = [], []
    for attr in attributes:
        x, y = get_cdf(samples[attr], normalize=True)
        xs.append(x)
        ys.append(y)

    plots_from_samples(xs=xs, ys=ys, xlabel='Dust/non_prof_min/non_prof_est value',
                       log_axis='x', save_fig="dust_utxos_all",
                       ylabel="Number of UTXOs",
                       legend=["Dust", "Non-profitable min.", "Non-profitable est."],
                       legend_loc=4)
def utxo_based_analysis_with_filters(utxo_fin_name):
    """
    Performs an utxo data analysis using different filters, to obtain for examples the amount of SegWit outputs.
    :param utxo_fin_name: Input file path which contains the chainstate utxo dump.
    :type: str
    :return: None
    :rtype: None
    """
    x_attribute = 'tx_height'
    xlabel = 'Block height'
    # One output figure per filter group: script types, amount ranges,
    # P2SH upper bound for SegWit, and 1-satoshi outputs.
    out_names = ['utxo_height_out_type', 'utxo_height_amount', 'segwit_upper_bound', 'utxo_height_1_satoshi']
    # Filters are consumed IN ORDER by the plotting loop below: 7 script-type
    # filters, then 7 amount-range filters, then one P2SH filter and one
    # 1-satoshi filter (16 total, matching the summed legend lengths).
    filters = [lambda x: x["out_type"] == 0,
               lambda x: x["out_type"] == 1,
               lambda x: x["out_type"] in [2, 3, 4, 5],
               lambda x: x["non_std_type"] == "P2WPKH",
               lambda x: x["non_std_type"] == "P2WSH",
               lambda x: x["non_std_type"] is not False and "multisig" in x["non_std_type"],
               lambda x: x["non_std_type"] is False,
               lambda x: x["amount"] == 1,
               lambda x: 1 < x["amount"] <= 10 ** 1,
               lambda x: 10 < x["amount"] <= 10 ** 2,
               lambda x: 10 ** 2 < x["amount"] <= 10 ** 4,
               lambda x: 10 ** 4 < x["amount"] <= 10 ** 6,
               lambda x: 10 ** 6 < x["amount"] <= 10 ** 8,
               lambda x: x["amount"] > 10 ** 8,
               lambda x: x["out_type"] == 1,
               lambda x: x["amount"] == 1]
    legends = [['P2PKH', 'P2SH', 'P2PK', 'P2WPKH', 'P2WSH', 'Multisig', 'Other'],
               ['$=1$', '$1 < x \leq 10$', '$10 < x \leq 10^2$', '$10^2 < x \leq 10^4$', '$10^4 < x \leq 10^6$',
                '$10^6 < x \leq 10^8$', '$10^8 < x$'], ['P2SH'], ['Amount = 1']]
    # NOTE(review): `comparative` is zipped below but `comp` is never used
    # inside the loop -- confirm whether comparative plotting was intended.
    comparative = [True, True, False, False]
    legend_loc = 2
    # get_filtered_samples returns one sample list per filter, in filter order.
    samples = get_filtered_samples(x_attribute, fin_name=utxo_fin_name, filtr=filters)
    for out, legend, comp in zip(out_names, legends, comparative):
        xs = []
        ys = []
        # Consume one sample list per legend entry and build its CDF.
        for _ in range(len(legend)):
            x, y = get_cdf(samples.pop(0), normalize=True)
            xs.append(x)
            ys.append(y)
        plots_from_samples(xs=xs, ys=ys, xlabel=xlabel, save_fig=out, legend=legend, legend_loc=legend_loc,
                           ylabel="Number of UTXOs")
def tx_based_analysis_with_filters(tx_fin_name):
"""
Performs a transaction data analysis using different filters, to obtain for example the amount of coinbase
transactions.
:param tx_fin_name: Input file path which contains the chainstate transaction dump.
:type: str
:return: None
:rtype: None
"""
x_attributes = 'height'
xlabels = ['Height']
out_names = ['tx_height_coinbase']
filters = [lambda x: x["coinbase"]]
samples = | |
"""Defining domain generalization algorithms"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd
import matplotlib.pyplot as plt
# Registry of objective (algorithm) names resolvable by get_objective_class();
# each active entry must match a class defined in this module.
OBJECTIVES = [
    'ERM',
    'GroupDRO',
    'IRM',
    'VREx',
    'SD',
    # 'ANDMask', # Requires update
    # 'IGA', # Requires update
    # 'Fish', # Requires update
    'IB_ERM',
    # 'IB_IRM' # Requires update
]
def get_objective_class(objective_name):
    """Look up and return the objective class registered under *objective_name*."""
    try:
        return globals()[objective_name]
    except KeyError:
        raise NotImplementedError("objective not found: {}".format(objective_name))
class Objective(nn.Module):
    """
    Base class for domain generalization objectives.

    Subclasses are expected to implement:
      - predict: run the underlying model on a batch
      - update: perform one optimization step
    """

    def __init__(self, hparams):
        super().__init__()
        # Hyper-parameter dictionary shared by all subclasses.
        self.hparams = hparams

    def predict(self, all_x):
        raise NotImplementedError

    def update(self, losses):
        """
        Computes the Gradients for model update.
        Admits a list of unlabeled losses from the test domains: losses
        """
        raise NotImplementedError
class ERM(Objective):
    """
    Empirical Risk Minimization (ERM): minimizes the mean of the per-domain
    training losses.
    """

    def __init__(self, model, dataset, optimizer, hparams):
        super(ERM, self).__init__(hparams)
        self.device = self.hparams['device']
        # Training components.
        self.model = model
        self.dataset = dataset
        self.optimizer = optimizer
        # Number of domains the training loss is averaged over.
        self.nb_training_domains = dataset.get_nb_training_domains()

    def predict(self, all_x):
        return self.model(all_x)

    def update(self):
        """Run one ERM optimization step on the next batch."""
        self.model.train()
        inputs, targets = self.dataset.get_next_batch()
        # The model returns (logits, features); only logits are needed here.
        logits, _ = self.predict(inputs)
        per_domain = self.dataset.loss_by_domain(logits, targets, self.nb_training_domains)
        loss = per_domain.mean()
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
class GroupDRO(ERM):
    """
    GroupDRO: maintains a per-domain weight vector `q`, up-weighted for the
    worst-performing domains via a multiplicative (exponentiated) update, and
    minimizes the q-weighted sum of the per-domain losses.
    """

    def __init__(self, model, dataset, optimizer, hparams):
        super(GroupDRO, self).__init__(model, dataset, optimizer, hparams)
        self.device = self.hparams['device']
        # Step size of the multiplicative weight update.
        self.eta = hparams['eta']
        # Domain weights; lazily sized on the first update() call.
        self.register_buffer("q", torch.Tensor())
        # Training components.
        self.model = model
        self.dataset = dataset
        self.optimizer = optimizer
        self.nb_training_domains = dataset.get_nb_training_domains()

    def predict(self, all_x):
        return self.model(all_x)

    def update(self):
        """Run one GroupDRO optimization step on the next batch."""
        self.model.train()
        inputs, targets = self.dataset.get_next_batch()

        # Lazy initialization of the domain weights (uniform).
        if not len(self.q):
            print("hello, creating Q")
            self.q = torch.ones(self.nb_training_domains).to(self.device)

        logits, _ = self.predict(inputs)
        per_domain = self.dataset.loss_by_domain(logits, targets, self.nb_training_domains)

        # Exponentiated-gradient update of the weights, then renormalize.
        for idx, loss_d in enumerate(per_domain):
            self.q[idx] *= (self.eta * loss_d.data).exp()
        self.q /= self.q.sum()

        # Weighted objective over domains.
        weighted = torch.dot(per_domain, self.q)
        self.optimizer.zero_grad()
        weighted.backward()
        self.optimizer.step()
class IRM(ERM):
    """
    Invariant Risk Minimization (IRM)

    Adds a penalty that pushes the per-domain classifiers to be simultaneously
    optimal; the penalty weight is annealed in after `anneal_iters` updates.
    """
    def __init__(self, model, dataset, optimizer, hparams):
        super(IRM, self).__init__(model, dataset, optimizer, hparams)
        # Hyper parameters
        self.penalty_weight = self.hparams['penalty_weight']
        self.anneal_iters = self.hparams['anneal_iters']
        # Memory
        self.penalty = 0
        # Buffered so the anneal schedule survives checkpointing.
        self.register_buffer('update_count', torch.tensor([0]))
    @staticmethod
    def _irm_penalty(logits, y):
        """IRMv1 penalty: dot product of the gradients (w.r.t. a dummy scale)
        of the losses of two halves of the batch (even/odd samples)."""
        device = "cuda" if logits[0][0].is_cuda else "cpu"
        # Dummy multiplier the gradient is taken with respect to.
        scale = torch.tensor(1.).to(device).requires_grad_()
        loss_1 = F.cross_entropy(logits[::2] * scale, y[::2])
        loss_2 = F.cross_entropy(logits[1::2] * scale, y[1::2])
        grad_1 = autograd.grad(loss_1, [scale], create_graph=True)[0]
        grad_2 = autograd.grad(loss_2, [scale], create_graph=True)[0]
        result = torch.sum(grad_1 * grad_2)
        return result
    def update(self):
        """Run one IRM optimization step on the next batch."""
        # Define penalty value (Annealing): 1.0 before anneal_iters, then penalty_weight.
        penalty_weight = (self.penalty_weight if self.update_count >= self.anneal_iters
                          else 1.0)
        # Put model into training mode
        self.model.train()
        # Get next batch
        X, Y = self.dataset.get_next_batch()
        # Split input / target
        # X, Y = self.dataset.split_input(batch)
        # Get predict and get (logit, features)
        out, _ = self.predict(X)
        # Compute losses
        n_domains = self.dataset.get_nb_training_domains()
        domain_losses = self.dataset.loss_by_domain(out, Y, n_domains)
        # Create domain dimension in tensors.
        # e.g. for source domains: (ENVS * batch_size, ...) -> (ENVS, batch_size, ...)
        # for time domains: (batch_size, ENVS, ...) -> (ENVS, batch_size, ...)
        out, labels = self.dataset.split_tensor_by_domains(out, Y, n_domains)
        # env_labels = self.dataset.split_tensor_by_domains(n_domains, Y)
        # Compute loss and penalty for each domains
        irm_penalty = torch.zeros(n_domains).to(self.device)
        for i, (env_out, env_labels) in enumerate(zip(out, labels)):
            irm_penalty[i] += self._irm_penalty(env_out, env_labels)
        # Compute objective: mean penalty across domains, weighted by schedule.
        irm_penalty = irm_penalty.mean()
        # print(domain_losses.mean(), irm_penalty)
        objective = domain_losses.mean() + (penalty_weight * irm_penalty)
        # Reset Adam, because it doesn't like the sharp jump in gradient
        # magnitudes that happens at this step.
        if self.update_count == self.anneal_iters:
            self.optimizer = torch.optim.Adam(
                self.model.parameters(),
                lr=self.optimizer.param_groups[0]['lr'],
                weight_decay=self.optimizer.param_groups[0]['weight_decay'])
        # Back propagate
        self.optimizer.zero_grad()
        objective.backward()
        self.optimizer.step()
        # Update memory
        self.update_count += 1
class VREx(ERM):
    """
    V-REx Objective from http://arxiv.org/abs/2003.00688

    Penalizes the variance of the per-domain risks, annealed in after
    `anneal_iters` updates.
    """

    def __init__(self, model, dataset, optimizer, hparams):
        super(VREx, self).__init__(model, dataset, optimizer, hparams)
        # Penalty weight applied once the annealing period is over.
        self.penalty_weight = self.hparams['penalty_weight']
        self.anneal_iters = self.hparams['anneal_iters']
        # Number of update() calls performed so far (buffered for checkpoints).
        self.register_buffer('update_count', torch.tensor([0]))

    def update(self):
        """Run one V-REx optimization step on the next batch."""
        # Weight is 1.0 while annealing, then switches to penalty_weight.
        weight = 1.0 if self.update_count < self.anneal_iters else self.penalty_weight

        self.model.train()
        inputs, targets = self.dataset.get_next_batch()
        logits, _ = self.predict(inputs)

        n_domains = self.dataset.get_nb_training_domains()
        risks = self.dataset.loss_by_domain(logits, targets, n_domains)

        # Mean risk plus the variance of the risks across domains.
        mean_risk = risks.mean()
        variance = (risks - mean_risk).pow(2).mean()
        objective = mean_risk + weight * variance

        # Reset Adam when the penalty kicks in: it does not cope well with the
        # sharp jump in gradient magnitudes at that step.
        if self.update_count == self.anneal_iters:
            self.optimizer = torch.optim.Adam(
                self.model.parameters(),
                lr=self.optimizer.param_groups[0]['lr'],
                weight_decay=self.optimizer.param_groups[0]['weight_decay'])

        self.optimizer.zero_grad()
        objective.backward()
        self.optimizer.step()

        self.update_count += 1
class SD(ERM):
    """
    Gradient Starvation: A Learning Proclivity in Neural Networks
    Equation 25 from [https://arxiv.org/pdf/2011.09468.pdf]
    """
    def __init__(self, model, dataset, optimizer, hparams):
        super(SD, self).__init__(model, dataset, optimizer, hparams)
        # Hyper parameters
        self.penalty_weight = self.hparams['penalty_weight']
    def update(self):
        """One SD step: ERM loss plus an L2 penalty on the output logits."""
        # Put model into training mode
        self.model.train()
        # Get next batch
        X, Y = self.dataset.get_next_batch()
        # Split input / target
        # X, Y = self.dataset.split_input(env_batches)
        # Get predict and get (logit, features)
        out, _ = self.predict(X)
        # Compute losses
        n_domains = self.dataset.get_nb_training_domains()
        domain_losses = self.dataset.loss_by_domain(out, Y, n_domains)
        # Create domain dimension in tensors:
        # e.g. for source domains: (ENVS * batch_size, ...) -> (ENVS, batch_size, ...)
        # for time domains: (batch_size, ENVS, ...) -> (ENVS, batch_size, ...)
        # NOTE(review): `domain_out` is not used below (the penalty is computed
        # on `out` directly) -- confirm whether the per-domain split is needed.
        domain_out, _ = self.dataset.split_tensor_by_domains(out, Y, n_domains)
        # Compute loss for each environment
        sd_penalty = torch.pow(out, 2).sum(dim=-1)
        # sd_penalty = torch.zeros(env_out.shape[0]).to(env_out.device)
        # for i in range(env_out.shape[0]):
        #     for t_idx in range(env_out.shape[2]): # Number of time steps
        #         sd_penalty[i] += (env_out[i, :, t_idx, :] ** 2).mean()
        sd_penalty = sd_penalty.mean()
        objective = domain_losses.mean() + self.penalty_weight * sd_penalty
        # Back propagate
        self.optimizer.zero_grad()
        objective.backward()
        self.optimizer.step()
# class ANDMask(ERM):
# """
# Learning Explanations that are Hard to Vary [https://arxiv.org/abs/2009.00329]
# AND-Mask implementation from [https://github.com/gibipara92/learning-explanations-hard-to-vary]
# """
# def __init__(self, model, dataset, loss_fn, optimizer, hparams):
# super(ANDMask, self).__init__(model, dataset, loss_fn, optimizer, hparams)
# # Hyper parameters
# self.tau = self.hparams['tau']
# def mask_grads(self, tau, gradients, params):
# for param, grads in zip(params, gradients):
# grads = torch.stack(grads, dim=0)
# grad_signs = torch.sign(grads)
# mask = torch.mean(grad_signs, dim=0).abs() >= self.tau
# mask = mask.to(torch.float32)
# avg_grad = torch.mean(grads, dim=0)
# mask_t = (mask.sum() / mask.numel())
# param.grad = mask * avg_grad
# param.grad *= (1. / (1e-10 + mask_t))
# def update(self, minibatches_device, dataset, device):
# ## Group all inputs and send to device
# all_x = torch.cat([x for x,y in minibatches_device]).to(device)
# all_y = torch.cat([y for x,y in minibatches_device]).to(device)
# # Get logit and make prediction on PRED_TIME
# ts = torch.tensor(dataset.PRED_TIME).to(device)
# out, _ = self.predict(all_x, ts, device)
# # Split data in shape (n_train_envs, batch_size, len(PRED_TIME), num_classes)
# out_split = dataset.split_output(out)
# labels_split = dataset.split_labels(all_y)
# # Compute loss for each environment
# env_losses = torch.zeros(out_split.shape[0]).to(device)
# for i in range(out_split.shape[0]):
# for t_idx in range(out_split.shape[2]): # Number of time steps
# env_losses[i] += self.loss_fn(out_split[i, :, t_idx, :], labels_split[i,:,t_idx])
# # Compute gradients for each env
# param_gradients = [[] for _ in self.model.parameters()]
# for env_loss in env_losses:
# env_grads = autograd.grad(env_loss, self.model.parameters(), retain_graph=True)
# for grads, env_grad in zip(param_gradients, env_grads):
# grads.append(env_grad)
# # Back propagate
# self.optimizer.zero_grad()
# self.mask_grads(self.tau, param_gradients, self.model.parameters())
# self.optimizer.step()
# class IGA(ERM):
# """
# Inter-environmental Gradient Alignment
# From https://arxiv.org/abs/2008.01883v2
# """
# def __init__(self, model, dataset, optimizer, hparams):
# super(IGA, self).__init__(model, dataset, optimizer, hparams)
# # Hyper parameters
# self.penalty_weight = self.hparams['penalty_weight']
# def update(self):
# # Put model into training mode
# self.model.train()
# # Get next batch
# X, Y = self.dataset.get_next_batch()
# # Split input / target
# # X, Y = self.dataset.split_input(env_batches)
# # There is an unimplemented feature of cudnn that makes it impossible to perform double backwards pass on the network
# # This is a workaround to make it work proposed by pytorch, but I'm not sure if it's the right way to do | |
11: I11i / OOooOOo . o0oOOo0O0Ooo - O0 * OoooooooOO % iII111i
if 7 - 7: OoOoOO00 . IiII + OoooooooOO - I1Ii111 / oO0o
def lisp_process_api(process, lisp_socket, data_structure):
    """Handle one API request of the form "<command>%<json-parms>" received
    over the LISP IPC socket and send the JSON-encoded result back to the
    lisp-core process."""
    command, parms = data_structure.split("%")

    lprint("Process API request '{}', parameters: '{}'".format(command,
        parms))

    data = []
    if (command == "map-cache"):
        # Empty parms means "dump the whole cache"; otherwise look up one entry.
        if (parms == ""):
            data = lisp_map_cache.walk_cache(lisp_process_api_map_cache, data)
        else:
            data = lisp_process_api_map_cache_entry(json.loads(parms))

    if (command == "site-cache"):
        if (parms == ""):
            data = lisp_sites_by_eid.walk_cache(lisp_process_api_site_cache,
                data)
        else:
            data = lisp_process_api_site_cache_entry(json.loads(parms))

    if (command == "map-server"):
        parms = {} if (parms == "") else json.loads(parms)
        data = lisp_process_api_ms_or_mr(True, parms)

    if (command == "map-resolver"):
        parms = {} if (parms == "") else json.loads(parms)
        data = lisp_process_api_ms_or_mr(False, parms)

    if (command == "database-mapping"):
        data = lisp_process_api_database_mapping()

    #
    # Send the JSON-encoded result back to the lisp-core process.
    #
    data = json.dumps(data)
    ipc = lisp_api_ipc(process, data)
    lisp_ipc(ipc, lisp_socket, "lisp-core")
    return
def lisp_process_api_map_cache(mc, data):
    """walk_cache() callback: gather one map-cache entry into `data`.
    (S,G) entries recurse into the per-source cache."""
    #
    # Destination-only state: gather this entry directly.
    #
    if (mc.group.is_null()): return(lisp_gather_map_cache_data(mc, data))

    if (mc.source_cache == None): return([True, data])

    #
    # (source, group) state: walk every source for this group entry.
    #
    data = mc.source_cache.walk_cache(lisp_gather_map_cache_data, data)
    return([True, data])
def lisp_gather_map_cache_data(mc, data):
    """Build a JSON-friendly dict describing map-cache entry `mc` (EID prefix,
    timers, and the full RLOC-set) and append it to `data`. Returns
    [True, data] so cache walks continue."""
    entry = {}
    entry["instance-id"] = str(mc.eid.instance_id)
    entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
    if (mc.group.is_null() == False):
        entry["group-prefix"] = mc.group.print_prefix_no_iid()

    entry["uptime"] = lisp_print_elapsed(mc.uptime)
    # NOTE(review): "expires" is filled from mc.uptime, not an expiry
    # timestamp -- confirm this is intentional.
    entry["expires"] = lisp_print_elapsed(mc.uptime)
    entry["action"] = lisp_map_reply_action_string[mc.action]
    entry["ttl"] = "--" if mc.map_cache_ttl == None else str(mc.map_cache_ttl / 60)

    #
    # Encode the RLOC-set.
    #
    rloc_records = []
    for rloc in mc.rloc_set:
        record = {}
        if (rloc.rloc_exists()):
            record["address"] = rloc.rloc.print_address_no_iid()

        if (rloc.translated_port != 0):
            record["encap-port"] = str(rloc.translated_port)

        record["state"] = rloc.print_state()
        if (rloc.geo): record["geo"] = rloc.geo.print_geo()
        if (rloc.elp): record["elp"] = rloc.elp.print_elp(False)
        if (rloc.rle): record["rle"] = rloc.rle.print_rle(False)
        if (rloc.json): record["json"] = rloc.json.print_json(False)
        if (rloc.rloc_name): record["rloc-name"] = rloc.rloc_name
        stats = rloc.stats.get_stats(False, False)
        if (stats): record["stats"] = stats

        record["uptime"] = lisp_print_elapsed(rloc.uptime)
        record["upriority"] = str(rloc.priority)
        record["uweight"] = str(rloc.weight)
        record["mpriority"] = str(rloc.mpriority)
        record["mweight"] = str(rloc.mweight)

        #
        # RLOC-probe state, only present when a reply has been seen.
        #
        last_reply = rloc.last_rloc_probe_reply
        if (last_reply):
            record["last-rloc-probe-reply"] = lisp_print_elapsed(last_reply)
            record["rloc-probe-rtt"] = str(rloc.rloc_probe_rtt)

        record["rloc-hop-count"] = rloc.rloc_probe_hops
        record["recent-rloc-hop-counts"] = rloc.recent_rloc_probe_hops

        rtts = []
        for rtt in rloc.recent_rloc_probe_rtts: rtts.append(str(rtt))
        record["recent-rloc-probe-rtts"] = rtts

        rloc_records.append(record)

    entry["rloc-set"] = rloc_records

    data.append(entry)
    return([True, data])
from __future__ import annotations
import pathlib
import tkinter as tk
import typing
from math import pi
from tkinter import ttk
from interface import general_functions
from interface.material_and_measures import Distance, Force, WireMaterial
class Note:
    """ A single note in an instrument, contains functions and data related to the wire used.

    Owns the tk widgets for one row of the instrument grid and the
    frequency/tension calculations for that note's wire.
    """
    instrument: Instrument                # parent Instrument frame
    _std_note: int                        # standard note number, A0 = 1, A4 = 49
    _material_select: tk.StringVar        # selected wire material code
    _length: tk.DoubleVar                 # speaking length in mm
    _diameter: tk.DoubleVar               # wire diameter in mm
    _wire_count: tk.IntVar                # number of strings for this note
    _frequency_var: tk.StringVar          # display string, e.g. "440.00hz"
    _frequency_float: float               # numeric frequency in hz
    _force: tk.DoubleVar                  # displayed tension result
    _tkk_items: list[ttk.Label | ttk.Combobox | ttk.Entry]
    tkk_input_items: list[ttk.Combobox | ttk.Entry]

    def __init__(self, instrument: Instrument, std_note: int):
        """
        :param instrument: parent :class:`Instrument`
        :param std_note: standard note number, A0 = 1, C0 = 4, A4 = 49
        :raises ValueError: if std_note is not an int
        """
        if not isinstance(std_note, int):
            raise ValueError(std_note)
        # Grid row of this note's widgets; rows 0-9 are the instrument header,
        # and the factor 2 leaves odd rows free for per-octave separators.
        _row = std_note * 2 + 10
        self.instrument = instrument
        # Initialize variables
        self._std_note = std_note
        self._frequency_var = tk.StringVar(instrument, '0')
        self._wire_count = tk.IntVar(instrument, 1)
        self._material_select = tk.StringVar(instrument, None)
        self._diameter = tk.DoubleVar(instrument, '')
        self._length = tk.DoubleVar(instrument, '')
        self._force = tk.DoubleVar(instrument)
        self._frequency_float = 0
        self.calculate_frequency()
        # set tk items; the material combobox refreshes its value list each
        # time it is opened (postcommand)
        _lbl_std_note = ttk.Label(instrument, text=self._std_note, width=6)
        _lbl_str_note = ttk.Label(instrument, text=self.get_std_note_name(), width=6)
        _lbl_frequency = ttk.Label(instrument, textvariable=self._frequency_var)
        _combo_material_select = ttk.Combobox(instrument, textvariable=self._material_select,
                                              postcommand=lambda: _combo_material_select.configure(
                                                  values=WireMaterial.code_name_list()))
        _ent_length = ttk.Entry(instrument, textvariable=self._length)
        _ent_diameter = ttk.Entry(instrument, textvariable=self._diameter)
        _ent_wire_count = ttk.Entry(instrument, textvariable=self._wire_count)
        _ent_force = ttk.Label(instrument, textvariable=self._force)
        self._tkk_items = [_lbl_std_note, _lbl_str_note, _lbl_frequency,
                           _ent_length, _combo_material_select, _ent_diameter, _ent_wire_count, _ent_force]
        self.tkk_input_items = [_ent_length, _combo_material_select, _ent_diameter, _ent_wire_count]
        # set grid positions of items, uses the order set by tkk_input_items list
        for i, _t in enumerate(self._tkk_items):
            _t.grid(row=_row, column=i, sticky=tk.EW)
        # bind movement keys to tk input items; `_n=_n` default args bind the
        # loop variable early (avoids the late-binding closure pitfall)
        for _n, _t in enumerate(self.tkk_input_items):
            # bind force calculation on focus loss
            _t.bind("<FocusOut>", self.update_force, add=True)
            # bind return to drop a cell down
            _t.bind("<Right>", lambda e, _n=_n: self.instrument.get_next_note_input(self._std_note, _n, 0, 1))
            _t.bind("<Left>", lambda e, _n=_n: self.instrument.get_next_note_input(self._std_note, _n, 0, -1))
            _t.bind("<Shift-Return>", lambda e, _n=_n: self.instrument.get_next_note_input(self._std_note, _n, -1))
            _t.bind("<Up>", lambda e, _n=_n: self.instrument.get_next_note_input(self._std_note, _n, -1))
            _t.bind("<Return>", lambda e, _n=_n: self.instrument.get_next_note_input(self._std_note, _n, 1))
            _t.bind("<Down>", lambda e, _n=_n: self.instrument.get_next_note_input(self._std_note, _n, 1))
        # highlighter bindings
        general_functions.bind_highlighting_on_focus(_ent_length, _ent_diameter, _ent_wire_count)
        # add separator above each C
        if std_note % 12 == 4:
            _separator = ttk.Separator(instrument, orient=tk.HORIZONTAL)
            _separator.grid(column=0, columnspan=len(self._tkk_items), row=_row - 1, sticky=tk.NSEW)

    def destroy(self):
        """Destroy every tk widget belonging to this note."""
        for i_ in self._tkk_items:
            i_.destroy()

    def calculate_frequency(self):
        """ calculate the _frequency_var of `Note` based the pitch of A1 in the parent :class:`Instrument` """
        # _frequency_var = 2 ** ((note_number - 49) / 12 ) * (_frequency_var of A1)
        self._frequency_float = 2 ** ((self.get_std_note_number() - 49) / 12) * self.instrument.get_pitch()
        self._frequency_var.set(f"{self._frequency_float:>.2f}hz")

    def get_std_note_number(self) -> int:
        """Standard note number (A0 = 1, A4 = 49)."""
        return self._std_note

    def get_std_note_name(self) -> str:
        """Scientific name of the note, e.g. 'A#2'."""
        return general_functions.note_number_to_name(self._std_note)

    def get_frequency(self) -> float:
        """Frequency in hz, derived from the instrument's pitch of A1."""
        return self._frequency_float

    def get_wire_count(self) -> int:
        """Number of strings for this note (from the entry field)."""
        return self._wire_count.get()

    def get_wire_type(self) -> WireMaterial:
        """Material selected in the combobox (looked up by its code prefix)."""
        return WireMaterial.get_by_code(self._material_select.get().split(' ')[0])

    def get_diameter(self) -> Distance:
        """Wire diameter as entered, in mm."""
        return Distance(mm=self._diameter.get())

    def get_length(self) -> Distance:
        """Speaking length as entered, in mm."""
        return Distance(mm=self._length.get())

    def get_force(self) -> Force:
        """
        calculate the tension and _diameter of the wire using methods from \n
        `sound_from_wire_equation <https://www.school-for-champions.com/science/sound_from_wire_equation.htm>`_ \n
        --- **f** is the _frequency_var in hertz (Hz) or cycles per second \n
        --- **L** is the _length of the wire in centimeters (cm) \n
        --- **d** is the _diameter of the wire in cm \n
        --- **T** is the tension on the wire in gm-cm/s² \n
        --- **π** is the Greek letter pi = 3.14 \n
        --- **δ** is the density of the wire in gm/cm³ (Greek letter small delta) \n
        :return: :class:`Tension` of the string as `T = πf²L²d²δ`
        """
        pi_hz = pi * self.get_frequency() ** 2  # πf²
        le = self.get_length().cm() ** 2  # L²
        di = self.get_diameter().cm() ** 2  # d²
        den = self.get_wire_type().density.g_cm3()  # δ
        gcm = pi_hz * le * di * den
        # Total tension scales with the number of strings.
        return Force(g_cm_s2=gcm * self.get_wire_count())

    def update_force(self, *arg):
        """Recompute and display the tension; silently skips incomplete input."""
        try:
            self._force.set(str(self.get_force()))
        except (ValueError, TypeError):
            # Fix: the original `except ValueError or TypeError` evaluated to
            # `except ValueError` only, so TypeError escaped. Both are expected
            # when required data is missing.
            pass

    def state_import(self, data: dict):
        """ convert dict of input fields to a Note """
        self._wire_count.set(int(data['_wire_count']))
        self._material_select.set(str(data['_material_select']))
        self._diameter.set(float(data['_diameter']))
        self._length.set(float(data['_length']))
        self.calculate_frequency()
        self.update_force()

    def state_export(self) -> dict[str, int | str]:
        """ convert input fields to a dict """
        return dict(_wire_count=self._wire_count.get(),
                    _material_select=self._material_select.get(),
                    _diameter=self._diameter.get(),
                    _length=self._length.get())

    def set_focus_to_input(self, input_pos):
        """ Used for binding <Enter>
        :param input_pos: position on the input items list
        """
        try:
            self.tkk_input_items[input_pos].focus_set()
        except IndexError as ie:
            # ignore index errors here, on the off chance its needed
            print(f"IndexError {ie} in Note.set_focus_to_input(self, {input_pos})")
class Instrument(ttk.Frame):
notes: dict[int, Note]
lowest_key: tk.StringVar
highest_key: tk.StringVar
pitch: tk.DoubleVar
file_uri: pathlib.Path | None
    def __init__(self, parent):
        """Build the instrument header (name, key range, pitch inputs) and the
        column headings for the per-note grid; the notes mapping starts empty."""
        super(Instrument, self).__init__(parent)
        """
        """
        self.parent = parent
        # No backing file until the user saves/loads one.
        self.file_uri = None
        # Labels
        _lbl_inst_name = ttk.Label(self, text="Instrument Name")
        _lbl_lowest_key = ttk.Label(self, text="Lowest Key")
        _lbl_highest_key = ttk.Label(self, text="Highest Key")
        _lbl_pitch = ttk.Label(self, text="Pitch of A1 (hz)")
        # Labels position
        _lbl_inst_name.grid(row=0, column=0, columnspan=3)
        _lbl_lowest_key.grid(row=0, column=3)
        _lbl_highest_key.grid(row=0, column=4)
        _lbl_pitch.grid(row=0, column=5)
        # Variables Initialize
        self.inst_name = tk.StringVar(self, 'Instrument')
        self.lowest_key = tk.StringVar(self, '1')
        self.highest_key = tk.StringVar(self, '40')
        self.pitch = tk.DoubleVar(self, 440)
        # Variables set tk types
        _inst_name = ttk.Entry(self, textvariable=self.inst_name)
        _lowest_key = ttk.Entry(self, textvariable=self.lowest_key)
        _highest_key = ttk.Entry(self, textvariable=self.highest_key)
        _pitch = ttk.Entry(self, textvariable=self.pitch)
        _button = ttk.Button(self, text="Update Instrument", command=self.update_notes)
        # Variables position
        _inst_name.grid(row=1, column=0, columnspan=3, sticky=tk.EW)
        _lowest_key.grid(row=1, column=3, sticky=tk.EW)
        _highest_key.grid(row=1, column=4, sticky=tk.EW)
        _pitch.grid(row=1, column=5, sticky=tk.EW)
        _button.grid(row=0, column=6, rowspan=2, columnspan=3, sticky=tk.S)
        # add heading labels for Notes
        for i, name in enumerate(['Number', 'Name', 'Frequency', 'Length(mm)', 'Material',
                                  'Diameter(mm)', 'Count', 'Force(kgF)']):
            ttk.Label(self, text=name, anchor=tk.CENTER).grid(row=2, column=i, sticky=tk.EW)
            # Wider minimum for the computed/read-only columns (0,1,2,7).
            self.grid_columnconfigure(i,
                                      weight=1,
                                      minsize=75 if i in {0, 1, 2, 7} else 50)
        # bindings
        general_functions.bind_highlighting_on_focus(_inst_name, _lowest_key, _highest_key, _pitch)
        self.notes = dict()
def update_notes(self, *args):
"""
update the notes shown based on the lowest and highest keys given,
destroys notes outside of the given range
"""
note_numbers = list(range(self.get_lowest_key(), self.get_highest_key() + 1))
# add new notes
for nt in note_numbers:
if nt in self.notes:
continue
self.notes[nt] = Note(self, nt)
# delete no longer required Notes
for k in list(self.notes.keys()):
if k not in set(note_numbers):
self.notes[k].destroy()
self.notes.pop(k)
# update frequencies
for k, nt in self.notes.items():
nt.calculate_frequency()
    def get_name(self) -> str:
        """ get the given Instrument name as a string """
        return self.inst_name.get()

    def get_pitch(self) -> float:
        """ get the Instrument's pitch of A1 in hz, as a float """
        return self.pitch.get()

    def get_lowest_key(self) -> int:
        """ get lowest key as a standard note number (accepts a number or a
        scientific name in the entry field) """
        return general_functions.note_name_to_number(self.lowest_key.get())

    def get_highest_key(self) -> int:
        """ get highest key as a standard note number (accepts a number or a
        scientific name in the entry field) """
        return general_functions.note_name_to_number(self.highest_key.get())
def apply_to_note(self, function: typing.Callable[[Note], any], note_number: int | str):
"""
Apply function to a single note
:param function: any function, applied as function(note)
:param note_number: any note, given as std number (A0=1) or scientific name 'A#2'
"""
if isinstance(note_number, str):
note_number = general_functions.note_name_to_number(note_number)
if note_number not in self.notes.items():
return
return function(self.notes[note_number])
def apply_to_note_list(self, function: typing.Callable[[Note], None], note_list: list[int | str]):
"""
Apply function to each note in the note list given
:param function: any function, applied as function(note)
:param note_list: list of notes, given as std number (A0=1) or Scientific Name 'A#2' or a combination of both
"""
note_list = [var if isinstance(
var, int) else general_functions.note_name_to_number(var) for var in note_list]
for note in note_list:
self.apply_to_note(function, note)
def apply_to_all_notes(self, function: typing.Callable[[Note], None]):
"""
Apply function to all notes in the Instrument
:param function: any function, applied as function(note)
"""
for k, note in self.notes.items():
function(note)
def iter_notes(self) -> typing.Iterator[Note]:
"""
Iterate through each note in the Instrument starting from lowest note to highest note
:returns: each Note in the Instrument from lowest to highest
"""
for _, note in self.notes.items():
yield note
def note_list(self) -> list[Note]:
    """Return the Instrument's Notes as a list, ordered lowest to highest."""
    return [*self.notes.values()]
def state_import(self, data: dict):
    """
    Restore the Instrument's input fields from an exported dict (see
    ``state_export``), including per-Note fields.
    This resets all current notes.
    """
    # Tear down the existing Note widgets before rebuilding from the data.
    for k_, note in self.notes.items():
        note.destroy()
    self.notes = dict()
    self.inst_name.set(data['inst_name'])
    self.lowest_key.set(data['lowest_key'])
    self.highest_key.set(data['highest_key'])
    self.pitch.set(float(data['pitch']))
    # Recreate the Note objects for the new key range, then feed each one
    # its saved state.
    self.update_notes()
    for key, var in self.notes.items():
        # Exported note keys are stored as strings; normalise both ways so
        # the lookup works regardless of how the dict was serialised.
        key = int(key)
        var.state_import(data['notes'][str(key)])
def state_export(self) -> dict:
    """Serialise every input field (including all Notes) into a plain dict."""
    exported_notes = {}
    for number, note in self.notes.items():
        exported_notes[number] = note.state_export()
    return {
        'inst_name': self.inst_name.get(),
        'lowest_key': self.lowest_key.get(),
        'highest_key': self.highest_key.get(),
        'pitch': self.pitch.get(),
        'notes': exported_notes,
    }
def get_next_note_input(self, note_number: int, input_pos: int, note_increment=0, input_increment=0):
"""
Used for binding <Enter>
:param note_number: integer representation of the note
:param input_pos: position in the input items list of a Note
:param note_increment: number of | |
<filename>assembler/cas.py
#!/bin/python3
from typing import List, TextIO, Tuple
import os, re, sys
class ParseException(Exception):
    """Raised when an assembly source line cannot be parsed."""
    pass
class SourceLine:
    '''
    A single line of the source file together with its 1-based line number.
    '''
    def __init__(self, line: str, num: int):
        self.line = line
        self.num = num

    def __str__(self):
        # BUG FIX: num is an int; the old "self.num + ..." raised TypeError
        # whenever a SourceLine was printed.
        return str(self.num) + ": " + self.line
class ASMStatement:
    '''
    A single assembly statement, the SourceLine it came from and, once
    assigned, its program counter (pc).
    '''
    def __init__(self, statement: str, source_line: SourceLine, pc: int = None):
        self.statement = statement
        self.source_line = source_line
        self.pc = pc

    def __str__(self):
        # "x" stands in for a statement that has not been assigned a pc yet.
        prefix = "x" if self.pc is None else str(self.pc)
        return prefix + ": " + self.statement
# Pre-compiled source-syntax patterns. FIX: raw strings avoid the
# invalid-escape-sequence warnings (e.g. "\-", "\.") that plain string
# literals emit on modern Python; the patterns themselves are unchanged.
ASMSRC_REGEX = re.compile(r"([a-zA-Z0-9_\-\.]+)\.s$")          # assembly source filename
ARGS2_REGEX = re.compile(r"^\s*([a-zA-Z0-9_\-]+)\s*,\s*([a-zA-Z0-9_\-]+)\s*$")
ARGS3_REGEX = re.compile(
    r"^\s*([a-zA-Z0-9_\-]+)\s*,\s*([a-zA-Z0-9_\-]+)\s*,\s*([a-zA-Z0-9_\-]+)\s*$")
COMMENT_REGEX = re.compile(r"^(.*);(.*)$")                     # code ; comment
INSTR_REGEX = re.compile(r"^\s*([a-zA-Z0-9_\-]+)\s+(.*)$")     # mnemonic + args
LABEL_REGEX = re.compile(r"^\s*([a-zA-Z0-9_\-]+)\s*:(.*)$")    # label: rest
NOP_STOP_REGEX = re.compile(r"^\s*(noop|stop)\s*$")            # zero-argument instructions
# Mnemonic -> opcode bit pattern. Field widths differ per instruction class
# (ALU ops 5 bits, jumps 4 bits, loads/stores 7 bits); stop is a full-word
# encoding. See the parse_* functions for how operand fields are packed.
INSTRUCTIONS = {
    "add"  : 0b00011,
    "and"  : 0b10111,
    "call" : 0b1010,     # call subroutine
    "gt"   : 0b01100,    # greater than unsigned
    "gts"  : 0b11100,    # greater than signed
    "jez"  : 0b0110,     # jump if zero
    "jmp"  : 0b0010,     # unconditional jump
    "jnz"  : 0b1110,     # jump if not zero
    "ldhi" : 0b10001,    # load 8-bit immediate into high byte, preserve low byte
    "ldi"  : 0b01001,    # load 8-bit immediate unsigned
    "ldsi" : 0b11001,    # load 8-bit immediate signed
    "ldw"  : 0b1000001,  # load word from memory
    "lt"   : 0b00100,    # less than unsigned
    "lts"  : 0b10100,    # less than signed
    "noop" : 0,          # no operation
    "or"   : 0b11011,
    "stw"  : 0b1100001,  # store word to memory
    "sll"  : 0b10011,    # shift left logically
    "sra"  : 0b01111,    # shift right arithmetically
    "srl"  : 0b01011,    # shift right logically
    "stop" : 0x8000,     # terminate execution and lock up processor
    "sub"  : 0b00111,
    "xor"  : 0b11111
}
# Register name -> register number. "sp" (stack pointer) aliases r6 and
# "lr" (link register) aliases r7.
REGISTERS = {"r0": 0, "r1": 1, "r2": 2, "r3": 3, "r4": 4, "r5": 5, "r6": 6,
             "r7": 7, "sp": 6, "lr": 7}
# Dispatch table: mnemonic -> parser invoked as parser(instr, args, pc).
# All three arguments are always passed; parsers that do not need the pc
# simply ignore it.
INSTR_PARSER = {
    "add"  : lambda x, y, z: parse_alu_statement(x, y),
    "and"  : lambda x, y, z: parse_alu_statement(x, y),
    "call" : lambda x, y, z: parse_unconditional_jump_statement(x, y, z),
    "gt"   : lambda x, y, z: parse_alu_statement(x, y),
    "gts"  : lambda x, y, z: parse_alu_statement(x, y),
    "jez"  : lambda x, y, z: parse_conditional_jump_statement(x, y, z),
    "jmp"  : lambda x, y, z: parse_unconditional_jump_statement(x, y, z),
    "jnz"  : lambda x, y, z: parse_conditional_jump_statement(x, y, z),
    "ldhi" : lambda x, y, z: parse_load_imm_statement(x, y),
    "ldi"  : lambda x, y, z: parse_load_imm_statement(x, y),
    "ldsi" : lambda x, y, z: parse_load_imm_statement(x, y),
    "ldw"  : lambda x, y, z: parse_load_store_statement(x, y),
    "lt"   : lambda x, y, z: parse_alu_statement(x, y),
    "lts"  : lambda x, y, z: parse_alu_statement(x, y),
    "noop" : lambda x, y, z: INSTRUCTIONS["noop"],
    "or"   : lambda x, y, z: parse_alu_statement(x, y),
    "stw"  : lambda x, y, z: parse_load_store_statement(x, y),
    "sll"  : lambda x, y, z: parse_alu_statement(x, y),
    "sra"  : lambda x, y, z: parse_alu_statement(x, y),
    "srl"  : lambda x, y, z: parse_alu_statement(x, y),
    "stop" : lambda x, y, z: INSTRUCTIONS["stop"],
    "sub"  : lambda x, y, z: parse_alu_statement(x, y),
    "xor"  : lambda x, y, z: parse_alu_statement(x, y)
}
# Label name -> program counter, presumably filled in during a first pass
# over the source before instructions are encoded — TODO confirm against
# the (unseen) driver code.
LABELS = {}
def parse_int_literal(literal_str: str) -> int:
    '''
    Convert the string form of an integer literal (decimal, or 0b/0o/0x
    prefixed binary/octal/hex) to an int. Returns None when the string is
    not a valid literal.
    '''
    try:
        if literal_str.startswith("0b"): return int(literal_str, base=2)
        elif literal_str.startswith("0o"): return int(literal_str, base=8)
        elif literal_str.startswith("0x"): return int(literal_str, base=16)
        else: return int(literal_str)
    except ValueError:
        # FIX: narrowed from a bare except. Only a malformed literal should
        # be treated as "not an integer"; any other exception (e.g.
        # KeyboardInterrupt) must propagate.
        return None
def int_bin(num: int, n: int) -> str:
    '''
    Return the lowest n bits of num as an n-character binary string
    (two's-complement style for negative numbers).
    '''
    bits = (str((num >> position) & 1) for position in range(n - 1, -1, -1))
    return "".join(bits)
def int_hex(num: int, n: int) -> str:
    '''
    Return the lowest n nibbles of num as an n-digit lowercase hex string
    (two's-complement style for negative numbers).
    '''
    digit_chars = "0123456789abcdef"
    # Walk nibbles from most to least significant.
    return "".join(digit_chars[(num >> 4 * (n - 1 - i)) & 15] for i in range(n))
def parse_alu_statement(instr: str, args: str) -> int:
    '''
    Parses instructions that use the ALU: add, and, gt, gts, lt, lts, or, sll,
    sra, srl, sub, xor. Returns the integer representation of the machine code
    of the statement. Raises ParseException on malformed arguments.
    '''
    args_match_result = ARGS3_REGEX.match(args)
    if not args_match_result:
        # BUG FIX: the old call passed end="" to ParseException, which takes
        # no such keyword, so raising it crashed with TypeError instead of
        # reporting the parse error.
        raise ParseException("invalid arguments")
    dest = args_match_result.group(1)
    op_a = args_match_result.group(2)
    op_b = args_match_result.group(3)
    opcode = INSTRUCTIONS[instr]
    if dest not in REGISTERS:
        raise ParseException("first argument must be a register")
    if op_a not in REGISTERS:
        raise ParseException("second argument must be a register")
    # Integer representation of the register numbers
    dest = REGISTERS[dest]
    op_a = REGISTERS[op_a]
    if op_b in REGISTERS: # Register third argument
        return opcode | dest << 13 | op_a << 10 | REGISTERS[op_b] << 7
    op_b = parse_int_literal(op_b) # Integer literal third argument
    if op_b is None:
        raise ParseException(
            "third argument must be a register or an integer literal")
    # 1 << 5 is for specifying that the third argument is an immediate
    # & 15 masks 4 bits of the immediate
    # & 65535 masks the lowest 16 bits of the instruction
    return (opcode | 1 << 5 | dest << 13 | op_a << 10 | (op_b & 15) << 6) & 65535
def parse_load_imm_statement(instr: str, args: str) -> int:
    '''
    Parses ldi, ldsi, ldhi and returns the integer representation of the
    machine code of the statement. Raises ParseException on bad arguments.
    '''
    match = ARGS2_REGEX.match(args)
    if match is None:
        raise ParseException("invalid arguments")
    dest, imm_text = match.group(1), match.group(2)
    if dest not in REGISTERS:
        raise ParseException("first argument must be a register")
    imm_value = parse_int_literal(imm_text)
    if imm_value is None:
        raise ParseException("second argument must be an integer literal")
    # Low 8 bits of the immediate occupy bits 5-12, the destination register
    # bits 13-15; the final mask keeps the word within 16 bits.
    word = INSTRUCTIONS[instr] | (imm_value & 255) << 5 | REGISTERS[dest] << 13
    return word & 65535
def parse_load_store_statement(instr: str, args: str) -> int:
    '''
    Parses ldw and stw, and returns the integer representation of the machine
    code of the statement. Raises ParseException on bad arguments.
    '''
    match = ARGS2_REGEX.match(args)
    if match is None:
        raise ParseException("invalid arguments")
    data_reg, addr_reg = match.group(1), match.group(2)
    if data_reg not in REGISTERS:
        raise ParseException("first argument must be a register")
    if addr_reg not in REGISTERS:
        raise ParseException("second argument must be a register")
    # Data register occupies bits 13-15, address register bits 10-12;
    # the mask keeps the word within 16 bits.
    word = INSTRUCTIONS[instr] | REGISTERS[data_reg] << 13 | REGISTERS[addr_reg] << 10
    return word & 65535
def parse_conditional_jump_statement(instr: str, args: str, pc: int) -> int:
    '''
    Parses jez and jnz, and returns the integer representation of the machine
    code of the statement. The second argument may be a register or a label;
    label targets are encoded as a pc-relative 8-bit offset.
    '''
    args_match_result = ARGS2_REGEX.match(args)
    if not args_match_result:
        raise ParseException("invalid arguments")
    opcode = INSTRUCTIONS[instr]
    cond = args_match_result.group(1)
    offset = args_match_result.group(2)
    if cond not in REGISTERS:
        raise ParseException("first argument must be a register")
    # BUG FIX: translate the condition register to its number *before* the
    # register-target branch below; the old code shifted the register *name*
    # (a str), so every "jez rX, rY" form crashed with TypeError.
    cond = REGISTERS[cond]
    if offset in REGISTERS:
        return opcode | cond << 13 | REGISTERS[offset] << 10
    if offset not in LABELS:
        raise ParseException("second argument must be a register or a label")
    # Offset is relative to the *next* instruction (pc + 1).
    offset = LABELS[offset] - (pc + 1)
    if offset > 127 or offset < -128: # 8-bit offset range
        raise ParseException("jump out of range")
    # 1 << 4 specifies that an offset is used instead of a register
    # & 65535 masks the lowest 16 bits
    return (opcode | 1 << 4 | (offset & 255) << 5 | cond << 13) & 65535
def parse_unconditional_jump_statement(instr: str, args: str, pc: int) -> int:
'''
Parses call and jmp, and returns the integer representation of the machine
code of the statement.
'''
opcode = INSTRUCTIONS[instr]
offset = 0
if args in REGISTERS:
return opcode | REGISTERS[args] << 10
if args not in LABELS:
raise ParseException("argument must be a register or a label")
offset = LABELS[args] - (pc + 1)
if offset > 1023 or offset < -1024: # 11-bit offset range
raise ParseException("call/jump out of range")
# 1 << 4 specifies that an offset is used instead of a register
# & 65535 masks the lowest 16 | |
#!/usr/bin/env python
###################### INFORMATION ##############################
# Extract the table of contents (TOC) from Reichanzeiger
#Program: **crop**
#Info: **Python3**
#Author: **<NAME>**
#Date: **08.04.2021**
####################### IMPORT ##################################
import argparse
import copy
import os
import warnings
import numpy as np
import scipy.misc as misc
import skimage as ski
import skimage.color as color
import skimage.filters.thresholding as th
import skimage.morphology as morph
import skimage.transform as transform
from scipy.ndimage import measurements
from skimage.io import imread, imsave
####################### CLASSES & METHODS ###########################
class Clippingmask():
    """Clipping rectangle, initialised to cover the whole image."""
    def __init__(self, image):
        # Start corner is the image origin.
        self.height_start, self.width_start = 0, 0
        shape = image.shape
        if len(shape) > 2:
            # Colour image: (height, width, channels).
            self.height_stop, self.width_stop, self.rgb = shape
        else:
            # Greyscale image: (height, width).
            self.height_stop, self.width_stop = shape
        # None means "automatic"; presumably set when the user supplies a
        # custom mask — TODO confirm against caller.
        self.user = None
class ImageParam():
    """Size and path bookkeeping for one input image."""
    def __init__(self, image, input):
        shape = image.shape
        if len(shape) > 2:
            self.height, self.width, self.rgb = shape
        else:
            self.height, self.width = shape
        dirname = os.path.dirname(input)
        self.path = dirname
        # All results land below "<image dir>/TOC-Extraction/".
        self.pathout = os.path.normpath(dirname + "/TOC-Extraction/")
        # Filled in later by deskew() when a deskewed copy is written.
        self.deskewpath = None
        # Basename without its extension.
        self.name = os.path.splitext(os.path.basename(input))[0]
class Linecoords():
    """Bounding-box coordinates of one labelled object (line candidate)."""
    def __init__(self, binary, value, object):
        height_slice, width_slice = object[0], object[1]
        self.height_start = height_slice.start
        self.height_stop = height_slice.stop
        self.width_start = width_slice.start
        self.width_stop = width_slice.stop
        self.middle = None
        self.object = object
        self.object_value = value
        # Deep copy so later edits to the label image do not mutate this
        # snapshot of the object's pixels.
        self.object_matrix = copy.deepcopy(binary[object])
        # 'B' (blank) or 'L' (vertical line), assigned during analysis.
        self.segmenttype = None
class SpliceParam():
    """Decode segment id and type from a spliced filename's parts."""
    def __init__(self, input, parts):
        self.name = os.path.splitext(input)[0]
        # The last two underscore-separated parts encode segment and type.
        self.segment = parts[-2]
        self.segmenttype = parts[-1]
####################### FUNCTIONS ##################################
def create_dir(newdir):
    """Create *newdir* (including parents) if it does not exist, printing it."""
    if not os.path.isdir(newdir):
        try:
            # exist_ok guards the TOCTOU race between the isdir() check above
            # and the makedirs() call (another process may create the dir).
            os.makedirs(newdir, exist_ok=True)
            print(newdir)
        except OSError:
            # FIX: catch OSError (makedirs never raised the old IOError name
            # for permission problems on Py3 semantics — IOError is an alias)
            # and fix the "directoy" typo in the message.
            print(("cannot create %s directory" % newdir))
def crop_lcol(args, image, image_param, list_linecoords, clippingmask):
    """
    Walk the analysed line coordinates and return the left-column crop (roi)
    of *image*, or None when no 'a'-type crop is requested/found.

    NOTE(review): several ``with warnings.catch_warnings()`` blocks below
    contain only ``simplefilter`` — they look like leftovers from removed
    imsave calls; the header-branch roi is computed but never used here.
    """
    # Find left column
    pixelheight = set_pixelground(image_param.height)
    # Rotate so the horizontal reference line sits in canonical position.
    image = np.rot90(image, args.horlinepos)
    for idx, linecoords in enumerate(list_linecoords):
        # Header
        if idx == 0:
            if not args.quiet: print("header")
            roi = image[0:linecoords.height_start - 2, 0:image_param.width]  # region of interest
            roi = np.rot90(roi, 4 - args.horlinepos)
            with warnings.catch_warnings():
                # Transform rotate convert the img to float and save convert it back
                warnings.simplefilter("ignore")
        # Crop middle segments
        if linecoords.segmenttype == 'B':
            if not args.quiet: print("blank")
            # Add sum extra space to the cords
            roi = image[linecoords.height_start + 2 - pixelheight(
                args.addstartheightc):linecoords.height_stop - 2 + pixelheight(args.addstopheightc),
                linecoords.width_start:linecoords.width_stop]  # region of interest
            roi = np.rot90(roi, 4 - args.horlinepos)
            with warnings.catch_warnings():
                # Transform rotate convert the img to float and save convert it back
                warnings.simplefilter("ignore")
                if args.horlinetype == 1:
                    # Flipped page: count segments from the other end.
                    idx = len(list_linecoords) - idx
                if 'c' in args.croptypes:
                    pass
        if linecoords.segmenttype == 'L':
            # Fixing column size
            if idx == 0:
                print("line-first")
                # linecoords.height_start = clippingmask.height_start + 17
            if not args.quiet: print("line")
            # First candidate: everything left of the vertical line.
            roi = image[
                linecoords.height_start - pixelheight(args.addstartheightab):linecoords.height_stop + pixelheight(
                    args.addstopheightab),
                0:linecoords.width_stop - 2]  # region of interest
            roi = np.rot90(roi, 4 - args.horlinepos)
            with warnings.catch_warnings():
                # Transform rotate convert the img to float and save convert it back
                warnings.simplefilter("ignore")
                if args.horlinetype == 1 and 'b' in args.croptypes:
                    idx = len(list_linecoords) - idx
                elif 'a' in args.croptypes:
                    return roi
            # Second candidate: full clipping-mask width (minus 1px border).
            roi = image[
                linecoords.height_start - pixelheight(args.addstartheightab):linecoords.height_stop + pixelheight(
                    args.addstopheightab),
                0 + 1:clippingmask.width_stop]
            roi = np.rot90(roi, 4 - args.horlinepos)
            with warnings.catch_warnings():
                # Transform rotate convert the img to float and save convert it back
                warnings.simplefilter("ignore")
                if args.horlinetype == 1 and 'a' in args.croptypes:
                    return roi
                elif 'a' in args.croptypes:
                    return roi
    return None
def cropping_lcol(imgpath, args):
    """
    Read *imgpath*, analyse its line coordinates and crop out the left column.

    Returns the cropped image array, or 1 when the image cannot be read
    (NOTE(review): mixed return types — callers must check for the int).
    """
    # Main cropping function that deskew, analyse and crops the image
    # read image
    print(f"Find toc in {imgpath}")
    try:
        image = imread("%s" % imgpath)
        image_param = ImageParam(image, imgpath)
        if args.imgmask != [0.0, 1.0, 0.0, 1.0]:
            # Apply the fractional image mask [h0, h1, w0, w1] before analysis.
            image = image[int(args.imgmask[0]*image_param.height):int(args.imgmask[1]*image_param.height),
                    int(args.imgmask[2]*image_param.width):int(args.imgmask[3]*image_param.width)]
            image_param = ImageParam(image, imgpath)
    except IOError:
        print(("cannot open %s" % imgpath))
        return 1
    create_dir(image_param.pathout)
    ####################### ANALYSE - LINECOORDS #######################
    print("start linecoord-analyse")
    clippingmask = Clippingmask(image)
    border, labels, list_linecoords, topline_width_stop = linecoords_analyse(args, image, image_param, clippingmask)
    ####################### CROP #######################################
    print("start crop lcol")
    lcol = crop_lcol(args, image, image_param, list_linecoords, clippingmask)
    return lcol
def cropping_toc(lcol, args):
    """
    Re-analyse the left-column crop *lcol* and extract the table of contents.

    Returns the path of the written TOC image (from crop_toc).
    """
    image_param = ImageParam(lcol, args.input)
    if args.imgmask != [0.0, 1.0, 0.0, 1.0]:
        # Apply the fractional image mask [h0, h1, w0, w1] to the column crop.
        lcol = lcol[int(args.imgmask[0] * image_param.height):int(args.imgmask[1] * image_param.height),
               int(args.imgmask[2] * image_param.width):int(args.imgmask[3] * image_param.width)]
        image_param = ImageParam(lcol, args.input)
    clippingmask = Clippingmask(lcol)
    # get_toc=True keeps the horizontal-line coordinates in the result list.
    border, labels, list_linecoords, topline_width_stop = linecoords_analyse(args, lcol, image_param, clippingmask, get_toc=True)
    ####################### CROP #######################################
    print("start crop toc")
    tocpath = crop_toc(args, lcol, image_param, list_linecoords)
    return tocpath
def crop_toc(args, image, image_param, list_linecoords):
    """
    Save the left-column image and crop the TOC header region out of it.

    Returns the path of the written "*_TOC.<ext>" file. When no header crop
    is produced, the whole (rotated) image is saved as the TOC instead.
    """
    # Find left column
    create_dir(image_param.pathout+os.path.normcase("/"+image_param.name.split(".",1)[0]+"/"))
    filepath = image_param.pathout+os.path.normcase("/"+image_param.name.split(".",1)[0]+"/")+image_param.name
    image = np.rot90(image, args.horlinepos)
    imsave("%s_leftcol.%s" % (filepath, args.extension),image)
    for idx, linecoords in enumerate(list_linecoords):
        # Header
        if idx == 0:
            if not args.quiet: print("header")
            roi = image[0:linecoords.height_start - 2, 0:image_param.width]  # region of interest
            roi = np.rot90(roi, 4 - args.horlinepos)
            with warnings.catch_warnings():
                # Transform rotate convert the img to float and save convert it back
                warnings.simplefilter("ignore")
                if args.horlinetype == 1 and 'f' in args.croptypes:
                    pass
                elif 'h' in args.croptypes:
                    imgpath = "%s_TOC.%s" % (filepath, args.extension)
                    print(imgpath)
                    imsave(imgpath, roi)
                    return imgpath
    # Fallback: no header crop written, save the full image as the TOC.
    imgpath = "%s_TOC.%s" % (filepath, args.extension)
    imsave(imgpath, image)
    return imgpath
def deskew(args, image, image_param):
    """
    Deskew *image* based on the main horizontal line.

    Finds a horizontal-line candidate, fits a straight line through the
    central portion of its top edge (between 20% and 80% by default), rotates
    the image by the fitted angle, writes the deskewed image plus an angle
    info file, and returns the path of the deskewed image (None if no line
    qualified).
    """
    uintimage = get_uintimg(image)
    binary = get_binary(args, uintimage)
    for x in range(0, args.binary_dilation):
        # NOTE(review): `selem=` was renamed `footprint=` in newer
        # scikit-image releases — confirm the pinned version before upgrading.
        binary = ski.morphology.binary_dilation(binary, selem=np.ones((3, 3)))
    labels, numl = measurements.label(binary)
    objects = measurements.find_objects(labels)
    deskew_path = None
    for i, b in enumerate(objects):
        linecoords = Linecoords(image, i, b)
        # The line has to be bigger than minwidth, smaller than maxwidth, stay in the top (30%) of the img,
        # only one obj allowed and the line isn't allowed to start contact the topborder of the image
        if int(args.minwidthhor * image_param.width) < get_width(b) < int(args.maxwidthhor * image_param.width) \
                and int(image_param.height * args.minheighthor) < get_height(b) < int(image_param.height * args.maxheighthor) \
                and int(image_param.height * args.minheighthormask) < (linecoords.height_start + linecoords.height_stop) / 2 < int(image_param.height * args.maxheighthormask) \
                and linecoords.height_start != 0:
            pixelwidth = set_pixelground(binary[b].shape[1])
            mean_y = []
            # Collect the top-edge y of the line across its central span,
            # rejecting samples that jump more than 5px from the previous one.
            old_start = None
            for idx in range(pixelwidth(args.deskewlinesize)):
                value_y = measurements.find_objects(labels[b][:, idx + pixelwidth((1.0 - args.deskewlinesize) / 2)] == i + 1)[0]
                if old_start is None:
                    old_start = value_y[0].start
                if abs(value_y[0].start - old_start) < 5:
                    mean_y.append(value_y[0].start)
                old_start = value_y[0].start
            # Fit a line through the samples; its slope gives the skew angle.
            polyfit_value = np.polyfit(list(range(0, len(mean_y))), mean_y, 1)
            deskewangle = np.arctan(polyfit_value[0]) * (360 / (2 * np.pi))
            args.ramp = True
            deskew_image = transform.rotate(image, deskewangle, mode="edge")
            create_dir(image_param.pathout + os.path.normcase("/deskew/"))
            deskew_path = "%s_deskew.%s" % (image_param.pathout + os.path.normcase("/deskew/") + image_param.name, args.extension)
            # Record the measured angle next to the deskewed image;
            # `with` guarantees the file is closed even on write errors.
            with open(image_param.pathout + os.path.normcase("/deskew/") + image_param.name + "_deskewangle.txt", "w") as deskewinfo:
                deskewinfo.write("Deskewangle:\t%f" % deskewangle)
            image_param.deskewpath = deskew_path
            with warnings.catch_warnings():
                # Transform rotate convert the img to float and save convert it back
                warnings.simplefilter("ignore")
                # BUG FIX: scipy.misc.imsave was removed from SciPy (>=1.2);
                # use the skimage.io.imsave already imported by this module.
                imsave(deskew_path, deskew_image)
            break
    return deskew_path
def get_binary(args, image):
    """Binarize *image* with Sauvola thresholding, invert it (ink -> 1) and
    rotate it into the canonical horizontal-line orientation."""
    sauvola = th.threshold_sauvola(image, args.threshwindow, args.threshweight)
    inverted = 1 - (image > sauvola)
    return np.rot90(inverted, args.horlinepos)
def get_height(s):
    """Height in rows of a find_objects slice pair."""
    rows = s[0]
    return rows.stop - rows.start
def get_linecoords(s):
    """[[h_start, h_stop], [w_start, w_stop]] for a find_objects slice pair."""
    return [[axis.start, axis.stop] for axis in (s[0], s[1])]
def get_mindist(s, length):
    """Distance of the object to the nearest vertical border, cut in half."""
    left_gap = s[1].start
    right_gap = length - s[1].stop
    nearest = left_gap if left_gap < right_gap else right_gap
    return nearest - int(nearest * 0.5)
def get_uintimg(image):
    """Return a greyscale, uint-typed copy of *image* (RGB is converted to
    grey; float64 data is converted to uint with warnings suppressed)."""
    if len(image.shape) > 2:
        grey = color.rgb2gray(copy.deepcopy(image))
    else:
        grey = copy.deepcopy(image)
    if grey.dtype == "float64":
        with warnings.catch_warnings():
            # img_as_uint warns about possible precision loss; intended here.
            warnings.simplefilter("ignore")
            grey = ski.img_as_uint(grey, force_copy=True)
    return grey
def get_width(s):
    """Width in columns of a find_objects slice pair."""
    cols = s[1]
    return cols.stop - cols.start
def linecoords_analyse(args,origimg, image_param, clippingmask, get_toc=False):
# Computes the clipping coords of the masks
image = get_uintimg(origimg)
origimg = np.rot90(origimg, args.horlinepos)
binary = get_binary(args, image)
labels, numl = measurements.label(binary)
objects = measurements.find_objects(labels)
count_height = 0
count_width = 0
pixelheight = set_pixelground(image_param.height)
pixelwidth = set_pixelground(image_param.width)
list_linecoords = []
border = image_param.width
topline_width_stop = image_param.height# Init list of linecoordinates the format is: [0]: width.start, width.stopt,
# [1]:height.start, height.stop, [2]: Type of line [B = blank, L = vertical line]
for i, b in enumerate(objects):
# The line has to be bigger than minwidth, smaller than maxwidth, stay in the top (30%) of the img,
# only one obj allowed and the line isn't allowed to start contact the topborder of the image
linecoords = Linecoords(labels, i, b)
if pixelwidth(0.8) < get_width(b) < pixelwidth(args.maxwidthhor):
print(b)
if pixelwidth(args.minwidthhor) < get_width(b) < pixelwidth(args.maxwidthhor) \
and pixelheight(args.minheighthor) < get_height(b) < pixelheight(args.maxheighthor) \
and pixelheight(args.minheighthormask) < linecoords.height_stop < pixelheight(args.maxheighthormask) \
and count_width == 0 \
and linecoords.height_start != 0:
# Distance Calculation - defining the clippingmask
border = get_mindist(b, image_param.width)
topline_width_stop = b[0].stop + 2 # Lowest Point of object + 2 Pixel
if clippingmask.user is None:
clippingmask.width_start = border
clippingmask.width_stop = image_param.width - border
clippingmask.height_start = copy.deepcopy(topline_width_stop)
clippingmask.height_stop = 0
# Get coordinats of the line
labels[b][labels[b] == i + 1] = 0
count_width += 1
if get_toc:
list_linecoords.append(copy.deepcopy(linecoords))
# We knew there must be first a horizontal line
if count_width == 0: continue
if pixelheight(args.minheightver) < get_height(b) < pixelheight(args.maxheightver) | |
<filename>gspy.py
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
#def adj_matrix_from_coords(coords,theta,show_progress=False):
# [N,M] = coords.shape
# A = np.zeros((N,N))
# for i in np.arange(1,N):
# if show_progress:
# #print 100.0*i/N, '% of adj_matrix_from_coords process completed.'
# for j in np.arange(i):
# x1 = coords[i,0]
# y1 = coords[i,1]
# x2 = coords[j,0]
# y2 = coords[j,1]
# distance = np.sqrt((x1 - x2)**2 + (y1 - y2)**2)
# if distance < 2*theta:
# A[i,j] = np.exp(-(distance**2)/(2*theta**2))
# print 'adj_matrix_from_coords process is completed.'
# return A + A.transpose()
def adj_matrix_from_coords_limited(coords, limit):
    """
    Build a symmetric adjacency matrix connecting each node i (for i >= 1)
    to its `limit` nearest neighbours with Gaussian weights exp(-d**2).

    :param coords: (N, 2) array of node coordinates
    :param limit: number of nearest neighbours to link per node
    :return: (N, N) symmetric weight matrix
    """
    n_nodes = coords.shape[0]
    A = np.zeros((n_nodes, n_nodes))
    for i in np.arange(1, n_nodes):
        dist2i = np.sqrt((coords[:, 0] - coords[i, 0]) ** 2
                         + (coords[:, 1] - coords[i, 1]) ** 2)
        # argsort index 0 is node i itself (distance 0, assuming distinct
        # coordinates), so take the next `limit` entries.
        nearest = np.argsort(dist2i)[1:limit + 1]
        for j in nearest:
            if A[i, j] == 0:
                # FIX: reuse the distance already computed in dist2i instead
                # of recomputing it from the coordinates (the old code did
                # the same sqrt twice per edge); the value is identical.
                A[i, j] = np.exp(-(dist2i[j] ** 2))
    return A + A.transpose()
def adj_matrix_from_coords2(coords, min_threshold):
    """
    Symmetric adjacency matrix with inverse-distance weights, keeping only
    edges whose weight exceeds min_threshold.
    """
    n_nodes = coords.shape[0]
    A = np.zeros((n_nodes, n_nodes))
    # Fill the strict lower triangle, then symmetrise.
    for i in np.arange(1, n_nodes):
        for j in np.arange(i):
            dx = coords[i, 0] - coords[j, 0]
            dy = coords[i, 1] - coords[j, 1]
            weight = 1.0 / np.sqrt(dx ** 2 + dy ** 2)
            if weight > min_threshold:
                A[i, j] = weight
    return A + A.transpose()
def adj_matrix_directed_ring(N, c=0):
    """
    Return the adjacency matrix of a directed ring graph.

    :param N: number of graph nodes
    :param c: optional first column of the circulant adjacency matrix
              (carries the edge weights); the scalar 0 means "not supplied",
              in which case unit weights are used
    """
    # BUG FIX: the old `if c==0` breaks when c is an array (element-wise
    # comparison -> "truth value of an array is ambiguous" ValueError).
    # Test the scalar sentinel explicitly instead.
    if np.isscalar(c) and c == 0:
        c = np.zeros(N)
        c[1] = 1
    A = linalg.circulant(c)
    return A
def coords_ring_graph(N):
    """Coordinates of an N-node ring, evenly spaced clockwise on the unit
    circle starting at (1, 0)."""
    angles = 2.0 * np.pi * np.arange(N) / N
    return np.column_stack((np.cos(angles), -np.sin(angles)))
def coords_line_graph(A, coords, a):
    """
    Coordinates for the line-graph vertices of (A, coords): each edge
    (i -> j) becomes a point a fraction `a` of the way from node i to node j.

    Assumes unit edge weights: the edge count is taken as sum(A).
    """
    N = len(coords)
    # BUG FIX: np.sum returns a float scalar here, and np.zeros requires an
    # integer size on modern NumPy — cast the edge count explicitly.
    E = int(np.sum(A))
    # Renamed from `coords_line_graph` — the old local shadowed this function.
    lg_coords = np.zeros((E, 2))
    row_idx = np.zeros(E, dtype=int)
    col_idx = np.zeros(E, dtype=int)
    e = 0
    # Collect edge endpoints, scanning each row's columns right-to-left
    # (preserves the original vertex ordering of the line graph).
    for i in range(N):
        for j in range(N):
            if A[i, N - 1 - j] != 0:
                row_idx[e] = i
                col_idx[e] = N - 1 - j
                e = e + 1
    lg_coords[:, 0] = coords[row_idx, 0] + a * (coords[col_idx, 0] - coords[row_idx, 0])
    lg_coords[:, 1] = coords[row_idx, 1] + a * (coords[col_idx, 1] - coords[row_idx, 1])
    return lg_coords
def plot_graph(A,coords,display_edges=1,display_axis=0,color='b',graph_node_size=80,show_progress=False,h_length_param=0.03):
    """
    Plot a graph with matplotlib: grey edges (lines for undirected graphs,
    arrows for directed ones) and coloured node markers. Returns True.

    :param A: (N, N) adjacency matrix; symmetry decides line vs. arrow style
    :param coords: (N, 2) node coordinates
    :param h_length_param: arrow-head length as a fraction of the plot extent
    """
    #print 'plot_graph has initiated.'
    [rows,cols] = np.where(A!=0)
    plt.figure()
    if display_edges==1:
        if np.array_equal(A.transpose(),A):
            # Undirected graph
            for i in range(len(rows)):
                #if show_progress:
                #print 'plot_graph: ', 100.0*i/len(rows), '% of loop completed.'
                x1, y1 = coords[cols[i],0], coords[cols[i],1]
                x2, y2 = coords[rows[i],0], coords[rows[i],1]
                plt.plot([x1, x2], [y1, y2], c='0.5',zorder=1)
        else:
            # Directed graph
            # Arrow parameters (set proportionally to the plot dimensions)
            x_max = np.max(coords[:,0])
            x_min = np.min(coords[:,0])
            y_max = np.max(coords[:,1])
            y_min = np.min(coords[:,1])
            h_length = h_length_param*np.max([x_max - x_min, y_max - y_min])
            # Drawing the edges (arrows)
            for j in range(len(cols)):
                #if show_progress:
                #print 100.0*j/len(cols), '% of loop completed.'
                x1, y1 = coords[cols[j],0], coords[cols[j],1]
                x2, y2 = coords[rows[j],0], coords[rows[j],1]
                plt.arrow(x1, y1, x2-x1, y2-y1, head_width=h_length/2.0, head_length=h_length, fc='0.5', ec='0.5',length_includes_head=True,overhang=0.3,zorder=1)
    # Nodes are drawn above the edges (zorder=2).
    plt.scatter(coords[:,0],coords[:,1],s=graph_node_size,c=color,edgecolor='face',zorder=2)
    if display_axis==0:
        plt.axis('off')
    plt.axis('tight')
    #print 'plot_graph completed.'
    return True
def plot_graph_signal(A,coords,signal,display_edges=1,display_axis=0,cmin=0,cmax=0,graph_node_size=150,cfontsize=22,create_figure=True,edge_color_face=True,show_progress=False,arrow_scale=1.0,lwidth=1.0):
    """
    Plot a graph signal with matplotlib: grey edges plus nodes coloured by
    the per-node *signal* values (seismic colormap). Returns True.

    :param cmin, cmax: colormap range; equal values mean "use signal min/max"
    :param create_figure: open a new figure and attach a colorbar
    """
    #print 'plot_graph_signal has initiated.'
    if cmin==cmax:
        # case in which the user did not specify the colormap range.
        cmin = np.min(signal)
        cmax = np.max(signal)
    [rows,cols] = np.where(A!=0)
    if create_figure:
        plt.figure()
    if display_edges==1:
        if np.array_equal(A.transpose(),A):
            # Undirected graph
            for i in range(len(rows)):
                #if show_progress:
                #print 'plot_graph_signal: ', 100.0*i/len(rows), '% of loop completed.'
                x1, y1 = coords[cols[i],0], coords[cols[i],1]
                x2, y2 = coords[rows[i],0], coords[rows[i],1]
                plt.plot([x1, x2], [y1, y2], c='0.5',zorder=1,linewidth=lwidth)
        else:
            # Directed graph
            # Arrow parameters (set proportionally to the plot dimensions)
            x_max = np.max(coords[:,0])
            x_min = np.min(coords[:,0])
            y_max = np.max(coords[:,1])
            y_min = np.min(coords[:,1])
            h_length = 0.05*np.max([x_max - x_min, y_max - y_min])
            # Drawing the edges (arrows)
            for j in range(len(cols)):
                #if show_progress:
                #print 'plot_graph_signal: ', 100.0*j/len(cols), '% of loop completed.'
                x1, y1 = coords[cols[j],0], coords[cols[j],1]
                x2, y2 = coords[rows[j],0], coords[rows[j],1]
                plt.arrow(x1, y1, x2-x1, y2-y1, head_width=arrow_scale*h_length/2.0, head_length=arrow_scale*h_length, fc='0.5', ec='0.5',length_includes_head=True,overhang=0.3,zorder=1)
    # Nodes coloured by the signal values, drawn above the edges.
    if edge_color_face==True:
        plt.scatter(coords[:,0],coords[:,1],s=graph_node_size,c=signal,edgecolor='face',zorder=2)
    else:
        plt.scatter(coords[:,0],coords[:,1],s=graph_node_size,c=signal,zorder=2)
    # NOTE(review): cticks is computed but never applied to the colorbar.
    cticks = np.linspace(cmin, cmax, 5, endpoint=True)
    if create_figure:
        cbar = plt.colorbar()
        plt.clim(cmin,cmax)
        plt.set_cmap('seismic')
        cbar.ax.tick_params(labelsize=cfontsize)
    if display_axis==0:
        plt.axis('off')
    plt.axis('tight')
    #print 'plot_graph_signal completed.'
    return True
def random_sensor_graph(N, theta=0.2):
    """
    Random sensor graph: N nodes uniform in the unit square, linked by
    Gaussian weights exp(-d**2 / (2*theta**2)) when closer than 2*theta.

    :return: (A, coords) — symmetric adjacency matrix and node coordinates
    """
    coords = np.random.rand(N, 2)
    # BUG FIX: this used to call adj_matrix_from_coords, which is commented
    # out at the top of this module and therefore raised NameError. The
    # thresholded Gaussian weighting it implemented is inlined here.
    A = np.zeros((N, N))
    for i in np.arange(1, N):
        for j in np.arange(i):
            distance = np.sqrt((coords[i, 0] - coords[j, 0]) ** 2
                               + (coords[i, 1] - coords[j, 1]) ** 2)
            if distance < 2 * theta:
                A[i, j] = np.exp(-(distance ** 2) / (2 * theta ** 2))
    A = A + A.transpose()
    return A, coords
def undir2dir(A_undirected):
    """Randomly orient each edge of an undirected graph, keeping its weight."""
    N = len(A_undirected)
    A_directed = np.zeros((N, N))
    for row in range(1, N):
        for col in range(row):
            weight = A_undirected[row, col]
            if weight != 0:
                # Coin flip decides which direction the edge keeps.
                if np.random.randint(2) == 0:
                    A_directed[row, col] = weight
                else:
                    A_directed[col, row] = weight
    return A_directed
def stem(x,y,fsize=18,msize=10,color='b',linestyle='--',labelstr=0,alph=1):
    """
    Matplotlib stem-style plot: a vertical line from 0 to y[i] at each x[i],
    topped with a scatter marker. Returns True.

    :param labelstr: legend label; the default 0 means "no label"
    """
    # One vertical stem per sample, drawn below the markers (zorder=1).
    for i in range(np.array(x).size):
        plt.plot([x[i], x[i]], [0, y[i]],linestyle,c=color,zorder=1)
    if labelstr!=0:
        plt.scatter(x,y,s=10*msize,c=color,edgecolor='face',zorder=2,alpha=alph,label=labelstr)
    else:
        plt.scatter(x,y,s=10*msize,c=color,edgecolor='face',alpha=alph,zorder=2)
    plt.axis('tight')
    plt.tick_params(axis='both', which='major', labelsize=fsize-2)
    return True
def laplacian(A):
    """
    Graph Laplacian L = D_in - A, where D_in is the diagonal in-degree
    matrix (row sums of A). Returns 0 for a non-square input.
    """
    rows, cols = A.shape
    if rows != cols:
        return 0
    in_degrees = np.sum(A, axis=1)
    return np.diag(in_degrees) - A
def find_nearest(array, value):
    # Entry of `array` closest to `value` (first one wins on ties).
    # CREDITS: based on an answer by "unutbu" on Stack Overflow,
    # https://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
    return array[np.abs(array - value).argmin()]
def number_of_zero_crossings(A, x):
    '''
    Returns the number of zero crossings in the signal x defined over the
    graph with A as adjacency matrix: edges whose endpoint values have
    opposite signs.
    '''
    # If the signal is entirely one-signed there can be no crossings.
    if (np.sum(x >= 0) == 0 or np.sum(x > 0) == 0
            or np.sum(x <= 0) == 0 or np.sum(x < 0) == 0):
        return 0

    def _count_from(nodes):
        # Count sign changes along out-edges of the given nodes.
        total = 0
        for i in nodes:
            neighbours = np.where(A[i, :] != 0)[0]
            for j in neighbours:
                if x[i] * x[j] < 0:
                    total += 1
        return total

    if not np.array_equal(A.transpose(), A):
        # Directed graph: scan every node's out-edges.
        return _count_from(range(len(x)))
    # Undirected graph: scan only the minority-sign side, so each crossing
    # edge (which appears twice in a symmetric A) is counted once.
    if np.sum(x >= 0) > np.sum(x < 0):
        return _count_from(np.where(x < 0)[0])
    return _count_from(np.where(x >= 0)[0])
def total_variation(A, x, eigval_max=0):
    '''
    Total variation of graph signal x from the definition: the l1 norm of
    x - A_norm @ x, with A_norm the adjacency matrix scaled by its
    largest-magnitude eigenvalue.
    '''
    if eigval_max == 0:
        # Caller did not pre-diagonalize A; compute the spectral radius here.
        eigvals = np.linalg.eig(A)[0]
        eigval_max = np.max(np.abs(eigvals))
    shifted = x - np.dot(A / (1.0 * eigval_max), x)
    return np.sum(np.abs(shifted))
def total_variation_eig(A):
    '''
    Total variation of every eigenvector of A (l1-normalized), following
    equation (25) in "Discrete Signal Processing on Graphs: Frequency
    Analysis", Sandryhaila and Moura (2014): |lambda - lambda_max| / lambda_max.
    '''
    eigvals = np.linalg.eig(A)[0]
    spectral_radius = np.max(np.abs(eigvals))
    return np.abs(eigvals - spectral_radius) / spectral_radius
def translation_girault(L):
    """
    Graph translation operator from the graph Laplacian L (Girault et al.):
    TG = U diag(exp(-i*pi*sqrt(lambda/rho))) U^-1, where lambda are the
    Laplacian eigenvalues and rho is an upper bound on the spectral radius.
    Returns the (complex) N x N translation matrix.
    """
    [eigvals,U] = np.linalg.eig(L)
    # Clamp tiny numerically-negative eigenvalues to 0 so sqrt stays real.
    eigvals[np.where(eigvals < 1e-15)] = 0
    Uinv = np.linalg.inv(U)
    d = np.diag(L) # degrees
    A = np.diag(d) - L
    rho = np.max(np.dot(A,d)/(1.0*d)) # Obs.: element-wise division
    N = len(eigvals)
    # Spectral-domain translation: phase shift proportional to sqrt(lambda/rho).
    TGhat = np.diag(np.exp(- 1j * np.pi * np.sqrt(eigvals/rho)))
    TG = np.dot(U,np.dot(TGhat,Uinv))
    return TG
def normalize_mtx_l1(A):
    # Scale each column of A so it sums to 1 (column-wise l1 normalization).
    col_sums = np.sum(A, axis=0)
    return np.dot(A, np.diag(1.0 / col_sums))
def line_graph(A,coords,a=0.5):
    """
    Build the line digraph of (A, coords): one vertex per edge of A, placed a
    fraction `a` along the edge, with LA[e1, e2] = 1 when edge e1 ends where
    edge e2 starts. Returns [LA, Lcoords], or 0 for an out-of-range `a`, or
    1 for an undirected input (not implemented).
    """
    # Warning: the graph is supposed to have only UNITARY weights.
    N = len(coords)
    A = 1*(A!=0) # FORCES UNITARY WEIGHTS.
    if (a<=0) or (a>=1):
        #print "Error! Fractional parameter is out of bounds! (should be >0 and <1)"
        return 0
    if np.array_equal(A.transpose(),A):
        # Undirected graph
        #print "Error. line_graph is not implemented for undirected graphs yet."
        return 1
    else:
        # Directed graph
        E = np.sum(A!=0) # number of edges
        LA = np.zeros((E,E))
        Lcoords = np.zeros((E,2))
        row_idx = np.zeros(E)
        col_idx = np.zeros(E)
        # In what follows, we pick the indexes of A corresponding to linked vertices. Each pair row_idx[i], col_idx[i] corresponds to an edge in the digraph, and therefore to a vertex in the line digraph. The order in which we pick them will define the ordering of vertices in the line digraph, and we do so column-by-column, from left to right, from top to bottom.
        col_idx, row_idx = np.where(np.transpose(A)!=0)
        # Line-graph vertex e sits a fraction `a` from edge e's source
        # (row_idx) towards its target (col_idx).
        Lcoords[:,0] = coords[row_idx,0] + a*(coords[col_idx,0] - coords[row_idx,0])
        Lcoords[:,1] = coords[row_idx,1] + a*(coords[col_idx,1] - coords[row_idx,1])
        for e in range(E):
            # Connect every edge that ends at edge e's source to vertex e.
            LA[np.where(col_idx==row_idx[e]),e] = 1
        return [LA,Lcoords]
def merge_line_digraph(A,coords,LA,Lcoords):
    '''
    Merge a digraph and its line digraph into one graph.  Each line-graph
    vertex is inserted "on" its originating edge: the merged adjacency links
    source vertex -> line vertex -> end vertex.  Line-graph vertices occupy
    the last E slots of the merged arrays.

    >> A, coords: digraph adjacency matrix (N x N) and coordinates (N x 2).
    >> LA, Lcoords: line-digraph adjacency matrix and coordinates (E x 2).

    Returns [A_merged, coords_merged] with shapes (N+E, N+E) and (N+E, 2).

    NOTE(review): the internal edges of LA itself are not copied into
    A_merged (the previous version computed their indexes but never used
    them); only the vertex<->edge links are created.
    '''
    N = len(coords)
    E = len(Lcoords)
    coords_merged = np.zeros((N+E,2))
    A_merged = np.zeros((N+E,N+E))
    coords_merged[0:N,:] = coords
    coords_merged[N:N+E,:] = Lcoords
    # Indexes of connected vertices in the digraph, in the same edge order
    # used by line_graph() (column-by-column scan of A).
    col_idx, row_idx = np.where(np.transpose(A)!=0)
    for e in range(E):
        A_merged[N+e,col_idx[e]] = 1 # from source-vertex to edge
        A_merged[row_idx[e],N+e] = 1 # from edge to end-vertex
    return [A_merged,coords_merged]
def gft(M,x,showprogress=False):
    '''
    Graph Fourier transform of a signal: decomposition of x into the
    eigenbasis of matrix M, i.e. xhat = V^-1 x where the columns of V are
    the eigenvectors of M.

    >> M: adjacency or Laplacian matrix.
    >> x: graph signal (length-N array).
    >> showprogress: kept for interface compatibility; currently unused.

    Returns xhat, possibly a complex array.
    '''
    [eigvals, V] = np.linalg.eig(M)
    # BUG FIX: the GFT coefficients come from inverting the eigenvector
    # matrix V, not the graph matrix M itself.  The previous code computed
    # the eigendecomposition, discarded it, and applied inv(M) to x, which
    # is not a Fourier transform in any basis.
    Vinv = np.linalg.inv(V)
    xhat = np.dot(Vinv, x)  # possibly a complex array!
    return xhat
def nn_coords(im_rows,im_cols,tilted=0):
    '''
    Coordinates of the vertices of a rectangular-grid graph, modeling an
    image through the nearest-neighbor model.  Vertices are numbered left to
    right, top to bottom; x grows rightwards in steps of 1/im_cols and y
    decreases downwards in steps of 1/im_rows.
    If tilted == 1, the even-numbered rows (zero-indexed) are shifted
    slightly to the right, forming a hexagonal dot grid.
    '''
    base_x = np.arange(im_cols) * (1.0 / im_cols)
    coords = np.zeros((im_rows * im_cols, 2))
    for r in range(im_rows):
        start = r * im_cols
        row_x = base_x + (0.5 / im_cols) if (tilted and r % 2 == 0) else base_x
        coords[start:start + im_cols, 0] = row_x
        coords[start:start + im_cols, 1] = -r * (1.0 / im_rows)
    return coords
def bst_mtx_3diag(diag):
    '''
    Returns an M-by-M 3-diagonal band symmetric Toeplitz matrix whose super-
    and sub-diagonals are given by the array diag (M = len(diag) + 1).
    Auxiliary function called by nn_adjacency_matrix().
    '''
    upper = np.diag(np.asarray(diag, dtype=float), 1)
    return upper + upper.transpose()
def nn_adjacency_matrix(a,b):
'''
Adjacency matrix of a rectangular-grid graph, modeling
an image through the nearest-neighbor model. The vertices are numbered
from left to right, top to bottom.
The matrix A can be seen as a square im_rows-by-im_rows block matrix, with each entry
being a im_cols-by-im_cols square matrix.
>> a: 1D array of length (im_rows - 1).
>> b: 1D array of | |
eul[0] = math.atan2(R[1, 2], R[0, 2])
sp = math.sin(eul[0])
cp = math.cos(eul[0])
eul[1] = math.atan2(cp * R[0, 2] + sp * R[1, 2], R[2, 2])
eul[2] = math.atan2(-sp * R[0, 0] + cp * R[1, 0], -sp * R[0, 1] + cp * R[1, 1])
if unit == "deg":
eul *= 180 / math.pi
return eul
# ------------------------------------------------------------------------------------------------------------------- #
def tr2rpy(T, unit="rad", order="zyx", check=False):
    r"""
    Convert SO(3) or SE(3) to roll-pitch-yaw angles

    :param R: SE(3) or SO(3) matrix
    :type R: ndarray(4,4) or ndarray(3,3)
    :param unit: 'rad' or 'deg'
    :type unit: str
    :param order: 'xyz', 'zyx' or 'yxz' [default 'zyx']
    :type order: str
    :param check: check that rotation matrix is valid
    :type check: bool
    :return: Roll-pitch-yaw angles
    :rtype: ndarray(3)
    :raises ValueError: bad arguments

    ``tr2rpy(R)`` are the roll-pitch-yaw angles corresponding to
    the rotation part of ``R``.

    The 3 angles RPY = :math:`[\theta_R, \theta_P, \theta_Y]` correspond to
    sequential rotations about the Z, Y and X axes respectively. The axis order
    sequence can be changed by setting:

    - ``order='xyz'``  for sequential rotations about X, Y, Z axes
    - ``order='yxz'``  for sequential rotations about Y, X, Z axes

    By default the angles are in radians but can be changed setting
    ``unit='deg'``.

    .. runblock:: pycon

        >>> from spatialmath.base import *
        >>> T = rpy2tr(0.2, 0.3, 0.5)
        >>> print(T)
        >>> tr2rpy(T)

    .. note::

        - There is a singularity for the case where :math:`\theta_P = \pi/2` in
          which case we arbitrarily set :math:`\theta_R=0` and
          :math:`\theta_Y = \theta_R + \theta_Y`.
        - If the input is SE(3) the translation component is ignored.

    :seealso: :func:`~rpy2r`, :func:`~rpy2tr`, :func:`~tr2eul`,
        :func:`~tr2angvec`
    :SymPy: not supported
    """
    # Accept SE(3) by discarding the translation part; anything else must be
    # a valid SO(3) rotation matrix.
    if base.ismatrix(T, (4, 4)):
        R = base.t2r(T)
    else:
        R = T
    if not isrot(R, check=check):
        raise ValueError("not a valid SO(3) matrix")
    rpy = np.zeros((3,))
    if order == "xyz" or order == "arm":
        # XYZ order
        if abs(abs(R[0, 2]) - 1) < 10 * _eps:  # when |R13| == 1
            # singularity: pitch is +/-90 deg, roll and yaw axes align
            # (gimbal lock); arbitrarily set roll to zero.
            rpy[0] = 0  # roll is zero
            if R[0, 2] > 0:
                rpy[2] = math.atan2(R[2, 1], R[1, 1])  # R+Y
            else:
                rpy[2] = -math.atan2(R[1, 0], R[2, 0])  # R-Y
            rpy[1] = math.asin(np.clip(R[0, 2], -1.0, 1.0))
        else:
            rpy[0] = -math.atan2(R[0, 1], R[0, 0])
            rpy[2] = -math.atan2(R[1, 2], R[2, 2])

            # Four algebraically-equivalent expressions exist for pitch;
            # pick the one with the largest denominator for numerical
            # stability.
            k = np.argmax(np.abs([R[0, 0], R[0, 1], R[1, 2], R[2, 2]]))
            if k == 0:
                rpy[1] = math.atan(R[0, 2] * math.cos(rpy[0]) / R[0, 0])
            elif k == 1:
                rpy[1] = -math.atan(R[0, 2] * math.sin(rpy[0]) / R[0, 1])
            elif k == 2:
                rpy[1] = -math.atan(R[0, 2] * math.sin(rpy[2]) / R[1, 2])
            elif k == 3:
                rpy[1] = math.atan(R[0, 2] * math.cos(rpy[2]) / R[2, 2])
    elif order == "zyx" or order == "vehicle":
        # old ZYX order (as per Paul book)
        if abs(abs(R[2, 0]) - 1) < 10 * _eps:  # when |R31| == 1
            # singularity: same gimbal-lock treatment as above.
            rpy[0] = 0  # roll is zero
            if R[2, 0] < 0:
                rpy[2] = -math.atan2(R[0, 1], R[0, 2])  # R-Y
            else:
                rpy[2] = math.atan2(-R[0, 1], -R[0, 2])  # R+Y
            rpy[1] = -math.asin(np.clip(R[2, 0], -1.0, 1.0))
        else:
            rpy[0] = math.atan2(R[2, 1], R[2, 2])  # R
            rpy[2] = math.atan2(R[1, 0], R[0, 0])  # Y

            # Largest of four equivalent denominators, for stability.
            k = np.argmax(np.abs([R[0, 0], R[1, 0], R[2, 1], R[2, 2]]))
            if k == 0:
                rpy[1] = -math.atan(R[2, 0] * math.cos(rpy[2]) / R[0, 0])
            elif k == 1:
                rpy[1] = -math.atan(R[2, 0] * math.sin(rpy[2]) / R[1, 0])
            elif k == 2:
                rpy[1] = -math.atan(R[2, 0] * math.sin(rpy[0]) / R[2, 1])
            elif k == 3:
                rpy[1] = -math.atan(R[2, 0] * math.cos(rpy[0]) / R[2, 2])
    elif order == "yxz" or order == "camera":
        if abs(abs(R[1, 2]) - 1) < 10 * _eps:  # when |R23| == 1
            # singularity: same gimbal-lock treatment as above.
            rpy[0] = 0
            if R[1, 2] < 0:
                rpy[2] = -math.atan2(R[2, 0], R[0, 0])  # R-Y
            else:
                rpy[2] = math.atan2(-R[2, 0], -R[2, 1])  # R+Y
            rpy[1] = -math.asin(np.clip(R[1, 2], -1.0, 1.0))  # P
        else:
            rpy[0] = math.atan2(R[1, 0], R[1, 1])
            rpy[2] = math.atan2(R[0, 2], R[2, 2])

            # Largest of four equivalent denominators, for stability.
            k = np.argmax(np.abs([R[1, 0], R[1, 1], R[0, 2], R[2, 2]]))
            if k == 0:
                rpy[1] = -math.atan(R[1, 2] * math.sin(rpy[0]) / R[1, 0])
            elif k == 1:
                rpy[1] = -math.atan(R[1, 2] * math.cos(rpy[0]) / R[1, 1])
            elif k == 2:
                rpy[1] = -math.atan(R[1, 2] * math.sin(rpy[2]) / R[0, 2])
            elif k == 3:
                rpy[1] = -math.atan(R[1, 2] * math.cos(rpy[2]) / R[2, 2])
    else:
        raise ValueError("Invalid order")

    if unit == "deg":
        rpy *= 180 / math.pi
    return rpy
# ---------------------------------------------------------------------------------------#
def trlog(T, check=True, twist=False):
    """
    Logarithm of SO(3) or SE(3) matrix

    :param R: SE(3) or SO(3) matrix
    :type R: ndarray(4,4) or ndarray(3,3)
    :param check: check that matrix is valid
    :type check: bool
    :param twist: return a twist vector instead of matrix [default]
    :type twist: bool
    :return: logarithm
    :rtype: ndarray(4,4) or ndarray(3,3)
    :raises ValueError: bad argument

    An efficient closed-form solution of the matrix logarithm for arguments that
    are SO(3) or SE(3).

    - ``trlog(R)`` is the logarithm of the passed rotation matrix ``R`` which
      will be 3x3 skew-symmetric matrix. The equivalent vector from ``vex()``
      is parallel to rotation axis and its norm is the amount of rotation about
      that axis.
    - ``trlog(T)`` is the logarithm of the passed homogeneous transformation
      matrix ``T`` which will be 4x4 augumented skew-symmetric matrix. The
      equivalent vector from ``vexa()`` is the twist vector (6x1) comprising [v
      w].

    .. runblock:: pycon

        >>> from spatialmath.base import *
        >>> trlog(trotx(0.3))
        >>> trlog(trotx(0.3), twist=True)
        >>> trlog(rotx(0.3))
        >>> trlog(rotx(0.3), twist=True)

    :seealso: :func:`~trexp`, :func:`~spatialmath.base.transformsNd.vex`, :func:`~spatialmath.base.transformsNd.vexa`
    """
    if ishom(T, check=check):
        # SE(3) matrix
        if base.iseye(T):
            # is identity matrix: log is all zeros
            if twist:
                return np.zeros((6,))
            else:
                return np.zeros((4, 4))
        else:
            [R, t] = base.tr2rt(T)
            if base.iseye(R):
                # rotation matrix is identity: pure translation, so the log
                # has zero rotational part and v == t
                if twist:
                    return np.r_[t, 0, 0, 0]
                else:
                    return base.Ab2M(np.zeros((3, 3)), t)
            else:
                S = trlog(R, check=False)  # recurse for the SO(3) part
                w = base.vex(S)
                theta = base.norm(w)
                # Inverse of the SO(3) left Jacobian, used to map the
                # translation t to the twist's linear component v.
                Ginv = (
                    np.eye(3)
                    - S / 2
                    + (1 / theta - 1 / math.tan(theta / 2) / 2) / theta * S @ S
                )
                v = Ginv @ t
                if twist:
                    return np.r_[v, w]
                else:
                    return base.Ab2M(S, v)
    elif isrot(T, check=check):
        # deal with rotation matrix
        R = T
        if base.iseye(R):
            # matrix is identity: zero rotation
            if twist:
                return np.zeros((3,))
            else:
                return np.zeros((3, 3))
        elif abs(np.trace(R) + 1) < 100 * _eps:
            # check for trace = -1
            # rotation by +/- pi, +/- 3pi etc.
            # The general formula below divides by sin(theta) == 0 here, so
            # recover the axis from the dominant column of R + I instead.
            diagonal = R.diagonal()
            k = diagonal.argmax()
            mx = diagonal[k]
            I = np.eye(3)
            col = R[:, k] + I[:, k]
            w = col / np.sqrt(2 * (1 + mx))
            theta = math.pi
            if twist:
                return w * theta
            else:
                return base.skew(w * theta)
        else:
            # general case: inverse Rodrigues formula,
            # theta from the trace, axis from the skew part of R
            theta = math.acos((np.trace(R) - 1) / 2)
            skw = (R - R.T) / 2 / math.sin(theta)
            if twist:
                return base.vex(skw * theta)
            else:
                return skw * theta
    else:
        raise ValueError("Expect SO(3) or SE(3) matrix")
# ---------------------------------------------------------------------------------------#
def trexp(S, theta=None, check=True):
"""
Exponential of se(3) or so(3) matrix
:param S: se(3), so(3) matrix or equivalent twist vector
:type T: ndarray(4,4) or ndarray(6); or ndarray(3,3) or ndarray(3)
:param θ: motion
:type θ: float
:return: matrix exponential in SE(3) or SO(3)
:rtype: ndarray(4,4) or ndarray(3,3)
:raises ValueError: bad arguments
An efficient closed-form solution of the matrix exponential for arguments
that are so(3) or se(3).
For so(3) the results is an SO(3) rotation matrix:
- ``trexp(Ω)`` is the matrix exponential of the so(3) element ``Ω`` which is
a 3x3 skew-symmetric matrix.
- ``trexp(Ω, θ)`` as above but for an so(3) motion of Ωθ, where ``Ω`` is
unit-norm skew-symmetric matrix representing a rotation axis and a
rotation magnitude given by ``θ``.
- ``trexp(ω)`` is the matrix exponential of the so(3) element ``ω``
expressed as a 3-vector.
- ``trexp(ω, θ)`` as above but for an so(3) motion of ωθ where ``ω`` is a
unit-norm vector representing a rotation axis and a rotation magnitude
given by ``θ``. ``ω`` is expressed as a 3-vector.
.. runblock:: pycon
>>> from spatialmath.base import *
>>> | |
<reponame>masterapps-au/pysaml2
#!/usr/bin/env python
#
# Generated Mon May 2 14:23:33 2011 by parse_xsd.py version 0.4.
#
# A summary of available specifications can be found at:
# https://wiki.oasis-open.org/security/FrontPage
#
# saml core specifications to be found at:
# if any question arise please query the following pdf.
# http://docs.oasis-open.org/security/saml/v2.0/saml-core-2.0-os.pdf
# The specification was later updated with errata, and the new version is here:
# https://www.oasis-open.org/committees/download.php/56776/sstc-saml-core-errata-2.0-wd-07.pdf
#
try:
from base64 import encodebytes as b64encode
except ImportError:
from base64 import b64encode
from saml2.validate import valid_ipv4, MustValueError
from saml2.validate import valid_ipv6
from saml2.validate import ShouldValueError
from saml2.validate import valid_domain_name
import saml2
from saml2 import SamlBase
import six
from saml2 import xmldsig as ds
from saml2 import xmlenc as xenc
# authentication information fields
NAMESPACE = 'urn:oasis:names:tc:SAML:2.0:assertion'
# xmlschema definition
XSD = "xs"
# xmlschema templates and extensions
XS_NAMESPACE = 'http://www.w3.org/2001/XMLSchema'
# xmlschema-instance, which contains several builtin attributes
XSI_NAMESPACE = 'http://www.w3.org/2001/XMLSchema-instance'
# xml soap namespace
NS_SOAP_ENC = "http://schemas.xmlsoap.org/soap/encoding/"
# type definitions for xmlschemas
XSI_TYPE = '{%s}type' % XSI_NAMESPACE
# nil type definition for xmlschemas
XSI_NIL = '{%s}nil' % XSI_NAMESPACE

# idp and sp communicate usually about a subject(NameID)
# the format determines the category the subject is in

# custom subject
NAMEID_FORMAT_UNSPECIFIED = (
    "urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified")
# subject as email address
NAMEID_FORMAT_EMAILADDRESS = (
    "urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress")
# subject as x509 key
NAMEID_FORMAT_X509SUBJECTNAME = (
    "urn:oasis:names:tc:SAML:1.1:nameid-format:X509SubjectName")
# subject as windows domain name
NAMEID_FORMAT_WINDOWSDOMAINQUALIFIEDNAME = (
    "urn:oasis:names:tc:SAML:1.1:nameid-format:WindowsDomainQualifiedName")
# subject from a kerberos instance
NAMEID_FORMAT_KERBEROS = (
    "urn:oasis:names:tc:SAML:2.0:nameid-format:kerberos")
# subject as name
NAMEID_FORMAT_ENTITY = (
    "urn:oasis:names:tc:SAML:2.0:nameid-format:entity")
# linked subject
NAMEID_FORMAT_PERSISTENT = (
    "urn:oasis:names:tc:SAML:2.0:nameid-format:persistent")
# anonymous subject
NAMEID_FORMAT_TRANSIENT = (
    "urn:oasis:names:tc:SAML:2.0:nameid-format:transient")
# subject available in encrypted format
NAMEID_FORMAT_ENCRYPTED = (
    "urn:oasis:names:tc:SAML:2.0:nameid-format:encrypted")
# tuple of (constant name, format URI) pairs for the available formats
NAMEID_FORMATS_SAML2 = (
    ('NAMEID_FORMAT_EMAILADDRESS', NAMEID_FORMAT_EMAILADDRESS),
    ('NAMEID_FORMAT_ENCRYPTED', NAMEID_FORMAT_ENCRYPTED),
    ('NAMEID_FORMAT_ENTITY', NAMEID_FORMAT_ENTITY),
    ('NAMEID_FORMAT_PERSISTENT', NAMEID_FORMAT_PERSISTENT),
    ('NAMEID_FORMAT_TRANSIENT', NAMEID_FORMAT_TRANSIENT),
    ('NAMEID_FORMAT_UNSPECIFIED', NAMEID_FORMAT_UNSPECIFIED),
)

# a profile outlines a set of rules describing how to embed SAML assertions.
# https://docs.oasis-open.org/security/saml/v2.0/saml-profiles-2.0-os.pdf
# The specification was later updated with errata, and the new version is here:
# https://www.oasis-open.org/committees/download.php/56782/sstc-saml-profiles-errata-2.0-wd-07.pdf

# XML based values for SAML attributes
PROFILE_ATTRIBUTE_BASIC = (
    "urn:oasis:names:tc:SAML:2.0:profiles:attribute:basic")

# an AuthnRequest is made to initiate authentication
# authenticate the request with login credentials
AUTHN_PASSWORD = "urn:oasis:names:tc:SAML:2.0:ac:classes:Password"
# authenticate the request with login credentials, over tls/https
AUTHN_PASSWORD_PROTECTED = \
    "urn:oasis:names:tc:SAML:2.0:ac:classes:PasswordProtectedTransport"

# attribute statements is key:value metadata shared with your app

# custom format
NAME_FORMAT_UNSPECIFIED = (
    "urn:oasis:names:tc:SAML:2.0:attrname-format:unspecified")
# uri format
NAME_FORMAT_URI = "urn:oasis:names:tc:SAML:2.0:attrname-format:uri"
# XML-based format
NAME_FORMAT_BASIC = "urn:oasis:names:tc:SAML:2.0:attrname-format:basic"
# tuple of (constant name, format URI) pairs for the available formats
NAME_FORMATS_SAML2 = (
    ('NAME_FORMAT_BASIC', NAME_FORMAT_BASIC),
    ('NAME_FORMAT_URI', NAME_FORMAT_URI),
    ('NAME_FORMAT_UNSPECIFIED', NAME_FORMAT_UNSPECIFIED),
)

# the SAML authority's decision can be predetermined by arbitrary context

# the specified action is permitted
DECISION_TYPE_PERMIT = "Permit"
# the specified action is denied
DECISION_TYPE_DENY = "Deny"
# the SAML authority cannot determine if the action is permitted or denied
DECISION_TYPE_INDETERMINATE = "Indeterminate"

# consent attributes determine whether consent has been given and under
# what conditions

# no claim to consent is made
CONSENT_UNSPECIFIED = "urn:oasis:names:tc:SAML:2.0:consent:unspecified"
# consent has been obtained
CONSENT_OBTAINED = "urn:oasis:names:tc:SAML:2.0:consent:obtained"
# consent has been obtained before the message has been initiated
CONSENT_PRIOR = "urn:oasis:names:tc:SAML:2.0:consent:prior"
# consent has been obtained implicitly
CONSENT_IMPLICIT = "urn:oasis:names:tc:SAML:2.0:consent:current-implicit"
# consent has been obtained explicitly
CONSENT_EXPLICIT = "urn:oasis:names:tc:SAML:2.0:consent:current-explicit"
# no consent has been obtained
CONSENT_UNAVAILABLE = "urn:oasis:names:tc:SAML:2.0:consent:unavailable"
# no consent is needed.
CONSENT_INAPPLICABLE = "urn:oasis:names:tc:SAML:2.0:consent:inapplicable"

# Subject confirmation methods(scm), can be issued, besides the subject itself
# by third parties.
# http://docs.oasis-open.org/wss/oasis-wss-saml-token-profile-1.0.pdf

# the 3rd party is identified on behalf of the subject given private/public key
SCM_HOLDER_OF_KEY = "urn:oasis:names:tc:SAML:2.0:cm:holder-of-key"
# the 3rd party is identified by subject confirmation and must include a security header
# signing its content.
SCM_SENDER_VOUCHES = "urn:oasis:names:tc:SAML:2.0:cm:sender-vouches"
# a bearer token is issued instead.
SCM_BEARER = "urn:oasis:names:tc:SAML:2.0:cm:bearer"
class AttributeValueBase(SamlBase):
    """Base class for SAML attribute values.

    Manages the interplay between the element's text content, its
    ``xsi:type`` extension attribute, and the ``xsi:nil`` marker: an empty
    value is tagged ``xsi:nil="true"``, while a typed value carries an
    ``xsi:type`` plus the corresponding ``xmlns:xs``/``xmlns:xsd``
    namespace declaration.
    """

    def __init__(self,
                 text=None,
                 extension_elements=None,
                 extension_attributes=None):
        # _extatt buffers extension attributes assigned before the base
        # class has created self.extension_attributes (see set_type()).
        self._extatt = {}

        SamlBase.__init__(self,
                          text=None,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes)
        if self._extatt:
            self.extension_attributes = self._extatt

        if text:
            # set_text() also derives and stores the xsi:type.
            self.set_text(text)
        elif not extension_elements:
            # No text and no children: mark the element explicitly nil.
            self.extension_attributes = {XSI_NIL: 'true'}
        elif XSI_TYPE in self.extension_attributes:
            # Children present: a declared xsi:type no longer applies.
            del self.extension_attributes[XSI_TYPE]

    def __setattr__(self, key, value):
        # Route assignments to .text through set_text() so the xsi:type
        # bookkeeping stays consistent.
        if key == "text":
            self.set_text(value)
        else:
            SamlBase.__setattr__(self, key, value)

    def verify(self):
        """Check consistency of an empty value (must be marked xsi:nil)."""
        if not self.text and not self.extension_elements:
            if not self.extension_attributes:
                raise Exception(
                    "Attribute value base should not have extension attributes"
                )
            # NOTE(review): this lookup raises KeyError (not the Exception
            # below) when extension attributes exist without XSI_NIL --
            # confirm whether that is intended.
            if self.extension_attributes[XSI_NIL] != "true":
                raise Exception(
                    "Attribute value base should not have extension attributes"
                )
            return True
        else:
            SamlBase.verify(self)

    def set_type(self, typ):
        """Record ``typ`` as the xsi:type and declare its XML namespace.

        Falls back to the _extatt buffer when extension_attributes does not
        exist yet (during __init__).
        """
        try:
            del self.extension_attributes[XSI_NIL]
        except (AttributeError, KeyError):
            pass

        try:
            self.extension_attributes[XSI_TYPE] = typ
        except AttributeError:
            self._extatt[XSI_TYPE] = typ

        # Declare the namespace prefix actually used by the type string.
        if typ.startswith('xs:'):
            try:
                self.extension_attributes['xmlns:xs'] = XS_NAMESPACE
            except AttributeError:
                self._extatt['xmlns:xs'] = XS_NAMESPACE

        if typ.startswith('xsd:'):
            try:
                self.extension_attributes['xmlns:xsd'] = XS_NAMESPACE
            except AttributeError:
                self._extatt['xmlns:xsd'] = XS_NAMESPACE

    def get_type(self):
        """Return the recorded xsi:type, or '' when none is set."""
        try:
            return self.extension_attributes[XSI_TYPE]
        except (KeyError, AttributeError):
            try:
                return self._extatt[XSI_TYPE]
            except KeyError:
                return ""

    def clear_type(self):
        """Remove any recorded xsi:type from both stores."""
        try:
            del self.extension_attributes[XSI_TYPE]
        except KeyError:
            pass
        try:
            del self._extatt[XSI_TYPE]
        except KeyError:
            pass

    def set_text(self, value, base64encode=False):
        """Set the element text, validating/converting it against the
        xsd type recorded (or derived from the Python type of ``value``).

        :param value: the value to store; bytes are decoded as UTF-8 first.
        :param base64encode: force xsd base64Binary handling and encode the
            stored text.
        :return: self (for chaining).
        :raises ValueError: when the value does not match the xsd type.
        """
        def _wrong_type_value(xsd, value):
            msg = 'Type and value do not match: {xsd}:{type}:{value}'
            msg = msg.format(xsd=xsd, type=type(value), value=value)
            raise ValueError(msg)

        # only work with six.string_types
        _str = unicode if six.PY2 else str
        if isinstance(value, six.binary_type):
            value = value.decode('utf-8')

        # Map Python types to their default xsd type names.
        type_to_xsd = {
            _str: 'string',
            int: 'integer',
            float: 'float',
            bool: 'boolean',
            type(None): '',
        }

        # entries of xsd-types each declaring:
        # - a corresponding python type
        # - a function to turn a string into that type
        # - a function to turn that type into a text-value
        xsd_types_props = {
            'string': {
                'type': _str,
                'to_type': _str,
                'to_text': _str,
            },
            'integer': {
                'type': int,
                'to_type': int,
                'to_text': _str,
            },
            'short': {
                'type': int,
                'to_type': int,
                'to_text': _str,
            },
            'int': {
                'type': int,
                'to_type': int,
                'to_text': _str,
            },
            'long': {
                'type': int,
                'to_type': int,
                'to_text': _str,
            },
            'float': {
                'type': float,
                'to_type': float,
                'to_text': _str,
            },
            'double': {
                'type': float,
                'to_type': float,
                'to_text': _str,
            },
            'boolean': {
                'type': bool,
                'to_type': lambda x: {
                    'true': True,
                    'false': False,
                }[_str(x).lower()],
                'to_text': lambda x: _str(x).lower(),
            },
            'base64Binary': {
                'type': _str,
                'to_type': _str,
                'to_text': (
                    lambda x: b64encode(x.encode()) if base64encode else x
                ),
            },
            'anyType': {
                'type': type(value),
                'to_type': lambda x: x,
                'to_text': lambda x: x,
            },
            '': {
                'type': type(None),
                'to_type': lambda x: None,
                'to_text': lambda x: '',
            },
        }

        # Resolve the effective xsd type name: an explicit base64 request
        # wins, then any previously recorded xsi:type, then the Python type.
        xsd_string = (
            'base64Binary' if base64encode
            else self.get_type()
                 or type_to_xsd.get(type(value)))

        # Split a possibly-prefixed type name into (namespace, local type).
        xsd_ns, xsd_type = (
            ['', type(None)] if xsd_string is None
            else ['', ''] if xsd_string == ''
            else [
                XSD if xsd_string in xsd_types_props else '',
                xsd_string
            ] if ':' not in xsd_string
            else xsd_string.split(':', 1))

        xsd_type_props = xsd_types_props.get(xsd_type, {})
        valid_type = xsd_type_props.get('type', type(None))
        to_type = xsd_type_props.get('to_type', str)
        to_text = xsd_type_props.get('to_text', str)

        # cast to correct type before type-checking
        if type(value) is _str and valid_type is not _str:
            try:
                value = to_type(value)
            except (TypeError, ValueError, KeyError):
                # the cast failed
                _wrong_type_value(xsd=xsd_type, value=value)

        if type(value) is not valid_type:
            _wrong_type_value(xsd=xsd_type, value=value)

        text = to_text(value)
        self.set_type(
            '{ns}:{type}'.format(ns=xsd_ns, type=xsd_type) if xsd_ns
            else xsd_type if xsd_type
            else '')
        SamlBase.__setattr__(self, 'text', text)
        return self

    def harvest_element_tree(self, tree):
        # Fill in the instance members from the contents of the XML tree.
        for child in tree:
            self._convert_element_tree_to_member(child)
        for attribute, value in iter(tree.attrib.items()):
            self._convert_element_attribute_to_member(attribute, value)

        # if we have added children to this node
        # we consider whitespace insignificant
        # and remove/trim/strip whitespace
        # and expect to not have actual text content
        text = (
            tree.text.strip()
            if tree.text and self.extension_elements
            else tree.text
        )
        if text:
            #print("set_text:", tree.text)
            # clear type
            #self.clear_type()
            self.set_text(text)

        # if we have added a text node
        # or other children to this node
        # remove the nil marker
        if text or self.extension_elements:
            if XSI_NIL in self.extension_attributes:
                del self.extension_attributes[XSI_NIL]
class BaseIDAbstractType_(SamlBase):
    """The urn:oasis:names:tc:SAML:2.0:assertion:BaseIDAbstractType element """

    c_tag = 'BaseIDAbstractType'
    c_namespace = NAMESPACE
    # Copy class-level schema tables so subclass additions do not leak into
    # SamlBase.
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    # XML attribute -> (python attribute name, xsd type, required)
    c_attributes['NameQualifier'] = ('name_qualifier', 'string', False)
    c_attributes['SPNameQualifier'] = ('sp_name_qualifier', 'string', False)

    def __init__(self,
                 name_qualifier=None,
                 sp_name_qualifier=None,
                 text=None,
                 extension_elements=None,
                 extension_attributes=None):
        SamlBase.__init__(self,
                          text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes)
        self.name_qualifier = name_qualifier
        self.sp_name_qualifier = sp_name_qualifier
class NameIDType_(SamlBase):
    """The urn:oasis:names:tc:SAML:2.0:assertion:NameIDType element """

    c_tag = 'NameIDType'
    c_namespace = NAMESPACE
    # The element's text content is a plain xsd string (the identifier).
    c_value_type = {'base': 'string'}
    # Copy class-level schema tables so additions stay local to this class.
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    # XML attribute -> (python attribute name, xsd type, required)
    c_attributes['NameQualifier'] = ('name_qualifier', 'string', False)
    c_attributes['SPNameQualifier'] = ('sp_name_qualifier', 'string', False)
    c_attributes['Format'] = ('format', 'anyURI', False)
    c_attributes['SPProvidedID'] = ('sp_provided_id', 'string', False)

    def __init__(self,
                 name_qualifier=None,
                 sp_name_qualifier=None,
                 format=None,
                 sp_provided_id=None,
                 text=None,
                 extension_elements=None,
                 extension_attributes=None):
        SamlBase.__init__(self,
                          text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes)
        self.name_qualifier = name_qualifier
        self.sp_name_qualifier = sp_name_qualifier
        self.format = format
        self.sp_provided_id = sp_provided_id
def name_id_type__from_string(xml_string):
    """Instantiate a NameIDType_ from its XML string representation."""
    return saml2.create_class_from_xml_string(NameIDType_, xml_string)
class EncryptedElementType_(SamlBase):
    """The urn:oasis:names:tc:SAML:2.0:assertion:EncryptedElementType element
    """

    c_tag = 'EncryptedElementType'
    c_namespace = NAMESPACE
    # Copy class-level schema tables so additions stay local to this class.
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    # One xenc:EncryptedData child plus zero or more xenc:EncryptedKey
    # children (cardinality min=0 below).
    c_children['{http://www.w3.org/2001/04/xmlenc#}EncryptedData'] = (
        'encrypted_data',
        xenc.EncryptedData)
    c_children['{http://www.w3.org/2001/04/xmlenc#}EncryptedKey'] = (
        'encrypted_key',
        [xenc.EncryptedKey])
    c_cardinality['encrypted_key'] = {"min": 0}
    c_child_order.extend(['encrypted_data', 'encrypted_key'])

    def __init__(self,
                 encrypted_data=None,
                 encrypted_key=None,
                 text=None,
                 extension_elements=None,
                 extension_attributes=None):
        SamlBase.__init__(self,
                          text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes)
        self.encrypted_data = encrypted_data
        self.encrypted_key = encrypted_key or []
def encrypted_element_type__from_string(xml_string):
    """Instantiate an EncryptedElementType_ from its XML string form."""
    return saml2.create_class_from_xml_string(EncryptedElementType_, xml_string)
class EncryptedID(EncryptedElementType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:EncryptedID element """
c_tag = | |
<gh_stars>0
# ------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
"""
Provides the definitions needed for the RETURN station type.
"""
import operator
import logging
import logging.handlers
import time
from station.interfaces import IStation
## from __future__ import division
# ------------------------------------------------------------------------------
class Station(IStation):
"""
Provides the implementation for a return to earth station to
support displaying and obtaining angle values from the user
to supply to the MS.
"""
START_STATE = 0
IDLE_STATE = 1
PRE_INPUT_STATE = 2
INPUT_STATE = 3
SUBMITTING_STATE = 4
SUBMITTED_STATE = 5
POST_SUBMITTED_STATE = 17
PASSED_STATE = 6
FAILED_STATE = 7
SHUTDOWN_STATE = 8
PRE_IDLE_STATE = 9
PRE_PASSED_STATE = 10
PRE_FAILED_STATE = 11
PRE_FAILED_RETRY_STATE = 12
POST_FAILED_RETRY_STATE = 13
FAILED_RETRY_STATE = 14
SEND_RESULT_STATE = 15
SEND_FINISH_STATE = 16
ERROR_STATE = 99
# --------------------------------------------------------------------------
    def __init__(self, config, hwModule):
        """ Initialize the LCD display and the pushbutton monitor

        The LCD 2-line display will display 3 two digit angles on each
        of the two lines. The push buttons will allow the user to
        move the digit cursor entry LEFT or RIGHT, increase (UP) or
        decrease (DOWN) the value of the digit under the cursor, and
        submit the code by pressing the SELECT button twice. TODO???
        The angles may be submitted by scanning a QR code (i.e.,
        sending a return message from the MS to the station)

        Args:
            config: a Config object containing properties to configure station characteristics
            hwModule: python module object that defines hardware interfaces
        """
        logger.debug('Constructing RETURN')
        # Resolve the display and push-button monitor classes by name from
        # the supplied hardware module (allows swapping real/simulated HW).
        displayClassName = config.DisplayClassName
        pushButtonMonitorClassName = config.PushButtonMonitorClassName

        displayClass = getattr(hwModule, displayClassName)
        pushButtonMonitorClass = getattr(hwModule, pushButtonMonitorClassName)

        self._display = displayClass(config.Display)
        self._display.setText("Initializing...")

        logger.info('Initializing pushButtonMonitor')
        self._pushButtonMonitor = pushButtonMonitorClass()
        self._pushButtonMonitor.setDevice(self._display._lcd)

        # Register every configured button to call back into buttonPressed().
        for i in config.PushButtons:
            logger.info('  Setting button {}'.format(i))
            self._pushButtonMonitor.registerPushButton(i.Name,
                                                       self.buttonPressed,
                                                       i)

        self.ConnectionManager = None

        self._angle = None  # will hold a Angle object
        self._timedMsg = None  # generator
        self._timedMsgNextState = self.IDLE_STATE
        self._colorToggle = None  # generator
        self._cheatMode = config.CheatMode

        # How long each transitional/result message stays on screen.
        self._preInputDuration = 6.0  # seconds to display msg
        self._passedDuration = 7.5  # seconds to display msg
        self._failedDuration = 5.0  # seconds to display msg
        self._preIdleDuration = 5.0  # seconds to display msg
        self._prePassedDuration = 5.0  # seconds to display msg
        self._preFailedDuration = 5.0  # seconds to display msg
        self._postFailedDuration = 4.0  # seconds to display msg
        self._submittedDuration = 2.0  # seconds to display msg
        self._postSubmittedDuration = 30.0  # seconds to display msg
        self._sendFinishDuration = 15.0  # seconds to display msg

        # Background cycle states: ([list of colors], rate_in_sec)
        # TODO: These constants could be moved to runstation.conf
        self._preIdleBg = (["CYAN"], 1.0)
        self._prePassedBg = (["YELLOW", "WHITE"], 0.1)
        self._preFailedBg = (["YELLOW", "WHITE"], 0.1)
        self._postFailedBg = (["WHITE"], 1.0)
        self._idleBg = (["WHITE", "BLUE", "YELLOW", "GREEN", "RED", "CYAN", "MAGENTA"], 0.75)
        self._preInputBg = (["YELLOW", "YELLOW", "YELLOW", "YELLOW", "RED"], 0.15)
        self._inputBg = (["CYAN"], 1.0)
        self._submit1Bg = (["RED", "WHITE"], 0.15)
        self._submit2Bg = (["WHITE"], 1.0)
        self._postSubmitBg = (["WHITE"], 1.0)
        self._passedBg = (["GREEN", "CYAN"], 1.0)
        self._failedBg = (["RED", "RED", "RED", "RED", "RED", "RED", "RED", "RED", "RED", "WHITE"], 0.1)
        self._sendFinishBg = (["YELLOW", "YELLOW", "YELLOW", "YELLOW", "RED"], 0.15)
        self._shutdownBg = (["BLACK"], 1.0)
        self._errorBg = (["RED", "RED", "RED", "RED", "RED", "WHITE"], 0.15)

        # Display text for different states
        # TODO: These constants could be moved to runstation.conf
        self._preIdleText = " Resetting...\n"
        self._prePassedText = "- Trying Your -\n- Parameters -"
        self._preFailedText = "- Trying Your -\n- Parameters -"
        self._postFailedText = " Try entering \nparameters again"
        self._idleText = "==== RETURN ====\n== TO = EARTH =="
        self._preInputText = " HEY!! \n Scan QR Code "
        self._submittingText = "2nd ENTER Sends\n Arrows to edit"
        self._submittedText = "Sending to\nGuidance..."
        self._postSubmittedText = "Params Sent \nScan Return QR "
        self._passedText = " Landing is \n A SUCCESS! "
        self._failedText = " Bounced off \n atmosphere! "
        self._sendFinishText = " Scan Final QR \n to Complete "
        self._shutdownText = "Shutting down..."
        self._errorText = " Malfunction!\n"

        # Station current operating state
        self._returnState = self.START_STATE

        # Drive the state machine from the button monitor's tick callback.
        self._pushButtonMonitor.setOnTickCallback(self.onTick)
        self.enterState(self.IDLE_STATE)
# --------------------------------------------------------------------------
    @property
    def stationTypeId(self):
        """Identifier of this station type, as reported to the framework."""
        return "RETURN"
# --------------------------------------------------------------------------
    def start(self):
        """Start the station (all setup already happened in __init__)."""
        logger.info('Starting RETURN.')
        # Nothing more to do.
# --------------------------------------------------------------------------
    def stop(self, signal):
        """Shut the station down in response to ``signal``."""
        logger.info('Received signal "%s". Stopping RETURN.', signal)
        self.enterState(self.SHUTDOWN_STATE)
# --------------------------------------------------------------------------
    def onReady(self):
        """Framework callback: station became Ready; show the idle screen."""
        logger.info('RETURN transitioned to Ready state.')
        self.enterState(self.IDLE_STATE)
# --------------------------------------------------------------------------
def onProcessing(self, args):
logger.info('RETURN transitioned to Processing state with args [%s].' % (args))
self._angle = Angle(self._cheatMode, *args)
self.refreshDisplayedAngle()
self.enterState(self.INPUT_STATE)
# --------------------------------------------------------------------------
def onProcessing2(self, args):
""" This method just implements an interface method to do nothing.
"""
logger.info('RETURN transitioned to Processing2 state with args [%s].' % (args))
# --------------------------------------------------------------------------
def onProcessingCompleted(self, args):
""" This method just implements an interface method to do nothing.
"""
logger.info('RETURN transitioned to ProcessingCompleted state with args [%s].' % (args))
# --------------------------------------------------------------------------
def refreshDisplayedAngle(self):
lines = self._angle.toString().splitlines()
centerOffset = [0, 0]
for index in range(0,2):
centerOffset[index] = (self._display.lineWidth() - len(lines[index])) // 2
lines[index] = "{0:>{width}}".format(lines[index],
width=len(lines[index]) + centerOffset[index])
self._display.setLine1Text(lines[0])
self._display.setLine2Text(lines[1])
curLine = self._angle.positionLine()
self._display.setCursor(curLine,
self._angle.formattedPosition() + centerOffset[curLine])
# --------------------------------------------------------------------------
    def enterState(self, newState):
        """ Transition to the specified operating state.

        This will modify the display text, the display background, the state of
        running timers, and the state of the _pushButtonMonitor.

        Args:
            newState: one of this class's *_STATE constants.
        Returns:
            The state prior to being called
        """
        oldState = self._returnState
        # if newState != self._returnState:
        # NOTE(review): the body always ends with `_returnState = newState`, so
        # this `while` iterates at most once — it behaves like the commented
        # `if` above; presumably kept as a defensive guard. Confirm before
        # simplifying.
        while newState != self._returnState:
            # These states own a pending timed message; cancel our reference to
            # it when leaving so a stale timer callback is not acted upon.
            if self._returnState in (self.PRE_INPUT_STATE,
                                     self.PRE_IDLE_STATE,
                                     self.PRE_PASSED_STATE,
                                     self.PRE_FAILED_STATE,
                                     self.PRE_FAILED_RETRY_STATE,
                                     self.FAILED_RETRY_STATE,
                                     self.POST_FAILED_RETRY_STATE,
                                     self.PASSED_STATE,
                                     self.FAILED_STATE,
                                     self.SUBMITTED_STATE,
                                     self.SEND_FINISH_STATE,
                                     ): # leaving this state
                self._timedMsg = None
            # Each branch configures the display (text + background colors),
            # push-button listening, and — for transient states — a timed
            # message that auto-advances to the given follow-up state.
            if newState == self.PRE_IDLE_STATE:
                self._timedMsg = self.displayTimedMsg(self._preIdleText,
                                                      self._preIdleDuration,
                                                      self._preIdleBg,
                                                      self.IDLE_STATE)
            elif newState == self.IDLE_STATE:
                self._display.setText(self._idleText)
                self.setToggleColors(*self._idleBg)
                self._pushButtonMonitor.startListening()
            elif newState == self.PRE_INPUT_STATE:
                self._timedMsg = self.displayTimedMsg(self._preInputText,
                                                      self._preInputDuration,
                                                      self._preInputBg,
                                                      self.IDLE_STATE)
                self._pushButtonMonitor.stopListening()
            elif newState == self.INPUT_STATE:
                self.setToggleColors(*self._inputBg)
                self.refreshDisplayedAngle()
                self._pushButtonMonitor.startListening()
            elif newState == self.SUBMITTING_STATE:
                self._display.setText(self._submittingText)
                self.setToggleColors(*self._submit1Bg)
            elif newState == self.SUBMITTED_STATE:
                logger.debug("initializing SUBMITTED_STATE")
                self._timedMsg = self.displayTimedMsg(self._submittedText,
                                                      self._submittedDuration,
                                                      self._submit2Bg,
                                                      self.SEND_RESULT_STATE)
                self._pushButtonMonitor.stopListening()
            elif newState == self.POST_SUBMITTED_STATE:
                self._timedMsg = self.displayTimedMsg(self._postSubmittedText,
                                                      self._postSubmittedDuration,
                                                      self._postSubmitBg,
                                                      self.IDLE_STATE)
            elif newState == self.SEND_RESULT_STATE:
                # Submit angle to MS
                self.submitAngles()
            elif newState == self.PRE_PASSED_STATE:
                self._timedMsg = self.displayTimedMsg(self._prePassedText,
                                                      self._prePassedDuration,
                                                      self._prePassedBg,
                                                      self.PASSED_STATE)
            elif newState == self.PASSED_STATE:
                self._timedMsg = self.displayTimedMsg(self._passedText,
                                                      self._passedDuration,
                                                      self._passedBg,
                                                      self.SEND_FINISH_STATE)
            elif newState == self.PRE_FAILED_RETRY_STATE:
                self._timedMsg = self.displayTimedMsg(self._preFailedText,
                                                      self._preFailedDuration,
                                                      self._preFailedBg,
                                                      self.FAILED_RETRY_STATE)
            elif newState == self.FAILED_RETRY_STATE:
                self._timedMsg = self.displayTimedMsg(self._failedText,
                                                      self._failedDuration,
                                                      self._failedBg,
                                                      self.POST_FAILED_RETRY_STATE)
            elif newState == self.POST_FAILED_RETRY_STATE:
                # Failed but retry allowed: loop back into angle input.
                self._timedMsg = self.displayTimedMsg(self._postFailedText,
                                                      self._postFailedDuration,
                                                      self._postFailedBg,
                                                      self.INPUT_STATE)
            elif newState == self.PRE_FAILED_STATE:
                self._timedMsg = self.displayTimedMsg(self._preFailedText,
                                                      self._preFailedDuration,
                                                      self._preFailedBg,
                                                      self.FAILED_STATE)
            elif newState == self.FAILED_STATE:
                self._timedMsg = self.displayTimedMsg(self._failedText,
                                                      self._failedDuration,
                                                      self._failedBg,
                                                      self.SEND_FINISH_STATE)
            elif newState == self.SEND_FINISH_STATE:
                self._timedMsg = self.displayTimedMsg(self._sendFinishText,
                                                      self._sendFinishDuration,
                                                      self._sendFinishBg,
                                                      self.PRE_IDLE_STATE)
            elif newState == self.SHUTDOWN_STATE:
                self._display.setText(self._shutdownText)
                self.setToggleColors(*self._shutdownBg)
                self._pushButtonMonitor.stopListening()
            else:
                # Unknown state requested: show the malfunction screen.
                self._display.setText(self._errorText)
                self.setToggleColors(*self._errorBg)
                self._pushButtonMonitor.stopListening()
            self._returnState = newState
        return oldState
# --------------------------------------------------------------------------
    def buttonPressed(self, pushButtonName):
        """ Handle angle input pushbutton events.

        Only recognizes the push event, not the release. Buttons update the
        displayed angle, and may change the operating state.

        Args:
            pushButtonName (string): Up, Down, Left, Right, or Enter
        """
        #logger.info('Push button %s pressed.' % (pushButtonName))
        # Any button wakes the station from idle; the specific button is ignored.
        if self._returnState == self.IDLE_STATE:
            self.enterState(self.PRE_INPUT_STATE)
        elif pushButtonName == 'Up':
            # enterState returns the PRIOR state, so the digit is only changed
            # when we were already in INPUT_STATE (not when just entering it).
            if self.enterState(self.INPUT_STATE) == self.INPUT_STATE:
                self._angle.incCurrentDigit(1)
                self.refreshDisplayedAngle()
        elif pushButtonName == 'Down':
            if self.enterState(self.INPUT_STATE) == self.INPUT_STATE:
                self._angle.decCurrentDigit(1)
                self.refreshDisplayedAngle()
        elif pushButtonName == 'Left':
            if self.enterState(self.INPUT_STATE) == self.INPUT_STATE:
                self._angle.moveLeft(1)
                self.refreshDisplayedAngle()
        elif pushButtonName == 'Right':
            if self.enterState(self.INPUT_STATE) == self.INPUT_STATE:
                self._angle.moveRight(1)
                self.refreshDisplayedAngle()
        elif pushButtonName == 'Enter':
            # Double-press confirmation: first Enter arms the submission,
            # second Enter actually submits.
            if self._returnState == self.INPUT_STATE:
                self.enterState(self.SUBMITTING_STATE)
                logger.info('1st enter key press received. Waiting for 2nd.')
            elif self._returnState == self.SUBMITTING_STATE:
                self.enterState(self.SUBMITTED_STATE)
                logger.info('2nd enter key press received.')
                #self._returnState = self.SUBMITTED_STATE
        else:
            logger.debug("Invalid pushButtonName received: '{}'".format(pushButtonName))
# --------------------------------------------------------------------------
def onFailed(self, args):
logger.info('RETURN transitioned to Failed state with args [%s].' % (args))
self.enterState(self.POST_SUBMITTED_STATE)
# is_correct, challenge_complete = args
# if challenge_complete.lower() == "true":
# self.enterState(self.PRE_FAILED_STATE)
# else:
# self.enterState(self.PRE_FAILED_RETRY_STATE)
self._pushButtonMonitor.stopListening()
# --------------------------------------------------------------------------
def onPassed(self, args):
logger.info('RETURN transitioned to Passed | |
# bp_to_imgV2.py
import json, time
import numpy as np
import quaternion
import cv2
#from scipy.signal import convolve2d
# Block rotation lookup tables, indexed by the game's rotation id (0-23).
# rot_normal[i] is the block's facing (normal) unit vector for rotation id i;
# rot_tangent[i] is a unit vector perpendicular to it.
rot_normal = np.array([
    [ 0, 0, 1], #0
    [ 1, 0, 0], #1
    [ 0, 0,-1], #2
    [-1, 0, 0], #3
    [ 0,-1, 0], #4
    [ 0,-1, 0], #5
    [ 0,-1, 0], #6
    [ 0,-1, 0], #7
    [ 0, 1, 0], #8
    [ 0, 1, 0], #9
    [ 0, 1, 0], #10
    [ 0, 1, 0], #11
    [ 0, 0, 1], #12
    [ 1, 0, 0], #13
    [ 0, 0,-1], #14
    [-1, 0, 0], #15
    [ 0, 0, 1], #16
    [ 0, 0,-1], #17
    [ 0, 0, 1], #18
    [ 0, 0,-1], #19
    [ 1, 0, 0], #20
    [-1, 0, 0], #21
    [ 1, 0, 0], #22
    [-1, 0, 0]]) #23
# NOTE: this tangent table has not been fully tested!
rot_tangent = np.array([
    [ 0, 1, 0], #0
    [ 0, 1, 0], #1
    [ 0, 1, 0], #2
    [ 0, 1, 0], #3
    [ 0, 0, 1], #4
    [ 1, 0, 0], #5
    [ 0, 0,-1], #6
    [-1, 0, 0], #7
    [ 0, 0, 1], #8
    [ 1, 0, 0], #9
    [ 0, 0,-1], #10
    [-1, 0, 0], #11
    [ 0,-1, 0], #12 this from [1,0,0] with 16
    [ 0,-1, 0], #13 this from [0,0,-1] with 22 this from [0,0,1] with 20
    [ 0,-1, 0], #14 this from [-1,0,0] with 17
    [ 0,-1, 0], #15 this from [0,0,1] with 21
    [ 1, 0, 0], #16 this from [0,-1,0] with 12
    [ 1, 0, 0], #17 this from [0,-1,0] with 14
    [-1, 0, 0], #18
    [-1, 0, 0], #19
    [ 0, 0, 1], #20 this from [0,-1,0] with 13
    [ 0, 0, 1], #21 this from [0,-1,0] with 15
    [ 0, 0,-1], #22 this from [0,0,1] with 13
    [ 0, 0,-1]]) #23
# Bitangent completes the right-handed basis (normal x tangent).
rot_bitangent = np.cross(rot_normal, rot_tangent)
# Transpose to 3x24 so a 3x3 rotation matrix can left-multiply each table.
rot_normal = rot_normal.T
rot_tangent = rot_tangent.T
rot_bitangent = rot_bitangent.T
# Load blocks and materials configuration from JSON files in the working dir.
with open("blocks.json", "r") as f:
    blocks = json.loads(f.read())
with open("materials.json", "r") as f:
    materials = json.loads(f.read())
# Normalize materials: default missing "Invisible" to False and convert each
# "Color" to a numpy array so it can be used in vectorized image math.
for k in materials:
    if "Invisible" not in materials[k]:
        materials[k]["Invisible"] = False
    materials[k]["Color"] = np.array(materials[k]["Color"])
# Load size id dictionary and convert its string keys to ints.
with open("size_id_dictionary.json", "r") as f:
    size_id_dict = json.load(f)
size_id_dict = {int(k): v for k, v in size_id_dict.items()}
# Module-level store for the last processed blueprint's game version.
bp_gameversion = None
# Blueprint:
# CSI: block color
# COL: craft colors ["float,float,float"]
# SCs:
# BLP: block position ["int,int,int"]
# BLR: block rotation [int]
# BP1
# BP2
# BCI: maybe block color index
# BEI:
# BlockIds: block ids [int]
async def process_blueprint(fname, silent=False, standaloneMode=False):
    """Load and init blueprint data. Returns blueprint, calculation times, image filename.

    Pipeline: JSON parse -> numpy conversion -> info gathering -> view
    matrices -> image rendering -> PNG save. Each stage's wall-clock time is
    collected into the returned timings list.

    Args:
        fname (str): path to the blueprint JSON file. The output image name is
            built with fname[:-10], i.e. it assumes a 10-character extension
            (presumably ".blueprint") — TODO confirm.
        silent (bool): suppress progress/timing prints when True.
        standaloneMode (bool): when True, return (bp, timings, image array);
            otherwise return (image filename, timings).

    NOTE(review): declared async but contains no awaits; it runs to completion
    synchronously when awaited.
    """
    global bp_gameversion
    bp_gameversion = None
    if not silent: print("Processing blueprint \"", fname, "\"", sep="")
    ts1 = time.time()
    with open(fname, "r") as f:
        bp = json.load(f)
    ts1 = time.time() - ts1
    if not silent: print("JSON parse completed in", ts1, "s")
    # convert to numpy data (mutates bp in place)
    ts2 = time.time()
    __convert_blueprint(bp)
    ts2 = time.time() - ts2
    if not silent: print("Conversion completed in", ts2, "s")
    # fetch infos
    ts3 = time.time()
    bp_infos, bp_gameversion = __fetch_infos(bp)
    ts3 = time.time() - ts3
    if not silent: print("Infos gathered in", ts3, "s")
    # create top, side, front view matrices
    ts4 = time.time()
    top_mats, side_mats, front_mats = __create_view_matrices(bp)
    ts4 = time.time() - ts4
    if not silent: print("View matrices completed in", ts4, "s")
    # create images
    ts5 = time.time()
    main_img = __create_images(top_mats, side_mats, front_mats, bp_infos)
    ts5 = time.time() - ts5
    if not silent: print("Image creation completed in", ts5, "s")
    # save image next to the input file (failure is reported, not raised)
    main_img_fname = fname[:-10] + "_view.png"
    if not cv2.imwrite(main_img_fname, main_img):
        print("ERROR: image could not be saved", main_img_fname)
    # return
    if standaloneMode:
        return bp, [ts1, ts2, ts3, ts4, ts5], main_img
    else:
        return main_img_fname, [ts1, ts2, ts3, ts4, ts5]
def __convert_blueprint(bp):
    """Convert blueprint data to numpy data, in place.

    Recursively walks the main blueprint and its sub-constructs ("SCs"),
    converting string-encoded positions/rotations to numpy arrays, composing
    each construct's rotation with its parent's, and accumulating global
    min/max coordinates and the overall Size on bp["Blueprint"].
    """
    def blueprint_iter(blueprint, parentglobalrotation=quaternion.one, parentglobalposition=0):
        """Iterate blueprint and sub blueprints"""
        # convert rotation ids to np array
        blueprint["BLR"] = np.array(blueprint["BLR"])
        # Convert local rotation to quaternion. The JSON components appear to
        # be stored as x,y,z,w (np.quaternion takes w first, hence index 3
        # leading) — TODO confirm against the game's serialization.
        localrot_split = blueprint["LocalRotation"].split(",")
        globalrotation = np.quaternion(float(localrot_split[3]),
                                       float(localrot_split[0]),
                                       float(localrot_split[1]),
                                       float(localrot_split[2]))
        globalrotation = parentglobalrotation * globalrotation
        # Snap the rotation matrix to the nearest axis-aligned signed
        # permutation matrix: keep only the dominant entry per row, as +/-1.
        localrot = quaternion.as_rotation_matrix(globalrotation)
        localrot_arg = np.argmax(np.abs(localrot), axis=1)
        localrot_max = np.sign(localrot[[0, 1, 2], localrot_arg])
        localrot[:, :] = 0
        localrot[[0, 1, 2], localrot_arg] = localrot_max
        blueprint["LocalRotation"] = localrot
        # convert local position to np array
        blueprint["LocalPosition"] = np.array(blueprint["LocalPosition"].split(","),
                                              dtype=float).round().astype(int)
        # Rotate the local position into the parent frame (quaternion sandwich
        # product q * p * q^-1) and offset by the parent's global position.
        blueprint["LocalPosition"] = (parentglobalrotation * quaternion.quaternion(*blueprint["LocalPosition"]) *
                                      parentglobalrotation.inverse()).vec.astype(int) + parentglobalposition
        # convert min/max coordinates to np array
        mincords = np.array(blueprint["MinCords"].split(","),
                            dtype=float)
        maxcords = np.array(blueprint["MaxCords"].split(","),
                            dtype=float)
        # rotate into the global frame
        mincords = (blueprint["LocalRotation"] @ mincords) + blueprint["LocalPosition"]
        maxcords = (blueprint["LocalRotation"] @ maxcords) + blueprint["LocalPosition"]
        # (round to int) ((done after iteration))
        mincords = mincords# .round().astype(int)
        maxcords = maxcords# .round().astype(int)
        # Re-min/max: rotation may have swapped which corner is "min".
        blueprint["MinCords"] = np.minimum(mincords, maxcords)
        blueprint["MaxCords"] = np.maximum(mincords, maxcords)
        # create new arrays; trust the BLP length if the stored count disagrees
        blockcount = blueprint["BlockCount"]
        if blockcount != len(blueprint["BLP"]):
            blockcount = len(blueprint["BLP"])
            print("[WARN] Block count is not equal to length of block position array.")
        #blockguid_array = np.zeros(blockcount, dtype="<U36") not using guid here
        blockid_array = np.array(blueprint["BlockIds"], dtype=int)
        # block loop: split "x,y,z" position strings into component lists
        for i in range(blockcount):
            # blockguid_array[i] = bp["ItemDictionary"][str(blueprint["BlockIds"][i])] not using guid here
            blueprint["BLP"][i] = blueprint["BLP"][i].split(",")
        blueprint["BlockIds"] = blockid_array # guid_array not using guid here
        # rotate block position via local rotation and add local position
        blockposition_array = np.array(blueprint["BLP"], dtype=float).T
        blockposition_array = np.dot(blueprint["LocalRotation"], blockposition_array).T
        blueprint["BLP"] = blockposition_array.round().astype(int) + blueprint["LocalPosition"]
        # check min/max coords with blp
        #mincords = np.min(blueprint["BLP"], 0)
        #maxcords = np.max(blueprint["BLP"], 0)
        #print(mincords, maxcords)
        #print(blueprint["MinCords"], blueprint["MaxCords"])
        # re-min/max
        #blueprint["MinCords"] = np.minimum(mincords, blueprint["MinCords"])
        #blueprint["MaxCords"] = np.maximum(mincords, blueprint["MaxCords"])
        # rotate rot_normal, rot_tangent and rot_bitangent via local rotation
        # (stored transposed back to 24x3 so rows index rotation ids again)
        blueprint["RotNormal"] = np.dot(blueprint["LocalRotation"], rot_normal).T.round().astype(int)
        blueprint["RotTangent"] = np.dot(blueprint["LocalRotation"], rot_tangent).T.round().astype(int)
        blueprint["RotBitangent"] = np.dot(blueprint["LocalRotation"], rot_bitangent).T.round().astype(int)
        # sub blueprint iteration
        for sub_bp in blueprint["SCs"]:
            blueprint_iter(sub_bp, globalrotation, blueprint["LocalPosition"])
            # merge min/max so the parent bounds enclose every sub-construct
            blueprint["MinCords"] = np.minimum(blueprint["MinCords"], sub_bp["MinCords"])
            blueprint["MaxCords"] = np.maximum(blueprint["MaxCords"], sub_bp["MaxCords"])
    # item dictionary conversion: string keys -> int block ids
    bp["ItemDictionary"] = {int(k): v for k, v in bp["ItemDictionary"].items()}
    # main bp fix: the root construct is defined to sit at the identity pose
    bp["Blueprint"]["LocalRotation"] = "0,0,0,1"
    bp["Blueprint"]["LocalPosition"] = "0,0,0"
    blueprint_iter(bp["Blueprint"])
    # set size (inclusive bounds, hence the +1)
    bp["Blueprint"]["MinCords"] = bp["Blueprint"]["MinCords"].round().astype(int)
    bp["Blueprint"]["MaxCords"] = bp["Blueprint"]["MaxCords"].round().astype(int)
    bp["Blueprint"]["Size"] = bp["Blueprint"]["MaxCords"] - bp["Blueprint"]["MinCords"] + 1
def __fetch_infos(bp):
    """Collect summary metadata from a parsed blueprint.

    Returns:
        (infos, gameversion): infos is a dict with Name, Blocks, Cost, Size
        and Author entries (placeholder strings on failure); gameversion is a
        list of int version components, or "?" if parsing failed.
    """
    def safe_max(a, b):
        """Return max(a, b), treating None as absent; None only if both are None."""
        if a is None:
            return b
        if b is None:
            return a
        return max(a, b)
    name = bp.get("Name")
    infos = {"Name": "Unknown" if name is None else name}
    # Block count: prefer the larger of the two stored counters.
    block_total = safe_max(bp.get("SavedTotalBlockCount"), bp["Blueprint"].get("TotalBlockCount"))
    if block_total is None:
        print("Error while gathering blueprint block count info.")
        block_total = "?"
    infos["Blocks"] = block_total
    try:
        infos["Cost"] = str(round(bp.get("SavedMaterialCost")))
    except Exception as err:
        print("Error while gathering blueprint cost info:", err)
        infos["Cost"] = "?"
    try:
        infos["Size"] = "W:{0} H:{1} L:{2}".format(*bp.get("Blueprint").get("Size"))
    except Exception as err:
        print("Error while gathering blueprint size info:", err)
        infos["Size"] = "?"
    try:
        infos["Author"] = bp.get("Blueprint").get("AuthorDetails").get("CreatorReadableName")
    except Exception as err:
        print("Error while gathering blueprint author info:", err)
        infos["Author"] = "Unknown"
    # Game version: each dotted component is reduced to its digits and int()-ed.
    try:
        components = bp.get("Blueprint").get("GameVersion").split(".")
        gameversion = []
        for component in components:
            digits = component if component.isnumeric() else \
                "".join(ch for ch in component if ch.isnumeric())
            gameversion.append(int(digits))
    except Exception as err:
        print("Error while gathering blueprint gameversion info:", err)
        gameversion = "?"
    return infos, gameversion
def __create_view_matrices(bp):
"""Create top, side, front view matrices (color matrix and height matrix)"""
def blueprint_iter(blueprint, mincords, blueprint_desc = "main"):
"""Iterate blueprint and sub blueprints"""
nonlocal actual_min_cords
# subtract min cords
blueprint["BLP"] -= mincords
#print("ViewMat at", blueprint_desc)
# numpyfication
a_guid = np.vectorize(itemdict.get)(blueprint["BlockIds"])
missing_block = blocks.get("missing")
# new version
a_sizeid = np.vectorize(lambda x: blocks.get(x, missing_block).get("SizeId"))(a_guid)
# end new
a_pos = blueprint["BLP"]
a_dir = blueprint["RotNormal"][blueprint["BLR"]]
a_dir_tan = blueprint["RotTangent"][blueprint["BLR"]]
a_dir_bitan = blueprint["RotBitangent"][blueprint["BLR"]]
a_material = np.vectorize(lambda x: blocks.get(x, missing_block).get("Material"))(a_guid)
a_color = np.vectorize(lambda x: materials.get(x)["Color"], signature="()->(n)")(a_material)
a_invisible = np.vectorize(lambda x: materials.get(x)["Invisible"])(a_material)
def fill_color_and_height(color_mat, height_mat, sel_arr, pos_sel_arr, axisX, axisZ, axisY):
"""Fills color_mat and height_mat with selected blocks (sel_arr as index and pos_sel_arr as position).
axisY is the height axis."""
nonlocal a_color, a_invisible
# create slicing indices for axes
axisA = axisX
axisB = axisZ+1 if axisZ > axisX else None
axisS = axisZ - axisX
# selection of higher height
if height_mat.shape[0] <= np.max(pos_sel_arr[:, axisX]):
errortext = f"Axis overflow: {height_mat.shape[0]} to {np.max(pos_sel_arr[:, axisX])}\n" \
f"Block guid: {a_guid[sel_arr[np.argmax(pos_sel_arr[:, axisX])]]}"
raise IndexError(errortext)
if height_mat.shape[1] <= np.max(pos_sel_arr[:, axisZ]):
errortext = f"Axis overflow: {height_mat.shape[1]} to {np.max(pos_sel_arr[:, | |
= Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
# Nonnegative continuous decision variables x1238..x1475 (generated block).
# The original one-per-line `m.xNNNN = Var(...)` assignments are collapsed
# into loops; setattr(m, name, component) invokes the same attribute
# assignment on the Pyomo model, so the resulting model is identical.
for _vi in range(1238, 1346):
    setattr(m, "x%d" % _vi, Var(within=Reals, bounds=(0, None), initialize=1.42857142857143))
for _vi in range(1346, 1434):
    setattr(m, "x%d" % _vi, Var(within=Reals, bounds=(0, None), initialize=10))
for _vi in range(1434, 1476):
    setattr(m, "x%d" % _vi, Var(within=Reals, bounds=(0, None), initialize=1.42857142857143))
del _vi  # keep the module namespace as clean as the original
m.x1476 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1477 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1478 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1479 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1480 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1481 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1482 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1483 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1484 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1485 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1486 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1487 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1488 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1489 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1490 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1491 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1492 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1493 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1494 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1495 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1496 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1497 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1498 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1499 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1500 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1501 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1502 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1503 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1504 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1505 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1506 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1507 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1508 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1509 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1510 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1511 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1512 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1513 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1514 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1515 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1516 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1517 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1518 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1519 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1520 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1521 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1522 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1523 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1524 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1525 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1526 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1527 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1528 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1529 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1530 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1531 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1532 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1533 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1534 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1535 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1536 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1537 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1538 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1539 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1540 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1541 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1542 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1543 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1544 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1545 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1546 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1547 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1548 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1549 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1550 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1551 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1552 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1553 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1554 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1555 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1556 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1557 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1558 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1559 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1560 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1561 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1562 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1563 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1564 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1565 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1566 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1567 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1568 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1569 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1570 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1571 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1572 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1573 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1574 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1575 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1576 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1577 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1578 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1579 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1580 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1581 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1582 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1583 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1584 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1585 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1586 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1587 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1588 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1589 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1590 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1591 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1592 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1593 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1594 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1595 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1596 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1597 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1598 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1599 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1600 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1601 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1602 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1603 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1604 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1605 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1606 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1607 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1608 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1609 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1610 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1611 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1612 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1613 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1614 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1615 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1616 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1617 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1618 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1619 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1620 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1621 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1622 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1623 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1624 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1625 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1626 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1627 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1628 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1629 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1630 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1631 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1632 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1633 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1634 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1635 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1636 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1637 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1638 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1639 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1640 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1641 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1642 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1643 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1644 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1645 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1646 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1647 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1648 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1649 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1650 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1651 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1652 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1653 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1654 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1655 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1656 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1657 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1658 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1659 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1660 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1661 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1662 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1663 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1664 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1665 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1666 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1667 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1668 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1669 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1670 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1671 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1672 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1673 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1674 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1675 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1676 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1677 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1678 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1679 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1680 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1681 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1682 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1683 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1684 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1685 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1686 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1687 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1688 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1689 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1690 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1691 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1692 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1693 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1694 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1695 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1696 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1697 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1698 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1699 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1700 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1701 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1702 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1703 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1704 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1705 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1706 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1707 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1708 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1709 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1710 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1711 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1712 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1713 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1714 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1715 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1716 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1717 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1718 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1719 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1720 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1721 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1722 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1723 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1724 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1725 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1726 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1727 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1728 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1729 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1730 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1731 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1732 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1733 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1734 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1735 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1736 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1737 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1738 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1739 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1740 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1741 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1742 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1743 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1744 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1745 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1746 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1747 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1748 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1749 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1750 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1751 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1752 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1753 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1754 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1755 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1756 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1757 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1758 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1759 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1760 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1761 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1762 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1763 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1764 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1765 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1766 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1767 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1768 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1769 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1770 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1771 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1772 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1773 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1774 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1775 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1776 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1777 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1778 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1779 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1780 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1781 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1782 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1783 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1784 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1785 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1786 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1787 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1788 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1789 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1790 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1791 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1792 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1793 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1794 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1795 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1796 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1797 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1798 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1799 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1800 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1801 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1802 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1803 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1804 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1805 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1806 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1807 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1808 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1809 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1810 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1811 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1812 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1813 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1814 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1815 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1816 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1817 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1818 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1819 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1820 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1821 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1822 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1823 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1824 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1825 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1826 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1827 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1828 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1829 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1830 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1831 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1832 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1833 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1834 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1835 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1836 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1837 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1838 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1839 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1840 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1841 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1842 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1843 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1844 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1845 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1846 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1847 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1848 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1849 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1850 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1851 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1852 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1853 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1854 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1855 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1856 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1857 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1858 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1859 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1860 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1861 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1862 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1863 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1864 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1865 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1866 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1867 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1868 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1869 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1870 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1871 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1872 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1873 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1874 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1875 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1876 = Var(within=Reals,bounds=(0,None),initialize=1.42857142857143)
m.x1877 | |
position")
self.hdf5.flush()
def close(self):
    """Flush any pending data and close the HDF5 file (safe to call twice).

    After closing, ``self.hdf5`` is set to ``None`` so a repeated call is a
    no-op.
    """
    logger.debug("In close")
    if not self.hdf5:
        return
    self.flush()
    with self._sem:
        self.hdf5.close()
        self.hdf5 = None
def write(self, data, index=0):
    """
    Minimalistic method to limit the overhead.

    :param data: array with intensities or tuple (2th,I) or (I,2th,chi)
    :param index: flat frame index; decomposed into (slow, fast) indices
        when ``fast_scan_width`` is set
    """
    logger.debug("In write, index %s", index)
    radial = None
    azimuthal = None
    I = None  # stays None when `data` has an unexpected type/arity
    if isinstance(data, numpy.ndarray):
        I = data
    elif isinstance(data, (list, tuple)):
        n = len(data)
        if n == 2:
            # 1D result: (radial, intensity)
            radial, I = data
        elif n == 3:
            if data[0].ndim == 2:
                # 2D result: (intensity, radial, azimuthal)
                I, radial, azimuthal = data
            else:
                # 1D result with error bars: (radial, intensity, error)
                radial, I, _error = data
    with self._sem:
        if self.dataset is None:
            logger.warning("Writer not initialized !")
            return
        if self.fast_scan_width:
            # 2D layout: slow axis selects the row, fast axis the column.
            index0, index1 = (index // self.fast_scan_width, index % self.fast_scan_width)
            if index0 >= self.dataset.shape[0]:
                self.dataset.resize(index0 + 1, axis=0)
            # NOTE(review): stores the raw `data`, not the unpacked `I` —
            # assumes fast-scan callers pass a bare array; confirm intent.
            self.dataset[index0, index1] = data
        else:
            if index >= self.dataset.shape[0]:
                self.dataset.resize(index + 1, axis=0)
            self.dataset[index] = I
        # Record the axis scales once, on the first frame that provides them.
        # (The original duplicated this azimuthal block; the first copy also
        # failed to set the flag, so the assignment ran twice.)
        if (not self.has_azimuthal_values) and \
           (azimuthal is not None) and \
           self.azimuthal_values is not None:
            self.azimuthal_values[:] = azimuthal
            self.has_azimuthal_values = True
        if (not self.has_radial_values) and \
           (radial is not None) and \
           self.radial_values is not None:
            self.radial_values[:] = radial
            self.has_radial_values = True
class DefaultAiWriter(Writer):
    # Historical single-frame writer of AzimuthalIntegrator: 1D results are
    # saved as ASCII files, 2D results as EDF images (via FabIO).

    def __init__(self, filename, engine=None):
        """Constructor of the historical writer of azimuthalIntegrator.

        :param filename: name of the output file
        :param engine: integrator, should provide a make_headers method.
        """
        self._filename = filename
        self._engine = engine
        # This format holds one frame only; write() raises on a second call.
        self._already_written = False

    def set_filename(self, filename):
        """
        Define the filename which will be used (re-arms the single-frame guard).
        """
        self._filename = filename
        self._already_written = False

    def make_headers(self, hdr="#", has_mask=None, has_dark=None, has_flat=None,
                     polarization_factor=None, normalization_factor=None,
                     metadata=None):
        """Build the commented ASCII header describing the integration.

        :param hdr: string used as comment in the header
        :type hdr: str
        :param has_mask: a mask was applied (default: no)
        :type has_mask: bool
        :param has_dark: save the darks filenames (default: no)
        :type has_dark: bool
        :param has_flat: save the flat filenames (default: no)
        :type has_flat: bool
        :param polarization_factor: the polarization factor
        :type polarization_factor: float
        :param normalization_factor: the monitor value
        :type normalization_factor: float
        :param metadata: JSON-serializable headers of the input frame
        :return: the header
        :rtype: str
        """
        # Prefer the engine's own header generator when available.
        if "make_headers" in dir(self._engine):
            header_lst = self._engine.make_headers()
        else:
            header_lst = [str(self._engine), ""]
        header_lst += ["Mask applied: %s" % has_mask,
                       "Dark current applied: %s" % has_dark,
                       "Flat field applied: %s" % has_flat,
                       "Polarization factor: %s" % polarization_factor,
                       "Normalization factor: %s" % normalization_factor]

        if metadata is not None:
            header_lst += ["", "Headers of the input frame:"]
            header_lst += [i.strip() for i in json.dumps(metadata, indent=2).split("\n")]
        header = "\n".join(["%s %s" % (hdr, i) for i in header_lst])

        return header

    def save1D(self, filename, dim1, I, error=None, dim1_unit="2th_deg",
               has_mask=None, has_dark=False, has_flat=False,
               polarization_factor=None, normalization_factor=None, metadata=None):
        """This method saves the result of a 1D integration as an ASCII file.

        :param filename: the filename used to save the 1D integration
        :type filename: str
        :param dim1: the x coordinates of the integrated curve
        :type dim1: numpy.ndarray
        :param I: The integrated intensity
        :type I: numpy.ndarray
        :param error: the error bar for each intensity
        :type error: numpy.ndarray or None
        :param dim1_unit: the unit of the dim1 array
        :type dim1_unit: pyFAI.units.Unit
        :param has_mask: a mask was used
        :param has_dark: a dark-current was applied
        :param has_flat: flat-field was applied
        :param polarization_factor: the polarization factor
        :type polarization_factor: float, None
        :param normalization_factor: the monitor value
        :type normalization_factor: float, None
        :param metadata: JSON serializable dictionary containing the metadata
        """
        dim1_unit = units.to_unit(dim1_unit)

        with open(filename, "w") as f:
            f.write(self.make_headers(has_mask=has_mask, has_dark=has_dark,
                                      has_flat=has_flat,
                                      polarization_factor=polarization_factor,
                                      normalization_factor=normalization_factor,
                                      metadata=metadata))
            # Filenames may contain non-ASCII characters; fall back to UTF-8 bytes.
            try:
                f.write("\n# --> %s\n" % (filename))
            except UnicodeError:
                f.write("\n# --> %s\n" % (filename.encode("utf8")))
            if error is None:
                f.write("#%14s %14s\n" % (dim1_unit, "I "))
                f.write("\n".join(["%14.6e %14.6e" % (t, i) for t, i in zip(dim1, I)]))
            else:
                f.write("#%14s %14s %14s\n" %
                        (dim1_unit, "I ", "sigma "))
                f.write("\n".join(["%14.6e %14.6e %14.6e" % (t, i, s) for t, i, s in zip(dim1, I, error)]))
            f.write("\n")

    def save2D(self, filename, I, dim1, dim2, error=None, dim1_unit="2th_deg",
               has_mask=None, has_dark=False, has_flat=False,
               polarization_factor=None, normalization_factor=None,
               metadata=None):
        """This method saves the result of a 2D integration as an EDF image.

        :param filename: the filename used to save the 2D histogram
        :type filename: str
        :param I: The integrated intensity
        :type I: numpy.ndarray
        :param dim1: the 1st coordinates of the histogram
        :type dim1: numpy.ndarray
        :param dim2: the 2nd coordinates of the histogram
        :type dim2: numpy.ndarray
        :param error: the error bar for each intensity
        :type error: numpy.ndarray or None
        :param dim1_unit: the unit of the dim1 array
        :type dim1_unit: pyFAI.units.Unit
        :param has_mask: a mask was used
        :param has_dark: a dark-current was applied
        :param has_flat: flat-field was applied
        :param polarization_factor: the polarization factor
        :type polarization_factor: float, None
        :param normalization_factor: the monitor value
        :type normalization_factor: float, None
        :param metadata: JSON serializable dictionary containing the metadata
        """
        if fabio is None:
            raise RuntimeError("FabIO module is needed to save EDF images")
        dim1_unit = units.to_unit(dim1_unit)
        # Collapse whitespace (\n, \t) in the engine representation.
        engine_info = " ".join(str(self._engine).split())
        header = OrderedDict()
        header["Engine"] = engine_info

        if "make_headers" in dir(self._engine):
            header.update(self._engine.make_headers("dict"))

        header[dim1_unit.name + "_min"] = str(dim1.min())
        header[dim1_unit.name + "_max"] = str(dim1.max())

        header["chi_min"] = str(dim2.min())
        header["chi_max"] = str(dim2.max())

        header["has_mask_applied"] = str(has_mask)
        header["has_dark_correction"] = str(has_dark)
        header["has_flat_correction"] = str(has_flat)
        header["polarization_factor"] = str(polarization_factor)
        header["normalization_factor"] = str(normalization_factor)

        if metadata is not None:
            # Keys reserved by the EDF format must not be overwritten.
            blacklist = ['HEADERID', 'IMAGE', 'BYTEORDER', 'DATATYPE', 'DIM_1',
                         'DIM_2', 'DIM_3', 'SIZE']
            for key, value in metadata.items():
                if key.upper() in blacklist or key in header:
                    continue
                else:
                    header[key] = value

        try:
            img = fabio.edfimage.edfimage(data=I.astype("float32"),
                                          header=header)

            if error is not None:
                img.appendFrame(data=error, header={"EDF_DataBlockID": "1.Image.Error"})
            img.write(filename)
        except IOError:
            logger.error("IOError while writing %s", filename)

    def write(self, data):
        """
        Minimalistic method to limit the overhead.

        :param data: array with intensities or tuple (2th,I) or (I,2th,chi)
        :type data: Integrate1dResult, Integrate2dResult
        :raises Exception: on a second call (single-frame format) or on an
            unsupported data type
        """
        if self._already_written:
            raise Exception("This file format do not support multi frame. You have to change the filename.")
        self._already_written = True

        # Dispatch on the fully qualified container type of the result.
        if fully_qualified_name(data) == 'pyFAI.containers.Integrate1dResult':
            self.save1D(filename=self._filename,
                        dim1=data.radial,
                        I=data.intensity,
                        error=data.sigma,
                        dim1_unit=data.unit,
                        has_mask=data.has_mask_applied,
                        has_dark=data.has_dark_correction,
                        has_flat=data.has_flat_correction,
                        polarization_factor=data.polarization_factor,
                        normalization_factor=data.normalization_factor,
                        metadata=data.metadata)
        elif fully_qualified_name(data) == 'pyFAI.containers.Integrate2dResult':
            self.save2D(filename=self._filename,
                        I=data.intensity,
                        dim1=data.radial,
                        dim2=data.azimuthal,
                        error=data.sigma,
                        dim1_unit=data.unit,
                        has_mask=data.has_mask_applied,
                        has_dark=data.has_dark_correction,
                        has_flat=data.has_flat_correction,
                        polarization_factor=data.polarization_factor,
                        normalization_factor=data.normalization_factor,
                        metadata=data.metadata)
        else:
            raise Exception("Unsupported data type: %s" % type(data))

    def flush(self):
        # Nothing buffered: data are written synchronously in write().
        pass

    def close(self):
        # Nothing to release: files are opened and closed per write().
        pass
class AsciiWriter(Writer):
    """
    Ascii file writer (.xy or .dat): one numbered text file per frame.
    """

    def __init__(self, filename=None, prefix="fai_", extension=".dat"):
        """
        :param filename: output file or directory; when a directory is given,
            files are created inside it as ``prefix + index + extension``
        :param prefix: basename prefix of the generated files
        :param extension: extension of the generated files
        """
        Writer.__init__(self, filename, extension)
        self.header = None
        # BUG FIX: the signature advertises filename=None but
        # os.path.isdir(None)/os.path.dirname(None) raised a TypeError;
        # fall back to the current directory in that case.
        if filename and os.path.isdir(filename):
            self.directory = filename
        else:
            self.directory = os.path.dirname(filename) if filename else ""
        self.prefix = prefix
        self.index_format = "%04i"
        self.start_index = 0

    def __repr__(self):
        return "Ascii writer on file %s" % (self.filename)

    def init(self, fai_cfg=None, lima_cfg=None):
        """
        Creates the directory that will host the output file(s)

        :param fai_cfg: integration configuration (detector, geometry, ...)
        :param lima_cfg: acquisition configuration (prefix, directory, ...)
        """
        Writer.init(self, fai_cfg, lima_cfg)
        # BUG FIX: lima_cfg defaults to None but was dereferenced below.
        lima_cfg = lima_cfg if lima_cfg is not None else {}
        with self._sem:
            # Build the static commented header from the integration config.
            header_lst = ["", "== Detector =="]
            if "detector" in self.fai_cfg:
                header_lst.append("Detector: %s" % self.fai_cfg["detector"])
            if "splineFile" in self.fai_cfg:
                header_lst.append("SplineFile: %s" % self.fai_cfg["splineFile"])
            if "pixel1" in self.fai_cfg:
                header_lst.append("PixelSize: %.3e, %.3e m" % (self.fai_cfg["pixel1"], self.fai_cfg["pixel2"]))
            if "mask_file" in self.fai_cfg:
                header_lst.append("MaskFile: %s" % (self.fai_cfg["mask_file"]))
            header_lst.append("== pyFAI calibration ==")
            if "poni1" in self.fai_cfg:
                header_lst.append("PONI: %.3e, %.3e m" % (self.fai_cfg["poni1"], self.fai_cfg["poni2"]))
            if "dist" in self.fai_cfg:
                header_lst.append("Distance Sample to Detector: %s m" % self.fai_cfg["dist"])
            if "rot1" in self.fai_cfg:
                header_lst.append("Rotations: %.6f %.6f %.6f rad" % (self.fai_cfg["rot1"], self.fai_cfg["rot2"], self.fai_cfg["rot3"]))
            if "wavelength" in self.fai_cfg:
                header_lst.append("Wavelength: %s" % self.fai_cfg["wavelength"])
            if "dark_current" in self.fai_cfg:
                header_lst.append("Dark current: %s" % self.fai_cfg["dark_current"])
            if "flat_field" in self.fai_cfg:
                header_lst.append("Flat field: %s" % self.fai_cfg["flat_field"])
            if "polarization_factor" in self.fai_cfg:
                header_lst.append("Polarization factor: %s" % self.fai_cfg["polarization_factor"])
            header_lst.append("")
            # Column captions: with or without the sigma column.
            if "do_poisson" in self.fai_cfg:
                header_lst.append("%14s %14s %s" % (self.fai_cfg["unit"], "I", "sigma"))
            else:
                header_lst.append("%14s %14s" % (self.fai_cfg["unit"], "I"))
            self.header = os.linesep.join([""] + ["# " + i for i in header_lst] + [""])
        # Acquisition parameters may override the constructor defaults.
        self.prefix = lima_cfg.get("prefix", self.prefix)
        self.index_format = lima_cfg.get("index_format", self.index_format)
        self.start_index = lima_cfg.get("start_index", self.start_index)
        if not self.subdir:
            self.directory = lima_cfg.get("directory", self.directory)
        elif self.subdir.startswith("/"):
            self.directory = self.subdir
        else:
            self.directory = os.path.join(lima_cfg.get("directory", self.directory), self.subdir)
        if not os.path.exists(self.directory):
            logger.warning("Output directory: %s does not exist,creating it", self.directory)
            try:
                os.makedirs(self.directory)
            except Exception as error:
                # Best effort: a later open() will fail loudly if this did.
                logger.info("Problem while creating directory %s: %s", self.directory, error)

    def write(self, data, index=0):
        """Write one integrated pattern into its own numbered ASCII file.

        :param data: 2D array saved via numpy.savetxt
        :param index: frame index, combined with start_index and index_format
        """
        filename = os.path.join(self.directory, self.prefix + (self.index_format % (self.start_index + index)) + self.extension)
        if filename:
            with open(filename, "w") as f:
                f.write("# Processing time: %s%s" % (get_isotime(), self.header))
                numpy.savetxt(f, data)
class FabioWriter(Writer):
    """
    Image file writer based on FabIO

    TODO !!!
    """

    def __init__(self, filename=None):
        """
        :param filename: optional name of the output image file
        :raises RuntimeError: when the fabio module is not available
        """
        Writer.__init__(self, filename)
        self.header = None
        self.directory = None
        self.prefix = None
        self.index_format = "%04i"
        self.start_index = 0
        self.fabio_class = None
        # fabio is an optional dependency resolved at module import time.
        if fabio is None:
            raise RuntimeError("FabIO module is needed to save images")
| |
# -*- coding: utf-8 -*-
################################################################################
# childs.py - Teil von Kodi-Addon-ARDundZDF
# Rahmenmodul für Kinderprg div. Regionalsender von ARD und ZDF
#
# 02.11.2019 Migration Python3 Modul future
# 17.11.2019 Migration Python3 Modul kodi_six + manuelle Anpassungen
################################################################################
#
# <nr>0</nr> # Numerierung für Einzelupdate
# Stand: 08.10.2021
# Python3-Kompatibilität:
from __future__ import absolute_import # sucht erst top-level statt im akt. Verz.
from __future__ import division # // -> int, / -> float
from __future__ import print_function # PYTHON2-Statement -> Funktion
from kodi_six import xbmc, xbmcaddon, xbmcplugin, xbmcgui, xbmcvfs
# o. Auswirkung auf die unicode-Strings in PYTHON3:
from kodi_six.utils import py2_encode, py2_decode
import os, sys, subprocess
PYTHON2 = sys.version_info.major == 2
PYTHON3 = sys.version_info.major == 3
if PYTHON2:
from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, urlretrieve
from urllib2 import Request, urlopen, URLError
from urlparse import urljoin, urlparse, urlunparse, urlsplit, parse_qs
elif PYTHON3:
from urllib.parse import quote, unquote, quote_plus, unquote_plus, urlencode, urljoin, urlparse, urlunparse, urlsplit, parse_qs
from urllib.request import Request, urlopen, urlretrieve
from urllib.error import URLError
try: # https://github.com/xbmc/xbmc/pull/18345 (Matrix 19.0-alpha 2)
xbmc.translatePath = xbmcvfs.translatePath
except:
pass
import json
import os, sys
import ssl
import datetime, time
import re # u.a. Reguläre Ausdrücke
import string
import ardundzdf # -> ParseMasterM3u, transl_wtag, get_query
from resources.lib.util import *
# Globals
ADDON_ID = 'plugin.video.ardundzdf'
SETTINGS = xbmcaddon.Addon(id=ADDON_ID)
ADDON_NAME = SETTINGS.getAddonInfo('name')
SETTINGS_LOC = SETTINGS.getAddonInfo('profile')
ADDON_PATH = SETTINGS.getAddonInfo('path')  # base path of the addon
ADDON_VERSION = SETTINGS.getAddonInfo('version')
PLUGIN_URL = sys.argv[0]  # plugin://plugin.video.ardundzdf/
HANDLE = int(sys.argv[1])
FANART = xbmc.translatePath('special://home/addons/' + ADDON_ID + '/fanart.jpg')
ICON = xbmc.translatePath('special://home/addons/' + ADDON_ID + '/icon.png')
USERDATA = xbmc.translatePath("special://userdata")
ADDON_DATA = os.path.join("%sardundzdf_data") % USERDATA
# Kodi 19 (Matrix) moved the addon data below userdata/addon_data/<id>.
if check_AddonXml('"xbmc.python" version="3.0.0"'):
	ADDON_DATA = os.path.join("%s", "%s", "%s") % (USERDATA, "addon_data", ADDON_ID)
DICTSTORE = os.path.join(ADDON_DATA, "Dict")  # only DICTSTORE is used here
NAME = 'ARD und ZDF'

BASE_ZDF = 'http://www.zdf.de'
BASE_KIKA = 'http://www.kika.de'
BASE_TIVI = 'https://www.zdf.de/kinder'

# Icons
ICON = 'icon.png'  # ARD + ZDF
ICON_CHILDS = 'childs.png'
ICON_DIR_FOLDER = "Dir-folder.png"
ICON_MAIN_TVLIVE= 'tv-livestreams.png'
ICON_MEHR = "icon-mehr.png"
ICON_SEARCH = 'ard-suche.png'
ICON_ZDF_SEARCH = 'zdf-suche.png'

# GitHub icons fetched on demand (to save space in the addon); external use:
# ZDFRubriken (GIT_ZDFTIVI)
GIT_KIKA = "https://github.com/rols1/PluginPictures/blob/master/ARDundZDF/KIKA_tivi/tv-kika.png?raw=true"
GIT_AZ = "https://github.com/rols1/PluginPictures/blob/master/ARDundZDF/KIKA_tivi/icon-AZ.png?raw=true"
# for the single letters of A-Z see Tivi_AZ
GIT_CAL = "https://github.com/rols1/PluginPictures/blob/master/ARDundZDF/KIKA_tivi/icon-calendar.png?raw=true"
GIT_VIDEO = "https://github.com/rols1/PluginPictures/blob/master/ARDundZDF/KIKA_tivi/tv-kikaVideo.png?raw=true"
GIT_RADIO = "https://github.com/rols1/PluginPictures/blob/master/ARDundZDF/KIKA_tivi/radio-kiraka.png?raw=true"
GIT_KANINCHEN = "https://github.com/rols1/PluginPictures/blob/master/ARDundZDF/KIKA_tivi/tv-kikaninchen.png?raw=true"
GIT_KANINVIDEOS = "https://github.com/rols1/PluginPictures/blob/master/ARDundZDF/KIKA_tivi/tv-kikaninchenVideos.png?raw=true"
GIT_KRAMLIEDER = "https://github.com/rols1/PluginPictures/blob/master/ARDundZDF/KIKA_tivi/tv-kikaninchenKramLieder.png?raw=true"
GIT_KRAMSCHNIPP = "https://github.com/rols1/PluginPictures/blob/master/ARDundZDF/KIKA_tivi/tv-kikaninchenKramSchnipsel.png?raw=true"
GIT_ZDFTIVI = "https://github.com/rols1/PluginPictures/blob/master/ARDundZDF/KIKA_tivi/tv-zdftivi.png?raw=true"
GIT_TIVIHOME = "https://github.com/rols1/PluginPictures/blob/master/ARDundZDF/KIKA_tivi/zdftivi-home.png?raw=true"
GIT_KIR = "https://github.com/rols1/PluginPictures/blob/master/ARDundZDF/KIKA_tivi/kiraka.png?raw=true"
GIT_KIR_SHOWS = "https://github.com/rols1/PluginPictures/blob/master/ARDundZDF/KIKA_tivi/kiraka-shows.png?raw=true"
GIT_KIR_KLICK = "https://github.com/rols1/PluginPictures/blob/master/ARDundZDF/KIKA_tivi/klicker.png?raw=true"
GIT_DGS = "https://github.com/rols1/PluginPictures/blob/master/ARDundZDF/KIKA_tivi/tv-kikaDGS.png?raw=true"
GIT_AD = "https://github.com/rols1/PluginPictures/blob/master/ARDundZDF/KIKA_tivi/tv-kikaAD.png?raw=true"
GIT_ARD_KINDER = "https://github.com/rols1/PluginPictures/blob/master/ARDundZDF/KIKA_tivi/tv-ard_kinder-familie.png?raw=true"

KikaCacheTime = 1*86400  # addon cache for the A-Z pages: 1 day
# ----------------------------------------------------------------------
def Main_childs():
	"""Top-level menu of the children's programmes: KIKA, ZDFtivi and the
	ARD family page."""
	PLog('Main_childs:')

	list_item = home(xbmcgui.ListItem(), ID=NAME)  # home button entry

	# KIKA submenu
	addDir(li=list_item, label="KIKA", action="dirList",
		dirID="resources.lib.childs.Main_KIKA", fanart=R(ICON_CHILDS),
		thumb=GIT_KIKA, fparams="&fparams={'title': '%s'}" % "KIKA")

	# ZDFtivi submenu
	addDir(li=list_item, label="ZDFtivi für Kinder", action="dirList",
		dirID="resources.lib.childs.Main_TIVI", fanart=R(ICON_CHILDS),
		thumb=GIT_ZDFTIVI, fparams="&fparams={'title': '%s'}" % "tivi")

	# ARD family page, handled by the ARDnew start-page rubric function
	ard_title = "ARD - Kinder und Familie"
	ard_tag = (u"Märchen, Spielfilme, Serien, Wissen und Dokus - hier gibt's unterhaltsame und "
		u"spannende Videos für Kinder und die ganze Familie!")
	ard_path = "https://api.ardmediathek.de/page-gateway/pages/ard/editorial/kinderfamilie?embedded=true"
	ard_params = "&fparams={'path': '%s', 'title': '%s', 'widgetID': '', 'ID': '%s'}" %\
		(quote(ard_path), quote(ard_title), "Main_childs")
	addDir(li=list_item, label=ard_title, action="dirList",
		dirID="resources.lib.ARDnew.ARDStartRubrik", fanart=GIT_ARD_KINDER,
		thumb=GIT_ARD_KINDER, tagline=ard_tag, fparams=ard_params)

	xbmcplugin.endOfDirectory(HANDLE, cacheToDisc=True)
# ----------------------------------------------------------------------
def Main_KIKA(title):
	"""Menu for KIKA: search, live TV, KiRaKa radio, video listings (A-Z,
	most clicked), accessible videos (sign language / audio description)
	and the KiKANiNCHEN submenu.

	:param title: caption passed by the caller (not used further here)
	"""
	PLog('Main_KIKA:')
	li = xbmcgui.ListItem()
	li = home(li, ID='Kinderprogramme')  # home button entry

	title="Suche in KIKA"
	summ = "Suche Sendungen in KIKA"  # NOTE(review): unused - addDir gets no summary here
	fparams="&fparams={'query': '', 'title': '%s'}" % title
	addDir(li=li, label=title, action="dirList", dirID="resources.lib.childs.Kika_Search",
		fanart=GIT_KIKA, thumb=R(ICON_SEARCH), fparams=fparams)

	title='KIKA Live gucken'
	fparams="&fparams={}"
	addDir(li=li, label=title, action="dirList", dirID="resources.lib.childs.Kika_Live",
		fanart=GIT_KIKA, thumb=R(ICON_MAIN_TVLIVE), tagline='KIKA TV-Live', fparams=fparams)

	title=u'KiRaKa - Sendungen und Hörspiele'
	tag = "%s\n\nDer Kinderradiokanal des WDR" % title
	fparams="&fparams={}"
	addDir(li=li, label=title , action="dirList", dirID="resources.lib.childs.Kiraka",
		fanart=GIT_KIKA, thumb=GIT_RADIO, tagline=tag, fparams=fparams)

	title='Videos und Bilder (A-Z)'
	fparams="&fparams={}"
	addDir(li=li, label=title, action="dirList", dirID="resources.lib.childs.Kika_VideosBuendelAZ",
		fanart=GIT_KIKA, thumb=GIT_VIDEO, tagline=title, fparams=fparams)

	title='Die beliebtesten Videos (meist geklickt)'
	fparams="&fparams={}"
	addDir(li=li, label=title, action="dirList", dirID="resources.lib.childs.Kika_VideosBeliebt",
		fanart=GIT_KIKA, thumb=GIT_VIDEO, tagline=title, fparams=fparams)

	# accessible videos: sign language (DGS)
	title=u'Videos mit Gebärdensprache'
	path = "https://www.kika.de/videos/alle-dgs/videos-dgs-100.html"
	thumb = GIT_DGS
	path=py2_encode(path); title=py2_encode(title); thumb=py2_encode(thumb);
	fparams="&fparams={'path': '%s', 'title': '%s', 'thumb': '%s'}" %\
		(quote(path), quote(title), quote(thumb))
	addDir(li=li, label=title, action="dirList", dirID="resources.lib.childs.Kika_Barrierearm",
		fanart=GIT_KIKA, thumb=GIT_DGS, tagline=title, fparams=fparams)

	# accessible videos: audio description (AD)
	title=u'Videos als Hörfilme'
	path = "https://www.kika.de/videos/alle-ad/videos-ad-100.html"
	thumb = GIT_AD
	path=py2_encode(path); title=py2_encode(title); thumb=py2_encode(thumb);
	fparams="&fparams={'path': '%s', 'title': '%s', 'thumb': '%s'}" %\
		(quote(path), quote(title), quote(thumb))
	addDir(li=li, label=title, action="dirList", dirID="resources.lib.childs.Kika_Barrierearm",
		fanart=GIT_KIKA, thumb=GIT_AD, tagline=title, fparams=fparams)

	title='KiKANiNCHEN'
	fparams="&fparams={}"
	addDir(li=li, label=title, action="dirList", dirID="resources.lib.childs.Kikaninchen_Menu",
		fanart=GIT_KIKA, thumb=GIT_KANINCHEN, tagline='für Kinder 3-6 Jahre', fparams=fparams)
	xbmcplugin.endOfDirectory(HANDLE, cacheToDisc=True)
# ----------------------------------------------------------------------
def Main_TIVI(title):
	"""Menu for ZDFtivi: search, start page, last-7-days listing and A-Z.

	:param title: caption passed by the caller (not used further here)
	"""
	PLog('Main_TIVI:')
	li = home(xbmcgui.ListItem(), ID='Kinderprogramme')  # home button entry

	label = "Suche in ZDFtivi"
	summ = "Suche Videos in KIKA"  # unused in the original, kept for parity
	addDir(li=li, label=label, action="dirList",
		dirID="resources.lib.childs.Tivi_Search", fanart=GIT_ZDFTIVI,
		thumb=R(ICON_ZDF_SEARCH),
		fparams="&fparams={'query': '', 'title': '%s'}" % label)

	label = 'Startseite'
	addDir(li=li, label=label, action="dirList", dirID="ardundzdf.ZDFStart",
		fanart=GIT_ZDFTIVI, thumb=GIT_TIVIHOME, tagline=label,
		fparams="&fparams={'path': '%s', 'title': '%s'}" % (quote(BASE_TIVI), label))

	label = 'Sendungen der letzten 7 Tage'
	addDir(li=li, label=label, action="dirList",
		dirID="resources.lib.childs.Tivi_Woche", fanart=GIT_ZDFTIVI,
		thumb=GIT_CAL, tagline=label, fparams="&fparams={}")

	label = 'Sendungen A-Z | 0-9'
	addDir(li=li, label=label, action="dirList",
		dirID="resources.lib.childs.Tivi_AZ", fanart=GIT_ZDFTIVI,
		thumb=GIT_AZ, tagline=label, fparams="&fparams={}")

	xbmcplugin.endOfDirectory(HANDLE, cacheToDisc=True)
# ----------------------------------------------------------------------
def Kikaninchen_Menu():
	"""Submenu for KiKANiNCHEN (ages 3-6): videos, songs and sound snippets."""
	PLog('Kikaninchen_Menu')
	li = home(xbmcgui.ListItem(), ID='Kinderprogramme')  # home button entry

	# (label, target function, thumbnail) - all entries share fanart/tagline.
	entries = [
		('Kikaninchen Videos', "resources.lib.childs.Kikaninchen_Videoseite", GIT_KANINVIDEOS),
		('Kikaninchen Singen und Tanzen', "resources.lib.childs.KikaninchenLieder", GIT_KRAMLIEDER),
		('Kikaninchen Tonschnipsel', "resources.lib.childs.Tonschnipsel", GIT_KRAMSCHNIPP),
	]
	for label, target, thumb in entries:
		addDir(li=li, label=label, action="dirList", dirID=target,
			fanart=GIT_KANINCHEN, thumb=thumb,
			tagline='für Kinder 3-6 Jahre', fparams="&fparams={}")

	xbmcplugin.endOfDirectory(HANDLE, cacheToDisc=True)
# ----------------------------------------------------------------------
# The KIKA search at www.kika.de/suche/suche104.html?q= cannot be used here:
# the page is script-generated and, apart from the images, no content appears
# as plain text.
# Workaround: search across all bundle groups (Kika_VideosBuendelAZ) and match
# against the show titles. To avoid reloading all A-Z pages on every search,
# we keep them in the cache for one day. These cached pages are shared with
# Kika_VideosBuendelAZ.
#
def Kika_Search(query=None, title='Search', pagenr=''):
	"""Search KIKA shows by matching the query against the show titles of all
	bundle pages (pages are cached for KikaCacheTime; see comment above).

	:param query: search term; when empty the user is prompted via get_query
	:param title: menu caption (unused here)
	:param pagenr: pagination placeholder (unused here)
	"""
	PLog("Kika_Search:")
	if query == '':
		query = ardundzdf.get_query(channel='ARD')
	PLog(query)
	query_org = unquote(query)
	query_org = query_org.replace('+', ' ')  # undo quoting for the comparison
	if query == None or query.strip() == '':
		return ""

	# the home button is added inside Kika_VideosBuendelAZ
	li, HrefList = Kika_VideosBuendelAZ(getHrefList=True)
	PLog("HrefList: " + str(len(HrefList)))

	found_hrefs=[]
	for path in HrefList:
		fname = stringextract('allevideos-buendelgruppen100_', '.htm', path)
		page = Dict("load", fname, CacheTime=KikaCacheTime)
		if page == False:  # not cached or expired -> fetch the page
			page, msg = get_page(path=path)
			if page == '':  # no user dialog here, just log
				PLog("Fehler in Kika_Search: " + msg)
			else:
				Dict("store", fname, page)  # store in the addon cache

		pos = page.find("The bottom navigation")  # truncate: A-Z + most-clicked follow
		page = page[:pos]
		pageItems = blockextract('class="media mediaA">', page)
		PLog(len(pageItems))
		for s in pageItems:
			stitle = stringextract('class="linkAll" title="', '"', s)
			stitle = cleanhtml(stitle); stitle = unescape(stitle);
			# case-insensitive substring match of the query in the show title
			if up_low(query_org) in up_low(stitle):
				href = BASE_KIKA + stringextract('href="', '\"', s)
				if href in found_hrefs:  # avoid duplicates
					continue
				found_hrefs.append(href)
				img = stringextract('<noscript>', '</noscript>', s).strip()  # isolate image info
				img_alt = stringextract('alt="', '"', img)
				img_src = stringextract('src="', '"', img)
				if img_src.startswith('http') == False:
					img_src = BASE_KIKA + img_src
				stitle = repl_json_chars(stitle)
				img_alt = unescape(img_alt); img_alt = repl_json_chars(img_alt)
				PLog('Satz4:')
				PLog(query);PLog(href);PLog(stitle);PLog(img_alt);PLog(img_src)

				href=py2_encode(href); stitle=py2_encode(stitle); img_src=py2_encode(img_src);
				fparams="&fparams={'path': '%s', 'title': '%s', 'thumb': '%s'}" %\
					(quote(href), quote(stitle), quote(img_src))
				addDir(li=li, label=stitle, action="dirList", dirID="resources.lib.childs.Kika_Videos", fanart=img_src,
					thumb=img_src, fparams=fparams, tagline=img_alt)

	xbmcplugin.endOfDirectory(HANDLE, cacheToDisc=True)
# ----------------------------------------------------------------------
# 25.06.2020 Nutzung neue Funktion get_ZDFstreamlinks
def Kika_Live():
	"""Build the 'KIKA live' entry: resolve the KiKA live-stream URL and
	decorate it with the currently running programme from the EPG."""
	PLog('Kika_Live:')
	li = xbmcgui.ListItem()
	li = home(li, ID='Kinderprogramme')  # home button entry

	import resources.lib.EPG as EPG

	zdf_streamlinks = get_ZDFstreamlinks()
	# each line of zdf_streamlinks: "webtitle|href|thumb|tagline"
	m3u8link=''
	for line in zdf_streamlinks:
		PLog(line)
		webtitle, href, thumb, tagline = line.split('|')
		# e.g. "ZDFneo " in "ZDFneo Livestream" - station name with trailing blank!
		if up_low('KiKA') in up_low(webtitle):
			m3u8link = href
			break
	if m3u8link == '':
		PLog('%s: Streamlink fehlt' % 'KiKA ')

	ID = 'KIKA'
	title = 'KIKA TV-Live'
	Merk = ''
	rec = EPG.EPG(ID=ID, mode='OnlyNow')  # fetch EPG data - current show only
	PLog(rec)
	if len(rec) == 0:  # empty EPG record?
		title = 'EPG nicht gefunden'
		summ = ''
		tagline = ''
		# BUG FIX: img was never assigned on this path, so the
		# py2_encode(img) call below raised a NameError.
		img = R('tv-kika.png')
	else:
		href=rec[1]; img=rec[2]; sname=rec[3]; stime=rec[4]; summ=rec[5]; vonbis=rec[6]
		if img.find('http') == -1:  # promo images from today.de come without http://
			img = R('tv-kika.png')
		title = sname.replace('JETZT', ID)  # replace JETZT with the station name
		tagline = 'Zeit: ' + vonbis

	title = unescape(title); title = repl_json_chars(title)
	summ = unescape(summ); summ = repl_json_chars(summ)
	PLog("title: " + title); PLog(summ)

	title=py2_encode(title); m3u8link=py2_encode(m3u8link);
	img=py2_encode(img); summ=py2_encode(summ);
	fparams="&fparams={'path': '%s', 'title': '%s', 'thumb': '%s', 'descr': '%s', 'Merk': '%s'}" %\
		(quote(m3u8link), quote(title), quote(img), quote_plus(summ), Merk)
	addDir(li=li, label=title, action="dirList", dirID="ardundzdf.SenderLiveResolution", fanart=R('tv-EPG-all.png'),
		thumb=img, fparams=fparams, summary=summ, tagline=tagline)
	xbmcplugin.endOfDirectory(HANDLE, cacheToDisc=True)
# ----------------------------------------------------------------------
def Kiraka():
	"""Menu for KiRaKa (WDR children's radio): recorded shows, radio plays
	and children's news (Klicker)."""
	PLog('Kiraka:')
	li = home(xbmcgui.ListItem(), ID='Kinderprogramme')  # home button entry

	# (label, tagline, thumbnail, target function)
	entries = [
		(u'KiRaKa - Sendungen zum Nachhören',
		 u'Die Live-Sendung WDR 5 KiRaKa sieben Tage lang nachhören. Mit allem drum und dran.',
		 GIT_KIR, "resources.lib.childs.Kiraka_shows"),
		(u'KiRaKa - Hörspiele',
		 u'Alle KiRaKa - Kinderhörspiele',
		 GIT_KIR_SHOWS, "resources.lib.childs.Kiraka_pods"),
		(u'KiRaKa-Klicker - Nachrichten für Kinder',
		 u'aktuelle und speziell für Kinder aufbereitete Nachrichten von der Kiraka-Redaktion',
		 GIT_KIR_KLICK, "resources.lib.childs.Kiraka_klick"),
	]
	for label, tagline, thumb, target in entries:
		label = py2_encode(label)  # Python-2 compatibility, no-op on Python 3
		addDir(li=li, label=label, action="dirList", dirID=target, fanart=GIT_RADIO,
			thumb=thumb, fparams="&fparams={'title': '%s'}" % (quote(label)), tagline=tagline)

	xbmcplugin.endOfDirectory(HANDLE, cacheToDisc=True)
# ----------------------------------------------------------------------
def Kiraka_shows(title):
	"""List the recorded KiRaKa broadcasts (audio on demand).

	:param title: caption reused in the tagline of every entry
	"""
	PLog('Kiraka_shows:')
	li = xbmcgui.ListItem()
	li = home(li, ID='Kinderprogramme')  # home button entry

	path = "https://kinder.wdr.de/radio/kiraka/kiraka-on-demand-100.html"
	page, msg = get_page(path)
	if page == '':
		msg1 = "Fehler in Kiraka_shows"
		msg2 = msg
		MyDialog(msg1, msg2, '')
		return li
	PLog(len(page))

	# one JSON-LD AudioObject block per broadcast
	items = blockextract('"AudioObject",', page)
	for s in items:
		img = stringextract('url" : "', '"', s)
		stitle = stringextract('headline" : "', '"', s)
		webid = stringextract('"@id" : "', '"', s)  # url" : "https://www1.wdr.de/mediathek/..
		dur = stringextract('duration" : "', '"', s)  # ISO 8601 duration, e.g. PT55M38S
		dur = dur[2:5]  # cut out the minutes
		dur = dur.replace('M', ' min')
		stitle = py2_encode(stitle); dur = py2_encode(dur)
		tag = "%s | %s | %s" % (title, stitle, dur)
		Plot = tag
		PLog('Satz5:')
		PLog(img); PLog(stitle); PLog(webid); PLog(Plot);

		stitle=py2_encode(stitle); webid=py2_encode(webid);
		thumb=py2_encode(img); Plot=py2_encode(Plot);
		fparams="&fparams={'webid': '%s', 'title': '%s', 'thumb': '%s', 'Plot': '%s'}" % (quote(webid),
			quote(stitle), quote(thumb), quote_plus(Plot))
		addDir(li=li, label=stitle, action="dirList", dirID="resources.lib.childs.Kiraka_get_mp3", \
			fanart=GIT_KIR, thumb=thumb, fparams=fparams, tagline=tag, mediatype='music')
	xbmcplugin.endOfDirectory(HANDLE, cacheToDisc=True)
# ----------------------------------------------------------------------
# Kinderhörspiele
# die ermittelte webid wird in Kiraka_get_mp3 zur Web-Url. Auf der
# Webseite wird dann die mp3-Quelle ermittelt.
#
def Kiraka_pods(title):
PLog('Kiraka_pods:')
li = xbmcgui.ListItem()
li = home(li, ID='Kinderprogramme') # Home-Button
base = "https://kinder.wdr.de"
path = base + "/radio/kiraka/hoeren/hoerspiele/kinderhoerspiel-podcast-102.html"
page, msg = get_page(path)
if page == '':
msg1 = "Fehler in Kiraka_pods"
msg2 = msg
MyDialog(msg1, msg2, '')
return li
PLog(len(page))
items = blockextract('podcast-102-entry=', page)
for s in items:
img = stringextract('srcset="', '"', s)
if img.startswith('//'): # //www1.wdr.de/..
img = 'https:' + img
else: # /radio/kiraka/..
img = base + img
stitle = stringextract('mediaTitle">', '</', s)
webid = stringextract("'id':'", "'", s) # podcast-102-entry="{'id':'audio-wie-viele..
day = stringextract('mediaDate">', '</', s)
dur = stringextract('mediaDuration">', '</', s)
dur = cleanhtml(dur)
descr = stringextract('"text">', '</p', s)
descr = mystrip(descr); descr = unescape(descr)
tag = "%s | %s | %s | %s\n\n%s" % | |
"""
# Event source for MAGIC calibrated data files.
# Requires uproot package (https://github.com/scikit-hep/uproot).
"""
import re
import uproot
import logging
import scipy
import scipy.interpolate
import numpy as np
from decimal import Decimal
from enum import Enum, auto
from astropy.coordinates import Angle
from astropy import units as u
from astropy.time import Time
from ctapipe.io.eventsource import EventSource
from ctapipe.io.datalevels import DataLevel
from ctapipe.core import Container, Field
from ctapipe.core.traits import Bool
from ctapipe.coordinates import CameraFrame
from ctapipe.containers import (
ArrayEventContainer,
SimulatedEventContainer,
SimulatedShowerContainer,
SimulationConfigContainer,
PointingContainer,
TelescopePointingContainer,
TelescopeTriggerContainer,
MonitoringCameraContainer,
PedestalContainer,
)
from ctapipe.instrument import (
TelescopeDescription,
SubarrayDescription,
OpticsDescription,
CameraDescription,
CameraReadout,
)
from .version import __version__
from .constants import (
MC_STEREO_TRIGGER_PATTERN,
PEDESTAL_TRIGGER_PATTERN,
DATA_STEREO_TRIGGER_PATTERN
)
__all__ = ['MAGICEventSource', '__version__']
LOGGER = logging.getLogger(__name__)
# Unit-conversion constants used when decoding MAGIC timestamps and pointing.
degrees_per_hour = 15.0
seconds_per_hour = 3600.

msec2sec = 1e-3
nsec2sec = 1e-9

# MAGIC telescope positions in m wrt. to the center of CTA simulations
# MAGIC_TEL_POSITIONS = {
#    1: [-27.24, -146.66, 50.00] * u.m,
#    2: [-96.44, -96.77, 51.00] * u.m
# }

# MAGIC telescope positions in m wrt. to the center of MAGIC simulations, from
# CORSIKA and reflector input card
MAGIC_TEL_POSITIONS = {
    1: [31.80, -28.10, 0.00] * u.m,
    2: [-31.80, 28.10, 0.00] * u.m
}

# Magnetic field values at the MAGIC site (taken from CORSIKA input cards)
# Reference system is the CORSIKA one, where x-axis points to magnetic north
# i.e. B y-component is 0
# MAGIC_Bdec is the magnetic declination i.e. angle between magnetic and
# geographic north, negative if pointing westwards, positive if pointing
# eastwards
# MAGIC_Binc is the magnetic field inclination
MAGIC_Bx = u.Quantity(29.5, u.uT)
MAGIC_Bz = u.Quantity(23.0, u.uT)
MAGIC_Btot = np.sqrt(MAGIC_Bx**2+MAGIC_Bz**2)
MAGIC_Bdec = u.Quantity(-7.0, u.deg).to(u.rad)
MAGIC_Binc = u.Quantity(np.arctan2(-MAGIC_Bz.value, MAGIC_Bx.value), u.rad)

# MAGIC telescope description (optics and camera looked up by name in ctapipe)
OPTICS = OpticsDescription.from_name('MAGIC')
MAGICCAM = CameraDescription.from_name("MAGICCam")
# Reference pulse shapes (arbitrary amplitude units) for the two gain channels.
pulse_shape_lo_gain = np.array([0., 1., 2., 1., 0.])
pulse_shape_hi_gain = np.array([1., 2., 3., 2., 1.])
# BUG FIX: the low-gain shape was stacked twice, leaving pulse_shape_hi_gain
# unused (apparent copy-paste defect). Stack (hi, lo) so that channel 0
# carries the high-gain reference pulse. TODO confirm the intended channel
# ordering against the MAGIC readout convention.
pulse_shape = np.vstack((pulse_shape_hi_gain, pulse_shape_lo_gain))
# Readout description: MAGICCam samples at 1.64 GHz using the reference
# pulse shape defined above.
MAGICCAM.readout = CameraReadout(
    camera_name='MAGICCam',
    sampling_rate=u.Quantity(1.64, u.GHz),
    reference_pulse_shape=pulse_shape,
    reference_pulse_sample_width=u.Quantity(0.5, u.ns)
)

# Attach a CameraFrame so pixel coordinates can be transformed to the sky.
MAGICCAM.geometry.frame = CameraFrame(focal_length=OPTICS.equivalent_focal_length)
GEOM = MAGICCAM.geometry

# Both MAGIC telescopes (M1 and M2) share the same description.
MAGIC_TEL_DESCRIPTION = TelescopeDescription(
    name='MAGIC', tel_type='MAGIC', optics=OPTICS, camera=MAGICCAM)
MAGIC_TEL_DESCRIPTIONS = {1: MAGIC_TEL_DESCRIPTION, 2: MAGIC_TEL_DESCRIPTION}
class MARSDataLevel(Enum):
    """
    Enum of the different MARS Data Levels.

    Values are explicit (they match what ``auto()`` would assign) so the
    ordering of the MARS analysis chain is obvious at a glance.
    """

    CALIBRATED = 1  # calibrated images in charge and time (no waveforms)
    STAR = 2        # cleaned images, with Hillas parametrization
    SUPERSTAR = 3   # stereo parameters reconstructed
    MELIBEA = 4     # reconstruction of hadronness, event direction and energy
class MissingDriveReportError(Exception):
    """
    Raised when a subrun does not have drive (pointing) reports.
    """

    def __init__(self, message):
        # Forward the message to the base class so ``str(exc)`` and
        # ``exc.args`` behave like any other exception.
        super().__init__(message)
        self.message = message
class MAGICEventSource(EventSource):
    """
    EventSource for MAGIC calibrated data.

    This class operates with the MAGIC data subrun-wise for calibrated data.

    Attributes
    ----------
    current_run : MarsCalibratedRun
        Object containing the info needed to fill the ctapipe Containers
    datalevel : DataLevel
        Data level according to the definition in ctapipe
    file_ : uproot.ReadOnlyFile
        A ROOT file opened with uproot
    is_mc : bool
        Flag indicating real or simulated data
    mars_datalevel : int
        Data level according to MARS convention
    metadata : dict
        Dictionary containing metadata
    run_numbers : int
        Run number of the file
    simulation_config : SimulationConfigContainer
        Container filled with the information about the simulation
    telescope : int
        The number of the telescope
    use_pedestals : bool
        Flag indicating if pedestal events should be returned by the generator
    """

    # FIX: help-text typo "pedestal evens" -> "pedestal events".
    # NOTE(review): tagged config=False, so this trait cannot be set from a
    # config file -- confirm that is intentional.
    use_pedestals = Bool(
        default_value=False,
        help=(
            'If true, extract pedestal events instead of cosmic events.'
        ),
    ).tag(config=False)
    def __init__(self, input_url=None, config=None, parent=None, **kwargs):
        """
        Constructor: opens the ROOT file and caches run-level information.

        Parameters
        ----------
        input_url : str or Path, optional
            Mask/path of the calibrated data file(s) to read.
        config: traitlets.loader.Config
            Configuration specified by config file or cmdline arguments.
            Used to set traitlet values.
            Set to None if no configuration to pass.
        parent : ctapipe.core.Tool
            Tool executable that is calling this component.
            Passes the correct logger to the component.
            Set to None if no Tool to pass.
        kwargs: dict
            Additional parameters to be passed.
            NOTE: The file mask of the data to read can be passed with
            the 'input_url' parameter.
        """
        super().__init__(input_url=input_url, config=config, parent=parent, **kwargs)
        # Retrieving the list of run numbers corresponding to the data files
        self.file_ = uproot.open(self.input_url.expanduser())
        # run_info is (run_number, is_mc, telescope, mars_datalevel)
        run_info = self.parse_run_info()
        self.run_numbers = run_info[0]
        self.is_mc = run_info[1]
        self.telescope = run_info[2]
        self.mars_datalevel = run_info[3]
        self.metadata = self.parse_metadata_info()
        # Retrieving the data level (so far HARDCODED Sorcerer)
        self.datalevel = DataLevel.DL0
        if self.is_mc:
            self.simulation_config = self.parse_simulation_header()
        if not self.is_mc:
            # stereo / sum-trigger flags are only meaningful for real data
            self.is_stereo, self.is_sumt = self.parse_data_info()
        # # Setting up the current run with the first run present in the data
        # self.current_run = self._set_active_run(run_number=0)
        self.current_run = None
        self._subarray_info = SubarrayDescription(
            name='MAGIC',
            tel_positions=MAGIC_TEL_POSITIONS,
            tel_descriptions=MAGIC_TEL_DESCRIPTIONS
        )
        # Restrict the subarray when the user selected specific telescopes
        if self.allowed_tels:
            self._subarray_info = self._subarray_info.select_subarray(self.allowed_tels)
    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        Releases resources (e.g. open files) on exit from a ``with`` block.

        Parameters
        ----------
        exc_type : Exception
            Class of the exception
        exc_val : BaseException
            Type of the exception
        exc_tb : TracebackType
            The traceback
        """
        # Exception info is deliberately ignored: the file must be closed
        # regardless of how the context was exited.
        self.close()
    def close(self):
        """
        Closes the open ROOT file held in ``self.file_``.
        """
        self.file_.close()
@staticmethod
def is_compatible(file_path):
"""
This method checks if the specified file mask corresponds
to MAGIC data files. The result will be True only if all
the files are of ROOT format and contain an 'Events' tree.
Parameters
----------
file_path: str
Path to file
Returns
-------
bool:
True if the masked files are MAGIC data runs, False otherwise.
"""
is_magic_root_file = True
try:
with uproot.open(file_path) as input_data:
mandatory_trees = ['Events', 'RunHeaders', 'RunTails']
trees_in_file = [tree in input_data for tree in mandatory_trees]
if not all(trees_in_file):
is_magic_root_file = False
except ValueError:
# uproot raises ValueError if the file is not a ROOT file
is_magic_root_file = False
return is_magic_root_file
@staticmethod
def get_run_info_from_name(file_name):
"""
This internal method extracts the run number and
type (data/MC) from the specified file name.
Parameters
----------
file_name : str
A file name to process.
Returns
-------
run_number: int
The run number of the file.
is_mc: Bool
Flag to tag MC files
telescope: int
Number of the telescope
datalevel: MARSDataLevel
Data level according to MARS
Raises
------
IndexError
Description
"""
mask_data_calibrated = r"\d{6}_M(\d+)_(\d+)\.\d+_Y_.*"
mask_data_star = r"\d{6}_M(\d+)_(\d+)\.\d+_I_.*"
mask_data_superstar = r"\d{6}_(\d+)_S_.*"
mask_data_melibea = r"\d{6}_(\d+)_Q_.*"
mask_mc_calibrated = r"GA_M(\d)_za\d+to\d+_\d_(\d+)_Y_.*"
mask_mc_star = r"GA_M(\d)_za\d+to\d+_\d_(\d+)_I_.*"
mask_mc_superstar = r"GA_za\d+to\d+_\d_S_.*"
mask_mc_melibea = r"GA_za\d+to\d+_\d_Q_.*"
if re.findall(mask_data_calibrated, file_name):
parsed_info = re.findall(mask_data_calibrated, file_name)
telescope = int(parsed_info[0][0])
run_number = int(parsed_info[0][1])
datalevel = MARSDataLevel.CALIBRATED
is_mc = False
elif re.findall(mask_data_star, file_name):
parsed_info = re.findall(mask_data_star, file_name)
telescope = int(parsed_info[0][0])
run_number = int(parsed_info[0][1])
datalevel = MARSDataLevel.STAR
is_mc = False
elif re.findall(mask_data_superstar, file_name):
parsed_info = re.findall(mask_data_superstar, file_name)
telescope = None
run_number = int(parsed_info[0])
datalevel = MARSDataLevel.SUPERSTAR
is_mc = False
elif re.findall(mask_data_melibea, file_name):
parsed_info = re.findall(mask_data_melibea, file_name)
telescope = None
run_number = int(parsed_info[0])
datalevel = MARSDataLevel.MELIBEA
is_mc = False
elif re.findall(mask_mc_calibrated, file_name):
parsed_info = re.findall(mask_mc_calibrated, file_name)
telescope = int(parsed_info[0][0])
run_number = int(parsed_info[0][1])
datalevel = MARSDataLevel.CALIBRATED
is_mc = True
elif re.findall(mask_mc_star, file_name):
parsed_info = re.findall(mask_mc_star, file_name)
telescope = int(parsed_info[0][0])
run_number = int(parsed_info[0][1])
datalevel = MARSDataLevel.STAR
is_mc = True
elif re.findall(mask_mc_superstar, file_name):
parsed_info = re.findall(mask_mc_superstar, file_name)
telescope = None
run_number = None
datalevel = MARSDataLevel.SUPERSTAR
is_mc = True
elif re.findall(mask_mc_melibea, file_name):
parsed_info = re.findall(mask_mc_melibea, file_name)
telescope = None
run_number = None
datalevel = MARSDataLevel.MELIBEA
is_mc = True
else:
raise IndexError(
'Can not identify the run number and type (data/MC) of the file'
'{:s}'.format(file_name))
return run_number, is_mc, telescope, datalevel
def parse_run_info(self):
"""
Parses run info from the TTrees in the ROOT file
Returns
-------
run_number: int
The run number of the file
is_mc: Bool
Flag to tag MC files
telescope_number: int
Number of the telescope
datalevel: MARSDataLevel
Data level according to MARS
"""
runinfo_array_list = [
'MRawRunHeader.fRunNumber',
'MRawRunHeader.fRunType',
'MRawRunHeader.fTelescopeNumber',
]
run_info = self.file_['RunHeaders'].arrays(
runinfo_array_list, library="np")
run_number = int(run_info['MRawRunHeader.fRunNumber'][0])
run_type = int(run_info['MRawRunHeader.fRunType'][0])
telescope_number = int(run_info['MRawRunHeader.fTelescopeNumber'][0])
# a note about run numbers:
# mono data has run numbers starting with 1 or 2 (telescope dependent)
# stereo data has run numbers starting with 5
# if both telescopes are taking data with no L3,
# also in this case run number starts with 5 (e.g. muon runs)
# Here the data types (from MRawRunHeader.h)
# std data = 0
# pedestal = 1 (_P_)
# calibration = 2 (_C_)
# domino calibration = 3 (_L_)
# linearity calibration = 4 (_N_)
# point run = 7
# monteCarlo = 256
# none = 65535
mc_data_type = 256
if run_type == mc_data_type:
is_mc = True
else:
is_mc = False
events_tree = self.file_['Events']
melibea_trees = ['MHadronness', 'MStereoParDisp', 'MEnergyEst']
superstar_trees = ['MHillas_1', 'MHillas_2', 'MStereoPar']
star_trees = ['MHillas']
datalevel = MARSDataLevel.CALIBRATED
events_keys = events_tree.keys()
trees_in_file = [tree in events_keys for tree in melibea_trees]
if all(trees_in_file):
datalevel = MARSDataLevel.MELIBEA
trees_in_file = [tree in events_keys for tree | |
import json
from jsonargparse import ArgumentParser, ActionConfigFile
import yaml
from typing import List, Dict
import glob
import os
import pathlib
import pdb
import subprocess
import copy
from io import StringIO
from collections import defaultdict
import torch
from spacy.tokenizer import Tokenizer
from spacy.lang.en import English
from einops import rearrange
import logging
from tqdm import tqdm
import matplotlib
from matplotlib import pyplot as plt
import matplotlib.patches as patches
from matplotlib import gridspec
import numpy as np
import torch.autograd.profiler as profiler
from torch.nn import functional as F
from torch.optim.lr_scheduler import StepLR
from allennlp.training.scheduler import Scheduler
from allennlp.training.learning_rate_schedulers import NoamLR
import pandas as pd
from transformer import TransformerEncoder, ResidualTransformerEncoder, image_to_tiles, tiles_to_image
from metrics import MSEMetric, AccuracyMetric, F1Metric
from language_embedders import RandomEmbedder, GloveEmbedder, BERTEmbedder
from navigation_data import NavigationDatasetReader, NavigationImageTrajectory, configure_parser
from train_language_encoder import get_free_gpu, load_data, get_vocab, LanguageTrainer, FlatLanguageTrainer
from navigation_transformer import NavigationTransformerEncoder
from train_transformer import TransformerTrainer
logger = logging.getLogger(__name__)
class NavigationTransformerTrainer(TransformerTrainer):
    """Trainer for a transformer that predicts navigation-path pixels from
    an input image and a language command.

    Unlike the parent TransformerTrainer, batches are produced lazily by a
    NavigationDatasetReader (train_data/val_data are left empty), oversized
    reader batches are re-split to ``batch_size``, and predictions are
    scored with a patch-level F1 metric.
    """

    def __init__(self,
                 dataset_reader: NavigationDatasetReader,
                 encoder: TransformerEncoder,
                 optimizer: torch.optim.Optimizer,
                 scheduler: Scheduler,
                 num_epochs: int,
                 num_blocks: int,
                 device: torch.device,
                 checkpoint_dir: str,
                 num_models_to_keep: int,
                 generate_after_n: int,
                 resolution: int = 64,
                 patch_size: int = 8,
                 block_size: int = 4,
                 batch_size: int = 16,
                 output_type: str = "per-pixel",
                 checkpoint_every: int = 64,
                 validation_limit: int = 16,
                 depth: int = 7,
                 score_type: str = "acc",
                 best_epoch: int = -1,
                 seed: int = 12,
                 zero_weight: float = 0.05,
                 debug_image_top_k: int = None,
                 debug_image_threshold: float = None):
        # The parent receives empty train/val lists: iteration is driven by
        # the dataset reader in the overridden epoch methods below.
        super(NavigationTransformerTrainer, self).__init__(train_data=[],
                                                           val_data=[],
                                                           encoder=encoder,
                                                           optimizer=optimizer,
                                                           scheduler=scheduler,
                                                           num_epochs=num_epochs,
                                                           num_blocks=num_blocks,
                                                           device=device,
                                                           checkpoint_dir=checkpoint_dir,
                                                           num_models_to_keep=num_models_to_keep,
                                                           generate_after_n=generate_after_n,
                                                           score_type=score_type,
                                                           patch_size=patch_size,
                                                           block_size=block_size,
                                                           output_type=output_type,
                                                           resolution=resolution,
                                                           depth=depth,
                                                           best_epoch=best_epoch,
                                                           seed=seed,
                                                           zero_weight=zero_weight)
        self.f1_metric = F1Metric()
        self.dataset_reader = dataset_reader
        self.batch_size = batch_size
        self.checkpoint_every = checkpoint_every
        self.validation_limit = validation_limit
        # BUG FIX: both debug parameters default to None, so the original
        # bare comparisons ``debug_image_top_k < 0`` raised a TypeError
        # whenever the defaults were used. Negative values still mean
        # "filter disabled".
        if debug_image_top_k is not None and debug_image_top_k < 0:
            debug_image_top_k = None
        if debug_image_threshold is not None and debug_image_threshold < 0:
            debug_image_threshold = None
        self.debug_image_top_k = debug_image_top_k
        self.debug_image_threshold = debug_image_threshold

    def split_large_batch(self, batch):
        """Split a reader-sized batch dict into model-sized sub-batches.

        Every value in ``batch`` is sliced along its first (batch)
        dimension into chunks of at most ``self.batch_size``.
        """
        large_bsz = batch['path_state'].shape[0]
        small_batches = []
        for i in range(0, large_bsz, self.batch_size):
            small_batch = {k: v[i:i + self.batch_size] for k, v in batch.items()}
            small_batches.append(small_batch)
        return small_batches

    def validate_one_epoch(self, epoch, step, validation_limit):
        """Validate on up to ``validation_limit`` dev batches.

        Returns the mean next-position patch F1 over all sub-batches.
        """
        print(f"Validating epoch {epoch} step {step}...")
        total_next_acc = 0.0
        total = 0
        self.encoder.eval()
        for b, dev_batch_instance in enumerate(self.dataset_reader.read("dev", validation_limit)):
            actual_batches = self.split_large_batch(dev_batch_instance)
            for small_batch in actual_batches:
                score_dict = self.validate(small_batch, epoch, b, 0)
                total_next_acc += score_dict['next_f1']
                total += 1
        mean_next_acc = total_next_acc / total
        return mean_next_acc

    def evaluate(self):
        """Run a test-time pass over the dev split and report the mean F1."""
        total_acc = 0.0
        total = 0
        self.encoder.eval()
        for b, dev_batch_instance in tqdm(enumerate(self.dataset_reader.read("dev", self.validation_limit))):
            actual_batches = self.split_large_batch(dev_batch_instance)
            for small_batch in actual_batches:
                score_dict = self.validate(small_batch, 10, b, 0, self.debug_image_top_k, self.debug_image_threshold)
                total_acc += score_dict['next_f1']
                total += 1
        mean_acc = total_acc / total
        # NOTE(review): the printed "pixel acc" is actually the patch-level F1
        print(f"Test-time pixel acc {mean_acc * 100}")
        return mean_acc

    def train_and_validate_one_epoch(self, epoch):
        """Train over one full reader epoch, validating every
        ``checkpoint_every`` steps.

        Returns
        -------
        tuple
            (epoch score, -1.0); only score_type == "acc" is supported.
        """
        print(f"Training epoch {epoch}...")
        self.encoder.train()
        skipped = 0
        step = 0
        for b, batch_instance in enumerate(self.dataset_reader.read("train")):
            actual_batches = self.split_large_batch(batch_instance)
            for sb, small_batch in enumerate(actual_batches):
                is_best = False
                self.optimizer.zero_grad()
                outputs = self.encoder(small_batch)
                # the encoder returns None for malformed examples; skip them
                if outputs is None:
                    skipped += 1
                    continue
                loss = self.compute_patch_loss(small_batch, outputs, self.next_to_prev_weight)
                loss.backward()
                self.optimizer.step()
                it = (epoch + 1) * (step + 1)
                self.scheduler.step_batch(it)
                if (step + 1) % self.checkpoint_every == 0:
                    step_acc = self.validate_one_epoch(epoch, step, self.validation_limit)
                    print(f"Epoch {epoch} step {step} has next pixel F1 {step_acc * 100:.2f}")
                    if step_acc > self.best_score:
                        is_best = True
                        self.best_score = step_acc
                    self.save_model(f"{epoch}_{step}", is_best)
                step += 1
        print(f"skipped {skipped} examples")
        # final, larger validation pass for the epoch-level score
        epoch_acc = self.validate_one_epoch(epoch, step, 10 * self.validation_limit)
        print(f"Epoch {epoch} has next pixel F1 {epoch_acc * 100:.2f}")
        if self.score_type == "acc":
            # halved, presumably for parity with trainers that average two
            # scores (prev + next) -- TODO confirm
            return (epoch_acc) / 2, -1.0
        else:
            raise AssertionError(f"invalid score type {self.score_type}")

    def compute_patch_loss(self, inputs, outputs, next_to_prev_weight=(1.0, 1.0)):
        """
        Compute the weighted cross-entropy loss between predicted and gold
        path patches.

        ``next_to_prev_weight`` is currently unused; it is kept (as an
        immutable default) only for signature compatibility with callers.
        """
        bsz, w, h, __ = inputs['input_image'].shape
        pred_next_image = outputs["next_position"]
        # tile the gold path mask the same way the model tiles its input
        path_state = inputs['path_state'].reshape(bsz, 1, w, h).float()
        true_next_image = image_to_tiles(path_state, self.patch_size)
        # binarize patches
        next_sum_image = torch.sum(true_next_image, dim=2, keepdim=True)
        next_patches = torch.zeros_like(next_sum_image)
        # any patch that has a 1 pixel in it gets 1
        next_patches[next_sum_image != 0] = 1
        pred_next_image = pred_next_image.squeeze(-1)
        next_patches = next_patches.squeeze(-1).to(self.device).long()
        # the class dimension must come second for cross-entropy
        pred_next_image = rearrange(pred_next_image, 'b n c -> b c n')
        next_pixel_loss = self.weighted_xent_loss_fxn(pred_next_image, next_patches)
        total_loss = next_pixel_loss
        print(f"loss {total_loss.item()}")
        return total_loss

    def generate_debugging_image(self,
                                 true_img,
                                 path_state,
                                 pred_path,
                                 out_path,
                                 caption=None,
                                 top_k=None,
                                 threshold=None):
        """Save a 2x2 debug figure: caption, gold path, input image and
        predicted path probabilities.

        NOTE(review): the path images are assumed to be 512x512 pixels; the
        size is hard-coded below -- confirm it matches ``self.resolution``.
        """
        caption = self.wrap_caption(caption)
        fig, ax = plt.subplots(2, 2, figsize=(16, 16))
        # top-right panel: the (wrapped) language command
        text_ax = ax[0, 1]
        text_ax.axis([0, 1, 0, 1])
        text_ax.text(0.2, 0.02, caption, fontsize=12)
        text_ax.axis("off")
        props = dict(boxstyle='round',
                     facecolor='wheat', alpha=0.5)
        text_ax.text(0.05, 0.95, caption, wrap=True, fontsize=14,
                     verticalalignment='top', bbox=props)
        # bottom-left panel: the RGB input image (alpha channel dropped)
        img_ax = ax[1, 0]
        true_img = true_img.detach().cpu().numpy().astype(float)[:, :, 0:3]
        img_ax.imshow(true_img)
        # top-left panel: the gold path mask, tiled to 3 channels
        true_path = path_state.detach().numpy()
        true_path = np.tile(true_path.reshape(512, 512, 1), (1, 1, 3)).astype(float)
        true_ax = ax[0, 0]
        true_ax.imshow(true_path)
        # bottom-right panel: predicted foreground-class probabilities
        pred_path = torch.softmax(pred_path, dim=0)
        pred_path = pred_path[1, :, :]
        pred_path = pred_path.cpu().detach().numpy().reshape(512, 512, 1)
        if top_k is not None:
            # keep only the k most likely pixels, binarized to 1
            top_k_inds = np.argpartition(pred_path, -top_k, axis=None)[-top_k:]
            top_k_inds = np.unravel_index(top_k_inds, shape=(512, 512))
            pred_path[top_k_inds] = 1.1
            pred_path[pred_path < 1.0] = 0
            pred_path[top_k_inds] = 1.0
        elif threshold is not None:
            # zero out low-probability pixels
            pred_path[pred_path < threshold] = 0
        pred_path = np.tile(pred_path, (1, 1, 3)).astype(float)
        pred_ax = ax[1, 1]
        pred_ax.imshow(pred_path)
        file_path = f"{out_path}.png"
        print(f"saving to {file_path}")
        plt.savefig(file_path)
        plt.close()

    def validate(self, batch_instance, epoch_num, batch_num, instance_num, top_k=None, threshold=None):
        """Score one batch with the patch F1 metric and optionally dump
        debugging images.

        BUG FIX: ``top_k`` and ``threshold`` now default to None so the
        4-argument call in ``validate_one_epoch`` no longer raises a
        TypeError.
        """
        self.encoder.eval()
        outputs = self.encoder(batch_instance)
        next_position = outputs['next_position']
        # upsample per-patch predictions back to full image resolution
        next_position = tiles_to_image(next_position, self.patch_size, output_type="per-patch", upsample=True)
        # f1 metric
        next_p, next_r, next_f1 = self.f1_metric.compute_f1(batch_instance["path_state"].unsqueeze(-1), next_position)
        if epoch_num > self.generate_after_n:
            for i in range(outputs["next_position"].shape[0]):
                output_path = self.checkpoint_dir.joinpath(f"batch_{batch_num}").joinpath(f"instance_{i}")
                output_path.mkdir(parents=True, exist_ok=True)
                command = batch_instance["command"][i]
                command = [x for x in command if x != "<PAD>"]
                command = " ".join(command)
                image = batch_instance['input_image'][i]
                path_state = batch_instance["path_state"][i]
                pred_path = next_position[i]
                self.generate_debugging_image(image,
                                              path_state,
                                              pred_path,
                                              output_path,
                                              caption=command,
                                              top_k=top_k,
                                              threshold=threshold)
        return {"next_f1": next_f1}

    def compute_f1(self, true_pos, pred_pos):
        """Pixel-level precision, recall and F1 between the gold mask and
        the argmax of the predicted distribution."""
        eps = 1e-8
        values, pred_pixels = torch.max(pred_pos, dim=1)
        gold_pixels = true_pos
        pred_pixels = pred_pixels.unsqueeze(1)
        pred_pixels = pred_pixels.detach().cpu().float()
        gold_pixels = gold_pixels.detach().cpu().float()
        # confusion-matrix counts (locals renamed so the ``true_pos``
        # parameter is no longer shadowed; unused TN count removed)
        tp = torch.sum(pred_pixels * gold_pixels).item()
        fp = torch.sum(pred_pixels * (1 - gold_pixels)).item()
        fn = torch.sum((1 - pred_pixels) * gold_pixels).item()
        precision = tp / (tp + fp + eps)
        recall = tp / (tp + fn + eps)
        f1 = 2 * (precision * recall) / (precision + recall + eps)
        return precision, recall, f1
def main(args):
device = "cpu"
if args.cuda is not None:
free_gpu_id = get_free_gpu()
if free_gpu_id > -1:
device = f"cuda:{free_gpu_id}"
#device = "cuda:0"
device = torch.device(device)
print(f"On device {device}")
#test = torch.ones((1))
#test = test.to(device)
nlp = English()
tokenizer = Tokenizer(nlp.vocab)
dataset_reader = NavigationDatasetReader(dir = args.data_dir,
out_path = args.out_path,
path_width = args.path_width,
read_limit = args.read_limit,
batch_size = args.batch_size,
max_len = args.max_len,
tokenizer = tokenizer,
shuffle = args.shuffle,
overfit = args.overfit,
is_bert = "bert" in args.embedder)
checkpoint_dir = pathlib.Path(args.checkpoint_dir)
if not checkpoint_dir.exists():
checkpoint_dir.mkdir()
if not args.test:
with open(dataset_reader.path_dict['train'].joinpath("vocab.json")) as f1:
train_vocab = json.load(f1)
with open(checkpoint_dir.joinpath("vocab.json"), "w") as f1:
json.dump(list(train_vocab), f1)
else:
print(f"Reading vocab from {checkpoint_dir}")
with open(checkpoint_dir.joinpath("vocab.json")) as f1:
train_vocab = json.load(f1)
print(f"got data")
# construct the vocab and tokenizer
print(f"constructing model...")
# get the embedder from args
if args.embedder == "random":
embedder = RandomEmbedder(tokenizer, train_vocab, args.embedding_dim, trainable=True)
elif args.embedder == "glove":
embedder = GloveEmbedder(tokenizer, train_vocab, args.embedding_file, args.embedding_dim, trainable=True)
elif args.embedder.startswith("bert"):
embedder = BERTEmbedder(model_name = args.embedder, max_seq_len = args.max_len)
else:
raise NotImplementedError(f"No embedder {args.embedder}")
depth = 1
encoder_cls = NavigationTransformerEncoder
encoder_kwargs = dict(image_size = args.resolution,
patch_size = args.patch_size,
language_embedder = embedder,
n_layers = args.n_layers,
channels = args.channels,
n_heads = args.n_heads,
hidden_dim = args.hidden_dim,
ff_dim = args.ff_dim,
dropout = args.dropout,
embed_dropout = args.embed_dropout,
output_type = args.output_type,
positional_encoding_type = args.pos_encoding_type,
device = device,
log_weights = args.test,
locality_mask = args.locality_mask,
locality_neighborhood = args.locality_neighborhood,
init_scale = args.init_scale)
# Initialize encoder | |
"""show_interface.py
JunOS parsers for the following show commands:
* show interfaces terse
* show interfaces terse | match <interface>
* show interfaces terse {interface}
* show interfaces {interface} terse
* show interfaces descriptions
* show interfaces queue {interface}
* show interfaces policers {interface}
"""
# python
import re
# metaparser
from genie.metaparser import MetaParser
from pyats.utils.exceptions import SchemaError
from genie.metaparser.util.schemaengine import Schema, Any, Optional, Use, Or
# import parser utils
from genie.libs.parser.utils.common import Common
# =======================================================
# Schema for 'show interfaces terse [| match <interface>]
# =======================================================
class ShowInterfacesTerseSchema(MetaParser):
    """Schema for show interfaces terse [| match <interface>]

    Top-level keys are interface names; each maps to its admin/link state
    plus an optional per-protocol table of configured addresses.
    """
    schema = {
        # keyed by interface name, e.g. 'ge-0/0/0' or 'lo0.0'
        Any(): {
            'oper_status': str,
            Optional('link_state'): str,
            Optional('admin_state'): str,
            Optional('phys_address'): str,
            'enabled': bool,
            # protocol family: inet, inet6, tnp, multiservice, ...
            Optional('protocol'): {
                Any():{
                    # keyed by local address; holds local/remote endpoints
                    Optional(Any()): {
                        'local': str,
                        Optional('remote'): str,
                    },
                },
            },
        }
    }
# =======================================================
# Parser for 'show interfaces terse [| match <interface>]
# =======================================================
class ShowInterfacesTerse(ShowInterfacesTerseSchema):
    """ Parser for:
            - show interfaces terse
            - show interfaces {interface} terse
            - show interfaces terse {interface}
    """

    cli_command = [
        'show interfaces terse',
        'show interfaces {interface} terse'
    ]

    exclude = [
        'duration'
    ]

    def cli(self, interface=None, output=None):
        """Execute (or reuse) the command output and parse it.

        Parameters
        ----------
        interface : str, optional
            Restrict the command to a single interface.
        output : str, optional
            Pre-collected CLI output; when given, no command is executed.

        Returns
        -------
        dict
            Parsed data following ShowInterfacesTerseSchema.
        """
        # execute the command
        if output is None:
            if interface:
                cmd = self.cli_command[1]
                cmd = cmd.format(interface=interface)
            else:
                cmd = self.cli_command[0]
            out = self.device.execute(cmd)
        else:
            out = output

        ret_dict = {}

        # BUG FIX: the continuation fragments of p1/p2 were plain (non-raw)
        # string literals, so sequences like '\S', '\-' and '\>' were
        # invalid escape sequences (DeprecationWarning today, SyntaxError in
        # future Python versions). They are raw strings now; the compiled
        # patterns are unchanged.

        # Interface               Admin Link Proto    Local                 Remote
        # lo0.0                   up    up   inet     10.1.1.1            --> 0/0
        # em1.0                   up    up   inet     10.0.0.4/8
        # fxp0                    up    up
        p1 = re.compile(r'^(?P<interface>\S+) +(?P<admin_state>\w+) +(?P<link_state>\w+) *'
                        r'(?P<protocol>\S+)? *(?P<local>[\w\.\:\/]+)?( *'
                        r'[\-\>]+? *(?P<remote>[\w\.\:\/]+))?$')

        # 172.16.64.1/2
        # inet6    fe80::250:56ff:fe82:ba52/64
        #          2001:db8:8d82:0:a::4/64
        # tnp      0x4
        # 10.11.11.11 --> 0/0
        p2 = re.compile(r'^((?P<protocol>\S+) +)?(?P<local>((\d+\.[\d\.\/]+)|(\w+\:[\w\:\/]+)|(0x\d+))+)'
                        r' *(([\-\>]+) *(?P<remote>[\w\.\:\/]+))?$')

        # multiservice
        p3 = re.compile(r'^((?P<protocol>\S+))$')

        for line in out.splitlines():
            # normalize tab separators before matching
            line = line.replace('\t', ' ')
            line = line.strip()
            # skip an echoed command line
            if 'show interfaces terse' in line:
                continue

            # fxp0                    up    up
            # em1.0                   up    up   inet     10.0.0.4/8
            # lo0.0                   up    up   inet     10.1.1.1            --> 0/0
            m = p1.match(line)
            if m:
                groups = m.groupdict()
                interface = groups['interface']
                intf_dict = ret_dict.setdefault(interface, {})
                intf_dict.update({'admin_state': groups['admin_state'],
                                  'link_state': groups['link_state'],
                                  'oper_status': groups['link_state'],
                                  'enabled': 'up' in groups['admin_state']})
                if groups['protocol']:
                    protocol = groups['protocol']
                    pro_dict = intf_dict.setdefault('protocol', {}).setdefault(groups['protocol'], {})
                if groups['local']:
                    pro_dict = pro_dict.setdefault(groups['local'], {})
                    pro_dict['local'] = groups['local']
                    if groups['remote']:
                        pro_dict['remote'] = groups['remote']
                continue

            # 172.16.64.1/2
            # inet6    fe80::250:56ff:fe82:ba52/64
            #          2001:db8:8d82:0:a::4/64
            # tnp      0x4
            # 10.11.11.11 --> 0/0
            m = p2.match(line)
            if m:
                groups = m.groupdict()
                try:
                    # fall back to the protocol of the previous line when the
                    # current line carries only an address
                    protocol = groups['protocol'] or protocol
                except Exception:
                    # no protocol seen yet for this output -- skip the line
                    continue
                pro_dict = intf_dict.setdefault('protocol', {}).setdefault(protocol, {}).setdefault(groups['local'], {})
                pro_dict['local'] = groups['local']
                if groups['remote']:
                    pro_dict['remote'] = groups['remote']
                continue

            # multiservice
            m = p3.match(line)
            if m:
                groups = m.groupdict()
                protocol = m.groupdict()['protocol']
                pro_dict = intf_dict.setdefault('protocol', {}).setdefault(protocol, {})
                continue

        return ret_dict
class ShowInterfacesTerseMatch(ShowInterfacesTerse):
    """ Parser for:
            - show interfaces terse | match {interface}
    """

    cli_command = 'show interfaces terse | match {interface}'

    def cli(self, interface, output=None):
        # Collect from the device only when no pre-captured output was given,
        # then delegate the actual parsing to the parent class.
        if output is None:
            output = self.device.execute(self.cli_command.format(interface=interface))
        return super().cli(output=output)
class ShowInterfacesTerseInterface(ShowInterfacesTerse):
    """ Parser for:
            - show interfaces terse {interface}
    """

    cli_command = 'show interfaces terse {interface}'

    def cli(self, interface, output=None):
        # Collect from the device only when no pre-captured output was given,
        # then delegate the actual parsing to the parent class.
        if output is None:
            output = self.device.execute(self.cli_command.format(interface=interface))
        return super().cli(output=output)
class ShowInterfacesDescriptionsSchema(MetaParser):
    """ Schema for:
            * show interfaces descriptions
    """

    def validate_physical_interface_list(value):
        # Each list entry must carry the four mandatory fields.
        if not isinstance(value, list):
            raise SchemaError('physical-interface is not a list')
        entry_schema = Schema({
            "admin-status": str,
            "description": str,
            "name": str,
            "oper-status": str,
        })
        for entry in value:
            entry_schema.validate(entry)
        return value

    schema = {
        "interface-information": {
            "physical-interface": Use(validate_physical_interface_list)
        }
    }
class ShowInterfacesDescriptions(ShowInterfacesDescriptionsSchema):
    """ Parser for:
            * show interfaces descriptions
    """

    cli_command = 'show interfaces descriptions'

    def cli(self, output=None):
        # Only run the command when no (non-empty) output was supplied.
        out = output if output else self.device.execute(self.cli_command)

        ret_dict = {}

        # Interface       Admin Link Description
        header_re = re.compile(r'^Interface +Admin +Link +Description$')

        # ge-0/0/0        up    up   none/100G/in/hktGCS002_ge-0/0/0
        entry_re = re.compile(r'^(?P<name>\S+) +(?P<admin_status>\S+) +(?P<oper_status>\S+) +(?P<description>\S+)$')

        for line in out.splitlines():
            line = line.strip()

            # skip the column-header line
            if header_re.match(line):
                continue

            # ge-0/0/0        up    up   none/100G/in/hktGCS002_ge-0/0/0
            matched = entry_re.match(line)
            if matched:
                entries = ret_dict.setdefault("interface-information", {}) \
                                  .setdefault("physical-interface", [])
                # schema keys use '-' while regex group names use '_'
                entries.append({key.replace('_', '-'): val
                                for key, val in matched.groupdict().items()})
                continue

        return ret_dict
class ShowInterfacesSchema(MetaParser):
""" Parser for:
'show interfaces'
"""
# schema = {
# Optional("@xmlns:junos"): str,
# "interface-information": {
# Optional("@junos:style"): str,
# Optional("@xmlns"): str,
# "physical-interface": [
# {
# "active-alarms": {
# "interface-alarms": {
# "alarm-not-present": str,
# "ethernet-alarm-link-down": str
# }
# },
# "active-defects": {
# "interface-alarms": {
# "alarm-not-present": str,
# "ethernet-alarm-link-down": str
# }
# },
# "admin-status": {
# "#text": str,
# Optional("@junos:format"): str
# },
# "bpdu-error": str,
# "clocking": str,
# "current-physical-address": str,
# "description": str,
# "eth-switch-error": str,
# "ethernet-fec-mode": {
# Optional("@junos:style"): str,
# "enabled_fec_mode": str
# },
# "ethernet-fec-statistics": {
# Optional("@junos:style"): str,
# "fec_ccw_count": str,
# "fec_ccw_error_rate": str,
# "fec_nccw_count": str,
# "fec_nccw_error_rate": str
# },
# "ethernet-pcs-statistics": {
# Optional("@junos:style"): str,
# "bit-error-seconds": str,
# "errored-blocks-seconds": str
# },
# "hardware-physical-address": str,
# "if-auto-negotiation": str,
# "if-config-flags": str,
# "if-device-flags": {
# "ifdf-present": str,
# "ifdf-running": str
# },
# "if-flow-control": str,
# "if-media-flags": {
# "ifmf-none": str
# },
# "if-remote-fault": str,
# "if-type": str,
# "ifd-specific-config-flags": {
# "internal-flags": str
# },
# "interface-flapped": {
# "#text": str,
# Optional("@junos:seconds"): str
# },
# "interface-transmit-statistics": str,
# "l2pt-error": str,
# "ld-pdu-error": str,
# "link-level-type": str,
# "link-type": str,
# "local-index": str,
# "logical-interface": {
# "address-family": [
# {
# "address-family-flags": {
# "ifff-is-primary": str,
# "ifff-no-redirects": str,
# "ifff-none": str,
# "ifff-sendbcast-pkt-to-re": str,
# "internal-flags": str
# },
# "address-family-name": str,
# "interface-address": {
# "ifa-broadcast": str,
# "ifa-destination": str,
# "ifa-flags": {
# "ifaf-current-preferred": str,
# "ifaf-current-primary": str
# },
# "ifa-local": str
# },
# "intf-curr-cnt": str,
# "intf-dropcnt": str,
# "intf-unresolved-cnt": str,
# "max-local-cache": str,
# "maximum-labels": str,
# "mtu": str,
# "new-hold-limit": str
# }
# ],
# "encapsulation": str,
# "filter-information": str,
# "if-config-flags": {
# "iff-snmp-traps": str,
# "iff-up": str,
# "internal-flags": str
# },
# "local-index": str,
# "logical-interface-bandwidth": str,
# "name": str,
# "policer-overhead": str,
# "snmp-index": str,
# "traffic-statistics": {
# Optional("@junos:style"): str,
# "input-packets": str,
# "output-packets": str
# }
# },
# "loopback": str,
# "mru": str,
# "mtu": str,
# "name": str,
# "oper-status": str,
# "pad-to-minimum-frame-size": str,
# "physical-interface-cos-information": {
# "physical-interface-cos-hw-max-queues": str,
# "physical-interface-cos-use-max-queues": str
# },
# "snmp-index": str,
# "sonet-mode": str,
# "source-filtering": str,
# "speed": str,
# "traffic-statistics": {
# Optional("@junos:style"): str,
# "input-bps": str,
# "input-packets": str,
# "input-pps": str,
# "output-bps": str,
# "output-packets": str,
# "output-pps": str
# }
# }
# ]
# }
# }
def verify_physical_interface_list(value):
# Pass physical-interface list of dict in value
if not isinstance(value, list):
raise SchemaError('physical interface is not a list')
def verify_logical_interface_list(value):
# Pass address-family list of dict in value
if not isinstance(value, list):
raise SchemaError('logical-interface is not a list')
def verify_address_family_list(value):
# Pass address-family list of dict in value
if not isinstance(value, list):
raise SchemaError('address-family is not a list')
def verify_interface_address_list(value):
# Pass physical-interface list of dict in value
if not isinstance(value, list) and not isinstance(value, dict):
raise SchemaError('interface-address is not a list/dict')
interface_address_schema = Schema({
Optional("ifa-broadcast"): str,
Optional("ifa-destination"): str,
"ifa-flags": {
Optional("ifaf-current-preferred"): bool,
Optional("ifaf-current-primary"): bool,
Optional("ifaf-is-primary"): bool,
Optional("ifaf-is-preferred"): bool,
Optional("ifaf-kernel"): bool,
Optional("ifaf-preferred"): bool,
Optional("ifaf-primary"): bool,
Optional("ifaf-is-default"): bool,
Optional("ifaf-none"): bool,
Optional("ifaf-dest-route-down"): bool,
},
Optional("ifa-local"): str
})
# Validate each dictionary in list
if isinstance(value, dict):
value = [value]
for item in value:
interface_address_schema.validate(item)
return value
af_schema = Schema({
Optional("address-family-flags"): {
Optional("ifff-is-primary"): bool,
Optional("ifff-no-redirects"): bool,
Optional("ifff-none"): bool,
Optional("ifff-sendbcast-pkt-to-re"): bool,
Optional("internal-flags"): bool,
Optional("ifff-primary"): bool,
Optional("ifff-receive-ttl-exceeded"): bool,
Optional("ifff-receive-options"): bool,
Optional("ifff-encapsulation"): str,
},
"address-family-name": str,
Optional("interface-address"): Use(verify_interface_address_list),
Optional("intf-curr-cnt"): str,
Optional("intf-dropcnt"): str,
Optional("intf-unresolved-cnt"): str,
Optional("generation"): str,
Optional("route-table"): str,
Optional("max-local-cache"): str,
Optional("maximum-labels"): str,
"mtu": str,
Optional("new-hold-limit"): str
})
# Validate each dictionary in list
for item in value:
af_schema.validate(item)
return value
l_i_schema = Schema({
Optional("address-family"): Use(verify_address_family_list),
Optional("encapsulation"): str,
Optional("filter-information"): str,
"if-config-flags": {
"iff-snmp-traps": bool,
"iff-up": bool,
Optional("internal-flags"): str
},
"local-index": str,
Optional("logical-interface-bandwidth"): str,
"name": str,
Optional("policer-overhead"): str,
Optional("snmp-index"): str,
Optional("traffic-statistics"): {
Optional("@junos:style"): str,
"input-packets": str,
Optional("input-bytes"): str,
"output-packets": | |
# laser/misc.py
import numpy as np
from scipy.interpolate import interp1d, interp2d
from scipy.optimize import curve_fit
import matplotlib.image as mpimg
def get_moments(image):
    """
    Return the centroid and statistical waist of an intensity distribution.

    Parameters:
    -----------
    image: 2D numpy array
        Intensity distribution sampled on a pixel grid.

    Returns:
    --------
    (cx, cy, wx, wy): centroid coordinates in pixels and twice the RMS
    width along each axis.
    """
    rows, cols = image.shape
    grid_x, grid_y = np.meshgrid(np.arange(cols), np.arange(rows))
    # Zeroth moment: total intensity, used to normalise the others.
    total = np.sum(image)
    # First moments give the centre of mass.
    mean_x = np.sum(grid_x * image) / total
    mean_y = np.sum(grid_y * image) / total
    # Second centred moments give the variances along each axis.
    var_x = np.sum((grid_x - mean_x) ** 2 * image) / total
    var_y = np.sum((grid_y - mean_y) ** 2 * image) / total
    return mean_x, mean_y, 2 * np.sqrt(var_x), 2 * np.sqrt(var_y)
def get_encircled_energy(image, center="centroid"):
    """
    Return radius samples and the fraction of total energy enclosed within them.

    Parameters
    ----------
    image: 2D numpy array
        Intensity distribution
    center: {"centroid", "peak"} or tuple, optional
        Point from which radii are measured: the image centroid, the
        intensity peak, or an explicit (x, y) pair.
    """
    # Resolve the reference point.
    if center == "centroid":
        cx, cy = get_moments(image)[:2]
    elif center == "peak":
        cy, cx = np.unravel_index(np.argmax(image), image.shape)
    else:
        cx, cy = center[0], center[1]
    # Distance of every pixel from the reference point.
    ny, nx = image.shape
    xgrid, ygrid = np.meshgrid(np.arange(nx) - cx, np.arange(ny) - cy)
    radius, _ = cart2pol(xgrid, ygrid)
    # Accumulate intensity in order of increasing radius.
    order = np.argsort(radius, axis=None)
    radii = radius.ravel()[order]
    energy = np.cumsum(image.ravel()[order])
    # Prepend the origin and normalise the energy to a fraction of the total.
    energy = np.insert(energy, 0, 0.0) / np.sum(image)
    radii = np.insert(radii, 0, 0.0)
    return radii, energy
def get_fwhm(intensity, interpolation_factor=1, kind='cubic'):
    """
    Get the Full Width at Half Maximum of the 1D intensity distribution

    Parameters
    ----------
    intensity: 1D numpy array
        intensity distribution
    interpolation_factor: int, optional
        Interpolate the data for a more accurate calculation
    kind: str, optional
        Interpolation kind forwarded to scipy.interpolate.interp1d

    Returns
    -------
    float: width, on the interpolated axis, of the region whose value is at
    least half of the maximum.
    """
    position = np.arange(intensity.size)
    pos_i = np.linspace(np.min(position), np.max(position),
                        interpolation_factor * position.size)
    inten_i = interp1d(position, intensity, kind=kind)
    values = inten_i(pos_i)
    idx = (values >= np.max(values) * 0.5).nonzero()[0]
    # Take one extra sample on the right so the width spans the full
    # half-maximum region, but clamp to the last valid index: the original
    # `idx[-1] + 1` raised IndexError when the region touched the array end.
    right = min(idx[-1] + 1, pos_i.size - 1)
    return pos_i[right] - pos_i[idx[0]]
def gauss2D(x, y, fwhmx, fwhmy, x0=0, y0=0, offset=0, order=1, int_FWHM=True):
    """
    Evaluate a (super-)Gaussian 2D beam profile.

    Parameters
    ----------
    x: float 2D np.array
        Horizontal axis of the Gaussian
    y: float 2D np.array
        Vertical axis of the Gaussian
    fwhmx: float
        Horizontal Full Width at Half Maximum
    fwhmy: float
        Vertical Full Width at Half Maximum
    x0: float, optional
        Horizontal center position of the Gaussian
    y0: float, optional
        Vertical center position of the Gaussian
    offset: float, optional
        Amplitude offset of the Gaussian
    order: int, optional
        order of the super-Gaussian function.
        Defined as: exp( - ( x**2 + y**2 )**order )
    int_FWHM: boolean, optional
        If True, the FWHM is the FWHM of the square of the Gaussian (intensity).
        If False, it is the FWHM of the Gaussian directly (electric field).
    """
    # Intensity FWHM: halve the exponent so |E|**2 has the requested width.
    exponent_scale = 0.5 if int_FWHM else 1.0
    radial = (2 * (x - x0) / fwhmx) ** 2 + (2 * (y - y0) / fwhmy) ** 2
    return np.exp(-np.log(2) * exponent_scale * radial ** order) + offset
def gauss1D(x, fwhm, x0=0, offset=0, order=1, int_FWHM=True):
    """
    Evaluate a (super-)Gaussian 1D beam. Identical to laser.misc.gauss2D but in 1D.

    Parameters
    ----------
    x: float 1D np.array
        Axis of the Gaussian
    fwhm: float
        Full Width at Half Maximum
    x0: float, optional
        Center position of the Gaussian
    offset: float, optional
        Amplitude offset of the Gaussian
    order: int, optional
        order of the super-Gaussian function.
        Defined as: exp( - ( x**2 )**order )
    int_FWHM: boolean, optional
        If True, the FWHM is the FWHM of the square of the Gaussian (intensity).
        If False, it is the FWHM of the Gaussian directly (electric field).
    """
    # Intensity FWHM: halve the exponent so |E|**2 has the requested width.
    exponent_scale = 0.5 if int_FWHM else 1.0
    radial = (2 * (x - x0) / fwhm) ** 2
    return np.exp(-np.log(2) * exponent_scale * radial ** order) + offset
def cart2pol(x, y):
    """Convert cartesian (x, y) to polar (radius, angle) coordinates."""
    z = x + 1j * y
    return np.abs(z), np.angle(z)
def pol2cart(r, theta):
    """Convert polar (radius, angle) to cartesian (x, y) coodinates."""
    z = r * np.exp(1j * theta)
    return np.real(z), np.imag(z)
def array_trim(ar):
    """Drop every all-zero row and column of a 2D map, returning a copy."""
    keep_cols = ar.any(axis=0)  # columns holding at least one non-zero
    keep_rows = ar.any(axis=1)  # rows holding at least one non-zero
    return ar[np.ix_(keep_rows, keep_cols)]
def vect(N):
    """Return an N-point array running from -0.5 to 0.5 inclusive."""
    samples = np.linspace(0, N, num=N)
    return samples / N - 0.5
def norm(a):
    """Scale an array so that its largest absolute value is 1."""
    peak = np.max(np.abs(a))
    return a / peak
def text_progress_bar(iteration, num_iteration, max_char=50):
    """Print a carriage-returned text progress bar; returns None."""
    filled = int(np.floor(iteration / num_iteration * max_char) + 1)
    empty = max_char - filled - 1
    percent = (iteration + 1) / num_iteration * 100
    # '\r' rewinds the cursor so successive calls redraw the same line.
    return print('|' * (filled) + '.' * (empty) + ' %.1f %%' % percent, end='\r')
def waist_from_nf(radius, angle, wavelength):
    """
    Calculate the Gaussian beam waist parameters from a near field radius
    and divergence.

    Returns (w0, zr, z0): the waist radius, the Rayleigh range and the
    waist position along the propagation axis.
    """
    tan_div = np.tan(angle)
    # Intermediate term comparing the wavelength to the measured divergence.
    ratio = 2 * wavelength / (radius * np.pi * tan_div)
    w0 = radius * np.sqrt((1 - np.sqrt(1 - ratio ** 2)) / 2)
    rayleigh = w0 ** 2 * np.pi / wavelength
    position = -radius / tan_div
    return w0, rayleigh, position
def rolling_window(a, window):
    """
    Return a strided view stacking every length-`window` slice of the last
    axis, suitable for computing rolling statistics.
    """
    view_shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
    # Reuse the last stride so consecutive windows overlap without copying.
    view_strides = a.strides + (a.strides[-1],)
    return np.lib.stride_tricks.as_strided(a, shape=view_shape, strides=view_strides)
def rolling_mean(a, window):
    """
    Compute the rolling mean (NaN-aware) over a sliding window.
    """
    windows = rolling_window(a, window)
    return np.nanmean(windows, axis=-1)
def rolling_std(a, window):
    """
    Compute the rolling standard deviation (NaN-aware) over a sliding window.
    """
    windows = rolling_window(a, window)
    return np.nanstd(windows, axis=-1)
def moving_average(a, window):
    """
    Very fast moving average computed from a cumulative-sum difference.
    """
    csum = np.cumsum(a, dtype=float)
    # The difference of cumulative sums yields each window's total.
    csum[window:] = csum[window:] - csum[:-window]
    return csum[window - 1:] / window
def add_noise(image, density=None, amplitude=1, kind='quintic', seed=None):
    """
    Add multiplicative noise to a 2D numpy array. If "density" is specified, the noise is interpolated to have smooth variations.

    Parameters
    ----------
    image: 2D numpy.array
        Image on which the noise should be added
    density: int, 2-tuple, optional
        Noise density, i.e. the number of random nodes along (x, y).
        If equal to the image size, equivalent to "None"
    amplitude: float, optional
        Amplitude of the noise. If "1", image is modulated by +- 100%
    kind: {'linear', 'cubic', 'quintic'}
        Type of 2D-interpolation ('cubic'/'quintic' need at least 4/6 nodes
        per axis). 'linear' can be used but it is pretty ugly.
    seed: int, optional
        Seed for random number generation

    Returns
    -------
    2D numpy.array with the same shape and total intensity as `image`.
    """
    ny, nx = image.shape
    if density is None:
        density = (nx, ny)
    try:
        dx = density[0]
        dy = density[1]
    except TypeError:
        # A scalar density applies to both axes.
        dx = density
        dy = density
    np.random.seed(seed)
    noise_raw = np.random.rand(int(dy), int(dx))
    x_raw = np.arange(int(dx))
    y_raw = np.arange(int(dy))
    # interp2d was removed in SciPy 1.14; RegularGridInterpolator is the
    # supported replacement and accepts the same method names.
    from scipy.interpolate import RegularGridInterpolator
    noisefunc = RegularGridInterpolator((y_raw, x_raw), noise_raw, method=kind)
    x = np.linspace(np.min(x_raw), np.max(x_raw), nx)
    y = np.linspace(np.min(y_raw), np.max(y_raw), ny)
    X, Y = np.meshgrid(x, y)
    noise = noisefunc(np.stack([Y, X], axis=-1))
    # Rescale the interpolated noise to span exactly [-1, 1].
    noise = (noise - np.min(noise)) / np.ptp(noise) * 2 - 1
    image_noise = image * (1 + amplitude * noise) / (1 + amplitude)
    # Renormalise so the total intensity of the input image is preserved.
    image_noise *= np.sum(image) / np.sum(image_noise)
    return image_noise
def RGB_image_to_grayscale(image_path, reverse_scale=True, crop=None, downsample=None):
    """
    Convert an RGB image file to CIE lightness grayscale

    Parameters:
    ===========
    image_path: str
        location of the image to import and convert to greyscale
    reverse_scale: boolean, optional
        choose to flip the lightness scale or not. Stays between 0 and 1
    crop: None or 4-tuple, optional
        (row_start, row_stop, col_start, col_stop) limits to crop the image
    downsample: None or int, optional
        Downsample the data by the given amount. Currently the downsampling is
        done by selecting data with a period given by this parameter
    """
    # NOTE(review): mpimg.imread returns floats already in [0, 1] for PNG
    # files; the /255 assumes an 8-bit integer source — confirm the format.
    rgb = np.float64(mpimg.imread(image_path)) / 255
    if crop is not None:
        rgb = rgb[crop[0]:crop[1], crop[2]:crop[3]]
    if downsample is not None:
        rgb = rgb[::downsample, ::downsample]
    # Linear RGB -> CIE XYZ conversion matrix, applied per pixel.
    rgb_to_xyz = np.array([[0.412453, 0.357580, 0.180423],
                           [0.212671, 0.715160, 0.072169],
                           [0.019334, 0.119193, 0.950227]])
    xyz = (rgb_to_xyz[None, None, :] @ rgb[:, :, :, None]).squeeze()
    # Piecewise CIE L* function applied to the Y (second) channel.
    luminance = xyz[:, :, 1]
    lightness = np.zeros_like(luminance)
    above = luminance > (6 / 29) ** 3
    lightness[above] = 116 * luminance[above] ** (1 / 3) - 16
    lightness[~above] = (29 / 3) ** 3 * luminance[~above]
    lightness /= 100
    if reverse_scale:
        lightness = 1 - lightness
    return lightness
def norm_minmax(a):
    """
    Normalize the data by setting the minimum at 0 and the maximum at 1.

    Parameters:
    ===========
    a: numpy.array
        Data to normalize
    """
    lo, hi = a.min(), a.max()
    return (a - lo) / (hi - lo)
def get_ellipse_moments(image, dx=1, dy=1, cut=None):
"""
Compute the moments of the beam profile and give the ellipse parameters.
Parameters:
===========
image: 2D numpy.array
Intensity profile of the data
dx: float, optional
Step of the horizontal axis. Defaults to 1
dy: float, optional
Step of the vertical axis. Defaults to 1
cut: None or float, optional
Threshold below which the data is ignored
Outputs:
========
cx: float
Horizontal position of the center of mass
cy: float
Vertical position of the center of mass
rx: | |
<filename>src/main/python/model/filter.py<gh_stars>0
import json
import logging
from collections import defaultdict
from collections.abc import Sequence
from typing import Optional, Type, Tuple, Any, List
from uuid import uuid4
import math
import qtawesome as qta
from qtpy import QtCore
from qtpy.QtCore import QAbstractTableModel, QModelIndex, QVariant, Qt, QTimer
from qtpy.QtGui import QIcon
from qtpy.QtWidgets import QDialog, QFileDialog, QMessageBox, QHeaderView, QTableView, QWidget
from model.iir import FilterType, LowShelf, HighShelf, PeakingEQ, SecondOrder_LowPass, \
SecondOrder_HighPass, ComplexLowPass, ComplexHighPass, q_to_s, s_to_q, max_permitted_s, CompleteFilter, COMBINED, \
Passthrough, Gain, Shelf, LinkwitzTransform, Biquad
from model.limits import DecibelRangeCalculator, PhaseRangeCalculator
from model.magnitude import MagnitudeModel
from model.preferences import SHOW_ALL_FILTERS, SHOW_NO_FILTERS, FILTER_COLOURS, DISPLAY_SHOW_FILTERS, DISPLAY_Q_STEP, \
DISPLAY_GAIN_STEP, DISPLAY_S_STEP, DISPLAY_FREQ_STEP, get_filter_colour, FILTERS_DEFAULT_Q, FILTERS_DEFAULT_FREQ, \
FILTERS_GEOMETRY, FILTERS_DEFAULT_HS_FREQ, FILTERS_DEFAULT_HS_Q, FILTERS_DEFAULT_PEAK_FREQ, FILTERS_DEFAULT_PEAK_Q
from model.xy import MagnitudeData, ComplexData
from ui.filter import Ui_editFilterDialog
logger = logging.getLogger('filter')
class FilterModel(Sequence):
    '''
    A model to hold onto the filters and provide magnitude data to a chart about those filters.
    Wraps a CompleteFilter and keeps an attached Qt table model and optional
    label in sync whenever the filter set changes.
    '''
    def __init__(self, view, preferences, label=None, on_update=lambda _: True):
        # view: the widget this model feeds.
        # preferences: user display preferences (controls curve visibility).
        # label: optional widget whose text mirrors the active filter's listener.
        # on_update: callback invoked with the visible filter names on change.
        self.__filter = CompleteFilter()
        self.__view = view
        self.__preferences = preferences
        self.__table = None
        self.__label = label
        self.__on_update = on_update
    @property
    def filter(self) -> CompleteFilter:
        ''' The underlying CompleteFilter. '''
        return self.__filter
    @filter.setter
    def filter(self, filt):
        '''
        Replaces the underlying filter, wrapping the swap in a Qt model reset
        (if a table is attached) and refreshing the label text.
        :param filt: the new CompleteFilter; None resets to an empty one.
        '''
        if filt is None:
            filt = CompleteFilter()
        if isinstance(filt, CompleteFilter):
            if self.__table is not None:
                self.__table.beginResetModel()
            self.__filter = filt
            if self.__label is not None:
                if self.__filter.listener is not None:
                    self.__label.setText(f"Filter - {filt.listener.name}")
                else:
                    self.__label.setText(f"Filter - Default")
            self.post_update()
            if self.__table is not None:
                self.__table.endResetModel()
        else:
            raise ValueError(f"FilterModel only accepts CompleteFilter, ignoring {filt}")
    @property
    def table(self):
        ''' The Qt table model attached to this filter model, if any. '''
        return self.__table
    @table.setter
    def table(self, table):
        self.__table = table
    def __getitem__(self, i):
        return self.filter[i]
    def __len__(self):
        return len(self.filter)
    def save(self, filter):
        '''
        Stores the filter.
        :param filter: the filter.
        '''
        # wrap the mutation in a Qt model reset so attached views refresh
        if self.__table is not None:
            self.__table.beginResetModel()
        self.filter.save(filter)
        self.post_update()
        if self.__table is not None:
            self.__table.endResetModel()
    def preview(self, filter):
        '''
        Previews the effect of saving the supplied filter.
        :param filter: the filter.
        :return: a previewed filter.
        '''
        return self.filter.preview(filter)
    def clone(self, detach=False):
        '''
        Clones the current filter.
        :param detach: if True, assign fresh ids so the clone is independent
        of the original filters.
        '''
        clone = self.filter.preview(None)
        if detach is True:
            for f in clone:
                f.id = uuid4()
        return clone
    def delete(self, indices):
        '''
        Deletes the filter at the specified index.
        :param indices the indexes to delete.
        '''
        # wrap the mutation in a Qt model reset so attached views refresh
        if self.__table is not None:
            self.__table.beginResetModel()
        self.filter.removeByIndex(indices)
        self.post_update()
        if self.__table is not None:
            self.__table.endResetModel()
    def post_update(self):
        '''
        Reacts to a change in the model.
        Works out which filter curves should be visible per the user
        preferences and forwards their names to the on_update callback.
        '''
        visible_filter_names = []
        show_filters = self.__preferences.get(DISPLAY_SHOW_FILTERS)
        if show_filters != SHOW_NO_FILTERS:
            visible_filter_names.append(self.filter.__repr__())
            if show_filters == SHOW_ALL_FILTERS:
                visible_filter_names += self.filter.child_names()
        self.__on_update(visible_filter_names)
    def get_curve_data(self, reference=None):
        '''
        :param reference: the name of the reference data.
        :return: the magnitude response of each filter.
        '''
        show_filters = self.__preferences.get(DISPLAY_SHOW_FILTERS)
        if show_filters == SHOW_NO_FILTERS:
            return []
        elif len(self.filter) == 0:
            return []
        else:
            children = [x.get_transfer_function() for x in self.filter]
            combined = self.filter.get_transfer_function()
            results = [combined]
            # only show child curves when there is more than one filter
            if show_filters == SHOW_ALL_FILTERS and len(self) > 1:
                results += children
            mags = [r.get_magnitude() for r in results]
            # combined curve keeps a fixed colour; children cycle the palette
            for idx, m in enumerate(mags):
                if m.name == COMBINED:
                    m.colour = FILTER_COLOURS[0]
                else:
                    m.colour = FILTER_COLOURS[(idx + 1) % len(FILTER_COLOURS)]
            if reference is not None:
                ref_data = next((x for x in mags if x.name == reference), None)
                if ref_data:
                    mags = [x.normalise(ref_data) for x in mags]
            return mags
    def resample(self, fs):
        '''
        :param fs: the requested fs.
        :return: the filter at that fs.
        '''
        return self.filter.resample(fs)
    def get_transfer_function(self, fs=None) -> Optional[ComplexData]:
        '''
        :return: the transfer function for this filter (in total) if we have any filters or None if we have none.
        '''
        if len(self.filter) > 0:
            if fs is not None:
                return self.filter.resample(fs).get_transfer_function()
            else:
                return self.filter.get_transfer_function()
        return None
class FilterTableModel(QAbstractTableModel):
    '''
    A Qt table model exposing the filters held by a FilterModel as rows.
    '''
    def __init__(self, model, parent=None):
        if parent is not None:
            super().__init__(parent=parent)
        else:
            super().__init__()
        self.__headers = ['Type', 'Freq', 'Q', 'S', 'Gain', 'Biquads']
        self.__filter_model = model
        # register ourselves so the filter model can drive begin/endResetModel
        self.__filter_model.table = self
    def rowCount(self, parent: QModelIndex = ..., *args, **kwargs):
        return len(self.__filter_model)
    def columnCount(self, parent: QModelIndex = ..., *args, **kwargs):
        return len(self.__headers)
    def data(self, index: QModelIndex, role: int = ...) -> Any:
        ''' Supplies the display value for one cell. '''
        if not index.isValid() or role != Qt.DisplayRole:
            return QVariant()
        f = self.__filter_model[index.row()]
        col = index.column()
        if col == 0:
            return QVariant(f.filter_type)
        if col == 5:
            return QVariant(len(f))
        # columns 1-4 read an optional attribute; fall back to N/A when the
        # filter type does not expose it
        if col == 1:
            return QVariant(f.freq) if hasattr(f, 'freq') else QVariant('N/A')
        if col == 2:
            return QVariant(f.q) if hasattr(f, 'q') else QVariant('N/A')
        if col == 3:
            # S is derived from Q so display it rounded
            return QVariant(round(f.q_to_s(), 3)) if hasattr(f, 'q_to_s') else QVariant('N/A')
        if col == 4:
            return QVariant(f.gain) if hasattr(f, 'gain') else QVariant('N/A')
        return QVariant()
    def headerData(self, section: int, orientation: Qt.Orientation, role: int = ...) -> Any:
        if orientation == Qt.Horizontal and role == Qt.DisplayRole:
            return QVariant(self.__headers[section])
        return QVariant()
class FilterDialog(QDialog, Ui_editFilterDialog):
    '''
    Add/Edit Filter dialog
    '''
    # filter types that are shelves (and therefore also accept S input)
    is_shelf = ['Low Shelf', 'High Shelf']
    # filter types that expose a gain control
    gain_required = is_shelf + ['PEQ', 'Gain']
    # selectable spin-box step sizes for the Q/S, gain and frequency fields
    q_steps = [0.0001, 0.001, 0.01, 0.1]
    gain_steps = [0.01, 0.1, 1.0]
    freq_steps = [0.01, 0.1, 1.0, 2.0, 5.0]
    # a Passthrough filter instance shared at class level
    passthrough = Passthrough()
    def __init__(self, preferences, signal, filter_model: FilterModel, redraw_main, selected_filter=None, parent=None,
                 valid_filter_types=None, small=False, max_filters: Optional[int] = None, **kwargs):
        '''
        :param preferences: the user preferences store.
        :param signal: the signal the filters apply to.
        :param filter_model: the FilterModel to edit (cloned into a working copy).
        :param redraw_main: callback used to redraw the main chart.
        :param selected_filter: the filter to preselect, if any.
        :param parent: the parent widget.
        :param valid_filter_types: if set, restricts the filter type combo to these.
        :param small: if True, hide the preview chart and snapshot/optimise controls.
        :param max_filters: optional cap on the number of filters.
        '''
        self.__preferences = preferences
        self.__allow_variable_q_pass_filter = kwargs.pop('allow_var_q_pass', False)
        self.__small_mode = small
        self.__max_filters = max_filters
        super(FilterDialog, self).__init__(parent) if parent is not None else super(FilterDialog, self).__init__()
        self.__redraw_main = redraw_main
        # single-shot timer debounces chart redraws while the user edits values
        self.__mag_update_timer = QTimer(self)
        self.__mag_update_timer.setSingleShot(True)
        # for shelf filter, allow input via Q or S not both
        self.__q_is_active = True
        # allow user to control the steps for different fields, default to reasonably quick moving values
        self.__q_step_idx = self.__get_step(self.q_steps, self.__preferences.get(DISPLAY_Q_STEP), 3)
        self.__s_step_idx = self.__get_step(self.q_steps, self.__preferences.get(DISPLAY_S_STEP), 3)
        self.__gain_step_idx = self.__get_step(self.gain_steps, self.__preferences.get(DISPLAY_GAIN_STEP), 0)
        self.__freq_step_idx = self.__get_step(self.freq_steps, self.__preferences.get(DISPLAY_FREQ_STEP), 2)
        # init the UI itself
        self.setupUi(self)
        custom_window_title = kwargs.pop('window_title', None)
        if custom_window_title:
            self.setWindowTitle(custom_window_title)
        from model.report import block_signals
        # populate the pass filter combo without emitting change signals
        with block_signals(self.passFilterType):
            for filter_type in FilterType:
                self.passFilterType.addItem(filter_type.display_name)
        self.__add_snapshot_buttons = [self.addSnapshotRowButton, self.pasteSnapshotRowButton, self.importSnapshotButton]
        self.__add_working_buttons = [self.addWorkingRowButton, self.pasteWorkingRowButton, self.importWorkingButton]
        self.__snapshot = FilterModel(self.snapshotFilterView, self.__preferences, on_update=self.__on_snapshot_change)
        self.__working = FilterModel(self.workingFilterView, self.__preferences, on_update=self.__on_working_change)
        self.__selected_id = None
        self.__decorate_ui()
        self.__set_q_step(self.q_steps[self.__q_step_idx])
        self.__set_s_step(self.q_steps[self.__s_step_idx])
        self.__set_gain_step(self.gain_steps[self.__gain_step_idx])
        self.__set_freq_step(self.freq_steps[self.__freq_step_idx])
        # underlying filter model
        self.__signal = signal
        self.__filter_model = filter_model
        if self.__filter_model.filter.listener is not None:
            logger.debug(f"Selected filter has listener {self.__filter_model.filter.listener.name}")
        self.__magnitude_model = None
        # remove unsupported filter types
        if valid_filter_types:
            to_remove = []
            for i in range(self.filterType.count()):
                if self.filterType.itemText(i) not in valid_filter_types:
                    to_remove.append(i)
            # offset each removal by the number of items already removed
            for i1, i2 in enumerate(to_remove):
                self.filterType.removeItem(i2 - i1)
        # copy the filter into the working table
        self.__working.filter = self.__filter_model.clone()
        # and initialise the view
        for idx, f in enumerate(self.__working):
            selected = selected_filter is not None and f.id == selected_filter.id
            if not self.__working.filter.sort_by_id:
                f.id = uuid4()
            if selected is True:
                self.__selected_id = f.id
                self.workingFilterView.selectRow(idx)
        if self.__selected_id is None:
            self.__add_working_filter()
        if small is False:
            # init the chart
            self.__magnitude_model = MagnitudeModel('preview', self.previewChart, preferences,
                                                   self.__get_data(), 'Filter', fill_primary=True,
                                                   secondary_data_provider=self.__get_data('phase'),
                                                   secondary_name='Phase', secondary_prefix='deg', fill_secondary=False,
                                                   y_range_calc=DecibelRangeCalculator(30, expand=True),
                                                   y2_range_calc=PhaseRangeCalculator(), show_y2_in_legend=False,
                                                   **kwargs)
            self.__mag_update_timer.timeout.connect(self.__magnitude_model.redraw)
        else:
            # small mode: hide the chart and the snapshot/optimise controls
            self.previewChart.setVisible(False)
            self.fullRangeButton.setVisible(False)
            self.limitsButton.setVisible(False)
            self.subOnlyButton.setVisible(False)
            self.acceptSnapButton.setVisible(False)
            self.loadSnapButton.setVisible(False)
            self.resetButton.setVisible(False)
            self.snapFilterButton.setVisible(False)
            self.snapLabel.setVisible(False)
            self.optimiseButton.setVisible(False)
            self.optimiseLabel.setVisible(False)
            self.targetBiquadCount.setVisible(False)
            self.showPhase.setVisible(False)
            self.showIndividual.setVisible(False)
        self.__restore_geometry()
        self.filterType.setFocus()
        self.__enable_add_more_buttons(self.__add_snapshot_buttons, filter_model)
        self.__enable_add_more_buttons(self.__add_working_buttons, filter_model)
    def __restore_geometry(self):
        ''' loads the saved window size '''
        if self.__small_mode is True:
            # small mode never persists geometry; just shrink to fit
            self.adjustSize()
        else:
            geometry = self.__preferences.get(FILTERS_GEOMETRY)
            if geometry is not None:
                self.restoreGeometry(geometry)
    def closeEvent(self, QCloseEvent):
        ''' Stores the window size on close (skipped in small mode). '''
        if self.__small_mode is False:
            self.__preferences.set(FILTERS_GEOMETRY, self.saveGeometry())
        super().closeEvent(QCloseEvent)
    def __select_working_filter(self):
        ''' Loads the selected filter into the edit fields. '''
        selection = self.workingFilterView.selectionModel()
        if selection.hasSelection():
            # only the first selected row is editable at a time
            idx = selection.selectedRows()[0].row()
            self.headerLabel.setText(f"Working Filter {idx+1}")
            self.__select_filter(self.__working[idx])
    def __select_snapshot_filter(self):
        ''' Loads the selected filter into the edit fields. '''
        selection = self.snapshotFilterView.selectionModel()
        if selection.hasSelection():
            # only the first selected row is editable at a time
            idx = selection.selectedRows()[0].row()
            self.headerLabel.setText(f"Snapshot Filter {idx+1}")
            self.__select_filter(self.__snapshot[idx])
    def __on_snapshot_change(self, _):
        ''' makes the snapshot table visible when we have one. '''
        self.snapshotFilterView.setVisible(len(self.__snapshot) > 0)
        self.snapshotViewButtonWidget.setVisible(len(self.__snapshot) > 0)
        # refresh the preview chart when one is shown
        if self.__magnitude_model is not None:
            self.__trigger_redraw()
        return True
    def __trigger_redraw(self):
        ''' Schedules a debounced chart redraw via the single-shot timer. '''
        if not self.__mag_update_timer.isActive():
            self.__mag_update_timer.start(100)
    def __on_working_change(self, visible_names):
        ''' ensure the graph redraws when a filter changes. '''
        if self.__magnitude_model is not None:
            self.__trigger_redraw()
        return True
    def __decorate_ui(self):
        ''' polishes the UI by setting tooltips, adding icons and connecting widgets to functions. '''
        self.__set_tooltips()
        self.__set_icons()
        self.__connect_working_buttons()
        self.__connect_snapshot_buttons()
        self.__link_table_views()
    def __link_table_views(self):
        ''' Links the table views into the dialog. '''
        # snapshot view starts hidden until a snapshot exists
        self.snapshotFilterView.setVisible(False)
        self.snapshotViewButtonWidget.setVisible(False)
        self.snapshotFilterView.setModel(FilterTableModel(self.__snapshot))
        self.snapshotFilterView.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
        self.snapshotFilterView.selectionModel().selectionChanged.connect(self.__select_snapshot_filter)
        self.workingFilterView.setModel(FilterTableModel(self.__working))
        self.workingFilterView.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
        self.workingFilterView.selectionModel().selectionChanged.connect(self.__select_working_filter)
    def __set_icons(self):
        ''' Applies qtawesome icons to the dialog buttons. '''
        self.saveButton.setIcon(qta.icon('fa5s.save'))
        self.saveButton.setIconSize(QtCore.QSize(32, 32))
        self.exitButton.setIcon(qta.icon('fa5s.sign-out-alt'))
        self.exitButton.setIconSize(QtCore.QSize(32, 32))
        self.snapFilterButton.setIcon(qta.icon('fa5s.copy'))
        self.acceptSnapButton.setIcon(qta.icon('fa5s.check'))
        self.loadSnapButton.setIcon(qta.icon('fa5s.folder-open'))
        self.resetButton.setIcon(qta.icon('fa5s.undo'))
        self.optimiseButton.setIcon(qta.icon('fa5s.magic'))
        self.addWorkingRowButton.setIcon(qta.icon('fa5s.plus'))
        self.addSnapshotRowButton.setIcon(qta.icon('fa5s.plus'))
        self.removeWorkingRowButton.setIcon(qta.icon('fa5s.minus'))
        self.removeSnapshotRowButton.setIcon(qta.icon('fa5s.minus'))
        self.limitsButton.setIcon(qta.icon('fa5s.arrows-alt'))
        self.fullRangeButton.setIcon(qta.icon('fa5s.expand'))
        self.subOnlyButton.setIcon(qta.icon('fa5s.compress'))
        self.importWorkingButton.setIcon(qta.icon('fa5s.file-import'))
        self.importSnapshotButton.setIcon(qta.icon('fa5s.file-import'))
        self.pasteWorkingRowButton.setIcon(qta.icon('fa5s.paste'))
        self.pasteSnapshotRowButton.setIcon(qta.icon('fa5s.paste'))
def __set_tooltips(self):
self.addSnapshotRowButton.setToolTip('Add new filter to snapshot')
self.removeSnapshotRowButton.setToolTip('Remove selected filter from snapshot')
self.addWorkingRowButton.setToolTip('Add new filter')
self.removeWorkingRowButton.setToolTip('Remove selected filter')
self.snapFilterButton.setToolTip('Freeze |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.