input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<reponame>mikhailramalho/benchexec<filename>benchexec/util.py
# BenchExec is a framework for reliable benchmarking.
# This file is part of BenchExec.
#
# Copyright (C) 2007-2015 <NAME>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains some useful functions for Strings, XML or Lists.
"""
# prepare for Python 3
from __future__ import absolute_import, division, print_function, unicode_literals
# THIS MODULE HAS TO WORK WITH PYTHON 2.7!
import bz2
import collections
import datetime
import errno
import fnmatch
import glob
import logging
import os
import shutil
import signal
import stat
import subprocess
import sys
import time
from ctypes.util import find_library
import ctypes
from xml.etree import ElementTree
# Python 2/3 compatibility shims for names used throughout this module.

# shlex.quote exists only on Python 3; pipes.quote is the Python 2 spelling.
try:
    from shlex import quote as escape_string_shell
except ImportError:
    from pipes import quote as escape_string_shell  # noqa: F401 @UnusedImport

# time.monotonic (Python 3.3+) is unaffected by system-clock adjustments.
try:
    read_monotonic_time = time.monotonic  # does not exist on Python 2
except AttributeError:
    # TODO Should probably warn about wall time affected by changing system clock
    read_monotonic_time = time.time

# Probe whether glob.iglob accepts the 'recursive' keyword (Python 3.5+).
try:
    glob.iglob("/", recursive=True)
except TypeError:
    def maybe_recursive_iglob(pathname, recursive=False):
        """Workaround for glob.iglob not accepting parameter recursive on Python <= 3.4"""
        return glob.iglob(pathname)
else:
    maybe_recursive_iglob = glob.iglob

_BYTE_FACTOR = 1000  # byte in kilobyte
def printOut(value, end="\n"):
    """Print the given string immediately, flushing stdout afterwards."""
    out = sys.stdout
    out.write(value + end)
    out.flush()
def is_code(filename):
    """
    Return True if any non-comment line of the given file contains a
    bracket '{' (used as a simple indicator for program code),
    ignoring '${abc}' variable-substitution patterns.
    """
    with open(filename, "r") as input_file:
        for current_line in input_file:
            # skip comments and empty lines
            if is_comment(current_line):
                continue
            # '{' suggests code, unless it is only a '${abc}' substitution
            if "{" in current_line and "${" not in current_line:
                return True
    return False
def is_comment(line):
    """Return True for empty lines and lines starting with '#' or '//'."""
    if not line:
        return True
    return line.startswith(("#", "//"))
def remove_all(list_, elemToRemove):
    """Return a copy of the list with every occurrence of elemToRemove dropped."""
    return list(filter(lambda elem: elem != elemToRemove, list_))
def flatten(iterable, exclude=()):
    """
    Flatten one level of nesting: return a list of all values of all sublists
    of the given iterable, omitting values contained in exclude.

    @param iterable: an iterable of iterables
    @param exclude: a container of values to leave out of the result
        (the default is now an immutable tuple instead of a mutable list,
        which behaves identically for membership tests)
    """
    return [value for sublist in iterable for value in sublist if value not in exclude]
def get_list_from_xml(elem, tag="option", attributes=["name"]):
    """
    Search for all child tags with the given name and return a flat list of
    all their requested attribute values and texts (None values excluded).
    """
    values = (
        [option.get(attr) for attr in attributes] + [option.text]
        for option in elem.findall(tag)
    )
    return flatten(values, exclude=[None])
def get_single_child_from_xml(elem, tag):
    """
    Get a single child tag from an XML element.
    Similar to "elem.find(tag)", but warns if there are multiple child tags with the given name.
    """
    matches = elem.findall(tag)
    if len(matches) > 1:
        logging.warning(
            'Tag "%s" has more than one child tags with name "%s" in input file, '
            "ignoring all but the first.",
            elem.tag,
            tag,
        )
    return matches[0] if matches else None
def text_or_none(elem):
    """
    Retrieve the text content of an XML tag, or None if the element itself is None
    """
    if elem is None:
        return None
    return elem.text
def copy_of_xml_element(elem):
    """
    Return a shallow copy of an XML element (same tag, attributes, and children).
    Kept for compatibility with Python 2.6 or earlier; on Python 2.7+
    'elem.copy()' does the same.
    """
    duplicate = ElementTree.Element(elem.tag, elem.attrib)
    duplicate.extend(list(elem))
    return duplicate
def decode_to_string(toDecode):
    """
    Return the given value decoded from UTF-8 bytes to a string, or unchanged
    if it is already a string (needed on Python 3 because subprocess may
    return bytes instead of a string).
    """
    if hasattr(toDecode, "decode"):
        return toDecode.decode("utf-8")
    # already a string
    return toDecode
def format_number(number, number_of_digits):
    """
    Return a string representation of a number with a fixed count of digits
    after the decimal separator: longer numbers are rounded, shorter ones
    are padded with zeros. None yields the empty string.
    @param number: the number to format
    @param number_of_digits: the number of digits after the decimal point
    """
    if number is None:
        return ""
    return "{0:.{1}f}".format(number, number_of_digits)
def parse_int_list(s):
    """
    Parse a comma-separated list of strings.
    The list may additionally contain ranges such as "1-5",
    which will be expanded into "1,2,3,4,5".
    """
    result = []
    for token in s.split(","):
        bounds = token.strip().split("-")
        if len(bounds) == 1:
            result.append(int(bounds[0]))
        elif len(bounds) == 2:
            lower, upper = bounds
            result.extend(range(int(lower), int(upper) + 1))
        else:
            raise ValueError("invalid range: '{0}'".format(s))
    return result
def split_number_and_unit(s):
    """Parse a string that consists of an integer number and an optional unit.
    @param s: a non-empty string that starts with an int and is followed by some letters
    @return: a pair (not a triple, as previously documented) of the number
        (as int) and the unit (as string, possibly empty)
    @raise ValueError: if the string is empty or contains no leading number
    """
    if not s:
        raise ValueError("empty value")
    s = s.strip()
    # scan backwards over the unit letters to find the end of the number
    pos = len(s)
    while pos and not s[pos - 1].isdigit():
        pos -= 1
    number = int(s[:pos])
    unit = s[pos:].strip()
    return number, unit
def parse_memory_value(s):
    """Parse a string that contains a number of bytes, optionally with a unit like MB.
    @return the number of bytes encoded by the string
    """
    number, unit = split_number_and_unit(s)
    # empty unit means plain bytes
    multipliers = {
        "": 1,
        "B": 1,
        "kB": _BYTE_FACTOR,
        "MB": _BYTE_FACTOR ** 2,
        "GB": _BYTE_FACTOR ** 3,
        "TB": _BYTE_FACTOR ** 4,
    }
    if unit not in multipliers:
        raise ValueError(
            "unknown unit: {} (allowed are B, kB, MB, GB, and TB)".format(unit)
        )
    return number * multipliers[unit]
def parse_timespan_value(s):
    """Parse a string that contains a time span, optionally with a unit like s.
    @return the number of seconds encoded by the string
    """
    number, unit = split_number_and_unit(s)
    # empty unit means seconds; 3600 == 60*60, 86400 == 24*60*60
    seconds_per_unit = {"": 1, "s": 1, "min": 60, "h": 3600, "d": 86400}
    if unit not in seconds_per_unit:
        raise ValueError("unknown unit: {} (allowed are s, min, h, and d)".format(unit))
    return number * seconds_per_unit[unit]
def expand_filename_pattern(pattern, base_dir):
    """
    Expand a file name pattern containing wildcards, environment variables etc.
    @param pattern: The pattern string to expand.
    @param base_dir: The directory where relative paths are based on.
    @return: A list of file names (possibly empty).
    """
    # 'join' ignores base_dir if the pattern is already absolute;
    # 'normpath' replaces 'A/foo/../B' with 'A/B' (pretty printing only)
    absolute_pattern = os.path.normpath(os.path.join(base_dir, pattern))
    # expand '~' and environment variables, then the wildcards
    absolute_pattern = os.path.expandvars(os.path.expanduser(absolute_pattern))
    return glob.glob(absolute_pattern)
def get_files(paths):
    """
    Expand any directories in the given list into all files they contain
    (recursively, skipping hidden files and directories). Plain file paths
    are kept as-is. If no directory needed expanding, the input list is
    returned unchanged.
    """
    expanded_any = False
    result = []
    for path in paths:
        if os.path.isfile(path):
            result.append(path)
        elif os.path.isdir(path):
            expanded_any = True
            for current_dir, subdirs, filenames in os.walk(path):
                # hidden entries start with '.' on Linux; 'subdirs' must be
                # modified in place because os.walk keeps using it to recurse
                subdirs[:] = [d for d in subdirs if not d.startswith(".")]
                for f in filenames:
                    if not f.startswith("."):
                        result.append(os.path.join(current_dir, f))
    return result if expanded_any else paths
def substitute_vars(template, replacements):
    """Replace certain keys with respective values in a string.
    Occurrences of "${key}" in the template are replaced by the value.
    @param template: the string in which replacements should be made
    @param replacements: a dict or a list of pairs of keys and values
    """
    # Iterating a dict directly would yield only its keys and fail to unpack
    # into (key, value) pairs, although the docstring promises dict support.
    if isinstance(replacements, dict):
        replacements = replacements.items()
    result = template
    for (key, value) in replacements:
        result = result.replace("${" + key + "}", value)
    if "${" in result:
        logging.warning("A variable was not replaced in '%s'.", result)
    return result
def find_executable(program, fallback=None, exitOnError=True, use_current_dir=True):
    """
    Search for an executable with the given name in the directories of the
    PATH environment variable (and optionally the current directory).

    @param program: name of the executable to look for
    @param fallback: path to use if the program is not found on the PATH
    @param exitOnError: terminate via sys.exit() instead of returning the
        (possibly unusable) fallback when nothing executable was found
    @param use_current_dir: whether to also search the current directory
    @return: the path of the found executable, or the fallback
    """
    dirs = os.environ["PATH"].split(os.path.pathsep)
    if use_current_dir:
        dirs.append(os.path.curdir)

    found_non_executable = []  # remember candidates for a nicer error message
    for dir_ in dirs:
        candidate = os.path.join(dir_, program)
        if os.path.isfile(candidate):
            if os.access(candidate, os.X_OK):
                # file exists and is executable
                return candidate
            found_non_executable.append(candidate)

    if fallback is not None and os.path.isfile(fallback):
        if os.access(fallback, os.X_OK):
            return fallback
        # BUG FIX: previously appended the loop variable 'name' here (the last
        # PATH candidate, or a NameError if PATH was empty) instead of fallback.
        found_non_executable.append(fallback)

    if exitOnError:
        if found_non_executable:
            sys.exit(
                "ERROR: Could not find '{0}' executable, "
                "but found file '{1}' that is not executable.".format(
                    program, found_non_executable[0]
                )
            )
        else:
            sys.exit("ERROR: Could not find '{0}' executable.".format(program))
    else:
        return fallback
def common_base_dir(l):
    """Return the deepest common parent directory of all given paths."""
    # os.path.commonprefix operates on characters and may return a partial
    # name (e.g. '/foo/ba' for '/foo/bar' and '/foo/baz'), so cut the result
    # back to the last full directory with dirname.
    common_prefix = os.path.commonprefix(l)
    return os.path.dirname(common_prefix)
def relative_path(destination, start):
    """Return 'destination' as a path relative to the directory of 'start'."""
    start_dir = os.path.dirname(start)
    return os.path.relpath(destination, start_dir)
def path_is_below(path, target_path):
"""
Check whether path is below target_path.
Works for bytes and strings, but both arguments need to have same type.
"""
# compare with trailing slashes for cases like /foo and /foobar
empty_path = path[:0] # empty string, but | |
"""Tests cac.data.transforms.DataProcessor"""
import unittest
import math
import numpy as np
import torch
import random
from torchaudio.transforms import TimeStretch
import librosa
from numpy.testing import assert_array_equal, assert_raises, \
assert_array_almost_equal
from cac.config import Config
from cac.data.utils import read_dataset_from_config
from cac.data.transforms import DataProcessor, STFT, TimeMasking,\
FrequencyMasking, BackgroundNoise, RandomCrop, RandomPad, Volume,\
Flatten, Squeeze, Unsqueeze, Ensemble, Reshape, ISTFT, Standardize, \
Identity, Flip, Sometimes, TimeStretch, AddValue, Transpose, Log, \
FixedPad, Duration, Tempo, Onsets, \
RMSEnergy, SpectralRolloff, SpectralCentroid, ZeroCrossingRate, \
DeltaMFCC, AxisStats, ToNumpy, ToTensor, BackgroundNoiseOnImage
class DataProcessorTestCase(unittest.TestCase):
"""Class to run tests on DataProcessor"""
@classmethod
def setUpClass(cls):
    """Load one validation audio file and set up shared fixtures."""
    dataset_config = {
        'name': 'flusense',
        'version': 'default',
        'mode': 'val'
    }
    data_info = read_dataset_from_config(dataset_config)
    cls.signal, cls.rate = librosa.load(data_info['file'][0])
    # keep a numpy copy before converting the shared signal to a tensor
    cls.numpy_signal = cls.signal.copy()
    cls.signal = torch.from_numpy(cls.signal)
    cls.default_stats = [
        'Mean', 'Median', 'Min', 'Max', 'RMS', 'FirstQuartile',
        'ThirdQuartile', 'IQR', 'StandardDeviation', 'Skewness', 'Kurtosis']
def test_time_stretch(self):
    """Checks TimeStretch"""
    spec = torch.rand((2, 201, 100))
    transform = TimeStretch(max_rate=1.3, hop_length=160, n_freq=201)
    output, rate = transform(spec, return_rate=True)
    # the drawn rate must lie inside [1, max_rate]
    self.assertTrue(1 <= rate <= 1.3)
    self.assertEqual(output.shape, (2, 201, math.ceil(100 / rate)))
def test_log_2(self):
    """Checks Log transform with base 2"""
    sample = torch.rand((2, 201, 100))
    output = Log(base=2)(sample)
    assert_array_almost_equal(np.log2(sample), output, decimal=5)
def test_log_natural(self):
    """Checks Log transform with base e"""
    sample = torch.rand((2, 201, 100))
    output = Log(base='natural')(sample)
    assert_array_almost_equal(np.log(sample), output, decimal=5)
def test_log_10(self):
    """Checks Log transform with base 10"""
    sample = torch.rand((2, 201, 100))
    output = Log(base=10)(sample)
    assert_array_almost_equal(np.log10(sample), output, decimal=5)
def test_identity(self):
    """Checks Identity"""
    sample = torch.ones(100)
    assert_array_equal(Identity()(sample), sample)
def test_add_value(self):
    """Checks AddValue"""
    sample = torch.ones(100)
    output = AddValue(val=0.1)(sample)
    assert_array_equal(output, 1.1)
def test_transpose(self):
    """Checks Transpose"""
    # docstring previously said "Checks AddValue" (copy-paste error)
    dummy = torch.ones((10, 20))
    processor = Transpose(0, 1)
    t_signal = processor(dummy)
    self.assertEqual(t_signal.shape, (20, 10))
def test_flip_1d(self):
    """Checks Flip with 1D input"""
    sample = torch.tensor([0, 1, 2])
    assert_array_equal(Flip()(sample), [2, 1, 0])
def test_flip_2d(self):
    """Checks Flip with 2D input"""
    sample = torch.tensor([[0, 1, 2], [3, 4, 5]])
    assert_array_equal(Flip(dim=1)(sample), [[2, 1, 0], [5, 4, 3]])
def test_sometimes(self):
    """Checks Sometimes with Flip as transform"""
    sample = torch.tensor([0, 1, 2])
    processor = Sometimes({'name': 'Flip', 'params': {}}, prob=0.5)
    flipped, unchanged = 0, 0
    random.seed(10)
    for _ in range(10):
        output = processor(sample)
        try:
            assert_array_equal(output, [2, 1, 0])
            flipped += 1
        except AssertionError:
            unchanged += 1
    # with prob=0.5 over 10 draws, both outcomes should occur
    self.assertTrue(unchanged > 0)
    self.assertTrue(flipped > 0)
def test_no_transform(self):
    """Checks the case with no signal transform applied"""
    processor = DataProcessor([])
    assert_array_equal(self.signal, processor(self.signal))
def test_standardize_with_mean_false(self):
    """Tests Standardize with use_mean=False"""
    sample = torch.tensor([[0.1, 0.5, 0.6], [0.2, 0.4, 0.3]])
    output = Standardize('mean-std', use_mean=False)(sample)
    assert_array_equal(sample / sample.std(), output)
def test_standardize_with_std_false(self):
    """Tests Standardize with use_std=False"""
    sample = torch.tensor([[0.1, 0.5, 0.6], [0.2, 0.4, 0.3]])
    output = Standardize('mean-std', use_std=False)(sample)
    assert_array_equal(sample - sample.mean(), output)
def test_standardize_mean_std(self):
    """Tests Standardize with mean and std specified"""
    sample = torch.tensor([[0.1, 0.5, 0.6], [0.2, 0.4, 0.3]])
    mean, std = 0.2, 0.1
    output = Standardize('mean-std', mean=mean, std=std)(sample)
    assert_array_almost_equal((sample - mean) / std, output, decimal=4)
def test_standardize_mean_no_std(self):
    """Tests Standardize with only mean specified"""
    sample = torch.tensor([[0.1, 0.5, 0.6], [0.2, 0.4, 0.3]])
    mean = 0.2
    output = Standardize('mean-std', mean=mean)(sample)
    expected = (sample - mean) / sample.std()
    assert_array_almost_equal(expected, output, decimal=4)
def test_standardize_no_mean_no_std(self):
    """Tests Standardize with neither mean nor std specified"""
    sample = torch.tensor([[0.1, 0.5, 0.6], [0.2, 0.4, 0.3]])
    expected = (sample - sample.mean()) / sample.std()
    assert_array_equal(expected, Standardize('mean-std')(sample))
def test_standardize_mean_std_axis_non_negative(self):
    """Tests Standardize with mean & std specified along axis (axis >= 0)"""
    sample = torch.tensor([[0.1, 0.5, 0.6], [0.2, 0.4, 0.3]])
    output = Standardize('mean-std', mean_axis=1, std_axis=1)(sample)
    mean = sample.mean(dim=1).unsqueeze(-1)
    std = sample.std(dim=1).unsqueeze(-1)
    assert_array_almost_equal((sample - mean) / std, output, decimal=5)
def test_standardize_mean_std_axis_negative(self):
    """Tests Standardize with mean & std specified along axis (axis < 0)"""
    sample = torch.tensor([[0.1, 0.5, 0.6], [0.2, 0.4, 0.3]])
    output = Standardize('mean-std', mean_axis=-1, std_axis=-1)(sample)
    mean = sample.mean(dim=-1).unsqueeze(-1)
    std = sample.std(dim=-1).unsqueeze(-1)
    assert_array_almost_equal((sample - mean) / std, output, decimal=4)
def test_standardize_min_max(self):
    """Tests Standardize with mode=min-max"""
    sample = torch.tensor([[0.1, 0.5, 0.6], [0.2, 0.4, 0.3]])
    output = Standardize(mode='min-max')(sample)
    lo, hi = sample.min(), sample.max()
    assert_array_almost_equal((sample - lo) / (hi - lo), output, decimal=4)
def test_reshape(self):
    """Tests Reshape"""
    output = Reshape(shape=(-1, 10))(torch.ones(100))
    self.assertEqual(output.shape, (10, 10))
def test_flatten_1d(self):
    """Tests Flatten on 1D input"""
    sample = torch.ones(10)
    # flattening a 1D tensor must be a no-op
    assert_array_equal(sample, Flatten()(sample))
def test_flatten_2d(self):
    """Tests Flatten on 2D input"""
    dummy = torch.ones((2, 10))
    t_dummy = Flatten()(dummy)
    # was assertTrue(a, b), which treats b as a message and always passes;
    # use assertEqual to actually verify the flattened shape
    self.assertEqual(t_dummy.shape[0], 20)
    self.assertEqual(len(t_dummy.shape), 1)
def test_squeeze(self):
    """Tests Squeeze"""
    dummy = torch.ones((10, 1))
    t_dummy = Squeeze(dim=-1)(dummy)
    # was assertTrue(shape, (10,)), which never fails; compare for real
    self.assertEqual(t_dummy.shape, (10,))
def test_unsqueeze(self):
    """Tests Unsqueeze"""
    dummy = torch.ones(10)
    t_dummy = Unsqueeze(dim=-1)(dummy)
    # was assertTrue(shape, (10, 1)), which never fails; compare for real
    self.assertEqual(t_dummy.shape, (10, 1))
def test_pca_transform(self):
    """Checks the case with PCA transform applied"""
    n_components = 10
    config = [
        {
            'name': 'PCA',
            'params': {'n_components': n_components, 'norm': True}
        }
    ]
    processor = DataProcessor(config)
    signal = torch.randn((32, 100))
    pca_signal = processor(signal)
    # NOTE(review): the expected shape ignores n_components — presumably the
    # pipeline reduces each sample to a single value here; confirm against
    # the PCA transform implementation
    self.assertEqual(pca_signal.shape, (32,))
def test_axis_norm_1d(self):
    """Checks the case with axis-norm transform applied"""
    config = [{'name': 'AxisNorm', 'params': {'order': 2}}]
    processor = DataProcessor(config)
    signal = torch.randn((100))
    self.assertEqual(processor(signal), torch.norm(signal, p=2, dim=-1))
def test_axis_norm_2d(self):
    """Checks the case with axis-norm transform applied"""
    config = [{'name': 'AxisNorm', 'params': {'order': 2}}]
    signal = torch.randn((10, 100))
    output = DataProcessor(config)(signal)
    assert_array_equal(output, torch.norm(signal, p=2, dim=-1))
def test_mean_norm_1d(self):
    """Checks the case with axis-mean transform applied"""
    config = [{'name': 'AxisMean', 'params': {}}]
    signal = torch.randn((100))
    output = DataProcessor(config)(signal)
    self.assertEqual(output, torch.mean(signal, dim=-1))
def test_mean_norm_2d(self):
    """Checks the case with axis-mean transform applied"""
    config = [{'name': 'AxisMean', 'params': {}}]
    signal = torch.randn((10, 100))
    output = DataProcessor(config)(signal)
    assert_array_equal(output, torch.mean(signal, dim=-1))
def test_ensemble_empty(self):
    """Checks that an empty transform config leaves the signal untouched."""
    signal = torch.randn((10, 100))
    assert_array_equal(DataProcessor([])(signal), signal)
def test_ensemble_concat(self):
    """Checks Ensemble combining two transforms with combine='concat'."""
    transforms_cfg = [
        [{'name': 'AxisNorm', 'params': {'order': 1}}],
        [{'name': 'AxisMean', 'params': {}}],
    ]
    processor = Ensemble(transforms_cfg=transforms_cfg, combine='concat')
    dummy = torch.ones((100, 10))
    t_dummy = processor(dummy)
    # was assertTrue(shape, (200,)), which treats the second argument as a
    # message and always passes; use assertEqual to actually check the shape
    self.assertEqual(t_dummy.shape, (200,))
def test_ensemble_stack(self):
    """Checks Ensemble combining two transforms with combine='stack'."""
    transforms_cfg = [
        [{'name': 'AxisNorm', 'params': {'order': 1}}],
        [{'name': 'AxisMean', 'params': {}}],
    ]
    processor = Ensemble(transforms_cfg=transforms_cfg, combine='stack')
    dummy = torch.ones((100, 10))
    t_dummy = processor(dummy)
    # was assertTrue(shape, (2, 100)), which always passes; compare for real
    self.assertEqual(t_dummy.shape, (2, 100))
def test_ensemble_mean_and_norm(self):
    """Checks an Ensemble of AxisNorm and AxisMean combined via concat."""
    config = [
        {
            'name': 'Ensemble',
            'params': {
                'transforms_cfg': [
                    [{'name': 'AxisNorm', 'params': {'order': 1}}],
                    [{'name': 'AxisMean', 'params': {}}],
                ],
                'combine': 'concat'
            }
        }
    ]
    signal = torch.randn((10, 100))
    output = DataProcessor(config)(signal)
    expected = torch.cat(
        [torch.norm(signal, p=1, dim=-1), torch.mean(signal, dim=-1)])
    assert_array_equal(output, expected)
def test_noise_addition_transform(self):
    """Checks the case with noise addition signal transform applied"""
    seed, noise_scale = 0, 0.005
    config = [
        {
            'name': 'WhiteNoise',
            'params': {'noise_scale': noise_scale}
        }
    ]
    torch.manual_seed(seed)
    predicted = DataProcessor(config)(self.signal)
    # re-seed so the reference noise matches what the transform drew
    torch.manual_seed(seed)
    expected = self.signal + torch.randn_like(self.signal) * noise_scale
    self.assertEqual(self.signal.shape, predicted.shape)
    assert_array_equal(expected, predicted)
def test_resize_1d(self):
    """Checks Resize transform with 1D data"""
    size = (1, 1000)
    config = [{'name': 'Resize', 'params': {'size': size}}]
    output = DataProcessor(config)(torch.zeros(8000))
    # 1D input keeps only the trailing dimension of the target size
    self.assertEqual(output.shape, (*size[1:],))
def test_resize_2d(self):
    """Checks Resize transform with 2D data"""
    size = (128, 20)
    config = [{'name': 'Resize', 'params': {'size': size}}]
    output = DataProcessor(config)(torch.zeros((128, 50)))
    self.assertEqual(output.shape, size)
def test_resize_3d(self):
    """Checks Resize transform with 3D data"""
    size = (128, 20)
    config = [{'name': 'Resize', 'params': {'size': size}}]
    output = DataProcessor(config)(torch.zeros((2, 128, 50)))
    # the leading (channel) dimension is preserved
    self.assertEqual(output.shape, (2, *size))
def test_rescale_transform(self):
    """Checks Rescale transform"""
    # docstring previously said "Resize" (copy-paste error)
    config = [
        {
            'name': 'Rescale',
            'params': {'value': 255}
        }
    ]
    processor = DataProcessor(config)
    dummy_signal = torch.ones(100) * 255.
    transformed_signal = processor(dummy_signal)
    # was assertTrue(max, 1.0), which treats 1.0 as a message and always
    # passes; use assertEqual to actually verify the rescaled maximum
    self.assertEqual(transformed_signal.max(), 1.0)
def test_spectrogram_transform(self):
"""Tests Spectrogram with no window specified"""
n_fft = 440
config = [
{
'name': 'Spectrogram',
'params': {'n_fft': n_fft}
}
| |
for mp
multiprocessing = multiprocessing and self.multiprocessing
# model & kwargs preparation
if method=='fast':
if 'Nterms' in kwargs:
Nterms = kwargs['Nterms']
else:
kwargs['Nterms'] = 5
if model=='Fourier':
Nterms = kwargs['Nterms']
MODEL, P0_FUNC, KWARGS = self.check_model(model,p0_func,**kwargs)
# debug mode option outputs the progress
# (TODO: change this to verbosity - or logger?)
if debug:
t0 = time.time()
print(f'{time.time()-t0:.3f}s --- starting the process...')
print(f'{time.time()-t0:.3f}s --- preparing data...')
# prepare data
x,y,yerr = self.prepare_data(x,y,yerr)
# get periodogram
if debug:
print(f'{time.time()-t0:.3f}s --- getting a periodogram...')
period,power = self.periodogram(p_min=p_min,p_max=p_max,x=x,y=y,yerr=yerr,
method=method,model=model,p0_func=p0_func,N0=N0,
multiprocessing=multiprocessing,**kwargs)
# calculate peak SDE
period_SDE = self.get_SDE(power,peak_only=True)
# select top peaks_to_test independent peaks
if debug:
print(f'{time.time()-t0:.3f}s --- detecting top {peaks_to_test} peaks...')
peak_idx = []
T = x.max()-x.min()
peak_width = p_min**2 *T / (T**2-0.25)
peak_idx_width = int(peak_width/(period[1]-period[0]))
idx_tmp = 0
sorted_idx = np.flip(power.argsort())
while len(peak_idx) < peaks_to_test:
if np.all(abs(sorted_idx[idx_tmp]-peak_idx)>peak_idx_width):
peak_idx.append(sorted_idx[idx_tmp])
idx_tmp += 1
peak_periods = period[peak_idx]
# perform finer sampling near the peaks
if debug:
print(f'{time.time()-t0:.3f}s --- preparing for finer sampling near peaks...')
custom_periods = np.array([])
for peak in peak_periods:
custom_periods = np.concatenate((custom_periods,np.linspace(peak-peak_width,peak+peak_width,R_peak)))
if debug:
print(f'{time.time()-t0:.3f}s --- performing finer sampling near peaks...')
period,power = self.periodogram(
custom_periods=custom_periods,N0=N0,
x=x,y=y,yerr=yerr,method=method,model=model,p0_func=p0_func,
multiprocessing=multiprocessing,**kwargs
)
period = period[power==power.max()][0]
if debug:
print(f'{time.time()-t0:.3f}s --- period candidate: ', period)
model_bestfit = get_bestfit(MODEL,P0_FUNC,x,y,yerr,period,return_yfit=False,return_params=True,**kwargs)
# detect aliasing
if model=='Fourier':
if Nterms>1:
if debug:
print(f'{time.time()-t0:.3f}s --- detecting aliasing...')
factor = np.argmax(abs(model_bestfit[1:Nterms]))+1
if factor != 1:
period /= factor
model_bestfit = get_bestfit(MODEL,P0_FUNC,x,y,yerr,period,return_yfit=False,return_params=True,**KWARGS)
if debug:
print(f'{time.time()-t0:.3f}s --- alias factor: ',factor)
print(f'{time.time()-t0:.3f}s --- period candidate: ',period)
# get uncertainty
if debug:
print(f'{time.time()-t0:.3f}s --- estimating the uncertainty...')
KWARGS['maxfev'] = 100000
popt, pcov = get_bestfit(MODEL,P0_FUNC,x,y,yerr,period,return_yfit=False,return_params=True,return_pcov=True,fit_period=True,**KWARGS)
period_err = np.sqrt(np.diag(pcov))[0]
if debug:
print(f'{time.time()-t0:.3f}s --- period candidate: ',period)
print(f'{time.time()-t0:.3f}s --- period fitted*: ',popt[0])
print(f'{time.time()-t0:.3f}s --- period error: ',period_err)
if period_err == np.inf:
# automatically activate the refinement process
period_err = 0
# re-sample if sampling size is not fine enough
if (period_err < (2*peak_width/R_peak)*10) or force_refine:
if debug:
print(f'{time.time()-t0:.3f}s --- refining samples...')
print(f'{time.time()-t0:.3f}s --- refining search width = {peak_width/10:.3e}')
# prepare new search width -- narrower and thus refined
#TODO: discuss this method
new_search_width = peak_width/R_peak*100
custom_periods = np.linspace(period-new_search_width,period+new_search_width,R_peak)
# get periodogram
periods,power = self.periodogram(
custom_periods=custom_periods,N0=N0,
x=x,y=y,yerr=yerr,method=method,model=model,p0_func=p0_func,multiprocessing=multiprocessing,**kwargs
)
period = periods[power==power.max()][0]
# get uncertainty
KWARGS['maxfev'] = 100000
popt, pcov = get_bestfit(MODEL,P0_FUNC,x,y,yerr,period,return_yfit=False,return_params=True,return_pcov=True,fit_period=True,**KWARGS)
period_err = np.sqrt(np.diag(pcov))[0]
if debug:
print(f'{time.time()-t0:.3f}s --- period candidate: ',period)
print(f'{time.time()-t0:.3f}s --- period fitted*: ',popt[0])
print(f'{time.time()-t0:.3f}s --- period error: ',period_err)
# check: is the size of uncertainty close to the deviation size
# within a factor of two or less?
fit_peak_deviation = abs(popt[0]-period)
if debug:
print(f'{time.time()-t0:.3f}s --- * validating period error...')
print(f'{time.time()-t0:.3f}s --- * fitted period - peak period = {fit_peak_deviation:.2e}')
print(f'{time.time()-t0:.3f}s --- * expected deviation size = {period_err:.2e}')
if (fit_peak_deviation > 2*period_err) or (period_err==np.inf):
if not ignore_warning:
warningMessage = 'warning: provided uncertainty may not be accurate. Try increasing sampling size (N0, default 10).'
print(warningMessage)
elif debug:
print(f'{time.time()-t0:.3f}s --- * period error validated')
if period_err == np.inf:
print('warning: error size infinity: replacing with periodogram peak width')
period_err = peak_width
# finalize
if not no_overwrite:
self.period = period
self.period_err = period_err
self.period_SDE = period_SDE
if debug:
print(f'{time.time()-t0:.3f}s ---','period = {:.{precision}f} +- {:.{precision}f}d'.format(period,period_err,precision=5 if period_err==np.inf else int(abs(np.log10(period_err))+2)))
print(f'{time.time()-t0:.3f}s --- process completed.')
if return_SDE == True:
return period,period_err,period_SDE
return period,period_err
def _get_period_likelihood(self, period=None, period_err=None, p_min=0.1, p_max=4.0,
                           R_peak=1000, N_noise=5000, Nsigma_range=5,
                           return_SDE=False, repr_mode='likelihood', **kwargs):
    ''' Calculates the period, uncertainty, and significance based on the given
    initial guesses. When no guess is given, one is obtained automatically with
    the chi-square method. See our paper for detailed discussion.
    Args:
        period: initial guess for the period value.
        period_err: initial guess for the uncertainty of the period value.
        p_min: the minimum value of the period search range (passed to chi-square method).
        p_max: the maximum value of the period search range (passed to chi-square method).
        R_peak: the resolution of sampling at the peak. Default 1000.
        N_noise: the number of samples for the bootstrapping to compute Z-value.
        Nsigma_range: the size of the periodogram sampling window w.r.t. the
            initial 1-sigma uncertainty (period_err).
        return_SDE (bool): the option to return the SDE value.
        repr_mode (str)['likelihood','lik','log-likelihood','loglik']: the representation of periodogram.
    Returns:
        period_mu: best-fit period from a Gaussian fit to the (log-)likelihood periodogram.
        period_sigma: best-fit period uncertainty from the Gaussian fit.
        Z: the statistical significance of the detected period.
        SDE: (only if return_SDE) the SDE from the chi-square pre-fit, or None
            if an initial period guess was supplied by the caller.
    '''
    # BUG FIX: SDE was referenced at the end without being defined when the
    # caller supplied an initial period but requested return_SDE (NameError).
    SDE = None
    if period is None and period_err is None:
        # obtain an initial guess with the chi-square method
        if return_SDE:
            period, period_err, SDE = self._get_period(
                p_min=p_min, p_max=p_max, return_SDE=return_SDE, **kwargs)
        else:
            period, period_err = self._get_period(p_min=p_min, p_max=p_max, **kwargs)

    def Gaussian(x, mu, sigma, amp):
        return amp * np.exp(-0.5 * (x - mu) ** 2 / sigma ** 2)

    def log_Gaussian(x, mu, sigma, offset):
        return -0.5 * (x - mu) ** 2 / sigma ** 2 + offset

    # sample the log-likelihood near the period and fit a Gaussian in log space
    periods, lik = self.periodogram(
        p_min=period - period_err * Nsigma_range,
        p_max=period + period_err * Nsigma_range,
        N=R_peak,
        repr_mode='loglik',
        raise_warnings=False,
        normalize=False,
        **kwargs
    )
    popt, _ = curve_fit(log_Gaussian, periods, lik,
                        p0=[period, period_err, lik.max()],
                        bounds=[[0, 0, -np.inf], [np.inf, np.inf, np.inf]])
    signal_log = lik.max()
    period_mu, period_sigma, _ = popt
    try:
        # refit a Gaussian on the normalized (linear-scale) likelihood
        periods, lik = self.periodogram(
            p_min=period_mu - period_sigma * Nsigma_range,
            p_max=period_mu + period_sigma * Nsigma_range,
            N=R_peak,
            repr_mode='lik',
            raise_warnings=False,
            **kwargs
        )
        popt, _ = curve_fit(Gaussian, periods, lik,
                            p0=[period_mu, period_sigma, lik.max()],
                            bounds=[[0, 0, 0], [np.inf, np.inf, np.inf]],
                            maxfev=10000)
        period_mu, period_sigma, _ = popt
    except Exception:
        # keep the log-likelihood fit when the linear-scale Gaussian fit fails
        print('warning: Gaussian fit failed in likelihood. Using log-likelihood fit instead (may be less accurate)')
    # estimate the noise level: log-likelihood of randomly shuffled data
    if ('x' in kwargs) or ('y' in kwargs) or ('yerr' in kwargs):
        assert ('x' in kwargs) and ('y' in kwargs) and ('yerr' in kwargs), "All [x,y,yerr] need to be provided when custom data is given"
        x, y, yerr = self.prepare_data(kwargs['x'], kwargs['y'], kwargs['yerr'])
    else:
        x, y, yerr = self.prepare_data()
    idx = np.arange(len(x))
    np.random.shuffle(idx)
    kwargs['x'] = x
    kwargs['y'] = y[idx]
    kwargs['yerr'] = yerr[idx]
    _, loglik_noise = self.periodogram(
        p_min=p_min, p_max=p_max, N=N_noise,
        repr_mode='log-likelihood',
        raise_warnings=False,
        normalize=False,
        **kwargs
    )
    noise_mu, noise_sigma = loglik_noise.mean(), loglik_noise.std()
    Zscore = (signal_log - noise_mu) / noise_sigma
    self.period = period_mu
    self.period_err = period_sigma
    if return_SDE:
        return period_mu, period_sigma, Zscore, SDE
    return period_mu, period_sigma, Zscore
def prewhiten(self, x=None, y=None, yerr=None, period=None, **kwargs):
    """ Performs a prewhitening based on the best-fit curve at the given data.
    Args:
        x: the time values.
        y: the mag/flux values.
        yerr: the uncertainties in the mag/flux values.
        period: the phase-folding period.
    Returns:
        x: the time values. Same as the input or the class attribute (self.x).
        y_new: the prewhitened mag/flux value.
        yerr: the uncertainties in the prewhitened mag/flux value.
    """
    x, y, yerr = self.prepare_data(x, y, yerr)
    # detect the period automatically when none is given
    if period is None:
        period = self.get_period(**kwargs)
    _, y_model = self.get_bestfit_curve(
        x=x, y=y, yerr=yerr, period=period, use_original_x=True, **kwargs)
    # subtract the best-fit model to remove the periodic signal
    return x, y - y_model, yerr
def get_period_multi(self,N,model='Fourier',p0_func=None,**kwargs):
    ''' Multi-period detection.

    Iteratively re-detects P1 and then proceeds to P2, P3, ... PN. Each
    detection is followed by a prewhitening pass that removes the best-fit
    signal before searching for the next period.

    Args:
        N: the number of periods to be detected.
        model (str/obj): A name of the light curve model that's pre-implemented
            or a user-defined function.
        p0_func (str/obj): A function that generates the initial guess values.
        **kwargs: forwarded to get_period(), get_bestfit_amplitude(), and
            prewhiten().

    Returns:
        periods: a list of detected periods.
        period_errors: a list of uncertainties for periods.
        z_vals: the Z-values for each period. Only returned when get_period()
            yields them (e.g. return_Z==True is passed).
        amplitudes: a list of the amplitudes of the best-fit lightcurve at each
            period (evaluated at each prewhitening process).
    '''
    # TODO: implement Z-cut
    # model & kwargs preparation: default to a 5-term Fourier series unless
    # the caller specified Nterms explicitly.
    if model == 'Fourier':
        kwargs.setdefault('Nterms', 5)
    # Validate the model / p0 combination early (raises on bad input).
    MODEL, P0_FUNC, KWARGS = self.check_model(model, p0_func, **kwargs)

    # data prep -- work on copies so the stored light curve is untouched.
    _x = self.x.copy()
    _y = self.y.copy()
    _yerr = self.yerr.copy()

    # repeats period detection -> amplitude measurement -> prewhitening
    periods = []
    period_errors = []
    z_vals = []
    amplitudes = []
    for _ in range(N):
        returns = self.get_period(
            x = _x,
            y = _y,
            yerr = _yerr,
            model = model,
            p0_func = p0_func,
            **kwargs)
        period = returns[0]
        periods.append(period)
        period_errors.append(returns[1])
        if len(returns) > 2:
            z_vals.append(returns[2:])
        amp = self.get_bestfit_amplitude(
            x = _x,
            y = _y,
            yerr = _yerr,  # BUGFIX: was yerr=_y (magnitudes passed as errors)
            period = period,
            model = model,
            **kwargs)
        amplitudes.append(amp)
        _x, _y, _yerr = self.prewhiten(
            x = _x,
            y = _y,
            yerr = _yerr,
            period = period,
            model = model,
            **kwargs
        )
    if len(z_vals) > 0:
        return periods, period_errors, z_vals, amplitudes
    return periods, period_errors, amplitudes
def amplitude_spectrum(self,p_min,p_max,N,model='Fourier',p0_func=None,grid=10000,**kwargs):
''' Returns the amplitude spectrum.
Args:
p_min: | |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cloudpickle as pickle
import hashlib
import inspect
import json
import numpy as np
import redis
import traceback
import ray.local_scheduler
import ray.signature as signature
import ray.worker
from ray.utils import random_string, binary_to_hex, hex_to_binary
def random_actor_id():
    """Generate a random ObjectID to serve as the ID of a new actor."""
    return ray.local_scheduler.ObjectID(random_string())
def random_actor_class_id():
    """Generate a random string to serve as the ID of an actor class."""
    return random_string()
def get_actor_method_function_id(attr):
    """Get the function ID corresponding to an actor method.

    The ID is derived deterministically from the method name, so every
    process computes the same ID for the same method.

    Args:
        attr (str): The attribute name of the method.

    Returns:
        Function ID corresponding to the method.
    """
    digest = hashlib.sha1(attr.encode("ascii")).digest()
    # SHA-1 digests are always 20 bytes, which matches the ObjectID width.
    assert len(digest) == 20
    return ray.local_scheduler.ObjectID(digest)
def fetch_and_register_actor(actor_class_key, worker):
    """Import an actor.

    This will be called by the worker's import thread when the worker receives
    the actor_class export, assuming that the worker is an actor for that class.

    Args:
        actor_class_key: The Redis key under which the exported actor class
            (driver ID, class name, module, pickled class, method names) was
            stored by export_actor_class().
        worker: The worker object that will host the actor instance.
    """
    actor_id_str = worker.actor_id
    (driver_id, class_id, class_name,
     module, pickled_class, actor_method_names) = worker.redis_client.hmget(
         actor_class_key, ["driver_id", "class_id", "class_name", "module",
                           "class", "actor_method_names"])
    actor_name = class_name.decode("ascii")
    module = module.decode("ascii")
    actor_method_names = json.loads(actor_method_names.decode("ascii"))
    # Create a temporary actor with some temporary methods so that if the actor
    # fails to be unpickled, the temporary actor can be used (just to produce
    # error messages and to prevent the driver from hanging).
    class TemporaryActor(object):
        pass
    worker.actors[actor_id_str] = TemporaryActor()

    def temporary_actor_method(*xs):
        # Placeholder that reports the import failure for any method call.
        raise Exception("The actor with name {} failed to be imported, and so "
                        "cannot execute this method".format(actor_name))
    # Register the placeholder for every method BEFORE attempting to unpickle,
    # so method calls fail loudly (instead of hanging) if the unpickle raises.
    for actor_method_name in actor_method_names:
        function_id = get_actor_method_function_id(actor_method_name).id()
        worker.functions[driver_id][function_id] = (actor_method_name,
                                                    temporary_actor_method)
    try:
        unpickled_class = pickle.loads(pickled_class)
    except Exception:
        # If an exception was thrown when the actor was imported, we record the
        # traceback and notify the scheduler of the failure.
        traceback_str = ray.worker.format_error_message(traceback.format_exc())
        # Log the error message.
        worker.push_error_to_driver(driver_id, "register_actor", traceback_str,
                                    data={"actor_id": actor_id_str})
    else:
        # TODO(pcm): Why is the below line necessary?
        unpickled_class.__module__ = module
        worker.actors[actor_id_str] = unpickled_class.__new__(unpickled_class)
        # Replace the placeholders with the real (unpickled) methods.
        for (k, v) in inspect.getmembers(
                unpickled_class, predicate=(lambda x: (inspect.isfunction(x) or
                                                       inspect.ismethod(x)))):
            function_id = get_actor_method_function_id(k).id()
            worker.functions[driver_id][function_id] = (k, v)
            # We do not set worker.function_properties[driver_id][function_id]
            # because we currently do not need the actor worker to submit new
            # tasks for the actor.
def attempt_to_reserve_gpus(num_gpus, driver_id, local_scheduler, worker):
    """Attempt to acquire GPUs on a particular local scheduler for an actor.

    Args:
        num_gpus: The number of GPUs to acquire.
        driver_id: The ID of the driver responsible for creating the actor.
        local_scheduler: Information about the local scheduler.
        worker: The worker whose Redis client performs the atomic update.

    Returns:
        True if the GPUs were successfully reserved and false otherwise.
    """
    assert num_gpus != 0
    local_scheduler_id = local_scheduler["DBClientID"]
    local_scheduler_total_gpus = int(local_scheduler["NumGPUs"])

    success = False

    # Attempt to acquire GPU IDs atomically via a Redis WATCH/MULTI/EXEC
    # transaction: read-modify-write of "gpus_in_use" retries if any other
    # client touches the scheduler's hash concurrently.
    with worker.redis_client.pipeline() as pipe:
        while True:
            try:
                # If this key is changed before the transaction below (the
                # multi/exec block), then the transaction will not take place.
                pipe.watch(local_scheduler_id)
                # Figure out which GPUs are currently in use.
                result = worker.redis_client.hget(local_scheduler_id, "gpus_in_use")
                gpus_in_use = dict() if result is None else json.loads(
                    result.decode("ascii"))
                num_gpus_in_use = 0
                for key in gpus_in_use:
                    num_gpus_in_use += gpus_in_use[key]
                assert num_gpus_in_use <= local_scheduler_total_gpus

                pipe.multi()

                if local_scheduler_total_gpus - num_gpus_in_use >= num_gpus:
                    # There are enough available GPUs, so try to reserve some.
                    # We use the driver ID in hex as a dictionary key so that
                    # the dictionary is JSON serializable.
                    driver_id_hex = binary_to_hex(driver_id)
                    if driver_id_hex not in gpus_in_use:
                        gpus_in_use[driver_id_hex] = 0
                    gpus_in_use[driver_id_hex] += num_gpus

                    # Stick the updated GPU IDs back in Redis
                    pipe.hset(local_scheduler_id, "gpus_in_use", json.dumps(gpus_in_use))
                    success = True

                pipe.execute()
                # If a WatchError is not raised, then the operations should
                # have gone through atomically.
                break
            except redis.WatchError:
                # Another client must have changed the watched key between the
                # time we started WATCHing it and the pipeline's execution. We
                # should just retry.
                success = False
                continue

    return success
def select_local_scheduler(local_schedulers, num_gpus, worker):
    """Select a local scheduler to assign this actor to.

    Args:
        local_schedulers: A list of dictionaries of information about the local
            schedulers.
        num_gpus (int): The number of GPUs that must be reserved for this actor.
        worker: The worker object used to reach Redis for GPU reservation.

    Returns:
        The ID of the local scheduler that has been chosen.

    Raises:
        Exception: An exception is raised if no local scheduler can be found
            with sufficient resources.
    """
    driver_id = worker.task_driver_id.id()

    # Visit the local schedulers in a random order so actors spread out.
    local_schedulers = np.random.permutation(local_schedulers)
    for scheduler_info in local_schedulers:
        # Skip schedulers that cannot possibly host the actor.
        if scheduler_info["NumCPUs"] < 1:
            continue
        if scheduler_info["NumGPUs"] < num_gpus:
            continue
        if num_gpus != 0:
            # GPUs must be reserved atomically; move on to the next scheduler
            # if another client grabbed them first.
            if not attempt_to_reserve_gpus(num_gpus, driver_id,
                                           scheduler_info, worker):
                continue
        return hex_to_binary(scheduler_info["DBClientID"])

    raise Exception("Could not find a node with enough GPUs or other "
                    "resources to create this actor. The local scheduler "
                    "information is {}.".format(local_schedulers))
def export_actor_class(class_id, Class, actor_method_names, worker):
    """Export an actor class to Redis so workers can import it.

    Args:
        class_id: The ID of the actor class (random bytes).
        Class: The Python class being exported.
        actor_method_names: Iterable of the names of the class's methods.
        worker: The worker whose Redis client performs the export.

    Raises:
        NotImplementedError: If called before Ray has been started (caching
            actors exported before connection is not implemented yet).
    """
    if worker.mode is None:
        # BUGFIX: this previously did `raise NotImplemented(...)`, which
        # raises a TypeError because NotImplemented is a constant, not an
        # exception class.
        raise NotImplementedError("TODO(pcm): Cache actors")
    key = b"ActorClass:" + class_id
    d = {"driver_id": worker.task_driver_id.id(),
         # Store class_id too: fetch_and_register_actor reads it back from
         # this hash (it previously came back as None).
         "class_id": class_id,
         "class_name": Class.__name__,
         "module": Class.__module__,
         "class": pickle.dumps(Class),
         "actor_method_names": json.dumps(list(actor_method_names))}
    worker.redis_client.hmset(key, d)
    worker.redis_client.rpush("Exports", key)
def export_actor(actor_id, class_id, actor_method_names, num_cpus, num_gpus,
                 worker):
    """Export an actor to redis.

    Args:
        actor_id: The ID of the actor.
        class_id: The ID of the actor class this actor instantiates.
        actor_method_names (list): A list of the names of this actor's methods.
        num_cpus (int): The number of CPUs that this actor requires.
        num_gpus (int): The number of GPUs that this actor requires.
        worker: The worker used to reach Redis and the global state.
    """
    ray.worker.check_main_thread()
    if worker.mode is None:
        raise Exception("Actors cannot be created before Ray has been started. "
                        "You can start Ray with 'ray.init()'.")
    key = "Actor:{}".format(actor_id.id())

    # For now, all actor methods have 1 return value.
    driver_id = worker.task_driver_id.id()
    for actor_method_name in actor_method_names:
        # TODO(rkn): When we create a second actor, we are probably overwriting
        # the values from the first actor here. This may or may not be a problem.
        function_id = get_actor_method_function_id(actor_method_name).id()
        worker.function_properties[driver_id][function_id] = (1, num_cpus, 0)

    # Get a list of the local schedulers from the client table.
    client_table = ray.global_state.client_table()
    local_schedulers = []
    for ip_address, clients in client_table.items():
        for client in clients:
            if client["ClientType"] == "local_scheduler" and not client["Deleted"]:
                local_schedulers.append(client)
    # Select a local scheduler for the actor.
    local_scheduler_id = select_local_scheduler(local_schedulers, num_gpus,
                                                worker)
    assert local_scheduler_id is not None

    # We must put the actor information in Redis before publishing the actor
    # notification so that when the newly created actor attempts to fetch the
    # information from Redis, it is already there.
    worker.redis_client.hmset(key, {"class_id": class_id,
                                    "num_gpus": num_gpus})
    # Really we should encode this message as a flatbuffer object. However, we're
    # having trouble getting that to work. It almost works, but in Python 2.7,
    # builder.CreateString fails on byte strings that contain characters outside
    # range(128).
    # TODO(rkn): There is actually no guarantee that the local scheduler that we
    # are publishing to has already subscribed to the actor_notifications
    # channel. Therefore, this message may be missed and the workload will hang.
    # This is a bug.
    worker.redis_client.publish("actor_notifications",
                                actor_id.id() + driver_id + local_scheduler_id)
def actor(*args, **kwargs):
    """Removed decorator; actors are now declared with @ray.remote."""
    error_message = ("The @ray.actor decorator is deprecated. Instead, please "
                     "use @ray.remote.")
    raise Exception(error_message)
def make_actor(Class, num_cpus, num_gpus):
class_id = random_actor_class_id()
# The list exported will have length 0 if the class has not been exported
# yet, and length one if it has. This is just implementing a bool, but we
# don't use a bool because we need to modify it inside of the NewClass
# constructor.
exported = []
# The function actor_method_call gets called if somebody tries to call a
# method on their local actor stub object.
def actor_method_call(actor_id, attr, function_signature, *args, **kwargs):
ray.worker.check_connected()
ray.worker.check_main_thread()
args = signature.extend_args(function_signature, args, kwargs)
function_id = get_actor_method_function_id(attr)
object_ids = ray.worker.global_worker.submit_task(function_id, "", args,
actor_id=actor_id)
if len(object_ids) == 1:
return object_ids[0]
elif len(object_ids) > 1:
return object_ids
class ActorMethod(object):
def __init__(self, method_name, actor_id, method_signature):
self.method_name = method_name
self.actor_id = actor_id
self.method_signature = method_signature
def __call__(self, *args, **kwargs):
raise Exception("Actor methods cannot be called directly. Instead "
"of running 'object.{}()', try 'object.{}.remote()'."
.format(self.method_name, self.method_name))
def remote(self, *args, **kwargs):
return actor_method_call(self.actor_id, self.method_name,
self.method_signature, *args, **kwargs)
class NewClass(object):
def __init__(self, *args, **kwargs):
raise Exception("Actor classes cannot be instantiated directly. "
"Instead of running '{}()', try '{}.remote()'."
.format(Class.__name__, Class.__name__))
@classmethod
def remote(cls, *args, **kwargs):
actor_object = cls.__new__(cls)
actor_object._manual_init(*args, **kwargs)
return actor_object
def _manual_init(self, *args, **kwargs):
self._ray_actor_id = random_actor_id()
self._ray_actor_methods = | |
from dataclasses import dataclass
from lzma import is_check_supported
from PIL import Image, ImageDraw, ImageFilter
from typing import List
from seedsigner.gui.renderer import Renderer
from seedsigner.models.threads import BaseThread
from .screen import ButtonListScreen, WarningScreen
from ..components import (BtcAmount, Button, Icon, FontAwesomeIconConstants, IconTextLine, FormattedAddress, GUIConstants, Fonts, SeedSignerCustomIconConstants, TextArea,
calc_bezier_curve, linear_interp)
@dataclass
class PSBTOverviewScreen(ButtonListScreen):
spend_amount: int = 0
change_amount: int = 0
fee_amount: int = 0
num_inputs: int = 0
num_self_transfer_outputs: int = 0
num_change_outputs: int = 0
destination_addresses: List[str] = None
def __post_init__(self):
    """Build the PSBT overview screen: headline BTC amount plus a rendered
    inputs -> center bar -> outputs flow chart, then hand the chart's curves
    to the TxExplorerAnimationThread for the animated pulse effect."""
    # Customize defaults
    self.title = "Review PSBT"
    self.is_bottom_list = True
    self.button_data = ["Review Details"]

    # This screen can take a while to load while parsing the PSBT
    self.show_loading_screen = True

    super().__post_init__()

    # Prep the headline amount being spent in large callout
    # icon_text_lines_y = self.components[-1].screen_y + self.components[-1].height
    icon_text_lines_y = self.top_nav.height + GUIConstants.COMPONENT_PADDING
    if not self.destination_addresses:
        # This is a self-transfer
        spend_amount = self.change_amount
    else:
        spend_amount = self.spend_amount
    self.components.append(BtcAmount(
        total_sats=spend_amount,
        screen_y=icon_text_lines_y,
    ))

    # Prep the transaction flow chart
    self.chart_x = 0
    self.chart_y = self.components[-1].screen_y + self.components[-1].height + int(GUIConstants.COMPONENT_PADDING/2)
    chart_height = self.buttons[0].screen_y - self.chart_y - GUIConstants.COMPONENT_PADDING

    # We need to supersample the whole panel so that small/thin elements render
    # clearly.
    ssf = 4  # super-sampling factor

    # Set up our temp supersampled rendering surface
    image = Image.new(
        "RGB",
        (self.canvas_width * ssf, chart_height * ssf),
        GUIConstants.BACKGROUND_COLOR
    )
    draw = ImageDraw.Draw(image)

    font_size = GUIConstants.BODY_FONT_MIN_SIZE * ssf
    font = Fonts.get_font(GUIConstants.BODY_FONT_NAME, font_size)

    # Measure text height once using a glyph sample that covers ascenders,
    # descenders, digits, and brackets.
    (left, top, right, bottom) = font.getbbox(text="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890[]", anchor="lt")
    chart_text_height = bottom
    vertical_center = int(image.height/2)
    # Supersampling renders thin elements poorly if they land on an even line before scaling down
    if vertical_center % 2 == 1:
        vertical_center += 1
    association_line_color = "#666"
    association_line_width = 3*ssf
    curve_steps = 4
    chart_font_color = "#ddd"

    # First calculate how wide the inputs col will be
    inputs_column = []
    if self.num_inputs == 1:
        inputs_column.append("1 input")
    elif self.num_inputs > 5:
        # Too many inputs to list individually; elide the middle ones.
        inputs_column.append("input 1")
        inputs_column.append("input 2")
        inputs_column.append("[ ... ]")
        inputs_column.append(f"input {self.num_inputs-1}")
        inputs_column.append(f"input {self.num_inputs}")
    else:
        for i in range(0, self.num_inputs):
            inputs_column.append(f"input {i+1}")

    max_inputs_text_width = 0
    for input in inputs_column:
        # NOTE(review): font.getsize() was removed in Pillow 10; the method
        # above already uses getbbox() -- consider migrating these calls too.
        tw, th = font.getsize(input)
        max_inputs_text_width = max(tw, max_inputs_text_width)

    # Given how wide we want our curves on each side to be...
    curve_width = 4*GUIConstants.COMPONENT_PADDING*ssf

    # ...and the minimum center divider width...
    center_bar_width = 2*GUIConstants.COMPONENT_PADDING*ssf

    # We can calculate how wide the destination col can be
    max_destination_col_width = image.width - (GUIConstants.EDGE_PADDING*ssf + max_inputs_text_width + \
        int(GUIConstants.COMPONENT_PADDING*ssf/4) + curve_width + \
        center_bar_width + \
        curve_width + int(GUIConstants.COMPONENT_PADDING*ssf/4) + \
        GUIConstants.EDGE_PADDING*ssf)

    # if self.num_inputs == 1:
    #     # Use up more of the space on the input side
    #     max_destination_col_width += curve_width

    # Now let's maximize the actual destination col by adjusting our addr truncation
    def calculate_destination_col_width(truncate_at: int):
        # Returns (widest rendered text width, list of destination labels)
        # for a given address-truncation length.
        def truncate_destination_addr(addr):
            return f"{addr[:truncate_at]}..."

        destination_column = []

        if len(self.destination_addresses) + self.num_self_transfer_outputs <= 3:
            for addr in self.destination_addresses:
                destination_column.append(truncate_destination_addr(addr))
            for i in range(0, self.num_self_transfer_outputs):
                destination_column.append(f"self-transfer")
        else:
            # destination_column.append(f"{len(self.destination_addresses)} recipients")
            destination_column.append(f"recipient 1")
            destination_column.append(f"[ ... ]")
            destination_column.append(f"recipient {len(self.destination_addresses) + self.num_self_transfer_outputs}")

        destination_column.append(f"fee")

        if self.num_change_outputs > 0:
            for i in range(0, self.num_change_outputs):
                destination_column.append("change")

        max_destination_text_width = 0
        for destination in destination_column:
            tw, th = font.getsize(destination)
            max_destination_text_width = max(tw, max_destination_text_width)

        return (max_destination_text_width, destination_column)

    if len(self.destination_addresses) + self.num_self_transfer_outputs > 3:
        # We're not going to display any destination addrs so truncation doesn't matter
        (destination_text_width, destination_column) = calculate_destination_col_width(truncate_at=0)
    else:
        # Steadliy widen out the destination column until we run out of space
        for i in range(6, 13):
            (new_width, new_col_text) = calculate_destination_col_width(truncate_at=i)
            if new_width > max_destination_col_width:
                break
            destination_text_width = new_width
            destination_column = new_col_text

    destination_col_x = image.width - (destination_text_width + GUIConstants.EDGE_PADDING*ssf)

    # Now we can finalize our center bar values
    center_bar_x = GUIConstants.EDGE_PADDING*ssf + max_inputs_text_width + int(GUIConstants.COMPONENT_PADDING*ssf/4) + curve_width

    # Center bar stretches to fill any excess width
    center_bar_width = destination_col_x - int(GUIConstants.COMPONENT_PADDING*ssf/4) - curve_width - center_bar_x

    # Position each input row
    num_rendered_inputs = len(inputs_column)
    if self.num_inputs == 1:
        inputs_y = vertical_center - int(chart_text_height/2)
        inputs_y_spacing = 0  # Not used
    else:
        inputs_y = int((image.height - num_rendered_inputs*chart_text_height) / (num_rendered_inputs + 1))
        inputs_y_spacing = inputs_y + chart_text_height

    # Don't render lines from an odd number
    if inputs_y % 2 == 1:
        inputs_y += 1
    if inputs_y_spacing % 2 == 1:
        inputs_y_spacing += 1

    inputs_conjunction_x = center_bar_x
    inputs_x = GUIConstants.EDGE_PADDING*ssf

    input_curves = []
    for input in inputs_column:
        # Calculate right-justified input display
        tw, th = font.getsize(input)
        cur_x = inputs_x + max_inputs_text_width - tw
        draw.text(
            (cur_x, inputs_y),
            text=input,
            font=font,
            fill=chart_font_color,
            anchor="lt",
        )

        # Render the association line to the conjunction point
        # First calculate a bezier curve to an inflection point
        start_pt = (
            inputs_x + max_inputs_text_width + int(GUIConstants.COMPONENT_PADDING*ssf/4),
            inputs_y + int(chart_text_height/2)
        )
        conjunction_pt = (inputs_conjunction_x, vertical_center)
        mid_pt = (
            int(start_pt[0]*0.5 + conjunction_pt[0]*0.5),
            int(start_pt[1]*0.5 + conjunction_pt[1]*0.5)
        )

        if len(inputs_column) == 1:
            # Use fewer segments for single input straight line
            bezier_points = [
                start_pt,
                linear_interp(start_pt, conjunction_pt, 0.33),
                linear_interp(start_pt, conjunction_pt, 0.66),
                conjunction_pt
            ]
        else:
            bezier_points = calc_bezier_curve(
                start_pt,
                (mid_pt[0], start_pt[1]),
                mid_pt,
                curve_steps
            )
            # We don't need the "final" point as it's repeated below
            bezier_points.pop()

            # Now render the second half after the inflection point
            bezier_points += calc_bezier_curve(
                mid_pt,
                (mid_pt[0], conjunction_pt[1]),
                conjunction_pt,
                curve_steps
            )

        input_curves.append(bezier_points)

        prev_pt = bezier_points[0]
        for pt in bezier_points[1:]:
            draw.line(
                (prev_pt[0], prev_pt[1], pt[0], pt[1]),
                fill=association_line_color,
                width=association_line_width + 1,
                joint="curve",
            )
            prev_pt = pt

        inputs_y += inputs_y_spacing

    # Render center bar
    draw.line(
        (
            center_bar_x,
            vertical_center,
            center_bar_x + center_bar_width,
            vertical_center
        ),
        fill=association_line_color,
        width=association_line_width
    )

    # Position each destination
    num_rendered_destinations = len(destination_column)
    if num_rendered_destinations == 1:
        destination_y = vertical_center - int(chart_text_height/2)
        destination_y_spacing = 0
    else:
        destination_y = int((image.height - num_rendered_destinations*chart_text_height) / (num_rendered_destinations + 1))
        destination_y_spacing = destination_y + chart_text_height

    # Don't render lines from an odd number
    if destination_y % 2 == 1:
        destination_y += 1
    if destination_y_spacing % 2 == 1:
        destination_y_spacing += 1

    destination_conjunction_x = center_bar_x + center_bar_width
    recipients_text_x = destination_col_x

    output_curves = []
    for destination in destination_column:
        draw.text(
            (recipients_text_x, destination_y),
            text=destination,
            font=font,
            fill=chart_font_color,
            anchor="lt"
        )

        # Render the association line from the conjunction point
        # First calculate a bezier curve to an inflection point
        conjunction_pt = (destination_conjunction_x, vertical_center)
        end_pt = (
            conjunction_pt[0] + curve_width,
            destination_y + int(chart_text_height/2)
        )
        mid_pt = (
            int(conjunction_pt[0]*0.5 + end_pt[0]*0.5),
            int(conjunction_pt[1]*0.5 + end_pt[1]*0.5)
        )

        bezier_points = calc_bezier_curve(
            conjunction_pt,
            (mid_pt[0], conjunction_pt[1]),
            mid_pt,
            curve_steps
        )
        # We don't need the "final" point as it's repeated below
        bezier_points.pop()

        # Now render the second half after the inflection point
        curve_bias = 1.0
        bezier_points += calc_bezier_curve(
            mid_pt,
            (int(mid_pt[0]*curve_bias + end_pt[0]*(1.0-curve_bias)), end_pt[1]),
            end_pt,
            curve_steps
        )
        output_curves.append(bezier_points)

        prev_pt = bezier_points[0]
        for pt in bezier_points[1:]:
            draw.line(
                (prev_pt[0], prev_pt[1], pt[0], pt[1]),
                fill=association_line_color,
                width=association_line_width + 1,
                joint="curve",
            )
            prev_pt = pt

        destination_y += destination_y_spacing

    # Resize to target and sharpen final image
    image = image.resize((self.canvas_width, chart_height), Image.LANCZOS)
    self.paste_images.append((image.filter(ImageFilter.SHARPEN), (self.chart_x, self.chart_y)))

    # Pass input and output curves to the animation thread
    self.threads.append(
        PSBTOverviewScreen.TxExplorerAnimationThread(
            inputs=input_curves,
            outputs=output_curves,
            supersampling_factor=ssf,
            offset_y=self.chart_y,
            renderer=self.renderer
        )
    )
class TxExplorerAnimationThread(BaseThread):
def __init__(self, inputs, outputs, supersampling_factor, offset_y, renderer: Renderer):
    """Store the chart's input/output curves, rescaled from supersampled
    chart coordinates down to renderer-space screen coordinates."""
    super().__init__()

    def to_renderer_space(curves):
        # Undo the supersampling factor and shift vertically to where the
        # chart was pasted onto the screen.
        scaled = []
        for curve in curves:
            scaled.append([
                (int(point[0] / supersampling_factor),
                 int(point[1] / supersampling_factor + offset_y))
                for point in curve
            ])
        return scaled

    self.inputs = to_renderer_space(inputs)
    self.outputs = to_renderer_space(outputs)
    self.renderer = renderer
def run(self):
pulse_color = GUIConstants.ACCENT_COLOR
reset_color = "#666"
line_width = 3
pulses = []
# The center bar needs to be segmented to support animation across it
start_pt = self.inputs[0][-1]
end_pt = self.outputs[0][0]
if start_pt == end_pt:
# In single input the center bar width can be zeroed out.
# Ugly hack: Insert this line segment that will be skipped otherwise.
center_bar_pts = [end_pt, self.outputs[0][1]]
else:
center_bar_pts = [
start_pt,
linear_interp(start_pt, end_pt, 0.25),
linear_interp(start_pt, end_pt, 0.50),
linear_interp(start_pt, end_pt, 0.75),
end_pt,
]
def draw_line_segment(curves, i, j, color):
# print(f"draw: {curves[0][i]} to {curves[0][j]}")
for points in curves:
pt1 = points[i]
pt2 = points[j]
self.renderer.draw.line(
(pt1[0], pt1[1], pt2[0], pt2[1]),
fill=color,
width=line_width
)
prev_color = reset_color
while self.keep_running:
with self.renderer.lock:
# Only generate one new pulse at a time; trailing "reset_color" pulse
# erases the most recent pulse.
if not pulses or (
prev_color == pulse_color and pulses[-1][0] == | |
self.redeploy_ui1_with_http_info(live, **kwargs) # noqa: E501
return data
def redeploy_ui1_with_http_info(self, live, **kwargs):  # noqa: E501
    """Redeploy UI from current dev branch.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.redeploy_ui1_with_http_info(live, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param bool live: (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """

    local_var_params = locals()

    all_params = ['live']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Reject unknown keyword arguments so typos fail loudly.
    # NOTE: plain dict.items() replaces six.iteritems() -- it works on both
    # Python 2 and 3 and drops the third-party dependency for this loop.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method redeploy_ui1" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'live' is set
    if ('live' not in local_var_params or
            local_var_params['live'] is None):
        raise ValueError("Missing the required parameter `live` when calling `redeploy_ui1`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'live' in local_var_params:
        path_params['live'] = local_var_params['live']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # Authentication setting
    auth_settings = ['api_key']  # noqa: E501

    return self.api_client.call_api(
        '/api2/json/redeployUI/{live}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def remove_user_account(self, token, **kwargs):  # noqa: E501
    """Remove the user account.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.remove_user_account(token, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str token: (required)
    :return: APIPlanSubscriptionOut
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # In both the synchronous and asynchronous cases the *_with_http_info
    # variant already produces the value to hand back: the deserialized
    # data, or the request thread when async_req=True.
    return self.remove_user_account_with_http_info(token, **kwargs)  # noqa: E501
def remove_user_account_with_http_info(self, token, **kwargs):  # noqa: E501
    """Remove the user account.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.remove_user_account_with_http_info(token, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str token: (required)
    :return: APIPlanSubscriptionOut
             If the method is called asynchronously,
             returns the request thread.
    """

    local_var_params = locals()

    all_params = ['token']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Reject unknown keyword arguments so typos fail loudly.
    # NOTE: plain dict.items() replaces six.iteritems() -- it works on both
    # Python 2 and 3 and drops the third-party dependency for this loop.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method remove_user_account" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'token' is set
    if ('token' not in local_var_params or
            local_var_params['token'] is None):
        raise ValueError("Missing the required parameter `token` when calling `remove_user_account`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'token' in local_var_params:
        path_params['token'] = local_var_params['token']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['api_key']  # noqa: E501

    return self.api_client.call_api(
        '/api2/json/removeUserAccount/{token}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='APIPlanSubscriptionOut',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def remove_user_account_on_behalf(self, api_key, **kwargs):  # noqa: E501
    """Remove (on behalf) a user account.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.remove_user_account_on_behalf(api_key, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str api_key: (required)
    :return: APIPlanSubscriptionOut
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # In both the synchronous and asynchronous cases the *_with_http_info
    # variant already produces the value to hand back: the deserialized
    # data, or the request thread when async_req=True.
    return self.remove_user_account_on_behalf_with_http_info(api_key, **kwargs)  # noqa: E501
def remove_user_account_on_behalf_with_http_info(self, api_key, **kwargs):  # noqa: E501
    """Remove (on behalf) a user account.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.remove_user_account_on_behalf_with_http_info(api_key, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str api_key: (required)
    :return: APIPlanSubscriptionOut
             If the method is called asynchronously,
             returns the request thread.
    """

    local_var_params = locals()

    all_params = ['api_key']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Reject unknown keyword arguments so typos fail loudly.
    # NOTE: plain dict.items() replaces six.iteritems() -- it works on both
    # Python 2 and 3 and drops the third-party dependency for this loop.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method remove_user_account_on_behalf" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'api_key' is set
    if ('api_key' not in local_var_params or
            local_var_params['api_key'] is None):
        raise ValueError("Missing the required parameter `api_key` when calling `remove_user_account_on_behalf`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    # The URL template uses camelCase ("apiKey") while the Python parameter
    # is snake_case ("api_key").
    if 'api_key' in local_var_params:
        path_params['apiKey'] = local_var_params['api_key']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['api_key']  # noqa: E501

    return self.api_client.call_api(
        '/api2/json/removeUserAccountOnBehalf/{apiKey}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='APIPlanSubscriptionOut',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def shutdown(self, **kwargs):  # noqa: E501
    """Stop learning and shutdown system.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.shutdown(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # This convenience wrapper always wants just the response data, never
    # the full HTTP envelope.
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info variant covers both modes itself: synchronously
    # it returns the data, and with async_req=True it returns the request
    # thread, so a single delegation handles both paths.
    return self.shutdown_with_http_info(**kwargs)  # noqa: E501
def shutdown_with_http_info(self, **kwargs):  # noqa: E501
    """Stop learning and shutdown system.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.shutdown_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Snapshot of the call locals; validated keyword arguments are merged
    # into it below and then consumed via .get() when building the request.
    local_var_params = locals()

    # The endpoint takes no API-specific parameters; only the generic
    # request options are accepted as keyword arguments.
    all_params = [
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]  # noqa: E501

    passed_kwargs = local_var_params.pop('kwargs')
    for key in passed_kwargs:
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method shutdown" % key
            )
    local_var_params.update(passed_kwargs)

    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # Authentication setting
    auth_settings = ['api_key']  # noqa: E501

    return self.api_client.call_api(
        '/api2/json/shutdown', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def software_version(self, **kwargs):  # noqa: E501
    """Get the current software version  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.software_version(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :return: SoftwareVersionOut
             If the method is called asynchronously,
             returns the request thread.
    """
    # This convenience wrapper always wants just the response data, never
    # the full HTTP envelope.
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info variant covers both modes itself: synchronously
    # it returns the data, and with async_req=True it returns the request
    # thread, so a single delegation handles both paths.
    return self.software_version_with_http_info(**kwargs)  # noqa: E501
def software_version_with_http_info(self, **kwargs):  # noqa: E501
    """Get the current software version  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.software_version_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :return: SoftwareVersionOut
             If the method is called asynchronously,
             returns the request thread.
    """
    # Snapshot of the call locals; validated keyword arguments are merged
    # into it below and then consumed via .get() when building the request.
    local_var_params = locals()

    # The endpoint takes no API-specific parameters; only the generic
    # request options are accepted as keyword arguments.
    all_params = [
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]  # noqa: E501

    passed_kwargs = local_var_params.pop('kwargs')
    for key in passed_kwargs:
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method software_version" % key
            )
    local_var_params.update(passed_kwargs)

    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['api_key']  # noqa: E501

    return self.api_client.call_api(
        '/api2/json/softwareVersion', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='SoftwareVersionOut',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def source_stats(self, source, **kwargs): # noqa: E501
"""Print basic source statistics. # noqa: E501
This method makes a synchronous HTTP request by default. | |
# (c) Continuum Analytics, Inc. / http://continuum.io
# All Rights Reserved
#
# conda is distributed under the terms of the BSD 3-clause license.
# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
from __future__ import print_function, division, absolute_import
import os
import sys
import shutil
import tarfile
import tempfile
from os.path import isdir, join, basename, exists, abspath
from difflib import get_close_matches
import logging
import errno
import conda.config as config
import conda.plan as plan
import conda.instructions as inst
import conda.misc as misc
from conda.api import get_index
from conda.cli import common
from conda.cli.find_commands import find_executable
from conda.resolve import NoPackagesFound, Resolve, MatchSpec
import conda.install as ci
log = logging.getLogger(__name__)
def install_tar(prefix, tar_path, verbose=False):
    """Install every conda package (.tar.bz2) bundled inside a tar archive.

    The archive at `tar_path` is unpacked into a scratch directory, all
    `.tar.bz2` files found anywhere in it are installed into the
    environment at `prefix`, and the scratch directory is removed.

    Args:
        prefix: path of the environment to install into.
        tar_path: path of the tar archive; exits via sys.exit if missing.
        verbose: forwarded to misc.install_local_packages.
    """
    if not exists(tar_path):
        sys.exit("File does not exist: %s" % tar_path)
    tmp_dir = tempfile.mkdtemp()
    try:
        # Context manager closes the archive even if extraction fails;
        # the finally clause guarantees the scratch dir is removed even if
        # extraction or installation raises (previously both leaked).
        with tarfile.open(tar_path, 'r') as t:
            t.extractall(path=tmp_dir)
        paths = [join(root, fn)
                 for root, dirs, files in os.walk(tmp_dir)
                 for fn in files
                 if fn.endswith('.tar.bz2')]
        misc.install_local_packages(prefix, paths, verbose=verbose)
    finally:
        shutil.rmtree(tmp_dir)
def check_prefix(prefix, json=False):
    """Exit with an error if `prefix` cannot be used for a new environment.

    Checks are evaluated in order and a later failing check overrides the
    message of an earlier one; only a single error is ever reported.
    """
    env_name = basename(prefix)
    checks = (
        (env_name.startswith('.'),
         "environment name cannot start with '.': %s" % env_name),
        (env_name == config.root_env_name,
         "'%s' is a reserved environment name" % env_name),
        (exists(prefix),
         "prefix already exists: %s" % prefix),
    )
    error = None
    for failed, message in checks:
        if failed:
            error = message
    if error:
        common.error_and_exit(error, json=json, error_type="ValueError")
def clone(src_arg, dst_prefix, json=False, quiet=False, index=None):
    """Clone an existing environment (given by path or name) into dst_prefix."""

    def resolve_source():
        # An argument containing a path separator is treated as a directory
        # path; anything else is looked up as an environment name. Both
        # failure branches exit the process via common.error_and_exit.
        if os.sep in src_arg:
            candidate = abspath(src_arg)
            if not isdir(candidate):
                common.error_and_exit('no such directory: %s' % src_arg,
                                      json=json,
                                      error_type="NoEnvironmentFound")
            return candidate
        candidate = common.find_prefix_name(src_arg)
        if candidate is None:
            common.error_and_exit('could not find environment: %s' % src_arg,
                                  json=json,
                                  error_type="NoEnvironmentFound")
        return candidate

    src_prefix = resolve_source()

    if not json:
        print("src_prefix: %r" % src_prefix)
        print("dst_prefix: %r" % dst_prefix)

    with common.json_progress_bars(json=json and not quiet):
        actions, untracked_files = misc.clone_env(src_prefix, dst_prefix,
                                                  verbose=not json,
                                                  quiet=quiet, index=index)

    if json:
        common.stdout_json_success(
            actions=actions,
            untracked_files=list(untracked_files),
            src_prefix=src_prefix,
            dst_prefix=dst_prefix)
def print_activate(arg):
    """Print shell instructions for activating and deactivating `arg`."""
    # Windows shells activate without `source`; POSIX shells need it.
    if sys.platform == 'win32':
        activate_line = "# > activate %s" % arg
    else:
        activate_line = "# $ source activate %s" % arg
    for line in ("#",
                 "# To activate this environment, use:",
                 activate_line,
                 "#",
                 "# To deactivate this environment, use:",
                 "# $ source deactivate",
                 "#"):
        print(line)
def get_revision(arg, json=False):
    """Return `arg` parsed as an integer revision number.

    Exits via common.error_and_exit when `arg` is not a valid integer.
    """
    try:
        revision = int(arg)
    except ValueError:
        common.error_and_exit("expected revision number, not: '%s'" % arg,
                              json=json,
                              error_type="ValueError")
    else:
        return revision
def install(args, parser, command='install'):
"""
conda install, conda update, and conda create
"""
newenv = bool(command == 'create')
if newenv:
common.ensure_name_or_prefix(args, command)
prefix = common.get_prefix(args, search=not newenv)
if newenv:
check_prefix(prefix, json=args.json)
if config.force_32bit and plan.is_root_prefix(prefix):
common.error_and_exit("cannot use CONDA_FORCE_32BIT=1 in root env")
if command == 'update':
if not args.file:
if not args.all and len(args.packages) == 0:
common.error_and_exit("""no package names supplied
# If you want to update to a newer version of Anaconda, type:
#
# $ conda update --prefix %s anaconda
""" % prefix,
json=args.json,
error_type="ValueError")
if command == 'update' and not args.all:
linked = ci.linked(prefix)
for name in args.packages:
common.arg2spec(name, json=args.json)
if '=' in name:
common.error_and_exit("Invalid package name: '%s'" % (name),
json=args.json,
error_type="ValueError")
if name not in set(ci.name_dist(d) for d in linked):
common.error_and_exit("package '%s' is not installed in %s" %
(name, prefix),
json=args.json,
error_type="ValueError")
if newenv and not args.no_default_packages:
default_packages = config.create_default_packages[:]
# Override defaults if they are specified at the command line
for default_pkg in config.create_default_packages:
if any(pkg.split('=')[0] == default_pkg for pkg in args.packages):
default_packages.remove(default_pkg)
args.packages.extend(default_packages)
else:
default_packages = []
common.ensure_override_channels_requires_channel(args)
channel_urls = args.channel or ()
specs = []
if args.file:
for fpath in args.file:
specs.extend(common.specs_from_url(fpath, json=args.json))
if '@EXPLICIT' in specs:
misc.explicit(specs, prefix)
return
elif getattr(args, 'all', False):
linked = ci.linked(prefix)
if not linked:
common.error_and_exit("There are no packages installed in the "
"prefix %s" % prefix)
for pkg in linked:
name, ver, build = pkg.rsplit('-', 2)
if name in getattr(args, '_skip', ['anaconda']):
continue
if name == 'python' and ver.startswith('2'):
# Oh Python 2...
specs.append('%s >=%s,<3' % (name, ver))
else:
specs.append('%s' % name)
specs.extend(common.specs_from_args(args.packages, json=args.json))
if command == 'install' and args.revision:
get_revision(args.revision, json=args.json)
elif not (newenv and args.clone):
common.check_specs(prefix, specs, json=args.json,
create=(command == 'create'))
num_cp = sum(s.endswith('.tar.bz2') for s in args.packages)
if num_cp:
if num_cp == len(args.packages):
misc.install_local_packages(prefix, args.packages,
verbose=not args.quiet)
return
else:
common.error_and_exit(
"cannot mix specifications with conda package filenames",
json=args.json,
error_type="ValueError")
# handle tar file containing conda packages
if len(args.packages) == 1:
tar_path = args.packages[0]
if tar_path.endswith('.tar'):
install_tar(prefix, tar_path, verbose=not args.quiet)
return
if args.use_local:
from conda.fetch import fetch_index
from conda.utils import url_path
try:
from conda_build.config import croot
except ImportError:
common.error_and_exit(
"you need to have 'conda-build >= 1.7.1' installed"
" to use the --use-local option",
json=args.json,
error_type="RuntimeError")
# remove the cache such that a refetch is made,
# this is necessary because we add the local build repo URL
fetch_index.cache = {}
if exists(croot):
channel_urls = [url_path(croot)] + list(channel_urls)
index = common.get_index_trap(channel_urls=channel_urls,
prepend=not args.override_channels,
use_cache=args.use_index_cache,
unknown=args.unknown,
json=args.json,
offline=args.offline)
if newenv and args.clone:
if set(args.packages) - set(default_packages):
common.error_and_exit('did not expect any arguments for --clone',
json=args.json,
error_type="ValueError")
clone(args.clone, prefix, json=args.json, quiet=args.quiet, index=index)
misc.append_env(prefix)
misc.touch_nonadmin(prefix)
if not args.json:
print_activate(args.name if args.name else prefix)
return
# Don't update packages that are already up-to-date
if command == 'update' and not (args.all or args.force):
r = Resolve(index)
orig_packages = args.packages[:]
for name in orig_packages:
installed_metadata = [ci.is_linked(prefix, dist)
for dist in linked]
vers_inst = [dist.rsplit('-', 2)[1] for dist in linked
if dist.rsplit('-', 2)[0] == name]
build_inst = [m['build_number'] for m in installed_metadata if
m['name'] == name]
try:
assert len(vers_inst) == 1, name
assert len(build_inst) == 1, name
except AssertionError as e:
if args.json:
common.exception_and_exit(e, json=True)
else:
raise
pkgs = sorted(r.get_pkgs(MatchSpec(name)))
if not pkgs:
# Shouldn't happen?
continue
latest = pkgs[-1]
if (latest.version == vers_inst[0] and
latest.build_number == build_inst[0]):
args.packages.remove(name)
if not args.packages:
from conda.cli.main_list import print_packages
if not args.json:
regex = '^(%s)$' % '|'.join(orig_packages)
print('# All requested packages already installed.')
print_packages(prefix, regex)
else:
common.stdout_json_success(
message='All requested packages already installed.')
return
if args.force:
args.no_deps = True
spec_names = set(s.split()[0] for s in specs)
if args.no_deps:
only_names = spec_names
else:
only_names = None
if not isdir(prefix) and not newenv:
if args.mkdir:
try:
os.makedirs(prefix)
except OSError:
common.error_and_exit("Error: could not create directory: %s" % prefix,
json=args.json,
error_type="OSError")
else:
common.error_and_exit("""\
environment does not exist: %s
#
# Use 'conda create' to create an environment before installing packages
# into it.
#""" % prefix,
json=args.json,
error_type="NoEnvironmentFound")
try:
if command == 'install' and args.revision:
actions = plan.revert_actions(prefix, get_revision(args.revision))
else:
with common.json_progress_bars(json=args.json and not args.quiet):
actions = plan.install_actions(prefix, index, specs,
force=args.force,
only_names=only_names,
pinned=args.pinned,
minimal_hint=args.alt_hint,
update_deps=args.update_deps)
if config.always_copy or args.copy:
new_link = []
for pkg in actions["LINK"]:
dist, pkgs_dir, lt = inst.split_linkarg(pkg)
lt = ci.LINK_COPY
new_link.append("%s %s %d" % (dist, pkgs_dir, lt))
actions["LINK"] = new_link
except NoPackagesFound as e:
error_message = e.args[0]
if command == 'update' and args.all:
# Packages not found here just means they were installed but
# cannot be found any more. Just skip them.
if not args.json:
print("Warning: %s, skipping" % error_message)
else:
# Not sure what to do here
pass
args._skip = getattr(args, '_skip', ['anaconda'])
for pkg in e.pkgs:
p = pkg.split()[0]
if p in args._skip:
# Avoid infinite recursion. This can happen if a spec
# comes from elsewhere, like --file
raise
args._skip.append(p)
return install(args, parser, command=command)
else:
packages = {index[fn]['name'] for fn in index}
for pkg in e.pkgs:
close = get_close_matches(pkg, packages, cutoff=0.7)
if close:
error_message += ("\n\nDid you mean one of these?"
"\n\n %s" % (', '.join(close)))
error_message += '\n\nYou can search for this package on anaconda.org with'
error_message += '\n\n anaconda search -t conda %s' % pkg
if len(e.pkgs) > 1:
# Note this currently only happens with dependencies not found
error_message += '\n\n (and similarly for the other packages)'
if not find_executable('anaconda', include_others=False):
error_message += '\n\nYou may need to install the anaconda-client command line client with'
error_message += '\n\n conda install anaconda-client'
pinned_specs = plan.get_pinned_specs(prefix)
if pinned_specs:
error_message += "\n\nNote that you have pinned specs in %s:" % join(prefix, 'conda-meta', 'pinned')
error_message += "\n\n %r" % pinned_specs
common.error_and_exit(error_message, json=args.json)
except SystemExit as e:
# Unsatisfiable package specifications/no such revision/import error
error_type = 'UnsatisfiableSpecifications'
if e.args and 'could not import' in e.args[0]:
error_type = 'ImportError'
common.exception_and_exit(e, json=args.json, newline=True,
error_text=False,
error_type=error_type)
if plan.nothing_to_do(actions):
from conda.cli.main_list import print_packages
if not args.json:
regex = '^(%s)$' % '|'.join(spec_names)
print('\n# All requested packages already installed.')
print_packages(prefix, regex)
else:
common.stdout_json_success(
message='All requested packages already installed.')
return
if not args.json:
print()
print("Package plan for installation in environment %s:" % prefix)
plan.display_actions(actions, index, show_channel_urls=args.show_channel_urls)
if command in {'install', 'update'}:
common.check_write(command, prefix)
if not args.json:
common.confirm_yn(args)
| |
`Tensor` of shape `[dim]`.
Corresponds to the mean reversion rate.
volatility: A real positive `Tensor` of the same `dtype` as
`mean_reversion` or a callable with the same specs as above.
Corresponds to the long run price variance.
notional: An optional `Tensor` of same dtype and compatible shape as
`strikes`specifying the notional amount for the underlying swap.
Default value: None in which case the notional is set to 1.
is_payer_swaption: A boolean `Tensor` of a shape compatible with `expiries`.
Indicates whether the swaption is a payer (if True) or a receiver
(if False) swaption. If not supplied, payer swaptions are assumed.
use_analytic_pricing: A Python boolean specifying if analytic valuation
should be performed. Analytic valuation is only supported for constant
`mean_reversion` and piecewise constant `volatility`. If the input is
`False`, then valuation using Monte-Carlo simulations is performed.
Default value: The default value is `True`.
num_samples: Positive scalar `int32` `Tensor`. The number of simulation
paths during Monte-Carlo valuation. This input is ignored during analytic
valuation.
Default value: The default value is 1.
random_type: Enum value of `RandomType`. The type of (quasi)-random
number generator to use to generate the simulation paths. This input is
relevant only for Monte-Carlo valuation and ignored during analytic
valuation.
Default value: `None` which maps to the standard pseudo-random numbers.
seed: Seed for the random number generator. The seed is only relevant if
`random_type` is one of
`[STATELESS, PSEUDO, HALTON_RANDOMIZED, PSEUDO_ANTITHETIC,
STATELESS_ANTITHETIC]`. For `PSEUDO`, `PSEUDO_ANTITHETIC` and
`HALTON_RANDOMIZED` the seed should be an Python integer. For
`STATELESS` and `STATELESS_ANTITHETIC `must be supplied as an integer
`Tensor` of shape `[2]`. This input is relevant only for Monte-Carlo
valuation and ignored during analytic valuation.
Default value: `None` which means no seed is set.
skip: `int32` 0-d `Tensor`. The number of initial points of the Sobol or
Halton sequence to skip. Used only when `random_type` is 'SOBOL',
'HALTON', or 'HALTON_RANDOMIZED', otherwise ignored.
Default value: `0`.
time_step: Scalar real `Tensor`. Maximal distance between time grid points
in Euler scheme. Relevant when Euler scheme is used for simulation. This
input is ignored during analytic valuation.
Default value: `None`.
dtype: The default dtype to use when converting values to `Tensor`s.
Default value: `None` which means that default dtypes inferred by
TensorFlow are used.
name: Python string. The name to give to the ops created by this function.
Default value: `None` which maps to the default name
`hw_swaption_price`.
Returns:
A `Tensor` of real dtype and shape expiries.shape + [dim] containing the
computed swaption prices. For swaptions that have. reset in the past
(expiries<0), the function sets the corresponding option prices to 0.0.
"""
# TODO(b/160061740): Extend the functionality to support mid-curve swaptions.
name = name or 'hw_swaption_price'
del floating_leg_daycount_fractions
with tf.name_scope(name):
expiries = tf.convert_to_tensor(expiries, dtype=dtype, name='expiries')
dtype = dtype or expiries.dtype
float_leg_start_times = tf.convert_to_tensor(
floating_leg_start_times, dtype=dtype, name='float_leg_start_times')
float_leg_end_times = tf.convert_to_tensor(
floating_leg_end_times, dtype=dtype, name='float_leg_end_times')
fixed_leg_payment_times = tf.convert_to_tensor(
fixed_leg_payment_times, dtype=dtype, name='fixed_leg_payment_times')
fixed_leg_daycount_fractions = tf.convert_to_tensor(
fixed_leg_daycount_fractions, dtype=dtype,
name='fixed_leg_daycount_fractions')
fixed_leg_coupon = tf.convert_to_tensor(
fixed_leg_coupon, dtype=dtype, name='fixed_leg_coupon')
notional = tf.convert_to_tensor(notional, dtype=dtype, name='notional')
notional = tf.expand_dims(
tf.broadcast_to(notional, expiries.shape), axis=-1)
if is_payer_swaption is None:
is_payer_swaption = True
is_payer_swaption = tf.convert_to_tensor(
is_payer_swaption, dtype=tf.bool, name='is_payer_swaption')
output_shape = expiries.shape.as_list() + [dim]
# Add a dimension corresponding to multiple cashflows in a swap
if expiries.shape.rank == fixed_leg_payment_times.shape.rank - 1:
expiries = tf.expand_dims(expiries, axis=-1)
elif expiries.shape.rank < fixed_leg_payment_times.shape.rank - 1:
raise ValueError('Swaption expiries not specified for all swaptions '
'in the batch. Expected rank {} but received {}.'.format(
fixed_leg_payment_times.shape.rank - 1,
expiries.shape.rank))
# Expected shape: batch_shape + [m], same as fixed_leg_payment_times.shape
# We need to explicitly use tf.repeat because we need to price
# batch_shape + [m] bond options with different strikes along the last
# dimension.
expiries = tf.repeat(
expiries, tf.shape(fixed_leg_payment_times)[-1], axis=-1)
if use_analytic_pricing:
return _analytic_valuation(expiries, float_leg_start_times,
float_leg_end_times, fixed_leg_payment_times,
fixed_leg_daycount_fractions,
fixed_leg_coupon, reference_rate_fn,
dim, mean_reversion, volatility, notional,
is_payer_swaption, output_shape, dtype,
name + '_analytic_valuation')
if time_step is None:
raise ValueError('`time_step` must be provided for simulation '
'based bond option valuation.')
# Monte-Carlo pricing
model = vector_hull_white.VectorHullWhiteModel(
dim,
mean_reversion,
volatility,
initial_discount_rate_fn=reference_rate_fn,
dtype=dtype)
def _sample_discount_curve_path_fn(times, curve_times, num_samples):
return model.sample_discount_curve_paths(
times=times,
curve_times=curve_times,
num_samples=num_samples,
random_type=random_type,
seed=seed,
skip=skip)
payoff_discount_factors, payoff_bond_price = (
swaption_util.discount_factors_and_bond_prices_from_samples(
expiries=expiries,
payment_times=fixed_leg_payment_times,
sample_discount_curve_paths_fn=_sample_discount_curve_path_fn,
num_samples=num_samples,
time_step=time_step,
dtype=dtype))
# Add an axis corresponding to `dim`
fixed_leg_pv = tf.expand_dims(
fixed_leg_coupon * fixed_leg_daycount_fractions,
axis=-1) * payoff_bond_price
# Sum fixed coupon payments within each swap
fixed_leg_pv = tf.math.reduce_sum(fixed_leg_pv, axis=-2)
float_leg_pv = 1.0 - payoff_bond_price[..., -1, :]
payoff_swap = payoff_discount_factors[..., -1, :] * (
float_leg_pv - fixed_leg_pv)
payoff_swap = tf.where(is_payer_swaption, payoff_swap, -1.0 * payoff_swap)
payoff_swaption = tf.math.maximum(payoff_swap, 0.0)
option_value = tf.reshape(
tf.math.reduce_mean(payoff_swaption, axis=0), output_shape)
return notional * option_value
def bermudan_swaption_price(*,
exercise_times,
floating_leg_start_times,
floating_leg_end_times,
fixed_leg_payment_times,
floating_leg_daycount_fractions,
fixed_leg_daycount_fractions,
fixed_leg_coupon,
reference_rate_fn,
dim,
mean_reversion,
volatility,
notional=None,
is_payer_swaption=None,
use_finite_difference=False,
lsm_basis=None,
num_samples=100,
random_type=None,
seed=None,
skip=0,
time_step=None,
time_step_finite_difference=None,
num_grid_points_finite_difference=100,
dtype=None,
name=None):
"""Calculates the price of Bermudan Swaptions using the Hull-White model.
A Bermudan Swaption is a contract that gives the holder an option to enter a
swap contract on a set of future exercise dates. The exercise dates are
typically the fixing dates (or a subset thereof) of the underlying swap. If
`T_N` denotes the final payoff date and `T_i, i = {1,...,n}` denote the set
of exercise dates, then if the option is exercised at `T_i`, the holder is
left with a swap with first fixing date equal to `T_i` and maturity `T_N`.
Simulation based pricing of Bermudan swaptions is performed using the least
squares Monte-carlo approach [1].
#### References:
[1]: <NAME>, <NAME>. Interest Rate Models-Theory and Practice.
Second Edition. 2007.
#### Example
The example shows how value a batch of 5-no-call-1 and 5-no-call-2
swaptions using the Hull-White model.
````python
import numpy as np
import tensorflow.compat.v2 as tf
import tf_quant_finance as tff
dtype = tf.float64
exercise_swaption_1 = [1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5]
exercise_swaption_2 = [2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.0]
exercise_times = [exercise_swaption_1, exercise_swaption_2]
float_leg_start_times_1y = [1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5]
float_leg_start_times_18m = [1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0]
float_leg_start_times_2y = [2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.0]
float_leg_start_times_30m = [2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.0, 5.0]
float_leg_start_times_3y = [3.0, 3.5, 4.0, 4.5, 5.0, 5.0, 5.0, 5.0]
float_leg_start_times_42m = [3.5, 4.0, 4.5, 5.0, 5.0, 5.0, 5.0, 5.0]
float_leg_start_times_4y = [4.0, 4.5, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0]
float_leg_start_times_54m = [4.5, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0]
float_leg_start_times_5y = [5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0]
float_leg_start_times_swaption_1 = [float_leg_start_times_1y,
float_leg_start_times_18m,
float_leg_start_times_2y,
float_leg_start_times_30m,
float_leg_start_times_3y,
float_leg_start_times_42m,
float_leg_start_times_4y,
float_leg_start_times_54m]
float_leg_start_times_swaption_2 = [float_leg_start_times_2y,
float_leg_start_times_30m,
float_leg_start_times_3y,
float_leg_start_times_42m,
float_leg_start_times_4y,
float_leg_start_times_54m,
float_leg_start_times_5y,
float_leg_start_times_5y]
float_leg_start_times = [float_leg_start_times_swaption_1,
float_leg_start_times_swaption_2]
float_leg_end_times = np.clip(np.array(float_leg_start_times) + 0.5, 0.0, 5.0)
fixed_leg_payment_times = float_leg_end_times
float_leg_daycount_fractions = (np.array(float_leg_end_times) -
np.array(float_leg_start_times))
fixed_leg_daycount_fractions = float_leg_daycount_fractions
fixed_leg_coupon = 0.011 * np.ones_like(fixed_leg_payment_times)
zero_rate_fn = lambda x: 0.01 * tf.ones_like(x, dtype=dtype)
price = bermudan_swaption_price(
exercise_times=exercise_times,
floating_leg_start_times=float_leg_start_times,
floating_leg_end_times=float_leg_end_times,
fixed_leg_payment_times=fixed_leg_payment_times,
floating_leg_daycount_fractions=float_leg_daycount_fractions,
fixed_leg_daycount_fractions=fixed_leg_daycount_fractions,
fixed_leg_coupon=fixed_leg_coupon,
reference_rate_fn=zero_rate_fn,
notional=100.,
dim=1,
mean_reversion=[0.03],
volatility=[0.01],
num_samples=1000000,
time_step=0.1,
random_type=tff.math.random.RandomType.PSEUDO_ANTITHETIC,
seed=0,
dtype=dtype)
# Expected value: [1.8913050118443016, 1.6618681421434984] # shape = (2,)
````
Args:
exercise_times: A real `Tensor` of any shape `batch_shape + [num_exercise]`
`and real dtype. The times corresponding to exercise dates of the
swaptions. `num_exercise` corresponds to the number of exercise dates for
the Bermudan swaption. The shape of this input determines the number (and
shape) of Bermudan swaptions to be priced and the shape of the output.
floating_leg_start_times: A real `Tensor` of the same dtype as
`exercise_times`. The times when accrual begins for each payment in the
floating leg upon exercise of the option. The shape of this input should
be `exercise_times.shape + [m]` where `m` denotes the number of floating
payments in each leg of the underlying swap until the swap maturity.
floating_leg_end_times: A real `Tensor` of the same dtype as
`exercise_times`. The times when accrual ends for each payment in the
floating leg upon exercise of the option. The shape of this input should
be `exercise_times.shape + [m]` where `m` denotes the number of floating
payments in each leg of the | |
# Lint as: python3
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for converting to/from the canonical form.
Note: Refer to `get_iterative_process_for_canonical_form()` for the meaning of
variable names used in this module.
"""
import collections
from typing import Callable
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.api import intrinsics
from tensorflow_federated.python.core.api import placements
from tensorflow_federated.python.core.backends.mapreduce import canonical_form
from tensorflow_federated.python.core.backends.mapreduce import transformations
from tensorflow_federated.python.core.impl import type_utils
from tensorflow_federated.python.core.impl import value_transformations
from tensorflow_federated.python.core.impl.compiler import building_block_factory
from tensorflow_federated.python.core.impl.compiler import building_blocks
from tensorflow_federated.python.core.impl.compiler import intrinsic_defs
from tensorflow_federated.python.core.impl.compiler import tree_analysis
from tensorflow_federated.python.core.impl.context_stack import context_stack_impl
from tensorflow_federated.python.core.impl.types import placement_literals
from tensorflow_federated.python.core.impl.wrappers import computation_wrapper_instances
from tensorflow_federated.python.core.templates import iterative_process
def get_iterative_process_for_canonical_form(cf):
  """Creates `tff.templates.IterativeProcess` from a canonical form.

  Args:
    cf: An instance of `tff.backends.mapreduce.CanonicalForm`.

  Returns:
    An instance of `tff.templates.IterativeProcess` that corresponds to `cf`.

  Raises:
    TypeError: If the arguments are of the wrong types.
  """
  py_typecheck.check_type(cf, canonical_form.CanonicalForm)

  @computations.federated_computation
  def init_computation():
    # Initial state: the result of `cf.initialize`, placed at the SERVER.
    return intrinsics.federated_value(cf.initialize(), placements.SERVER)

  @computations.federated_computation(init_computation.type_signature.result,
                                      computation_types.FederatedType(
                                          cf.work.type_signature.parameter[0],
                                          placements.CLIENTS))
  def next_computation(arg):
    """The logic of a single MapReduce processing round."""
    # Variable names follow the canonical-form convention (see module
    # docstring): `sN` values are SERVER-placed, `cN` values CLIENTS-placed.
    s1 = arg[0]  # server state
    c1 = arg[1]  # client-placed input (parameter of `cf.work`)
    s2 = intrinsics.federated_map(cf.prepare, s1)  # server-side preparation
    c2 = intrinsics.federated_broadcast(s2)  # broadcast prepared value
    c3 = intrinsics.federated_zip([c1, c2])  # pair client input with broadcast
    c4 = intrinsics.federated_map(cf.work, c3)  # per-client work
    c5 = c4[0]  # client results destined for the server ...
    c6 = c5[0]  # ... via federated_aggregate
    c7 = c5[1]  # ... via federated_secure_sum
    c8 = c4[1]  # client results returned in the round's output
    s3 = intrinsics.federated_aggregate(c6, cf.zero(), cf.accumulate, cf.merge,
                                        cf.report)
    s4 = intrinsics.federated_secure_sum(c7, cf.bitwidth())
    s5 = intrinsics.federated_zip([s3, s4])  # both aggregates together
    s6 = intrinsics.federated_zip([s1, s5])  # old state + aggregates
    s7 = intrinsics.federated_map(cf.update, s6)  # server update
    s8 = s7[0]  # new server state
    s9 = s7[1]  # server-side output of the round
    return s8, s9, c8

  return iterative_process.IterativeProcess(init_computation, next_computation)
def _check_len(
    target,
    length,
    err_fn: Callable[[str],
                     Exception] = transformations.CanonicalFormCompilationError,
):
  """Raises via `err_fn` unless `target` has exactly `length` elements."""
  py_typecheck.check_type(length, int)
  actual = len(target)
  if actual != length:
    raise err_fn('Expected length of {}, found {}.'.format(length, actual))
def _check_placement(
    target,
    placement: placement_literals.PlacementLiteral,
    err_fn: Callable[[str],
                     Exception] = transformations.CanonicalFormCompilationError,
):
  """Raises via `err_fn` unless `target` is federated and placed at `placement`."""
  py_typecheck.check_type(target, computation_types.FederatedType)
  py_typecheck.check_type(placement, placement_literals.PlacementLiteral)
  if target.placement != placement:
    message = (
        'Expected value with placement {}, found value of type {}.'.format(
            placement, target))
    raise err_fn(message)
def _check_type_equal(
    actual,
    expected,
    err_fn: Callable[[str],
                     Exception] = transformations.CanonicalFormCompilationError,
):
  """Raises `err_fn` when the two `computation_types.Type`s are not equal.

  Args:
    actual: The `computation_types.Type` that was observed.
    expected: The `computation_types.Type` that was required.
    err_fn: Callable mapping an error message to the exception to raise.
  """
  py_typecheck.check_type(actual, computation_types.Type)
  py_typecheck.check_type(expected, computation_types.Type)
  if actual == expected:
    return
  raise err_fn('Expected type of {}, found {}.'.format(expected, actual))
def _check_type(
    target,
    type_spec,
    err_fn: Callable[[str],
                     Exception] = transformations.CanonicalFormCompilationError,
):
  """Raises `err_fn` unless `target` is an instance of `type_spec`.

  Args:
    target: The object whose Python type is checked.
    type_spec: A Python `type` (or tuple of types) to check against.
    err_fn: Callable mapping an error message to the exception to raise.
  """
  py_typecheck.check_type(type_spec, type)
  if isinstance(target, type_spec):
    return
  raise err_fn('Expected type of {}, found {}.'.format(
      type_spec, type(target)))
def _check_type_is_no_arg_fn(
    target,
    err_fn: Callable[[str],
                     Exception] = transformations.CanonicalFormCompilationError,
):
  """Raises `err_fn` unless `target` is a functional type with no parameter.

  Args:
    target: The type signature expected to be a zero-argument
      `computation_types.FunctionType`.
    err_fn: Callable mapping an error message to the exception to raise.
  """
  _check_type(target, computation_types.FunctionType, err_fn)
  if target.parameter is None:
    return
  raise err_fn(('Expected function to take no argument, but found '
                'parameter of type {}.').format(target.parameter))
def _check_iterative_process_compatible_with_canonical_form(
    initialize_tree, next_tree):
  """Tests compatibility with `tff.backends.mapreduce.CanonicalForm`.

  Args:
    initialize_tree: An instance of `building_blocks.ComputationBuildingBlock`
      representing the `initialize` component of an
      `tff.templates.IterativeProcess`.
    next_tree: An instance of `building_blocks.ComputationBuildingBlock`
      representing the `next` component of an `tff.templates.IterativeProcess`.

  Raises:
    TypeError: If the arguments are of the wrong types.
  """
  # `initialize` must be a no-argument function producing a server-placed
  # federated value.
  py_typecheck.check_type(initialize_tree,
                          building_blocks.ComputationBuildingBlock)
  init_tree_ty = initialize_tree.type_signature
  _check_type_is_no_arg_fn(init_tree_ty, TypeError)
  _check_type(init_tree_ty.result, computation_types.FederatedType, TypeError)
  _check_placement(init_tree_ty.result, placements.SERVER, TypeError)
  # `next` must be a function of a 2-tuple (server state, client data) whose
  # result is a 2- or 3-tuple (new server state, metrics[, client output]).
  # (The original code checked the parameter length twice; the duplicate
  # check has been removed.)
  py_typecheck.check_type(next_tree, building_blocks.ComputationBuildingBlock)
  py_typecheck.check_type(next_tree.type_signature,
                          computation_types.FunctionType)
  py_typecheck.check_type(next_tree.type_signature.parameter,
                          computation_types.NamedTupleType)
  py_typecheck.check_len(next_tree.type_signature.parameter, 2)
  py_typecheck.check_type(next_tree.type_signature.result,
                          computation_types.NamedTupleType)
  next_result_len = len(next_tree.type_signature.result)
  if next_result_len != 2 and next_result_len != 3:
    raise TypeError(
        'Expected length of 2 or 3, found {}.'.format(next_result_len))
def _create_next_with_fake_client_output(tree):
  r"""Appends an empty, clients-placed output to the result of `tree`.

  This function returns the AST:

  Lambda
  |
  [Comp, Comp, Tuple]
                |
                []

  where `Lambda` reuses the parameter of `tree`, the first two `Comp`s are the
  two elements of the result of `tree`, and the empty `Tuple` (federated at
  `placements.CLIENTS`) stands in for the missing client output.

  This function is intended to be used by
  `get_canonical_form_for_iterative_process` when no client output is returned
  by `tree` (which represents the `next` function of the
  `tff.templates.IterativeProcess`). It performs no validation of its own: the
  caller is expected to have already checked that `tree` has the expected
  structure and returns no client output.

  Args:
    tree: An instance of `building_blocks.ComputationBuildingBlock`.

  Returns:
    A new `building_blocks.ComputationBuildingBlock` representing a next
    computation with a fake client output.
  """
  # Pull out the two existing result elements, either directly from a literal
  # tuple or via explicit selections.
  if isinstance(tree.result, building_blocks.Tuple):
    server_state = tree.result[0]
    server_output = tree.result[1]
  else:
    server_state = building_blocks.Selection(tree.result, index=0)
    server_output = building_blocks.Selection(tree.result, index=1)
  fake_client_output = building_block_factory.create_federated_value(
      building_blocks.Tuple([]), placements.CLIENTS)
  result = building_blocks.Tuple(
      [server_state, server_output, fake_client_output])
  return building_blocks.Lambda(tree.parameter_name, tree.parameter_type,
                                result)
def _create_before_and_after_broadcast_for_no_broadcast(tree):
  r"""Creates a before and after broadcast computations for the given `tree`.
  This function returns the two ASTs:
  Lambda
  |
  Tuple
  |
  []
  Lambda(x)
  |
  Call
  /    \
  Comp   Sel(0)
         /
  Ref(x)
  The first AST is an empty structure that has a type signature satisfying the
  requirements of before broadcast.
  In the second AST, `Comp` is `tree`; `Lambda` has a type signature satisfying
  the requirements of after broadcast; and the argument passed to `Comp` is a
  selection from the parameter of `Lambda` which intentionally drops `c2` on the
  floor.
  This function is intended to be used by
  `get_canonical_form_for_iterative_process` to create before and after
  broadcast computations for the given `tree` when there is no
  `intrinsic_defs.FEDERATED_BROADCAST` in `tree`. As a result, this function
  does not assert that there is no `intrinsic_defs.FEDERATED_BROADCAST` in
  `tree` and it does not assert that `tree` has the expected structure, the
  caller is expected to perform these checks before calling this function.
  Args:
    tree: An instance of `building_blocks.ComputationBuildingBlock`.
  Returns:
    A pair of the form `(before, after)`, where each of `before` and `after`
    is a `tff_framework.ComputationBuildingBlock` that represents a part of the
    result as specified by
    `transformations.force_align_and_split_by_intrinsics`.
  """
  name_generator = building_block_factory.unique_name_generator(tree)
  # "before": a lambda with the same parameter type as `tree` that returns an
  # empty server-placed tuple, i.e. nothing is actually broadcast.
  parameter_name = next(name_generator)
  empty_tuple = building_blocks.Tuple([])
  value = building_block_factory.create_federated_value(empty_tuple,
                                                        placements.SERVER)
  before_broadcast = building_blocks.Lambda(parameter_name,
                                            tree.type_signature.parameter,
                                            value)
  # "after": takes `<original parameter, broadcast result>`, forwards only the
  # original parameter (Sel index=0) to `tree`, discarding the (empty)
  # broadcast result `c2`.
  parameter_name = next(name_generator)
  type_signature = computation_types.FederatedType(
      before_broadcast.type_signature.result.member, placements.CLIENTS)
  parameter_type = computation_types.NamedTupleType(
      [tree.type_signature.parameter, type_signature])
  ref = building_blocks.Reference(parameter_name, parameter_type)
  arg = building_blocks.Selection(ref, index=0)
  call = building_blocks.Call(tree, arg)
  after_broadcast = building_blocks.Lambda(ref.name, ref.type_signature, call)
  return before_broadcast, after_broadcast
def _create_before_and_after_aggregate_for_no_federated_aggregate(tree):
r"""Creates a before and after aggregate computations for the given `tree`.
This function returns the two ASTs:
Lambda
|
Tuple
|
[Tuple, Comp]
|
[Tuple, [], Lambda, Lambda, Lambda]
| | | |
[] [] [] []
Lambda(x)
|
Call
/ \
Comp Tuple
|
[Sel(0), Sel(1)]
/ /
Ref(x) Sel(1)
/
Ref(x)
In the first AST, the second element returned by `Lambda`, `Comp`, is the
result of the before aggregate returned by force aligning and splitting `tree`
by `intrinsic_defs.FEDERATED_SECURE_SUM.uri` and the first element returned by
`Lambda` is an empty structure that represents the argument to the federated
aggregate intrinsic. Therefore, the first AST has a type signature satisfying
the requirements of before aggregate.
In the second AST, `Comp` is the after aggregate returned by force aligning
and splitting `tree` by intrinsic_defs.FEDERATED_SECURE_SUM.uri; `Lambda` has
a type signature satisfying the requirements of after aggregate; and the
argument passed to `Comp` is a selection from the parameter of `Lambda` which
intentionally drops `s3` on the floor.
This function is intended to be used by
`get_canonical_form_for_iterative_process` to create before and after
broadcast computations for the given `tree` when there is no
`intrinsic_defs.FEDERATED_AGGREGATE` in `tree`. As a result, this function
does not assert that there is no `intrinsic_defs.FEDERATED_AGGREGATE` in
`tree` and it does not assert that `tree` has the expected structure, the
caller is expected to perform these checks before calling this function.
Args:
tree: An instance of `building_blocks.ComputationBuildingBlock`.
Returns:
A pair of the form `(before, after)`, where each of `before` and `after`
is a `tff_framework.ComputationBuildingBlock` that represents a part of the
result as specified by
`transformations.force_align_and_split_by_intrinsics`.
"""
name_generator = building_block_factory.unique_name_generator(tree)
before_aggregate, after_aggregate = (
transformations.force_align_and_split_by_intrinsics(
tree, [intrinsic_defs.FEDERATED_SECURE_SUM.uri]))
def _create_empty_function(type_elements):
ref_name = next(name_generator)
ref_type = computation_types.NamedTupleType(type_elements)
ref = building_blocks.Reference(ref_name, ref_type)
empty_tuple = building_blocks.Tuple([])
return building_blocks.Lambda(ref.name, ref.type_signature, empty_tuple)
empty_tuple = building_blocks.Tuple([])
value = building_block_factory.create_federated_value(empty_tuple,
placements.CLIENTS)
zero = empty_tuple
accumulate = _create_empty_function([[], []])
merge = _create_empty_function([[], []])
report = _create_empty_function([])
args = building_blocks.Tuple([value, zero, accumulate, merge, report])
result = building_blocks.Tuple([args, before_aggregate.result])
before_aggregate = building_blocks.Lambda(before_aggregate.parameter_name,
before_aggregate.parameter_type,
result)
ref_name = next(name_generator)
s3_type = computation_types.FederatedType([], placements.SERVER)
ref_type = computation_types.NamedTupleType([
after_aggregate.parameter_type[0],
computation_types.NamedTupleType(
[s3_type, after_aggregate.parameter_type[1]]),
])
ref = building_blocks.Reference(ref_name, ref_type)
sel_arg = building_blocks.Selection(ref, index=0)
sel = building_blocks.Selection(ref, index=1)
sel_s4 = building_blocks.Selection(sel, | |
<filename>test/integration/component/test_vpc_routers.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Component tests for VPC - Router Operations
"""
#Import Local Modules
import marvin
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.integration.lib.utils import *
from marvin.integration.lib.base import *
from marvin.integration.lib.common import *
from marvin.remoteSSHClient import remoteSSHClient
import datetime
class Services:
    """Test VPC Router services
    """

    def __init__(self):
        # Each configuration section is built as a named local first and the
        # sections are then assembled into the single `services` dictionary
        # that the tests consume.
        account = {
            "email": "<EMAIL>",
            "firstname": "Test",
            "lastname": "User",
            "username": "test",
            # Random characters are appended for unique
            # username
            "password": "password",
        }
        tiny_offering = {
            "name": "Tiny Instance",
            "displaytext": "Tiny Instance",
            "cpunumber": 1,
            "cpuspeed": 100,
            "memory": 128,
        }
        small_system_offering = {
            "name": "Small Instance",
            "displaytext": "Small Instance",
            "cpunumber": 1,
            "cpuspeed": 100,
            "memory": 256,
            "issystem": 'true',
        }
        # VPC network offering including load balancing.
        vpc_net_offering = {
            "name": 'VPC Network offering',
            "displaytext": 'VPC Network off',
            "guestiptype": 'Isolated',
            "supportedservices": 'Vpn,Dhcp,Dns,SourceNat,PortForwarding,Lb,UserData,StaticNat,NetworkACL',
            "traffictype": 'GUEST',
            "availability": 'Optional',
            "useVpc": 'on',
            "serviceProviderList": {
                "Vpn": 'VpcVirtualRouter',
                "Dhcp": 'VpcVirtualRouter',
                "Dns": 'VpcVirtualRouter',
                "SourceNat": 'VpcVirtualRouter',
                "PortForwarding": 'VpcVirtualRouter',
                "Lb": 'VpcVirtualRouter',
                "UserData": 'VpcVirtualRouter',
                "StaticNat": 'VpcVirtualRouter',
                "NetworkACL": 'VpcVirtualRouter'
            },
        }
        # Same offering without the Lb service.
        vpc_net_offering_no_lb = {
            "name": 'VPC Network offering',
            "displaytext": 'VPC Network off',
            "guestiptype": 'Isolated',
            "supportedservices": 'Vpn,Dhcp,Dns,SourceNat,PortForwarding,UserData,StaticNat,NetworkACL',
            "traffictype": 'GUEST',
            "availability": 'Optional',
            "useVpc": 'on',
            "serviceProviderList": {
                "Vpn": 'VpcVirtualRouter',
                "Dhcp": 'VpcVirtualRouter',
                "Dns": 'VpcVirtualRouter',
                "SourceNat": 'VpcVirtualRouter',
                "PortForwarding": 'VpcVirtualRouter',
                "UserData": 'VpcVirtualRouter',
                "StaticNat": 'VpcVirtualRouter',
                "NetworkACL": 'VpcVirtualRouter'
            },
        }
        vpc_offering = {
            "name": 'VPC off',
            "displaytext": 'VPC off',
            "supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Lb,UserData,StaticNat',
        }
        vpc = {
            "name": "TestVPC",
            "displaytext": "TestVPC",
            "cidr": '10.0.0.1/24'
        }
        network = {
            "name": "Test Network",
            "displaytext": "Test Network",
            "netmask": '255.255.255.0'
        }
        lbrule = {
            "name": "SSH",
            "alg": "leastconn",
            # Algorithm used for load balancing
            "privateport": 22,
            "publicport": 2222,
            "openfirewall": False,
            "startport": 2222,
            "endport": 2222,
            "protocol": "TCP",
            "cidrlist": '0.0.0.0/0',
        }
        natrule = {
            "privateport": 22,
            "publicport": 22,
            "startport": 22,
            "endport": 22,
            "protocol": "TCP",
            "cidrlist": '0.0.0.0/0',
        }
        fw_rule = {
            "startport": 1,
            "endport": 6000,
            "cidr": '0.0.0.0/0',
            # Any network (For creating FW rule)
            "protocol": "TCP"
        }
        http_rule = {
            "startport": 80,
            "endport": 80,
            "cidrlist": '0.0.0.0/0',
            "protocol": "TCP"
        }
        virtual_machine = {
            "displayname": "Test VM",
            "username": "root",
            "password": "password",
            "ssh_port": 22,
            "hypervisor": 'XenServer',
            # Hypervisor type should be same as
            # hypervisor type of cluster
            "privateport": 22,
            "publicport": 22,
            "protocol": 'TCP',
        }
        self.services = {
            "account": account,
            "service_offering": tiny_offering,
            "service_offering_new": small_system_offering,
            "network_offering": vpc_net_offering,
            "network_offering_no_lb": vpc_net_offering_no_lb,
            "vpc_offering": vpc_offering,
            "vpc": vpc,
            "network": network,
            "lbrule": lbrule,
            "natrule": natrule,
            "fw_rule": fw_rule,
            "http_rule": http_rule,
            "virtual_machine": virtual_machine,
            "ostype": 'CentOS 5.3 (64-bit)',
            # Cent OS 5.3 (64 bit)
            "sleep": 60,
            "timeout": 10,
            "mode": 'advanced'
        }
class TestVPCRoutersBasic(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.apiclient = super(
TestVPCRoutersBasic,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.apiclient, cls.services)
cls.zone = get_zone(cls.apiclient, cls.services)
cls.template = get_template(
cls.apiclient,
cls.zone.id,
cls.services["ostype"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = cls.template.id
cls.service_offering = ServiceOffering.create(
cls.apiclient,
cls.services["service_offering"]
)
cls.vpc_off = VpcOffering.create(
cls.apiclient,
cls.services["vpc_offering"]
)
cls.vpc_off.update(cls.apiclient, state='Enabled')
cls.account = Account.create(
cls.apiclient,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
cls._cleanup = [cls.account]
cls._cleanup.append(cls.vpc_off)
#cls.debug("Enabling the VPC offering created")
cls.vpc_off.update(cls.apiclient, state='Enabled')
#cls.debug("creating a VPC network in the account: %s" %
# cls.account.name)
cls.services["vpc"]["cidr"] = '10.1.1.1/16'
cls.vpc = VPC.create(
cls.apiclient,
cls.services["vpc"],
vpcofferingid=cls.vpc_off.id,
zoneid=cls.zone.id,
account=cls.account.name,
domainid=cls.account.domainid
)
cls._cleanup.append(cls.service_offering)
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.apiclient, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
return
    def tearDown(self):
        # No per-test resources are created; class-level resources are
        # released in tearDownClass.
        return
def validate_vpc_offering(self, vpc_offering):
"""Validates the VPC offering"""
self.debug("Check if the VPC offering is created successfully?")
vpc_offs = VpcOffering.list(
self.apiclient,
id=vpc_offering.id
)
self.assertEqual(
isinstance(vpc_offs, list),
True,
"List VPC offerings should return a valid list"
)
self.assertEqual(
vpc_offering.name,
vpc_offs[0].name,
"Name of the VPC offering should match with listVPCOff data"
)
self.debug(
"VPC offering is created successfully - %s" %
vpc_offering.name)
return
def validate_vpc_network(self, network, state=None):
"""Validates the VPC network"""
self.debug("Check if the VPC network is created successfully?")
vpc_networks = VPC.list(
self.apiclient,
id=network.id
)
self.assertEqual(
isinstance(vpc_networks, list),
True,
"List VPC network should return a valid list"
)
self.assertEqual(
network.name,
vpc_networks[0].name,
"Name of the VPC network should match with listVPC data"
)
if state:
self.assertEqual(
vpc_networks[0].state,
state,
"VPC state should be '%s'" % state
)
self.debug("VPC network validated - %s" % network.name)
return
def migrate_router(self, router):
""" Migrate the router """
self.debug("Checking if the host is available for migration?")
hosts = Host.list(self.apiclient, zoneid=self.zone.id, type='Routing')
self.assertEqual(
isinstance(hosts, list),
True,
"List hosts should return a valid list"
)
if len(hosts) < 2:
raise unittest.SkipTest(
"No host available for migration. Test requires atleast 2 hosts")
# Remove the host of current VM from the hosts list
hosts[:] = [host for host in hosts if host.id != router.hostid]
host = hosts[0]
self.debug("Validating if the network rules work properly or not?")
self.debug("Migrating VM-ID: %s from %s to Host: %s" % (
router.id,
router.hostid,
host.id
))
try:
#Migrate the router
cmd = migrateSystemVm.migrateSystemVmCmd()
cmd.isAsync = "false"
cmd.hostid = host.id
cmd.virtualmachineid = router.id
self.apiclient.migrateSystemVm(cmd)
except Exception as e:
self.fail("Failed to migrate instance, %s" % e)
self.debug("Waiting for Router mgiration ....")
time.sleep(240)
#List routers to check state of router
router_response = list_routers(
self.apiclient,
id=router.id
)
self.assertEqual(
isinstance(router_response, list),
True,
"Check list response returns a valid list"
)
router.hostid = router_response[0].hostid
self.assertEqual(router.hostid, host.id, "Migration to host %s failed. The router host is"
" still %s" % (host.id, router.hostid))
return
    @attr(tags=["advanced", "intervlan"])
    def test_01_stop_start_router_after_creating_vpc(self):
        """ Test to stop and start router after creation of VPC
        """
        # Validate following:
        # 1. Create a VPC with cidr - 10.1.1.1/16
        # 2. Stop the VPC Virtual Router which is created as a result of VPC creation.
        # 3. Start the Stopped VPC Virtual Router
        # The VPC itself was created in setUpClass; only validate it here.
        self.validate_vpc_offering(self.vpc_off)
        self.validate_vpc_network(self.vpc)
        # Stop the VPC Router
        routers = Router.list(
            self.apiclient,
            account=self.account.name,
            domainid=self.account.domainid,
            listall=True
        )
        self.assertEqual(
            isinstance(routers, list),
            True,
            "List Routers should return a valid list"
        )
        router = routers[0]
        self.debug("Stopping the router with ID: %s" % router.id)
        #Stop the router
        cmd = stopRouter.stopRouterCmd()
        cmd.id = router.id
        self.apiclient.stopRouter(cmd)
        #List routers to check state of router
        router_response = list_routers(
            self.apiclient,
            id=router.id
        )
        self.assertEqual(
            isinstance(router_response, list),
            True,
            "Check list response returns a valid list"
        )
        #List router should have router in stopped state
        self.assertEqual(
            router_response[0].state,
            'Stopped',
            "Check list router response for router state"
        )
        self.debug("Stopped the router with ID: %s" % router.id)
        # Start The Router
        self.debug("Starting the router with ID: %s" % router.id)
        cmd = startRouter.startRouterCmd()
        cmd.id = router.id
        self.apiclient.startRouter(cmd)
        #List routers to check state of router
        router_response = list_routers(
            self.apiclient,
            id=router.id
        )
        self.assertEqual(
            isinstance(router_response, list),
            True,
            "Check list response returns a valid list"
        )
        #List router should have router in running state
        self.assertEqual(
            router_response[0].state,
            'Running',
            "Check list router response for router state"
        )
        self.debug("Started the router with ID: %s" % router.id)
        return
    @attr(tags=["advanced", "intervlan"])
    def test_02_reboot_router_after_creating_vpc(self):
        """ Test to reboot the router after creating a VPC
        """
        # Validate the following
        # 1. Create a VPC with cidr - 10.1.1.1/16
        # 2. Reboot the VPC Virtual Router which is created as a result of VPC creation.
        # The VPC itself was created in setUpClass; only validate it here.
        self.validate_vpc_offering(self.vpc_off)
        self.validate_vpc_network(self.vpc)
        routers = Router.list(
            self.apiclient,
            account=self.account.name,
            domainid=self.account.domainid,
            listall=True
        )
        self.assertEqual(
            isinstance(routers, list),
            True,
            "List Routers should return a valid list"
        )
        router = routers[0]
        self.debug("Rebooting the router ...")
        #Reboot the router
        cmd = rebootRouter.rebootRouterCmd()
        cmd.id = router.id
        self.apiclient.rebootRouter(cmd)
        #List routers to check state of router
        router_response = list_routers(
            self.apiclient,
            id=router.id
        )
        self.assertEqual(
            isinstance(router_response, list),
            True,
            "Check list response returns a valid list"
        )
        #List router should have router in running state and same public IP
        self.assertEqual(
            router_response[0].state,
            'Running',
            "Check list router response for router state"
        )
        return
@attr(tags=["advanced", "intervlan"])
def test_03_migrate_router_after_creating_vpc(self):
""" Test migration of router to another host after creating VPC """
self.validate_vpc_offering(self.vpc_off)
self.validate_vpc_network(self.vpc)
routers = Router.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid,
listall=True
)
self.assertEqual(
isinstance(routers, list),
| |
!= type(item2):
return False
# Handle Mapping objects (dict)
if isinstance(item1, Mapping):
# Second item must be Mapping
if not isinstance(item2, Mapping):
return False
# Items must have the same number of elements
if not len(item1) == len(item2):
return False
# Keys must be the same
if not insensitive_comparison(list(item1.keys()), list(item2.keys()),
type_insensitive=True):
return False
# Each key's value must be the same
# We can just check item1.items because the keys are the same
for key, val in item1.items():
if not insensitive_comparison(item1[key], item2[key],
type_insensitive=type_insensitive,
string_insensitive=string_insensitive):
return False
# Keys and values are the same
return True
# Handle strings
elif isinstance(item1, str):
# Second item must be string
if not isinstance(item2, str):
return False
# Items must have the same number of elements (except string_insensitive)
if not len(item1) == len(item2) and not string_insensitive:
return False
# If we're insensitive to case, spaces, and order, compare characters
if string_insensitive:
# If the string is one character long, skip additional comparison
if len(item1) <= 1:
return item1.lower() == item2.lower()
# Make strings into containers (lists) and discard whitespace
item1_list = [c for c in item1.lower() if not c.isspace()]
item2_list = [c for c in item2.lower() if not c.isspace()]
# The insensitive args shouldn't matter, but they're here just in case
return insensitive_comparison(item1_list, item2_list,
type_insensitive=type_insensitive,
string_insensitive=string_insensitive)
# Otherwise, case and order matter
else:
return item1 == item2
# Handle other Iterable Containers
elif isinstance(item1, Container) and isinstance(item1, Iterable):
# Second item must be an Iterable Container
if not isinstance(item2, Container) or not isinstance(item2, Iterable):
return False
# Items must have the same number of elements
if not len(item1) == len(item2):
return False
# Every element in item1 must be in item2, and vice-versa
# Painfully slow, but unavoidable for deep comparison
# Each match in item1 removes the corresponding element from item2_copy
# If they're the same, item2_copy should be empty at the end,
# unless a .remove() failed, in which case we have to re-match using item2
item2_copy = list(deepcopy(item2))
remove_failed = False
for elem in item1:
matched = False
# Try every element
for candidate in item2:
# If comparison succeeds, flag a match, remove match from copy, and dump out
if insensitive_comparison(elem, candidate,
type_insensitive=type_insensitive,
string_insensitive=string_insensitive):
matched = True
try:
item2_copy.remove(candidate)
except ValueError: # list.remove(x): x not in list
remove_failed = True
break
# One failure indicates unequivalence
if not matched:
return False
# If all removes succeeded, we can shortcut checking all item2 elements in item1
if not remove_failed:
# If the Containers are equivalent, all elements in item2_copy should be removed
# Otherwise
return len(item2_copy) == 0
# If something failed, we have to verify all of item2
# We can't assume item2 != item1, because removal is comparative
else:
for elem in item2:
matched = False
# Try every element
for candidate in item1:
# If comparison succeeds, flag a match, remove match from copy, and dump out
if insensitive_comparison(elem, candidate,
type_insensitive=type_insensitive,
string_insensitive=string_insensitive):
matched = True
break
# One failure indicates unequivalence
if not matched:
return False
# All elements have a match
return True
# Handle otherwise unhandled type (catchall)
else:
return item1 == item2
def expand_jsonschema(schema, base_uri=None, definitions=None, resolver=None):
    """Expand references in a JSONSchema and return the dereferenced schema.
    Note:
        This function only dereferences simple ``$ref`` values. It does not
        dereference ``$ref`` values that are sufficiently complex.
        This tool is not exhaustive for all valid JSONSchemas.
    Arguments:
        schema (dict): The JSONSchema to dereference.
        base_uri (str): The base URI to the schema files, or a local path to the schema files.
                Required if ``resolver`` is not supplied (``base_uri`` is preferable).
        definitions (dict): Referenced definitions to start. Fully optional.
                **Default:** ``None``, to automatically populate definitions.
        resolver (jsonschema.RefResolver): The RefResolver to use in resolving ``$ref``
                values. Generally should not be set by users.
                **Default:** ``None``.
    Returns:
        dict: The dereferenced schema.
    """
    if not isinstance(schema, dict):
        return schema  # No-op on non-dict
    if resolver is None and base_uri is None:
        raise ValueError("base_uri is a required argument.")
    # Create RefResolver
    elif resolver is None:
        if os.path.exists(base_uri):
            # os.path.abspath drops a trailing slash; the conditional re-adds
            # it (the condition sees the pre-assignment base_uri) so the
            # resolver still treats the URI as a directory.
            base_uri = "{}{}{}".format(
                "file://" if not base_uri.startswith("file://") else "",
                os.path.abspath(base_uri),
                "/" if base_uri.endswith("/") else "")
        resolver = jsonschema.RefResolver(base_uri, None)
    if definitions is None:
        definitions = {}
    # Save schema's definitions
    # Could result in duplicate definitions, which has no effect
    if schema.get("definitions"):
        definitions = dict_merge(schema["definitions"], definitions)
        definitions = expand_jsonschema(definitions, definitions=definitions, resolver=resolver)
    # Re-serialize and re-scan after every pass, because a dereferenced value
    # may itself contain further $refs.
    # NOTE(review): a circular $ref chain would make this loop run forever.
    while "$ref" in json.dumps(schema):
        new_schema = {}
        for key, val in schema.items():
            if key == "$ref":
                # $ref is supposed to take precedence, and effectively overwrite
                # other keys present, so we can make new_schema exactly the $ref value
                filename, intra_path = val.split("#")
                intra_parts = [x for x in intra_path.split("/") if x]
                # Filename ref refers to external file - resolve with RefResolver
                if filename:
                    ref_schema = resolver.resolve(filename)[1]
                    '''
                    with open(os.path.join(base_path, filename)) as schema_file:
                        ref_schema = json.load(schema_file)
                    '''
                    if ref_schema.get("definitions"):
                        definitions = dict_merge(ref_schema["definitions"], definitions)
                        definitions = expand_jsonschema(definitions, base_uri, definitions)
                    # Walk the intra-document path (e.g. "/definitions/foo").
                    for path_part in intra_parts:
                        ref_schema = ref_schema[path_part]
                    # new_schema[intra_parts[-1]] = ref_schema
                    new_schema = ref_schema
                # Other refs should be in definitions block
                else:
                    if intra_parts[0] != "definitions" or len(intra_parts) != 2:
                        raise ValueError("Invalid/complex $ref: {}".format(intra_parts))
                    # new_schema[intra_parts[-1]] = definitions.get(intra_parts[1], "NONE")
                    new_schema = definitions.get(intra_parts[1], None)
                    if new_schema is None:
                        raise ValueError("Definition missing: {}".format(intra_parts))
            else:
                # Non-$ref values are expanded recursively.
                new_schema[key] = expand_jsonschema(val, definitions=definitions,
                                                    resolver=resolver)
        schema = new_schema
    return schema
def condense_jsonschema(schema, include_containers=True, list_items=True):
    """Condense a JSONSchema into a dict of dot-notated data fields and data types.

    All JSONSchema directives (``required``, ``additionalProperties``, ...) are
    stripped, leaving only the fields that could actually appear in valid data.

    Caution:
        This tool is not exhaustive and will not work correctly on all
        JSONSchemas. Objects nested in arrays are not handled correctly, and
        fields without a listed ``type`` are skipped. ``$ref`` elements are
        NOT expanded; run ``expand_jsonschema()`` first if needed.

    Arguments:
        schema (dict): The JSONSchema to condense.
        include_containers (bool): List containers (objects, arrays) in
                addition to their fields? **Default**: ``True``.
        list_items (bool): Keep the ``items`` component (array marker) in the
                dotted field names? **Default**: ``True``.

    Returns:
        dict: Mapping of dot-notated data field to data type (as a string).
    """
    condensed = {}
    # TODO: Make this logic more robust (and less hacky).
    # Generally works for MDF purposes; will explode on complex JSONSchemas.
    for raw_field, field_type in flatten_json(schema, True).items():
        if not raw_field.endswith("type"):
            continue
        if not include_containers and field_type in ("object", "array"):
            continue
        clean_field = raw_field.replace("properties.", "").replace(".type", "")
        if not list_items:
            clean_field = clean_field.replace(".items", "")
        condensed[clean_field] = str(field_type)
    return condensed
def prettify_jsonschema(root, **kwargs):
"""Prettify a JSONSchema. Pretty-yield instead of pretty-print.
Caution:
This utility is not robust! It is intended to work only with
a subset of common JSONSchema patterns (mostly for MDF schemas)
and does not correctly prettify all valid JSONSchemas.
Use with caution.
Arguments:
root (dict): The schema to prettify.
Keyword Arguments:
num_indent_spaces (int): The number of spaces to consider one indentation level.
**Default:** ``4``
bullet (bool or str): Will prepend the character given as a bullet to properties.
When ``True``, will use a dash. When ``False``, will not use any bullets.
**Default:** ``True``
_nest_level (int): A variable to track the number of iterations this recursive
functions has gone through. Affects indentation level. It is not
necessary nor advised to set this argument.
**Default:** ``0``
Yields:
str: Lines of the JSONschema. To print the JSONSchema, just print each line.
Stylistic newlines are included as empty strings. These can be ignored
if a more compact style is preferred.
"""
indent = " " * kwargs.get("num_indent_spaces", 4)
if kwargs.get("bullet", True) is True:
| |
<gh_stars>1-10
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
from episodic_curiosity import keras_checkpoint
from episodic_curiosity.constants import Const
from third_party.baselines import logger
import gin
import numpy as np
from tensorflow import keras
def generate_positive_example(buffer_position,
                              next_buffer_position):
  """Generates a close enough pair of states."""
  # Return the pair in a random order so the R-network is trained to be
  # symmetric. This works for DMLab (navigation task); the symmetry
  # assumption might not hold for every environment.
  if random.random() < 0.5:
    return next_buffer_position, buffer_position
  return buffer_position, next_buffer_position
def generate_negative_example(buffer_position,
                              len_episode_buffer,
                              max_action_distance):
  """Generates a far enough pair of states."""
  assert buffer_position < len_episode_buffer
  # Exclusion window around buffer_position that negatives must avoid.
  window = Const.NEGATIVE_SAMPLE_MULTIPLIER * max_action_distance
  lo = max(buffer_position - window, 0)
  hi = min(buffer_position + window + 1, len_episode_buffer)
  # Sample uniformly over the positions outside [lo, hi).
  usable = len_episode_buffer - (hi - lo)
  if usable - 1 <= 0:
    # Episode too short to contain any far-enough state.
    return buffer_position, None
  drawn = random.randint(0, usable - 1)
  # Shift draws that land at or after lo past the excluded window.
  if drawn >= lo:
    drawn = hi + (drawn - lo)
  return buffer_position, drawn
def compute_next_buffer_position(buffer_position,
                                 positive_example_candidate,
                                 max_action_distance,
                                 mode):
  """Computes the buffer position for the next training example.

  Args:
    buffer_position: Index of the current example in the episode buffer.
    positive_example_candidate: Candidate index for the positive pair.
    max_action_distance: Max distance (in env steps) for positive pairs.
    mode: Sampling strategy, one of
      'v3_affect_num_training_examples_overlap',
      'v1_affect_num_training_examples',
      'v2_fixed_num_training_examples'.

  Returns:
    The buffer index at which the next training example is generated.

  Raises:
    ValueError: If `mode` is not one of the supported strategies.
  """
  if mode == 'v3_affect_num_training_examples_overlap':
    # This version was initially not intended (changing max_action_distance
    # affects the number of training examples, and we can also get overlap
    # across generated examples), but we have it because it produces good
    # results (reward at ~40 according to raveman@ on 2018-10-03).
    # R-nets /cns/vz-d/home/dune/episodic_curiosity/raphaelm_train_r_mad2_4 were
    # generated with this version (the flag was set
    # v1_affect_num_training_examples, but it referred to a "buggy" version of
    # v1 that is reproduced here with that v3).
    return buffer_position + random.randint(1, max_action_distance) + 1
  if mode == 'v1_affect_num_training_examples':
    return positive_example_candidate + 1
  if mode == 'v2_fixed_num_training_examples':
    # Produces the ablation study in the paper submitted to ICLR'19
    # (https://openreview.net/forum?id=SkeK3s0qKQ), section S4.1.
    return buffer_position + random.randint(1, 5) + 1
  # Previously an unknown mode silently returned None, which surfaced later
  # as a confusing TypeError in the caller's '>=' comparison. Fail fast.
  raise ValueError('Unknown mode: {!r}'.format(mode))
def create_training_data_from_episode_buffer_v4(episode_buffer,
                                                max_action_distance,
                                                avg_num_examples_per_env_step):
  """Sampling of positive/negative examples without using stride logic.

  Args:
    episode_buffer: Sequence of observations from one episode.
    max_action_distance: Max distance (in steps) for a pair to be positive.
    avg_num_examples_per_env_step: Target number of training examples to
      generate per environment step.

  Returns:
    Tuple (x1, x2, labels) of parallel lists; label 1 marks positive
    (close) pairs, label 0 negative (far) pairs. All three are empty when
    the episode is too short to sample enough negatives.
  """
  num_examples = int(avg_num_examples_per_env_step * len(episode_buffer))
  num_examples_per_class = num_examples // 2
  # We first generate positive pairs, and then sample from them (ensuring that
  # we don't select twice exactly the same pair (i,i+j)).
  positive_pair_candidates = []
  for first in range(len(episode_buffer)):
    for j in range(1, max_action_distance + 1):
      second = first + j
      if second >= len(episode_buffer):
        continue
      # Random orientation keeps the R-network symmetric.
      positive_pair_candidates.append(
          (first, second) if random.random() > 0.5 else (second, first))
  assert len(positive_pair_candidates) >= num_examples_per_class
  positive_pairs = random.sample(positive_pair_candidates,
                                 num_examples_per_class)
  # Generate negative pairs. Integer division is fine here: this count is
  # only used for the (conservative) feasibility check below.
  num_negative_candidates = len(episode_buffer) * (
      len(episode_buffer) -
      2 * Const.NEGATIVE_SAMPLE_MULTIPLIER * max_action_distance) // 2
  # Make sure we have enough negative examples to sample from (with some
  # headroom). If that does not happen (meaning very short episode buffer given
  # current values of negative_sample_multiplier, max_action_distance), don't
  # generate any training example.
  if num_negative_candidates < 2 * num_examples_per_class:
    return [], [], []
  negative_pairs = set()
  while len(negative_pairs) < num_examples_per_class:
    i = random.randint(0, len(episode_buffer) - 1)
    j = generate_negative_example(
        i, len(episode_buffer), max_action_distance)[1]
    if j is None:
      # Bug fix: previously (i, None) could be added to the set, which
      # crashed below on episode_buffer[None]. Resample another i instead.
      continue
    # Checking this is not strictly required, because it should happen
    # infrequently with current parameter values.
    # We still check for it for the symmetry with the positive example case.
    if (i, j) not in negative_pairs and (j, i) not in negative_pairs:
      negative_pairs.add((i, j))
  x1 = []
  x2 = []
  labels = []
  for i, j in positive_pairs:
    x1.append(episode_buffer[i])
    x2.append(episode_buffer[j])
    labels.append(1)
  for i, j in negative_pairs:
    x1.append(episode_buffer[i])
    x2.append(episode_buffer[j])
    labels.append(0)
  return x1, x2, labels
def create_training_data_from_episode_buffer_v123(episode_buffer,
                                                  max_action_distance,
                                                  mode):
  """Samples intervals and forms pairs."""
  pairs_with_labels = []
  position = 0
  while True:
    candidate = position + random.randint(1, max_action_distance)
    next_position = compute_next_buffer_position(
        position, candidate, max_action_distance, mode)
    # Stop once either index would fall off the end of the episode.
    if (next_position >= len(episode_buffer) or
        candidate >= len(episode_buffer)):
      break
    label = random.randint(0, 1)
    if label:
      first, second = generate_positive_example(position, candidate)
    else:
      first, second = generate_negative_example(position,
                                                len(episode_buffer),
                                                max_action_distance)
    if first is None or second is None:
      break
    pairs_with_labels.append((first, second, label))
    position = next_position
  # Materialize the sampled index pairs into observation pairs.
  x1 = [episode_buffer[f] for f, _, _ in pairs_with_labels]
  x2 = [episode_buffer[s] for _, s, _ in pairs_with_labels]
  labels = [lb for _, _, lb in pairs_with_labels]
  return x1, x2, labels
class RLBTrainer(object):
"""Train a R network in an online way."""
  def __init__(self,
               rlb_model_wrapper,
               ensure_train_between_episodes=True,
               checkpoint_dir=None):
    """Creates the online trainer.

    Args:
      rlb_model_wrapper: Wrapper around the RLB model. Must expose
        `all_rlb_args.outer_args` (a dict of training options) and a
        `train` method (used later by `train()`).
      ensure_train_between_episodes: If True, `_split_history` asserts that
        every trajectory boundary coincides with a `done` flag.
      checkpoint_dir: Optional directory where model weights are saved;
        when None, no checkpoints are written.
    """
    observation_history_size = rlb_model_wrapper.all_rlb_args.outer_args['rlb_ot_history_size']
    training_interval = rlb_model_wrapper.all_rlb_args.outer_args['rlb_ot_train_interval']
    num_epochs = rlb_model_wrapper.all_rlb_args.outer_args['rlb_ot_num_epochs']
    batch_size = rlb_model_wrapper.all_rlb_args.outer_args['rlb_ot_batch_size']
    # The training interval is assumed to be the same as the history size
    # for invalid negative values.
    if training_interval < 0:
      training_interval = observation_history_size
    self._rlb_model_wrapper = rlb_model_wrapper
    self.training_interval = training_interval
    self._ensure_train_between_episodes = ensure_train_between_episodes
    self._batch_size = batch_size
    self._num_epochs = num_epochs
    # Keeps track of the last N observations.
    # Those are used to train the R network in an online way.
    self._fifo_observations = [None] * observation_history_size
    self._fifo_actions = [None] * observation_history_size
    self._fifo_dones = [None] * observation_history_size
    self._fifo_index = 0  # Next write slot in the circular buffers.
    self._fifo_count = 0  # Total number of observations ever pushed.
    # Used to save checkpoints.
    self._current_epoch = 0
    self._checkpointer = None
    if checkpoint_dir is not None:
      # One checkpoint per training pass (each pass runs _num_epochs epochs).
      checkpoint_period_in_epochs = self._num_epochs
      self._checkpointer = keras_checkpoint.GFileModelCheckpoint(
          os.path.join(checkpoint_dir, 'r_network_weights.{epoch:05d}.h5'),
          save_summary=False,
          save_weights_only=True,
          period=checkpoint_period_in_epochs)
      self._checkpointer.set_model(self._rlb_model_wrapper)
def on_new_observation2(self, observations, unused_rewards, dones, infos, actions):
"""Event triggered when the environments generate a new observation."""
if len(observations.shape) >= 3 or infos is None or 'frame' not in infos:
self._fifo_observations[self._fifo_index] = observations
assert observations.dtype == np.uint8
else:
# Specific to Parkour (stores velocity, joints as the primary
# observation).
self._fifo_observations[self._fifo_index] = infos['frame']
self._fifo_actions[self._fifo_index] = actions
self._fifo_dones[self._fifo_index] = dones
self._fifo_index = (
(self._fifo_index + 1) % len(self._fifo_observations))
self._fifo_count += 1
if (self._fifo_count > 0 and
self._fifo_count % self.training_interval == 0):
print('Training RLB after: {}'.format(
self._fifo_count))
with logger.ProfileKV('train_ot'):
history_observations, history_dones, history_actions = self._get_flatten_history()
self.train(history_observations, history_dones, history_actions)
return True
return False
def _get_flatten_history(self):
"""Convert the history given as a circular fifo to a linear array."""
if self._fifo_count < len(self._fifo_observations):
return (self._fifo_observations[:self._fifo_count],
self._fifo_dones[:self._fifo_count],
self._fifo_actions[:self._fifo_count])
# Reorder the indices.
history_observations = self._fifo_observations[self._fifo_index:]
history_observations.extend(self._fifo_observations[:self._fifo_index])
history_dones = self._fifo_dones[self._fifo_index:]
history_dones.extend(self._fifo_dones[:self._fifo_index])
history_actions = self._fifo_actions[self._fifo_index:]
history_actions.extend(self._fifo_actions[:self._fifo_index])
return history_observations, history_dones, history_actions
def _split_history(self, observations, dones, actions):
"""Returns some individual trajectories."""
if len(observations) == 0: # pylint: disable=g-explicit-length-test
return []
# Number of environments that generated "observations",
# and total number of steps.
nenvs = len(dones[0])
nsteps = len(dones)
# Starting index of the current trajectory.
start_index = [0] * nenvs
trajectories = []
action_sequences = []
for k in range(nsteps):
for n in range(nenvs):
if dones[k][n] or k == nsteps - 1:
if self._ensure_train_between_episodes:
assert dones[k][n]
# Actually, the observation that comes with (done == True) is the initial observation of the next episode.
if dones[k][n]:
next_start_index = k
else:
next_start_index = k + 1
time_slice = observations[start_index[n]:next_start_index]
trajectories.append([obs[n] for obs in time_slice])
# In the slice for each trajectory, the first action doesn't have a corresponding previous state.
ac_time_slice = actions[start_index[n]+1:next_start_index]
action_sequences.append([ac[n] for ac in ac_time_slice])
start_index[n] = next_start_index
return trajectories, action_sequences
def _prepare_data(self, observations, dones, actions):
"""Generate the positive and negative pairs used to train the R network."""
all_obs = []
all_obs_next = []
all_acs = []
trajectories, action_sequences = self._split_history(observations, dones, actions)
for trajectory, action_sequence in zip(trajectories, action_sequences):
all_obs.extend(trajectory[:-1])
all_obs_next.extend(trajectory[1:])
all_acs.extend(action_sequence)
assert len(all_obs) == len(all_obs_next)
assert len(all_obs) == len(all_acs)
return all_obs, all_obs_next, all_acs
def _shuffle(self, x1, *data):
sample_count = len(x1)
for d in data:
assert len(d) == sample_count
permutation = np.random.permutation(sample_count)
x1 = [x1[p] for p in permutation]
data = tuple([d[p] for p in permutation] for d in data)
return (x1,) + data
  def train(self, history_observations, history_dones, history_actions):
    """Do one pass of training of the R-network.

    Builds shuffled (state, next_state, action) triples from the flattened
    history and feeds them to the wrapped model for `_num_epochs` epochs,
    then advances the epoch counter used for checkpointing.
    """
    obs, obs_next, acs = self._prepare_data(history_observations, history_dones, history_actions)
    obs, obs_next, acs = self._shuffle(obs, obs_next, acs)
    train_count = len(obs)
    # No held-out split: all generated triples are used for training.
    obs_train, obs_next_train, acs_train = obs, obs_next, acs
    self._rlb_model_wrapper.train(
        batch_gen=self._generate_batch(obs_train, obs_next_train, acs_train),
        steps_per_epoch=train_count // self._batch_size,
        num_epochs=self._num_epochs)
    # Note: the same could possibly be achieved using parameters "callback",
    # "initial_epoch", "epochs" in fit_generator. However, this is not really
    # clear how this initial epoch is supposed to work.
    # TODO(damienv): change it to use callbacks of fit_generator.
    for _ in range(self._num_epochs):
      self._current_epoch += 1
      if self._checkpointer is not None:
        self._checkpointer.on_epoch_end(self._current_epoch)
def _generate_batch(self, x1, *data):
"""Generate batches of data used to train the R network."""
logger.info('RLBTrainer._generate_batch. # batches per epoch: {}'.format(len(x1) // self._batch_size))
while True:
# Train for one epoch.
sample_count | |
}
})
@staticmethod
def load_sky_easy_task_1() -> 'Dataset':
df = pd.read_csv('data/sky_easy_task_1.csv')
df['cat'] = '1'
convert_dict = {
'Distance': 'float',
'CostPerNight': 'float',
'Label': 'category',
'cat':'category'
}
df = df.astype(convert_dict)
df['Label'] = df['Label'].apply(str)
return Dataset('Label', df,
'MVO Easy 1', {
'Label': {
'text': 'Label',
'unit': 'label',
'short': 'A',
'type': 'label',
},
'Distance': {
'text': 'Distance',
'unit': 'Miles',
'short': 'B',
'type': 'numeric',
},
'CostPerNight': {
'text': "Cost Per Night",
'unit': 'Dollars',
'short': 'C',
'type': 'numeric'
},
'cat': {
'text': "cat",
'unit': '',
'short': 'D',
'type': 'categorical'
}
})
@staticmethod
def load_sky_easy_task_2() -> 'Dataset':
df = pd.read_csv('data/sky_easy_task_2.csv')
df['cat'] = '1'
convert_dict = {
'Distance': 'float',
'CostPerNight': 'float',
'Label': 'category',
'cat':'category'
}
df = df.astype(convert_dict)
df['Label'] = df['Label'].apply(str)
return Dataset('Label', df,
'MVO Easy 2', {
'Label': {
'text': 'Label',
'unit': 'label',
'short': 'A',
'type': 'label',
},
'Distance': {
'text': 'Distance',
'unit': 'Miles',
'short': 'B',
'type': 'numeric',
},
'CostPerNight': {
'text': "Cost Per Night",
'unit': 'Dollars',
'short': 'C',
'type': 'numeric'
},
'cat': {
'text': "cat",
'unit': '',
'short': 'D',
'type': 'categorical'
}
})
@staticmethod
def load_sky_med_training_1() -> 'Dataset':
df = pd.read_csv('data/sky_med_training_1.csv')
df['cat'] = '1'
convert_dict = {
'Distance': 'float',
'CostPerNight': 'float',
'Label': 'category',
'cat':'category'
}
df = df.astype(convert_dict)
df['Label'] = df['Label'].apply(str)
return Dataset('Label', df,
'MVO Medium Training', {
'Label': {
'text': 'Label',
'unit': 'label',
'short': 'A',
'type': 'label',
},
'Distance': {
'text': 'Distance',
'unit': 'Miles',
'short': 'B',
'type': 'numeric',
},
'CostPerNight': {
'text': "Cost Per Night",
'unit': 'Dollars',
'short': 'C',
'type': 'numeric'
},
'cat': {
'text': "cat",
'unit': '',
'short': 'D',
'type': 'categorical'
}
})
@staticmethod
def load_sky_med_task_1() -> 'Dataset':
df = pd.read_csv('data/sky_med_task_1.csv')
df['cat'] = '1'
convert_dict = {
'Distance': 'float',
'CostPerNight': 'float',
'Label': 'category',
'cat':'category'
}
df = df.astype(convert_dict)
df['Label'] = df['Label'].apply(str)
return Dataset('Label', df,
'MVO Medium 1', {
'Label': {
'text': 'Label',
'unit': 'label',
'short': 'A',
'type': 'label',
},
'Distance': {
'text': 'Distance',
'unit': 'Miles',
'short': 'B',
'type': 'numeric',
},
'CostPerNight': {
'text': "Cost Per Night",
'unit': 'Dollars',
'short': 'C',
'type': 'numeric'
},
'cat': {
'text': "cat",
'unit': '',
'short': 'D',
'type': 'categorical'
}
})
@staticmethod
def load_sky_med_task_2() -> 'Dataset':
df = pd.read_csv('data/sky_med_task_2.csv')
df['cat'] = '1'
convert_dict = {
'Distance': 'float',
'CostPerNight': 'float',
'Label': 'category',
'cat':'category'
}
df = df.astype(convert_dict)
df['Label'] = df['Label'].apply(str)
return Dataset('Label', df,
'MVO Medium 2', {
'Label': {
'text': 'Label',
'unit': 'label',
'short': 'A',
'type': 'label',
},
'Distance': {
'text': 'Distance',
'unit': 'Miles',
'short': 'B',
'type': 'numeric',
},
'CostPerNight': {
'text': "Cost Per Night",
'unit': 'Dollars',
'short': 'C',
'type': 'numeric'
},
'cat': {
'text': "cat",
'unit': '',
'short': 'D',
'type': 'categorical'
}
})
@staticmethod
def load_sky_hard_training_1() -> 'Dataset':
df = pd.read_csv('data/sky_hard_training_1.csv')
df['cat'] = '1'
convert_dict = {
'Distance': 'float',
'CostPerNight': 'float',
'Label': 'category',
'cat':'category'
}
df = df.astype(convert_dict)
df['Label'] = df['Label'].apply(str)
return Dataset('Label', df,
'MVO Hard Training', {
'Label': {
'text': 'Label',
'unit': 'label',
'short': 'A',
'type': 'label',
},
'Distance': {
'text': 'Distance',
'unit': 'Miles',
'short': 'B',
'type': 'numeric',
},
'CostPerNight': {
'text': "Cost Per Night",
'unit': 'Dollars',
'short': 'C',
'type': 'numeric'
},
'cat': {
'text': "cat",
'unit': '',
'short': 'D',
'type': 'categorical'
}
})
@staticmethod
def load_sky_hard_task_1() -> 'Dataset':
df = pd.read_csv('data/sky_hard_task_1.csv')
df['cat'] = '1'
convert_dict = {
'Distance': 'float',
'CostPerNight': 'float',
'Label': 'category',
'cat':'category'
}
df = df.astype(convert_dict)
df['Label'] = df['Label'].apply(str)
return Dataset('Label', df,
'MVO Hard 1', {
'Label': {
'text': 'Label',
'unit': 'label',
'short': 'A',
'type': 'label',
},
'Distance': {
'text': 'Distance',
'unit': 'Miles',
'short': 'B',
'type': 'numeric',
},
'CostPerNight': {
'text': "Cost Per Night",
'unit': 'Dollars',
'short': 'C',
'type': 'numeric'
},
'cat': {
'text': "cat",
'unit': '',
'short': 'D',
'type': 'categorical'
}
})
@staticmethod
def load_sky_hard_task_2() -> 'Dataset':
df = pd.read_csv('data/sky_hard_task_2.csv')
df['cat'] = '1'
convert_dict = {
'Distance': 'float',
'CostPerNight': 'float',
'Label': 'category',
'cat':'category'
}
df = df.astype(convert_dict)
df['Label'] = df['Label'].apply(str)
return Dataset('Label', df,
'MVO Hard 2', {
'Label': {
'text': 'Label',
'unit': 'label',
'short': 'A',
'type': 'label',
},
'Distance': {
'text': 'Distance',
'unit': 'Miles',
'short': 'B',
'type': 'numeric',
},
'CostPerNight': {
'text': "Cost Per Night",
'unit': 'Dollars',
'short': 'C',
'type': 'numeric'
},
'cat': {
'text': "cat",
'unit': '',
'short': 'D',
'type': 'categorical'
}
})
@staticmethod
def load_cluster_easy_training_1() -> 'Dataset':
df = pd.read_csv('data/cluster_easy_training_1.csv')
df["Label"] = 0
for i in range(0, len(df["Label"])):
df.loc[i, ('Label')] = i
convert_dict = {
'X': 'float',
'Y': 'float',
'Label': 'category',
'Cluster': 'category',
}
df = df.astype(convert_dict)
df['Label'] = df['Label'].apply(str)
df['Cluster'] = df['Cluster'].apply(str)
return Dataset('Label', df,
'Cluster Easy Training', {
'Label': {
'text': 'Label',
'unit': 'label',
'short': 'A',
'type': 'label',
},
'X': {
'text': 'X',
'unit': '',
'short': 'B',
'type': 'numeric',
},
'Y': {
'text': "Y",
'unit': '',
'short': 'C',
'type': 'numeric'
},
'Cluster': {
'text': "Cluster",
'unit': '',
'short': 'D',
'type': 'categorical'
}
})
@staticmethod
def load_cluster_easy_task_1() -> 'Dataset':
df = pd.read_csv('data/cluster_easy_task_1.csv')
df["Label"] = 0
for i in range(0, len(df["Label"])):
df.loc[i, ('Label')] = i
convert_dict = {
'X': 'float',
'Y': 'float',
'Label': 'category',
'Cluster': 'category',
}
df = df.astype(convert_dict)
df['Label'] = df['Label'].apply(str)
df['Cluster'] = df['Cluster'].apply(str)
return Dataset('Label', df,
'Cluster Easy 1', {
'Label': {
'text': 'Label',
'unit': 'label',
'short': 'A',
'type': 'label',
},
'X': {
'text': 'X',
'unit': '',
'short': 'B',
'type': 'numeric',
},
'Y': {
'text': "Y",
'unit': '',
'short': 'C',
'type': 'numeric'
},
'Cluster': {
'text': "Cluster",
'unit': '',
'short': 'D',
'type': 'categorical'
}
})
@staticmethod
def load_cluster_easy_task_2() -> 'Dataset':
df = pd.read_csv('data/cluster_easy_task_2.csv')
df["Label"] = 0
for i in range(0, len(df["Label"])):
df.loc[i, ('Label')] = i
convert_dict = {
'X': 'float',
'Y': 'float',
'Label': 'category',
'Cluster': 'category',
}
df = df.astype(convert_dict)
df['Label'] = df['Label'].apply(str)
df['Cluster'] = df['Cluster'].apply(str)
return Dataset('Label', df,
'Cluster Easy 2', {
'Label': {
'text': 'Label',
'unit': 'label',
'short': 'A',
'type': 'label',
},
'X': {
'text': 'X',
'unit': '',
'short': 'B',
'type': 'numeric',
},
'Y': {
'text': "Y",
'unit': '',
'short': 'C',
'type': 'numeric'
},
'Cluster': {
'text': "Cluster",
'unit': '',
'short': 'D',
'type': 'categorical'
}
})
@staticmethod
def load_cluster_med_training_1() -> 'Dataset':
df = pd.read_csv('data/cluster_med_training_1.csv')
df["Label"] = 0
for i in range(0, len(df["Label"])):
df.loc[i, ('Label')] = i
convert_dict = {
'X': 'float',
'Y': 'float',
'Label': 'category',
'Cluster': 'category',
}
df = df.astype(convert_dict)
df['Label'] = df['Label'].apply(str)
df['Cluster'] = df['Cluster'].apply(str)
return Dataset('Label', df,
'Cluster Medium Training', {
'Label': {
'text': 'Label',
'unit': 'label',
'short': 'A',
'type': 'label',
},
'X': {
'text': 'X',
'unit': '',
'short': 'B',
'type': 'numeric',
},
'Y': {
'text': "Y",
'unit': '',
'short': 'C',
'type': 'numeric'
},
'Cluster': {
'text': "Cluster",
'unit': '',
'short': 'D',
'type': 'categorical'
}
})
@staticmethod
def load_cluster_med_task_1() -> 'Dataset':
df = pd.read_csv('data/cluster_med_task_1.csv')
df["Label"] = 0
for i in range(0, len(df["Label"])):
df.loc[i, ('Label')] = i
convert_dict = {
'X': 'float',
'Y': 'float',
'Label': 'category',
'Cluster': 'category',
}
df = df.astype(convert_dict)
df['Label'] = df['Label'].apply(str)
df['Cluster'] = df['Cluster'].apply(str)
return Dataset('Label', df,
'Cluster Medium 1', {
'Label': {
'text': 'Label',
'unit': 'label',
'short': 'A',
'type': 'label',
},
'X': {
'text': 'X',
'unit': '',
'short': 'B',
'type': 'numeric',
},
'Y': {
'text': "Y",
'unit': '',
'short': 'C',
'type': 'numeric'
},
'Cluster': {
'text': "Cluster",
'unit': '',
'short': 'D',
'type': 'categorical'
}
})
@staticmethod
def load_cluster_med_task_2() -> 'Dataset':
df = pd.read_csv('data/cluster_med_task_2.csv')
df["Label"] = 0
for i in range(0, len(df["Label"])):
df.loc[i, ('Label')] = i
convert_dict = {
'X': 'float',
'Y': 'float',
'Label': 'category',
'Cluster': 'category',
}
df = df.astype(convert_dict)
df['Label'] = df['Label'].apply(str)
df['Cluster'] = df['Cluster'].apply(str)
return Dataset('Label', df,
'Cluster Medium 2', {
'Label': {
'text': 'Label',
'unit': 'label',
'short': 'A',
'type': 'label',
},
'X': {
'text': 'X',
'unit': '',
'short': 'B',
'type': 'numeric',
},
'Y': {
'text': "Y",
'unit': '',
'short': 'C',
'type': 'numeric'
},
'Cluster': {
'text': "Cluster",
'unit': '',
'short': 'D',
'type': 'categorical'
}
})
@staticmethod
def load_cluster_hard_training_1() -> 'Dataset':
df = pd.read_csv('data/cluster_hard_training_1.csv')
df["Label"] = 0
for i in range(0, len(df["Label"])):
df.loc[i, ('Label')] = i
convert_dict = {
'X': 'float',
'Y': 'float',
'Label': 'category',
'Cluster': 'category',
}
df = df.astype(convert_dict)
df['Label'] = df['Label'].apply(str)
df['Cluster'] = df['Cluster'].apply(str)
return Dataset('Label', df,
'Cluster Hard Training', {
'Label': {
'text': 'Label',
'unit': 'label',
'short': 'A',
'type': 'label',
},
'X': {
'text': 'X',
'unit': | |
= find_str(s, ['\\quotes{', '\\doublequotes{', '\\enquote{'])
if k == -1:
if kwargs.get('pb'): # Update progressbar
kwargs.get('pb').update('Processing quotes')
return s
m = 0
for j in range(len(s)):
if s[k + j] == '{':
m = j
if s[k + j] == '}':
s = s[:k] + '"' + s[k + m + 1:k + j] + '"' + s[k + j + 1:]
break
def remove_comments(s: str, **kwargs) -> str:
    """
    Remove comments from text.

    :param s: Latex string code
    :param kwargs: Optional ``pb`` progressbar, updated when done
    :return: String without comments
    """
    # Placeholder protecting literal '\\' line breaks while the text is
    # split into lines, merged and re-joined below.
    newline_symbol = '⇱NEWLINE_SYMBOL_REMOVE_COMMENTS⇲'
    # NOTE(review): the first argument looks like a plain space here but is
    # presumably a non-breaking space being normalized — confirm in source.
    s = s.replace(' ', ' ')
    s = s.replace('\\\\', newline_symbol)
    s = s.replace('\\%', _TAG_PERCENTAGE_SYMBOL)  # Protect escaped '%'
    s = s.replace('\\\n', '\n')
    k = s.split('\n')
    for r in range(len(k)):
        k[r] = k[r].strip()  # Strips all text
    # line_merge[r] is True when line r contained a '%' comment, meaning the
    # next line logically continues it and must be merged.
    line_merge: List[bool] = []
    for r in range(len(k)):
        sp = k[r].split('%')
        k[r] = sp[0]  # Removes all comments from list
        line_merge.append(len(sp) > 1)
    line_merge.append(False)
    line_merge2: List[bool] = line_merge.copy()
    k.append('')
    # Also mark the first non-empty line following a run of commented lines,
    # so it gets pulled into the same merged line.
    for r in range(len(k)):
        if line_merge[r] and not line_merge[r + 1] and k[r + 1] != '':
            line_merge2[r + 1] = True
    for r in range(len(k)):
        line_merge[r] = line_merge[r] or line_merge2[r]
    new_k = []
    j = 0
    merged_str = ''
    while True:  # Merge comment lines
        if not line_merge[j]:
            if merged_str != '':  # Add current merged str
                new_k.append(merged_str)
                merged_str = ''
            new_k.append(k[j])
        else:
            merged_str += k[j]
        if j == len(k) - 1:
            break
        j += 1
    if merged_str != '':  # Flush any trailing merged content
        new_k.append(merged_str)
    k = new_k
    w = []  # Remove duplicates '' lines to single ''
    last = ''
    for j in k:
        if j == '' and j == last:
            pass
        else:
            w.append(j)
            last = j
    if len(w) > 0 and w[-1] == '':  # Removes last space
        w.pop()
    s = '\n'.join(w).strip()
    s = s.replace(newline_symbol, '\\\\')  # Restore protected line breaks
    if kwargs.get('pb'):  # Update progressbar
        kwargs.get('pb').update('Removing comments')
    return s
def simple_replace(s: str, **kwargs) -> str:
    """
    Replace simple tokens.

    :param s: Latex string code
    :param kwargs: Optional ``pb`` progressbar, updated when done
    :return: String with replaced items
    """
    # Plain text substitutions from the (symbol, replacement) table.
    for w in REPLACE_SYMBOLS_LIBRARY:
        s = s.replace(w[0], w[1])
    # Replace unique symbols
    s += ' '  # Sentinel so s[k + len(word)] below never runs off the end
    invalid_tag = '⇱SYMBOL_REPLACE_TAG_TOKEN⇲'
    for w in REPLACE_TEX_COMMANDS_LIBRARY:
        word, repl = w
        while True:
            k = s.find(word)
            if k == -1:
                break
            if s[k + len(word)] not in ut.TEX_COMMAND_CHARS:
                # Standalone command: replace it outright.
                s = s[0:k] + repl + s[k + len(word):]
            else:
                # Match is a prefix of a longer command; tag it so the next
                # find() call skips this occurrence.
                s = s[0:k + 1] + invalid_tag + s[k + 1:]  # format ...\\INVALID_TAG...
    # Drop the sentinel space and strip all skip-tags again.
    s = s[0:len(s) - 1].replace(invalid_tag, '')
    # Replace equation symbols
    s = s.replace('\$', _TAG_DOLLAR_SYMBOL)  # Protect escaped dollar signs
    tex_tags = ut.find_tex_command_char(s, ut.TEX_EQUATION_CHARS)
    new_s = ''
    k = 0  # Moves through tags
    added_s = False  # True once the current equation body has been emitted
    for i in range(len(s)):
        if k < len(tex_tags):
            if i < tex_tags[k][1]:
                new_s += s[i]
            elif tex_tags[k][1] <= i < tex_tags[k][2] and not added_s or tex_tags[k][1] == i == tex_tags[k][2]:
                if not added_s:
                    # Emit the whole equation body at once, with the
                    # equation-specific substitutions applied.
                    k_s: str = s[tex_tags[k][1]:tex_tags[k][2] + 1]
                    # Replace
                    for j in REPLACE_EQUATION_SYMBOLS_LIBRARY:
                        k_s = k_s.replace(j[0], j[1])
                    new_s += k_s
                    added_s = True
            elif tex_tags[k][2] < i < tex_tags[k][3]:
                new_s += s[i]
            elif i == tex_tags[k][3]:  # Advance to other tag
                new_s += s[i]
                k += 1
                added_s = False
        else:
            new_s += s[i]
    if kwargs.get('pb'):  # Update progressbar
        kwargs.get('pb').update('Replacing simple tokens')
    return new_s
def _load_file_search(tex_file: str, print_error: bool = False) -> str:
    """
    Search for a file in the working dir, its parent and all subfolders.

    :param tex_file: Name of the file
    :param print_error: Prints if file not found
    :return: Loaded file contents, or the file-error tag if not found
    """
    # Current dir and parent take priority over discovered subfolders.
    search_folders = ['./', '../'] + _os_listfolder()
    loaded = _TAG_FILE_ERROR
    for folder in search_folders:
        loaded = _load_file(tex_file, folder)
        if loaded != _TAG_FILE_ERROR:
            break
        if print_error:
            print(f'\tFile not found in {folder}')
    return loaded
def process_inputs(
        s: str,
        clear_not_found_files: bool = False,
        **kwargs
) -> str:
    """
    Process inputs, which find the input files and retrieve its contents.

    :param s: Latex string code with inputs
    :param clear_not_found_files: Clear the not found files. Used when changing the path
    :param kwargs: Optional ``pb`` progressbar and ``print`` flag
    :return: Text copied with data from inputs
    """
    global _PRINT_LOCATION, _NOT_FOUND_FILES
    # Reset the not-found cache when the working directory changed
    # (or when explicitly requested).
    if os.getcwd() != _LAST_NOT_FOUND_FILES_PATH[0] or clear_not_found_files:
        _LAST_NOT_FOUND_FILES_PATH[0] = os.getcwd()
        _NOT_FOUND_FILES.clear()
        _PRINT_LOCATION = False
    print_ = kwargs.get('print', True)
    # Placeholder masking unresolved \input commands so the scan loop
    # below cannot retry them forever; restored on return.
    symbol = '⇱INPUT_FILE_TAG⇲'
    s = remove_comments(s)
    while True:
        k = find_str(s, '\\input{')
        if k == -1:
            # No more \input commands: restore masked ones and finish.
            if kwargs.get('pb'):  # Update progressbar
                kwargs.get('pb').update('Processing \\input')
            return s.replace(symbol, '\\input{')
        m = 0
        for j in range(len(s)):
            if s[k + j] == '{':
                m = j
            if s[k + j] == '}':
                tex_file = s[k + m + 1:k + j]
                if '.tex' not in tex_file:
                    tex_file += '.tex'
                if tex_file not in _NOT_FOUND_FILES and '\jobname' not in tex_file:
                    if not _PRINT_LOCATION:
                        if print_:
                            print(f'Current path location:\n\t{os.getcwd()}')
                        _PRINT_LOCATION = True
                    if print_:
                        print(f'Detected file {tex_file}:')
                    tx = _load_file_search(tex_file, print_error=print_)
                    if tx == _TAG_FILE_ERROR:
                        # Remember the failure and mask this \input.
                        _NOT_FOUND_FILES.append(tex_file)
                        s = s[:k] + symbol + s[k + m + 1:]
                    else:
                        if print_:
                            print('\tFile found and loaded')
                        # Normalize line endings, strip comments and splice
                        # the file contents in place of the \input command.
                        tx = '\n'.join(tx.splitlines())
                        tx = remove_comments(tx)
                        s = s[:k] + tx + s[k + j + 1:]
                else:
                    # Known-missing or \jobname input: mask it and continue.
                    s = s[:k] + symbol + s[k + m + 1:]
                break
def remove_commands_char(s: str, chars: List[Tuple[str, str, bool]]) -> str:
    """
    Remove all char commands.

    :param s: Latex string code
    :param chars: Char that define equations [(initial, final, ignore escape), ...]
    :return: Code with removed chars
    """
    tex_tags = ut.find_tex_command_char(s, symbols_char=chars)
    if len(tex_tags) == 0:
        return s
    kept = []
    current = 0  # Index of the tag span currently being skipped
    for pos, ch in enumerate(s):
        if current >= len(tex_tags):
            # All tagged spans consumed: keep the remainder verbatim
            kept.append(ch)
        elif pos < tex_tags[current][0]:
            # Before the next tagged span: keep
            kept.append(ch)
        elif pos == tex_tags[current][3]:
            # Closing character of the span: drop it, advance to next tag
            current += 1
        # Characters inside the tagged span fall through and are dropped
    return ''.join(kept)
def remove_equations(s: str, **kwargs) -> str:
    """
    Strip every equation environment from the code.

    :param s: Latex string code
    :return: Latex without equation
    """
    stripped = remove_commands_char(s, chars=ut.TEX_EQUATION_CHARS)
    progressbar = kwargs.get('pb')
    if progressbar:  # Update progressbar if one was provided
        progressbar.update('Removing equations')
    return stripped
def output_text_for_some_commands(
s: str,
lang: str
) -> str:
"""
Replaces the command for a particular text.
:param s: Latex string code
:param lang: Language tag of the code
:return: Text string or empty if error
"""
# Stores the commands to be transformed
# (
# command name,
# [(argument number, argument is optional), ...],
# tag to be replaced,
# total commands,
# font_tag ('tex_text_tag' if None),
# font_content ('tex_text_tag_content' if None),
# add new line (before, after)
# )
# The font format is like .... [font tag]YOUR TAG {[font content]YOUR CONTENT} ...[font normal]. In that case, tag to be
# relaced is 'YOUR TAG {0}, {1}
# All *arguments will be formatted using the tag
commands: List[Tuple[str, List[Tuple[int, bool]], str, int, Optional[str], Optional[str], Tuple[bool, bool]]] = [
('caption', [(1, False)], LANG_TT_TAGS.get(lang, 'caption'), 1, None, None, (False, True)),
('chapter', [(1, False)], '{0}', 1, 'normal', 'bold', (True, True)),
('chapter*', [(1, False)], '{0}', 1, 'normal', 'bold', (True, True)),
('em', [(1, False)], '{0}', 1, 'normal', 'bold', (False, False)),
('emph', [(1, False)], '{0}', 1, 'normal', 'italic', (False, False)),
('href', [(2, False)], LANG_TT_TAGS.get(lang, 'link'), 2, None, None, (False, False)),
('insertimage', [(3, False)], LANG_TT_TAGS.get(lang, 'figure_caption'), 3, None, None, (False, True)),
('insertimage', [(4, False)], LANG_TT_TAGS.get(lang, 'figure_caption'), 4, None, None, (False, False)),
('insertimageboxed', [(4, False)], LANG_TT_TAGS.get(lang, 'figure_caption'), 4, None, None, (False, True)),
('insertimageboxed', [(5, False)], LANG_TT_TAGS.get(lang, 'figure_caption'), 5, None, None, (False, True)),
('paragraph', [(1, False)], '{0}', 1, 'normal', 'bold', (True, True)),
('section', [(1, False)], '{0}', 1, 'normal', 'bold', (True, True)),
('section*', [(1, False)], '{0}', 1, 'normal', 'bold', (True, True)),
('subfloat', [(1, True)], LANG_TT_TAGS.get(lang, 'sub_figure_title'), 1, None, None, (False, True)),
('subparagraph', [(1, False)], '{0}', 1, 'normal', 'bold', (True, True)),
('subsection', [(1, False)], '{0}', 1, 'normal', 'bold', (True, True)),
('subsection*', [(1, False)], '{0}', 1, 'normal', 'bold', (True, True)),
('subsubsection', [(1, False)], '{0}', 1, 'normal', 'bold', (True, True)),
('subsubsection*', [(1, False)], '{0}', 1, 'normal', 'bold', (True, True)),
('subsubsubsection', [(1, False)], '{0}', 1, 'normal', 'bold', (True, True)),
('subsubsubsection*', [(1, False)], '{0}', 1, 'normal', 'bold', (True, True)),
('textbf', [(1, False)], '{0}', 1, 'normal', 'bold', (False, False)),
('textit', [(1, False)], '{0}', 1, 'normal', 'italic', (False, False)),
('texttt', [(1, False)], '{0}', 1, 'normal', 'normal', (False, False))
]
new_s = ''
# | |
from __future__ import division
import os
import base64
import zlib
import array
import gzip
import struct
import binascii
import io
import functools
from weakref import WeakValueDictionary
import sys
import warnings
import csv
import copy
import six
try:
# noinspection PyPackageRequirements
from lxml import etree
have_lxml = True
except ImportError: # pragma: no cover
# noinspection PyPep8Naming
from xml.etree import ElementTree as etree
have_lxml = False
warnings.warn(ImportWarning('lxml is recommended'))
from tmxlib.compatibility import ord_
class ReadWriteBase(object):
    """Base class for objects that support loading and saving."""

    @classmethod
    def open(cls, filename, serializer=None, base_path=None, shared=False):
        """Load an object of this class from a file.

        :arg filename: The file from which to load
        :arg serializer: Serializer to use (defaults to the shared default)
        :arg base_path: Base directory for resolving relative paths
        :arg shared: Objects loaded from a single file with `shared=True`
            will be reused.
            Modifications to this shared object will, naturally, be visible
            from all variables that reference it.
            (External tilesets are loaded as `shared` by default.)
        """
        ser = serializer_getdefault(serializer)
        # noinspection PyUnresolvedReferences
        return ser.open(cls, cls._rw_obj_type, filename, base_path, shared)

    @classmethod
    def load(cls, string, serializer=None, base_path=None, options=None):
        """Load an object of this class from a string.

        :arg string: String containing the XML description of the object,
            as it would be read from a file
        :arg serializer: Serializer to use (defaults to the shared default)
        :arg base_path: Base directory for resolving relative paths
        :arg options: An optional dictionary of options to respect when
            loading the tmx. Currently supported options: None
        """
        ser = serializer_getdefault(serializer)
        # noinspection PyUnresolvedReferences
        return ser.load(cls, cls._rw_obj_type, string, base_path, options)

    def save(self, filename, serializer=None, base_path=None):
        """Save this object to a file.

        :arg filename: Name of the file to save to
        :arg serializer: Serializer to use (defaults to the shared default)
        :arg base_path: Base directory for resolving relative paths
        """
        ser = serializer_getdefault(serializer, self)
        # noinspection PyUnresolvedReferences
        return ser.save(self, self._rw_obj_type, filename, base_path)

    def dump(self, serializer=None, base_path=None):
        """Serialize this object to a string.

        :returns: String with the representation of the object, suitable
            for writing to a file.
        """
        ser = serializer_getdefault(serializer, self)
        # noinspection PyUnresolvedReferences
        return ser.dump(self, self._rw_obj_type, base_path)
def load_method(func):
    """Decorator for serializer "load" functions.

    The wrapped function's return value gets its ``serializer`` attribute
    set to the serializer instance that performed the load, and its
    ``base_path`` attribute set whenever the call supplied one as a
    keyword argument.
    """
    @functools.wraps(func)
    def loader(self, *args, **kwargs):
        result = func(self, *args, **kwargs)
        result.serializer = self
        if 'base_path' in kwargs:
            result.base_path = kwargs['base_path']
        return result
    return loader
def int_or_none(value):
    """Convert *value* with ``int()``, passing ``None`` through unchanged."""
    return None if value is None else int(value)
def int_or_float(value):
    """Coerce *value* to an int when possible, otherwise to a float.

    Strings that parse as integers give ints; any other numeric string
    (decimal point, exponent notation, ...) gives a float.  Ints pass
    through unchanged; everything else is converted with ``float()``.
    """
    if isinstance(value, str):
        # Try an exact integer parse first and fall back to float.  The
        # previous '.'-sniffing approach crashed on exponent notation
        # such as '2e3'; this mirrors the probability parsing used in
        # tileset_from_element.
        try:
            return int(value)
        except ValueError:
            return float(value)
    if isinstance(value, int):
        return value
    return float(value)
# noinspection PyMethodMayBeStatic
class TMXSerializer(object):
def __init__(self):
import tmxlib
self.map_class = tmxlib.Map
self.template_class = tmxlib.Template
self.tile_layer_class = tmxlib.TileLayer
self.object_layer_class = tmxlib.ObjectLayer
self.image_layer_class = tmxlib.ImageLayer
self.rectangle_object_class = tmxlib.RectangleObject
self.ellipse_object_class = tmxlib.EllipseObject
self.polygon_object_class = tmxlib.PolygonObject
self.point_object_class = tmxlib.PointObject
self.polyline_object_class = tmxlib.PolylineObject
self.image_class = tmxlib.image.preferred_image_class
self._shared_objects = WeakValueDictionary()
def tileset_class(self, *args, **kwargs):
import tmxlib
if 'image' in kwargs:
return tmxlib.ImageTileset(*args, **kwargs)
else:
return tmxlib.IndividualTileTileset(*args, **kwargs)
def load_file(self, filename, base_path=None):
if base_path:
filename = os.path.join(base_path, filename)
with open(filename, 'rb') as fileobj:
return fileobj.read()
    def open(self, cls, obj_type, filename, base_path=None, shared=False, options=None):
        """Load an object of type *obj_type* from *filename*.

        With ``shared=True`` the loaded object is cached keyed by
        ``(obj_type, normalized filename)`` and reused on later shared
        opens.  The cache holds only weak references, so unused entries
        are dropped automatically.
        """
        if not base_path:
            base_path = os.path.dirname(os.path.abspath(filename))
        if shared:
            # Normalize so the same file always yields the same cache key.
            filename = os.path.normpath(os.path.join(base_path, filename))
            try:
                return self._shared_objects[obj_type, filename]
            except KeyError:
                # Cache miss: perform a non-shared open (base_path is
                # re-derived from the normalized name) and remember it.
                self._shared_objects[obj_type, filename] = obj = self.open(
                    cls, obj_type, filename, options=options)
                return obj
        return self.load(cls, obj_type, self.load_file(filename),
                         base_path=base_path, options=options)
def load(self, cls, obj_type, string, base_path=None, options=None):
if have_lxml:
tree = etree.XML(string, etree.XMLParser(remove_comments=True))
else: # pragma: no cover
tree = etree.XML(string)
def strip_comments(elem):
for subelem in elem:
if subelem.tag == etree.Comment:
elem.remove(subelem)
strip_comments(tree)
return self.from_element(cls, obj_type, tree, base_path=base_path, options=options)
def from_element(self, cls, obj_type, element, base_path=None, options=None):
read_func = getattr(self, obj_type + '_from_element')
obj = read_func(cls, element, base_path=base_path, options=options)
obj.serializer = self
return obj
# noinspection PyUnusedLocal
def save(self, obj, obj_type, filename, serializer=None, base_path=None):
if not base_path:
base_path = os.path.dirname(os.path.abspath(filename))
with open(filename, 'wb') as fileobj:
fileobj.write(self.dump(obj, obj_type, base_path=base_path))
def dump(self, obj, obj_type, base_path=None):
# noinspection PyUnusedLocal
extra_kwargs = {}
if have_lxml:
extra_kwargs = dict(pretty_print=True, xml_declaration=True)
else: # pragma: no cover
extra_kwargs = dict()
return etree.tostring(self.to_element(obj, obj_type, base_path),
encoding='UTF-8', **extra_kwargs)
def to_element(self, obj, obj_type, base_path=None,
**kwargs):
write_func = getattr(self, obj_type + '_to_element')
return write_func(obj, base_path=base_path, **kwargs)
@load_method
def map_from_element(self, cls, root, base_path, options):
assert root.tag == 'map'
assert root.attrib.pop('version') in ('1.0', '1.1', '1.2'), 'Bad TMX file version'
background_color = root.attrib.pop('backgroundcolor', None)
if background_color:
background_color = from_hexcolor(background_color)
args = dict(
size=(int(root.attrib.pop('width')),
int(root.attrib.pop('height'))),
tile_size=(int(root.attrib.pop('tilewidth')),
int(root.attrib.pop('tileheight'))),
orientation=root.attrib.pop('orientation'),
base_path=base_path,
background_color=background_color,
infinite=True if root.attrib.pop('infinite', '0') == '1' else False,
staggeraxis=root.attrib.pop('staggeraxis', None),
staggerindex=root.attrib.pop('staggerindex', None),
hexsidelength=root.attrib.pop('hexsidelength', None),
nextobjectid=int_or_none(root.attrib.pop('nextobjectid', None)),
nextlayerid=int_or_none(root.attrib.pop('nextlayerid', None)),
tiledversion=root.attrib.pop('tiledversion', None),
)
render_order = root.attrib.pop('renderorder', None)
if render_order:
args['render_order'] = render_order
# compressionlevel is currently undocumented, but I have only ever seen a value of '0'
compressionlevel = root.attrib.pop('compressionlevel', '0')
assert compressionlevel == '0', "Unsupported compression level %s" % compressionlevel
assert not root.attrib, 'Unexpected map attributes: %s' % root.attrib
map = cls(**args)
for elem in root:
if elem.tag == 'properties':
map.properties.update(self.read_properties(elem))
elif elem.tag == 'tileset':
tileset = self.tileset_from_element(
self.tileset_class, elem, base_path=base_path, options=options)
map.tilesets.append(tileset)
assert tileset.first_gid(map) == tileset._read_first_gid
elif elem.tag == 'layer':
map.layers.append(self.tile_layer_from_element(
self.tile_layer_class, elem, map, options=options))
elif elem.tag == 'objectgroup':
map.layers.append(self.object_layer_from_element(
self.object_layer_class, elem, base_path=base_path, map=map, options=options))
elif elem.tag == 'imagelayer':
map.layers.append(self.image_layer_from_element(
self.image_layer_class, elem, map, base_path, options=options))
else:
raise ValueError('Unknown tag %s' % elem.tag)
return map
def map_to_element(self, map, base_path):
elem = etree.Element('map', attrib=dict(
version='1.0',
orientation=map.orientation,
width=str(map.width),
height=str(map.height),
tilewidth=str(map.tile_width),
tileheight=str(map.tile_height),
))
if map.background_color:
elem.attrib['backgroundcolor'] = '#{0}'.format(
to_hexcolor(map.background_color))
if map.render_order:
elem.attrib['renderorder'] = map.render_order
self.append_properties(elem, map.properties)
for tileset in map.tilesets:
elem.append(self.tileset_to_element(tileset,
base_path=base_path, first_gid=tileset.first_gid(map)))
for layer in map.layers:
elem.append(self.layer_to_element(layer, base_path))
return elem
@load_method
def tileset_from_element(self, cls, elem, base_path, options):
source = elem.attrib.pop('source', None)
load_source = bool(options["load-tileset-sources"]) if options else True
# if 'source' is set, then the tileset is not embedded
if source:
# if we should load the source from it's file (default)
if load_source:
# XXX: Return a proxy object?
if base_path is None and not os.path.isabs(source):
raise ValueError(
'Cannot load external tileset from relative path %s' %
source)
elif base_path:
real_source = os.path.join(base_path, source)
else:
real_source = source
tileset = self.open(cls, 'tileset', real_source, shared=True)
tileset._read_first_gid = int(elem.attrib.pop('firstgid'))
tileset.source = source
assert not elem.attrib, ('Unexpected tileset attributes: %s' % elem.attrib)
return tileset
# otherwise through the options, the user has told us not to load the tileset source
else:
tileset = cls(name=source, tile_size=(0, 0))
tileset._read_first_gid = int(elem.attrib.pop('firstgid'))
for _ in range(tileset._read_first_gid):
tileset.append_image(None)
tileset.source = None
assert not elem.attrib, ('Unexpected tileset attributes: %s' % elem.attrib)
return tileset
kwargs = {}
if any(e.tag == 'image' for e in elem):
kwargs['margin'] = int(elem.attrib.pop('margin', 0))
kwargs['spacing'] = int(elem.attrib.pop('spacing', 0))
kwargs['image'] = None
columns = elem.attrib.pop('columns', None)
if columns:
kwargs['columns'] = int(columns)
tileset = cls(
name=elem.attrib.pop('name'),
tile_size=(int(elem.attrib.pop('tilewidth')),
int(elem.attrib.pop('tileheight'))),
**kwargs
)
tileset._read_first_gid = int(elem.attrib.pop('firstgid', 0))
elem.attrib.pop('tilecount', None)
elem.attrib.pop('version', '1.0')
elem.attrib.pop('tiledversion', None)
assert not elem.attrib, ('Unexpected tileset attributes: %s' % elem.attrib)
for subelem in elem:
if subelem.tag == 'image':
assert tileset.image is None
tileset.image = self.image_from_element(
self.image_class, subelem, base_path=base_path, options=options)
elif subelem.tag == 'terraintypes':
for subsubelem in subelem:
if subsubelem.tag == 'terrain':
tileset.terrains.append_new(
name=subsubelem.attrib.pop('name'),
tile=tileset[int(subsubelem.attrib.pop('tile'))],
)
assert not subsubelem.attrib, (
'Unexpected terrain attributes: %s' %
subsubelem.attrib)
else:
raise ValueError('Unknown tag %s' % subsubelem.tag)
elif subelem.tag == 'tile':
id = int(subelem.attrib.pop('id'))
terrain = subelem.attrib.pop('terrain', None)
if terrain:
tileset.tile_attributes[id]['terrain_indices'] = [
int(n) if n else -1 for n in terrain.split(',')]
probability = subelem.attrib.pop('probability', None)
if probability:
try:
probability = int(probability)
except ValueError:
probability = float(probability)
tileset.tile_attributes[id]['probability'] = probability
for subsubelem in subelem:
if subsubelem.tag == 'properties':
props = tileset.tile_attributes[id].setdefault(
'properties' ,{})
props.update(self.read_properties(subsubelem))
elif subsubelem.tag == 'image':
assert id == len(tileset), (id, len(tileset))
image = self.image_from_element(
self.image_class, subsubelem, base_path=base_path, options=options)
props = tileset.append_image(image)
else:
raise ValueError('Unknown tag %s' % subsubelem.tag)
elif subelem.tag == 'properties':
tileset.properties.update(self.read_properties(subelem))
elif subelem.tag == 'tileoffset':
tileset.tile_offset = (
int(subelem.attrib['x']), int(subelem.attrib['y']))
elif subelem.tag == 'wangsets':
# XXX: Not implemented
pass
elif subelem.tag == 'grid':
# XXX: Not implemented
pass
else:
raise ValueError('Unknown tag %s' % subelem.tag)
if tileset.type == 'image' and not tileset.image:
raise ValueError('No image for tileset %s' % tileset.name)
return tileset
def tileset_to_element(self, tileset, base_path, first_gid=None):
if tileset.source is not None:
attrib = dict(
source=tileset.source,
)
if first_gid:
attrib['firstgid'] = str(first_gid)
return etree.Element('tileset', attrib=attrib)
else:
attrib = dict(name=tileset.name)
if tileset.type == 'image':
attrib['tileheight'] = str(tileset.tile_height)
attrib['tilewidth'] = str(tileset.tile_width)
if first_gid:
attrib['firstgid'] = str(first_gid)
element = etree.Element('tileset', attrib=attrib)
if tileset.type == 'image':
if tileset.spacing:
element.attrib['spacing'] = str(tileset.spacing)
if tileset.margin:
element.attrib['margin'] = str(tileset.margin)
if any(tileset.tile_offset):
offset_elem = etree.Element('tileoffset',
attrib={'x': | |
<gh_stars>0
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import operations_v1
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.artifactregistry_v1beta2.types import apt_artifact
from google.cloud.artifactregistry_v1beta2.types import file
from google.cloud.artifactregistry_v1beta2.types import package
from google.cloud.artifactregistry_v1beta2.types import repository
from google.cloud.artifactregistry_v1beta2.types import repository as gda_repository
from google.cloud.artifactregistry_v1beta2.types import settings
from google.cloud.artifactregistry_v1beta2.types import tag
from google.cloud.artifactregistry_v1beta2.types import tag as gda_tag
from google.cloud.artifactregistry_v1beta2.types import version
from google.cloud.artifactregistry_v1beta2.types import yum_artifact
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from .base import ArtifactRegistryTransport, DEFAULT_CLIENT_INFO
class ArtifactRegistryGrpcTransport(ArtifactRegistryTransport):
"""gRPC backend transport for ArtifactRegistry.
The Artifact Registry API service.
Artifact Registry is an artifact management system for storing
artifacts from different package management systems.
The resources managed by this API are:
- Repositories, which group packages and their data.
- Packages, which group versions and their tags.
- Versions, which are specific forms of a package.
- Tags, which represent alternative names for versions.
- Files, which contain content and are optionally associated with a
Package or Version.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
    def __init__(
        self,
        *,
        host: str = "artifactregistry.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Sequence[str] = None,
        channel: grpc.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        Raises:
          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
              creation failed for any reason.
          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        # Attribute defaults; the channel/mTLS logic below may override them.
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        self._operations_client: Optional[operations_v1.OperationsClient] = None
        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            # No pre-made channel: decide which SSL credentials to use,
            # honoring the deprecated api_mtls_endpoint path first.
            if api_mtls_endpoint:
                host = api_mtls_endpoint
                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        # Only create a channel if the caller did not supply one above.
        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                credentials=self._credentials,
                credentials_file=credentials_file,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "artifactregistry.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
# Return the client from cache.
return self._operations_client
@property
def import_apt_artifacts(
self,
) -> Callable[[apt_artifact.ImportAptArtifactsRequest], operations_pb2.Operation]:
r"""Return a callable for the import apt artifacts method over gRPC.
Imports Apt artifacts. The returned Operation will
complete once the resources are imported. Package,
Version, and File resources are created based on the
imported artifacts. Imported artifacts that conflict
with existing resources are ignored.
Returns:
Callable[[~.ImportAptArtifactsRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "import_apt_artifacts" not in self._stubs:
self._stubs["import_apt_artifacts"] = self.grpc_channel.unary_unary(
"/google.devtools.artifactregistry.v1beta2.ArtifactRegistry/ImportAptArtifacts",
request_serializer=apt_artifact.ImportAptArtifactsRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["import_apt_artifacts"]
@property
def import_yum_artifacts(
self,
) -> Callable[[yum_artifact.ImportYumArtifactsRequest], operations_pb2.Operation]:
r"""Return a callable for the import yum artifacts method over gRPC.
Imports Yum (RPM) artifacts. The returned Operation
will complete once the resources are imported. Package,
Version, and File resources are created based on the
imported artifacts. Imported artifacts that conflict
with existing resources are ignored.
Returns:
Callable[[~.ImportYumArtifactsRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
| |
# -*- coding: utf-8 -*-
"""
Declares CloudFormation property classes for AWS GameLift resources.
"""
import attr
import typing
from ..core.model import (
Property, Resource, Tag, GetAtt, TypeHint, TypeCheck,
)
from ..core.constant import AttrMeta
#--- Property declaration ---
@attr.s
class PropGameServerGroupTargetTrackingConfiguration(Property):
    """
    AWS Object Type = "AWS::GameLift::GameServerGroup.TargetTrackingConfiguration"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-targettrackingconfiguration.html
    Property Document:
    
    - ``rp_TargetValue``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-targettrackingconfiguration.html#cfn-gamelift-gameservergroup-targettrackingconfiguration-targetvalue
    """
    AWS_OBJECT_TYPE = "AWS::GameLift::GameServerGroup.TargetTrackingConfiguration"
    # Required property ("rp_" prefix): the non-optional instance_of
    # validator rejects the default None, so callers must supply a float.
    rp_TargetValue: float = attr.ib(
        default=None,
        validator=attr.validators.instance_of(float),
        metadata={AttrMeta.PROPERTY_NAME: "TargetValue"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-targettrackingconfiguration.html#cfn-gamelift-gameservergroup-targettrackingconfiguration-targetvalue"""
@attr.s
class PropFleetLocationCapacity(Property):
    """
    AWS Object Type = "AWS::GameLift::Fleet.LocationCapacity"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-locationcapacity.html
    Property Document:
    
    - ``rp_DesiredEC2Instances``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-locationcapacity.html#cfn-gamelift-fleet-locationcapacity-desiredec2instances
    - ``rp_MaxSize``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-locationcapacity.html#cfn-gamelift-fleet-locationcapacity-maxsize
    - ``rp_MinSize``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-locationcapacity.html#cfn-gamelift-fleet-locationcapacity-minsize
    """
    AWS_OBJECT_TYPE = "AWS::GameLift::Fleet.LocationCapacity"
    # All three fields are required ("rp_" prefix): the non-optional
    # instance_of validators reject the default None, so ints must be given.
    rp_DesiredEC2Instances: int = attr.ib(
        default=None,
        validator=attr.validators.instance_of(int),
        metadata={AttrMeta.PROPERTY_NAME: "DesiredEC2Instances"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-locationcapacity.html#cfn-gamelift-fleet-locationcapacity-desiredec2instances"""
    rp_MaxSize: int = attr.ib(
        default=None,
        validator=attr.validators.instance_of(int),
        metadata={AttrMeta.PROPERTY_NAME: "MaxSize"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-locationcapacity.html#cfn-gamelift-fleet-locationcapacity-maxsize"""
    rp_MinSize: int = attr.ib(
        default=None,
        validator=attr.validators.instance_of(int),
        metadata={AttrMeta.PROPERTY_NAME: "MinSize"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-locationcapacity.html#cfn-gamelift-fleet-locationcapacity-minsize"""
@attr.s
class PropBuildS3Location(Property):
    """
    AWS Object Type = "AWS::GameLift::Build.S3Location"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-build-storagelocation.html
    Property Document:
    
    - ``rp_Bucket``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-build-storagelocation.html#cfn-gamelift-build-storage-bucket
    - ``rp_Key``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-build-storagelocation.html#cfn-gamelift-build-storage-key
    - ``rp_RoleArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-build-storagelocation.html#cfn-gamelift-build-storage-rolearn
    - ``p_ObjectVersion``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-build-storagelocation.html#cfn-gamelift-build-object-verison
    """
    AWS_OBJECT_TYPE = "AWS::GameLift::Build.S3Location"
    # Required properties ("rp_" prefix): non-optional validators reject None.
    rp_Bucket: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "Bucket"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-build-storagelocation.html#cfn-gamelift-build-storage-bucket"""
    rp_Key: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "Key"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-build-storagelocation.html#cfn-gamelift-build-storage-key"""
    rp_RoleArn: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "RoleArn"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-build-storagelocation.html#cfn-gamelift-build-storage-rolearn"""
    # Optional property ("p_" prefix): the optional(...) validator accepts None.
    p_ObjectVersion: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "ObjectVersion"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-build-storagelocation.html#cfn-gamelift-build-object-verison"""
@attr.s
class PropAliasRoutingStrategy(Property):
    """
    AWS Object Type = "AWS::GameLift::Alias.RoutingStrategy"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-alias-routingstrategy.html
    Property Document:
    
    - ``rp_Type``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-alias-routingstrategy.html#cfn-gamelift-alias-routingstrategy-type
    - ``p_FleetId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-alias-routingstrategy.html#cfn-gamelift-alias-routingstrategy-fleetid
    - ``p_Message``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-alias-routingstrategy.html#cfn-gamelift-alias-routingstrategy-message
    """
    AWS_OBJECT_TYPE = "AWS::GameLift::Alias.RoutingStrategy"
    # Required property ("rp_" prefix): non-optional validator rejects None.
    rp_Type: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "Type"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-alias-routingstrategy.html#cfn-gamelift-alias-routingstrategy-type"""
    # Optional properties ("p_" prefix): optional(...) validators accept None.
    p_FleetId: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "FleetId"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-alias-routingstrategy.html#cfn-gamelift-alias-routingstrategy-fleetid"""
    p_Message: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Message"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-alias-routingstrategy.html#cfn-gamelift-alias-routingstrategy-message"""
@attr.s
class PropGameServerGroupLaunchTemplate(Property):
    """
    CloudFormation property object for ``AWS::GameLift::GameServerGroup.LaunchTemplate``.

    All fields are optional: each validator is wrapped in
    ``attr.validators.optional``, so ``None`` is accepted.

    AWS Object Type = "AWS::GameLift::GameServerGroup.LaunchTemplate"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-launchtemplate.html
    Property Document:
    
    - ``p_LaunchTemplateId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-launchtemplate.html#cfn-gamelift-gameservergroup-launchtemplate-launchtemplateid
    - ``p_LaunchTemplateName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-launchtemplate.html#cfn-gamelift-gameservergroup-launchtemplate-launchtemplatename
    - ``p_Version``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-launchtemplate.html#cfn-gamelift-gameservergroup-launchtemplate-version
    """
    AWS_OBJECT_TYPE = "AWS::GameLift::GameServerGroup.LaunchTemplate"
    # Optional property "LaunchTemplateId".
    p_LaunchTemplateId: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "LaunchTemplateId"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-launchtemplate.html#cfn-gamelift-gameservergroup-launchtemplate-launchtemplateid"""
    # Optional property "LaunchTemplateName".
    p_LaunchTemplateName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "LaunchTemplateName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-launchtemplate.html#cfn-gamelift-gameservergroup-launchtemplate-launchtemplatename"""
    # Optional property "Version".
    p_Version: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Version"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-launchtemplate.html#cfn-gamelift-gameservergroup-launchtemplate-version"""
@attr.s
class PropFleetCertificateConfiguration(Property):
    """
    CloudFormation property object for ``AWS::GameLift::Fleet.CertificateConfiguration``.

    The single ``rp_`` field is required: its ``instance_of`` validator is not
    wrapped in ``attr.validators.optional`` and therefore rejects ``None``.

    AWS Object Type = "AWS::GameLift::Fleet.CertificateConfiguration"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-certificateconfiguration.html
    Property Document:
    
    - ``rp_CertificateType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-certificateconfiguration.html#cfn-gamelift-fleet-certificateconfiguration-certificatetype
    """
    AWS_OBJECT_TYPE = "AWS::GameLift::Fleet.CertificateConfiguration"
    # Required property "CertificateType".
    rp_CertificateType: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "CertificateType"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-certificateconfiguration.html#cfn-gamelift-fleet-certificateconfiguration-certificatetype"""
@attr.s
class PropScriptS3Location(Property):
    """
    CloudFormation property object for ``AWS::GameLift::Script.S3Location``.

    Field naming convention (matches the validators below): ``rp_`` fields are
    required (plain ``instance_of`` validator), ``p_`` fields are optional
    (validator wrapped in ``attr.validators.optional``).

    AWS Object Type = "AWS::GameLift::Script.S3Location"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-script-s3location.html
    Property Document:
    
    - ``rp_Bucket``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-script-s3location.html#cfn-gamelift-script-s3location-bucket
    - ``rp_Key``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-script-s3location.html#cfn-gamelift-script-s3location-key
    - ``rp_RoleArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-script-s3location.html#cfn-gamelift-script-s3location-rolearn
    - ``p_ObjectVersion``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-script-s3location.html#cfn-gamelift-script-s3location-objectversion
    """
    AWS_OBJECT_TYPE = "AWS::GameLift::Script.S3Location"
    # Required property "Bucket".
    rp_Bucket: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "Bucket"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-script-s3location.html#cfn-gamelift-script-s3location-bucket"""
    # Required property "Key".
    rp_Key: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "Key"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-script-s3location.html#cfn-gamelift-script-s3location-key"""
    # Required property "RoleArn".
    rp_RoleArn: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "RoleArn"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-script-s3location.html#cfn-gamelift-script-s3location-rolearn"""
    # Optional property "ObjectVersion".
    p_ObjectVersion: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "ObjectVersion"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-script-s3location.html#cfn-gamelift-script-s3location-objectversion"""
@attr.s
class PropGameServerGroupAutoScalingPolicy(Property):
    """
    CloudFormation property object for ``AWS::GameLift::GameServerGroup.AutoScalingPolicy``.

    ``rp_TargetTrackingConfiguration`` is required; per its type annotation it
    accepts either a ``PropGameServerGroupTargetTrackingConfiguration``
    instance or a plain ``dict`` (normalized by the ``from_dict`` converter).
    ``p_EstimatedInstanceWarmup`` is optional.

    AWS Object Type = "AWS::GameLift::GameServerGroup.AutoScalingPolicy"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-autoscalingpolicy.html
    Property Document:
    
    - ``rp_TargetTrackingConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-autoscalingpolicy.html#cfn-gamelift-gameservergroup-autoscalingpolicy-targettrackingconfiguration
    - ``p_EstimatedInstanceWarmup``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-autoscalingpolicy.html#cfn-gamelift-gameservergroup-autoscalingpolicy-estimatedinstancewarmup
    """
    AWS_OBJECT_TYPE = "AWS::GameLift::GameServerGroup.AutoScalingPolicy"
    # Required; dict input is converted via from_dict before validation.
    rp_TargetTrackingConfiguration: typing.Union['PropGameServerGroupTargetTrackingConfiguration', dict] = attr.ib(
        default=None,
        converter=PropGameServerGroupTargetTrackingConfiguration.from_dict,
        validator=attr.validators.instance_of(PropGameServerGroupTargetTrackingConfiguration),
        metadata={AttrMeta.PROPERTY_NAME: "TargetTrackingConfiguration"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-autoscalingpolicy.html#cfn-gamelift-gameservergroup-autoscalingpolicy-targettrackingconfiguration"""
    # Optional; NOTE(review): validator accepts float only, not int — TODO confirm intended.
    p_EstimatedInstanceWarmup: float = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(float)),
        metadata={AttrMeta.PROPERTY_NAME: "EstimatedInstanceWarmup"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-autoscalingpolicy.html#cfn-gamelift-gameservergroup-autoscalingpolicy-estimatedinstancewarmup"""
@attr.s
class PropGameSessionQueuePlayerLatencyPolicy(Property):
    """
    CloudFormation property object for ``AWS::GameLift::GameSessionQueue.PlayerLatencyPolicy``.

    Both fields are optional integers (validators wrapped in
    ``attr.validators.optional``).

    AWS Object Type = "AWS::GameLift::GameSessionQueue.PlayerLatencyPolicy"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gamesessionqueue-playerlatencypolicy.html
    Property Document:
    
    - ``p_MaximumIndividualPlayerLatencyMilliseconds``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gamesessionqueue-playerlatencypolicy.html#cfn-gamelift-gamesessionqueue-playerlatencypolicy-maximumindividualplayerlatencymilliseconds
    - ``p_PolicyDurationSeconds``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gamesessionqueue-playerlatencypolicy.html#cfn-gamelift-gamesessionqueue-playerlatencypolicy-policydurationseconds
    """
    AWS_OBJECT_TYPE = "AWS::GameLift::GameSessionQueue.PlayerLatencyPolicy"
    # Optional property "MaximumIndividualPlayerLatencyMilliseconds".
    p_MaximumIndividualPlayerLatencyMilliseconds: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "MaximumIndividualPlayerLatencyMilliseconds"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gamesessionqueue-playerlatencypolicy.html#cfn-gamelift-gamesessionqueue-playerlatencypolicy-maximumindividualplayerlatencymilliseconds"""
    # Optional property "PolicyDurationSeconds".
    p_PolicyDurationSeconds: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "PolicyDurationSeconds"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gamesessionqueue-playerlatencypolicy.html#cfn-gamelift-gamesessionqueue-playerlatencypolicy-policydurationseconds"""
@attr.s
class PropGameSessionQueueDestination(Property):
    """
    CloudFormation property object for ``AWS::GameLift::GameSessionQueue.Destination``.

    The single field is optional (validator wrapped in
    ``attr.validators.optional``).

    AWS Object Type = "AWS::GameLift::GameSessionQueue.Destination"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gamesessionqueue-destination.html
    Property Document:
    
    - ``p_DestinationArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gamesessionqueue-destination.html#cfn-gamelift-gamesessionqueue-destination-destinationarn
    """
    AWS_OBJECT_TYPE = "AWS::GameLift::GameSessionQueue.Destination"
    # Optional property "DestinationArn".
    p_DestinationArn: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "DestinationArn"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gamesessionqueue-destination.html#cfn-gamelift-gamesessionqueue-destination-destinationarn"""
@attr.s
class PropFleetLocationConfiguration(Property):
    """
    CloudFormation property object for ``AWS::GameLift::Fleet.LocationConfiguration``.

    ``rp_Location`` is required; ``p_LocationCapacity`` is optional and, per
    its type annotation, accepts either a ``PropFleetLocationCapacity``
    instance or a plain ``dict`` (normalized by the ``from_dict`` converter).

    AWS Object Type = "AWS::GameLift::Fleet.LocationConfiguration"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-locationconfiguration.html
    Property Document:
    
    - ``rp_Location``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-locationconfiguration.html#cfn-gamelift-fleet-locationconfiguration-location
    - ``p_LocationCapacity``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-locationconfiguration.html#cfn-gamelift-fleet-locationconfiguration-locationcapacity
    """
    AWS_OBJECT_TYPE = "AWS::GameLift::Fleet.LocationConfiguration"
    # Required property "Location".
    rp_Location: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "Location"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-locationconfiguration.html#cfn-gamelift-fleet-locationconfiguration-location"""
    # Optional; dict input is converted via from_dict before validation.
    p_LocationCapacity: typing.Union['PropFleetLocationCapacity', dict] = attr.ib(
        default=None,
        converter=PropFleetLocationCapacity.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropFleetLocationCapacity)),
        metadata={AttrMeta.PROPERTY_NAME: "LocationCapacity"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-locationconfiguration.html#cfn-gamelift-fleet-locationconfiguration-locationcapacity"""
@attr.s
class PropFleetIpPermission(Property):
    """
    CloudFormation property object for ``AWS::GameLift::Fleet.IpPermission``.

    All four fields are required: each uses a plain ``instance_of`` validator
    that rejects ``None``.

    AWS Object Type = "AWS::GameLift::Fleet.IpPermission"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-ippermission.html
    Property Document:
    
    - ``rp_FromPort``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-ippermission.html#cfn-gamelift-fleet-ippermission-fromport
    - ``rp_IpRange``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-ippermission.html#cfn-gamelift-fleet-ippermission-iprange
    - ``rp_Protocol``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-ippermission.html#cfn-gamelift-fleet-ippermission-protocol
    - ``rp_ToPort``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-ippermission.html#cfn-gamelift-fleet-ippermission-toport
    """
    AWS_OBJECT_TYPE = "AWS::GameLift::Fleet.IpPermission"
    # Required property "FromPort".
    rp_FromPort: int = attr.ib(
        default=None,
        validator=attr.validators.instance_of(int),
        metadata={AttrMeta.PROPERTY_NAME: "FromPort"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-ippermission.html#cfn-gamelift-fleet-ippermission-fromport"""
    # Required property "IpRange".
    rp_IpRange: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "IpRange"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-ippermission.html#cfn-gamelift-fleet-ippermission-iprange"""
    # Required property "Protocol".
    rp_Protocol: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "Protocol"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-ippermission.html#cfn-gamelift-fleet-ippermission-protocol"""
    # Required property "ToPort".
    rp_ToPort: int = attr.ib(
        default=None,
        validator=attr.validators.instance_of(int),
        metadata={AttrMeta.PROPERTY_NAME: "ToPort"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-ippermission.html#cfn-gamelift-fleet-ippermission-toport"""
@attr.s
class PropGameSessionQueueFilterConfiguration(Property):
    """
    CloudFormation property object for ``AWS::GameLift::GameSessionQueue.FilterConfiguration``.

    The single field is optional; when provided it must be a ``list`` whose
    members pass the intrinsic-string check (enforced by ``deep_iterable``).

    AWS Object Type = "AWS::GameLift::GameSessionQueue.FilterConfiguration"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gamesessionqueue-filterconfiguration.html
    Property Document:
    
    - ``p_AllowedLocations``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gamesessionqueue-filterconfiguration.html#cfn-gamelift-gamesessionqueue-filterconfiguration-allowedlocations
    """
    AWS_OBJECT_TYPE = "AWS::GameLift::GameSessionQueue.FilterConfiguration"
    # Optional list property "AllowedLocations"; each member is validated individually.
    p_AllowedLocations: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "AllowedLocations"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gamesessionqueue-filterconfiguration.html#cfn-gamelift-gamesessionqueue-filterconfiguration-allowedlocations"""
@attr.s
class PropFleetServerProcess(Property):
    """
    CloudFormation property object for ``AWS::GameLift::Fleet.ServerProcess``.

    Field naming convention (matches the validators below): ``rp_`` fields are
    required (plain ``instance_of`` validator), ``p_`` fields are optional
    (validator wrapped in ``attr.validators.optional``).

    AWS Object Type = "AWS::GameLift::Fleet.ServerProcess"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-serverprocess.html
    Property Document:
    
    - ``rp_ConcurrentExecutions``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-serverprocess.html#cfn-gamelift-fleet-serverprocess-concurrentexecutions
    - ``rp_LaunchPath``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-serverprocess.html#cfn-gamelift-fleet-serverprocess-launchpath
    - ``p_Parameters``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-serverprocess.html#cfn-gamelift-fleet-serverprocess-parameters
    """
    AWS_OBJECT_TYPE = "AWS::GameLift::Fleet.ServerProcess"
    # Required property "ConcurrentExecutions".
    rp_ConcurrentExecutions: int = attr.ib(
        default=None,
        validator=attr.validators.instance_of(int),
        metadata={AttrMeta.PROPERTY_NAME: "ConcurrentExecutions"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-serverprocess.html#cfn-gamelift-fleet-serverprocess-concurrentexecutions"""
    # Required property "LaunchPath".
    rp_LaunchPath: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "LaunchPath"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-serverprocess.html#cfn-gamelift-fleet-serverprocess-launchpath"""
    # Optional property "Parameters".
    p_Parameters: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Parameters"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-serverprocess.html#cfn-gamelift-fleet-serverprocess-parameters"""
@attr.s
class PropFleetResourceCreationLimitPolicy(Property):
    """
    CloudFormation property object for ``AWS::GameLift::Fleet.ResourceCreationLimitPolicy``.

    Both fields are optional integers (validators wrapped in
    ``attr.validators.optional``).

    AWS Object Type = "AWS::GameLift::Fleet.ResourceCreationLimitPolicy"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-resourcecreationlimitpolicy.html
    Property Document:
    
    - ``p_NewGameSessionsPerCreator``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-resourcecreationlimitpolicy.html#cfn-gamelift-fleet-resourcecreationlimitpolicy-newgamesessionspercreator
    - ``p_PolicyPeriodInMinutes``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-resourcecreationlimitpolicy.html#cfn-gamelift-fleet-resourcecreationlimitpolicy-policyperiodinminutes
    """
    AWS_OBJECT_TYPE = "AWS::GameLift::Fleet.ResourceCreationLimitPolicy"
    # Optional property "NewGameSessionsPerCreator".
    p_NewGameSessionsPerCreator: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "NewGameSessionsPerCreator"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-resourcecreationlimitpolicy.html#cfn-gamelift-fleet-resourcecreationlimitpolicy-newgamesessionspercreator"""
    # Optional property "PolicyPeriodInMinutes".
    p_PolicyPeriodInMinutes: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "PolicyPeriodInMinutes"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-resourcecreationlimitpolicy.html#cfn-gamelift-fleet-resourcecreationlimitpolicy-policyperiodinminutes"""
@attr.s
class PropGameServerGroupInstanceDefinition(Property):
    """
    CloudFormation property object for ``AWS::GameLift::GameServerGroup.InstanceDefinition``.

    ``rp_InstanceType`` is required (plain ``instance_of`` validator);
    ``p_WeightedCapacity`` is optional.

    AWS Object Type = "AWS::GameLift::GameServerGroup.InstanceDefinition"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-instancedefinition.html
    Property Document:
    
    - ``rp_InstanceType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-instancedefinition.html#cfn-gamelift-gameservergroup-instancedefinition-instancetype
    - ``p_WeightedCapacity``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-instancedefinition.html#cfn-gamelift-gameservergroup-instancedefinition-weightedcapacity
    """
    AWS_OBJECT_TYPE = "AWS::GameLift::GameServerGroup.InstanceDefinition"
    # Required property "InstanceType".
    rp_InstanceType: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "InstanceType"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-instancedefinition.html#cfn-gamelift-gameservergroup-instancedefinition-instancetype"""
    # Optional property "WeightedCapacity".
    p_WeightedCapacity: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "WeightedCapacity"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-instancedefinition.html#cfn-gamelift-gameservergroup-instancedefinition-weightedcapacity"""
@attr.s
class PropFleetRuntimeConfiguration(Property):
    """
    CloudFormation property object for ``AWS::GameLift::Fleet.RuntimeConfiguration``.

    All fields are optional. ``p_ServerProcesses`` accepts a list of
    ``PropFleetServerProcess`` instances or plain dicts (normalized by the
    ``from_list`` converter before the ``deep_iterable`` validator runs).

    AWS Object Type = "AWS::GameLift::Fleet.RuntimeConfiguration"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-runtimeconfiguration.html
    Property Document:
    
    - ``p_GameSessionActivationTimeoutSeconds``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-runtimeconfiguration.html#cfn-gamelift-fleet-runtimeconfiguration-gamesessionactivationtimeoutseconds
    - ``p_MaxConcurrentGameSessionActivations``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-runtimeconfiguration.html#cfn-gamelift-fleet-runtimeconfiguration-maxconcurrentgamesessionactivations
    - ``p_ServerProcesses``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-runtimeconfiguration.html#cfn-gamelift-fleet-runtimeconfiguration-serverprocesses
    """
    AWS_OBJECT_TYPE = "AWS::GameLift::Fleet.RuntimeConfiguration"
    # Optional property "GameSessionActivationTimeoutSeconds".
    p_GameSessionActivationTimeoutSeconds: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "GameSessionActivationTimeoutSeconds"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-runtimeconfiguration.html#cfn-gamelift-fleet-runtimeconfiguration-gamesessionactivationtimeoutseconds"""
    # Optional property "MaxConcurrentGameSessionActivations".
    p_MaxConcurrentGameSessionActivations: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "MaxConcurrentGameSessionActivations"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-runtimeconfiguration.html#cfn-gamelift-fleet-runtimeconfiguration-maxconcurrentgamesessionactivations"""
    # Optional list property; list members may be dicts (converted via from_list).
    p_ServerProcesses: typing.List[typing.Union['PropFleetServerProcess', dict]] = attr.ib(
        default=None,
        converter=PropFleetServerProcess.from_list,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropFleetServerProcess), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "ServerProcesses"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-runtimeconfiguration.html#cfn-gamelift-fleet-runtimeconfiguration-serverprocesses"""
@attr.s
class PropGameSessionQueuePriorityConfiguration(Property):
    """
    CloudFormation property object for ``AWS::GameLift::GameSessionQueue.PriorityConfiguration``.

    Both fields are optional lists of intrinsic strings (member-wise
    validation via ``deep_iterable``).

    AWS Object Type = "AWS::GameLift::GameSessionQueue.PriorityConfiguration"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gamesessionqueue-priorityconfiguration.html
    Property Document:
    
    - ``p_LocationOrder``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gamesessionqueue-priorityconfiguration.html#cfn-gamelift-gamesessionqueue-priorityconfiguration-locationorder
    - ``p_PriorityOrder``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gamesessionqueue-priorityconfiguration.html#cfn-gamelift-gamesessionqueue-priorityconfiguration-priorityorder
    """
    AWS_OBJECT_TYPE = "AWS::GameLift::GameSessionQueue.PriorityConfiguration"
    # Optional list property "LocationOrder".
    p_LocationOrder: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "LocationOrder"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gamesessionqueue-priorityconfiguration.html#cfn-gamelift-gamesessionqueue-priorityconfiguration-locationorder"""
    # Optional list property "PriorityOrder".
    p_PriorityOrder: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "PriorityOrder"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gamesessionqueue-priorityconfiguration.html#cfn-gamelift-gamesessionqueue-priorityconfiguration-priorityorder"""
@attr.s
class PropMatchmakingConfigurationGameProperty(Property):
    """
    CloudFormation property object for ``AWS::GameLift::MatchmakingConfiguration.GameProperty``.

    Both fields are required: each uses a plain ``instance_of`` validator
    that rejects ``None``.

    AWS Object Type = "AWS::GameLift::MatchmakingConfiguration.GameProperty"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-matchmakingconfiguration-gameproperty.html
    Property Document:
    
    - ``rp_Key``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-matchmakingconfiguration-gameproperty.html#cfn-gamelift-matchmakingconfiguration-gameproperty-key
    - ``rp_Value``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-matchmakingconfiguration-gameproperty.html#cfn-gamelift-matchmakingconfiguration-gameproperty-value
    """
    AWS_OBJECT_TYPE = "AWS::GameLift::MatchmakingConfiguration.GameProperty"
    # Required property "Key".
    rp_Key: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "Key"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-matchmakingconfiguration-gameproperty.html#cfn-gamelift-matchmakingconfiguration-gameproperty-key"""
    # Required property "Value".
    rp_Value: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "Value"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-matchmakingconfiguration-gameproperty.html#cfn-gamelift-matchmakingconfiguration-gameproperty-value"""
# ------------------- Resource declaration -------------------
@attr.s
class Alias(Resource):
    """
    CloudFormation resource ``AWS::GameLift::Alias``.

    ``rp_`` fields are required, ``p_`` fields are optional (see validators).
    ``rv_``-prefixed properties expose CloudFormation return values as
    ``GetAtt`` references for use in other template resources.

    AWS Object Type = "AWS::GameLift::Alias"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-alias.html
    Property Document:
    
    - ``rp_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-alias.html#cfn-gamelift-alias-name
    - ``rp_RoutingStrategy``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-alias.html#cfn-gamelift-alias-routingstrategy
    - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-alias.html#cfn-gamelift-alias-description
    """
    AWS_OBJECT_TYPE = "AWS::GameLift::Alias"
    # Required property "Name".
    rp_Name: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "Name"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-alias.html#cfn-gamelift-alias-name"""
    # Required; dict input is converted via from_dict before validation.
    rp_RoutingStrategy: typing.Union['PropAliasRoutingStrategy', dict] = attr.ib(
        default=None,
        converter=PropAliasRoutingStrategy.from_dict,
        validator=attr.validators.instance_of(PropAliasRoutingStrategy),
        metadata={AttrMeta.PROPERTY_NAME: "RoutingStrategy"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-alias.html#cfn-gamelift-alias-routingstrategy"""
    # Optional property "Description".
    p_Description: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Description"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-alias.html#cfn-gamelift-alias-description"""
    @property
    def rv_AliasId(self) -> GetAtt:
        """Return a ``GetAtt`` reference to this alias's ``AliasId`` attribute.

        Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-alias.html#aws-resource-gamelift-alias-return-values
        """
        return GetAtt(resource=self, attr_name="AliasId")
@attr.s
class Build(Resource):
    """
    CloudFormation resource ``AWS::GameLift::Build``.

    All fields are optional (every validator is wrapped in
    ``attr.validators.optional``). ``p_StorageLocation`` accepts either a
    ``PropBuildS3Location`` instance or a plain ``dict`` (normalized by the
    ``from_dict`` converter).

    AWS Object Type = "AWS::GameLift::Build"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-build.html
    Property Document:
    
    - ``p_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-build.html#cfn-gamelift-build-name
    - ``p_OperatingSystem``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-build.html#cfn-gamelift-build-operatingsystem
    - ``p_StorageLocation``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-build.html#cfn-gamelift-build-storagelocation
    - ``p_Version``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-build.html#cfn-gamelift-build-version
    """
    AWS_OBJECT_TYPE = "AWS::GameLift::Build"
    # Optional property "Name".
    p_Name: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Name"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-build.html#cfn-gamelift-build-name"""
    # Optional property "OperatingSystem".
    p_OperatingSystem: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "OperatingSystem"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-build.html#cfn-gamelift-build-operatingsystem"""
    # Optional; dict input is converted via from_dict before validation.
    p_StorageLocation: typing.Union['PropBuildS3Location', dict] = attr.ib(
        default=None,
        converter=PropBuildS3Location.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropBuildS3Location)),
        metadata={AttrMeta.PROPERTY_NAME: "StorageLocation"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-build.html#cfn-gamelift-build-storagelocation"""
    # Optional property "Version".
    p_Version: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Version"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-build.html#cfn-gamelift-build-version"""
@attr.s
class Script(Resource):
    """
    CloudFormation resource ``AWS::GameLift::Script``.

    ``rp_StorageLocation`` is required and accepts either a
    ``PropScriptS3Location`` instance or a plain ``dict`` (normalized by the
    ``from_dict`` converter); the remaining fields are optional.
    ``rv_``-prefixed properties expose CloudFormation return values as
    ``GetAtt`` references for use in other template resources.

    AWS Object Type = "AWS::GameLift::Script"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-script.html
    Property Document:
    
    - ``rp_StorageLocation``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-script.html#cfn-gamelift-script-storagelocation
    - ``p_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-script.html#cfn-gamelift-script-name
    - ``p_Version``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-script.html#cfn-gamelift-script-version
    """
    AWS_OBJECT_TYPE = "AWS::GameLift::Script"
    # Required; dict input is converted via from_dict before validation.
    rp_StorageLocation: typing.Union['PropScriptS3Location', dict] = attr.ib(
        default=None,
        converter=PropScriptS3Location.from_dict,
        validator=attr.validators.instance_of(PropScriptS3Location),
        metadata={AttrMeta.PROPERTY_NAME: "StorageLocation"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-script.html#cfn-gamelift-script-storagelocation"""
    # Optional property "Name".
    p_Name: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Name"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-script.html#cfn-gamelift-script-name"""
    # Optional property "Version".
    p_Version: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Version"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-script.html#cfn-gamelift-script-version"""
    @property
    def rv_Id(self) -> GetAtt:
        """Return a ``GetAtt`` reference to this script's ``Id`` attribute.

        Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-script.html#aws-resource-gamelift-script-return-values
        """
        return GetAtt(resource=self, attr_name="Id")
    @property
    def rv_Arn(self) -> GetAtt:
        """Return a ``GetAtt`` reference to this script's ``Arn`` attribute.

        Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-script.html#aws-resource-gamelift-script-return-values
        """
        return GetAtt(resource=self, attr_name="Arn")
@attr.s
class GameServerGroup(Resource):
"""
AWS Object Type = "AWS::GameLift::GameServerGroup"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html
Property Document:
- ``rp_GameServerGroupName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html#cfn-gamelift-gameservergroup-gameservergroupname
- ``rp_InstanceDefinitions``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html#cfn-gamelift-gameservergroup-instancedefinitions
- ``rp_LaunchTemplate``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html#cfn-gamelift-gameservergroup-launchtemplate
- ``rp_RoleArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html#cfn-gamelift-gameservergroup-rolearn
- ``p_AutoScalingPolicy``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html#cfn-gamelift-gameservergroup-autoscalingpolicy
- ``p_BalancingStrategy``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html#cfn-gamelift-gameservergroup-balancingstrategy
- ``p_DeleteOption``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html#cfn-gamelift-gameservergroup-deleteoption
- ``p_GameServerProtectionPolicy``: | |
from typing import Tuple, List, Dict
from ursina import Ursina, camera, window, Light, color, scene, Entity, held_keys, time, Mesh, EditorCamera, invoke
import math
from ursina import Mesh
# Parameters controlling the fractal growth below.
MAX_DEPTH: int = 5          # maximum subdivision depth of the growth loop
LIMIT_VALUE: float = 2.0    # target edge length the figure grows towards
# ONE PHASE CONSTANTS
ITER_COUNT: int = 10        # iterations used to grow each component
# SEVERAL PHASES CONSTANTS
ITER_TETRAHEDRON_COUNT: int = 20  # iterations for the tetrahedron phase
ITER_TRIANGLE_COUNT: int = 10     # iterations for the triangle phase
ACCURACY: float = 0.001           # convergence tolerance for the phased build
class Game(Ursina):
    """Interactive viewer for the tetrahedron-growth fractal.

    Keys:
        e -- advance to the next recorded growth iteration
        q -- step back to the previous iteration
    """

    def __init__(self):
        super().__init__()
        window.color = color.black
        window.fullscreen_size = 1920, 1080
        window.fullscreen = False
        Light(type='ambient', color=(0.5, 0.5, 0.5, 1))
        Light(type='directional', color=(0.5, 0.5, 0.5, 1), direction=(1, 1, 1))
        self.fractal = one_phase_build(ITER_COUNT, LIMIT_VALUE)
        # self.fractal = several_phases_build(ITER_TETRAHEDRON_COUNT, ITER_TRIANGLE_COUNT, LIMIT_VALUE)
        # -1 means "nothing rendered yet"; the first 'e' press shows state 0.
        self.state = -1
        EditorCamera()

    @staticmethod
    def new_game():
        scene.clear()

    def update(self):
        pass

    def input(self, key):
        if key == 'q':
            # BUGFIX: guard with <= 0 (was == 0) so pressing 'q' before
            # anything is shown (state == -1) cannot drive the index
            # negative; Builder.gen() only rejects item >= len(materials),
            # so a negative state would silently wrap via negative
            # list indexing and display the wrong iteration.
            if self.state <= 0:
                return
            self.state -= 1
            scene.clear()
            self.fractal.gen(self.state).model.generate()
        elif key == 'e':
            # Already showing the last recorded iteration: nothing to do.
            if self.state == (len(self.fractal.materials) - 1):
                return
            self.state += 1
            scene.clear()
            self.fractal.gen(self.state).model.generate()
        super().input(key)
class Point:
    """A point in 3-D Cartesian space with ``x``, ``y`` and ``z`` coordinates."""

    def __init__(self, x, y, z):
        self.x, self.y, self.z = x, y, z
class Line:
    """A straight segment between two end points ``p1`` and ``p2``."""

    def __init__(self, p1: Point, p2: Point):
        self.p1 = p1
        self.p2 = p2

    def length(self):
        """Return the Euclidean length of the segment."""
        dx = self.p2.x - self.p1.x
        dy = self.p2.y - self.p1.y
        dz = self.p2.z - self.p1.z
        return math.sqrt(dx ** 2 + dy ** 2 + dz ** 2)
def make_coef_surface(p1: Point, p2: Point, p3: Point) -> (float, float, float, float, Tuple[float, float, float]):
    """Compute the plane through three points and its unit normal.

    The coefficients ``A``, ``B``, ``C`` are the components of the cross
    product (p2 - p1) x (p3 - p1); ``N`` is its magnitude and ``n`` the
    normalised normal vector of the plane.

    :param p1: first point on the plane
    :param p2: second point on the plane
    :param p3: third point on the plane
    :return: tuple ``(A, B, C, N, n)``
    """
    # Edge vectors from p1 to the other two vertices.
    ux, uy, uz = p2.x - p1.x, p2.y - p1.y, p2.z - p1.z
    vx, vy, vz = p3.x - p1.x, p3.y - p1.y, p3.z - p1.z
    # Cross product u x v gives the (unnormalised) plane normal.
    A = uy * vz - vy * uz
    B = vx * uz - ux * vz
    C = ux * vy - vx * uy
    N = math.sqrt(math.pow(A, 2) + math.pow(B, 2) + math.pow(C, 2))
    n = (A / N, B / N, C / N)
    return A, B, C, N, n
def median_case(p1: Point, p2: Point, p3: Point) -> (Point, Point):
    """Return the midpoints of triangle sides (p2, p3) and (p1, p3).

    These midpoints are the feet of the medians drawn from ``p1`` and
    ``p2`` respectively.

    :param p1: first vertex
    :param p2: second vertex
    :param p3: third vertex
    :return: midpoint of side p2-p3, midpoint of side p1-p3
    """
    def _mid(a, b):
        # Component-wise average of two points.
        return Point((a.x + b.x) / 2.0, (a.y + b.y) / 2.0, (a.z + b.z) / 2.0)

    return _mid(p2, p3), _mid(p1, p3)
def find_p7_point(p1: Point, p5: Point) -> Point:
    """Return the point two thirds of the way from ``p1`` towards ``p5``.

    When ``p5`` is the midpoint of the side opposite ``p1`` (i.e. ``p1``-``p5``
    is a median), this is the centroid of the triangle, since the medians
    intersect at 2/3 of their length.

    :param p1: starting vertex
    :param p5: target point (midpoint of the opposite side)
    :return: the interpolated point
    """
    def _two_thirds(a, b):
        return a + ((2 * (b - a)) / 3.0)

    return Point(_two_thirds(p1.x, p5.x),
                 _two_thirds(p1.y, p5.y),
                 _two_thirds(p1.z, p5.z))
def find_p4_point(a: float, b: float, c: float, n: float, h: float, p7: Point) -> Point:
    """Offset ``p7`` by distance ``h`` along the plane normal ``(a, b, c) / n``.

    Used to place the apex of a tetrahedron above the centroid of its base.

    :param a: x component of the (unnormalised) plane normal
    :param b: y component of the plane normal
    :param c: z component of the plane normal
    :param n: magnitude of the normal vector
    :param h: distance to travel along the unit normal
    :param p7: point to offset (e.g. the base centroid)
    :return: the offset point ``p7 + h * (a, b, c) / n``
    """
    return Point(p7.x + (a * h) / n,
                 p7.y + (b * h) / n,
                 p7.z + (c * h) / n)
class MaterialState:
    """Snapshot of one growth element (a wireframe tetrahedron or triangle).

    Stores the mesh data (``vertices``/``triangles``) together with the
    geometric quantities (base vertices, apex, height, base-plane
    coefficients) needed to grow the next generation from it.
    """

    def __init__(self, depth: int, vertices: List[List[float]], triangles: List[List[int]], p1: Point, p2: Point,
                 p3: Point, p4: Point = None, h: float = None, A: float = None, B: float = None, C: float = None):
        # Mesh description used to build the Ursina wireframe model.
        self.depth = depth
        self.vertices = vertices
        self.triangles = triangles
        # Base triangle vertices and (optional) apex of the tetrahedron.
        self.p1, self.p2, self.p3, self.p4 = p1, p2, p3, p4
        # Height of the tetrahedron and plane coefficients of its base.
        self.h = h
        self.A, self.B, self.C = A, B, C
class Builder:
    """Collects the per-iteration materials of a fractal and renders them."""

    def __init__(self):
        # One entry per growth iteration; each entry is a list of MaterialState.
        self.materials = []

    def append_material(self, material: List[MaterialState]):
        """Record the list of materials that make up one growth iteration."""
        self.materials.append(material)

    def gen(self, item: int):
        """Create wireframe entities for iteration ``item``; return the first.

        :param item: index of the recorded iteration to render
        :raises KeyError: if ``item`` is past the last recorded iteration
        """
        print(f'state = {item}, capacity = {len(self.materials)}')
        if item >= len(self.materials):
            raise KeyError
        entities = [
            Entity(model=Mesh(vertices=state.vertices, triangles=state.triangles,
                              thickness=4, mode='line'),
                   scale=2, color=color.yellow)
            for state in self.materials[item]
        ]
        return entities[0]
def make_basic_tetrahedron(coefficient: float, depth: int) -> [MaterialState]:
    """Build the base tetrahedron (the element that grows from a point).

    The base is an equilateral triangle with side ``coefficient`` in the
    z = 0 plane; the apex ``p4`` is raised by ``h = sqrt(2/3) * coefficient``
    along the base-plane normal, which makes the tetrahedron regular.

    :param coefficient: current scale (edge length) of the figure
    :param depth: recursion depth recorded on the resulting state
    :return: one-element list with the fractal state at this scale
    """
    # Equilateral base triangle in the z = 0 plane.
    p1 = Point(0.0 * coefficient, 0.0 * coefficient, 0.0 * coefficient)
    p2 = Point(0.5 * coefficient, (math.sqrt(3) / 2.0) * coefficient, 0.0 * coefficient)
    p3 = Point(1.0 * coefficient, 0.0 * coefficient, 0.0 * coefficient)
    # Height of a regular tetrahedron with edge length `coefficient`.
    h = (math.sqrt(2.0 / 3.0)) * coefficient
    A, B, C, N, n = make_coef_surface(p1, p2, p3)
    p5, p6 = median_case(p1, p2, p3)
    # p7 is the base centroid (2/3 of the way along the median from p1).
    p7 = find_p7_point(p1, p5)
    # Apex: centroid offset by h along the base-plane normal.
    p4 = find_p4_point(A, B, C, N, h, p7)
    # Wireframe: the three base edges plus the three edges to the apex.
    vertices = [[p1.x, p1.y, p1.z], [p2.x, p2.y, p2.z], [p3.x, p3.y, p3.z], [p4.x, p4.y, p4.z]]
    triangles = [[0, 1], [1, 2], [2, 0], [0, 3], [3, 1], [3, 2]]
    return [MaterialState(depth, vertices, triangles, p1=p1, p2=p2, p3=p3, p4=p4, h=h, A=A, B=B, C=C)]
def one_phase_build(iter_count: int, limit_value: float) -> Builder:
"""
Формирования фрактальной структуры однофазным адгоритмом роста
:param iter_count: Количество итераций, за которое необходимо вырастить каждый из компонентов фрактальной структуры
:param limit_value: Предельено значние стороны, до которого необходимо осуществлять рост
:return: Сформрованная фрактальная труктура по каждой из итераций
"""
fractal = Builder()
coefficient = limit_value / float(iter_count)
for i in range(iter_count):
c = coefficient + coefficient * i
fractal.append_material(make_basic_tetrahedron(c, 0))
last_material = fractal.materials[-1][0]
mp11 = calc_midpoint(last_material.p1, last_material.p2)
mp21 = calc_midpoint(last_material.p2, last_material.p3)
mp31 = calc_midpoint(last_material.p3, last_material.p1)
h_new_1 = last_material.h * calc_distance(mp11, mp21) / calc_distance(last_material.p1, last_material.p2)
mp12 = calc_midpoint(last_material.p1, last_material.p2)
mp22 = calc_midpoint(last_material.p2, last_material.p4)
mp32 = calc_midpoint(last_material.p4, last_material.p1)
h_new_2 = last_material.h * calc_distance(mp12, mp22) / calc_distance(last_material.p1, last_material.p2)
mp13 = calc_midpoint(last_material.p2, last_material.p3)
mp23 = calc_midpoint(last_material.p3, last_material.p4)
mp33 = calc_midpoint(last_material.p4, last_material.p2)
h_new_3 = last_material.h * calc_distance(mp13, mp23) / calc_distance(last_material.p2, last_material.p3)
mp14 = calc_midpoint(last_material.p1, last_material.p3)
mp24 = calc_midpoint(last_material.p3, last_material.p4)
mp34 = calc_midpoint(last_material.p4, last_material.p1)
h_new_4 = last_material.h * calc_distance(mp14, mp24) / calc_distance(last_material.p1, last_material.p3)
# Коэфициент роста не для тетраедра, а для трегуольника, образовашегося путем дроблении грани, на 4 треугольника
ordinary_coefficient = calc_distance(last_material.p1, mp11) / float(iter_count)
c = 1
for i in range(iter_count):
materials = []
c += ordinary_coefficient
c2 = coefficient + coefficient * i
materials.append(growth_triangle(p1=last_material.p1, p2=mp11, p3=mp31, h=h_new_1, n_prev=(-last_material.A, -last_material.B, -last_material.C), coefficient=c, depth=1))
materials.append(growth_triangle(p1=mp11, p2=last_material.p2, p3=mp21, h=h_new_1, n_prev=(-last_material.A, -last_material.B, -last_material.C), coefficient=c, depth=1))
materials.append(growth_triangle(p1=mp21, p2=last_material.p3, p3=mp31, h=h_new_1, n_prev=(-last_material.A, -last_material.B, -last_material.C), coefficient=c, depth=1))
f_c = calc_centroid(
Point(mp11.x * c, mp11.y * c, mp11.z * c),
Point(mp21.x * c, mp21.y * c, mp21.z * c),
Point(mp31.x * c, mp31.y * c, mp31.z * c))
materials.append(cal_tetrahedron_1(mp11, mp21, mp31, h_new_1, coefficient=c2, depth=1, f_c=f_c))
materials.append(growth_triangle(p1=last_material.p1, p2=mp12, p3=mp32, h=h_new_2, n_prev=(last_material.A, last_material.B, last_material.C), coefficient=c, depth=1))
materials.append(growth_triangle(p1=mp12, p2=last_material.p2, p3=mp22, h=h_new_2, n_prev=(last_material.A, last_material.B, last_material.C), coefficient=c, depth=1))
materials.append(growth_triangle(p1=mp22, p2=last_material.p4, p3=mp32, h=h_new_2, n_prev=(last_material.A, last_material.B, last_material.C), coefficient=c, depth=1))
f_c = calc_centroid(
Point(mp12.x * c, mp12.y * c, mp12.z * c),
Point(mp22.x * c, mp22.y * c, mp22.z * c),
Point(mp32.x * c, mp32.y * c, mp32.z * c))
materials.append(cal_tetrahedron(mp12, mp22, mp32, h_new_1, (last_material.A, last_material.B, last_material.C), coefficient=c2, depth=1, f_c=f_c))
materials.append(growth_triangle(p1=last_material.p2, p2=mp13, p3=mp33, h=h_new_3, n_prev=(last_material.A, last_material.B, last_material.C), coefficient=c, depth=1))
materials.append(growth_triangle(p1=mp13, p2=last_material.p3, p3=mp23, h=h_new_3, n_prev=(last_material.A, last_material.B, last_material.C), coefficient=c, depth=1))
materials.append(growth_triangle(p1=mp23, p2=last_material.p4, p3=mp33, h=h_new_3, n_prev=(last_material.A, last_material.B, last_material.C), coefficient=c, depth=1))
f_c = calc_centroid(
Point(mp13.x * c, mp13.y * c, mp13.z * c),
Point(mp23.x * c, mp23.y * c, mp23.z * c),
Point(mp33.x * c, mp33.y * c, mp33.z * c))
materials.append(cal_tetrahedron(mp13, mp23, mp33, h_new_3, (last_material.A, last_material.B, last_material.C), coefficient=c2, depth=1, f_c=f_c))
materials.append(growth_triangle(p1=last_material.p1, p2=mp14, p3=mp34, h=h_new_4, n_prev=(last_material.A, last_material.B, last_material.C), coefficient=c, depth=1))
materials.append(growth_triangle(p1=mp14, p2=last_material.p3, p3=mp24, h=h_new_4, n_prev=(last_material.A, last_material.B, last_material.C), coefficient=c, depth=1))
materials.append(growth_triangle(p1=mp24, p2=last_material.p4, p3=mp34, h=h_new_4, n_prev=(last_material.A, last_material.B, last_material.C), coefficient=c, depth=1))
f_c = calc_centroid(
Point(mp14.x * c, mp14.y * c, mp14.z * c),
Point(mp24.x * c, mp24.y * c, mp24.z * c),
Point(mp34.x * c, mp34.y * c, mp34.z * c))
materials.append(cal_tetrahedron(mp14, mp24, mp34, h_new_4, (last_material.A, last_material.B, last_material.C), coefficient=c2, depth=1, f_c=f_c))
fractal.append_material(materials)
# preparation
edges = preparation_edges(fractal.materials[-1])
current_depth = 1
while MAX_DEPTH - current_depth != 0:
c = 1
for i in range(iter_count):
materials = []
c += ordinary_coefficient
c2 = coefficient + coefficient * i
for edgs in edges:
mp1 = calc_midpoint(edgs["edges"][0], edgs["edges"][1])
mp2 = calc_midpoint(edgs["edges"][1], edgs["edges"][2])
mp3 = calc_midpoint(edgs["edges"][2], edgs["edges"][0])
h_new = edgs["height"] * calc_distance(mp1, mp2) / calc_distance(edgs["edges"][0], edgs["edges"][1])
materials.append(growth_triangle(p1=edgs["edges"][0], p2=mp1, p3=mp3, h=h_new, n_prev=edgs["normal"], coefficient=c, depth=1))
materials.append(growth_triangle(p1=mp1, p2=edgs["edges"][1], p3=mp2, h=h_new, n_prev=edgs["normal"], coefficient=c, depth=1))
materials.append(growth_triangle(p1=mp2, | |
Defaults to [].
"""
if client_comms:
warnings.warn("When wrapping a model function, client comms "
"must either be initialized outside the function, "
"pass a 'global_scope' parameter to the "
"comm initialization (e.g. Python, R, Matlab), "
"or use a 'WITH_GLOBAL_SCOPE' macro "
"(e.g. C, C++, Fortran) around the initialization "
"so that they are persistent "
"across calls and the call or recv/send methods "
"must be called explicitly (as opposed to the "
"function inputs/outputs which will be handled "
"by the wrapper). This model's client comms are:\n"
"\t%s" % client_comms)
# Replace server input w/ split input/output and remove client
# connections from inputs
for i, x in enumerate(inputs):
if x.get('server_replaces', False):
inputs[x['server_replaces']['input_index']] = (
x['server_replaces']['input'])
outputs.insert(x['server_replaces']['output_index'],
x['server_replaces']['output'])
rm_outputs = [i for i, x in enumerate(outputs)
if x['name'] in client_comms]
for i in rm_outputs[::-1]:
outputs.pop(i)
    @classmethod
    def preparse_function(cls, yml):
        r"""Extract information about inputs and outputs based on the
        function being wrapped.

        Locates the model source file, parses the definition of
        ``yml['function']``, expands server channels, and stores the
        collected parameters under ``yml['preparsed_function']``.

        Args:
            yml (dict): Options that will be used to initialize the model.
                Modified in place (``preparsed_function`` key is added).

        Returns:
            dict: Information about the parsed function, or None if the
                model does not wrap a function.

        Raises:
            ValueError: If the language is not parameterized, or no
                source file can be identified, or the source file does
                not exist.
        """
        # Only applies to models that wrap a function.
        if 'function' not in yml:
            return
        if yml.get('is_server', False):
            assert(isinstance(yml['is_server'], dict))
        if cls.function_param is None:
            raise ValueError(("Language %s is not parameterized "
                              "and so functions cannot be automatically "
                              "wrapped as a model.") % cls.language)
        # The first identified source file is assumed to hold the function.
        source_files = cls.identify_source_files(**yml)
        if not source_files:  # pragma: debug
            raise ValueError("Could not identify any source files.")
        model_function_file = source_files[0]
        if not os.path.isfile(model_function_file):  # pragma: debug
            raise ValueError("Source file does not exist: '%s'"
                             % model_function_file)
        # Update input/outputs based on parsed source code
        client_comms = ['%s:%s_%s' % (yml['name'], x, yml['name'])
                        for x in yml.get('client_of', [])]
        # Shallow copies so the server expansion below does not rewrite
        # the lists stored in yml directly.
        model_function_inputs = copy.copy(yml.get('inputs', []))
        model_function_outputs = copy.copy(yml.get('outputs', []))
        cls.expand_server_io(
            model_function_inputs, model_function_outputs,
            client_comms=client_comms)
        expected_outputs = []
        for x in model_function_outputs:
            expected_outputs += x.get('vars', [])
        # outputs_in_inputs may be set explicitly in the YAML or inferred
        # from the parsed function definition.
        model_outputs_in_inputs = yml.get('outputs_in_inputs', None)
        model_function_info = cls.parse_function_definition(
            model_function_file, yml['function'],
            expected_outputs=expected_outputs,
            outputs_in_inputs=model_outputs_in_inputs)
        if model_outputs_in_inputs is None:
            model_outputs_in_inputs = model_function_info.get(
                'outputs_in_inputs', None)
        model_flag = cls.update_io_from_function(
            model_function_info, yml['function'],
            inputs=model_function_inputs,
            outputs=model_function_outputs,
            iter_function_over=yml.get('iter_function_over', []))
        # Cache everything write_model_wrapper needs; skip_update_io
        # prevents update_io_from_function from running a second time.
        yml['preparsed_function'] = {
            'model_file': model_function_info,
            'model_function': yml['function'],
            'inputs': model_function_inputs,
            'outputs': model_function_outputs,
            'model_flag': model_flag,
            'outputs_in_inputs': model_outputs_in_inputs,
            'copies': yml.get('copies', 1),
            'iter_function_over': yml.get('iter_function_over', []),
            'skip_update_io': True}
        return yml['preparsed_function']
    @classmethod
    def update_io_from_function(cls, model_file, model_function,
                                inputs=[], outputs=[], contents=None,
                                outputs_in_inputs=None, iter_function_over=[]):
        r"""Update inputs/outputs from the function definition.

        The ``inputs`` and ``outputs`` entries are modified in place:
        channel names, variable lists and datatypes are filled in from the
        parsed function information.

        Args:
            model_file (str): Full path to the file containing the model
                function's declaration, or a dict of already-parsed
                function information.
            model_function (str): Name of the model function.
            inputs (list, optional): List of model inputs including types.
                Defaults to []. Modified in place.
            outputs (list, optional): List of model outputs including types.
                Defaults to []. Modified in place.
            contents (str, optional): Contents of file to parse rather than
                re-reading the file. Defaults to None and is ignored.
            outputs_in_inputs (bool, optional): If True, the outputs are
                presented in the function definition as inputs. Defaults
                to None, in which case it is taken from the parsed
                function information or the class attribute
                ``outputs_in_inputs``.
            iter_function_over (array, optional): Variable(s) that should be
                received or sent as an array, but iterated over. Defaults to
                an empty array and is ignored.

        Returns:
            dict, None: Flag variable used by the model. If None, the
                model does not use a flag variable.
        """
        # NOTE(review): the mutable default arguments ([]) are never
        # appended to directly here, but sharing them across calls is
        # fragile; callers always pass explicit lists.
        # Read info from the source code
        if (((isinstance(model_file, str) and os.path.isfile(model_file))
             or (contents is not None))):  # pragma: debug
            expected_outputs = []
            for x in outputs:
                expected_outputs += x.get('vars', [])
            info = cls.parse_function_definition(model_file, model_function,
                                                 contents=contents,
                                                 expected_outputs=expected_outputs)
            # NOTE(review): logger.warn is a deprecated alias of
            # logger.warning.
            logger.warn("The new execution pattern reuses the parsed "
                        "source code parameters. Double check results:\n%s."
                        % pformat(info))
        elif isinstance(model_file, dict):
            # Pre-parsed information (e.g. from preparse_function).
            info = model_file
        else:
            info = {"inputs": [], "outputs": []}
        if outputs_in_inputs is None:  # pragma: debug
            outputs_in_inputs = info.get('outputs_in_inputs',
                                         cls.outputs_in_inputs)
        # Per-direction map of variable name -> parsed variable info.
        info_map = {io: OrderedDict([(x['name'], x) for x in info.get(io, [])])
                    for io in ['inputs', 'outputs']}
        # Determine flag variable
        flag_var = None
        if info.get('flag_var', None):
            flag_var = dict(info['flag_var'], name='model_flag')
        # Check for vars matching names of input/output channels
        for io, io_var in zip(['inputs', 'outputs'], [inputs, outputs]):
            if (io == 'outputs') and outputs_in_inputs:
                # Outputs appear among the function's input arguments.
                io_map = info_map['inputs']
            else:
                io_map = info_map[io]
            for x in io_var:
                if x.get('vars', []):
                    continue
                # Channel names are '<model>:<var>'; match on the var part.
                var_name = x['name'].split(':')[-1]
                if var_name in io_map:
                    x['vars'] = [var_name]
                    for k in ['length', 'shape', 'ndim']:
                        kvar = '%s_var' % k
                        if kvar in io_map[var_name]:
                            x['vars'].append(io_map[var_name][kvar])
        # Move variables if outputs in inputs
        if outputs_in_inputs:
            # When the parsed inputs cover both channels exactly, assign
            # them positionally: first to inputs, remainder to outputs.
            if ((((len(inputs) + len(outputs)) == len(info.get('inputs', [])))
                 and (len(info.get('outputs', [])) == 0))):
                for i, vdict in enumerate(info['inputs'][:len(inputs)]):
                    inputs[i].setdefault('vars', [vdict['name']])
                    assert(inputs[i]['vars'] == [vdict['name']])
                for i, vdict in enumerate(info['inputs'][len(inputs):]):
                    outputs[i].setdefault('vars', [vdict['name']])
                    assert(outputs[i]['vars'] == [vdict['name']])
            # Relocate output variables out of the parsed-input map.
            for x in outputs:
                for i, v in enumerate(x.get('vars', [])):
                    if v in info_map['inputs']:
                        info_map['outputs'][v] = cls.input2output(
                            info_map['inputs'].pop(v))
        for io, io_var in zip(['inputs', 'outputs'], [inputs, outputs]):
            for x in io_var:
                # Derive the wrapper channel identifier from the channel name.
                x['channel_name'] = x['name']
                x['channel'] = (x['name'].split(':', 1)[-1]
                                + '_%s_channel' % io[:-1])
                # Replace var-name strings with the parsed variable dicts.
                for i, v in enumerate(x.get('vars', [])):
                    if v in info_map[io]:
                        x['vars'][i] = info_map[io][v]
            if (len(io_var) == 1) and info_map.get(io, False):
                # A single channel absorbs every parsed variable.
                io_var[0].setdefault('vars', list(info_map[io].values()))
            for x in io_var:
                if 'vars' not in x:
                    # Fall back to treating the channel itself as one var.
                    x['vars'] = [copy.deepcopy(x)]
                    x['vars'][0]['name'] = x['name'].split(':', 1)[-1]
                # Normalize string datatypes into schema dicts.
                for v in x['vars']:
                    if isinstance(v.get('datatype', None), str):
                        v['datatype'] = {'type': v['datatype']}
                if isinstance(x.get('datatype', None), str):
                    x['datatype'] = {'type': x['datatype']}
            # Check for user defined length variables and add flag to
            # length variables
            for x in io_var:
                for k in ['length', 'shape', 'ndim']:
                    for v in x['vars']:
                        if k + '_var' in v:
                            v[k + '_var'] = info_map[io][v[k + '_var']]
                            # v[k + '_var']['is_' + k + '_var'] = True
                            v[k + '_var']['is_length_var'] = True
                        else:
                            v[k + '_var'] = False
            # Update datatypes
            if cls.is_typed:
                for x in io_var:
                    # Only non-length variables carry payload datatypes.
                    non_length = []
                    for v in x['vars']:
                        if not v.get('is_length_var', False):
                            non_length.append(v)
                    if ((x.get('datatype', None)
                         and (not is_default_typedef(x['datatype'])))):
                        # Channel datatype overrides the parsed var types.
                        if (len(non_length) == 1):
                            non_length[0]['datatype'] = x['datatype']
                        else:
                            # TODO: Remove types associated with length?
                            assert(x['datatype']['type'] == 'array')
                            assert(len(x['datatype']['items'])
                                   == len(non_length))
                            for v, t in zip(non_length, x['datatype']['items']):
                                v['datatype'] = t
                    else:
                        # Derive the channel datatype from the var types.
                        if (len(non_length) == 1):
                            x['datatype'] = non_length[0]['datatype']
                        else:
                            x['datatype'] = {
                                'type': 'array',
                                'items': [v['datatype'] for v in non_length]}
                        x['datatype']['from_function'] = True
                    for v in x['vars']:
                        if 'native_type' not in v:
                            v['native_type'] = cls.get_native_type(**v)
            # Update types based on iteration
            for x in io_var:
                for v in x.get('vars', [x]):
                    if v['name'] in iter_function_over:
                        # Keep the scalar type around, but send/receive the
                        # variable as a 1-D array to iterate over.
                        v['iter_datatype'] = copy.deepcopy(v.get('datatype', {}))
                        if v.get('datatype', {}):
                            assert(v['datatype']['type'] == 'scalar')
                            v['datatype']['type'] = '1darray'
                        v.pop('native_type', None)
                        v['native_type'] = cls.get_native_type(**v)
        # Finalize io variables
        for x in inputs:
            cls.finalize_function_io('input', x)
        for x in outputs:
            cls.finalize_function_io('output', x)
        return flag_var
@classmethod
def finalize_function_io(cls, direction, x):
r"""Finalize info for an input/output channel following function
parsing.
Args:
direction (str): Direction of channel ('input' or 'output')
"""
assert(direction in ['input', 'output'])
@classmethod
def write_model_wrapper(cls, model_file, model_function,
inputs=[], outputs=[], model_flag=None,
outputs_in_inputs=None, verbose=False, copies=1,
iter_function_over=[], verbose_model=False,
skip_update_io=False, model_name=None):
r"""Return the lines required to wrap a model function as an integrated
model.
Args:
model_file (str): Full path to the file containing the model
function's declaration.
model_function (str): Name of the model function.
inputs (list, optional): List of model inputs including types.
Defaults to [].
outputs (list, optional): List of model outputs including types.
Defaults to [].
model_flag (dict, optional): Information about the flag that
should be used to track the success of yggdrasil send/recv
calls. This should only be provided if update_io_from_function
has already been called. Defaults to None and is determined
by update_io_from_function.
outputs_in_inputs (bool, optional): If True, the outputs are
presented in the function definition as inputs. Defaults
to the class attribute outputs_in_inputs.
verbose (bool, optional): If True, the contents of the created file
are displayed. Defaults to False.
copies (int, optional): Number of times the model driver is
duplicated. If more than one, no error will be raised in the
event there is never a call the the function. Defaults to 1.
iter_function_over (array, optional): Variable(s) that should be
received or sent as an array, but iterated over. Defaults to
an empty array and is ignored.
skip_update_io (bool, optional): If True, update_io_from_function
will not be called. Defaults to False.
verbose_model (bool, optional): If True, print statements will
be added after every line in the model. Defaults to False.
model_name (str, optional): Name given to the model. Defaults to
None.
Returns:
list: Lines | |
block should be deleted\n'
'[ Mac ] failures/expected/text.html [ Failure ]\n')
self.set_up_using_raw_expectations(raw_expectations)
self._system_config_remover.remove_os_versions(
'failures/expected/text.html',
{'Mac10.10', 'Mac10.11', 'Mac10.12'})
self._system_config_remover.update_expectations()
updated_exps = self._port.host.filesystem.read_text_file(
self._general_exp_filename)
self.assertEqual(updated_exps,
('# tags: [ Mac10.10 Mac10.11 Mac10.12 Mac ]\n'
'# results: [ Failure ]\n'))
def test_remove_all_mac_versions_from_all_config_expectation(self):
raw_expectations = (
'# tags: [ Mac10.10 Mac10.11 Mac10.12 Mac Linux Win ]\n'
'# results: [ Failure ]\n'
'# Below Expectation should be split\n'
'failures/expected/text.html [ Failure ]\n')
self.set_up_using_raw_expectations(raw_expectations)
self._system_config_remover.remove_os_versions(
'failures/expected/text.html',
{'Mac10.10', 'Mac10.11', 'Mac10.12'})
self._system_config_remover.update_expectations()
updated_exps = self._port.host.filesystem.read_text_file(
self._general_exp_filename)
self.assertEqual(
updated_exps,
('# tags: [ Mac10.10 Mac10.11 Mac10.12 Mac Linux Win ]\n'
'# results: [ Failure ]\n'
'# Below Expectation should be split\n'
'[ Linux ] failures/expected/text.html [ Failure ]\n'
'[ Win ] failures/expected/text.html [ Failure ]\n'))
def test_remove_all_mac_versions_from_linux_expectation(self):
raw_expectations = (
'# tags: [ Mac10.10 Mac10.11 Mac10.12 Mac Linux Win ]\n'
'# results: [ Failure ]\n'
'# Below Expectation should be unaffected\n'
'[ Linux ] failures/expected/text.html [ Failure ]\n')
self.set_up_using_raw_expectations(raw_expectations)
self._system_config_remover.remove_os_versions(
'failures/expected/text.html',
{'Mac10.10', 'Mac10.11', 'Mac10.12'})
self._system_config_remover.update_expectations()
updated_exps = self._port.host.filesystem.read_text_file(
self._general_exp_filename)
self.assertEqual(updated_exps, raw_expectations)
def test_remove_all_configs(self):
raw_expectations = (
'# tags: [ Mac10.10 Mac10.11 Mac10.12 Mac Linux Win ]\n'
'# results: [ Failure ]\n'
'# Below Expectation and this comment should be deleted\n'
'failures/expected/text.html [ Failure ]\n')
self.set_up_using_raw_expectations(raw_expectations)
all_versions = reduce(
lambda x, y: x + y,
list(self._port.configuration_specifier_macros_dict.values()))
self._system_config_remover.remove_os_versions(
'failures/expected/text.html', all_versions)
self._system_config_remover.update_expectations()
updated_exps = self._port.host.filesystem.read_text_file(
self._general_exp_filename)
self.assertEqual(
updated_exps,
('# tags: [ Mac10.10 Mac10.11 Mac10.12 Mac Linux Win ]\n'
'# results: [ Failure ]\n'))
def test_remove_all_configs2(self):
raw_expectations = (
'# tags: [ Mac10.10 Mac10.11 Mac10.12 Mac Linux Win ]\n'
'# results: [ Failure ]\n'
'# Below Expectation and this comment should be deleted\n'
'[ Mac ] failures/expected/text.html [ Failure ]\n')
self.set_up_using_raw_expectations(raw_expectations)
all_versions = reduce(
lambda x, y: x + y,
list(self._port.configuration_specifier_macros_dict.values()))
self._system_config_remover.remove_os_versions(
'failures/expected/text.html', all_versions)
self._system_config_remover.update_expectations()
updated_exps = self._port.host.filesystem.read_text_file(
self._general_exp_filename)
self.assertEqual(
updated_exps,
('# tags: [ Mac10.10 Mac10.11 Mac10.12 Mac Linux Win ]\n'
'# results: [ Failure ]\n'))
def test_remove_mac_version_from_another_mac_version_expectation(self):
raw_expectations = (
'# tags: [ Mac10.10 Mac10.11 Linux ]\n'
'# results: [ Failure ]\n'
'# Below Expectation should be unaffected\n'
'[ Mac10.11 ] failures/expected/text.html [ Failure ]\n')
self.set_up_using_raw_expectations(raw_expectations)
self._system_config_remover.remove_os_versions(
'failures/expected/text.html', set(['Mac10.10']))
self._system_config_remover.update_expectations()
updated_exps = self._port.host.filesystem.read_text_file(
self._general_exp_filename)
self.assertEqual(updated_exps, raw_expectations)
def test_remove_mac_version_from_same_mac_version_expectation(self):
raw_expectations = (
'# tags: [ Mac10.10 Mac10.11 Linux ]\n'
'# results: [ Failure ]\n'
'# Below Expectation as well as this comment should be deleted\n'
'[ Mac10.10 ] failures/expected/text.html [ Failure ]\n')
self.set_up_using_raw_expectations(raw_expectations)
self._system_config_remover.remove_os_versions(
'failures/expected/text.html', set(['Mac10.10']))
self._system_config_remover.update_expectations()
updated_exps = self._port.host.filesystem.read_text_file(
self._general_exp_filename)
self.assertEqual(updated_exps, ('# tags: [ Mac10.10 Mac10.11 Linux ]\n'
'# results: [ Failure ]\n'))
class MiscTests(Base):
    """Assorted TestExpectations behaviours: tag matching per port,
    multi-result lines, override merging, and bot expectation merging."""

    def test_parse_mac_legacy_names(self):
        # A Mac10.10-tagged line applies on the matching mac port...
        host = MockHost()
        expectations_dict = OrderedDict()
        expectations_dict['expectations'] = (
            '# tags: [ Mac10.10 ]\n'
            '# results: [ Failure ]\n'
            '[ Mac10.10 ] failures/expected/text.html [ Failure ]\n')
        port = host.port_factory.get('test-mac-mac10.10', None)
        port.expectations_dict = lambda: expectations_dict
        expectations = TestExpectations(port)
        self.assertEqual(
            expectations.get_expectations('failures/expected/text.html').
            results, {ResultType.Failure})
        # ...but not on a win7 port, which falls back to the default Pass.
        port = host.port_factory.get('test-win-win7', None)
        port.expectations_dict = lambda: expectations_dict
        expectations = TestExpectations(port)
        self.assertEqual(
            expectations.get_expectations('failures/expected/text.html').
            results, {ResultType.Pass})

    def test_get_test_with_expected_result(self):
        # Only the expectation whose tag matches the current port (win7
        # here) is reported for the queried result type.
        test_expectations = (
            '# tags: [ win7 linux ]\n'
            '# results: [ Failure ]\n'
            '[ win7 ] failures/expected/text.html [ Failure ]\n'
            '[ linux ] failures/expected/image_checksum.html [ Failure ]\n')
        self.parse_exp(test_expectations)
        self.assertEqual(
            self._exp.get_tests_with_expected_result(ResultType.Failure),
            set(['failures/expected/text.html']))

    def test_multiple_results(self):
        # A single line may list several expected results.
        self.parse_exp(
            '# results: [ Crash Failure ]\nfailures/expected/text.html [ Crash Failure ]'
        )
        self.assertEqual(
            self._exp.get_expectations('failures/expected/text.html').results,
            {ResultType.Failure, ResultType.Crash})

    def test_overrides_include_slow(self):
        # A Slow override sets the is_slow_test flag without replacing
        # the base Failure expectation.
        self.parse_exp(
            '# results: [ Failure ]\nfailures/expected/text.html [ Failure ]',
            '# results: [ Slow ]\nfailures/expected/text.html [ Slow ]')
        self.assert_exp_list('failures/expected/text.html',
                             set([ResultType.Failure]))
        self.assertTrue(
            self._exp.get_expectations('failures/expected/text.html').
            is_slow_test)

    def test_overrides(self):
        # Override results are merged with, not substituted for, the base.
        self.parse_exp(
            '# results: [ Failure ]\nfailures/expected/text.html [ Failure ]',
            '# results: [ Timeout ]\nfailures/expected/text.html [ Timeout ]')
        self.assert_exp_list('failures/expected/text.html',
                             {ResultType.Failure, ResultType.Timeout})

    def test_more_specific_override_resets_skip(self):
        # A per-test override merges with a glob-based Skip.
        self.parse_exp(
            '# results: [ Skip ]\nfailures/expected* [ Skip ]',
            '# results: [ Failure ]\nfailures/expected/text.html [ Failure ]')
        self.assert_exp_list('failures/expected/text.html',
                             {ResultType.Failure, ResultType.Skip})

    def test_bot_test_expectations(self):
        """Test that expectations are merged rather than overridden when using flaky option 'unexpected'."""
        test_name1 = 'failures/expected/text.html'
        test_name2 = 'passes/text.html'
        expectations_dict = OrderedDict()
        expectations_dict['expectations'] = (
            '# results: [ Failure Crash ]\n%s [ Failure ]\n%s [ Crash ]\n' %
            (test_name1, test_name2))
        self._port.expectations_dict = lambda: expectations_dict
        # Without bot expectations the file contents apply as-is.
        expectations = TestExpectations(self._port)
        self.assertEqual(
            expectations.get_expectations(test_name1).results,
            set([ResultType.Failure]))
        self.assertEqual(
            expectations.get_expectations(test_name2).results,
            set([ResultType.Crash]))

        def bot_expectations():
            return {
                test_name1: [ResultType.Pass, ResultType.Timeout],
                test_name2: [ResultType.Crash]
            }

        self._port.bot_expectations = bot_expectations
        self._port._options.ignore_flaky_tests = 'unexpected'
        # With ignore_flaky_tests='unexpected', bot-observed results are
        # unioned with the file expectations for test_name1; test_name2's
        # bot results already match, so nothing is added.
        expectations = TestExpectations(self._port)
        self.assertEqual(
            expectations.get_expectations(test_name1).results,
            set([ResultType.Pass, ResultType.Failure, ResultType.Timeout]))
        self.assertEqual(
            expectations.get_expectations(test_name2).results,
            set([ResultType.Crash]))
class RemoveExpectationsTest(Base):
    """Tests for TestExpectations.remove_expectations() and commit_changes()."""
    def test_remove_expectation(self):
        """Removing a file's only expectation also drops its lead-in comment."""
        port = MockHost().port_factory.get('test-win-win7')
        raw_expectations = ('# tags: [ Mac Win ]\n'
                            '# results: [ Failure ]\n'
                            '\n'
                            '# This comment will be deleted\n'
                            '[ mac ] test1 [ Failure ]\n')
        expectations_dict = OrderedDict()
        expectations_dict['/tmp/TestExpectations'] = ''
        expectations_dict['/tmp/TestExpectations2'] = raw_expectations
        test_expectations = TestExpectations(port, expectations_dict)
        # Index 1 is the second file in expectations_dict (TestExpectations2).
        test_to_exps = test_expectations._expectations[1].individual_exps
        test_expectations.remove_expectations('/tmp/TestExpectations2',
                                              [test_to_exps['test1'][0]])
        test_expectations.commit_changes()
        content = port.host.filesystem.read_text_file('/tmp/TestExpectations2')
        # Only the header remains; the expectation line, its comment, and the
        # blank line above them are gone.
        self.assertEqual(content, ('# tags: [ Mac Win ]\n'
                                   '# results: [ Failure ]\n'))
    def test_readd_removed_expectation_instance(self):
        """An Expectation instance removed in one call can be re-added afterwards."""
        port = MockHost().port_factory.get('test-win-win7')
        raw_expectations = ('# tags: [ Mac Win ]\n'
                            '# results: [ Failure ]\n'
                            '\n'
                            '# This comment will not be deleted\n'
                            '[ mac ] test1 [ Failure ]\n'
                            '[ mac ] test2 [ Failure ]\n'
                            '[ mac ] test3 [ Failure ]\n'
                            '[ mac ] test4 [ Failure ]\n'
                            '[ mac ] test5 [ Failure ]\n'
                            '[ mac ] test6 [ Failure ]\n'
                            '[ mac ] test7 [ Failure ]\n')
        expectations_dict = OrderedDict()
        expectations_dict['/tmp/TestExpectations'] = ''
        expectations_dict['/tmp/TestExpectations2'] = raw_expectations
        test_expectations = TestExpectations(port, expectations_dict)
        test_to_exps = test_expectations._expectations[1].individual_exps
        # Keep a handle on test1's Expectation before removing all seven.
        exp = test_expectations._expectations[1].individual_exps['test1'][0]
        exps_to_remove = [test_to_exps[
            'test%d' % case_no][0] for case_no in range(1, 8)]
        test_expectations.remove_expectations(
            '/tmp/TestExpectations2', exps_to_remove)
        # Re-add test1 at line 4, i.e. directly below the retained comment.
        test_expectations.add_expectations(
            '/tmp/TestExpectations2',[exp],
            lineno=4)
        test_expectations.commit_changes()
        content = port.host.filesystem.read_text_file('/tmp/TestExpectations2')
        self.assertEqual(content, ('# tags: [ Mac Win ]\n'
                                   '# results: [ Failure ]\n'
                                   '\n'
                                   '# This comment will not be deleted\n'
                                   '[ mac ] test1 [ Failure ]\n'))
    def test_remove_added_expectations(self):
        """A just-added, uncommitted expectation can be removed by matching value and lineno."""
        port = MockHost().port_factory.get('test-win-win7')
        raw_expectations = ('# tags: [ Mac Win ]\n'
                            '# results: [ Failure ]\n'
                            '\n'
                            '# This comment will be deleted\n'
                            '[ mac ] test1 [ Failure ]\n')
        expectations_dict = OrderedDict()
        expectations_dict['/tmp/TestExpectations'] = ''
        expectations_dict['/tmp/TestExpectations2'] = raw_expectations
        test_expectations = TestExpectations(port, expectations_dict)
        # Add two expectations at line 5 ...
        test_expectations.add_expectations('/tmp/TestExpectations2', [
            Expectation(test='test2', results=set([ResultType.Failure])),
            Expectation(
                test='test3',
                results=set([ResultType.Crash]),
                tags=set(['win']))
        ], 5)
        # ... then remove one of them again before committing.
        test_expectations.remove_expectations('/tmp/TestExpectations2', [
            Expectation(
                test='test2', results=set([ResultType.Failure]), lineno=5)
        ])
        test_expectations.commit_changes()
        content = port.host.filesystem.read_text_file('/tmp/TestExpectations2')
        # Only test3 lands in the file; its tag is serialized in the
        # canonical case ('Win') from the '# tags:' header.
        self.assertEqual(content, ('# tags: [ Mac Win ]\n'
                                   '# results: [ Failure ]\n'
                                   '\n'
                                   '# This comment will be deleted\n'
                                   '[ mac ] test1 [ Failure ]\n'
                                   '[ Win ] test3 [ Crash ]\n'))
    def test_remove_after_add(self):
        """Removing an existing line leaves expectations added at the same spot intact."""
        port = MockHost().port_factory.get('test-win-win7')
        raw_expectations = ('# tags: [ Mac Win ]\n'
                            '# results: [ Failure Crash ]\n'
                            '\n'
                            '# This comment will not be deleted\n'
                            '[ mac ] test1 [ Failure ]\n')
        expectations_dict = OrderedDict()
        expectations_dict['/tmp/TestExpectations'] = ''
        expectations_dict['/tmp/TestExpectations2'] = raw_expectations
        test_expectations = TestExpectations(port, expectations_dict)
        test_to_exps = test_expectations._expectations[1].individual_exps
        test_expectations.add_expectations('/tmp/TestExpectations2', [
            Expectation(test='test2', results=set([ResultType.Failure])),
            Expectation(
                test='test3',
                results=set([ResultType.Crash]),
                tags=set(['mac']))
        ], 5)
        test_expectations.remove_expectations('/tmp/TestExpectations2',
                                              [test_to_exps['test1'][0]])
        test_expectations.commit_changes()
        content = port.host.filesystem.read_text_file('/tmp/TestExpectations2')
        self.assertEqual(content, ('# tags: [ Mac Win ]\n'
                                   '# results: [ Failure Crash ]\n'
                                   '\n'
                                   '# This comment will not be deleted\n'
                                   'test2 [ Failure ]\n'
                                   '[ Mac ] test3 [ Crash ]\n'))
class AddExpectationsTest(Base):
    def test_add_expectation_end_of_file_nonzero_lineno(self):
        """add_expectations() with lineno=0 raises unless append_to_end_of_file is set."""
        port = MockHost().port_factory.get('test-win-win7')
        raw_expectations = ('# tags: [ Mac Win ]\n'
                            '# tags: [ release ]\n'
                            '# results: [ Failure ]\n'
                            '\n'
                            '# this is a block of expectations\n'
                            'test [ failure ]\n')
        expectations_dict = OrderedDict()
        expectations_dict['/tmp/TestExpectations'] = ''
        expectations_dict['/tmp/TestExpectations2'] = raw_expectations
        test_expectations = TestExpectations(port, expectations_dict)
        with self.assertRaises(ValueError) as ctx:
            test_expectations.add_expectations(
                '/tmp/TestExpectations2',
                [Expectation(test='test3',
                             results=set([ResultType.Failure]))],
                lineno=0)
            # Inside the assertRaises block on purpose: it only runs if
            # add_expectations() did not raise.
            test_expectations.commit_changes()
        self.assertIn('append_to_end_of_file must be set to True',
                      str(ctx.exception))
    def test_add_expectation_with_negative_lineno(self):
        """add_expectations() rejects a negative lineno with ValueError."""
        port = MockHost().port_factory.get('test-win-win7')
        raw_expectations = ('# tags: [ Mac Win ]\n'
                            '# tags: [ release ]\n'
                            '# results: [ Failure ]\n'
                            '\n'
                            '# this is a block of expectations\n'
                            'test [ failure ]\n')
        expectations_dict = OrderedDict()
        expectations_dict['/tmp/TestExpectations'] = ''
        expectations_dict['/tmp/TestExpectations2'] = raw_expectations
        test_expectations = TestExpectations(port, expectations_dict)
        with self.assertRaises(ValueError) as ctx:
            test_expectations.add_expectations(
                '/tmp/TestExpectations2',
                [Expectation(test='test3',
                             results=set([ResultType.Failure]))],
                lineno=-1)
            # Inside the assertRaises block on purpose: it only runs if
            # add_expectations() did not raise.
            test_expectations.commit_changes()
        self.assertIn('cannot be negative', str(ctx.exception))
    def test_add_expectation_outside_file_size_range(self):
        """add_expectations() rejects a lineno beyond the file's line count."""
        port = MockHost().port_factory.get('test-win-win7')
        raw_expectations = ('# tags: [ Mac Win ]\n'
                            '# tags: [ release ]\n'
                            '# results: [ Failure ]\n'
                            '\n'
                            '# this is a block of expectations\n'
                            'test [ failure ]\n')
        expectations_dict = OrderedDict()
        expectations_dict['/tmp/TestExpectations'] = ''
        expectations_dict['/tmp/TestExpectations2'] = raw_expectations
        test_expectations = TestExpectations(port, expectations_dict)
        with self.assertRaises(ValueError) as ctx:
            test_expectations.add_expectations(
                '/tmp/TestExpectations2',
                [Expectation(test='test3',
                             results=set([ResultType.Failure]))],
                lineno=100)
            # Inside the assertRaises block on purpose: it only runs if
            # add_expectations() did not raise.
            test_expectations.commit_changes()
        self.assertIn('greater than the total line count', str(ctx.exception))
def test_use_append_to_end_flag_non_zero_lineno(self):
# Use append_to_end_of_file=True with lineno != 0
# An exception should be raised.
port = MockHost().port_factory.get('test-win-win7')
raw_expectations = ('# | |
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: <NAME>
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
# ----------------------------------------------------------------------------
# This software is in the public domain, furnished "as is", without technical
# support, and with no warranty, express or implied, as to its usefulness for
# any purpose.
#
# ConfigVariables.py
# ConfigVariables for Text Products.
#
# Author: hansen
# ----------------------------------------------------------------------------
##
# This is a base file that is not intended to be overridden.
##
import TextUtils
class ConfigVariables(TextUtils.TextUtils):
    def __init__(self):
        # Explicit base-class initialization (kept Python-2 compatible).
        # ConfigVariables adds no state of its own; it only supplies the
        # dictionary/accessor methods below.
        TextUtils.TextUtils.__init__(self)
def missingData_notification_list(self, tree, node):
# Return a list of weather element names for which you wish
# to be notified when there is missing data
return []
# Descriptors: Phrase, Unit, Time periods
# Connecting phrases
    def phrase_descriptor_dict(self, tree, node):
        """Descriptor wording for weather elements used inside phrases.

        The value for an element may be a phrase (string) or a method.
        If a method, it will be called with arguments:
           tree, node, key, element
        Looked up via phrase_descriptor() / access_dictionary().
        """
        return {
            "HeatIndex": "heat index readings",
            "PoP": "chance of",
            #"PoP": self.areal_or_chance_pop_descriptor,
            #"PoP": self.allAreal_or_chance_pop_descriptor,
            "Period":"period",
            "IceAccum": "ice accumulation",
            "NewIceAccum": "new ice accumulation",
            "SnowLevel": "snow level",
            "Swell": "swell",
            "TotalSnow": "total snow accumulation",
            "NewTotalSnow": "total new snow accumulation",
            "StormTotalSnow": "storm total snow accumulation",
            "Visibility": "visibility",
            "Wind": "winds",
            "Wind20ft": "winds",
            "WindChill": "wind chill readings",
            "WindGust": "gusts up to",
            "Wx": "",
            #
            # Snow accumulation combinations
            "Snow": "snow accumulation",
            "SnowSnow": "snow accumulation",
            "SnowSleet": "snow and sleet accumulation",
            #"SnowSleetIceCrystal": "snow and sleet and ice crystal accumulation",
            "SnowSleetIceCrystal": "snow and sleet accumulation",
            "Sleet": "sleet accumulation",
            #"SleetIceCrystal": "sleet and ice crystal accumulation",
            "SleetIceCrystal": "sleet accumulation",
            #"IceCrystal": "ice crystal accumulation",
            #"SnowIceCrystal": "snow and ice crystal accumulation",
            "IceCrystal": "snow accumulation",
            "SnowIceCrystal": "snow accumulation",
            "New": "new",
            #
            # Narrative phrases keyed by logical name
            "evening temperatures": "evening temperatures",
            "highs": "highs",
            "lakeWinds": "caution advised on area lakes",
            "lows": "lows",
            "severeWeather": "some thunderstorms may be severe",
            "thunderstorms": "some thunderstorms may produce",
            "heavyRainfall": "locally heavy rainfall possible",
            "heavyRain" : "rain may be heavy at times",
            "heavyRains" : "locally heavy rain possible",
            "heavySnow" : "snow may be heavy at times",
            "heavyPrecip" : "precipitation may be heavy at times",
            "temperature":"temperature",
            # Trend wording varies per element: warmer/cooler for temperature,
            # wetter/drier for humidity.
            "higher":{
                "MaxT":"warmer",
                "MinT":"warmer",
                "MinRH":"wetter",
                "MaxRH":"wetter",
                "RH":"wetter",
                },
            "lower":{
                "MaxT":"cooler",
                "MinT":"cooler",
                "MinRH":"drier",
                "MaxRH":"drier",
                "RH":"drier",
                },
            # Marine
            "WaveHeight": "waves", # Used for NSH/GLF
            "seas": "combined seas",
            "waves": "wind waves",
            "inland waters": "waves",
            "chop": "bay and inland waters",
            "mixed swell": "mixed swell",
            "dominant period": "dominant period", # to be used in combined seas phrase
            "hurricane force winds to": "hurricane force winds to",
            "storm force winds to": "storm force winds to",
            "gales to": "gales to",
            "up to": "up to",
            # Fire Weather labels (fixed-width, dot-padded column labels)
            "Sky/weather.........":"Sky/weather.........",
            " 24 hr trend......":" 24 hr trend......",
            "unchanged":"unchanged",
            "missing":"MISSING",
            "MinT":"lows",
            "MaxT":"highs",
            "MinT_FireWx": "Min temperature.....",
            "MaxT_FireWx": "Max temperature.....",
            "MinRH_FireWx":"Min humidity........",
            "MaxRH_FireWx":"Max humidity........",
            "Humidity recovery...":"Humidity recovery...",
            "Upslope/downslope...":"Upslope/downslope...",
            "20-foot winds.......":"20-foot winds.......",
            " Valleys/lwr slopes...":" Valleys/lwr slopes...",
            " Ridges/upr slopes....":" Ridges/upr slopes....",
            "WIND................":"WIND................",
            "LAL.................":"LAL.................",
            "Free winds..........":"10K ft wind.........",
            "Smoke dispersal.....":"Smoke dispersal.....",
            "Transport winds.....":"Transport winds.....",
            "Mixing height.......":"Mixing height.......",
            "Haines Index........":"Haines Index........",
            "CWR.................":"Chc of wetting rain.",
            "Marine layer........":"Marine layer........",
            # Used for Headlines
            "EXPECTED" : "expected",
            "IN EFFECT" : "in effect",
            # Used for single values
            "around": "around",
            "through the day": "through the day",
            "through the night": "through the night",
            # Used for Tropical
            "iminHR":"Hurricane conditions",
            "iminTS":"Tropical storm conditions",
            "iminTSposHR":"Tropical storm conditions with hurricane conditions possible",
            "posTS":"Tropical storm conditions possible",
            "posTSbcmgposHR":"Tropical storm conditions possible with hurricane conditions also possible",
            "expTS":"Tropical storm conditions expected",
            "posHR":"Hurricane conditions possible",
            "expHR":"Hurricane conditions expected",
            "expTSposHR":"Tropical storm conditions expected with hurricane conditions possible",
            "posTSorHR":"Tropical storm or hurricane conditions possible" ,
            }
def phrase_descriptor(self, tree, node, key, value):
return self.access_dictionary(tree, node, key, value, "phrase_descriptor_dict")
    def phrase_connector_dict(self, tree, node):
        """Connecting phrases between sub-phrases of a weather element phrase.

        The value for a connector may be a phrase (string) or a per-element
        sub-dictionary keyed by element name with an "otherwise" fallback.
        If a method, it will be called with arguments: tree, node.
        """
        return {
            # Used for Scalar and Wx elements
            "then": {
                "Sky": " then becoming ",
                "otherwise": " then ",
                },
            # Used for Scalar and Vector elements
            "increasing to": {
                "Sky": " then becoming ",
                "WaveHeight": " building to ",
                "otherwise":" increasing to ",
                },
            "decreasing to": {
                "Sky": " then becoming ",
                "WaveHeight": " subsiding to ",
                "otherwise":" decreasing to ",
                },
            "becoming": " becoming ",
            "shifting to the": " shifting to the ",
            # Used for Marine Vector weather elements
            "rising to": " rising to ",
            "easing to": " easing to ",
            "backing": " backing ",
            "veering":" veering ",
            "becoming onshore": " becoming onshore",
            }
def phrase_connector(self, tree, node, key, value):
return self.access_dictionary(tree, node, key, value, "phrase_connector_dict")
    def useThenConnector_dict(self, tree, node):
        """Per-element flag (1/0) controlling the conjunctive "THEN" connector.

        Used to make 3+ subPhrase phrases flow better. e.g.
         "N WIND 10 TO 20 KT RISING TO 30 KT EARLY IN THE
          AFTERNOON, THEN RISING TO GALES TO 40 KT LATE
          IN THE AFTERNOON."
        Wx is opted out; everything else falls through to "otherwise".
        """
        return {
            "otherwise": 1,
            "Wx": 0,
            }
def useThenConnector(self, tree, node, key, value):
return self.access_dictionary(tree, node, key, value, "useThenConnector_dict")
    def thenConnector_dict(self, tree, node):
        """Text of the conjunctive "THEN" connector, per element.

        Used to make 3+ subPhrase phrases flow better. e.g.
         "N WIND 10 TO 20 KT RISING TO 30 KT EARLY IN THE
          AFTERNOON, THEN RISING TO GALES TO 40 KT LATE
          IN THE AFTERNOON."
        Sky uses no "then" text; everything else gets ", then".
        """
        return {
            "otherwise": ", then",
            "Sky": "",
            }
def thenConnector(self, tree, node, key, value):
return self.access_dictionary(tree, node, key, value, "thenConnector_dict")
    def value_connector_dict(self, tree, node):
        """Connector text for ranges of values, e.g. "25 TO 35" mph.

        The value for an element may be a phrase (string) or a method.
        If a method, it will be called with arguments: tree, node.
        Temperatures and humidities use a dash ("50-60"); everything
        else spells out " to ".
        """
        return {
            "Period": " to ",
            "Period2": " to ",
            "TransWind": " to ",
            "Wind20ft": " to ",
            "FreeWind": " to ",
            "Swell": " to ",
            "Swell2": " to ",
            "Wind": " to ",
            "MaxT": "-",
            "MinT": "-",
            "MaxRH": "-",
            "MinRH": "-",
            "RH": "-",
            "T": "-",
            "WaveHeight" : " to ",
            "WindWaveHgt" : " to ",
            "WindChill": " to ",
            "HeatIndex": " to ",
            }
def value_connector(self, tree, node, key, value):
return self.access_dictionary(tree, node, key, value,
"value_connector_dict")
    def units_descriptor_dict(self, tree, node):
        """Spelled-out names for unit abbreviations.

        Two sub-dictionaries: "units" for plural wording and "unit" for
        singular.  Fahrenheit maps to the empty string (degrees implied);
        "%" includes its leading space.
        """
        return {
            "units": {
                "ft": "feet",
                "F":"",
                "C":"degrees",
                "K":"kelvins",
                "%":" percent",
                "in":"inches",
                "kts":"knots",
                "s":"seconds",
                "hrs":"hours",
                "m/s":"meters/second",
                "mph":"mph",
                "m":"meters",
                "m^2/s":"meters^2/second",
                "kt-ft":"knots-feet",
                "mm":"millimeters",
                "degrees": "degrees",
                "percent": "percent",
                },
            "unit": {
                "ft":"foot",
                "F":"",
                "C":"degree",
                "K":"kelvin",
                "%":" percent",
                "in":"inch",
                "kts":"knot",
                "s":"second",
                "hrs":"hour",
                "m/s":"meter/second",
                "mph":"mph",
                "m":"meter",
                "m^2/s":"meter^2/second",
                "kt-ft":"knot-foot",
                "mm":"millimeter",
                "degree": "degree",
                "percent": "percent",
                },
            }
def units_descriptor(self, tree, node, key, value):
return self.access_dictionary(tree, node, key, value, "units_descriptor_dict")
    def element_inUnits_dict(self, tree, node):
        """Input units for each weather element (unit abbreviation strings)."""
        return {
            "QPF":"in",
            "Wind":"kts",
            "Wind20ft":"kts",
            "Wx":"wx",
            "SnowAmt":"in",
            "IceAccum":"in",
            "StormTotalSnow": "in",
            "PoP":"%",
            "Sky":"%",
            "FzLevel":"ft",
            "SnowLevel":"ft",
            "RH":"%",
            "HeatIndex":"F",
            "WindChill":"F",
            "T":"F",
            "Td":"F",
            "MinT":"F",
            "MaxT":"F",
            "WindWaveHgt":"ft",
            "WaveHeight":"ft",
            "Swell":"ft",
            "Swell2":"ft",
            "Period":"s",
            "Period2":"s",
            "WindGust":"kts",
            "LAL":"cat",
            "CWR":"%",
            "Haines":"cat",
            "MixHgt":"ft",
            "FreeWind":"kts",
            "TransWind":"kts",
            "Stability":"cat",
            "HrsOfSun":"hrs",
            "MarineLayer":"ft",
            "InvBurnOffTemp":"F",
            "VentRate":"kt-ft",
            "MinRH": "%",
            "MaxRH":"%",
            # NOTE(review): duplicate key -- "RH" already appears above with
            # the same value; this later entry silently wins.
            "RH":"%",
            "WetFlag":"y/n",
            "Ttrend":"F",
            "RHtrend":"%",
            }
def element_inUnits(self, tree, node, key, value):
return self.access_dictionary(tree, node, key, value, "element_inUnits_dict")
def element_outUnits_dict(self, tree, node):
# Dictionary of descriptors for various units
return {
"QPF":"in",
"Wind":"kts",
"Wind20ft":"kts",
"Wx":"wx",
"SnowAmt":"in",
"IceAccum":"in",
"StormTotalSnow": "in",
"PoP":"%",
"Sky":"%",
"FzLevel":"ft",
"SnowLevel":"ft",
"RH":"%",
"HeatIndex":"F",
"WindChill":"F",
"T":"F",
"Td":"F",
"MinT":"F",
"MaxT":"F",
"WindWaveHgt":"ft",
"WaveHeight":"ft",
"Swell":"ft",
"Swell2":"ft",
"Period":"s",
"Period2":"s",
"WindGust":"kts",
"LAL":"cat",
"CWR":"%",
"Haines":"cat",
"MixHgt":"ft",
"FreeWind":"kts",
"TransWind":"kts",
"Stability":"cat",
"HrsOfSun":"hrs",
"MarineLayer":"ft",
"InvBurnOffTemp":"F",
"VentRate":"kt-ft",
"MinRH": "%",
"MaxRH":"%",
"RH":"%",
"WetFlag":"y/n",
"Ttrend":"F",
"RHtrend":"%",
"Visibility": "SM", # statute miles -- Can also be | |
to lock after 0.001 seconds
___step_2___
coffee 2 ready to start transaction after 0.000 seconds
___step_3___
Wait 0.1 seconds.
___step_4___
Start transaction SHOULD happen, but it won't
coffee 4 idn has been assigned after 0.001 seconds
___step_5___
Wait 0.1 seconds.
___step_6___
Successful insert on the coffee thread, idn = 5
___step_7___
tea 7 ready to start transaction after 0.000 seconds
___step_8___
Start transaction SHOULD happen, but it won't
tea 8b idn has been assigned after 0.001 seconds
Crash on the tea thread!
Simultaneous insert broke, coffee idn = 5 tea word is None
"""
main_lex = Word0080Threading.LexManipulated(do_lock=True, do_start=True)
main_lex.cop2.go()
main_lex.uninstall_to_scratch()
main_lex.install_from_scratch()
is_lex_starting_out_empty = main_lex.max_idn() == main_lex.IDN_MAX_FIXED
assert is_lex_starting_out_empty, "Unexpected " + main_lex.max_idn().qstring()
max_idn_beforehand = main_lex.max_idn()
# NOTE: This USED to free future main_lex.max_idn() calls to have different values:
# main_lex._start_transaction()
thread_tea = self.ThreadCreatesNoun('tea', do_lock=do_lock, do_start=do_start)
thread_coffee = self.ThreadCreatesNoun('coffee', do_lock=do_lock, do_start=do_start)
def step_1_start_both_threads_but_stop_before_either_locks():
"""That is, some Cop lock stops them, not a Word lock."""
thread_tea.start()
thread_tea.await_connect()
thread_tea.lex.cop1.await_stop("tea 1")
wait(0.1)
thread_coffee.start()
thread_coffee.await_connect()
thread_coffee.lex.cop1.await_stop("coffee 1")
self.assertIsNot(thread_tea.lex, thread_coffee.lex)
self.assertIs (thread_tea.lex._global_lock, thread_coffee.lex._global_lock)
# NOTE: Threads have different lex instances, but share a common lock.
# So it won't matter in step 3 which we check is .locked()
self.assertEqual(max_idn_beforehand, thread_tea.lex.max_idn())
self.assertEqual(max_idn_beforehand, thread_coffee.lex.max_idn())
def step_2_let_the_coffee_thread_acquire_the_lock():
thread_coffee.lex.cop1.go()
thread_coffee.lex.cop2.await_stop("coffee 2")
self.assertEqual(max_idn_beforehand, thread_tea.lex.max_idn())
self.assertEqual(max_idn_beforehand, thread_coffee.lex.max_idn())
def step_3_let_the_tea_thread_block_on_the_lock():
thread_tea.lex.cop1.go()
if do_lock:
wait(0.1)
self.assertFalse(thread_tea.lex.cop2.is_stopped)
# NOTE: So the tea thread is NOT stuck at cop2.stop().
# This means the tea thread has not called ._start_transaction() yet.
# Therefore it must be hung up on ._lock_next_word()
# As it should be, because the coffee thread acquired the lock.
self.assertTrue(thread_tea.lex._global_lock.locked())
else:
thread_tea.lex.cop2.await_stop("tea 3")
self.assertTrue(thread_tea.lex.cop2.is_stopped)
# NOTE: Because locking is deliberately broken,
# the tea thread proceeded to cop2, where it called ._start_transaction()
# though the transaction hasn't actually started yet.
# (Because LexManipulated._start_transaction() has
# yet to call LexMySQL._start_transaction().)
self.assertEqual(max_idn_beforehand, thread_tea.lex.max_idn())
self.assertEqual(max_idn_beforehand, thread_coffee.lex.max_idn())
def step_4_let_the_coffee_thread_inch_ahead_to_where_its_got_max_idn():
thread_coffee.lex.cop2.go()
thread_coffee.lex.cop3.await_stop("coffee 4")
self.assertEqual(max_idn_beforehand, thread_tea.lex.max_idn())
self.assertEqual(max_idn_beforehand, thread_coffee.lex.max_idn())
def step_5_tea_thread_should_be_locked():
wait(0.1)
if do_lock:
self.assertFalse(thread_tea.lex.cop2.is_stopped)
else:
self.assertTrue(thread_tea.lex.cop2.is_stopped)
thread_tea.lex.cop2.go()
thread_tea.lex.cop3.await_stop("tea 5")
# NOTE: With locking broken, the tea thread goes all the way
# to where it gets an idn assigned.
# That's the same point where we've held up the coffee thread.
# So they should get the same idn.
# And whichever creates its word second will crash.
self.assertEqual(max_idn_beforehand, main_lex.max_idn())
self.assertEqual(max_idn_beforehand, thread_tea.lex.max_idn())
self.assertEqual(max_idn_beforehand, thread_coffee.lex.max_idn())
def step_6_let_the_coffee_thread_create_its_word_first():
self.assertEqual(max_idn_beforehand, thread_tea.lex.max_idn())
thread_coffee.lex.cop3.go()
thread_coffee.join()
self.assertIsNone(thread_coffee.lex)
self.assertEqual(max_idn_beforehand, thread_tea.lex.max_idn())
self.assertFalse(thread_coffee.did_crash)
def step_7_now_tea_thread_should_proceed_to_just_before_start_transaction():
if do_lock:
thread_tea.lex.cop2.await_stop("tea 7")
self.assertTrue(thread_tea.lex.cop2.is_stopped)
else:
self.assertTrue(thread_tea.lex.cop3.is_stopped)
# NOTE: Except if locks are deliberately broken,
# it has already gone past _start_transaction(),
# and moved on to where it's gotten an idn assigned.
def step_8_let_the_tea_thread_create_its_word_second():
if not do_lock:
# NOTE: LexManipulated._lock_next_word() was deliberately broken.
# So an unrestrained tea thread has already gotten its idn assigned,
# which is a fatal duplicate of the idn assigned to the coffee word.
thread_tea.lex.cop3.go()
thread_tea.join() # crash creating tea word, duplicate idn
self.assertTrue(thread_tea.did_crash, "No lock - should have crashed")
elif not do_start:
# NOTE: _lock_next_word() works, but _start_transaction() was deliberately broken.
# So the tea thread can't see the coffee thread's insert.
self.assertEqual(max_idn_beforehand, thread_tea.lex.max_idn())
self.assertTrue(thread_tea.lex.cop2.is_stopped)
thread_tea.lex.cop2.go()
thread_tea.lex.cop3.await_stop("tea 8b")
thread_tea.lex.cop3.go()
thread_tea.join() # tea word creation should crash, dup idn
self.assertTrue(thread_tea.did_crash, "No start transaction - should have crashed")
else:
# NOTE: Because locks are working, the tea thread will just now be getting its idn.
self.assertTrue(thread_tea.lex.cop2.is_stopped)
thread_tea.lex.cop2.go()
thread_tea.lex.cop3.await_stop("tea 8a")
thread_tea.lex.cop3.go()
thread_tea.join() # tea word successfully created
self.assertFalse(thread_tea.did_crash, "Should NOT have crashed")
def s(n):
print("___step_{}___\n".format(n), end="")
try:
s(1); step_1_start_both_threads_but_stop_before_either_locks()
s(2); step_2_let_the_coffee_thread_acquire_the_lock()
s(3); step_3_let_the_tea_thread_block_on_the_lock()
s(4); step_4_let_the_coffee_thread_inch_ahead_to_where_its_got_max_idn()
s(5); step_5_tea_thread_should_be_locked()
s(6); step_6_let_the_coffee_thread_create_its_word_first()
s(7); step_7_now_tea_thread_should_proceed_to_just_before_start_transaction()
s(8); step_8_let_the_tea_thread_create_its_word_second()
except Cop.DidntStop as e:
self.fail("Cop didn't get to its stopping point: " + str(e))
else:
if do_lock and do_start:
print(
"Simultaneous insert worked,",
thread_coffee.word.txt, "idn =", int(thread_coffee.word.idn),
thread_tea.word.txt, "idn =", int(thread_tea.word.idn),
)
self.assertEqual(max_idn_beforehand + 1, thread_coffee.word.idn)
self.assertEqual(max_idn_beforehand + 2, thread_tea.word.idn)
self.assertEqual(max_idn_beforehand , main_lex.max_idn())
main_lex._start_transaction()
self.assertEqual(max_idn_beforehand + 2, main_lex.max_idn())
else:
print(
"Simultaneous insert broke,",
thread_coffee.txt, "idn =", int(thread_coffee.word.idn),
thread_tea.txt, "word is", repr(thread_tea.word),
)
self.assertIsNone(thread_tea.word)
self.assertEqual(max_idn_beforehand + 1, thread_coffee.word.idn)
self.assertEqual(max_idn_beforehand , main_lex.max_idn())
main_lex._start_transaction()
self.assertEqual(max_idn_beforehand + 1, main_lex.max_idn())
finally:
Cop.let_go_all()
thread_tea.join()
thread_coffee.join()
main_lex.uninstall_to_scratch()
main_lex.disconnect()
class WordQoolbarSetup(WordTests):
    """Fixture: a lex with a qoolbar, qool verbs, two agents, and two rated nouns.

    NOTE(review): the order of word creation below fixes the idn sequence
    that WordQoolbarTests asserts on -- do not reorder these statements.
    """
    def setUp(self):
        super(WordQoolbarSetup, self).setUp()
        self.qoolbar = qiki.QoolbarSimple(self.lex)
        # Verbs used by the tests.
        self.qool = self.lex.verb('qool')
        self.iconify = self.lex.verb('iconify')
        self.like = self.lex.verb('like')
        self.delete = self.lex.verb('delete')
        # self.lex.says(self.qool, self.like, qiki.Number(1))
        # self.lex.says(self.qool, self.delete, qiki.Number(1))
        # Two agents rate two nouns.
        self.anna = self.lex.define('agent', 'anna')
        self.bart = self.lex.define('agent', 'bart')
        self.youtube = self.lex.noun('youtube')
        self.zigzags = self.lex.noun('zigzags')
        self.anna_like_youtube = self.anna.says(self.like, self.youtube, 1)
        self.bart_like_youtube = self.bart.says(self.like, self.youtube, 10)
        self.anna_like_zigzags = self.anna.says(self.like, self.zigzags, 2)
        self.bart_delete_zigzags = self.bart.says(self.delete, self.zigzags, 1)
        # Collect the idns of every verb declared qool.
        qool_declarations = self.lex.find_words(vrb=self.qool.idn)
        self.qool_idns = [w.obj.idn for w in qool_declarations]
class WordQoolbarTests(WordQoolbarSetup):
    def disable_test_lex_report(self):
        """
        lex_report <-- test_lex_report
        0 [lex](define, 'lex')[agent]
        1 [lex](define, 'define')[verb]
        2 [lex](define, 'noun')[noun]
        3 [lex](define, 'verb')[noun]
        4 [lex](define, 'agent')[noun]
        5 [lex](define, 'qool')[verb]
        6 [lex](define, 'iconify')[verb]
        7 [lex](define, 'delete')[verb]
        8 [lex](qool)[delete]
        9 [lex](iconify, 16, 'https://tool.qiki.info/icon/delete_16.png')[delete]
        10 [lex](define, 'like')[verb]
        11 [lex](qool)[like]
        12 [lex](iconify, 16, 'https://tool.qiki.info/icon/thumbsup_16.png')[like]
        13 [lex](define, 'anna')[agent]
        14 [lex](define, 'bart')[agent]
        15 [lex](define, 'youtube')[noun]
        16 [lex](define, 'zigzags')[noun]
        17 [anna](like)[youtube]
        18 [bart](like, 10)[youtube]
        19 [anna](like, 2)[zigzags]
        20 [bart](delete)[zigzags]
        17 ⋅ Word('lex')
        13 ⋅ Word('define')
        5 ⋅ Word('verb')
        5 ⋅ Word('noun')
        5 ⋅ Word('like')
        3 ⋅ Word('agent')
        3 ⋅ Word('delete')
        2 ⋅ Word('qool')
        2 ⋅ Word('iconify')
        2 ⋅ Word('anna')
        2 ⋅ Word('youtube')
        2 ⋅ Word('bart')
        2 ⋅ Word('zigzags')
        Mesa lexes
            None: LexMemory
        """
        # Prefixed 'disable_' so the unittest runner does not collect it;
        # rename back to 'test_lex_report' to print the report shown above.
        self.lex_report()
def test_get_all_qool_verbs(self):
"""Make sure the qool verbs were found."""
self.assertEqual({self.like.idn, self.delete.idn}, set(self.qool_idns))
def test_find_qool_verbed_words(self):
"""Find words with qool verbs."""
qool_uses = self.lex.find_words(vrb=self.qool_idns)
self.assertEqual(4, len(qool_uses))
self.assertEqual(qool_uses[0].sbj, self.anna)
self.assertEqual(qool_uses[1].sbj, self.bart)
self.assertEqual(qool_uses[2].sbj, self.anna)
self.assertEqual(qool_uses[3].sbj, self.bart)
def test_find_qool_verbed_words_with_particular_object(self):
"""Find words with qool verbs and a specific object."""
qool_uses = self.lex.find_words(vrb=self.qool_idns, obj=self.youtube)
self.assertEqual(2, len(qool_uses))
self.assertEqual(qool_uses[0].sbj, self.anna)
self.assertEqual(qool_uses[0].num, qiki.Number(1))
self.assertEqual(qool_uses[1].sbj, self.bart)
self.assertEqual(qool_uses[1].num, qiki.Number(10))
def test_find_qool_verbed_words_with_particular_subject(self):
"""Find words with qool verbs and a specific subject."""
qool_uses = self.lex.find_words(vrb=self.qool_idns, sbj=self.bart)
self.assertEqual(2, len(qool_uses))
self.assertEqual(qool_uses[0].obj, self.youtube)
self.assertEqual(qool_uses[0].num, qiki.Number(10))
self.assertEqual(qool_uses[1].obj, self.zigzags)
self.assertEqual(qool_uses[1].num, qiki.Number(1))
def test_lex_from_idn(self):
word = self.lex._lex.spawn()
self.lex.populate_word_from_idn(word, self.zigzags.idn)
self.assertEqual(self.zigzags.idn, word.idn)
self.assertEqual(self.zigzags.sbj, word.sbj)
self.assertEqual(self.zigzags.vrb, word.vrb)
self.assertEqual(self.zigzags.obj, word.obj)
self.assertEqual(self.zigzags.num, word.num)
self.assertEqual(self.zigzags.txt, word.txt)
self.assertEqual(self.zigzags.whn, word.whn)
def test_lex_from_definition(self):
word = self.lex._lex.spawn()
self.lex.populate_word_from_definition(word, self.zigzags.txt)
self.assertEqual(self.zigzags.idn, word.idn)
self.assertEqual(self.zigzags.sbj, word.sbj)
self.assertEqual(self.zigzags.vrb, word.vrb)
self.assertEqual(self.zigzags.obj, word.obj)
self.assertEqual(self.zigzags.num, word.num)
self.assertEqual(self.zigzags.txt, word.txt)
self.assertEqual(self.zigzags.whn, word.whn)
def test_lex_from_sbj_vrb_obj_idns(self):
word = self.lex._lex.spawn()
self.lex.populate_word_from_sbj_vrb_obj(
word,
self.zigzags.sbj,
self.zigzags.vrb,
self.zigzags.obj
)
self.assertEqual(self.zigzags.idn, word.idn)
self.assertEqual(self.zigzags.sbj, word.sbj)
self.assertEqual(self.zigzags.vrb, word.vrb)
self.assertEqual(self.zigzags.obj, word.obj)
self.assertEqual(self.zigzags.num, word.num)
self.assertEqual(self.zigzags.txt, word.txt)
self.assertEqual(self.zigzags.whn, word.whn)
def test_lex_from_sbj_vrb_obj_words(self):
word = self.lex._lex.spawn()
self.lex.populate_word_from_sbj_vrb_obj(
word,
self.lex[self.zigzags.sbj],
self.lex[self.zigzags.vrb],
self.lex[self.zigzags.obj]
)
self.assertEqual(self.zigzags.idn, word.idn)
self.assertEqual(self.zigzags.sbj, word.sbj)
self.assertEqual(self.zigzags.vrb, word.vrb)
self.assertEqual(self.zigzags.obj, word.obj)
self.assertEqual(self.zigzags.num, word.num)
self.assertEqual(self.zigzags.txt, word.txt)
self.assertEqual(self.zigzags.whn, word.whn)
def test_lex_from_sbj_vrb_obj_num_txt_idns(self):
word = self.lex._lex.spawn()
self.lex.populate_word_from_sbj_vrb_obj_num_txt(
word,
self.zigzags.sbj,
self.zigzags.vrb,
self.zigzags.obj,
self.zigzags.num,
self.zigzags.txt
)
self.assertEqual(self.zigzags.idn, word.idn)
self.assertEqual(self.zigzags.sbj, word.sbj)
self.assertEqual(self.zigzags.vrb, word.vrb)
self.assertEqual(self.zigzags.obj, word.obj)
self.assertEqual(self.zigzags.num, word.num)
self.assertEqual(self.zigzags.txt, word.txt)
self.assertEqual(self.zigzags.whn, word.whn)
def test_lex_from_sbj_vrb_obj_num_txt_words(self):
word = self.lex._lex.spawn()
self.lex.populate_word_from_sbj_vrb_obj_num_txt(
word,
self.lex[self.zigzags.sbj],
self.lex[self.zigzags.vrb],
self.lex[self.zigzags.obj],
self.zigzags.num,
self.zigzags.txt
)
self.assertEqual(self.zigzags.idn, word.idn)
self.assertEqual(self.zigzags.sbj, word.sbj)
self.assertEqual(self.zigzags.vrb, word.vrb)
self.assertEqual(self.zigzags.obj, word.obj)
self.assertEqual(self.zigzags.num, word.num)
self.assertEqual(self.zigzags.txt, word.txt)
self.assertEqual(self.zigzags.whn, word.whn)
def test_find_words(self):
nouns = self.lex.find_words(obj=self.lex.noun())
self.assertEqual(5, len(nouns))
self.assertEqual('noun', nouns[0].txt)
self.assertEqual('verb', nouns[1].txt)
self.assertEqual('agent', nouns[2].txt)
self.assertEqual('youtube', nouns[3].txt)
self.assertEqual('zigzags', nouns[4].txt)
    def test_find_words_jbo(self):
        """find_words(jbo_vrb=...) without jbo_strictly returns ALL matching nouns.

        Nouns with no qool-verb words pointing at them get an empty .jbo list;
        the others carry their qool words in .jbo.
        """
        nouns = self.lex.find_words(obj=self.lex.noun(), jbo_vrb=self.qool_idns)
        self.assertEqual([
            'noun',
            'verb',
            'agent',
            'youtube',
            'zigzags',
        ], [noun.txt for noun in nouns])
        # noun/verb/agent have no qool words about them.
        self.assertEqual([], nouns[0].jbo)
        self.assertEqual([], nouns[1].jbo)
        self.assertEqual([], nouns[2].jbo)
        self.assertEqual([self.anna_like_youtube, self.bart_like_youtube], nouns[3].jbo)
        self.assertEqual([self.anna_like_zigzags, self.bart_delete_zigzags], nouns[4].jbo)
        # jbo members must keep their qiki value types, not degrade to builtins.
        self.assertIsInstance(nouns[4].jbo[0].num, qiki.Number)
        self.assertIsInstance(nouns[4].jbo[0].txt, qiki.Text)   # (This was broken once.)
def test_find_words_jbo_inner(self):
nouns = self.lex.find_words(obj=self.lex.noun(), jbo_vrb=self.qool_idns, jbo_strictly=True)
self.assertEqual([
'youtube',
'zigzags',
], [noun.txt for noun in nouns])
self.assertEqual([self.anna_like_youtube, self.bart_like_youtube], nouns[0].jbo)
self.assertEqual([self.anna_like_zigzags, self.bart_delete_zigzags], nouns[1].jbo)
def test_jbo_single_verb_word(self):
deleted_things = self.lex.find_words(jbo_vrb=self.delete, jbo_strictly=True)
self.assertEqual({
'zigzags'
}, {thing.txt for thing in deleted_things})
def test_jbo_single_verb_idn(self):
deleted_things = self.lex.find_words(jbo_vrb=self.delete.idn, jbo_strictly=True)
self.assertEqual({
'zigzags'
}, {thing.txt for thing in deleted_things})
    def test_jbo_two_verbs(self):
        """jbo_vrb specifying multiple words and/or idns"""
        # Mixed list: a Word (self.delete) plus a bare idn (self.qool.idn).
        deleted_and_qooled = self.lex.find_words(
            jbo_vrb=[self.delete, self.qool.idn],
            jbo_strictly=True
        )
        self.assertEqual({
            'delete',
            'like',
            'zigzags',
        }, {thing.txt for thing in deleted_and_qooled})
        # Same idea with the idn/Word roles swapped inside the list.
        deleted_and_qooled = self.lex.find_words(
            jbo_vrb=[self.delete.idn, self.like],
            jbo_strictly=True
        )
        self.assertEqual({
            'youtube',
            'zigzags',
        }, {thing.txt for thing in deleted_and_qooled})
def find_b_l_y(self): # Find all the words where bart likes youtube.
return self.lex.find_words(sbj=self.bart, vrb=self.like, obj=self.youtube)
def test_num_replace_num(self):
b_l_y_before = self.find_b_l_y()
self.bart.says(self.like, self.youtube, 20)
b_l_y_after = self.find_b_l_y()
self.assertEqual(len(b_l_y_after), len(b_l_y_before) + 1)
self.assertEqual(qiki.Number(20), b_l_y_after[-1].num)
def test_num_replace_named_num(self):
b_l_y_before = self.find_b_l_y()
self.bart.says(self.like, self.youtube, num=20)
b_l_y_after = self.find_b_l_y()
self.assertEqual(len(b_l_y_after), len(b_l_y_before) + 1)
self.assertEqual(qiki.Number(20), b_l_y_after[-1].num)
def test_num_add(self):
b_l_y_before = self.find_b_l_y()
self.bart.says(self.like, self.youtube, num_add=20)
b_l_y_after = self.find_b_l_y()
self.assertEqual(len(b_l_y_after), len(b_l_y_before) + 1)
self.assertEqual(10, b_l_y_before[-1].num)
| |
resultifier = ARAXResultify()
input_parameters = {'ignore_edge_direction': 'true'}
resultifier.apply(message, input_parameters)
assert resultifier.response.status == 'OK'
assert len(resultifier.message.results) == 2
def test07():
    """Run resultify via the ActionsParser on a hand-built KG/QG.

    The QG has an is_set protein hub ('n01'), a CURIE-keyed disease qnode
    ('DOID:12345'), and a chemical qnode ('n02'); two chemicals each bind
    both proteins, so exactly two results are expected.
    """
    kg_node_info = ({'id': 'UniProtKB:12345',
                     'type': 'protein',
                     'qnode_ids': ['n01']},
                    {'id': 'UniProtKB:23456',
                     'type': 'protein',
                     'qnode_ids': ['n01']},
                    {'id': 'DOID:12345',
                     'type': 'disease',
                     'qnode_ids': ['DOID:12345']},
                    {'id': 'UniProtKB:56789',
                     'type': 'protein',
                     'qnode_ids': ['n01']},
                    {'id': 'ChEMBL.COMPOUND:12345',
                     'type': 'chemical_substance',
                     'qnode_ids': ['n02']},
                    {'id': 'ChEMBL.COMPOUND:23456',
                     'type': 'chemical_substance',
                     'qnode_ids': ['n02']})
    kg_edge_info = ({'edge_id': 'ke01',
                     'source_id': 'ChEMBL.COMPOUND:12345',
                     'target_id': 'UniProtKB:12345',
                     'qedge_ids': ['qe01']},
                    {'edge_id': 'ke02',
                     'source_id': 'ChEMBL.COMPOUND:12345',
                     'target_id': 'UniProtKB:23456',
                     'qedge_ids': ['qe01']},
                    {'edge_id': 'ke03',
                     'source_id': 'ChEMBL.COMPOUND:23456',
                     'target_id': 'UniProtKB:12345',
                     'qedge_ids': ['qe01']},
                    {'edge_id': 'ke04',
                     'source_id': 'ChEMBL.COMPOUND:23456',
                     'target_id': 'UniProtKB:23456',
                     'qedge_ids': ['qe01']},
                    {'edge_id': 'ke05',
                     'source_id': 'DOID:12345',
                     'target_id': 'UniProtKB:12345',
                     'qedge_ids': ['qe02']},
                    {'edge_id': 'ke06',
                     'source_id': 'DOID:12345',
                     'target_id': 'UniProtKB:23456',
                     'qedge_ids': ['qe02']})
    kg_nodes = [_create_node(node_id=node_info['id'],
                             node_type=[node_info['type']],
                             qnode_ids=node_info['qnode_ids']) for node_info in kg_node_info]
    kg_edges = [_create_edge(edge_id=edge_info['edge_id'],
                             source_id=edge_info['source_id'],
                             target_id=edge_info['target_id'],
                             qedge_ids=edge_info['qedge_ids']) for edge_info in kg_edge_info]
    knowledge_graph = KnowledgeGraph(kg_nodes, kg_edges)
    qg_node_info = ({'id': 'n01',
                     'type': 'protein',
                     'is_set': True},
                    {'id': 'DOID:12345',
                     'type': 'disease',
                     'is_set': False},
                    {'id': 'n02',
                     'type': 'chemical_substance',
                     'is_set': False})
    qg_edge_info = ({'edge_id': 'qe01',
                     'source_id': 'n02',
                     'target_id': 'n01'},
                    {'edge_id': 'qe02',
                     'source_id': 'DOID:12345',
                     'target_id': 'n01'})
    qg_nodes = [QNode(id=node_info['id'],
                      type=ARAX_resultify.BIOLINK_ENTITY_TYPE_OBJECTS[node_info['type']],
                      is_set=node_info['is_set']) for node_info in qg_node_info]
    qg_edges = [QEdge(id=edge_info['edge_id'],
                      source_id=edge_info['source_id'],
                      target_id=edge_info['target_id']) for edge_info in qg_edge_info]
    query_graph = QueryGraph(qg_nodes, qg_edges)
    response = Response()
    # Parse the resultify action string the same way the production DSL does.
    from actions_parser import ActionsParser
    actions_parser = ActionsParser()
    actions_list = ['resultify(ignore_edge_direction=true)']
    result = actions_parser.parse(actions_list)
    response.merge(result)
    actions = result.data['actions']
    assert result.status == 'OK'
    resultifier = ARAXResultify()
    message = Message(query_graph=query_graph,
                      knowledge_graph=knowledge_graph,
                      results=[])
    parameters = actions[0]['parameters']
    parameters['debug'] = 'true'
    result = resultifier.apply(message, parameters)
    response.merge(result)
    # One result per chemical_substance node bound to 'n02'.
    assert len(message.results) == 2
    assert result.status == 'OK'
def test08():
    """One disease fanned out to four phenotypes: expect one result per phenotype."""
    query_graph = _convert_shorthand_to_qg({"n00": "", "n01": ""},
                                           {"e00": "n00--n01"})
    kg_node_shorthand = {"n00": ["DOID:731"],
                         "n01": ["HP:01", "HP:02", "HP:03", "HP:04"]}
    kg_edge_shorthand = {"e00": ["DOID:731--HP:01", "DOID:731--HP:02", "DOID:731--HP:03", "DOID:731--HP:04"]}
    knowledge_graph = _convert_shorthand_to_kg(kg_node_shorthand, kg_edge_shorthand)
    response, message = _run_resultify_directly(query_graph, knowledge_graph)
    assert response.status == 'OK'
    n01_node_ids = {node.id for node in message.knowledge_graph.nodes if "n01" in node.qnode_ids}
    assert len(message.results) == len(n01_node_ids)
@pytest.mark.slow
def test09():
    """End-to-end: resultify followed by limit_number_of_results caps results at 100."""
    response, message = _do_arax_query([
        "add_qnode(name=DOID:731, id=n00, type=disease, is_set=false)",
        "add_qnode(type=phenotypic_feature, is_set=false, id=n01)",
        "add_qedge(source_id=n00, target_id=n01, id=e00)",
        "expand(edge_id=e00)",
        "resultify(ignore_edge_direction=true, debug=true)",
        "filter_results(action=limit_number_of_results, max_results=100)",
        "return(message=true, store=false)",
    ])
    assert response.status == 'OK'
    assert len(message.results) == 100
def test10():
    """describe_me() must advertise a brief description and the ignore_edge_direction knob."""
    description = ARAXResultify().describe_me()
    assert 'brief_description' in description[0]
    assert 'ignore_edge_direction' in description[0]
@pytest.mark.slow
def test_example1():
    """Expanded demo query: one result per qg1-bound node, each with an essence."""
    response, message = _do_arax_query([
        "add_qnode(id=qg0, curie=CHEMBL.COMPOUND:CHEMBL112)",
        "add_qnode(id=qg1, type=protein)",
        "add_qedge(source_id=qg1, target_id=qg0, id=qe0)",
        "expand(edge_id=qe0)",
        "resultify(ignore_edge_direction=true, debug=true)",
        "return(message=true, store=false)",
    ])
    assert response.status == 'OK'
    qg1_node_ids = {node.id for node in message.knowledge_graph.nodes if "qg1" in node.qnode_ids}
    assert len(message.results) == len(qg1_node_ids)
    assert message.results[0].essence is not None
def test_bfs():
    """Unit-test _make_adj_maps/_bfs_dists on a three-node path query graph.

    Graph: n01 -- DOID:12345 -- n02 (undirected adjacency, self-loops dropped).
    Distances from each chosen start node must match the path structure.
    """
    qg_node_info = ({'id': 'n01',
                     'type': 'protein',
                     'is_set': None},
                    {'id': 'DOID:12345',
                     'type': 'disease',
                     'is_set': False},
                    {'id': 'n02',
                     'type': 'phenotypic_feature',
                     'is_set': True})
    qg_edge_info = ({'edge_id': 'qe01',
                     'source_id': 'n01',
                     'target_id': 'DOID:12345'},
                    {'edge_id': 'qe02',
                     'source_id': 'DOID:12345',
                     'target_id': 'n02'})
    qg_nodes = [QNode(id=node_info['id'],
                      type=ARAX_resultify.BIOLINK_ENTITY_TYPE_OBJECTS[node_info['type']],
                      is_set=node_info['is_set']) for node_info in qg_node_info]
    qg_edges = [QEdge(id=edge_info['edge_id'],
                      source_id=edge_info['source_id'],
                      target_id=edge_info['target_id']) for edge_info in qg_edge_info]
    qg = QueryGraph(qg_nodes, qg_edges)
    adj_map = ARAX_resultify._make_adj_maps(qg, directed=False, droploops=True)['both']
    # From an endpoint: hop counts 0, 1, 2 along the path.
    bfs_dists = ARAX_resultify._bfs_dists(adj_map, 'n01')
    assert bfs_dists == {'n01': 0, 'DOID:12345': 1, 'n02': 2}
    # From the middle node: both neighbors at distance 1.
    bfs_dists = ARAX_resultify._bfs_dists(adj_map, 'DOID:12345')
    assert bfs_dists == {'n01': 1, 'DOID:12345': 0, 'n02': 1}
def test_bfs_in_essence_code():
    """Linear four-qnode chain with two parallel proteins at n01.

    _get_results_for_kg_by_qg must produce two results (one per protein path)
    and assign each result an essence.
    """
    kg_node_info = ({'id': 'DOID:12345',
                     'type': 'disease',
                     'qnode_ids': ['n00']},
                    {'id': 'UniProtKB:12345',
                     'type': 'protein',
                     'qnode_ids': ['n01']},
                    {'id': 'UniProtKB:23456',
                     'type': 'protein',
                     'qnode_ids': ['n01']},
                    {'id': 'FOO:12345',
                     'type': 'gene',
                     'qnode_ids': ['n02']},
                    {'id': 'HP:56789',
                     'type': 'phenotypic_feature',
                     'qnode_ids': ['n03']})
    kg_edge_info = ({'edge_id': 'ke01',
                     'target_id': 'UniProtKB:12345',
                     'source_id': 'DOID:12345',
                     'qedge_ids': ['qe01']},
                    {'edge_id': 'ke02',
                     'target_id': 'UniProtKB:23456',
                     'source_id': 'DOID:12345',
                     'qedge_ids': ['qe01']},
                    {'edge_id': 'ke03',
                     'source_id': 'UniProtKB:12345',
                     'target_id': 'FOO:12345',
                     'qedge_ids': ['qe02']},
                    {'edge_id': 'ke04',
                     'source_id': 'UniProtKB:23456',
                     'target_id': 'FOO:12345',
                     'qedge_ids': ['qe02']},
                    {'edge_id': 'ke05',
                     'source_id': 'FOO:12345',
                     'target_id': 'HP:56789',
                     'qedge_ids': ['qe03']})
    kg_nodes = [_create_node(node_id=node_info['id'],
                             node_type=[node_info['type']],
                             qnode_ids=node_info['qnode_ids']) for node_info in kg_node_info]
    kg_edges = [_create_edge(edge_id=edge_info['edge_id'],
                             source_id=edge_info['source_id'],
                             target_id=edge_info['target_id'],
                             qedge_ids=edge_info['qedge_ids']) for edge_info in kg_edge_info]
    knowledge_graph = KnowledgeGraph(kg_nodes, kg_edges)
    qg_node_info = ({'id': 'n00',  # DOID:12345
                     'type': 'disease',
                     'is_set': False},
                    {'id': 'n01',
                     'type': 'protein',
                     'is_set': False},
                    {'id': 'n02',
                     'type': 'gene',
                     'is_set': False},
                    {'id': 'n03',  # HP:56789
                     'type': 'phenotypic_feature',
                     'is_set': False})
    qg_edge_info = ({'edge_id': 'qe01',
                     'source_id': 'n00',
                     'target_id': 'n01'},
                    {'edge_id': 'qe02',
                     'source_id': 'n01',
                     'target_id': 'n02'},
                    {'edge_id': 'qe03',
                     'source_id': 'n02',
                     'target_id': 'n03'})
    qg_nodes = [QNode(id=node_info['id'],
                      type=ARAX_resultify.BIOLINK_ENTITY_TYPE_OBJECTS[node_info['type']],
                      is_set=node_info['is_set']) for node_info in qg_node_info]
    qg_edges = [QEdge(id=edge_info['edge_id'],
                      source_id=edge_info['source_id'],
                      target_id=edge_info['target_id']) for edge_info in qg_edge_info]
    query_graph = QueryGraph(qg_nodes, qg_edges)
    results_list = ARAX_resultify._get_results_for_kg_by_qg(knowledge_graph,
                                                            query_graph)
    assert len(results_list) == 2
    assert results_list[0].essence is not None
@pytest.mark.slow
def test_issue680():
    """Full pipeline with virtual edges (J1, P1) from overlay steps.

    Verifies every result wires its intermediate n01 nodes to real edges on
    both sides, and that every bound edge's endpoints are nodes of the same
    result.
    """
    actions = [
        "add_qnode(curie=DOID:14330, id=n00, type=disease)",
        "add_qnode(type=protein, is_set=true, id=n01)",
        "add_qnode(type=chemical_substance, id=n02)",
        "add_qedge(source_id=n00, target_id=n01, id=e00)",
        "add_qedge(source_id=n01, target_id=n02, id=e01, type=physically_interacts_with)",
        "expand(edge_id=[e00,e01], kp=ARAX/KG1)",
        "overlay(action=compute_jaccard, start_node_id=n00, intermediate_node_id=n01, end_node_id=n02, virtual_relation_label=J1)",
        "filter_kg(action=remove_edges_by_attribute, edge_attribute=jaccard_index, direction=below, threshold=.2, remove_connected_nodes=t, qnode_id=n02)",
        "filter_kg(action=remove_edges_by_property, edge_property=provided_by, property_value=Pharos)",
        "overlay(action=predict_drug_treats_disease, source_qnode_id=n02, target_qnode_id=n00, virtual_relation_label=P1)",
        "resultify(ignore_edge_direction=true, debug=true)",
        "return(message=true, store=false)",
    ]
    response, message = _do_arax_query(actions)
    assert response.status == 'OK'
    assert message.results[0].essence is not None
    kg_edges_map = {edge.id: edge for edge in message.knowledge_graph.edges}
    kg_nodes_map = {node.id: node for node in message.knowledge_graph.nodes}
    for result in message.results:
        result_nodes_by_qg_id = _get_result_nodes_by_qg_id(result, kg_nodes_map, message.query_graph)
        result_edges_by_qg_id = _get_result_edges_by_qg_id(result, kg_edges_map, message.query_graph)
        # Make sure all intermediate nodes are connected to at least one (real, not virtual) edge on BOTH sides
        for n01_node_id in result_nodes_by_qg_id['n01']:
            assert any(edge for edge in result_edges_by_qg_id['e00'].values() if
                       edge.source_id == n01_node_id or edge.target_id == n01_node_id)
            assert any(edge for edge in result_edges_by_qg_id['e01'].values() if
                       edge.source_id == n01_node_id or edge.target_id == n01_node_id)
        # Make sure all edges' nodes actually exist in this result (includes virtual and real edges)
        for qedge_id, edges_map in result_edges_by_qg_id.items():
            qedge = next(qedge for qedge in message.query_graph.edges if qedge.id == qedge_id)
            for edge_id, edge in edges_map.items():
                # Accept either orientation because ignore_edge_direction=true.
                assert (edge.source_id in result_nodes_by_qg_id[qedge.source_id] and edge.target_id in
                        result_nodes_by_qg_id[qedge.target_id]) or \
                       (edge.target_id in result_nodes_by_qg_id[qedge.source_id] and edge.source_id in
                        result_nodes_by_qg_id[qedge.target_id])
def test_issue686a():
    """An unrecognized resultify parameter must be reported back in the response."""
    response, message = _do_arax_query([
        'add_qnode(id=qg0, curie=CHEMBL.COMPOUND:CHEMBL112)',
        'expand()',
        'resultify(ignore_edge_direction=true, INVALID_PARAMETER_NAME=true)',
        "return(message=true, store=false)"
    ])
    assert 'INVALID_PARAMETER_NAME' in response.show()
def test_issue686b():
    """resultify() must succeed when called with no parameters at all."""
    response, message = _do_arax_query([
        'add_qnode(id=qg0, curie=CHEMBL.COMPOUND:CHEMBL112)',
        'expand()',
        'resultify()',
        "return(message=true, store=false)"
    ])
    assert response.status == 'OK'
def test_issue686c():
    """A non-boolean ignore_edge_direction value must error and echo the bad value."""
    response, message = _do_arax_query([
        'add_qnode(id=qg0, curie=CHEMBL.COMPOUND:CHEMBL112)',
        'expand()',
        'resultify(ignore_edge_direction=foo)',
        "return(message=true, store=false)"
    ])
    assert response.status != 'OK'
    assert 'foo' in response.show()
def test_issue687():
    """ignore_edge_direction is optional; here one result per KG node is produced."""
    response, message = _do_arax_query([
        'add_qnode(id=qg0, curie=CHEMBL.COMPOUND:CHEMBL112)',
        'expand()',
        'resultify(debug=true)',
        "return(message=true, store=false)"
    ])
    assert response.status == 'OK'
    assert len(message.results) == len(message.knowledge_graph.nodes)
def test_issue727():
    """KG edges stored opposite to the QG arrow still count when direction is ignored."""
    query_graph = _convert_shorthand_to_qg({"n00": "", "n01": ""},
                                           {"e00": "n00--n01"})
    # The KG edges run PR -> DOID, i.e. the reverse of the QG's n00 -> n01 arrow.
    knowledge_graph = _convert_shorthand_to_kg(
        {"n00": ["DOID:111"], "n01": ["PR:01", "PR:02"]},
        {"e00": ["PR:01--DOID:111", "PR:02--DOID:111"]})
    response, message = _run_resultify_directly(query_graph, knowledge_graph)
    assert response.status == 'OK'
    assert len(message.results) == 2
def test_issue731():
    """Return no results when part of the QG has no KG fulfillment."""
    query_graph = _convert_shorthand_to_qg(
        {"n0": "", "n1": "is_set", "n2": ""},
        {"e0": "n0--n1", "e1": "n1--n2"})
    # n0 and e0 are deliberately left empty in the KG, so the QG is unfulfilled.
    knowledge_graph = _convert_shorthand_to_kg(
        {"n0": [], "n1": ["UniProtKB:123", "UniProtKB:124"], "n2": ["DOID:122"]},
        {"e0": [], "e1": ["UniProtKB:123--DOID:122", "UniProtKB:124--DOID:122"]})
    response, message = _run_resultify_directly(query_graph, knowledge_graph)
    assert response.status == 'OK'
    assert len(message.results) == 0
@pytest.mark.slow
def test_issue731b():
    """Every returned result must bind at least one edge to qedge e1."""
    response, message = _do_arax_query([
        "add_qnode(name=MONDO:0005737, id=n0, type=disease)",
        "add_qnode(type=protein, id=n1)",
        "add_qnode(type=disease, id=n2)",
        "add_qedge(source_id=n0, target_id=n1, id=e0)",
        "add_qedge(source_id=n1, target_id=n2, id=e1)",
        "expand(edge_id=[e0,e1], kp=ARAX/KG2)",
        "resultify(debug=true)",
        "return(message=true, store=false)"
    ])
    assert response.status == 'OK'
    for result in message.results:
        assert any(edge_binding.qg_id == 'e1' for edge_binding in result.edge_bindings)
def test_issue731c():
    """No result may consist of a single edge binding.

    The KG only fulfills qedges e0 and e1 partially (one protein, one of the
    two diseases), so any result that binds exactly one edge would be an
    incomplete fragment.
    """
    qg = QueryGraph(nodes=[QNode(curie='MONDO:0005737',
                                 id='n0',
                                 type='disease'),
                           QNode(id='n1',
                                 type='protein'),
                           QNode(id='n2',
                                 type='disease')],
                    edges=[QEdge(source_id='n0',
                                 target_id='n1',
                                 id='e0'),
                           QEdge(source_id='n1',
                                 target_id='n2',
                                 id='e1')])
    kg_node_info = ({'id': 'MONDO:0005737',
                     'type': 'disease',
                     'qnode_ids': ['n0']},
                    {'id': 'UniProtKB:Q14943',
                     'type': 'protein',
                     'qnode_ids': ['n1']},
                    {'id': 'DOID:12297',
                     'type': 'disease',
                     'qnode_ids': ['n2']},
                    {'id': 'DOID:11077',
                     'type': 'disease',
                     'qnode_ids': ['n2']})
    kg_edge_info = ({'edge_id': 'UniProtKB:Q14943--MONDO:0005737',
                     'target_id': 'MONDO:0005737',
                     'source_id': 'UniProtKB:Q14943',
                     'qedge_ids': ['e0']},
                    {'edge_id': 'DOID:12297--UniProtKB:Q14943',
                     'target_id': 'UniProtKB:Q14943',
                     'source_id': 'DOID:12297',
                     'qedge_ids': ['e1']})
    kg_nodes = [_create_node(node_id=node_info['id'],
                             node_type=[node_info['type']],
                             qnode_ids=node_info['qnode_ids']) for node_info in kg_node_info]
    kg_edges = [_create_edge(edge_id=edge_info['edge_id'],
                             source_id=edge_info['source_id'],
                             target_id=edge_info['target_id'],
                             qedge_ids=edge_info['qedge_ids']) for edge_info in kg_edge_info]
    kg = KnowledgeGraph(nodes=kg_nodes, edges=kg_edges)
    results = ARAX_resultify._get_results_for_kg_by_qg(kg, qg)
    indexes_results_with_single_edge = [index for index, result in enumerate(results) if len(result.edge_bindings) == 1]
    assert len(indexes_results_with_single_edge) == 0
def test_issue740():
# Tests that self-edges are handled properly
shorthand_qnodes = {"n00": "",
"n01": ""}
shorthand_qedges = {"e00": "n00--n01"}
query_graph = _convert_shorthand_to_qg(shorthand_qnodes, shorthand_qedges)
shorthand_kg_nodes = {"n00": ["CUI:C0004572"], # Babesia
"n01": ["HP:01", "HP:02", | |
<filename>lib/dataset_utils.py
#!/usr/bin/python3
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic utils for all datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import json
import os
import random as rand
from absl import flags
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import gfile
from lib import paths
# Constants used for dealing with the files, matches convert_to_records.
FLAGS = flags.FLAGS
# Per-split tfrecord basenames written/read by this module.
TRAIN_FILE = "train.tfrecords"
VALIDATION_FILE = "validation.tfrecords"
TEST_FILE = "test.tfrecords"
FILES = {"train": TRAIN_FILE, "valid": VALIDATION_FILE, "test": TEST_FILE}
# Image tensor shape per dataset (batch dimension left as None).
DATASET_SHAPE = {
    "cifar10": (None, 32, 32, 3),
    "cifar_unnormalized": (None, 32, 32, 3),
    "svhn": (None, 32, 32, 3),
    "svhn_extra": (None, 32, 32, 3),
    "imagenet_32": (None, 32, 32, 3),
}
# On-disk pixel dtype per dataset; cifar10 is stored pre-normalized as floats.
DATASET_DTYPE = {
    "cifar10": tf.float32,
    "cifar_unnormalized": tf.uint8,
    "svhn": tf.uint8,
    "svhn_extra": tf.uint8,
    "imagenet_32": tf.uint8,
}
# Number of label classes per dataset.
DATASET_CLASS_COUNT = {
    "cifar10": 10,
    "cifar_unnormalized": 10,
    "svhn": 10,
    "svhn_extra": 10,
    "imagenet_32": 1000,
}
# Example counts per split; train counts subtract the held-out validation
# examples listed under "valid". svhn_extra only exists as a train split,
# hence no "test"/"valid" entries for it.
DATASET_EXAMPLE_COUNT = {
    "train": {
        "cifar10": 50000 - 5000,
        "cifar_unnormalized": 50000 - 5000,
        "svhn": 73257 - 7326,
        "svhn_extra": 531131,
        "imagenet_32": 1281167 - 50050,
    },
    "test": {
        "cifar10": 10000,
        "cifar_unnormalized": 10000,
        "svhn": 26032,
        "imagenet_32": 50000,
    },
    "valid": {
        "cifar10": 5000,
        "cifar_unnormalized": 5000,
        "svhn": 7326,
        "imagenet_32": 50050,
    },
}
def int64_feature(value):
    """Create a feature that is serialized as an int64."""
    int64_list = tf.train.Int64List(value=[value])
    return tf.train.Feature(int64_list=int64_list)
def bytes_feature(value):
    """Create a feature that is stored on disk as a byte array."""
    bytes_list = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=bytes_list)
def convert_to(images, labels, num_examples, name, directory, dataset_name):
    """Converts a dataset to tfrecords.

    This function has the side effect of writing a dataset to disk.

    Args:
      images (tensor): many images.
      labels (tensor): many labels.
      num_examples (int): how many images and labels we are converting.
      name (str): the base name we will give to the file we construct.
      directory (str): where the dataset will be written.
      dataset_name (str): the name of the actual dataset, e.g. 'svhn'.

    Raises:
      ValueError: if the image size and label size don't match.
    """
    if images.shape[0] != num_examples:
        raise ValueError(
            "Images size %d does not match label size %d."
            % (images.shape[0], num_examples)
        )
    rows, cols, depth = images.shape[1:4]
    # Maps int label -> list of file_keys with that label (serialized below).
    label_to_fkeys = defaultdict(list)
    filename = os.path.join(directory, dataset_name, name + ".tfrecords")
    if not os.path.exists(os.path.join(directory, dataset_name)):
        os.makedirs(os.path.join(directory, dataset_name))
    tf.logging.info("Writing {}".format(filename))
    writer = tf.python_io.TFRecordWriter(filename)
    for index in range(num_examples):
        # NOTE(review): ndarray.tostring() is deprecated in newer numpy
        # (tobytes()); fine for the TF1-era stack this targets — confirm.
        image_raw = images[index].tostring()
        # The example's index doubles as its stable string key.
        file_key = str(index)
        file_key_bytes = file_key.encode()
        label = int(labels[index])
        example = tf.train.Example(
            features=tf.train.Features(
                feature={
                    "height": int64_feature(rows),
                    "width": int64_feature(cols),
                    "depth": int64_feature(depth),
                    "label": int64_feature(label),
                    "file_key": bytes_feature(file_key_bytes),
                    "image_raw": bytes_feature(image_raw),
                }
            )
        )
        writer.write(example.SerializeToString())
        label_to_fkeys[label].append(file_key)
    writer.close()
    # Serialize map from fkey to label
    # This way, we can create balanced semisupervised datasets later
    result_path = os.path.join(
        directory, dataset_name, "label_to_fkeys_" + name
    )
    with tf.gfile.GFile(result_path, "w") as f:
        json.dump(label_to_fkeys, f)
def build_simple_mixed_batch_datasets(
    labeled_dataset_name, unlabeled_dataset_name, labeled_parser
):
    """Build the datasets for parsed labeled and unlabeled data.

    Args:
      labeled_dataset_name (str): name of the labeled dataset.
      unlabeled_dataset_name (str): name of the unlabeled dataset.
      labeled_parser (func): Function that parsers one image from labeled
        dataset.

    Returns:
      A pair of datasets [labeled_dataset, unlabeled_dataset].

    Raises:
      ValueError: If the datasets aren't compatible
    """
    # Fail fast, before touching any files, if the combination is invalid.
    # BUG FIX: the message was previously passed to ValueError unformatted
    # ("...{} {}...", a, b), so the offending dataset names never appeared
    # in the exception text.
    if not are_datasets_compatible(labeled_dataset_name, unlabeled_dataset_name):
        raise ValueError(
            "Datasets {}, {} not compatible".format(
                labeled_dataset_name, unlabeled_dataset_name
            )
        )
    # If split were not train, this function would not be called.
    split = "train"
    labeled_filenames = get_filenames(labeled_dataset_name, split)
    unlabeled_filenames = get_filenames(unlabeled_dataset_name, split)
    unlabeled_parser = construct_parser(unlabeled_dataset_name)
    # A dataset object holding only the labeled examples from primary.
    labeled_dataset = tf.data.TFRecordDataset(labeled_filenames)
    labeled_dataset = labeled_dataset.map(
        labeled_parser, num_parallel_calls=32
    ).prefetch(100)
    # A dataset object holding all examples from secondary, unlabeled.
    unlabeled_dataset = tf.data.TFRecordDataset(unlabeled_filenames)
    unlabeled_dataset = unlabeled_dataset.map(
        unlabeled_parser, num_parallel_calls=32
    ).prefetch(100)
    return [labeled_dataset, unlabeled_dataset]
def parse_small_example(
    dataset, serialized_example, image_shape, apply_normalization
):
    """Parses an example from one of the smaller datasets.

    This function also performs some pre-processing.

    Args:
      dataset (str): the name of the dataset.
      serialized_example (tf.Example): blob representing a tf.Example.
      image_shape (int): the size we want the image to have.
      apply_normalization (int): whether to mean-normalize the images.

    Returns:
      A tuple (image, label, file_key), where:
      * image is a single image.
      * label is an int32 with the true label,
      * fkey is a key that will be used to decide whether to perturb the label
        for the purpose of performing semi-supervised learning.
    """
    features = tf.parse_single_example(
        serialized_example,
        # Defaults are not specified since both keys are required.
        features={
            "image_raw": tf.FixedLenFeature([], tf.string),
            "label": tf.FixedLenFeature([], tf.int64),
            "file_key": tf.FixedLenFeature([], tf.string),
        },
    )
    # Read the bytes that constitute the image
    image_dtype = DATASET_DTYPE[dataset]
    image = tf.decode_raw(features["image_raw"], image_dtype)
    # The decoded tensor is flat; pin its length before reshaping below.
    image.set_shape(np.prod(image_shape))
    if apply_normalization:
        # Convert from [0, 255] -> [-1., 1.] floats.
        image = tf.cast(image, tf.float32)
        image = image * (1. / 255) - 0.5
        image *= 2.
    # Reshape the images
    image = tf.reshape(image, image_shape)
    # Convert label from a scalar uint8 tensor to an int32 scalar.
    label = tf.cast(features["label"], tf.int32)
    # Read the file_key, which we use for label manipulation
    file_key = features["file_key"]
    return image, label, file_key
def construct_label_table(dataset, label_map):
    """Given a label_map file, construct the hash-table it represents.

    Args:
      dataset (str): the name of the dataset.
      label_map (str): the label_map filename.

    Returns:
      A tensorflow hashtable from String to Bool, or None when label_map is
      None (meaning: use all labels).
    """
    # We just want to use all the labels in this case.
    if label_map is None:
        return None
    # So that we don't need to store copies of label_maps for all our different
    # imagenet variations:
    if "imagenet" in dataset:
        dataset = "imagenet"
    # Load the map from disk
    result_path = os.path.join(paths.TRAIN_DIR, dataset, label_map)
    with tf.gfile.GFile(result_path, "r") as f:
        result_dict = json.load(f)
    # Because the imagenet keys are just the image filenames:
    if "imagenet" in dataset:
        keys = [st + ".JPEG" for st in result_dict["values"]]
    else:
        # This result_dict will contain a list of file_keys
        # The json library seems to default to loading them as unicode?
        # so I ascii encode them (I wrote them, so I know they only include
        # ascii characters)
        keys = [key.encode("ascii", "ignore") for key in result_dict["values"]]
    values = [True] * len(keys)
    # Present keys map to True; absent keys fall back to the default (False).
    label_table = tf.contrib.lookup.HashTable(
        tf.contrib.lookup.KeyValueTensorInitializer(keys, values), False
    )
    return label_table
def get_filenames(dataset_name, split):
    """Get the names of the tfrecord files for this (dataset, split) pair.

    Args:
      dataset_name (str): Name of the dataset, e.g. svhn.
      split (str): Which split to use, e.g. test.

    Returns:
      A list of filenames.

    Raises:
      ValueError: if the dataset or split is not supported.
    """
    if dataset_name in [
        "cifar10",
        "svhn",
        "cifar_unnormalized",
        "imagenet_32",
    ]:
        # BUG FIX: an unknown split previously surfaced as a bare KeyError
        # from FILES[split]; raise the documented ValueError with context.
        if split not in FILES:
            raise ValueError("Unsupported split: {!r}".format(split))
        filenames = [os.path.join(paths.TRAIN_DIR, dataset_name, FILES[split])]
    elif dataset_name == "svhn_extra":
        if split != "train":
            raise ValueError("svhn_extra dataset only has a train split")
        # svhn_extra lives alongside svhn under a fixed basename.
        filenames = [os.path.join(paths.TRAIN_DIR, "svhn", "extra.tfrecords")]
    else:
        # Include the offending values so the failure is diagnosable.
        raise ValueError(
            "Unsupported dataset, split pair: ({!r}, {!r})".format(
                dataset_name, split
            )
        )
    return filenames
def are_datasets_compatible(labeled_dataset_name, unlabeled_dataset_name):
    """Check if a pair of datasets are compatible for semi-supervised learning.

    Args:
      labeled_dataset_name (str): a string identifier.
      unlabeled_dataset_name (str): a string identifier.

    Returns:
      Boolean
    """
    # Any dataset can always supply its own unlabeled examples.
    if labeled_dataset_name == unlabeled_dataset_name:
        return True
    # Otherwise only these (labeled, unlabeled) pairings are allowed.
    valid_combos = [
        ("cifar_unnormalized", "svhn"),
        ("svhn", "cifar_unnormalized"),
        ("svhn", "svhn_extra"),
    ]
    return (labeled_dataset_name, unlabeled_dataset_name) in valid_combos
def shuffle_merge(dataset_1, dataset_2):
    """Merge two tensorflow dataset objects in a more shuffley way.

    If the two datasets being merged repeat indefinitely,
    an iterator created on this dataset will alternate examples
    from the two datasets.

    If the datasets have very different sizes, this might not
    be the behavior that you want.

    If you want examples to come out in proportion to the original
    size of the dataset, you will need to do something like store
    some marker of the source dataset and then do rejection resampling
    on the outputs to rebalance them according to source dataset size.

    Args:
      dataset_1 (Tensorflow dataset): first dataset object.
      dataset_2 (Tensorflow dataset): second dataset object.

    Returns:
      A new dataset.
    """
    def interleave_pair(first_elem, second_elem):
        # Emit the element from dataset_1, then the paired one from dataset_2.
        return tf.data.Dataset.from_tensors(first_elem).concatenate(
            tf.data.Dataset.from_tensors(second_elem)
        )

    paired = tf.data.Dataset.zip((dataset_1, dataset_2))
    return paired.flat_map(interleave_pair)
def construct_parser(dataset_name):
"""Construct a parser based on configuration data.
Args:
dataset_name (str): Name of the dataset, e.g. 'svhn'.
Returns:
A parser of serialized tf.Examples.
| |
176, 688, -688
51540, 192, 720, -800
51600, 144, 592, -864
51660, 272, 512, -816
51720, 384, 528, -752
51780, 224, 528, -784
51840, 256, 624, -752
51900, 224, 672, -688
51960, 192, 784, -720
52020, 240, 592, -720
52080, 272, 640, -736
52140, 320, 656, -544
52200, 240, 768, -528
52260, 496, 720, -576
52320, 304, 992, -480
52380, 144, 944, -608
52440, 384, 704, -512
52500, 448, 640, -608
52560, 320, 640, -704
52620, 368, 656, -720
52680, 416, 640, -656
52740, 400, 656, -624
52800, 272, 704, -656
52860, 304, 656, -640
52920, 336, 624, -576
52980, 240, 704, -576
53040, 256, 336, -512
53100, 112, 320, -512
53160, -80, 160, -448
53220, -464, 144, -512
53280, -672, -112, -1168
53340, -512, -352, -1504
53400, -96, -480, -1728
53460, 480, -592, -1312
53520, 880, -496, -928
53580, 224, -480, -816
53640, 96, -272, -1072
53700, -624, 64, -848
53760, -80, 48, 336
53820, -272, -448, -240
53880, -256, -800, -192
53940, -448, -1184, -208
54000, -736, -1104, -416
54060, -1024, -1072, -592
54120, -1120, -800, -832
54180, -1216, -896, -720
54240, -1120, -976, -816
54300, -1408, -1232, -768
54360, -1584, -1536, -496
54420, -1392, -1472, -432
54480, -864, -1248, -272
54540, -416, -576, -272
54600, -64, 160, -288
54660, 64, 512, -96
54720, -352, 352, -400
54780, 32, -32, -816
54840, 688, 0, -976
54900, 208, -320, -992
54960, 352, -704, -944
55020, 480, -1168, -1312
55080, 416, -1728, -1184
55140, 1136, -1824, -592
55200, 864, -2032, -384
55260, -48, -2032, -592
55320, -624, -2032, -528
55380, -2032, -2032, 192
55440, 528, -1840, 1184
55500, 16, 128, 1008
55560, 176, -96, 1040
55620, 368, -128, 576
55680, 480, 0, 144
55740, 320, 80, 16
55800, 96, 144, 96
55860, -96, 352, 112
55920, -80, 384, 208
55980, 16, 368, 448
56040, 144, 512, 688
56100, 336, 592, 688
56160, 448, 560, 560
56220, 1680, 2048, 224
56280, -160, 384, -576
56340, -416, -1568, 480
56400, 432, -2032, 416
56460, 448, -2032, 16
56520, 1152, -1520, -544
56580, 1136, -272, -416
56640, 976, -240, -400
56700, 624, -16, -352
56760, 480, 48, -544
56820, 336, 16, -368
56880, 416, 112, -96
56940, 368, 432, 192
57000, 272, 800, 672
57060, -128, 768, 752
57120, -896, 320, 1008
57180, -1264, -2032, 2032
57240, -656, 288, 1520
57300, -1776, 496, 2032
57360, -464, -576, 1248
57420, -96, 544, 368
57480, 144, 256, -48
57540, 0, 128, 128
57600, 16, 112, 176
57660, 2048, -720, 1232
57720, -496, 752, 2032
57780, 1488, 384, 704
57840, 608, -2032, 1664
57900, 976, 48, 1376
57960, 736, 2048, 880
58020, -304, 2048, 1584
58080, -576, -240, 32
58140, 176, 288, -48
58200, 0, 256, -16
58260, 16, 496, 112
58320, 64, 592, 224
58380, -2032, 2048, -2048
58440, 2048, -2032, 1488
58500, -160, -32, 816
58560, 288, -896, 448
58680, 528, -2032, 496
58740, 784, -2032, 304
58800, 576, -2032, 272
58860, -368, -1728, 928
58920, 1392, -976, -192
58980, 1008, -2032, 1328
59040, 1280, -2032, 2032
59100, 448, -2032, 2032
59160, 2048, -2032, 2032
59220, 496, -160, 1712
59280, 2032, 832, 1040
59340, 1664, 2048, 816
59400, 1104, 2048, 1984
59460, 592, 2048, 2032
59520, 288, 1600, 1056
59580, 208, 1360, 432
59640, 256, 1536, 448
59700, 208, 1600, 1072
59760, 1024, 160, 2032
59820, 464, 624, -704
59880, -16, 528, -48
59940, 144, 304, 48
60000, 224, 352, -16
60060, 240, 416, 16
60120, 2048, 2048, 1504
60180, -2032, -144, -896
60240, 1840, -1664, -336
60300, 64, -112, -528
60360, -1088, 2048, -448
60420, 32, 2048, -2048
60480, 912, 2048, -2048
60540, -336, 448, -720
60600, 192, 16, -496
60660, 624, 944, 64
60720, 224, 1216, -992
60780, 160, 112, -1056
60840, 16, -640, -1632
60900, 336, -1312, -2048
60960, 1856, -2032, -2048
61020, -2032, -736, -2048
61080, 832, 320, 1872
61140, 736, 272, 992
61200, 432, 1248, 1920
61260, 832, 1456, 1200
61320, 656, 816, 448
61380, 48, 512, 176
61440, 0, 288, 400
61500, 0, 416, 768
61560, 256, 416, 1024
61620, 400, 464, 784
61680, 400, 464, 480
61740, 432, 384, 80
61800, 272, 288, 32
61860, -2032, 2048, 2032
61920, 2048, 928, 2032
61980, 240, 2048, -112
62040, -1328, -2032, 656
62100, -208, -640, -16
62160, -192, -192, 288
62220, -896, -2032, 432
62280, 48, -1888, 768
62340, -96, -544, 32
62400, -192, 48, 128
62460, 32, 192, 320
62520, 0, 48, 240
62580, 128, -592, 384
62640, -400, -2032, 1184
62700, -448, -1776, 1984
62760, 176, -816, 1072
62820, 592, -48, 416
62880, 448, -480, 640
62940, -16, -704, 32
63000, -48, -1072, 912
63060, -464, -976, 816
63120, -144, -768, 368
63180, -192, -992, 736
63240, -144, -832, 752
63300, -192, -800, 368
63360, -32, -880, 320
63420, 96, -816, 480
63480, -96, -1136, 576
63540, -256, -912, 688
63600, -432, -976, 368
63660, -416, -928, 272
63720, -160, -960, 416
63780, -128, -960, 560
63840, -160, -768, 560
63900, -160, -912, 528
63960, -224, -1184, 400
64020, -272, -1216, 224
64080, -32, -896, 176
64140, 0, -688, 112
64200, -80, -848, 128
64260, -112, -1072, 64
64320, -32, -1120, 224
64380, 16, -1056, 240
64440, 0, -1136, 224
64500, -272, -1184, 368
64560, -288, -960, 240
64620, -400, -960, 160
64680, -336, -928, 224
64740, -208, -928, 160
64800, -256, -992, 144
64860, -208, -1040, 224
64920, -208, -1120, 192
64980, -64, -1088, 128
65040, -208, -1120, 80
65100, -160, -912, 64
65160, -160, -832, 176
65220, -128, -960, 0
65280, -240, -1104, 128
65340, -96, -1056, 144
65400, -128, -1008, 112
65460, -96, -944, 256
65520, -128, -1040, 272
65580, -80, -1008, 240
65640, -80, -1008, 224
65700, -96, -1024, 208
65760, -80, -1024, 208
65820, -80, -944, 128
65880, -64, -1008, 144
65940, -112, -1040, 144
66000, -80, -1056, 144
66060, -64, -1088, 176
66120, -32, -1056, 160
66180, -32, -1008, 128
66240, -64, -1040, 160
66300, -128, -1072, 128
66360, -112, -976, 80
66420, -128, -1040, 144
66480, -112, -1024, 128
66540, -112, -992, 80
66600, -112, -1024, 80
66660, -96, -960, 48
66720, -112, -1072, 32
66780, -192, -1088, 128
66840, -144, -1024, 48
66900, -160, -1040, 112
66960, -144, -976, 192
67020, -144, -1040, 144
67080, -96, -1024, 256
67140, -80, -1056, 304
67200, -32, -1056, 288
67260, -32, -1040, 224
67320, -48, -1040, 240
67380, -80, -960, 240
67440, 0, -960, 256
67500, -64, -992, 352
67560, -48, -992, 352
67620, -32, -976, 352
67680, -32, -944, 368
67740, -16, -944, 320
67800, -64, -992, 352
67860, -112, -1040, 384
67920, -112, -976, 400
67980, -32, -1008, 352
68040, -64, -1040, 432
68100, -96, -992, 464
68160, -112, -976, 368
68220, -80, -960, 272
68280, -64, -976, 224
68340, -48, -960, 256
68400, -80, -992, 320
68460, -32, -960, 336
68520, -16, -976, 352
68580, 0, -992, 384
68640, 0, -992, 384
68700, -16, -976, 384
68760, -48, -976, 352
68820, -80, -976, 320
68880, -80, -992, 304
68940, -80, -992, 304
69000, -80, -1008, 320
69060, -64, -1008, 336
69120, -64, -1008, 336
69180, -96, -1008, 352
69240, -80, -1008, 336
69300, -48, -960, 368
69360, -48, -992, 352
69420, -32, -992, 352
69480, 0, -992, 304
69540, 16, -976, 272
69600, -32, -1040, 352
69660, 0, -992, 336
69720, -16, -1072, 160
69780, -16, -976, 64
69840, 32, -976, 192
69900, 32, -992, 272
69960, 16, -1056, 288
70020, 0, -1024, 336
70080, 0, -1024, 352
70140, 0, -1008, 256
70200, 0, -976, 256
70260, -16, -1008, 288
70320, 16, -1024, 288
70380, 16, -1008, 304
70440, 16, -1008, 320
70500, 32, -1008, 304
70560, 32, -992, 288
70620, 16, -992, 288
70680, 0, -1024, 304
70740, 16, -1008, 304
70800, 16, -1008, 256
70860, 16, -976, 272
70920, 0, -1008, 304
70980, 0, -1008, 288
71040, 0, -1008, 288
71100, 0, -1008, 272
71160, 0, -1008, 272
71220, 0, -1008, 320
71280, 16, -1008, 304
71340, 16, -992, 288
71400, 0, -992, 304
71460, -16, -1024, 320
71520, -16, -1008, 304
71580, -16, -1008, 288
71760, -16, -1008, 320
71820, 0, -1008, 320
71880, -16, -992, 320
72000, 0, -992, 304
72060, 0, -976, 336
72120, 16, -992, 336
72180, 0, -1008, 336
72240, -16, -992, 304
72300, -32, -1008, 320
72360, -16, -992, 320
72420, -16, -1008, 320
72480, -16, -992, 320
72540, -16, -992, 336
72600, -16, -992, 320
72660, -16, -992, 320
72720, -16, -992, 336
72780, -16, -992, 320
72840, 0, -992, 304
72900, 0, -976, 304
72960, -32, -1024, 368
73020, 0, -992, 336
73080, -32, -1008, 304
73140, -48, -992, 320
73200, -32, -992, 304
73260, 0, -992, 336
73320, 0, -992, 336
73380, 0, -992, 336
73440, -16, -992, 352
73500, -16, -992, 336
73560, -16, -992, 336
73620, -16, -976, 352
73680, -16, -992, 352
73740, -16, -992, 352
73800, -32, -976, 336
73860, -32, -960, 288
73920, 32, -928, 272
73980, -32, -960, 304
74040, -176, -992, 368
74100, -176, -912, 400
74160, -160, -1008, 512
74220, -96, -1008, 672
74280, -64, -1104, 784
74340, 0, -1008, 720
74400, 208, -800, 336
74460, 256, -800, -128
74520, -80, -864, -160
74580, -16, -800, -224
74640, -32, -1104, -592
74700, -224, -1056, -432
74760, -96, -944, -256
74820, -160, -976, -480
74880, -144, -896, -608
74940, -288, -992, -480
75000, -176, -960, -512
75060, -160, -864, -704
75120, -128, -800, -624
75180, -176, -736, -560
75240, -192, -720, -496
75300, -176, -656, -576
75420, -48, -672, -672
75480, -48, -736, -640
75540, -96, -768, -560
75600, -144, -720, -496
75660, 32, -672, -352
75720, 864, -1248, -208
75780, 496, -1024, -784
75840, 384, -976, -608
75900, 304, -864, -544
75960, 320, -816, -528
76020, 304, -832, -544
76080, 288, -832, -528
76140, 320, -784, -528
76200, 304, -816, -576
76260, 272, -800, -576
76320, 240, -736, -624
76380, 240, -768, -592
76440, 256, -784, -624
76500, 208, -720, -640
76560, 256, -784, -640
76620, 352, -832, -640
76680, 368, -800, -752
76740, 416, -800, -832
76800, 544, -672, -880
76860, 608, -480, -896
76920, 624, -240, -864
76980, 704, 16, -704
77040, 720, 176, -592
77100, 560, 368, -480
77160, 464, 336, -464
77220, 432, 352, -368
77280, 352, 240, -176
77340, 160, 496, | |
0x00, 0x00, 0x00,
0x2c, 0x19, 0x00, 0x33, 0x00, 0x04, 0x00, 0x02, 0x00, 0x00, 0x08, 0x0c, 0x00, 0xb9, 0xd9, 0x43,
0x07, 0x00, 0x80, 0xb0, 0x5b, 0x00, 0x02, 0x02, 0x00, 0x21, 0xf9, 0x04, 0x05, 0x04, 0x00, 0x00,
0x00, 0x2c, 0x19, 0x00, 0x32, 0x00, 0x07, 0x00, 0x03, 0x00, 0x00, 0x08, 0x16, 0x00, 0xe9, 0xa9,
0x03, 0x40, 0x90, 0xa0, 0xb3, 0x19, 0x57, 0xf0, 0xb1, 0x2b, 0x08, 0xc0, 0x8a, 0x95, 0x66, 0x29,
0x00, 0x04, 0x04, 0x00, 0x21, 0xf9, 0x04, 0x05, 0x03, 0x00, 0x02, 0x00, 0x2c, 0x18, 0x00, 0x32,
0x00, 0x09, 0x00, 0x03, 0x00, 0x00, 0x08, 0x19, 0x00, 0x4b, 0x94, 0xba, 0xb2, 0xcd, 0x9c, 0x80,
0x83, 0x08, 0x55, 0xb4, 0xe8, 0x61, 0xea, 0x8b, 0x3d, 0x74, 0x08, 0x0f, 0x5a, 0x99, 0x28, 0x2c,
0x20, 0x00, 0x21, 0xf9, 0x04, 0x05, 0x03, 0x00, 0x00, 0x00, 0x2c, 0x18, 0x00, 0x31, 0x00, 0x0a,
0x00, 0x04, 0x00, 0x00, 0x08, 0x20, 0x00, 0x53, 0x6c, 0xb3, 0x47, 0x4f, 0x1d, 0x80, 0x83, 0x07,
0x17, 0xd5, 0x68, 0xd1, 0x42, 0xcb, 0x33, 0x7b, 0xec, 0x10, 0x22, 0x64, 0xd8, 0x42, 0x86, 0x26,
0x6e, 0x12, 0x33, 0x02, 0x08, 0x08, 0x00, 0x21, 0xf9, 0x04, 0x05, 0x07, 0x00, 0x00, 0x00, 0x2c,
0x18, 0x00, 0x30, 0x00, 0x0a, 0x00, 0x04, 0x00, 0x00, 0x08, 0x20, 0x00, 0x01, 0xa0, 0x53, 0x07,
0xa0, 0xa0, 0x41, 0x00, 0xcb, 0x64, 0xc8, 0xd0, 0x42, 0x69, 0x9b, 0xb9, 0x83, 0x06, 0x5b, 0x48,
0x34, 0xf5, 0xc5, 0x1e, 0xc4, 0x82, 0x2d, 0x54, 0x30, 0x0a, 0x08, 0x00, 0x21, 0xf9, 0x04, 0x05,
0x03, 0x00, 0x00, 0x00, 0x2c, 0x18, 0x00, 0x30, 0x00, 0x0b, 0x00, 0x03, 0x00, 0x00, 0x08, 0x1d,
0x00, 0xb9, 0x7d, 0x19, 0x18, 0xc9, 0x1e, 0x3a, 0x00, 0x08, 0x01, 0x10, 0x6a, 0xc1, 0x90, 0xa1,
0x0c, 0x4a, 0xdb, 0xcc, 0x25, 0x9c, 0x08, 0x80, 0x61, 0x8d, 0x60, 0x01, 0x01, 0x00, 0x21, 0xf9,
0x04, 0x05, 0x04, 0x00, 0x00, 0x00, 0x2c, 0x18, 0x00, 0x2f, 0x00, 0x0b, 0x00, 0x04, 0x00, 0x00,
0x08, 0x22, 0x00, 0x01, 0xb0, 0xd3, 0x67, 0xcf, 0x1e, 0x3b, 0x00, 0x08, 0x11, 0xc6, 0xaa, 0xd1,
0xa2, 0x21, 0x0b, 0x2d, 0x5f, 0xec, 0xa1, 0x4b, 0x48, 0xb1, 0x61, 0x0b, 0x1a, 0xb7, 0x28, 0x6a,
0xec, 0x01, 0x20, 0x20, 0x00, 0x21, 0xf9, 0x04, 0x05, 0x03, 0x00, 0x00, 0x00, 0x2c, 0x18, 0x00,
0x2f, 0x00, 0x0b, 0x00, 0x03, 0x00, 0x00, 0x08, 0x1f, 0x00, 0xed, 0x7d, 0xd1, 0xa4, 0x45, 0x91,
0x16, 0x4a, 0x91, 0xec, 0xa9, 0x03, 0x60, 0xac, 0x05, 0x80, 0x87, 0x2d, 0x22, 0xb6, 0x40, 0x12,
0xe4, 0xa1, 0xc5, 0x8b, 0x2d, 0x9a, 0x05, 0x04, 0x00, 0x21, 0xf9, 0x04, 0x05, 0x07, 0x00, 0x00,
0x00, 0x2c, 0x17, 0x00, 0x2e, 0x00, 0x0d, 0x00, 0x03, 0x00, 0x00, 0x08, 0x20, 0x00, 0x01, 0x00,
0x40, 0x67, 0x2f, 0xd2, 0x97, 0x83, 0xdb, 0xe8, 0xa9, 0x13, 0x08, 0xe0, 0x8a, 0x8c, 0x16, 0x10,
0x23, 0xb6, 0xd8, 0xf2, 0x85, 0xa1, 0x45, 0x8b, 0x2d, 0x0e, 0x01, 0x08, 0x08, 0x00, 0x21, 0xf9,
0x04, 0x05, 0x03, 0x00, 0x00, 0x00, 0x2c, 0x17, 0x00, 0x2d, 0x00, 0x0d, 0x00, 0x03, 0x00, 0x00,
0x08, 0x23, 0x00, 0x01, 0x08, 0x04, 0xc0, 0xce, 0x9e, 0x41, 0x7b, 0xec, 0x06, 0x02, 0xa0, 0x17,
0xc9, 0x13, 0x8c, 0x16, 0x10, 0x5b, 0xb0, 0x30, 0xf5, 0x85, 0xdb, 0x22, 0x28, 0x2d, 0x14, 0x0e,
0x6c, 0xc1, 0x25, 0x58, 0x40, 0x00, 0x21, 0xf9, 0x04, 0x05, 0x03, 0x00, 0x00, 0x00, 0x2c, 0x17,
0x00, 0x2c, 0x00, 0x0d, 0x00, 0x04, 0x00, 0x00, 0x08, 0x27, 0x00, 0x01, 0x08, 0x1c, 0xa8, 0x8e,
0x1d, 0xbb, 0x81, 0x03, 0xd1, 0xd9, 0xfb, 0x62, 0x0a, 0x46, 0x8b, 0x16, 0x32, 0x3c, 0x7d, 0xb1,
0xc7, 0xed, 0x4a, 0x8e, 0x87, 0x08, 0x1f, 0xb6, 0x30, 0x06, 0x80, 0x0b, 0xc2, 0x8f, 0x02, 0x03,
0x02, 0x00, 0x21, 0xf9, 0x04, 0x05, 0x04, 0x00, 0x00, 0x00, 0x2c, 0x17, 0x00, 0x2c, 0x00, 0x0e,
0x00, 0x03, 0x00, 0x00, 0x08, 0x24, 0x00, 0x01, 0x08, 0x04, 0xc0, 0x0e, 0xdf, 0x33, 0x4f, 0xa6,
0x4c, 0x51, 0xfa, 0x62, 0xaf, 0x1d, 0x3a, 0x7b, 0x91, 0x3c, 0xd5, 0x68, 0xd1, 0x42, 0x20, 0xc5,
0x16, 0x2a, 0x9a, 0x19, 0xa3, 0x38, 0xb0, 0x63, 0xc7, 0x80, 0x00, 0x21, 0xf9, 0x04, 0x05, 0x06,
0x00, 0x00, 0x00, 0x2c, 0x16, 0x00, 0x2b, 0x00, 0x10, 0x00, 0x04, 0x00, 0x00, 0x08, 0x2b, 0x00,
0x01, 0x08, 0x1c, 0xa8, 0x4e, 0xdf, 0xb6, 0x2f, 0xcf, 0xbe, 0x44, 0xb2, 0x87, 0x6e, 0x20, 0x00,
0x76, 0xf6, 0x9e, 0x69, 0x69, 0x41, 0xb1, 0x62, 0x8b, 0x1c, 0x29, 0x52, 0x5c, 0x31, 0x05, 0x83,
0xa2, 0xc3, 0x8f, 0x02, 0x05, 0x81, 0x1c, 0x09, 0x20, 0x20, 0x00, 0x21, 0xf9, 0x04, 0x05, 0x04,
0x00, 0x00, 0x00, 0x2c, 0x15, 0x00, 0x2a, 0x00, 0x12, 0x00, 0x04, 0x00, 0x00, 0x08, 0x31, 0x00,
0x01, 0x08, 0x1c, 0x38, 0x90, 0x9d, 0xbd, 0x6d, 0xdb, 0xf0, 0xd9, 0x43, 0x47, 0x70, 0xa0, 0x3a,
0x7a, 0x91, 0x3c, 0xc1, 0x68, 0x41, 0x91, 0xa2, 0x8c, 0x4c, 0x02, 0xbf, 0x44, 0x8a, 0xf4, 0x4c,
0x8b, 0x8a, 0x16, 0x0d, 0x05, 0xae, 0x20, 0x24, 0xb0, 0x59, 0x0f, 0x8a, 0x21, 0x53, 0x06, 0x04,
0x00, 0x21, 0xf9, 0x04, 0x05, 0x03, 0x00, 0x01, 0x00, 0x2c, 0x14, 0x00, 0x29, 0x00, 0x13, 0x00,
0x04, 0x00, 0x00, 0x08, 0x32, 0x00, 0x03, 0x08, 0x1c, 0x48, 0x30, 0x80, 0x3a, 0x76, 0xf6, 0xec,
0xe9, 0x43, 0x57, 0xb0, 0x20, 0xc2, 0x67, 0x5a, 0x54, 0xb4, 0x98, 0x08, 0xc3, 0xd3, 0x11, 0x07,
0x60, 0xb6, 0x25, 0xb4, 0x17, 0x89, 0x92, 0x8c, 0x89, 0x05, 0x5b, 0x70, 0x69, 0x12, 0x80, 0xd0,
0x8a, 0x89, 0x28, 0x1b, 0x16, 0x0c, 0x08, 0x00, 0x21, 0xf9, 0x04, 0x05, 0x03, 0x00, 0x00, 0x00,
0x2c, 0x14, 0x00, 0x28, 0x00, 0x14, 0x00, 0x04, 0x00, 0x00, 0x08, 0x35, 0x00, 0x01, 0x08, 0x1c,
0x48, 0x90, 0x20, 0x3b, 0x76, 0xea, 0x06, 0xa2, 0x2b, 0x38, 0x50, 0x9d, 0xbe, 0x2f, 0x9e, 0x68,
0xa8, 0x50, 0x01, 0xc3, 0xd4, 0x17, 0x0a, 0xc6, 0xae, 0x6c, 0xb3, 0x77, 0x50, 0x1f, 0xbe, 0x67,
0x5a, 0x5a, 0x88, 0x1c, 0x28, 0xf2, 0x10, 0x80, 0x60, 0x4c, 0x44, 0xaa, 0x1c, 0xc9, 0x90, 0x60,
0x40, 0x00, 0x21, 0xf9, 0x04, 0x05, 0x07, 0x00, 0x00, 0x00, 0x2c, 0x13, 0x00, 0x28, 0x00, 0x15,
0x00, 0x03, 0x00, 0x00, 0x08, 0x34, 0x00, 0xb9, 0xb1, 0x03, 0x40, 0xb0, 0xa0, 0x41, 0x76, 0xdb,
0x9e, 0x99, 0x52, 0xa4, 0xc5, 0xd3, 0x33, 0x6e, 0x4d, 0x60, 0x68, 0x8a, 0xa4, 0x0f, 0x1d, 0x00,
0x75, 0xec, 0xec, 0x45, 0xf2, 0x54, 0xa3, 0x85, 0xc7, 0x8f, 0x3d, 0x84, 0x01, 0x38, 0xf4, 0xb1,
0x06, 0x0d, 0x1a, 0x2c, 0x3c, 0x1a, 0x5c, 0x09, 0x20, 0x20, 0x00, 0x21, 0xf9, 0x04, 0x05, 0x03,
0x00, 0x01, 0x00, 0x2c, 0x13, 0x00, 0x27, 0x00, 0x15, 0x00, 0x04, 0x00, 0x00, 0x08, 0x38, 0x00,
0xcf, 0x99, 0x0b, 0x40, 0xb0, 0xa0, 0xc1, 0x00, 0xe8, 0xec, 0x7d, 0xb9, 0xa2, 0x89, 0xd2, 0xb3,
0x73, 0x43, 0x54, 0x98, 0xfa, 0x62, 0x0f, 0x9d, 0x41, 0x73, 0xf8, 0x28, 0xd1, 0x68, 0xc1, 0xb1,
0xe3, 0x92, 0x00, 0x3d, 0x3a, 0xc2, 0x30, 0xe5, 0xc9, 0x14, 0x2f, 0x15, 0x1c, 0x0f, 0xaa, 0x24,
0xd8, 0xb1, 0xc5, 0xca, 0x83, 0x01, 0x01, 0x00, 0x21, 0xf9, 0x04, 0x05, 0x04, 0x00, 0x00, 0x00,
0x2c, 0x13, 0x00, 0x26, 0x00, 0x15, 0x00, 0x04, 0x00, 0x00, 0x08, 0x36, 0x00, 0xcf, 0xd9, 0x43,
0x07, 0xa0, 0xa0, 0xc1, 0x83, 0x00, 0xd4, 0xd1, 0xdb, 0xf6, 0xa5, 0xe1, 0x97, 0x61, 0x2d, 0x78,
0x3d, 0xc3, 0xc7, 0x0e, 0x21, 0x3a, 0x7b, 0x5f, 0xb4, 0xb4, 0xd8, 0xb8, 0xd1, 0x10, 0x00, 0x8e,
0x2d, 0x6a, 0x98, 0xba, 0x42, 0xd2, 0x93, 0x8c, 0x8d, 0x08, 0x53, 0x7e, 0x04, 0xd9, 0x42, 0xa5,
0xc1, 0x80, 0x00, 0x21, 0xf9, 0x04, 0x05, 0x03, 0x00, 0x00, 0x00, 0x2c, 0x13, 0x00, 0x25, 0x00,
0x15, 0x00, 0x04, 0x00, 0x00, 0x08, 0x35, 0x00, 0xf1, 0xd9, 0x43, 0x07, 0xa0, 0xa0, 0xc1, 0x83,
0x06, 0xd9, 0xd9, 0xfb, 0xf2, 0x65, 0x1b, 0xb0, 0x16, 0x30, 0x34, 0x7d, 0xa1, 0x87, 0x10, 0x80,
0x3a, 0x7d, 0x5f, 0x4a, 0xb1, 0x68, 0xc1, 0x31, 0x51, 0x41, 0x8e, 0x1c, 0xb5, 0x5c, 0x61, 0xf8,
0xe5, 0x0a, 0x2f, 0x8e, 0x15, 0x11, 0x82, 0x5c, 0x99, 0xb2, 0x60, 0x40, 0x00, 0x21, 0xf9, 0x04,
0x05, 0x07, 0x00, 0x00, 0x00, 0x2c, 0x13, 0x00, 0x24, 0x00, 0x15, 0x00, 0x04, 0x00, 0x00, 0x08,
0x38, 0x00, 0xf1, 0xd9, 0x63, 0x07, 0xa0, 0xa0, 0xc1, 0x83, 0x07, 0xd1, 0xe9, 0xb3, 0x87, 0x0f,
0x4a, 0x0b, 0x16, 0xa6, 0xbe, 0xd8, 0x43, 0x87, 0xb0, 0xa0, 0xb9, 0x6d, 0x94, 0x64, 0xb4, 0x68,
0x01, 0xa3, 0xe0, 0xc6, 0x8d, 0x34, 0x28, 0x45, 0xda, 0x86, 0x6f, 0xdb, 0x17, 0x53, 0x2c, 0x36,
0x56, 0x3c, 0xf8, 0xb1, 0xe5, 0x4a, 0x00, 0x01, 0x01, 0x00, 0x21, 0xf9, 0x04, 0x05, 0x03, 0x00,
0x00, 0x00, 0x2c, 0x13, 0x00, 0x23, 0x00, 0x15, 0x00, 0x04, 0x00, 0x00, 0x08, | |
"""
A Convolutional Encoding and Decoding
Copyright (c) March 2017, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
A forward error correcting coding (FEC) class which defines methods
for performing convolutional encoding and decoding. Arbitrary
polynomials are supported, but the rate is presently limited to r = 1/n,
where n = 2. Punctured (perforated) convolutional codes are also supported.
The puncturing pattern (matrix) is arbitrary.
Two popular encoder polynomial sets are:
K = 3 ==> G1 = '111', G2 = '101' and
K = 7 ==> G1 = '1011011', G2 = '1111001'.
A popular puncturing pattern to convert from rate 1/2 to rate 3/4 is
a G1 output puncture pattern of '110' and a G2 output puncture
pattern of '101'.
Graphical display functions are included to allow the user to
better understand the operation of the Viterbi decoder.
<NAME> and <NAME>: October 2018.
"""
import numpy as np
from math import factorial
from fractions import Fraction
import matplotlib.pyplot as plt
import warnings
from .digitalcom import q_fctn
from logging import getLogger
log = getLogger(__name__)
import warnings
# Data structure support classes
class TrellisNodes(object):
    """
    Container for trellis connectivity: per-state from-node (``fn``),
    to-node (``tn``), and encoder output bits (``out_bits``).

    Ns is the number of states = :math:`2^{(K-1)}` for constraint
    length K.
    """
    def __init__(self, Ns):
        self.Ns = Ns
        shape = (Ns, 1)  # one column vector entry per trellis state
        self.fn = np.zeros(shape, dtype=int)
        self.tn = np.zeros(shape, dtype=int)
        self.out_bits = np.zeros(shape, dtype=int)
class TrellisBranches(object):
    """
    Container for the two branches entering each trellis state: the
    pair of from-states (``states1``/``states2``), their encoder output
    bit patterns (``bits1``/``bits2``), and the input bits driving each
    transition (``input1``/``input2``).

    Ns is the number of states = :math:`2^{(K-1)}`.
    """
    def __init__(self, Ns):
        self.Ns = Ns
        shape = (Ns, 1)  # one column vector entry per trellis state
        self.states1 = np.zeros(shape, dtype=int)
        self.states2 = np.zeros(shape, dtype=int)
        self.bits1 = np.zeros(shape, dtype=int)
        self.bits2 = np.zeros(shape, dtype=int)
        self.input1 = np.zeros(shape, dtype=int)
        self.input2 = np.zeros(shape, dtype=int)
class TrellisPaths(object):
    """
    Container for surviving trellis paths: traceback_states,
    cumulative_metrics, and traceback_bits per state and decision-depth
    slot.

    Keeping the full decision-depth history of this information is not
    essential for decoding, but it enables the graphical depiction
    created by the method traceback_plot().

    Ns is the number of states = :math:`2^{(K-1)}` and D is the
    decision depth.  As a rule, D should be about 5 times K.
    """
    def __init__(self, Ns, D):
        self.Ns = Ns
        self.decision_depth = D
        history = (Ns, self.decision_depth)  # one row per state, one column per depth slot
        self.traceback_states = np.zeros(history, dtype=int)
        self.cumulative_metric = np.zeros(history, dtype=float)
        self.traceback_bits = np.zeros(history, dtype=int)
def binary(num, length=8):
    """
    Return *num* as a zero-padded binary string of *length* digits,
    without the leading '0b' prefix.
    """
    # Nested format spec: the inner {width} fills in the pad length.
    return '{num:0{width}b}'.format(num=num, width=length)
class FECConv(object):
"""
Class responsible for creating rate 1/2 convolutional code objects, and
then encoding and decoding the user code set in polynomials of G. Key
methods provided include :func:`conv_encoder`, :func:`viterbi_decoder`, :func:`puncture`,
:func:`depuncture`, :func:`trellis_plot`, and :func:`traceback_plot`.
Parameters
----------
G: A tuple of two binary strings corresponding to the encoder polynomials
Depth: The decision depth employed by the Viterbi decoder method
Returns
-------
Examples
--------
>>> from sk_dsp_comm import fec_conv
>>> # Rate 1/2
>>> cc1 = fec_conv.FECConv(('101', '111'), Depth=10) # decision depth is 10
>>> # Rate 1/3
>>> from sk_dsp_comm import fec_conv
>>> cc2 = fec_conv.FECConv(('101','011','111'), Depth=15) # decision depth is 15
"""
def __init__(self,G = ('111','101'), Depth = 10):
    """
    Instantiate a rate 1/2 or rate 1/3 convolutional coder/decoder object.

    Polynomials are entered as binary strings, e.g. for rate 1/2,
    G1 = '111' and G2 = '101' for K = 3, or
    G1 = '1111001' and G2 = '1011011' for K = 7; for rate 1/3,
    G1 = '111', G2 = '011' and G3 = '101' for K = 3, or
    G1 = '1111001', G2 = '1100101' and G3 = '1011011' for K = 7.

    The rate is selected automatically by the number of G polynomials
    (only rate 1/2 and 1/3 are available).  Viterbi decoding uses a
    decision depth of ``Depth``.  The data structures that manage the
    Viterbi algorithm are created here upon instantiation.

    Other good polynomial choices (taken from "Introduction to Digital
    Communication", Second Edition, by Ziemer and Peterson):

    Rate 1/2
        K=3 ('111','101'), K=4 ('1111','1101'), K=5 ('11101','10011'),
        K=6 ('111101','101011'), K=7 ('1111001','1011011'),
        K=8 ('11111001','10100111'), K=9 ('111101011','101110001')
    Rate 1/3
        K=3 ('111','111','101'), K=4 ('1111','1101','1011'),
        K=5 ('11111','11011','10101'), K=6 ('111101','101011','100111'),
        K=7 ('1111001','1100101','1011011'),
        K=8 ('11110111','11011001','10010101')

    Parameters
    ----------
    G : tuple of binary strings, one per encoder polynomial.
    Depth : int, the Viterbi decision depth.

    Raises
    ------
    ValueError : if the number of polynomials is not 2 or 3.
    """
    self.G_polys = G
    # Constraint length K is the length of one generator polynomial.
    self.constraint_length = len(self.G_polys[0])
    self.Nstates = 2**(self.constraint_length-1) # number of states
    self.decision_depth = Depth
    # Per-state trellis connectivity for an input bit of 0 and of 1.
    self.input_zero = TrellisNodes(self.Nstates)
    self.input_one = TrellisNodes(self.Nstates)
    self.paths = TrellisPaths(self.Nstates, self.decision_depth)
    # Code rate is 1/n where n is the number of generator polynomials.
    self.rate = Fraction(1,len(G))
    if(len(G) == 2 or len(G) == 3):
        log.info('Rate %s Object' %(self.rate))
    else:
        warnings.warn('Invalid rate. Use Rate 1/2 or 1/3 only')
        raise ValueError('Invalid rate. Use Rate 1/2 or 1/3 only')
    pass
    # Build the trellis transition tables: for each state m, run the
    # encoder one step with input 0 and input 1 to discover the next
    # state and the output bits of each branch.
    for m in range(self.Nstates):
        self.input_zero.fn[m] = m
        self.input_one.fn[m] = m
        # state labeling with LSB on right (more common)
        output0,state0 = self.conv_encoder([0],
            binary(m,self.constraint_length-1))
        output1,state1 = self.conv_encoder([1],
            binary(m,self.constraint_length-1))
        self.input_zero.tn[m] = int(state0,2)
        self.input_one.tn[m] = int(state1,2)
        # Pack the per-branch output bits into a single decimal value
        # (2 bits for rate 1/2, 3 bits for rate 1/3).
        if(self.rate == Fraction(1,2)):
            self.input_zero.out_bits[m] = 2*output0[0] + output0[1]
            self.input_one.out_bits[m] = 2*output1[0] + output1[1]
        elif(self.rate == Fraction(1,3)):
            self.input_zero.out_bits[m] = 4*output0[0] + 2*output0[1] + output0[2]
            self.input_one.out_bits[m] = 4*output1[0] + 2*output1[1] + output1[2]
    # Now organize the results into a branches_from structure that holds the
    # from state, the u2 u1 bit sequence in decimal form, and the input bit.
    # The index where this information is stored is the to state where survivors
    # are chosen from the two input branches.
    self.branches = TrellisBranches(self.Nstates)
    for m in range(self.Nstates):
        # Find which states transition INTO state m for each input bit.
        match_zero_idx = np.where(self.input_zero.tn == m)
        match_one_idx = np.where(self.input_one.tn == m)
        if len(match_zero_idx[0]) != 0:
            self.branches.states1[m] = self.input_zero.fn[match_zero_idx[0][0]]
            self.branches.states2[m] = self.input_zero.fn[match_zero_idx[0][1]]
            self.branches.bits1[m] = self.input_zero.out_bits[match_zero_idx[0][0]]
            self.branches.bits2[m] = self.input_zero.out_bits[match_zero_idx[0][1]]
            self.branches.input1[m] = 0
            self.branches.input2[m] = 0
        elif len(match_one_idx[0]) != 0:
            self.branches.states1[m] = self.input_one.fn[match_one_idx[0][0]]
            self.branches.states2[m] = self.input_one.fn[match_one_idx[0][1]]
            self.branches.bits1[m] = self.input_one.out_bits[match_one_idx[0][0]]
            self.branches.bits2[m] = self.input_one.out_bits[match_one_idx[0][1]]
            self.branches.input1[m] = 1
            self.branches.input2[m] = 1
        else:
            # Should be unreachable for a valid encoder: every state must
            # have incoming branches.  NOTE(review): this bails out of
            # __init__ early, leaving the object partially built.
            log.error('branch calculation error')
            return
def viterbi_decoder(self,x,metric_type='soft',quant_level=3):
"""
A method which performs Viterbi decoding of noisy bit stream,
taking as input soft bit values centered on +/-1 and returning
hard decision 0/1 bits.
Parameters
----------
x: Received noisy bit values centered on +/-1 at one sample per bit
metric_type:
'hard' - Hard decision metric. Expects binary or 0/1 input values.
'unquant' - unquantized soft decision decoding. Expects +/-1
input values.
'soft' - soft decision decoding.
quant_level: The quantization level for soft decoding. Expected
input values between 0 and 2^quant_level-1. 0 represents the most
confident 0 and 2^quant_level-1 represents the most confident 1.
Only used for 'soft' metric type.
Returns
-------
y: Decoded 0/1 bit stream
Examples
--------
>>> import numpy as np
>>> from numpy.random import randint
>>> import sk_dsp_comm.fec_conv as fec
>>> import sk_dsp_comm.digitalcom as dc
>>> import matplotlib.pyplot as plt
>>> # Soft decision rate 1/2 simulation
>>> N_bits_per_frame = 10000
>>> EbN0 = 4
>>> total_bit_errors = 0
>>> total_bit_count = 0
>>> cc1 = fec.FECConv(('11101','10011'),25)
>>> # Encode with shift register starting state of '0000'
>>> state = '0000'
>>> while total_bit_errors < 100:
>>> # Create 100000 random 0/1 bits
>>> x = randint(0,2,N_bits_per_frame)
>>> y,state = cc1.conv_encoder(x,state)
>>> # Add channel noise | |
During this time, you can\'t invoke or modify the function. The State , StateReason , and StateReasonCode fields in the response from GetFunctionConfiguration indicate when the function is ready to invoke. For more information, see Function States .
A function has an unpublished version, and can have published versions and aliases. The unpublished version changes when you update your function\'s code and configuration. A published version is a snapshot of your function code and configuration that can\'t be changed. An alias is a named resource that maps to a version, and can be changed to map to a different version. Use the Publish parameter to create version 1 of your function from its initial configuration.
The other parameters let you configure version-specific and function-level settings. You can modify version-specific settings later with UpdateFunctionConfiguration . Function-level settings apply to both the unpublished and published versions of the function, and include tags ( TagResource ) and per-function concurrency limits ( PutFunctionConcurrency ).
If another account or an AWS service invokes your function, use AddPermission to grant permission by creating a resource-based IAM policy. You can grant permissions at the function level, on a version, or on an alias.
To invoke your function directly, use Invoke . To invoke your function in response to events in other AWS services, create an event source mapping ( CreateEventSourceMapping ), or configure a function trigger in the other service. For more information, see Invoking Functions .
See also: AWS API Documentation
Exceptions
Examples
The following example creates a function with a deployment package in Amazon S3 and enables X-Ray tracing and environment variable encryption.
Expected Output:
:example: response = client.create_function(
FunctionName='string',
Runtime='nodejs'|'nodejs4.3'|'nodejs6.10'|'nodejs8.10'|'nodejs10.x'|'nodejs12.x'|'java8'|'java11'|'python2.7'|'python3.6'|'python3.7'|'python3.8'|'dotnetcore1.0'|'dotnetcore2.0'|'dotnetcore2.1'|'dotnetcore3.1'|'nodejs4.3-edge'|'go1.x'|'ruby2.5'|'ruby2.7'|'provided',
Role='string',
Handler='string',
Code={
'ZipFile': b'bytes',
'S3Bucket': 'string',
'S3Key': 'string',
'S3ObjectVersion': 'string'
},
Description='string',
Timeout=123,
MemorySize=123,
Publish=True|False,
VpcConfig={
'SubnetIds': [
'string',
],
'SecurityGroupIds': [
'string',
]
},
DeadLetterConfig={
'TargetArn': 'string'
},
Environment={
'Variables': {
'string': 'string'
}
},
KMSKeyArn='string',
TracingConfig={
'Mode': 'Active'|'PassThrough'
},
Tags={
'string': 'string'
},
Layers=[
'string',
]
)
:type FunctionName: string
:param FunctionName: [REQUIRED]\nThe name of the Lambda function.\n\nName formats\n\nFunction name - my-function .\nFunction ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function .\nPartial ARN - 123456789012:function:my-function .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.\n
:type Runtime: string
:param Runtime: [REQUIRED]\nThe identifier of the function\'s runtime .\n
:type Role: string
:param Role: [REQUIRED]\nThe Amazon Resource Name (ARN) of the function\'s execution role.\n
:type Handler: string
:param Handler: [REQUIRED]\nThe name of the method within your code that Lambda calls to execute your function. The format includes the file name. It can also include namespaces and other qualifiers, depending on the runtime. For more information, see Programming Model .\n
:type Code: dict
:param Code: [REQUIRED]\nThe code for the function.\n\nZipFile (bytes) --The base64-encoded contents of the deployment package. AWS SDK and AWS CLI clients handle the encoding for you.\n\nS3Bucket (string) --An Amazon S3 bucket in the same AWS Region as your function. The bucket can be in a different AWS account.\n\nS3Key (string) --The Amazon S3 key of the deployment package.\n\nS3ObjectVersion (string) --For versioned objects, the version of the deployment package object to use.\n\n\n
:type Description: string
:param Description: A description of the function.
:type Timeout: integer
:param Timeout: The amount of time that Lambda allows a function to run before stopping it. The default is 3 seconds. The maximum allowed value is 900 seconds.
:type MemorySize: integer
:param MemorySize: The amount of memory that your function has access to. Increasing the function\'s memory also increases its CPU allocation. The default value is 128 MB. The value must be a multiple of 64 MB.
:type Publish: boolean
:param Publish: Set to true to publish the first version of the function during creation.
:type VpcConfig: dict
:param VpcConfig: For network connectivity to AWS resources in a VPC, specify a list of security groups and subnets in the VPC. When you connect a function to a VPC, it can only access resources and the internet through that VPC. For more information, see VPC Settings .\n\nSubnetIds (list) --A list of VPC subnet IDs.\n\n(string) --\n\n\nSecurityGroupIds (list) --A list of VPC security groups IDs.\n\n(string) --\n\n\n\n
:type DeadLetterConfig: dict
:param DeadLetterConfig: A dead letter queue configuration that specifies the queue or topic where Lambda sends asynchronous events when they fail processing. For more information, see Dead Letter Queues .\n\nTargetArn (string) --The Amazon Resource Name (ARN) of an Amazon SQS queue or Amazon SNS topic.\n\n\n
:type Environment: dict
:param Environment: Environment variables that are accessible from function code during execution.\n\nVariables (dict) --Environment variable key-value pairs.\n\n(string) --\n(string) --\n\n\n\n\n\n
:type KMSKeyArn: string
:param KMSKeyArn: The ARN of the AWS Key Management Service (AWS KMS) key that\'s used to encrypt your function\'s environment variables. If it\'s not provided, AWS Lambda uses a default service key.
:type TracingConfig: dict
:param TracingConfig: Set Mode to Active to sample and trace a subset of incoming requests with AWS X-Ray.\n\nMode (string) --The tracing mode.\n\n\n
:type Tags: dict
:param Tags: A list of tags to apply to the function.\n\n(string) --\n(string) --\n\n\n\n
:type Layers: list
:param Layers: A list of function layers to add to the function\'s execution environment. Specify each layer by its ARN, including the version.\n\n(string) --\n\n
:rtype: dict
Returns
Response Syntax
{
'FunctionName': 'string',
'FunctionArn': 'string',
'Runtime': 'nodejs'|'nodejs4.3'|'nodejs6.10'|'nodejs8.10'|'nodejs10.x'|'nodejs12.x'|'java8'|'java11'|'python2.7'|'python3.6'|'python3.7'|'python3.8'|'dotnetcore1.0'|'dotnetcore2.0'|'dotnetcore2.1'|'dotnetcore3.1'|'nodejs4.3-edge'|'go1.x'|'ruby2.5'|'ruby2.7'|'provided',
'Role': 'string',
'Handler': 'string',
'CodeSize': 123,
'Description': 'string',
'Timeout': 123,
'MemorySize': 123,
'LastModified': 'string',
'CodeSha256': 'string',
'Version': 'string',
'VpcConfig': {
'SubnetIds': [
'string',
],
'SecurityGroupIds': [
'string',
],
'VpcId': 'string'
},
'DeadLetterConfig': {
'TargetArn': 'string'
},
'Environment': {
'Variables': {
'string': 'string'
},
'Error': {
'ErrorCode': 'string',
'Message': 'string'
}
},
'KMSKeyArn': 'string',
'TracingConfig': {
'Mode': 'Active'|'PassThrough'
},
'MasterArn': 'string',
'RevisionId': 'string',
'Layers': [
{
'Arn': 'string',
'CodeSize': 123
},
],
'State': 'Pending'|'Active'|'Inactive'|'Failed',
'StateReason': 'string',
'StateReasonCode': 'Idle'|'Creating'|'Restoring'|'EniLimitExceeded'|'InsufficientRolePermissions'|'InvalidConfiguration'|'InternalError'|'SubnetOutOfIPAddresses'|'InvalidSubnet'|'InvalidSecurityGroup',
'LastUpdateStatus': 'Successful'|'Failed'|'InProgress',
'LastUpdateStatusReason': 'string',
'LastUpdateStatusReasonCode': 'EniLimitExceeded'|'InsufficientRolePermissions'|'InvalidConfiguration'|'InternalError'|'SubnetOutOfIPAddresses'|'InvalidSubnet'|'InvalidSecurityGroup'
}
Response Structure
(dict) --
Details about a function\'s configuration.
FunctionName (string) --
The name of the function.
FunctionArn (string) --
The function\'s Amazon Resource Name (ARN).
Runtime (string) --
The runtime environment for the Lambda function.
Role (string) --
The function\'s execution role.
Handler (string) --
The function that Lambda calls to begin executing your function.
CodeSize (integer) --
The size of the function\'s deployment package, in bytes.
Description (string) --
The function\'s description.
Timeout (integer) --
The amount of time in seconds that Lambda allows a function to run before stopping it.
MemorySize (integer) --
The memory that\'s allocated to the function.
LastModified (string) --
The date and time that the function was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).
CodeSha256 (string) --
The SHA256 hash of the function\'s deployment package.
Version (string) --
The version of the Lambda function.
VpcConfig (dict) --
The function\'s networking configuration.
SubnetIds (list) --
A list of VPC subnet IDs.
(string) --
SecurityGroupIds (list) --
A list of VPC security groups IDs.
(string) --
VpcId (string) --
The ID of the VPC.
DeadLetterConfig (dict) --
The function\'s dead letter queue.
TargetArn (string) --
The Amazon Resource Name (ARN) of an Amazon SQS queue or Amazon SNS topic.
Environment (dict) --
The function\'s environment variables.
Variables (dict) --
Environment variable key-value pairs.
(string) --
(string) --
Error (dict) --
Error messages for environment variables that couldn\'t be applied.
ErrorCode (string) --
The error code.
Message (string) --
The error message.
KMSKeyArn (string) --
The KMS key that\'s used to encrypt the function\'s environment variables. This key is only returned if you\'ve configured a customer managed CMK.
TracingConfig (dict) --
The function\'s AWS X-Ray tracing configuration.
Mode (string) --
The tracing mode.
MasterArn (string) --
For Lambda@Edge functions, the ARN of the master function.
RevisionId (string) --
The latest updated revision of the function or alias.
Layers (list) --
The function\'s layers .
(dict) --
An AWS Lambda layer .
Arn (string) --
The Amazon Resource Name (ARN) of the function layer.
CodeSize (integer) --
The size of the layer archive in bytes.
State (string) --
The current state of the function. When the state is Inactive , you can reactivate the function by invoking it.
StateReason (string) --
The reason for the function\'s current state.
StateReasonCode (string) --
The reason code for the function\'s current state. When the code is Creating , you can\'t invoke or modify the function.
LastUpdateStatus | |
size=5)
if self.plot_vim_data:
self.vim_data_win = self.add_new_plot_window(title='VIM',
x_axis_title='MonteCarlo Step (MCS)',
y_axis_title='Variables', x_scale_type='linear',
y_scale_type='linear',
grid=False,
config_options={'legend': True})
self.vim_data_win.add_plot("R", style='Dots', color='orange', size=5)
# Initialize population data plot if requested
if self.plot_pop_data:
self.pop_data_win = self.add_new_plot_window(title='Population data',
x_axis_title='MCS',
y_axis_title='Numer of cells',
x_scale_type='linear',
y_scale_type='log',
grid=True,
config_options={'legend': True})
self.pop_data_win.add_plot("Uninfected", style='Dots', color='blue', size=5)
self.pop_data_win.add_plot("Infected", style='Dots', color='red', size=5)
self.pop_data_win.add_plot("VirusReleasing", style='Dots', color='green', size=5)
self.pop_data_win.add_plot("Dying", style='Dots', color='yellow', size=5)
self.pop_data_win.add_plot("ImmuneCell", style='Dots', color='white', size=5)
self.pop_data_win.add_plot("ImmuneCellActivated", style='Dots', color='purple', size=5)
if self.plot_med_diff_data:
self.med_diff_data_win = self.add_new_plot_window(title='Total diffusive species',
x_axis_title='MCS',
y_axis_title='Number of diffusive species per volume',
x_scale_type='linear',
y_scale_type='log',
grid=True,
config_options={'legend': True})
self.med_diff_data_win.add_plot("MedViral", style='Dots', color='red', size=5)
self.med_diff_data_win.add_plot("MedCyt", style='Dots', color='blue', size=5)
self.med_diff_data_win.add_plot("MedOxi", style='Dots', color='green', size=5)
if self.plot_ir_data:
self.ir_data_win = self.add_new_plot_window(title='Immune Response Model',
x_axis_title='MCS',
y_axis_title='State variable S',
x_scale_type='linear',
y_scale_type='linear',
grid=True)
self.ir_data_win.add_plot(self.ir_key, style='Dots', color='red', size=5)
if self.plot_spat_data:
self.spat_data_win = self.add_new_plot_window(title='Spatial data',
x_axis_title='MCS',
y_axis_title='',
x_scale_type='linear',
y_scale_type='linear',
grid=True,
config_options={'legend': True})
self.spat_data_win.add_plot("DeathComp", style='Dots', color='red', size=5)
self.spat_data_win.add_plot("InfectDist", style='Dots', color='blue', size=5)
if self.plot_death_data:
self.death_data_win = self.add_new_plot_window(title='Death data',
x_axis_title='MCS',
y_axis_title='Numer of cells',
x_scale_type='linear',
y_scale_type='log',
grid=True,
config_options={'legend': True})
self.death_data_win.add_plot("Viral", style='Dots', color='blue', size=5)
self.death_data_win.add_plot("OxiField", style='Dots', color='red', size=5)
self.death_data_win.add_plot("Contact", style='Dots', color='green', size=5)
self.death_data_win.add_plot("Bystander", style='Dots', color='yellow', size=5)
# Check that output directory is available
if self.output_dir is not None:
from pathlib import Path
if self.write_vrm_data:
self.vrm_data_path = Path(self.output_dir).joinpath('vrm_data.dat')
with open(self.vrm_data_path, 'w'):
pass
if self.write_vim_data:
self.vim_data_path = Path(self.output_dir).joinpath('vim_data.dat')
with open(self.vim_data_path, 'w'):
pass
if self.write_pop_data:
self.pop_data_path = Path(self.output_dir).joinpath('pop_data.dat')
with open(self.pop_data_path, 'w'):
pass
if self.write_med_diff_data:
self.med_diff_data_path = Path(self.output_dir).joinpath('med_diff_data.dat')
with open(self.med_diff_data_path, 'w'):
pass
if self.write_ir_data:
self.ir_data_path = Path(self.output_dir).joinpath('ir_data.dat')
with open(self.ir_data_path, 'w'):
pass
if self.write_spat_data:
self.spat_data_path = Path(self.output_dir).joinpath('spat_data.dat')
with open(self.spat_data_path, 'w'):
pass
if self.write_death_data:
self.death_data_path = Path(self.output_dir).joinpath('death_data.dat')
with open(self.death_data_path, 'w'):
pass
def step(self, mcs):
plot_pop_data = self.plot_pop_data and mcs % plot_pop_data_freq == 0
plot_med_diff_data = self.plot_med_diff_data and mcs % plot_med_diff_data_freq == 0
plot_ir_data = self.plot_ir_data and mcs % plot_ir_data_freq == 0
plot_vrm_data = self.plot_vrm_data and mcs % plot_vrm_data_freq == 0
plot_vim_data = self.plot_vim_data and mcs % plot_vim_data_freq == 0
plot_spat_data = self.plot_spat_data and mcs % plot_spat_data_freq == 0
plot_death_data = self.plot_death_data and mcs % plot_death_data_freq == 0
if self.output_dir is not None:
write_pop_data = self.write_pop_data and mcs % write_pop_data_freq == 0
write_med_diff_data = self.write_med_diff_data and mcs % write_med_diff_data_freq == 0
write_ir_data = self.write_ir_data and mcs % write_ir_data_freq == 0
write_vrm_data = self.write_vrm_data and mcs % write_vrm_data_freq == 0
write_vim_data = self.write_vim_data and mcs % write_vim_data_freq == 0
write_spat_data = self.write_spat_data and mcs % write_spat_data_freq == 0
write_death_data = self.write_death_data and mcs % write_death_data_freq == 0
else:
write_pop_data = False
write_med_diff_data = False
write_ir_data = False
write_vrm_data = False
write_vim_data = False
write_spat_data = False
write_death_data = False
if self.vrm_tracked_cell is not None and (plot_vrm_data or write_vrm_data):
if plot_vrm_data:
self.vrm_data_win.add_data_point("U", mcs, self.vrm_tracked_cell.dict['Unpacking'])
self.vrm_data_win.add_data_point("R", mcs, self.vrm_tracked_cell.dict['Replicating'])
self.vrm_data_win.add_data_point("P", mcs, self.vrm_tracked_cell.dict['Packing'])
self.vrm_data_win.add_data_point("A", mcs, self.vrm_tracked_cell.dict['Assembled'])
self.vrm_data_win.add_data_point("Uptake", mcs, self.vrm_tracked_cell.dict['Uptake'])
self.vrm_data_win.add_data_point("Secretion", mcs, self.vrm_tracked_cell.dict['Secretion'])
if write_vrm_data:
with open(self.vrm_data_path, 'a') as fout:
fout.write('{}, {}, {}, {}, {}, {}, {}, {}\n'.format(mcs,
self.vrm_tracked_cell.id,
self.vrm_tracked_cell.dict['Unpacking'],
self.vrm_tracked_cell.dict['Replicating'],
self.vrm_tracked_cell.dict['Packing'],
self.vrm_tracked_cell.dict['Assembled'],
self.vrm_tracked_cell.dict['Uptake'],
self.vrm_tracked_cell.dict['Secretion']))
if self.vrm_tracked_cell is not None and (plot_vim_data or write_vim_data):
if plot_vim_data:
self.vim_data_win.add_data_point("R", mcs, self.vrm_tracked_cell.dict['Receptors'])
if write_vim_data:
with open(self.vim_data_path, 'a') as fout:
fout.write('{}, {}, {}\n'.format(mcs,
self.vrm_tracked_cell.id,
self.vrm_tracked_cell.dict['Receptors']))
if plot_pop_data or write_pop_data:
# Gather population data
num_cells_uninfected = len(self.cell_list_by_type(self.UNINFECTED))
num_cells_infected = len(self.cell_list_by_type(self.INFECTED))
num_cells_virusreleasing = len(self.cell_list_by_type(self.VIRUSRELEASING))
num_cells_dying = len(self.cell_list_by_type(self.DYING))
num_cells_immune = len(self.cell_list_by_type(self.IMMUNECELL))
num_cells_immune_act = len([c for c in self.cell_list_by_type(self.IMMUNECELL) if c.dict['activated']])
# Plot population data plot if requested
if plot_pop_data:
if num_cells_uninfected > 0:
self.pop_data_win.add_data_point('Uninfected', mcs, num_cells_uninfected)
if num_cells_infected > 0:
self.pop_data_win.add_data_point('Infected', mcs, num_cells_infected)
if num_cells_virusreleasing > 0:
self.pop_data_win.add_data_point('VirusReleasing', mcs, num_cells_virusreleasing)
if num_cells_dying > 0:
self.pop_data_win.add_data_point('Dying', mcs, num_cells_dying)
if num_cells_immune > 0:
self.pop_data_win.add_data_point('ImmuneCell', mcs, num_cells_immune)
if num_cells_immune_act > 0:
self.pop_data_win.add_data_point('ImmuneCellActivated', mcs, num_cells_immune_act)
# Write population data to file if requested
if write_pop_data:
with open(self.pop_data_path, 'a') as fout:
fout.write('{}, {}, {}, {}, {}, {}, {}\n'.format(mcs,
num_cells_uninfected,
num_cells_infected,
num_cells_virusreleasing,
num_cells_dying,
num_cells_immune,
num_cells_immune_act))
if plot_med_diff_data or write_med_diff_data:
# Gather total diffusive amounts
med_viral_total = 0.0
med_cyt_total = 0.0
med_oxi_total = 0.0
try:
med_viral_total = self.get_field_secretor("Virus").totalFieldIntegral()
med_cyt_total = self.get_field_secretor("cytokine").totalFieldIntegral()
med_oxi_total = self.get_field_secretor("oxidator").totalFieldIntegral()
except AttributeError: # Pre-v4.2.1 CC3D
for x, y, z in self.every_pixel():
med_viral_total += self.field.Virus[x, y, z]
med_cyt_total += self.field.cytokine[x, y, z]
med_oxi_total += self.field.oxidator[x, y, z]
# Plot total diffusive viral amount if requested
if plot_med_diff_data:
if med_viral_total > 0:
self.med_diff_data_win.add_data_point("MedViral", mcs, med_viral_total)
if med_cyt_total > 0:
self.med_diff_data_win.add_data_point("MedCyt", mcs, med_cyt_total)
if med_oxi_total > 0:
self.med_diff_data_win.add_data_point("MedOxi", mcs, med_oxi_total)
# Write total diffusive viral amount if requested
if write_med_diff_data:
with open(self.med_diff_data_path, 'a') as fout:
fout.write('{}, {}, {}, {}\n'.format(mcs, med_viral_total, med_cyt_total, med_oxi_total))
if plot_ir_data or write_ir_data:
if self.ir_steppable is None:
if self.ir_steppable is None:
self.ir_steppable: ImmuneRecruitmentSteppable = self.shared_steppable_vars[
ViralInfectionVTMLib.ir_steppable_key]
s_val = self.ir_steppable.get_state_variable_val()
# Plot state variable S if requested
if plot_ir_data:
self.ir_data_win.add_data_point(self.ir_key, mcs, s_val)
# Write state variable S if requested
if write_ir_data:
with open(self.ir_data_path, 'a') as fout:
fout.write('{}, {}\n'.format(mcs, s_val))
if plot_spat_data or write_spat_data:
# Calculate compactness of dead cell area as total surface area of intefaces between dying and non-dying
# types in epithelial sheet divided by total volume of dying types
dead_srf = 0
dead_vol = 0
dying_cell_list = self.cell_list_by_type(self.DYING)
if not dying_cell_list:
dead_comp = 0
else:
for cell in dying_cell_list:
dead_vol += cell.volume
for neighbor, common_srf in self.get_cell_neighbor_data_list(cell):
if neighbor is not None and neighbor.type in [self.UNINFECTED,
self.INFECTED,
self.VIRUSRELEASING]:
dead_srf += common_srf
dead_comp = dead_srf / dead_vol
# Calculate infection front: max. distance from initial point of infection to all infected cells
# If no infected cells, distance is -1
max_infect_dist = -1
if self.init_infect_pt is None:
infected_cell_list = self.cell_list_by_type(self.INFECTED, self.VIRUSRELEASING)
num_cells_infected = len(infected_cell_list)
if num_cells_infected > 0:
self.init_infect_pt = [0, 0, 0]
for cell in infected_cell_list:
self.init_infect_pt[0] += cell.xCOM
self.init_infect_pt[1] += cell.yCOM
self.init_infect_pt[0] /= num_cells_infected
self.init_infect_pt[1] /= num_cells_infected
if self.init_infect_pt is not None:
for cell in self.cell_list_by_type(self.INFECTED, self.VIRUSRELEASING):
dx = cell.xCOM - self.init_infect_pt[0]
dy = cell.yCOM - self.init_infect_pt[1]
max_infect_dist = max(max_infect_dist, math.sqrt(dx * dx + dy * dy))
# Plot spatial data if requested
# Infection distance is normalized by average lattice dimension
if plot_spat_data:
self.spat_data_win.add_data_point("DeathComp", mcs, dead_comp)
if max_infect_dist > 0:
max_infect_dist_norm = max_infect_dist / ((self.dim.x + self.dim.y) / 2.0)
self.spat_data_win.add_data_point("InfectDist", mcs, max_infect_dist_norm)
# Write spatial data if requested
if write_spat_data:
with open(self.spat_data_path, 'a') as fout:
fout.write('{}, {}, {}\n'.format(mcs, dead_comp, max_infect_dist))
if plot_death_data or write_death_data:
num_viral = self.__death_mech['viral']
num_oxi = self.__death_mech['oxi']
num_contact = self.__death_mech['contact']
num_bystander = self.__death_mech['bystander']
# Plot death data if requested
if plot_death_data:
if num_viral > 0:
self.death_data_win.add_data_point("Viral", mcs, num_viral)
if num_oxi > 0:
self.death_data_win.add_data_point("OxiField", mcs, num_oxi)
if num_contact > 0:
self.death_data_win.add_data_point("Contact", mcs, num_contact)
if num_bystander > 0:
self.death_data_win.add_data_point("Bystander", mcs, num_bystander)
# Write death data if requested
if write_death_data:
with open(self.death_data_path, 'a') as fout:
fout.write('{}, {}, {}, {}, {}\n'.format(mcs, num_viral, num_oxi, num_contact, num_bystander))
def set_vrm_tracked_cell(self, cell):
self.vrm_tracked_cell = cell
def track_death_viral(self):
self.__death_mech['viral'] += 1
def track_death_oxi_field(self):
self.__death_mech['oxi'] += 1
def track_death_contact(self):
self.__death_mech['contact'] += 1
def track_death_bystander(self):
self.__death_mech['bystander'] += 1
class CytokineProductionAbsorptionSteppable(ViralInfectionVTMSteppableBasePy):
"""
Implements cytokine production/secretion and immune cell activation module
"""
def __init__(self, frequency=1):
ViralInfectionVTMSteppableBasePy.__init__(self, frequency)
if track_model_variables:
self.track_cell_level_scalar_attribute(field_name='activated', attribute_name='activated')
self.ck_secretor = None
self.virus_secretor = None
# Reference to ImmuneResponseSteppable
self.ir_steppable = None
def start(self):
# cytokine diff parameters
self.get_xml_element('cytokine_dc').cdata = cytokine_dc
self.get_xml_element('cytokine_decay').cdata = cytokine_field_decay
for cell in self.cell_list_by_type(self.IMMUNECELL):
# cytokine production/uptake parameters for immune cells
cell.dict['ck_production'] = max_ck_secrete_im
cell.dict['ck_consumption'] = max_ck_consume
for cell in self.cell_list_by_type(self.INFECTED, self.VIRUSRELEASING):
cell.dict['ck_production'] = max_ck_secrete_infect
self.ck_secretor = self.get_field_secretor("cytokine")
self.virus_secretor = self.get_field_secretor("Virus")
def step(self, mcs):
if self.ir_steppable is None:
self.ir_steppable: ImmuneRecruitmentSteppable = \
self.shared_steppable_vars[ViralInfectionVTMLib.ir_steppable_key]
# Track the total amount added and subtracted to the cytokine field
total_ck_inc = 0.0
for cell in self.cell_list_by_type(self.INFECTED, self.VIRUSRELEASING):
viral_load = ViralInfectionVTMLib.get_assembled_viral_load_inside_cell(cell, vr_step_size)
produced = cell.dict['ck_production'] * nCoVUtils.hill_equation(viral_load, ec50_infecte_ck_prod, 2)
res = self.ck_secretor.secreteInsideCellTotalCount(cell, produced / cell.volume)
total_ck_inc += res.tot_amount
for cell in self.cell_list_by_type(self.IMMUNECELL):
self.virus_secretor.uptakeInsideCellTotalCount(cell, cell.dict['ck_consumption'] / cell.volume, 0.1)
up_res = self.ck_secretor.uptakeInsideCellTotalCount(cell,
cell.dict['ck_consumption'] / cell.volume, 0.1)
# decay seen ck
cell.dict['tot_ck_upt'] *= ck_memory_immune
# uptake ck
cell.dict['tot_ck_upt'] -= up_res.tot_amount # from POV of secretion uptake is negative
total_ck_inc += up_res.tot_amount
p_activate = nCoVUtils.hill_equation(cell.dict['tot_ck_upt'], EC50_ck_immune, 2)
if rng.uniform() < p_activate and not cell.dict['activated']:
cell.dict['activated'] = True
cell.dict['time_activation'] = mcs
elif (cell.dict['activated']
and mcs - cell.dict['time_activation'] > minimum_activated_time):
cell.dict['activated'] = False
cell.dict['time_activation'] = - 99
if cell.dict['activated']:
seen_field = self.total_seen_field(self.field.cytokine, cell)
produced = | |
Ending number of ionic step.
TODO(rambalachandran):
Requires a check to ensure if the new concatenating file has the
same lattice structure and atoms as the Xdatcar class.
"""
preamble = None
coords_str = []
structures = self.structures
preamble_done = False
if ionicstep_start < 1:
raise Exception("Start ionic step cannot be less than 1")
if ionicstep_end is not None and ionicstep_start < 1:
raise Exception("End ionic step cannot be less than 1")
# pylint: disable=E1136
ionicstep_cnt = 1
with zopen(filename, "rt") as f:
for l in f:
l = l.strip()
if preamble is None:
preamble = [l]
elif not preamble_done:
if l == "" or "Direct configuration=" in l:
preamble_done = True
tmp_preamble = [preamble[0]]
for i in range(1, len(preamble)):
if preamble[0] != preamble[i]:
tmp_preamble.append(preamble[i])
else:
break
preamble = tmp_preamble
else:
preamble.append(l)
elif l == "" or "Direct configuration=" in l:
p = Poscar.from_string("\n".join(preamble + ["Direct"] + coords_str))
if ionicstep_end is None:
if ionicstep_cnt >= ionicstep_start:
structures.append(p.structure)
else:
if ionicstep_start <= ionicstep_cnt < ionicstep_end:
structures.append(p.structure)
ionicstep_cnt += 1
coords_str = []
else:
coords_str.append(l)
p = Poscar.from_string("\n".join(preamble + ["Direct"] + coords_str))
if ionicstep_end is None:
if ionicstep_cnt >= ionicstep_start:
structures.append(p.structure)
else:
if ionicstep_start <= ionicstep_cnt < ionicstep_end:
structures.append(p.structure)
self.structures = structures
def get_string(self, ionicstep_start=1, ionicstep_end=None, significant_figures=8):
"""
Write Xdatcar class to a string.
Args:
ionicstep_start (int): Starting number of ionic step.
ionicstep_end (int): Ending number of ionic step.
significant_figures (int): Number of significant figures.
"""
if ionicstep_start < 1:
raise Exception("Start ionic step cannot be less than 1")
if ionicstep_end is not None and ionicstep_end < 1:
raise Exception("End ionic step cannot be less than 1")
latt = self.structures[0].lattice
if np.linalg.det(latt.matrix) < 0:
latt = Lattice(-latt.matrix)
lines = [self.comment, "1.0", str(latt)]
lines.append(" ".join(self.site_symbols))
lines.append(" ".join([str(x) for x in self.natoms]))
format_str = f"{{:.{significant_figures}f}}"
ionicstep_cnt = 1
output_cnt = 1
for cnt, structure in enumerate(self.structures):
ionicstep_cnt = cnt + 1
if ionicstep_end is None:
if ionicstep_cnt >= ionicstep_start:
lines.append("Direct configuration=" + " " * (7 - len(str(output_cnt))) + str(output_cnt))
for (i, site) in enumerate(structure):
coords = site.frac_coords
line = " ".join([format_str.format(c) for c in coords])
lines.append(line)
output_cnt += 1
else:
if ionicstep_start <= ionicstep_cnt < ionicstep_end:
lines.append("Direct configuration=" + " " * (7 - len(str(output_cnt))) + str(output_cnt))
for (i, site) in enumerate(structure):
coords = site.frac_coords
line = " ".join([format_str.format(c) for c in coords])
lines.append(line)
output_cnt += 1
return "\n".join(lines) + "\n"
def write_file(self, filename, **kwargs):
"""
Write Xdatcar class into a file.
Args:
filename (str): Filename of output XDATCAR file.
The supported kwargs are the same as those for the
Xdatcar.get_string method and are passed through directly.
"""
with zopen(filename, "wt") as f:
f.write(self.get_string(**kwargs))
def __str__(self):
return self.get_string()
class Dynmat:
    """
    Object for reading a DYNMAT file.

    .. attribute:: data

        A nested dict containing the DYNMAT data of the form::
            [atom <int>][disp <int>]['dispvec'] =
                displacement vector (part of first line in dynmat block, e.g. "0.01 0 0")
            [atom <int>][disp <int>]['dynmat'] =
                    <list> list of dynmat lines for this atom and this displacement

    Authors: <NAME>
    """

    def __init__(self, filename):
        """
        Args:
            filename: Name of file containing DYNMAT
        """
        with zopen(filename, "rt") as f:
            lines = list(clean_lines(f.readlines()))
            self._nspecs, self._natoms, self._ndisps = map(int, lines[0].split())
            # Materialize the masses now: storing the bare map() iterator would make
            # the `masses` property one-shot (empty after its first access).
            self._masses = list(map(float, lines[1].split()))
            self.data = defaultdict(dict)
            atom, disp = None, None
            for i, line in enumerate(lines[2:]):
                v = list(map(float, line.split()))
                if not i % (self._natoms + 1):
                    # Block header line: "<atom> <disp> <dx> <dy> <dz>"
                    atom, disp = map(int, v[:2])
                    self.data[atom].setdefault(disp, {})["dispvec"] = v[2:]
                else:
                    # Force-constant row belonging to the current (atom, disp) block
                    self.data[atom][disp].setdefault("dynmat", []).append(v)

    def get_phonon_frequencies(self):
        """calculate phonon frequencies"""
        # TODO: the following is most likely not correct or suboptimal
        # hence for demonstration purposes only
        frequencies = []
        for k, v0 in self.data.items():
            # dict.itervalues() was Python-2-only and raised AttributeError on
            # Python 3; .values() is correct on both.
            for v1 in v0.values():
                vec = map(abs, v1["dynmat"][k - 1])
                frequency = math.sqrt(sum(vec)) * 2.0 * math.pi * 15.633302  # THz
                frequencies.append(frequency)
        return frequencies

    @property
    def nspecs(self):
        """returns the number of species"""
        return self._nspecs

    @property
    def natoms(self):
        """returns the number of atoms"""
        return self._natoms

    @property
    def ndisps(self):
        """returns the number of displacements"""
        return self._ndisps

    @property
    def masses(self):
        """returns a copy of the list of atomic masses"""
        return list(self._masses)
def get_adjusted_fermi_level(efermi, cbm, band_structure):
    """
    When running a band structure computations the fermi level needs to be
    take from the static run that gave the charge density used for the non-self
    consistent band structure run. Sometimes this fermi level is however a
    little too low because of the mismatch between the uniform grid used in
    the static run and the band structure k-points (e.g., the VBM is on Gamma
    and the Gamma point is not in the uniform mesh). Here we use a procedure
    consisting in looking for energy levels higher than the static fermi level
    (but lower than the LUMO) if any of these levels make the band structure
    appears insulating and not metallic anymore, we keep this adjusted fermi
    level. This procedure has shown to detect correctly most insulators.

    Args:
        efermi (float): Fermi energy of the static run
        cbm (float): Conduction band minimum of the static run
        band_structure: a band_structure object

    Returns:
        a new adjusted fermi level
    """
    # Deep-copy via dict round-trip so the caller's band structure is untouched.
    probe = BandStructureSymmLine.from_dict(band_structure.as_dict())
    if not probe.is_metal():
        return efermi
    # Scan upward in 0.01 eV steps until the structure stops looking metallic.
    energy = efermi
    while energy < cbm:
        energy += 0.01
        probe._efermi = energy
        if not probe.is_metal():
            return energy
    return efermi
# a note to future confused people (i.e. myself):
# I use numpy.fromfile instead of scipy.io.FortranFile here because the records
# are of fixed length, so the record length is only written once. In fortran,
# this amounts to using open(..., form='unformatted', recl=recl_len). In
# contrast when you write UNK files, the record length is written at the
# beginning of each record. This allows you to use scipy.io.FortranFile. In
# fortran, this amounts to using open(..., form='unformatted') [i.e. no recl=].
class Wavecar:
"""
This is a class that contains the (pseudo-) wavefunctions from VASP.
Coefficients are read from the given WAVECAR file and the corresponding
G-vectors are generated using the algorithm developed in WaveTrans (see
acknowledgments below). To understand how the wavefunctions are evaluated,
please see the evaluate_wavefunc docstring.
It should be noted that the pseudopotential augmentation is not included in
the WAVECAR file. As a result, some caution should be exercised when
deriving value from this information.
The usefulness of this class is to allow the user to do projections or band
unfolding style manipulations of the wavefunction. An example of this can
be seen in the work of Shen et al. 2017
(https://doi.org/10.1103/PhysRevMaterials.1.065001).
.. attribute:: filename
String of the input file (usually WAVECAR)
.. attribute:: vasp_type
String that determines VASP type the WAVECAR was generated with (either
'std', 'gam', or 'ncl')
.. attribute:: nk
Number of k-points from the WAVECAR
.. attribute:: nb
Number of bands per k-point
.. attribute:: encut
Energy cutoff (used to define G_{cut})
.. attribute:: efermi
Fermi energy
.. attribute:: a
Primitive lattice vectors of the cell (e.g. a_1 = self.a[0, :])
.. attribute:: b
Reciprocal lattice vectors of the cell (e.g. b_1 = self.b[0, :])
.. attribute:: vol
The volume of the unit cell in real space
.. attribute:: kpoints
The list of k-points read from the WAVECAR file
.. attribute:: band_energy
The list of band eigenenergies (and corresponding occupancies) for
each kpoint, where the first index corresponds to the index of the
k-point (e.g. self.band_energy[kp])
.. attribute:: Gpoints
The list of generated G-points for each k-point (a double list), which
are used with the coefficients for each k-point and band to recreate
the wavefunction (e.g. self.Gpoints[kp] is the list of G-points for
k-point kp). The G-points depend on the k-point and reciprocal lattice
and therefore are identical for each band at the same k-point. Each
G-point is represented by integer multipliers (e.g. assuming
Gpoints[kp][n] == [n_1, n_2, n_3], then
G_n = n_1*b_1 + n_2*b_2 + n_3*b_3)
.. attribute:: coeffs
The list of coefficients for each | |
# -*- coding: utf-8 -*-
"""
UUTrack.Controller.devices.PhotonicScience.scmoscam.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A wrapper class originally written by <NAME>,
<EMAIL> in Py2 and has been tested successfully with
scmoscontrol.dll SCMOS Pleora (GEV) control dll (x86 )v5.6.0.0 (date modified 10/2/2013)
SaFa @nanoLINX has adapted the wrapper class for a camera control program.
v1.0, 24 feb. 2015
.. sectionauthor:: SaFa <<EMAIL>>
"""
import ctypes as C
import os
import sys
from sys import platform
# Platform gate: the _ctypes library-handle helpers used by GEVSCMOS
# (LoadLibrary / FreeLibrary) exist only on Windows.
if platform == "linux" or platform == "linux2":
    pass
elif platform == "darwin":
    pass
elif platform == "win32":
    from _ctypes import LoadLibrary, FreeLibrary
import numpy

# Maps PIL-style pixel mode names to the numpy dtype used for frame buffers.
NUMPY_MODES = {"L": numpy.uint8, "I;16": numpy.uint16}
class GEVSCMOS:
def __init__(self, cwd_path, name):
    """
    Args:
        cwd_path: Working directory containing the camera folder.
        name: Camera name == folder where the DLL and settings are stored.
    """
    self.cwd_path = cwd_path  # working directory
    self.name = name  # Camera name = folder where DLL and settings are stored
    # ps_setup.dat holds the per-camera capability flags read by the Has*/Is* helpers.
    self.setup_file = "%s\\%s\\PSL_camera_files\\ps_setup.dat" % (cwd_path, name)
    self.dll_name = self.GetDLL()
    self.dll = None
    self.LoadCamDLL()
    self.ResetOptions()
def __str__(self):
msg = "Camera setting located in %s"%(self.setup_file)
return msg
def GetDLL(self):
FileList = os.listdir('%s\\%s'%(self.cwd_path,self.name))
count = 0
for file in FileList:
if file[-4:] == ".dll":
dll_name = file
count+=1
if count == 0:
msg = "Check in '%s'\n!!!CAMERA CONTROL DLL NOT FOUND!!!"%self.cwd_path
print (msg)
return ""
elif count > 1:
msg = "Check in '%s'\n!!!ONLY ONE DLL FILE MUST EXIST IN THE CAMERA FOLDER!!!"%self.cwd_path
print (msg)
return ""
else:
return dll_name
def LoadCamDLL(self):
    """Load the camera control DLL and resolve its entry points.

    LoadLibrary comes from _ctypes (win32 branch of the module-level import);
    the raw OS handle is kept in self.libHandle so UnloadCamDLL can free it.
    """
    self.libHandle = LoadLibrary('%s\\%s\\%s' % (self.cwd_path, self.name, self.dll_name))
    #self.libHandle = C.windll.kernel32.LoadLibraryA('%s\\%s\\%s'%(self.cwd_path,self.name,self.dll_name))
    # Wrap the already-loaded module with the cdecl calling convention.
    self.dll = C.CDLL(None, handle=self.libHandle)  # cdecl
    #self.dll = C.WinDLL(None, handle=self.libHandle) #stdcall
    #self.dll = C.CDLL('%s\\%s\\%s'%(self.cwd_path,self.name,self.dll_name))
    self.InitFunctions()
def UnloadCamDLL(self):
    """Drop the ctypes wrapper and free the underlying OS library handle."""
    # Delete the CDLL wrapper first so no ctypes reference outlives the handle.
    del self.dll
    self.dll = None
    FreeLibrary(self.libHandle)
    #C.windll.kernel32.FreeLibrary(self.libHandle)
def ResetOptions(self):
    """Restore every acquisition setting to its power-on default and rebuild
    the Options capability table from the camera's setup-file flags."""
    self.mode = "I;16"  # 16-bit pixel mode (key into NUMPY_MODES)
    self.size = (0, 0)
    self.sizemax = (1919, 1079)
    self.state = 0
    self.abort_flag = False
    self.remapping = False
    self.smooth = False
    self.clip = True
    self.SubArea = (0, 0, 0, 0)
    self.SoftBin = (1, 1)
    self.gainmode = 0
    self.expous = 100000  # exposure in microseconds (100 ms default)
    self.FlatAverage = 10
    self.GlobalRemap = False
    self.tempread = True
    #self.is2tap = False
    # Gain-mode index reference:
    # 0 = gain mode 1 - 16 bit
    # 1 = gain mode 2 - 16 bit
    # 2 = gain mode 10 - 16 bit
    # 3 = gain mode 30 - 16 bit
    # 4 = combined (1 and 30) in software - 24 => Demangle => 16 bit
    # 5 = combined in hardware - 16 bit
    # 6 = gain mode 1 - 8 bit
    # 7 = gain mode 2 - 8 bit
    # 8 = gain mode 10 - 8 bit
    # 9 = gain mode 30 - 8 bit
    # 10= combined in hardware - 8bit
    # Advertised gain modes depend on two setup-file capabilities.
    if self.IsInCamCor():
        if self.Has8bitGainModes():
            gainmodes = ['gain1', 'gain2', 'gain10', 'gain30', 'gain1+30_Hardware', 'gain1_8b', 'gain2_8b', 'gain10_8b', 'gain30_8b', 'gain1+30_8b']
        else:
            gainmodes = ['gain1', 'gain2', 'gain10', 'gain30', 'gain1+30_Hardware']
    else:
        if self.Has8bitGainModes():
            gainmodes = ['gain1', 'gain2', 'gain10', 'gain30', 'gain1+30', 'gain1_8b', 'gain2_8b', 'gain10_8b', 'gain30_8b']
        else:
            gainmodes = ['gain1', 'gain2', 'gain10', 'gain30', 'gain1+30']
    if self.HasClockSpeedLimit():
        clockspeedmodes = ['50MHz']
    else:
        clockspeedmodes = ['50MHz', '100MHz']  #,'200MHz'
    #self.flipdata = self.IsFlipped()
    # Capability table: option name -> allowed values or (min, max) bounds.
    self.Options = {
        'TriggerMode': ['FreeRunning', 'Software',
                        'Hardware_Falling', 'Hardware_Rising'],
        'ClockSpeedMode': clockspeedmodes,
        'GainMode': gainmodes,
        'PowerSavingMode': ['PowerOn', 'PowerOff', 'CoolingOff'],
        #'VideoGain' :[0,100],
        'IntensifierGain': [1, 100],
        #'ChipGain' :[1,100],
        'SoftBin': [(1, 1), (1040, 1040)],
        'SubArea': [(0, 0, 0, 0)],
        'Exposure': [(100, 'Millisec'), (4294967, ['Microsec', 'Millisec', 'Second'])],
        'Temperature': [0, 0],
        'Offset': [1],
        'BrightPixel': [1],
        'FlatField': [0],
        'MakeFlat': [None],
        'FlatAverage': [10, 1000],
        'Remapping': [0],
        'Smooth': [0],
        'Clip': [0],
        'Sharpening': [0],
        'AutoLevel': [0],
        'ALC_maxexp': [1000, 65535],
        'ALC_win': [(0, 0, 1919, 1079)],
        'BestFit': [0],
        'BF_Peek': [1000, 65535],
        'IF_delay': [0, 65535],
        'BinningFilter': [0],
        'AutoBinning': [0],
        'Gamma': [0],
        'GammaPeak': [0, 100],
        'GammaBright': [0, 100],
        #'FlickerMode' :['Off','50MHz','60MHz'],
    }
def InitFunctions(self):
    """Declare ctypes return types for the DLL entry points so the pointer-
    and bool-returning functions are not interpreted with ctypes' default
    int return type."""
    # Functions returning raw image-buffer pointers
    self.dll.PSL_VHR_get_image_pointer.restype = C.POINTER(C.c_char)  #ushort
    self.dll.PSL_VHR_demangle_rgb24_into_16bit_image.restype = C.POINTER(C.c_char)  #ushort
    self.dll.PSL_VHR_remap_image.restype = C.POINTER(C.c_char)  #ushort
    self.dll.PSL_VHR_get_pointer_to_safebufferA.restype = C.POINTER(C.c_char)  #ushort
    self.dll.PSL_VHR_get_pointer_to_safebufferB.restype = C.POINTER(C.c_char)  #ushort
    self.dll.PSL_VHR_get_pointer_to_safebufferC.restype = C.POINTER(C.c_char)  #ushort
    # Functions returning a success/failure flag
    self.dll.PSL_VHR_Open.restype = C.c_bool
    self.dll.PSL_VHR_open_map.restype = C.c_bool
    self.dll.PSL_VHR_Close.restype = C.c_bool
    self.dll.PSL_VHR_set_gain_mode.restype = C.c_bool
    self.dll.PSL_VHR_set_speed.restype = C.c_bool
    self.dll.PSL_VHR_set_video_gain.restype = C.c_bool
    self.dll.PSL_VHR_set_chip_gain.restype = C.c_bool
    self.dll.PSL_VHR_set_exposure.restype = C.c_bool
    self.dll.PSL_VHR_set_trigger_mode.restype = C.c_bool
    self.dll.PSL_VHR_set_sub_area_coordinates.restype = C.c_bool
    self.dll.PSL_VHR_enable_offset_subtraction.restype = C.c_bool
    self.dll.PSL_VHR_enable_bright_pixel_correction.restype = C.c_bool
    self.dll.PSL_VHR_enable_flat_field_correction.restype = C.c_bool
    self.dll.PSL_VHR_Snap_and_return.restype = C.c_bool
    self.dll.PSL_VHR_Get_snap_status.restype = C.c_bool
    self.dll.PSL_VHR_abort_snap.restype = C.c_bool
    self.dll.PSL_VHR_apply_post_snap_processing.restype = C.c_bool
    self.dll.PSL_VHR_enable_gamma.restype = C.c_bool
    self.dll.PSL_VHR_set_gamma_gain_bright.restype = C.c_bool
    self.dll.PSL_VHR_set_gamma_gain_brightness.restype = C.c_bool
    #self.dll.PSL_VHR_set_flicker_mode.restype = C.c_bool
def IsInCamCor(self):
isincamcor = 0
try:
fich = open(self.setup_file,'r')
lines = fich.readlines()
fich.close()
for line in lines:
(option,sep,value) = line.strip().partition('=')
if option.lower() in ["onboardcorrectionssupported","incameracorrections"]:
isincamcor = int(value)
break
except:
pass
return bool(isincamcor)
def IsFlipped(self):
isflip = 0
try:
fich = open(self.setup_file,'r')
lines = fich.readlines()
fich.close()
for line in lines:
(option,sep,value) = line.strip().partition('=')
if option.lower() in ["swflipimage"]:
isflip = int(value)
break
except:
pass
return bool(isflip)
def GetRemapSize(self):
remapsize = None
try:
fich = open(self.setup_file,'r')
lines = fich.readlines()
fich.close()
Nx,Ny = (0,0)
for line in lines:
(option,sep,value) = line.strip().partition('=')
if option in ["Submapwidth","submapwidth"]:
Nx = int(value)
if option in ["Submapheight","submapheight"]:
Ny = int(value)
break
remapsize = Nx,Ny
except:
pass
#print "remap size is (%s,%s)"%remapsize
return remapsize
def HasIntensifier(self):
intensifier_value = 1
try:
fich = open(self.setup_file,'r')
lines = fich.readlines()
fich.close()
for line in lines:
(option,sep,value) = line.strip().partition('=')
if option in ["intensifiergaincanbeset","IntensifierGainCanBeSet","HasIntensifier","hasintensifier"]:
intensifier_value = int(value)
break
except:
pass
return bool(intensifier_value)
def HasTemperature(self):
tempset = None
tempread = None
try:
fich = open(self.setup_file,'r')
lines = fich.readlines()
fich.close()
for line in lines:
(option,sep,value) = line.strip().partition('=')
if option in ["TemperatureCanBeSet","temperaturecanbeset"]:
tempset = int(value)
if option in ["TemperatureCanBeRead","temperaturecanberead"]:
tempread = int(value)
if tempset==1:
return [-30,50]
elif tempset==0:
return [0,0]
elif tempread==1:
return [0,0]
elif tempread==0:
return None
else:
return [-30,50]
except:
return [-30,50]
def HasHPMapping(self):
use_hpm_remap = 0
try:
fich = open(self.setup_file,'r')
lines = fich.readlines()
fich.close()
for line in lines:
(option,sep,value) = line.strip().partition('=')
if option in ["viewer_use_hp_mapping"]:
use_hpm_remap = int(value)
break
except:
print ("HasHPMapping: %s: %s"%(sys.exc_info()[0],sys.exc_info()[1]))
return bool(use_hpm_remap)
def HasBinning(self):
use_binning = 1
try:
fich = open(self.setup_file,'r')
lines = fich.readlines()
fich.close()
for line in lines:
(option,sep,value) = line.strip().partition('=')
if option in ["binning_supported"]:
use_binning = int(value)
break
except:
print ("HasBinning: %s: %s"%(sys.exc_info()[0],sys.exc_info()[1]))
return bool(use_binning)
def HasClockSpeedLimit(self):
clockspeedlimit = 0
try:
fich = open(self.setup_file,'r')
lines = fich.readlines()
fich.close()
for line in lines:
(option,sep,value) = line.strip().partition('=')
if option.lower() in ["hasclockspeedlimit","clockspeedlimit"]:
clockspeedlimit = int(value)
break
except:
pass
return bool(clockspeedlimit)
def Has8bitGainModes(self):
has8bit = 1
try:
fich = open(self.setup_file,'r')
lines = fich.readlines()
fich.close()
for line in lines:
(option,sep,value) = line.strip().partition('=')
if option.lower() in ["remove_8bit_gainmodes"]:
if int(value)==0:
has8bit = 1
else:
has8bit = 0
break
except:
pass
return bool(has8bit)
#------ CAMERA PROPERTIES ---------------------------------
def GetName(self):
    """Return the camera name (the folder holding its DLL and settings)."""
    return self.name
def GetDLLName(self):
    """Return the file name of the camera control DLL."""
    return self.dll_name
def GetMode(self):
    """Return the current pixel mode string (e.g. "I;16"; key into NUMPY_MODES)."""
    return self.mode
def GetState(self):
    """Return the acquisition state flag (0 after ResetOptions)."""
    return self.state
def GetPedestal(self):
    """Return self.pedestal.

    NOTE(review): `pedestal` is not assigned anywhere in this class's
    visible code — presumably set elsewhere; confirm before relying on it,
    otherwise this raises AttributeError.
    """
    return self.pedestal
def GetOptions(self):
    """Return the available option names (dict keys view of self.Options)."""
    return self.Options.keys()
def GetSize(self):
    """Return the current image size as an (Nx, Ny) tuple."""
    return self.size
def GetSizeMax(self):
    """Return the cached maximum sensor size as an (Nx, Ny) tuple."""
    return self.sizemax
def UpdateSizeMax(self):
Nx = self.dll.PSL_VHR_get_maximum_width()
Ny = self.dll.PSL_VHR_get_maximum_height()
self.sizemax = (Nx,Ny)
return self.sizemax
def UpdateSize(self):
Nx = self.dll.PSL_VHR_get_width()
Ny = self.dll.PSL_VHR_get_height()
self.size = (Nx,Ny)
#----- Specificities -----------------------------------------------------------------
def SelectIportDevice(self):
path = "%s\\%s\\IPconf.dat"%(self.cwd_path,self.name)
if os.path.exists(path):
self.IP_add = ""
self.mac_add = ""
self.IsIport = True
fich = open(path,'r')
lines = fich.readlines()
fich.close()
for line in lines:
(option, sep, value) = line.strip().partition('=')
if option == "MAC":
self.mac_add = value
elif option == "IP":
self.IP_add = value
if self.mac_add=="" or self.IP_add=="":
self.dll.PSL_VHR_select_IPORT_device("","")
else:
self.dll.PSL_VHR_select_IPORT_device(self.mac_add,"[%s]"%self.IP_add)
return True
else:
self.IsIport = False
return False
#-------- CAMERA STANDARD FUNCTIONS ------------------
def Open(self):
    """Open the camera through the DLL, prune unsupported options and
    initialise the readout geometry.

    Returns:
        0 on success, 1 if the DLL failed to open the camera (kept for
        backward compatibility with existing callers).
    """
    path = "%s\\%s\\PSL_camera_files" % (self.cwd_path, self.name)
    self.SelectIportDevice()
    if self.dll.PSL_VHR_Open(str(path)):
        # The remap table is optional; a failed OpenMap simply leaves
        # remapping unavailable (original did nothing on failure either).
        self.OpenMap()
        # BUGFIX: the original called self.IsIntensifier(), which is not
        # defined; the capability probe is named HasIntensifier().
        if not self.HasIntensifier():
            del self.Options['IntensifierGain']
        # HasTemperature() returns None when the camera can neither set
        # nor read its temperature.
        Temp = self.HasTemperature()
        self.tempread = Temp is not None
        (Nx, Ny) = self.UpdateSizeMax()
        self.Options["SubArea"][0] = (0, 0, Nx - 1, Ny - 1)
        self.Options["ALC_win"][0] = (0, 0, Nx - 1, Ny - 1)
        self.SetSubArea(0, 0, Nx - 1, Ny - 1)
        self.UpdateSize()
        return 0
    return 1
def Close(self):
    """Close the camera connection, then release the control DLL."""
    self.dll.PSL_VHR_Close()
    self.UnloadCamDLL()
def SetSubArea(self,left,top,right,bottom):
self.SubArea = (left,top,right,bottom)
rep = self.dll.PSL_VHR_set_sub_area_coordinates(left,right,top,bottom)
self.UpdateSize()
return rep
def SetSoftBin(self, Sx, Sy):
    """Record the software binning factors (stored only; not sent to the DLL here)."""
    self.SoftBin = (Sx, Sy)
def SetExposure(self, expo, unit):
if unit=="Second":
self.expous = expo*1000000
elif unit=="Millisec":
self.expous = expo*1000
elif unit=="Microsec":
self.expous = expo
ans = self.dll.PSL_VHR_set_exposure(self.expous)
#print "SetExposure ",self.name,expo,unit,self.expous,type(self.expous),ans
return ans
def SetTrigger(self,mode):
if mode == "FreeRunning":
return self.dll.PSL_VHR_set_trigger_mode(0)
elif mode == "Software":
return self.dll.PSL_VHR_set_trigger_mode(1)
elif mode == "Hardware_Falling":
return self.dll.PSL_VHR_set_trigger_mode(2)
elif mode == "Hardware_Rising":
return self.dll.PSL_VHR_set_trigger_mode(6)
elif mode == "Pipeline_Master":
return self.dll.PSL_VHR_set_trigger_mode(16)
elif mode == "Pipeline_Slave":
return self.dll.PSL_VHR_set_trigger_mode(18)
else:
return "Trigger mode not valid"
def SetGainMode(self, mode):
# 0 = gain mode 1 - 16 bit
# 1 = gain mode 2 - 16 bit
# 2 = gain mode 10 - 16 bit
# 3 = gain mode 30 - 16 bit
# 4 = combined (1 and 30) in software - 24 => Demangle => 16 bit
# 5 = combined in hardware - 16 bit
# 6 = gain mode 1 - 8 bit
# 7 = gain mode 2 - 8 bit
# 8 = gain mode 10 - 8 bit
# | |
117, 117),
("GRAY47", 120, 120, 120),
("GRAY48", 122, 122, 122),
("GRAY49", 125, 125, 125),
("GRAY5", 13, 13, 13),
("GRAY50", 127, 127, 127),
("GRAY51", 130, 130, 130),
("GRAY52", 133, 133, 133),
("GRAY53", 135, 135, 135),
("GRAY54", 138, 138, 138),
("GRAY55", 140, 140, 140),
("GRAY56", 143, 143, 143),
("GRAY57", 145, 145, 145),
("GRAY58", 148, 148, 148),
("GRAY59", 150, 150, 150),
("GRAY6", 15, 15, 15),
("GRAY60", 153, 153, 153),
("GRAY61", 156, 156, 156),
("GRAY62", 158, 158, 158),
("GRAY63", 161, 161, 161),
("GRAY64", 163, 163, 163),
("GRAY65", 166, 166, 166),
("GRAY66", 168, 168, 168),
("GRAY67", 171, 171, 171),
("GRAY68", 173, 173, 173),
("GRAY69", 176, 176, 176),
("GRAY7", 18, 18, 18),
("GRAY70", 179, 179, 179),
("GRAY71", 181, 181, 181),
("GRAY72", 184, 184, 184),
("GRAY73", 186, 186, 186),
("GRAY74", 189, 189, 189),
("GRAY75", 191, 191, 191),
("GRAY76", 194, 194, 194),
("GRAY77", 196, 196, 196),
("GRAY78", 199, 199, 199),
("GRAY79", 201, 201, 201),
("GRAY8", 20, 20, 20),
("GRAY80", 204, 204, 204),
("GRAY81", 207, 207, 207),
("GRAY82", 209, 209, 209),
("GRAY83", 212, 212, 212),
("GRAY84", 214, 214, 214),
("GRAY85", 217, 217, 217),
("GRAY86", 219, 219, 219),
("GRAY87", 222, 222, 222),
("GRAY88", 224, 224, 224),
("GRAY89", 227, 227, 227),
("GRAY9", 23, 23, 23),
("GRAY90", 229, 229, 229),
("GRAY91", 232, 232, 232),
("GRAY92", 235, 235, 235),
("GRAY93", 237, 237, 237),
("GRAY94", 240, 240, 240),
("GRAY95", 242, 242, 242),
("GRAY96", 245, 245, 245),
("GRAY97", 247, 247, 247),
("GRAY98", 250, 250, 250),
("GRAY99", 252, 252, 252),
("HONEYDEW", 240, 255, 240),
("HONEYDEW1", 240, 255, 240),
("HONEYDEW2", 224, 238, 224),
("HONEYDEW3", 193, 205, 193),
("HONEYDEW4", 131, 139, 131),
("HOTPINK", 255, 105, 180),
("HOTPINK1", 255, 110, 180),
("HOTPINK2", 238, 106, 167),
("HOTPINK3", 205, 96, 144),
("HOTPINK4", 139, 58, 98),
("INDIANRED", 205, 92, 92),
("INDIANRED1", 255, 106, 106),
("INDIANRED2", 238, 99, 99),
("INDIANRED3", 205, 85, 85),
("INDIANRED4", 139, 58, 58),
("IVORY", 255, 255, 240),
("IVORY1", 255, 255, 240),
("IVORY2", 238, 238, 224),
("IVORY3", 205, 205, 193),
("IVORY4", 139, 139, 131),
("KHAKI", 240, 230, 140),
("KHAKI1", 255, 246, 143),
("KHAKI2", 238, 230, 133),
("KHAKI3", 205, 198, 115),
("KHAKI4", 139, 134, 78),
("LAVENDER", 230, 230, 250),
("LAVENDERBLUSH", 255, 240, 245),
("LAVENDERBLUSH1", 255, 240, 245),
("LAVENDERBLUSH2", 238, 224, 229),
("LAVENDERBLUSH3", 205, 193, 197),
("LAVENDERBLUSH4", 139, 131, 134),
("LAWNGREEN", 124, 252, 0),
("LEMONCHIFFON", 255, 250, 205),
("LEMONCHIFFON1", 255, 250, 205),
("LEMONCHIFFON2", 238, 233, 191),
("LEMONCHIFFON3", 205, 201, 165),
("LEMONCHIFFON4", 139, 137, 112),
("LIGHTBLUE", 173, 216, 230),
("LIGHTBLUE1", 191, 239, 255),
("LIGHTBLUE2", 178, 223, 238),
("LIGHTBLUE3", 154, 192, 205),
("LIGHTBLUE4", 104, 131, 139),
("LIGHTCORAL", 240, 128, 128),
("LIGHTCYAN", 224, 255, 255),
("LIGHTCYAN1", 224, 255, 255),
("LIGHTCYAN2", 209, 238, 238),
("LIGHTCYAN3", 180, 205, 205),
("LIGHTCYAN4", 122, 139, 139),
("LIGHTGOLDENROD", 238, 221, 130),
("LIGHTGOLDENROD1", 255, 236, 139),
("LIGHTGOLDENROD2", 238, 220, 130),
("LIGHTGOLDENROD3", 205, 190, 112),
("LIGHTGOLDENROD4", 139, 129, 76),
("LIGHTGOLDENRODYELLOW", 250, 250, 210),
("LIGHTGREEN", 144, 238, 144),
("LIGHTGRAY", 211, 211, 211),
("LIGHTPINK", 255, 182, 193),
("LIGHTPINK1", 255, 174, 185),
("LIGHTPINK2", 238, 162, 173),
("LIGHTPINK3", 205, 140, 149),
("LIGHTPINK4", 139, 95, 101),
("LIGHTSALMON", 255, 160, 122),
("LIGHTSALMON1", 255, 160, 122),
("LIGHTSALMON2", 238, 149, 114),
("LIGHTSALMON3", 205, 129, 98),
("LIGHTSALMON4", 139, 87, 66),
("LIGHTSEAGREEN", 32, 178, 170),
("LIGHTSKYBLUE", 135, 206, 250),
("LIGHTSKYBLUE1", 176, 226, 255),
("LIGHTSKYBLUE2", 164, 211, 238),
("LIGHTSKYBLUE3", 141, 182, 205),
("LIGHTSKYBLUE4", 96, 123, 139),
("LIGHTSLATEBLUE", 132, 112, 255),
("LIGHTSLATEGRAY", 119, 136, 153),
("LIGHTSTEELBLUE", 176, 196, 222),
("LIGHTSTEELBLUE1", 202, 225, 255),
("LIGHTSTEELBLUE2", 188, 210, 238),
("LIGHTSTEELBLUE3", 162, 181, 205),
("LIGHTSTEELBLUE4", 110, 123, 139),
("LIGHTYELLOW", 255, 255, 224),
("LIGHTYELLOW1", 255, 255, 224),
("LIGHTYELLOW2", 238, 238, 209),
("LIGHTYELLOW3", 205, 205, 180),
("LIGHTYELLOW4", 139, 139, 122),
("LIMEGREEN", 50, 205, 50),
("LINEN", 250, 240, 230),
("MAGENTA", 255, 0, 255),
("MAGENTA1", 255, 0, 255),
("MAGENTA2", 238, 0, 238),
("MAGENTA3", 205, 0, 205),
("MAGENTA4", 139, 0, 139),
("MAROON", 176, 48, 96),
("MAROON1", 255, 52, 179),
("MAROON2", 238, 48, 167),
("MAROON3", 205, 41, 144),
("MAROON4", 139, 28, 98),
("MEDIUMAQUAMARINE", 102, 205, 170),
("MEDIUMBLUE", 0, 0, 205),
("MEDIUMORCHID", 186, 85, 211),
("MEDIUMORCHID1", 224, 102, 255),
("MEDIUMORCHID2", 209, 95, 238),
("MEDIUMORCHID3", 180, 82, 205),
("MEDIUMORCHID4", 122, 55, 139),
("MEDIUMPURPLE", 147, 112, 219),
("MEDIUMPURPLE1", 171, 130, 255),
("MEDIUMPURPLE2", 159, 121, 238),
("MEDIUMPURPLE3", 137, 104, 205),
("MEDIUMPURPLE4", 93, 71, 139),
("MEDIUMSEAGREEN", 60, 179, 113),
("MEDIUMSLATEBLUE", 123, 104, 238),
("MEDIUMSPRINGGREEN", 0, 250, 154),
("MEDIUMTURQUOISE", 72, 209, 204),
("MEDIUMVIOLETRED", 199, 21, 133),
("MIDNIGHTBLUE", 25, 25, 112),
("MINTCREAM", 245, 255, 250),
("MISTYROSE", 255, 228, 225),
("MISTYROSE1", 255, 228, 225),
("MISTYROSE2", 238, 213, 210),
("MISTYROSE3", 205, 183, 181),
("MISTYROSE4", 139, 125, 123),
("MOCCASIN", 255, 228, 181),
("MUPDFBLUE", 37, 114, 172),
("NAVAJOWHITE", 255, 222, 173),
("NAVAJOWHITE1", 255, 222, 173),
("NAVAJOWHITE2", 238, 207, 161),
("NAVAJOWHITE3", 205, 179, 139),
("NAVAJOWHITE4", 139, 121, 94),
("NAVY", 0, 0, 128),
("NAVYBLUE", 0, 0, 128),
("OLDLACE", 253, 245, 230),
("OLIVEDRAB", 107, 142, 35),
("OLIVEDRAB1", 192, 255, 62),
("OLIVEDRAB2", 179, 238, 58),
("OLIVEDRAB3", 154, 205, 50),
("OLIVEDRAB4", 105, 139, 34),
("ORANGE", 255, 165, 0),
("ORANGE1", 255, 165, 0),
("ORANGE2", 238, 154, 0),
("ORANGE3", 205, 133, 0),
("ORANGE4", 139, 90, 0),
("ORANGERED", 255, 69, 0),
("ORANGERED1", 255, 69, 0),
("ORANGERED2", 238, 64, 0),
("ORANGERED3", 205, 55, 0),
("ORANGERED4", 139, 37, 0),
("ORCHID", 218, 112, 214),
("ORCHID1", 255, 131, 250),
("ORCHID2", 238, 122, 233),
("ORCHID3", 205, 105, 201),
("ORCHID4", 139, 71, 137),
("PALEGOLDENROD", 238, 232, 170),
("PALEGREEN", 152, 251, 152),
("PALEGREEN1", 154, 255, 154),
("PALEGREEN2", 144, 238, 144),
("PALEGREEN3", 124, 205, 124),
("PALEGREEN4", 84, 139, 84),
("PALETURQUOISE", 175, 238, 238),
("PALETURQUOISE1", 187, 255, 255),
("PALETURQUOISE2", 174, 238, 238),
("PALETURQUOISE3", 150, 205, 205),
("PALETURQUOISE4", 102, 139, 139),
("PALEVIOLETRED", 219, 112, 147),
("PALEVIOLETRED1", 255, 130, 171),
("PALEVIOLETRED2", 238, 121, 159),
("PALEVIOLETRED3", 205, 104, 137),
("PALEVIOLETRED4", 139, 71, 93),
("PAPAYAWHIP", 255, 239, 213),
("PEACHPUFF", 255, 218, 185),
("PEACHPUFF1", 255, 218, 185),
("PEACHPUFF2", 238, 203, 173),
("PEACHPUFF3", 205, 175, 149),
("PEACHPUFF4", 139, 119, 101),
("PERU", 205, 133, 63),
("PINK", 255, 192, 203),
("PINK1", 255, 181, 197),
("PINK2", 238, 169, 184),
("PINK3", 205, 145, 158),
("PINK4", 139, 99, 108),
("PLUM", 221, 160, 221),
("PLUM1", 255, 187, 255),
("PLUM2", 238, 174, 238),
("PLUM3", 205, 150, 205),
("PLUM4", 139, 102, 139),
("POWDERBLUE", 176, 224, 230),
("PURPLE", 160, 32, 240),
("PURPLE1", 155, 48, 255),
("PURPLE2", 145, 44, 238),
("PURPLE3", 125, 38, 205),
("PURPLE4", 85, 26, 139),
("PY_COLOR", 240, 255, 210),
("RED", 255, 0, 0),
("RED1", 255, 0, 0),
("RED2", 238, 0, 0),
("RED3", 205, 0, 0),
("RED4", 139, 0, 0),
("ROSYBROWN", 188, 143, 143),
("ROSYBROWN1", 255, 193, 193),
("ROSYBROWN2", 238, 180, 180),
("ROSYBROWN3", 205, 155, 155),
("ROSYBROWN4", 139, 105, 105),
("ROYALBLUE", 65, 105, 225),
("ROYALBLUE1", 72, 118, 255),
("ROYALBLUE2", 67, 110, 238),
("ROYALBLUE3", 58, 95, 205),
("ROYALBLUE4", 39, 64, 139),
("SADDLEBROWN", 139, 69, 19),
("SALMON", 250, 128, 114),
("SALMON1", 255, 140, 105),
("SALMON2", 238, 130, 98),
("SALMON3", 205, 112, 84),
("SALMON4", 139, 76, 57),
("SANDYBROWN", 244, 164, 96),
("SEAGREEN", 46, 139, 87),
("SEAGREEN1", 84, 255, 159),
("SEAGREEN2", 78, 238, 148),
("SEAGREEN3", 67, 205, 128),
("SEAGREEN4", 46, 139, 87),
("SEASHELL", 255, 245, 238),
("SEASHELL1", 255, 245, 238),
("SEASHELL2", 238, 229, 222),
("SEASHELL3", 205, 197, 191),
("SEASHELL4", 139, 134, 130),
("SIENNA", 160, 82, 45),
("SIENNA1", 255, 130, 71),
("SIENNA2", 238, 121, 66),
("SIENNA3", 205, 104, 57),
("SIENNA4", 139, 71, 38),
("SKYBLUE", 135, 206, 235),
("SKYBLUE1", 135, 206, 255),
("SKYBLUE2", 126, 192, 238),
("SKYBLUE3", 108, 166, 205),
("SKYBLUE4", 74, 112, 139),
("SLATEBLUE", 106, 90, 205),
("SLATEBLUE1", 131, 111, 255),
("SLATEBLUE2", 122, 103, 238),
("SLATEBLUE3", 105, 89, 205),
("SLATEBLUE4", 71, 60, 139),
("SLATEGRAY", 112, 128, 144),
("SNOW", 255, 250, 250),
("SNOW1", 255, 250, 250),
("SNOW2", 238, 233, 233),
("SNOW3", 205, 201, 201),
("SNOW4", 139, 137, 137),
("SPRINGGREEN", 0, 255, 127),
("SPRINGGREEN1", 0, 255, 127),
("SPRINGGREEN2", 0, 238, 118),
("SPRINGGREEN3", 0, 205, 102),
("SPRINGGREEN4", 0, 139, 69),
("STEELBLUE", 70, 130, 180),
("STEELBLUE1", 99, 184, 255),
("STEELBLUE2", 92, 172, 238),
("STEELBLUE3", 79, 148, 205),
("STEELBLUE4", 54, 100, 139),
("TAN", 210, 180, 140),
("TAN1", 255, 165, 79),
("TAN2", 238, 154, 73),
("TAN3", 205, 133, 63),
("TAN4", 139, 90, 43),
("THISTLE", 216, 191, 216),
("THISTLE1", 255, 225, 255),
("THISTLE2", 238, 210, 238),
("THISTLE3", 205, | |
NOT NULL PRIMARY KEY, `bk_cloud_name` varchar(45) NOT NULL, `isp` varchar(45) NULL, `ap_id` integer NULL, `creator` json NOT NULL, `is_visible` bool NOT NULL, `is_deleted` bool NOT NULL);",
"CREATE TABLE `node_man_cmdbeventrecord` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `bk_biz_id` integer NOT NULL, `subscription_id` varchar(50) NOT NULL, `event_type` varchar(20) NOT NULL, `action` varchar(20) NOT NULL, `obj_type` varchar(32) NOT NULL, `data` json NOT NULL, `create_time` datetime(6) NOT NULL);",
"CREATE TABLE `node_man_downloadrecord` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `category` varchar(32) NOT NULL, `query_params` varchar(256) NOT NULL, `file_path` varchar(256) NOT NULL, `task_status` integer NOT NULL, `error_message` longtext NOT NULL, `creator` varchar(64) NOT NULL, `create_time` datetime(6) NOT NULL, `finish_time` datetime(6) NOT NULL, `source_app_code` varchar(64) NOT NULL);",
"CREATE TABLE `node_man_globalsettings` (`key` varchar(255) NOT NULL PRIMARY KEY, `v_json` json NOT NULL);",
"CREATE TABLE `node_man_gseplugindesc` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `name` varchar(32) NOT NULL UNIQUE, `description` longtext NOT NULL, `scenario` longtext NOT NULL, `description_en` longtext NULL, `scenario_en` longtext NULL, `category` varchar(32) NOT NULL, `launch_node` varchar(32) NOT NULL, `config_file` varchar(128) NULL, `config_format` varchar(32) NULL, `use_db` bool NOT NULL, `auto_launch` bool NOT NULL, `is_binary` bool NOT NULL);",
"CREATE TABLE `node_man_host` (`bk_host_id` integer NOT NULL PRIMARY KEY, `bk_biz_id` integer NOT NULL, `bk_cloud_id` integer NOT NULL, `inner_ip` varchar(45) NOT NULL, `outer_ip` varchar(45) NULL, `login_ip` varchar(45) NULL, `data_ip` varchar(45) NULL, `os_type` varchar(45) NOT NULL, `node_type` varchar(45) NOT NULL, `node_from` varchar(45) NOT NULL, `ap_id` integer NULL, `upstream_nodes` json NOT NULL, `created_at` datetime(6) NOT NULL, `updated_at` datetime(6) NULL);",
"CREATE TABLE `node_man_identitydata` (`bk_host_id` integer NOT NULL PRIMARY KEY, `auth_type` varchar(45) NOT NULL, `account` varchar(45) NOT NULL, `password` longtext NULL, `port` integer NULL, `key` longtext NULL, `extra_data` json NULL, `retention` integer NOT NULL, `updated_at` datetime(6) NULL);",
"CREATE TABLE `node_man_job` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `created_by` varchar(45) NOT NULL, `job_type` varchar(45) NOT NULL, `subscription_id` integer NOT NULL, `task_id_list` json NOT NULL, `start_time` datetime(6) NOT NULL, `end_time` datetime(6) NULL, `status` varchar(45) NOT NULL, `global_params` json NULL, `statistics` json NULL, `bk_biz_scope` json NOT NULL, `error_hosts` json NOT NULL);",
"CREATE TABLE `node_man_jobtask` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `job_id` integer NOT NULL, `bk_host_id` integer NOT NULL, `instance_id` varchar(45) NOT NULL, `pipeline_id` varchar(50) NOT NULL, `status` varchar(45) NOT NULL, `current_step` varchar(45) NOT NULL, `create_time` datetime(6) NOT NULL, `update_time` datetime(6) NOT NULL, `end_time` datetime(6) NULL);",
"CREATE TABLE `node_man_packages` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `pkg_name` varchar(128) NOT NULL, `version` varchar(128) NOT NULL, `module` varchar(32) NOT NULL, `project` varchar(32) NOT NULL, `pkg_size` integer NOT NULL, `pkg_path` varchar(128) NOT NULL, `md5` varchar(32) NOT NULL, `pkg_mtime` varchar(48) NOT NULL, `pkg_ctime` varchar(48) NOT NULL, `location` varchar(512) NOT NULL, `os` varchar(32) NOT NULL, `cpu_arch` varchar(32) NOT NULL, `is_release_version` bool NOT NULL, `is_ready` bool NOT NULL);",
"CREATE TABLE `node_man_pipelinetree` (`id` varchar(32) NOT NULL PRIMARY KEY, `tree` json NOT NULL);",
"CREATE TABLE `node_man_pluginconfiginstance` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `plugin_config_template` integer NOT NULL, `render_data` longtext NOT NULL, `data_md5` varchar(50) NOT NULL, `creator` varchar(64) NOT NULL, `create_time` datetime(6) NOT NULL, `source_app_code` varchar(64) NOT NULL);",
"CREATE TABLE `node_man_proccontrol` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `module` varchar(32) NOT NULL, `project` varchar(32) NOT NULL, `plugin_package_id` integer NOT NULL, `install_path` longtext NOT NULL, `log_path` longtext NOT NULL, `data_path` longtext NOT NULL, `pid_path` longtext NOT NULL, `start_cmd` longtext NOT NULL, `stop_cmd` longtext NOT NULL, `restart_cmd` longtext NOT NULL, `reload_cmd` longtext NOT NULL, `kill_cmd` longtext NOT NULL, `version_cmd` longtext NOT NULL, `health_cmd` longtext NOT NULL, `debug_cmd` longtext NOT NULL, `os` varchar(32) NOT NULL, `process_name` varchar(128) NULL, `port_range` longtext NULL, `need_delegate` bool NOT NULL);",
"CREATE TABLE `node_man_processstatus` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `bk_host_id` integer NOT NULL, `name` varchar(45) NOT NULL, `status` varchar(45) NOT NULL, `is_auto` varchar(45) NOT NULL, `version` varchar(45) NULL, `proc_type` varchar(45) NOT NULL, `configs` json NOT NULL, `listen_ip` varchar(45) NULL, `listen_port` integer NULL, `setup_path` longtext NOT NULL, `log_path` longtext NOT NULL, `data_path` longtext NOT NULL, `pid_path` longtext NOT NULL, `group_id` varchar(50) NOT NULL, `source_type` varchar(128) NOT NULL, `source_id` varchar(128) NULL);",
"CREATE TABLE `node_man_profile` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `bk_username` varchar(45) NOT NULL, `favorite` json NOT NULL, `update_time` datetime(6) NOT NULL);",
"CREATE TABLE `node_man_subscription` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `bk_biz_id` integer NULL, `object_type` varchar(20) NOT NULL, `node_type` varchar(20) NOT NULL, `nodes` json NOT NULL, `target_hosts` json NULL, `from_system` varchar(30) NOT NULL, `update_time` datetime(6) NOT NULL, `create_time` datetime(6) NOT NULL, `creator` varchar(64) NOT NULL, `enable` bool NOT NULL, `is_deleted` bool NOT NULL);",
"CREATE TABLE `node_man_subscriptioninstancerecord` (`id` bigint AUTO_INCREMENT NOT NULL PRIMARY KEY, `task_id` integer NOT NULL, `subscription_id` integer NOT NULL, `instance_id` varchar(50) NOT NULL, `instance_info` json NOT NULL, `steps` json NOT NULL, `pipeline_id` varchar(50) NOT NULL, `update_time` datetime(6) NOT NULL, `create_time` datetime(6) NOT NULL, `need_clean` bool NOT NULL, `is_latest` bool NOT NULL);",
"CREATE TABLE `node_man_subscriptiontask` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `subscription_id` integer NOT NULL, `scope` json NOT NULL, `actions` json NOT NULL, `create_time` datetime(6) NOT NULL, `is_auto_trigger` bool NOT NULL);",
"CREATE TABLE `node_man_uploadpackage` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `file_name` varchar(64) NOT NULL, `module` varchar(32) NOT NULL, `file_path` varchar(128) NOT NULL, `file_size` integer NOT NULL, `md5` varchar(32) NOT NULL, `upload_time` datetime(6) NOT NULL, `creator` varchar(64) NOT NULL, `source_app_code` varchar(64) NOT NULL);",
"CREATE TABLE `node_man_subscriptionstep` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `subscription_id` integer NOT NULL, `index` integer NOT NULL, `step_id` varchar(64) NOT NULL, `type` varchar(20) NOT NULL, `config` json NOT NULL, `params` json NOT NULL);",
"CREATE TABLE `node_man_pluginconfigtemplate` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `plugin_name` varchar(32) NOT NULL, `plugin_version` varchar(128) NOT NULL, `name` varchar(128) NOT NULL, `version` varchar(128) NOT NULL, `format` varchar(16) NOT NULL, `file_path` varchar(128) NOT NULL, `content` longtext NOT NULL, `is_release_version` bool NOT NULL, `creator` varchar(64) NOT NULL, `create_time` datetime(6) NOT NULL, `source_app_code` varchar(64) NOT NULL);",
"CREATE INDEX `node_man_cmdbeventrecord_bk_biz_id_ceef7b40` ON `node_man_cmdbeventrecord` (`bk_biz_id`);",
"CREATE INDEX `node_man_cmdbeventrecord_subscription_id_40f64e79` ON `node_man_cmdbeventrecord` (`subscription_id`);",
"CREATE INDEX `node_man_job_subscription_id_66d5dd8f` ON `node_man_job` (`subscription_id`);",
"CREATE INDEX `node_man_packages_project_599c6474` ON `node_man_packages` (`project`);",
"CREATE INDEX `node_man_packages_os_4c08c4de` ON `node_man_packages` (`os`);",
"CREATE INDEX `node_man_packages_cpu_arch_bdefbd6c` ON `node_man_packages` (`cpu_arch`);",
"CREATE INDEX `node_man_packages_is_release_version_5839e52e` ON `node_man_packages` (`is_release_version`);",
"CREATE INDEX `node_man_pluginconfiginstance_plugin_config_template_532138e3` ON `node_man_pluginconfiginstance` (`plugin_config_template`);",
"CREATE INDEX `node_man_processstatus_bk_host_id_3bcd512d` ON `node_man_processstatus` (`bk_host_id`);",
"CREATE INDEX `node_man_processstatus_name_e9030502` ON `node_man_processstatus` (`name`);",
"CREATE INDEX `node_man_processstatus_status_51dc1f80` ON `node_man_processstatus` (`status`);",
"CREATE INDEX `node_man_processstatus_group_id_e049f18d` ON `node_man_processstatus` (`group_id`);",
"CREATE INDEX `node_man_subscription_bk_biz_id_4ee72393` ON `node_man_subscription` (`bk_biz_id`);",
"CREATE INDEX `node_man_subscription_enable_adb38208` ON `node_man_subscription` (`enable`);",
"CREATE INDEX `node_man_subscriptioninstancerecord_task_id_60347e2e` ON `node_man_subscriptioninstancerecord` (`task_id`);",
"CREATE INDEX `node_man_subscriptioninstancerecord_subscription_id_7f191490` ON `node_man_subscriptioninstancerecord` (`subscription_id`);",
"CREATE INDEX `node_man_subscriptioninstancerecord_instance_id_387f11f5` ON `node_man_subscriptioninstancerecord` (`instance_id`);",
"CREATE INDEX `node_man_subscriptiontask_subscription_id_d9370f40` ON `node_man_subscriptiontask` (`subscription_id`);",
"CREATE INDEX `node_man_uploadpackage_file_name_c64aa93d` ON `node_man_uploadpackage` (`file_name`);",
"ALTER TABLE `node_man_subscriptionstep` ADD CONSTRAINT `node_man_subscriptionstep_subscription_id_index_7a8cc815_uniq` UNIQUE (`subscription_id`, `index`);",
"ALTER TABLE `node_man_subscriptionstep` ADD CONSTRAINT `node_man_subscriptionstep_subscription_id_step_id_238ea3a4_uniq` UNIQUE (`subscription_id`, `step_id`);",
"CREATE INDEX `node_man_subscriptionstep_subscription_id_a54b7b75` ON `node_man_subscriptionstep` (`subscription_id`);",
"ALTER TABLE `node_man_pluginconfigtemplate` ADD CONSTRAINT `node_man_pluginconfigtem_plugin_name_plugin_versi_2c31949f_uniq` UNIQUE (`plugin_name`, `plugin_version`, `name`, `version`);",
"CREATE INDEX `node_man_pluginconfigtemplate_plugin_name_49d483c6` ON `node_man_pluginconfigtemplate` (`plugin_name`);",
]
for sql in create_tables:
logger.info("running create sql: {}".format(sql))
cursor.execute(sql)
cursor.execute(
"INSERT INTO django_migrations(app, name, applied) VALUES ('node_man', '0001_initial', now());",
)
for sql in insert_sqls:
logger.info("running insert sql: {}".format(sql))
try:
cursor.execute(sql)
except Exception as error:
logger.error("insert_sql_failed: {}".format(sql))
db.commit()
migrate_proxy(hosts, id_data, proxy_process_data)
migrate_process_status(process_data, process_data_no_listen_port)
migrate_cloud_creator(clouds)
migrate_pipeline_tree(pipeline_trees)
except Exception as e:
print("upgrade error: %s" % traceback.format_exc())
# 发生错误时回滚
print("Start Rolling Back...")
db.rollback()
print("Rolling Back End")
def upgrade_ce():
# 删除V1的表
drop_v1_tables = [
"DROP TABLE node_man_agentversion;",
"DROP TABLE node_man_cloud;",
"DROP TABLE node_man_cmdbhosts;",
"DROP TABLE node_man_cpuload;",
"DROP TABLE node_man_downloadrecord;",
"DROP TABLE node_man_gseplugindesc;",
"DROP TABLE node_man_host;",
"DROP TABLE node_man_hoststatus;",
"DROP TABLE node_man_job;",
"DROP TABLE node_man_jobtask;",
"DROP TABLE node_man_kv;",
"DROP TABLE node_man_packages;",
"DROP TABLE node_man_pluginconfiginstance;",
"DROP TABLE node_man_pluginconfigtemplate;",
"DROP TABLE node_man_proccontrol;",
"DROP TABLE node_man_profile;",
"DROP TABLE node_man_tasklog;",
"DROP TABLE node_man_uploadpackage;",
"DROP TABLE Records",
"DROP TABLE Excludes",
"DROP TABLE Filters",
"DELETE FROM django_migrations where app='node_man';",
"DELETE FROM django_migrations where app='requests_tracker';",
]
# 直接迁移数据的表
direct_migrate_tables = [
"node_man_packages",
"node_man_gseplugindesc",
"node_man_pluginconfiginstance",
"node_man_pluginconfigtemplate",
"node_man_proccontrol",
"node_man_uploadpackage",
]
# 新增字段的表需配上默认值
table_values_map = {
"node_man_subscriptioninstancerecord": {
"default_keys": ["need_clean"],
"default_values": ['"0"'],
"where": "where is_latest=1",
}
}
insert_sqls = []
for table_name in direct_migrate_tables:
logger.info("migrating {}".format(table_name))
cursor.execute("select * from {} {};".format(table_name, table_values_map.get(table_name, {}).get("where", "")))
table_values = cursor.fetchall()
if table_values:
default_keys = table_values_map.get(table_name, {}).get("default_keys", [])
default_values = table_values_map.get(table_name, {}).get("default_values", [])
table_key_lists = default_keys + list(table_values[0].keys())
| |
<reponame>C6SUMMER/allinclusive-kodi-pi
#############################################################################
#
# Copyright (C) 2013 Navi-X
#
# This file is part of Navi-X.
#
# Navi-X is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Navi-X is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Navi-X. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
#############################################################################
#
# CFileloader:
# This class is a generic file loader and handles downloading a file to disk.
#############################################################################
from string import *
import sys,os.path,re,random,string,os,time,datetime,traceback,urllib,urllib2,xbmc,xbmcgui,shutil,zipfile,ftplib,filecmp,md5
#import hashlib
from settings import *
from libs2 import *
from CServer import *
try: Emulating=xbmcgui.Emulating
except: Emulating=False
#todo: Add support for FTP files.
class CFileLoader2:
def __init__(self,*args,**kwargs):
    """Initialise the loader.

    Keyword args:
        window -- optional reference to the main UI window; stored as
                  self.MainWindow so load() can update the protocol icons.

    Sets up the metadata dict (expiry timer defaults to '0') and an empty
    local-file path; load() fills these in.
    """
    window_ref = kwargs.get('window')
    if 'window' in kwargs:
        self.MainWindow = window_ref
    self.metadata = {"expires": '0'}
    self.localfile = ''
    cachedFlag = False  # NOTE(review): never read here; kept from original
######################################################################
# Description: Downloads a file in case of URL and returns absolute
# path to the local file.
#@todo: Fill parameters
# Parameters : URL=source, localfile=destination
# Return : -
######################################################################
def load(self,URL,localfile='',timeout=0,proxy="CACHING",content_type='',retries=0):
    """Resolve *URL* to a local file, downloading it if necessary.

    Parameters:
        URL          -- http/ftp URL, absolute local path, or a path relative
                        to the add-on root / data directory
        localfile    -- destination path ('' -> derive a temp-cache name)
        timeout      -- socket timeout in seconds (0 -> keep the default)
        proxy        -- cache policy: "CACHING", "SMARTCACHE", "ENABLED",
                        "INCACHE", "NEVER" or "DISABLED"
        content_type -- expected Content-Type substring ('' -> accept any)
        retries      -- extra download attempts on failure
    Results are reported through attributes, not a return value:
        self.state (0 = success, negative = failure), self.localfile,
        and self.data (file contents, only read back when localfile was '').
    """
    if (URL==''):
        self.state= -1 #failed
        return
    destfile=localfile
    self.data=''
    if not content_type in ['image']:
        TestBug({'content_type':content_type,'proxy':proxy,'URL':URL,'localfile':localfile})
    if hasattr(self,'MainWindow'):
        if not content_type in ['image']:
            #if hasattr(self.MainWindow,'logo'):
            # self.MainWindow.logo.setVisible(True)
            #if hasattr(self.MainWindow,'imgCProtocol'):
            # self.MainWindow.imgCProtocol.setVisible(True)
            # Show the protocol icon matching the source being loaded.
            if hasattr(self.MainWindow,'imgProtocol'):
                if URL.lower().startswith('ftp://') or localfile.lower().startswith('ftp://'):
                    try: self.MainWindow.imgProtocol.setImage(imageBrowse['FTP'])
                    except: pass
                else:
                    try: self.MainWindow.imgProtocol.setImage(imageBrowse['Remote'])
                    except: pass
                # self.MainWindow.imgProtocol.setVisible(True)
                # TestBug({'imageBrowse[Cache]':imageBrowse['Remote']})
            # TestBug("MainWindow was Found. Using: load")
            # TestBug({'From':'loadSmartCache: near end','MainWindow':'Was Found','cachedFlag':cachedFlag,'content_type':content_type,'proxy':proxy,'URL':URL,'localfile':localfile})
            # if cachedFlag==True:
            # self.MainWindow.logo.setVisible(False)
            # else:
            # self.MainWindow.logo.setVisible(True)
        #else:
        # TestBug({'From':'loadSmartCache: near end','MainWindow':'Not Found','cachedFlag':cachedFlag,'content_type':content_type,'proxy':proxy,'URL':URL,'localfile':localfile})
    if (URL[:4]=='http') or (URL[:3]=='ftp'):
        # Remote resource: derive a unique cache-file name from an MD5 of the URL.
        sum_str=''
        if proxy != "DISABLED":
            #sum=0
            ##calculate hash of URL
            #for i in range(len(URL)):
            # sum=sum+(ord(URL[i]) * i)
            #sum_str=str(sum)
            #sum_str=hashlib.md5(URL).hexdigest()
            sum_str=md5.new(URL).hexdigest()
            if localfile != '':
                ext_pos=localfile.rfind('.') #find last '.' in the string
                if ext_pos != -1:
                    # Insert the hash before the extension so the suffix survives.
                    destfile=localfile[:ext_pos]+sum_str+localfile[ext_pos:]
                else:
                    destfile=localfile+sum_str
            else:
                destfile=tempCacheDir+sum_str
        if proxy=="INCACHE":
            # Serve only from cache; never touch the network.
            if os.path.exists(destfile)==True:
                self.localfile=destfile
                self.state=0 #success
                #todo: load file in memory if localfile=''
            else:
                self.state= -1 #failed
        elif proxy=="NEVER":
            # Always re-download, discarding any cached metadata first.
            if (os.path.exists(destfile)==True): self.deleteMetaData(destfile)
            if URL[:3]=='ftp':
                self.loadFTP(URL,destfile,timeout,proxy,content_type,retries)
            else:
                self.loadHTTP(URL,destfile,timeout,proxy,content_type,retries)
        elif (not((proxy=="ENABLED") and (os.path.exists(destfile)==True))):
            #option CACHING or SMARTCACHE is set
            #TestBug("Reached SMARTCACHE if within load function.")
            if proxy=="SMARTCACHE":
                TestBug("*Reached SMARTCACHE if within load function.")
                self.loadSmartCache(URL,destfile,timeout,proxy,content_type,retries)
                TestBug("*done SMARTCACHE if within load function.")
            else: #option CACHING or DISABLED
                self.deleteMetaData(destfile)
                if URL[:3]=='ftp':
                    self.loadFTP(URL,destfile,timeout,proxy,content_type,retries)
                else:
                    self.loadHTTP(URL,destfile,timeout,proxy,content_type,retries)
        else: #(proxy=="ENABLED") and (os.path.exists(destfile)==True)
            # Cached copy exists and plain caching is on: use it directly.
            if hasattr(self,'MainWindow'):
                if not content_type in ['image']:
                    if hasattr(self.MainWindow,'imgProtocol'):
                        try: self.MainWindow.imgProtocol.setImage(imageBrowse['Local'])
                        except: pass
            self.localfile=destfile
            self.state=0 #success
            if localfile=='':
                # No explicit destination requested: read contents into memory.
                try:
                    f=open(self.localfile,'r')
                    self.data=f.read()
                    f.close()
                except Exception,e:
                    self.state= -1 ;print'Error EXCEPTION '+str(e) #failed
    else: #localfile
        if URL=='' and localfile != '': URL=localfile
        if len(URL) > 2:
            iLocFile=False
            if (URL[1]==':') or (URL[0]=='/'): #absolute (local) path
                self.localfile=URL
                self.state=0 #success
                iLocFile=True
            if iLocFile==True: pass
            else: #assuming relative (local) path
                # Well-known playlist names live in the user data directory.
                if URL=='downloads.plx'or URL=='incdloads.plx'or URL=='downlqueue.plx'or URL=='My Playlists.plx'\
                or URL=='blacklist.plx'or URL=='history.plx'or URL=='favorites.plx':
                    self.localfile=datapaths+URL;
                    self.state=0 ;#success
                elif 'addons://' in URL:
                    self.localfile=xbmc.translatePath(URL.replace('addons://','special://home/addons/'))
                    self.state=0
                else:
                    self.localfile=RootDir+SEPARATOR+URL;
                    self.state=0 ;#success
                #elif 'addons://' in URL:
                # self.localfile=xbmc.translatePath(URL.replace('addons://','special://home/addons/'))
                # self.state=0
                #else: #assuming relative (local) path
                # self.localfile=RootDir+SEPARATOR+URL
                # self.state=0 #success
        #Trace(self.localfile)
        if hasattr(self,'MainWindow'):
            if not content_type in ['image']:
                if hasattr(self.MainWindow,'imgProtocol'):
                    try: self.MainWindow.imgProtocol.setImage(imageBrowse['Local'])
                    except: pass
        if localfile=='':
            try:
                f=open(self.localfile,'r')
                self.data=f.read()
                f.close()
            except Exception,e: print'Error EXCEPTION '+str(e);self.state= -1 #failed
######################################################################
# Description: Reads a file using smart caching
# Parameters : URL=source, localfile=destination
# Return : -
######################################################################
def loadSmartCache(self,URL,localfile='',timeout=0,proxy="CACHING",content_type= '',retries=0):
    """Load *URL* through the 'smart cache'.

    If a cached copy exists and has not yet expired, its contents are used
    directly.  Otherwise the cached copy is renamed to ``<file>.old``, a
    fresh download is attempted, and when the download fails or is
    suspiciously small the old copy is restored line-by-line.  The expiry
    time is then re-derived (from CachedPagesAndTimes or an 'expires='
    line in the page itself) and persisted via writeMetaData().
    Results are reported via self.state / self.data as in load().
    """
    expires=DefaultCachedExpires #3600 #seconds
    cachedFlag=False
    TestBug(["Attempting SmartCache"])
    #if hasattr(self,'MainWindow'):
    # if hasattr(self.MainWindow,'logo'):
    # if not content_type in ['image']:
    # self.MainWindow.logo.setVisible(False)
    # TestBug("MainWindow was Found. Using: loadSmartCache")
    if os.path.exists(localfile)==True:
        self.readMetaData(localfile)
        if self.metadata["expires"] != '0':
            expires=int(self.metadata["expires"])
        #check if the file is expired
        creationtime=os.path.getmtime(localfile)
        currenttime=time.time()
        deltatime=currenttime-creationtime #;deltatime =int(deltatime)*1000
        #dialog=xbmcgui.Dialog(); dialog.ok("DEBUG ",'expires = ' + str(expires),'deltatime = ' + str(deltatime))
        if deltatime < expires:
            # Cache hit: serve the cached copy and return immediately.
            self.state=0 #success
            try:
                f=open(localfile,'r')
                self.data=f.read()
                f.close()
            except Exception,e: print 'error '+str(e);self.state= -1 #failed
            TestBug(["Used SmartCache?"])
            if hasattr(self,'MainWindow'):
                if not content_type in ['image']:
                    #if hasattr(self.MainWindow,'logo'):
                    # self.MainWindow.logo.setImage(imageBrowse['Cache'])
                    # self.MainWindow.logo.setVisible(False)
                    # TestBug(["MainWindow was Found. Just Fetched from cache. Using: loadSmartCache"])
                    if hasattr(self.MainWindow,'imgProtocol'):
                        try: self.MainWindow.imgProtocol.setImage(imageBrowse['Cache'])
                        except: pass
                        # self.MainWindow.imgProtocol.setVisible(False)
                        TestBug(["MainWindow was Found. Just Fetched from cache. Using: loadSmartCache"])
                        TestBug({'imageBrowse[Cache]':imageBrowse['Cache']})
                    #if hasattr(self.MainWindow,'imgCProtocol'):
                    # self.MainWindow.imgCProtocol.setVisible(True)
                    # TestBug({'imageBrowse[Cache]':imageBrowse['Cache']})
            return
        #rename the existing (expired file)
        os.rename(localfile,localfile+".old")
    #load the file
    try:
        if URL[:3]=='ftp':
            self.loadFTP(URL,localfile,timeout,proxy,content_type,retries)
        else: self.loadHTTP(URL,localfile,timeout,proxy,content_type,retries)
    except: print('SMARTCACHE url fail '+URL); #print('proxy'+proxy); # print to log
    #this will creat a new cached file from the existing file and make it usable
    if os.path.exists(localfile+".old")==True:
        if os.path.exists(localfile)==False: #see that the new file was not loaded from the server
            open(localfile,'a').close() #create an empty file
        old_ff=open(localfile+".old",'r')
        if (os.path.getsize(localfile)==0) or ((os.path.getsize(localfile) < 300) and ((os.path.getsize(localfile)+200) < os.path.getsize(localfile+".old"))): #0: #check to see if there is any data in the file
            # Download empty/truncated: restore the previous cached copy.
            old_ff.close()
            try:
                with open(localfile+".old",'r') as old_file : # opens the old file with auto close
                    for line in old_file:
                        with open(localfile,'a+') as new_file: #open the new file with autoclose
                            new_file.write(line) #copies the line in old_file to new_file
                cachedFlag=True
                if hasattr(self,'MainWindow'):
                    if hasattr(self.MainWindow,'logo'):
                        try: self.MainWindow.imgProtocol.setImage(imageBrowse['Rewrite'])
                        except: pass
                #xbmc.executebuiltin( "XBMC.Notification(%s,%s,%i)" % ( 'Server Error', 'You are reading cached files', 5000 ) )
                #dialog=xbmcgui.Dialog(); dialog.ok("DEBUG ","Reading old cached page", str(URL));print"DEBUG CFL 284 Reading old cached page "+str(URL)
                with open(localfile,'r')as newFile:
                    self.data=newFile.read(); self.state=0 #success
            except Exception,e: print 'Error '+str(e);self.state= -3 #failed
        else: old_ff.close()
        os.remove(localfile+".old")
    elif os.path.exists(localfile)==False and os.path.exists(localfile+".old")==False: self.state= -3
    if cachedFlag==False:
        # Fresh download succeeded: recompute the expiry for this page.
        try:
            if URL.lower().startswith(nxserver_URL.lower()):
                for ZZZA,ZZZB in CachedPagesAndTimes:
                    if URL.lower()==ZZZB:
                        expires=(ZZZA*60)*60
                # An 'expires=<hours>' line in the page body overrides the default.
                with open(localfile,'r') as data:
                    for line in data:
                        if line and line[0] != '#':
                            index=line.find('=')
                            if index!= -1:
                                key=line[:index]
                                value=line[index+1:]
                                if key=='expires':
                                    expires=((int(value) * 60) * 60) ;#dialog=xbmcgui.Dialog(); dialog.ok('CFL 238','key= '+str(key),'value='+str(value),'expires'+str(expires))
                                    break
                                elif key=='type': break
                    data.close()
                TestBug({"url":URL.lower(),'expires':expires})
        except Exception,e: print'DEBUG CFL312 expiration error'+str(e); expires=DefaultCachedExpires #3600
    else:
        expires=DefaultCachedExpires #3600
    self.metadata["expires"]=str(expires)
    self.writeMetaData(localfile)
    if cachedFlag==False:
        try:
            with open(localfile, 'r')as f:
                self.data=f.read();self.state=0
        except: self.state= -3 #failed
    #if hasattr(self,'MainWindow'):
    # if hasattr(self.MainWindow,'logo'):
    # TestBug({'From':'loadSmartCache: near end','MainWindow':'Was Found','cachedFlag':cachedFlag,'content_type':content_type,'proxy':proxy,'URL':URL,'localfile':localfile})
    # if cachedFlag==True:
    # self.MainWindow.logo.setVisible(False)
    # else:
    # self.MainWindow.logo.setVisible(True)
    #else:
    # TestBug({'From':'loadSmartCache: near end','MainWindow':'Not Found','cachedFlag':cachedFlag,'content_type':content_type,'proxy':proxy,'URL':URL,'localfile':localfile})
    #end of function
######################################################################
# Description: Downloads a file in case of URL and returns absolute
# path to the local file.
#@todo: Fill parameters
# Parameters : URL=source, localfile=destination
# Return : -
######################################################################
def loadHTTP(self,URL,localfile='',timeout=0,proxy="",content_type= '',retries=0): #proxy="CACHING"
if timeout != 0:
socket_setdefaulttimeout(timeout)
self.state= -1 #failure
counter=0
while (counter <= retries) and (self.state != 0):
counter=counter+1
try:
cookies=''
if URL.find(nxserver_URL) != -1:
cookies='platform='+platform+'; version='+Version+'.'+SubVersion
cookies=cookies+'; nxid='+nxserver.user_id
values={'User-Agent':'Mozilla/4.0 (compatible;MSIE 7.0;Windows NT 6.0)','Cookie':cookies}
else:
values={'User-Agent':'Mozilla/4.0 (compatible;MSIE 7.0;Windows NT 6.0)'}
#print URL #print 'values = '+str(values)
### check if a differant lists and increase their timeout values
if timeout != 0: t_timer=timeout #allow differant values passed in
else:
t_timer=url_open_timeout #20
for var in '/search/','/user_list','/update','/new':
if var in URL and var=='/search/': t_timer=int(t_timer) * 3; break #t_timer= 60; break
elif var in URL: t_timer=int(t_timer) * 2 #t_timer=40
#if 'navixtreme' in URL: values = {'jibberish'} ###### used to stop from connecting to navi server
req=urllib2.Request(URL,None,values); #TestBug('req= '+str(req))
f=urllib2.urlopen(req,None,timeout = t_timer) ######### where it hangs and faults if server is down
headers=f.info()
type=headers.get('Content-Type',''); #type=headers['Content-Type']
if (content_type != '') and (type.find(content_type) == -1):
#unexpected type
if timeout != 0:
socket_setdefaulttimeout(url_open_timeout)
self.state= -1 #failed
break #do not try again
#open the destination file
self.data=f.read()
file=open(localfile,"wb")
file.write(self.data)
file.close()
f.close()
self.localfile=localfile
self.state=0 #success
except Exception,e:
if hasattr(e,'code'):
if (not '.png' in URL.lower()) and (not '.jpg' in URL.lower()) and (not '.jpeg' in URL.lower()) and (not '.bmp' in URL.lower()) and (not '.gif' in URL.lower()) and (not '.psx' in URL.lower()):
TestBug('The server could not fulfill the request. URL='+str(URL))
TestBug(['Error code: ',e.code])
elif hasattr(e, 'reason'):
if (not '.png' in URL.lower()) and (not '.jpg' in URL.lower()) and (not '.jpeg' in URL.lower()) and (not '.bmp' in URL.lower()) and (not '.gif' in URL.lower()) and (not '.psx' in URL.lower()):
#Message("Failed | |
= (credentials['username'], credentials['password'])
continue
elif e.name in ('UsernameOrPasswordError', 'OTPMismatchError'):
if attempt < 3:
if e.name == 'UsernameOrPasswordError':
warn("Incorrect username and/or password")
else:
warn("Incorrect verification code")
attempt += 1
continue
else:
err_exit("Incorrect username and/or password", arg_parser=parser)
else:
err_exit("Login error: {}".format(e), arg_parser=parser)
except Exception as e:
err_exit("Login error: {}".format(e), arg_parser=parser)
sec_context=json.dumps({'auth_token': token_res["access_token"], 'auth_token_type': token_res["token_type"]})
if using_default:
set_api(dxpy.DEFAULT_APISERVER_PROTOCOL, dxpy.DEFAULT_APISERVER_HOST, dxpy.DEFAULT_APISERVER_PORT, args.save)
else:
sec_context = '{"auth_token":"' + args.token + '","auth_token_type":"Bearer"}'
# Ensure correct API server
if args.host is None:
set_api(dxpy.DEFAULT_APISERVER_PROTOCOL, dxpy.DEFAULT_APISERVER_HOST, dxpy.DEFAULT_APISERVER_PORT, args.save)
using_default = True
os.environ['DX_SECURITY_CONTEXT'] = sec_context
dxpy.set_security_context(json.loads(sec_context))
if args.save:
dxpy.config.write("DX_SECURITY_CONTEXT", sec_context)
# If login via token, obtain current username from auth server.
if args.token is not None:
host, port = None, None
if dxpy.APISERVER_HOST not in ['api.dnanexus.com', 'stagingapi.dnanexus.com']:
host, port = args.host, args.port
try:
dxpy.config.write("DX_USERNAME", dxpy.user_info(host, port)['username'])
except DXError as details:
# Consider failure to obtain username to be a non-fatal error.
print("Could not obtain username from auth server. Consider setting both --host and --port.", file=sys.stderr)
print(fill(str(details)), file=sys.stderr)
if using_default or args.staging:
try:
greeting = dxpy.api.system_greet({'client': 'dxclient', 'version': 'v'+dxpy.TOOLKIT_VERSION})
if greeting.get('messages'):
print(BOLD("New messages from ") + DNANEXUS_LOGO())
for message in greeting['messages']:
print(BOLD("Date: ") + datetime.datetime.fromtimestamp(message['date']//1000).ctime())
print(BOLD("Subject: ") + fill(message['title'], subsequent_indent=' '*9))
body = message['body'].splitlines()
if len(body) > 0:
print(BOLD("Message: ") + body[0])
for line in body[1:]:
print(' '*9 + line)
except Exception as e:
warn("Error while retrieving greet data: {}".format(e))
args.current = False
args.name = None
args.level = 'CONTRIBUTE'
args.public = False
if args.host is not None and not args.staging and not using_default:
setenv(args)
elif args.projects:
pick_and_set_project(args)
if args.save and not args.token:
msg = "You are now logged in. Your credentials are stored in {conf_dir} and will expire in {timeout}. {tip}"
tip = "Use " + BOLD("dx login --timeout") + " to control the expiration date, or " + BOLD("dx logout") + \
" to end this session."
timeout = datetime.timedelta(seconds=normalize_time_input(args.timeout, default_unit='s') // 1000)
print(fill(msg.format(conf_dir=dxpy.config.get_user_conf_dir(),
timeout=timeout,
tip=tip)))
def logout(args):
    """Invalidate the current auth token on the auth server and forget it.

    Asks the auth server to destroy the token identified by its SHA-256
    signature, then clears the local security context (in memory when
    running interactively, otherwise in the on-disk config).
    No-op when not logged in.
    """
    if dxpy.AUTH_HELPER is not None:
        authserver = dxpy.get_auth_server_name(args.host, args.port, args.protocol)
        print("Deleting credentials from {}...".format(authserver))
        token = dxpy.AUTH_HELPER.security_context["auth_token"]
        try:
            # hashlib requires bytes: hashing the token string directly raises
            # TypeError on Python 3 (which the bare except below would turn
            # into an unexplained err_exit), so encode it first.
            token_sig = hashlib.sha256(token.encode('utf-8')).hexdigest()
            dxpy.DXHTTPRequest(authserver + "/system/destroyAuthToken",
                               dict(tokenSignature=token_sig),
                               prepend_srv=False,
                               max_retries=1)
            print("Deleted token with signature", token_sig)
        except dxpy.DXAPIError as e:
            print(format_exception(e))
        except:
            err_exit()
        if state["interactive"]:
            dxpy.AUTH_HELPER = None
        else:
            dxpy.config.write("DX_SECURITY_CONTEXT", None)
def set_api(protocol, host, port, write):
    """Point the dxpy config at the given API server.

    Updates protocol/host/port in the in-memory config and, when *write*
    is true, persists the config to disk.
    """
    new_settings = {
        'DX_APISERVER_PROTOCOL': protocol,
        'DX_APISERVER_HOST': host,
        'DX_APISERVER_PORT': port,
    }
    dxpy.config.update(**new_settings)
    if write:
        dxpy.config.save()
def set_project(project, write, name=None):
    """Make *project* the current project (or workspace, inside a job).

    Outside a job the project context ID and display *name* are recorded;
    inside a job only the workspace ID changes.  When *write* is true the
    config is persisted to disk.  Always updates dxpy's workspace ID.
    """
    running_in_job = dxpy.JOB_ID is not None
    if running_in_job:
        dxpy.config["DX_WORKSPACE_ID"] = project
    else:
        dxpy.config["DX_PROJECT_CONTEXT_ID"] = project
        dxpy.config["DX_PROJECT_CONTEXT_NAME"] = name
    if write:
        dxpy.config.save()
    dxpy.set_workspace_id(project)
def set_wd(folder, write):
    """Record *folder* as the dx CLI's current working directory.

    Persists the config to disk when *write* is true, mirroring
    set_project().
    """
    dxpy.config["DX_CLI_WD"] = folder
    if write:
        dxpy.config.save()
# Will raise KeyboardInterrupt, EOFError
def prompt_for_env_var(prompt_str, env_var_str):
    """Prompt the user for a value, offering the env var's value as default.

    Re-prompts until a non-empty answer is given, unless *env_var_str* is
    set in the environment, in which case an empty answer returns that
    default.  Will raise KeyboardInterrupt, EOFError.
    """
    default = os.environ.get(env_var_str)
    if default is None:
        prompt = prompt_str + ': '
    else:
        prompt = prompt_str + ' [' + default + ']: '
    while True:
        value = input(prompt)
        if value:
            return value
        if default is not None:
            return default
def pick_and_set_project(args):
    """Interactively let the user pick a project and make it current.

    Lists projects matching args.name at args.level (or public projects
    when args.public is set), ten at a time; 'm' fetches the next page.
    On selection, sets the project context and resets the working
    directory to '/'.  Exits via parser.exit/err_exit when nothing is
    available.
    """
    try:
        result_generator = dxpy.find_projects(describe=True,
                                              name=args.name, name_mode='glob',
                                              level=('VIEW' if args.public else args.level),
                                              explicit_perms=(not args.public if not args.public else None),
                                              public=(args.public if args.public else None),
                                              first_page_size=10)
    except:
        err_exit('Error while listing available projects')
    any_results = False
    first_pass = True
    while True:
        # Pull the next page of up to 10 projects from the generator.
        results = []
        for _ in range(10):
            try:
                results.append(next(result_generator))
                any_results = True
            except StopIteration:
                break
            except:
                err_exit('Error while listing available projects')
        if not any_results:
            # Nothing matched at all (not just an exhausted page).
            parser.exit(0, '\n' + fill("No projects to choose from. You can create one with the command " +
                                       BOLD("dx new project") + ". To pick from projects for which you only have " +
                                       " VIEW permissions, use " + BOLD("dx select --level VIEW") + " or " +
                                       BOLD("dx select --public") + ".") + '\n')
        elif len(results) == 0:
            # User paged past the last result.
            err_exit('No projects left to choose from.', 3, expected_exceptions=StopIteration)
        if first_pass:
            if not args.public and args.level == "CONTRIBUTE":
                print('')
                print(fill("Note: Use " + BOLD("dx select --level VIEW") + " or " + BOLD("dx select --public") +
                           " to select from projects for which you only have VIEW permissions."))
            first_pass = False
        project_ids = [result['id'] for result in results]
        # Eliminate current default if it is not a found project
        try:
            default = project_ids.index(dxpy.WORKSPACE_ID)
        except:
            default = None
        print("")
        if args.public:
            print("Available public projects:")
        else:
            print("Available projects ({level} or higher):".format(level=args.level))
        choice = try_call(pick,
                          [result['describe']['name'] + ' (' + result['level'] + ')' for result in results],
                          default,
                          more_choices=(len(results) == 10))
        if choice == 'm':
            # 'm' == show more choices; loop around for the next page.
            continue
        else:
            print('Setting current project to: ' + BOLD(results[choice]['describe']['name']))
            # Persist the selection unless we are in an interactive session
            # without --save.
            set_project(project_ids[choice], not state['interactive'] or args.save, name=results[choice]['describe']['name'])
            state['currentproj'] = results[choice]['describe']['name']
            set_wd('/', not state['interactive'] or args.save)
            return
def whoami(args):
    """Print the logged-in user's ID (with --id) or their handle.

    Exits with code 3 when no auth token is available.
    """
    if dxpy.AUTH_HELPER is None:
        err_exit('You are not logged in; run "dx login" to obtain a token.', 3)
    user_id = dxpy.whoami()
    # Only hit the API for the handle when the raw ID was not requested.
    output = user_id if args.user_id else dxpy.api.user_describe(user_id)['handle']
    print(output)
def setenv(args):
    """Interactively (re)configure the API server environment variables.

    With --current, just persists the environment already in effect;
    otherwise prompts for protocol/host/port.  With --projects, follows up
    with an interactive project selection.  Outside an interactive shell
    the settings are always saved.
    """
    if not state['interactive']:
        args.save = True
    if args.current:
        # Keep whatever is already configured; just write it to disk.
        dxpy.config.save()
    else:
        prompts = (('API server protocol (choose "http" or "https")', 'DX_APISERVER_PROTOCOL'),
                   ('API server host', 'DX_APISERVER_HOST'),
                   ('API server port', 'DX_APISERVER_PORT'))
        try:
            answers = [prompt_for_env_var(text, var) for text, var in prompts]
            set_api(answers[0], answers[1], answers[2], args.save)
        except:
            # Also covers KeyboardInterrupt/EOFError from the prompts.
            raise DXCLIError("Error setting up API variables")
    if args.projects:
        args.name = None
        args.public = False
        args.current = False
        args.level = 'CONTRIBUTE'
        pick_and_set_project(args)
def clearenv(args):
    """Clear the saved dxpy environment (full reset with --reset).

    Refuses to run inside the interactive shell, where clearing the
    environment mid-session would be inconsistent.
    """
    if not args.interactive:
        dxpy.config.clear(reset=args.reset)
        return
    print("The clearenv command is not available in the interactive shell")
def env(args):
    """Print the current dxpy environment (server, auth, workspace).

    Three output formats:
      --bash:     `export VAR=...` lines suitable for eval'ing in a shell
      --dx-flags: a single line of dx command-line flags
      default:    a human-readable table
    """
    if args.bash:
        # Shell-exportable form; the security context is JSON, so single-quote it.
        if dxpy.AUTH_HELPER is not None:
            print("export DX_SECURITY_CONTEXT='" + json.dumps(dxpy.AUTH_HELPER.security_context) + "'")
        if dxpy.APISERVER_PROTOCOL is not None:
            print("export DX_APISERVER_PROTOCOL=" + dxpy.APISERVER_PROTOCOL)
        if dxpy.APISERVER_HOST is not None:
            print("export DX_APISERVER_HOST=" + dxpy.APISERVER_HOST)
        if dxpy.APISERVER_PORT is not None:
            print("export DX_APISERVER_PORT=" + dxpy.APISERVER_PORT)
        if dxpy.WORKSPACE_ID is not None:
            print("export DX_PROJECT_CONTEXT_ID=" + dxpy.WORKSPACE_ID)
    elif args.dx_flags:
        # Accumulate every known setting into one space-separated flag string.
        flags_str = ''
        if dxpy.AUTH_HELPER is not None:
            token = dxpy.AUTH_HELPER.security_context.get('auth_token', None)
            if token is not None:
                flags_str += ' --auth-token ' + token
        if dxpy.APISERVER_PROTOCOL is not None:
            flags_str += ' --apiserver-protocol ' + dxpy.APISERVER_PROTOCOL
        if dxpy.APISERVER_HOST is not None:
            flags_str += ' --apiserver-host ' + dxpy.APISERVER_HOST
        if dxpy.APISERVER_PORT is not None:
            flags_str += ' --apiserver-port ' + dxpy.APISERVER_PORT
        if dxpy.WORKSPACE_ID is not None:
            flags_str += ' --project-context-id ' + dxpy.WORKSPACE_ID
        print(flags_str)
    else:
        # Human-readable table; tabs provide rough column alignment.
        if dxpy.AUTH_HELPER is not None:
            print("Auth token used\t\t" + dxpy.AUTH_HELPER.security_context.get("auth_token", "none"))
        print("API server protocol\t" + dxpy.APISERVER_PROTOCOL)
        print("API server host\t\t" + dxpy.APISERVER_HOST)
        print("API server port\t\t" + dxpy.APISERVER_PORT)
        print("Current workspace\t" + str(dxpy.WORKSPACE_ID))
        if "DX_PROJECT_CONTEXT_NAME" in os.environ:
            print(u'Current workspace name\t"{n}"'.format(n=dxpy.config.get("DX_PROJECT_CONTEXT_NAME")))
        print("Current folder\t\t" + dxpy.config.get("DX_CLI_WD", "None"))
        print("Current user\t\t" + str(os.environ.get("DX_USERNAME")))
def get_pwd():
    """Return 'project-name:folder' for the current context, or None.

    Lazily resolves (and caches in state['currentproj']) the project name
    via the API; failures to describe the project are silently ignored.
    """
    if dxpy.WORKSPACE_ID is None:
        return None
    if state['currentproj'] is None:
        try:
            # Cache the name so subsequent prompts avoid an API round-trip.
            state['currentproj'] = dxpy.api.project_describe(dxpy.WORKSPACE_ID)['name']
        except:
            pass
    if state['currentproj'] is None:
        return None
    return state['currentproj'] + ':' + dxpy.config.get('DX_CLI_WD', u'/')
def pwd(args):
    """Print the current 'project:folder' location, or exit if unset."""
    location = get_pwd()
    if location is None:
        err_exit('Current project is not set', 3)
    else:
        print(location)
def api(args):
    """Issue a raw API call and pretty-print the JSON response.

    The request body comes from args.input_json, overridden by the file
    named in args.input ('-' reads stdin).  The route is built from
    args.resource and args.method.
    """
    payload = json.loads(args.input_json)
    if args.input is not None:
        source = sys.stdin if args.input == '-' else open(args.input, 'r')
        with source as fd:
            raw = fd.read()
        try:
            payload = json.loads(raw)
        except ValueError:
            err_exit('Error: file contents could not be parsed as JSON', 3)
    resp = None
    route = '/' + args.resource + '/' + args.method
    try:
        resp = dxpy.DXHTTPRequest(route, payload)
    except:
        err_exit()
    try:
        print(json.dumps(resp, indent=4))
    except ValueError:
        err_exit('Error: server response could not be parsed as JSON', 3)
def invite(args):
    """Invite a user (or PUBLIC) to a project at the requested permission level."""
    # Appending ":" to a valid project ID or name does not hurt path resolution
    if ':' not in args.project:
        args.project += ':'
    project, _none, _none = try_call(resolve_existing_path,
                                     args.project, 'project')
    # Bare usernames (no dash, not an email address, not PUBLIC) get the user- prefix
    if args.invitee != 'PUBLIC' and '-' not in args.invitee and '@' not in args.invitee:
        args.invitee = 'user-' + args.invitee.lower()
    invite_input = {"invitee": args.invitee, "level": args.level}
    if not args.send_email:
        invite_input["suppressEmailNotification"] = True
    try:
        resp = dxpy.api.project_invite(project, invite_input)
    except:
        err_exit()
    print('Invited ' + args.invitee + ' to ' + project + ' (' + resp['state'] + ')')
def uninvite(args):
    """Revoke a user's (or PUBLIC's) permissions on a project."""
    # Appending ":" to a valid project ID or name does not hurt path resolution
    if ':' not in args.project:
        args.project += ':'
    project, _none, _none = try_call(resolve_existing_path,
                                     args.project, 'project')
    # Bare usernames (no dash, not PUBLIC) get the user- prefix
    if args.entity != 'PUBLIC' and '-' not in args.entity:
        args.entity = 'user-' + args.entity.lower()
    try:
        # Setting the permission level to None removes the member entirely
        dxpy.api.project_decrease_permissions(project, {args.entity: None})
    except:
        err_exit()
    print('Uninvited ' + args.entity + ' from ' + project)
def select(args):
if args.project is not None:
if get_last_pos_of_char(':', args.project) != -1:
args.path = args.project
else:
args.path = args.project + ':'
cd(args)
print("Selected project", split_unescaped(":", | |
data from lists to arrays
self.species_pops = np.array(self.species_pops)
self.rxn_freqs = np.array(self.rxn_freqs)
self.propensities = np.array(self.propensities)
self.Props_integ = np.array(self.Props_integ)
self.traj_derivs = np.array(self.traj_derivs)
self.events_total = np.array(self.events_total)
self.CPU_total = np.array(self.CPU_total)
self.time_avg_covs = np.array(self.time_avg_covs)
self.runAvg = copy.deepcopy(dummy_run) # Initialize run average with information from dummyrun
self.avg_updated = False
def AverageRuns(self):
'''
Average multiple trajectories
'''
self.runAvg.Path = self.ParentFolder
self.runAvg.specnumout.t = self.t_vec
self.runAvg.specnumout.spec = np.mean(self.species_pops, axis = 0)
self.runAvg.procstatout.events = np.mean(self.rxn_freqs, axis = 0)
if not self.runAvg.prop is None:
self.runAvg.prop = np.mean(self.propensities, axis = 0)
if not self.runAvg.spec_num_int is None:
self.runAvg.spec_num_int = np.mean(self.time_avg_covs,axis = 0)
if not self.runAvg.propCounter is None:
self.runAvg.propCounter = np.mean(self.Props_integ, axis = 0)
self.runAvg.genout.events_occurred = np.mean(self.events_total)
self.runAvg.genout.CPU_time = np.mean(self.CPU_total)
self.avg_updated = True
def Compute_batch_data(self, n_batches_total = 1000):
'''
Compute varaibles needed for the analysis, based on the input
'''
self.N_batches = n_batches_total
# Find index of product molecule
try:
self.gas_prod_ind = len( self.runAvg.simin.surf_spec ) + self.runAvg.simin.gas_spec.index(self.gas_product) # Find the index of the product species and adjust index to account for surface species
except:
raise Exception('Product species ' + self.gas_product + ' not found.')
# Find the stochiometry of the product molecule for each reaction
nRxns = len(self.runAvg.genout.RxnNameList)
self.TOF_stoich = np.zeros(nRxns)
for i, elem_stoich in enumerate(self.runAvg.genout.Nu):
self.TOF_stoich[i] = elem_stoich[self.gas_prod_ind]
self.Nbpt = np.max( [ 3 , (self.N_batches-1) / self.n_trajectories + 2 ] ) # Set the number of batches per trajectory
self.batch_length = self.t_vec[-1] / self.Nbpt
    def Compute_rate(self, include_ACF_CI = True):
        '''
        Use batch means to compute the reaction rate (and confidence interval)
        Also compute the autocorrelation function (ACF) (and confidence interval)

        :params include_ACF_CI: Whether to use statistical bootstrapping to compute confidence intervals.
            This takes some CPU time.

        :returns: The rate
        '''
        if not self.avg_updated:
            self.AverageRuns()
        self.Compute_batch_data()

        # Edges of the Nbpt time batches over the sampled interval
        bin_edges = np.linspace(0, self.t_vec[-1], self.Nbpt + 1)
        rate_data = np.zeros([self.n_trajectories, self.Nbpt])
        for i in range(self.Nbpt):
            # time_search_interp returns bracketing sample indices and linear
            # interpolation weights for a requested time point
            idb_start = self.runAvg.time_search_interp(bin_edges[i])
            idb_end = self.runAvg.time_search_interp(bin_edges[i+1])
            prop_integ_start = idb_start[1][0] * self.Props_integ[:, idb_start[0][0], :] + idb_start[1][1] * self.Props_integ[:, idb_start[0][1], :]
            prop_integ_end = idb_end[1][0] * self.Props_integ[:, idb_end[0][0], :] + idb_end[1][1] * self.Props_integ[:, idb_end[0][1], :]
            # Batch-mean rate: change in integrated propensity per unit time,
            # weighted by the product stoichiometry of each reaction
            rate_data[:,i] = np.dot ( ( prop_integ_end - prop_integ_start ) / self.batch_length , self.TOF_stoich )
        '''
        Compute confidence in the rate (assuming IID)
        '''
        # Discard the first batch of every trajectory (transient) and pool the rest
        all_batch_rates = rate_data[:,1::]
        all_batch_rates = all_batch_rates.flatten()
        confidence=0.90
        n = len(all_batch_rates)
        self.rate, se = np.mean(all_batch_rates), scipy.stats.sem(all_batch_rates)
        # NOTE(review): scipy.stats.t._ppf is a private API; the public
        # equivalent is scipy.stats.t.ppf — confirm before changing
        self.rate_CI = se * scipy.stats.t._ppf((1+confidence)/2., n - 1)
        '''
        Compute autocorrelation function
        '''
        # Compute the autocorrelation function for the batch means of the rate
        # c1/c2 are the lag-1 pairs of batch means pooled across trajectories
        c1 = rate_data[:,1:-1:]
        c1 = c1.flatten()
        c2 = rate_data[:,2::]
        c2 = c2.flatten()
        if np.var(all_batch_rates) == 0: # If the variance of the batch means is zero, autocorrelation cannot be computed.
            self.ACF = None
            self.ACF_CI = None
        else:
            self.ACF = ( np.mean(c1 * c2) - np.mean(c1) * np.mean(c2) ) / np.var(all_batch_rates)
            if include_ACF_CI:
                '''
                Compute confidence interval for ACF
                '''
                # Bootstrap: resample the lag-1 pairs with replacement and
                # recompute the ACF, then take a 90% interval half-width
                N_boot = 100
                ACF_dist = np.zeros(N_boot)
                for i in range(N_boot):
                    subpop_inds = np.random.randint(len(c1), size=len(c1))
                    c1_new = c1[subpop_inds]
                    c2_new = c2[subpop_inds]
                    ACF_dist[i] = ( np.mean(c1_new * c2_new) - np.mean(c1_new) * np.mean(c2_new) ) / np.var(all_batch_rates)
                ACF_dist = sorted(ACF_dist)
                ACF_high = ACF_dist[int(0.95 * N_boot)]
                ACF_low = ACF_dist[int(0.05 * N_boot)]
                self.ACF_CI = (ACF_high - ACF_low) / 2
        # Rescale error bars on the rate based on the ACF
        # (no-op when the ACF could not be computed above)
        if not self.ACF is None:
            # Correlated-samples correction factor applied to the IID CI
            var_scale = 1
            for i in range(1,self.Nbpt):
                var_scale = var_scale + 2 * ( 1 - i / ( self.Nbpt - 1 ) ) * self.ACF ** i
            self.rate_CI = self.rate_CI * np.sqrt( var_scale )
        return self.rate
def PerformSA(self, delta_t = None, ergodic = True, dp_per_bin = 10): # Need implement time point interpolation
'''
Perform likelihood ratio sensitivity analysis with a combination of time and trajectory averaging
:param delta_t: Size of the time window used for likelihood ratio sensitivity analysis. By default, it is the size of a batch.
:param ergodic: True - average the rate over the entire time interval (centered ergodic likelihood ratio)
False - use the rate at the end of the time interval (centered likelihood ratio)
Data between sample points is estimated with linear interpolation
'''
self.Compute_batch_data()
# Use entire trajectory length as the default time window
if delta_t is None:
delta_t = self.batch_length
if not self.avg_updated:
self.AverageRuns()
if delta_t > self.t_vec[-1] or delta_t < 0:
print 'Time window: ' + str(delta_t)
print 'Final time: ' + str(self.t_vec[-1])
raise Exception('Time window is too large. Insufficient sampling.')
bin_edges = np.linspace(0, self.t_vec[-1], self.Nbpt * dp_per_bin + 1)
dp_per_traj = dp_per_bin * (self.Nbpt-1) + 1
rate_data_erg = np.zeros( self.n_trajectories * dp_per_traj )
W_data_all = np.zeros([ self.traj_derivs.shape[2] , self.n_trajectories * dp_per_traj ] )
fW_data = np.zeros( self.n_trajectories * dp_per_traj )
rate_contributions_all = np.zeros(self.traj_derivs.shape[2])
data_ind = 0
for traj_ind in range(self.n_trajectories):
for i in range(dp_per_traj):
idb_start = self.runAvg.time_search_interp( bin_edges[i] )
idb_end = self.runAvg.time_search_interp( bin_edges[i+dp_per_bin] )
W_start = idb_start[1][0] * self.traj_derivs[ traj_ind, idb_start[0][0] , :] + idb_start[1][1] * self.traj_derivs[ traj_ind, idb_start[0][1] , :]
W_end = idb_end[1][0] * self.traj_derivs[ traj_ind, idb_end[0][0] , :] + idb_end[1][1] * self.traj_derivs[ traj_ind, idb_end[0][1] , :]
W_data_all[:, data_ind] = W_end - W_start
if not ergodic:
idb_start = self.runAvg.time_search_interp( bin_edges[i+dp_per_bin-1] )
if ergodic:
prop_integ_start = idb_start[1][0] * self.Props_integ[traj_ind, idb_start[0][0], :] + idb_start[1][1] * self.Props_integ[traj_ind, idb_start[0][1], :]
prop_integ_end = idb_end[1][0] * self.Props_integ[traj_ind, idb_end[0][0], :] + idb_end[1][1] * self.Props_integ[traj_ind, idb_end[0][1], :]
rate_data_erg[data_ind] = np.dot ( ( prop_integ_end - prop_integ_start ) / self.batch_length , self.TOF_stoich )
rate_contributions_all = rate_contributions_all + ( ( prop_integ_end - prop_integ_start ) / self.batch_length * self.TOF_stoich )
else:
inst_rates = idb_end[1][0] * self.propensities[traj_ind, idb_end[0][0], :] + idb_end[1][1] * self.propensities[traj_ind, idb_end[0][1], :]
rate_data_erg[data_ind] = np.dot ( inst_rates , self.TOF_stoich )
rate_contributions_all = rate_contributions_all + ( inst_rates * self.TOF_stoich )
data_ind += 1
# Normalize rate contributions by the number of data points
rate_contributions_all = rate_contributions_all / ( self.n_trajectories * dp_per_traj )
# Combine forward and reverse reactions
W_data = np.zeros([self.runAvg.mechin.get_num_rxns(), self.n_trajectories * dp_per_traj])
rate_contributions = np.zeros(self.runAvg.mechin.get_num_rxns())
ind = 0
for i in range(self.runAvg.mechin.get_num_rxns()):
rxn_and_var = self.runAvg.mechin.get_rxn_var_inds(i)
if self.runAvg.mechin.rxn_list[rxn_and_var[0]].is_reversible:
W_data[i, :] = W_data_all[ind, :] + W_data_all[ind+1, :]
rate_contributions[i] = rate_contributions_all[ind] + rate_contributions_all[ind+1]
ind += 2
else:
W_data[i, :] = W_data_all[ind, :]
rate_contributions[i] = rate_contributions_all[i]
ind += 1
# Calculate NSCs
mean_rate = 0
W_mean = np.zeros( self.runAvg.mechin.get_num_rxns() )
NSCs = np.zeros(self.runAvg.mechin.get_num_rxns())
for dp in range( self.n_trajectories * dp_per_traj ):
mean_rate = mean_rate + rate_data_erg[dp]
W_mean = W_mean + W_data[ : , dp ]
NSCs = NSCs + rate_data_erg[dp] * W_data[ : , dp ]
# Normalize means
mean_rate = mean_rate / ( self.n_trajectories * dp_per_traj )
W_mean = W_mean / ( self.n_trajectories * dp_per_traj )
NSCs = NSCs / ( self.n_trajectories * dp_per_traj )
NSCs = NSCs - W_mean * mean_rate + rate_contributions # Convert from ELR to CELR
NSCs = NSCs / mean_rate # normalize
'''
Compute error bounds on NSCs
'''
N_boot = 100
NSC_sam = np.zeros([ N_boot , len(NSCs) ])
for i in range(N_boot):
# Prepare new set of data
subpop_inds = np.random.randint(self.n_trajectories * dp_per_traj, size = self.n_trajectories * dp_per_traj )
rate_data_erg_sub = rate_data_erg[subpop_inds]
W_sub = W_data[:, subpop_inds]
# Calculate NSCs
mean_rate = 0
W_mean = np.zeros(self.runAvg.mechin.get_num_rxns())
NSCsub = np.zeros(self.runAvg.mechin.get_num_rxns())
for dp in range( self.n_trajectories * dp_per_traj ):
mean_rate = mean_rate + rate_data_erg_sub[dp]
W_mean = W_mean + W_sub[ : , dp ]
NSCsub = NSCsub + rate_data_erg_sub[dp] * W_sub[ : , dp ]
# Normalize means
mean_rate = mean_rate / ( self.n_trajectories * dp_per_traj )
W_mean = W_mean / ( self.n_trajectories * dp_per_traj )
NSCsub = NSCsub / ( self.n_trajectories * dp_per_traj )
NSCsub = NSCsub - W_mean * mean_rate + rate_contributions # Convert from ELR to CELR
| |
== None ) :
lprint ( "Could not decode Map-Notify-Ack packet" )
return
if 48 - 48: i1IIi + IiII - iIii1I11I1II1 . i11iIiiIii % OOooOOo + I1ii11iIi11i
if 95 - 95: ooOoO0o + OoOoOO00 . II111iiii + Ii1I
oOoOOo . print_notify ( )
if 81 - 81: OoooooooOO / OOooOOo / Oo0Ooo
if 26 - 26: iII111i
if 93 - 93: Oo0Ooo + I1IiiI % OoOoOO00 / OOooOOo / I1ii11iIi11i
if 6 - 6: IiII
if 68 - 68: Oo0Ooo
if ( oOoOOo . record_count < 1 ) :
lprint ( "No EID-prefix found, cannot authenticate Map-Notify-Ack" )
return
if 83 - 83: OOooOOo / iIii1I11I1II1 . OoO0O00 - oO0o % Oo0Ooo
if 30 - 30: Ii1I . OoOoOO00 / oO0o . OoO0O00
iiI = lisp_eid_record ( )
if 93 - 93: i11iIiiIii
if ( iiI . decode ( oOoOOo . eid_records ) == None ) :
lprint ( "Could not decode EID-record, cannot authenticate " +
"Map-Notify-Ack" )
return
if 33 - 33: i1IIi % OoooooooOO + Oo0Ooo % I1IiiI / ooOoO0o
iiI . print_record ( " " , False )
if 40 - 40: IiII % IiII
I11i11i1 = iiI . print_eid_tuple ( )
if 9 - 9: I1IiiI * i1IIi + OOooOOo * OoOoOO00
if 8 - 8: iII111i
if 51 - 51: I1IiiI
if 72 - 72: ooOoO0o / I1ii11iIi11i . Ii1I * iII111i . iIii1I11I1II1
if ( oOoOOo . alg_id != LISP_NONE_ALG_ID and oOoOOo . auth_len != 0 ) :
oO00Oooo0o0o0 = lisp_sites_by_eid . lookup_cache ( iiI . eid , True )
if ( oO00Oooo0o0o0 == None ) :
i1i1IiIIIiI = bold ( "Site not found" , False )
lprint ( ( "{} for EID {}, cannot authenticate Map-Notify-Ack" ) . format ( i1i1IiIIIiI , green ( I11i11i1 , False ) ) )
if 35 - 35: OoO0O00 . OoOoOO00 % O0 * OoO0O00
return
if 68 - 68: OOooOOo
IIiiIiI = oO00Oooo0o0o0 . site
if 87 - 87: IiII * IiII - OoO0O00 / I1ii11iIi11i + OOooOOo / i11iIiiIii
if 21 - 21: o0oOOo0O0Ooo / oO0o + oO0o + Oo0Ooo / o0oOOo0O0Ooo
if 39 - 39: i11iIiiIii - OoO0O00 - i11iIiiIii / OoooooooOO
if 15 - 15: i1IIi . iII111i + IiII / I1ii11iIi11i - i1IIi / iII111i
IIiiIiI . map_notify_acks_received += 1
if 27 - 27: OoOoOO00 / OoooooooOO + i1IIi % iIii1I11I1II1 / OoO0O00
IIIiI1i = oOoOOo . key_id
if ( IIiiIiI . auth_key . has_key ( IIIiI1i ) ) :
OoO0o = IIiiIiI . auth_key [ IIIiI1i ]
else :
OoO0o = ""
if 73 - 73: I1ii11iIi11i / OoOoOO00 / IiII + oO0o
if 73 - 73: I11i * o0oOOo0O0Ooo * I1IiiI . OoooooooOO % I1Ii111
iIii111Ii = lisp_verify_auth ( packet , oOoOOo . alg_id ,
oOoOOo . auth_data , OoO0o )
if 9 - 9: oO0o % I1Ii111 . O0 + I1ii11iIi11i - Ii1I - I1ii11iIi11i
IIIiI1i = "key-id {}" . format ( IIIiI1i ) if IIIiI1i == oOoOOo . key_id else "bad key-id {}" . format ( oOoOOo . key_id )
if 57 - 57: i11iIiiIii
if 21 - 21: iIii1I11I1II1 / I1IiiI / iII111i
lprint ( " Authentication {} for Map-Notify-Ack, {}" . format ( "succeeded" if iIii111Ii else "failed" , IIIiI1i ) )
if 19 - 19: Oo0Ooo / iIii1I11I1II1 / I11i
if ( iIii111Ii == False ) : return
if 71 - 71: iIii1I11I1II1 * I1IiiI
if 35 - 35: O0
if 10 - 10: Ii1I - I1Ii111 / Oo0Ooo + O0
if 67 - 67: Ii1I % i11iIiiIii . Oo0Ooo
if 78 - 78: I1IiiI - iIii1I11I1II1
if ( oOoOOo . retransmit_timer ) : oOoOOo . retransmit_timer . cancel ( )
if 20 - 20: i11iIiiIii % I1IiiI % OoOoOO00
iIi11I11I1i = source . print_address ( )
ii1i1I1111ii = oOoOOo . nonce_key
if 85 - 85: I11i + OoOoOO00 * O0 * O0
if ( lisp_map_notify_queue . has_key ( ii1i1I1111ii ) ) :
oOoOOo = lisp_map_notify_queue . pop ( ii1i1I1111ii )
if ( oOoOOo . retransmit_timer ) : oOoOOo . retransmit_timer . cancel ( )
lprint ( "Dequeue Map-Notify from retransmit queue, key is: {}" . format ( ii1i1I1111ii ) )
if 92 - 92: i11iIiiIii
else :
lprint ( "Map-Notify with nonce 0x{} queue entry not found for {}" . format ( oOoOOo . nonce_key , red ( iIi11I11I1i , False ) ) )
if 16 - 16: I11i . ooOoO0o - Oo0Ooo / OoO0O00 . i1IIi
if 59 - 59: ooOoO0o - ooOoO0o % I11i + OoO0O00
return
if 88 - 88: Ii1I - ooOoO0o . Oo0Ooo
if 83 - 83: I11i + Oo0Ooo . I1ii11iIi11i * I1ii11iIi11i
if 80 - 80: i1IIi * I11i - OOooOOo / II111iiii * iIii1I11I1II1
if 42 - 42: OoOoOO00 . I11i % II111iiii
if 19 - 19: OoooooooOO
if 31 - 31: I11i . OoOoOO00 - O0 * iII111i % I1Ii111 - II111iiii
if 21 - 21: OOooOOo . Oo0Ooo - i1IIi
if 56 - 56: I11i
def lisp_map_referral_loop(mr, eid, group, action, s):
    """
    Detect a Map-Referral loop. Returns True when the referral's EID/group
    prefix is not more-specific than the prefix most recently cached on
    map-resolver 'mr' (i.e. the referral makes no forward progress), in
    which case a diagnostic line is logged. 's' is the printable address of
    the referring node.
    """
    # Only node- and map-server-referral actions can loop
    if action not in (LISP_DDT_ACTION_NODE_REFERRAL,
                      LISP_DDT_ACTION_MS_REFERRAL):
        return (False)

    # Nothing cached yet, so no loop is possible
    if (mr.last_cached_prefix[0] == None):
        return (False)

    cached_eid = mr.last_cached_prefix[0]
    cached_group = mr.last_cached_prefix[1]

    # For multicast entries compare the group prefix first, then fall back
    # to comparing the unicast EID prefix
    looping = False
    if (group.is_null() == False):
        looping = cached_group.is_more_specific(group)
    if (looping == False):
        looping = cached_eid.is_more_specific(eid)

    if (looping):
        new_prefix = lisp_print_eid_tuple(eid, group)
        old_prefix = lisp_print_eid_tuple(cached_eid, cached_group)
        lprint(("Map-Referral prefix {} from {} is not more-specific " +
                "than cached prefix {}").format(green(new_prefix, False), s,
                                                old_prefix))
    return (looping)
if 79 - 79: IiII - Oo0Ooo - iIii1I11I1II1 % OoO0O00 - iIii1I11I1II1
if 6 - 6: OoO0O00
if 62 - 62: Ii1I
if 11 - 11: I1Ii111 + I1IiiI - OOooOOo
if 56 - 56: II111iiii + IiII * iIii1I11I1II1 - i1IIi + iIii1I11I1II1
if 98 - 98: Oo0Ooo . iIii1I11I1II1
if 12 - 12: I11i - i11iIiiIii * OoOoOO00 - OoOoOO00 * II111iiii
def lisp_process_map_referral ( lisp_sockets , packet , source ) :
if 45 - 45: I1ii11iIi11i - iIii1I11I1II1 . Ii1I * Oo0Ooo - OoO0O00
O0OOoOoOO = lisp_map_referral ( )
packet = O0OOoOoOO . decode ( packet )
if ( packet == None ) :
lprint ( "Could not decode Map-Referral packet" )
return
if 74 - 74: I1IiiI / o0oOOo0O0Ooo
O0OOoOoOO . print_map_referral ( )
if 53 - 53: iIii1I11I1II1 * oO0o
IiII1iiI = source . print_address ( )
oOO000 = O0OOoOoOO . nonce
if 43 - 43: IiII * Oo0Ooo / OOooOOo % oO0o
if 11 - 11: OoOoOO00 * Oo0Ooo / I11i * OOooOOo
if 15 - 15: ooOoO0o - OOooOOo / OoooooooOO
if 41 - 41: OoOoOO00 . iII111i . i1IIi | |
<reponame>milnus/Corekaburra<gh_stars>1-10
import os
import csv
from math import ceil, floor
import gffutils
EXIT_GFF_REANNOTATION_ERROR = 3
try:
from Corekaburra.correct_gffs import annotate_refound_genes
except ModuleNotFoundError:
from correct_gffs import annotate_refound_genes
try:
from Corekaburra.exit_with_error import exit_with_error
except ModuleNotFoundError:
from exit_with_error import exit_with_error
def add_gene_to_dict(main_dict, gene, pan_gene_name, genome):
    """
    Record which pan-genome cluster a genome's gene (locus_tag) belongs to.

    Composite entries such as 'tagA;tagB' (fragmented genes) are split on ';'
    and every fragment is mapped to the cluster as its own key/value pair.

    :param main_dict: Dict of dicts: genome -> {locus_tag: pan-genome cluster name}
    :param gene: Locus tag (possibly ';'-joined fragments) from a specific genome
    :param pan_gene_name: Name of the pan-genome gene (cluster) the gene belongs to
    :param genome: Name of the genome in question
    :return: The updated main_dict
    """
    # str.split(';') yields the single tag unchanged when no ';' is present,
    # so one loop covers both the fragmented and the simple case
    for locus_tag in gene.split(';'):
        main_dict[genome][locus_tag] = pan_gene_name
    return main_dict
def check_fragmented_gene(fragment_info, input_gffs, tmp_folder_path, gene_data_dict, corrected_dir, logger):
    """
    Function that check for that placement of fragmented gene parts, to determine if they are neighbouring or have some genomic feature between them
    :param fragment_info: List of genes that are found to be fragmented, one composite of fragments for each index
    :param input_gffs: A list of file-paths to the gff files given as input
    :param tmp_folder_path: A file-path to the temporary folder of the Corekaburra run
    :param gene_data_dict: Info on refound genes used by annotate_refound_genes to reannotate them
    :param corrected_dir: Directory in which corrected/reannotated gff files are placed
    :param logger: Program logger
    :return: A List of booleans indicating if a fragments has nothing in between fragments (True) or not (False)
    """
    # Check if any refound genes are in fragments to be checked, if then reannotate the genes before checking:
    refound_fregments = [[i, gene_gff] for i, gene_gff in enumerate(fragment_info) if 'refound' in gene_gff[0]]

    if refound_fregments:
        for i, gene_gff in refound_fregments:
            gene, gff = gene_gff

            gff_name = None
            # First look for an already-corrected gff: match the basename with
            # zero, one or two file extensions stripped against "<genome>_corrected"
            try:
                gff_name = [gff_name for gff_name in input_gffs
                            if f"{gff}_corrected" in [os.path.basename(gff_name),
                                                      os.path.basename(gff_name).rsplit('.', 1)[0],
                                                      os.path.basename(gff_name).rsplit('.', 1)[0].rsplit('.', 1)[0]]][0]
            except IndexError:
                # No corrected file yet — fall through to the raw input gffs
                pass

            if gff_name is None:
                # Fall back to matching the uncorrected input gff by basename
                try:
                    gff_name = [gff_name for gff_name in input_gffs
                                if gff in [os.path.basename(gff_name),
                                           os.path.basename(gff_name).rsplit('.', 1)[0],
                                           os.path.basename(gff_name).rsplit('.', 1)[0].rsplit('.', 1)[0]]][0]
                except IndexError:
                    exit_with_error(EXIT_GFF_REANNOTATION_ERROR,
                                    f'A problem occurred when trying to find a file for reannotation, when passing the '
                                    f'gene_presence_absence_roary.csv! GFF: {gff}, Gene: {gene}')

            # Reannotate the refound genes and use the corrected gff from here on
            gff_name = annotate_refound_genes(gff_name, gene_data_dict, tmp_folder_path, corrected_dir, logger)
            fragment_info[i][1] = gff_name

    fragments_close = []
    for fragment in fragment_info:
        # split the two fragments
        fragment_pieces = fragment[0].split(';')

        # Get the name of the genome
        genome = fragment[1]

        # Get the gff and its path
        if '.gff' not in genome:
            try:
                gff_file = [file for file in input_gffs if f'{genome}.gff' in file][0]
                db_name = os.path.join(tmp_folder_path, f'{genome}_db')
            except IndexError:
                raise NotImplementedError(f'No gff match was found when searching fragments for genome: {genome}')
        else:
            # 'genome' is already a path to a gff file
            gff_file = genome
            db_name = f"{os.path.basename(genome)}_db"
            db_name = os.path.join(tmp_folder_path, db_name)

        # Construct gff database to be searched (cached between calls via the tmp folder)
        if not os.path.isfile(db_name):
            gffutils.create_db(gff_file, db_name, force_gff=True, id_spec=['old_locus_tag', 'ID'])

        # Attach database
        gff_database = gffutils.FeatureDB(db_name)

        # Check that all fragments are on the same contig.
        # NOTE(review): feature[0] is assumed to be the seqid/contig — confirm against gffutils Feature
        first_fragment_contig = gff_database[fragment_pieces[0]][0]
        frag_same_contig = all([first_fragment_contig == gff_database[fragment][0] for fragment in fragment_pieces])
        if frag_same_contig:
            # Get all coordinates (start and end of every fragment)
            frag_coors = []
            for frag in fragment_pieces:
                frag_coors.append(gff_database[frag][3])
                frag_coors.append(gff_database[frag][4])

            # Construct region to be searched for annotations between fragments:
            max_frag_coor = max(frag_coors)
            min_frag_coor = min(frag_coors)
            region = (first_fragment_contig, min_frag_coor, max_frag_coor)

            # Find all features that are completely within the region
            region_features = gff_database.region(region=region, completely_within=True)

            # Find if some pieces are refound and change old_locus_tag to ID
            refound_pieces = [[i, fragment_piece] for i, fragment_piece in enumerate(fragment_pieces) if 'refound' in fragment_piece]
            if refound_pieces:
                for i, piece in refound_pieces:
                    fragment_pieces[i] = gff_database[piece]['ID'][0]

            # find all genes that are not part of the fragmented gene
            region_locus_tags = set([feature[8]['locus_tag'][0] for feature in region_features])
            excess_genes = region_locus_tags.difference(fragment_pieces)

            # check the number of excess genes, if any then False to being core
            if len(excess_genes) > 0:
                fragments_close.append(False)
            else:
                fragments_close.append(True)
        else:
            # Fragments on different contigs can never be immediate neighbours
            fragments_close.append(False)

    return fragments_close
# TODO - find out what the non-closed file problem is here! Can be seen when running unit-tests.
def read_gene_presence_absence(pres_abs_file, core_gene_presence, low_freq_gene, source_program, input_gffs, tmp_folder_path, gene_data_dict, corrected_dir, logger):
"""
Function that pass a Roary style gene presence/absence file.
:param pres_abs_file: File path to the gene presence/absence file identified
:param core_gene_presence: The ratio of genomes in which a gene must present, to be seen as a core gene
:param low_freq_gene: The ratio of genomes in which a gene must not surpass, to be seen as a low-frequency gene
:param source_program: The program from which the pan-genome was produced
:param input_gffs: A list of file-paths to the gff files given as input
:param tmp_folder_path: A file-path to the temporary folder of the Corekaburra run
:param logger: Program logger
:return: Directories of directories of core and low frequency genes, and a directory of pan genome clusters and their annotation.
"""
# Open the presence/absense file to index gene into core, accessory, or low-frequency genes
with open(pres_abs_file, 'r', newline='', ) as gene_presence_absence:
# Read column header line
gff_file_names = gene_presence_absence.readline()
# Strip for whitespace
gff_file_names = gff_file_names.strip()
# split column names
gff_file_names = gff_file_names.split(',')
# Remove the quotes from Rorary input
if source_program == 'Roary':
gff_file_names = [filename.replace('"', '') for filename in gff_file_names]
# Index gff filenames and column position in dict for better search
gff_file_dict = {}
for i, gff_name in enumerate(gff_file_names[14:]):
gff_file_dict[gff_name] = i
# Read remaining lines and construct a nested dicts one dict for each genome and its core genes,
# and a dict for low frequency genes found in less than set percent of isolates
# Initialise reader object to read remaining lines
reader = csv.reader(gene_presence_absence, delimiter=',')
# Counters
core_gene_number = 0
low_freq_gene_number = 0
acc_gene_number = 0
# Determine number of isolates that represent core and low frequency genes
core_gene_isolate_presence = floor(len(gff_file_dict.keys()) * core_gene_presence)
low_freq_gene_isolate_presence = ceil(len(gff_file_dict.keys()) * low_freq_gene)
logger.info(f"------------Opening the gene presence/absence file------------\n"
f"Core genes must be found in {core_gene_isolate_presence} or more genomes\n"
f"Low frequency genes must be found in less than {low_freq_gene_isolate_presence} genomes\n")
# initialise dict of dicts to hold genes from each gffs and to be returned
core_gene_dict = {item: {} for item in gff_file_names[14:]}
low_freq_gene_dict = {item: {} for item in gff_file_names[14:]}
acc_gene_dict = {item: {} for item in gff_file_names[14:]}
# Read lines from file and determine if core, low frequency or 'regular' accessory and record annotations
for line in reader:
# Remove quotes if Roary
if source_program == 'Roary':
line = [element.replace('"', '') for element in line]
# Get number of genes in line and average presence of genes in genomes
gene_isolate_presence = int(line[3])
no_seq_presence = int(line[4])
# Check if core gene, if then add annotations to genomes
# Check if gene is present in all genomes and no one gene is fragmented
if core_gene_isolate_presence <= gene_isolate_presence == no_seq_presence:
# Add gene cluster to genomes
for genome in core_gene_dict.keys():
# Check if there is an annotation for the given genome
if len(line[14 + gff_file_dict[genome]]) > 0:
core_gene_dict[genome][line[14+gff_file_dict[genome]]] = line[0]
core_gene_number += 1
# Check if gene is present in all genomes, but more than one copy is present
elif core_gene_isolate_presence <= gene_isolate_presence:
# Identify annotations for genomes that are fragmented genes
fragment_info = [[genes, gff] for genes, gff in zip(line[14:], gff_file_names[14:]) if ';' in genes]
# Check that each annotation is neighboring the other annotation.
fragments_close = check_fragmented_gene(fragment_info, input_gffs, tmp_folder_path, gene_data_dict,
corrected_dir, logger) # TODO - If a core gene is found to be made up of fragments not places close enough (With something in between) should this then not be subtracted from the core gene count? - How would this be handled if there is a gff that is not given as input?
# Check if gene was found to be a core gene
if all(fragments_close):
# Add the gene to the annotation dict
for genome in core_gene_dict:
# Get the annoations for a specific genome
genes_in_genome = line[14 + gff_file_dict[genome]]
# If there is an annotation add id
if len(genes_in_genome) > 0:
# Check if genome has fragments of genes,
# if | |
"""
This module implements communications with Geopedia
"""
import os
import io
import copy
import json
import logging
import pkg_resources
from configparser import RawConfigParser
from itertools import islice
from sys import version_info
from collections import OrderedDict
import requests
import shapely.geometry
import attr
from attr.validators import instance_of
from werkzeug.datastructures import FileStorage
from sentinelhub import SHConfig, GeopediaFeatureIterator, get_json, Geometry
from .constants import GeopediaType, GPD_FEATURE, GPD_TABLE, PermissionType
LOGGER = logging.getLogger(__name__)
class GeopediaConfig:
    """ Helpers for selecting and loading Geopedia configuration

    The production/test mode is controlled by the PRODUCTION environment
    variable ('true', case-insensitive, selects production).
    """
    @staticmethod
    def set_sh_config():
        """ Point the sentinelhub configuration at the Geopedia REST endpoint
        matching the current mode, persisting the change when needed
        """
        if GeopediaConfig.is_production():
            expected_base_url = 'https://www.geopedia.world/rest/'
        else:
            expected_base_url = 'https://test.geopedia.world/rest/'

        config = SHConfig()
        if config.geopedia_rest_url != expected_base_url:
            config.geopedia_rest_url = expected_base_url
            config.save()

    @staticmethod
    def is_production():
        """ True when the PRODUCTION environment variable equals 'true' (case-insensitive)
        """
        return os.environ.get('PRODUCTION', 'false').lower() == 'true'

    @staticmethod
    def get_config_path():
        """ Absolute path of the Geopedia configuration file for the current mode
        """
        if GeopediaConfig.is_production():
            filename = '.geopedia.config'
        else:
            filename = '.geopedia-test.config'
        config_path = os.path.join(os.path.dirname(__file__), 'data', filename)
        if not os.path.isfile(config_path):
            raise IOError('Geopedia configuration file does not exist: %s' % os.path.abspath(config_path))
        return config_path

    @staticmethod
    def load_config():
        """ Load Geopedia configuration file storing authentication and table IDs
        """
        parser = RawConfigParser()
        parser.read(GeopediaConfig.get_config_path())
        return dict(parser.items('geopedia')), dict(parser.items('tables'))
@attr.s()
class GeopediaPayloadBase:
    """ Base class for responses obtained from Geopedia
    """
    # Raw JSON payload of the Geopedia response; must be a dict
    payload = attr.ib(validator=instance_of(dict))

    @property
    def properties(self):
        """ A dictionary of table properties
        """
        return self.payload['properties']

    @property
    def id(self):
        """ Numeric ID of the payload item, parsed from the payload's 'id' entry
        """
        return int(self.payload['id'])
@attr.s()
class GeopediaTable(GeopediaPayloadBase):
    """ Container for basic properties of a Geopedia table
    """
    # Store object holding the shared Geopedia session; kept whole because it
    # keeps the session alive (see load())
    gpd_store = attr.ib()

    def __attrs_post_init__(self):
        """ This method happens right after init

        Builds a field-name -> field-properties map and validates that the
        table has no forbidden or duplicated column names.
        """
        self.field_name_map = {}
        for field_props in self.properties:
            field_name = field_props['name']
            # These names would clash with the synthesized row attributes
            if field_name in ['id', 'properties', 'geometry']:
                raise ValueError("Table with ID {} has a forbidden column name '{}'".format(self.id, field_name))
            self.field_name_map[field_name] = field_props

        # Duplicate names collapse into one dict key, so compare the sizes
        if len(self.field_name_map) < len(self.properties):
            raise ValueError('Some fields in the table {} have same names'.format(self.id))

    def __contains__(self, item):
        """ Checks if column name exists in the table
        """
        return item in self.field_name_map

    @property
    def name(self):
        """ Name of the table
        """
        return self.payload['name']

    @property
    def gpd_session(self):
        """ Provides GeopediaSession object from the store
        """
        return self.gpd_store.gpd_session

    @staticmethod
    def load(table_id, gpd_store):
        """ Load an instance of GeopediaTable

        Fetches the table description from the Geopedia REST API using the
        session held by gpd_store.
        """
        # For now we need entire gpd_store because it is keeping the session alive
        # This should be changed when session updating is fixed at Geopedia
        gpd_session = gpd_store.gpd_session
        url = '{}data/v2/tables/{}'.format(gpd_session.base_url, table_id)
        payload = get_json(url, headers=gpd_session.session_headers)
        return GeopediaTable(payload=payload, gpd_store=gpd_store)

    def get_field_id(self, field_name):
        """ Get a field id from a field name

        :raises ValueError: if the field name does not exist in this table
        """
        if field_name not in self.field_name_map:
            raise ValueError("Field with name '{}' is not in a table with ID {}".format(field_name, self.id))
        return self.field_name_map[field_name]['fieldId']

    def get_mandatory_fields(self):
        """ Get names of mandatory fields
        """
        return [field_name for field_name, field_props in self.field_name_map.items()
                if field_props['settings']['mandatory']]

    def query_columns(self, column_names, conditions, return_all=True):
        """ The method makes a query to Geopedia table with filter conditions on values. It returns filtered table
        content.

        Example:
            query_columns(['input_source_id','is_done'], ['=1', '=False'])

        :param column_names: Names of table columns to apply query
        :type column_names: str or list of str
        :param conditions: Logical conditions to be applied to corresponding columns
        :type conditions: str or list of str
        :param return_all: Whether to return all elements satisfying query or only the first one. Default is all.
        :type return_all: bool
        :return: Items from Geopedia table with properties
        :rtype: GeopediaRowData or list(GeopediaRowData)
        """
        # Allow a single column/condition to be passed without wrapping in a list
        column_names = [column_names] if isinstance(column_names, str) else column_names
        conditions = [conditions] if isinstance(conditions, str) else conditions
        if len(column_names) != len(conditions):
            raise ValueError("Name of columns and conditions must be of same length")

        # NOTE(review): this assumes get_field_id returns a string so that
        # 'col + expr' concatenates — confirm the type of 'fieldId'
        field_ids = [self.get_field_id(name) for name in column_names]
        query = ' && '.join([col + expr for col, expr in zip(field_ids, conditions)])
        gpd_iterator = GeopediaFeatureIterator(self.id, query_filter=query, gpd_session=self.gpd_session)
        return self._return_query_results(gpd_iterator, query, return_all)

    def query_rows(self, row_ids):
        """ The method makes a query to Geopedia table for specified rows. It returns table content for those rows.

        Note: If input is a single ID it will return a single result, but if input is a list of IDs it will return a
        list of results.

        :param row_ids: IDs of queried rows
        :type row_ids: int or list(int)
        :return: Data about one or multiple Geopedia rows
        :rtype: GeopediaRowData or list(GeopediaRowData)
        """
        return_all = not isinstance(row_ids, (int, str))
        row_ids = row_ids if return_all else [row_ids]

        query = ' || '.join(['id{} = {}'.format(self.id, row_id) for row_id in row_ids])
        gpd_iterator = GeopediaFeatureIterator(self.id, query_filter=query, gpd_session=self.gpd_session)
        return self._return_query_results(gpd_iterator, query, return_all)

    def _return_query_results(self, gpd_iterator, query, return_all):
        """ Helper method for returning 1 or all results of a query to Geopedia table

        :raises RuntimeError: when a single result is requested but the query matched nothing
        """
        if return_all:
            return [GeopediaRowData(result) for result in gpd_iterator]
        try:
            return GeopediaRowData(next(gpd_iterator))
        except StopIteration:
            raise RuntimeError("There are no items for query '{}' in table '{}'".format(query, self.name))
@attr.s()
class GeopediaRowData(GeopediaPayloadBase):
    """ Container for results obtained from querying Geopedia tables
    """
    @property
    def geometry(self):
        """ Helper function to return a WKT polygon from a Geopedia geometry

        Given a geometry field from a Geopedia table, return a WKT polygon in POP_WEB CRS

        :return: WKT polygon in popular web-mercator
        """
        geometry_payload = self.payload['geometry']
        crs_code = geometry_payload['crs']['properties']['name'].split(':')[-1]
        return Geometry(shapely.geometry.shape(geometry_payload), crs=crs_code)

    def __getitem__(self, item):
        """ Obtaining property values without using .properties all the time
        """
        try:
            return self.properties[item]
        except KeyError:
            table_id = self.payload['@id'].rsplit('/')[-1]
            raise KeyError("Result from a table with ID {} does not contain a property "
                           "'{}'".format(table_id, item))

    def __setitem__(self, item, value):
        """ Setting a new property
        """
        self.properties[item] = value
class SaveToGeopedia:
    """Writes (POSTs) features to Geopedia tables through the Geopedia REST API using an existing session."""
    def __init__(self, gpd_tables, session_id):
        """
        :param gpd_tables: Collection of GeopediaTable objects available for writing
        :param session_id: Geopedia session token, sent in the 'X-GPD-Session' header
        """
        self.gpd_tables = gpd_tables
        self.session_id = session_id
        self.base_url = '{}'.format(SHConfig().geopedia_rest_url)
    def _get_headers(self, is_json=True, session_id=None):
        """Build the HTTP headers for a Geopedia request.

        :param is_json: If True, add a JSON content-type header
        :param session_id: Optional session token override; defaults to the instance session
        :return: Dictionary of HTTP headers
        """
        headers = {
            'X-Gpd-ClassificationApp': 'true',
            'X-GPD-Session': self.session_id if session_id is None else session_id
        }
        if is_json:
            headers['Content-type'] = 'application/json; charset=utf-8'
        return headers
    def _send_json(self, request_url, data=None, is_json=True, files=None, session_id=None):
        """ POST request to Geopedia.
        Data as json or files are written to Geopedia tables

        :param request_url: url where POST request is attempted
        :param data: JSON data to be posted
        :param is_json: Flag indicating whether data or files are posted
        :param files: Files to be posted to Geopedia
        :return: Geopedia row data instance
        """
        # NOTE(review): presumably some Geopedia endpoints require property order to be
        # preserved in the serialized payload — confirm with needs_ordered_dicts()
        if needs_ordered_dicts() and data is not None:
            data = self._apply_ordered_dicts(data)
        response = requests.post(url=request_url,
                                 data=data if data is None else json.dumps(data),
                                 headers=self._get_headers(is_json=is_json, session_id=session_id),
                                 files=files)
        LOGGER.info('Sampling table - POST: Response: %s, Status: %d', response.reason, response.status_code)
        try:
            response.raise_for_status()
        except requests.RequestException as exception:
            # Log the payload and server response before propagating the HTTP error
            LOGGER.info('Payload of the failed request:\n%s', json.dumps(data))
            LOGGER.info('Server response:\n%s', str(response.text))
            raise exception
        payload = response.json()
        # Some endpoints return a single-element list instead of a plain object
        if isinstance(payload, list):
            payload = payload[0]
        return GeopediaRowData(payload)
    @staticmethod
    def _apply_ordered_dicts(data):
        """Convert each property entry into an OrderedDict so key order survives JSON serialization."""
        if 'properties' in data:
            data['properties'] = [OrderedDict([('type', prop['type']), ('value', prop['value'])])
                                  for prop in data['properties']]
        return data
    @staticmethod
    def _set_feature(table, values_dict):
        """ Set up feature to be written to geopedia based on the table's structure

        :param table: GeopediaTable object
        :param values_dict: Dictionary with the values to be inputted to the table. The keys of this dict correspond to
        the `name` entry of the `props` dictionary
        :return: Dictionary feature to be written to Geopedia
        :raises ValueError: If any mandatory table field is missing from values_dict
        """
        # Drop values that do not correspond to a column of the target table
        values_dict = {key: value for key, value in values_dict.items() if key in table}
        if not set(table.get_mandatory_fields()).issubset(values_dict):
            raise ValueError("Some mandatory fields are missing in the payload:\n"
                             "Values: {}\n"
                             "Mandatory fields: {}".format(values_dict, table.get_mandatory_fields()))
        # create feature starting from the template
        feature = copy.deepcopy(GPD_FEATURE)
        feature['tableId'] = table.id
        feature['properties'].extend([{'type': GeopediaType[prop['type']].value,
                                       'value': SaveToGeopedia._prepare_value(values_dict, prop)}
                                      for prop in table.properties])
        # if there is a geometry feature, replace with correct format
        # (Geopedia expects geometries as WKT tagged with a web-mercator CRS)
        if 'primaryGeometry' in values_dict:
            for feat_prop in feature['properties']:
                if feat_prop['type'] == 'geometry':
                    wkt_geometry = feat_prop['value']
                    feat_prop['value'] = {'wkt': wkt_geometry, 'crsId': 'EPSG:3857'}
                    feat_prop['geomAuxData'] = {'mbr': None, 'pointInside': None}
                    break
        return feature
    @staticmethod
    def _prepare_value(values_dict, prop):
        """ Prepares a values which will be stored in a Geopedia table and makes sure it is of correct type

        :param values_dict: Dictionary of values keyed by field name
        :param prop: Table property descriptor containing at least 'name' and 'type'
        :return: The value coerced to the column's Python type, or None if absent
        """
        name = prop['name']
        if name not in values_dict:
            return None
        value = values_dict[name]
        expected_type = GeopediaType[prop['type']]
        # Dicts/lists are serialized to JSON text unless the column stores a binary reference
        if isinstance(value, (dict, list)) and expected_type is not GeopediaType.BINARYREFERENCE:
            value = json.dumps(value)
        if not isinstance(value, expected_type.python_type):
            value = expected_type.python_type(value)
        return value
    def _update_feature(self, table, values_dict, row_id):
        """ Set up feature to be updated on geopedia based on the table's structure

        :param table: GeopediaTable object
        :param values_dict: Dictionary with the values to be inputted to the table. The keys of this dict correspond to
        the `name` entry of the `props` dictionary
        :param row_id: Value of ID row to be updated
        :return: Dictionary feature to be updated on Geopedia
        """
        feature = self._set_feature(table, values_dict)
        # set action to update
        feature['properties'][0]['value'] = row_id
        feature['id'] = row_id
        feature['@id'] = 'feature/{}'.format(row_id)
        feature['storeAction'] = 'UPDATE'
        return feature
| |
blank=True, null=True) # Field name made lowercase.
frenchcountryregionname = models.CharField(db_column='FrenchCountryRegionName', max_length=50, blank=True, null=True) # Field name made lowercase.
postalcode = models.CharField(db_column='PostalCode', max_length=15, blank=True, null=True) # Field name made lowercase.
salesterritorykey = models.ForeignKey('Dimsalesterritory', models.DO_NOTHING, db_column='SalesTerritoryKey', blank=True, null=True) # Field name made lowercase.
ipaddresslocator = models.CharField(db_column='IpAddressLocator', max_length=15, blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'DimGeography'
class Dimorganization(models.Model):
    """Unmanaged model over the existing 'DimOrganization' warehouse table (auto-generated by inspectdb)."""
    organizationkey = models.IntegerField(db_column='OrganizationKey', primary_key=True)  # Field name made lowercase.
    # Self-referencing FK: organizations form a hierarchy via their parent organization
    parentorganizationkey = models.ForeignKey('self', models.DO_NOTHING, db_column='ParentOrganizationKey', blank=True, null=True)  # Field name made lowercase.
    percentageofownership = models.CharField(db_column='PercentageOfOwnership', max_length=16, blank=True, null=True)  # Field name made lowercase.
    organizationname = models.CharField(db_column='OrganizationName', max_length=50, blank=True, null=True)  # Field name made lowercase.
    currencykey = models.ForeignKey(Dimcurrency, models.DO_NOTHING, db_column='CurrencyKey', blank=True, null=True)  # Field name made lowercase.
    class Meta:
        managed = False  # Django will not create/alter this table via migrations
        db_table = 'DimOrganization'
class Dimproduct(models.Model):
    """Unmanaged model over the existing 'DimProduct' warehouse table (auto-generated by inspectdb)."""
    productkey = models.IntegerField(db_column='ProductKey', primary_key=True)  # Field name made lowercase.
    productalternatekey = models.CharField(db_column='ProductAlternateKey', max_length=25, blank=True, null=True)  # Field name made lowercase.
    productsubcategorykey = models.ForeignKey('Dimproductsubcategory', models.DO_NOTHING, db_column='ProductSubcategoryKey', blank=True, null=True)  # Field name made lowercase.
    weightunitmeasurecode = models.CharField(db_column='WeightUnitMeasureCode', max_length=3, blank=True, null=True)  # Field name made lowercase.
    sizeunitmeasurecode = models.CharField(db_column='SizeUnitMeasureCode', max_length=3, blank=True, null=True)  # Field name made lowercase.
    englishproductname = models.CharField(db_column='EnglishProductName', max_length=50)  # Field name made lowercase.
    spanishproductname = models.CharField(db_column='SpanishProductName', max_length=50)  # Field name made lowercase.
    frenchproductname = models.CharField(db_column='FrenchProductName', max_length=50)  # Field name made lowercase.
    standardcost = models.DecimalField(db_column='StandardCost', max_digits=19, decimal_places=4, blank=True, null=True)  # Field name made lowercase.
    finishedgoodsflag = models.BooleanField(db_column='FinishedGoodsFlag')  # Field name made lowercase.
    color = models.CharField(db_column='Color', max_length=15)  # Field name made lowercase.
    safetystocklevel = models.SmallIntegerField(db_column='SafetyStockLevel', blank=True, null=True)  # Field name made lowercase.
    reorderpoint = models.SmallIntegerField(db_column='ReorderPoint', blank=True, null=True)  # Field name made lowercase.
    listprice = models.DecimalField(db_column='ListPrice', max_digits=19, decimal_places=4, blank=True, null=True)  # Field name made lowercase.
    size = models.CharField(db_column='Size', max_length=50, blank=True, null=True)  # Field name made lowercase.
    sizerange = models.CharField(db_column='SizeRange', max_length=50, blank=True, null=True)  # Field name made lowercase.
    weight = models.FloatField(db_column='Weight', blank=True, null=True)  # Field name made lowercase.
    daystomanufacture = models.IntegerField(db_column='DaysToManufacture', blank=True, null=True)  # Field name made lowercase.
    productline = models.CharField(db_column='ProductLine', max_length=2, blank=True, null=True)  # Field name made lowercase.
    dealerprice = models.DecimalField(db_column='DealerPrice', max_digits=19, decimal_places=4, blank=True, null=True)  # Field name made lowercase.
    class_field = models.CharField(db_column='Class', max_length=2, blank=True, null=True)  # Field name made lowercase. Field renamed because it was a Python reserved word.
    style = models.CharField(db_column='Style', max_length=2, blank=True, null=True)  # Field name made lowercase.
    modelname = models.CharField(db_column='ModelName', max_length=50, blank=True, null=True)  # Field name made lowercase.
    largephoto = models.BinaryField(db_column='LargePhoto', blank=True, null=True)  # Field name made lowercase.
    englishdescription = models.CharField(db_column='EnglishDescription', max_length=400, blank=True, null=True)  # Field name made lowercase.
    frenchdescription = models.CharField(db_column='FrenchDescription', max_length=400, blank=True, null=True)  # Field name made lowercase.
    chinesedescription = models.CharField(db_column='ChineseDescription', max_length=400, blank=True, null=True)  # Field name made lowercase.
    arabicdescription = models.CharField(db_column='ArabicDescription', max_length=400, blank=True, null=True)  # Field name made lowercase.
    hebrewdescription = models.CharField(db_column='HebrewDescription', max_length=400, blank=True, null=True)  # Field name made lowercase.
    thaidescription = models.CharField(db_column='ThaiDescription', max_length=400, blank=True, null=True)  # Field name made lowercase.
    germandescription = models.CharField(db_column='GermanDescription', max_length=400, blank=True, null=True)  # Field name made lowercase.
    japanesedescription = models.CharField(db_column='JapaneseDescription', max_length=400, blank=True, null=True)  # Field name made lowercase.
    turkishdescription = models.CharField(db_column='TurkishDescription', max_length=400, blank=True, null=True)  # Field name made lowercase.
    startdate = models.DateTimeField(db_column='StartDate', blank=True, null=True)  # Field name made lowercase.
    enddate = models.DateTimeField(db_column='EndDate', blank=True, null=True)  # Field name made lowercase.
    status = models.CharField(db_column='Status', max_length=7, blank=True, null=True)  # Field name made lowercase.
    class Meta:
        managed = False  # Django will not create/alter this table via migrations
        db_table = 'DimProduct'
        # A product version is identified by its alternate key together with its start date
        unique_together = (('productalternatekey', 'startdate'),)
class Dimproductcategory(models.Model):
    """Unmanaged model over the existing 'DimProductCategory' warehouse table (auto-generated by inspectdb)."""
    productcategorykey = models.IntegerField(db_column='ProductCategoryKey', primary_key=True)  # Field name made lowercase.
    productcategoryalternatekey = models.IntegerField(db_column='ProductCategoryAlternateKey', unique=True, blank=True, null=True)  # Field name made lowercase.
    englishproductcategoryname = models.CharField(db_column='EnglishProductCategoryName', max_length=50)  # Field name made lowercase.
    spanishproductcategoryname = models.CharField(db_column='SpanishProductCategoryName', max_length=50)  # Field name made lowercase.
    frenchproductcategoryname = models.CharField(db_column='FrenchProductCategoryName', max_length=50)  # Field name made lowercase.
    class Meta:
        managed = False  # Django will not create/alter this table via migrations
        db_table = 'DimProductCategory'
class Dimproductsubcategory(models.Model):
    """Unmanaged model over the existing 'DimProductSubcategory' warehouse table (auto-generated by inspectdb)."""
    productsubcategorykey = models.IntegerField(db_column='ProductSubcategoryKey', primary_key=True)  # Field name made lowercase.
    productsubcategoryalternatekey = models.IntegerField(db_column='ProductSubcategoryAlternateKey', unique=True, blank=True, null=True)  # Field name made lowercase.
    englishproductsubcategoryname = models.CharField(db_column='EnglishProductSubcategoryName', max_length=50)  # Field name made lowercase.
    spanishproductsubcategoryname = models.CharField(db_column='SpanishProductSubcategoryName', max_length=50)  # Field name made lowercase.
    frenchproductsubcategoryname = models.CharField(db_column='FrenchProductSubcategoryName', max_length=50)  # Field name made lowercase.
    productcategorykey = models.ForeignKey(Dimproductcategory, models.DO_NOTHING, db_column='ProductCategoryKey', blank=True, null=True)  # Field name made lowercase.
    class Meta:
        managed = False  # Django will not create/alter this table via migrations
        db_table = 'DimProductSubcategory'
class Dimpromotion(models.Model):
    """Unmanaged model over the existing 'DimPromotion' warehouse table (auto-generated by inspectdb)."""
    promotionkey = models.IntegerField(db_column='PromotionKey', primary_key=True)  # Field name made lowercase.
    promotionalternatekey = models.IntegerField(db_column='PromotionAlternateKey', unique=True, blank=True, null=True)  # Field name made lowercase.
    englishpromotionname = models.CharField(db_column='EnglishPromotionName', max_length=255, blank=True, null=True)  # Field name made lowercase.
    spanishpromotionname = models.CharField(db_column='SpanishPromotionName', max_length=255, blank=True, null=True)  # Field name made lowercase.
    frenchpromotionname = models.CharField(db_column='FrenchPromotionName', max_length=255, blank=True, null=True)  # Field name made lowercase.
    discountpct = models.FloatField(db_column='DiscountPct', blank=True, null=True)  # Field name made lowercase.
    englishpromotiontype = models.CharField(db_column='EnglishPromotionType', max_length=50, blank=True, null=True)  # Field name made lowercase.
    spanishpromotiontype = models.CharField(db_column='SpanishPromotionType', max_length=50, blank=True, null=True)  # Field name made lowercase.
    frenchpromotiontype = models.CharField(db_column='FrenchPromotionType', max_length=50, blank=True, null=True)  # Field name made lowercase.
    englishpromotioncategory = models.CharField(db_column='EnglishPromotionCategory', max_length=50, blank=True, null=True)  # Field name made lowercase.
    spanishpromotioncategory = models.CharField(db_column='SpanishPromotionCategory', max_length=50, blank=True, null=True)  # Field name made lowercase.
    frenchpromotioncategory = models.CharField(db_column='FrenchPromotionCategory', max_length=50, blank=True, null=True)  # Field name made lowercase.
    startdate = models.DateTimeField(db_column='StartDate')  # Field name made lowercase.
    enddate = models.DateTimeField(db_column='EndDate', blank=True, null=True)  # Field name made lowercase.
    minqty = models.IntegerField(db_column='MinQty', blank=True, null=True)  # Field name made lowercase.
    maxqty = models.IntegerField(db_column='MaxQty', blank=True, null=True)  # Field name made lowercase.
    class Meta:
        managed = False  # Django will not create/alter this table via migrations
        db_table = 'DimPromotion'
class Dimreseller(models.Model):
    """Unmanaged model over the existing 'DimReseller' warehouse table (auto-generated by inspectdb)."""
    resellerkey = models.IntegerField(db_column='ResellerKey', primary_key=True)  # Field name made lowercase.
    geographykey = models.ForeignKey(Dimgeography, models.DO_NOTHING, db_column='GeographyKey', blank=True, null=True)  # Field name made lowercase.
    reselleralternatekey = models.CharField(db_column='ResellerAlternateKey', unique=True, max_length=15, blank=True, null=True)  # Field name made lowercase.
    phone = models.CharField(db_column='Phone', max_length=25, blank=True, null=True)  # Field name made lowercase.
    businesstype = models.CharField(db_column='BusinessType', max_length=20)  # Field name made lowercase.
    resellername = models.CharField(db_column='ResellerName', max_length=50)  # Field name made lowercase.
    numberemployees = models.IntegerField(db_column='NumberEmployees', blank=True, null=True)  # Field name made lowercase.
    orderfrequency = models.CharField(db_column='OrderFrequency', max_length=1, blank=True, null=True)  # Field name made lowercase.
    ordermonth = models.SmallIntegerField(db_column='OrderMonth', blank=True, null=True)  # Field name made lowercase.
    firstorderyear = models.IntegerField(db_column='FirstOrderYear', blank=True, null=True)  # Field name made lowercase.
    lastorderyear = models.IntegerField(db_column='LastOrderYear', blank=True, null=True)  # Field name made lowercase.
    productline = models.CharField(db_column='ProductLine', max_length=50, blank=True, null=True)  # Field name made lowercase.
    addressline1 = models.CharField(db_column='AddressLine1', max_length=60, blank=True, null=True)  # Field name made lowercase.
    addressline2 = models.CharField(db_column='AddressLine2', max_length=60, blank=True, null=True)  # Field name made lowercase.
    annualsales = models.DecimalField(db_column='AnnualSales', max_digits=19, decimal_places=4, blank=True, null=True)  # Field name made lowercase.
    bankname = models.CharField(db_column='BankName', max_length=50, blank=True, null=True)  # Field name made lowercase.
    minpaymenttype = models.SmallIntegerField(db_column='MinPaymentType', blank=True, null=True)  # Field name made lowercase.
    minpaymentamount = models.DecimalField(db_column='MinPaymentAmount', max_digits=19, decimal_places=4, blank=True, null=True)  # Field name made lowercase.
    annualrevenue = models.DecimalField(db_column='AnnualRevenue', max_digits=19, decimal_places=4, blank=True, null=True)  # Field name made lowercase.
    yearopened = models.IntegerField(db_column='YearOpened', blank=True, null=True)  # Field name made lowercase.
    class Meta:
        managed = False  # Django will not create/alter this table via migrations
        db_table = 'DimReseller'
class Dimsalesreason(models.Model):
    """Unmanaged model over the existing 'DimSalesReason' warehouse table (auto-generated by inspectdb)."""
    salesreasonkey = models.IntegerField(db_column='SalesReasonKey', primary_key=True)  # Field name made lowercase.
    salesreasonalternatekey = models.IntegerField(db_column='SalesReasonAlternateKey')  # Field name made lowercase.
    salesreasonname = models.CharField(db_column='SalesReasonName', max_length=50)  # Field name made lowercase.
    salesreasonreasontype = models.CharField(db_column='SalesReasonReasonType', max_length=50)  # Field name made lowercase.
    class Meta:
        managed = False  # Django will not create/alter this table via migrations
        db_table = 'DimSalesReason'
class Dimsalesterritory(models.Model):
    """Unmanaged model over the existing 'DimSalesTerritory' warehouse table (auto-generated by inspectdb)."""
    salesterritorykey = models.IntegerField(db_column='SalesTerritoryKey', primary_key=True)  # Field name made lowercase.
    salesterritoryalternatekey = models.IntegerField(db_column='SalesTerritoryAlternateKey', unique=True, blank=True, null=True)  # Field name made lowercase.
    salesterritoryregion = models.CharField(db_column='SalesTerritoryRegion', max_length=50)  # Field name made lowercase.
    salesterritorycountry = models.CharField(db_column='SalesTerritoryCountry', max_length=50)  # Field name made lowercase.
    salesterritorygroup = models.CharField(db_column='SalesTerritoryGroup', max_length=50, blank=True, null=True)  # Field name made lowercase.
    salesterritoryimage = models.BinaryField(db_column='SalesTerritoryImage', blank=True, null=True)  # Field name made lowercase.
    class Meta:
        managed = False  # Django will not create/alter this table via migrations
        db_table = 'DimSalesTerritory'
class Dimscenario(models.Model):
    """Unmanaged model over the existing 'DimScenario' warehouse table (auto-generated by inspectdb)."""
    scenariokey = models.IntegerField(db_column='ScenarioKey', primary_key=True)  # Field name made lowercase.
    scenarioname = models.CharField(db_column='ScenarioName', max_length=50, blank=True, null=True)  # Field name made lowercase.
    class Meta:
        managed = False  # Django will not create/alter this table via migrations
        db_table = 'DimScenario'
class Factadditionalinternationalproductdescription(models.Model):
    """Unmanaged model over the existing 'FactAdditionalInternationalProductDescription' table (auto-generated by inspectdb)."""
    productkey = models.IntegerField(db_column='ProductKey')  # Field name made lowercase.
    culturename = models.CharField(db_column='CultureName', max_length=50)  # Field name made lowercase.
    productdescription = models.TextField(db_column='ProductDescription')  # Field name made lowercase.
    class Meta:
        managed = False  # Django will not create/alter this table via migrations
        db_table = 'FactAdditionalInternationalProductDescription'
        # One localized description per (product, culture) pair
        unique_together = (('productkey', 'culturename'),)
class Factcallcenter(models.Model):
factcallcenterid = models.IntegerField(db_column='FactCallCenterID', primary_key=True) # Field name made lowercase.
datekey = models.ForeignKey(Dimdate, models.DO_NOTHING, db_column='DateKey') # Field name made lowercase.
wagetype = models.CharField(db_column='WageType', max_length=15) # Field name made lowercase.
shift = models.CharField(db_column='Shift', max_length=20) # Field name made lowercase.
leveloneoperators = models.SmallIntegerField(db_column='LevelOneOperators') # Field name made lowercase.
leveltwooperators = models.SmallIntegerField(db_column='LevelTwoOperators') # Field name made | |
import sys, os
sys.path = [os.getcwd()] + sys.path
from utils.tools import submit_jobs, random_params, lists_to_tuples
# Plain-S 64 width 8 resblock
# Plain-M 64 width 16 resblock
# Plain-L 100 width 16 resblock
# Checkpoint paths of pre-trained teacher models, keyed by architecture/scale tag.
# NOTE(review): several EDSR entries reuse the 'layerwise_edsrx2_*' filename prefix
# despite differing tags — presumably only the hash suffix distinguishes them; verify.
pretrain_paths = {
    'resnet20x4': "/data/pretrained/lightning_models/layerwise_resnet20x4_cifar100_b8242.ckpt",
    'resnet20': "/data/pretrained/lightning_models/layerwise_resnet20_cifar100_400ba.ckpt",
    "EDSR50_newtail_short_x2": "/data/pretrained/lightning_models/layerwise_edsrx2_div2k_3fa19.ckpt",  # 1000 epoch
    "EDSR50_newtail_short_x4": "/data/pretrained/lightning_models/layerwise_edsrx2_div2k_dbb90.ckpt",  # 1000 epoch
    "EDSR64_newtail_short_x2": "/data/pretrained/lightning_models/layerwise_edsrx2_div2k_9b790.ckpt",  # 1000 epoch
    "EDSR64_newtail_short_x3": "/data/pretrained/lightning_models/layerwise_edsrx2_div2k_27695.ckpt",  # 1000 epoch
    "EDSR64_newtail_short_x4": "/data/pretrained/lightning_models/layerwise_edsrx2_div2k_1980c.ckpt",  # 1000 epoch
    "EDSR64_newtail_x2": "/data/pretrained/lightning_models/layerwise_edsrx2_div2k_537c4.ckpt",  # 1000 epoch
    "EDSR64_newtail_x3": "/data/pretrained/lightning_models/layerwise_edsrx3_div2k_fe594.ckpt",  # 1000 epoch
    "EDSR64_newtail_x4": "/data/pretrained/lightning_models/layerwise_edsrx4_div2k_69068.ckpt",  # 1000 epoch
    "EDSR100_newtail_x2": "/data/pretrained/lightning_models/layerwise_edsrx2_div2k_b00e1.ckpt",  # 1000 epoch
    "EDSR100_newtail_x3": "/data/pretrained/lightning_models/layerwise_edsrx3_div2k_613be.ckpt",  # 1000 epoch
    "EDSR100_newtail_x4": "/data/pretrained/lightning_models/layerwise_edsrx4_div2k_5e9dd.ckpt",  # 1000 epoch
    "EDSR64x2": "/data/pretrained/lightning_models/layerwise_edsrx2_div2k_fa9af.ckpt",  # 1000 epoch
    "EDSR100x2": "/data/pretrained/lightning_models/layerwise_edsrx2_div2k_1c96e.ckpt",  # 1000 epoch
    "EDSR64x4": "/data/pretrained/lightning_models/layerwise_edsr64x4_div2k_cbe41.ckpt",  # 1000 epoch
    "EDSRx4": "/data/pretrained/lightning_models/layerwise_edsrx4_div2k_e324f.ckpt",
    "EDSR_100x4": "/data/pretrained/lightning_models/layerwise_edsr100x4_div2k_8b9b5.ckpt",
    "EDSR_200x4": "/data/pretrained/lightning_models/layerwise_edsr200x4_div2k_ca503.ckpt",
    "RDNx4": "/data/pretrained/lightning_models/layerwise_rdnx4_div2k_03029.ckpt",
    "RDNx4_0bias": "/data/pretrained/lightning_models/layerwise_rdnx4_div2k_03029_0bias.ckpt",
}
# Base experiment configurations; the params_for_* functions below copy one of these
# templates and overlay experiment-specific overrides.
templates = {
    # CIFAR-100 classification with a ResNet20x4 teacher
    "cifar100-classification": {
        'layer_type': 'repvgg',
        'gpus': 1,
        'num_epochs': 300,
        'weight_decay': 5e-4,
        'max_lr': 0.2,
        'lr_scheduler': 'OneCycLR',
        'optimizer': 'SGD',
        'teacher_pretrain_path': pretrain_paths['resnet20x4'],
        "dataset": {'name': "cifar100", 'total_batch_size': 256},
        "seed": [233, 234, 235, 236],
        'ignore_exist': True,
        'save_model': False,
    },
    # DIV2K x4 super-resolution, short (300-epoch) schedule
    'DIV2K-SRx4': {
        'task': 'super-resolution',
        'loss': 'L1',
        'metric': 'psnr255',
        'rgb_range': 255,
        'gpus': 1,
        'num_epochs': 300,
        'weight_decay': 0,
        'lr_scheduler': 'OneCycLR',
        'optimizer': 'Adam',
        'teacher_pretrain_path': pretrain_paths['EDSRx4'],
        "dataset": {
            'name': "DIV2K",
            'total_batch_size': 32,
            'patch_size': 96,
            'ext': 'sep',
            'repeat': 20,
            "test_bz": 1,
            'scale': 4,
        },
        'scale': 4,
        "seed": [233, 234, 235, 236],
        'ignore_exist': True,
        'save_model': False,
    },
    # DIV2K x4 full experiment: 1000 epochs, benchmark testing, saved models
    'DIV2Kx4-EXP': {
        'task': 'super-resolution',
        'loss': 'L1',
        'gpus': 1,
        'teacher_pretrain_path': "to be filled",
        'max_lr': 2e-4,
        'weight_decay': 0,
        'lr_scheduler': 'OneCycLR',
        'optimizer': 'Adam',
        'num_epochs': 1000,
        'scale': 4,
        "dataset": {
            'name': "DIV2K",
            'scale': 4,
            'total_batch_size': 16,
            'patch_size': 192,
            'ext': 'sep',
            'repeat': 20,
            'test_bz': 1,
        },
        'rgb_range': 255,
        "seed": [233, 234, 235, 236],
        'save_model': True,
        'inference_statics': True,
        'test_benchmark': True,
        'ignore_exist': True,
        'metric': 'psnr_gray_shave_x4',
    },
    # Same as DIV2Kx4-EXP but for scale factor 3
    'DIV2Kx3-EXP': {
        'task': 'super-resolution',
        'loss': 'L1',
        'gpus': 1,
        'teacher_pretrain_path': "to be filled",
        'max_lr': 2e-4,
        'weight_decay': 0,
        'lr_scheduler': 'OneCycLR',
        'optimizer': 'Adam',
        'num_epochs': 1000,
        'scale': 3,
        "dataset": {
            'name': "DIV2K",
            'scale': 3,
            'total_batch_size': 16,
            'patch_size': 192,
            'ext': 'sep',
            'repeat': 20,
            'test_bz': 1,
        },
        'rgb_range': 255,
        "seed": [233, 234, 235, 236],
        'save_model': True,
        'inference_statics': True,
        'test_benchmark': True,
        'ignore_exist': True,
        'metric': 'psnr_gray_shave_x3',
    },
    # Same as DIV2Kx4-EXP but for scale factor 2 (EDSR64x2 teacher pre-filled)
    'DIV2Kx2-EXP': {
        'task': 'super-resolution',
        'loss': 'L1',
        'gpus': 1,
        'teacher_pretrain_path': pretrain_paths['EDSR64x2'],
        'max_lr': 2e-4,
        'weight_decay': 0,
        'lr_scheduler': 'OneCycLR',
        'optimizer': 'Adam',
        'num_epochs': 1000,
        'scale': 2,
        "dataset": {
            'name': "DIV2K",
            'scale': 2,
            'total_batch_size': 16,
            'patch_size': 192,
            'ext': 'sep',
            'repeat': 20,
            'test_bz': 1,
        },
        'rgb_range': 255,
        "seed": [233, 234, 235, 236],
        'save_model': True,
        'inference_statics': True,
        'test_benchmark': True,
        'ignore_exist': True,
        'metric': 'psnr_gray_shave_x2',
    },
}
def params_for_SR_real_progressive():
    """Settings for the SRx4 freeze-progressive experiments (DEIP_Full_Progressive)."""
    overrides = {
        'project_name': 'deip_SRx4_freeze_progressive',
        'method': 'DEIP_Full_Progressive',
        'description': 'DEIP_Full_Progressive',
        'init_stu_with_teacher': [1],
        'layer_type': ['normal_no_bn'],
        'lr_scheduler': ['OneCycLR', 'none'],
        'rank_eps': [0.3],
        'freeze_trained': [0, 1],
        'freeze_teacher_bn': [0, 1],
        'max_lr': [2e-4],
    }
    config = dict(templates['DIV2K-SRx4'])
    config.update(overrides)
    return config
def params_for_SR_real_progressive_small():
    """Shorter (100-epoch) variant of the freeze-progressive SRx4 experiments."""
    config = params_for_SR_real_progressive()
    config.update({
        'project_name': 'deip_SRx4_progressive_small',
        'num_epochs': 100,
        'max_lr': [2e-4],
        'seed': [233, 234],
        'gpus': 1,
    })
    return config
def params_for_SR_progressive():
    """Settings for SRx4 progressive distillation experiments."""
    overrides = {
        'project_name': 'deip_SRx4_progressive',
        'method': 'Progressive_Distillation',
        'init_stu_with_teacher': [1],
        'layer_type': ['normal_no_bn'],
        'rank_eps': [0.05],
        'max_lr': [5e-4],
        'distill_coe': [0, 1, 3, 10],
    }
    config = dict(templates['DIV2K-SRx4'])
    config.update(overrides)
    return config
def params_for_SR_progressive_small():
    """Shorter (100-epoch) variant of the SRx4 progressive distillation experiments."""
    config = params_for_SR_progressive()
    config.update({
        'project_name': 'deip_SRx4_progressive_small',
        'method': 'Progressive_Distillation',
        'layer_type': 'normal_no_bn',
        'init_stu_with_teacher': [1],
        'num_epochs': 100,
        'max_lr': [5e-4],
        'distill_coe': [0, 0.01, 1e-4, 1],
        'gpus': 1,
    })
    return config
def params_for_SR_structure():
    """Settings for SRx4 experiments comparing plain-SR layer structures (direct training)."""
    overrides = {
        'project_name': 'deip_SRx4_structure',
        'description': 'direct_train',
        'init_stu_with_teacher': [0],
        'layer_type': 'plain_sr-bn',  # ['plain_sr', 'plain_sr-bn', 'plain_sr-prelu'],
        'rank_eps': [0.05],
        'max_lr': [2e-4, 5e-4, 1e-3],
    }
    config = dict(templates['DIV2K-SRx4'])
    config.update(overrides)
    return config
def params_for_SR_init():
    """Settings for SRx4 experiments on teacher-based student initialization (rerun)."""
    overrides = {
        'project_name': 'deip_SRx4_init_rerun',
        'description': 'direct_train',
        'init_stu_with_teacher': [1],
        'layer_type': ['normal', 'normal_no_bn', 'normal_no_bn_prelu'],
        'rank_eps': [0.1],  # 0.05, 0.6, 1, 2
        'max_lr': [5e-4, 1e-3],
    }
    config = dict(templates['DIV2K-SRx4'])
    config.update(overrides)
    return config
def params_for_SR_baseline_with_add_ori():
    """SRx4 baseline variant with the original input additionally fed to the student (add_ori)."""
    overrides = {
        'project_name': 'deip_SRx4_baseline',
        'add_ori': [1],
        'init_stu_with_teacher': [0],
        'layer_type': ['normal_no_bn'],
        'rank_eps': [0.01, 0.05, 0.1, 0.2],  # 0.05, 0.6, 1, 2
        'max_lr': [2e-4, 5e-4],
    }
    config = dict(templates['DIV2K-SRx4'])
    config.update(overrides)
    return config
def params_for_SR_baseline():
    """Settings for the SRx4 baseline (direct training) experiments."""
    overrides = {
        'project_name': 'deip_SRx4_baseline',
        'description': 'direct_train',
        'init_stu_with_teacher': [1],
        'layer_type': ['normal_no_bn'],
        'rank_eps': [0.01, 0.05, 0.1, 0.2],  # 0.05, 0.6, 1, 2
        'max_lr': [2e-4, 5e-4],
    }
    config = dict(templates['DIV2K-SRx4'])
    config.update(overrides)
    return config
def params_for_SR_baseline_small():
    """Shorter (100-epoch) variant of the SRx4 baseline experiments."""
    config = params_for_SR_baseline()
    config.update({
        'num_epochs': 100,
        'project_name': 'deip_SRx4_baseline100',
        'max_lr': 5e-4,
    })
    return config
def params_for_baseline():
    """Settings for CIFAR-100 baseline experiments with a ResNet20x4 teacher (direct training)."""
    overrides = {
        'project_name': 'deip_baselines_20x4',
        'description': 'direct_train',
        'layer_type': 'normal',
        'init_stu_with_teacher': [0],
        # 'rank_eps': [0.01, 0.05, 0.1, 0.2],
        'rank_eps': [0.3, 0.4, 0.5],
        'max_lr': [0.2],  # 0.05 for plainx4, 0.5 for repvgg on 0.05, 0.2 for repvgg on 0.2, 0.3, 0.5
    }
    config = dict(templates['cifar100-classification'])
    config.update(overrides)
    return config
def params_for_deip_distillation():
    """Settings for CIFAR-100 common distillation experiments (KD / feature distillation)."""
    overrides = {
        'project_name': 'deip_distillation_repeat',
        'description': 'common_distillation',
        'init_stu_with_teacher': [0, 1],
        'method': 'Distillation',
        'dist_method': ['KD', 'Progressive_FD', 'FD_Conv1x1_MSE'],
        'rank_eps': 5e-2,
        'distill_coe': [1, 0.1],
        'max_lr': [0.2],
    }
    config = dict(templates['cifar100-classification'])
    config.update(overrides)
    return config
def params_for_deip_progressive_distillation():
    """Settings for CIFAR-100 progressive distillation experiments."""
    overrides = {
        'project_name': 'deip_distillation_progressive',
        'description': 'progressive_distillation',
        'method': 'Progressive_Distillation',
        'layer_type': 'normal',
        'init_stu_with_teacher': [1],
        'rank_eps': [0.01, 0.05, 0.1, 0.2],
        'max_lr': [0.2],
        'seed': 233,
    }
    config = dict(templates['cifar100-classification'])
    config.update(overrides)
    return config
def params_for_unit_test():
    """Minimal one-epoch SRx4 configuration used as a smoke test."""
    overrides = {
        'project_name': 'unit_test',
        'method': 'DEIP_Init',
        'init_stu_with_teacher': [1],
        'layer_type': ['normal_no_bn'],
        'rank_eps': [0.1],  # 0.05, 0.6, 1, 2
        'max_lr': [5e-4],
        'num_epochs': 1,
        'seed': 0,
    }
    config = dict(templates['DIV2K-SRx4'])
    config.update(overrides)
    return config
def deip_CIFAR100_init_new():
    """Settings for the new DEIP_Init initialization study on CIFAR-100."""
    overrides = {
        'project_name': 'deip_CIFAR100_init_new',
        'method': 'DEIP_Init',
        'init_stu_with_teacher': [0, 1],
        'layer_type': 'normal_no_bn',
        'rank_eps': [0.05, 0.1, 0.2, 0.3],
        'max_lr': [0.05, 0.1, 0.2],  # 0.05 for plainx4, 0.5 for repvgg on 0.05, 0.2 for repvgg on 0.2, 0.3, 0.5
        'seed': 233,
    }
    config = dict(templates['cifar100-classification'])
    config.update(overrides)
    return config
def deip_CIFAR100_init_new_distill():
    """DEIP_Init study on CIFAR-100 with additional bridge distillation."""
    overrides = {
        'project_name': 'deip_CIFAR100_init_new',
        'method': 'DEIP_Init',
        'init_stu_with_teacher': [1],
        'layer_type': ['normal_no_bn', 'normal'],
        'rank_eps': [0.05, 0.1],
        'max_lr': [0.05, 0.2],  # 0.05 for plainx4, 0.5 for repvgg on 0.05, 0.2 for repvgg on 0.2, 0.3, 0.5
        'distill_coe': [0.3, 0.5],
        'distill_alpha': [0.01, 0.001],
        'dist_method': {
            'name': 'BridgeDistill',
            'distill_loss': ['MSE'],
        },
        'seed': 233,
    }
    config = dict(templates['cifar100-classification'])
    config.update(overrides)
    return config
def params_for_SR_new_init():
    """DEIP_Init SRx4 study using ridge-regularized initialization (RDN teacher)."""
    overrides = {
        'project_name': 'deip_SRx4_init_new_Ridge',
        'method': 'DEIP_Init',
        'init_stu_with_teacher': 1,
        'ridge_alpha': [1e-2],
        'teacher_pretrain_path': pretrain_paths["RDNx4"],
        # 'teacher_pretrain_path': pretrain_paths["EDSRx4"],
        'layer_type': ['normal_no_bn'],
        'rank_eps': [0.1, 0.2],
        'max_lr': [2e-4],
    }
    config = dict(templates['DIV2K-SRx4'])
    config.update(overrides)
    return config
def params_for_SR_stable_test():
    """SRx4 stability check across layer types with a fixed rank and two seeds."""
    overrides = dict(
        project_name='deip_SRx4_stable_test',
        method='DEIP_Init',
        init_stu_with_teacher=1,
        teacher_pretrain_path=pretrain_paths["EDSR_200x4"],
        layer_type=['normal_no_bn_scale', 'normal_no_bn', 'normal'],
        fix_r=200,
        max_lr=[2e-4],
        seed=[233, 234],
    )
    merged = dict(templates['DIV2K-SRx4'])
    merged.update(overrides)
    return merged
def params_for_SR_new_init_std_align():
    """SRx4 DEIP_Init sweep with std alignment enabled (RDN teacher)."""
    overrides = dict(
        project_name='deip_SRx4_init_new_align',
        method='DEIP_Init',
        init_stu_with_teacher=1,
        teacher_pretrain_path=pretrain_paths["RDNx4"],
        # teacher_pretrain_path=pretrain_paths["EDSR_100x4"],
        layer_type=['normal_no_bn'],
        rank_eps=[0.1, 0.2],
        max_lr=[2e-4],
        std_align=1,
    )
    merged = dict(templates['DIV2K-SRx4'])
    merged.update(overrides)
    return merged
def params_for_SR_new_init_equal_width():
    """SRx4 DEIP_Init sweep over fixed channel widths (equal-width students)."""
    overrides = dict(
        project_name='deip_SRx4_init_new_equal_width',
        method='DEIP_Init',
        init_stu_with_teacher=1,
        teacher_pretrain_path=pretrain_paths['RDNx4'],
        layer_type=['normal_no_bn'],
        rank_eps=[0.1],
        fix_r=[50, 70, 100],
        max_lr=[2e-4],
        seed=233,
    )
    merged = dict(templates['DIV2K-SRx4'])
    merged.update(overrides)
    return merged
def params_for_SR_new_init_distill():
    """SRx4 distillation verification sweep (EDSR teacher, bridge distillation)."""
    overrides = dict(
        project_name='deip_SRx4_init_distill_verify',
        method='DEIP_Init',
        init_stu_with_teacher=1,
        teacher_pretrain_path=pretrain_paths['EDSRx4'],
        layer_type=['normal_no_bn'],
        rank_eps=[0.1],
        ridge_alpha=0,
        max_lr=[2e-4],
        decompose_adjust=[0, 3],
        distill_coe=[0.1, 0.3],
        distill_alpha=[1e-5],
        dist_method=dict(
            name='BridgeDistill',
            distill_loss=['MSE'],
        ),
    )
    merged = dict(templates['DIV2K-SRx4'])
    merged.update(overrides)
    return merged
def params_for_SR_new_init_distill_new_coe():
    """SRx4 sweep over the 'new' distillation-coefficient mode (RDN teacher)."""
    overrides = dict(
        project_name='deip_SRx4_distill_new_coe',
        method='DEIP_Init',
        init_stu_with_teacher=1,
        teacher_pretrain_path=pretrain_paths['RDNx4'],
        layer_type=['normal_no_bn'],
        distill_coe_mod='new',
        distill_coe=[0.1, 1, 10],
        distill_alpha=1e-5,
        ridge_alpha=1e-2,
        rank_eps=[0.1],
        max_lr=[2e-4],
        dist_method=dict(
            name='BridgeDistill',
            distill_loss=['MSE'],
        ),
        fix_distill_module=1,
        seed=[233, 234],
    )
    merged = dict(templates['DIV2K-SRx4'])
    merged.update(overrides)
    return merged
def params_for_SR_new_conv_init():
    """SRx4 run testing tail initialization without teacher init (anti-vanishing)."""
    overrides = dict(
        project_name='deip_SRx4_init_new_conv_init',
        description='trying to avoid vanishing',
        method='DEIP_Init',
        init_stu_with_teacher=0,
        init_tail=1,
        layer_type=['normal_no_bn'],
        rank_eps=[0.1],
        max_lr=[2e-4, 5e-4, 1e-3],
    )
    merged = dict(templates['DIV2K-SRx4'])
    merged.update(overrides)
    return merged
def params_for_EXP_main_x2():
    """Main CVPR experiment configuration for x2 super-resolution."""
    overrides = dict(
        project_name='CVPR_EXP_MAIN_x2',
        method='DEIP_Init',
        # fix_r=100,
        rank_eps=0.14,
        teacher_pretrain_path=pretrain_paths['EDSR64_newtail_x2'],
        init_stu_with_teacher=1,
        layer_type='normal_no_bn',
        ridge_alpha=0,
        distill_coe=0.3,
        distill_alpha=1e-5,
        dist_method=dict(
            name='BridgeDistill',
            distill_loss='MSE',
        ),
    )
    merged = dict(templates['DIV2Kx2-EXP'])
    merged.update(overrides)
    return merged
def params_for_EXP_main_x3():
    """Main CVPR experiment configuration for x3 super-resolution."""
    overrides = dict(
        project_name='CVPR_EXP_MAIN_x3',
        method='DEIP_Init',
        rank_eps=0.14,
        init_stu_with_teacher=1,
        teacher_pretrain_path=pretrain_paths['EDSR64_newtail_x3'],
        layer_type='normal_no_bn',
        ridge_alpha=0,
        distill_coe=0.3,
        distill_alpha=1e-5,
        dist_method=dict(
            name='BridgeDistill',
            distill_loss='MSE',
        ),
    )
    merged = dict(templates['DIV2Kx3-EXP'])
    merged.update(overrides)
    return merged
def params_for_EXP_main_x4():
    """Main CVPR experiment configuration for x4 super-resolution (fixed rank)."""
    overrides = dict(
        project_name='CVPR_EXP_MAIN_x4',
        method='DEIP_Init',
        # rank_eps=[0.11, 0.12, 0.13, 0.14, 0.15],
        fix_r=64,
        init_stu_with_teacher=1,
        teacher_pretrain_path=pretrain_paths['EDSR64_newtail_x4'],
        layer_type='normal_no_bn',
        ridge_alpha=0,
        # distill_coe=[0.1, 0.3, 0.5, 1, 2],
        distill_coe=0.3,
        distill_alpha=1e-5,
        # distill_alpha=[1e-3, 1e-4, 1e-5, 1e-6],
        dist_method=dict(
            name='BridgeDistill',
            distill_loss='MSE',
        ),
        seed=[235, 236],
    )
    merged = dict(templates['DIV2Kx4-EXP'])
    merged.update(overrides)
    return merged
def params_for_EXP_Baseline_x4():
    """Direct-training baselines for the x4 experiments (two teacher checkpoints)."""
    overrides = dict(
        project_name='CVPR_EXP_Baseline_x4',
        method='DirectTrain',
        fix_r=64,
        init_stu_with_teacher=0,
        teacher_pretrain_path=[pretrain_paths['EDSR64_newtail_x4'],
                               pretrain_paths['EDSR64_newtail_short_x4']],
        layer_type='normal_no_bn',
    )
    merged = dict(templates['DIV2Kx4-EXP'])
    merged.update(overrides)
    return merged
def params_for_EXP_ablation_x4():
    """Ablation grid for x4: init, distill coefficient, init-distill and adjust."""
    overrides = dict(
        project_name='CVPR_EXP_Ablation_x4',
        method='DEIP_Init',
        fix_r=64,
        init_stu_with_teacher=[0, 1],
        teacher_pretrain_path=pretrain_paths['EDSR64_newtail_x4'],
        layer_type='normal_no_bn',
        ridge_alpha=0,
        distill_coe=[0, 0.3],
        init_distill=[0, 1],
        decompose_adjust=[0, 3],
        distill_alpha=1e-5,
        dist_method=dict(
            name='BridgeDistill',
            distill_loss='MSE',
        ),
        seed=236,
    )
    merged = dict(templates['DIV2Kx4-EXP'])
    merged.update(overrides)
    return merged
def params_for_EXP_cmp_init():
    """x2 ablation comparing standard convolution initializers under DirectTrain."""
    overrides = dict(
        project_name='CVPR_EXP_Ablation_Init_x2',
        method='DirectTrain',
        fix_r=64,
        teacher_pretrain_path=pretrain_paths['EDSR64_newtail_x2'],
        init_stu_with_teacher=0,
        # layer_type='repvgg',
        layer_type='normal_no_bn',
        conv_init=['kaiming_normal', 'kaiming_uniform', 'xavier_uniform', 'xavier_normal'],
        gpus=1,
    )
    merged = dict(templates['DIV2Kx2-EXP'])
    merged.update(overrides)
    return merged
def params_for_EXP_cmp_repvggx4():
params = {
'project_name': 'CVPR_EXP_Ablation_repvgg_x4',
'method': 'DirectTrain',
'fix_r': | |
<filename>azure-iot-hub/azure/iot/hub/iothub_registry_manager.py
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from .auth import ConnectionStringAuthentication
from .protocol.iot_hub_gateway_service_ap_is20190630 import (
IotHubGatewayServiceAPIs20190630 as protocol_client,
)
from .protocol.models import (
Device,
Module,
SymmetricKey,
X509Thumbprint,
AuthenticationMechanism,
Configuration,
ServiceStatistics,
RegistryStatistics,
)
class IoTHubRegistryManager(object):
"""A class to provide convenience APIs for IoTHub Registry Manager operations,
based on top of the auto generated IotHub REST APIs
"""
def __init__(self, connection_string):
"""Initializer for a Registry Manager Service client.
After a successful creation the class has been authenticated with IoTHub and
it is ready to call the member APIs to communicate with IoTHub.
:param str connection_string: The IoTHub connection string used to authenticate connection
with IoTHub.
:returns: Instance of the IoTHubRegistryManager object.
:rtype: :class:`azure.iot.hub.IoTHubRegistryManager`
"""
self.auth = ConnectionStringAuthentication(connection_string)
self.protocol = protocol_client(self.auth, "https://" + self.auth["HostName"])
def create_device_with_sas(self, device_id, primary_key, secondary_key, status):
"""Creates a device identity on IoTHub using SAS authentication.
:param str device_id: The name (deviceId) of the device.
:param str primary_key: Primary authentication key.
:param str secondary_key: Secondary authentication key.
:param str status: Initital state of the created device.
(Possible values: "enabled" or "disabled")
:raises: `HttpOperationError<msrest.exceptions.HttpOperationError>`
if the HTTP response status is not in [200].
:returns: Device object containing the created device.
"""
symmetric_key = SymmetricKey(primary_key=primary_key, secondary_key=secondary_key)
kwargs = {
"device_id": device_id,
"status": status,
"authentication": AuthenticationMechanism(type="sas", symmetric_key=symmetric_key),
}
device = Device(**kwargs)
return self.protocol.service.create_or_update_device(device_id, device)
def create_device_with_x509(self, device_id, primary_thumbprint, secondary_thumbprint, status):
"""Creates a device identity on IoTHub using X509 authentication.
:param str device_id: The name (deviceId) of the device.
:param str primary_thumbprint: Primary X509 thumbprint.
:param str secondary_thumbprint: Secondary X509 thumbprint.
:param str status: Initital state of the created device.
(Possible values: "enabled" or "disabled")
:raises: `HttpOperationError<msrest.exceptions.HttpOperationError>`
if the HTTP response status is not in [200].
:returns: Device object containing the created device.
"""
x509_thumbprint = X509Thumbprint(
primary_thumbprint=primary_thumbprint, secondary_thumbprint=secondary_thumbprint
)
kwargs = {
"device_id": device_id,
"status": status,
"authentication": AuthenticationMechanism(
type="selfSigned", x509_thumbprint=x509_thumbprint
),
}
device = Device(**kwargs)
return self.protocol.service.create_or_update_device(device_id, device)
def create_device_with_certificate_authority(self, device_id, status):
"""Creates a device identity on IoTHub using certificate authority.
:param str device_id: The name (deviceId) of the device.
:param str status: Initial state of the created device.
(Possible values: "enabled" or "disabled").
:raises: `HttpOperationError<msrest.exceptions.HttpOperationError>`
if the HTTP response status is not in [200].
:returns: Device object containing the created device.
"""
kwargs = {
"device_id": device_id,
"status": status,
"authentication": AuthenticationMechanism(type="certificateAuthority"),
}
device = Device(**kwargs)
return self.protocol.service.create_or_update_device(device_id, device)
def update_device_with_sas(self, device_id, etag, primary_key, secondary_key, status):
"""Updates a device identity on IoTHub using SAS authentication.
:param str device_id: The name (deviceId) of the device.
:param str etag: The etag (if_match) value to use for the update operation.
:param str primary_key: Primary authentication key.
:param str secondary_key: Secondary authentication key.
:param str status: Initital state of the created device.
(Possible values: "enabled" or "disabled").
:raises: `HttpOperationError<msrest.exceptions.HttpOperationError>`
if the HTTP response status is not in [200].
:returns: The updated Device object containing the created device.
"""
symmetric_key = SymmetricKey(primary_key=primary_key, secondary_key=secondary_key)
kwargs = {
"device_id": device_id,
"status": status,
"etag": etag,
"authentication": AuthenticationMechanism(type="sas", symmetric_key=symmetric_key),
}
device = Device(**kwargs)
return self.protocol.service.create_or_update_device(device_id, device, "*")
def update_device_with_x509(
self, device_id, etag, primary_thumbprint, secondary_thumbprint, status
):
"""Updates a device identity on IoTHub using X509 authentication.
:param str device_id: The name (deviceId) of the device.
:param str etag: The etag (if_match) value to use for the update operation.
:param str primary_thumbprint: Primary X509 thumbprint.
:param str secondary_thumbprint: Secondary X509 thumbprint.
:param str status: Initital state of the created device.
(Possible values: "enabled" or "disabled").
:raises: `HttpOperationError<msrest.exceptions.HttpOperationError>`
if the HTTP response status is not in [200].
:returns: The updated Device object containing the created device.
"""
x509_thumbprint = X509Thumbprint(
primary_thumbprint=primary_thumbprint, secondary_thumbprint=secondary_thumbprint
)
kwargs = {
"device_id": device_id,
"status": status,
"etag": etag,
"authentication": AuthenticationMechanism(
type="selfSigned", x509_thumbprint=x509_thumbprint
),
}
device = Device(**kwargs)
return self.protocol.service.create_or_update_device(device_id, device)
def update_device_with_certificate_authority(self, device_id, etag, status):
"""Updates a device identity on IoTHub using certificate authority.
:param str device_id: The name (deviceId) of the device.
:param str etag: The etag (if_match) value to use for the update operation.
:param str status: Initital state of the created device.
(Possible values: "enabled" or "disabled").
:raises: `HttpOperationError<msrest.exceptions.HttpOperationError>`
if the HTTP response status is not in [200].
:returns: The updated Device object containing the created device.
"""
kwargs = {
"device_id": device_id,
"status": status,
"etag": etag,
"authentication": AuthenticationMechanism(type="certificateAuthority"),
}
device = Device(**kwargs)
return self.protocol.service.create_or_update_device(device_id, device)
def get_device(self, device_id):
"""Retrieves a device identity from IoTHub.
:param str device_id: The name (deviceId) of the device.
:raises: `HttpOperationError<msrest.exceptions.HttpOperationError>`
if the HTTP response status is not in [200].
:returns: The Device object containing the requested device.
"""
return self.protocol.service.get_device(device_id)
def get_configuration(self, device_id):
"""Retrieves the IoTHub configuration for a particular device.
:param str device_id: The name (deviceId) of the device.
:raises: `HttpOperationError<msrest.exceptions.HttpOperationError>`
if the HTTP response status is not in [200].
:returns: The Configuration object.
"""
return self.protocol.service.get_configuration(device_id)
def delete_device(self, device_id, etag=None):
"""Deletes a device identity from IoTHub.
:param str device_id: The name (deviceId) of the device.
:raises: `HttpOperationError<msrest.exceptions.HttpOperationError>`
if the HTTP response status is not in [200].
:returns: None.
"""
if etag is None:
etag = "*"
self.protocol.service.delete_device(device_id, etag)
def get_service_statistics(self):
"""Retrieves the IoTHub service statistics.
:raises: `HttpOperationError<msrest.exceptions.HttpOperationError>`
if the HTTP response status is not in [200].
:returns: The ServiceStatistics object.
"""
return self.protocol.service.get_service_statistics()
def get_device_registry_statistics(self):
"""Retrieves the IoTHub device registry statistics.
:raises: `HttpOperationError<msrest.exceptions.HttpOperationError>`
if the HTTP response status is not in [200].
:returns: The RegistryStatistics object.
"""
return self.protocol.service.get_device_registry_statistics()
def create_module_with_sas(
self, device_id, module_id, managed_by, primary_key, secondary_key, status
):
"""Creates a module identity for a device on IoTHub using SAS authentication.
:param str device_id: The name (deviceId) of the device.
:param str module_id: The name (moduleID) of the module.
:param str managed_by: The name of the manager device (edge).
:param str primary_key: Primary authentication key.
:param str secondary_key: Secondary authentication key.
:param str status: Initital state of the created device (enabled or disabled).
:raises: `HttpOperationError<msrest.exceptions.HttpOperationError>`
if the HTTP response status is not in [200].
:returns: Module object containing the created module.
"""
symmetric_key = SymmetricKey(primary_key=primary_key, secondary_key=secondary_key)
kwargs = {
"device_id": device_id,
"module_id": module_id,
"managed_by": managed_by,
"status": status,
"authentication": AuthenticationMechanism(type="sas", symmetric_key=symmetric_key),
}
module = Module(**kwargs)
return self.protocol.service.create_or_update_module(device_id, module_id, module)
def create_module_with_x509(
self, device_id, module_id, managed_by, primary_thumbprint, secondary_thumbprint, status
):
"""Creates a module identity for a device on IoTHub using X509 authentication.
:param str device_id: The name (deviceId) of the device.
:param str module_id: The name (moduleID) of the module.
:param str managed_by: The name of the manager device (edge).
:param str primary_thumbprint: Primary X509 thumbprint.
:param str secondary_thumbprint: Secondary X509 thumbprint.
:param str status: Initital state of the created device (enabled or disabled).
:raises: `HttpOperationError<msrest.exceptions.HttpOperationError>`
if the HTTP response status is not in [200].
:returns: Module object containing the created module.
"""
x509_thumbprint = X509Thumbprint(
primary_thumbprint=primary_thumbprint, secondary_thumbprint=secondary_thumbprint
)
kwargs = {
"device_id": device_id,
"module_id": module_id,
"managed_by": managed_by,
"status": status,
"authentication": AuthenticationMechanism(
type="selfSigned", x509_thumbprint=x509_thumbprint
),
}
module = Module(**kwargs)
return self.protocol.service.create_or_update_module(device_id, module_id, module)
def create_module_with_certificate_authority(self, device_id, module_id, managed_by, status):
"""Creates a module identity for a device on IoTHub using certificate authority.
:param str device_id: The name (deviceId) of the device.
:param str module_id: The name (moduleID) of the module.
:param str managed_by: The name of the manager device (edge).
:param str status: Initital state of the created device (enabled or disabled).
:raises: `HttpOperationError<msrest.exceptions.HttpOperationError>`
if the HTTP response status is not in [200].
:returns: Module object containing the created module.
"""
kwargs = {
"device_id": device_id,
"module_id": module_id,
"managed_by": managed_by,
"status": status,
"authentication": AuthenticationMechanism(type="certificateAuthority"),
}
module = Module(**kwargs)
return self.protocol.service.create_or_update_module(device_id, module_id, module)
def update_module_with_sas(
self, device_id, module_id, managed_by, etag, primary_key, secondary_key, status
):
"""Updates a module identity for a device on IoTHub using SAS authentication.
:param str device_id: The name (deviceId) of the device.
:param str module_id: The name (moduleID) of the module.
:param str managed_by: The name of the manager device (edge).
:param str etag: The etag (if_match) value to use for the update operation.
:param str primary_key: Primary authentication key.
| |
# import the necessary packages
from imutils.video import VideoStream
import argparse
import numpy as np, cv2
import imutils, time
# import the necessary packages
from scipy.spatial import distance as dist
from collections import OrderedDict
import numpy as np
class CentroidTracker():
    """Greedy nearest-centroid multi-object tracker.

    Assigns a persistent integer ID to each tracked object and, on every
    frame, matches the centroids of the new bounding boxes to the existing
    objects by Euclidean distance.  Objects that are missing for more than
    ``maxDisappeared`` consecutive frames are dropped.
    """

    def __init__(self, maxDisappeared=3, maxDistance=None):
        """
        :param maxDisappeared: maximum number of consecutive frames an
            object may be marked "disappeared" before it is deregistered.
        :param maxDistance: optional distance threshold; candidate matches
            farther apart than this are rejected.  ``None`` (the default)
            keeps the original unrestricted matching behavior, so existing
            callers are unaffected.
        """
        # ID handed to the next registered object.
        self.nextObjectID = 0
        # objectID -> centroid, and objectID -> consecutive missing frames.
        self.objects = OrderedDict()
        self.disappeared = OrderedDict()
        self.maxDisappeared = maxDisappeared
        self.maxDistance = maxDistance

    def register(self, centroid):
        """Start tracking a new object located at ``centroid``."""
        self.objects[self.nextObjectID] = centroid
        self.disappeared[self.nextObjectID] = 0
        self.nextObjectID += 1

    def deregister(self, objectID):
        """Stop tracking ``objectID`` and forget its state."""
        del self.objects[objectID]
        del self.disappeared[objectID]

    def update(self, rects):
        """Update all tracks from the current frame's bounding boxes.

        :param rects: iterable of ``(startX, startY, endX, endY)`` boxes.
        :return: OrderedDict mapping object ID -> centroid (int array).
        """
        # No detections: age every existing track and drop the stale ones.
        if len(rects) == 0:
            for objectID in list(self.disappeared.keys()):
                self.disappeared[objectID] += 1
                if self.disappeared[objectID] > self.maxDisappeared:
                    self.deregister(objectID)
            return self.objects

        # Centroid of each input bounding box.
        inputCentroids = np.zeros((len(rects), 2), dtype="int")
        for (i, (startX, startY, endX, endY)) in enumerate(rects):
            cX = int((startX + endX) / 2.0)
            cY = int((startY + endY) / 2.0)
            inputCentroids[i] = (cX, cY)

        if len(self.objects) == 0:
            # Nothing tracked yet: every detection becomes a new object.
            for i in range(0, len(inputCentroids)):
                self.register(inputCentroids[i])
        else:
            objectIDs = list(self.objects.keys())
            objectCentroids = list(self.objects.values())
            # Pairwise distances: rows = existing objects, cols = inputs.
            D = dist.cdist(np.array(objectCentroids), inputCentroids)
            # Greedy matching: visit rows in order of their best (smallest)
            # distance, pairing each with its nearest unused column.
            rows = D.min(axis=1).argsort()
            cols = D.argmin(axis=1)[rows]
            usedRows = set()
            usedCols = set()
            for (row, col) in zip(rows, cols):
                if row in usedRows or col in usedCols:
                    continue
                # Optional generalization: reject matches that jumped
                # farther than maxDistance (inactive when None).
                if self.maxDistance is not None and D[row, col] > self.maxDistance:
                    continue
                objectID = objectIDs[row]
                self.objects[objectID] = inputCentroids[col]
                self.disappeared[objectID] = 0
                usedRows.add(row)
                usedCols.add(col)

            unusedRows = set(range(0, D.shape[0])).difference(usedRows)
            unusedCols = set(range(0, D.shape[1])).difference(usedCols)
            if D.shape[0] >= D.shape[1]:
                # At least as many objects as inputs: unmatched objects are
                # aged and possibly deregistered.
                for row in unusedRows:
                    objectID = objectIDs[row]
                    self.disappeared[objectID] += 1
                    if self.disappeared[objectID] > self.maxDisappeared:
                        self.deregister(objectID)
            else:
                # More inputs than objects: unmatched inputs become new
                # trackable objects.
                for col in unusedCols:
                    self.register(inputCentroids[col])

        return self.objects
# initialize our centroid tracker and frame dimensions
# NOTE(review): `maxobjectID` is initialized here but never updated in the
# visible portion of the script -- confirm it is still needed.
ct = CentroidTracker(); maxobjectID = 0;
###############################
# Second batch of imports (duplicates several imports above; harmless, but
# they could be consolidated at the top of the file).
from imutils.video import VideoStream
import argparse
import imutils, numpy as np
import time, cv2
# Hard-coded CLI-style arguments: the input video and the OpenCV tracker type.
args = {
    'video': "stable_Baltimore & Charles - AM (1)_TrimEnd.mp4", #"../overpass.mp4",
    'tracker':'kcf'
}
# initialize a dictionary that maps strings to their corresponding
# OpenCV object tracker implementations
# NOTE(review): several of these factories (Boosting, TLD, MedianFlow,
# MOSSE) ship only with opencv-contrib builds and were moved under
# cv2.legacy in OpenCV >= 4.5 -- confirm against the installed version.
OPENCV_OBJECT_TRACKERS = {
    "csrt": cv2.TrackerCSRT_create,
    "kcf": cv2.TrackerKCF_create,
    "boosting": cv2.TrackerBoosting_create,
    "mil": cv2.TrackerMIL_create,
    "tld": cv2.TrackerTLD_create,
    "medianflow": cv2.TrackerMedianFlow_create,
    "mosse": cv2.TrackerMOSSE_create
}
ROI_CAPTURED = False; refPt = []
# NOTE(review): the first assignment is immediately overwritten below; only
# the second video path is actually used.
video_dir = "Guilford & Madison - AM.avi" #
video_dir = "stable_Baltimore & Charles - AM (1)_TrimEnd.mp4"
#video_dir = "Guilford & LexingtonFayette - AM.avi"
# Background subtractor is created here; its use is not in the visible part
# of the script.
backSub = cv2.createBackgroundSubtractorMOG2()
trackers = cv2.MultiTracker_create() # Create Multi-Tracker Object
vs = cv2.VideoCapture(video_dir) # Load video
print("[INFO] video path loaded..")
state = [0, 1] # track states
print("PRESS 's' to select anchor points.")
# loop over frames from the video stream
# Interactive anchor-selection pass: plays the video until the user presses
# 's' (select ROIs), 'r' (reset trackers) or 'q' (quit).
while True:
    # grab the current frame, then handle if we are using a
    # VideoStream or VideoCapture object
    _, frame_o = vs.read()
    # check to see if we have reached the end of the stream
    if frame_o is None:
        break
    # resize the frame (so we can process it faster)
    frame = imutils.resize(frame_o, width=1000)
    # grab the updated bounding box coordinates (if any) for each
    # object that is being tracked
    (success, boxes) = trackers.update(frame)
    # loop over the bounding boxes and draw then on the frame
    for box in boxes:
        (x, y, w, h) = [int(v) for v in box]
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    # show the output frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # if the 's' key is selected, we are going to "select" a bounding
    # box to track
    if key == ord("s"):
        # Restart the video from the first frame before selecting anchors.
        vs = cv2.VideoCapture(video_dir) # Load video
        _, frame_o = vs.read()
        # resize the frame (so we can process it faster)
        frame = imutils.resize(frame_o, width=1000)
        for i in state:
            # select the bounding box of the object we want to track (make
            # sure you press ENTER or SPACE after selecting the ROI)
            box = cv2.selectROI("Frame", frame, fromCenter=False,
                showCrosshair=True)
            # create a new object tracker for the bounding box and add it
            # to our multi-object tracker
            tracker = OPENCV_OBJECT_TRACKERS[args["tracker"]]()
            trackers.add(tracker, frame, box)
        # Record the center of each selected box; these become the anchor
        # points ("strip") used after this loop.
        strip = [];
        (success, boxes) = trackers.update(frame)
        for box in boxes:
            (x, y, w, h) = [int(v) for v in box]
            strip.append((x+w//2, y+h//2))
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        #break out of loop
        break
    # press `r` to reset
    elif key == ord("r"):
        trackers = cv2.MultiTracker_create() # reset multi-object tracker
    # if the `q` key was pressed, break from the loop
    elif key == ord("q"):
        exit()
cv2.destroyAllWindows()
print("\nGOING THROUGH THE VIDEO USING THE ANCHOR TAGS\n")
print("PRESS 'l' to draw strip.")
#Get the y-value from a line using two points on the line
def getyfrom(y, point1, point2):
(x1, y1) = point1
(x2, y2) = point2
m = (y2-y1) / (x2-x1)
return int(y1 + | |
i in pv])
else:
pv = ','.join(['"%s"' % i if i.find(' ') > -1 else i \
for i in pv])
elif isinstance(pv, dict):
if flag == '-f':
filt_lst = []
for k, v_lst in pv.items():
for v in v_lst:
if v is None or v == '': continue
v = v.replace('"', '').replace("'", '')
filt_lst.append("%s.%s" % (k, v))
if len(filt_lst) == 0: continue
pv = '"%s"' % ','.join(filt_lst)
elif isinstance(pv, bool):
if not pv:
continue
else:
pv = ''
else:
if isinstance(pv, str) and pv.find(' ') > -1:
pv = '"%s"' % pv
syntax_params.append('%s %s' % (flag, pv))
out_syntax = "python %s %s -s" % (os.path.realpath(__file__),
' '.join(syntax_params))
return out_syntax
def get_input(self, msg, err_msg=None, required=True, password=False):
"""
Gets an input from the user for an argument.
:param msg: The message used to prompt the user.
:type msg: str
:param err_msg: The message to print when the user enters an invalid
input.
:type err_msg: str
:param required: Determines if the argument is required.
:type required: boolean
:param password: Determines if the argument is for password entry.
:type password: boolean
:return: The value entered by the user.
:rtype: str
"""
if password:
# If the argument is for password entry, hide entry
in_val = getpass.getpass(prompt='->> %s: ' % msg)
else:
output = "\n->> %s: " % msg
if msg.endswith('\n'):
output = "\n->> %s:\n" % msg.strip('\n')
in_val = input(output)
if required and in_val == '':
eod_util.Eodms_OrderDownload().print_support(err_msg)
self.logger.error(err_msg)
sys.exit(1)
return in_val
    def print_syntax(self):
        """
        Prints the command-line syntax for the script.

        The syntax is also written to the session log so a run can be
        reproduced later.
        """
        print("\nUse this command-line syntax to run the same parameters:")
        cli_syntax = self.build_syntax()
        print(cli_syntax)
        # Preserve the reproducible command line in the log file as well.
        self.logger.info("Command-line Syntax: %s" % cli_syntax)
def prompt(self):
"""
Prompts the user for the input options.
"""
username = self.params.get('username')
password = self.params.get('password')
input = self.params.get('input')
collections = self.params.get('collections')
process = self.params.get('process')
filters = self.params.get('filters')
dates = self.params.get('dates')
maximum = self.params.get('maximum')
priority = self.params.get('priority')
output = self.params.get('output')
csv_fields = self.params.get('csv_fields')
aws = self.params.get('aws')
silent = self.params.get('silent')
version = self.params.get('version')
if version:
print("%s: Version %s" % (__title__, __version__))
sys.exit(0)
self.eod.set_silence(silent)
new_user = False
new_pass = False
if username is None or password is None:
print("\n--------------Enter EODMS Credentials--------------")
if username is None:
username = self.config_info.get('RAPI', 'username')
if username == '':
msg = "Enter the username for authentication"
err_msg = "A username is required to order images."
username = self.get_input(msg, err_msg)
new_user = True
else:
print("\nUsing the username set in the 'config.ini' file...")
if password is None:
password = self.config_info.get('RAPI', 'password')
if password == '':
msg = 'Enter the password for authentication'
err_msg = "A password is required to order images."
password = self.get_input(msg, err_msg, password=True)
new_pass = True
else:
password = <PASSWORD>(password).decode("utf-8")
print("Using the password set in the 'config.ini' file...")
if new_user or new_pass:
suggestion = ''
if self.eod.silent:
suggestion = " (it is best to store the credentials if " \
"you'd like to run the script in silent mode)"
answer = input("\n->> Would you like to store the credentials "
"for a future session%s? (y/n):" % suggestion)
if answer.lower().find('y') > -1:
self.config_info.set('RAPI', 'username', username)
pass_enc = base64.b64encode(password.encode("utf-8")).decode(
"utf-8")
self.config_info.set('RAPI', 'password',
str(pass_enc))
config_fn = os.path.join(os.path.dirname(
os.path.abspath(__file__)),
'config.ini')
cfgfile = open(config_fn, 'w')
self.config_info.write(cfgfile, space_around_delimiters=False)
cfgfile.close()
# Get number of attempts when querying the RAPI
self.eod.set_attempts(self.config_info.get('RAPI', 'access_attempts'))
self.eod.create_session(username, password)
self.params = {'collections': collections,
'dates': dates,
'input': input,
'maximum': maximum,
'process': process}
print()
coll_lst = self.eod.eodms_rapi.get_collections(True)
if coll_lst is None:
msg = "Failed to retrieve a list of available collections."
self.logger.error(msg)
self.eod.print_support(msg)
sys.exit(1)
print("\n(For more information on the following prompts, please refer"
" to the README file.)")
#########################################
# Get the type of process
#########################################
if process is None or process == '':
self.process = self.ask_process()
else:
self.process = process
self.params['process'] = self.process
if self.process == 'full':
self.logger.info("Searching, ordering and downloading images "
"using an AOI.")
# Get the collection(s)
coll = self.ask_collection(collections)
self.params['collections'] = coll
# If Radarsat-1, ask user if they want to download from AWS
if 'Radarsat1' in coll:
aws = self.ask_aws(aws)
self.params['aws'] = aws
# Get the AOI file
inputs = self.ask_aoi(input)
self.params['input'] = inputs
# Get the filter(s)
filt_dict = self.ask_filter(filters)
self.params['filters'] = filt_dict
# Get the date(s)
dates = self.ask_dates(dates)
self.params['dates'] = dates
# Get the output geospatial filename
output = self.ask_output(output)
self.params['output'] = output
# Get the maximum(s)
maximum = self.ask_maximum(maximum)
self.params['maximum'] = maximum
# Get the priority
priority = self.ask_priority(priority)
self.params['priority'] = priority
# Print command-line syntax for future processes
self.print_syntax()
self.eod.search_order_download(self.params)
elif self.process == 'order_csv':
self.logger.info("Ordering and downloading images using results "
"from a CSV file.")
#########################################
# Get the CSV file
#########################################
msg = "Enter the full path of the CSV file exported " \
"from the EODMS UI website"
inputs = self.ask_input_file(input, msg)
self.params['input'] = inputs
fields = self.eod.get_input_fields(inputs)
csv_fields = self.ask_fields(csv_fields, fields)
self.params['csv_fields'] = csv_fields
# Get the output geospatial filename
output = self.ask_output(output)
self.params['output'] = output
# Get the maximum(s)
maximum = self.ask_maximum(maximum)
self.params['maximum'] = maximum
# Get the priority
priority = self.ask_priority(priority)
self.params['priority'] = priority
# Print command-line syntax for future processes
self.print_syntax()
# Run the order_csv process
self.eod.order_csv(self.params)
elif self.process == 'download_aoi' or self.process == 'search_only':
if self.process == 'download_aoi':
self.logger.info("Downloading existing orders using an AOI.")
else:
self.logger.info("Searching for images using an AOI.")
# Get the collection(s)
coll = self.ask_collection(collections)
self.params['collections'] = coll
# Get the AOI file
inputs = self.ask_aoi(input)
self.params['input'] = inputs
# Get the filter(s)
filt_dict = self.ask_filter(filters)
self.params['filters'] = filt_dict
# Get the date(s)
dates = self.ask_dates(dates)
self.params['dates'] = dates
# Get the output geospatial filename
output = self.ask_output(output)
self.params['output'] = output
# Print command-line syntax for future processes
self.print_syntax()
if self.process == 'download_aoi':
self.eod.download_aoi(self.params)
else:
self.eod.search_only(self.params)
elif self.process == 'download_only':
# Download existing orders using CSV file from previous session
self.logger.info("Downloading images using results from a CSV "
"file from a previous session.")
# Get the CSV file
msg = "Enter the full path of the CSV Results file from a " \
"previous session"
inputs = self.ask_input_file(input, msg)
self.params['input'] = inputs
# Get the output geospatial filename
output = self.ask_output(output)
self.params['output'] = output
# Print command-line syntax for future processes
self.print_syntax()
# Run the download_only process
self.eod.download_only(self.params)
elif self.process == 'record_id':
# Order and download a single or set of images using Record IDs
self.logger.info("Ordering and downloading images using "
"Record IDs")
inputs = self.ask_record_ids(input)
self.params['input'] = inputs
# If Radarsat-1, ask user if they want to download from AWS
if 'Radarsat1' in inputs:
aws = self.ask_aws(aws)
self.params['aws'] = aws
# Get the output geospatial filename
output = self.ask_output(output)
self.params['output'] = output
# Get the priority
priority = self.ask_priority(priority)
self.params['priority'] = priority
# Print command-line syntax for future processes
self.print_syntax()
# Run the order_csv process
self.eod.order_ids(self.params)
else:
self.eod.print_support("That is not a valid process type.")
self.logger.error(
"An invalid parameter was entered during the prompt.")
sys.exit(1)
def get_config():
    """
    Reads the configuration file located next to this module.

    :return: The parsed configuration.
    :rtype: configparser.ConfigParser
    """
    parser = configparser.ConfigParser()
    script_dir = os.path.dirname(os.path.abspath(__file__))
    parser.read(os.path.join(script_dir, 'config.ini'))
    return parser
def print_support(err_str=None):
    """
    Prints the support message, optionally prefixed with an error.

    :param err_str: The error string to print along with support.
    :type err_str: str
    """
    downloader = eod_util.Eodms_OrderDownload()
    downloader.print_support(err_str)
output_help = '''The output file path containing the results in a
geospatial format.
The output parameter can be:
- None (empty): No output will be created (a results CSV file will still be
created in the 'results' folder)
- GeoJSON: The output will be in the GeoJSON format
(use extension .geojson or .json)
- KML: The output will be in KML format (use extension .kml) (requires GDAL Python package)
- GML: The output will be in GML format (use extension .gml) (requires GDAL Python package)
- Shapefile: The output will be ESRI Shapefile (requires GDAL Python package)
(use extension .shp)'''
@click.command(context_settings={'help_option_names': ['-h', '--help']})
@click.option('--username', '-u', default=None,
help='The username of the EODMS account used for '
'authentication.')
@click.option('--password', '-p', default=None,
| |
<reponame>NISH1001/neurgoo
#!/usr/bin/env python3
from __future__ import annotations
import random
from abc import ABC, abstractmethod
from typing import Optional, Sequence, Tuple, Type
import numpy as np
from .misc.eval import Evaluator
from .structures import NULL_TENSOR, Shape, Tensor
class BaseMixin:
    """Small mixin shared by every neurgoo component."""

    @property
    def __classname__(self) -> str:
        """Name of the concrete class of this instance."""
        return type(self).__name__
class AbstractLayer(BaseMixin, ABC):
    """Base class for every building block of the network.

    Anything that transforms tensors -- dense layers, activation
    functions, neurons, losses -- is modelled as a "layer" whose two
    core duties are:

        - the forward pass (see ``feed_forward(...)``)
        - back-propagation (see ``backpropagate(...)``)
    """

    def __init__(self, trainable: bool = True, debug: bool = False) -> None:
        self._trainable = bool(trainable)
        self.debug = bool(debug)
        self.mode = "train"

    def train_mode(self) -> None:
        """Switch to training behaviour (parameters become trainable)."""
        self.trainable = True
        self.mode = "train"

    def eval_mode(self) -> None:
        """Switch to inference behaviour (parameters are frozen)."""
        self.trainable = False
        self.mode = "eval"

    @property
    def trainable(self) -> bool:
        return self._trainable

    @trainable.setter
    def trainable(self, val: bool) -> None:
        assert isinstance(val, (bool, int))
        self._trainable = bool(val)

    @property
    def input_shape(self) -> Shape:
        return Shape()

    @property
    def output_shape(self) -> Shape:
        return Shape()

    @property
    def num_params(self) -> int:
        """Number of trainable scalars in this layer; 0 by default."""
        return 0

    @property
    def layer_name(self) -> str:
        return self.__classname__

    @abstractmethod
    def initialize(self, *args, **kwargs) -> None:
        raise NotImplementedError()

    @abstractmethod
    def feed_forward(self, X: Tensor, **kwargs) -> Tensor:
        raise NotImplementedError()

    @abstractmethod
    def backpropagate(self, grad_accum: Tensor) -> Tensor:
        """
        Back-propagate the gradient.

        Args:
            grad_accum: ``Tensor``
                Accumulated gradient wrt the output of the layer.
        Returns:
            ``Tensor`` gradient wrt input to the layer.
        """
        raise NotImplementedError()

    def __call__(self, X: Tensor, **kwargs) -> Tensor:
        return self.feed_forward(X, **kwargs)

    def __str__(self) -> str:
        return f"{self.__classname__} || Shape: ({self.input_shape}, {self.output_shape}) || trainable: {self.trainable}"
class ActivationLayer(AbstractLayer):
    """
    This class represents an activation layer.

    Some of the implementations can be found at `neurgoo.layers.activations`:
        - `neurgoo.layers.activations.Sigmoid`
        - `neurgoo.layers.activations.ReLU`
        - `neurgoo.layers.activations.LeakyReLU`
        - `neurgoo.layers.activations.Softmax`

    Each activation layer has 2 primary attributes which should be implemented:
        - `__call__(...)`, a functor applying the activation
        - `gradient(...)`, the gradient w.r.t. the input, used by
          `backpropagate(...)`
    """

    def __init__(self):
        # Bug fix: the parent initializer was never invoked, so `_trainable`
        # and `debug` did not exist until mutated; accessing `self.trainable`
        # (e.g. via AbstractLayer.__str__) raised AttributeError.
        super().__init__()
        self._input_cache = NULL_TENSOR.copy()
        self.mode = "train"

    def initialize(self):
        """Activations have no parameters; nothing to initialize."""
        pass

    def feed_forward(self, x):
        # Cache the input only while training: `backpropagate` needs it to
        # evaluate the local gradient. In eval mode the cache is skipped to
        # save memory.
        if self.mode == "train":
            self._input_cache = x
            self.trainable = True
        elif self.mode == "eval":
            self.trainable = False
        return self(x)

    def __call__(self, x):
        raise NotImplementedError()

    def backpropagate(self, grad_accum: Tensor) -> Tensor:
        # Chain rule: upstream gradient times local gradient at cached input.
        return grad_accum * self.gradient(self._input_cache)

    def gradient(self, x: Tensor) -> Tensor:
        raise NotImplementedError()

    def __str__(self) -> str:
        return f"{self.__classname__} || Attrs => {self.__dict__}"
class AbstractLoss(BaseMixin, ABC):
    """
    This class represents the loss component of the neural network system.

    Any implementation of the loss should have 2 main attributes:
        - `loss`, which computes the loss given targets and predictions
        - `gradient`, which computes the gradient required by
          `backpropagate(...)` methods

    Current implementations:
        - `neurgoo.losses.MeanSquaredError`
        - `neurgoo.losses.BinaryCrossEntropyLoss`
        - `neurgoo.losses.CrossEntropyLossWithLogits`
        - `neurgoo.losses.HingeLoss`
    """

    def __init__(self, name: Optional[str] = None) -> None:
        """
        Args:
            name: Optional display name; defaults to the class name.
        """
        # Bug fix: the computed name was previously bound to a local variable
        # and immediately discarded; store it on the instance.
        self.name = name or self.__classname__

    @abstractmethod
    def loss(self, actual: Tensor, predicted: Tensor) -> Tensor:
        raise NotImplementedError()

    def __call__(self, actual: Tensor, predicted: Tensor) -> Tensor:
        return self.loss(actual, predicted)

    def feed_forward(self, actual: Tensor, predicted: Tensor) -> Tensor:
        # Alias so a loss object can sit at the end of a layer pipeline.
        return self.loss(actual, predicted)

    @abstractmethod
    def gradient(self, actual: Tensor, predicted: Tensor) -> Tensor:
        raise NotImplementedError()
class OptimParam(BaseMixin):
    """
    Represents a parameter type that any optimizer can affect for the
    gradient update step.

    This is a deliberately naive implementation to keep loose segregation
    between layer, backpropagation and optimizer. `neurgoo.layers.Linear`
    holds its weights and biases as two `OptimParam` objects, and
    `model.params()` returns references to the layers' OptimParam variables,
    which are then handed to an optimizer.

    Attributes:
        val: ``Tensor`` (``np.ndarray``) storing the actual values.
        grad: ``Tensor`` storing the delta used to update `val` in the
            optimizer.
        requires_grad: whether the optimizer may update this parameter.
    """

    def __init__(
        self, val: Optional[Tensor] = None, requires_grad: bool = True
    ) -> None:
        # Bug fix: `val or NULL_TENSOR` raised "truth value of an array is
        # ambiguous" for any multi-element ndarray and silently replaced
        # legitimate empty/zero arrays with NULL_TENSOR. Test for None
        # explicitly instead of relying on truthiness.
        self.val: Tensor = val if val is not None else NULL_TENSOR
        self.grad: Tensor = NULL_TENSOR
        self.requires_grad: bool = bool(requires_grad)

    @property
    def shape(self) -> Shape:
        return self.val.shape

    @classmethod
    def default_empty(cls) -> OptimParam:
        """Create a parameter holding an empty value array."""
        return cls(np.array([]), requires_grad=True)

    def __repr__(self) -> str:
        return str(self)

    def __str__(self) -> str:
        name = self.__classname__
        return f"{name} || requires_grad={self.requires_grad} || val_shape = {self.val.shape} || grad_shape = {self.grad.shape}"
class AbstractModel(AbstractLayer):
    """
    Abstraction for an ordered collection of layers.

    With a model we can:
        - add any number of layers
        - run a forward pass (calls each layer's `feed_forward` in order)
        - run a backward pass (calls each layer's `backpropagate` in
          reverse order)

    Note:
        1) `eval_mode()` disables all trainable params and (in the layers)
           avoids caching of inputs -- a memory optimization, since no
           backpropagation happens in evaluation-only mode.
        2) `train_mode()` re-enables training and input caching.

    See `neurgoo.models.DefaultNNModel` for the current implementation.
    """
    def __init__(
        self,
        layers: Optional[Sequence[Type[AbstractLayer]]] = None,
        name: Optional[str] = None,
        trainable: bool = True,
        debug: bool = False,
    ):
        self.name = name or self.__classname__
        # Goes through AbstractLayer's `trainable` property setter, which
        # validates and stores the flag in `_trainable`.
        self.trainable = bool(trainable)
        self.debug = bool(debug)
        layers = list(layers or [])
        self._sanity_check_layers(layers)
        self.layers = layers

    def initialize(self) -> None:
        # Models have no parameters of their own; layers self-initialize.
        pass

    def _sanity_check_layers(self, layers: Sequence[Type[AbstractLayer]]) -> bool:
        """Validate that `layers` is a list/tuple of AbstractLayer instances."""
        if layers is None:
            return True
        if layers is not None and not isinstance(layers, (list, tuple)):
            raise TypeError(
                f"Invalid type for layers. Expected any of list, tuple. Got {type(layers)}"
            )
        for i, layer in enumerate(layers):
            if not isinstance(layer, AbstractLayer):
                raise TypeError(f"Invalid type for [layer={layer}] at [index={i}]")
        return True

    def add_layer(self, layer: Type[AbstractLayer]) -> Type[AbstractModel]:
        """Append a single layer; returns self to allow chaining."""
        if not isinstance(layer, AbstractLayer):
            raise TypeError(
                f"Invalid type for layer={layer}. Expected a base type of AbstractLayer. Got {type(layer)}"
            )
        self.layers.append(layer)
        return self

    def add_layers(self, layers: Sequence[Type[AbstractLayer]]) -> Type[AbstractModel]:
        """Append several layers in order; returns self to allow chaining."""
        self._sanity_check_layers(layers)
        for layer in layers:
            self.add_layer(layer)
        return self

    def predict(self, X: Tensor) -> Tensor:
        """Alias for a plain forward pass."""
        return self.feed_forward(X)

    def feed_forward(self, X: Tensor) -> Tensor:
        """Feed X through every layer, in insertion order."""
        for layer in self.layers:
            X = layer.feed_forward(X)
        return X

    def __getitem__(self, index: int) -> Type[AbstractLayer]:
        return self.layers[index]

    def __call__(self, X: Tensor) -> Tensor:
        return self.feed_forward(X)

    def params(self) -> Tuple[OptimParam]:
        """
        Return a collection of all the ``OptimParam``
        objects stored in any layer (searched via each layer's __dict__).
        """
        res = []
        for layer in self.layers:
            for var, t in layer.__dict__.items():
                if isinstance(t, OptimParam):
                    res.append(getattr(layer, var))
        return tuple(res)

    def backpropagate(self, grad: Tensor) -> Tensor:
        """Propagate `grad` through the layers in reverse order."""
        for layer in reversed(self.layers):
            grad = layer.backpropagate(grad)
        return grad

    def train_mode(self) -> None:
        self.trainable = True
        for layer in self.layers:
            layer.train_mode()

    def eval_mode(self) -> None:
        self.trainable = False
        for layer in self.layers:
            layer.eval_mode()

    def __str__(self) -> str:
        name = self.name
        layers_str = "\n".join([str(layer) for layer in self.layers])
        return f"[Model=({name}, {self.__classname__})]\nnum_layers={len(self.layers)}\nLayers=[\n{layers_str}\n]"
class AbstractOptimizer(BaseMixin, ABC):
    """
    Base class for optimizers.

    Subclasses implement `step(...)`, which reads each trainable param's
    `grad` and updates its `val` accordingly.
    See `neurgoo.optimizers.SGD` for the current implementation.
    """

    def __init__(
        self, params: Tuple[OptimParam], lr: float = 1e-3, debug: bool = False
    ) -> None:
        """
        Args:
            params: tuple of OptimParam objects (typically `model.params()`).
            lr: learning rate; must be a float.
            debug: enables verbose behaviour in subclasses.
        """
        self._sanity_check_params(params)
        self.params = params
        if not isinstance(lr, float):
            raise TypeError(f"Invalid type for lr. Expected float. Got {type(lr)}")
        self.lr = lr
        self.debug = bool(debug)

    def _sanity_check_params(self, params: Tuple[OptimParam]) -> bool:
        """Validate that `params` is a tuple of OptimParam objects."""
        assert params is not None
        if not isinstance(params, tuple):
            raise TypeError(
                f"Invalid type for params. Expected tuple. Got {type(params)}"
            )
        for i, param in enumerate(params):
            if not isinstance(param, OptimParam):
                raise TypeError(
                    f"Invalid type for param at index={i}. Expected type of OptimParam. Got {type(param)}"
                )
        # Fix: the signature promises a bool but the function previously fell
        # off the end returning None (cf. AbstractModel._sanity_check_layers).
        return True

    @abstractmethod
    def step(self) -> None:
        raise NotImplementedError()
class AbstractModelTrainer(BaseMixin, ABC):
"""
This component encapsulates all the main training loop,
through `fit(...)` method.
See `neurgoo.trainers.DefaultModelTrainer` class for current implementation.
"""
def __init__(
self,
model: Type[AbstractModel],
loss: Type[AbstractLoss],
optimizer: Type[AbstractOptimizer],
evaluator: Evaluator,
debug: bool = False,
) -> None:
self.debug = bool(debug)
if not isinstance(model, AbstractModel):
raise TypeError(
f"Invalid type for model. Expected any type of AbstractModel. Got {type(model)}"
)
self.model = model
if not isinstance(loss, AbstractLoss):
raise TypeError(
f"Invalid type for loss. Expected any type of AbstractLoss. Got {type(loss)}"
)
self.loss = loss
if not isinstance(optimizer, AbstractOptimizer):
raise TypeError(
f"Invalid type for optimizer. Expected any type of AbstractOptimizer. Got {type(optimizer)}"
)
self.optimizer = optimizer
if not isinstance(evaluator, Evaluator):
raise TypeError(
f"Invalid type for evaluator. Expected type of | |
import autosar
import math
import re
import sys
import copy
# Numeric APX type codes used throughout the data-signature parser.
# The order of the UINT*/SINT*/STRING codes matches the lookup tables in
# DataSignature._calcElemSize and _typeCodeToStr.
INVALID_TYPE_CODE = -1
UINT8_TYPE_CODE = 0
UINT16_TYPE_CODE = 1
UINT32_TYPE_CODE = 2
UINT64_TYPE_CODE = 3
SINT8_TYPE_CODE = 4
SINT16_TYPE_CODE = 5
SINT32_TYPE_CODE = 6
SINT64_TYPE_CODE = 7
STRING_TYPE_CODE = 8
RECORD_TYPE_CODE = 9
REFERENCE_TYPE_CODE = 10
# NOTE(review): VTYPE_* appear to classify init-value variants (scalar vs
# list vs map) -- confirm against the PortAttribute parser (not visible here).
VTYPE_INVALID = -1
VTYPE_SCALAR = 0
VTYPE_LIST = 1
VTYPE_MAP = 2
# Guard value; presumably limits recursion when resolving nested type
# references -- TODO confirm where it is consumed.
MAX_RECURSE_DEPTH = 10
def match_pair(s, left, right):
    """Extract the text between a balanced pair of delimiters.

    If ``s`` begins with ``left``, scan for the matching ``right``
    (honouring nesting) and return ``(inner, remainder)``.  Returns
    ``(None, s)`` when ``s`` does not start with ``left``, and
    ``(None, "")`` when the closing delimiter is missing.
    """
    if not s or s[0] != left:
        return (None, s)
    depth = 1
    for pos in range(1, len(s)):
        ch = s[pos]
        if ch == right:
            depth -= 1
            if depth == 0:
                return (s[1:pos], s[pos + 1:])
        elif ch == left:
            depth += 1  # nested pair of the same delimiters
    return (None, "")
def parse_str(s):
    """Parse a leading double-quoted string; see :func:`match_pair`."""
    return match_pair(s, '"', '"')
def _getIntegerTypeCode(dataType):
    """
    Using an autosar integer datatype, calculate how many bits are required
    to store its value and, based on that, which APX type code to use.

    Returns None when the range does not fit any supported width.
    """
    if dataType.minVal >= 0:
        bits = _calcUIntTypeLen(dataType)
        if bits <= 8:
            # Use a range-limited uint8 unless the full 0..255 span is used.
            if (dataType.minVal > 0) or (dataType.maxVal < 255):
                return 'C(%d,%d)' % (dataType.minVal, dataType.maxVal)
            else:
                return 'C'
        elif bits <= 16:
            return 'S'
        elif bits <= 32:
            return 'L'
        elif bits <= 64:
            return 'U'
    elif dataType.minVal < 0:
        bits = _calcIntTypeLen(dataType)
        if bits <= 8:
            # Bug fix: the attribute is `minVal`, not `minval`; the original
            # raised AttributeError on every signed 8-bit type.
            if (dataType.minVal > -128) or dataType.maxVal < 127:
                return 'c(%d,%d)' % (dataType.minVal, dataType.maxVal)
            else:
                return 'c'
        elif bits <= 16:
            return 's'
        elif bits <= 32:
            return 'l'
        elif bits <= 64:
            return 'u'
    else:
        print("not implemented (min=%s)" % dataType.minVal)
def _calcUIntTypeLen(dataType):
    """
    Returns the number of bits needed to represent the (unsigned) value
    range of an autosar integer datatype, or None for other types.
    """
    if isinstance(dataType, autosar.datatype.IntegerDataType):
        # int.bit_length() is exact; the previous ceil(log2(maxVal)) form
        # under-counted exact powers of two (e.g. maxVal=256 needs 9 bits,
        # log2 said 8) and raised a math domain error for maxVal=0.
        return dataType.maxVal.bit_length()
    return None
def _calcIntTypeLen(dataType):
    """
    Same as _calcUIntTypeLen but for signed integers: the width of a
    two's-complement type able to hold both minVal and maxVal.
    """
    if isinstance(dataType, autosar.datatype.IntegerDataType):
        # Bug fix: the old formula ignored minVal entirely (a range like
        # [-32768, 127] was reported as 8 bits) and shared the log2
        # power-of-two boundary error. An n-bit signed type covers
        # [-2**(n-1), 2**(n-1)-1].
        bits_for_max = dataType.maxVal.bit_length() + 1
        if dataType.minVal < 0:
            bits_for_min = (-dataType.minVal - 1).bit_length() + 1
        else:
            bits_for_min = 1
        return max(bits_for_max, bits_for_min)
    return None
def _derive_c_typename(dataElement):
    """
    Returns the C typename for simple data types.
    """
    type_names = {
        UINT8_TYPE_CODE: 'uint8',
        UINT16_TYPE_CODE: 'uint16',
        UINT32_TYPE_CODE: 'uint32',
        SINT8_TYPE_CODE: 'sint8',
        SINT16_TYPE_CODE: 'sint16',
        SINT32_TYPE_CODE: 'sint32',
        STRING_TYPE_CODE: 'uint8',  # strings are plain byte arrays in C
    }
    try:
        return type_names[dataElement.typeCode]
    except KeyError:
        raise NotImplementedError('typeCode={:d}'.format(dataElement.typeCode))
def _typeCodeToStr(typeCode):
    """Map a numeric scalar type code to its one-character signature form."""
    mapping = ('C', 'S', 'L', 'U', 'c', 's', 'l', 'u', 'a')
    if 0 <= typeCode <= STRING_TYPE_CODE:
        return mapping[typeCode]
    raise ValueError('Invalid typeCode: {:d}'.format(typeCode))
class Port:
    """
    APX port base type ('P' = provide port, 'R' = require port).
    """

    def __init__(self, portType, name, dataSignature, attributes=None):
        self.portType = portType  # 'P' for provide port, 'R' for require port
        self.name = name  # name of the port
        self.dsg = DataSignature(dataSignature)
        self.attr = PortAttribute(attributes) if attributes is not None else None
        self.id = None

    def __str__(self):
        return self.to_string(False)

    def to_string(self, normalized):
        """Serialize the port back into its APX text form."""
        text = f'{self.portType}"{self.name}"{self.dsg.to_string(normalized)}'
        if self.attr is not None:
            text = f'{text}:{self.attr}'
        return text

    def resolve_type(self, typeList):
        return self.dsg.resolve_data_element(typeList)

    @property
    def data_element(self):
        """The underlying DataElement, following one level of type reference."""
        element = self.dsg.dataElement
        if not element.isReference:
            return element
        if isinstance(element.typeReference, DataType):
            return element.typeReference.dsg.dataElement
        raise ApxTypeError('Unresolved type reference: {}'.format(str(element.typeReference)))

    @property
    def init_value(self):
        return self.attr.initValue if self.attr is not None else None
class RequirePort(Port):
    """
    APX require ('R') port.
    """

    def __init__(self, name, dataSignature, attributes=None):
        super().__init__('R', name, dataSignature, attributes)

    def mirror(self):
        """Return the provide-side counterpart of this port."""
        attr_str = str(self.attr) if self.attr is not None else None
        return ProvidePort(self.name, self.dsg.to_string(normalized=True), attr_str)

    def clone(self):
        attr_str = str(self.attr) if self.attr is not None else None
        return RequirePort(self.name, self.dsg.to_string(normalized=True), attr_str)
class ProvidePort(Port):
    """
    APX provide ('P') port.
    """

    def __init__(self, name, dataSignature, attributes=None):
        super().__init__('P', name, dataSignature, attributes)

    def mirror(self):
        """Return the require-side counterpart of this port."""
        attr_str = str(self.attr) if self.attr is not None else None
        return RequirePort(self.name, str(self.dsg), attr_str)

    def clone(self):
        attr_str = str(self.attr) if self.attr is not None else None
        return ProvidePort(self.name, str(self.dsg), attr_str)
class DataType:
    """
    APX datatype ('T' declaration).
    """

    def __init__(self, name, dataSignature, attributes=None):
        self.name = name
        self.dsg = DataSignature(dataSignature, None, self)
        self.attr = TypeAttribute(attributes) if attributes is not None else None
        self.id = None

    def __str__(self):
        return self.to_string()

    def to_string(self, normalized=False):
        """Serialize the type declaration back into APX text form."""
        text = f'T"{self.name}"{self.dsg.to_string(normalized)}'
        if self.attr is not None:
            text = f'{text}:{self.attr}'
        return text

    def clone(self):
        attr_str = str(self.attr) if self.attr is not None else None
        return DataType(self.name, self.dsg.to_string(normalized=True), attr_str)

    @property
    def dataElement(self):
        return self.dsg.dataElement
class DataSignature:
    """
    APX data signature: wraps a single DataElement parsed from (or given
    as) its textual form.
    """

    def __init__(self, dsg, typeList=None, parent=None):
        """
        Args:
            dsg: signature string to parse, or an existing DataElement.
            typeList: optional list used to resolve type references.
            parent: owning object (e.g. the DataType declaring this).
        """
        if isinstance(dsg, str):
            (dataElement, remain) = DataSignature.parseDataSignature(dsg, typeList)
            if len(remain) > 0:
                raise ParseError("string '%s' not fully parsed" % dsg)
            assert isinstance(dataElement, DataElement)
            self.dataElement = dataElement
        elif isinstance(dsg, DataElement):
            # Bug fix: the original deep-copied the DataElement *class*
            # (copy.deepcopy(DataElement)) instead of the given instance.
            self.dataElement = copy.deepcopy(dsg)
        else:
            raise NotImplementedError(type(dsg))
        self.parent = parent

    def __str__(self):
        return self.dataElement.to_string()

    def to_string(self, normalized=False):
        return self.dataElement.to_string(normalized)

    def packLen(self):
        """Total packed (binary) size in bytes of this signature.

        Iterative depth-first walk over nested records using an explicit
        stack of iterators.
        NOTE(review): a record's own arrayLen is not applied on this path
        (children are summed once) -- confirm whether record arrays occur.
        """
        result = 0
        stack = []
        i = iter([self.dataElement])
        while True:
            try:
                dataElement = next(i)
            except StopIteration:
                try:
                    i = stack.pop()
                    continue
                except IndexError:
                    break
            if dataElement.typeCode == RECORD_TYPE_CODE:
                # Descend into the record's children, remembering where we were.
                stack.append(i)
                i = iter(dataElement.elements)
            else:
                elemSize = self._calcElemSize(dataElement)
                result += elemSize
        return result

    def _calcElemSize(self, dataElement):
        """Packed size in bytes of one element (recursing into records/refs)."""
        typeCodes = [UINT8_TYPE_CODE, UINT16_TYPE_CODE, UINT32_TYPE_CODE, UINT64_TYPE_CODE,
                     SINT8_TYPE_CODE, SINT16_TYPE_CODE, SINT32_TYPE_CODE, SINT64_TYPE_CODE,
                     STRING_TYPE_CODE]
        typeSizes = [1, 2, 4, 8, 1, 2, 4, 8, 1]
        if dataElement.typeCode == REFERENCE_TYPE_CODE:
            dataType = dataElement.typeReference
            assert isinstance(dataType, DataType)
            return self._calcElemSize(dataType.dsg.dataElement)
        elif dataElement.typeCode == RECORD_TYPE_CODE:
            elemSize = 0
            for childElement in dataElement.elements:
                elemSize += self._calcElemSize(childElement)
        else:
            try:
                i = typeCodes.index(dataElement.typeCode)
            except ValueError:
                raise NotImplementedError(dataElement.typeCode)
            elemSize = typeSizes[i]
        if dataElement.isArray():
            return elemSize * dataElement.arrayLen
        else:
            return elemSize

    def ctypename(self, typeList=None):
        """
        Returns the C type name of the data signature as a string. This return
        value can be used for code generation in C/C++ code.
        """
        if self.dataElement.isReference:
            data_type = self.dataElement.typeReference
            assert isinstance(data_type, DataType)
            return data_type.name
        else:
            return _derive_c_typename(self.dataElement)

    def isComplexType(self, typeList=None):
        return self.dataElement.isComplexType(typeList)

    def isArray(self, typeList=None):
        return self.dataElement.isArray(typeList)

    def createInitData(self, initValue):
        return self.dataElement.createInitData(initValue)

    def resolve_data_element(self, typeList=None):
        self.dataElement.resolve_type(typeList)
        return self.dataElement.resolve_data_element()

    @staticmethod
    def _parseRecordSignature(remain, typeList):
        """Parse the inside of a '{...}' record; returns (record, remainder)."""
        recordElement = DataElement.Record()
        while len(remain) > 0:
            (name, remain) = match_pair(remain, '"', '"')
            if len(remain) > 0:
                (childElement, remain) = DataSignature.parseDataSignature(remain, typeList)
                if childElement is None:
                    if remain[0] == '}':
                        return (recordElement, remain[1:])
                    else:
                        raise ParseError('syntax error while parsing record')
                else:
                    assert isinstance(childElement, DataElement)
                    childElement.name = name
                    recordElement.elements.append(childElement)
        raise ParseError("Missing '}' in data signature")

    @staticmethod
    def _parseExtendedTypeCode(text):
        """Parse the 'min,max' limit text of an extended type code."""
        values = re.split(r'\s*,\s*', text)
        if len(values) < 2:
            raise Exception("min,max missing from %s" % text)
        minVal = int(values[0])
        maxVal = int(values[1])
        return (minVal, maxVal)

    @staticmethod
    def parseDataSignature(s, typeList=None):
        """Parse one element from the front of s; returns (element, remainder)."""
        remain = s
        c = remain[0]
        if c == '{':  # start of record
            remain = remain[1:]
            return DataSignature._parseRecordSignature(remain, typeList)
        if c == 'T':  # type reference, by id T[0] or by name T["Name"]
            (data, remain2) = match_pair(remain[1:], '[', ']')
            if data is not None:
                (data2, remain3) = match_pair(data, r'"', r'"')
                if data2 is not None:
                    assert len(remain3) == 0
                    dataType = DataSignature._get_type_by_name(typeList, data2)
                    return DataElement.TypeReference(dataType), remain2
                else:
                    dataType = DataSignature._get_type_by_id(typeList, int(data))
                    return DataElement.TypeReference(dataType), remain2
            else:
                raise ParseError("Parse failure near '%s', unmatched '[]' pair " % remain[1:])
        else:
            typeCodesChar = ['C', 'S', 'L', 'U', 'c', 's', 'l', 'u', 'a']
            typeCodeInt = [UINT8_TYPE_CODE, UINT16_TYPE_CODE, UINT32_TYPE_CODE, UINT64_TYPE_CODE,
                           SINT8_TYPE_CODE, SINT16_TYPE_CODE, SINT32_TYPE_CODE, SINT64_TYPE_CODE,
                           STRING_TYPE_CODE]
            try:
                i = typeCodesChar.index(c)
            except ValueError:
                return (None, remain)
            remain = remain[1:]
            # optional '(min,max)' range limit
            if (len(remain) > 0) and (remain[0] == '('):
                (data, remain) = match_pair(remain[0:], '(', ')')
                if data is None:
                    raise ParseError("Expecting ')' near: " + remain)
                (minVal, maxVal) = DataSignature._parseExtendedTypeCode(data)
            else:
                (minVal, maxVal) = (None, None)
            # optional '[len]' array suffix
            if (len(remain) > 0) and (remain[0] == '['):
                (value, remain) = match_pair(remain[0:], '[', ']')
                if value is None:
                    raise ParseError("Expecting ']' near: " + remain)
                arrayLen = int(value)
            else:
                arrayLen = None
            dataElement = DataElement(None, typeCodeInt[i], minVal, maxVal, arrayLen)
            return (dataElement, remain)

    @staticmethod
    def _get_type_by_id(typeList, i):
        # Without a typeList the raw id is kept for later resolution.
        if typeList is None:
            return i
        if i < len(typeList):
            return typeList[i]
        else:
            raise ValueError('Invalid type id: {:d}'.format(i))

    @staticmethod
    def _get_type_by_name(typeList, name):
        # Without a typeList the raw name is kept for later resolution.
        if typeList is None:
            return name
        for dataType in typeList:
            if dataType.name == name:
                return dataType
        raise ValueError('No data type found with name {}'.format(name))
class DataElement:
"""
This class describes the type of data that is contained in a data signature. A data signature contains one data element.
"""
def __init__(self, name=None, typeCode = INVALID_TYPE_CODE, minVal = None, maxVal = None, arrayLen = None, elements = None, reference=None):
self.name = name
self.typeMinVal = None
self.typeMaxVal = None
self.strNullTerminator = True #TODO: make this configurable in the future
typeCodeInt = [UINT8_TYPE_CODE, UINT16_TYPE_CODE, UINT32_TYPE_CODE, UINT64_TYPE_CODE,
SINT8_TYPE_CODE, SINT16_TYPE_CODE, SINT32_TYPE_CODE, SINT64_TYPE_CODE,
STRING_TYPE_CODE]
typeMinVal = [0, 0, 0, 0,
-128, -32768, -2147483648, None, #python has issues with 64-bit integer literals, ignore for now
0]
typeMaxVal = [255, 65535, 0xFFFFFFFF, None, #python has issues with 64-bit integer literals, ignore for now
127, 32767, 2147483647, None,
255]
if reference is not None:
self.typeCode = REFERENCE_TYPE_CODE
assert(isinstance(reference, (int, str, DataType)))
self.typeReference = reference
self.minVal = None
self.maxVal = None
self.arrayLen = None
else:
self.typeCode = typeCode
self.minVal = minVal
self.maxVal = maxVal
if typeCode < RECORD_TYPE_CODE:
self.typeMinVal = typeMinVal[typeCode] #this can be used in case user hasn't specifically set self.minVal
self.typeMaxVal = typeMaxVal[typeCode] #this can be used in case user hasn't specifically set self.maxVal
self.arrayLen = arrayLen
self.typeReference = None
if elements is not None:
self.elements = list(elements)
else:
self.elements = None
    @property
    def arrayLen(self):
        # Array length of this element; None means "not an array".
        return self._arrayLen

    @arrayLen.setter
    def arrayLen(self, value):
        # Accepts None (scalar) or a non-negative integer length.
        if value is not None:
            if value < 0:
                raise ValueError('invalid length: %s'%value)
            self._arrayLen = value
        else:
            self._arrayLen = None
    def isArray(self, typeList = None):
        """True if the (reference-resolved) element is an array.

        NOTE(review): typeList is accepted for API symmetry but unused here.
        """
        dataElement = self.resolve_data_element()
        return dataElement.arrayLen is not None

    def isRecord(self, typeList = None):
        """True if the (reference-resolved) element is a record."""
        dataElement = self.resolve_data_element()
        return dataElement.typeCode == RECORD_TYPE_CODE

    def isComplexType(self, typeList = None):
        """True for arrays and records (anything that is not a plain scalar)."""
        return self.isArray() or self.isRecord()
| |
the context is not yet established. Any other errors would mean this isn't supported and we can't use
# the current version installed if we need session_key interrogation.
# https://github.com/gssapi/gss-ntlmssp/blob/9d7a275a4d6494606fb54713876e4f5cbf4d1362/src/gss_sec_ctx.c#L1277
if getattr(o_err, 'min_code', 0) == 1314127894: # ERR_NOTAVAIL
ntlm_features['session_key'] = True
else:
log.debug("GSSAPI ntlmssp does not support session key interrogation: %s" % str(o_err))
_gss_ntlmssp_available.result = ntlm_features # type: ignore
return _gss_ntlmssp_available(session_key=session_key)
def _gss_ntlmssp_reset_crypto(context: "gssapi.SecurityContext", outgoing: bool = True) -> None:
    """ Resets the NTLM RC4 ciphers when being used with SPNEGO. """
    oid = gssapi.OID.from_int_seq(_GSS_NTLMSSP_RESET_CRYPTO_OID)
    # Little-endian uint32 flag selecting which cipher to reset.
    direction = b"\x00\x00\x00\x00" if outgoing else b"\x01\x00\x00\x00"
    set_sec_context_option(oid, context=context, value=direction)
def _gss_sasl_description(mech: "gssapi.OID") -> typing.Optional[bytes]:
    """ Attempts to get the SASL description of the mech specified. """
    # Results are memoized on the function object itself; on a cache miss the
    # except branch fills the cache and the tail call re-runs the happy path.
    try:
        res = _gss_sasl_description.result  # type: ignore
        return res[mech.dotted_form]
    except (AttributeError, KeyError):
        res = getattr(_gss_sasl_description, 'result', {})
        try:
            sasl_desc = gssapi.raw.inquire_saslname_for_mech(mech).mech_description
        except Exception as e:
            log.debug("gss_inquire_saslname_for_mech(%s) failed: %s" % (mech.dotted_form, str(e)))
            # Failures are cached as None so the lookup is not retried.
            sasl_desc = None
        res[mech.dotted_form] = sasl_desc
        _gss_sasl_description.result = res  # type: ignore
        return _gss_sasl_description(mech)
def _kinit(
    username: bytes,
    password: bytes,
    forwardable: typing.Optional[bool] = None,
) -> "gssapi.raw.Creds":
    """Gets a Kerberos credential.

    This will get the GSSAPI credential that contains the Kerberos TGT inside
    it. This is used instead of gss_acquire_cred_with_password as the latter
    does not expose a way to request a forwardable ticket. This way makes it
    possible to request whatever is needed before making it usable in GSSAPI.

    Args:
        username: The username to get the credential for.
        password: The password to use to retrieve the credential.
        forwardable: Whether to request a forwardable credential.

    Returns:
        gssapi.raw.Creds: The GSSAPI credential for the Kerberos mech.
    """
    ctx = krb5.init_context()
    princ = krb5.parse_name_flags(ctx, username)
    init_opt = krb5.get_init_creds_opt_alloc(ctx)
    if hasattr(krb5, "get_init_creds_opt_set_default_flags"):
        # Heimdal requires this to be set in order to load the default options from krb5.conf. This follows the same
        # code that it's own gss_acquire_cred_with_password does.
        realm = krb5.principal_get_realm(ctx, princ)
        krb5.get_init_creds_opt_set_default_flags(ctx, init_opt, b"gss_krb5", realm)
    krb5.get_init_creds_opt_set_canonicalize(init_opt, True)
    if forwardable is not None:
        krb5.get_init_creds_opt_set_forwardable(init_opt, forwardable)
    cred = krb5.get_init_creds_password(ctx, princ, init_opt, password=password)
    # Store the fresh TGT in a unique in-memory ccache so GSSAPI can load it.
    mem_ccache = krb5.cc_new_unique(ctx, b"MEMORY")
    krb5.cc_initialize(ctx, mem_ccache, princ)
    krb5.cc_store_cred(ctx, mem_ccache, cred)
    # acquire_cred_from is less dangerous than krb5_import_cred which uses a raw pointer to access the ccache. Heimdal
    # has only recently added this API (not in a release as of 2021) so there's a fallback to the latter API.
    if hasattr(gssapi.raw, "acquire_cred_from"):
        kerberos = gssapi.OID.from_int_seq(GSSMech.kerberos.value)
        gssapi_creds = gssapi.raw.acquire_cred_from(
            {b"ccache": b"MEMORY:" + mem_ccache.name},
            mechs=[kerberos],
            usage="initiate",
        ).creds
    else:
        gssapi_creds = gssapi.raw.Creds()
        gssapi.raw.krb5_import_cred(gssapi_creds, cache=mem_ccache.addr)
    return gssapi_creds
class GSSAPIProxy(ContextProxy):
"""GSSAPI proxy class for GSSAPI on Linux.
This proxy class for GSSAPI exposes GSSAPI calls into a common interface for SPNEGO authentication. This context
uses the Python gssapi library to interface with the gss_* calls to provider Kerberos, and potentially native
ntlm/negotiate functionality.
"""
    def __init__(
        self,
        username: typing.Optional[str] = None,
        password: typing.Optional[str] = None,
        hostname: typing.Optional[str] = None,
        service: typing.Optional[str] = None,
        channel_bindings: typing.Optional[GssChannelBindings] = None,
        context_req: ContextReq = ContextReq.default,
        usage: str = 'initiate',
        protocol: str = 'negotiate',
        options: NegotiateOptions = NegotiateOptions.none,
        _is_wrapped: bool = False,
        **kwargs: typing.Any,
    ) -> None:
        """Acquire a GSSAPI credential and build the security context for
        the requested protocol ('kerberos', 'negotiate' or 'ntlm').

        Raises ImportError when python-gssapi is unavailable and SpnegoError
        when credential acquisition fails.
        """
        if not HAS_GSSAPI:
            raise ImportError("GSSAPIProxy requires the Python gssapi library: %s" % GSSAPI_IMP_ERR)
        super(GSSAPIProxy, self).__init__(username, password, hostname, service, channel_bindings, context_req, usage,
                                          protocol, options, _is_wrapped)
        # Map the spnego protocol name to the GSSAPI mech OID string.
        mech_str = {
            'kerberos': GSSMech.kerberos.value,
            'negotiate': GSSMech.spnego.value,
            'ntlm': GSSMech.ntlm.value,
        }[self.protocol]
        mech = gssapi.OID.from_int_seq(mech_str)
        cred = None
        try:
            cred = _get_gssapi_credential(mech, self.usage, username=username, password=password,
                                          context_req=context_req)
        except GSSError as gss_err:
            raise SpnegoError(base_error=gss_err, context_msg="Getting GSSAPI credential") from gss_err
        context_kwargs = {}
        if self.channel_bindings:
            # Translate the spnego channel-binding struct into gssapi's type.
            context_kwargs['channel_bindings'] = ChannelBindings(
                initiator_address_type=self.channel_bindings.initiator_addrtype,
                initiator_address=self.channel_bindings.initiator_address,
                acceptor_address_type=self.channel_bindings.acceptor_addrtype,
                acceptor_address=self.channel_bindings.acceptor_address,
                application_data=self.channel_bindings.application_data
            )
        if self.usage == 'initiate':
            # Initiators need a target SPN; default to host@<hostname> with
            # 'unspecified' as a placeholder hostname.
            spn = "%s@%s" % (service if service else 'host', hostname or 'unspecified')
            context_kwargs['name'] = gssapi.Name(spn, name_type=gssapi.NameType.hostbased_service)
            context_kwargs['mech'] = mech
            context_kwargs['flags'] = self._context_req
        self._context = gssapi.SecurityContext(creds=cred, usage=self.usage, **context_kwargs)
    @classmethod
    def available_protocols(cls, options: typing.Optional[NegotiateOptions] = None) -> typing.List[str]:
        """Protocols this proxy can offer, delegated to the module helper."""
        return _available_protocols(options=options)
    @classmethod
    def iov_available(cls) -> bool:
        """Whether the gssapi IOV extension headers were importable."""
        # NOTE: Even if the IOV headers are unavailable, if NTLM was negotiated then IOV won't work. Unfortunately we
        # cannot determine that here as we may not know the protocol until after negotiation.
        return HAS_IOV
    @property
    def client_principal(self) -> typing.Optional[str]:
        """Authenticated client principal (acceptor side only)."""
        # Looks like a bug in python-gssapi where the value still has the terminating null char.
        return to_text(self._context.initiator_name).rstrip('\x00') if self.usage == 'accept' else None
    @property
    def complete(self) -> bool:
        """True once the security context negotiation has finished."""
        return self._context.complete
@property
def negotiated_protocol(self) -> typing.Optional[str]:
try:
# For an acceptor this can be blank until the first token is received
oid = self._context.mech.dotted_form
except AttributeError:
return None
return {
GSSMech.kerberos.value: 'kerberos',
GSSMech.ntlm.value: 'ntlm',
# Only set until the negotiate process is complete, will change to one of the above once the context is
# set up.
GSSMech.spnego.value: 'negotiate',
}.get(oid, 'unknown: %s' % self._context.mech.dotted_form)
@property  # type: ignore
@wrap_system_error(NativeError, "Retrieving session key")
def session_key(self) -> bytes:
    """The raw session key, queried through the SSPI session-key inquiry OID."""
    session_key_oid = gssapi.OID.from_int_seq(_GSS_C_INQ_SSPI_SESSION_KEY)
    return inquire_sec_context_by_oid(self._context, session_key_oid)[0]
@wrap_system_error(NativeError, "Processing security token")
def step(self, in_token: typing.Optional[bytes] = None) -> typing.Optional[bytes]:
    """Perform one negotiation step and return the next token to send, if any."""
    debug = not self._is_wrapped
    if debug:
        log.debug("GSSAPI step input: %s", base64.b64encode(in_token or b"").decode())
    out_token = self._context.step(in_token)
    # Capture the flags the context actually negotiated so far.
    self._context_attr = int(self._context.actual_flags)
    if debug:
        log.debug("GSSAPI step output: %s", base64.b64encode(out_token or b"").decode())
    return out_token
@wrap_system_error(NativeError, "Wrapping data")
def wrap(self, data: bytes, encrypt: bool = True, qop: typing.Optional[int] = None) -> WrapResult:
    """Wrap (seal/sign) a message and report whether it was encrypted."""
    res = gssapi.raw.wrap(self._context, data, confidential=encrypt, qop=qop)
    # gss-ntlmssp used to hardcode conf_state=0 which reports encrypted=False.
    # NTLM always seals, so force True for NTLM.
    # https://github.com/gssapi/gss-ntlmssp/pull/15
    if self.negotiated_protocol == 'ntlm':
        return WrapResult(data=res.message, encrypted=True)
    return WrapResult(data=res.message, encrypted=res.encrypted)
@wrap_system_error(NativeError, "Wrapping IOV buffer")
def wrap_iov(
    self,
    iov: typing.List[IOVBuffer],
    encrypt: bool = True,
    qop: typing.Optional[int] = None,
) -> IOVWrapResult:
    """Wrap a message supplied as IOV buffers in place."""
    buffers = IOV(*self._build_iov_list(iov), std_layout=False)
    conf_state = wrap_iov(self._context, buffers, confidential=encrypt, qop=qop)
    return IOVWrapResult(buffers=_create_iov_result(buffers), encrypted=conf_state)
def wrap_winrm(self, data: bytes) -> WinRMWrapResult:
    """Wrap a WinRM payload, splitting out the signature header and padding."""
    if self.negotiated_protocol == 'ntlm':
        # NTLM does not support IOV wrapping, but luckily its signature header
        # is a fixed 16 bytes so we can wrap the whole payload and split there.
        wrapped = self.wrap(data).data
        header, enc_data, padding = wrapped[:16], wrapped[16:], b""
    else:
        buffers = self.wrap_iov([BufferType.header, data, BufferType.padding]).buffers
        header = buffers[0].data
        enc_data = buffers[1].data
        padding = buffers[2].data or b""
    return WinRMWrapResult(header=header, data=enc_data + padding, padding_length=len(padding))
@wrap_system_error(NativeError, "Unwrapping data")
def unwrap(self, data: bytes) -> UnwrapResult:
    """Unwrap a sealed/signed message; see wrap() for the NTLM encrypted-flag quirk."""
    res = gssapi.raw.unwrap(self._context, data)
    is_ntlm = self.negotiated_protocol == 'ntlm'
    return UnwrapResult(data=res.message, encrypted=True if is_ntlm else res.encrypted, qop=res.qop)
@wrap_system_error(NativeError, "Unwrapping IOV buffer")
def unwrap_iov(self, iov: typing.List[IOVBuffer]) -> IOVUnwrapResult:
    """Unwrap a message supplied as IOV buffers in place."""
    buffers = IOV(*self._build_iov_list(iov), std_layout=False)
    res = unwrap_iov(self._context, buffers)
    return IOVUnwrapResult(buffers=_create_iov_result(buffers), encrypted=res.encrypted, qop=res.qop)
def unwrap_winrm(self, header: bytes, data: bytes) -> bytes:
    """Unwrap a WinRM payload given its separate signature header.

    NTLM must go through gss_unwrap, while for Kerberos the right call depends
    on the underlying provider: the IOV buffer layout needed for both AES and
    RC4 only works on MIT KRB5, and Heimdal currently mandates a variable-size
    padding buffer that WinRM's encryption scheme cannot provide (fixed
    upstream but not yet widely distributed, see
    https://github.com/heimdal/heimdal/issues/739).  We therefore sniff the
    SASL mechanism description, which differs between the two implementations,
    and only take the IOV path on MIT.  Not perfect, but no better check is
    known until newer releases are widespread.
    """
    sasl_desc = _gss_sasl_description(self._context.mech)
    # MIT's description string, see
    # https://github.com/krb5/krb5/blob/f2e28f13156785851819fc74cae52100e0521690/src/lib/gssapi/krb5/gssapi_krb5.c#L686
    if sasl_desc and sasl_desc == b'Kerberos 5 GSS-API Mechanism':
        # TODO: Should be keyed on self.negotiated_protocol == 'kerberos'; the
        # docstring explains why that cannot be done yet.
        buffers = self.unwrap_iov([
            (IOVBufferType.header, header),
            data,
            IOVBufferType.data,
        ]).buffers
        return buffers[1].data
    return self.unwrap(header + data).data
@wrap_system_error(NativeError, "Signing message")
def sign(self, data: bytes, qop: typing.Optional[int] = None) -> bytes:
    """Produce a MIC (message integrity code) over *data*."""
    return gssapi.raw.get_mic(self._context, data, qop=qop)
@wrap_system_error(NativeError, "Verifying message")
def verify(self, data: bytes, mic: bytes) -> int:
    """Verify a MIC against *data*, returning the QOP it was created with."""
    return gssapi.raw.verify_mic(self._context, data, mic)
@property
def _context_attr_map(self) -> typing.List[typing.Tuple[ContextReq, int]]:
attr_map = [
(ContextReq.delegate, 'delegate_to_peer'),
(ContextReq.mutual_auth, 'mutual_authentication'),
(ContextReq.replay_detect, 'replay_detection'),
(ContextReq.sequence_detect, 'out_of_sequence_detection'),
(ContextReq.confidentiality, 'confidentiality'),
(ContextReq.integrity, 'integrity'),
# Only present when the DCE extensions are installed.
(ContextReq.identify, 'identify'),
# Only present with newer versions of python-gssapi https://github.com/pythongssapi/python-gssapi/pull/218.
(ContextReq.delegate_policy, 'ok_as_delegate'),
]
| |
<reponame>qbetterk/user-simulator
# -*- coding: utf-8 -*-
import copy
import json
import os
import re
import shutil
import urllib
import urllib.request
from collections import OrderedDict
from io import BytesIO
from zipfile import ZipFile

import numpy as np
from tqdm import tqdm

from simulator.multiwoz_utils.utils import dbPointer
from simulator.multiwoz_utils.utils import delexicalize
from simulator.multiwoz_utils.utils.nlp import normalize
np.set_printoptions(precision=3)
np.random.seed(2)  # fixed seed so the preprocessing output is reproducible
# GLOBAL VARIABLES
DICT_SIZE = 400  # maximum vocabulary size, including the special tokens
MAX_LENGTH = 50  # maximum utterance length in words; longer dialogues are dropped
def is_ascii(s):
    """Return True when every character of *s* is a 7-bit ASCII character."""
    return not any(ord(ch) >= 128 for ch in s)
def fixDelex(filename, data, data2, idx, idx_acts):
    """Given system dialogue acts fix automatic delexicalization.

    Looks up the dialogue-act annotation for turn *idx_acts* of the dialogue
    named *filename* in *data2* and, when the act's domain
    (Attraction/Hotel/Restaurant) disagrees with the delexicalized slot domain
    found in the turn text, rewrites the domain prefix to match the act.
    Returns *data* (modified in place).
    """
    try:
        turn = data2[filename.strip('.json')][str(idx_acts)]
    except KeyError:
        # No act annotation for this dialogue/turn; leave the text untouched.
        # (The original bare `except` also hid real errors.)
        return data
    # Annotations can be the string "No Annotation" instead of an act dict.
    # The original also checked `unicode`, a Python 2 remnant that raises
    # NameError on Python 3; `str` covers it.
    if isinstance(turn, str):
        return data
    text = data['log'][idx]['text']
    # act-domain -> (wrong prefix, correct prefix) rewrites, in original order.
    domain_fixes = {
        'Attraction': [('restaurant', 'attraction'), ('hotel', 'attraction')],
        'Hotel': [('attraction', 'hotel'), ('restaurant', 'hotel')],
        'Restaurant': [('attraction', 'restaurant'), ('hotel', 'restaurant')],
    }
    for act_name in turn:
        for act_domain, fixes in domain_fixes.items():
            if act_domain in act_name:
                for wrong, right in fixes:
                    if wrong + '_' in text:
                        text = text.replace(wrong, right)
    data['log'][idx]['text'] = text
    return data
def delexicaliseReferenceNumber(sent, turn):
    """Based on the belief state, we can find reference number that
    during data gathering was created randomly."""
    domains = ['restaurant', 'hotel', 'attraction', 'train', 'taxi', 'hospital']  # , 'police']
    if not turn['metadata']:
        return sent
    for domain in domains:
        booked = turn['metadata'][domain]['book']['booked']
        if not booked:
            continue
        for slot, raw_value in booked[0].items():
            # The original built the same placeholder in both branches of an
            # if/else on slot == 'reference'; merged here.
            placeholder = '[' + domain + '_' + slot + ']'
            # The value may appear bare, with a leading '#', or prefixed 'ref#'.
            for prefix in ("", "#", "ref#"):
                key = normalize(prefix + raw_value)
                sent = (' ' + sent + ' ').replace(' ' + key + ' ', ' ' + placeholder + ' ')
    return sent
def addBookingPointer(task, turn, pointer_vector):
    """Append booking-availability bits for restaurant/hotel/train to *pointer_vector*.

    For each domain two bits are appended: [1, 0] when no confirmed booking
    exists and [0, 1] when the turn metadata contains a booking with a
    reference number.  Returns the extended vector.
    """
    def _domain_bits(domain):
        # dict.has_key() was removed in Python 3; the original raised
        # AttributeError here.  Use .get()/`in` instead.
        if task['goal'][domain]:
            book = turn['metadata'][domain].get('book', {})
            booked = book.get('booked')
            if booked and 'reference' in booked[0]:
                return np.array([0, 1])
        return np.array([1, 0])

    for domain in ('restaurant', 'hotel', 'train'):
        pointer_vector = np.append(pointer_vector, _domain_bits(domain))
    return pointer_vector
def addDBPointer(turn):
    """Create database pointer for all related domains."""
    domains = ['restaurant', 'hotel', 'attraction', 'train']
    pointer_vector = np.zeros(6 * len(domains))
    for domain in domains:
        # Encode the DB match count for this domain as a one-hot segment.
        matches = dbPointer.queryResult(domain, turn)
        pointer_vector = dbPointer.oneHotVector(matches, domain, pointer_vector)
    return pointer_vector
def get_summary_bstate(bstate):
    """Based on the mturk annotations we form multi-domain belief state.

    Encodes booking slots as single filled/empty bits and every semi slot as a
    3-way one-hot (not mentioned / dontcare / filled), plus one activity bit
    per domain.  The full encoding is always 94 dims for MultiWOZ.
    """
    domains = [u'taxi', u'restaurant', u'hospital', u'hotel', u'attraction', u'train', u'police']
    summary_bstate = []
    for domain in domains:
        domain_active = False
        book = bstate[domain]['book']
        booking = []
        for slot in sorted(book.keys()):
            if slot == 'booked':
                booking.append(1 if book['booked'] else 0)
            else:
                booking.append(1 if book[slot] != "" else 0)
        if domain == 'train':
            # Pad for slots the train domain may be missing entirely.
            for extra in ('people', 'ticket'):
                if extra not in book:
                    booking.append(0)
        summary_bstate += booking
        for slot, value in bstate[domain]['semi'].items():
            slot_enc = [0, 0, 0]  # not mentioned, dontcare, filled
            if value == 'not mentioned':
                slot_enc[0] = 1
            elif value in ('dont care', 'dontcare', "don't care"):
                slot_enc[1] = 1
            elif value:
                slot_enc[2] = 1
            if slot_enc != [0, 0, 0]:
                domain_active = True
            summary_bstate += slot_enc
        # quasi domain-tracker bit
        summary_bstate.append(1 if domain_active else 0)
    assert len(summary_bstate) == 94
    return summary_bstate
def analyze_dialogue(dialogue, maxlen):
    """Cleaning procedure for all kinds of errors in text and annotation.

    Returns a dict with 'goal', 'usr_log' and 'sys_log' keys, or None when the
    dialogue is malformed (odd turn count, over-long or non-ascii utterance,
    or a user turn without a database pointer).
    """
    d = dialogue
    # A well-formed dialogue strictly alternates user/system turns.
    if len(d['log']) % 2 != 0:
        # The original had mangled Python 2 prints (a bare `print` expression
        # followed by a string literal), which printed nothing; fixed to real
        # print() calls throughout.
        print('odd # of turns')
        return None  # odd number of turns, wrong dialogue
    d_pp = {'goal': d['goal']}  # for now we just copy the goal
    usr_turns = []
    sys_turns = []
    for i in range(len(d['log'])):
        if len(d['log'][i]['text'].split()) > maxlen:
            print('too long')
            return None  # too long sentence, wrong dialogue
        if i % 2 == 0:  # usr turn
            if 'db_pointer' not in d['log'][i]:
                print('no db')
                return None  # no db_pointer, probably 2 usr turns in a row
            text = d['log'][i]['text']
            if not is_ascii(text):
                print('not ascii')
                return None
            usr_turns.append(d['log'][i])
        else:  # sys turn
            text = d['log'][i]['text']
            if not is_ascii(text):
                print('not ascii')
                return None
            # Attach the summarized belief state to every system turn.
            d['log'][i]['belief_summary'] = get_summary_bstate(d['log'][i]['metadata'])
            sys_turns.append(d['log'][i])
    d_pp['usr_log'] = usr_turns
    d_pp['sys_log'] = sys_turns
    return d_pp
def get_dial(dialogue):
    """Extract a dialogue from the file as (usr, sys, db_pointer, belief) tuples."""
    d_orig = analyze_dialogue(dialogue, MAX_LENGTH)  # max turn len is 50 words
    if d_orig is None:
        return None
    usr_texts = [t['text'] for t in d_orig['usr_log']]
    db_pointers = [t['db_pointer'] for t in d_orig['usr_log']]
    beliefs = [t['belief_summary'] for t in d_orig['sys_log']]
    sys_texts = [t['text'] for t in d_orig['sys_log']]
    return [(u, s, d, b) for u, d, s, b in zip(usr_texts, db_pointers, sys_texts, beliefs)]
def createDict(word_freqs, dict_size=None):
    """Build a word -> index dictionary from a {word: frequency} mapping.

    The special tokens _GO/_EOS/_UNK/_PAD occupy the first indices; the
    remaining words follow in decreasing frequency order.  Only the
    *dict_size* lowest indices (defaulting to the module-level DICT_SIZE) are
    kept.
    """
    if dict_size is None:
        dict_size = DICT_SIZE
    # dict.keys()/values() are non-indexable views on Python 3 (the original
    # crashed on `words[ii]`), so materialize them first.
    words = list(word_freqs.keys())
    freqs = list(word_freqs.values())
    sorted_idx = np.argsort(freqs)
    sorted_words = [words[ii] for ii in sorted_idx[::-1]]
    # Extra vocabulary symbols
    extra_tokens = ['_GO', '_EOS', '_UNK', '_PAD']
    worddict = OrderedDict()
    for ii, ww in enumerate(extra_tokens):
        worddict[ww] = ii
    for ii, ww in enumerate(sorted_words):
        worddict[ww] = ii + len(extra_tokens)
    # Deleting while iterating a dict raises RuntimeError on Python 3, so
    # collect the overflow keys first.
    overflow = [key for key, idx in worddict.items() if idx >= dict_size]
    for key in overflow:
        del worddict[key]
    return worddict
def loadData():
    """Download and unpack the MultiWOZ corpus if it is not already on disk."""
    # NOTE(review): this existence check points at data/multiwoz-master/...
    # while the download extracts into data/multi-woz/ — confirm which layout
    # the rest of the pipeline expects before "fixing" the path.
    data_path = "data/multiwoz-master/data/multi-woz/data.json"
    dataset_url = "https://www.repository.cam.ac.uk/bitstream/handle/1810/280608/MULTIWOZ2.zip?sequence=3&isAllowed=y"
    if not os.path.exists("data"):
        os.makedirs("data")
        os.makedirs("data/multi-woz")
    if not os.path.exists(data_path):
        print("Downloading and unzipping the MultiWOZ dataset")
        # urllib.urlopen was Python 2 only; urllib.request.urlopen is the
        # Python 3 equivalent.
        resp = urllib.request.urlopen(dataset_url)
        zip_ref = ZipFile(BytesIO(resp.read()))
        zip_ref.extractall("data/multi-woz")
        zip_ref.close()
        shutil.copy('data/multi-woz/MULTIWOZ2 2/data.json', 'data/multi-woz/')
        shutil.copy('data/multi-woz/MULTIWOZ2 2/valListFile.json', 'data/multi-woz/')
        shutil.copy('data/multi-woz/MULTIWOZ2 2/testListFile.json', 'data/multi-woz/')
        shutil.copy('data/multi-woz/MULTIWOZ2 2/dialogue_acts.json', 'data/multi-woz/')
def createDelexData():
    """Main function of the script - loads delexical dictionary,
    goes through each dialogue and does:
    1) data normalization
    2) delexicalization
    3) addition of database pointer
    4) saves the delexicalized data
    """
    # download the data
    loadData()
    # `delex_data` was never initialized in the original, raising NameError on
    # the first dialogue.
    delex_data = {}
    # create dictionary of delexicalied values that then we will search against, order matters here!
    dic = delexicalize.prepareSlotValuesIndependent()
    # `file()` is Python 2 only; use open() with context managers instead so
    # the handles are also closed.
    with open('data/multi-woz/data.json') as fin1:
        data = json.load(fin1)
    with open('data/multi-woz/dialogue_acts.json') as fin2:
        data2 = json.load(fin2)
    digitpat = re.compile(r'\d+')  # hoisted: the same pattern for every turn
    for dialogue_name in tqdm(data):
        dialogue = data[dialogue_name]
        idx_acts = 1
        for idx, turn in enumerate(dialogue['log']):
            # normalization, split and delexicalization of the sentence
            sent = normalize(turn['text'])
            words = sent.split()
            sent = delexicalize.delexicalise(' '.join(words), dic)
            # parsing reference number GIVEN belief state
            sent = delexicaliseReferenceNumber(sent, turn)
            # changes to numbers only here
            sent = re.sub(digitpat, '[value_count]', sent)
            # delexicalized sentence added to the dialogue
            dialogue['log'][idx]['text'] = sent
            if idx % 2 == 1:  # if it's a system turn
                # add database pointer
                pointer_vector = addDBPointer(turn)
                # add booking pointer
                pointer_vector = addBookingPointer(dialogue, turn, pointer_vector)
                dialogue['log'][idx - 1]['db_pointer'] = pointer_vector.tolist()
            # FIXING delexicalization:
            dialogue = fixDelex(dialogue_name, dialogue, data2, idx, idx_acts)
            idx_acts += 1
        delex_data[dialogue_name] = dialogue
    with open('data/multi-woz/delex.json', 'w') as outfile:
        json.dump(delex_data, outfile)
    return delex_data
def main():
    """Entry point: build and cache the delexicalized MultiWOZ dialogues."""
    print('Create delexicalized dialogues. Get yourself a coffee, this might take a while.')
    createDelexData()
def delexicalize_one_sent0(sent, dic):
    """Normalize and delexicalize a single sentence.

    Returns the rewritten sentence plus a dict mapping placeholders back to
    the original surface values (so the sentence can be re-lexicalized).
    """
    sent, kv_dic = normalize(sent)
    tokens = sent.split()
    sent, extra_kv = delexicalize.delexicalise(' '.join(tokens), dic)
    kv_dic.update(extra_kv)
    # An earlier experiment used a context-sensitive pattern for people/party
    # counts; the simple "replace every digit run" rule below is what shipped.
    digitpat = re.compile(r'\d+')
    digits = digitpat.findall(sent)
    if digits:
        kv_dic.update({'[value_count]': digits})
    sent = digitpat.sub('[value_count]', sent)
    return sent, kv_dic
import sys, os, re, pdb
import pickle as pkl
import sys, os, re, | |
"""
.. module:: wagtailsnapshotpublisher.views
"""
import json
import logging
from datetime import datetime
from django.apps import apps
from django.conf import settings
from django.forms.models import modelform_factory
from django.http import JsonResponse, HttpResponseServerError, Http404
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.core import serializers
from django.db.models import Q
from wagtail.core.models import Page, PageRevision
from wagtail.admin import messages
from djangosnapshotpublisher.publisher_api import PublisherAPI
from djangosnapshotpublisher.models import ContentRelease
from .models import WSSPContentRelease, document_load_dynamic_elements
from .forms import PublishReleaseForm, FrozenReleasesForm
from .utils import get_dynamic_element_keys
from .signals import release_was_staged, reindex_release
logger = logging.getLogger('django')
# Format used when logging/serializing release publish datetimes.
DATETIME_FORMAT='%Y-%m-%d %H:%M'
#
# Return upcoming scheduled releases.
#
def get_releases(request, site_code):
    """Return the current stage and live content releases for *site_code* as JSON."""
    publisher_api = PublisherAPI()

    def _summary(response):
        # Collapse a publisher-API response into a small serializable dict,
        # or None when there is no such release.
        if response['status'] != 'success':
            return None
        content = response['content']
        return {
            'uuid': content.uuid,
            'title': content.title,
            'publish_datetime': content.publish_datetime,
        }

    live_release = _summary(publisher_api.get_live_content_release(site_code))
    stage_release = _summary(publisher_api.get_stage_content_release(site_code))
    return JsonResponse({
        'stage': stage_release,
        'live': live_release,
    }, safe=False)
def list_live_and_upcoming_content_releases(site_code, status=None, after=None):
    """Return content releases for *site_code*, optionally filtered by status
    and by being live or scheduled at/after *after*.

    Returns {'status': 'success', 'content': <queryset>} on success or a
    {'status': 'failed', ...} dict on error.  The original bare ``except``
    silently swallowed every error; it is narrowed and logged here while
    keeping the same return contract.
    """
    try:
        content_releases = ContentRelease.objects.filter(site_code=site_code)
        if status:
            content_releases = content_releases.filter(status=status)
        if after:
            content_releases = content_releases.filter(
                Q(publish_datetime__gte=after) | Q(is_live=True))
        return {'status': 'success', 'content': content_releases}
    except Exception:
        logger.exception('Unable to fetch upcoming content releases for site %s', site_code)
        return {'status': 'failed', 'content': 'Unable to fetch upcoming content releases'}
def get_content_details(site_code, release_uuid, content_type, content_key):
    """Fetch a single document's data from a content release.

    When *release_uuid* is falsy the current live release is used.  If the
    document is missing from a preview release, the lookup falls back to that
    release's base release.  Returns the parsed document dict on success, or
    the publisher-API error response otherwise.
    """
    publisher_api = PublisherAPI()
    content_release = None
    try:
        if release_uuid:
            # get ContentRelease
            content_release = WSSPContentRelease.objects.get(
                site_code=site_code,
                uuid=release_uuid,
            )
        else:
            # get live ContentRelease
            response = publisher_api.get_live_content_release(site_code)
            if response['status'] == 'error':
                return response
            else:
                release = response['content']
                content_release = WSSPContentRelease.objects.get(id=release.id)
                release_uuid = content_release.uuid
    except WSSPContentRelease.DoesNotExist:
        # NOTE(review): content_release stays None here, yet the fallback
        # branch below dereferences content_release.status and would raise
        # AttributeError — confirm whether this path can occur in practice.
        pass
    # Fetch document from the content release.
    response = publisher_api.get_document_from_content_release(
        site_code,
        release_uuid,
        content_key,
        content_type,
    )
    base_content_release = None
    if response['status'] == 'error' and response['error_code'] == 'release_document_does_not_exist':
        # Release doc not found, try in the base release for preview releases.
        # (status == 0 presumably means "preview" — verify against the model.)
        if content_release.status == 0:
            if content_release.use_current_live_as_base_release:
                response = publisher_api.get_live_content_release(site_code)
                if response['status'] == 'success':
                    release = response['content']
                    base_content_release = WSSPContentRelease.objects.get(id=release.id)
            else:
                base_content_release = content_release.base_release
        if base_content_release != None:
            # Fetch document from the base content release if available (should only happen for preview releases).
            response = publisher_api.get_document_from_content_release(
                site_code,
                base_content_release.uuid,
                content_key,
                content_type,
            )
    if response['status'] == 'success':
        data = json.loads(response['content'].document_json)
        response_extra = publisher_api.get_document_extra_from_content_release(
            site_code,
            release_uuid,
            content_key,
            content_type,
        )
        if response_extra['status'] == 'success':
            try:
                # Expand any dynamic elements referenced by the stored document.
                dynamic_element_keys = json.loads(response_extra['content'].get(key='dynamic_element_keys').content)
                data, updated = document_load_dynamic_elements(content_release, data, dynamic_element_keys)
            except:
                # Best effort: missing/invalid dynamic-element data leaves the
                # document exactly as stored.
                pass
    else:
        return response
    return data
def unpublish_page(request, page_id, release_id, recursively=False):
    """Unpublish a page (optionally with its descendants) from the given release."""
    page = get_object_or_404(Page, id=page_id).specific
    page.unpublish_or_delete_from_release(release_id, recursively)
    parent_id = page.get_parent().id
    return redirect('wagtailadmin_explore', parent_id)
def unpublish_recursively_page(request, page_id, release_id):
    """Unpublish a page and all of its descendants from the release."""
    return unpublish_page(request, page_id, release_id, recursively=True)
def unpublish(request, content_app, content_class, content_id, release_id):
    """Unpublish a non-page model instance from the given release."""
    model_class = apps.get_model(content_app, content_class)
    instance = get_object_or_404(model_class, id=content_id)
    instance.unpublish_or_delete_from_release(release_id)
    # `absolute_path` is expected to be provided elsewhere in this module.
    return redirect(absolute_path(request, content_app, content_class))
def republish_page_and_children(request, page_id=None):
    """GET: show the re-publish form; POST: re-publish the page tree to a release."""
    page = Page.objects.get(id=page_id).specific
    if request.method == 'GET':
        publisher_api = PublisherAPI()
        # Fetch preview releases ('0' will be cast to an integer).
        rsp = publisher_api.list_content_releases(page.site_code, status='0', after=None)
        releases = rsp['content'] if rsp['status'] == 'success' else []
        return render(request, 'wagtailadmin/republish/detail.html', {
            'page': page,
            'releases': releases,
        })
    if request.method == 'POST':
        release_uuid = request.POST.get('release_uuid')
        # Only re-publish pages that are already published to the release.
        publish_current = request.POST.get('publish_current')
        release = WSSPContentRelease.objects.get(uuid=release_uuid)
        rsp = page.republish_to_release_with_id(release.id, not publish_current)
        if rsp['code'] == 0:
            error = None
            results = rsp['results']
            total_count, error_count = rsp['total_count'], rsp['error_count']
        else:
            error = 'Failed to re-publish page: %s' % (rsp['message'])
            results, total_count, error_count = [], None, None
        return render(request, 'wagtailadmin/republish/results.html', {
            'page': page,
            'results': results,
            'results_total': total_count,
            'results_errors': error_count,
            'error': error,
        })
def unpublish_page_and_children(request, page_id=None):
    """GET: show the un-publish form; POST: un-publish the page tree from a release."""
    page = Page.objects.get(id=page_id).specific
    if request.method == 'GET':
        publisher_api = PublisherAPI()
        # Fetch preview releases ('0' will be cast to an integer).
        rsp = publisher_api.list_content_releases(page.site_code, status='0', after=None)
        releases = rsp['content'] if rsp['status'] == 'success' else []
        return render(request, 'wagtailadmin/unpublish/detail.html', {
            'page': page,
            'releases': releases,
        })
    if request.method == 'POST':
        release = WSSPContentRelease.objects.get(uuid=request.POST.get('release_uuid'))
        rsp = page.unpublish_from_release_with_id(release.id)
        if rsp['code'] == 0:
            error = None
            results = rsp['results']
            total_count, error_count = rsp['total_count'], rsp['error_count']
        else:
            error = 'Failed to un-publish page: %s' % (rsp['message'])
            results, total_count, error_count = [], None, None
        return render(request, 'wagtailadmin/unpublish/results.html', {
            'page': page,
            'results': results,
            'results_total': total_count,
            'results_errors': error_count,
            'error': error,
        })
def remove_page_and_children(request, page_id=None):
    """GET: show the remove form; POST: remove the page tree from a release."""
    page = Page.objects.get(id=page_id).specific
    if request.method == 'GET':
        site_code = page.site_code
        publisher_api = PublisherAPI()
        rsp = publisher_api.list_content_releases(site_code, status='0', after=None)  # Fetch preview releases ('0' will be cast to an integer)
        if rsp['status'] == 'success':
            releases = rsp['content']
        else:
            releases = []
        details = {
            'page': page,
            'releases': releases,
        }
        return render(request, 'wagtailadmin/remove/detail.html', details)
    if request.method == 'POST':
        release_uuid = request.POST.get('release_uuid')
        # (Dropped the unused `publish_current` local copied from the
        # republish view; removal does not take that flag.)
        release = WSSPContentRelease.objects.get(uuid=release_uuid)
        rsp = page.remove_from_release_with_id(release.id)
        if rsp['code'] == 0:
            error = None
            results = rsp['results']
            total_count = rsp['total_count']
            error_count = rsp['error_count']
        else:
            # Fixed copy-paste bug: this is the *remove* action, but the
            # original message said "un-publish".
            error = 'Failed to remove page: %s' % (rsp['message'])
            results = []
            total_count = None
            error_count = None
        details = {
            'page': page,
            'results': results,
            'results_total': total_count,
            'results_errors': error_count,
            'error': error,
        }
        return render(request, 'wagtailadmin/remove/results.html', details)
def remove_page(request, page_id, release_id, recursively=False):
    """Delete a page (optionally with its descendants) from the given release."""
    target = get_object_or_404(Page, id=page_id).specific
    # The trailing True asks for deletion rather than a plain unpublish.
    target.unpublish_or_delete_from_release(release_id, recursively, True)
    return redirect('wagtailadmin_explore', target.get_parent().id)
def remove_recursively_page(request, page_id, release_id):
    """Delete a page and all of its descendants from the release."""
    return remove_page(request, page_id, release_id, recursively=True)
def remove(request, content_app, content_class, content_id, release_id):
    """Delete a non-page model instance from the given release."""
    model_class = apps.get_model(content_app, content_class)
    instance = get_object_or_404(model_class, id=content_id)
    # Positional args: (release_id, recursively=False, delete=True).
    instance.unpublish_or_delete_from_release(release_id, False, True)
    return redirect(absolute_path(request, content_app, content_class))
def preview_model(request, content_app, content_class, content_id, preview_mode='default',
                  load_dynamic_element=False):
    """Serialize posted (unsaved) model form data to a preview JSON document."""
    model_class = apps.get_model(content_app, content_class)
    field_names = [field.name for field in model_class._meta.get_fields()]
    form = modelform_factory(model_class, fields=field_names)(request.POST)
    if not form.is_valid():
        if not settings.TESTING:
            print(form.errors)
        return HttpResponseServerError('Form is not valid')
    instance = form.save(commit=False)
    serializer = instance.get_serializers()[preview_mode]['class'](instance=instance)
    data = serializer.data
    if load_dynamic_element:
        dynamic_element_keys = get_dynamic_element_keys(data)
        if dynamic_element_keys:
            data, updated = document_load_dynamic_elements(instance.live_release, data, dynamic_element_keys)
    return JsonResponse(data)
def preview_instance(request, content_app, content_class, content_id, preview_mode='default',
                     load_dynamic_element=True):
    """Serialize a saved model instance to a preview JSON document."""
    model_class = apps.get_model(content_app, content_class)
    instance = model_class.objects.get(id=content_id)
    serializer = instance.get_serializers()[preview_mode]['class'](instance=instance)
    data = serializer.data
    if load_dynamic_element:
        dynamic_element_keys = get_dynamic_element_keys(data)
        if dynamic_element_keys:
            data, updated = document_load_dynamic_elements(instance.live_release, data, dynamic_element_keys)
    return JsonResponse(data)
def compare_release(request, release_id, release_id_to_compare_to=None, set_live_button=False, set_stage_button=False):
    """ compare_release

    Build the template context for comparing release *release_id* against
    another release (by default the current live one).  Also handles the two
    POST forms on the page: choosing a frozen release to compare against, and
    publishing the release with an optional scheduled datetime.
    """
    publisher_api = PublisherAPI()
    release = WSSPContentRelease.objects.get(id=release_id)
    publish_release_form = PublishReleaseForm()
    frozen_releases_form = FrozenReleasesForm(release.site_code)
    if request.method == 'POST' and release_id_to_compare_to is None:
        frozen_releases_form = FrozenReleasesForm(release.site_code, request.POST)
        if frozen_releases_form.is_valid():
            # redirect to compare with this release
            release_id_to_compare_to = frozen_releases_form.cleaned_data['releases']
            return release_detail(request, release_id, set_live_button, set_stage_button, release_id_to_compare_to.id)
    # NOTE(review): this guard is identical to the one above; it only runs
    # when the frozen-releases form did not validate, at which point the same
    # POST data is re-bound to the publish form — confirm both forms are meant
    # to share one endpoint.
    if request.method == 'POST' and release_id_to_compare_to is None:
        publish_release_form = PublishReleaseForm(request.POST)
        if publish_release_form.is_valid():
            publish_datetime = publish_release_form.cleaned_data['publish_datetime']
            if publish_datetime:
                publish_datetime = publish_datetime.strftime('%Y-%m-%dT%H:%M:%S%z')
            return release_set_live(request, release_id, publish_datetime)
    # Hide the comparison picker when there is nothing to compare against.
    if frozen_releases_form.fields['releases'].queryset is None or \
    not frozen_releases_form.fields['releases'].queryset.exists():
        frozen_releases_form = None
    # get current live release
    compare_with_live = True
    response = publisher_api.get_live_content_release(release.site_code)
    if response['status'] == 'error':
        return {
            'release': release,
            'error_msg': response['error_msg'],
            'publish_release_form': publish_release_form,
        }
    release_to_compare_to = response['content']
    if release_id_to_compare_to and release_to_compare_to.id != release_id_to_compare_to:
        compare_with_live = False
        release_to_compare_to = WSSPContentRelease.objects.get(id=release_id_to_compare_to)
    else:
        release_to_compare_to = WSSPContentRelease.objects.get(id=release_to_compare_to.id)
    response = publisher_api.compare_content_releases(release.site_code, release.uuid,
    release_to_compare_to.uuid)
    comparison = response['content']
    added_pages = []
    removed_pages = []
    changed_pages = []
    extra_contents = []
    # Bucket each comparison item by its diff type; non-page items go to
    # extra_contents for the raw JSON dev view.
    for item in comparison:
        if item['content_type'] == 'page':
            try:
                if item['diff'] == 'Added':
                    try:
                        page_revision = PageRevision.objects.get(id=item['parameters']['revision_id'])
                    except:
                        # NOTE(review): when this lookup fails, page_revision is
                        # unbound (or stale from a previous iteration), so the
                        # lines below raise and are swallowed by the outer
                        # handler — confirm this is the intended skip behavior.
                        logger.info('Page has been deleted from Wagtail: %s' % (item))
                        item['deleted_in_wagtail'] = True
                    item['page_revision'] = page_revision
                    item['title'] = json.loads(page_revision.content_json)['title']
                    added_pages.append(item)
                if item['diff'] == 'Removed':
                    try:
                        page_revision = PageRevision.objects.get(id=item['parameters']['revision_id'])
                    except:
                        logger.info('Page has been deleted from Wagtail: %s' % (item))
                        item['deleted_in_wagtail'] = True
                    item['title'] = json.loads(page_revision.content_json)['title']
                    item['page_revision'] = page_revision
                    removed_pages.append(item)
                if item['diff'] == 'Changed' and 'revision_id' in item['parameters']['release_from']:
                    try:
                        page_revision = PageRevision.objects.get(id=item['parameters']['release_from']['revision_id'])
                    except:
                        logger.info('Page has been deleted from Wagtail: %s' % (item))
                        item['deleted_in_wagtail'] = True
                    item['page_revision_from'] = page_revision
                    item['page_revision_compare_to'] = PageRevision.objects.get(
                        id=item['parameters']['release_compare_to']['revision_id'])
                    item['title'] = json.loads(page_revision.content_json)['title']
                    changed_pages.append(item)
            except Exception as e:
                logger.info('Error while comparing item: %s' % (item))
        else:
            extra_contents.append(item)
    return {
        'comparison': comparison,
        'added_pages': added_pages,
        'changed_pages': changed_pages,
        'removed_pages': removed_pages,
        # Raw extras are only exposed to users with the dev permission.
        'extra_contents': json.dumps(extra_contents, indent=4) if extra_contents and \
        request.user.has_perm('wagtailadmin.access_dev') else None,
        'release': release,
        'release_to_compare_to': release_to_compare_to,
        'publish_release_form': publish_release_form,
        'frozen_releases_form': frozen_releases_form,
        'compare_with_live': compare_with_live,
    }
def release_detail(request, release_id, set_live_button=False, set_stage_button=False, | |
# Compatible IDs:
# USB\Class_02&SubClass_02&Prot_00
# USB\Class_02&SubClass_02
# USB\Class_02
#
# Grab what we hope is the instance ID
line = line.decode()
if line.startswith("USB"):
instance_id = line
else:
# If the next line is the Name we want then we're done
if instance_id and ("Name: " + device_description in line):
found = True
printer.string("{}\"{}\" found with instance ID \"{}\"". \
format(prompt, device_description,
instance_id))
break
instance_id = None
if found:
# Now run devcon to reset the device
printer.string("{}running {} to reset device \"{}\"...". \
format(prompt, DEVCON_PATH, instance_id))
cmd = [DEVCON_PATH, "restart", "@" + instance_id]
text = subprocess.check_output(subprocess_osify(cmd),
stderr=subprocess.STDOUT,
shell=False) # Has to be False or devcon won't work
for line in text.splitlines():
printer.string("{}{}".format(prompt, line.decode()))
success = True
else:
printer.string("{}device with description \"{}\" not found.". \
format(prompt, device_description))
except subprocess.CalledProcessError:
printer.string("{} unable to find and reset device.".format(prompt))
return success
# Open the required serial port.
def open_serial(serial_name, speed, printer, prompt):
    '''Open the named serial port at the given speed.

    Returns the open serial handle on success, else None.'''
    serial_handle = None
    text = "{}: trying to open \"{}\" as a serial port...". \
           format(prompt, serial_name)
    try:
        serial_handle = serial.Serial(serial_name, speed, timeout=0.05)
        printer.string("{} opened.".format(text))
    except (ValueError, serial.SerialException) as ex:
        # Bug fix: report serial_name here, not serial_handle.name:
        # when the constructor raises, serial_handle is still None and
        # dereferencing it raised AttributeError, masking the real error.
        printer.string("{}{} while accessing port {}: {}.".
                       format(prompt, type(ex).__name__,
                              serial_name, str(ex)))
    return serial_handle
def open_telnet(port_number, printer, prompt):
    '''Open a telnet connection to the given port on localhost.

    Returns the Telnet handle on success, else None.'''
    telnet_handle = None
    banner = "{}trying to open \"{}\" as a telnet port on localhost...". \
             format(prompt, port_number)
    try:
        telnet_handle = Telnet("localhost", int(port_number), timeout=5)
        outcome = " opened." if telnet_handle is not None else " failed."
        printer.string("{}{}".format(banner, outcome))
    except (socket.error, socket.timeout, ValueError) as ex:
        printer.string("{}{} failed to open telnet {}: {}.".
                       format(prompt, type(ex).__name__,
                              port_number, str(ex)))
    return telnet_handle
def install_lock_acquire(install_lock, printer, prompt, keep_going_flag=None):
    '''Attempt to acquire install lock.

    Waits up to INSTALL_LOCK_WAIT_SECONDS, polling once per second, and
    gives up early if keep_going() reports an abort.  Returns True only
    if the lock was actually acquired.
    '''
    timeout_seconds = INSTALL_LOCK_WAIT_SECONDS
    success = False
    if install_lock:
        printer.string("{}waiting for install lock...".format(prompt))
        # Bug fix: track acquisition explicitly.  The previous code
        # inferred success from "timeout_seconds > 0", which reported a
        # false success when keep_going() aborted the wait early, and a
        # false failure when acquisition coincided with the timeout
        # reaching zero.
        acquired = install_lock.acquire(False)
        while not acquired and (timeout_seconds > 0) and \
              keep_going(keep_going_flag, printer, prompt):
            sleep(1)
            timeout_seconds -= 1
            acquired = install_lock.acquire(False)
        if acquired:
            printer.string("{}got install lock.".format(prompt))
            success = True
        else:
            # Also fixes the "aquire" typo in the failure message
            printer.string("{}failed to acquire install lock.".format(prompt))
    else:
        printer.string("{}warning, there is no install lock.".format(prompt))
    return success
def install_lock_release(install_lock, printer, prompt):
    '''Release install lock, doing nothing if there is none'''
    if not install_lock:
        return
    install_lock.release()
    printer.string("{}install lock released.".format(prompt))
def fetch_repo(url, directory, branch, printer, prompt, submodule_init=True, force=False):
    '''Fetch a repo: directory can be relative or absolute, branch can be a hash.

    If directory already exists the code there is updated with
    "git fetch"; otherwise the repo is cloned.  branch defaults to
    "master"; a value starting with "#" is treated as a branch name
    (the "#" is stripped), anything else is checked out as given
    (e.g. a commit hash).  With force=True a failed fetch is retried
    once after "git reset --hard", and if the update still fails the
    directory is deleted so a clean clone can be attempted.  Finally
    the target is checked out (recursing submodules if submodule_init
    is True).  Returns True on success.
    '''
    got_code = False
    success = False
    dir_text = directory
    if dir_text == ".":
        dir_text = "this directory"
    if printer and prompt:
        printer.string("{}in directory {}, fetching"
                       " {} to {}.".format(prompt, os.getcwd(),
                                           url, dir_text))
    if not branch:
        branch = "master"
    if os.path.isdir(directory):
        # Update existing code
        with ChangeDir(directory):
            if printer and prompt:
                printer.string("{}updating code in {}...".
                               format(prompt, dir_text))
            target = branch
            if branch.startswith("#"):
                # Actually been given a branch, lose the
                # preceding #
                target = branch[1:len(branch)]
            # Try this once and, if it fails and force is set,
            # do a git reset --hard and try again
            tries = 1
            if force:
                tries += 1
            while tries > 0:
                try:
                    call_list = []
                    call_list.append("git")
                    call_list.append("fetch")
                    call_list.append("origin")
                    call_list.append(target)
                    if printer and prompt:
                        # Echo the command we are about to run
                        text = ""
                        for item in call_list:
                            if text:
                                text += " "
                            text += item
                        printer.string("{}in {} calling {}...".
                                       format(prompt, os.getcwd(), text))
                    # Try to pull the code
                    text = subprocess.check_output(subprocess_osify(call_list),
                                                   stderr=subprocess.STDOUT,
                                                   shell=True) # Jenkins hangs without this
                    for line in text.splitlines():
                        if printer and prompt:
                            printer.string("{}{}".format(prompt, line.decode()))
                    got_code = True
                except subprocess.CalledProcessError as error:
                    if printer and prompt:
                        printer.string("{}git returned error {}: \"{}\"".
                                       format(prompt, error.returncode,
                                              error.output))
                if got_code:
                    tries = 0
                else:
                    if force:
                        # git reset --hard
                        # NOTE(review): printer.string is called here without
                        # the "if printer and prompt" guard used everywhere
                        # else — crashes if printer is None; confirm intent.
                        printer.string("{}in directory {} calling git reset --hard...". \
                                       format(prompt, os.getcwd()))
                        try:
                            text = subprocess.check_output(subprocess_osify(["git", "reset",
                                                                             "--hard"]),
                                                           stderr=subprocess.STDOUT,
                                                           shell=True) # Jenkins hangs without this
                            for line in text.splitlines():
                                if printer and prompt:
                                    printer.string("{}{}".format(prompt, line.decode()))
                        except subprocess.CalledProcessError as error:
                            if printer and prompt:
                                printer.string("{}git returned error {}: \"{}\"".
                                               format(prompt, error.returncode,
                                                      error.output))
                        # Only reset once: disable force for the retry
                        force = False
                    tries -= 1
        if not got_code:
            # If we still haven't got the code, delete the
            # directory for a true clean start
            deltree(directory, printer, prompt)
    if not os.path.isdir(directory):
        # Clone the repo
        if printer and prompt:
            printer.string("{}cloning from {} into {}...".
                           format(prompt, url, dir_text))
        try:
            text = subprocess.check_output(subprocess_osify(["git", "clone", "-q",
                                                             url, directory]),
                                           stderr=subprocess.STDOUT,
                                           shell=True) # Jenkins hangs without this
            for line in text.splitlines():
                if printer and prompt:
                    printer.string("{}{}".format(prompt, line.decode()))
            got_code = True
        except subprocess.CalledProcessError as error:
            if printer and prompt:
                printer.string("{}git returned error {}: \"{}\"".
                               format(prompt, error.returncode,
                                      error.output))
    if got_code and os.path.isdir(directory):
        # Check out the correct branch and recurse submodules
        with ChangeDir(directory):
            target = "origin/" + branch
            if branch.startswith("#"):
                # Actually been given a branch, so lose the
                # "origin/" and the preceding #
                target = branch[1:len(branch)]
            if printer and prompt:
                printer.string("{}checking out {}...".
                               format(prompt, target))
            try:
                call_list = ["git", "-c", "advice.detachedHead=false",
                             "checkout", "--no-progress"]
                if submodule_init:
                    call_list.append("--recurse-submodules")
                    # NOTE(review): another unguarded printer.string call —
                    # crashes if printer is None; confirm intent.
                    printer.string("{}also recursing sub-modules (can take some time" \
                                   " and gives no feedback).".format(prompt))
                call_list.append(target)
                if printer and prompt:
                    # Echo the command we are about to run
                    text = ""
                    for item in call_list:
                        if text:
                            text += " "
                        text += item
                    printer.string("{}in {} calling {}...".
                                   format(prompt, os.getcwd(), text))
                text = subprocess.check_output(subprocess_osify(call_list),
                                               stderr=subprocess.STDOUT,
                                               shell=True) # Jenkins hangs without this
                for line in text.splitlines():
                    if printer and prompt:
                        printer.string("{}{}".format(prompt, line.decode()))
                success = True
            except subprocess.CalledProcessError as error:
                if printer and prompt:
                    printer.string("{}git returned error {}: \"{}\"".
                                   format(prompt, error.returncode,
                                          error.output))
    return success
def exe_where(exe_name, help_text, printer, prompt):
    '''Locate an executable with where.exe (Windows) or which (Linux).

    Prints every location found and returns True on success; on failure
    prints help_text (if any) and returns False.'''
    found = False
    try:
        printer.string("{}looking for \"{}\"...". \
                       format(prompt, exe_name))
        # See here:
        # https://stackoverflow.com/questions/14928860/passing-double-quote-shell-commands-in-python-to-subprocess-popen
        # ...for why the construction "".join() is necessary when
        # passing things which might have spaces in them.
        # It is the only thing that works.
        if is_linux():
            cmd = ["which {}".format(exe_name.replace(":", "/"))]
            printer.string("{}detected linux, calling \"{}\"...".format(prompt, cmd))
        else:
            cmd = ["where", "".join(exe_name)]
            printer.string("{}detected nonlinux, calling \"{}\"...".format(prompt, cmd))
        output = subprocess.check_output(cmd,
                                         stderr=subprocess.STDOUT,
                                         shell=True) # Jenkins hangs without this
        for row in output.splitlines():
            printer.string("{}{} found in {}".format(prompt, exe_name,
                                                     row.decode()))
        found = True
    except subprocess.CalledProcessError:
        if help_text:
            message = "{}ERROR {} not found: {}". \
                      format(prompt, exe_name, help_text)
        else:
            message = "{}ERROR {} not found". \
                      format(prompt, exe_name)
        printer.string(message)
    return found
def exe_version(exe_name, version_switch, printer, prompt):
    '''Print the version of a given executable.

    version_switch defaults to "--version" when falsy.  Returns True if
    the executable ran and reported something, else False.'''
    worked = False
    switch = version_switch or "--version"
    try:
        output = subprocess.check_output(subprocess_osify(["".join(exe_name), switch]),
                                         stderr=subprocess.STDOUT,
                                         shell=True) # Jenkins hangs without this
        for row in output.splitlines():
            printer.string("{}{}".format(prompt, row.decode()))
        worked = True
    except subprocess.CalledProcessError:
        printer.string("{}ERROR {} either not found or didn't like {}". \
                       format(prompt, exe_name, switch))
    return worked
def exe_terminate(process_pid):
    '''Terminate the process with the given PID and all of its children'''
    parent = psutil.Process(process_pid)
    # Children first, then the parent itself
    for child in parent.children(recursive=True):
        child.terminate()
    parent.terminate()
def read_from_process_and_queue(process, read_queue):
    '''Forward decoded stdout lines from process to read_queue until it exits'''
    while process.poll() is None:
        line = process.stdout.readline().decode()
        if line:
            read_queue.put(line)
        else:
            # Nothing available yet: back off briefly
            sleep(0.1)
def queue_get_no_exception(the_queue, block=True, timeout=None):
    '''A version of queue.get() that returns None instead of raising Empty'''
    try:
        return the_queue.get(block=block, timeout=timeout)
    except queue.Empty:
        return None
def capture_env_var(line, env, printer, prompt):
    '''A bit of exe_run that needs to be called from two places.

    Parse a KEY=VALUE line into the env dictionary we were given,
    warning via printer when the line has no "=" in it.'''
    key, sep, value = line.partition('=')
    if sep:
        env[key] = value.rstrip()
    else:
        printer.string("{}WARNING: not an environment variable: \"{}\"".
                       format(prompt, line))
# Note: if returned_env is given then "set"
# will be executed after the exe and the environment
# variables will be returned in it. The down-side
# of this is that the return value of the exe is,
# of course, lost.
def exe_run(call_list, guard_time_seconds=None, printer=None, prompt=None,
shell_cmd=False, set_env=None, returned_env=None,
bash_cmd=False, keep_going_flag=None):
'''Call an executable, printing out what it does'''
success = False
start_time = time()
flibbling = False
kill_time = None
read_time = start_time
if returned_env is not None:
# The caller wants the environment after the
# command has run, so, from this post:
# https://stackoverflow.com/questions/1214496/how-to-get-environment-from-a-subprocess
# append a tag that we can detect
# to the command and then call set,
# from which we can parse the environment
call_list.append("&&")
call_list.append("echo")
call_list.append("flibble")
call_list.append("&&")
if is_linux():
call_list.append("env")
bash_cmd = True
else:
call_list.append("set")
# I've seen output from set get lost,
# possibly because the process ending
# is asynchronous with | |
if c.parent else 1.0)
for c in self.iterComponents()
]
) # c x 1
totalVol = volumes.sum()
if totalVol == 0.0:
# there are no children so no volume or number density
return [0.0] * len(nucNames)
nucDensForEachComp = numpy.array(
[
[c.getNumberDensity(nuc) for nuc in nucNames]
for c in self.iterComponents()
]
) # c x n
return volumes.dot(nucDensForEachComp) / totalVol
def _getNdensHelper(self):
"""
Return a number densities dict with unexpanded lfps.
Notes
-----
This is implemented more simply on the component level.
"""
nucNames = self.getNuclides()
return dict(zip(nucNames, self.getNuclideNumberDensities(nucNames)))
def getNumberDensities(self, expandFissionProducts=False):
"""
Retrieve the number densities in atoms/barn-cm of all nuclides (or those requested) in the object.
Parameters
----------
expandFissionProducts : bool (optional)
expand the fission product number densities
nuclideNames : iterable (optional)
nuclide names to get number densities
Returns
-------
numberDensities : dict
nucName keys, number density values (atoms/bn-cm)
"""
numberDensities = self._getNdensHelper()
if expandFissionProducts:
return self._expandLFPs(numberDensities)
return numberDensities
def getNeutronEnergyDepositionConstants(self):
"""
Get the neutron energy deposition group constants for a composite.
Returns
-------
energyDepConstants: numpy.array
Neutron energy generation group constants (in Joules/cm)
Raises
------
RuntimeError:
Reports if a cross section library is not assigned to a reactor.
"""
if not self.r.core.lib:
raise RuntimeError(
"Cannot get neutron energy deposition group constants without "
"a library. Please ensure a library exists."
)
return xsCollections.computeNeutronEnergyDepositionConstants(
self.getNumberDensities(), self.r.core.lib, self.getMicroSuffix()
)
def getGammaEnergyDepositionConstants(self):
"""
Get the gamma energy deposition group constants for a composite.
Returns
-------
energyDepConstants: numpy.array
Energy generation group constants (in Joules/cm)
Raises
------
RuntimeError:
Reports if a cross section library is not assigned to a reactor.
"""
if not self.r.core.lib:
raise RuntimeError(
"Cannot get gamma energy deposition group constants without "
"a library. Please ensure a library exists."
)
return xsCollections.computeGammaEnergyDepositionConstants(
self.getNumberDensities(), self.r.core.lib, self.getMicroSuffix()
)
def getTotalEnergyGenerationConstants(self):
"""
Get the total energy generation group constants for a composite.
Gives the total energy generation rates when multiplied by the multigroup flux.
Returns
-------
totalEnergyGenConstant: numpy.array
Total (fission + capture) energy generation group constants (Joules/cm)
"""
return (
self.getFissionEnergyGenerationConstants()
+ self.getCaptureEnergyGenerationConstants()
)
def getFissionEnergyGenerationConstants(self):
"""
Get the fission energy generation group constants for a composite.
Gives the fission energy generation rates when multiplied by the multigroup
flux.
Returns
-------
fissionEnergyGenConstant: numpy.array
Energy generation group constants (Joules/cm)
Raises
------
RuntimeError:
Reports if a cross section library is not assigned to a reactor.
"""
if not self.r.core.lib:
raise RuntimeError(
"Cannot compute energy generation group constants without a library"
". Please ensure a library exists."
)
return xsCollections.computeFissionEnergyGenerationConstants(
self.getNumberDensities(), self.r.core.lib, self.getMicroSuffix()
)
def getCaptureEnergyGenerationConstants(self):
"""
Get the capture energy generation group constants for a composite.
Gives the capture energy generation rates when multiplied by the multigroup
flux.
Returns
-------
fissionEnergyGenConstant: numpy.array
Energy generation group constants (Joules/cm)
Raises
------
RuntimeError:
Reports if a cross section library is not assigned to a reactor.
"""
if not self.r.core.lib:
raise RuntimeError(
"Cannot compute energy generation group constants without a library"
". Please ensure a library exists."
)
return xsCollections.computeCaptureEnergyGenerationConstants(
self.getNumberDensities(), self.r.core.lib, self.getMicroSuffix()
)
    def _expandLFPs(self, numberDensities):
        """
        Expand the LFPs on the numberDensities dictionary using this composite's
        lumpedFissionProductCollection.

        Parameters
        ----------
        numberDensities : dict
            nucName keys, number density values (atoms/bn-cm), possibly
            containing lumped-fission-product (LFP) entries.

        Returns
        -------
        dict
            Number densities with LFP entries replaced by their constituent
            fission-product nuclides.

        Raises
        ------
        RuntimeError
            If LFP nuclides are present but no lumped fission product
            collection is available to expand them.
        """
        lfpCollection = self.getLumpedFissionProductCollection()
        if lfpCollection:  # may not have lfps in non-fuel
            lfpDensities = lfpCollection.getNumberDensities(self)
            # Merge: sum the two dicts over the union of their nuclide names
            numberDensities = {
                nucName: numberDensities.get(nucName, 0.0)
                + lfpDensities.get(nucName, 0.0)
                for nucName in set(numberDensities) | set(lfpDensities)
            }
            # remove LFPs from the result
            for lfpName in lfpCollection:
                numberDensities.pop(lfpName, None)
        else:
            # NOTE(review): named lfpMass but this sums number densities,
            # not mass — it is only used as a nonzero sentinel below
            lfpMass = sum(
                dens
                for name, dens in numberDensities.items()
                if isinstance(nuclideBases.byName[name], nuclideBases.LumpNuclideBase)
            )
            if lfpMass:
                raise RuntimeError(
                    "Composite {} is attempting to expand lumped fission products, but does not have "
                    "an lfpCollection.".format(self)
                )
        return numberDensities
def getChildrenWithNuclides(self, nucNames):
"""Return children that contain any nuclides in nucNames."""
nucNames = set(nucNames) # only convert to set once
return [child for child in self if nucNames.intersection(child.getNuclides())]
def getAncestor(self, fn):
"""
Return the first ancestor that satisfies the supplied predicate.
Parameters
----------
fn : Function-like object
The predicate used to test the validity of an ancestor. Should return true
if the ancestor satisfies the caller's requirements
"""
if fn(self):
return self
if self.parent is None:
return None
else:
return self.parent.getAncestor(fn)
def getAncestorAndDistance(
self, fn, _distance=0
) -> Optional[Tuple["ArmiObject", int]]:
"""
Return the first ancestor that satisfies the supplied predicate, along with how
many levels above self the ancestor lies.
Parameters
----------
fn : Function-like object
The predicate used to test the validity of an ancestor. Should return true
if the ancestor satisfies the caller's requirements
"""
if fn(self):
return self, _distance
if self.parent is None:
return None
else:
return self.parent.getAncestorAndDistance(fn, _distance + 1)
def getAncestorWithFlags(self, typeSpec: TypeSpec, exactMatch=False):
"""
Return the first ancestor that matches the passed flags.
Parameters
----------
typeSpec : TypeSpec
A collection of flags to match on candidate parents
exactMatch : bool
Whether the flags match should be exact
Returns
-------
armi.composites.ArmiObject
the first ancestor up the chain of parents that matches the passed flags
Notes
-----
This will throw an error if no ancestor can be found that matches the typeSpec
See Also
--------
ArmiObject.hasFlags()
"""
if self.hasFlags(typeSpec, exact=exactMatch):
return self
if self.parent is None:
return None
else:
return self.parent.getAncestorWithFlags(typeSpec, exactMatch=exactMatch)
def getTotalNDens(self):
"""
Return the total number density of all atoms in this object.
Returns
-------
nTot : float
Total ndens of all nuclides in atoms/bn-cm. Not homogenized.
"""
nFPsPerLFP = (
fissionProductModel.NUM_FISSION_PRODUCTS_PER_LFP
) # LFPs count as two! Big deal in non BOL cases.
return sum(
dens * (nFPsPerLFP if "LFP" in name else 1.0)
for name, dens in self.getNumberDensities().items()
)
def setNumberDensity(self, nucName, val):
"""
Set the number density of this nuclide to this value.
This distributes atom density evenly across all children that contain nucName.
If the nuclide doesn't exist in any of the children, then that's actually an
error. This would only happen if some unnatural nuclide like Pu239 built up in
fresh UZr. That should be anticipated and dealt with elsewhere.
"""
activeChildren = self.getChildrenWithNuclides({nucName})
if not activeChildren:
activeVolumeFrac = 1.0
if val:
raise ValueError(
"The nuclide {} does not exist in any children of {}; "
"cannot set its number density to {}. The nuclides here are: {}".format(
nucName, self, val, self.getNuclides()
)
)
else:
activeVolumeFrac = sum(
vf for ci, vf in self.getVolumeFractions() if ci in activeChildren
)
dehomogenizedNdens = (
val / activeVolumeFrac
) # scale up to dehomogenize on children.
for child in activeChildren:
child.setNumberDensity(nucName, dehomogenizedNdens)
def setNumberDensities(self, numberDensities):
"""
Set one or more multiple number densities. Reset any non-listed nuclides to 0.0.
Parameters
----------
numberDensities : dict
nucName: ndens pairs.
Notes
-----
We'd like to not have to call setNumberDensity for each nuclide because we don't
want to call ``getVolumeFractions`` for each nuclide (it's inefficient).
"""
numberDensities.update(
{nuc: 0.0 for nuc in self.getNuclides() if nuc not in numberDensities}
)
self.updateNumberDensities(numberDensities)
def updateNumberDensities(self, numberDensities):
"""
Set one or more multiple number densities. Leaves unlisted number densities alone.
This changes a nuclide number density only on children that already have that
nuclide, thereby allowing, for example, actinides to stay in the fuel component
when setting block-level values.
The complication is that various number densities are distributed among various
components. This sets the number density for each nuclide evenly across all
components that contain it.
Parameters
----------
numberDensities : dict
nucName: ndens pairs.
"""
children, volFracs = zip(*self.getVolumeFractions())
childNucs = tuple(set(child.getNuclides()) for child in children)
allDehomogenizedNDens = collections.defaultdict(dict)
# compute potentially-different homogenization factors for each child. evenly
# distribute entire number density over the subset of active children.
for nuc, dens in numberDensities.items():
# get "active" indices, i.e., indices of children containing nuc
# NOTE: this is one of the rare instances in which (imo), using explicit
# indexing clarifies subsequent code since it's not necessary to zip +
# filter + extract individual components (just extract by filtered index).
indiciesToSet = tuple(
i for i, nucsInChild in enumerate(childNucs) if nuc in nucsInChild
)
if not indiciesToSet:
| |
<reponame>aerospike/aerospike-admin<filename>lib/live_cluster/health_check_controller.py<gh_stars>10-100
import copy
import time
from lib.health import util as health_util
from lib.get_controller import get_sindex_stats
from lib.utils import util
from lib.base_controller import CommandHelp
from .live_cluster_command_controller import LiveClusterCommandController
@CommandHelp(
"Checks for common inconsistencies and print if there is any.",
"This command is still in beta and its output should not be directly acted upon without further analysis.",
hide=True,
)
class HealthCheckController(LiveClusterCommandController):
last_snapshot_collection_time = 0
last_snapshot_count = 0
    def __init__(self):
        # This controller supports no line modifiers (e.g. "with"/"like"),
        # so start with an empty modifier set.
        self.modifiers = set()
def _get_asstat_data(self, stanza):
if stanza == "service":
return self.cluster.info_statistics(nodes=self.nodes)
elif stanza == "namespace":
return self.cluster.info_all_namespace_statistics(nodes=self.nodes)
elif stanza == "sets":
return self.cluster.info_all_set_statistics(nodes=self.nodes)
elif stanza == "bins":
return self.cluster.info_bin_statistics(nodes=self.nodes)
elif stanza == "xdr":
return self.cluster.info_XDR_statistics(nodes=self.nodes)
elif stanza == "dc":
return self.cluster.info_all_dc_statistics(nodes=self.nodes)
elif stanza == "sindex":
return util.flip_keys(
get_sindex_stats(cluster=self.cluster, nodes=self.nodes)
)
elif stanza == "udf":
return self.cluster.info_udf_list(nodes=self.nodes)
elif stanza == "endpoints":
return self.cluster.info_service_list(nodes=self.nodes)
elif stanza == "services":
return self.cluster.info_peers_flat_list(nodes=self.nodes)
def _get_asconfig_data(self, stanza):
if stanza == "xdr":
return self.cluster.info_XDR_get_config(nodes=self.nodes)
elif stanza == "dc":
return self.cluster.info_dc_get_config(nodes=self.nodes)
elif stanza == "roster":
return self.cluster.info_roster(nodes=self.nodes)
elif stanza == "racks":
return self.cluster.info_racks(nodes=self.nodes)
else:
return self.cluster.info_get_config(nodes=self.nodes, stanza=stanza)
def _get_as_meta_data(self, stanza):
if stanza == "build":
return self.cluster.info("build", nodes=self.nodes)
if stanza == "node_id":
return self.cluster.info("node", nodes=self.nodes)
elif stanza == "edition":
editions = self.cluster.info("edition", nodes=self.nodes)
if not editions:
return editions
editions_in_shortform = {}
for node, edition in editions.items():
if not edition or isinstance(edition, Exception):
continue
editions_in_shortform[node] = util.convert_edition_to_shortform(edition)
return editions_in_shortform
elif stanza == "health":
return self.cluster.info_health_outliers(nodes=self.nodes)
@CommandHelp(
"Displays health summary. If remote server System credentials provided, then it will collect remote system stats",
"and analyse that also. If credentials are not available then it will collect only localhost system statistics.",
" Options:",
" -f <string> - Query file path. Default: inbuilt health queries.",
" -o <string> - Output file path. ",
" This parameter works if Query file path provided, otherwise health command will work in interactive mode.",
" -v - Enable to display extra details of assert errors.",
" -d - Enable to display extra details of exceptions.",
" -n <int> - Number of snapshots. Default: 1",
" -s <int> - Sleep time in seconds between each snapshot. Default: 1 sec",
" -oc <string> - Output filter Category. ",
" This parameter works if Query file path provided, otherwise health command will work in interactive mode.",
" Format : string of dot (.) separated category levels",
" -wl <string> - Output filter Warning level. Expected value CRITICAL or WARNING or INFO ",
" This parameter works if Query file path provided, otherwise health command will work in interactive mode.",
" --enable-ssh - Enables the collection of system statistics from a remote server.",
" --ssh-user <string> - Default user ID for remote servers. This is the ID of a user of the system, not the ID of an Aerospike user.",
" --ssh-pwd <string> - Default password or passphrase for key for remote servers. This is the user's password for logging into",
" the system, not a password for logging into Aerospike.",
" --ssh-port <int> - Default SSH port for remote servers. Default: 22",
" --ssh-key <string> - Default SSH key (file path) for remote servers.",
" --ssh-cf <string> - Remote System Credentials file path.",
" If the server credentials are not in the credentials file, then authentication is attempted with the default",
" credentials.",
" File format : each line should contain <IP[:PORT]>,<USER_ID>,<PASSWORD or PASSPHRASE>,<SSH_KEY>",
" Example: 192.168.3.11,uid,pwd",
" 192.168.3.11:3232,uid,pwd",
" 192.168.3.11:3232,uid,,key_path",
" 192.168.3.11:3232,uid,passphrase,key_path",
" [2001::1234:10],uid,pwd",
" [2001::1234:10]:3232,uid,,key_path",
)
def _do_default(self, line):
output_file = util.get_arg_and_delete_from_mods(
line=line,
arg="-o",
return_type=str,
default=None,
modifiers=self.modifiers,
mods=self.mods,
)
snap_count = util.get_arg_and_delete_from_mods(
line=line,
arg="-n",
return_type=int,
default=1,
modifiers=self.modifiers,
mods=self.mods,
)
sleep_tm = util.get_arg_and_delete_from_mods(
line=line,
arg="-s",
return_type=int,
default=1,
modifiers=self.modifiers,
mods=self.mods,
)
verbose = util.check_arg_and_delete_from_mods(
line=line, arg="-v", default=False, modifiers=self.modifiers, mods=self.mods
)
debug = util.check_arg_and_delete_from_mods(
line=line, arg="-d", default=False, modifiers=self.modifiers, mods=self.mods
)
output_filter_category = util.get_arg_and_delete_from_mods(
line=line,
arg="-oc",
return_type=str,
default=None,
modifiers=self.modifiers,
mods=self.mods,
)
output_filter_warning_level = util.get_arg_and_delete_from_mods(
line,
arg="-wl",
return_type=str,
default=None,
modifiers=self.modifiers,
mods=self.mods,
)
enable_ssh = util.check_arg_and_delete_from_mods(
line=line,
arg="--enable-ssh",
default=False,
modifiers=self.modifiers,
mods=self.mods,
)
default_user = util.get_arg_and_delete_from_mods(
line=line,
arg="--ssh-user",
return_type=str,
default=None,
modifiers=self.modifiers,
mods=self.mods,
)
default_pwd = util.get_arg_and_delete_from_mods(
line=line,
arg="--ssh-pwd",
return_type=str,
default=None,
modifiers=self.modifiers,
mods=self.mods,
)
default_ssh_port = util.get_arg_and_delete_from_mods(
line=line,
arg="--ssh-port",
return_type=int,
default=None,
modifiers=self.modifiers,
mods=self.mods,
)
default_ssh_key = util.get_arg_and_delete_from_mods(
line=line,
arg="--ssh-key",
return_type=str,
default=None,
modifiers=self.modifiers,
mods=self.mods,
)
credential_file = util.get_arg_and_delete_from_mods(
line=line,
arg="--ssh-cf",
return_type=str,
default=None,
modifiers=self.modifiers,
mods=self.mods,
)
# Query file can be specified without -f
# hence always parsed in the end
query_file = util.get_arg_and_delete_from_mods(
line=line,
arg="-f",
return_type=str,
default=None,
modifiers=self.modifiers,
mods=self.mods,
)
if not query_file and line:
query_file = line[0]
if query_file:
query_file = util.strip_string(query_file)
if output_file:
output_file = util.strip_string(output_file)
if output_filter_category:
output_filter_category = [
util.strip_string(c).upper()
for c in util.strip_string(output_filter_category).split(".")
]
else:
output_filter_category = []
if output_filter_warning_level:
output_filter_warning_level = util.strip_string(
output_filter_warning_level
).upper()
if (
time.time() - HealthCheckController.last_snapshot_collection_time > 60
) or HealthCheckController.last_snapshot_count != snap_count:
# There is possibility of different cluster-names in old
# heartbeat protocol. As asadm works with single cluster,
# so we are setting one static cluster-name.
cluster_name = "C1"
stanza_dict = {
"statistics": (
self._get_asstat_data,
[
(
"service",
"SERVICE",
[("CLUSTER", cluster_name), ("NODE", None)],
),
(
"namespace",
"NAMESPACE",
[
("CLUSTER", cluster_name),
("NODE", None),
(None, None),
("NAMESPACE", None),
],
),
(
"sets",
"SET",
[
("CLUSTER", cluster_name),
("NODE", None),
(None, None),
(
"NAMESPACE",
(
"ns_name",
"ns",
),
),
(
"SET",
(
"set_name",
"set",
),
),
],
),
(
"bins",
"BIN",
[
("CLUSTER", cluster_name),
("NODE", None),
(None, None),
("NAMESPACE", None),
],
),
(
"xdr",
"XDR",
[
("CLUSTER", cluster_name),
("NODE", None),
(None, None),
("DC", None),
],
),
(
"dc",
"DC",
[
("CLUSTER", cluster_name),
("NODE", None),
(None, None),
("DC", None),
],
),
(
"sindex",
"SINDEX",
[
("CLUSTER", cluster_name),
("NODE", None),
(None, None),
("NAMESPACE", ("ns",)),
("SET", ("set",)),
("SINDEX", ("indexname",)),
],
),
],
),
"config": (
self._get_asconfig_data,
[
(
"service",
"SERVICE",
[("CLUSTER", cluster_name), ("NODE", None)],
),
("xdr", "XDR", [("CLUSTER", cluster_name), ("NODE", None)]),
(
"network",
"NETWORK",
[("CLUSTER", cluster_name), ("NODE", None)],
),
(
"dc",
"DC",
[
("CLUSTER", cluster_name),
("NODE", None),
(None, None),
("DC", None),
],
),
(
"namespace",
"NAMESPACE",
[
("CLUSTER", cluster_name),
("NODE", None),
(None, None),
("NAMESPACE", None),
],
),
(
"roster",
"ROSTER",
[
("CLUSTER", cluster_name),
("NODE", None),
(None, None),
("NAMESPACE", None),
],
),
(
"racks",
"RACKS",
[
("CLUSTER", cluster_name),
("NODE", None),
(None, None),
("NAMESPACE", None),
(None, None),
("RACKS", None),
],
),
],
),
"original_config": (
self.cluster.info_get_originalconfig,
[
(
"service",
"SERVICE",
[("CLUSTER", cluster_name), ("NODE", None)],
),
("xdr", "XDR", [("CLUSTER", cluster_name), ("NODE", None)]),
(
"network",
"NETWORK",
[("CLUSTER", cluster_name), ("NODE", None)],
),
(
"dc",
"DC",
[
("CLUSTER", cluster_name),
("NODE", None),
(None, None),
("DC", None),
],
),
(
"namespace",
"NAMESPACE",
[
("CLUSTER", cluster_name),
("NODE", None),
(None, None),
("NAMESPACE", None),
],
),
],
),
"cluster": (
self._get_as_meta_data,
[
(
"build",
"METADATA",
[
("CLUSTER", cluster_name),
("NODE", None),
("KEY", "version"),
],
),
(
"edition",
"METADATA",
[
("CLUSTER", cluster_name),
("NODE", None),
("KEY", "edition"),
],
),
(
"node_id",
"METADATA",
[
("CLUSTER", cluster_name),
("NODE", None),
("KEY", "node-id"),
],
),
],
),
"endpoints": (
self._get_asstat_data,
[
(
"endpoints",
"METADATA",
[
("CLUSTER", cluster_name),
("NODE", None),
("KEY", "endpoints"),
],
),
],
),
"services": (
self._get_asstat_data,
[
(
"services",
"METADATA",
[
("CLUSTER", cluster_name),
("NODE", None),
("KEY", "services"),
],
),
],
),
"metadata": (
self._get_asstat_data,
[
(
"udf",
"UDF",
[
("CLUSTER", cluster_name),
("NODE", None),
(None, None),
("FILENAME", None),
],
),
],
),
"health": (
self._get_as_meta_data,
[
(
"health",
"METADATA",
[
("CLUSTER", cluster_name),
("NODE", None),
(None, None),
("OUTLIER", None),
],
),
],
),
}
sys_cmd_dict = {
"sys_stats": (
util.restructure_sys_data,
[
(
"free-m",
"SYSTEM",
"FREE",
True,
[(None, None), ("CLUSTER", cluster_name), ("NODE", None)],
),
(
"top",
"SYSTEM",
"TOP",
True,
[(None, None), ("CLUSTER", cluster_name), ("NODE", None)],
),
(
"iostat",
"SYSTEM",
"IOSTAT",
False,
[
(None, None),
("CLUSTER", cluster_name),
("NODE", None),
(None, None),
("DEVICE", None),
],
),
(
"meminfo",
"SYSTEM",
"MEMINFO",
True,
[("CLUSTER", cluster_name), ("NODE", None)],
),
(
"dmesg",
"SYSTEM",
"DMESG",
True,
[("CLUSTER", cluster_name), ("NODE", None)],
),
(
"lscpu",
"SYSTEM",
"LSCPU",
True,
[
("CLUSTER", cluster_name),
("NODE", None),
("LSCPU", None),
],
),
(
"iptables",
"SYSTEM",
"IPTABLES",
True,
[("CLUSTER", cluster_name), ("NODE", None)],
),
(
"sysctlall",
"SYSTEM",
"SYSCTLALL",
True,
[
("CLUSTER", cluster_name),
("NODE", None),
("SYSCTL", None),
],
),
(
"hdparm",
"SYSTEM",
"HDPARM",
True,
[
("CLUSTER", cluster_name),
("NODE", None),
| |
"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license
(https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import torch
from torch.utils.data import Dataset
import numpy as np
import time
import os
import cv2
import sys
import utils
from datasets.scannet_scene import ScanNetScene
class PlaneDatasetSingle(Dataset):
    def __init__(self, options, config, split, random=True, loadNeighborImage=False, load_semantics=False, load_boundary=False):
        """Index ScanNet scenes and images for plane-detection training.

        Builds ``self.sceneImageIndices`` as a shuffled list of
        ``[scene_index, image_index]`` pairs, filtered against a
        per-split list of invalid indices, and precomputes RPN anchors.

        Args:
            options: Run options; must provide ``dataFolder`` (ScanNet root).
            config: Model config providing RPN anchor settings.
            split: Dataset split name ('train'/'test'/...), used to pick
                the scene-list and invalid-index files.
            random: If True, seed numpy's RNG from the wall clock before
                shuffling; otherwise use a fixed seed of 0.
            loadNeighborImage: If True, ``__getitem__`` also returns a
                neighbor frame and relative pose.
            load_semantics: Forwarded to ScanNetScene.
            load_boundary: Forwarded to ScanNetScene.
        """
        self.options = options
        self.config = config
        self.split = split
        self.random = random
        self.dataFolder = options.dataFolder
        self.scenes = []
        self.sceneImageIndices = []
        self.loadClassMap()
        # Scene ids used by PlaneNet's training; excluded from our test split.
        planenet_scene_ids_val = np.load('datasets/scene_ids_val.npy')
        planenet_scene_ids_val = {scene_id.decode('utf-8'): True for scene_id in planenet_scene_ids_val}
        with open(self.dataFolder + '/ScanNet/Tasks/Benchmark/scannetv1_' + split + '.txt') as f:
            for line in f:
                scene_id = line.strip()
                if split == 'test':
                    ## Remove scenes which are in PlaneNet's training set for fair comparison
                    if scene_id not in planenet_scene_ids_val:
                        continue
                    pass
                scenePath = self.dataFolder + '/scans/' + scene_id
                # Skip scenes missing metadata or plane annotations.
                if not os.path.exists(scenePath + '/' + scene_id + '.txt') or not os.path.exists(scenePath + '/annotation/planes.npy'):
                    continue
                scene = ScanNetScene(options, scenePath, scene_id, self.confident_labels, self.layout_labels, load_semantics=load_semantics, load_boundary=load_boundary)
                self.scenes.append(scene)
                # One index pair per image of the newly appended scene.
                self.sceneImageIndices += [[len(self.scenes) - 1, imageIndex] for imageIndex in range(len(scene.imagePaths))]
                continue
            pass
        if random:
            # Seed from byte-swapped microsecond clock so parallel runs differ.
            t = int(time.time() * 1000000)
            np.random.seed(((t & 0xff000000) >> 24) +
                           ((t & 0x00ff0000) >> 8) +
                           ((t & 0x0000ff00) << 8) +
                           ((t & 0x000000ff) << 24))
        else:
            np.random.seed(0)
            pass
        np.random.shuffle(self.sceneImageIndices)
        # Invalid samples are encoded as scene_index * 10000 + image_index
        # (so image_index must be < 10000; asserted below).
        self.invalid_indices = {}
        with open(self.dataFolder + '/invalid_indices_' + split + '.txt', 'r') as f:
            for line in f:
                tokens = line.split(' ')
                if len(tokens) == 3:
                    assert(int(tokens[2]) < 10000)
                    invalid_index = int(tokens[1]) * 10000 + int(tokens[2])
                    if invalid_index not in self.invalid_indices:
                        self.invalid_indices[invalid_index] = True
                        pass
                    pass
                continue
            pass
        self.sceneImageIndices = [[sceneIndex, imageIndex] for sceneIndex, imageIndex in self.sceneImageIndices if (sceneIndex * 10000 + imageIndex) not in self.invalid_indices]
        print('num images', len(self.sceneImageIndices))
        # Precompute the feature-pyramid RPN anchors once; shared by all samples.
        self.anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
                                                      config.RPN_ANCHOR_RATIOS,
                                                      config.BACKBONE_SHAPES,
                                                      config.BACKBONE_STRIDES,
                                                      config.RPN_ANCHOR_STRIDE)
        self.loadNeighborImage = loadNeighborImage
        return
def loadClassMap(self):
classLabelMap = {}
with open(self.dataFolder + '/scannetv2-labels.combined.tsv') as info_file:
line_index = 0
for line in info_file:
if line_index > 0:
line = line.split('\t')
key = line[1].strip()
if line[4].strip() != '':
label = int(line[4].strip())
else:
label = -1
pass
classLabelMap[key] = label
classLabelMap[key + 's'] = label
classLabelMap[key + 'es'] = label
pass
line_index += 1
continue
pass
confidentClasses = {'wall': True,
'floor': True,
'cabinet': True,
'bed': True,
'chair': False,
'sofa': False,
'table': True,
'door': True,
'window': True,
'bookshelf': False,
'picture': True,
'counter': True,
'blinds': False,
'desk': True,
'shelf': False,
'shelves': False,
'curtain': False,
'dresser': True,
'pillow': False,
'mirror': False,
'entrance': True,
'floor mat': True,
'clothes': False,
'ceiling': True,
'book': False,
'books': False,
'refridgerator': True,
'television': True,
'paper': False,
'towel': False,
'shower curtain': False,
'box': True,
'whiteboard': True,
'person': False,
'night stand': True,
'toilet': False,
'sink': False,
'lamp': False,
'bathtub': False,
'bag': False,
'otherprop': False,
'otherstructure': False,
'otherfurniture': False,
'unannotated': False,
'': False
}
self.confident_labels = {}
for name, confidence in confidentClasses.items():
if confidence and name in classLabelMap:
self.confident_labels[classLabelMap[name]] = True
pass
continue
self.layout_labels = {1: True, 2: True, 22: True, 9: True}
return
    def __len__(self):
        """Number of (scene, image) samples after invalid-index filtering."""
        return len(self.sceneImageIndices)
def transformPlanes(self, transformation, planes):
planeOffsets = np.linalg.norm(planes, axis=-1, keepdims=True)
centers = planes
centers = np.concatenate([centers, np.ones((planes.shape[0], 1))], axis=-1)
newCenters = np.transpose(np.matmul(transformation, np.transpose(centers)))
newCenters = newCenters[:, :3] / newCenters[:, 3:4]
refPoints = planes - planes / np.maximum(planeOffsets, 1e-4)
refPoints = np.concatenate([refPoints, np.ones((planes.shape[0], 1))], axis=-1)
newRefPoints = np.transpose(np.matmul(transformation, np.transpose(refPoints)))
newRefPoints = newRefPoints[:, :3] / newRefPoints[:, 3:4]
planeNormals = newRefPoints - newCenters
planeNormals /= np.linalg.norm(planeNormals, axis=-1, keepdims=True)
planeOffsets = np.sum(newCenters * planeNormals, axis=-1, keepdims=True)
newPlanes = planeNormals * planeOffsets
return newPlanes
    def __getitem__(self, index):
        """Return one training sample for the configured anchor type.

        Returns a list:
            [image (C,H,W), image_metas, rpn_match, rpn_bbox, gt_class_ids,
             gt_boxes, gt_masks (N,H,W), gt_parameters, depth, segmentation,
             camera]
        plus, when ``self.loadNeighborImage`` is set:
            [neighbor image (C,H,W), relative pose (6,), "scenePath imageIndex"].
        """
        # Re-seed numpy's RNG from the wall clock so each worker/epoch
        # draws a different sample sequence.
        t = int(time.time() * 1000000)
        np.random.seed(((t & 0xff000000) >> 24) +
                       ((t & 0x00ff0000) >> 8) +
                       ((t & 0x0000ff00) << 8) +
                       ((t & 0x000000ff) << 24))

        # Layout/structure anchor types are handled by dedicated loaders.
        if self.config.ANCHOR_TYPE == 'layout':
            return self.getItemLayout(index)

        if self.config.ANCHOR_TYPE == 'structure':
            return self.getItemStructure(index)

        # Keep sampling until a usable image is found (has planes, loads
        # without error, and has at least one labeled segment).
        while True:
            if self.random:
                index = np.random.randint(len(self.sceneImageIndices))
            else:
                index = index % len(self.sceneImageIndices)
                pass
            sceneIndex, imageIndex = self.sceneImageIndices[index]
            scene = self.scenes[sceneIndex]
            try:
                image, planes, plane_info, segmentation, depth, camera, extrinsics = scene[imageIndex]
                if len(planes) == 0:
                    index += 1
                    continue
            except:
                # Corrupt/missing data for this frame; try the next index.
                index += 1
                continue
            pass

            if segmentation.max() < 0:
                index += 1
                continue
            break

        instance_masks = []
        class_ids = []
        parameters = []
        if len(planes) > 0:
            # Assign each GT plane to its nearest anchor, depending on the
            # anchor parameterization in use.
            if 'joint' in self.config.ANCHOR_TYPE:
                distances = np.linalg.norm(np.expand_dims(planes, 1) - self.config.ANCHOR_PLANES, axis=-1)
                plane_anchors = distances.argmin(-1)
            elif self.config.ANCHOR_TYPE == 'Nd':
                # Separate anchors for normal direction and offset magnitude.
                plane_offsets = np.linalg.norm(planes, axis=-1)
                plane_normals = planes / np.expand_dims(plane_offsets, axis=-1)
                distances_N = np.linalg.norm(np.expand_dims(plane_normals, 1) - self.config.ANCHOR_NORMALS, axis=-1)
                normal_anchors = distances_N.argmin(-1)
                distances_d = np.abs(np.expand_dims(plane_offsets, -1) - self.config.ANCHOR_OFFSETS)
                offset_anchors = distances_d.argmin(-1)
            elif self.config.ANCHOR_TYPE in ['normal', 'patch']:
                plane_offsets = np.linalg.norm(planes, axis=-1)
                plane_normals = planes / np.expand_dims(plane_offsets, axis=-1)
                distances_N = np.linalg.norm(np.expand_dims(plane_normals, 1) - self.config.ANCHOR_NORMALS, axis=-1)
                normal_anchors = distances_N.argmin(-1)
            elif self.config.ANCHOR_TYPE == 'normal_none':
                plane_offsets = np.linalg.norm(planes, axis=-1)
                plane_normals = planes / np.expand_dims(plane_offsets, axis=-1)
                pass
            pass

        # Build per-instance masks, class ids, and residual parameters.
        for planeIndex, plane in enumerate(planes):
            m = segmentation == planeIndex
            if m.sum() < 1:
                # Plane has no visible pixels in this frame.
                continue
            instance_masks.append(m)
            if self.config.ANCHOR_TYPE == 'none':
                class_ids.append(1)
                parameters.append(np.concatenate([plane, np.zeros(1)], axis=0))
            elif 'joint' in self.config.ANCHOR_TYPE:
                class_ids.append(plane_anchors[planeIndex] + 1)
                residual = plane - self.config.ANCHOR_PLANES[plane_anchors[planeIndex]]
                parameters.append(np.concatenate([residual, np.zeros(1)], axis=0))
            elif self.config.ANCHOR_TYPE == 'Nd':
                # Class id jointly encodes (normal anchor, offset anchor).
                class_ids.append(normal_anchors[planeIndex] * len(self.config.ANCHOR_OFFSETS) + offset_anchors[planeIndex] + 1)
                normal = plane_normals[planeIndex] - self.config.ANCHOR_NORMALS[normal_anchors[planeIndex]]
                offset = plane_offsets[planeIndex] - self.config.ANCHOR_OFFSETS[offset_anchors[planeIndex]]
                parameters.append(np.concatenate([normal, np.array([offset])], axis=0))
            elif self.config.ANCHOR_TYPE == 'normal':
                class_ids.append(normal_anchors[planeIndex] + 1)
                normal = plane_normals[planeIndex] - self.config.ANCHOR_NORMALS[normal_anchors[planeIndex]]
                parameters.append(np.concatenate([normal, np.zeros(1)], axis=0))
            elif self.config.ANCHOR_TYPE == 'normal_none':
                class_ids.append(1)
                normal = plane_normals[planeIndex]
                parameters.append(np.concatenate([normal, np.zeros(1)], axis=0))
            else:
                assert(False)
                pass
            continue

        parameters = np.array(parameters, dtype=np.float32)
        mask = np.stack(instance_masks, axis=2)

        class_ids = np.array(class_ids, dtype=np.int32)
        image, image_metas, gt_class_ids, gt_boxes, gt_masks, gt_parameters = load_image_gt(self.config, index, image, mask, class_ids, parameters, augment=self.split == 'train')
        ## RPN Targets
        rpn_match, rpn_bbox = build_rpn_targets(image.shape, self.anchors,
                                                gt_class_ids, gt_boxes, self.config)

        ## If more instances than fits in the array, sub-sample from them.
        if gt_boxes.shape[0] > self.config.MAX_GT_INSTANCES:
            ids = np.random.choice(
                np.arange(gt_boxes.shape[0]), self.config.MAX_GT_INSTANCES, replace=False)
            gt_class_ids = gt_class_ids[ids]
            gt_boxes = gt_boxes[ids]
            gt_masks = gt_masks[:, :, ids]
            gt_parameters = gt_parameters[ids]

        ## Add to batch
        rpn_match = rpn_match[:, np.newaxis]
        image = utils.mold_image(image.astype(np.float32), self.config)

        # Pad depth/segmentation from 480x640 to 640x640 with 80-row bands
        # (zeros for depth, -1 "unlabeled" for segmentation) — assumes the
        # raw frames are 480x640; confirm against ScanNetScene.
        depth = np.concatenate([np.zeros((80, 640)), depth, np.zeros((80, 640))], axis=0)
        segmentation = np.concatenate([np.full((80, 640), fill_value=-1, dtype=np.int32), segmentation, np.full((80, 640), fill_value=-1, dtype=np.int32)], axis=0)
        info = [image.transpose((2, 0, 1)).astype(np.float32), image_metas, rpn_match, rpn_bbox.astype(np.float32), gt_class_ids, gt_boxes.astype(np.float32), gt_masks.transpose((2, 0, 1)).astype(np.float32), gt_parameters, depth.astype(np.float32), segmentation, camera.astype(np.float32)]

        if self.loadNeighborImage:
            # Pick the frame frameGap ahead, or behind near the sequence end.
            if imageIndex + self.options.frameGap < len(scene.imagePaths):
                imagePath = scene.imagePaths[imageIndex + self.options.frameGap]
            else:
                imagePath = scene.imagePaths[imageIndex - self.options.frameGap]
                pass
            image_2 = cv2.imread(imagePath)
            image_2 = cv2.resize(image_2, (self.config.IMAGE_MAX_DIM, self.config.IMAGE_MAX_DIM))
            info.append(image_2.transpose((2, 0, 1)).astype(np.float32))

            # Load the neighbor's camera-to-world pose from the sibling file.
            extrinsics_2_inv = []
            posePath = imagePath.replace('color', 'pose').replace('.jpg', '.txt')
            with open(posePath, 'r') as f:
                for line in f:
                    extrinsics_2_inv += [float(value) for value in line.strip().split(' ') if value.strip() != '']
                    continue
                f.close()
                pass
            extrinsics_2_inv = np.array(extrinsics_2_inv).reshape((4, 4))
            extrinsics_2 = np.linalg.inv(extrinsics_2_inv)

            # Swap the y/z axes (with sign flip) of the neighbor extrinsics.
            temp = extrinsics_2[1].copy()
            extrinsics_2[1] = extrinsics_2[2]
            extrinsics_2[2] = -temp

            # Relative transformation from this frame to the neighbor frame.
            transformation = np.matmul(extrinsics_2, np.linalg.inv(extrinsics))
            if np.any(np.isnan(transformation)):
                # Degenerate pose file: fall back to the identity transform.
                transformation = np.concatenate([np.diag(np.ones(3)), np.zeros((3, 1))], axis=-1)
                pass

            rotation = transformation[:3, :3]
            translation = transformation[:3, 3]
            axis, angle = utils.rotationMatrixToAxisAngle(rotation)
            # 6-DoF pose: translation followed by axis-angle rotation vector.
            pose = np.concatenate([translation, axis * angle], axis=0).astype(np.float32)
            info.append(pose)
            info.append(scene.scenePath + ' ' + str(imageIndex))
            pass
        return info
def getAnchorPlanesNormalOffset(self, visualize=False):
for k in [7, ]:
print('k', k)
filename_N = self.dataFolder + '/anchor_planes_N_' + str(k) + '.npy'
filename_d = self.dataFolder + '/anchor_planes_d.npy'
if os.path.exists(filename_N) and os.path.exists(filename_d) and False:
return
if os.path.exists('test/anchor_planes/all_planes.npy'):
all_planes = np.load('test/anchor_planes/all_planes.npy')
else:
all_planes = []
for sceneIndex, imageIndex in self.sceneImageIndices[:10000]:
if len(all_planes) % 100 == 0:
print(len(all_planes))
pass
scene = self.scenes[sceneIndex]
image, planes, plane_info, segmentation, depth, camera, extrinsics = scene[imageIndex]
planes = planes[np.linalg.norm(planes, axis=-1) > 1e-4]
if len(planes) == 0:
continue
all_planes.append(planes)
continue
all_planes = np.concatenate(all_planes, axis=0)
np.save('test/anchor_planes/all_planes.npy', all_planes)
pass
from sklearn.cluster import KMeans
num_anchor_planes_N = k
num_anchor_planes_d = 3
offsets = np.linalg.norm(all_planes, axis=-1)
normals = all_planes / np.expand_dims(offsets, -1)
kmeans_N = KMeans(n_clusters=num_anchor_planes_N).fit(normals)
self.anchor_planes_N = kmeans_N.cluster_centers_
## Global offset anchors
kmeans_d = KMeans(n_clusters=num_anchor_planes_d).fit(np.expand_dims(offsets, -1))
self.anchor_planes_d = kmeans_d.cluster_centers_
if visualize:
color_map = utils.ColorPalette(max(num_anchor_planes_N, num_anchor_planes_d)).getColorMap()
normals_rotated = normals.copy()
normals_rotated[:, 1] = normals[:, 2]
normals_rotated[:, 2] = -normals[:, 1]
plane_cloud = np.concatenate([normals_rotated, color_map[kmeans_N.labels_]], axis=-1)
utils.writePointCloud('test/anchor_planes/anchor_planes_N.ply', plane_cloud)
plane_cloud = np.concatenate([all_planes, color_map[kmeans_d.labels_]], axis=-1)
utils.writePointCloud('test/anchor_planes/anchor_planes_d.ply', plane_cloud)
width = 500
height = 500
Us = np.round(np.arctan2(normals[:, 1], normals[:, 0]) / np.pi * width).astype(np.int32)
| |
pair is absolute,
# second is relative, and hence is the width.
for hint in hints:
if not hint:
continue
pos = hint[0]
if isinstance(pos, float) and (int(pos) == pos):
pos = int(pos)
width = hint[1]
if isinstance(width, float) and (int(width) == width):
width = int(width)
if isH:
op = kHStemName
else:
op = kVStemName
newStem = XMLElement("string")
newStem.text = "%s %s %s" % (op, pos, width)
newHintSetArray.append(newStem)
def addFlexHint(flexList, flexArray):
    """Append one <string> element per flex point tag to flexArray."""
    for tag in flexList:
        entry = XMLElement("string")
        entry.text = tag
        flexArray.append(entry)
def fixStartPoint(outlineItem, opList):
    """Normalize the contour's start point for the GLIF format.

    GLIF has no explicit start point and no implied close-path line-to.
    If the contour's last operator lands exactly on the first point, drop
    the final point element and retag the first point with that closing
    operator's type. Otherwise the close is an implied line-to, so the
    initial move-to simply becomes a line point.
    """
    first_point = outlineItem[0]
    start = opList[0]
    end = opList[-1]
    # Compare coordinates only; element [0] of each entry is the op name.
    if (start[1], start[2]) == (end[1], end[2]):
        # Closing op coincides with the start: fold it into the first point.
        del outlineItem[-1]
        first_point.set("type", end[0])
    else:
        first_point.set("type", "line")
# Map bez charstring path operators (absolute/relative/h/v variants) to the
# UFO GLIF point "type" attribute value each one produces.
bezToUFOPoint = {
    "mt": 'move',
    "rmt": 'move',
    "hmt": 'move',
    "vmt": 'move',
    "rdt": 'line',
    "dt": 'line',
    "hdt": "line",
    "vdt": "line",
    "rct": 'curve',
    "ct": 'curve',
    "rcv": 'curve',  # Morisawa's alternate name for 'rct'.
    "vhct": 'curve',
    "hvct": 'curve',
}
def convertCoords(curX, curY):
    """Return (x, y) with each integral float collapsed to an int.

    Non-integral values are passed through unchanged, so coordinates
    serialize as "12" rather than "12.0" when they are whole numbers.
    """
    def simplify(value):
        truncated = int(value)
        return truncated if truncated == value else value

    return simplify(curX), simplify(curY)
def convertBezToOutline(ufoFontData, glyphName, bezString):
""" Since the UFO outline element has no attributes to preserve, I can
just make a new one.
"""
# convert bez data to a UFO glif XML representation
#
# Convert all bez ops to simplest UFO equivalent. Add all hints to vertical
# and horizontal hint lists as encountered; insert a HintMask class
# whenever a new set of hints is encountered after all operators have been
# processed, convert HintMask items into hintmask ops and hintmask bytes
# add all hints as prefix review operator list to optimize T2 operators.
# if useStem3 == 1, then any counter hints must be processed as stem3
# hints, else the opposite. Counter hints are used only in LanguageGroup 1
# glyphs, aka ideographs
bezString = re.sub(r"%.+?\n", "", bezString) # supress comments
bezList = re.findall(r"(\S+)", bezString)
if not bezList:
return "", None
flexList = []
# Create an initial hint mask. We use this if
# there is no explicit initial hint sub.
hintMask = HintMask(0)
hintMaskList = [hintMask]
vStem3Args = []
hStem3Args = []
argList = []
opList = []
newHintMaskName = None
inPreFlex = False
hintInfoDict = None
opIndex = 0
curX = 0
curY = 0
newOutline = XMLElement("outline")
outlineItem = None
seenHints = 0
for token in bezList:
try:
val = float(token)
argList.append(val)
continue
except ValueError:
pass
if token == "newcolors":
pass
elif token in ["beginsubr", "endsubr"]:
pass
elif token in ["snc"]:
hintMask = HintMask(opIndex)
# If the new colors precedes any marking operator,
# then we want throw away the initial hint mask we
# made, and use the new one as the first hint mask.
if opIndex == 0:
hintMaskList = [hintMask]
else:
hintMaskList.append(hintMask)
newHintMaskName = hintMask.pointName
elif token in ["enc"]:
pass
elif token == "div":
value = argList[-2] / float(argList[-1])
argList[-2:] = [value]
elif token == "rb":
if newHintMaskName is None:
newHintMaskName = hintMask.pointName
hintMask.hList.append(argList)
argList = []
seenHints = 1
elif token == "ry":
if newHintMaskName is None:
newHintMaskName = hintMask.pointName
hintMask.vList.append(argList)
argList = []
seenHints = 1
elif token == "rm": # vstem3's are vhints
if newHintMaskName is None:
newHintMaskName = hintMask.pointName
seenHints = 1
vStem3Args.append(argList)
argList = []
if len(vStem3Args) == 3:
hintMask.vstem3List.append(vStem3Args)
vStem3Args = []
elif token == "rv": # hstem3's are hhints
seenHints = 1
hStem3Args.append(argList)
argList = []
if len(hStem3Args) == 3:
hintMask.hstem3List.append(hStem3Args)
hStem3Args = []
elif token == "preflx1":
# the preflx1/preflx2 sequence provides the same i as the flex
# sequence; the difference is that the preflx1/preflx2 sequence
# provides the argument values needed for building a Type1 string
# while the flex sequence is simply the 6 rcurveto points. Both
# sequences are always provided.
argList = []
# need to skip all move-tos until we see the "flex" operator.
inPreFlex = True
elif token == "preflx2a":
argList = []
elif token == "preflx2":
argList = []
elif token == "flxa": # flex with absolute coords.
inPreFlex = False
flexPointName = kBaseFlexName + str(opIndex).zfill(4)
flexList.append(flexPointName)
curveCnt = 2
i = 0
# The first 12 args are the 6 args for each of
# the two curves that make up the flex feature.
while i < curveCnt:
curX = argList[0]
curY = argList[1]
showX, showY = convertCoords(curX, curY)
newPoint = XMLElement(
"point", {"x": "%s" % showX, "y": "%s" % showY})
outlineItem.append(newPoint)
curX = argList[2]
curY = argList[3]
showX, showY = convertCoords(curX, curY)
newPoint = XMLElement(
"point", {"x": "%s" % showX, "y": "%s" % showY})
outlineItem.append(newPoint)
curX = argList[4]
curY = argList[5]
showX, showY = convertCoords(curX, curY)
opName = 'curve'
newPoint = XMLElement(
"point", {
"x": f"{showX}", "y": f"{showY}", "type": opName})
outlineItem.append(newPoint)
opList.append([opName, curX, curY])
opIndex += 1
if i == 0:
argList = argList[6:12]
i += 1
# attach the point name to the first point of the first curve.
outlineItem[-6].set(kPointName, flexPointName)
if newHintMaskName is not None:
# We have a hint mask that we want to attach to the first
# point of the flex op. However, there is already a flex
# name in that attribute. What we do is set the flex point
# name into the hint mask.
hintMask.pointName = flexPointName
newHintMaskName = None
argList = []
elif token == "flx":
inPreFlex = False
flexPointName = kBaseFlexName + str(opIndex).zfill(4)
flexList.append(flexPointName)
curveCnt = 2
i = 0
# The first 12 args are the 6 args for each of the two curves
# that make up the flex feature.
while i < curveCnt:
curX += argList[0]
curY += argList[1]
showX, showY = convertCoords(curX, curY)
newPoint = XMLElement(
"point", {"x": "%s" % showX, "y": "%s" % showY})
outlineItem.append(newPoint)
curX += argList[2]
curY += argList[3]
showX, showY = convertCoords(curX, curY)
newPoint = XMLElement(
"point", {"x": "%s" % showX, "y": "%s" % showY})
outlineItem.append(newPoint)
curX += argList[4]
curY += argList[5]
showX, showY = convertCoords(curX, curY)
opName = 'curve'
newPoint = XMLElement(
"point", {
"x": f"{showX}", "y": f"{showY}", "type": opName})
outlineItem.append(newPoint)
opList.append([opName, curX, curY])
opIndex += 1
if i == 0:
argList = argList[6:12]
i += 1
# attach the point name to the first point of the first curve.
outlineItem[-6].set(kPointName, flexPointName)
if newHintMaskName is not None:
# We have a hint mask that we want to attach to the first
# point of the flex op. However, there is already a flex name
# in that attribute. What we do is set the flex point name
# into the hint mask.
hintMask.pointName = flexPointName
newHintMaskName = None
argList = []
elif token == "sc":
pass
elif token == "cp":
pass
elif token == "ed":
pass
else:
if inPreFlex and (token[-2:] == "mt"):
continue
if token[-2:] in ["mt", "dt", "ct", "cv"]:
opIndex += 1
else:
print("Unhandled operation %s %s" % (argList, token))
raise BezParseError(
"Unhandled operation: '%s' '%s'.", argList, token)
dx = dy = 0
opName = bezToUFOPoint[token]
if token[-2:] in ["mt", "dt"]:
if token in ["mt", "dt"]:
curX = argList[0]
curY = argList[1]
else:
if token in ["rmt", "rdt"]:
dx = argList[0]
dy = argList[1]
elif token in ["hmt", "hdt"]:
dx = argList[0]
| |
(features, labels)
tuple that corresponds to a batch of CSV rows. The features dictionary
maps feature column names to `Tensor`s containing the corresponding
feature data, and labels is a `Tensor` containing the batch's label data.
Args:
file_pattern: List of files or patterns of file paths containing CSV
records. See `tf.gfile.Glob` for pattern rules.
batch_size: An int representing the number of records to combine
in a single batch.
column_names: An optional list of strings that corresponds to the CSV
columns, in order. One per column of the input record. If this is not
provided, infers the column names from the first row of the records.
These names will be the keys of the features dict of each dataset element.
column_defaults: A optional list of default values for the CSV fields. One
item per selected column of the input record. Each item in the list is
either a valid CSV dtype (float32, float64, int32, int64, or string), or a
`Tensor` with one of the aforementioned types. The tensor can either be
a scalar default value (if the column is optional), or an empty tensor (if
the column is required). If a dtype is provided instead of a tensor, the
column is also treated as required. If this list is not provided, tries
to infer types based on reading the first num_rows_for_inference rows of
files specified, and assumes all columns are optional, defaulting to `0`
for numeric values and `""` for string values. If both this and
`select_columns` are specified, these must have the same lengths, and
`column_defaults` is assumed to be sorted in order of increasing column
index.
label_name: A optional string corresponding to the label column. If
provided, the data for this column is returned as a separate `Tensor` from
the features dictionary, so that the dataset complies with the format
expected by a `tf.Estimator.train` or `tf.Estimator.evaluate` input
function.
select_columns: An optional list of integer indices or string column
names, that specifies a subset of columns of CSV data to select. If
column names are provided, these must correspond to names provided in
`column_names` or inferred from the file header lines. When this argument
is specified, only a subset of CSV columns will be parsed and returned,
corresponding to the columns specified. Using this results in faster
parsing and lower memory usage. If both this and `column_defaults` are
specified, these must have the same lengths, and `column_defaults` is
assumed to be sorted in order of increasing column index.
field_delim: An optional `string`. Defaults to `","`. Char delimiter to
separate fields in a record.
use_quote_delim: An optional bool. Defaults to `True`. If false, treats
double quotation marks as regular characters inside of the string fields.
na_value: Additional string to recognize as NA/NaN.
header: A bool that indicates whether the first rows of provided CSV files
correspond to header lines with column names, and should not be included
in the data.
num_epochs: An int specifying the number of times this dataset is repeated.
If None, cycles through the dataset forever.
shuffle: A bool that indicates whether the input should be shuffled.
shuffle_buffer_size: Buffer size to use for shuffling. A large buffer size
ensures better shuffling, but increases memory usage and startup time.
shuffle_seed: Randomization seed to use for shuffling.
prefetch_buffer_size: An int specifying the number of feature
batches to prefetch for performance improvement. Recommended value is the
number of batches consumed per training step. Defaults to auto-tune.
num_parallel_reads: Number of threads used to read CSV records from files.
If >1, the results will be interleaved.
sloppy: If `True`, reading performance will be improved at
the cost of non-deterministic ordering. If `False`, the order of elements
produced is deterministic prior to shuffling (elements are still
randomized if `shuffle=True`. Note that if the seed is set, then order
of elements after shuffling is deterministic). Defaults to `False`.
num_rows_for_inference: Number of rows of a file to use for type inference
if record_defaults is not provided. If None, reads all the rows of all
the files. Defaults to 100.
compression_type: (Optional.) A `tf.string` scalar evaluating to one of
`""` (no compression), `"ZLIB"`, or `"GZIP"`. Defaults to no compression.
Returns:
A dataset, where each element is a (features, labels) tuple that corresponds
to a batch of `batch_size` CSV rows. The features dictionary maps feature
column names to `Tensor`s containing the corresponding column data, and
labels is a `Tensor` containing the column data for the label column
specified by `label_name`.
Raises:
ValueError: If any of the arguments is malformed.
"""
# Create dataset of all matching filenames
filenames = _get_file_names(file_pattern, False)
dataset = dataset_ops.Dataset.from_tensor_slices(filenames)
if shuffle:
dataset = dataset.shuffle(len(filenames), shuffle_seed)
# Clean arguments; figure out column names and defaults
if column_names is None:
if not header:
raise ValueError("Cannot infer column names without a header line.")
# If column names are not provided, infer from the header lines
column_names = _infer_column_names(filenames, field_delim, use_quote_delim)
if len(column_names) != len(set(column_names)):
raise ValueError("Cannot have duplicate column names.")
if select_columns is not None:
select_columns = _get_sorted_col_indices(select_columns, column_names)
if column_defaults is not None:
column_defaults = [
constant_op.constant([], dtype=x) if x in _ACCEPTABLE_CSV_TYPES else x
for x in column_defaults
]
else:
# If column defaults are not provided, infer from records at graph
# construction time
column_defaults = _infer_column_defaults(
filenames, len(column_names), field_delim, use_quote_delim, na_value,
header, num_rows_for_inference, select_columns)
if select_columns is not None and len(column_defaults) != len(select_columns):
raise ValueError(
"If specified, column_defaults and select_columns must have same "
"length."
)
if select_columns is not None and len(column_names) > len(select_columns):
# Pick the relevant subset of column names
column_names = [column_names[i] for i in select_columns]
if label_name is not None and label_name not in column_names:
raise ValueError("`label_name` provided must be one of the columns.")
def filename_to_dataset(filename):
return CsvDataset(
filename,
record_defaults=column_defaults,
field_delim=field_delim,
use_quote_delim=use_quote_delim,
na_value=na_value,
select_cols=select_columns,
header=header,
compression_type=compression_type,
)
def map_fn(*columns):
"""Organizes columns into a features dictionary.
Args:
*columns: list of `Tensor`s corresponding to one csv record.
Returns:
An OrderedDict of feature names to values for that particular record. If
label_name is provided, extracts the label feature to be returned as the
second element of the tuple.
"""
features = collections.OrderedDict(zip(column_names, columns))
if label_name is not None:
label = features.pop(label_name)
return features, label
return features
# Read files sequentially (if num_parallel_reads=1) or in parallel
dataset = dataset.apply(
interleave_ops.parallel_interleave(
filename_to_dataset, cycle_length=num_parallel_reads, sloppy=sloppy))
dataset = _maybe_shuffle_and_repeat(
dataset, num_epochs, shuffle, shuffle_buffer_size, shuffle_seed)
# Apply batch before map for perf, because map has high overhead relative
# to the size of the computation in each map.
# NOTE(mrry): We set `drop_remainder=True` when `num_epochs is None` to
# improve the shape inference, because it makes the batch dimension static.
# It is safe to do this because in that case we are repeating the input
# indefinitely, and all batches will be full-sized.
dataset = dataset.batch(batch_size=batch_size,
drop_remainder=num_epochs is None)
dataset = dataset_ops.MapDataset(
dataset, map_fn, use_inter_op_parallelism=False)
dataset = dataset.prefetch(prefetch_buffer_size)
return dataset
# Default read-ahead buffer size for the CSV reader, in bytes.
_DEFAULT_READER_BUFFER_SIZE_BYTES = 4 * 1024 * 1024  # 4 MB
class CsvDataset(dataset_ops.DatasetSource):
"""A Dataset comprising lines from one or more CSV files."""
def __init__(self,
filenames,
record_defaults,
compression_type=None,
buffer_size=None,
header=False,
field_delim=",",
use_quote_delim=True,
na_value="",
select_cols=None):
"""Creates a `CsvDataset` by reading and decoding CSV files.
The elements of this dataset correspond to records from the file(s).
RFC 4180 format is expected for CSV files
(https://tools.ietf.org/html/rfc4180)
Note that we allow leading and trailing spaces with int or float field.
For example, suppose we have a file 'my_file0.csv' with four CSV columns of
different data types:
```
abcdefg,4.28E10,5.55E6,12
hijklmn,-5.3E14,,2
```
We can construct a CsvDataset from it as follows:
```python
dataset = tf.contrib.data.CsvDataset(
"my_file*.csv",
[tf.float32, # Required field, use dtype or empty tensor
tf.constant([0.0], dtype=tf.float32), # Optional field, default to 0.0
tf.int32, # Required field, use dtype or empty tensor
],
select_cols=[1,2,3] # Only parse last three columns
)
```
The expected output of its iterations is:
```python
next_element = dataset.make_one_shot_iterator().get_next()
with tf.Session() | |
<gh_stars>10-100
# Copyright 2015, 2016 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tasks around Cluster/SharedStoragePool."""
from oslo_log import log as logging
from random import randint
import time
import uuid
import pypowervm.const as c
from pypowervm.i18n import _
import pypowervm.tasks.storage as tsk_stg
import pypowervm.util as u
import pypowervm.wrappers.cluster as clust
from pypowervm.wrappers import job
import pypowervm.wrappers.storage as stor
# Module-level logger.
LOG = logging.getLogger(__name__)
# LU type used when searching for image LUs.
IMGTYP = stor.LUType.IMAGE
# Size of a marker LU — presumably in GB, matching pypowervm LU sizing; confirm.
MKRSZ = 0.001
# Bounds for the randomized sleep between upload-progress polls —
# NOTE(review): presumably seconds; verify against the polling loop.
SLEEP_U_MIN = 30
SLEEP_U_MAX = 60
def crt_cluster_ssp(clust_name, ssp_name, repos_pv, first_node, data_pv_list):
    """Create a Cluster/SharedStoragePool via the ClusterCreate Job.

    The ClusterCreate Job accepts two parameters, clusterXml and sspXml,
    each of which is the XML serialization of the corresponding wrapper.

    :param clust_name: String name for the Cluster.
    :param ssp_name: String name for the SharedStoragePool.
    :param repos_pv: storage.PV representing the repository hdisk.  The name
                     and udid properties must be specified.
    :param first_node: cluster.Node representing the initial VIOS in the
                       cluster.  (Cluster creation must be done with a single
                       node; other nodes may be added later.)  The Node
                       wrapper must contain either
                       - mtms, lpar_id, AND hostname; or
                       - vios_uri
                       The indicated node must be able to see each disk.
    :param data_pv_list: Iterable of storage.PV instances to use as the data
                         volume(s) for the SharedStoragePool.
    :return: The completed Job wrapper.
    """
    adapter = repos_pv.adapter
    # Fetch the ClusterCreate Job template from the API.
    template_resp = adapter.read(clust.Cluster.schema_type,
                                 suffix_type=c.SUFFIX_TYPE_DO,
                                 suffix_parm='Create')
    cluster_job = job.Job.wrap(template_resp.entry)
    # Build the wrappers whose XML becomes the Job's two parameters.
    cluster_wrap = clust.Cluster.bld(adapter, clust_name, repos_pv, first_node)
    ssp_wrap = stor.SSP.bld(adapter, ssp_name, data_pv_list)
    # Each Job parameter is CDATA containing the XML of the wrapper above.
    cluster_parm = cluster_job.create_job_parameter(
        'clusterXml', cluster_wrap.toxmlstring(), cdata=True)
    ssp_parm = cluster_job.create_job_parameter(
        'sspXml', ssp_wrap.toxmlstring(), cdata=True)
    cluster_job.run_job(None, job_parms=[cluster_parm, ssp_parm])
    return cluster_job
def _find_lus(tier, luname):
    """Return the image LUs on a Tier whose names contain luname.

    :param tier: Tier EntryWrapper representing the Tier to search.
    :param luname: The LU name substring to search for.
    :return: List of all LUs in the tier that are a) of type image; and
             b) have names containing luname.
    """
    return [image_lu
            for image_lu in stor.LUEnt.search(tier.adapter, parent=tier,
                                              lu_type=IMGTYP)
            if luname in image_lu.name]
def _upload_in_progress(lus, luname, first):
"""Detect whether another host has an upload is in progress.
:param lus: List of LUs to be considered (i.e. whose names contain the name
of the LU we intend to upload).
:param luname: The name of the LU we intend to upload.
:param first: Boolean indicating whether this is this the first time we
detected an upload in progress. Should be True the first
and until the first time this method returns True.
Thereafter, should be False.
:return: True if another host has an upload in progress; False otherwise.
"""
mkr_lus = [lu for lu in lus
if lu.name != luname and lu.name.endswith(luname)]
if mkr_lus:
# Info the first time; debug thereafter to avoid flooding the log.
if first:
LOG.info(_('Waiting for in-progress upload(s) to complete. '
'Marker LU(s): %s'),
str([lu.name for lu in mkr_lus]))
else:
LOG.debug('Waiting for in-progress upload(s) to complete. '
'Marker LU(s): %s',
str([lu.name for lu in mkr_lus]))
return True
return False
def _upload_conflict(tier, luname, mkr_luname):
    """Detect an upload conflict with another host (our thread should bail).

    :param tier: Tier EntryWrapper representing the Tier to search.
    :param luname: The name of the LU we intend to upload.
    :param mkr_luname: The name of the marker LU we use to signify our upload
                       is in progress.
    :return: True if we find a winning conflict and should abandon our upload;
             False otherwise.
    """
    # Refetch the feed.  We must do this in case one or more other threads
    # created their marker LU since our last feed GET.
    lus = _find_lus(tier, luname)
    # If someone else already started the actual upload (an LU with the
    # exact name exists), we clean up and wait for that one.
    for lu in lus:
        if lu.name == luname:
            LOG.info(_('Abdicating in favor of in-progress upload.'))
            return True
    # The lus list should be all markers at this point.  If ours is the only
    # one, there is no conflict.
    if len(lus) <= 1:
        return False
    # Multiple markers: the first by alphabetical sort wins.
    winner = min(lu.name for lu in lus)
    if winner == mkr_luname:
        return False
    # We lose.  The caller deletes our LU; other losers take care of theirs.
    LOG.info(_('Abdicating upload in favor of marker %s.'),
             winner)
    return True
def get_or_upload_image_lu(tier, luname, vios_uuid, io_handle, b_size,
                           upload_type=tsk_stg.UploadType.IO_STREAM_BUILDER):
    """Ensures our SSP has an LU containing the specified image.

    If an LU of type IMAGE with the specified luname already exists in our SSP,
    return it.  Otherwise, create it, prime it with the image contents provided
    via stream_func, and return it.

    This method assumes that consumers employ a naming convention such that an
    LU with a given name represents the same data (size and content) no matter
    where/when it's created/uploaded - for example, by including the image's
    MD5 checksum in the name.

    This method is designed to coordinate the upload of a particular image LU
    across multiple hosts which use the same SSP, but otherwise can not
    communicate with each other.  Coordination happens entirely through
    specially-named "marker" LUs in the Tier: a host announces its intent to
    upload by creating a marker LU named 'part<8 random hex>' + luname, and
    ties between simultaneous uploaders are broken by alphabetical order of
    the marker names (see _upload_conflict).

    :param tier: Tier EntryWrapper of the Shared Storage Pool Tier on which the
                 image LU is to be hosted.
    :param luname: The name of the image LU.  Note that the name may be
                   shortened to satisfy length restrictions.
    :param vios_uuid: The UUID of the Virtual I/O Server through which the
                      upload should be performed, if necessary.
    :param io_handle: The I/O handle (as defined by the upload_type).  This is
                      only used if the image_lu needs to be uploaded.
    :param b_size: Integer size, in bytes, of the image provided by
                   stream_func's return value.
    :param upload_type: (Optional, Default: IO_STREAM_BUILDER) Defines the way
                        in which the LU should be uploaded.  Refer to the
                        UploadType enumeration for valid upload mechanisms.
                        It defaults to IO_STREAM_BUILDER for legacy reasons.
    :return: LUEnt EntryWrapper representing the image LU.
    """
    # Marker (upload-in-progress) LU name prefixed with 'partxxxxxxxx'.
    # The random hex suffix makes this host's marker unique.
    prefix = 'part%s' % uuid.uuid4().hex[:8]
    # Ensure the marker LU name won't be too long
    luname = u.sanitize_file_name_for_api(
        luname, max_len=c.MaxLen.FILENAME_DEFAULT - len(prefix))
    mkr_luname = prefix + luname
    first = True
    # Poll until either the finished image LU appears or we win the right
    # to perform the upload ourselves.
    while True:
        # (Re)fetch the list of image LUs whose name *contains* luname.
        lus = _find_lus(tier, luname)

        # Does the LU already exist in its final, uploaded form?  If so, then
        # only that LU will exist, with an exact name match.
        if len(lus) == 1 and lus[0].name == luname:
            LOG.info(_('Using already-uploaded image LU %s.'), luname)
            return lus[0]

        # Is there an upload in progress?
        if _upload_in_progress(lus, luname, first):
            first = False
            _sleep_for_upload()
            continue

        # No upload in progress (at least as of when we grabbed the feed).
        LOG.info(_('Creating marker LU %s'), mkr_luname)
        tier, mkrlu = tsk_stg.crt_lu(tier, mkr_luname, MKRSZ, typ=IMGTYP)

        # We must remove the marker LU if
        # a) anything fails beyond this point; or
        # b) we successfully upload the image LU.
        try:
            # If another process (possibly on another host) created a marker LU
            # at the same time, there could be multiple marker LUs out there.
            # We all use _upload_conflict to decide which one of us gets to do
            # the upload.
            if _upload_conflict(tier, luname, mkr_luname):
                _sleep_for_upload()
                continue

            # Okay, we won.  Do the actual upload.
            LOG.info(_('Uploading to image LU %(lu)s (marker %(mkr)s).'),
                     {'lu': luname, 'mkr': mkr_luname})
            # Create the new Logical Unit.  The LU size needs to be decimal GB.
            tier, new_lu = tsk_stg.crt_lu(
                tier, luname, u.convert_bytes_to_gb(b_size, dp=2), typ=IMGTYP)
            try:
                tsk_stg.upload_lu(vios_uuid, new_lu, io_handle, b_size,
                                  upload_type=upload_type)
            except Exception as exc:
                LOG.exception(exc)
                # We need to remove the LU so it doesn't block others
                # attempting to use the same one.
                # NOTE(review): the exception is logged twice (here and just
                # above) - presumably intentional to pair the traceback with
                # the cleanup message, but worth confirming.
                LOG.exception(_('Removing failed LU %s.'), luname)
                new_lu.delete()
                raise
            return new_lu
        finally:
            # Signal completion, or clean up, by removing the marker LU.
            mkrlu.delete()
def _sleep_for_upload():
"""Sleeps if a conflict was | |
clenshaw_curtis_in_polynomial_order)
sparse_grid.set_function(function)
num_refinement_steps = 10
priority_dict = dict()
active_subspace_indices,I = get_active_subspace_indices(
sparse_grid.active_subspace_indices_dict,
sparse_grid.subspace_indices)
for ii in range(active_subspace_indices.shape[1]):
subspace_index = active_subspace_indices[:,ii]
# use dummy value of 1 for refinement indicator
priority,error = refinement_indicator(subspace_index,1,sparse_grid)
key = hash_array(subspace_index)
if key in priority_dict:
assert np.allclose(priority_dict[key],priority)
else:
priority_dict[key]=priority
for jj in range(num_refinement_steps):
sparse_grid.refine()
active_subspace_indices,I = get_active_subspace_indices(
sparse_grid.active_subspace_indices_dict,
sparse_grid.subspace_indices)
for ii in range(active_subspace_indices.shape[1]):
subspace_index = active_subspace_indices[:,ii]
# use dummy value of 1 for refinement indicator
priority,error=refinement_indicator(subspace_index,1,sparse_grid)
key = hash_array(subspace_index)
if key in priority_dict:
assert np.allclose(priority_dict[key],priority)
else:
priority_dict[key]=priority
def test_polynomial_quadrature_order_accuracy(self):
    """Check that the mean/variance of a degree-9 polynomial agree when
    computed three ways: Gauss-Jacobi quadrature of a Clenshaw-Curtis
    interpolant, Clenshaw-Curtis quadrature of the raw values, and the
    PCE coefficients of the interpolant.
    """
    from pyapprox.orthonormal_polynomials_1d import \
        evaluate_orthonormal_polynomial_1d
    level = 2
    # Jacobi parameters alpha=beta=0 correspond to the uniform weight.
    alpha = 0
    beta = 0
    cc_x,cc_w = clenshaw_curtis_pts_wts_1D(level)
    degree = 9
    ab = jacobi_recurrence(
        degree+1,alpha=alpha,beta=beta,probability=True)
    #cc_x,cc_w = gauss_quadrature(ab,cc_x.shape[0])
    def function(x):
        # Evaluate the polynomial with all coefficients equal to one.
        p = evaluate_orthonormal_polynomial_1d(x, degree, ab)
        return p.sum(axis=1)
    gauss_x,gauss_w = gauss_quadrature(ab,degree+1)
    # Compute the interpolant using Clenshaw-Curtis samples.
    vandermonde = evaluate_orthonormal_polynomial_1d(
        cc_x, cc_x.shape[0]-1, ab)
    values = function(cc_x)
    coeff = np.linalg.lstsq(vandermonde,values,rcond=None)[0]
    #print coeff.shape
    # Integrate the interpolant using Gauss-Jacobi quadrature.
    vandermonde = evaluate_orthonormal_polynomial_1d(
        gauss_x, cc_x.shape[0]-1, ab)
    interp_values = np.dot(vandermonde,coeff)
    gauss_mean = np.dot(interp_values,gauss_w)
    gauss_variance = np.dot(interp_values**2,gauss_w)-gauss_mean**2
    cc_mean = np.dot(values,cc_w)
    cc_variance = np.dot(values**2,cc_w)-cc_mean**2
    # By orthonormality, the PCE mean is the constant coefficient and the
    # PCE variance is the sum of squares of the remaining coefficients.
    pce_mean = coeff[0]
    pce_variance = np.sum(coeff[1:]**2)
    assert np.allclose(gauss_mean, cc_mean)
    assert np.allclose(gauss_mean, pce_mean)
    assert np.allclose(gauss_variance, pce_variance)
    # NOTE(review): exact_variance is computed but never asserted against;
    # cc_variance is likewise unchecked - presumably because the level-2
    # Clenshaw-Curtis rule cannot integrate the squared degree-9 polynomial
    # exactly.  Confirm before adding assertions.
    exact_variance = degree # is the sum of the coefficients which are all 1
    #print gauss_variance,exact_variance,cc_variance, pce_variance
def test_convert_sparse_grid_to_pce(self):
    """Build an isotropic adaptive sparse grid, convert it to a polynomial
    chaos expansion, and verify that the PCE has exactly the sparse grid's
    polynomial terms and reproduces its values at the grid samples.  Also
    round-trips the grid through pickle-based save/load.
    """
    num_vars=2
    max_level=2
    max_level_1d=[max_level]*(num_vars)
    max_num_sparse_grid_samples=None
    error_tol=None
    admissibility_function = partial(
        max_level_admissibility_function,max_level,max_level_1d,
        max_num_sparse_grid_samples,error_tol)
    refinement_indicator = variance_refinement_indicator
    sparse_grid = CombinationSparseGrid(num_vars)
    sparse_grid.set_refinement_functions(
        refinement_indicator,admissibility_function,
        clenshaw_curtis_rule_growth)
    sparse_grid.set_univariate_rules(clenshaw_curtis_in_polynomial_order)
    sparse_grid.set_function(function_I)
    # Refine until no active subspaces remain (the second condition lets
    # the loop start when the grid is still empty).
    while(not sparse_grid.active_subspace_queue.empty() or
          sparse_grid.subspace_indices.shape[1]==0):
        sparse_grid.refine()
    from scipy.stats import uniform
    # uniform(-1,2) is U[-1,1]: scipy's (loc, scale) parameterization.
    var_trans = define_iid_random_variable_transformation(
        uniform(-1,2),num_vars)
    pce_opts = {'poly_type':'jacobi','alpha_poly':0.,'beta_poly':0.,
                'var_trans':var_trans}
    pce = convert_sparse_grid_to_polynomial_chaos_expansion(
        sparse_grid,pce_opts)
    # Check that the sparse grid and the pce have the same polynomial terms.
    assert len(sparse_grid.poly_indices_dict)==pce.indices.shape[1]
    for index in pce.indices.T:
        assert hash_array(index) in sparse_grid.poly_indices_dict
    # The PCE must interpolate the sparse grid values at the grid samples.
    pce_vals = pce(sparse_grid.samples)
    assert np.allclose(pce_vals,sparse_grid.values)
    # Round-trip the grid through save/load and check equality.
    filename = 'sparse-grid-test.pkl'
    sparse_grid.save(filename)
    sparse_grid_from_file = pickle.load(open(filename,'rb'))
    assert sparse_grid_from_file == sparse_grid
    os.remove(filename)
def economical_quad_rules_helper(self,selected_variables_idx,
                                 all_univariate_variables,all_sp_variables,
                                 all_ranges,all_weight_functions,
                                 max_level,growth_rules=None):
    """Build a sparse grid for a subset of variables using "economical"
    quadrature rules (one rule shared by identically-distributed
    variables), check its mean against symbolic integration, and return
    the unique-quadrule grouping for the caller to assert on.

    :param selected_variables_idx: indices selecting which of the supplied
        variables to use (order defines the grid's variable order).
    :param all_univariate_variables: scipy.stats frozen distributions.
    :param all_sp_variables: sympy symbols, parallel to the distributions.
    :param all_ranges: flat list of (lb, ub) pairs, 2 entries per variable.
    :param all_weight_functions: sympy density expressions, parallel.
    :param max_level: maximum sparse grid level in every dimension.
    :param growth_rules: optional growth rule(s) forwarded to
        get_sparse_grid_univariate_leja_quadrature_rules_economical.
    :return: unique_quadrule_indices - list of groups of variable indices
        that share a single quadrature rule.
    """
    def function(x):
        # Two QoI: sum of (x+1)^2 and sum of (x-2)^2 over all dimensions.
        vals = np.hstack((
            np.sum((x+1)**2,axis=0)[:,np.newaxis],
            np.sum((x-2)**2,axis=0)[:,np.newaxis]))
        return vals

    # Slice out the selected variables, symbols, ranges and densities.
    univariate_variables = []
    variables = []
    ranges = np.empty(2*selected_variables_idx.shape[0])
    weight_functions = []
    for ii in range(len(selected_variables_idx)):
        index = selected_variables_idx[ii]
        univariate_variables.append(all_univariate_variables[index])
        variables.append(all_sp_variables[index])
        ranges[2*ii:2*(ii+1)] = all_ranges[2*index:2*(index+1)]
        weight_functions.append(all_weight_functions[index])
    variable = IndependentMultivariateRandomVariable(univariate_variables)
    var_trans = AffineRandomVariableTransformation(variable)

    num_vars=len(univariate_variables)
    max_level_1d=[max_level]*(num_vars)
    quad_rules, growth_rules, unique_quadrule_indices = \
        get_sparse_grid_univariate_leja_quadrature_rules_economical(
            var_trans,growth_rules)
    assert len(quad_rules)==len(growth_rules)
    max_num_sparse_grid_samples=None
    error_tol=None
    admissibility_function = partial(
        max_level_admissibility_function,max_level,max_level_1d,
        max_num_sparse_grid_samples,error_tol)
    refinement_indicator = variance_refinement_indicator
    sparse_grid = CombinationSparseGrid(num_vars)
    sparse_grid.set_refinement_functions(
        refinement_indicator,admissibility_function,growth_rules,
        unique_quadrule_indices=unique_quadrule_indices)
    sparse_grid.set_univariate_rules(quad_rules)
    sparse_grid.set_function(function,var_trans)
    # Refine until no active subspaces remain.
    while(not sparse_grid.active_subspace_queue.empty() or
          sparse_grid.subspace_indices.shape[1]==0):
        sparse_grid.refine()
    #plt.plot(sparse_grid.samples[0,:],sparse_grid.samples[1,:],'o')
    #plt.show()

    # Multivariate symbolic integration takes too long, so exploit
    # independence and break the mean into a sum of 1D integrals.
    exact_mean = np.zeros(2)
    for ii in range(len(variables)):
        exact_mean[0] += float(
            sp.integrate(weight_functions[ii]*(variables[ii]+1)**2,
                         (variables[ii],ranges[2*ii],ranges[2*ii+1])))
        # Sanity check: each density must integrate to one on its range.
        assert np.allclose(1.,float(sp.integrate(weight_functions[ii],
            (variables[ii],ranges[2*ii],ranges[2*ii+1]))))
        exact_mean[1] += float(
            sp.integrate(weight_functions[ii]*(variables[ii]-2)**2,
                         (variables[ii],ranges[2*ii],ranges[2*ii+1])))
    assert np.allclose(exact_mean,sparse_grid.moments()[0])
    return unique_quadrule_indices
def test_economical_quad_rules(self):
    """Check that economical quadrature-rule construction groups
    identically-distributed variables onto a single shared rule, for
    several variable subsets and orderings.

    Dead code removed: a dict-based ``univariate_variables`` list
    (``beta_var0`` etc.) was built and then immediately overwritten by the
    scipy.stats-based list below, so the dicts were never used.
    """
    alpha_stat1, beta_stat1 = 2, 2
    alpha_stat2, beta_stat2 = 3, 3
    from scipy.stats import beta, norm
    # Five variables; 0, 1 and 4 share identical distributions up to
    # support (beta(2,2)), 2 and 3 are distinct.
    univariate_variables = [
        beta(alpha_stat1, beta_stat1), beta(alpha_stat1, beta_stat1, -1, 2),
        beta(alpha_stat2, beta_stat2, -1, 2), norm(),
        beta(alpha_stat1, beta_stat1, -1, 2)]

    # Symbolic densities used by the helper to compute exact means.
    v, w, x, y = sp.Symbol('v'), sp.Symbol('w'), sp.Symbol('x'), sp.Symbol('y')
    z = sp.Symbol('z')
    from pyapprox.utilities import beta_pdf_on_ab, gaussian_pdf
    weight_function_v = beta_pdf_on_ab(alpha_stat1, beta_stat1, 0, 1, v)
    weight_function_w = beta_pdf_on_ab(alpha_stat1, beta_stat1, -1, 1, w)
    weight_function_x = beta_pdf_on_ab(alpha_stat2, beta_stat2, -1, 1, x)
    weight_function_y = gaussian_pdf(0, 1, y, package=sp)
    weight_function_z = beta_pdf_on_ab(alpha_stat1, beta_stat1, -1, 1, z)
    ranges = [0, 1, -1, 1, -1, 1, -sp.oo, sp.oo, -1, 1]
    sp_variables = [v, w, x, y, z]
    weight_functions = [
        weight_function_v, weight_function_w, weight_function_x,
        weight_function_y, weight_function_z]

    # Two variables with the same distribution share one rule.
    selected_variables_idx = np.asarray([0, 1])
    unique_quadrule_indices = self.economical_quad_rules_helper(
        selected_variables_idx, univariate_variables, sp_variables,
        ranges, weight_functions, 1, clenshaw_curtis_rule_growth)
    assert lists_of_arrays_equal(unique_quadrule_indices, [[0, 1]])

    # Only one type of growth rule can be specified; passing two must raise.
    selected_variables_idx = np.asarray([0, 1])
    self.assertRaises(Exception, self.economical_quad_rules_helper,
                      selected_variables_idx, univariate_variables,
                      sp_variables, ranges, weight_functions, 2,
                      [clenshaw_curtis_rule_growth, leja_growth_rule])

    # Mixed subset: variables 0 and 1 (last two positions) share a rule.
    selected_variables_idx = np.asarray([2, 3, 0, 1])
    unique_quadrule_indices = self.economical_quad_rules_helper(
        selected_variables_idx, univariate_variables, sp_variables,
        ranges, weight_functions, 2)
    assert lists_of_arrays_equal(unique_quadrule_indices, [[0], [1], [2, 3]])

    # Variables 1 and 4 are identical and end up grouped.
    selected_variables_idx = np.asarray([1, 2, 3, 4])
    unique_quadrule_indices = self.economical_quad_rules_helper(
        selected_variables_idx, univariate_variables, sp_variables,
        ranges, weight_functions, 2)
    assert lists_of_arrays_equal(unique_quadrule_indices, [[0, 3], [1], [2]])

    # All five variables: 0, 1 and 4 grouped, 2 and 3 distinct.
    selected_variables_idx = np.asarray([0, 1, 2, 3, 4])
    unique_quadrule_indices = self.economical_quad_rules_helper(
        selected_variables_idx, univariate_variables, sp_variables,
        ranges, weight_functions, 2)
    assert lists_of_arrays_equal(unique_quadrule_indices,
                                 [[0, 1, 4], [2], [3]])
@skiptest
def test_economical_quad_rules_config_variables(self):
    """Placeholder: copy test_economical_quad_rules and make sure it passes
    when configure variables are added.

    Renamed from ``test_economical_quad_rules``: the previous duplicate
    name shadowed the real test defined earlier in this class, so the real
    test was silently never run by unittest discovery.
    """
    raise Exception
def test_convert_sparse_grid_to_pce_mixed_basis(self):
    """Build a sparse grid over a beta and a normal variable, convert it
    to a mixed Jacobi/Hermite PCE, and verify the PCE reproduces the grid
    values and the exact symbolic mean.

    Dead code removed: ``beta_var`` and ``gaussian_var`` dicts were
    defined but never used (the scipy.stats list below supersedes them).
    """
    def function(x):
        # Two QoI: sum of (x+1)^2 and sum of (x-2)^2 over all dimensions.
        return np.hstack((
            np.sum((x+1)**2, axis=0)[:, np.newaxis],
            np.sum((x-2)**2, axis=0)[:, np.newaxis]))

    num_vars = 2
    max_level = 5
    max_level_1d = [max_level]*(num_vars)

    alpha_stat, beta_stat = 2, 2
    from scipy.stats import beta, norm
    # beta(a, b, -1, 2) is Beta(a, b) shifted/scaled onto [-1, 1].
    univariate_variables = [beta(alpha_stat, beta_stat, -1, 2), norm()]
    variable = IndependentMultivariateRandomVariable(univariate_variables)
    var_trans = AffineRandomVariableTransformation(variable)
    quad_rules, growth_rules, unique_quadrule_indices = \
        get_sparse_grid_univariate_leja_quadrature_rules_economical(
            var_trans)

    max_num_sparse_grid_samples = None
    error_tol = None
    admissibility_function = partial(
        max_level_admissibility_function, max_level, max_level_1d,
        max_num_sparse_grid_samples, error_tol)
    refinement_indicator = variance_refinement_indicator
    sparse_grid = CombinationSparseGrid(num_vars)
    sparse_grid.set_refinement_functions(
        refinement_indicator, admissibility_function, growth_rules,
        unique_quadrule_indices=unique_quadrule_indices)
    sparse_grid.set_univariate_rules(quad_rules)
    sparse_grid.set_function(function)
    # Refine until no active subspaces remain.
    while(not sparse_grid.active_subspace_queue.empty() or
          sparse_grid.subspace_indices.shape[1] == 0):
        sparse_grid.refine()

    # Mixed basis: Jacobi for the beta variable, Hermite for the Gaussian.
    poly_types_opts = {
        'type1': {'poly_type': 'jacobi', 'alpha_poly': beta_stat-1,
                  'beta_poly': alpha_stat-1, 'var_nums': [0]},
        'type2': {'poly_type': 'hermite', 'var_nums': [1]},
    }
    pce_opts = {'var_trans': var_trans, 'poly_types': poly_types_opts}

    pce = convert_sparse_grid_to_polynomial_chaos_expansion(
        sparse_grid, pce_opts)
    # Check that the sparse grid and the pce have the same polynomial terms.
    assert len(sparse_grid.poly_indices_dict) == pce.indices.shape[1]
    for index in pce.indices.T:
        assert hash_array(index) in sparse_grid.poly_indices_dict

    # The PCE must interpolate the sparse grid values at the grid samples.
    pce_vals = pce(sparse_grid.samples)
    assert np.allclose(pce_vals, sparse_grid.values)

    # num_validation_samples=int(1e6)
    # validation_samples = np.vstack((
    #     2*np.random.beta(alpha_stat,beta_stat,(1,num_validation_samples))-1,
    #     np.random.normal(0,1,(1,num_validation_samples))))
    # validation_values = function(validation_samples)
    # print (validation_values.mean(axis=0))

    # Exact mean via symbolic integration against the joint density.
    x, y = sp.Symbol('x'), sp.Symbol('y')
    from pyapprox.utilities import beta_pdf_on_ab, gaussian_pdf
    weight_function_x = beta_pdf_on_ab(alpha_stat, beta_stat, -1, 1, x)
    weight_function_y = gaussian_pdf(0, 1, y, package=sp)
    weight_function = weight_function_x*weight_function_y
    ranges = [-1, 1, -sp.oo, sp.oo]
    exact_mean = [
        float(sp.integrate(weight_function*((x+1)**2+(y+1)**2),
                           (x, ranges[0], ranges[1]),
                           (y, ranges[2], ranges[3]))),
        float(sp.integrate(weight_function*((x-2)**2+(y-2)**2),
                           (x, ranges[0], ranges[1]),
                           (y, ranges[2], ranges[3])))]
    assert np.allclose(exact_mean, pce.mean())
def test_error_based_stopping_criteria(self):
    """Build a sparse grid driven purely by an error tolerance (no level
    cap) and check that the resulting quadrature rule exactly integrates
    all pairwise products of a hyperbolic index set's basis functions,
    i.e. the Gram matrix computed with the extracted rule is diagonal.
    """
    alpha_stat,beta_stat = [1,2]
    num_vars = 2
    level = 2
    from pyapprox.indexing import compute_hyperbolic_indices
    indices = compute_hyperbolic_indices(num_vars,level,.5)
    univariate_quadrature_rule = partial(
        beta_leja_quadrature_rule,alpha_stat,beta_stat,
        growth_rule=leja_growth_rule,samples_filename=None)
    poly = PolynomialChaosExpansion()
    from scipy.stats import uniform
    # uniform(-1,2) is U[-1,1]: scipy's (loc, scale) parameterization.
    var_trans = define_iid_random_variable_transformation(
        uniform(-1,2),num_vars)
    #range_tol=0)
    poly_opts = {'poly_type':'jacobi','alpha_poly':beta_stat-1,
                 'beta_poly':alpha_stat-1,'var_trans':var_trans}
    poly.configure(poly_opts)
    # To generate a quadrature rule that integrates all inner products
    # (i.e. so the Gramian using `indices` is the identity) we must
    # integrate the double set of the index set.
    double_set_indices=get_indices_double_set(indices)
    poly.set_indices(double_set_indices)
    def basis_matrix_function(x):
        # Sum of all basis functions; refining its mean to tolerance
        # forces every basis integral to be resolved.
        vals = np.atleast_2d(
            poly.basis_matrix(x).sum(axis=1)).T
        return vals

    # No level cap: refinement stops only when the 1e-4 error tol is met.
    max_level_1d = None
    max_num_sparse_grid_samples = None
    admissibility_function = partial(
        max_level_admissibility_function,np.inf,max_level_1d,
        max_num_sparse_grid_samples,1e-4)
    refinement_indicator=partial(
        variance_refinement_indicator,normalize=False,mean_only=True)
    sparse_grid = CombinationSparseGrid(num_vars)
    sparse_grid.set_refinement_functions(
        refinement_indicator,admissibility_function,leja_growth_rule)
    sparse_grid.set_univariate_rules(univariate_quadrature_rule)
    sparse_grid.set_function(basis_matrix_function)
    sparse_grid.build()

    samples, weights = extract_sparse_grid_quadrature_rule(sparse_grid)
    poly.set_indices(indices)
    basis_matrix = poly.basis_matrix(samples)
    inner_products = (basis_matrix.T*weights).dot(basis_matrix)
    I = np.where(abs(inner_products)>1e-8)
    # Check only non-zero inner-products are along the diagonal, i.e.
    # for integrals of indices multiplied by themselves.
    assert np.allclose(I,np.tile(np.arange(indices.shape[1]),(2,1)))

    # from pyapprox.visualization import plot_2d_indices, plot_3d_indices
    # plot_2d_indices(
    #     indices,other_indices=[sparse_grid.poly_indices,
    #                            double_set_indices])
    # plt.show()
class MultilevelPolynomialModelConfigureVariableTransformation(object):
    """Transformation for the configure (model-fidelity) variables of the
    MultilevelPolynomialModel: canonical samples are mapped to user space
    by scaling by a factor of two.

    :param nvars: Number of configure variables (rows expected in the
                  sample arrays passed to map_from_canonical_space).
    """

    def __init__(self, nvars):
        self.nvars = nvars

    def map_from_canonical_space(self, canonical_samples):
        """Return ``canonical_samples * 2`` as a new array.

        :param canonical_samples: array of shape (nvars, nsamples).
        :return: New array of the same shape, scaled by 2; the input is
                 left unmodified.
        """
        assert canonical_samples.shape[0] == self.nvars
        # Multiplication already allocates a fresh array, so the explicit
        # .copy() performed previously was redundant and has been removed.
        return canonical_samples * 2

    def num_vars(self):
        """Return the number of configure variables."""
        return self.nvars
class TestAdaptiveMultiIndexSparseGrid(unittest.TestCase):
def test_multi_index_sparse_grid(self):
    """Build a multi-index sparse grid over two random variables plus one
    configure (model-fidelity) variable, then verify both the equivalent-
    cost bookkeeping and that the surrogate reproduces the highest-
    fidelity model on random validation samples.
    """
    num_vars=2
    num_model_levels=3
    model = MultilevelPolynomialModel(num_model_levels)
    # ranges alternate sign: [-2, 2, -2, 2]; canonical ranges [-1, 1, ...].
    ranges = [2*(-1)**(ii+1) for ii in range(2*num_vars)]
    canonical_ranges = [(-1)**(ii+1) for ii in range(2*num_vars)]
    var_trans = AffineBoundedVariableTransformation(canonical_ranges,ranges)
    config_var_trans = \
        MultilevelPolynomialModelConfigureVariableTransformation(1)
    # Validation samples: random variables in the first rows, the highest
    # model level (mapped to user space) in the last row.
    num_validation_samples = 100
    validation_samples = np.random.uniform(
        -1.,1.,(num_vars+1,num_validation_samples))
    validation_samples[:-1,:] = var_trans.map_from_canonical_space(
        validation_samples[:-1,:])
    validation_samples[-1,:]=num_model_levels-1
    validation_samples[-1,:]=config_var_trans.map_from_canonical_space(
        validation_samples[-1:])
    validation_values = model(validation_samples)

    max_level=5
    max_level_1d=[max_level]*(num_vars+1)
    # The configure dimension is capped at the number of model levels.
    max_level_1d[-1]=num_model_levels-1
    max_num_sparse_grid_samples=None
    error_tol=None
    admissibility_function = partial(
        max_level_admissibility_function,max_level,max_level_1d,
        max_num_sparse_grid_samples,error_tol)
    refinement_indicator = variance_refinement_indicator
    cost_function=model.cost_function
    sparse_grid = CombinationSparseGrid(num_vars+1)
    sparse_grid.set_function(model,var_trans)
    sparse_grid.set_config_variable_index(num_vars,config_var_trans)
    sparse_grid.set_refinement_functions(
        refinement_indicator,admissibility_function,
        clenshaw_curtis_rule_growth,cost_function)
    sparse_grid.set_univariate_rules(
        clenshaw_curtis_in_polynomial_order)
    # Refine until no active subspaces remain.
    while(not sparse_grid.active_subspace_queue.empty() or
          sparse_grid.subspace_indices.shape[1]==0):
        sparse_grid.refine()

    # Recompute the equivalent cost from the sample log and check it
    # matches the grid's internal bookkeeping.
    model_level_evals_list = get_num_model_evaluations_from_samples(
        sparse_grid.samples,sparse_grid.num_config_vars)
    model_level_evals = np.asarray(
        model_level_evals_list,dtype=int)[0,:]
    model_ids = np.asarray(model_level_evals_list,dtype=int)[1:,:]
    model_ids = config_var_trans.map_from_canonical_space(model_ids)
    equivalent_costs,total_costs = get_equivalent_cost(
        cost_function,model_level_evals,model_ids)
    assert np.allclose(
        total_costs,sparse_grid.num_equivalent_function_evaluations)
    assert np.allclose(
        sparse_grid.num_equivalent_function_evaluations/total_costs,1)

    approx_values = sparse_grid(validation_samples)
    #print np.linalg.norm(approx_values-validation_values)/np.sqrt(
    #    validation_values.shape[0])
    assert np.allclose(approx_values,validation_values)
def test_online_cost_function(self):
    """
    Test use of work_qoi_index and WorkTracker to determine costs of
    evaluating a model as the sparse grid is built.

    The structure mirrors test_multi_index_sparse_grid, but the cost
    function is measured online by WorkTrackingModel instead of being
    supplied analytically.
    """
    num_vars=2
    num_model_levels=3
    base_model = MultilevelPolynomialModel(num_model_levels,return_work=True)
    from pyapprox.models.wrappers import TimerModelWrapper, WorkTrackingModel
    # TimerModelWrapper is hard to test because wall-clock cost varies
    # between runs.  For a deterministic test, use the polynomial model's
    # own fixed per-level cost (return_work=True above) as the "timer".
    timer_model=base_model
    model = WorkTrackingModel(timer_model,base_model,1)
    ranges = [2*(-1)**(ii+1) for ii in range(2*num_vars)]
    canonical_ranges = [(-1)**(ii+1) for ii in range(2*num_vars)]
    var_trans = AffineBoundedVariableTransformation(canonical_ranges,ranges)
    config_var_trans = \
        MultilevelPolynomialModelConfigureVariableTransformation(1)
    # When computing validation values do not return work,
    # or comparison of validation values with approx values will
    # compare matrices of different sizes.
    num_validation_samples = 100
    validation_samples = np.random.uniform(
        -1.,1.,(num_vars+1,num_validation_samples))
    validation_samples[:-1,:] = var_trans.map_from_canonical_space(
        validation_samples[:-1,:])
    validation_samples[-1,:]=num_model_levels-1
    validation_samples[-1,:]=config_var_trans.map_from_canonical_space(
        validation_samples[-1:])
    validation_values = model(validation_samples)

    max_level=5
    max_level_1d=[max_level]*(num_vars+1)
    # The configure dimension is capped at the number of model levels.
    max_level_1d[-1]=num_model_levels-1
    max_num_sparse_grid_samples=None
    error_tol=None
    admissibility_function = partial(
        max_level_admissibility_function,max_level,max_level_1d,
        max_num_sparse_grid_samples,error_tol)
    refinement_indicator = variance_refinement_indicator
    # Online cost estimate maintained by the WorkTrackingModel.
    cost_function=model.cost_function
    sparse_grid = CombinationSparseGrid(num_vars+1)
    sparse_grid.set_function(model,var_trans)
    sparse_grid.set_config_variable_index(num_vars,config_var_trans)
    sparse_grid.set_refinement_functions(
        refinement_indicator,admissibility_function,
        clenshaw_curtis_rule_growth,cost_function)
    sparse_grid.set_univariate_rules(
        clenshaw_curtis_in_polynomial_order)
    # Refine until no active subspaces remain.
    while(not sparse_grid.active_subspace_queue.empty() or
          sparse_grid.subspace_indices.shape[1]==0):
        sparse_grid.refine()

    # Recompute the equivalent cost from the sample log and check it
    # matches the grid's internal bookkeeping.
    model_level_evals_list = get_num_model_evaluations_from_samples(
        sparse_grid.samples,sparse_grid.num_config_vars)
    model_level_evals = np.asarray(
        model_level_evals_list,dtype=int)[0,:]
    model_ids = np.asarray(model_level_evals_list,dtype=int)[1:,:]
    model_ids = config_var_trans.map_from_canonical_space(model_ids)
    equivalent_costs,total_costs = get_equivalent_cost(
        cost_function,model_level_evals,model_ids)
    #print(total_costs,sparse_grid.num_equivalent_function_evaluations,sparse_grid.num_config_vars)
    assert np.allclose(
        total_costs,sparse_grid.num_equivalent_function_evaluations)
    assert np.allclose(
        sparse_grid.num_equivalent_function_evaluations/total_costs,1)

    approx_values = sparse_grid(validation_samples)
    #print np.linalg.norm(approx_values-validation_values)/np.sqrt(
    #    validation_values.shape[0])
    assert np.allclose(approx_values,validation_values)
def test_convert_multi_index_sparse_grid_to_pce(self):
num_vars=2
num_levels=3
model = MultilevelPolynomialModel(num_levels)
num_validation_samples = 100
validation_samples = np.random.uniform(
-1.,1.,(num_vars+1,num_validation_samples))
validation_samples[-1,:]=num_levels-1
validation_values = model(validation_samples)
max_level=5
max_level_1d=[max_level]*(num_vars+1)
max_level_1d[-1]=num_levels-1
max_num_sparse_grid_samples=None
error_tol=None
admissibility_function = partial(
max_level_admissibility_function,max_level,max_level_1d,
max_num_sparse_grid_samples,error_tol)
cost_function = lambda x: 1.
refinement_indicator = variance_refinement_indicator
sparse_grid = CombinationSparseGrid(num_vars+1)
sparse_grid.set_function(model)
sparse_grid.set_config_variable_index(num_vars)
sparse_grid.set_refinement_functions(
refinement_indicator,admissibility_function,
clenshaw_curtis_rule_growth)
sparse_grid.set_univariate_rules(clenshaw_curtis_in_polynomial_order)
while(not sparse_grid.active_subspace_queue.empty() or
sparse_grid.subspace_indices.shape[1]==0):
sparse_grid.refine()
# the pce will have no knowledge of configure variables.
from scipy.stats import uniform
var_trans = | |
<filename>lib/doekbase/data_api/wsfile.py
"""
Workspace implemented over files, using the mongomock package.
"""
__author__ = '<NAME> <<EMAIL>>'
__date__ = '9/3/15'
# Imports
# Stdlib
try:
import cStringIO as StringIO
except:
import StringIO
from datetime import datetime
import json
import logging
import msgpack
import os
import re
import sys
# Third-party
import mongomock as mm
# Local
from doekbase.data_api.util import get_logger, log_start, log_end
from doekbase.workspace.client import ServerError
# Logging
_log = get_logger(__name__)
# Globals
# Matches numeric workspace references like '123/4' or '123/4/5'.
# Raw string fixes the invalid '\d' escape in the previous non-raw literal
# (DeprecationWarning since Python 3.6, SyntaxWarning since 3.12).
NUMERIC_REF_PAT = re.compile(r'\d+/\d+(/\d+)?')
# Exceptions
class LibError(ServerError):
    """To imitate server errors, raise this with a description
    of the error as the argument.
    """
    def __init__(self, description):
        # Fills in the (name, code, message) positional signature of the
        # real workspace ServerError.  -32500 is presumably the generic
        # JSON-RPC "application error" code used by the KBase workspace
        # service - TODO confirm against the service's error responses.
        super(LibError, self).__init__('ServerError', -32500, description)
# Functions and classes
ws_url_template = 'https://{}.kbase.us/services/ws/'
def workspace_to_file(ref, workspace='narrative', token=None):
    """Fetch a Workspace object and reduce it to the simplified JSON
    record format read by the mongomock module.

    Args:
        ref (str): Workspace object reference e.g. '1019/4/1'
        workspace (str): Name or full URL for workspace to contact
            'narrative' or 'ci' are recognized
        token (str): KBase auth token

    Return:
        (dict) Object in the mock schema
    """
    from doekbase.workspace.client import Workspace
    # Resolve the workspace argument: a full URL is used as-is, a bare
    # name is expanded via the service URL template.
    if workspace.startswith('https://'):
        url = workspace
    else:
        url = ws_url_template.format(workspace)
    # Fall back to the environment for the auth token.
    if token is None:
        token = os.environ.get('KB_AUTH_TOKEN', '')
    if not token:
        raise ValueError('No `token` given and environment does not '
                         'contain value for KB_AUTH_TOKEN')
    client = Workspace(url, token=token)
    fetched = client.get_objects([{'ref': ref}])[0]
    info = fetched['info']
    # Canonical identifiers: <wsid>/<objid>/<version> and <wsname>/<objname>.
    canonical_ref = '{0}/{1}/{2}'.format(info[6], info[0], info[4])
    canonical_name = '{0}/{1}'.format(info[7], info[1])
    # Convert to our simplified schema.
    record = {
        'ref': canonical_ref,
        'type': info[2],
        'name': canonical_name,
        'links': fetched['refs'],
        'data': fetched['data'],
        'metadata': info[10],
    }
    _log.debug('workspace_to_file: returning record for: {}'
               .format(canonical_ref))
    return record
class WorkspaceFile(object):
"""Mock object for KBase Workspace service.
You can use this in place of the doekbase.client.workspace.Workspace class.
To insulate from changes in the workspace storage format, the input
data is in a simplified and reduced schema. The input should be a list
of JSON ojects, separated by commas and bracketed by "[ ]" like a
normal JSON list. Each object should have these fields:
* ref - object reference like "123/4"
* type - Name of the type of this object, e.g. "FooType"
* name - Name of this object, e.g. "ReferenceGenomeAnnotations/kb|g.3157"
* links - List of references, each in the same form as the 'ref' field.
* data - JSON object with the data (whatever you want)
"""
#: Use MessagePack encoding for workspace objects
use_msgpack = True
use_redis = False
_loaded = {} # static cache of loaded refs
#: Version of the workspace we are emulating
VERSION = '0.3.5'
    def __init__(self, working_directory):
        """Create file-based Workspace instance, using files in
        the given working directory.
        Additional files are added with the `load` method.
        Args:
            working_directory (str): Path to directory with files to load.
        """
        # Directory that `load()` resolves references against.
        self._wd = working_directory
        # create client and collection (mongomock in-memory store)
        client = mm.MongoClient()
        self.collection = client.db.collection
        # This monkey-patch avoids a copy of the parsed workspace object
        # as it is added to the mongomock collection. Of course, this
        # means that this dict MUST be treated as immutable by other code.
        self.collection._internalize_dict = lambda d: d
        # some internal state
        # Map of reference -> synthetic integer object id (see _get_oid).
        self._oids = {}
def load(self, ref):
"""Load data from a given reference.
The reference will be translated into a file to load,
using the following formula:
``<working_directory> + '/' + <ref> + <ext>`,
where ``<working_directory>`` is the path given to the class
constructor, ``<ref>`` is the reference given to this
function, and
``<ext>`` is a file extension '.msgpack' if
`use_msgpack` is True and '.json' otherwise.
Thus, for ``WorkspaceFile('/tmp/data').load('foo_bar')``,
the path loaded would be '/tmp/data/foo_bar.msgpack'.
See class documentation on format of input data.
Args:
ref (str): The reference
Notes:
* Post-condition: Object is loaded if and only if that reference was
not loaded previously. Modification timestamp of the underlying
file is NOT checked, you must manually invalidate modified
data with :meth:`unload(ref)`.
Raises:
IOError: file not found or not readable.
ValueError: parsing failed.
"""
# log start
t0 = log_start(_log, 'WorkspaceFile.load', level=logging.DEBUG,
kvp=dict(ref=ref))
# stop if already loaded in the past
if ref in self._loaded:
# log done and return
log_end(_log, t0, 'WorkspaceFile.load', level=logging.DEBUG,
kvp=dict(ref=ref, cached='yes'))
return
# create the full path from the reference
ext = 'msgpack' if self.use_msgpack else 'json'
full_path = '{}.{}'.format(os.path.join(self._wd, ref), ext)
# open the file; raises IOError on failure
f = open(full_path)
# parse the file
try:
record = msgpack.load(f) if self.use_msgpack else json.load(f)
except Exception as err:
raise ValueError('Loading {}: {}'.format(full_path, err))
finally:
f.close()
# cache the parsed data, both by reference and by 'name'
# (if name is not the same as reference)
#print("@@ REF={r} RECORD[ref]={rr} RECORD[name]={n}"
# .format(r=ref, rr=record['ref'], n=record['name']))
self._loaded[ref] = record
self._loaded[record['ref']] = record
self._loaded[record['name']] = record
#print('@@ STORE RECORD BY ({},{})'.format(record['ref'], record['name']))
# insert the parsed data into mongomock
self.collection.insert(record)
# log done
log_end(_log, t0, 'WorkspaceFile.load', level=logging.DEBUG,
kvp=dict(ref=ref, cached='no'))
def unload(self, ref):
"""Force reload of ``ref`` the next time.
Does nothing if ``ref`` is not already loaded.
Args:
ref (str): The reference
Post:
ref is no longer loaded
"""
if ref in self._loaded:
del self._loaded[ref]
# Public methods
def copy_object(self, prm):
# do nothing
return
def get_object_history(self, prm):
return []
def get_object_info_new(self, prm):
ref = prm['objects'][0]['ref']
records = self._find_ref(ref)
result = [self._make_info_tuple(record, record['ref'])
for record in records]
return result
def get_object_provenance(self, prm):
return []
def get_object_subset(self, prm):
"""Note: this is not efficient. It actually looks at
the whole object.
"""
# loop over each specified subset, and add all results
# to a single list in `result`
result = []
for subset in prm:
ref, paths = subset['ref'], subset['included']
# get matching records and data in the paths
records = self._find_ref(ref)
# add to result
for r in records:
extracted = {} # all extracted paths
for p in paths:
d = r['data'] # alias
parts = p.split('/')
# Look for value matching path in 'd'
e = extracted
for i in xrange(len(parts) - 1):
if parts[i] in d:
d = d[parts[i]]
if parts[i] not in e and isinstance(d, dict):
e[parts[i]] = {}
if isinstance(e[parts[i]], dict):
e = e[parts[i]]
else:
break
else:
if parts[-1] in d:
e[parts[-1]] = d[parts[-1]]
_log.debug(extracted)
if len(extracted) > 0:
#print("@@ add extracted: {}".format(extracted))
obj = self._make_object(r, ref, data=extracted)
result.append(obj)
return result
def get_objects(self, prm):
result = []
for refs in prm:
ref = refs['ref']
records = self._find_ref(ref)
#print("@@ GO, got records: {}".format(records))
objects = [self._make_object(record, ref) for record in records]
result.extend(objects)
return result
def get_type_info(self, type_name):
return self._make_type_info({'type': type_name})
def list_referencing_objects(self, prm):
result = []
for refs in prm:
ref_result = []
ref = refs['ref']
# find every record that refers to this one
for rfr in self.collection.find({'links': ref}):
info_tuple = self._make_info_tuple(rfr, rfr['ref'])
ref_result.append(info_tuple)
result.append(ref_result)
return result
def translate_to_MD5_types(self, types):
m = {}
for t in types:
if t in MD5_TYPES:
m[t] = MD5_TYPES[t]
else:
raise LibError('Type schema record was not found for {}'
.format(t))
return m
def ver(self):
return self.VERSION
def get_children(self):
return []
# ___ Internal methods ___
def _get_oid(self, ref):
if ref in self._oids:
return self._oids[ref]
new_oid = (len(self._oids) + abs(hash(ref))) % sys.maxint
self._oids[ref] = new_oid
return new_oid
def _make_info(self, record, ref):
"""Make and return a single 'info' section.
"""
assert re.match(NUMERIC_REF_PAT, ref) # require numeric ref
#print("@@ make_info from: {}".format(record))
ws_id = int(ref.split('/')[0])
oid = self._get_oid(ref)
data = record['data']
info = {'object_id': oid,
'object_name': 'Object{:d}'.format(oid),
'object_reference': ref,
'object_reference_versioned': '{}/{}'.format(ref, '1'),
'type_string': record['type'],
'save_date': datetime.isoformat(datetime.now()),
'version': 1,
'saved_by': 'CookieMonster',
'workspace_id': ws_id,
'workspace_name': record['name'],
'object_checksum': 0,
'object_size': len(data),
'object_metadata': record['metadata']
}
return info
def _make_info_tuple(self, record, ref):
"""Make the object_info type tuple:
0: obj_id objid, 1: obj_name name,
2: type_string type, 3: timestamp save_date,
4: int version, 5: username saved_by,
6: ws_id wsid, 7: ws_name workspace,
8: string chsum, 9: int size, 10: usermeta meta
"""
assert re.match(NUMERIC_REF_PAT, ref) # require numeric ref
wsid = int(ref.split('/')[0])
ver = int(ref.split('/')[-1])
return (self._get_oid(ref), record['name'],
record['type'], datetime.isoformat(datetime.now()),
ver, 'joe',
wsid, record['name'],
'0', 0, {}
)
def _make_object(self, record, ref, data=None):
canonical_ref = record['ref']
r = {
'data': data or record['data'],
'object_info': self._make_info(record, canonical_ref),
'provenance': [],
'creator': 'Gonzo',
'created': datetime.isoformat(datetime.now()),
'refs': [],
'copied': '',
'copy_source_inaccessible': 0,
'extracted_ids': {},
'handle_error': '',
'handle_stacktrace': ''
}
return r
def _make_type_info(self, | |
# inputs.py
"""Inputs - user input for humans.
Inputs aims to provide easy to use, cross-platform, user input device
support for Python. I.e. keyboards, mice, gamepads, etc.
Currently supported platforms are the Raspberry Pi, Linux, Windows and
Mac OS X.
"""
# Copyright (c) 2016, 2018: Zeth
# All rights reserved.
#
# BSD Licence
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
from __future__ import division
import os
import sys
import io
import glob
import struct
import platform
import math
import time
import codecs
from warnings import warn
from itertools import count
from operator import itemgetter
from multiprocessing import Process, Pipe
import ctypes
__version__ = "0.5"

# Platform flags; at most one is True on supported systems.
WIN = platform.system() == 'Windows'
MAC = platform.system() == 'Darwin'
NIX = platform.system() == 'Linux'

if WIN:
    # pylint: disable=wrong-import-position
    import ctypes.wintypes
    DWORD = ctypes.wintypes.DWORD
    HANDLE = ctypes.wintypes.HANDLE
    WPARAM = ctypes.wintypes.WPARAM
    # BUG FIX: was ctypes.wintypes.WPARAM. LPARAM is a *signed*
    # pointer-sized type with its own wintypes definition.
    LPARAM = ctypes.wintypes.LPARAM
    MSG = ctypes.wintypes.MSG
else:
    # Best-effort stand-ins so the names always exist off Windows.
    # NOTE(review): c_ulonglong for WPARAM/LPARAM assumes a 64-bit ABI.
    DWORD = ctypes.c_ulong
    HANDLE = ctypes.c_void_p
    WPARAM = ctypes.c_ulonglong
    LPARAM = ctypes.c_ulonglong
    MSG = ctypes.Structure

if NIX:
    from fcntl import ioctl

# True on Python < 3.4, where struct.iter_unpack does not exist.
OLD = sys.version_info < (3, 4)

PERMISSIONS_ERROR_TEXT = (
    "The user (that this program is being run as) does "
    "not have permission to access the input events, "
    "check groups and permissions, for example, on "
    "Debian, the user needs to be in the input group.")

# Standard event format for most devices.
# long, long, unsigned short, unsigned short, int
EVENT_FORMAT = 'llHHi'
EVENT_SIZE = struct.calcsize(EVENT_FORMAT)
def chunks(raw):
    """Yield successive EVENT_SIZE sized chunks from raw."""
    offset = 0
    total = len(raw)
    while offset < total:
        yield struct.unpack(EVENT_FORMAT, raw[offset:offset + EVENT_SIZE])
        offset += EVENT_SIZE
# Pick the iter_unpack implementation once, at import time.
if OLD:
    def iter_unpack(raw):
        """Yield successive EVENT_SIZE chunks from message."""
        # Python < 3.4 has no struct.iter_unpack; emulate it with chunks().
        return chunks(raw)
else:
    def iter_unpack(raw):
        """Yield successive EVENT_SIZE chunks from message."""
        return struct.iter_unpack(EVENT_FORMAT, raw)
def convert_timeval(seconds_since_epoch):
    """Convert time into C style timeval.

    Returns an integer ``(seconds, microseconds)`` pair.
    """
    fractional, integral = math.modf(seconds_since_epoch)
    seconds = math.floor(integral)
    microseconds = math.floor(fractional * 1000000)
    return seconds, microseconds
# Devices addressed via fixed /dev/input/by-id paths rather than discovery.
SPECIAL_DEVICES = (
    ("Raspberry Pi Sense HAT Joystick",
     "/dev/input/by-id/gpio-Raspberry_Pi_Sense_HAT_Joystick-event-kbd"),
    ("Nintendo Wii Remote",
     "/dev/input/by-id/bluetooth-Nintendo_Wii_Remote-event-joystick"),
    ("FT5406 memory based driver",
     "/dev/input/by-id/gpio-Raspberry_Pi_Touchscreen_Display-event-mouse"),
)
# Mapping of XInput button numbers / axis names to event codes
# (presumably Linux evdev BTN_*/ABS_* codes -- verify against linux/input.h).
XINPUT_MAPPING = (
    (1, 0x11),
    (2, 0x11),
    (3, 0x10),
    (4, 0x10),
    (5, 0x13a),
    (6, 0x13b),
    (7, 0x13d),
    (8, 0x13e),
    (9, 0x136),
    (10, 0x137),
    (13, 0x130),
    (14, 0x131),
    (15, 0x134),
    (16, 0x133),
    (17, 0x11),
    ('l_thumb_x', 0x00),
    ('l_thumb_y', 0x01),
    ('left_trigger', 0x02),
    ('r_thumb_x', 0x03),
    ('r_thumb_y', 0x04),
    ('right_trigger', 0x05),
)
# Candidate XInput DLLs, newest first.
XINPUT_DLL_NAMES = (
    "XInput1_4.dll",
    "XInput9_1_0.dll",
    "XInput1_3.dll",
    "XInput1_2.dll",
    "XInput1_1.dll"
)
# XInput API return codes.
XINPUT_ERROR_DEVICE_NOT_CONNECTED = 1167
XINPUT_ERROR_SUCCESS = 0
# Meaning of the LED control values on Xbox-style controllers.
XBOX_STYLE_LED_CONTROL = {
    0: 'off',
    1: 'all blink, then previous setting',
    2: '1/top-left blink, then on',
    3: '2/top-right blink, then on',
    4: '3/bottom-left blink, then on',
    5: '4/bottom-right blink, then on',
    6: '1/top-left on',
    7: '2/top-right on',
    8: '3/bottom-left on',
    9: '4/bottom-right on',
    10: 'rotate',
    11: 'blink, based on previous setting',
    12: 'slow blink, based on previous setting',
    13: 'rotate with two lights',
    14: 'persistent slow all blink',
    15: 'blink once, then previous setting'
}
# Linux input device property bits (INPUT_PROP_*).
DEVICE_PROPERTIES = (
    (0x00, "INPUT_PROP_POINTER"),  # needs a pointer
    (0x01, "INPUT_PROP_DIRECT"),  # direct input devices
    (0x02, "INPUT_PROP_BUTTONPAD"),  # has button(s) under pad
    (0x03, "INPUT_PROP_SEMI_MT"),  # touch rectangle only
    (0x04, "INPUT_PROP_TOPBUTTONPAD"),  # softbuttons at top of pad
    (0x05, "INPUT_PROP_POINTING_STICK"),  # is a pointing stick
    (0x06, "INPUT_PROP_ACCELEROMETER"),  # has accelerometer
    (0x1f, "INPUT_PROP_MAX"),
    (0x1f + 1, "INPUT_PROP_CNT"))
# Linux input event type codes (EV_*).
EVENT_TYPES = (
    (0x00, "Sync"),
    (0x01, "Key"),
    (0x02, "Relative"),
    (0x03, "Absolute"),
    (0x04, "Misc"),
    (0x05, "Switch"),
    (0x11, "LED"),
    (0x12, "Sound"),
    (0x14, "Repeat"),
    (0x15, "ForceFeedback"),
    (0x16, "Power"),
    (0x17, "ForceFeedbackStatus"),
    (0x1f, "Max"),
    (0x1f+1, "Current"))
# Linux synchronization event codes (SYN_*).
SYNCHRONIZATION_EVENTS = (
    (0, "SYN_REPORT"),
    (1, "SYN_CONFIG"),
    (2, "SYN_MT_REPORT"),
    (3, "SYN_DROPPED"),
    (0xf, "SYN_MAX"),
    (0xf+1, "SYN_CNT"))
KEYS_AND_BUTTONS = (
(0, "KEY_RESERVED"),
(1, "KEY_ESC"),
(2, "KEY_1"),
(3, "KEY_2"),
(4, "KEY_3"),
(5, "KEY_4"),
(6, "KEY_5"),
(7, "KEY_6"),
(8, "KEY_7"),
(9, "KEY_8"),
(10, "KEY_9"),
(11, "KEY_0"),
(12, "KEY_MINUS"),
(13, "KEY_EQUAL"),
(14, "KEY_BACKSPACE"),
(15, "KEY_TAB"),
(16, "KEY_Q"),
(17, "KEY_W"),
(18, "KEY_E"),
(19, "KEY_R"),
(20, "KEY_T"),
(21, "KEY_Y"),
(22, "KEY_U"),
(23, "KEY_I"),
(24, "KEY_O"),
(25, "KEY_P"),
(26, "KEY_LEFTBRACE"),
(27, "KEY_RIGHTBRACE"),
(28, "KEY_ENTER"),
(29, "KEY_LEFTCTRL"),
(30, "KEY_A"),
(31, "KEY_S"),
(32, "KEY_D"),
(33, "KEY_F"),
(34, "KEY_G"),
(35, "KEY_H"),
(36, "KEY_J"),
(37, "KEY_K"),
(38, "KEY_L"),
(39, "KEY_SEMICOLON"),
(40, "KEY_APOSTROPHE"),
(41, "KEY_GRAVE"),
(42, "KEY_LEFTSHIFT"),
(43, "KEY_BACKSLASH"),
(44, "KEY_Z"),
(45, "KEY_X"),
(46, "KEY_C"),
(47, "KEY_V"),
(48, "KEY_B"),
(49, "KEY_N"),
(50, "KEY_M"),
(51, "KEY_COMMA"),
(52, "KEY_DOT"),
(53, "KEY_SLASH"),
(54, "KEY_RIGHTSHIFT"),
(55, "KEY_KPASTERISK"),
(56, "KEY_LEFTALT"),
(57, "KEY_SPACE"),
(58, "KEY_CAPSLOCK"),
(59, "KEY_F1"),
(60, "KEY_F2"),
(61, "KEY_F3"),
(62, "KEY_F4"),
(63, "KEY_F5"),
(64, "KEY_F6"),
(65, "KEY_F7"),
(66, "KEY_F8"),
(67, "KEY_F9"),
(68, "KEY_F10"),
(69, "KEY_NUMLOCK"),
(70, "KEY_SCROLLLOCK"),
(71, "KEY_KP7"),
(72, "KEY_KP8"),
(73, "KEY_KP9"),
(74, "KEY_KPMINUS"),
(75, "KEY_KP4"),
(76, "KEY_KP5"),
(77, "KEY_KP6"),
(78, "KEY_KPPLUS"),
(79, "KEY_KP1"),
(80, "KEY_KP2"),
(81, "KEY_KP3"),
(82, "KEY_KP0"),
(83, "KEY_KPDOT"),
(85, "KEY_ZENKAKUHANKAKU"),
(86, "KEY_102ND"),
(87, "KEY_F11"),
(88, "KEY_F12"),
(89, "KEY_RO"),
(90, "KEY_KATAKANA"),
(91, "KEY_HIRAGANA"),
(92, "KEY_HENKAN"),
(93, "KEY_KATAKANAHIRAGANA"),
(94, "KEY_MUHENKAN"),
(95, "KEY_KPJPCOMMA"),
(96, "KEY_KPENTER"),
(97, "KEY_RIGHTCTRL"),
(98, "KEY_KPSLASH"),
(99, "KEY_SYSRQ"),
(100, "KEY_RIGHTALT"),
(101, "KEY_LINEFEED"),
(102, "KEY_HOME"),
(103, "KEY_UP"),
(104, "KEY_PAGEUP"),
(105, "KEY_LEFT"),
(106, "KEY_RIGHT"),
(107, "KEY_END"),
(108, "KEY_DOWN"),
(109, "KEY_PAGEDOWN"),
(110, "KEY_INSERT"),
(111, "KEY_DELETE"),
(112, "KEY_MACRO"),
(113, "KEY_MUTE"),
(114, "KEY_VOLUMEDOWN"),
(115, "KEY_VOLUMEUP"),
(116, "KEY_POWER"), # SC System Power Down
(117, "KEY_KPEQUAL"),
(118, "KEY_KPPLUSMINUS"),
(119, "KEY_PAUSE"),
(120, "KEY_SCALE"), # AL Compiz Scale (Expose)
(121, "KEY_KPCOMMA"),
(122, "KEY_HANGEUL"),
(123, "KEY_HANJA"),
(124, "KEY_YEN"),
(125, "KEY_LEFTMETA"),
(126, "KEY_RIGHTMETA"),
(127, "KEY_COMPOSE"),
(128, "KEY_STOP"), # AC Stop
(129, "KEY_AGAIN"),
(130, "KEY_PROPS"), # AC Properties
(131, "KEY_UNDO"), # AC Undo
(132, "KEY_FRONT"),
(133, "KEY_COPY"), # AC Copy
(134, "KEY_OPEN"), # AC Open
(135, "KEY_PASTE"), # AC Paste
(136, "KEY_FIND"), # AC Search
(137, "KEY_CUT"), # AC Cut
(138, "KEY_HELP"), # AL Integrated Help Center
(139, "KEY_MENU"), # Menu (show menu)
(140, "KEY_CALC"), # AL Calculator
(141, "KEY_SETUP"),
(142, "KEY_SLEEP"), # SC System Sleep
(143, "KEY_WAKEUP"), # System Wake Up
(144, "KEY_FILE"), # AL Local Machine Browser
(145, "KEY_SENDFILE"),
(146, "KEY_DELETEFILE"),
(147, "KEY_XFER"),
(148, "KEY_PROG1"),
(149, "KEY_PROG2"),
(150, "KEY_WWW"), # AL Internet Browser
(151, "KEY_MSDOS"),
(152, "KEY_COFFEE"), # AL Terminal Lock/Screensaver
(153, "KEY_ROTATE_DISPLAY"), # Display orientation for e.g. tablets
(154, "KEY_CYCLEWINDOWS"),
(155, "KEY_MAIL"),
(156, "KEY_BOOKMARKS"), # AC Bookmarks
(157, "KEY_COMPUTER"),
(158, "KEY_BACK"), # AC Back
(159, "KEY_FORWARD"), # AC Forward
(160, "KEY_CLOSECD"),
(161, "KEY_EJECTCD"),
(162, "KEY_EJECTCLOSECD"),
(163, "KEY_NEXTSONG"),
(164, "KEY_PLAYPAUSE"),
(165, "KEY_PREVIOUSSONG"),
(166, "KEY_STOPCD"),
(167, "KEY_RECORD"),
(168, "KEY_REWIND"),
(169, "KEY_PHONE"), # Media Select Telephone
(170, "KEY_ISO"),
(171, "KEY_CONFIG"), # AL Consumer Control Configuration
(172, "KEY_HOMEPAGE"), # AC Home
(173, "KEY_REFRESH"), # AC Refresh
(174, "KEY_EXIT"), # AC Exit
(175, "KEY_MOVE"),
(176, "KEY_EDIT"),
(177, "KEY_SCROLLUP"),
(178, "KEY_SCROLLDOWN"),
(179, "KEY_KPLEFTPAREN"),
(180, "KEY_KPRIGHTPAREN"),
(181, "KEY_NEW"), # AC New
(182, "KEY_REDO"), # AC Redo/Repeat
(183, "KEY_F13"),
(184, "KEY_F14"),
(185, "KEY_F15"),
(186, "KEY_F16"),
(187, "KEY_F17"),
(188, "KEY_F18"),
(189, "KEY_F19"),
(190, "KEY_F20"),
(191, "KEY_F21"),
(192, "KEY_F22"),
(193, "KEY_F23"),
(194, "KEY_F24"),
(200, "KEY_PLAYCD"),
(201, "KEY_PAUSECD"),
(202, "KEY_PROG3"),
(203, "KEY_PROG4"),
(204, "KEY_DASHBOARD"), # AL Dashboard
(205, "KEY_SUSPEND"),
(206, "KEY_CLOSE"), # AC Close
(207, "KEY_PLAY"),
(208, "KEY_FASTFORWARD"),
(209, "KEY_BASSBOOST"),
(210, "KEY_PRINT"), # AC Print
(211, "KEY_HP"),
(212, "KEY_CAMERA"),
(213, "KEY_SOUND"),
(214, "KEY_QUESTION"),
(215, "KEY_EMAIL"),
(216, "KEY_CHAT"),
(217, "KEY_SEARCH"),
(218, "KEY_CONNECT"),
(219, "KEY_FINANCE"), # AL Checkbook/Finance
(220, "KEY_SPORT"),
(221, "KEY_SHOP"),
(222, "KEY_ALTERASE"),
(223, "KEY_CANCEL"), # AC Cancel
| |
import pandas as pd
from matplotlib import pyplot as plt, dates, ticker
from matplotlib.pyplot import setp
from matplotlib.ticker import PercentFormatter
from switch_model.tools.graph.main import graph_scenarios, graph, Scenario
# Axis / legend label strings shared by all figures below.
STORAGE_CAPACITY_LEGEND = "Storage Capacity (TWh)"
X_LABEL = "WECC-wide Storage Capacity (TWh)"
def set_styles(tools):
    """Apply shared matplotlib styling (sans-serif font family)."""
    tools.plt.rcParams.update({'font.family': 'sans-serif'})
@graph(
    "figure_1",
    supports_multi_scenario=True,
    # title="Figure 1: The value of storage in the WECC"
)
def figure_1(tools):
    """Draw the four-panel 'value of storage' figure on a 2x2 grid."""
    set_styles(tools)
    fig = tools.get_figure(size=(12, 12))
    panels = (figure_1_panel_1, figure_1_panel_2,
              figure_1_panel_3, figure_1_panel_4)
    for position, panel in enumerate(panels, start=1):
        panel(tools, fig.add_subplot(2, 2, position))
def figure_1_panel_1(tools, ax):
    """Panel A: mean energy-balance duals by hour of day, one line per scenario."""
    df = tools.get_dataframe('load_balance.csv', usecols=[
        "timestamp",
        "normalized_energy_balance_duals_dollar_per_mwh",
        "scenario_name"
    ]).rename(columns={"normalized_energy_balance_duals_dollar_per_mwh": "value"})
    df = tools.transform.timestamp(df)
    # Average duals per scenario and hour, then one column per scenario.
    df = df.groupby(["scenario_name", "hour"], as_index=False)["value"].mean()
    df = df.pivot(index="hour", columns="scenario_name", values="value")
    df = df.rename_axis(STORAGE_CAPACITY_LEGEND, axis=1)
    # Duplicate hour 0 at hour 24 so the daily curve closes.
    df.loc[24] = df.loc[0]
    # Convert from $/MWh to cents/kWh
    df *= 0.1
    df.plot(ax=ax, colormap="seismic", xlabel="Time of Day (PST)", marker=".",
            ylabel=u"Normalized Duals (\xa2/kWh)")
    ax.set_xlim(0, 24)
    ax.set_ylim(0, df.max().max())
    ax.set_title("A. Mean Energy Balance Duals by Time of Day")
def figure_1_panel_1_old(tools, ax):
    """(Unused variant) Duals vs. storage capacity with percentile bands."""
    df = tools.get_dataframe('load_balance.csv') \
        .rename(columns={"normalized_energy_balance_duals_dollar_per_mwh": "value"})
    # df = df.groupby(["scenario_name", "load_zone"], as_index=False)["value"].mean()
    load_balance_group = df.groupby("scenario_name").value
    # Outer (1st-99th) and inner (25th-75th) percentile bounds.
    quartile = 0.01
    quartile_inner = 0.25
    mean = load_balance_group.mean().rename("mean_val")
    median = load_balance_group.median().rename("median_val")
    upper = load_balance_group.quantile(1 - quartile).rename("upper")
    lower = load_balance_group.quantile(quartile).rename("lower")
    upper_inner = load_balance_group.quantile(1 - quartile_inner).rename("upper_inner")
    lower_inner = load_balance_group.quantile(quartile_inner).rename("lower_inner")
    df = pd.concat([lower, lower_inner, median, mean, upper_inner, upper], axis=1)
    # Convert from $/MWh to cents/kWh
    df *= 0.1
    x = df.index.values
    ax.plot(x, df.mean_val.values, color="blue", label="Mean", marker=".")
    ax.plot(x, df.median_val.values, color="black", label="Median", marker=".")
    ax.fill_between(x, df.upper_inner.values, df.upper.values, alpha=0.2, color="black", edgecolor=None,
                    label=f"{int(quartile * 100)}-{int(100 - quartile * 100)}th percentile")
    ax.fill_between(x, df.lower.values, df.lower_inner.values, alpha=0.2, color="black", edgecolor=None)
    ax.fill_between(x, df.lower_inner.values, df.upper_inner.values, alpha=0.4, color="black", edgecolor=None,
                    label=f"{int(quartile_inner * 100)}-{int(100 - quartile_inner * 100)}th percentile")
    ax.set_ylabel(u"Normalized Duals (\xa2/kWh)")
    ax.legend()
    ax.set_xlabel(X_LABEL)
def figure_1_panel_2(tools, ax):
    """Panel B: centered 7-day rolling mean of duals across the year."""
    df = tools.get_dataframe('load_balance.csv', usecols=[
        "timestamp",
        "normalized_energy_balance_duals_dollar_per_mwh",
        "scenario_name"
    ]).rename(columns={"normalized_energy_balance_duals_dollar_per_mwh": "value"})
    df = df.groupby(["scenario_name", "timestamp"], as_index=False).mean()
    df = tools.transform.timestamp(df)
    # df["Month"] = df["datetime"].dt.month
    # df = df.groupby(["scenario_name", "Month"], as_index=False)["value"].mean()
    # df = df.pivot(columns="Month", index="scenario_name", values="value")
    df = df.set_index("datetime")
    # One rolling-mean series per scenario, then scenarios as columns.
    df = df.groupby("scenario_name", as_index=False).rolling("7D", center=True)["value"].mean()
    df = df.unstack("scenario_name").rename_axis("Storage Capacity (TWh)", axis=1)
    # Convert from $/MWh to cents/kWh
    df *= 0.1
    df.plot(ax=ax, colormap="seismic", xlabel="Month of Year", ylabel=u"Normalized Duals (\xa2/kWh)")
    # ax.xaxis.set_major_locator(dates.MonthLocator())
    # ax.xaxis.set_major_formatter(dates.DateFormatter("%b"))
    ax.set_title("B. Mean Energy Balance Duals throughout the Year")
def figure_1_panel_3(tools, ax):
    """Panel C: % change in transmission and generation built vs. baseline."""
    # Calculate transmission
    tx = tools.get_dataframe("transmission.csv", usecols=["BuildTx", "scenario_name"], convert_dot_to_na=True).fillna(0)
    tx = tx.groupby("scenario_name")["BuildTx"].sum().rename("Transmission")
    # Get new buildout
    buildout = tools.get_dataframe("BuildGen.csv").rename(columns={"GEN_BLD_YRS_1": "GENERATION_PROJECT"})
    # Keep only latest year
    buildout = buildout[buildout["GEN_BLD_YRS_2"] == 2050]
    # Merge with projects to get gen_type
    projects = tools.get_dataframe("generation_projects_info.csv", from_inputs=True, usecols=[
        "GENERATION_PROJECT", "gen_tech", "gen_energy_source", "scenario_name"
    ])
    buildout = buildout.merge(
        projects,
        on=["GENERATION_PROJECT", "scenario_name"],
        validate="one_to_one",
        how="left"
    )
    del projects
    buildout = tools.transform.gen_type(buildout)
    # Filter out storage since it's not considered generation
    buildout = buildout[buildout["gen_type"] != "Storage"]
    # Sum across the entire scenario
    buildout = buildout.groupby("scenario_name")["BuildGen"].sum().rename("Generation")
    # Merge into same dataframe
    df = pd.concat([tx, buildout], axis=1)
    # Convert to percent against baseline (first row = baseline scenario)
    df = (df / df.iloc[0] - 1) * 100
    # Plot
    df.plot(ax=ax, marker=".")
    ax.set_ylabel("Change in Capacity Built Compared to Baseline")
    ax.yaxis.set_major_formatter(PercentFormatter())
    ax.set_xlabel(X_LABEL)
    ax.set_title("C. Impact of Storage on Transmission & Generation Investments")
def figure_1_panel_4(tools, ax):
    """Panel D: yearly renewable curtailment per technology and scenario."""
    # Read dispatch.csv
    df = tools.get_dataframe(
        'dispatch.csv',
        usecols=["gen_tech", "gen_energy_source", "Curtailment_MW", "is_renewable", "tp_weight_in_year_hrs",
                 "scenario_name"],
        na_filter=False,  # For performance
    )
    # Keep only renewable
    df = df[df["is_renewable"]]
    # Add the gen_type column
    df = tools.transform.gen_type(df)
    # MW * hours -> MWh, then / 1000 -> GWh per year
    df["value"] = df["Curtailment_MW"] * df["tp_weight_in_year_hrs"] / 1000
    df = df.groupby(["scenario_name", "gen_type"], as_index=False).value.sum()
    df = df.pivot(index="scenario_name", columns="gen_type", values="value")
    # NOTE(review): this further divides by 1000 (GWh -> TWh) while the axis
    # label below says GWh -- confirm which unit is intended.
    df /= 1000
    df = df.rename_axis("Technology", axis=1)
    df.plot(
        ax=ax,
        color=tools.get_colors(),
        marker="."
    )
    ax.set_ylabel("Yearly Curtailment (GWh)")
    ax.set_xlabel(X_LABEL)
    ax.set_title("D. Impact of Storage on Curtailment")
@graph(
    "figure_2",
    title="Figure 2: Generation Mix",
    supports_multi_scenario=True
)
def figure_2(tools):
    """Six-panel generation-mix figure: four energy-balance panels (one per
    storage-capacity scenario), a capacity-change panel and a buildout panel."""
    set_styles(tools)
    fig = tools.get_figure(size=(12, 12))
    ax1 = fig.add_subplot(3, 2, 1)
    ax2 = fig.add_subplot(3, 2, 2, sharey=ax1)
    ax3 = fig.add_subplot(3, 2, 3, sharey=ax1, sharex=ax1)
    ax4 = fig.add_subplot(3, 2, 4, sharey=ax1, sharex=ax2)
    ax5 = fig.add_subplot(3, 2, 5)
    ax6 = fig.add_subplot(3, 2, 6)
    # Hide duplicated tick labels on the shared axes.
    setp(ax2.get_yticklabels(), visible=False)
    setp(ax4.get_yticklabels(), visible=False)
    setp(ax1.get_xticklabels(), visible=False)
    setp(ax2.get_xticklabels(), visible=False)
    # scenario_name values are the storage capacities in TWh (1.94 = baseline).
    figure_2_energy_balance(tools, ax1, scenario_name=1.94)
    figure_2_energy_balance(tools, ax2, scenario_name=4)
    figure_2_energy_balance(tools, ax3, scenario_name=16)
    figure_2_energy_balance(tools, ax4, scenario_name=64)
    figure_2_panel_5(tools, ax5)
    figure_2_panel_6(tools, ax6)
    # Build one shared legend, keeping only the first handle per label.
    handles, labels = ax1.get_legend_handles_labels()
    unique = [(h, l) for i, (h, l) in enumerate(zip(handles, labels)) if l not in labels[:i]]
    plt.figlegend(*zip(*unique))
def filter_scenario(df, scenario_name):
    """Return a copy of the rows matching *scenario_name*,
    with the scenario_name column dropped."""
    mask = df["scenario_name"] == scenario_name
    return df.loc[mask].drop(columns=["scenario_name"]).copy()
def figure_2_energy_balance(tools, ax, scenario_name):
    """Plot 14-day-smoothed dispatch by technology, total demand/generation
    and storage state of charge for one storage-capacity scenario."""
    # return
    # Get dispatch dataframe
    dispatch = tools.get_dataframe("dispatch.csv", usecols=[
        "timestamp", "gen_tech", "gen_energy_source", "DispatchGen_MW", "scenario_name"
    ]).rename({"DispatchGen_MW": "value"}, axis=1)
    dispatch = filter_scenario(dispatch, scenario_name)
    dispatch = tools.transform.gen_type(dispatch)
    # Sum dispatch across all the projects of the same type and timepoint
    dispatch = dispatch.groupby(["timestamp", "gen_type"], as_index=False).sum()
    dispatch = dispatch[dispatch["gen_type"] != "Storage"]
    # Get load dataframe
    load = tools.get_dataframe("load_balance.csv", usecols=[
        "timestamp", "zone_demand_mw", "TXPowerNet", "scenario_name"
    ])
    load = filter_scenario(load, scenario_name)
    def process_time(df):
        # Keep only the latest period and index by datetime.
        df = df.astype({"period": int})
        df = df[df["period"] == df["period"].max()].drop(columns="period")
        return df.set_index("datetime")
    # Sum load across all the load zones
    load = load.groupby("timestamp", as_index=False).sum()
    # Include Tx Losses in demand and flip sign
    load["value"] = (load["zone_demand_mw"] + load["TXPowerNet"]) * -1
    # Rename and convert from wide to long format
    load = load[["timestamp", "value"]]
    # Add the timestamp information and make period string to ensure it doesn't mess up the graphing
    dispatch = process_time(tools.transform.timestamp(dispatch))
    load = process_time(tools.transform.timestamp(load))
    # Convert to TWh (incl. multiply by timepoint duration)
    dispatch["value"] *= dispatch["tp_duration"] / 1e6
    load["value"] *= load["tp_duration"] / 1e6
    # 14-day centered rolling window; edges without a full window are dropped.
    days = 14
    freq = str(days) + "D"
    offset = tools.pd.Timedelta(freq) / 2
    def rolling_sum(df):
        # Rolling sum scaled to a per-day value, trimmed at both edges.
        df = df.rolling(freq, center=True).value.sum().reset_index()
        df["value"] /= days
        df = df[(df.datetime.min() + offset < df.datetime) & (df.datetime < df.datetime.max() - offset)]
        return df
    dispatch = rolling_sum(dispatch.groupby("gen_type", as_index=False))
    load = rolling_sum(load).set_index("datetime")["value"]
    # Get the state of charge data
    soc = tools.get_dataframe("StateOfCharge.csv", dtype={"STORAGE_GEN_TPS_1": str}) \
        .rename(columns={"STORAGE_GEN_TPS_2": "timepoint", "StateOfCharge": "value"})
    soc = filter_scenario(soc, scenario_name)
    # Sum over all the projects that are in the same scenario with the same timepoint
    soc = soc.groupby(["timepoint"], as_index=False).sum()
    soc["value"] /= 1e6  # Convert to TWh
    # NOTE(review): max_soc is computed but never used below.
    max_soc = soc["value"].max()
    # Group by time
    soc = process_time(tools.transform.timestamp(soc, use_timepoint=True, key_col="timepoint"))
    soc = soc.rolling(freq, center=True)["value"].mean().reset_index()
    soc = soc[(soc.datetime.min() + offset < soc.datetime) & (soc.datetime < soc.datetime.max() - offset)]
    soc = soc.set_index("datetime")["value"]
    # Drop all-zero technologies; columns ordered by standard deviation.
    dispatch = dispatch[dispatch["value"] != 0]
    dispatch = dispatch.pivot(columns="gen_type", index="datetime", values="value")
    dispatch = dispatch[dispatch.std().sort_values().index].rename_axis("Technology", axis=1)
    total_dispatch = dispatch.sum(axis=1)
    max_val = max(total_dispatch.max(), load.max())
    # Scale soc to the graph (percent of this scenario's capacity).
    soc *= 100 / scenario_name
    # Plot
    # Get the colors for the lines
    # plot
    ax.set_ylim(0, max_val * 1.05)
    dispatch.plot(
        ax=ax,
        color=tools.get_colors(),
        legend=False,
        xlabel=""
    )
    # Secondary axis carries state of charge in percent.
    ax2 = ax.twinx()
    ax2.yaxis.set_major_formatter(PercentFormatter())
    ax2.set_ylim(0, 100)
    soc.plot(ax=ax2, color="black", linestyle="dotted", label="State of Charge", xlabel="")
    load.plot(ax=ax, color="red", linestyle="dashed", label="Total Demand", xlabel="")
    total_dispatch.plot(ax=ax, color="green", linestyle="dashed", label="Total Generation", xlabel="")
    # Shade surplus (green) and deficit (red) between generation and demand.
    ax.fill_between(total_dispatch.index, total_dispatch.values, load.values, alpha=0.2, where=load < total_dispatch,
                    facecolor="green")
    ax.fill_between(total_dispatch.index, total_dispatch.values, load.values, alpha=0.2, where=load > total_dispatch,
                    facecolor="red")
    ax.set_title(str(scenario_name) + "TWh of storage")
def figure_2_panel_5(tools, ax):
    """Panel B: % change in installed Wind/Solar/Biomass capacity vs. the
    1.94 TWh baseline scenario."""
    df = tools.get_dataframe("gen_cap.csv",
                             usecols=["gen_tech", "gen_energy_source", "GenCapacity", "scenario_name"])
    df = tools.transform.gen_type(df)
    df = df.rename({"GenCapacity": "value"}, axis=1)
    df = df.groupby(["scenario_name", "gen_type"], as_index=False).value.sum()
    # Per-technology baseline values from the 1.94 TWh scenario.
    scaling = df[df["scenario_name"] == 1.94][["gen_type", "value"]].rename(columns={"value": "scaling"})
    df = df.merge(scaling, on="gen_type")
    df.value /= df.scaling
    df.value = (df.value - 1) * 100
    df = df[df["gen_type"].isin(("Wind", "Solar", "Biomass"))]
    df = df.pivot(index="scenario_name", columns="gen_type", values="value")
    df = df.rename_axis("Technology", axis=1)
    df.plot(
        ax=ax,
        color=tools.get_colors(),
        legend=False
    )
    ax.set_ylabel("Percent Change in Installed Capacity against Baseline")
    ax.yaxis.set_major_formatter(PercentFormatter())
    ax.set_xlabel(X_LABEL)
    ax.set_title("B. Impact of Storage on Generation Mix")
def figure_2_panel_6(tools, ax):
    """Panel C: scatter of storage power vs. duration per region, with
    per-scenario averages overlaid (log-log axes)."""
    df = tools.get_dataframe("storage_builds.csv").astype({"scenario_name": "int"})
    df = df[df["build_year"] == 2050]
    # MW -> GW and MWh -> GWh.
    df["power"] = df["IncrementalPowerCapacityMW"] / 1000
    df["energy"] = df["IncrementalEnergyCapacityMWh"] / 1000
    df = tools.transform.load_zone(df)
    df = df.groupby(["scenario_name", "region"], as_index=False)[["power", "energy"]].sum()
    # df = df[df["scenario_name"] < 30]
    # Filter out rows where there's no power built
    # df = df[df["power"] > 0.0001]
    # Duration in days = (energy / power) hours / 24.
    df["duration"] = (df["energy"] / df["power"]) / 24
    df = df.rename(columns={"scenario_name": STORAGE_CAPACITY_LEGEND})
    average_duration = df.groupby(STORAGE_CAPACITY_LEGEND).sum()
    average_duration = ((average_duration["energy"] / average_duration["power"]) / 24).rename("duration")
    average_power = df.groupby(STORAGE_CAPACITY_LEGEND)["power"].mean().rename("power")
    average = pd.concat([average_power, average_duration], axis=1).reset_index()
    ax.scatter(
        df["duration"],
        df["power"],
        c=df[STORAGE_CAPACITY_LEGEND],
        s=10,
        cmap="seismic"
    )
    average.drop(STORAGE_CAPACITY_LEGEND, axis=1).set_index("duration").plot(ax=ax, legend=False)
    average.plot.scatter(
        "duration",
        "power",
        c=STORAGE_CAPACITY_LEGEND,
        s=50,
        colormap="seismic",
        ax=ax,
        marker="^",
        logx=True,
        logy=True,
    )
    # df_lines = df.pivot(index="duration", values="power", columns="region").interpolate(axis='index', limit_area="inside")
    # df_lines.plot(ax=ax, legend=False, color="black", linewidth=0.5)
    ax.tick_params(axis='x', which='both')
    ax.set_xlabel("Storage Duration (days)")
    ax.set_ylabel("Storage Power (GW)")
    ax.set_title("C. Storage Buildout Per Scenario")
def figure_2_panel_6_old(tools, ax):
dispatch = tools.get_dataframe("dispatch_annual_summary.csv",
usecols=["scenario_name", "Energy_GWh_typical_yr", "gen_tech", "gen_energy_source"]
).rename({"Energy_GWh_typical_yr": "value"}, axis=1)
dispatch = tools.transform.gen_type(dispatch)
# Filter out technologies we don't want
dispatch = dispatch[dispatch["gen_type"] != "Storage"]
dispatch = dispatch[dispatch["value"] != 0]
# Group and pivot
dispatch | |
<filename>OpenSeesAPI/Model/Element/Material/Section.py
__author__ = 'marafi'
"""
section secType? secTag? arg1? ...
The type of section created and the additional arguments required depends on the secType? provided in the command.
NOTE:
The valid queries to any section when creating an ElementRecorder are 'force', and 'deformation'. Some sections have additional queries to which they will respond. These are documented in the NOTES section for those sections.
The following contain information about secType? and the args required for each of the available section types:
Elastic Section
Fiber Section
NDFiber Section
Wide Flange Section
RC Section
Parallel Section
Section Aggregator
Uniaxial Section
Elastic Membrane Plate Section
Plate Fiber Section
Bidirectional Section
Isolator2spring Section
"""
from OpenSeesAPI.OpenSees import OpenSees
class Elastic(OpenSees):
    """Elastic section.

    Tcl forms:
        section Elastic $secTag $E $A $Iz <$G $alphaY>                  (2D)
        section Elastic $secTag $E $A $Iz $Iy $G $J <$alphaY $alphaZ>   (3D)

    $secTag  unique section tag
    $E       Young's Modulus
    $A       cross-sectional area of section
    $Iz      second moment of area about the local z-axis
    $Iy      second moment of area about the local y-axis (3D only)
    $G       Shear Modulus (optional for 2D analysis, required for 3D)
    $J       torsional moment of inertia of section (3D only)
    $alphaY  shear shape factor along the local y-axis (optional)
    $alphaZ  shear shape factor along the local z-axis (optional)

    The 2D form is emitted when ``J`` is None, the 3D form otherwise.
    """
    def __init__(self, id, E, A, Iz, G=None, Iy=None, J=None, AlphaY=None, AlphaZ=None, **kwargs):
        self._id = id
        self._E = E
        self._A = A
        self._Iz = Iz
        self._G = G
        self._Iy = Iy
        self._J = J
        self._AlphaY = AlphaY
        self._AlphaZ = AlphaZ
        self._EndCommand = ''
        self.__dict__.update(kwargs)
        if self._J is None:
            # 2D form. The optional shear terms come as a <$G $alphaY> pair,
            # so only append them when both are provided. (Fixes the
            # original's `self._self._G` typo, which raised AttributeError
            # whenever J was omitted, and the crash formatting a None AlphaY.)
            if self._G is not None and self._AlphaY is not None:
                self._EndCommand = '%f %f' % (self._G, self._AlphaY)
            self._CommandLine = 'section Elastic %d %f %f %f %s' % (
                self._id, self._E, self._A, self._Iz, self._EndCommand)
        else:
            # 3D form; <$alphaY $alphaZ> is an optional trailing pair.
            if self._AlphaY is not None and self._AlphaZ is not None:
                self._EndCommand = '%f %f' % (self._AlphaY, self._AlphaZ)
            self._CommandLine = 'section Elastic %d %f %f %f %f %f %f %s' % (
                self._id, self._E, self._A, self._Iz, self._Iy, self._G, self._J, self._EndCommand)
class WFSection2d(OpenSees):
    """Wide-flange (I-shaped) fiber section for 2D analysis.

    Tcl form:
        section WFSection2d $secTag $matTag $d $tw $bf $tf $Nfw $Nff

    $secTag  unique section tag
    $matTag  tag of the uniaxialMaterial assigned to each fiber
    $d       section depth
    $tw      web thickness
    $bf      flange width
    $tf      flange thickness
    $Nfw     number of fibers in the web
    $Nff     number of fibers in each flange
    """
    def __init__(self, id, Mat, d, tw, bf, tf, Nfw, Nff, **kwargs):
        # Stash constructor arguments on the instance.
        self._id = id
        self._Mat = Mat
        self._d = d
        self._tw = tw
        self._bf = bf
        self._tf = tf
        self._Nfw = Nfw
        self._Nff = Nff
        self.__dict__.update(kwargs)
        # Assemble the Tcl command from the stored fields.
        fields = (self._id, self._Mat.id, self._d, self._tw,
                  self._bf, self._tf, self._Nfw, self._Nff)
        self._CommandLine = 'section WFSection2d %d %d %f %f %f %f %f %f' % fields
class RCSection2d(OpenSees):
    """Simplified rectangular reinforced-concrete fiber section (2D).

    Tcl form:
        section RCSection2d $secTag $coreTag $coverTag $steelTag $d $b $cover
            $Atop $Abot $Aside $Nfcore $Nfcover $Nfs

    $secTag   unique section tag
    $coreTag  tag of uniaxialMaterial assigned to each fiber in the core region
    $coverTag tag of uniaxialMaterial assigned to each fiber in the cover region
    $steelTag tag of uniaxialMaterial assigned to each reinforcing bar
    $d        section depth
    $b        section width
    $cover    cover depth (assumed uniform around perimeter)
    $Atop     area of reinforcing bars in top layer
    $Abot     area of reinforcing bars in bottom layer
    $Aside    area of reinforcing bars on intermediate layers
    $Nfcore   number of fibers through the core depth
    $Nfcover  number of fibers through the cover depth
    $Nfs      number of bars on the top and bottom rows of reinforcement
              (Nfs-2 bars will be placed on the side rows)
    """
    def __init__(self, id, coreMat, coverMat, steelMat, d, b, cover, Atop, Abot, Aside, Nfcore, Nfcover, Nfs, **kwargs):
        self._id = id
        self._coverMat = coverMat
        self._steelMat = steelMat
        self._coreMat = coreMat
        self._d = d
        self._b = b
        self._cover = cover
        self._Atop = Atop
        self._Abot = Abot
        self._Aside = Aside
        self._Nfcore = Nfcore
        self._Nfcover = Nfcover
        self._Nfs = Nfs
        self.__dict__.update(kwargs)
        # Fixed: the original format string had only three leading %d for the
        # four integer tags (secTag, coreTag, coverTag, steelTag) — 12
        # placeholders for 13 arguments — raising TypeError on construction.
        self._CommandLine = 'section RCSection2d %d %d %d %d %f %f %f %f %f %f %d %d %d' % (
            self._id, self._coreMat.id, self._coverMat.id, self._steelMat.id,
            self._d, self._b, self._cover, self._Atop, self._Abot, self._Aside,
            self._Nfcore, self._Nfcover, self._Nfs)
class NDFiber(OpenSees):
    """NDFiber section: a collection of fiber/patch/layer commands using
    NDMaterials.

    Tcl form:
        section NDFiber $secTag {
            fiber...
            patch...
            layer...
        }

    $secTag  unique tag among all sections
    fibers   objects whose ``_CommandLine`` emits a fiber/patch/layer command
    """
    def __init__(self, id, fibers, **kwargs):
        self._id = id
        self._fibers = fibers
        self.__dict__.update(kwargs)
        # Fixed: the command previously said 'section Fiber', which silently
        # created a plain FiberSection instead of an NDFiberSection.
        self._CommandLine = 'section NDFiber %d { \n'%(self._id)+''.join(map(lambda x: ' %s\n'%x._CommandLine, self._fibers))+'}'
class Aggregator(OpenSees):
    """Section Aggregator: groups uniaxial force-deformation relationships
    into one section, optionally on top of an existing section.

    Tcl form:
        section Aggregator $secTag $matTag1 $dof1 $matTag2 $dof2 ... <-section $sectionTag>

    $secTag      unique section tag
    $matTagN     tags of previously-defined UniaxialMaterial objects
    $dofN        force-deformation quantity modeled by each material; one of
                 P, Mz, Vy, My, Vz, T
    $sectionTag  optional previously-defined Section the materials are
                 aggregated onto
    """
    def __init__(self, id, MatList, DOFList, Section=None, **kwargs):
        self._id = id
        self._MatList = MatList
        self._DOFList = DOFList
        self._Section = Section
        self.__dict__.update(kwargs)
    @property
    def CommandLine(self):
        # DOF codes are strings ('P', 'Mz', ...), so they must be formatted
        # with %s. The no-section branch previously used '%d %d', which
        # raised TypeError for every string DOF code.
        pairs = ''.join(map(lambda x: ' %d %s' % (x[0].id, x[1]), zip(*[self._MatList, self._DOFList])))
        if self._Section is None:
            self._CommandLine = 'section Aggregator %d %s' % (self._id, pairs)
        else:
            self._CommandLine = 'section Aggregator %d %s -section %d' % (self._id, pairs, self._Section.id)
        return self._CommandLine
class Uniaxial(OpenSees):
    """Wrap a single uniaxial material as one section response quantity.

    Tcl form:
        section Uniaxial $secTag $matTag $quantity

    $secTag    unique section tag
    $matTag    tag of the uniaxial material
    $quantity  force-deformation quantity carried by this section; one of
               P (axial), Mz, Vy, My, Vz or T (torsion)
    """
    def __init__(self, id, Mat, quantity):
        # Store the inputs, then build the Tcl command once.
        self._id = id
        self._Mat = Mat
        self._quantity = quantity
        fields = (self._id, self._Mat.id, self._quantity)
        self._CommandLine = 'section Uniaxial %d %d %s' % fields
class ElasticMembranePlateSection(OpenSees):
    """Elastic membrane-plate section.

    Tcl form:
        section ElasticMembranePlateSection $secTag $E $nu $h $rho

    $secTag  unique section tag
    $E       Young's Modulus
    $nu      Poisson's Ratio
    $h       depth (thickness) of the section
    $rho     mass density
    """
    def __init__(self, id, E, nu, h, rho, **kwargs):
        # Keep the raw inputs around, then render the Tcl command.
        self._id = id
        self._E = E
        self._nu = nu
        self._h = h
        self._rho = rho
        self.__dict__.update(kwargs)
        fields = (self._id, self._E, self._nu, self._h, self._rho)
        self._CommandLine = 'section ElasticMembranePlateSection %d %f %f %f %f' % fields
class PlateFiber(OpenSees):
    """Plate-fiber section.

    Tcl form:
        section PlateFiber $secTag $matTag $h

    $secTag  unique section tag
    $matTag  nDMaterial tag assigned to each fiber
    $h       plate thickness
    """
    def __init__(self, id, Mat, h):
        # Store inputs, then render the Tcl command string.
        self._id = id
        self._Mat = Mat
        self._h = h
        fields = (self._id, self._Mat.id, self._h)
        self._CommandLine = 'section PlateFiber %d %d %f' % fields
class FiberSection(OpenSees):
    """Fiber section: a cross-section assembled from fibers, patches and
    layers, each carrying a UniaxialMaterial, an area and a (y, z) location.

    Tcl form:
        section Fiber $secTag <-GJ $GJ> {
            fiber...
            patch...
            layer...
        }

    $secTag  unique tag among sections
    $GJ      optional linear-elastic torsional stiffness (default: none)
    fibers   objects whose ``_CommandLine`` emits a fiber/patch/layer command
    """
    def __init__(self, id, fibers, GJ=None, **kwargs):
        self._id = id
        self._fibers = fibers
        self._GJ = GJ
        self.__dict__.update(kwargs)
        # The -GJ flag is appended only when a torsional stiffness is given.
        self._EndCommand = '' if self._GJ is None else '-GJ %f' % self._GJ
        self._CommandLine = 'section Fiber %d %s { \n'%(self._id, self._EndCommand)+''.join(map(lambda x: ' %s\n'%x._CommandLine, self._fibers))+'}'
class Fiber:
    class Fiber(object):
        """Single fiber of a fiber section.

        Tcl form:
            fiber $yLoc $zLoc $A $matTag

        $yLoc    y coordinate of the fiber in the section (local coordinates)
        $zLoc    z coordinate of the fiber in the section (local coordinates)
        $A       area of the fiber
        $matTag  material tag associated with this fiber (UniaxialMaterial
                 tag for a FiberSection, NDMaterial tag for an NDFiberSection)
        """
        def __init__(self, yLoc, zLoc, A, Mat, **kwargs):
            self._yLoc = yLoc
            self._zLoc = zLoc
            self._A = A
            self._Mat = Mat
            self.__dict__.update(kwargs)
            # Fixed: the y coordinate was read as `self.yLoc` (no underscore),
            # which raised AttributeError for every fiber constructed.
            self._CommandLine = 'fiber %f %f %f %d'%(self._yLoc, self._zLoc, self._A, self._Mat.id)
class Layer:
class Straight(object):
"""
layer straight $matTag $numFiber $areaFiber $yStart $zStart $yEnd $zEnd
$matTag material tag of previously created material (UniaxialMaterial tag for a FiberSection or NDMaterial tag for use in an NDFiberSection)
$numFibers number of fibers along line
$areaFiber area of each fiber
$yStart $zEnd y and z-coordinates of first fiber in line (local | |
<reponame>mikespub-org/remko-webapp2
# Copyright 2011 webapp2 AUTHORS.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
webapp2_extras.appengine.auth.models
====================================
Auth related models.
"""
import time
try:
from ndb import model
except ImportError: # pragma: no cover
from google.appengine.ext.ndb import model
from webapp2_extras import auth, security
class Unique(model.Model):
    """A model to store unique values.

    The only purpose of this model is to "reserve" values that must be unique
    within a given scope, as a workaround because datastore doesn't support
    the concept of uniqueness for entity properties.

    For example, suppose we have a model `User` with three properties that
    must be unique across a given group: `username`, `auth_id` and `email`::

        class User(model.Model):
            username = model.StringProperty(required=True)
            auth_id = model.StringProperty(required=True)
            email = model.StringProperty(required=True)

    To ensure property uniqueness when creating a new `User`, we first create
    `Unique` records for those properties, and if everything goes well we can
    save the new `User` record::

        @classmethod
        def create_user(cls, username, auth_id, email):
            # Assemble the unique values for a given class and attribute scope.
            uniques = [
                'User.username.%s' % username,
                'User.auth_id.%s' % auth_id,
                'User.email.%s' % email,
            ]
            # Create the unique username, auth_id and email.
            success, existing = Unique.create_multi(uniques)
            if success:
                # The unique values were created, so we can save the user.
                user = User(username=username, auth_id=auth_id, email=email)
                user.put()
                return user
            else:
                # At least one of the values is not unique.
                # Make a list of the property names that failed.
                props = [name.split('.', 2)[1] for name in uniques]
                raise ValueError('Properties %r are not unique.' % props)

    Based on the idea from http://goo.gl/pBQhB
    """
    @classmethod
    def create(cls, value):
        """Creates a new unique value.
        :param value:
            The value to be unique, as a string.
            The value should include the scope in which the value must be
            unique (ancestor, namespace, kind and/or property name).
            For example, for a unique property `email` from kind `User`, the
            value can be `User.email:me@example.com`. In this case `User.email`
            is the scope, and `me@example.com` is the value to be unique.
        :returns:
            True if the unique value was created, False otherwise.
        """
        # The existence check and the put() must be atomic, so both run
        # inside a single datastore transaction; txn returns the new key on
        # success and None when the value already exists.
        def txn(e):
            return e.put() if not e.key.get() else None
        # The value itself is the entity's key id, so uniqueness is enforced
        # by key identity.
        entity = cls(key=model.Key(cls, value))
        return model.transaction(lambda: txn(entity)) is not None
    @classmethod
    def create_multi(cls, values):
        """Creates multiple unique values at once.
        :param values:
            A sequence of values to be unique. See :meth:`create`.
        :returns:
            A tuple (bool, list_of_keys). If all values were created, bool is
            True and list_of_keys is empty. If one or more values weren't
            created, bool is False and the list contains all the values that
            already existed in datastore during the creation attempt.
        """
        # Maybe do a preliminary check, before going for transactions?
        # entities = model.get_multi(keys)
        # existing = [entity.key.id() for entity in entities if entity]
        # if existing:
        #    return False, existing
        # Create all records transactionally.
        def func(entity):
            # Same insert-if-absent as in create(): key on success, else None.
            return entity.put() if not entity.key.get() else None
        keys = [model.Key(cls, value) for value in values]
        entities = [cls(key=key) for key in keys]
        # One transaction per value; each element is the created key or None.
        created = [model.transaction(lambda: func(e)) for e in entities]
        if created != keys:
            # A poor man's "rollback": delete all recently created records.
            model.delete_multi(k for k in created if k)
            return False, [k.id() for k in keys if k not in created]
        return True, []
    @classmethod
    def delete_multi(cls, values):
        """Deletes multiple unique values at once.
        :param values:
            A sequence of values to be deleted.
        """
        # Keys are rebuilt from the values, mirroring how they were created.
        return model.delete_multi(model.Key(cls, v) for v in values)
class UserToken(model.Model):
    """Stores validation tokens for users.

    Each token is keyed by ``{user_id}.{subject}.{token}`` so it can be
    fetched directly by key when all three parts are known.
    """
    # Creation/update timestamps maintained automatically by ndb.
    created = model.DateTimeProperty(auto_now_add=True)
    updated = model.DateTimeProperty(auto_now=True)
    # Owning user's unique ID stored as a string; not indexed because lookups
    # go through the entity key or the subject/token query below.
    user = model.StringProperty(required=True, indexed=False)
    subject = model.StringProperty(required=True)
    token = model.StringProperty(required=True)
    @classmethod
    def get_key(cls, user, subject, token):
        """Returns a token key.
        :param user:
            User unique ID.
        :param subject:
            The subject of the key. Examples:
            - 'auth'
            - 'signup'
        :param token:
            Randomly generated token.
        :returns:
            ``model.Key`` containing a string id in the following format:
            ``{user_id}.{subject}.{token}``
        """
        return model.Key(cls, f"{str(user)}.{subject}.{token}")
    @classmethod
    def create(cls, user, subject, token=None):
        """Creates a new token for the given user.
        :param user:
            User unique ID.
        :param subject:
            The subject of the key. Examples:
            - 'auth'
            - 'signup'
        :param token:
            Optionally an existing token may be provided.
            If None, a random token will be generated.
        :returns:
            The newly created :class:`UserToken`.
        """
        user = str(user)
        # 128 bits of entropy from the security helper when no token given.
        token = token or security.generate_random_string(entropy=128)
        key = cls.get_key(user, subject, token)
        entity = cls(key=key, user=user, subject=subject, token=token)
        entity.put()
        return entity
    @classmethod
    def get(cls, user=None, subject=None, token=None):
        """Fetches a user token.
        :param user:
            User unique ID.
        :param subject:
            The subject of the key. Examples:
            - 'auth'
            - 'signup'
        :param token:
            The existing token needing verified.
        :returns:
            A :class:`UserToken` or None if the token does not exist.
        """
        # Fast path: with all three parts the key can be built directly,
        # avoiding a query.
        if user and subject and token:
            return cls.get_key(user, subject, token).get()
        # Query fallback requires at least subject and token.
        assert (
            subject and token
        ), "subject and token must be provided to UserToken.get()."
        return cls.query(cls.subject == subject, cls.token == token).get()
class User(model.Expando):
"""Stores user authentication credentials or authorization ids."""
#: The model used to ensure uniqueness.
unique_model = Unique
#: The model used to store tokens.
token_model = UserToken
created = model.DateTimeProperty(auto_now_add=True)
updated = model.DateTimeProperty(auto_now=True)
# ID for third party authentication, e.g. 'google:username'. UNIQUE.
auth_ids = model.StringProperty(repeated=True)
# Hashed password. Not required because third party authentication
# doesn't use password.
password = model.StringProperty()
    def get_id(self):
        """Returns this user's unique ID, which can be an integer or string."""
        # The datastore key id is the canonical user identifier.
        return self._key.id()
def add_auth_id(self, auth_id):
"""A helper method to add additional auth ids to a User
:param auth_id:
String representing a unique id for the user. Examples:
- own:username
- google:username
:returns:
A tuple (boolean, info). The boolean indicates if the user
was saved. If creation succeeds, ``info`` is the user entity;
otherwise it is a list of duplicated unique properties that
caused creation to fail.
"""
self.auth_ids.append(auth_id)
unique = f"{self.__class__.__name__}.auth_id:{auth_id}"
ok = self.unique_model.create(unique)
if ok:
self.put()
return True, self
else:
return False, ["auth_id"]
    @classmethod
    def get_by_auth_id(cls, auth_id):
        """Returns a user object based on an auth_id.
        :param auth_id:
            String representing a unique id for the user. Examples:
            - own:username
            - google:username
        :returns:
            A user object.
        """
        # auth_ids is a repeated property, so this matches any of the
        # user's registered ids.
        return cls.query(cls.auth_ids == auth_id).get()
    @classmethod
    def get_by_auth_token(cls, user_id, token):
        """Returns a user object based on a user ID and token.
        :param user_id:
            The user_id of the requesting user.
        :param token:
            The token string to be verified.
        :returns:
            A tuple ``(User, timestamp)``, with a user object and
            the token timestamp, or ``(None, None)`` if both were not found.
        """
        token_key = cls.token_model.get_key(user_id, "auth", token)
        user_key = model.Key(cls, user_id)
        # Use get_multi() to save a RPC call.
        valid_token, user = model.get_multi([token_key, user_key])
        if valid_token and user:
            # Token creation time as a Unix timestamp (seconds).
            timestamp = int(time.mktime(valid_token.created.timetuple()))
            return user, timestamp
        return None, None
    @classmethod
    def get_by_auth_password(cls, auth_id, password):
        """Returns a user object, validating password.
        :param auth_id:
            Authentication id.
        :param password:
            The user password (plain text; compared against the stored hash).
        :returns:
            A user object, if found and password matches.
        :raises:
            ``auth.InvalidAuthIdError`` or ``auth.InvalidPasswordError``.
        """
        user = cls.get_by_auth_id(auth_id)
        if not user:
            raise auth.InvalidAuthIdError()
        # check_password_hash takes (plain_password, stored_hash).
        if not security.check_password_hash(password, user.password):
            raise auth.InvalidPasswordError()
        return user
    @classmethod
    def validate_token(cls, user_id, subject, token):
        """Checks for existence of a token, given user_id, subject and token.
        :param user_id:
            User unique ID.
        :param subject:
            The subject of the key. Examples:
            - 'auth'
            - 'signup'
        :param token:
            The token string to be validated.
        :returns:
            A :class:`UserToken` or None if the token does not exist.
        """
        # Delegates to the configured token model (UserToken by default).
        return cls.token_model.get(user=user_id, subject=subject, token=token)
    @classmethod
    def create_auth_token(cls, user_id):
        """Creates a new authorization token for a given user ID.
        :param user_id:
            User unique ID.
        :returns:
            A string with the authorization token.
        """
        # Only the random token string is returned, not the token entity.
        return cls.token_model.create(user_id, "auth").token
    @classmethod
    def validate_auth_token(cls, user_id, token):
        """Shortcut for :meth:`validate_token` with subject ``'auth'``."""
        return cls.validate_token(user_id, "auth", token)
@classmethod
def delete_auth_token(cls, user_id, token):
"""Deletes a given authorization token.
:param user_id:
User unique ID.
:param token:
A string with | |
icon_url=member.avatar.url)
await ctx.send(embed=e)
async def buy_h_r(self, ctx, member):
ctx_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
fr = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "h_rifle")
value = 50000
if not ctx_data:
await self.bot.db.execute("INSERT INTO econ(user_id, bank, wallet) VALUES($1, 250, 125)", member.id)
e = discord.Embed(title="Ayo, i've opened a account for you!", description="Go ahead and run the beg command to get started", color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url=member.avatar.url)
await ctx.send(embed=e)
if fr:
e = discord.Embed(title=f"You alredy have a hunting rifle!", color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url=member.avatar.url)
await ctx.send(embed=e)
return
if value > ctx_data["wallet"]:
e = discord.Embed(description=f"You are trying to buy an item costing more money than you have\n50000 is the price!", color=discord.Color.dark_blue())
await ctx.send(embed=e)
return
else:
await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", ctx_data["wallet"] - value, member.id)
await self.bot.db.execute("INSERT INTO inve(user_id, item, amt) VALUES($1, $2, $3)", member.id, "h_rifle", 1)
e = discord.Embed(title=f"Bought Item", description=f'You bought a hunting rifle run the `nr.hunt` command to make use of it', color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url=member.avatar.url)
await ctx.send(embed=e)
async def buy_alc(self, ctx, member, amount):
ctx_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
fr = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "alcohol")
fr_data = await self.bot.db.fetchrow("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "alcohol")
value = 8500*amount
if not ctx_data:
await self.bot.db.execute("INSERT INTO econ(user_id, bank, wallet) VALUES($1, 250, 125)", member.id)
e = discord.Embed(title="Ayo, i've opened a account for you!", description="Go ahead and run the beg command to get started", color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url=member.avatar.url)
await ctx.send(embed=e)
if value > ctx_data["wallet"]:
e = discord.Embed(description=f"You are trying to buy an item costing more money than you have\n{value} is the price!", color=discord.Color.dark_blue())
await ctx.send(embed=e)
return
else:
await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", ctx_data["wallet"] - value, member.id)
if not fr_data:
amt = int(amount)
await self.bot.db.execute("INSERT INTO inve(user_id, item, amt) VALUES($1, $2, $3)", member.id, "alcohol", amt)
e = discord.Embed(title=f"Bought Item(s)", description=f'You bought `{amount}` alcohol run the `nr.use alcohol` command to make use of it!', color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url=member.avatar.url)
await ctx.send(embed=e)
return
if fr_data:
amt = int(amount) + fr_data['amt']
await self.bot.db.execute("UPDATE inve SET amt = $1 WHERE user_id = $2 AND item = $3", amt, ctx.author.id, "alcohol")
e = discord.Embed(title=f"Bought Item(s)", description=f'You bought some alcohol run the `nr.use alcohol` command to make use of it!', color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url=member.avatar.url)
await ctx.send(embed=e)
return
async def buy_ls(self, ctx, member, amount):
ctx_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
ls = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "life_saver")
ls_data = await self.bot.db.fetchrow("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "life_saver")
value = 25000*amount
if not ctx_data:
await self.bot.db.execute("INSERT INTO econ(user_id, bank, wallet) VALUES($1, 250, 125)", member.id)
e = discord.Embed(title="Ayo, i've opened a account for you!", description="Go ahead and run the beg command to get started", color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url=member.avatar.url)
await ctx.send(embed=e)
if value > ctx_data["wallet"]:
e = discord.Embed(description=f"You are trying to buy an item costing more money than you have\n{value} is the price!", color=discord.Color.dark_blue())
await ctx.send(embed=e)
return
else:
await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", ctx_data["wallet"] - value, member.id)
if not ls_data:
amt = int(amount)
await self.bot.db.execute("INSERT INTO inve(user_id, item, amt) VALUES($1, $2, $3)", member.id, "life_saver", amt)
e = discord.Embed(title=f"Bought Item(s)", description=f'You bought `{amount}` life saver(s) the next time you die your life will be saved', color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url=member.avatar.url)
await ctx.send(embed=e)
return
if ls_data:
amt = int(amount) + ls_data['amt']
await self.bot.db.execute("UPDATE inve SET amt = $1 WHERE user_id = $2 AND item = $3", amt, ctx.author.id, "life_saver")
e = discord.Embed(title=f"Bought Item(s)", description=f'You bought `{amount}` life saver(s) the next time you die your life will be saved', color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url=member.avatar.url)
await ctx.send(embed=e)
return
async def buy_drums(self, ctx, member):
ctx_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
fr = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "drum")
value = 100000
if not ctx_data:
await self.bot.db.execute("INSERT INTO econ(user_id, bank, wallet) VALUES($1, 250, 125)", member.id)
e = discord.Embed(title="Ayo, i've opened a account for you!", description="Go ahead and run the beg command to get started", color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url=member.avatar.url)
await ctx.send(embed=e)
if fr:
e = discord.Embed(title=f"You alredy have a drum!", color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url=member.avatar.url)
await ctx.send(embed=e)
return
if value > ctx_data["wallet"]:
e = discord.Embed(description=f"You are trying to buy an item costing more money than you have\n100000 is the price!", color=discord.Color.dark_blue())
await ctx.send(embed=e)
return
else:
await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", ctx_data["wallet"] - value, member.id)
await self.bot.db.execute("INSERT INTO inve(user_id, item, amt) VALUES($1, $2, $3)", member.id, "drum", 1)
e = discord.Embed(title=f"Bought Item", description=f'You bought a drum run the `nr.use drum` command to make use of it', color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url=member.avatar.url)
await ctx.send(embed=e)
async def buy_guitar(self, ctx, member):
ctx_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
fr = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "guitar")
value = 75000
if not ctx_data:
await self.bot.db.execute("INSERT INTO econ(user_id, bank, wallet) VALUES($1, 250, 125)", member.id)
e = discord.Embed(title="Ayo, i've opened a account for you!", description="Go ahead and run the beg command to get started", color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url=member.avatar.url)
await ctx.send(embed=e)
if fr:
e = discord.Embed(title=f"You alredy have a drum!", color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url=member.avatar.url)
await ctx.send(embed=e)
return
if value > ctx_data["wallet"]:
e = discord.Embed(description=f"You are trying to buy an item costing more money than you have\n75000 is the price!", color=discord.Color.dark_blue())
await ctx.send(embed=e)
return
else:
await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", ctx_data["wallet"] - value, member.id)
await self.bot.db.execute("INSERT INTO inve(user_id, item, amt) VALUES($1, $2, $3)", member.id, "guitar", 1)
e = discord.Embed(title=f"Bought Item", description=f'You bought a guitar run the `nr.use guitar` command to make use of it', color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url=member.avatar.url)
await ctx.send(embed=e)
async def buy_nrmdl(self, ctx, member, amount):
ctx_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
ls = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "nr_medal")
ls_data = await self.bot.db.fetchrow("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "nr_medal")
value = 10000000*amount
if not ctx_data:
await self.bot.db.execute("INSERT INTO econ(user_id, bank, wallet) VALUES($1, 250, 125)", member.id)
e = discord.Embed(title="Ayo, i've opened a account for you!", description="Go ahead and run the beg command to get started", color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url=member.avatar.url)
await ctx.send(embed=e)
if value > ctx_data["wallet"]:
e = discord.Embed(description=f"You are trying to buy an item costing more money than you have\n{value} is the price!", color=discord.Color.dark_blue())
await ctx.send(embed=e)
return
else:
await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", ctx_data["wallet"] - value, member.id)
if not ls_data:
amt = int(amount)
await self.bot.db.execute("INSERT INTO inve(user_id, item, amt) VALUES($1, $2, $3)", member.id, "nr_medal", amt)
e = discord.Embed(title=f"Bought Item(s)", description=f'You bought `{amount}` Nora medal(s) welcome to the money gang', color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url=member.avatar.url)
await ctx.send(embed=e)
return
if ls_data:
amt = int(amount) + ls_data['amt']
await self.bot.db.execute("UPDATE inve SET amt = $1 WHERE user_id = $2 AND item = $3", amt, ctx.author.id, "nr_medal")
e = discord.Embed(title=f"Bought Item(s)", description=f'You bought `{amount}` Nora medal(s) welcome to the money gang', color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url=member.avatar.url)
await ctx.send(embed=e)
return
async def buy_nr_trophy(self, ctx, member, amount):
ctx_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
ls = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "nr_trophy")
ls_data = await self.bot.db.fetchrow("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "nr_trophy")
value = int(50000000*amount)
if not ctx_data:
await self.bot.db.execute("INSERT INTO econ(user_id, bank, wallet) VALUES($1, 250, 125)", member.id)
e = discord.Embed(title="Ayo, i've opened a account for you!", description="Go ahead and run the beg command to get started", color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url=member.avatar.url)
await ctx.send(embed=e)
if value > ctx_data["wallet"]:
e = discord.Embed(description=f"You are trying to | |
# coding=utf-8
from app.customer.models.vip import UserVip, Vip
from app.customer.models.user import User
import datetime
import time
from mongoengine import *
from base.settings import CHATPAMONGO
from api.convert.convert_user import *
from app.customer.models.real_video_verify import RealVideoVerify
from app.util.messageque.msgsender import MessageSender
import logging
import international
from app_redis.user.models.user import UserRedis
# NOTE(review): "<PASSWORD>" below is an anonymization placeholder left by a
# scrubbing tool — as written this line is a SyntaxError. Restore the real
# credential reference (presumably CHATPAMONGO.password — TODO confirm).
connect(CHATPAMONGO.db, host=CHATPAMONGO.host, port=CHATPAMONGO.port, username=CHATPAMONGO.username,
        password=<PASSWORD>)
# 用户动态 (朋友圈)
class UserMoment(Document):
    """A user feed post ("moment"): text, photos, or a paid private video.

    Also carries like/comment counters, moderation state (``show_status``),
    audience-segmentation flags (``is_pure``) and ranking/pinning data.
    """

    user_id = IntField(verbose_name=u'用户id', required=True)
    like_count = IntField(verbose_name=u'用户点赞数', required=True)
    like_user_list = ListField(IntField(verbose_name=u"用户id"), verbose_name=u"点赞用户_id")
    comment_count = IntField(verbose_name=u"用户评论数")
    img_list = ListField(verbose_name=u"图片列表")
    content = StringField(verbose_name=u"用户动态的文字内容")
    # BUGFIX: pass the callable instead of calling it.  ``default=datetime.datetime.now()``
    # was evaluated once at import time, so every document shared the same stale default.
    create_time = DateTimeField(verbose_name=u"创建时间", default=datetime.datetime.now)
    show_status = IntField(verbose_name=u"是否展示", default=1)  # 1: shown 2: blocked 3: reported 4: partially blocked 5: moderation pending
    delete_status = IntField(verbose_name=u"删除状态", default=1)  # 1: not deleted 2: deleted
    ispass = IntField(verbose_name=u"是否忽略")  # 1: ignored 2: not ignored
    type = IntField(verbose_name=u"动态的类型")  # 1: plain moment 2: featured photo 3: private video 4: plain album
    video_id = StringField(verbose_name=u"私房视频id", max_length=64)
    cover_url = StringField(verbose_name=u"封面照片地址", max_length=256)
    video_url = StringField(verbose_name=u"视频地址", max_length=256)
    price = IntField(verbose_name=u"私房视频价格")
    is_public = IntField(verbose_name=u"是否公开")  # 1: public 2: not public
    rank_score = FloatField(verbose_name=u"排名得分")
    is_pure = IntField(verbose_name=u"是否清纯")  # 1: "pure" anchor 2: old user 3: old anchor 4: new user
    is_top = IntField(verbose_name=u"是否置顶")  # 1: pinned
    top_time = DateTimeField(verbose_name=u"置顶时间")

    @classmethod
    def create(cls, user_id, picture_urls, content):
        """Create a plain (type 1) moment from comma-separated ``picture_urls`` and text.

        Moments with images start in show_status 5 (moderation pending) and an
        async moderation job is triggered; text-only moments are shown at once.
        """
        user_moment = UserMoment()
        user_moment.user_id = user_id
        user_moment.like_count = 0
        user_moment.like_user_list = []
        user_moment.comment_count = 0
        for picture_url in picture_urls.split(','):
            if picture_url:
                # Renamed from ``dict`` to avoid shadowing the builtin.
                img_item = {
                    "url": User.convert_http_to_https(picture_url),
                    "status": 1,
                }
                user_moment.img_list.append(img_item)
        user_moment.content = content
        user_moment.show_status = 5
        user_moment.delete_status = 1
        user_moment.ispass = 2
        user_moment.type = 1
        user_moment.is_public = 1
        user_moment.create_time = datetime.datetime.now()
        user_moment.save()
        if user_moment.img_list:
            # Kick off asynchronous image moderation for the whole moment.
            MessageSender.send_picture_detect(pic_url="", user_id=0, pic_channel=0, source=2, obj_id=str(user_moment.id))
        else:
            # No images: nothing to moderate, show immediately.
            user_moment.update(set__show_status=1)
        user = User.objects.filter(id=user_id).first()
        pure_id = "597ef85718ce420b7d46ce11"  # label id that marks a "pure" anchor
        if user.is_video_auth == 1:
            # Video-verified anchor: "pure" when carrying the pure label, else "old anchor".
            if user.label and pure_id in user.label:
                user_moment.update(set__is_pure=1)
            else:
                user_moment.update(set__is_pure=3)
        else:
            # Plain user: "old user" when flagged in redis, otherwise "new user".
            if UserRedis.is_target_user(user.id):
                user_moment.update(set__is_pure=2)
            else:
                user_moment.update(set__is_pure=4)

    @classmethod
    def update_like(cls, status, user_id, moment_id):
        """Like (status == 1) or unlike (status == 2) the given moment."""
        user_moment = UserMoment.objects.filter(id=moment_id).first()
        like_user_list = user_moment.like_user_list
        if status == 1:
            # Like: count each user at most once.
            if int(user_id) not in like_user_list:
                user_moment.update(inc__like_count=1)
                user_moment.update(push__like_user_list=user_id)
                # "about me" notification, unless the author liked their own post.
                if int(user_id) != int(user_moment.user_id):
                    AboutMeMessage.create_about_me(user_moment.user_id, user_id, user_moment.user_id, moment_id, 1, "")
                    MessageSender.send_about_me_message(user_moment.user_id)
        elif status == 2:
            # Unlike.  BUGFIX: the original decremented like_count unconditionally
            # (even for users who never liked), which could drive the counter
            # negative; decrement only alongside an actual removal.  The
            # membership test uses int(user_id) to mirror the like branch.
            if int(user_id) in like_user_list:
                user_moment.update(dec__like_count=1)
                user_moment.update(pull__like_user_list=user_id)

    def normal_info(self):
        """Serialize this moment into a plain dict for API responses.

        Returns None when the author account no longer exists.
        """
        user = User.objects.filter(id=self.user_id).first()
        if not user:
            # BUGFIX: guard before dereferencing -- the original read user.image
            # first and would raise AttributeError for a missing author.
            return None
        head_image = user.image
        age = User.get_age(user.birth_date)
        create_time = UserMoment.get_time(self.create_time)
        date_time = self.create_time.strftime('%Y-%m-%d')
        # Only images that passed moderation (status == 1) are exposed.
        img_list = []
        if self.img_list:
            for img in self.img_list:
                if int(img["status"]) == 1:
                    img_list.append(img["url"])
        moment_look = UserMomentLook.objects.filter(user_moment_id=str(self.id)).first()
        look_count = len(moment_look.user_id_list) if moment_look else 0
        price = self.price or 0
        moment_type = self.type or 1  # avoid shadowing the ``type`` builtin
        show_video = RealVideoVerify.objects(user_id=user.id, status=1).order_by("-update_time").first()
        real_video_auth = show_video.status if show_video else 3
        data = {
            "moment_id": str(self.id),
            "user_id": user.id,
            "gender": user.gender_desc,
            "head_image": head_image,
            "nickname": user.nickname,
            "age": age,
            "create_time": create_time,
            "img_list": img_list,
            "comment_count": self.comment_count,
            "like_count": self.like_count,
            "type": moment_type,
            "look_count": look_count,
            "content": self.content,
            "date_time": date_time,
            "video_id": self.video_id,
            "cover_url": self.cover_url,
            "video_url": self.video_url,
            "price": price,
            "real_video_auth": real_video_auth
        }
        user_vip = UserVip.objects.filter(user_id=self.user_id).first()
        if user_vip:
            vip = Vip.objects.filter(id=user_vip.vip_id).first()
            data["vip_icon"] = vip.icon_url
        return data

    # Posting-quota rules
    @classmethod
    def check_moment_count(cls, user):
        """
        Daily posting quotas:
            VIP user:   5 moments/day
            anchor VIP: 15 moments/day
            anchor:     10 moments/day
            plain user: 2 moments/day

        Returns (code, message): code 1 = allowed, code 2 = quota reached.
        """
        vip_count = 5
        anchor_vip_count = 15
        anchor_count = 10
        user_count = 2
        is_video = user.is_video_auth
        user_vip = UserVip.objects.filter(user_id=user.id).first()
        now = datetime.datetime.now()
        starttime = now.strftime("%Y-%m-%d 00:00:00")
        endtime = now.strftime('%Y-%m-%d 23:59:59')
        # Blocked (show_status == 2) and private moments don't count toward the quota.
        today_moment_count = UserMoment.objects.filter(user_id=user.id, show_status__ne=2, is_public=1,
                                                       create_time__gte=starttime, create_time__lte=endtime).count()
        code = 1
        message = ""
        if user_vip:
            if int(is_video) == 1:
                # anchor VIP
                if today_moment_count >= anchor_vip_count:
                    code = 2
                    message = u"播主VIP,每日动态发布最多15条"
            else:
                # user VIP
                if today_moment_count >= vip_count:
                    code = 2
                    message = u"用户VIP,每日动态发布最多5条"
        else:
            if int(is_video) == 1:
                # anchor
                if today_moment_count >= anchor_count:
                    code = 2
                    message = u"播主每日动态发布最多10条"
            else:
                # plain user
                if today_moment_count >= user_count:
                    code = 2
                    message = u"普通用户每日动态发布最多2条"
        return code, message

    @classmethod
    def get_time(cls, date_time):
        """Render ``date_time`` as a human-readable relative-age string (Chinese)."""
        now = datetime.datetime.now()
        second = time.mktime(now.timetuple()) - time.mktime(date_time.timetuple())
        if second <= 0:
            second = 0
        # Buckets, from "just now" up to absolute dates.
        if second == 0:
            interval = "刚刚"
        elif second < 30:
            interval = str(int(second)) + "秒以前"
        elif second >= 30 and second < 60:
            interval = "半分钟前"
        elif second >= 60 and second < 60 * 60:
            # >= 1 minute, < 1 hour
            minute = int(second / 60)
            interval = str(minute) + "分钟前"
        elif second >= 60 * 60 and second < 60 * 60 * 24:
            # >= 1 hour, < 24 hours
            hour = int((second / 60) / 60)
            interval = str(hour) + "小时前"
        elif second >= 60 * 60 * 24 and second <= 60 * 60 * 24 * 2:
            # >= 1 day, <= 2 days
            interval = "昨天" + date_time.strftime('%H:%M')
        elif second >= 60 * 60 * 24 * 2 and second <= 60 * 60 * 24 * 7:
            # > 2 days, <= 7 days
            day = int(((second / 60) / 60) / 24)
            interval = str(day) + "天前"
        elif second <= 60 * 60 * 24 * 365 and second >= 60 * 60 * 24 * 7:
            # > 7 days, <= 365 days
            interval = date_time.strftime('%Y-%m-%d %H:%M')
        elif second >= 60 * 60 * 24 * 365:
            # > 365 days
            interval = date_time.strftime('%Y-%m-%d %H:%M')
        else:
            interval = "0"
        return interval

    @classmethod
    def get_index_moments(cls, page, page_count, user):
        """Return one page of the home feed, segmented by viewer category.

        Pinned (is_top == 1) moments are prepended on page 1 for viewers that
        see the pinned section; anonymous/new viewers get the "pure" subset.
        """
        is_show_top = True
        if not user:
            is_pure = [1, 4]
        else:
            is_target = UserRedis.is_target_user(user.id)
            if user.is_video_auth == 1 and not UserRedis.is_pure_anchor(user.id):  # old anchor
                is_show_top = False
                is_pure = [1, 2, 3]
            elif user.is_video_auth != 1 and not is_target:  # new user
                is_pure = [1, 4]
            elif user.is_video_auth != 1 and is_target:  # old user
                is_pure = [1, 2, 3, 4]
                is_show_top = False
            else:  # new anchor: unsegmented feed, no pinned section
                return cls.objects.filter(show_status__in=[1, 3, 4], delete_status=1, is_public=1).order_by("-create_time")[(page - 1) * page_count:page * page_count]
        moment_list = []
        top_ids = []
        if is_show_top:
            # Pinned moments (first page only), excluded from the normal query below.
            top_list = cls.objects.filter(is_top=1).order_by("-top_time")
            if top_list:
                for top in top_list:
                    if int(page) == 1:
                        moment_list.append(top)
                    top_ids.append(str(top.id))
        # Regular moments.
        moments = cls.objects.filter(show_status__in=[1, 3, 4], id__nin=top_ids, delete_status=1, is_public=1, is_pure__in=is_pure).order_by("-create_time")[(page - 1) * page_count:page * page_count]
        if moments:
            for moment in moments:
                moment_list.append(moment)
        return moment_list
class UserComment(Document):
    """A comment on a moment (comment_type 1) or a reply to a comment (type 2)."""

    user_moment_id = StringField(verbose_name=u"用户发布动态的 id", max_length=64)
    user_id = IntField(verbose_name=u'用户id', required=True)
    # BUGFIX: callable default instead of a value frozen at import time.
    create_time = DateTimeField(verbose_name=u"创建时间", default=datetime.datetime.now)
    content = StringField(verbose_name=u"评论内容", max_length=512)
    comment_type = IntField(verbose_name=u"评论类型")  # 1: comment on the moment 2: reply to a comment
    reply_user_id = IntField(verbose_name=u"回复用户id")
    delete_status = IntField(verbose_name=u"删除状态", default=1)  # 1: not deleted 2: deleted

    @classmethod
    def create_comment(cls, moment_id, user_id, content, comment_type, reply_user_id):
        """Persist a new comment/reply, bump the moment's counter and notify."""
        user_coment = UserComment()
        user_coment.user_moment_id = moment_id
        user_coment.user_id = user_id
        user_coment.content = content
        user_coment.comment_type = comment_type
        if comment_type == 2:
            user_coment.reply_user_id = reply_user_id
        user_coment.delete_status = 1
        user_coment.create_time = datetime.datetime.now()
        user_coment.save()
        # Keep the cached counter on the moment in sync.
        user_moment = UserMoment.objects.filter(id=str(moment_id)).first()
        user_moment.update(inc__comment_count=1)
        # "about me" notifications.
        if int(comment_type) == 1:
            AboutMeMessage.create_about_me(user_moment.user_id, user_id, user_moment.user_id, str(moment_id), 2, content)
            MessageSender.send_about_me_message(user_moment.user_id)
        if int(comment_type) == 2:
            if int(user_id) == int(user_moment.user_id):
                # Author replying on their own moment: only notify the replied-to user.
                AboutMeMessage.create_about_me(reply_user_id, user_id, reply_user_id, str(moment_id), 3, content)
                MessageSender.send_about_me_message(reply_user_id)
            else:
                if int(reply_user_id) != int(user_moment.user_id):
                    # Third party replying: also notify the moment's author.
                    AboutMeMessage.create_about_me(user_moment.user_id, user_id, reply_user_id, str(moment_id), 3, content)
                    MessageSender.send_about_me_message(user_moment.user_id)
                AboutMeMessage.create_about_me(reply_user_id, user_id, reply_user_id, str(moment_id), 3, content)
                MessageSender.send_about_me_message(reply_user_id)
        return user_coment

    @classmethod
    def delete_comment(cls, comment_id, user_id):
        """Soft-delete a comment owned by ``user_id``.

        Returns True on success (or when the comment is already gone, making
        the operation idempotent) and False when the caller is not the owner
        or an error occurred.
        """
        try:
            user_comment = UserComment.objects.filter(id=str(comment_id)).first()
            if user_comment:
                if user_comment.user_id != int(user_id):
                    # Only the author may delete their own comment.
                    return False
                user_comment.delete_status = 2
                user_comment.save()
                # Recount live comments instead of decrementing so the cached
                # counter self-heals.  ``.count()`` replaces ``len(queryset)``
                # to avoid materializing every document.
                comment_count = UserComment.objects.filter(user_moment_id=user_comment.user_moment_id, delete_status=1).count()
                user_moment = UserMoment.objects.filter(id=user_comment.user_moment_id).first()
                user_moment.update(set__comment_count=comment_count)
                return True
        except Exception as e:  # BUGFIX: ``except X, e`` is Py2-only; ``as`` works on 2.6+/3.x
            logging.error("delete comment error:{0}".format(e))
            return False
        # Comment not found: treat the delete as already done.
        return True

    @classmethod
    def check_comment_count(cls, user):
        """Enforce the daily commenting quota for ``user``.

        Returns (code, message): code 1 = allowed, code 2 = quota reached.
        NOTE(review): vip_count is 15 but the VIP-user message says "最多5条";
        one of the two is wrong -- confirm the intended quota with product.
        """
        vip_count = 15
        anchor_vip_count = 15
        anchor_count = 10
        user_count = 2
        is_video = user.is_video_auth
        user_vip = UserVip.objects.filter(user_id=user.id).first()
        now = datetime.datetime.now()
        starttime = now.strftime("%Y-%m-%d 00:00:00")
        endtime = now.strftime('%Y-%m-%d 23:59:59')
        # Comments on non-public moments do not count toward the quota.
        ignore_moments = UserMoment.objects.filter(is_public=2)
        ignore_moment_ids = []
        if ignore_moments:
            for ignore_moment in ignore_moments:
                ignore_moment_ids.append(str(ignore_moment.id))
        today_comment_used_count = UserComment.objects.filter(user_id=user.id, delete_status=1, user_moment_id__nin=ignore_moment_ids,
                                                              create_time__gte=starttime, create_time__lte=endtime).count()
        code = 1
        message = ""
        if user_vip:
            if int(is_video) == 1:
                # anchor VIP
                if today_comment_used_count >= anchor_vip_count:
                    code = 2
                    message = u"播主VIP,每日动态评论最多15条"
            else:
                # user VIP
                if today_comment_used_count >= vip_count:
                    code = 2
                    message = u"用户VIP,每日动态评论最多5条"
        else:
            if int(is_video) == 1:
                # anchor
                if today_comment_used_count >= anchor_count:
                    code = 2
                    message = u"播主每日动态评论最多10条"
            else:
                # plain user
                if today_comment_used_count >= user_count:
                    code = 2
                    message = u"普通用户每日动态评论最多2条"
        return code, message

    def normal_info(self):
        """Serialize this comment for API responses; None if the author is gone."""
        user = User.objects.filter(id=self.user_id).first()
        if not user:
            return None
        create_time = UserMoment.get_time(self.create_time)
        data = {
            "moment_id": self.user_moment_id,
            "comment_id": str(self.id),
            "user_id": user.id,
            "nickname": user.nickname,
            "head_image": user.image,
            "create_time": create_time,
            "comment_content": self.content,
            "comment_type": self.comment_type,
        }
        user_vip = UserVip.objects.filter(user_id=user.id).first()
        if user_vip:
            vip = Vip.objects.filter(id=user_vip.vip_id).first()
            data["vip_icon"] = vip.icon_url
        if self.comment_type == 2:
            reply_user = User.objects.filter(id=self.reply_user_id).first()
            data["reply_user_id"] = self.reply_user_id
            data["reply_nickname"] = reply_user.nickname if reply_user else ""
        return data
class UserMomentLook(Document):
user_moment_id = StringField(verbose_name=u"用户发布动态的 id", max_length=64)
user_id_list = ListField(IntField(verbose_name=u"用户id"), verbose_name=u"看过用户_id")
@classmethod
def inc_look(cls, user_id, user_moment_id):
moment_look = UserMomentLook.objects.filter(user_moment_id=user_moment_id).first()
user_id = int(user_id)
if not moment_look:
new_look = | |
name must be specified as "name"')
if not conn:
conn = get_conn()
try:
data = conn.delete_storage_account(kwargs['name'])
return {'Success': 'The storage account was successfully deleted'}
except AzureMissingResourceHttpError as exc:
raise SaltCloudSystemExit('{0}: {1}'.format(kwargs['name'], exc.message))
def list_services(kwargs=None, conn=None, call=None):
    '''
    .. versionadded:: 2015.8.0
    List hosted services associated with the account
    CLI Example:
    .. code-block:: bash
        salt-cloud -f list_services my-azure
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The list_services function must be called with -f or --function.'
        )
    conn = conn or get_conn()
    ret = {}
    for service in conn.list_hosted_services().hosted_services:
        entry = object_to_dict(service)
        entry['name'] = service.service_name
        ret[service.service_name] = entry
    return ret
def show_service(kwargs=None, conn=None, call=None):
    '''
    .. versionadded:: 2015.8.0
    List hosted service properties
    CLI Example:
    .. code-block:: bash
        salt-cloud -f show_service my-azure name=my_service
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The show_service function must be called with -f or --function.'
        )
    conn = conn or get_conn()
    kwargs = kwargs or {}
    if 'name' not in kwargs:
        raise SaltCloudSystemExit('A name must be specified as "name"')
    properties = conn.get_hosted_service_properties(
        kwargs['name'],
        kwargs.get('details', False)
    )
    return object_to_dict(properties)
def create_service(kwargs=None, conn=None, call=None):
    '''
    .. versionadded:: 2015.8.0
    Create a new hosted service
    CLI Example:
    .. code-block:: bash
        salt-cloud -f create_service my-azure name=my_service label=my_service location='West US'
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The create_service function must be called with -f or --function.'
        )
    if not conn:
        conn = get_conn()
    if kwargs is None:
        kwargs = {}
    if 'name' not in kwargs:
        raise SaltCloudSystemExit('A name must be specified as "name"')
    if 'label' not in kwargs:
        raise SaltCloudSystemExit('A label must be specified as "label"')
    if 'location' not in kwargs and 'affinity_group' not in kwargs:
        raise SaltCloudSystemExit('Either a location or an affinity_group '
                                  'must be specified (but not both)')
    # BUGFIX: the error text promised "but not both", yet supplying both was
    # silently accepted and passed through to Azure; reject it up front.
    if 'location' in kwargs and 'affinity_group' in kwargs:
        raise SaltCloudSystemExit('Either a location or an affinity_group '
                                  'must be specified (but not both)')
    try:
        # Optional parameters fall back to None so the SDK applies defaults.
        conn.create_hosted_service(
            kwargs['name'],
            kwargs['label'],
            kwargs.get('description', None),
            kwargs.get('location', None),
            kwargs.get('affinity_group', None),
            kwargs.get('extended_properties', None),
        )
        return {'Success': 'The service was successfully created'}
    except AzureConflictHttpError:
        raise SaltCloudSystemExit('There was a conflict. This usually means that the service already exists.')
def delete_service(kwargs=None, conn=None, call=None):
    '''
    .. versionadded:: 2015.8.0
    Delete a specific service associated with the account
    CLI Examples:
    .. code-block:: bash
        salt-cloud -f delete_service my-azure name=my_service
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The delete_service function must be called with -f or --function.'
        )
    kwargs = kwargs or {}
    if 'name' not in kwargs:
        raise SaltCloudSystemExit('A name must be specified as "name"')
    conn = conn or get_conn()
    try:
        conn.delete_hosted_service(kwargs['name'])
    except AzureMissingResourceHttpError as exc:
        raise SaltCloudSystemExit('{0}: {1}'.format(kwargs['name'], exc.message))
    return {'Success': 'The service was successfully deleted'}
def list_disks(kwargs=None, conn=None, call=None):
    '''
    .. versionadded:: 2015.8.0
    List disks associated with the account
    CLI Example:
    .. code-block:: bash
        salt-cloud -f list_disks my-azure
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The list_disks function must be called with -f or --function.'
        )
    conn = conn or get_conn()
    return dict(
        (disk.name, object_to_dict(disk))
        for disk in conn.list_disks().disks
    )
def show_disk(kwargs=None, conn=None, call=None):
    '''
    .. versionadded:: 2015.8.0
    Return information about a disk
    CLI Example:
    .. code-block:: bash
        salt-cloud -f show_disk my-azure name=my_disk
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The get_disk function must be called with -f or --function.'
        )
    conn = conn or get_conn()
    kwargs = kwargs or {}
    if 'name' not in kwargs:
        raise SaltCloudSystemExit('A name must be specified as "name"')
    return object_to_dict(conn.get_disk(kwargs['name']))
# For consistency with Azure SDK
get_disk = show_disk  # exact alias: ``get_disk`` behaves identically to ``show_disk``
def cleanup_unattached_disks(kwargs=None, conn=None, call=None):
    '''
    .. versionadded:: 2015.8.0
    Cleans up all disks associated with the account, which are not attached.
    *** CAUTION *** This is a destructive function with no undo button, and no
    "Are you sure?" confirmation!
    CLI Examples:
    .. code-block:: bash
        salt-cloud -f cleanup_unattached_disks my-azure name=my_disk
        salt-cloud -f cleanup_unattached_disks my-azure name=my_disk delete_vhd=True
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The delete_disk function must be called with -f or --function.'
        )
    kwargs = kwargs or {}
    disks = list_disks(kwargs=kwargs, conn=conn, call='function')
    for disk_name in disks:
        info = disks[disk_name]
        if info['attached_to'] is not None:
            continue  # still attached to a VM -- leave it alone
        del_kwargs = {
            'name': info['name'],
            'delete_vhd': kwargs.get('delete_vhd', False)
        }
        log.info('Deleting disk {name}, deleting VHD: {delete_vhd}'.format(**del_kwargs))
        delete_disk(kwargs=del_kwargs, call='function')
    return True
def delete_disk(kwargs=None, conn=None, call=None):
    '''
    .. versionadded:: 2015.8.0
    Delete a specific disk associated with the account
    CLI Examples:
    .. code-block:: bash
        salt-cloud -f delete_disk my-azure name=my_disk
        salt-cloud -f delete_disk my-azure name=my_disk delete_vhd=True
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The delete_disk function must be called with -f or --function.'
        )
    kwargs = kwargs or {}
    if 'name' not in kwargs:
        raise SaltCloudSystemExit('A name must be specified as "name"')
    conn = conn or get_conn()
    try:
        conn.delete_disk(kwargs['name'], kwargs.get('delete_vhd', False))
    except AzureMissingResourceHttpError as exc:
        raise SaltCloudSystemExit('{0}: {1}'.format(kwargs['name'], exc.message))
    return {'Success': 'The disk was successfully deleted'}
def update_disk(kwargs=None, conn=None, call=None):
    '''
    .. versionadded:: 2015.8.0
    Update a disk's properties
    CLI Example:
    .. code-block:: bash
        salt-cloud -f update_disk my-azure name=my_disk label=my_disk
        salt-cloud -f update_disk my-azure name=my_disk new_name=another_disk
    '''
    if call != 'function':
        # BUGFIX: the message named the wrong function ('show_disk'),
        # an apparent copy-paste from the function above.
        raise SaltCloudSystemExit(
            'The update_disk function must be called with -f or --function.'
        )
    if not conn:
        conn = get_conn()
    if kwargs is None:
        kwargs = {}
    if 'name' not in kwargs:
        raise SaltCloudSystemExit('A name must be specified as "name"')
    # Fetch the current properties so unspecified fields keep their values.
    old_data = show_disk(kwargs={'name': kwargs['name']}, call='function')
    conn.update_disk(
        disk_name=kwargs['name'],
        has_operating_system=kwargs.get('has_operating_system', old_data['has_operating_system']),
        label=kwargs.get('label', old_data['label']),
        media_link=kwargs.get('media_link', old_data['media_link']),
        name=kwargs.get('new_name', old_data['name']),
        os=kwargs.get('os', old_data['os']),
    )
    # Return the refreshed view of the disk.
    return show_disk(kwargs={'name': kwargs['name']}, call='function')
def list_service_certificates(kwargs=None, conn=None, call=None):
    '''
    .. versionadded:: 2015.8.0
    List certificates associated with the service
    CLI Example:
    .. code-block:: bash
        salt-cloud -f list_service_certificates my-azure name=my_service
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The list_service_certificates function must be called with -f or --function.'
        )
    kwargs = kwargs or {}
    if 'name' not in kwargs:
        raise SaltCloudSystemExit('A service name must be specified as "name"')
    conn = conn or get_conn()
    certificates = conn.list_service_certificates(service_name=kwargs['name'])
    return dict(
        (cert.thumbprint, object_to_dict(cert))
        for cert in certificates.certificates
    )
def show_service_certificate(kwargs=None, conn=None, call=None):
    '''
    .. versionadded:: 2015.8.0
    Return information about a service certificate
    CLI Example:
    .. code-block:: bash
        salt-cloud -f show_service_certificate my-azure name=my_service_certificate \\
            thumbalgorithm=sha1 thumbprint=0123456789ABCDEF
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The get_service_certificate function must be called with -f or --function.'
        )
    conn = conn or get_conn()
    kwargs = kwargs or {}
    # Validate required arguments, preserving the per-argument error text.
    required = (
        ('name', 'A service name must be specified as "name"'),
        ('thumbalgorithm', 'A thumbalgorithm must be specified as "thumbalgorithm"'),
        ('thumbprint', 'A thumbprint must be specified as "thumbprint"'),
    )
    for key, message in required:
        if key not in kwargs:
            raise SaltCloudSystemExit(message)
    certificate = conn.get_service_certificate(
        kwargs['name'],
        kwargs['thumbalgorithm'],
        kwargs['thumbprint'],
    )
    return object_to_dict(certificate)
# For consistency with Azure SDK
get_service_certificate = show_service_certificate  # exact alias of ``show_service_certificate``
def add_service_certificate(kwargs=None, conn=None, call=None):
    '''
    .. versionadded:: 2015.8.0
    Add a new service certificate
    CLI Example:
    .. code-block:: bash
        salt-cloud -f add_service_certificate my-azure name=my_service_certificate \\
            data='...CERT_DATA...' certificate_format=sha1 password=<PASSWORD>
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The add_service_certificate function must be called with -f or --function.'
        )
    conn = conn or get_conn()
    kwargs = kwargs or {}
    # Validate required arguments, preserving the per-argument error text.
    required = (
        ('name', 'A name must be specified as "name"'),
        ('data', 'Certificate data must be specified as "data"'),
        ('certificate_format', 'A certificate_format must be specified as "certificate_format"'),
        ('password', 'A password must be specified as "password"'),
    )
    for key, message in required:
        if key not in kwargs:
            raise SaltCloudSystemExit(message)
    try:
        conn.add_service_certificate(
            kwargs['name'],
            kwargs['data'],
            kwargs['certificate_format'],
            kwargs['password'],
        )
        return {'Success': 'The service certificate was successfully added'}
    except AzureConflictHttpError:
        raise SaltCloudSystemExit('There was a conflict. This usually means that the '
                                  'service certificate already exists.')
def delete_service_certificate(kwargs=None, conn=None, call=None):
'''
.. versionadded:: 2015.8.0
Delete a specific certificate associated with the service
CLI Examples:
.. code-block:: bash
salt-cloud -f delete_service_certificate my-azure name=my_service_certificate \\
thumbalgorithm=sha1 thumbprint=0123456789ABCDEF
'''
if call != 'function':
raise SaltCloudSystemExit(
'The delete_service_certificate function must be called with -f or --function.'
)
if kwargs is None:
kwargs = {}
if 'name' not in kwargs:
raise SaltCloudSystemExit('A name must be specified as "name"')
if 'thumbalgorithm' not in kwargs:
raise SaltCloudSystemExit('A thumbalgorithm must be specified as "thumbalgorithm"')
if 'thumbprint' not in kwargs:
raise SaltCloudSystemExit('A thumbprint must be specified as "thumbprint"')
if not conn:
| |
import argparse
import pyrealsense2 as rs
import numpy as np
import cv2
from models import * # set ONNX_EXPORT in models.py
from utils.datasets_v3 import *
from utils.utils import *
from utils.tugas_akhir_v3_5 import *
def detect(save_img=False):
    """Run YOLOv3 detection plus per-object tracking on a stream or files.

    Frames come either from a RealSense camera (``--source 0`` / rtsp / http /
    .txt list) or from image/video files.  Each frame is pushed through the
    Darknet model, detections are matched to tracked objects via
    ``matching_object`` (Kalman-filter based), and the annotated frames are
    optionally displayed, saved, and logged as per-frame trajectory rows.
    All options are read from the module-level ``opt`` namespace parsed in
    ``__main__``.
    """
    img_size = (320, 192) if ONNX_EXPORT else opt.img_size  # (320, 192) or (416, 256) or (608, 352) for (height, width)
    out, source, weights, half, view_img, save_txt = opt.output, opt.source, opt.weights, opt.half, opt.view_img, opt.save_txt
    webcam = source == '0' or source.startswith('rtsp') or source.startswith('http') or source.endswith('.txt')
    # Initialize
    device = torch_utils.select_device(device='cpu' if ONNX_EXPORT else opt.device)
    if os.path.exists(out):
        shutil.rmtree(out)  # delete output folder
    os.makedirs(out)  # make new output folder
    # Initialize model
    model = Darknet(opt.cfg, img_size)
    # Load weights
    attempt_download(weights)
    if weights.endswith('.pt'):  # pytorch format
        model.load_state_dict(torch.load(weights, map_location=device)['model'])
    else:  # darknet format
        load_darknet_weights(model, weights)
    # Second-stage classifier (disabled by default)
    classify = False
    if classify:
        modelc = torch_utils.load_classifier(name='resnet101', n=2)  # initialize
        modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model'])  # load weights
        modelc.to(device).eval()
    # Eval mode
    model.to(device).eval()
    # Fuse Conv2d + BatchNorm2d layers
    # model.fuse()
    # Export mode: write an ONNX model and return without running inference.
    if ONNX_EXPORT:
        model.fuse()
        img = torch.zeros((1, 3) + img_size)  # (1, 3, 320, 192)
        f = opt.weights.replace(opt.weights.split('.')[-1], 'onnx')  # *.onnx filename
        torch.onnx.export(model, img, f, verbose=False, opset_version=11,
                          input_names=['images'], output_names=['classes', 'boxes'])
        # Validate exported model
        import onnx
        model = onnx.load(f)  # Load the ONNX model
        onnx.checker.check_model(model)  # Check that the IR is well formed
        print(onnx.helper.printable_graph(model.graph))  # Print a human readable representation of the graph
        return
    # Half precision
    half = half and device.type != 'cpu'  # half precision only supported on CUDA
    if half:
        model.half()
    # Set Dataloader
    vid_path, vid_writer = None, None
    if webcam:
        view_img = True
        torch.backends.cudnn.benchmark = True  # set True to speed up constant image size inference
        dataset = LoadRealSense2(width = 640, height = 480, fps = 30, img_size = img_size)
    else:
        save_img = True
        dataset = LoadImages(source, img_size=img_size)
    # Get names and colors
    names = load_classes(opt.names)
    colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]
    # Run inference
    matching = matching_object(match_model = opt.match_model, max_obj = opt.n_object)  # Matching Object Initialization
    # test_col = collision_test(max_obj = opt.n_object)
    t0 = time.time()
    img = torch.zeros((1, 3, img_size, img_size), device=device)  # init img
    _ = model(img.half() if half else img.float()) if device.type != 'cpu' else None  # run once (warm-up)
    # Process Variable
    # NOTE(review): ``id`` shadows the ``id`` builtin inside this function.
    id = None
    prev_n = 0
    frame = 0
    warning = None
    # Area (sentinel initial values)
    w = 2000
    h = 2000
    # Time Variable
    total_time = 0
    t_obs_pred = 0  # mark observer's predict step
    # Frame Time (sentinel until first frame completes)
    frame_time = 2000  # frame_time
    # YOLO + NMS Time
    yolo_nms = 2000
    # Match + Update Time
    match_time = 2000
    t_match_1 = 0  # mark the start of matching
    t_match_2 = 0  # mark the end of matching
    # Predict Time
    pred_time = 2000
    start_pred = 0  # mark the start of predict
    stop_pred = 0  # mark the end of predict
    # Update Time
    upd_time = 2000
    for path, depth, distance, depth_scale, img, im0s, vid_cap in dataset:
        # Start of per-frame timing
        t_start = time.time()
        # Before the object loop, drop tracker memory for objects lost too long.
        # (Original comment said "more than 50 frames" but the code uses max_miss=10
        # -- TODO confirm which threshold is intended.)
        matching.clear_memory(frame = frame, max_miss = 10)
        # Observer prediction step + save to memory
        start_pred = time.time()
        t_pred = time.time() - t_obs_pred
        matching.predict_and_save(t_pred, frame)
        t_obs_pred = time.time()
        stop_pred = time.time()
        # Get detections: HWC uint8 -> normalized float tensor on the device.
        img = torch.from_numpy(img).to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)
        # Inference
        t1 = torch_utils.time_synchronized()
        pred = model(img, augment=opt.augment)[0]
        t2 = torch_utils.time_synchronized()
        # to float
        if half:
            pred = pred.float()
        # Apply NMS (only classes 0 and 2 -- person and car in COCO ordering)
        pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres,
                                   multi_label=False, classes=[0, 2], agnostic=opt.agnostic_nms)
        # Apply Classifier
        if classify:
            pred = apply_classifier(pred, modelc, img, im0s)
        # Process detections
        for i, det in enumerate(pred):  # detections per image
            if webcam:  # batch_size >= 1
                p, s, im0 = path[i], '%g: ' % i, im0s[i]
            else:
                p, s, im0 = path, '', im0s
            save_path = str(Path(out) / Path(p).name)
            s += '%gx%g ' % img.shape[2:]  # print string
            if det is not None and len(det):
                # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
                # Print results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += '%g %ss, ' % (n, names[int(c)])  # add to string
                # Write results
                # Update maximum allowable id
                new_n = int(len(det))
                matching.update_max_id(prev_n, new_n)
                # Reset id memory
                matching.reset_id_memory()
                # print(str(len(det)) + " objects detected")
                for *xyxy, conf, cls in det:
                    if save_img or view_img:  # Add bbox to image
                        label = '%s' % (names[int(cls)])
                        # Calculate depth
                        # Splitting xyxy* (measurement)
                        xmin = int(xyxy[0])
                        ymin = int(xyxy[1])
                        xmax = int(xyxy[2])
                        ymax = int(xyxy[3])
                        # Calculate width and height
                        w = xmax - xmin
                        h = ymax - ymin
                        # Calculating measured centroid of the object (in Pixel)
                        xc = int(round(((xmax + xmin) / 2), 0))
                        yc = int(round(((ymax + ymin) / 2), 0))
                        depth_pixel = [xc, yc]
                        xc_msr = float((xyxy[2] + xyxy[0])/2)
                        yc_msr = float((xyxy[3] + xyxy[1])/2)
                        meas_pixel = [xc_msr, yc_msr]
                        # Calculating depth using CV2.Mean
                        # (``jarak`` is Indonesian for "distance")
                        jarak = calcdepth(xmin, ymin, xmax, ymax, distance, depth_scale)
                        # Cropping newly detected object
                        # NOTE(review): ``object`` shadows the builtin of the same name.
                        object = im0[ymin:ymax, xmin:xmax]
                        if depth_pixel is None:
                            # NOTE(review): depth_pixel was just assigned a list above,
                            # so this branch appears unreachable -- confirm intent.
                            print("depth_pixel is None")
                            print(depth_pixel)
                            continue
                        else:
                            # Match this detection to a tracked object id.
                            t_match_1 = time.time()
                            id, upd_time = matching.main(object, meas_pixel, frame, label, jarak)
                            t_match_2 = time.time()
                            if id is not None:
                                # Collision Test (disabled)
                                # collision, final_time = test_col.collision_time(1, matching.kalman_array[id, frame, 0],
                                #                                                 matching.kalman_array[id, frame, 6],
                                #                                                 matching.kalman_array[id, frame, 1],
                                #                                                 matching.kalman_array[id, frame, 7],
                                #                                                 matching.kalman_array[id, frame, 2],
                                #                                                 matching.kalman_array[id, frame, 8])
                                # warning = test_col.warning(collision, final_time, id)
                                # Visualization
                                plot_one_box_gilbert(xyxy, im0, id=str(id), color=colors[int(id)], dist = round(matching.kalman_array[id, frame, 1], 1))
                        # Save trajectory to results.txt
                        if save_txt and id is not None:  # Write to file
                            with open(save_path + '.txt', 'a') as file:
                                file.write(('%g ' * 9 + '\n') % (
                                    total_time, id,
                                    matching.kalman_array[id, frame, 0], matching.kalman_array[id, frame, 1],
                                    matching.kalman_array[id, frame, 2], matching.kalman_array[id, frame, 3],
                                    matching.kalman_array[id, frame, 4], matching.kalman_array[id, frame, 5],
                                    frame))
                # Update prev_n
                prev_n = int(new_n)
            matching.write_missing_objects(frame, total_time, save_path, save_txt)
            # Stream results
            if view_img:
                # matching.plot_missing_objects(frame, im0, colors)
                if opt.put_text:
                    try:
                        fps = str(int(round(1 / frame_time, 0))) + " fps"
                    # NOTE(review): bare except -- intended to catch ZeroDivisionError
                    # on the first frame; consider narrowing.
                    except:
                        fps = "Div by 0"
                    match = os.path.split(opt.match_model)
                    put_txt(im0, str1=match[-1], fps=fps, warning = warning)
                cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
                cv2.imshow('RealSense', im0)
                if cv2.waitKey(1) == ord('q'):  # q to quit
                    raise StopIteration
            # Save results (image with detections)
            if save_img:
                if dataset.mode == 'images':
                    cv2.imwrite(save_path, im0)
                else:
                    if vid_path != save_path:  # new video
                        vid_path = save_path
                        if isinstance(vid_writer, cv2.VideoWriter):
                            vid_writer.release()  # release previous video writer
                        fps = vid_cap.get(cv2.CAP_PROP_FPS)
                        w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                        h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*opt.fourcc), fps, (w, h))
                    vid_writer.write(im0)
        # Frame Count and per-frame timing summary
        frame += 1
        area = w * h
        yolo_nms = t2 - t1
        match_time = t_match_2 - t_match_1
        pred_time = stop_pred - start_pred
        frame_time = time.time() - t_start
        total_time = total_time + frame_time
        print("frame = ", frame)
        print("fps = ", (1 / frame_time))
        # Optional benchmarking dump (disabled)
        # if save_txt and id is not None:  # Write to file
        #     with open("C:/Users/HP/Desktop/1 object/r" + '.txt', 'a') as file:
        #         file.write(('%g ' * 9 + '\n') % (
        #             id,
        #             total_time,
        #             frame_time,
        #             yolo_nms,
        #             match_time,
        #             pred_time,
        #             upd_time,
        #             matching.vector_array[id, frame-1, 2],
        #             area))
    if save_txt or save_img:
        print('Results saved to %s' % os.getcwd() + os.sep + out)
        if platform == 'darwin':  # MacOS
            os.system('open ' + save_path)
    print('Done. (%.3fs)' % (time.time() - t0))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, default='cfg/yolov3-spp.cfg', help='*.cfg path')
parser.add_argument('--names', type=str, default='data/coco.names', help='*.names path')
parser.add_argument('--weights', type=str, default='weights/yolov3-spp-ultralytics.pt', help='weights path')
parser.add_argument('--source', type=str, default='data/samples', help='source') # input file/folder, 0 for webcam
parser.add_argument('--output', type=str, default='output', help='output folder') # output folder
parser.add_argument('--img-size', type=int, default=512, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.3, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS')
parser.add_argument('--fourcc', type=str, default='mp4v', help='output video codec (verify | |
the special
__iter__() method.
@tag6
(6) Negative subscripts for array-like indexing are supported.
Therefore,
bitvec[-i]
is legal assuming that the index range is not violated. A
negative index carries the usual Python interpretation: The
last element of a bit vector is indexed -1 and the first
element -(n+1) if n is the total number of bits in the bit
vector. Negative subscripts are made possible by
special-casing such access in the implementation of the
__getitem__ method (actually it is the _getbit method).
@tag7
(7) You can reset a previously constructed bit vector to either the
all-zeros state or the all-ones state by
bv1 = BitVector(size = 25)
...
...
bv1.reset(1)
...
...
bv1.reset(0)
The first call to reset() will set all the bits of bv1 to 1's
and the second call all the bits to 0's.
@title
LOGICAL OPERATIONS ON BIT VECTORS:
@tag8
(8) Given two bit vectors bv1 and bv2, you can perform bitwise
logical operations on them by
result_bv = bv1 ^ bv2 # for bitwise XOR
result_bv = bv1 & bv2 # for bitwise AND
result_bv = bv1 | bv2 # for bitwise OR
result_bv = ~bv1 # for bitwise negation
These are made possible by implementing the __xor__, __and__,
__or__, and __invert__ methods, respectively.
@title
COMPARING BIT VECTORS:
@tag9
(9) Given two bit vectors bv1 and bv2, you can carry out the
following comparisons that return Boolean values:
bv1 == bv2
bv1 != bv2
bv1 < bv2
bv1 <= bv2
bv1 > bv2
bv1 >= bv2
The equalities and inequalities are determined by the integer
values associated with the bit vectors. These operator
overloadings are made possible by providing implementation code
for __eq__, __ne__, __lt__, __le__, __gt__, and __ge__,
respectively.
@title
OTHER SUPPORTED OPERATIONS:
@tag10
(10) You can permute and unpermute bit vectors:
bv_permuted = bv.permute(permutation_list)
bv_unpermuted = bv.unpermute(permutation_list)
@tag11
(11) Left and right circular rotations can be carried out by
bitvec << N
bitvec >> N
for circular rotations to the left and to the right by N bit
positions. These operator overloadings are made possible by
implementing the __lshift__ and __rshift__ methods,
respectively.
@tag12
(12) If you want to shift a bitvector non-circularly:
bitvec = BitVector(bitstring = '10010000')
bitvec.shift_left(3) # 10000000
bitvec.shift_right(3) # 00010000
Obviously, for a sufficiently large left or right non-circular
shift, you will end up with a bitvector that is all zeros.
@tag13
(13) A bit vector containing an even number of bits can be divided
into two equal parts by
[left_half, right_half] = bitvec.divide_into_two()
where left_half and right_half hold references to the two
returned bit vectors.
@tag14
(14) You can find the integer value of a bit array by
bitvec.int_val()
or by
int(bitvec)
@tag15
(15) You can convert a bit vector into its string representation by
str( bitvec )
@tag16
(16) Because __add__ is supplied, you can always join two bit vectors
by
bitvec3 = bitvec1 + bitvec2
bitvec3 is a new bit vector that contains all the bits of
bitvec1 followed by all the bits of bitvec2.
@tag17
(17) You can find the length of a bitvector by
len = bitvec.length()
@tag18
(18) You can make a deep copy of a bitvector by
bitvec_copy = bitvec.deep_copy()
@tag19
(19) You can write a bit vector directly to a file, as illustrated
by the following example that reads one bit vector from a file
and then writes it to another file
bv = BitVector(filename = 'input.txt')
bv1 = bv.read_bits_from_file(64)
print(bv1)
FILEOUT = open('output.bits', 'wb')
bv1.write_to_file( FILEOUT )
FILEOUT.close()
bv = BitVector(filename = 'output.bits')
bv2 = bv.read_bits_from_file(64)
print(bv2)
IMPORTANT: The size of a bit vector must be a multiple of 8
for this write function to work. If this
condition is not met, the function will throw an
exception.
IMPORTANT FOR WINDOWS USERS: When writing an internally
generated bit vector out to a disk file, it is
important to open the file in the binary mode as
shown. Otherwise, the bit pattern 00001010
('\\n') in your bitstring will be written out as
0000110100001010 ('\\r\\n'), which is the
linebreak on Windows machines.
@tag20
(20) You can also write a bit vector directly to a stream object, as
illustrated by
fp_write = io.StringIO()
bitvec.write_bits_to_fileobject( fp_write )
print( fp_write.getvalue() )
@tag21
(21) You can pad a bit vector from the left or from the right with a
designated number of zeros
bitvec.pad_from_left( n )
bitvec.pad_from_right( n )
In the first case, the new bit vector will be the same as the
old bit vector except for the additional n zeros on the left.
The same thing happens in the second case except that now the
additional n zeros will be on the right.
@tag22
(22) You can test if a bit vector x is contained in another bit
vector y by using the syntax 'if x in y'. This is made
possible by the override definition for the special
__contains__ method.
@tag23
(23) You can change the bit pattern associated with a previously
constructed BitVector instance:
bv = BitVector(intVal = 7, size =16)
print(bv) # 0000000000000111
bv.set_value(intVal = 45)
print(bv) # 101101
@tag24
(24) You can count the number of bits set in a BitVector instance by
bv = BitVector( bitstring = '100111' )
print( bv.count_bits() ) # 4
@tag25
(25) For folks who use bit vectors with millions of bits in them but
with only a few bits set, your bit counting will go much, much
faster if you call count_bits_sparse() instead of count_bits():
# a BitVector with 2 million bits:
bv = BitVector(size = 2000000)
bv[345234] = 1
bv[233]=1
bv[243]=1
bv[18]=1
bv[785] =1
print(bv.count_bits_sparse()) # 5
@tag26
(26) You can calculate the similarity and the distance between two
bit vectors using the Jaccard similarity coefficient and the
Jaccard distance. Also, you can calculate the Hamming distance
between two bit vectors:
bv1 = BitVector(bitstring = '11111111')
bv2 = BitVector(bitstring = '00101011')
print(bv1.jaccard_similarity(bv2))
print(str(bv1.jaccard_distance(bv2)))
print(str(bv1.hamming_distance(bv2)))
@tag27
(27) Starting from a given bit position, you can find the position
index of the next set bit:
bv = BitVector(bitstring = '00000000000001')
print(bv.next_set_bit(5)) # 13
since the position index of the SET bit after the bit
whose position index 5 is 13.
@tag28
(28) You can measure the "rank" of a bit that is set at a given
position. Rank is the number of bits that are set up to the
position of the bit you are interested in.
bv = BitVector(bitstring = '01010101011100')
print(bv.rank_of_bit_set_at_index(10)) # 6
@tag29
(29) You can test whether the integer value of a bit vector is a
power of two. The sparse version of this method will work much
faster for very long bit vectors. However, the regular version
may work faster for small bit vectors.
bv = BitVector(bitstring = '10000000001110')
print( bv.is_power_of_2() )
print( bv.is_power_of_2_sparse() )
@tag30
(30) Given a bit vector, you can construct a bit vector with all the
bits reversed, in the sense that what was left to right before
now becomes right to left.
bv = BitVector(bitstring = '0001100000000000001')
print(str(bv.reverse()))
@tag31
(31) You can find the greatest common divisor of two bit vectors:
bv1 = BitVector(bitstring = '01100110') # int val: 102
bv2 = BitVector(bitstring = '011010') # int val: 26
bv = bv1.gcd(bv2)
print(int(bv)) # 2
@tag32
(32) You can find the multiplicative inverse of a bit vector
vis-a-vis a given modulus:
bv_modulus = BitVector(intVal = 32)
bv = BitVector(intVal = 17)
bv_result = bv.multiplicative_inverse( bv_modulus )
if bv_result is not None:
print(str(int(bv_result))) # 17
else: print("No multiplicative inverse in this case")
This multiplicative inverse is calculated using normal integer
arithmetic. For multiplicative inverses in GF(2^n), use the
gf_MI() method described below.
@tag33
(33) To find the multiplicative inverse of a bit | |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import csv
import json
import os
from collections import OrderedDict as od
from datetime import datetime
from typing import Dict, List, Tuple
import numpy as np
from nemo.collections.asr.metrics.wer import word_error_rate
from nemo.collections.asr.models import ClusteringDiarizer
from nemo.collections.asr.parts.utils.speaker_utils import (
audio_rttm_map,
get_uniqname_from_filepath,
labels_to_rttmfile,
rttm_to_labels,
write_rttm2manifest,
)
from nemo.utils import logging
try:
import arpa
ARPA = True
except ImportError:
ARPA = False
try:
import diff_match_patch
DIFF_MATCH_PATCH = True
except ImportError:
DIFF_MATCH_PATCH = False
__all__ = ['ASR_DIAR_OFFLINE']
def dump_json_to_file(file_path, riva_dict):
    """Serialize *riva_dict* as pretty-printed (indent=4) JSON into *file_path*."""
    with open(file_path, "w") as fout:
        json.dump(riva_dict, fout, indent=4)
def write_txt(w_path, val):
    """Write the string *val* followed by a single newline to *w_path*.

    Always returns None (kept for call-site compatibility).
    """
    with open(w_path, "w") as fout:
        fout.write("{}\n".format(val))
    return None
def get_diff_text(text1: str, text2: str) -> List[Tuple[int, str]]:
    """
    Compute a word-level diff between two transcripts.

    Each transcript is split on whitespace and the word sequences are compared
    with diff_match_patch's line-mode trick (one word per "line").

    Args:
        text1 (str): reference transcript (whitespace-separated words).
        text2 (str): hypothesis transcript (whitespace-separated words).

    Returns:
        List[Tuple[int, str]]: diff operations as (op, text) pairs, where op is
        -1 (deletion), 0 (equality) or 1 (insertion).

    Note:
        The parameters were previously annotated as List[str], but ``.split()``
        is called on them, so they must be plain strings.
    """
    orig_words = '\n'.join(text1.split()) + '\n'
    pred_words = '\n'.join(text2.split()) + '\n'
    differ = diff_match_patch.diff_match_patch()
    differ.Diff_Timeout = 0  # no time limit: always compute the full diff
    orig_enc, pred_enc, enc = differ.diff_linesToChars(orig_words, pred_words)
    diffs = differ.diff_main(orig_enc, pred_enc, False)
    differ.diff_charsToLines(diffs, enc)
    return diffs
def get_speaker_error_mismatch(ctm_error_dict, error_buffer, w_range_buffer, pred_rttm_eval):
    """
    Accumulate diarization confusion errors for mismatched diff regions.

    Args:
        ctm_error_dict (dict): running error counters; the value under
            'diar_confuse_count' is incremented in place.
        error_buffer (list): diff entries; an entry whose first element is 1
            marks an inserted (mismatched) region.
        w_range_buffer (list): (start, end) word-index ranges, parallel to
            error_buffer.
        pred_rttm_eval (list): per-word booleans, True when the predicted
            speaker matches the reference speaker.
    """
    # Removed dead locals (`correct_count`, `align_error`) and the no-op
    # list-copy comprehension from the original implementation.
    for k, diff_entry in enumerate(error_buffer):
        if diff_entry[0] == 1:
            stt, end = w_range_buffer[k]
            segment = pred_rttm_eval[stt:end]
            # Number of words whose speaker prediction is wrong in this region.
            error_count = len(segment) - sum(segment)
            ctm_error_dict['diar_confuse_count'] += error_count
def get_speaker_error_match(ctm_error_dict, w_range, ctm_info_list, pred_info_list, mapping_dict):
    """
    Count words whose predicted speaker disagrees with the reference speaker.

    Args:
        ctm_error_dict (dict): running counters; 'diar_confuse_count' is
            incremented in place by the number of mis-assigned words.
        w_range (list): two (start, end) index ranges — the reference range
            first, the prediction range second.
        ctm_info_list (list): (speaker, start, end) per reference word.
        pred_info_list (list): (speaker, start, end) per predicted word.
        mapping_dict (dict): predicted-speaker -> reference-speaker mapping.

    Returns:
        tuple: (error_count, align_error_list) where align_error_list holds
        the reference-minus-predicted word start-time offsets.
    """
    mismatches = 0
    start_offsets = []
    ref_indices = range(w_range[0][0], w_range[0][1])
    pred_indices = range(w_range[1][0], w_range[1][1])
    for ref_idx, pred_idx in zip(ref_indices, pred_indices):
        ref_spk, ref_start, _ref_end = ctm_info_list[ref_idx]
        pred_spk, pred_start, _pred_end = pred_info_list[pred_idx]
        # A word counts as an error when its predicted speaker is unmapped or
        # maps to a different reference speaker.
        if pred_spk not in mapping_dict or ref_spk != mapping_dict[pred_spk]:
            mismatches += 1
        start_offsets.append(ref_start - pred_start)
    ctm_error_dict['diar_confuse_count'] += mismatches
    return mismatches, start_offsets
class ASR_DIAR_OFFLINE(object):
"""
A class designed for performing ASR and diarization together.
"""
    def __init__(self, **cfg_diarizer):
        """
        Initialize the combined ASR + diarization pipeline from a diarizer config.

        Args:
            **cfg_diarizer: flattened diarizer configuration; must provide
                'manifest_filepath', 'out_dir', and an 'asr' section with
                'parameters', 'ctc_decoder_parameters' and
                'realigning_lm_parameters'.
        """
        self.manifest_filepath = cfg_diarizer['manifest_filepath']
        self.params = cfg_diarizer['asr']['parameters']
        self.ctc_decoder_params = cfg_diarizer['asr']['ctc_decoder_parameters']
        self.realigning_lm_params = cfg_diarizer['asr']['realigning_lm_parameters']
        self.nonspeech_threshold = self.params['asr_based_vad_threshold']
        self.fix_word_ts_with_VAD = self.params['fix_word_ts_with_VAD']
        self.root_path = cfg_diarizer['out_dir']
        # Heuristic constants for VAD-based word-timestamp correction.
        self.vad_threshold_for_word_ts = 0.7
        self.max_word_ts_length_in_sec = 0.6
        self.cfg_diarizer = cfg_diarizer
        self.word_ts_anchor_offset = 0.0
        # Populated later by the ASR / LM setup steps.
        self.run_ASR = None
        self.realigning_lm = None
        self.ctm_exists = {}
        # uniq_id -> frame-level VAD probabilities (filled by get_frame_level_VAD).
        self.frame_VAD = {}
        self.align_error_list = []
        # uniq_id -> audio/RTTM metadata, built from the input manifest.
        self.AUDIO_RTTM_MAP = audio_rttm_map(self.manifest_filepath)
        self.audio_file_list = [value['audio_filepath'] for _, value in self.AUDIO_RTTM_MAP.items()]
        # ANSI color codes used when printing speaker-colored transcripts.
        self.color_palette = {
            'speaker_0': '\033[1;32m',
            'speaker_1': '\033[1;34m',
            'speaker_2': '\033[1;30m',
            'speaker_3': '\033[1;31m',
            'speaker_4': '\033[1;35m',
            'speaker_5': '\033[1;36m',
            'speaker_6': '\033[1;37m',
            'speaker_7': '\033[1;30m',
            'speaker_8': '\033[1;33m',
            'speaker_9': '\033[0;34m',
            'white': '\033[0;37m',
        }
def load_realigning_LM(self):
self.N_range = (
self.realigning_lm_params['min_number_of_words'],
self.realigning_lm_params['max_number_of_words'],
)
self.stt_end_tokens = ['</s>', '<s>']
logging.info(f"Loading LM for realigning: {self.realigning_lm_params['arpa_language_model']}")
return arpa.loadf(self.realigning_lm_params['arpa_language_model'])[0]
def save_VAD_labels_list(self, word_ts_dict):
"""
Take the non_speech labels from logit output. The logit output is obtained from
run_ASR() function.
Args:
word_ts_dict (dict):
List containing word timestamps.
"""
self.VAD_RTTM_MAP = {}
for idx, (uniq_id, word_timestamps) in enumerate(word_ts_dict.items()):
speech_labels_float = self.get_speech_labels_from_decoded_prediction(word_timestamps)
speech_labels = self.get_str_speech_labels(speech_labels_float)
output_path = os.path.join(self.root_path, 'pred_rttms')
if not os.path.exists(output_path):
os.makedirs(output_path)
filename = labels_to_rttmfile(speech_labels, uniq_id, output_path)
self.VAD_RTTM_MAP[uniq_id] = {'audio_filepath': self.audio_file_list[idx], 'rttm_filepath': filename}
def get_speech_labels_from_decoded_prediction(self, input_word_ts):
"""
Extract speech labels from the ASR output (decoded predictions)
Args:
input_word_ts (list):
List containing word timestamps.
Returns:
word_ts (list):
The ranges of the speech segments, which are merged ranges of input_word_ts.
"""
speech_labels = []
word_ts = copy.deepcopy(input_word_ts)
if word_ts == []:
return speech_labels
else:
count = len(word_ts) - 1
while count > 0:
if len(word_ts) > 1:
if word_ts[count][0] - word_ts[count - 1][1] <= self.nonspeech_threshold:
trangeB = word_ts.pop(count)
trangeA = word_ts.pop(count - 1)
word_ts.insert(count - 1, [trangeA[0], trangeB[1]])
count -= 1
return word_ts
    def run_diarization(
        self, diar_model_config, word_timestamps,
    ):
        """
        Launch speaker diarization, optionally seeding VAD from ASR output.

        Args:
            diar_model_config: diarizer configuration (OmegaConf-style object;
                attribute access is used below).
            word_timestamps (dict): uniq_id -> word timestamps from ASR.

        Returns:
            diar_hyp (dict): RTTM-derived speaker labels per unique ID.
            score: value returned by ``ClusteringDiarizer.diarize()`` —
                per upstream docs a (pyannote metric, mapping dict) pair.
        """
        # When ASR-based VAD is requested, convert the decoded word timestamps
        # into an external VAD manifest and disable the VAD model itself.
        if diar_model_config.diarizer.asr.parameters.asr_based_vad:
            self.save_VAD_labels_list(word_timestamps)
            oracle_manifest = os.path.join(self.root_path, 'asr_vad_manifest.json')
            oracle_manifest = write_rttm2manifest(self.VAD_RTTM_MAP, oracle_manifest)
            diar_model_config.diarizer.vad.model_path = None
            diar_model_config.diarizer.vad.external_vad_manifest = oracle_manifest
        diar_model = ClusteringDiarizer(cfg=diar_model_config)
        score = diar_model.diarize()
        # Frame-level VAD output only exists when a VAD model actually ran.
        if diar_model_config.diarizer.vad.model_path is not None and not diar_model_config.diarizer.oracle_vad:
            self.get_frame_level_VAD(vad_processing_dir=diar_model.vad_pred_dir)
        # Collect the predicted RTTM files written by the diarizer.
        diar_hyp = {}
        for k, audio_file_path in enumerate(self.audio_file_list):
            uniq_id = get_uniqname_from_filepath(audio_file_path)
            pred_rttm = os.path.join(self.root_path, 'pred_rttms', uniq_id + '.rttm')
            diar_hyp[uniq_id] = rttm_to_labels(pred_rttm)
        return diar_hyp, score
def get_frame_level_VAD(self, vad_processing_dir):
"""
Read frame-level VAD outputs.
Args:
vad_processing_dir (str):
The path where VAD results are saved.
"""
for uniq_id in self.AUDIO_RTTM_MAP:
frame_vad = os.path.join(vad_processing_dir, uniq_id + '.median')
frame_vad_float_list = []
with open(frame_vad, 'r') as fp:
for line in fp.readlines():
frame_vad_float_list.append(float(line.strip()))
self.frame_VAD[uniq_id] = frame_vad_float_list
    def gather_eval_results(self, metric, mapping_dict, total_riva_dict):
        """
        Gather diarization evaluation results from a pyannote DiarizationErrorRate metric.

        Args:
            metric (DiarizationErrorRate): pyannote metric holding per-file results.
            mapping_dict (dict): speaker-label mapping per audio file, keyed by
                unique name.
            total_riva_dict (dict): aggregated transcript dictionary
                (not used in this method's visible body).

        Returns:
            DER_result_dict (dict): scores for each audio file plus aggregated
            results under the "total" key.
        """
        results = metric.results_
        DER_result_dict = {}
        count_correct_spk_counting = 0
        for result in results:
            key, score = result
            # Estimated vs. reference speaker counts, read from the RTTM files.
            pred_rttm = os.path.join(self.root_path, 'pred_rttms', key + '.rttm')
            pred_labels = rttm_to_labels(pred_rttm)
            est_n_spk = self.get_num_of_spk_from_labels(pred_labels)
            ref_rttm = self.AUDIO_RTTM_MAP[key]['rttm_filepath']
            ref_labels = rttm_to_labels(ref_rttm)
            ref_n_spk = self.get_num_of_spk_from_labels(ref_labels)
            if self.cfg_diarizer['oracle_vad']:
                # With oracle VAD, miss/false-alarm are zeroed out by construction.
                score['missed detection'] = 0
                score['false alarm'] = 0
            _DER, _CER, _FA, _MISS = (
                (score['confusion'] + score['false alarm'] + score['missed detection']) / score['total'],
                score['confusion'] / score['total'],
                score['false alarm'] / score['total'],
                score['missed detection'] / score['total'],
            )
            DER_result_dict[key] = {
                "DER": round(_DER, 4),
                "CER": round(_CER, 4),
                "FA": round(_FA, 4),
                "MISS": round(_MISS, 4),
                "est_n_spk": est_n_spk,
                "mapping": mapping_dict[key],
                "is_spk_count_correct": (est_n_spk == ref_n_spk),
            }
            count_correct_spk_counting += int(est_n_spk == ref_n_spk)
        # Aggregated metrics over all files; abs(metric) is the overall DER.
        DER, CER, FA, MISS = (
            abs(metric),
            metric['confusion'] / metric['total'],
            metric['false alarm'] / metric['total'],
            metric['missed detection'] / metric['total'],
        )
        DER_result_dict["total"] = {
            "DER": DER,
            "CER": CER,
            "FA": FA,
            "MISS": MISS,
            "spk_counting_acc": count_correct_spk_counting / len(metric.results_),
        }
        return DER_result_dict
def get_the_closest_silence_start(self, vad_index_word_end, vad_frames, params, offset=10):
"""
Find the closest silence frame from the given starting position.
Args:
vad_index_word_end (float):
The timestamp of the end of the current word.
vad_frames (numpy.array):
The numpy array containing frame-level VAD probability.
params (dict):
Contains the parameters for diarization and ASR decoding.
Returns:
c (float):
A timestamp of the earliest start of a silence region from
the given time point, vad_index_word_end.
"""
c = vad_index_word_end + offset
limit = int(100 * self.max_word_ts_length_in_sec + vad_index_word_end)
while c < len(vad_frames):
if vad_frames[c] < self.vad_threshold_for_word_ts:
break
else:
c += 1
if c > limit:
break
c = min(len(vad_frames) - 1, c)
c = round(c / 100.0, 2)
return c
def compensate_word_ts_list(self, audio_file_list, word_ts_dict, params):
"""
Compensate the word timestamps based on the VAD output.
The length of each word is capped by self.max_word_ts_length_in_sec.
Args:
audio_file_list (list):
List containing audio file paths.
word_ts_dict (dict):
Dictionary containing timestamps of words.
params (dict):
The parameter dictionary for diarization and ASR decoding.
Returns:
enhanced_word_ts_dict (list):
List of the enhanced word timestamp values.
"""
enhanced_word_ts_dict = {}
for idx, (uniq_id, word_ts_seq_list) in enumerate(word_ts_dict.items()):
N = len(word_ts_seq_list)
enhanced_word_ts_buffer = []
for k, word_ts in enumerate(word_ts_seq_list):
if k < N - 1:
word_len = round(word_ts[1] - word_ts[0], 2)
len_to_next_word = round(word_ts_seq_list[k + 1][0] - word_ts[0] - 0.01, 2)
if uniq_id in self.frame_VAD:
vad_index_word_end = int(100 * word_ts[1])
closest_sil_stt = self.get_the_closest_silence_start(
vad_index_word_end, self.frame_VAD[uniq_id], params
)
vad_est_len = round(closest_sil_stt - word_ts[0], 2)
else:
vad_est_len = len_to_next_word
min_candidate = min(vad_est_len, len_to_next_word)
fixed_word_len = max(min(self.max_word_ts_length_in_sec, min_candidate), word_len)
enhanced_word_ts_buffer.append([word_ts[0], word_ts[0] + fixed_word_len])
else:
enhanced_word_ts_buffer.append([word_ts[0], word_ts[1]])
enhanced_word_ts_dict[uniq_id] = enhanced_word_ts_buffer
return enhanced_word_ts_dict
def get_transcript_with_speaker_labels(self, diar_hyp, word_hyp, word_ts_hyp):
"""
Match the diarization result with the ASR output.
The words and the timestamps for the corresponding words are matched
in a for loop.
Args:
diar_labels (dict):
Dictionary of the Diarization output | |
<reponame>mith1979/ansible_automation
# (c) 2012-2014, <NAME> <<EMAIL>>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import ansible.inventory
import ansible.constants as C
import ansible.runner
from ansible.utils.template import template
from ansible import utils
from ansible import errors
from ansible.module_utils.splitter import split_args, unquote
import ansible.callbacks
import ansible.cache
import os
import shlex
import collections
from play import Play
import StringIO
import pipes
# the setup cache stores all variables about a host
# gathered during the setup step, while the vars cache
# holds all other variables about a host
SETUP_CACHE = ansible.cache.FactCache()
VARS_CACHE = collections.defaultdict(dict)
RESERVED_TAGS = ['all','tagged','untagged','always']
class PlayBook(object):
'''
runs an ansible playbook, given as a datastructure or YAML filename.
A playbook is a deployment, config management, or automation based
set of commands to run in series.
multiple plays/tasks do not execute simultaneously, but tasks in each
pattern do execute in parallel (according to the number of forks
requested) among the hosts they address
'''
# *****************************************************
def __init__(self,
playbook = None,
host_list = C.DEFAULT_HOST_LIST,
module_path = None,
forks = C.DEFAULT_FORKS,
timeout = C.DEFAULT_TIMEOUT,
remote_user = C.DEFAULT_REMOTE_USER,
remote_pass = C.DEFAULT_REMOTE_PASS,
remote_port = None,
transport = C.DEFAULT_TRANSPORT,
private_key_file = C.DEFAULT_PRIVATE_KEY_FILE,
callbacks = None,
runner_callbacks = None,
stats = None,
extra_vars = None,
only_tags = None,
skip_tags = None,
subset = C.DEFAULT_SUBSET,
inventory = None,
check = False,
diff = False,
any_errors_fatal = False,
vault_password = False,
force_handlers = False,
# privelege escalation
become = C.DEFAULT_BECOME,
become_method = C.DEFAULT_BECOME_METHOD,
become_user = C.DEFAULT_BECOME_USER,
become_pass = None,
):
"""
playbook: path to a playbook file
host_list: path to a file like /etc/ansible/hosts
module_path: path to ansible modules, like /usr/share/ansible/
forks: desired level of parallelism
timeout: connection timeout
remote_user: run as this user if not specified in a particular play
remote_pass: use this remote password (for all plays) vs using SSH keys
remote_port: default remote port to use if not specified with the host or play
transport: how to connect to hosts that don't specify a transport (local, paramiko, etc)
callbacks output callbacks for the playbook
runner_callbacks: more callbacks, this time for the runner API
stats: holds aggregrate data about events occurring to each host
inventory: can be specified instead of host_list to use a pre-existing inventory object
check: don't change anything, just try to detect some potential changes
any_errors_fatal: terminate the entire execution immediately when one of the hosts has failed
force_handlers: continue to notify and run handlers even if a task fails
"""
self.SETUP_CACHE = SETUP_CACHE
self.VARS_CACHE = VARS_CACHE
arguments = []
if playbook is None:
arguments.append('playbook')
if callbacks is None:
arguments.append('callbacks')
if runner_callbacks is None:
arguments.append('runner_callbacks')
if stats is None:
arguments.append('stats')
if arguments:
raise Exception('PlayBook missing required arguments: %s' % ', '.join(arguments))
if extra_vars is None:
extra_vars = {}
if only_tags is None:
only_tags = [ 'all' ]
if skip_tags is None:
skip_tags = []
self.check = check
self.diff = diff
self.module_path = module_path
self.forks = forks
self.timeout = timeout
self.remote_user = remote_user
self.remote_pass = remote_pass
self.remote_port = remote_port
self.transport = transport
self.callbacks = callbacks
self.runner_callbacks = runner_callbacks
self.stats = stats
self.extra_vars = extra_vars
self.global_vars = {}
self.private_key_file = private_key_file
self.only_tags = only_tags
self.skip_tags = skip_tags
self.any_errors_fatal = any_errors_fatal
self.vault_password = <PASSWORD>
self.force_handlers = force_handlers
self.become = become
self.become_method = become_method
self.become_user = become_user
self.become_pass = become_pass
self.callbacks.playbook = self
self.runner_callbacks.playbook = self
if inventory is None:
self.inventory = ansible.inventory.Inventory(host_list)
self.inventory.subset(subset)
else:
self.inventory = inventory
if self.module_path is not None:
utils.plugins.module_finder.add_directory(self.module_path)
self.basedir = os.path.dirname(playbook) or '.'
utils.plugins.push_basedir(self.basedir)
# let inventory know the playbook basedir so it can load more vars
self.inventory.set_playbook_basedir(self.basedir)
vars = extra_vars.copy()
vars['playbook_dir'] = os.path.abspath(self.basedir)
if self.inventory.basedir() is not None:
vars['inventory_dir'] = self.inventory.basedir()
if self.inventory.src() is not None:
vars['inventory_file'] = self.inventory.src()
self.filename = playbook
(self.playbook, self.play_basedirs) = self._load_playbook_from_file(playbook, vars)
ansible.callbacks.load_callback_plugins()
ansible.callbacks.set_playbook(self.callbacks, self)
self._ansible_version = utils.version_info(gitinfo=True)
# *****************************************************
def _get_playbook_vars(self, play_ds, existing_vars):
'''
Gets the vars specified with the play and blends them
with any existing vars that have already been read in
'''
new_vars = existing_vars.copy()
if 'vars' in play_ds:
if isinstance(play_ds['vars'], dict):
new_vars.update(play_ds['vars'])
elif isinstance(play_ds['vars'], list):
for v in play_ds['vars']:
new_vars.update(v)
return new_vars
# *****************************************************
def _get_include_info(self, play_ds, basedir, existing_vars={}):
'''
Gets any key=value pairs specified with the included file
name and returns the merged vars along with the path
'''
new_vars = existing_vars.copy()
tokens = split_args(play_ds.get('include', ''))
for t in tokens[1:]:
try:
(k,v) = unquote(t).split("=", 1)
new_vars[k] = template(basedir, v, new_vars)
except ValueError, e:
raise errors.AnsibleError('included playbook variables must be in the form k=v, got: %s' % t)
return (new_vars, unquote(tokens[0]))
# *****************************************************
def _get_playbook_vars_files(self, play_ds, existing_vars_files):
new_vars_files = list(existing_vars_files)
if 'vars_files' in play_ds:
new_vars_files = utils.list_union(new_vars_files, play_ds['vars_files'])
return new_vars_files
# *****************************************************
def _extend_play_vars(self, play, vars={}):
'''
Extends the given play's variables with the additional specified vars.
'''
if 'vars' not in play or not play['vars']:
# someone left out or put an empty "vars:" entry in their playbook
return vars.copy()
play_vars = None
if isinstance(play['vars'], dict):
play_vars = play['vars'].copy()
play_vars.update(vars)
elif isinstance(play['vars'], list):
# nobody should really do this, but handle vars: a=1 b=2
play_vars = play['vars'][:]
play_vars.extend([{k:v} for k,v in vars.iteritems()])
return play_vars
# *****************************************************
def _load_playbook_from_file(self, path, vars={}, vars_files=[]):
'''
run top level error checking on playbooks and allow them to include other playbooks.
'''
playbook_data = utils.parse_yaml_from_file(path, vault_password=<PASSWORD>)
accumulated_plays = []
play_basedirs = []
if type(playbook_data) != list:
raise errors.AnsibleError("parse error: playbooks must be formatted as a YAML list, got %s" % type(playbook_data))
basedir = os.path.dirname(path) or '.'
utils.plugins.push_basedir(basedir)
for play in playbook_data:
if type(play) != dict:
raise errors.AnsibleError("parse error: each play in a playbook must be a YAML dictionary (hash), received: %s" % play)
if 'include' in play:
# a playbook (list of plays) decided to include some other list of plays
# from another file. The result is a flat list of plays in the end.
play_vars = self._get_playbook_vars(play, vars)
play_vars_files = self._get_playbook_vars_files(play, vars_files)
inc_vars, inc_path = self._get_include_info(play, basedir, play_vars)
play_vars.update(inc_vars)
included_path = utils.path_dwim(basedir, template(basedir, inc_path, play_vars))
(plays, basedirs) = self._load_playbook_from_file(included_path, vars=play_vars, vars_files=play_vars_files)
for p in plays:
# support for parameterized play includes works by passing
# those variables along to the subservient play
p['vars'] = self._extend_play_vars(p, play_vars)
# now add in the vars_files
p['vars_files'] = utils.list_union(p.get('vars_files', []), play_vars_files)
accumulated_plays.extend(plays)
play_basedirs.extend(basedirs)
else:
# this is a normal (non-included play)
accumulated_plays.append(play)
play_basedirs.append(basedir)
return (accumulated_plays, play_basedirs)
# *****************************************************
def run(self):
''' run all patterns in the playbook '''
plays = []
matched_tags_all = set()
unmatched_tags_all = set()
# loop through all patterns and run them
self.callbacks.on_start()
for (play_ds, play_basedir) in zip(self.playbook, self.play_basedirs):
play = Play(self, play_ds, play_basedir, vault_password=self.vault_password)
assert play is not None
matched_tags, unmatched_tags = play.compare_tags(self.only_tags)
matched_tags_all = matched_tags_all | matched_tags
unmatched_tags_all = unmatched_tags_all | unmatched_tags
# Remove tasks we wish to skip
matched_tags = matched_tags - set(self.skip_tags)
# if we have matched_tags, the play must be run.
# if the play contains no tasks, assume we just want to gather facts
# in this case there are actually 3 meta tasks (handler flushes) not 0
# tasks, so that's why there's a check against 3
if (len(matched_tags) > 0 or len(play.tasks()) == 3):
plays.append(play)
# if the playbook is invoked with --tags or --skip-tags that don't
# exist at all in the playbooks then we need to raise an error so that
# the user can correct the arguments.
unknown_tags = ((set(self.only_tags) | set(self.skip_tags)) -
(matched_tags_all | unmatched_tags_all))
for t in RESERVED_TAGS:
unknown_tags.discard(t)
if len(unknown_tags) > 0:
for t in RESERVED_TAGS:
unmatched_tags_all.discard(t)
msg | |
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++#
# This python file is a library of python functions which provides modular
# common set-up commands for solving a problem in OpenCMISS.
# Each function has a range of input options and calls the appropriate
# OpenCMISS linked commands to set up the problem. This is a high
# level library that will allow shorter scripting for solving cardiac mechanics
# simulations and also making it easier to debug.
# Author: <NAME>
# Start Date: 20th October 2014
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++#
from opencmiss.iron import iron
import numpy
import math
import os
# =================================================================================#
def BasicSetUp(regionUserNumber, coordinateSystemUserNumber):
    """Create the world region on a 3D rectangular-Cartesian coordinate
    system and query the parallel-computing environment.

    Returns:
        (numberOfComputationalNodes, computationalNodeNumber,
         coordinateSystem, region)
    """
    # Diagnostics can be switched on here when debugging:
    # iron.DiagnosticsSetOn(iron.DiagnosticTypes.IN, [1, 2, 3, 4, 5],
    #                       "Diagnostics", ["DOMAIN_MAPPINGS_LOCAL_FROM_GLOBAL_CALCULATE"])
    # Parallel-computing environment.
    nodeCount = iron.ComputationalNumberOfNodesGet()
    nodeRank = iron.ComputationalNodeNumberGet()
    # 3D rectangular-Cartesian coordinate system.
    coordSys = iron.CoordinateSystem()
    coordSys.CreateStart(coordinateSystemUserNumber)
    coordSys.dimension = 3
    coordSys.CreateFinish()
    # World region attached to that coordinate system.
    worldRegion = iron.Region()
    worldRegion.CreateStart(regionUserNumber, iron.WorldRegion)
    worldRegion.label = "Region"
    worldRegion.coordinateSystem = coordSys
    worldRegion.CreateFinish()
    print("----> Set up coordinate system and world region <----\n")
    return nodeCount, nodeRank, coordSys, worldRegion
# =================================================================================#
#=================================================================================#
def BasisFunction(basisUserNumber, numOfXi, option, collapsed):
    """Create the interpolation basis function(s) for the problem.

    Parameters:
        basisUserNumber: a single user number (option 1) or a
            [quad, cubic] pair of user numbers (option 2).
        numOfXi (int): number of xi directions.
        option (list): option[0] == 1 -> trilinear Lagrange;
                       option[0] == 2 -> triquadratic Lagrange + tricubic Hermite.
        collapsed (bool): option 1 only -- additionally build a collapsed
            basis for apical elements.

    Returns:
        basis (option 1), (basis, basisCol) when collapsed is set,
        (quadBasis, cubicBasis) for option 2, or None for other options.
    """
    if option[0] == 1:
        # Trilinear Lagrange basis for interpolation of geometry.
        basis = iron.Basis()
        basis.CreateStart(basisUserNumber)
        basis.numberOfXi = numOfXi
        basis.type = iron.BasisTypes.LAGRANGE_HERMITE_TP
        basis.interpolationXi = [iron.BasisInterpolationSpecifications.LINEAR_LAGRANGE] * numOfXi
        basis.QuadratureLocalFaceGaussEvaluateSet(True)
        # Fix: Gauss-point counts were hard-coded as [2, 2, 2]; follow numOfXi
        # so non-3D bases are set up consistently (identical for numOfXi == 3).
        basis.quadratureNumberOfGaussXi = [2] * numOfXi
        basis.CreateFinish()
        print("----> Set up trilinear basis functions for geometry, use element based interpolation for pressure <----\n")
        if collapsed:
            # Collapsed variant of the trilinear basis for apical elements.
            basisCol = iron.Basis()
            basisCol.CreateStart(basisUserNumber + 1)
            basisCol.numberOfXi = numOfXi
            basisCol.type = iron.BasisTypes.LAGRANGE_HERMITE_TP
            basisCol.interpolationXi = [iron.BasisInterpolationSpecifications.LINEAR_LAGRANGE] * numOfXi
            basisCol.QuadratureLocalFaceGaussEvaluateSet(True)
            basisCol.quadratureNumberOfGaussXi = [2] * numOfXi
            # Collapse pattern is specific to the 3D apex geometry.
            basisCol.CollapsedXiSet([iron.BasisXiCollapse.XI_COLLAPSED, iron.BasisXiCollapse.COLLAPSED_AT_XI0, iron.BasisXiCollapse.NOT_COLLAPSED])
            print("---> Set up collapsed basis functions for apical elements")
            basisCol.CreateFinish()
            return basis, basisCol
        return basis
    elif option[0] == 2:
        # Triquadratic Lagrange basis.
        quadBasis = iron.Basis()
        quadBasis.CreateStart(basisUserNumber[0])
        quadBasis.InterpolationXiSet([iron.BasisInterpolationSpecifications.QUADRATIC_LAGRANGE] * numOfXi)
        quadBasis.QuadratureNumberOfGaussXiSet([4] * numOfXi)
        quadBasis.QuadratureLocalFaceGaussEvaluateSet(True)
        quadBasis.CreateFinish()
        # Tricubic Hermite basis function for interpolation of geometry.
        cubicBasis = iron.Basis()
        cubicBasis.CreateStart(basisUserNumber[1])
        cubicBasis.InterpolationXiSet([iron.BasisInterpolationSpecifications.CUBIC_HERMITE] * numOfXi)
        cubicBasis.QuadratureNumberOfGaussXiSet([4] * numOfXi)
        cubicBasis.QuadratureLocalFaceGaussEvaluateSet(True)
        cubicBasis.CreateFinish()
        print("----> Set up tricubic hermite basis function for geometry and trilinear for hydrostatic pressure <----\n")
        return quadBasis, cubicBasis
#=================================================================================#
#=================================================================================#
def GeneratedMesh(generatedMeshUserNumber, meshUserNumber, region, bases, dimensions, elements):
    """Build a regular generated mesh of the given extent and element counts.

    Returns:
        (generatedMesh, mesh)
    """
    genMesh = iron.GeneratedMesh()
    genMesh.CreateStart(generatedMeshUserNumber, region)
    genMesh.TypeSet(iron.GeneratedMeshTypes.REGULAR)
    genMesh.BasisSet(bases)
    genMesh.ExtentSet(dimensions)
    genMesh.NumberOfElementsSet(elements)
    # Finishing the generated mesh populates the actual mesh object.
    mesh = iron.Mesh()
    genMesh.CreateFinish(meshUserNumber, mesh)
    return genMesh, mesh
#=================================================================================#
#=================================================================================#
def DecompositionSetUp(decompositionUserNumber, mesh, numberOfComputationalNodes):
    """Decompose the mesh into one domain per computational node.

    Face calculation is enabled so surface boundary conditions can be applied.
    """
    decomp = iron.Decomposition()
    decomp.CreateStart(decompositionUserNumber, mesh)
    decomp.type = iron.DecompositionTypes.CALCULATED
    decomp.NumberOfDomainsSet(numberOfComputationalNodes)
    decomp.CalculateFacesSet(True)
    decomp.CreateFinish()
    print("----> Set up decomposition <----\n")
    return decomp
#=================================================================================#
#=================================================================================#
def GeometricFieldSetUp(geometricFieldUserNumber, region, decomposition, option):
    """Create the geometric field on the decomposed mesh.

    For tricubic Hermite interpolation (option[0] == 2) the nodal scaling is
    chosen by option[1]: 1 -> unit scaling, 2 -> arithmetic-mean scaling.
    """
    geomField = iron.Field()
    geomField.CreateStart(geometricFieldUserNumber, region)
    geomField.MeshDecompositionSet(decomposition)
    geomField.VariableLabelSet(iron.FieldVariableTypes.U, "Geometry")
    if option[0] == 2:
        if option[1] == 1:
            geomField.ScalingTypeSet(iron.FieldScalingTypes.UNIT)
            print("----> Set up tricubic Hermite geometric field with unit scaling <----\n")
        elif option[1] == 2:
            geomField.ScalingTypeSet(iron.FieldScalingTypes.ARITHMETIC_MEAN)
            print("----> Set up tricubic Hermite geometric field with arithmetic mean scaling <----\n")
    geomField.CreateFinish()
    return geomField
#=================================================================================#
#=================================================================================#
def GeometricFieldInitialise(xNodes, yNodes, zNodes, geometricField, numNodes, option):
    """Write user-supplied nodal coordinates into the geometric field.

    For tricubic Hermite bases (option[0] == 2) the S1/S2/S3 first
    derivatives are also seeded (using max coordinate per direction).
    """
    # Nodal coordinate values, one geometric component at a time
    # (x -> component 1, y -> 2, z -> 3), nodes numbered from 1.
    for component, values in enumerate((xNodes, yNodes, zNodes), 1):
        for node, value in enumerate(values, 1):
            geometricField.ParameterSetUpdateNodeDP(
                iron.FieldVariableTypes.U, iron.FieldParameterSetTypes.VALUES, 1,
                iron.GlobalDerivativeConstants.NO_GLOBAL_DERIV, node, component, value)
    if option[0] == 2:
        # Tricubic Hermite basis: seed the first derivatives.
        derivSpecs = (
            (iron.GlobalDerivativeConstants.GLOBAL_DERIV_S1, 1, max(xNodes)),
            (iron.GlobalDerivativeConstants.GLOBAL_DERIV_S2, 2, max(yNodes)),
            (iron.GlobalDerivativeConstants.GLOBAL_DERIV_S3, 3, max(zNodes)),
        )
        for node in range(1, numNodes + 1):
            for deriv, component, value in derivSpecs:
                geometricField.ParameterSetUpdateNodeDP(
                    iron.FieldVariableTypes.U, iron.FieldParameterSetTypes.VALUES, 1,
                    deriv, node, component, value)
    print("----> Initialised geometric nodal values <----\n")
    return geometricField
#=================================================================================#
#=================================================================================#
def GeometricFieldExport(region, filename):
    """Export the undeformed geometry of *region* to ./results/<filename>.

    Writes FORTRAN-formatted exnode/exelem files, creating the results
    directory first if necessary.
    """
    if not os.path.exists("./results"):
        os.makedirs("./results")
    exportPath = "./results/" + filename
    fields = iron.Fields()
    fields.CreateRegion(region)
    fields.NodesExport(exportPath, "FORTRAN")
    fields.ElementsExport(exportPath, "FORTRAN")
    fields.Finalise()
    print("----> Export undeformed geometry <----\n")
#=================================================================================#
#=================================================================================#
def ExtractNodesElements(filename):
    """Extract nodal coordinates and element connectivity from an exported
    CMISS file pair (<filename>.exnode / <filename>.exelem).

    Parameters:
        filename (str): path prefix of the .exnode/.exelem pair.

    Returns:
        (nodes, elements): nodes stacks the whitespace-split x/y/z value
        lines (as strings) per node; elements holds the whitespace-split
        connectivity line per element. Returns None when a file cannot
        be opened.

    NOTE(review): the fixed skip counts (6 header lines in .exnode, 85 in
    .exelem) assume this project's specific export layout -- confirm if
    the export format ever changes.
    """
    try:
        fid_node = open(filename + '.exnode', 'r')
    except IOError:
        print('ERROR: Unable to open ' + filename + '.exnode')
        return
    try:
        fid_elem = open(filename + '.exelem', 'r')
    except IOError:
        print('ERROR: Unable to open ' + filename + '.exelem')
        # Fix: previously the .exnode handle leaked when only .exelem was missing.
        fid_node.close()
        return
    nodesX = []
    nodesY = []
    nodesZ = []
    elements = []
    try:
        # Skip the exnode header.
        for _ in range(6):
            fid_node.readline()
        # Each node is stored as a "Node:" header line followed by x, y, z lines.
        temp = fid_node.readline()
        while temp != '':
            nodesX.append(fid_node.readline().split())
            nodesY.append(fid_node.readline().split())
            nodesZ.append(fid_node.readline().split())
            temp = fid_node.readline()
        # Skip the exelem preamble (fixed 85-line header).
        for _ in range(85):
            fid_elem.readline()
        # Each element block: header, junk, connectivity, junk, junk.
        temp = fid_elem.readline()
        while temp.split() != []:
            fid_elem.readline()
            elements.append(fid_elem.readline().split())
            fid_elem.readline()
            fid_elem.readline()
            temp = fid_elem.readline()
    finally:
        # Fix: file handles were never closed in the original.
        fid_node.close()
        fid_elem.close()
    nodes = numpy.array([numpy.array(nodesX), numpy.array(nodesY), numpy.array(nodesZ)])
    return nodes, numpy.array(elements)
#=================================================================================#
#=================================================================================#
def FibreFieldSetUp(fibreFieldUserNumber, region, decomposition, geometricField, option, microstructure, inputNodes):
    """Create and initialise the fibre (microstructure) field.

    option[0]: 1 -> trilinear Lagrange, 2 -> tricubic Hermite
        (option[1] then selects unit or arithmetic-mean scaling).
    microstructure: 1 -> homogeneous (constant per component),
        2 -> heterogeneous (node-based angles read from inputNodes).
    """
    fibreField = iron.Field()
    fibreField.CreateStart(fibreFieldUserNumber, region)
    fibreField.TypeSet(iron.FieldTypes.FIBRE)
    fibreField.MeshDecompositionSet(decomposition)
    fibreField.GeometricFieldSet(geometricField)
    fibreField.VariableLabelSet(iron.FieldVariableTypes.U, "Fibre")
    if option[0] == 1:
        fibreField.NumberOfVariablesSet(1)
        fibreField.NumberOfComponentsSet(iron.FieldVariableTypes.U, 3)
        if microstructure == 1:
            for component in (1, 2, 3):
                fibreField.ComponentInterpolationSet(iron.FieldVariableTypes.U, component,
                                                     iron.FieldInterpolationTypes.CONSTANT)
        elif microstructure == 2:
            for component in (1, 2, 3):
                fibreField.ComponentMeshComponentSet(iron.FieldVariableTypes.U, component, 1)
    elif option[0] == 2:
        # Tricubic Hermite interpolation: pick the nodal scaling scheme.
        if option[1] == 1:
            fibreField.ScalingTypeSet(iron.FieldScalingTypes.UNIT)
            print("----> Set up tricubic hermite fibre field with unit scaling <----\n")
        elif option[1] == 2:
            fibreField.ScalingTypeSet(iron.FieldScalingTypes.ARITHMETIC_MEAN)
            print("----> Set up tricubic hermite fibre field with arithmetic mean scaling <----\n")
        if microstructure == 1:
            # Homogeneous fibre field.
            for component in (1, 2, 3):
                fibreField.ComponentInterpolationSet(iron.FieldVariableTypes.U, component,
                                                     iron.FieldInterpolationTypes.CONSTANT)
        elif microstructure == 2:
            # Heterogeneous fibre field using linear interpolation.
            for component in (1, 2, 3):
                fibreField.ComponentMeshComponentSet(iron.FieldVariableTypes.U, component, 1)
    fibreField.CreateFinish()
    if microstructure == 2:
        # Per-node fibre angles: input is in degrees, field stores radians.
        for n in range(1, inputNodes.num_nodes + 1):
            for component, axis in enumerate(("x", "y", "z"), 1):
                angle = float(inputNodes.node_values("fibers", axis, n)[0]) * math.pi / 180
                fibreField.ParameterSetUpdateNodeDP(iron.FieldVariableTypes.U, iron.FieldParameterSetTypes.VALUES,
                                                    1, iron.GlobalDerivativeConstants.NO_GLOBAL_DERIV, n,
                                                    component, angle)
        print("----> Initialised heterogeneous fibre angles <----\n")
    return fibreField
#=================================================================================#
#=================================================================================#
def MaterialFieldSetUpAuto(materialFieldUserNumber, equationsSet, params, cellMLOption):
    """Create the material field via the equations set (used with a CellML
    constitutive-model description) and initialise its parameters.

    Returns:
        (materialField, equationsSet)
    """
    materialField = iron.Field()
    equationsSet.MaterialsCreateStart(materialFieldUserNumber, materialField)
    materialField.VariableLabelSet(iron.FieldVariableTypes.U, "Material")
    if cellMLOption[0]:
        # CellML models evaluate the constitutive law at Gauss points.
        print("----> CellML Material Field using gauss point interpolation <----\n")
        for component in range(1, len(params) + 1):
            materialField.ComponentInterpolationSet(iron.FieldVariableTypes.U, component,
                                                    iron.FieldInterpolationTypes.GAUSS_POINT_BASED)
    materialField.CreateFinish()
    # Initialise one field component per material parameter.
    for component, param in enumerate(params, 1):
        materialField.ComponentValuesInitialiseDP(iron.FieldVariableTypes.U, iron.FieldParameterSetTypes.VALUES,
                                                  component, param)
    print("----> Initialised " + str(len(params)) + " material parameters <----\n")
    return materialField, equationsSet
#=================================================================================#
#=================================================================================#
def MaterialFieldSetUp(materialFieldUserNumber, region, decomposition, geometricField, params, option, cellMLOption):
    """Create a stand-alone material field on the mesh and initialise one
    component per entry of *params*.

    cellMLOption[0] selects Gauss-point interpolation (CellML constitutive
    models) instead of constant interpolation; option selects the scaling
    used with tricubic Hermite bases.
    """
    materialField = iron.Field()
    materialField.CreateStart(materialFieldUserNumber, region)
    materialField.TypeSet(iron.FieldTypes.MATERIAL)
    materialField.MeshDecompositionSet(decomposition)
    materialField.GeometricFieldSet(geometricField)
    materialField.VariableLabelSet(iron.FieldVariableTypes.U, "Material")
    materialField.NumberOfVariablesSet(1)
    materialField.NumberOfComponentsSet(iron.FieldVariableTypes.U, len(params))
    # Default scaling; possibly overridden by the option-specific choice below.
    materialField.ScalingTypeSet(iron.FieldScalingTypes.ARITHMETIC_MEAN)
    if cellMLOption[0]:
        print("----> CellML Material Field using gauss point interpolation <----\n")
        interpolation = iron.FieldInterpolationTypes.GAUSS_POINT_BASED
    else:
        print("----> Material Field using constant interpolation <----\n")
        interpolation = iron.FieldInterpolationTypes.CONSTANT
    for component in range(1, len(params) + 1):
        materialField.ComponentInterpolationSet(iron.FieldVariableTypes.U, component, interpolation)
    if option[0] == 2:
        # Tricubic Hermite: scaling scheme follows option[1].
        if option[1] == 1:
            materialField.ScalingTypeSet(iron.FieldScalingTypes.UNIT)
        elif option[1] == 2:
            materialField.ScalingTypeSet(iron.FieldScalingTypes.ARITHMETIC_MEAN)
    materialField.CreateFinish()
    for component, param in enumerate(params, 1):
        materialField.ComponentValuesInitialiseDP(iron.FieldVariableTypes.U, iron.FieldParameterSetTypes.VALUES,
                                                  component, param)
    materialField.ParameterSetUpdateStart(iron.FieldVariableTypes.U, iron.FieldParameterSetTypes.VALUES)
    materialField.ParameterSetUpdateFinish(iron.FieldVariableTypes.U, iron.FieldParameterSetTypes.VALUES)
    print("----> Initialised " + str(len(params)) + " material parameters <----\n")
    return materialField
#=================================================================================#
#=================================================================================#
def DependentFieldSetUp(dependentFieldUserNumber, equationsSet, option, cellMLOption):
# Set up dependent field
dependentField = iron.Field()
equationsSet.DependentCreateStart(dependentFieldUserNumber, dependentField)
dependentField.VariableLabelSet(iron.FieldVariableTypes.U, "Dependent")
if cellMLOption[0]:
print('----> Labelling dependent field strain and stress <----\n')
dependentField.VariableLabelSet(iron.FieldVariableTypes.U1, "Strain")
dependentField.VariableLabelSet(iron.FieldVariableTypes.U2, "Stress")
if option[0] == 1:
# Trilinear
for i in [1, 2, 3]:
dependentField.ComponentMeshComponentSet(iron.FieldVariableTypes.U, i, 1)
dependentField.ComponentMeshComponentSet(iron.FieldVariableTypes.DELUDELN, i, 1)
dependentField.ComponentInterpolationSet(iron.FieldVariableTypes.U, 4,
iron.FieldInterpolationTypes.ELEMENT_BASED)
dependentField.ComponentInterpolationSet(iron.FieldVariableTypes.DELUDELN, 4,
iron.FieldInterpolationTypes.ELEMENT_BASED)
# Output
print("----> Use element based interpolation for hydrostatic pressure <----\n")
elif option[0] == 2:
# Tricubic Hermite
for i in [1, 2, 3]:
dependentField.ComponentMeshComponentSet(iron.FieldVariableTypes.U, i, 1)
dependentField.ComponentMeshComponentSet(iron.FieldVariableTypes.DELUDELN, | |
<filename>statsmodels/tsa/statespace/tests/test_kalman.py
"""
Tests for _statespace module
Author: <NAME>
License: Simplified-BSD
References
----------
Kim, Chang-Jin, and <NAME>. 1999.
"State-Space Models with Regime Switching:
Classical and Gibbs-Sampling Approaches with Applications".
MIT Press Books. The MIT Press.
Hamilton, <NAME>. 1994.
Time Series Analysis.
Princeton, N.J.: Princeton University Press.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
import os
try:
    from scipy.linalg.blas import find_best_blas_type
except ImportError:
    # Compatibility shim for SciPy 0.11, modelled on scipy.linalg.blas at tag 0.11.
    _type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z', 'G': 'z'}

    def find_best_blas_type(arrays):
        """Return (prefix, dtype, None) for the widest dtype among *arrays*."""
        best_dtype, _ = max((ar.dtype, i) for i, ar in enumerate(arrays))
        return (_type_conv.get(best_dtype.char, 'd'), best_dtype, None)
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.statespace import _statespace as ss
from .results import results_kalman_filter
from numpy.testing import assert_almost_equal, assert_allclose
from nose.exc import SkipTest
prefix_statespace_map = {
's': ss.sStatespace, 'd': ss.dStatespace,
'c': ss.cStatespace, 'z': ss.zStatespace
}
prefix_kalman_filter_map = {
's': ss.sKalmanFilter, 'd': ss.dKalmanFilter,
'c': ss.cKalmanFilter, 'z': ss.zKalmanFilter
}
current_path = os.path.dirname(os.path.abspath(__file__))
class Clark1987(object):
    """
    Clark's (1987) univariate unobserved components model of real GDP (as
    presented in Kim and Nelson, 1999)
    Test data produced using GAUSS code described in Kim and Nelson (1999) and
    found at http://econ.korea.ac.kr/~cjkim/SSMARKOV.htm
    See `results.results_kalman_filter` for more information.
    """
    def __init__(self, dtype=float, conserve_memory=0, loglikelihood_burn=0):
        """Build the state-space system matrices for the model.

        Parameters:
            dtype: numpy scalar type used for all system matrices.
            conserve_memory (int): bit flags passed to the Kalman filter.
            loglikelihood_burn (int): initial observations excluded from
                the log-likelihood.
        """
        self.true = results_kalman_filter.uc_uni
        self.true_states = pd.DataFrame(self.true['states'])
        # GDP, Quarterly, 1947.1 - 1995.3
        data = pd.DataFrame(
            self.true['data'],
            index=pd.date_range('1947-01-01', '1995-07-01', freq='QS'),
            columns=['GDP']
        )
        data['lgdp'] = np.log(data['GDP'])
        # Parameters
        self.conserve_memory = conserve_memory
        self.loglikelihood_burn = loglikelihood_burn
        # Observed data (Fortran-ordered, as required by the Cython filter)
        self.obs = np.array(data['lgdp'], ndmin=2, dtype=dtype, order="F")
        # Measurement equation
        self.k_endog = k_endog = 1  # dimension of observed data
        # design matrix
        self.design = np.zeros((k_endog, 4, 1), dtype=dtype, order="F")
        self.design[:, :, 0] = [1, 1, 0, 0]
        # observation intercept
        self.obs_intercept = np.zeros((k_endog, 1), dtype=dtype, order="F")
        # observation covariance matrix
        self.obs_cov = np.zeros((k_endog, k_endog, 1), dtype=dtype, order="F")
        # Transition equation
        self.k_states = k_states = 4  # dimension of state space
        # transition matrix (sparse entries set via fancy indexing)
        self.transition = np.zeros((k_states, k_states, 1),
                                   dtype=dtype, order="F")
        self.transition[([0, 0, 1, 1, 2, 3],
                         [0, 3, 1, 2, 1, 3],
                         [0, 0, 0, 0, 0, 0])] = [1, 1, 0, 0, 1, 1]
        # state intercept
        self.state_intercept = np.zeros((k_states, 1), dtype=dtype, order="F")
        # selection matrix
        self.selection = np.asfortranarray(np.eye(k_states)[:, :, None],
                                           dtype=dtype)
        # state covariance matrix
        self.state_cov = np.zeros((k_states, k_states, 1),
                                  dtype=dtype, order="F")
        # Initialization: Diffuse priors
        self.initial_state = np.zeros((k_states,), dtype=dtype, order="F")
        self.initial_state_cov = np.asfortranarray(np.eye(k_states)*100,
                                                   dtype=dtype)
        # Update matrices with given parameters
        (sigma_v, sigma_e, sigma_w, phi_1, phi_2) = np.array(
            self.true['parameters'], dtype=dtype
        )
        self.transition[([1, 1], [1, 2], [0, 0])] = [phi_1, phi_2]
        self.state_cov[
            np.diag_indices(k_states)+(np.zeros(k_states, dtype=int),)] = [
            sigma_v**2, sigma_e**2, 0, sigma_w**2
        ]
        # Initialization: modification
        # Due to the difference in the way Kim and Nelson (1999) and Durbin
        # and Koopman (2012) define the order of the Kalman filter routines,
        # we need to modify the initial state covariance matrix to match
        # Kim and Nelson's results, since the *Statespace models follow Durbin
        # and Koopman.
        self.initial_state_cov = np.asfortranarray(
            np.dot(
                np.dot(self.transition[:, :, 0], self.initial_state_cov),
                self.transition[:, :, 0].T
            )
        )
    def init_filter(self):
        """Instantiate the dtype-matched Statespace model and Kalman filter."""
        # Use the appropriate Statespace model
        prefix = find_best_blas_type((self.obs,))
        cls = prefix_statespace_map[prefix[0]]
        # Instantiate the statespace model
        self.model = cls(
            self.obs, self.design, self.obs_intercept, self.obs_cov,
            self.transition, self.state_intercept, self.selection,
            self.state_cov
        )
        self.model.initialize_known(self.initial_state, self.initial_state_cov)
        # Initialize the appropriate Kalman filter
        cls = prefix_kalman_filter_map[prefix[0]]
        self.filter = cls(self.model, conserve_memory=self.conserve_memory,
                          loglikelihood_burn=self.loglikelihood_burn)
    def run_filter(self):
        """Run the filter and collect log-likelihood and filtered states."""
        # Filter the data
        self.filter()
        # Get results
        self.result = {
            'loglike': lambda burn: np.sum(self.filter.loglikelihood[burn:]),
            'state': np.array(self.filter.filtered_state),
        }
    def test_loglike(self):
        """Check the log-likelihood against the published value."""
        assert_almost_equal(
            self.result['loglike'](self.true['start']), self.true['loglike'], 5
        )
    def test_filtered_state(self):
        """Check filtered states 0, 1 and 3 against the published states."""
        assert_almost_equal(
            self.result['state'][0][self.true['start']:],
            self.true_states.iloc[:, 0], 4
        )
        assert_almost_equal(
            self.result['state'][1][self.true['start']:],
            self.true_states.iloc[:, 1], 4
        )
        assert_almost_equal(
            self.result['state'][3][self.true['start']:],
            self.true_states.iloc[:, 2], 4
        )
class TestClark1987Single(Clark1987):
    """
    Single-precision variant of the Clark (1987) test; currently skipped.
    """
    def __init__(self):
        # Single precision is not supported yet; the code below is kept
        # for when the skip is lifted.
        raise SkipTest('Not implemented')
        super(TestClark1987Single, self).__init__(
            dtype=np.float32, conserve_memory=0
        )
        self.init_filter()
        self.run_filter()
    def test_loglike(self):
        assert_allclose(
            self.result['loglike'](self.true['start']), self.true['loglike'],
            rtol=1e-3
        )
    def test_filtered_state(self):
        start = self.true['start']
        # (filtered-state row, true-states column) pairs to compare.
        for state_idx, col in ((0, 0), (1, 1), (3, 2)):
            assert_allclose(
                self.result['state'][state_idx][start:],
                self.true_states.iloc[:, col],
                atol=1e-2
            )
class TestClark1987Double(Clark1987):
    """
    Double-precision smoke test for the log-likelihood and filtered states.
    """
    def __init__(self):
        super(TestClark1987Double, self).__init__(dtype=float,
                                                  conserve_memory=0)
        self.init_filter()
        self.run_filter()
class TestClark1987SingleComplex(Clark1987):
    """
    Single-precision complex variant of the Clark (1987) test; currently
    skipped.
    """
    def __init__(self):
        # Single-precision complex is not supported yet; the code below is
        # kept for when the skip is lifted.
        raise SkipTest('Not implemented')
        super(TestClark1987SingleComplex, self).__init__(
            dtype=np.complex64, conserve_memory=0
        )
        self.init_filter()
        self.run_filter()
    def test_loglike(self):
        assert_allclose(
            self.result['loglike'](self.true['start']), self.true['loglike'],
            rtol=1e-3
        )
    def test_filtered_state(self):
        start = self.true['start']
        # (filtered-state row, true-states column) pairs to compare.
        for state_idx, col in ((0, 0), (1, 1), (3, 2)):
            assert_allclose(
                self.result['state'][state_idx][start:],
                self.true_states.iloc[:, col],
                atol=1e-2
            )
class TestClark1987DoubleComplex(Clark1987):
    """
    Double-precision complex smoke test for the log-likelihood and filtered
    states.
    """
    def __init__(self):
        super(TestClark1987DoubleComplex, self).__init__(dtype=complex,
                                                         conserve_memory=0)
        self.init_filter()
        self.run_filter()
class TestClark1987Conserve(Clark1987):
    """
    Memory-conservation test (flags 0x01 | 0x02) for the log-likelihood and
    filtered states.
    """
    def __init__(self):
        super(TestClark1987Conserve, self).__init__(dtype=float,
                                                    conserve_memory=0x01 | 0x02)
        self.init_filter()
        self.run_filter()
class Clark1987Forecast(Clark1987):
    """
    Forecasting variant of the Clark (1987) test: appends `nforecast`
    missing observations so the filter produces out-of-sample forecasts.
    """
    def __init__(self, dtype=float, nforecast=100, conserve_memory=0):
        super(Clark1987Forecast, self).__init__(dtype, conserve_memory)
        self.nforecast = nforecast
        # Keep the original series and extend it with NaNs to forecast over.
        self._obs = self.obs
        extended = np.r_[self.obs[0, :], [np.nan] * nforecast]
        self.obs = np.array(extended, ndmin=2, dtype=dtype, order="F")
    def test_filtered_state(self):
        start = self.true['start']
        stop = -self.nforecast
        # (filtered-state row, true-states column) pairs to compare over the
        # in-sample portion only.
        for state_idx, col in ((0, 0), (1, 1), (3, 2)):
            assert_almost_equal(
                self.result['state'][state_idx][start:stop],
                self.true_states.iloc[:, col], 4
            )
class TestClark1987ForecastDouble(Clark1987Forecast):
    """
    Double-precision forecasting test using the default forecast horizon.
    """
    def __init__(self):
        super(TestClark1987ForecastDouble, self).__init__()
        self.init_filter()
        self.run_filter()
class TestClark1987ForecastDoubleComplex(Clark1987Forecast):
    """
    Double-precision complex forecasting test for the log-likelihood and
    filtered states.
    """
    def __init__(self):
        super(TestClark1987ForecastDoubleComplex, self).__init__(dtype=complex)
        self.init_filter()
        self.run_filter()
class TestClark1987ForecastConserve(Clark1987Forecast):
    """
    Memory-conservation forecasting test for the log-likelihood and filtered
    states.
    """
    def __init__(self):
        super(TestClark1987ForecastConserve, self).__init__(
            dtype=float, conserve_memory=0x01 | 0x02)
        self.init_filter()
        self.run_filter()
class TestClark1987ConserveAll(Clark1987):
    """
    Test with all memory-conservation flags enabled; only the final filtered
    state is retained, so comparisons are made against the last period only.
    """
    def __init__(self):
        super(TestClark1987ConserveAll, self).__init__(
            dtype=float, conserve_memory=0x01 | 0x02 | 0x04 | 0x08)
        self.loglikelihood_burn = self.true['start']
        self.init_filter()
        self.run_filter()
    def test_loglike(self):
        # Burn-in is handled by the filter itself here, so sum from 0.
        assert_almost_equal(self.result['loglike'](0),
                            self.true['loglike'], 5)
    def test_filtered_state(self):
        last = self.true_states.shape[0] - 1
        assert_almost_equal(self.result['state'][0][-1],
                            self.true_states.iloc[last, 0], 4)
        assert_almost_equal(self.result['state'][1][-1],
                            self.true_states.iloc[last, 1], 4)
class Clark1989(object):
"""
Clark's (1989) bivariate unobserved components model of real GDP (as
presented in Kim and Nelson, 1999)
Tests two-dimensional observation data.
Test data produced using GAUSS code described in Kim and Nelson (1999) and
found at http://econ.korea.ac.kr/~cjkim/SSMARKOV.htm
See `results.results_kalman_filter` for more information.
"""
def __init__(self, dtype=float, conserve_memory=0, loglikelihood_burn=0):
self.true = results_kalman_filter.uc_bi
self.true_states = pd.DataFrame(self.true['states'])
# GDP and Unemployment, Quarterly, 1948.1 - 1995.3
data = pd.DataFrame(
self.true['data'],
index=pd.date_range('1947-01-01', '1995-07-01', freq='QS'),
columns=['GDP', 'UNEMP']
)[4:]
data['GDP'] = np.log(data['GDP'])
data['UNEMP'] = (data['UNEMP']/100)
# Observed data
self.obs = np.array(data, ndmin=2, dtype=dtype, order="C").T
# Parameters
self.k_endog = k_endog = 2 # dimension of observed data
self.k_states = k_states = 6 # dimension of state space
self.conserve_memory = conserve_memory
self.loglikelihood_burn = loglikelihood_burn
# Measurement equation
# design matrix
self.design = np.zeros((k_endog, k_states, 1), dtype=dtype, order="F")
self.design[:, :, 0] = [[1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1]]
# observation intercept
self.obs_intercept = np.zeros((k_endog, 1), dtype=dtype, order="F")
# observation covariance matrix
self.obs_cov = np.zeros((k_endog, k_endog, 1), dtype=dtype, order="F")
# Transition equation
# transition matrix
self.transition = np.zeros((k_states, k_states, 1),
dtype=dtype, order="F")
self.transition[([0, 0, 1, 1, 2, 3, 4, 5],
[0, 4, 1, 2, 1, 2, 4, 5],
[0, 0, 0, 0, 0, 0, 0, 0])] = [1, 1, 0, 0, 1, 1, 1, 1]
# state intercept
self.state_intercept = np.zeros((k_states, 1), dtype=dtype, order="F")
# selection matrix
self.selection = np.asfortranarray(np.eye(k_states)[:, :, None],
dtype=dtype)
# state covariance matrix
self.state_cov = np.zeros((k_states, k_states, 1),
dtype=dtype, order="F")
# Initialization: Diffuse priors
self.initial_state = np.zeros((k_states,), dtype=dtype)
self.initial_state_cov = np.asfortranarray(np.eye(k_states)*100,
dtype=dtype)
# Update matrices with given parameters
(sigma_v, sigma_e, sigma_w, sigma_vl, sigma_ec,
phi_1, phi_2, alpha_1, alpha_2, alpha_3) = np.array(
self.true['parameters'], dtype=dtype
)
self.design[([1, 1, 1], [1, 2, 3], [0, 0, 0])] = [
alpha_1, alpha_2, alpha_3
]
self.transition[([1, 1], [1, 2], [0, 0])] = [phi_1, phi_2]
self.obs_cov[1, 1, 0] = sigma_ec**2
self.state_cov[
np.diag_indices(k_states)+(np.zeros(k_states, dtype=int),)] = [
sigma_v**2, sigma_e**2, 0, 0, sigma_w**2, sigma_vl**2
]
# Initialization: modification
# Due to the difference in the way Kim and Nelson (1999) and Drubin
# and Koopman (2012) define the order of the Kalman filter routines,
# we need to modify the initial state covariance matrix to match
# Kim and Nelson's results, since the *Statespace models follow Durbin
# and | |
work nicely (+ deg -1)
box_off = int((int(c) + deg - 1) / deg)
box_base = (int((ord(r) - 64 + deg - 1) / deg) - 1) * deg
return 'b' + str(box_base + box_off)
@classmethod
def getLocations(cls, id, deg):
    """Return the (row, col) 0-based indices of cell *id* for a 2D array
    in a puzzle of degree *deg*.

    TODO: Currently assumes id is of the form "rc[c]" (a row letter
    followed by a column number, e.g. "A5").
    """
    row_idx = ord(id[0]) - ord('A')
    col_idx = int(id[1:]) - 1
    return (row_idx, col_idx)
@ classmethod
def _cname(cls, idx):
# Start counting from 'c1'
return 'c' + str(idx+1)
@ classmethod
def _rname(cls, idx):
# Start counting from 'rA'
return 'r' + chr(65 + idx)
@ classmethod
def _bname(cls, idx):
# Start counting from 'b1'
return 'b' + str(idx+1)
@classmethod
def initialize(cls, degree):
    """Build the unit definitions (unit name -> list of cell ids) and the
    inverse unit map (cell id -> list of unit names) for *degree*.

    The unit definitions hold one entry per column, row and box of the
    width x width grid (width == degree**2).
    """
    width = degree ** 2
    # Map from each unit to cell names in that unit; one empty list per
    # column, row and box unit.
    defns = {}
    cls.unit_defns[degree] = defns
    for idx in range(width):
        defns[cls._cname(idx)] = []
        defns[cls._rname(idx)] = []
        defns[cls._bname(idx)] = []
    # Populate each unit with its cells.
    row_names = [cls._rname(idx) for idx in range(width)]
    col_names = [cls._cname(idx) for idx in range(width)]
    for row in row_names:
        for col in col_names:
            cell_id = cls.getCellID(row, col)
            box_id = cls.getBoxID(row, col, degree)
            defns[row].append(cell_id)
            defns[col].append(cell_id)
            defns[box_id].append(cell_id)
    # Invert the definitions: map each cell to the units it appears in.
    cell_map = {}
    cls.unit_map[degree] = cell_map
    for unit, cell_list in defns.items():
        for cell in cell_list:
            cell_map.setdefault(cell, []).append(unit)
def __init__(self, state='.' * 81, degree=3, name=None):
    """
    Initialize a board for a puzzle of degree with the given state.
    State parameter can be a string, a dict (parsed json), or a Board to copy.

    Args:
        state: a Board to copy, a dict of parsed puzzle json, or a string of
            cell values. Defaults to an empty 81-cell board. (The previous
            default was a mutable *list*, which matched none of the
            isinstance branches below and so always raised.)
        degree (int): puzzle degree; must be in [2, 4].
        name (str or None): optional puzzle name used for the configuration.

    Locals:
        _state (dict {str -> Cell}): a mapping from every cell identifier to the Cell encapsulating that Cell's state
        _id (int): a unique identifier
        _parent_id (int): identifier of this board's parent
        goal_cell (str): the name/id of the goal cell to answer the question about
        accessible_cells (list[str]): the list of cell ids about which a user can take an action (eg, pivot, assign, exclude)

    Raises:
        TypeError: if state is not a Board, dict, or str.
    """
    # Lazily build the class-level unit tables for this degree.
    try:
        Board.unit_map[degree]
    except KeyError:
        Board.initialize(degree)
    # Generate a UID integer from uuid1. These bits are largely dependent on clock
    # (though it's been pointed out that they might leak a little information about MAC address)
    self._id = uuid.uuid1().int >> 64
    self._is_background = False
    self._action = {}
    self._state = dict()
    assert isinstance(degree, int), "Degree must be an int."
    assert 2 <= degree <= 4, "Degree must be between 2 and 4 for now."
    self._degree = degree
    self._parent_id = None
    assert name is None or isinstance(name, str), f"Name must be a str, not {name} of type {type(name)}."
    self.accessible_cells = None
    self.config = None
    if isinstance(state, Board):
        # State is a Board; copy it, but keep the new identifier
        logger.info("Initializing Board for Board %s (from %s).", str(state.getIdentifier()), str(state.getPuzzleName()))
        logger.debug("Incoming Board is %s.", str(state))
        for cell in state.getCells():
            self._state[cell.getIdentifier()] = Cell(cell)
        self._degree = state.getDegree()
        self._parent_id = state._id
        self.accessible_cells = state.accessible_cells
        self.config = state.config.copy()
    elif isinstance(state, dict):
        # State was parsed from json; keep the same identifier and update fields appropriately
        logger.info("Initializing Board for dict %s.", str(state))
        params = copy.deepcopy(state)
        assert 'serialNumber' in state, "Expecting serialNumber in state provided."
        self._id = state['serialNumber']
        del params['serialNumber']
        assert 'assignments' in state, "Expecting assignments in state provided."
        assert 'availableMoves' in state, "Expecting availableMoves in state provided."
        # Flatten the 2D assignment/option arrays to match sorted cell order.
        assignments = [item
                       for row in state['assignments'] for item in row]
        options = [item for row in state['availableMoves'] for item in row]
        # Initialize cell state: an assigned value wins over the option list.
        for i, identifier in enumerate(sorted(Board.getAllCells(degree))):
            cell_state = assignments[i] if assignments[i] is not None else options[i]
            self._state[identifier] = Cell(identifier, cell_state)
        del params['assignments']
        del params['availableMoves']
        if 'degree' in state:
            self._degree = state['degree']
        else:
            # logger.warn is a deprecated alias of warning().
            logger.warning("'degree' not specified in state: board initialization or use may fail unexpectedly.")
        if 'parentSerialNumber' in state:
            self._parent_id = state['parentSerialNumber']
            del params['parentSerialNumber']
        if 'goalCell' in state:
            goal = state['goalCell']
            assert len(goal) == 2, "Expected exactly a row and column index for goal."
            cell_id = self.getCellIDFromArrayIndex(goal[0], goal[1])
            if 'goal' in params:
                assert params['goal'] == cell_id, "goalCell details do not match stored goal."
            params['goal'] = cell_id
            del params['goalCell']
        if 'accessibleCells' in state:
            self.accessible_cells = []
            for accs in state['accessibleCells']:
                assert len(accs) == 2, "Expected exactly a row and column index for accessibleCell."
                self.accessible_cells.append(self.getCellIDFromArrayIndex(accs[0], accs[1]))
            del params['accessibleCells']
        else:
            self.computeAccessibleCells()
        if 'action' in state:
            # NOTE(review): this assigns 'action', while the rest of the class
            # uses '_action' — confirm whether a property mediates this.
            self.action = state['action']
            del params['action']
        if 'backtrackingBoard' in state:
            self._is_background = state['backtrackingBoard']
            del params['backtrackingBoard']
        self.config = config_data.ConfigurationData(self.getStateStr(
            False, False, ''), name, params)
    elif isinstance(state, str):
        logger.info("Initializing Board for string %s, name %s.", str(state), str(name))
        for i, identifier in enumerate(sorted(Board.getAllCells(degree))):
            self._state[identifier] = Cell(identifier, state[i])
        self._degree = degree
        self.config = config_data.ConfigurationData(self.getStateStr(
            False, False, ''), name)
        logger.debug("Crafted config to be %s.", str(self.config))
        self.computeAccessibleCells()
        logger.debug("Calculated accessible cells as %s.", str(self.accessible_cells))
    else:
        # The old message concatenated str + type, which itself raised
        # TypeError before the intended one could be delivered.
        raise TypeError("Can't initialize Board from input type "
                        f"{type(state)}. (Must be Board, dict, or str.)")
def __str__(self):
    """Render the board id, its parent id, the state grid, and every cell."""
    header = f"Board {self._id} (child of {self._parent_id}) State:\n"
    body = self.getStateStr(True) + "\nCells:\n"
    cells = "".join(str(cell) + "\n" for cell in self._state.values())
    return header + body + cells
def getGoalCell(self):
    """Return the goal cell id from the configuration, or None when unset."""
    config = self.config
    if not (config and config.parameters):
        return None
    return config.parameters.get("goal")
def computeAccessibleCells(self):
    """Compute the cell ids a user may act on; cache in self.accessible_cells.

    The goal cell, cells associated with it (same units), and cells whose
    value is already certain are off limits. If nothing qualifies, devolve
    to every uncertain cell except the goal cell. Returns the list.
    """
    goal_cell = self.getGoalCell()
    blocked = self.getAssociatedCellIds(goal_cell)
    if goal_cell:
        blocked.append(goal_cell)

    def selectable(cell_id):
        # Same predicate order as before: goal, certainty, association.
        return (cell_id != goal_cell
                and not self.getCell(cell_id).isCertain()
                and cell_id not in blocked)

    self.accessible_cells = [c for c in self.getAllCells() if selectable(c)]
    if not self.accessible_cells:
        # Devolve to any uncertain cells except the goal cell
        self.accessible_cells = [
            c for c in self.getAllCells()
            if c != goal_cell and not self.getCell(c).isCertain()
        ]
    return self.accessible_cells
def setToBackground(self):
    """Flag this board as a background board.

    Background boards represent places a user can back up to over their
    own decision after getting cornered; they are lower priority.
    """
    self._is_background = True
def addAction(self, action: dict):
    """Merge *action* (the change that produced this state) into the board's record.

    An already-recorded 'action' entry is never overwritten by 'applyops'.
    Note: when no record exists yet, the incoming dict is adopted directly
    (aliased), matching previous behavior.
    """
    if not self._action:
        self._action = action
    for key, value in action.items():
        keep_existing = (key == 'action'
                         and key in self._action
                         and value == 'applyops')
        if not keep_existing:
            self._action[key] = value
def getDegree(self):
    """Accessor for this puzzle board's degree."""
    return self._degree
def getIdentifier(self):
    """Accessor for this puzzle board's unique identifier."""
    return self._id
def getDisplayName(self):
    """Return the simple display name, or None when no config is attached."""
    return self.config.getParam("displayName") if self.config else None
def getPuzzleName(self):
    """Return the fully qualified puzzle name, or None when no config is attached."""
    return self.config.getParam("puzzleName") if self.config else None
def getQuestion(self):
    """Return the question associated with this board, or None when no config is attached."""
    return self.config.getParam("question") if self.config else None
def countUncertainValues(self):
    """Return the total number of candidate values across all uncertain cells."""
    return sum(len(cell.getValues())
               for cell in self.getCells()
               if not cell.isCertain())
def countAssociatedUncertainValuesGivenUncertainCell(self, cell):
""" Counts the number of uncertain values summed across all uncertain cells associated with cell,
if cell is uncertain.
I.e., counts the number of uncertain values across cells in the row, | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Loop through selected (hardcoded!) RunAnalysis.R output tables and plot
chosen parts of the data on Bokeh maps.
Created on Wed Apr 15 15:26:30 2020
@author: <NAME> (<EMAIL>)
"""
# Silence FutureWarnings of Pandas
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
# Import necessary modules
import pandas
import geopandas as gpd # geopandas is usually shortened as gpd
import mapclassify as mc
from fiona.crs import from_epsg
from bokeh.plotting import figure
from bokeh.layouts import gridplot
from bokeh.io import save
from bokeh.tile_providers import get_provider, Vendors
from bokeh.models import \
GeoJSONDataSource, \
LinearColorMapper, \
HoverTool, \
Legend, \
LegendItem, \
LabelSet, \
Label
import bokeh.palettes as palettes
import psycopg2
from os import path, mkdir
from scipy import stats
import matplotlib.pyplot as plt
# Silence max_open_warnings of matplotlib
plt.rcParams.update({'figure.max_open_warning': 0})
from textwrap import wrap
def get_db_conn():
    """Open and return an autocommitting psycopg2 connection.

    On localhost just dbname will usually suffice, but over remote
    connections e.g. the following can be defined as well:
    * host
    * user
    * sslmode (please use "verify-full"!)
    * password (if using password authentication instead of a client cert).

    Raises psycopg2.OperationalError (after printing a notice) on failure.
    """
    conn_params = {'dbname': 'tt'}
    try:
        connection = psycopg2.connect(**conn_params)
        connection.autocommit = True
        return connection
    except psycopg2.OperationalError:
        print('Failed to establish a DB connection!\n')
        raise
def get_plot(query, conn, desc, first, last):
    """Build one Bokeh choropleth map for a RunAnalysis.R results table.

    Args:
        query (str): SQL producing the regional results table; must expose
            'geom' and 'geom_centroid' geometry columns plus the T1, T2,
            AbsChange and RFChange measure columns.
        conn: an open psycopg2 DB connection.
        desc (str): human-readable description used in the plot titles.
        first (str): label of the earlier compared time point (year).
        last (str): label of the later compared time point (year).

    Returns:
        tuple (plot, statistics, reshist, resqq): the Bokeh figure, a dict of
        summary/test statistics, and the residual histogram and Q-Q plot
        handles (both None when either T1 or T2 contains no data).
    """
    # Get one of the results tables
    df = gpd.GeoDataFrame.from_postgis(
        query, conn, geom_col='geom',
        crs=from_epsg(3067)).to_crs("EPSG:3857")
    # A point source for labels. df.set_geometry() fails, but this works!
    df_centroids = gpd.GeoDataFrame.from_postgis(
        query, conn, geom_col='geom_centroid',
        crs=from_epsg(3067)).to_crs("EPSG:3857")
    # Classify data (manual classes based on outputs of previous runs!)
    breaks = [-10.1, 10, 101]
    classifier = mc.UserDefined.make(bins=breaks)
    classes = df[['RFChange']].apply(classifier)
    classes.columns = ['Class']
    df = df.join(classes)
    # Collect some statistics:
    # For practical help how to do this in Python:
    # https://pythonfordatascience.org/independent-t-test-python/
    statistics = {
        'min_rfchg': df['RFChange'].min(),
        'max_rfchg': df['RFChange'].max(),
        'min_abschg': df['AbsChange'].min(),
        'max_abschg': df['AbsChange'].max(),
        'mean_rfchg': df['RFChange'].mean(),
        'mean_abschg': df['AbsChange'].mean(),
        'median_abschg': df['AbsChange'].median(),
        'stdev_abschg': df['AbsChange'].std(),
        'mean_T1': df['T1'].mean(),
        'mean_T2': df['T2'].mean(),
        'stdev_T1': df['T1'].std(),
        'stdev_T2': df['T2'].std(),
        'levenestat_abschg': None,
        'levenepval_abschg': None,
        'shapirostat_abschg': None,
        'shapiropval_abschg': None,
        't-stat_abschg': None,
        't-pval_abschg': None,
        't-df_abschg': None}
    # None of this makes sense unless both variables have data:
    reshist = None
    resqq = None
    if not ((df['T1'] == 0).all() or (df['T2'] == 0).all()):
        # Null hypothesis for Levene test: both inputs have equal variances.
        statistics['levenestat_abschg'], \
            statistics['levenepval_abschg'] = stats.levene(df['T1'], df['T2'])
        # Null hypothesis for Shapiro-Wilk: normal distribution of residuals.
        diff = df['T2']-df['T1']
        statistics['shapirostat_abschg'], \
            statistics['shapiropval_abschg'] = stats.shapiro(diff)
        plot_title = 'Residuals for ' + desc + ', ' + first + '–' + last
        reshist = diff.plot(kind = 'hist',
                            title = "\n".join(wrap(plot_title, 60)),
                            figure=plt.figure())
        plt.figure()
        stats.probplot(diff, plot=plt)
        plt.title("\n".join(wrap(plot_title, 60)))
        resqq = plt.gca()
        statistics['t-stat_abschg'], \
            statistics['t-pval_abschg'] = stats.ttest_ind(df['T1'], df['T2'])
        statistics['t-df_abschg'] = df['T1'].count() + df['T2'].count() - 2
        # Do not use researchpy; it outputs a dataframe, and digging results
        # out of that just adds complexity.
        # statistics['descriptives_abs'], statistics['ttest_abs'] = rp.ttest(
        #     df['T1'], df['T2'])
    # Define class names
    cnames = [
        '[-100…-10[',
        '[-10…10]',
        ']10…100]']
    # Adding labels doesn't make sense as legend plotting does not work
    # (see below).
    # for i in range(len(cnames)):
    #     df['Label'] = None
    #     df.loc[df['Class'] == i, 'Label'] = cnames[i]
    # Get the tile provider. Ideally I should define my own and use
    # an NLS bkg map, but defining an own WTMS source is painstaking!
    tiles = get_provider(Vendors.CARTODBPOSITRON_RETINA)
    # tiles = get_provider(Vendors.STAMEN_TERRAIN_RETINA)
    # Try to plot a map
    plot = figure(
        x_range=(2725000,2815000),
        y_range=(8455000,8457000),
        # x_range=(2725000,2815000),
        # y_range=(8355000,8457000),
        x_axis_type="mercator",
        y_axis_type="mercator",
        height=450,
        width=600,
        title = desc + ', ' + first + '–' + last)
    plot.add_tile(tiles, level='underlay')
    # Create the colour mapper
    colourmapper = LinearColorMapper(
        low = 1,
        high = 1,
        palette=[palettes.RdYlBu[9][4]],
        low_color=palettes.RdYlBu[9][0],
        high_color=palettes.RdYlBu[9][8])
    # Create a map source from the DB results table and plot it
    mapsource = GeoJSONDataSource(geojson=df.to_json())
    plot.patches('xs',
                 'ys',
                 fill_color={'field': 'Class', 'transform': colourmapper},
                 line_color='gray',
                 source=mapsource,
                 line_width=1,
                 level='underlay')
    # Plot labels from centroids
    labsource = GeoJSONDataSource(geojson=df_centroids.to_json())
    # DEBUG: mark centroids on maps
    # plot.circle('x', 'y', source=labsource, color='red', size=10, level='underlay')
    labels = LabelSet(x='x',
                      y='y',
                      text='RFChange',
                      text_font_size='8pt',
                      x_offset=-4,
                      y_offset=-7,
                      source=labsource,
                      level='glyph')
    plot.add_layout(labels)
    # Cite map sources
    citation = Label(
        x=3,
        y=0,
        x_units='screen',
        y_units='screen',
        text='Map data: Statistics Finland, UH / Accessibility Research Group',
        render_mode='css',
        text_font_size='7.25pt',
        text_color='black',
        border_line_color='white',
        border_line_alpha=0.1,
        border_line_width=1.0,
        background_fill_color='white',
        background_fill_alpha=0.4)
    plot.add_layout(citation)
    bkg_map_head = Label(
        x=298,
        y=0,
        x_units='screen',
        y_units='screen',
        text='Background map: ',
        render_mode='css',
        text_font_size='7.25pt',
        text_color='black',
        border_line_color='white',
        border_line_alpha=0.1,
        border_line_width=1.0,
        background_fill_color='white',
        background_fill_alpha=0.4)
    plot.add_layout(bkg_map_head)
    # Create a legend. Of course it does NOT work automatically, see
    # https://github.com/bokeh/bokeh/issues/9398, but MUST still be defined
    # by data. :(
    # The easiest way is to create fake elements and position them so that
    # they're invisible, but draw a legend for them anyway.
    xq = list()
    yq = list()
    for i in range(3):
        xq.append(2800000+1000*i)
    for i in range(3):
        # BUG FIX: these are y coordinates (web-mercator northings); they
        # were previously appended to xq, which left yq empty so the fake
        # legend glyphs had no geometry at all.
        yq.append(8500000+1000*i)
    colours = [palettes.RdYlBu[9][0],
               palettes.RdYlBu[9][4],
               palettes.RdYlBu[9][8]]
    legend_renderer = plot.multi_line(
        [xq, xq, xq], [yq, yq, yq], color=colours, line_width=20)
    legend = [
        LegendItem(label=cnames[0], renderers=[legend_renderer], index=0),
        LegendItem(label=cnames[1], renderers=[legend_renderer], index=1),
        LegendItem(label=cnames[2], renderers=[legend_renderer], index=2)]
    plot.add_layout(Legend(items=legend, location='top_right',
                           title='Change, %-p.'))
    hoverinfo = HoverTool()
    hoverinfo.tooltips = [('Region', '@nimi'),
                          ('Mean time ' + first,'@T1'),
                          ('Mean time ' + last,'@T2'),
                          ('Mean time change', '@AbsChange'),
                          ('Change, %-point of the industry total',
                           '@RFChange')]
    plot.add_tools(hoverinfo)
    return plot, statistics, reshist, resqq
def industry_list():
    """Return the mapping of industry field codes to human-readable names."""
    industries = (
        ('PriProd', 'Agriculture, forestry and fishing'),
        ('Mining', 'Mining and quarrying'),
        ('Manuf', 'Manufacturing'),
        ('ElAC', 'Electricity, gas, steam and A/C'),
        ('WaterEnv', 'Water supply, sewage and environment'),
        ('Construct', 'Construction'),
        ('Trade', 'Wholesale and retail trade'),
        ('Logistics', 'Transportation and storage'),
        ('HoReCa', 'Hotels, restaurants and catering'),
        ('InfoComm', 'Information and communication'),
        ('FinIns', 'Financial and insurance activities'),
        ('RealEst', 'Real estate activities'),
        ('SpecProf', 'Speciality professions'),
        ('AdminSupp', 'Administrative and support services'),
        ('PubAdmDef', 'Public administration and defence'),
        ('Education', 'Education'),
        ('HealthSoc', 'Human health and social work'),
        ('ArtsEnt', 'Arts, entertainment and recreation'),
        ('OtherServ', 'Other service activities and NGOs'),
        ('HomeEmp', 'Households as employers'),
        ('IntOrgs', 'International organisations'),
        ('UnknownInd', 'Unknown industry'),
    )
    return dict(industries)
# Get a DB connection object.
pg = get_db_conn()
modes = {'car': 'Car', 'pt': 'PT'}
series = ('2013', '2015', '2018')
plot_stats_ind = list()
reshists = dict()
resqqs = dict()
docs_path = path.join(path.dirname(path.realpath(__file__)), 'docs')
for modeid, mode in modes.items():
plots = list()
for field, desc in industry_list().items():
query=(
'SELECT * FROM (SELECT '
'CASE WHEN '
'r.nimi<>\'\' THEN r.nimi ELSE \'Kauniainen\' END AS nimi, '
'r.geom, '
'rf1."RegID", '
'CASE WHEN rf1."RegID" '
'NOT IN (\'0911102000\', \'0916603000\') THEN '
'ST_PointOnSurface(r.geom) '
'ELSE '
'CASE WHEN rf1."RegID" = \'0911102000\' THEN '
'ST_Translate(ST_PointOnSurface(r.geom), 0, 4000) '
'ELSE '
'ST_Translate(ST_PointOnSurface(r.geom), 0, 8000) '
'END '
'END AS geom_centroid, '
'rf1."MunID", '
'rf1."AreaID", '
'rf1."DistID", '
'rf1."Count" AS "C1", '
'rf1."' + field +'" AS "RF_T1", '
'abs1."' + field +'" AS "T1", '
'CASE WHEN abs1."' + field +'" <> 0 THEN '
'ROUND(CAST('
'abs1."' + field +'"/(abs1."' + field +\
'"/abs1."Total")/abs1."Count" AS NUMERIC), 2) '
'ELSE 0 END AS "M1", '
'rf2."Count" AS "C2",rf2."' + field + '" AS "RF_T2", '
'abs2."' + field +'" AS "T2", '
'CASE WHEN abs2."' + field +'" <> 0 THEN '
'ROUND(CAST('
'abs2."' + field +'"/(abs2."' + field + \
'"/abs2."Total")/abs2."Count" AS NUMERIC), 2) '
'ELSE 0 END AS "M2", '
'ROUND(CAST(rf2."' + field + '"-rf1."' + field + \
'" AS NUMERIC), 2) AS "RFChange", '
'ROUND(CAST(abs2."' + field + '"-abs1."' + field + \
'" AS NUMERIC), 2) AS "AbsChange" '
'FROM hcr_subregions r '
'LEFT JOIN res_agg_j_ic_reg_' + modeid + '_icfreq_mun rf1 ON '
'r.kokotun=rf1."RegID" AND rf1."TTM" = {} '
'LEFT JOIN res_agg_j_ic_reg_' + modeid + '_icfreq_mun rf2 ON '
'r.kokotun=rf2."RegID" AND rf2."TTM" = {} '
'LEFT JOIN res_agg_j_ic_reg_' + modeid + '_mun abs1 ON '
'r.kokotun=abs1."RegID" AND abs1."TTM" = {} '
'LEFT JOIN res_agg_j_ic_reg_' + modeid + '_mun abs2 ON '
'r.kokotun=abs2."RegID" AND abs2."TTM" = {} '
') AS Q WHERE "AbsChange" IS NOT NULL')
for i, item in enumerate(series):
if(i<=len(series)-2):
q = query.format(item, series[i+1], item, series[i+1])
plot = get_plot(
q, pg,
mode + ': Change of the share of the total time: ' + desc,
item, series[i+1])
plots.append(plot[0])
period = str(item) + '–' + str(series[i+1])
reshists[mode + '_' + field + '_' + period] = plot[2]
resqqs[mode + '_' + field + '_' + period] = plot[3]
statsdic = | |
not smiley:
currentSmileys = smileys[:i]
break
for smiley in currentSmileys:
if name.upper() == smiley.upper():
raise commands.CommandError(message=f'Error: `{name}` is already on the smiley list.')
row = 0
for i, smiley in enumerate(smileys):
if name.upper() == smiley.upper():
row = i + headerRows + 1
break
if row:
await sheet.delete_row(row)
row = headerRows + len(currentSmileys) + 1
timestamp = datetime.utcnow().strftime("%b %#d, %Y")
endTime = (datetime.utcnow() + relativedelta(months=+1)).strftime("%b %#d, %Y")
values = [name, 'No', 'Applied', '', '', '', '', '', '', '', '', '', '', 'Pending', timestamp, endTime]
await sheet.insert_row(values, row)
await ctx.send(f'**{name}** has been added to the smileys sheet.')
if ctx.author.top_role <= leaderRole:
adminChannel = self.bot.get_channel(config['adminChannel'])
await adminChannel.send(f'**{name}** has been added to the smileys sheet with status **Pending**.')
@commands.command(pass_context=True, hidden=True)
@portables_leader()
async def activatesmiley(self, ctx, *nameParts):
    '''
    Sets smiley status to active (Leader+) (Portables only).
    Arguments: name.
    Surround names containing spaces with quotation marks, e.g.: "name with spaces".
    '''
    addCommand()
    await ctx.channel.trigger_typing()
    if not nameParts:
        raise commands.CommandError(message='Required argument missing: `name`.')
    # Re-join the space-separated message parts into a single RSN
    # (idiomatic join instead of the old manual `name += part + ' '` loop).
    name = ' '.join(nameParts).strip()
    if not name:
        raise commands.CommandError(message='Required argument missing: `name`.')
    # Open the smileys worksheet.
    agc = await self.bot.agcm.authorize()
    ss = await agc.open(config['sheetName'])
    sheet = await ss.worksheet('Smileys')
    headerRows = 4
    smileys = await sheet.col_values(1)
    smileys = smileys[headerRows:]
    # Truncate at the first empty row; rows below it are not smiley entries.
    for i, smiley in enumerate(smileys):
        if not smiley:
            smileys = smileys[:i]
            break
    # First try an exact (case-insensitive) match...
    row = 0
    for i, smiley in enumerate(smileys):
        if name.upper() == smiley.upper():
            row = i + headerRows + 1
            name = smiley
            break
    # ...then fall back to a substring match.
    if not row:
        for i, smiley in enumerate(smileys):
            if name.upper() in smiley.upper():
                row = i + headerRows + 1
                name = smiley
                break
    if not row:
        raise commands.CommandError(message=f'Could not find smiley: `{name}`.')
    # Column 14 holds the smiley's status value.
    col = 14
    status = await sheet.cell(row, col)
    status = status.value
    if status == 'Active':
        raise commands.CommandError(message=f'Error: `{name}`\'s status was already set to active.')
    await sheet.update_cell(row, col, 'Active')
    await ctx.send(f'**{name}**\'s status has been set to active.')
@commands.command(pass_context=True, hidden=True)
@portables_admin()
async def addalt(self, ctx, name="", member=""):
    '''
    Adds a rank alt to the sheets (Admin+) (Portables only).
    Arguments: name, member.
    Member can be either a name or a mention.
    Surround names containing spaces with quotation marks, e.g.: "name with spaces".
    Constraints: name must be a valid RSN, member must be a rank.
    '''
    addCommand()
    await ctx.channel.trigger_typing()
    if not name:
        raise commands.CommandError(message='Required argument missing: `name`.')
    if not member:
        raise commands.CommandError(message='Required argument missing: `member`.')
    rankRole = discord.utils.get(ctx.guild.roles, id=config['rankRole'])
    # Resolve the owning member from a mention, or by normalized-name lookup.
    if ctx.message.mentions:
        member = ctx.message.mentions[0]
    else:
        pattern = re.compile(r'[\W_]+')
        memberName = pattern.sub('', member).upper()
        member = discord.utils.find(lambda m: utils.is_name(memberName, m) and m.top_role >= rankRole, ctx.guild.members)
        if not member:
            raise commands.CommandError(message=f'Could not find rank: `{memberName}`.')
    memberName = member.display_name
    # NOTE(review): the class [^A-z0-9 -] also lets [, \, ], ^, _ and ` pass
    # (A-z spans ASCII punctuation), which is why the backtick is stripped
    # explicitly below. Confirm whether [^A-Za-z0-9 -] was intended.
    memberName = re.sub('[^A-z0-9 -]', '', memberName).replace('`', '').strip()
    # Classify the alt by the owner's highest role (renamed from `type`,
    # which shadowed the builtin).
    modRole = discord.utils.get(ctx.guild.roles, id=config['modRole'])
    adminRole = discord.utils.get(ctx.guild.roles, id=config['adminRole'])
    leaderRole = discord.utils.get(ctx.guild.roles, id=config['leaderRole'])
    if member.top_role >= adminRole:
        alt_type = 'Admin+ alt'
    elif member.top_role >= modRole:
        alt_type = 'Moderator alt'
    else:
        alt_type = 'Rank alt'
    # Validate the RSN: max 12 characters, restricted character set.
    if len(name) > 12:
        raise commands.CommandError(message=f'Invalid argument: `{name}`.')
    if re.match(r'^[A-z0-9 -]+$', name) is None:
        raise commands.CommandError(message=f'Invalid argument: `{name}`.')
    # Open the smileys worksheet and read the current entries.
    agc = await self.bot.agcm.authorize()
    ss = await agc.open(config['sheetName'])
    sheet = await ss.worksheet('Smileys')
    headerRows = 4
    smileys = await sheet.col_values(1)
    smileys = smileys[headerRows:]
    types = await sheet.col_values(2)
    types = types[headerRows:]
    # Truncate both columns at the first empty name row.
    currentSmileys = []
    for i, smiley in enumerate(smileys):
        if not smiley:
            currentSmileys = smileys[:i]
            types = types[:i]
            break
    for smiley in currentSmileys:
        if name.upper() == smiley.upper():
            raise commands.CommandError(message=f'Error: `{name}` is already on the smiley list.')
    # Insert just above the first row that no longer belongs to the alt's
    # section (the sheet is grouped Admin alts / Moderator alts / Rank alts).
    row = 0
    if 'Rank' in alt_type:
        for i, t in enumerate(types):
            if 'ALT' not in t.upper():
                row = i + headerRows + 1
                break
    elif 'Mod' in alt_type:
        for i, t in enumerate(types):
            if 'ADMIN' not in t.upper() and 'MODERATOR' not in t.upper():
                row = i + headerRows + 1
                break
    elif 'Admin' in alt_type:
        for i, t in enumerate(types):
            if 'ADMIN' not in t.upper():
                row = i + headerRows + 1
                break
    if not row:
        raise commands.CommandError(message='Unexpected error: Could not find row in spreadsheet.')
    # NOTE(review): '%#d' is a Windows-only strftime flag — confirm the bot
    # only runs on Windows.
    timestamp = datetime.utcnow().strftime("%b %#d, %Y")
    endTime = ''
    values = [name, alt_type, f'{memberName} alt', '', '', '', '', '', '', '', '', '', '', 'Pending', timestamp, endTime]
    await sheet.insert_row(values, row)
    await ctx.send(f'**{memberName}**\'s alt, **{name}**, has been added to the smileys sheet.')
    # Only notify the admin channel when the invoker is below Leader.
    if ctx.author.top_role < leaderRole:
        adminChannel = self.bot.get_channel(config['adminChannel'])
        await adminChannel.send(f'**{memberName}**\'s alt, **{name}**, has been added to the smileys sheet with status **Pending**.')
@commands.command(pass_context=True, aliases=['a'], ignore_extra=True)
@portables_only()
async def add(self, ctx):
    """
    Add portable locations (Portables only).
    Arguments: portable, worlds, location, worlds, location, etc...
    Constraints: This command can only be used in the locations channel. Only approved locations, and worlds are allowed. Additionally, worlds must be a valid world. No more than 3 portables per location.
    """
    addCommand()
    await ctx.channel.trigger_typing()  # send 'typing...' status
    portables = self.bot.get_guild(config['portablesServer'])
    if not portables:
        raise commands.CommandError(message=f'Error: could not find Portables server.')
    member = await portables.fetch_member(ctx.author.id)
    # Restrict usage to the admin-commands channel or the locations channels,
    # but only when invoked from inside the Portables server itself.
    adminCommandsChannel = self.bot.get_channel(config['adminCommandsChannel'])
    if adminCommandsChannel:
        if ctx.guild == self.bot.get_guild(config['portablesServer']):
            if ctx.channel != adminCommandsChannel and not ctx.channel.id in portables_channel_ids:
                raise commands.CommandError(message=f'Error: `Incorrect channel`. Please use {portables_channel_mention_string}.')
    # Strip the prefix and command name to leave only the argument text.
    input = ctx.message.content.upper().replace(ctx.prefix.upper(), '', 1).replace(ctx.invoked_with.upper(), '', 1).strip()  # get the input corresponding to this message
    if not input:  # if there was no input, return
        raise commands.CommandError(message=f'Required argument missing: `location`.')
    # get the portable type corresponding to the input
    portable, col = get_port_type(input, ctx.channel)
    if col == -1:  # if no portable type was given, return
        raise commands.CommandError(message=f'Required argument missing: `portable`.')
    # replace some portable types due to incompatibilities with location abbreviations
    input = input.replace('RANGE', '')
    input = input.replace('WORKBENCH', '')
    newPorts = getPorts(input)  # get the set of portable locations corresponding to the input
    if not newPorts:  # if there were no locations, return
        raise commands.CommandError(message=f'Invalid argument: `location`.')
    ports = await getPortRow(self.bot.agcm)  # get the current portable locations from the sheet
    val = ports[col-1]  # get the string corresponding to our portable type
    ports[col-1] = ""  # set value for our portable type to empty
    for i, p in enumerate(ports):  # for each portable, get the set of portable locations
        ports[i] = getPorts(p)
    error = checkPorts(newPorts, ports)  # check for errors in the set of portables
    if error:  # if there was an error, send the error message and return
        raise commands.CommandError(message=error)
    newPortsText = format(newPorts).replace('*', '\*')  # string representing portables to be added
    currentPorts = getPorts(val)  # current portables on sheets
    sumPorts = addPorts(currentPorts, newPorts)  # set of portables after adding given portables
    newVal = format(sumPorts)  # string representing the new set of portable locations
    # check whether multiple portables were added
    multiple = False
    if len(newPorts) > 1:
        multiple = True
    elif len(newPorts[0][0]) > 1:
        multiple = True
    # if no change, raise an error
    if newVal == val:
        if multiple:
            raise commands.CommandError(message=f'The `{portable}` locations `{format(newPorts)}` were already on the sheet.')
        else:
            raise commands.CommandError(message=f'The `{portable}` location `{format(newPorts)}` was already on the sheet.')
    timestamp = datetime.utcnow().strftime("%#d %b, %#H:%M")  # get timestamp string in format: day Month, hours:minutes
    name = ''  # initialize empty name of user
    isHelper = False  # boolean value representing whether or not the user is a rank
    # Only record the editor's name when they hold the helper role.
    helperRole = discord.utils.get(portables.roles, id=config['helperRole'])
    if helperRole in member.roles:  # if the rank role is in the set of roles corresponding to the user
        isHelper = True  # then set isRank to true
        name = utils.get_user_name(member)  # and get the name of the user
    await updateSheet(self.bot.agcm, col, newVal, timestamp, name, isHelper)  # update the sheet
    # send confirmation message
    if multiple:
        await ctx.send(f'The **{portable}** locations **{newPortsText}** have been added.')
    else:
        await ctx.send(f'The **{portable}** location **{newPortsText}** has been added.')
@commands.command(pass_context=True, aliases=['rem'], ignore_extra=True)
@portables_only()
async def remove(self, ctx):
"""
Remove portable locations (Portables only).
Arguments: portable, worlds, location, worlds, location, etc...
Constraints: This command can only be used in the locations channel. Only approved locations, and worlds are allowed. Additionally, worlds must be a valid world. No more than 3 portables per location.
"""
addCommand() # increment global commands counter
await ctx.channel.trigger_typing() # send 'typing...' status
portables = self.bot.get_guild(config['portablesServer'])
if not | |
be generated, you can view the status of these jobs below. ', 'info'))
if not display_link.preparing_display():
display_link.prepare_display()
preparable_steps = display_link.get_prepare_steps()
else:
raise Exception('Attempted a view action (%s) on a non-ready display application' % app_action)
return trans.fill_template_mako("dataset/display_application/display.mako",
msg=msg,
display_app=display_app,
display_link=display_link,
refresh=refresh,
preparable_steps=preparable_steps)
return trans.show_error_message('You do not have permission to view this dataset at an external display application.')
def _delete(self, trans, dataset_id):
    """Mark the HDA identified by the encoded *dataset_id* as deleted.

    Verifies (via assertion) that the dataset belongs to the current
    history before deleting. Any failure is caught and reported.

    Returns:
        tuple (message, status): (None, 'done') on success, or an error
        message string and 'error' on failure.
    """
    message = None
    status = 'done'
    id = None
    try:
        id = self.decode_id(dataset_id)
        hda = trans.sa_session.query(self.app.model.HistoryDatasetAssociation).get(id)
        assert hda, 'Invalid HDA: %s' % id
        # Walk up parent datasets to find the containing history
        topmost_parent = hda
        while topmost_parent.parent:
            topmost_parent = topmost_parent.parent
        assert topmost_parent in trans.history.datasets, "Data does not belong to current history"
        # Mark deleted and cleanup
        hda.mark_deleted()
        hda.clear_associated_files()
        trans.log_event("Dataset id %s marked as deleted" % str(id))
        self.hda_manager.stop_creating_job(hda)
        trans.sa_session.flush()
    except Exception as e:
        # Log the full details server-side but return only a generic
        # message to the caller.
        msg = 'HDA deletion failed (encoded: %s, decoded: %s)' % (dataset_id, id)
        log.exception(msg + ': ' + str(e))
        trans.log_event(msg)
        message = 'Dataset deletion failed'
        status = 'error'
    return (message, status)
def _undelete(self, trans, dataset_id):
    """Undelete the HDA identified by the encoded *dataset_id*.

    Requires the HDA to be undeletable and to belong to the current
    history (asserted). Any failure is caught and reported.

    Returns:
        tuple (message, status): (None, 'done') on success, or an error
        message string and 'error' on failure.
    """
    message = None
    status = 'done'
    id = None
    try:
        id = self.decode_id(dataset_id)
        history = trans.get_history()
        hda = trans.sa_session.query(self.app.model.HistoryDatasetAssociation).get(id)
        assert hda and hda.undeletable, 'Invalid HDA: %s' % id
        # Walk up parent datasets to find the containing history
        topmost_parent = hda
        while topmost_parent.parent:
            topmost_parent = topmost_parent.parent
        assert topmost_parent in history.datasets, "Data does not belong to current history"
        # Mark undeleted
        hda.mark_undeleted()
        trans.sa_session.flush()
        trans.log_event("Dataset id %s has been undeleted" % str(id))
    except Exception:
        msg = 'HDA undeletion failed (encoded: %s, decoded: %s)' % (dataset_id, id)
        log.exception(msg)
        trans.log_event(msg)
        message = 'Dataset undeletion failed'
        status = 'error'
    return (message, status)
def _unhide(self, trans, dataset_id):
    """Unhide the HDA identified by the encoded *dataset_id*.

    Returns:
        bool: True when the HDA was found and unhidden; False when the id
        cannot be decoded or no matching HDA exists.
    """
    try:
        id = self.decode_id(dataset_id)
    except Exception:
        return False
    history = trans.get_history()
    hda = trans.sa_session.query(self.app.model.HistoryDatasetAssociation).get(id)
    if hda:
        # Walk up parent datasets to find the containing history
        topmost_parent = hda
        while topmost_parent.parent:
            topmost_parent = topmost_parent.parent
        # NOTE(review): unlike _delete/_undelete, this AssertionError is not
        # caught and propagates to the caller — confirm that is intended.
        assert topmost_parent in history.datasets, "Data does not belong to current history"
        # Mark unhidden
        hda.mark_unhidden()
        trans.sa_session.flush()
        trans.log_event("Dataset id %s has been unhidden" % str(id))
        return True
    return False
def _purge(self, trans, dataset_id):
message = None
status = 'done'
try:
id = self.decode_id(dataset_id)
user = trans.get_user()
hda = trans.sa_session.query(self.app.model.HistoryDatasetAssociation).get(id)
# Invalid HDA
assert hda, 'Invalid history dataset ID'
# If the user is anonymous, make sure the HDA is owned by the current session.
if not user:
current_history_id = trans.galaxy_session.current_history_id
assert hda.history.id == current_history_id, 'Data does not belong to current user'
# If the user is known, make sure the HDA is owned by the current user.
else:
assert hda.history.user == user, 'Data does not belong to current user'
# Ensure HDA is deleted
hda.deleted = True
# HDA is purgeable
# Decrease disk usage first
if user:
user.adjust_total_disk_usage(-hda.quota_amount(user))
# Mark purged
hda.purged = True
trans.sa_session.add(hda)
trans.log_event("HDA id %s has been purged" % hda.id)
trans.sa_session.flush()
# Don't delete anything if there are active HDAs or any LDDAs, even if
# the LDDAs are deleted. Let the cleanup scripts get it in the latter
# case.
if hda.dataset.user_can_purge:
try:
hda.dataset.full_delete()
trans.log_event("Dataset id %s has been purged upon the the purge of HDA id %s" % (hda.dataset.id, hda.id))
trans.sa_session.add(hda.dataset)
except Exception:
log.exception('Unable to purge dataset (%s) on purge of HDA (%s):' % (hda.dataset.id, hda.id))
trans.sa_session.flush()
except Exception as exc:
msg = 'HDA purge failed (encoded: %s, decoded: %s): %s' % (dataset_id, id, exc)
log.exception(msg)
trans.log_event(msg)
message = 'Dataset removal from disk failed'
status = 'error'
return (message, status)
@web.expose
def delete(self, trans, dataset_id, filename, show_deleted_on_refresh=False):
message, status = self._delete(trans, dataset_id)
return trans.response.send_redirect(web.url_for(controller='root', action='history', show_deleted=show_deleted_on_refresh, message=message, status=status))
@web.expose
def delete_async(self, trans, dataset_id, filename):
message, status = self._delete(trans, dataset_id)
if status == 'done':
return "OK"
else:
raise Exception(message)
@web.expose
def undelete(self, trans, dataset_id, filename):
message, status = self._undelete(trans, dataset_id)
return trans.response.send_redirect(web.url_for(controller='root', action='history', show_deleted=True, message=message, status=status))
@web.expose
def undelete_async(self, trans, dataset_id, filename):
message, status = self._undelete(trans, dataset_id)
if status == 'done':
return "OK"
else:
raise Exception(message)
@web.expose
def unhide(self, trans, dataset_id, filename):
if self._unhide(trans, dataset_id):
return trans.response.send_redirect(web.url_for(controller='root', action='history', show_hidden=True))
raise Exception("Error unhiding")
@web.expose
def purge(self, trans, dataset_id, filename, show_deleted_on_refresh=False):
if trans.app.config.allow_user_dataset_purge:
message, status = self._purge(trans, dataset_id)
else:
message = "Removal of datasets by users is not allowed in this Galaxy instance. Please contact your Galaxy administrator."
status = 'error'
return trans.response.send_redirect(web.url_for(controller='root', action='history', show_deleted=show_deleted_on_refresh, message=message, status=status))
@web.expose
def purge_async(self, trans, dataset_id, filename):
if trans.app.config.allow_user_dataset_purge:
message, status = self._purge(trans, dataset_id)
else:
message = "Removal of datasets by users is not allowed in this Galaxy instance. Please contact your Galaxy administrator."
status = 'error'
if status == 'done':
return "OK"
else:
raise Exception(message)
    @web.expose
    def show_params(self, trans, dataset_id=None, from_noframe=None, **kwd):
        """
        Show the parameters used for the job associated with an HDA.

        Renders ``show_params.mako`` with the originating job, its tool, the
        parameter values, and any upgrade messages produced when the tool's
        parameter definitions have changed since the job ran.
        """
        try:
            hda = trans.sa_session.query(trans.app.model.HistoryDatasetAssociation).get(self.decode_id(dataset_id))
        except ValueError:
            hda = None
        if not hda:
            raise paste.httpexceptions.HTTPRequestRangeNotSatisfiable("Invalid reference dataset id: %s." % escape(str(dataset_id)))
        if not self._can_access_dataset(trans, hda):
            return trans.show_error_message("You are not allowed to access this dataset")
        # Get the associated job, if any. If this hda was copied from another,
        # we need to find the job that created the original dataset association.
        params_objects = None
        job = None
        tool = None
        upgrade_messages = {}
        has_parameter_errors = False
        inherit_chain = hda.source_dataset_chain
        if inherit_chain:
            # The last link in the chain is the original HDA the copies stem from.
            job_dataset_association = inherit_chain[-1][0]
        else:
            job_dataset_association = hda
        if job_dataset_association.creating_job_associations:
            job = job_dataset_association.creating_job_associations[0].job
        if job:
            # Get the tool object
            try:
                # Load the tool
                toolbox = self.get_toolbox()
                tool = toolbox.get_tool(job.tool_id, job.tool_version)
                assert tool is not None, 'Requested tool has not been loaded.'
                # Load parameter objects, if a parameter type has changed, it's possible for the value to no longer be valid
                try:
                    params_objects = job.get_param_values(trans.app, ignore_errors=False)
                except Exception:
                    # Fall back to a lenient load so we can still display
                    # *something* for jobs whose stored values no longer parse.
                    params_objects = job.get_param_values(trans.app, ignore_errors=True)
                    # use different param_objects in the following line, since we want to display original values as much as possible
                    upgrade_messages = tool.check_and_update_param_values(job.get_param_values(trans.app, ignore_errors=True),
                                                                          trans,
                                                                          update_values=False)
                    has_parameter_errors = True
            except Exception:
                # Tool could not be loaded at all; render with tool=None below.
                pass
        if job is None:
            return trans.show_error_message("Job information is not available for this dataset.")
        # TODO: we should provide the basic values along with the objects, in order to better handle reporting of old values during upgrade
        return trans.fill_template("show_params.mako",
                                   inherit_chain=inherit_chain,
                                   history=trans.get_history(),
                                   hda=hda,
                                   job=job,
                                   tool=tool,
                                   params_objects=params_objects,
                                   upgrade_messages=upgrade_messages,
                                   has_parameter_errors=has_parameter_errors)
@web.expose
def copy_datasets(self, trans, source_history=None, source_content_ids="", target_history_id=None, target_history_ids="", new_history_name="", do_copy=False, **kwd):
user = trans.get_user()
if source_history is not None:
decoded_source_history_id = self.decode_id(source_history)
history = self.history_manager.get_owned(decoded_source_history_id, trans.user, current_history=trans.history)
current_history = trans.get_history()
else:
history = current_history = trans.get_history()
refresh_frames = []
if source_content_ids:
if not isinstance(source_content_ids, list):
source_content_ids = source_content_ids.split(",")
encoded_dataset_collection_ids = [s[len("dataset_collection|"):] for s in source_content_ids if s.startswith("dataset_collection|")]
encoded_dataset_ids = [s[len("dataset|"):] for s in source_content_ids if s.startswith("dataset|")]
decoded_dataset_collection_ids = set(map(self.decode_id, encoded_dataset_collection_ids))
decoded_dataset_ids = set(map(self.decode_id, encoded_dataset_ids))
else:
decoded_dataset_collection_ids = []
decoded_dataset_ids = []
if new_history_name:
target_history_ids = []
else:
if target_history_id:
target_history_ids = [self.decode_id(target_history_id)]
elif target_history_ids:
if not isinstance(target_history_ids, list):
target_history_ids = target_history_ids.split(",")
target_history_ids = list(set([self.decode_id(h) for h in target_history_ids if h]))
else:
target_history_ids = []
done_msg = error_msg = ""
new_history = None
if do_copy:
invalid_contents = 0
if not (decoded_dataset_ids or decoded_dataset_collection_ids) or not (target_history_ids or new_history_name):
error_msg = "You must provide both source datasets and target histories. "
else:
if new_history_name:
new_history = trans.app.model.History()
new_history.name = new_history_name
new_history.user = user
trans.sa_session.add(new_history)
trans.sa_session.flush()
target_history_ids.append(new_history.id)
if user:
target_histories = [hist for hist in map(trans.sa_session.query(trans.app.model.History).get, target_history_ids) if hist is not None and hist.user == user]
else:
target_histories = [history]
if len(target_histories) != len(target_history_ids):
error_msg = error_msg + "You do not have permission to add datasets to %i requested histories. " % (len(target_history_ids) - len(target_histories))
source_contents = list(map(trans.sa_session.query(trans.app.model.HistoryDatasetAssociation).get, decoded_dataset_ids))
source_contents.extend(map(trans.sa_session.query(trans.app.model.HistoryDatasetCollectionAssociation).get, decoded_dataset_collection_ids))
source_contents.sort(key=lambda content: content.hid)
for content in source_contents:
if content is None:
error_msg = error_msg + "You tried to copy a dataset that does not exist. "
invalid_contents += 1
elif content.history != history:
error_msg = error_msg + "You tried to copy a dataset which is not in your current history. "
invalid_contents += 1
else:
for hist in target_histories:
if content.history_content_type == "dataset":
hist.add_dataset(content.copy())
else:
copy_collected_datasets = True
copy_kwds = {}
if copy_collected_datasets:
copy_kwds["element_destination"] = hist
| |
# Copyright 2018 The TensorFlow Constrained Optimization Authors. All Rights
# Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Contains the `Expression` class.
The purpose of this library is to make it easy to construct and optimize ML
problems for which the goal is to minimize a linear combination of rates,
subject to zero or more constraints on linear combinations of rates. That is:
minimize: c11 * rate11 + c12 * rate12 + ... + tensor1
such that: c21 * rate21 + c22 * rate22 + ... + tensor2 <= 0
c31 * rate31 + c32 * rate32 + ... + tensor3 <= 0
...
Each rate is e.g. an error rate, positive prediction rate, false negative rate
on a certain slice of the data, and so on.
The objective and constraint functions are each represented by an `Expression`,
which captures a linear combination of `Term`s and scalar `Tensor`s. A `Term`
represents the proportion of time that a certain event occurs on a subset of the
data. For example, a `Term` might represent the positive prediction rate on the
set of negatively-labeled examples (the false positive rate).
The above description is somewhat oversimplified. Most relevantly here, an
`Expression` object actually consists of two `BasicExpression` objects: one for
the "penalty" portion of the problem (which is used when optimizing the model
parameters), and the other for the "constraint" portion (used when optimizing
the constraints, e.g. the Lagrange multipliers, if using the Lagrangian
formulation). In other words, an `Expression` needs to know not only exactly
what quantity it represents (the "constraints" portion), but also a
{sub,super}differentiable approximation of it (the "penalty" portion).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import numbers
import six
from tensorflow_constrained_optimization.python.rates import basic_expression
from tensorflow_constrained_optimization.python.rates import constraint
from tensorflow_constrained_optimization.python.rates import helpers
from tensorflow_constrained_optimization.python.rates import term
@six.add_metaclass(abc.ABCMeta)
class Expression(helpers.RateObject):
  """A rate-based quantity that can be minimized, penalized or constrained.

  Like a `BasicExpression`, an `Expression` stands for a linear combination of
  terms, but it actually bundles *two* `BasicExpression`s: the "penalty"
  portion, which is used when the expression is being minimized (in the
  objective function) or penalized (to satisfy a constraint), and the
  "constraint" portion, which is used when the expression itself is being
  constrained.

  Usually the two portions are different approximations of the same quantity:
  for example, the constraint portion could be a zero-one-based rate (say, a
  false positive rate), with the penalty portion being a hinge approximation
  of it.

  An `Expression` may also (optionally) carry a set of extra associated
  constraints. Technically these can be arbitrary, but in practice they
  express conditions required for the `Expression` to make sense. For example,
  "the false positive rate of f(x) at the threshold for which the true
  positive rate is at least 0.9" would be the expression "false positive rate
  of f(x) - t" together with the extra constraint "true positive rate of
  f(x) - t is at least 0.9", where "t" is an implicit threshold. Such extra
  constraints are ultimately included in any optimization problem containing
  the associated `Expression` (or an `Expression` derived from it).
  """

  @abc.abstractproperty
  def penalty_expression(self):
    """Returns the `BasicExpression` used for the "penalty" portion.

    This is the {sub,semi}differentiable approximation used when the
    expression is being minimized (in the objective function) or penalized (to
    satisfy a constraint); the quantity used when constraining is
    `constraint_expression`.
    """

  @abc.abstractproperty
  def constraint_expression(self):
    """Returns the `BasicExpression` used for the "constraint" portion.

    This is the quantity used when the expression is being constrained; the
    {sub,semi}differentiable approximation used during training is
    `penalty_expression`.
    """

  @abc.abstractproperty
  def extra_constraints(self):
    """Returns the list of extra `Constraint`s."""

  @abc.abstractmethod
  def _positive_scalar_mul(self, scalar):
    """Returns the result of multiplying by a positive scalar.

    Intended for internal use only: behaves like `__mul__`, except that the
    parameter must be nonnegative (this is *not* checked), and non-`Number`
    inputs (in particular `DeferredTensor`s) are accepted.
    """

  @abc.abstractmethod
  def _positive_scalar_div(self, scalar):
    """Returns the result of dividing by a positive scalar.

    Intended for internal use only: behaves like `__div__`, except that the
    parameter must be positive (this is *not* checked), and non-`Number`
    inputs (in particular `DeferredTensor`s) are accepted.
    """

  @abc.abstractmethod
  def __mul__(self, scalar):
    """Returns the result of multiplying by a scalar."""

  def __rmul__(self, scalar):
    """Returns the result of multiplying by a scalar."""
    return self.__mul__(scalar)

  @abc.abstractmethod
  def __truediv__(self, scalar):
    """Returns the result of dividing by a scalar."""

  # Only *scalar* division is supported: (Expression / scalar) is legal, but
  # (scalar / Expression) is not, so __rtruediv__ is deliberately absent.

  @abc.abstractmethod
  def __neg__(self):
    """Returns the result of negating this `Expression`."""

  def __add__(self, other):
    """Returns the result of adding two `Expression`s."""
    if isinstance(other, Expression):
      summand = other
    elif isinstance(other, helpers.RateObject):
      raise TypeError("Expression objects can only be added to each other, or "
                      "scalars")
    else:
      # Scalars cannot be added to BasicExpressions directly, so the scalar is
      # first wrapped up as a TensorTerm-backed Expression.
      wrapped = basic_expression.BasicExpression([term.TensorTerm(other)])
      summand = ExplicitExpression(wrapped, wrapped)
    return SumExpression([self, summand])

  def __radd__(self, other):
    """Returns the result of adding two `Expression`s."""
    return self.__add__(other)

  def __sub__(self, other):
    """Returns the result of subtracting two `Expression`s."""
    return self.__add__(other.__neg__())

  def __rsub__(self, other):
    """Returns the result of subtracting two `Expression`s."""
    return self.__neg__().__add__(other)

  def __le__(self, other):
    """Returns a `Constraint` representing self <= other."""
    return constraint.Constraint(self - other)

  def __ge__(self, other):
    """Returns a `Constraint` representing self >= other."""
    return constraint.Constraint(other - self)

  # __eq__ deliberately does NOT build an equality constraint:
  # 1) Expressions may need to live in sets or dicts.
  # 2) Optimizing subject to equality constraints probably wouldn't work well
  #    anyway.
class ExplicitExpression(Expression):
  """An `Expression` wrapping two explicitly-given `BasicExpression`s."""

  def __init__(self, penalty_expression, constraint_expression):
    """Creates a new `ExplicitExpression`.

    An `Expression` is internally represented by *two* `BasicExpression`s: the
    "penalty" portion, used when the expression is being minimized (in the
    objective function) or penalized (to satisfy a constraint), and the
    "constraint" portion, used when the expression is being constrained. These
    two `BasicExpression`s are exactly the parameters of this constructor.

    Args:
      penalty_expression: `BasicExpression` for the "penalty" portion of the
        optimization (i.e. when optimizing the model parameters). It should be
        {sub,semi}differentiable.
      constraint_expression: `BasicExpression` for the "constraint" portion of
        the optimization (i.e. when optimizing the constraints). It does not
        need to be {sub,semi}differentiable.
    """
    super(ExplicitExpression, self).__init__()
    self._penalty_expression = penalty_expression
    self._constraint_expression = constraint_expression

  @property
  def penalty_expression(self):
    return self._penalty_expression

  @property
  def constraint_expression(self):
    return self._constraint_expression

  @property
  def extra_constraints(self):
    # An ExplicitExpression never carries additional constraints.
    return []

  def _positive_scalar_mul(self, scalar):
    scaled_penalty = self._penalty_expression * scalar
    scaled_constraint = self._constraint_expression * scalar
    return ExplicitExpression(scaled_penalty, scaled_constraint)

  def _positive_scalar_div(self, scalar):
    divided_penalty = self._penalty_expression / scalar
    divided_constraint = self._constraint_expression / scalar
    return ExplicitExpression(divided_penalty, divided_constraint)

  def __mul__(self, scalar):
    if not isinstance(scalar, numbers.Number):
      raise TypeError("Expression objects only support *scalar* multiplication")
    scaled_penalty = self._penalty_expression * scalar
    scaled_constraint = self._constraint_expression * scalar
    return ExplicitExpression(scaled_penalty, scaled_constraint)

  def __truediv__(self, scalar):
    if not isinstance(scalar, numbers.Number):
      raise TypeError("Expression objects only support *scalar* division")
    divided_penalty = self._penalty_expression / scalar
    divided_constraint = self._constraint_expression / scalar
    return ExplicitExpression(divided_penalty, divided_constraint)

  def __neg__(self):
    return ExplicitExpression(-self._penalty_expression,
                              -self._constraint_expression)
class ConstrainedExpression(Expression):
"""Represents a list of constraints wrapped around an `Expression`."""
def __init__(self, expression, extra_constraints):
"""Creates a new `ConstrainedExpression`.
The "extra_constraints" parameter is used to specify | |
- m.x4192*m.b3012 <= 0)
# Generated big-M-style constraints c4264..c4475. Each one is
#   x_i**2 - y_i * b <= 0
# i.e. x_i is forced to 0 whenever the gating binary b is 0. The index
# pattern of the generated one-per-line assignments is perfectly regular:
#   c(n):  x(n - 3101)**2 - x(n - 71) * b <= 0
# with gating binary b3012 for n in 4264..4301, b3013 for n in 4302..4401,
# and b3014 for n in 4402..4475, so the assignments are collapsed into a
# single loop (Pyomo routes setattr through component assignment, exactly
# like the literal ``m.cN = Constraint(...)`` statements it replaces).
for _n in range(4264, 4476):
    if _n <= 4301:
        _b = m.b3012
    elif _n <= 4401:
        _b = m.b3013
    else:
        _b = m.b3014
    _x = getattr(m, 'x%d' % (_n - 3101))
    _y = getattr(m, 'x%d' % (_n - 71))
    setattr(m, 'c%d' % _n, Constraint(expr=_x * _x - _y * _b <= 0))
m.c4476 = Constraint(expr=m.x1375*m.x1375 - m.x4405*m.b3014 | |
"""
This set of functions helps the algorithms of MSAF to read and write files
of the Segmentation Dataset.
"""
from collections import Counter
import datetime
import glob
import json
import logging
import numpy as np
import os
from threading import Thread
# Local stuff
import msaf
from msaf import jams2
from msaf import utils
class FileStruct:
    """Holds the paths of every dataset file tied to a single audio file."""

    def __init__(self, audio_file):
        """Creates the entire file structure given the audio file."""
        self.audio_file = audio_file
        # The dataset root is two levels above the audio file.
        self.ds_path = os.path.dirname(os.path.dirname(audio_file))
        self.est_file = self._get_dataset_file(
            msaf.Dataset.estimations_dir, msaf.Dataset.estimations_ext)
        self.features_file = self._get_dataset_file(
            msaf.Dataset.features_dir, msaf.Dataset.features_ext)
        self.ref_file = self._get_dataset_file(
            msaf.Dataset.references_dir, msaf.Dataset.references_ext)

    def _get_dataset_file(self, dir, ext):
        """Gets the desired dataset file."""
        # Swap the audio extension for the dataset extension on the base
        # name, then anchor the result under the dataset directory.
        audio_file_ext = "." + self.audio_file.split(".")[-1]
        base_name = os.path.basename(self.audio_file)
        return os.path.join(self.ds_path, dir,
                            base_name.replace(audio_file_ext, ext))

    def __repr__(self):
        """Prints the file structure."""
        return ("FileStruct(\n\tds_path=%s,\n\taudio_file=%s,\n\test_file=%s,"
                "\n\tfeatures_file=%s,\n\tref_file=%s\n)" %
                (self.ds_path, self.audio_file, self.est_file,
                 self.features_file, self.ref_file))
def has_same_parameters(est_params, boundaries_id, labels_id, params):
    """Checks whether the parameters in params are the same as the estimated
    parameters in est_params."""
    def _key_matches(param_key):
        # A key counts as matching only if it exists with an equal value
        # AND the boundary/label algorithm identifiers agree as well.
        return (param_key in est_params and
                est_params[param_key] == params[param_key] and
                est_params["boundaries_id"] == boundaries_id and
                (labels_id is None or est_params["labels_id"] == labels_id))
    # Vacuously True for an empty params dict, as in the original counter.
    return all(_key_matches(param_key) for param_key in params)
def find_estimation(all_estimations, boundaries_id, labels_id, params,
                    est_file):
    """Finds the correct estimation from all the estimations contained in a
    JAMS file given the specified arguments.

    Parameters
    ----------
    all_estimations : list
        List of section Range Annotations from a JAMS file.
    boundaries_id : str
        Identifier of the algorithm used to compute the boundaries.
    labels_id : str
        Identifier of the algorithm used to compute the labels.
    params : dict
        Additional search parameters. E.g. {"feature" : "hpcp"}.
    est_file : str
        Path to the estimated file (JAMS file).

    Returns
    -------
    correct_est : RangeAnnotation
        Correct estimation found in all the estimations.
        None if it couldn't be found.
    correct_i : int
        Index of the estimation in the all_estimations list.
    """
    correct_est, correct_i = None, -1
    for idx, estimation in enumerate(all_estimations):
        if not has_same_parameters(estimation.sandbox, boundaries_id,
                                   labels_id, params):
            continue
        if correct_est is not None:
            # More than one annotation matches: warn, and the last match
            # wins (same tie-breaking as before).
            logging.warning("Multiple estimations match your parameters in "
                            "file %s" % est_file)
        correct_est = estimation
        correct_i = idx
    return correct_est, correct_i
def read_estimations(est_file, boundaries_id, labels_id=None, **params):
    """Reads the estimations (boundaries and/or labels) from a jams file
    containing the estimations of an algorithm.

    Parameters
    ----------
    est_file : str
        Path to the estimated file (JAMS file).
    boundaries_id : str
        Identifier of the algorithm used to compute the boundaries.
    labels_id : str
        Identifier of the algorithm used to compute the labels.
    params : dict
        Additional search parameters. E.g. {"feature" : "hpcp"}.

    Returns
    -------
    boundaries : np.array((N,2))
        Array containing the estimated boundaries in intervals.
    labels : np.array(N)
        Array containing the estimated labels.
        Empty array if labels_id is None.
    """
    # Open file and read jams
    try:
        jam = jams2.load(est_file)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # are not swallowed.
        logging.error("Could not open JAMS file %s" % est_file)
        return np.array([]), np.array([])

    # Get all the estimations for the sections
    all_estimations = jam.sections

    # Find correct estimation
    correct_est, i = find_estimation(all_estimations, boundaries_id, labels_id,
                                     params, est_file)
    if correct_est is None:
        # This happens occasionally when switching features/boundary methods
        # (was a stray no-op string literal; now a real comment).
        logging.error("Could not find estimation in %s" % est_file)
        return np.array([]), np.array([])

    # Retrieve unique levels of segmentation (loop variable renamed from
    # `range`, which shadowed the builtin)
    levels = []
    for section in correct_est.data:
        levels.append(section.label.context)
    levels = list(set(levels))

    # Retrieve data, one (boundaries, labels) pair per level
    all_boundaries = []
    all_labels = []
    for level in levels:
        boundaries = []
        labels = []
        for section in correct_est.data:
            if level == section.label.context:
                boundaries.append([section.start.value, section.end.value])
                if labels_id is not None:
                    labels.append(section.label.value)
        all_boundaries.append(np.asarray(boundaries))
        all_labels.append(np.asarray(labels, dtype=int))

    # If there is only one level, return np.arrays instead of lists
    if len(levels) == 1:
        all_boundaries = all_boundaries[0]
        all_labels = all_labels[0]

    return all_boundaries, all_labels
def get_algo_ids(est_file):
    """Gets the algorithm ids that are contained in the est_file."""
    # The estimation file is plain JSON keyed by algorithm id under
    # "boundaries".
    with open(est_file, "r") as handle:
        est_data = json.load(handle)
    return est_data["boundaries"].keys()
def read_references(audio_path, annotator_id=0):
    """Reads the boundary times and the labels.

    Parameters
    ----------
    audio_path : str
        Path to the audio file
    annotator_id : int
        Identifier of the annotator in the JAMS file.

    Returns
    -------
    ref_times : list
        List of boundary times
    ref_labels : list
        List of labels
    """
    # Dataset path
    ds_path = os.path.dirname(os.path.dirname(audio_path))

    # Read references
    jam_path = os.path.join(ds_path, msaf.Dataset.references_dir,
                            os.path.basename(audio_path)[:-4] +
                            msaf.Dataset.references_ext)

    ds_prefix = os.path.basename(audio_path).split("_")[0]

    # Get context
    if ds_prefix in msaf.prefix_dict.keys():
        context = msaf.prefix_dict[ds_prefix]
    else:
        context = "function"

    try:
        ref_inters, ref_labels = jams2.converters.load_jams_range(
            jam_path, "sections", context=context, annotator=annotator_id)
    except Exception:
        # Bug fix: return a (times, labels) pair so that callers which
        # unpack the result (e.g. read_ref_bound_frames) do not crash;
        # previously a bare [] was returned here. The except is also
        # narrowed from a bare `except:`.
        logging.warning("Reference not found in %s" % jam_path)
        return [], []

    # Intervals to times
    ref_times = utils.intervals_to_times(ref_inters)

    return ref_times, ref_labels
def read_ref_labels(audio_path):
    """Reads the annotated labels from the given audio path."""
    # Only the labels are needed; the boundary times are discarded.
    _, ref_labels = read_references(audio_path)
    return ref_labels
def read_ref_int_labels(audio_path):
    """Reads the annotated labels using unique integers as identifiers
    instead of strings."""
    ref_labels = read_ref_labels(audio_path)
    label_dict = {}
    int_labels = []
    for ref_label in ref_labels:
        # First occurrence of a label claims the next free integer,
        # starting from 1.
        if ref_label not in label_dict:
            label_dict[ref_label] = len(label_dict) + 1
        int_labels.append(label_dict[ref_label])
    return int_labels
def align_times(times, frames):
    """Aligns the times to the closes frame times (e.g. beats).

    NOTE(review): despite the summary above, this does not pick the
    *nearest* frame. For ascending, non-negative `frames`, each row of
    `dist` grows with the column index until frames[j] >= times[i] and is
    constant afterwards, so argmax (first occurrence of the maximum)
    yields the first frame at or after each time. Assumes `frames` is
    sorted ascending and values are non-negative -- TODO confirm.

    Parameters
    ----------
    times : np.array
        Times (e.g. in seconds) to be aligned.
    frames : np.array
        Frame times (e.g. beat times) to align against.

    Returns
    -------
    np.array
        Unique (and therefore sorted) frame indices for the given times.
    """
    # dist[i, j] = min(times[i], frames[j])
    dist = np.minimum.outer(times, frames)
    # Clamp negatives to 0, then take the first column index reaching the
    # row maximum.
    bound_frames = np.argmax(np.maximum(0, dist), axis=1)
    # np.unique deduplicates and sorts the indices.
    return np.unique(bound_frames)
def read_ref_bound_frames(audio_path, beats):
    """Reads the corresponding references file to retrieve the boundaries
    in frames."""
    ref_times, _ = read_references(audio_path)
    # Snap the annotated boundary times onto the given beat grid.
    return align_times(ref_times, beats)
def get_features(audio_path, annot_beats=False, framesync=False,
                 pre_features=None):
    """
    Gets the features of an audio file given the audio_path.

    Parameters
    ----------
    audio_path: str
        Path to the audio file.
    annot_beats: bool
        Whether to use annotated beats or not.
    framesync: bool
        Whether to use framesync features or not.
    pre_features: dict
        Pre computed features as a dictionary.
        `None` for reading them from the json file.

    Return
    ------
    C: np.array((N, 12))
        (Beat-sync) Chromagram
    M: np.array((N, 13))
        (Beat-sync) MFCC
    T: np.array((N, 6))
        (Beat-sync) Tonnetz
    cqt: np.array((N, msaf.Anal.cqt_bins))
        (Beat-sync) Constant-Q transform
    G: np.array(N, 19)
        (Beat-sync) Gammatone features
    beats: np.array(T)
        Beats in seconds
    dur: float
        Song duration
    analysis : dict
        Parameters of analysis of track (e.g. sampling rate)
    """

    if pre_features is None:
        # Dataset path
        ds_path = os.path.dirname(os.path.dirname(audio_path))

        # Read Estimations
        features_path = os.path.join(
            ds_path, msaf.Dataset.features_dir,
            os.path.basename(audio_path)[:-4] + msaf.Dataset.features_ext)
        with open(features_path, "r") as f:
            feats = json.load(f)

        # Beat Synchronous Feats
        if framesync:
            feat_str = "framesync"
            beats = None
        else:
            if annot_beats:
                # Read references. The path is built outside the try block
                # so the error message below can always reference it (it
                # used to be assigned inside the try).
                annotation_path = os.path.join(
                    ds_path, msaf.Dataset.references_dir,
                    os.path.basename(audio_path)[:-4] +
                    msaf.Dataset.references_ext)
                try:
                    jam = jams2.load(annotation_path)
                except Exception:
                    # Narrowed from a bare except.
                    raise RuntimeError("No references found in file %s" %
                                       annotation_path)
                feat_str = "ann_beatsync"
                beats = []
                beat_data = jam.beats[0].data
                if beat_data == []:
                    raise ValueError("No annotated beats found in file %s" %
                                     annotation_path)
                for data in beat_data:
                    beats.append(data.time.value)
                beats = np.unique(beats)
            else:
                feat_str = "est_beatsync"
                beats = np.asarray(feats["beats"]["times"])

        # Mi: added the Gammatone features
        C = np.asarray(feats[feat_str]["hpcp"])
        M = np.asarray(feats[feat_str]["mfcc"])
        T = np.asarray(feats[feat_str]["tonnetz"])
        cqt = np.asarray(feats[feat_str]["cqt"])
        G = np.asarray(feats[feat_str]["gmt"])
        analysis = feats["analysis"]
        dur = analysis["dur"]

        # Frame times might be shorter than the actual number of features.
        if framesync:
            frame_times = utils.get_time_frames(dur, analysis)
            C = C[:len(frame_times)]
            M = M[:len(frame_times)]
            T = T[:len(frame_times)]
            G = G[:len(frame_times)]
    else:
        # Features were already computed: pick them out of the dictionary,
        # using the beat-sync prefix unless framesync was requested.
        feat_prefix = ""
        if not framesync:
            feat_prefix = "bs_"
        C = pre_features["%shpcp" % feat_prefix]
        M = pre_features["%smfcc" % feat_prefix]
        T = pre_features["%stonnetz" % feat_prefix]
        cqt = pre_features["%scqt" % feat_prefix]
        G = pre_features["%sgmt" % feat_prefix]
        beats = pre_features["beats"]
        dur = pre_features["anal"]["dur"]
        analysis = pre_features["anal"]

    return C, M, T, cqt, G, beats, dur, analysis
def safe_write(jam, out_file):
    """Serializes `jam` to `out_file` as JSON.

    This method is supposed to be called in a separate thread in order to
    avoid interruptions and corrupt the file.

    Parameters
    ----------
    jam : object
        JSON-serializable JAMS data to dump.
    out_file : str
        Path to the output file.
    """
    # Bug fix: the old try/finally referenced `f` in the finally clause,
    # which raised NameError (masking the real error) whenever open()
    # itself failed. A `with` block closes the file in every case.
    with open(out_file, "w") as f:
        json.dump(jam, f, indent=2)
def save_estimations(out_file, times, labels, boundaries_id, labels_id,
                     **params):
    """Saves the segment estimations in a JAMS file.

    Parameters
    ----------
    out_file : str
        Path to the output JAMS file in which to save the estimations.
    times : np.array or list
        Estimated boundary times.
        If `list`, estimated hierarchical boundaries.
    labels : np.array(N, 2)
        Estimated labels (None in case we are only storing boundary
        evaluations).
    boundaries_id : str
        Boundary algorithm identifier.
    labels_id : str
        Labels algorithm identifier.
    params : dict
        Dictionary with additional parameters for both algorithms.
    """
    # Convert to intervals and sanity check
    if 'numpy' in str(type(times)):
        # Flat (non-hierarchical) boundaries
        inters = utils.times_to_intervals(times)
        assert len(inters) == len(labels), "Number of boundary intervals " \
            "(%d) and labels (%d) do not match" % (len(inters), len(labels))
        # Put into lists to simplify the writing process later
        inters = [inters]
        labels = [labels]
    else:
        # Hierarchical boundaries: one set of intervals per level
        inters = []
        for level in range(len(times)):
            est_inters = utils.times_to_intervals(times[level])
            inters.append(est_inters)
            assert len(inters[level]) == len(labels[level]), \
                "Number of boundary intervals (%d) and labels (%d) do not match" % \
                (len(inters[level]), len(labels[level]))

    # Find estimation in file
    curr_estimation = None
    curr_i = -1
    if os.path.isfile(out_file):
        jam = jams2.load(out_file)
        all_estimations = jam.sections
        curr_estimation, curr_i = find_estimation(
            all_estimations, boundaries_id, labels_id, params, out_file)
    else:
        # Create new JAMS if it doesn't exist
        jam = jams2.Jams()
        jam.metadata.title = os.path.basename(out_file).replace(
            msaf.Dataset.estimations_ext, "")

    # Create new annotation if needed
    if curr_estimation is None:
        curr_estimation = jam.sections.create_annotation()

    # Save metadata and parameters
    curr_estimation.annotation_metadata.attribute = "sections"
    curr_estimation.annotation_metadata.version = msaf.__version__
    curr_estimation.annotation_metadata.origin = "MSAF"
    sandbox = {}
    sandbox["boundaries_id"] = boundaries_id
    sandbox["labels_id"] = labels_id
    sandbox["timestamp"] = \
        datetime.datetime.today().strftime("%Y/%m/%d %H:%M:%S")
    for key in params:
        sandbox[key] = params[key]
    curr_estimation.sandbox = sandbox

    # Save actual data
    curr_estimation.data = []
    for i, (level_inters, level_labels) in enumerate(zip(inters, labels)):
        if level_labels is None:
            # Bug fix: this used to assign the throwaway name `label` and
            # sized the array with len(inters) (the number of levels), so
            # the zip() below crashed with TypeError on level_labels=None.
            level_labels = np.ones(len(level_inters)) * -1
        for bound_inter, label in zip(level_inters, level_labels):
            segment = curr_estimation.create_datapoint()
            segment.start.value = float(bound_inter[0])
            segment.start.confidence = 0.0
            segment.end.value = float(bound_inter[1])
            segment.end.confidence = 0.0
            segment.label.value = label
            segment.label.confidence = 0.0
            segment.label.context = "level_%d" % i

    # Place estimation in its place
    if curr_i != -1:
        jam.sections[curr_i] = curr_estimation

    # Write file and do not let users interrupt it
    my_thread = Thread(target=safe_write, args=(jam, out_file,))
    my_thread.start()
    my_thread.join()
def get_all_est_boundaries(est_file, annot_beats, algo_ids=None,
                           annotator_id=0):
    """Gets all the estimated boundaries for all the algorithms.

    Parameters
    ----------
    est_file: str
        Path to the estimated file (JSON file)
    annot_beats: bool
        Whether to use the annotated beats or not.
    algo_ids : list
        List of algorithm ids to read boundaries from.
        If None, all algorithm ids are read.
    annotator_id : int
        Identifier of the annotator whose references to use.

    Returns
    -------
    all_boundaries: list
        A list of np.arrays containing the times of the boundaries, one array
        for each algorithm. The first entry holds the ground-truth times.
    """
    all_boundaries = []

    # Get GT boundaries
    # NOTE(review): replace("json", "jams") substitutes the first "json"
    # occurring anywhere in the basename, not just the extension -- verify
    # file names never contain "json" elsewhere.
    jam_file = os.path.dirname(est_file) + "/../references/" + \
        os.path.basename(est_file).replace("json", "jams")
    ds_prefix = os.path.basename(est_file).split("_")[0]
    ann_inter, ann_labels = jams2.converters.load_jams_range(
        jam_file, "sections", context=msaf.prefix_dict[ds_prefix],
        annotator=annotator_id)
    ann_times = utils.intervals_to_times(ann_inter)
    all_boundaries.append(ann_times)

    # Estimations
    if algo_ids is None:
        algo_ids = get_algo_ids(est_file)
    for algo_id in algo_ids:
        # NOTE(review): read_estimations' signature is
        # (est_file, boundaries_id, labels_id=None, **params); annot_beats
        # is passed positionally into the labels_id slot, and the
        # (boundaries, labels) tuple it returns is bound to a single name,
        # so len(est_inters) is the tuple length -- confirm this call still
        # behaves as intended after any refactor of read_estimations.
        est_inters = read_estimations(est_file, algo_id, annot_beats,
                                      feature=msaf.feat_dict[algo_id])
        if len(est_inters) == 0:
            logging.warning("no estimations for algorithm: %s" % algo_id)
            continue
        boundaries = utils.intervals_to_times(est_inters)
        all_boundaries.append(boundaries)
    return all_boundaries
def get_all_est_labels(est_file, annot_beats, algo_ids=None, annotator_id=0):
"""Gets all the estimated boundaries for all the algorithms.
Parameters
----------
est_file: str
Path to the estimated file (JSON file)
annot_beats: bool
Whether to use the annotated beats or not.
algo_ids : list
List of algorithm ids to to read boundaries from.
If None, all algorithm ids are read.
annotator_id : int
Identifier of | |
to simply be "\n", which when adjusted
# by tight_layout() will leave a spacing between the suptitle and the plots.
### create confusion matrix ###
# set "\n" as confusion matrix title, font size large (to mirror suptitle)
axs[0].set_title(" ", fontsize = "large")
# heatmap, with annotations, and using clabs as axis labels. if normalized,
# report percentage (no decimal places), else report as decimal value
if norm_true == "true":
heatmap(cmat, annot = True, cmap = cmaps[0], cbar = False,
xticklabels = clabs, yticklabels = clabs, ax = axs[0],
fmt = ".0%")
else:
heatmap(cmat, annot = True, cmap = cmaps[0], cbar = False,
xticklabels = clabs, yticklabels = clabs, ax = axs[0],
fmt = "d")
### create plot of ROC curves ###
# set title of ROC curve plot to be "\n" as well
axs[1].set_title("\n", fontsize = "large")
# set axis labels; same in multiclass and binary case
axs[1].set_xlabel("false positive rate", fontsize = "medium")
axs[1].set_ylabel("true positive rate")
# in the two-class case, we just create a simple lint plot with one color
if nclasses == 2: axs[1].plot(fpr, tpr, color = cmaps[1], **kwargs)
# else in the multiclass case
else:
# get ListedColormap from the specified color map string with cc value
lcm = _get_cmap_colors(cmaps[1], nclasses, cc = cc[0], callfn = fname_)
# for each class label in clabs, plot its respective true positive rate
# against its false positive rate with lcm.colors[i] as the color
for i in range(nclasses):
axs[1].plot(fpr[i], tpr[i], color = lcm.colors[i], **kwargs)
# set legend from y_test_bins's column names
axs[1].legend(y_test_bins.columns)
### create plot of precision-recall curves ###
# set title of precision-recall curve plot to also be "\n"
axs[2].set_title("\n", fontsize = "large")
# set axis labels
axs[2].set_xlabel("recall")
axs[2].set_ylabel("precision")
# in the two-class case, create one simple line plot with a single color
if nclasses == 2: axs[2].plot(rcr, prr, color = cmaps[2], **kwargs)
else:
# get ListedColormap from the specified color map string with cc value
lcm = _get_cmap_colors(cmaps[2], nclasses, cc = cc[1], callfn = fname_)
# for each class label in clabs, plot its respective precision against
# its recall by indexing color with lcm.colors[i]
for i in range(nclasses):
axs[2].plot(rcr[i], prr[i], color = lcm.colors[i], **kwargs)
# set legend from y_test_bins's column names
axs[2].legend(y_test_bins.columns)
# add tight layout adjustments
fig.tight_layout()
# if out_file is not None, save to outfile
if outfile is not None: fig.savefig(outfile)
# return fig, axs, and stats_dict
return fig, axs, stats_dict
def _adj_barh(ax, coefs, flabs, figsize = (_cfp_figwidth, _cfp_figheight),
              axscale = 1.1, style = "darkgrid", cmap = _cfp_cmap, cc = 0,
              edgecolor = "white", edgewidth = 1, title = None,
              fontsize = "medium", callfn = None, **kwargs):
    """
    internal method to create on a specified Axes object a horizontal bar plot
    with square plotting area from a set of points and associated data labels.
    axis limits are adjustable with regards to the range of the data, as are the
    padding of the bars and the widths + colors of the bars' edges. title can be
    specified or be None, and color map can be freely changed.

    parameters:

    ax         matplotlib Axes (or AxesSubplot) to draw onto
    coefs      1d iterable of bar lengths (e.g. model coefficients)
    flabs      1d iterable of labels, one per entry in coefs
    figsize    not referenced in this body -- kept for signature
               compatibility, TODO confirm intended use
    axscale    optional, default 1.1. sets the scale of the xy-axis length in
               relation to the length of the range of values in coefs. so in
               data units, if the range of values in coefs is x, then length of
               the xy-axes is axscale * x. we force axscale >= 1 for a
               sensible-looking plot. axscale > 1 increases space between the
               ends of the longest bars and the left/right plot edges.
    style      not referenced in this body -- TODO confirm intended use
    cmap       color map string handed to _get_cmap_colors
    cc         color contrast/concentration parameter for _get_cmap_colors
    edgecolor  color of the bars' edges
    edgewidth  line width of the bars' edges
    title      optional plot title; None for no title
    fontsize   font size used for the title
    callfn     name of the calling function, used in error messages
    **kwargs   currently unused
    """
    if callfn is None: callfn = _coef_barplot.__name__
    # check that ax is a matplotlib Axes object (most likely an AxesSubplot)
    if not isinstance(ax, Axes):
        raise TypeError("{0}: error: ax must inherit from matplotlib.axes.Axes"
                        "".format(callfn))
    # check that coefs and flabs are 1d iterables; skip type checking
    if (not hasattr(coefs, "__iter__")) or isinstance(coefs, str):
        raise TypeError("{0}: error: coefs must be an iterable".format(callfn))
    if (not hasattr(flabs, "__iter__")) or isinstance(flabs, str):
        raise TypeError("{0}: error: flabs must be an iterable".format(callfn))
    # check if lengths are the same
    if len(coefs) != len(flabs):
        raise ValueError("{0}: coefs and flabs must be of the same length"
                         "".format(callfn))
    # check if axscale >= 1
    if (not isinstance(axscale, int)) and (not isinstance(axscale, float)):
        raise TypeError("{0}: error: axscale must be float >= 1".format(callfn))
    if axscale < 1:
        raise ValueError("{0}: error: float axscale must be >= 1"
                         "".format(callfn))
    # set nfeatures
    nfeatures = len(coefs)
    ### axes setup ###
    # set the axes of ax to be square
    ax.axis("square")
    # get the range of the coefficients/feature importances; use as limits
    # for the x axis, and make y axis lims (0, xylim[1] - xylim[0]).
    # notes: ensures we will still have a square set of axes. axscale is used to
    # scale the lengths of the xy-axes, so we can leave some space between the
    # longest bars and the edge of the plot.
    xylim = (axscale * min(*coefs), axscale * max(*coefs))
    # compute range of xy-axes and set x axis and y axis limits
    xy_len = xylim[1] - xylim[0]
    ax.axis([*xylim, 0, xy_len])
    # since we have nfeatures features, we divide [xylim[0], xylim[1]] into
    # nfeatures uniform intervals, and pick the intervals' midpoints.
    # width of a single interval is simply xy_len / nfeatures. bys determines
    # the vertical coordinates of each of the bars on our grid.
    bys = tuple((xy_len / nfeatures) * (i + 0.5) for i in range(nfeatures))
    # to make sure our labels and bars are placed from the top of the plot going
    # down, we have to reverse the order of the bar widths (bwidths), bar left
    # endpoints (blefts, only when the coefs values are nonnegative, and of
    # course the order of our feature labels as well. first reverse flabs.
    flabs = tuple(reversed(flabs))
    # coefs/feature_imps gives us the widths of the bars; do in reverse order
    bwidths = tuple(abs(coefs[nfeatures - i - 1]) for i in range(nfeatures))
    # bar height with a hardcoded pad fraction of 0.1 between bars
    bheight = (1 - 0.1) * (xylim[1] - xylim[0]) / (nfeatures + 1)
    # left-hand sides of bars. if all the values are nonnegative we can keep
    # the default of 0; otherwise negative bars need explicit left endpoints.
    # (fix: the old flag here was named _all_nn but was set True when a
    # negative value was found -- renamed and computed with any() instead.)
    blefts = 0
    has_negative = any(c < 0 for c in coefs)
    if has_negative:
        blefts = tuple(min(0, coefs[nfeatures - i - 1]) for i in
                       range(nfeatures))
    # get ListedColormap from the specified color map string (placed bottom-up);
    # we control color contrast and concentration around middle of chosen color
    # map gradient with the cc parameter
    lcm = _get_cmap_colors(cmap, nfeatures, cc = cc, callfn = callfn)
    # plot the bars, with default center alignment onto the bys points
    ax.barh(bys, bwidths, height = bheight, left = blefts, color = lcm.colors,
            edgecolor = edgecolor, linewidth = edgewidth, align = "center")
    # force there to be nfeatures y ticks, using centers of bars
    ax.yaxis.set_major_locator(FixedLocator(bys))
    # replace tick labels with flabs; force medium font size
    ax.set_yticklabels(flabs, fontsize = "medium")
    # remove the y gridlines by setting their width to 0 (ratchet af)
    ax.grid(axis = "y", linewidth = 0)
    # if title is not None, then set title with size given by fontsize
    if title is not None: ax.set_title(title, fontsize = fontsize)
    # return the new Axes object
    return ax
def coef_plot(est, flabs, figsize = "auto", axscale = 1.1, model_name = "auto",
best_model = False, style = "darkgrid", cmap = _cfp_cmap, cc = 0,
outfile = None, layout = "dual", **kwargs):
"""
given a fitted estimator with a coef_ or feature_importances_, plot model
coefficients or feature importances (respectively). this method works for
most standard | |
reverse the string
padded_bv = ('0' * (n - len(bv)) + bv)[::-1]
for i in range(n):
w = v
w ^= (1 << i) # push 1 to the left by i and xor with w
bw = int_to_binary_string(w)
padded_bw = ('0' * (n - len(bw)) + bw)[::-1]
butterfly[(padded_bv, i)] = [(padded_bv, i + 1), (padded_bw, i + 1)]
elif vertices == 'vectors':
from sage.modules.free_module import VectorSpace
from sage.rings.finite_rings.finite_field_constructor import FiniteField
from copy import copy
butterfly = {}
for v in VectorSpace(FiniteField(2), n):
# We must call tuple since vectors are mutable. To obtain a
# vector from the tuple tv, just call vector(tv).
tv = tuple(v)
for i in range(n):
w = copy(v)
w[i] += 1 # Flip the ith bit
butterfly[(tv, i)] = [(tv, i + 1), (tuple(w), i + 1)]
else:
raise NotImplementedError("vertices must be 'strings' or 'vectors'")
return DiGraph(butterfly, name="{}-dimensional Butterfly".format(n))
def Path(self, n):
r"""
Return a directed path on `n` vertices.
INPUT:
- ``n`` -- integer; number of vertices in the path
EXAMPLES::
sage: g = digraphs.Path(5)
sage: g.vertices()
[0, 1, 2, 3, 4]
sage: g.size()
4
sage: g.automorphism_group().cardinality()
1
"""
g = DiGraph(n, name="Path")
if n:
g.add_path(list(range(n)))
g.set_pos({i: (i,0) for i in range(n)})
return g
def Paley(self, q):
r"""
Return a Paley digraph on `q` vertices.
Parameter `q` must be the power of a prime number and congruent to 3 mod
4.
.. SEEALSO::
- :wikipedia:`Paley_graph`
- :meth:`~sage.graphs.graph_generators.GraphGenerators.PaleyGraph`
EXAMPLES:
A Paley digraph has `n * (n-1) / 2` edges, its underlying graph is a
clique, and so it is a tournament::
sage: g = digraphs.Paley(7); g
Paley digraph with parameter 7: Digraph on 7 vertices
sage: g.size() == g.order() * (g.order() - 1) / 2
True
sage: g.to_undirected().is_clique()
True
A Paley digraph is always self-complementary::
sage: g.complement().is_isomorphic(g)
True
TESTS:
Wrong parameter::
sage: digraphs.Paley(6)
Traceback (most recent call last):
...
ValueError: parameter q must be a prime power
sage: digraphs.Paley(5)
Traceback (most recent call last):
...
ValueError: parameter q must be congruent to 3 mod 4
"""
from sage.rings.finite_rings.integer_mod import mod
from sage.rings.finite_rings.finite_field_constructor import FiniteField
from sage.arith.all import is_prime_power
if not is_prime_power(q):
raise ValueError("parameter q must be a prime power")
if not mod(q, 4) == 3:
raise ValueError("parameter q must be congruent to 3 mod 4")
g = DiGraph([FiniteField(q,'a'), lambda i,j: (i!=j) and (j-i).is_square()],
loops=False, name="Paley digraph with parameter {}".format(q))
return g
def TransitiveTournament(self, n):
r"""
Return a transitive tournament on `n` vertices.
In this tournament there is an edge from `i` to `j` if `i<j`.
See the :wikipedia:`Tournament_(graph_theory)` for more information.
INPUT:
- ``n`` -- integer; number of vertices in the tournament
EXAMPLES::
sage: g = digraphs.TransitiveTournament(5)
sage: g.vertices()
[0, 1, 2, 3, 4]
sage: g.size()
10
sage: g.automorphism_group().cardinality()
1
.. SEEALSO::
- :wikipedia:`Tournament_(graph_theory)`
- :meth:`~sage.graphs.digraph.DiGraph.is_tournament`
- :meth:`~sage.graphs.digraph.DiGraph.is_transitive`
- :meth:`~sage.graphs.digraph_generators.DiGraphGenerators.RandomTournament`
TESTS::
sage: digraphs.TransitiveTournament(-1)
Traceback (most recent call last):
...
ValueError: the number of vertices cannot be strictly negative
"""
g = DiGraph(n, name="Transitive Tournament")
for i in range(n - 1):
for j in range(i + 1, n):
g.add_edge(i, j)
g._circle_embedding(list(range(n)))
return g
def RandomTournament(self, n):
r"""
Return a random tournament on `n` vertices.
For every pair of vertices, the tournament has an edge from
`i` to `j` with probability `1/2`, otherwise it has an edge
from `j` to `i`.
INPUT:
- ``n`` -- integer; number of vertices
EXAMPLES::
sage: T = digraphs.RandomTournament(10); T
Random Tournament: Digraph on 10 vertices
sage: T.size() == binomial(10, 2)
True
sage: T.is_tournament()
True
sage: digraphs.RandomTournament(-1)
Traceback (most recent call last):
...
ValueError: the number of vertices cannot be strictly negative
.. SEEALSO::
- :wikipedia:`Tournament_(graph_theory)`
- :meth:`~sage.graphs.digraph.DiGraph.is_tournament`
- :meth:`~sage.graphs.digraph_generators.DiGraphGenerators.TransitiveTournament`
- :meth:`~sage.graphs.digraph_generators.DiGraphGenerators.Complete`
- :meth:`~sage.graphs.digraph_generators.DiGraphGenerators.RandomSemiComplete`
"""
from sage.misc.prandom import random
g = DiGraph(n, name="Random Tournament")
for i in range(n - 1):
for j in range(i + 1, n):
if random() <= .5:
g.add_edge(i, j)
else:
g.add_edge(j, i)
g._circle_embedding(list(range(n)))
return g
    def tournaments_nauty(self, n,
                          min_out_degree=None, max_out_degree=None,
                          strongly_connected=False, debug=False, options=""):
        r"""
        Iterator over all tournaments on `n` vertices using Nauty.

        INPUT:

        - ``n`` -- integer; number of vertices

        - ``min_out_degree``, ``max_out_degree`` -- integers; if set to
          ``None`` (default), then the min/max out-degree is not constrained

        - ``strongly_connected`` -- boolean (default: ``False``); if ``True``,
          only strongly connected tournaments are generated (``gentourng -c``)

        - ``debug`` -- boolean (default: ``False``); if ``True`` the first line
          of gentourng's output to standard error is captured and the first call to
          the generator's ``next()`` function will return this line as a string.
          A line leading with ">A" indicates a successful initiation of the
          program with some information on the arguments, while a line beginning
          with ">E" indicates an error with the input.

        - ``options`` -- string; anything else that should be forwarded as input
          to Nauty's gentourng. See its documentation for more information :
          `<http://cs.anu.edu.au/~bdm/nauty/>`_.

        EXAMPLES::

            sage: for g in digraphs.tournaments_nauty(4):
            ....:     print(g.edges(labels = False))
            [(1, 0), (2, 0), (2, 1), (3, 0), (3, 1), (3, 2)]
            [(1, 0), (1, 3), (2, 0), (2, 1), (3, 0), (3, 2)]
            [(0, 2), (1, 0), (2, 1), (3, 0), (3, 1), (3, 2)]
            [(0, 2), (0, 3), (1, 0), (2, 1), (3, 1), (3, 2)]
            sage: tournaments = digraphs.tournaments_nauty
            sage: [len(list(tournaments(x))) for x in range(1,8)]
            [1, 1, 2, 4, 12, 56, 456]
            sage: [len(list(tournaments(x, strongly_connected = True))) for x in range(1,9)]
            [1, 0, 1, 1, 6, 35, 353, 6008]
        """
        # Assemble the gentourng command line: degree bounds, optional
        # strong-connectivity flag, then the number of vertices.
        nauty_input = options
        if min_out_degree is None:
            min_out_degree = 0
        if max_out_degree is None:
            max_out_degree = n - 1
        nauty_input += " -d" + str(min_out_degree)
        nauty_input += " -D" + str(max_out_degree)
        if strongly_connected:
            nauty_input += " -c"
        nauty_input += " " + str(n) + " "
        # NOTE(review): shell=True with an interpolated string; `options`
        # comes straight from the caller, so callers must not forward
        # untrusted input here.
        sp = subprocess.Popen("gentourng {0}".format(nauty_input), shell=True,
                              stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE, close_fds=True)
        if debug:
            # Hand the first stderr line back to the caller (see ``debug``).
            yield sp.stderr.readline()
        gen = sp.stdout
        while True:
            try:
                s = bytes_to_str(next(gen))
            except StopIteration:
                # Exhausted list of graphs from nauty gentourng
                return
            # Decode one output line (minus its trailing newline): the bits
            # walk the upper triangle of the adjacency matrix row by row;
            # '0' orients the pair as i -> j, anything else as j -> i.
            G = DiGraph(n)
            i = 0
            j = 1
            for b in s[:-1]:
                if b == '0':
                    G.add_edge(i, j)
                else:
                    G.add_edge(j, i)
                if j == n - 1:
                    # End of row: advance to the next diagonal position.
                    i += 1
                    j = i + 1
                else:
                    j += 1
            yield G
def nauty_directg(self, graphs, options="", debug=False):
r"""
Return an iterator yielding digraphs using nauty's ``directg`` program.
Description from directg --help:
Read undirected graphs and orient their edges in all possible ways.
Edges can be oriented in either or both directions (3 possibilities).
Isomorphic directed graphs derived from the same input are suppressed.
If the input graphs are non-isomorphic then the output graphs are also.
INPUT:
- ``graphs`` -- a :class:`Graph` or an iterable containing :class:`Graph`
the graph6 string of these graphs is used as an input for ``directg``.
- ``options`` (str) -- a string passed to directg as if it was run at
a system command line. Available options from directg --help::
-e# | -e#:# specify a value or range of the total number of arcs
-o orient each edge in only one direction, never both
-f# Use only the subgroup that fixes the first # vertices setwise
-V only output graphs with nontrivial groups (including exchange of
isolated vertices). The -f option is respected.
-s#/# Make only a fraction of the orientations: The first integer is
the part number (first is 0) and the second is the number of
parts. Splitting is done per input graph independently.
- ``debug`` (boolean) -- default: ``False`` - if ``True``
directg standard error and standard output are displayed.
EXAMPLES::
sage: gen = graphs.nauty_geng("-c 3")
sage: dgs = list(digraphs.nauty_directg(gen))
sage: len(dgs)
13
sage: dgs[0]
Digraph on 3 vertices
sage: dgs[0]._bit_vector()
'001001000'
sage: len(list(digraphs.nauty_directg(graphs.PetersenGraph(), options="-o")))
324
TESTS::
sage: g = digraphs.nauty_directg(graphs.PetersenGraph(), options="-o -G")
sage: next(g)
Traceback (most recent call last):
...
ValueError: directg output options [-u|-T|-G] are not allowed
sage: next(digraphs.nauty_directg(graphs.nauty_geng("-c 3"),
....: options="-o", debug=True))
&BH?
&BGO
&B?o
&BX?
&BP_
<BLANKLINE>
Digraph on 3 vertices
.. SEEALSO::
- :meth:`~sage.graphs.graph.Graph.orientations`
"""
if '-u' in options or '-T' in options or '-G' in options:
raise ValueError("directg output options [-u|-T|-G] are not allowed")
if isinstance(graphs, Graph):
| |
'sumMax': sumMax, 'sumJerk': sumJerk, 'sumTargets': sumTargets}))
print(fstr({'vals': vals}, fmat='15.12g'))
print(fstr({'targets': targets}))
self.lossValue = loss
return loss
def jacobian(self):
# the 1st derivative of the loss function
vxj = self.vxj
vyj = self.vyj
omegaj = self.omegaj
pxj = self.pxj
pyj = self.pyj
thetaj = self.thetaj
(pxt, pyt, thetat) = self.target_pose
(vxt, vyt, omegat) = self.target_twist
(leftt, rightt) = self.target_lr
dpxdl = self.dpxdl
dpydl = self.dpydl
dpxdr = self.dpxdr
dpydr = self.dpydr
(bhxl, bhxr, _) = self.bhes[0]
(bhyl, bhyr, _) = self.bhes[1]
(bhol, bhor, _) = self.bhes[2]
alphaxj = self.alphaxj
alphayj = self.alphayj
alphaoj = self.alphaoj
betaj = self.betaj
Wmax = self.Wmax
Wjerk = self.Wjerk
mmax = self.mmax
lefts = self.als
rights = self.ars
leftsp9 = np.power(lefts / mmax, 9)
rightsp9 = np.power(rights / mmax, 9)
n = len(lefts)
dlefts = np.zeros([n])
drights = np.zeros([n])
for k in range(1, n):
dlefts[k] = (
+(vxj[-1] - vxt) * bhxl * alphaxj[n-1-k]
+(vyj[-1] - vyt) * bhyl * alphayj[n-1-k]
+(omegaj[-1] - omegat) * bhol * alphaoj[n-1-k]
+(thetaj[-1] - thetat) * bhol * betaj[n-1-k]
+(pxj[-1] - pxt) * dpxdl[-1, k]
+(pyj[-1] - pyt) * dpydl[-1, k]
+Wmax * leftsp9[k] / mmax
+Wjerk * (2 * lefts[k] -lefts[k-1] -lefts[min(k+1, n-1)])
)
drights[k] = (
+(vxj[-1] - vxt) * bhxr * alphaxj[n-1-k]
+(vyj[-1] - vyt) * bhyr * alphayj[n-1-k]
+(omegaj[-1] - omegat) * bhor * alphaoj[n-1-k]
+(thetaj[-1] - thetat) * bhor * betaj[n-1-k]
+(pxj[-1] - pxt) * dpxdr[-1, k]
+(pyj[-1] - pyt) * dpydr[-1, k]
+Wmax * rightsp9[k]
+Wjerk * (2 * rights[k] -rights[k-1] -rights[min(k+1, n-1)])
)
# TODO: check this
dlefts[-1] += (lefts[-1] - leftt)
drights[-1] += (rights[-1] - rightt)
self.dlefts = dlefts
self.drights = drights
return (dlefts, drights)
    def hessian(self):
        """Analytic second derivative (Hessian) of the loss.

        Builds a dense (2n x 2n) matrix over the free motor inputs:
        rows/columns 0..n-1 correspond to lefts[1:], n..2n-1 to rights[1:]
        (element 0 of each motor array is a fixed initial value and is not a
        free variable). The matrix is cached on ``self.hess`` and returned.

        NOTE(review): the Wmax diagonal term uses ``Wmax / mmax**2 * u**8``
        while :meth:`jacobian` scales its Wmax term as
        ``(u / mmax)**9 / mmax``; the two agree only when ``mmax == 1`` --
        confirm which scaling is intended.
        """
        pxj = self.pxj
        pyj = self.pyj
        (pxt, pyt, _) = self.target_pose
        dpxdl = self.dpxdl
        dpydl = self.dpydl
        dpxdr = self.dpxdr
        dpydr = self.dpydr
        (bhxl, bhxr, _) = self.bhes[0]
        (bhyl, bhyr, _) = self.bhes[1]
        (bhol, bhor, _) = self.bhes[2]
        alphaxj = self.alphaxj
        alphayj = self.alphayj
        alphaoj = self.alphaoj
        betaj = self.betaj
        Wmax = self.Wmax
        Wjerk = self.Wjerk
        mmax = self.mmax
        lefts = self.als
        rights = self.ars
        d2pxdldl = self.d2pxdldl
        d2pxdldr = self.d2pxdldr
        d2pxdrdr = self.d2pxdrdr
        d2pydldl = self.d2pydldl
        d2pydldr = self.d2pydldr
        d2pydrdr = self.d2pydrdr
        n = len(lefts) - 1
        # We'll define this as 0 -> n-1 are lefts[1:], n -> 2n-1 are rights[1:]
        hess = np.empty([2*n, 2*n])
        # values that vary with each k, m value
        deltapxn = pxj[-1] - pxt
        deltapyn = pyj[-1] - pyt
        for i in range(0, 2*n):
            # k is the motor-array index for matrix row i; kleft selects
            # whether row i refers to a left or a right motor input.
            k = i % n + 1
            kleft = (i < n)
            if kleft:
                dpxdu = dpxdl[n, k]
                dpydu = dpydl[n, k]
                dvxdu = alphaxj[n-k] * bhxl
                dvydu = alphayj[n-k] * bhyl
                domdu = alphaoj[n-k] * bhol
                dthdu = betaj[n-k] * bhol
            else:
                dpxdu = dpxdr[n, k]
                dpydu = dpydr[n, k]
                dvxdu = alphaxj[n-k] * bhxr
                dvydu = alphayj[n-k] * bhyr
                domdu = alphaoj[n-k] * bhor
                dthdu = betaj[n-k] * bhor
            for j in range(0, 2*n):
                # m / mleft play the same role for matrix column j.
                m = j % n + 1
                mleft = (j < n)
                if mleft:
                    dpxds = dpxdl[n, m]
                    dpyds = dpydl[n, m]
                    dvxds = alphaxj[n-m] * bhxl
                    dvyds = alphayj[n-m] * bhyl
                    domds = alphaoj[n-m] * bhol
                    dthds = betaj[n-m] * bhol
                    if kleft:
                        d2px = d2pxdldl[n, k, m]
                        d2py = d2pydldl[n, k, m]
                    else:
                        # note d2pxdrdl[i,j] = d2pxdldr[j,i]
                        d2px = d2pxdldr[n, m, k]
                        d2py = d2pydldr[n, m, k]
                else:
                    dpxds = dpxdr[n, m]
                    dpyds = dpydr[n, m]
                    dvxds = alphaxj[n-m] * bhxr
                    dvyds = alphayj[n-m] * bhyr
                    domds = alphaoj[n-m] * bhor
                    dthds = betaj[n-m] * bhor
                    if kleft:
                        d2px = d2pxdldr[n, k, m]
                        d2py = d2pydldr[n, k, m]
                    else:
                        d2px = d2pxdrdr[n, k, m]
                        d2py = d2pydrdr[n, k, m]
                # Product rule: second derivative of the quadratic pose /
                # twist error terms.
                hess[i, j] = (
                    deltapxn * d2px + dpxdu * dpxds +
                    deltapyn * d2py + dpydu * dpyds +
                    dvxdu * dvxds + dvydu * dvyds + domdu * domds + dthdu * dthds
                )
        # values that require k == m (diagonal / band contributions)
        for i in range(0, 2*n):
            k = i % n + 1
            kleft = (i < n)
            # max term
            hess[i, i] += (Wmax / mmax**2) * (lefts[k]**8 if kleft else rights[k]**8)
            # motor target value
            if k == n:
                hess[i, i] += 1.0
            # jerk term (tridiagonal band from the discrete second difference)
            hess[i, i] += 2 *Wjerk
            if k > 1:
                hess[i, i-1] -= Wjerk
            if k == n:
                hess[i, i] -= Wjerk
            else:
                hess[i, i+1] -= Wjerk
        self.hess = hess
        return hess
def dloss_dleft(self, j, eps=1.e-3):
# numerical estimate of loss derivative at left[j]
base_als = self.als.copy()
lefts = base_als.copy()
lefts[j] += eps
nr.poses(lefts, self.ars)
loss_plus = nr.reloss()
lefts = base_als.copy()
lefts[j] -= eps
nr.poses(lefts, self.ars)
loss_minus = nr.reloss()
self.als = base_als
dloss = 0.5 * (loss_plus - loss_minus) / eps
return dloss
def d2loss_dl_dl(self, k, eps=0.0001):
# numerical estimate of second derivative of loss dl dl
base_als = self.als.copy()
n = len(self.als)
d2lossj = [0.0]
for j in range(1, n):
lefts = base_als.copy()
lefts[k] += eps
self.als = lefts
#dlossp = self.dloss_dleft(j, eps)
nr.poses(lefts, self.ars)
nr.gradients()
nr.jacobian()
dlossp = self.dlefts[j]
pxp = self.pxj[-1]
lefts = base_als.copy()
lefts[k] -= eps
self.als = lefts
#dlossm = self.dloss_dleft(j, eps)
nr.poses(lefts, self.ars)
nr.gradients()
nr.jacobian()
dlossm = self.dlefts[j]
pxm = self.pxj[-1]
d2lossj.append(0.5 * (dlossp - dlossm) / eps)
#print(estr({'pxp': pxp, 'pxm': pxm, 'pxp - pxm': pxp - pxm}))
print(estr(({'dlossp': dlossp, 'dlossm': dlossm, 'dlossp-dlossm': dlossp-dlossm, 'wjerk': self.Wjerk})))
self.als = base_als
return d2lossj
def dloss_dright(self, j, eps=0.0001):
# numerical estimate of loss derivative at left[j]
base_ars = self.ars.copy()
rights = base_ars.copy()
rights[j] += eps
nr.poses(self.als, rights)
loss_plus = nr.reloss()
rights = base_ars.copy()
rights[j] -= eps
nr.poses(self.als, rights)
loss_minus = nr.reloss()
self.ars = base_ars
dloss = 0.5 * (loss_plus - loss_minus) / eps
return dloss
if __name__ == '__main__':
from bdbd_common.pathPlan2 import PathPlan
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(8,4))
axis1 = None
axis2 = None
dt = 0.05
lr_model = default_lr_model()
#lr_model = ((1.0, 1.0, 10.0), (-1.0, 1.0, 10.0), (-1.0, 10.0, 10.0))
start_pose = [0.0, 0.0, 0.0]
start_twist = [0.0, 0.0, 0.0]
target_pose = [0.10, 0.05, 0.0]
target_twist = [0.0, 0.0, 0.0]
cruise_v = 0.3
lr_start = (0.0, 0.0)
gauss_iters = 1
nr_iters = 100
Wmax = 1.e-3
Wjerk = 1.e-4
NRstart = 0.01
NRfact = 1.1
pp = PathPlan()
pathPlan = pp.start2(start_pose, target_pose)
print('path_plan:')
for segment in pathPlan:
print(fstr(segment))
# estimate left, right to achieve the path
speedPlan = pp.speedPlan(start_twist[0], cruise_v, target_twist[0], u=0.25)
print('speed_plan:')
for segment in speedPlan:
print(fstr(segment))
vxr0 = start_twist[0] * math.cos(start_pose[2]) + start_twist[1] * math.sin(start_pose[2])
vyr0 = -start_twist[0] * math.sin(start_pose[2]) + start_twist[1] * math.cos(start_pose[2])
last_vx = vxr0
last_omega = start_twist[2]
vxres = [vxr0]
vyres = [vyr0]
omegas = [start_twist[2]]
vvs = [pp.v(0.0)]
vvs[0]['left'] = lr_start[0]
vvs[0]['right'] = lr_start[1]
lefts = [lr_start[0]]
rights = [lr_start[1]]
tt = 0.0
tees = [tt]
while True:
tt += dt
vv = pp.v(tt)
vvs.append(vv)
# vv gives vhat is in wheel frame. We need to convert to robot frame.
vxres.append(vv['v'])
vyres.append(vv['omega'] * pp.dwheel)
omegas.append(vv['omega'])
(left, right, last_vx, last_omega) = lr_est(vv['v'], vv['omega'], last_vx, last_omega, dt)
lefts.append(left)
rights.append(right)
tees.append(tt)
vv['left'] = left
vv['right'] = right
if vv['fraction'] > 0.9999:
break
for seg in vvs:
print(estr(seg))
#lefts = lefts[:10]
#rights = rights[:10]
#tees = tees[:10]
n = len(lefts)
nr = NewRaph(n, dt, lr_model)
# gradient descent iteration
eps = 0.5
last_loss = 1.e10
last_lefts = None
last_rights = None
last_dlefts = None
last_drights = None
for count in range(gauss_iters + nr_iters):
start = time.time()
(pxj, pyj, thetaj, vxj, vyj, omegaj) = nr.poses(lefts, rights,
start_pose=start_pose, start_twist=start_twist)
#print('pose time:', time.time() - start)
start = time.time()
(dpxdl, dpxdr, dpydl, dpydr) = nr.gradients()
(d2pxdldl, d2pxdldr, d2pxdrdr, d2pydldl, d2pydldr, d2pydrdr) = nr.seconds()
#print('gradients time:', time.time() - start)
#print(len(lefts), len(dpxdl))
start = time.time()
loss = nr.loss(mmax=1.0, target_pose=target_pose, Wmax=Wmax, Wjerk=Wjerk)
#print('loss time:', time.time() - start)
#print('loss:', loss)
start = time.time()
(dlefts, drights) = nr.jacobian()
hess | |
"""Errors for the binobj package."""
import typing
from typing import Any
from typing import Iterable
from typing import Optional
from typing import TypeVar
from typing import Union
import more_itertools as m_iter
from binobj.typedefs import FieldOrName
from binobj.typedefs import StructOrName
if typing.TYPE_CHECKING: # pragma: no cover
from binobj.fields import Field
from binobj.structures import Struct
T = TypeVar("T")
# Public API of the errors module. Kept sorted alphabetically.
# Fixed: NoDefinedFieldsError, MixedDeclarationsError,
# InvalidTypeAnnotationError, and CannotDetermineNullError are defined in
# this module but were missing from the list.
__all__ = [
    "ArraySizeError",
    "CannotDetermineNullError",
    "ConfigurationError",
    "DeserializationError",
    "Error",
    "ExtraneousDataError",
    "FieldRedefinedError",
    "FieldReferenceError",
    "IllegalOperationError",
    "ImmutableFieldError",
    "InvalidTypeAnnotationError",
    "MissingRequiredValueError",
    "MixedDeclarationsError",
    "MultipleInheritanceError",
    "NoDefinedFieldsError",
    "SerializationError",
    "UndefinedSizeError",
    "UnexpectedEOFError",
    "UnexpectedValueError",
    "UnserializableValueError",
    "ValidationError",
    "ValueSizeError",
]
class Error(Exception):
    """Base class for all binobj errors.

    This is the root of the package's exception hierarchy. Raise one of
    its subclasses rather than throwing it directly.
    """

    def __init__(self, message: Optional[str] = None, *args: Any):
        doc = self.__doc__
        if message is None and doc:
            # Default to the docstring's summary line as the error text, so
            # every subclass gets a sensible message for free.
            message = doc.splitlines()[0]
        super().__init__(message, *args)
class ConfigurationError(Error):
    """A field, struct, or other object was misconfigured.

    At least one of the ``field``, ``struct``, or ``obj`` keyword arguments
    must be given.

    :param str message:
        Optional. A description of the problem. When omitted, a generic
        message is built from whichever of ``field``, ``struct``, or
        ``obj`` was passed.
    :param field:
        The misconfigured :class:`~binobj.fields.base.Field` or its name.
    :param struct:
        The misconfigured :class:`~binobj.structures.Struct` or its name.
    :param obj:
        The misconfigured object (or its name) when it's neither a field
        nor a struct.

    :raise ValueError:
        None of the ``field``, ``struct``, or ``obj`` keyword arguments
        were passed.

    .. versionadded:: 0.3.0
        The ``struct`` and ``obj`` arguments.
    """

    def __init__(
        self,
        message: Optional[str] = None,
        *,
        field: Optional[FieldOrName] = None,
        struct: Optional[StructOrName] = None,
        obj: Any = None
    ):
        if not (field or struct or obj):
            raise ValueError(
                "At least one of `field`, `struct`, or `obj` must be passed to the"
                " constructor."
            )

        if not message:
            # Choose a message template based on which arguments were given.
            if field and struct:
                template = "Field {f!r} in struct {s!r} was misconfigured."
            elif field:
                template = "The field {f!r} was misconfigured."
            elif struct:
                template = "The struct {s!r} was misconfigured."
            else:
                template = "The object {o!r} was misconfigured."
            message = template.format(f=field, s=struct, o=obj)

        super().__init__(message)
        self.field = field
        self.struct = struct
        self.obj = obj
class SerializationError(Error):
    """An error occurred while serializing data.

    :param str message:
        An error message describing the failure.
    :param ~binobj.structures.Struct struct:
        The struct containing the field that couldn't be serialized.
    :param ~binobj.fields.base.Field field:
        The field that couldn't be serialized.
    :param value:
        The value that triggered the failure.
    """

    def __init__(
        self,
        message: Optional[str] = None,
        *,
        struct: Optional["Struct"] = None,
        field: Optional[FieldOrName] = None,
        value: Optional[T] = None
    ):
        super().__init__(message)
        # Retain the failing context so callers can inspect it.
        self.struct = struct
        self.field = field
        self.value = value
class DeserializationError(Error):
    """An error occurred while deserializing data.

    :param str message:
        An error message describing the failure.
    :param ~binobj.fields.base.Field field:
        The field that couldn't be loaded.
    :param bytes data:
        The raw bytes read up to the failure.
    :param int offset:
        Offset into the data stream where the failure happened.
    """

    def __init__(
        self,
        message: Optional[str] = None,
        *,
        field: Optional["Field[Any]"] = None,
        data: Optional[bytes] = None,
        offset: Optional[int] = None
    ):
        super().__init__(message)
        # Retain the failing context so callers can inspect it.
        self.field = field
        self.data = data
        self.offset = offset
class ValidationError(Error):
    """Validation failed for one or more fields.

    :param str message:
        An error message describing the failure.
    :param ~binobj.fields.base.Field field:
        The field that failed validation.
    :param value:
        The offending value.
    """

    def __init__(
        self, message: Optional[str] = None, *, field: "Field[T]", value: Optional[T]
    ):
        # Build a default message from the field and value when none given.
        message = message or "Invalid value for %s: %r" % (field, value)
        super().__init__(message)
        self.field = field
        self.value = value
class FieldReferenceError(Error):
    """An error occurred while computing a field reference.

    :param str message:
        Optional. A more detailed error message, if desired.
    :param str field:
        The name of the field that couldn't be referenced.
    """

    def __init__(self, message: Optional[str] = None, *, field: str):
        if not message:
            message = (
                "Attempted to reference a missing or undefined field: %r" % (field,)
            )
        super().__init__(message)
        self.field = field
class IllegalOperationError(Error):
    """The attempted operation is disallowed.

    .. versionadded:: 0.4.1
    """

    # No body needed: Error.__init__ falls back to the first line of this
    # docstring as the default message, so the summary line doubles as the
    # user-visible error text.
################################################################################
class ImmutableFieldError(IllegalOperationError):
    """Cannot assign to an immutable or computed field.

    :param ~binobj.fields.base.Field field:
        The field that was assigned to.

    .. versionadded:: 0.4.1
    .. versionadded:: 0.6.1
        The ``field`` argument.
    """

    def __init__(self, *, field: Optional["Field[Any]"] = None):
        # When no field is given, message stays None and Error falls back
        # to this docstring's summary line.
        message = None  # type: Optional[str]
        if field is not None:
            message = "Cannot assign to immutable field: %r" % field
        super().__init__(message)
        self.field = field
class MultipleInheritanceError(ConfigurationError):
    """A Struct can't inherit from more than one Struct.

    This restriction is in place because the field ordering could be non-intuitive given
    Python's MRO.

    .. versionadded:: 0.3.0
    """

    # Intentionally empty: the docstring summary line serves as the default
    # error message via Error.__init__.
class FieldRedefinedError(ConfigurationError):
    """A struct has a field already defined in a parent class.

    :param str struct:
        The name of the struct containing the redefined field.
    :param field:
        The redefined :class:`~binobj.fields.base.Field`, or its name.

    .. versionadded:: 0.3.0
    """

    def __init__(self, *, struct: str, field: FieldOrName):
        message = "Struct %s defines field %r already defined in its parent class." % (
            struct,
            field,
        )
        super().__init__(message, struct=struct, field=field)
class UndefinedSizeError(ConfigurationError):
    """The size of the field couldn't be determined, possibly due to misconfiguration.

    :param field:
        The :class:`~binobj.fields.base.Field` whose size is unknown, or
        the name of that field.

    .. versionadded:: 0.3.1
    """

    def __init__(self, *, field: FieldOrName):
        message = (
            "Size of field %s couldn't be determined. The field might not have had its"
            " `size` set, or a variable-sized field has a bug." % field
        )
        super().__init__(message, field=field)
class NoDefinedFieldsError(ConfigurationError):
    """The struct has no defined fields.

    :param struct:
        The offending :class:`~binobj.structures.Struct`, or its name.

    .. versionadded:: 0.9.0
    """

    def __init__(self, *, struct: StructOrName):
        message = "The struct %r has no defined fields." % struct
        super().__init__(message, struct=struct)
class MixedDeclarationsError(ConfigurationError):
    """The class declares fields with both PEP 526 and assignments; only one is allowed.

    Aside from mixing both kinds of declarations, this will also happen when a user
    declares their struct with :func:`~binobj.pep526.dataclass` but uses the old form of
    assignment-based field definitions.

    .. versionadded:: 0.9.0
    """

    # Intentionally empty: the docstring summary line serves as the default
    # error message via Error.__init__.
class InvalidTypeAnnotationError(ConfigurationError):
    """The type annotation for a field is invalid.

    :param field:
        The :class:`~binobj.fields.base.Field` with the bad annotation, or
        its name.
    :param annotation:
        The offending annotation object.

    .. versionadded:: 0.9.0
    """

    def __init__(self, *, field: FieldOrName, annotation: Any):
        template = (
            "The type annotation for field %r is invalid. For example, you can't use"
            " typing.Union[X, Y] to emulate binobj.fields.Union. The annotation is: %r"
        )
        super().__init__(template % (field, annotation), field=field)
        self.annotation = annotation
class CannotDetermineNullError(ConfigurationError):
    """The `null_value` for this field couldn't be determined when loading.

    :param field:
        The :class:`~binobj.fields.base.Field` whose ``null_value`` couldn't
        be determined.

    .. versionadded:: 0.9.0
    """

    def __init__(self, *, field: "Field[Any]"):
        # Bug fix: the message previously interpolated `self` (a
        # half-constructed exception instance) instead of the offending
        # field.
        super().__init__(
            "Passing `DEFAULT` for `null_value` of unsized field %r makes it impossible"
            " to determine what None should be and would result in unpredictable"
            " behavior." % field,
            field=field,
        )
class UnserializableValueError(SerializationError):
    """The value couldn't be serialized.

    :param ~binobj.fields.base.Field field:
        The field that failed to serialize the value.
    :param value:
        The unserializable value.
    :param str reason:
        Optional. Why serialization failed.
    """

    def __init__(
        self, *, field: "Field[T]", value: Optional[T], reason: Optional[str] = None
    ):
        # Prefer the caller-supplied reason; otherwise report the value's type.
        if reason is None:
            message = "%s can't serialize value of type %r." % (
                field,
                type(value).__name__,
            )
        else:
            message = "%s can't serialize value: %s" % (field, reason)
        super().__init__(message, field=field, value=value)
class MissingRequiredValueError(SerializationError):
    """No value was passed for a required field.

    :param field:
        The missing :class:`~binobj.fields.base.Field`, or its name.
    """

    def __init__(self, *, field: FieldOrName):
        message = "Missing required value for field: %s" % field
        super().__init__(message, field=field)
class UnexpectedValueError(SerializationError):
    """The data to dump has unexpected fields.

    :param ~binobj.structures.Struct struct:
        The struct performing the serialization.
    :param name:
        A field name, or an iterable of field names, that were unexpected.
        Don't pass :class:`~binobj.fields.base.Field` instances.
    """

    def __init__(self, *, struct: "Struct", name: Union[str, Iterable[str]]):
        # Normalize the single-string case into a set of names.
        self.names = set(m_iter.always_iterable(name))
        listed = ", ".join(repr(f) for f in sorted(self.names))
        msg = "%d unrecognized field(s) given to %s for serialization: %s" % (
            len(self.names),
            type(struct).__name__,
            listed,
        )
        super().__init__(msg, struct=struct)
class ValueSizeError(UnserializableValueError):
    """The value can't be serialized because it doesn't fit into the field.

    :param ~binobj.fields.base.Field field:
        The field the value doesn't fit into.
    :param value:
        The wrongly-sized value.
    """

    def __init__(self, *, field: "Field[Any]", value: Any):
        reason = "Value doesn't fit into %r bytes." % field.size
        super().__init__(field=field, value=value, reason=reason)
class ArraySizeError(SerializationError):
"""The | |
import logging

import tensorflow as tf
from tensorflow.keras import layers
class SpectralNormalization(tf.keras.layers.Wrapper):
    """This wrapper reparameterizes a layer by decoupling the weight's
    magnitude and direction.

    NOTE(review): despite the class name, the algorithm below is *weight*
    normalization (the tensorflow-addons WeightNormalization wrapper), not
    spectral normalization -- confirm the intended naming.

    This speeds up convergence by improving the
    conditioning of the optimization problem.
    Weight Normalization: A Simple Reparameterization to Accelerate
    Training of Deep Neural Networks: https://arxiv.org/abs/1602.07868

    WeightNormalization wrapper works for keras and tf layers.
    ```python
      net = WeightNormalization(
          tf.keras.layers.Conv2D(2, 2, activation='relu'),
          input_shape=(32, 32, 3),
          data_init=True)(x)
      net = WeightNormalization(
          tf.keras.layers.Dense(n_classes),
          data_init=True)(net)
    ```
    Arguments:
      layer: a layer instance.
      data_init: If `True` use data dependent variable initialization
    Raises:
      ValueError: If not initialized with a `Layer` instance.
      ValueError: If `Layer` does not contain a `kernel` of weights
      NotImplementedError: If `data_init` is True and running graph execution
    """

    def __init__(self, layer, data_init=True, **kwargs):
        super(SpectralNormalization, self).__init__(layer, **kwargs)
        self.data_init = data_init
        self._track_trackable(layer, name='layer')
        # Serializes the one-time weight initialization in call().
        self._init_critical_section = tf.CriticalSection(name='init_mutex')
        self.is_rnn = isinstance(self.layer, tf.keras.layers.RNN)

        if self.data_init and self.is_rnn:
            logging.warning(
                "WeightNormalization: Using `data_init=True` with RNNs "
                "is advised against by the paper. Use `data_init=False`.")

    def build(self, input_shape):
        """Build `Layer`: create the `g` scale variable and, when
        `data_init` is set, a frozen clone used for data-dependent init."""
        input_shape = tf.TensorShape(input_shape)
        self.input_spec = tf.keras.layers.InputSpec(
            shape=[None] + input_shape[1:])

        if not self.layer.built:
            self.layer.build(input_shape)

        kernel_layer = self.layer.cell if self.is_rnn else self.layer

        if not hasattr(kernel_layer, 'kernel'):
            raise ValueError('`WeightNormalization` must wrap a layer that'
                             ' contains a `kernel` for weights')

        if self.is_rnn:
            kernel = kernel_layer.recurrent_kernel
        else:
            kernel = kernel_layer.kernel

        # The kernel's filter or unit dimension is -1
        self.layer_depth = int(kernel.shape[-1])
        self.kernel_norm_axes = list(range(kernel.shape.rank - 1))

        self.g = self.add_weight(
            name='g',
            shape=(self.layer_depth,),
            initializer='ones',
            dtype=kernel.dtype,
            trainable=True,
            synchronization=tf.VariableSynchronization.AUTO,
            aggregation=tf.compat.v1.VariableAggregation.MEAN
        )
        self.v = kernel

        # Boolean flag flipped once the g/bias init has run.
        self._initialized = self.add_weight(
            name='initialized',
            shape=None,
            initializer='zeros',
            dtype=tf.dtypes.bool,
            trainable=False)

        if self.data_init:
            # Used for data initialization in self._data_dep_init.
            with tf.name_scope('data_dep_init'):
                layer_config = tf.keras.layers.serialize(self.layer)
                layer_config['config']['trainable'] = False
                self._naked_clone_layer = tf.keras.layers.deserialize(
                    layer_config)
                self._naked_clone_layer.build(input_shape)
                self._naked_clone_layer.set_weights(self.layer.get_weights())
                if not self.is_rnn:
                    # Pre-activation statistics are needed for the init.
                    self._naked_clone_layer.activation = None

        self.built = True

    def call(self, inputs):
        """Call `Layer`: normalize the kernel, then run the wrapped layer."""

        def _do_nothing():
            return tf.identity(self.g)

        def _update_weights():
            # Ensure we read `self.g` after _update_weights.
            with tf.control_dependencies(self._initialize_weights(inputs)):
                return tf.identity(self.g)

        # Run the one-time initialization exactly once, under a mutex.
        g = self._init_critical_section.execute(lambda: tf.cond(
            self._initialized, _do_nothing, _update_weights))

        with tf.name_scope('compute_weights'):
            # Replace kernel by normalized weight variable.
            kernel = tf.nn.l2_normalize(self.v, axis=self.kernel_norm_axes) * g

            if self.is_rnn:
                self.layer.cell.recurrent_kernel = kernel
                update_kernel = tf.identity(self.layer.cell.recurrent_kernel)
            else:
                self.layer.kernel = kernel
                update_kernel = tf.identity(self.layer.kernel)

            # Ensure we calculate result after updating kernel.
            with tf.control_dependencies([update_kernel]):
                outputs = self.layer(inputs)
                return outputs

    def compute_output_shape(self, input_shape):
        return tf.TensorShape(
            self.layer.compute_output_shape(input_shape).as_list())

    def _initialize_weights(self, inputs):
        """Initialize weight g.

        The initial value of g could either from the initial value in v,
        or by the input value if self.data_init is True.
        """
        with tf.control_dependencies([
                tf.debugging.assert_equal(  # pylint: disable=bad-continuation
                    self._initialized,
                    False,
                    message='The layer has been initialized.')
        ]):
            if self.data_init:
                assign_tensors = self._data_dep_init(inputs)
            else:
                assign_tensors = self._init_norm()
            assign_tensors.append(self._initialized.assign(True))
            return assign_tensors

    def _init_norm(self):
        """Set the weight g with the norm of the weight vector."""
        with tf.name_scope('init_norm'):
            v_flat = tf.reshape(self.v, [-1, self.layer_depth])
            v_norm = tf.linalg.norm(v_flat, axis=0)
            g_tensor = self.g.assign(tf.reshape(v_norm, (self.layer_depth,)))
            return [g_tensor]

    def _data_dep_init(self, inputs):
        """Data dependent initialization."""
        with tf.name_scope('data_dep_init'):
            # Generate data dependent init values
            x_init = self._naked_clone_layer(inputs)
            data_norm_axes = list(range(x_init.shape.rank - 1))
            m_init, v_init = tf.nn.moments(x_init, data_norm_axes)
            scale_init = 1. / tf.math.sqrt(v_init + 1e-10)

            # RNNs have fused kernels that are tiled
            # Repeat scale_init to match the shape of fused kernel
            # Note: This is only to support the operation,
            # the paper advises against RNN+data_dep_init
            if scale_init.shape[0] != self.g.shape[0]:
                rep = int(self.g.shape[0] / scale_init.shape[0])
                scale_init = tf.tile(scale_init, [rep])

            # Assign data dependent init values
            g_tensor = self.g.assign(self.g * scale_init)
            if hasattr(self.layer, 'bias') and self.layer.bias is not None:
                bias_tensor = self.layer.bias.assign(-m_init * scale_init)
                return [g_tensor, bias_tensor]
            else:
                return [g_tensor]

    def get_config(self):
        config = {'data_init': self.data_init}
        # Fixed: this previously called super(WeightNormalization, self),
        # which raises NameError because no class by that name exists in
        # this module.
        base_config = super(SpectralNormalization, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def remove(self):
        """Bake the normalized weight back into the wrapped layer and
        return it, detaching the wrapper."""
        kernel = tf.Variable(
            tf.nn.l2_normalize(self.v, axis=self.kernel_norm_axes) * self.g,
            name='recurrent_kernel' if self.is_rnn else 'kernel')

        if self.is_rnn:
            self.layer.cell.recurrent_kernel = kernel
        else:
            self.layer.kernel = kernel

        return self.layer
"""class SpectralNormalization(tf.keras.layers.Wrapper):
\"""This wrapper is modified from
https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/layers/wrappers.py
Arguments:
layer: a layer instance.
data_init: If `True` use data dependent variable initialization
Raises:
ValueError: If not initialized with a `Layer` instance.
ValueError: If `Layer` does not contain a `kernel` of weights
NotImplementedError: If `data_init` is True and running graph execution
\"""
def __init__(self, layer, data_init=True, **kwargs):
super(SpectralNormalization, self).__init__(layer, **kwargs)
self.data_init = data_init
self._track_trackable(layer, name='layer')
self._init_critical_section = tf.CriticalSection(name='init_mutex')
self.is_rnn = isinstance(self.layer, tf.keras.layers.RNN)
if self.data_init and self.is_rnn:
logging.warning(
"WeightNormalization: Using `data_init=True` with RNNs "
"is advised against by the paper. Use `data_init=False`.")
def build(self, input_shape):
\"""Build `Layer`\"""
input_shape = tf.TensorShape(input_shape)
self.input_spec = tf.keras.layers.InputSpec(
shape=[None] + input_shape[1:])
if not self.layer.built:
self.layer.build(input_shape)
kernel_layer = self.layer.cell if self.is_rnn else self.layer
if not hasattr(kernel_layer, 'kernel'):
raise ValueError('`WeightNormalization` must wrap a layer that'
' contains a `kernel` for weights')
if self.is_rnn:
kernel = kernel_layer.recurrent_kernel
else:
kernel = kernel_layer.kernel
# The kernel's filter or unit dimension is -1
self.layer_depth = int(kernel.shape[-1])
self.temporal_dim = int(tf.reshape(kernel, [-1, self.layer_depth]).shape[0])
self.kernel_norm_axes = list(range(kernel.shape.rank - 1))
self._u = self.add_weight(
name='u',
shape=(1,self.layer_depth),
initializer=tf.keras.initializers.GlorotNormal,
dtype=kernel.dtype,
trainable=True)
self._v = self.add_weight(
name='g',
shape=(1,self.temporal_dim),
initializer=tf.keras.initializers.GlorotNormal,
dtype=kernel.dtype,
trainable=True)
self._u = tf.math.l2_normalize(self._u, axis=1)
self._v = tf.math.l2_normalize(self._v, axis=1)
self.v = kernel
\"""self.g = self.add_weight(
name='g',
shape=(self.layer_depth,),
initializer='ones',
dtype=kernel.dtype,
trainable=True)
self._initialized = self.add_weight(
name='initialized',
shape=None,
initializer='zeros',
dtype=tf.dtypes.bool,
trainable=False)
if self.data_init:
# Used for data initialization in self._data_dep_init.
with tf.name_scope('data_dep_init'):
layer_config = tf.keras.layers.serialize(self.layer)
layer_config['config']['trainable'] = False
self._naked_clone_layer = tf.keras.layers.deserialize(
layer_config)
self._naked_clone_layer.build(input_shape)
self._naked_clone_layer.set_weights(self.layer.get_weights())
if not self.is_rnn:
self._naked_clone_layer.activation = None\"""
self.built = True
def call(self, inputs):
\"""Call `Layer`\"""
\"""def _do_nothing():
return tf.identity(self.g)
def _update_weights():
# Ensure we read `self.g` after _update_weights.
with tf.control_dependencies(self._initialize_weights(inputs)):
return tf.identity(self.g)
g = self._init_critical_section.execute(lambda: tf.cond(
self._initialized, _do_nothing, _update_weights))\"""
with tf.name_scope('compute_weights'):
# Replace kernel by spectrally normalized weight.
#with tf.init_scope():
kernel = self.spectral_normalize()
if self.is_rnn:
self.layer.cell.recurrent_kernel = kernel
update_kernel = tf.identity(self.layer.cell.recurrent_kernel)
else:
self.layer.kernel = kernel
update_kernel = tf.identity(self.layer.kernel)
# Ensure we calculate result after updating kernel.
with tf.control_dependencies([update_kernel]):
outputs = self.layer(inputs)
return outputs
def spectral_normalize(self):
kernel_mat = tf.reshape(self.v, [self.layer_depth, self.temporal_dim])
self._v = tf.math.l2_normalize(tf.matmul(self._u, kernel_mat), axis=1)
update_v = tf.identity(self._v)
with tf.control_dependencies([update_v]):
self._u = tf.math.l2_normalize(tf.matmul(self._v, tf.transpose(kernel_mat)), axis=1)
update_u = tf.identity(self._u)
with tf.control_dependencies([update_u]):
sigma = tf.reduce_sum(tf.matmul(self._u, kernel_mat) * self._v)
return self.v / sigma
def compute_output_shape(self, input_shape):
return tf.TensorShape(
self.layer.compute_output_shape(input_shape).as_list())
\"""def _initialize_weights(self, inputs):
#Initialize weight g.
#The initial value of g could either from the initial value in v,
#or by the input value if self.data_init is True.
with tf.control_dependencies([
tf.debugging.assert_equal( # pylint: disable=bad-continuation
self._initialized,
False,
message='The layer has been initialized.')
]):
if self.data_init:
assign_tensors = self._data_dep_init(inputs)
else:
assign_tensors = self._init_norm()
assign_tensors.append(self._initialized.assign(True))
return assign_tensors
def _init_norm(self):
#Set the weight g with the norm of the weight vector.
with tf.name_scope('init_norm'):
v_flat = tf.reshape(self.v, [-1, self.layer_depth])
v_norm = tf.linalg.norm(v_flat, axis=0)
g_tensor = self.g.assign(tf.reshape(v_norm, (self.layer_depth,)))
return [g_tensor]
def _data_dep_init(self, inputs):
#Data dependent initialization.
with tf.name_scope('data_dep_init'):
# Generate data dependent init values
x_init = self._naked_clone_layer(inputs)
data_norm_axes = list(range(x_init.shape.rank - 1))
m_init, v_init = tf.nn.moments(x_init, data_norm_axes)
scale_init = 1. / tf.math.sqrt(v_init + 1e-10)
# RNNs have fused kernels that are tiled
# Repeat scale_init to match the shape of fused kernel
# Note: This is only to support the operation,
# the paper advises against RNN+data_dep_init
if scale_init.shape[0] != self.g.shape[0]:
rep = int(self.g.shape[0] / scale_init.shape[0])
scale_init = tf.tile(scale_init, [rep])
# Assign data dependent init values
g_tensor = self.g.assign(self.g * scale_init)
if hasattr(self.layer, 'bias') and self.layer.bias is not None:
bias_tensor = self.layer.bias.assign(-m_init * scale_init)
return [g_tensor, bias_tensor]
else:
return [g_tensor]\"""
def get_config(self):
config = {'data_init': self.data_init}
base_config = super(WeightNormalization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def remove(self):
kernel = tf.Variable(
tf.nn.l2_normalize(self.v, axis=self.kernel_norm_axes) * self.g,
name='recurrent_kernel' if self.is_rnn else 'kernel')
if self.is_rnn:
self.layer.cell.recurrent_kernel = kernel
else:
self.layer.kernel = kernel
return self.layer
\"""
def l2normalize(v, eps=1e-12):
return tf.math.divide(v,(tf.norm(v) + eps))
class SpectralNormalization(layers.Layer):
\""" Paper: https://openreview.net/forum?id=B1QRgziT-
source: https://github.com/pfnet-research/sngan_projection
\"""
def __init__(self, module, name="weights", Ip=1, factor=None):
super(SpectralNormalization, self).__init__()
self.module = module
self.weight_name = name
if not Ip >= 1:
raise ValueError("The number of power iterations should be positive integer")
self.Ip = Ip
self.factor = factor
def _check_param(self):
try:
u = getattr(self, "u")
v = getattr(self, "v")
return True
except AttributeError:
return False
def _make_param(self):
W = getattr(self.module, self.weight_name)[0]
height = W.shape[-1]
width = | |
= 155
slot_town_has_tournament = 156
slot_town_tournament_max_teams = 157
slot_town_tournament_max_team_size = 158
slot_center_faction_when_oath_renounced = 159
slot_center_walker_0_troop = 160
slot_center_walker_1_troop = 161
slot_center_walker_2_troop = 162
slot_center_walker_3_troop = 163
slot_center_walker_4_troop = 164
slot_center_walker_5_troop = 165
slot_center_walker_6_troop = 166
slot_center_walker_7_troop = 167
slot_center_walker_8_troop = 168
slot_center_walker_9_troop = 169
slot_center_walker_0_dna = 170
slot_center_walker_1_dna = 171
slot_center_walker_2_dna = 172
slot_center_walker_3_dna = 173
slot_center_walker_4_dna = 174
slot_center_walker_5_dna = 175
slot_center_walker_6_dna = 176
slot_center_walker_7_dna = 177
slot_center_walker_8_dna = 178
slot_center_walker_9_dna = 179
slot_center_walker_0_type = 180
slot_center_walker_1_type = 181
slot_center_walker_2_type = 182
slot_center_walker_3_type = 183
slot_center_walker_4_type = 184
slot_center_walker_5_type = 185
slot_center_walker_6_type = 186
slot_center_walker_7_type = 187
slot_center_walker_8_type = 188
slot_center_walker_9_type = 189
slot_town_trade_route_1 = 190
slot_town_trade_route_2 = 191
slot_town_trade_route_3 = 192
slot_town_trade_route_4 = 193
slot_town_trade_route_5 = 194
slot_town_trade_route_6 = 195
slot_town_trade_route_7 = 196
slot_town_trade_route_8 = 197
slot_town_trade_route_9 = 198
slot_town_trade_route_10 = 199
slot_town_trade_route_11 = 200
slot_town_trade_route_12 = 201
slot_town_trade_route_13 = 202
slot_town_trade_route_14 = 203
slot_town_trade_route_15 = 204
# Slots 190-204 (defined above) form a contiguous run of trade-route entries.
slot_town_trade_routes_begin = slot_town_trade_route_1
slot_town_trade_routes_end = slot_town_trade_route_15 + 1
num_trade_goods = itm_siege_supply - itm_spice
slot_town_trade_good_productions_begin = 500 #a harmless number, until it can be deprecated
#These affect production but in some cases also demand, so it is perhaps easier to itemize them than to have separate slots for each
slot_village_number_of_cattle = 205
slot_center_head_cattle = 205 #dried meat, cheese, hides, butter
slot_center_head_sheep = 206 #sausages, wool
slot_center_head_horses = 207 #horses can be a trade item used in tracking but which are never offered for sale
slot_center_acres_pasture = 208 #pasture area for grazing cattle and sheep; a high value makes the cattle and sheep head counts grow faster
slot_production_sources_begin = 209 # START OF PRODUCTION SOURCES
slot_center_acres_grain = 209 #grain
slot_center_acres_olives = 210 #olives
slot_center_acres_vineyard = 211 #fruit
slot_center_acres_flax = 212 #flax
slot_center_acres_dates = 213 #dates
slot_center_fishing_fleet = 214 #smoked fish
slot_center_salt_pans = 215 #salt
slot_center_apiaries = 216 #honey
slot_center_silk_farms = 217 #silk
slot_center_kirmiz_farms = 218 #dyes
slot_center_iron_deposits = 219 #iron
slot_center_fur_traps = 220 #furs
slot_center_mills = 221 #bread
slot_center_breweries = 222 #ale
slot_center_wine_presses = 223 #wine
slot_center_olive_presses = 224 #oil
slot_center_linen_looms = 225 #linen
slot_center_silk_looms = 226 #velvet
slot_center_wool_looms = 227 #wool cloth
slot_center_pottery_kilns = 228 #pottery
slot_center_smithies = 229 #tools
slot_center_tanneries = 230 #leatherwork
slot_center_shipyards = 231 #naval stores - uses timber, pitch, and linen
slot_center_household_gardens = 232 #cabbages
slot_center_acres_fruit_trees = 233 #fruit
slot_production_sources_end = 235 # one more than the last production source
# NOTE(review): the last source above is 233, so "one more" would be 234;
# confirm whether 235 deliberately leaves slot 234 free.
slot_center_head_chicken = 238 #drives chicken production
slot_center_head_pigs = 239 #drives pork production
#all spice comes overland to Tulga
#all dyes come by sea to Jelkala
#chicken and pork are perishable and non-tradeable, and based on grain production
#timber and pitch if we ever have a shipbuilding industry
#limestone and timber for mortar, if we allow building
slot_town_last_nearby_fire_time = 240
#slot_town_trade_good_prices_begin = slot_town_trade_good_productions_begin + num_trade_goods + 1
slot_party_following_orders_of_troop = 244
slot_party_orders_type = 245
slot_party_orders_object = 246
slot_party_orders_time = 247
slot_party_temp_slot_1 = 248 #right now used only within a single script, merchant_road_info_to_s42, to denote closed roads. Now also used in comparative scripts
slot_party_under_player_suggestion = 249 #move this up a bit
slot_town_trade_good_prices_begin = 250
slot_center_last_reconnoitered_by_faction_time = 350
#slot_center_last_reconnoitered_by_faction_cached_strength = 360
#slot_center_last_reconnoitered_by_faction_friend_strength = 370
#slot_party_type values
##spt_caravan = 1
spt_castle = 2
spt_town = 3
spt_village = 4
##spt_forager = 5
##spt_war_party = 6
##spt_patrol = 7
##spt_messenger = 8
##spt_raider = 9
##spt_scout = 10
spt_reinforcement = 6
#SB : add reinforcements as part of kingdom party range
spt_kingdom_caravan = 11
##spt_prisoner_train = 12
spt_kingdom_hero_party = 13
##spt_merchant_caravan = 14
spt_village_farmer = 15
spt_ship = 16
spt_cattle_herd = 17
spt_bandit_lair = 18
#spt_deserter = 20
kingdom_party_types_begin = spt_kingdom_caravan
kingdom_party_types_end = spt_kingdom_hero_party + 1
#slot_faction_state values
sfs_active = 0
sfs_defeated = 1
sfs_inactive = 2
sfs_inactive_rebellion = 3
sfs_beginning_rebellion = 4
#slot_faction_ai_state values
sfai_default = 0 #also defending
sfai_gathering_army = 1
sfai_attacking_center = 2
sfai_raiding_village = 3
sfai_attacking_enemy_army = 4
sfai_attacking_enemies_around_center = 5
sfai_feast = 6 #can be feast, wedding, or major tournament
#Social events are a generic aristocratic gathering. Tournaments take place if they are in a town, and hunts take place if they are at a castle.
#Weddings will take place at social events between betrothed couples if they have been engaged for at least a month, if the lady's guardian is the town lord, and if both bride and groom are present
#Rebellion system changes begin
sfai_nascent_rebellion = 7
#Rebellion system changes end
#slot_party_ai_state values
spai_undefined = -1
spai_besieging_center = 1
spai_patrolling_around_center = 4
spai_raiding_around_center = 5
##spai_raiding_village = 6
spai_holding_center = 7
##spai_helping_town_against_siege = 9
spai_engaging_army = 10
spai_accompanying_army = 11
spai_screening_army = 12
spai_trading_with_town = 13
spai_retreating_to_center = 14
##spai_trading_within_kingdom = 15
spai_visiting_village = 16 #same thing, I think. Recruiting differs from holding because NPC parties don't actually enter villages
#slot_village_state values
svs_normal = 0
svs_being_raided = 1
svs_looted = 2
svs_recovering = 3
svs_deserted = 4
svs_under_siege = 5
#$g_player_icon_state values
pis_normal = 0
pis_camping = 1
pis_ship = 2
########################################################
## SCENE SLOTS #############################
########################################################
slot_scene_visited = 0
#INVASION MODE START
slot_scene_ccoop_disallow_horses = 1 #should be set to 1 for scenes that should be played dismounted in Invasion mode (e.g. Forest Hideout)
#INVASION MODE END
slot_scene_belfry_props_begin = 10
########################################################
## TROOP SLOTS #############################
########################################################
#slot_troop_role = 0 # 10=Kingdom Lord
slot_troop_occupation = 2 # 0 = free, 1 = merchant
#slot_troop_duty = 3 # Kingdom duty, 0 = free
#slot_troop_homage_type = 45
#homage_mercenary = = 1 #Player is on a temporary contract
#homage_official = = 2 #Player has a royal appointment
#homage_feudal = = 3 #
slot_troop_state = 3
slot_troop_last_talk_time = 4
slot_troop_met = 5 #i also use this for the courtship state -- may become cumbersome
slot_troop_courtship_state = 5 #2 professed admiration, 3 agreed to seek a marriage, 4 ended relationship
slot_troop_party_template = 6
#slot_troop_kingdom_rank = 7
slot_troop_renown = 7
##slot_troop_is_prisoner = 8 # important for heroes only
slot_troop_prisoner_of_party = 8 # important for heroes only
#slot_troop_is_player_companion = 9 # important for heroes only:::USE slot_troop_occupation = slto_player_companion
slot_troop_present_at_event = 9
slot_troop_leaded_party = 10 # important for kingdom heroes only
slot_troop_wealth = 11 # important for kingdom heroes only
slot_troop_cur_center = 12 # important for royal family members only (non-kingdom heroes)
slot_troop_banner_scene_prop = 13 # important for kingdom heroes and player only
slot_troop_original_faction = 14 # for pretenders
#slot_troop_loyalty = 15 #deprecated - this is now derived from other figures
slot_troop_player_order_state = 16 #Deprecated
slot_troop_player_order_object = 17 #Deprecated
#troop_player order state are all deprecated in favor of party_order_state. This has two reasons -- 1) to reset AI if the party is eliminated, and 2) to allow the player at a later date to give orders to leaderless parties, if we want that
#Post 0907 changes begin
slot_troop_age = 18
slot_troop_age_appearance = 19
#Post 0907 changes end
slot_troop_does_not_give_quest = 20
slot_troop_player_debt = 21
slot_troop_player_relation = 22
#slot_troop_player_favor = 23
slot_troop_last_quest = 24
slot_troop_last_quest_betrayed = 25
slot_troop_last_persuasion_time= 26
slot_troop_last_comment_time = 27
slot_troop_spawned_before = 28
#Post 0907 changes begin
slot_troop_last_comment_slot = 29
#Post 0907 changes end
slot_troop_spouse = 30
slot_troop_father = 31
slot_troop_mother = 32
slot_troop_guardian = 33 #Usually siblings are identified by a common parent.This is used for brothers if the father is not an active npc. At some point we might introduce geneologies
slot_troop_betrothed = 34 #Obviously superseded once slot_troop_spouse is filled
#other relations are derived from one's parents
#slot_troop_daughter = 33
#slot_troop_son = 34
#slot_troop_sibling = 35
##diplomacy start+
#NOTE TO MODDERS: There is code that depends on these slots appearing in the correct order and being continuous.
dplmc_slot_troop_relatives_begin = slot_troop_spouse
dplmc_slot_troop_relatives_end = slot_troop_betrothed
dplmc_slot_troop_relatives_including_betrothed_end = slot_troop_betrothed + 1
##diplomacy end+
slot_troop_love_interest_1 = 35 #each unmarried lord has three love interests
slot_troop_love_interest_2 = 36
slot_troop_love_interest_3 = 37
slot_troop_love_interests_end = 38
#ways to court -- discuss a book, commission/compose a poem, present a gift, recount your exploits, fulfil a specific quest, appear at a tournament
#preferences for women - (conventional - father's friends)
slot_lady_no_messages = 37
slot_lady_last_suitor = 38
slot_lord_granted_courtship_permission = 38
slot_troop_betrothal_time = 39 #used in scheduling the wedding
slot_troop_trainer_met = 30
slot_troop_trainer_waiting_for_result = 31
slot_troop_trainer_training_fight_won = 32
slot_troop_trainer_num_opponents_to_beat = 33
slot_troop_trainer_training_system_explained = 34
slot_troop_trainer_opponent_troop = 35
slot_troop_trainer_training_difficulty = 36
# slot_troop_trainer_training_fight_won = 37 #SB : duplicate slot
slot_lady_used_tournament = 40
slot_troop_current_rumor = 45
slot_troop_temp_slot = 46
slot_troop_promised_fief = 47
slot_troop_set_decision_seed = 48 #Does not change
slot_troop_temp_decision_seed = 49 #Resets at recalculate_ai
slot_troop_recruitment_random = 50 #used in a number of different places in the intrigue procedures to overcome intermediate hurdles, although not for the final calculation, might be replaced at some point by the global decision seed
#Decision seeds can be used so that some randomness can be added to NPC decisions, without allowing the player to spam the NPC with suggestions
#The temp decision seed is reset 24 to 48 hours after the NPC last spoke to the player, while the set seed only changes in special occasions
#The single seed is used with varying modula to give high/low outcomes on different issues, without using a separate slot for each issue
slot_troop_intrigue_impatience = 51
#recruitment changes end
#slot_troop_honorable = 50
#slot_troop_merciful = 51
slot_lord_reputation_type = 52
slot_lord_recruitment_argument = 53 #the last argument proposed by the player to the lord
slot_lord_recruitment_candidate = 54 #the last candidate proposed by the player to the lord
slot_troop_change_to_faction = 55
##diplomacy start+ Use this slot to track owned center points (village = 1, castle = 2, town = 3)
#The value should be one more than the actual number of center points, because it makes
#it obvious when the slot has not been initialized. (It also so happens that we often
#add 1 to the value anyway to avoid division by 0, so this can be convenient.)
dplmc_slot_troop_center_points_plus_one = 56
##diplomacy end+
#slot_troop_readiness_to_join_army = 57 #possibly deprecate
#slot_troop_readiness_to_follow_orders = | |
criteria.
:param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size.
:param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.
:param int offset: The starting position based on the results of the query in relation to the full set of response objects returned.
:param list[str] policy_ids: Performs the operation on the unique policy IDs specified. Enter multiple policy IDs in comma-separated format. The `policy_ids` and `policy_names` parameters cannot be provided together.
:param list[str] policy_names: Performs the operation on the policy names specified. Enter multiple policy names in comma-separated format. For example, `name01,name02`.
:param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values.
:param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: PolicyRuleNfsClientGetResponse
If the method is called asynchronously,
returns the request thread.
"""
if names is not None:
if not isinstance(names, list):
names = [names]
if policy_ids is not None:
if not isinstance(policy_ids, list):
policy_ids = [policy_ids]
if policy_names is not None:
if not isinstance(policy_names, list):
policy_names = [policy_names]
if sort is not None:
if not isinstance(sort, list):
sort = [sort]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
if 'limit' in params and params['limit'] < 1:
raise ValueError("Invalid value for parameter `limit` when calling `api210_policies_nfs_client_rules_get`, must be a value greater than or equal to `1`")
if 'offset' in params and params['offset'] < 0:
raise ValueError("Invalid value for parameter `offset` when calling `api210_policies_nfs_client_rules_get`, must be a value greater than or equal to `0`")
collection_formats = {}
path_params = {}
query_params = []
if 'continuation_token' in params:
query_params.append(('continuation_token', params['continuation_token']))
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'offset' in params:
query_params.append(('offset', params['offset']))
if 'policy_ids' in params:
query_params.append(('policy_ids', params['policy_ids']))
collection_formats['policy_ids'] = 'csv'
if 'policy_names' in params:
query_params.append(('policy_names', params['policy_names']))
collection_formats['policy_names'] = 'csv'
if 'sort' in params:
query_params.append(('sort', params['sort']))
collection_formats['sort'] = 'csv'
if 'total_item_count' in params:
query_params.append(('total_item_count', params['total_item_count']))
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/api/2.10/policies/nfs/client-rules', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PolicyRuleNfsClientGetResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api210_policies_nfs_client_rules_post_with_http_info(
    self,
    rules=None,  # type: models.PolicyRuleNfsClientPost
    authorization=None,  # type: str
    x_request_id=None,  # type: str
    policy_ids=None,  # type: List[str]
    policy_names=None,  # type: List[str]
    async_req=False,  # type: bool
    _return_http_data_only=False,  # type: bool
    _preload_content=True,  # type: bool
    _request_timeout=None,  # type: Optional[int]
):
    # type: (...) -> models.PolicyRuleNfsClientResponse
    """Create NFS client policy rules

    Creates one or more NFS client policy rules. The `policy_ids` or `policy_names` parameter is required, but cannot be set together.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.api210_policies_nfs_client_rules_post_with_http_info(rules, async_req=True)
    >>> result = thread.get()

    :param PolicyRuleNfsClientPost rules: (required)
    :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
    :param str x_request_id: Supplied by client during request or generated by server.
    :param list[str] policy_ids: Performs the operation on the unique policy IDs specified. Enter multiple policy IDs in comma-separated format. The `policy_ids` and `policy_names` parameters cannot be provided together.
    :param list[str] policy_names: Performs the operation on the policy names specified. Enter multiple policy names in comma-separated format. For example, `name01,name02`.
    :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
    :param bool _return_http_data_only: Returns only data field.
    :param bool _preload_content: Response is converted into objects.
    :param int _request_timeout: Total request timeout in seconds.
        It can also be a tuple of (connection time, read time) timeouts.
    :return: PolicyRuleNfsClientResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Normalise scalar arguments into lists so they serialise as csv below.
    if policy_ids is not None:
        if not isinstance(policy_ids, list):
            policy_ids = [policy_ids]
    if policy_names is not None:
        if not isinstance(policy_names, list):
            policy_names = [policy_names]
    # NOTE: every non-None local (i.e. the parameters above) becomes an
    # entry in `params` via locals(); renaming any local in this function
    # would silently change the request. Do not refactor names here.
    params = {k: v for k, v in six.iteritems(locals()) if v is not None}
    # Convert the filter into a string
    if params.get('filter'):
        params['filter'] = str(params['filter'])
    if params.get('sort'):
        params['sort'] = [str(_x) for _x in params['sort']]
    # verify the required parameter 'rules' is set
    if rules is None:
        raise TypeError("Missing the required parameter `rules` when calling `api210_policies_nfs_client_rules_post`")
    collection_formats = {}
    path_params = {}
    query_params = []
    if 'policy_ids' in params:
        query_params.append(('policy_ids', params['policy_ids']))
        collection_formats['policy_ids'] = 'csv'
    if 'policy_names' in params:
        query_params.append(('policy_names', params['policy_names']))
        collection_formats['policy_names'] = 'csv'
    header_params = {}
    if 'authorization' in params:
        header_params['Authorization'] = params['authorization']
    if 'x_request_id' in params:
        header_params['X-Request-ID'] = params['x_request_id']
    form_params = []
    local_var_files = {}
    body_params = None
    if 'rules' in params:
        body_params = params['rules']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json'])
    # Authentication setting
    auth_settings = []
    return self.api_client.call_api(
        '/api/2.10/policies/nfs/client-rules', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='PolicyRuleNfsClientResponse',
        auth_settings=auth_settings,
        async_req=async_req,
        _return_http_data_only=_return_http_data_only,
        _preload_content=_preload_content,
        _request_timeout=_request_timeout,
        collection_formats=collection_formats,
    )
def api210_policies_nfs_delete_with_http_info(
self,
authorization=None, # type: str
x_request_id=None, # type: str
ids=None, # type: List[str]
names=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> None
"""Delete NFS policies
Deletes one or more NFS policies. The `ids` or `names` parameter is required, but they cannot be set together.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api210_policies_nfs_delete_with_http_info(async_req=True)
>>> result = thread.get()
:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param list[str] ids: Performs the operation on the unique resource IDs specified. Enter multiple resource IDs in comma-separated format. The `ids` and `names` parameters cannot be provided together.
:param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
| |
node_id, likely a label string, search based on this
# label
for node in self:
if str(node) == key:
return node
log.warning('Unable to find node %s in %s ' % (key, self))
return None
def degree(self, node):
    """Return the degree of *node*, as reported by the node object itself."""
    return node.degree()
def neighbors(self, node):
    """Iterate over OverlayNode wrappers for *node*'s neighbours."""
    raw_neighbors = self._graph.neighbors(node.node_id)
    return iter(OverlayNode(self._anm, self._overlay_id, nbr)
                for nbr in raw_neighbors)
def overlay(self, key):
    """Return the overlay graph named *key* (cross-overlay access helper)."""
    return OverlayGraph(self._anm, key)
@property
def name(self):
    """Display name of this overlay; identical to its repr()."""
    return self.__repr__()
def __nonzero__(self):
    # Python 2 truth protocol: an overlay is truthy iff it exists in the ANM.
    return self.anm.has_overlay(self._overlay_id)
def node_label(self, node):
    """Return the display label (repr of the wrapped OverlayNode) for *node*."""
    return repr(OverlayNode(self._anm, self._overlay_id, node))
def dump(self):
    """Dump this overlay graph via the parent ANM (debugging helper)."""
    self._anm.dump_graph(self)
def has_edge(self, edge):
    """Return True when *edge*'s (src, dst) pair is present in this graph."""
    src, dst = edge.src, edge.dst
    return self._graph.has_edge(src, dst)
def __iter__(self):
    """Iterate over OverlayNode wrappers for every node in the graph."""
    return iter(OverlayNode(self._anm, self._overlay_id, node)
                for node in self._graph)
def __len__(self):
    """Number of nodes in this overlay."""
    return len(self._graph)
def nodes(self, *args, **kwargs):
    """Return the overlay's nodes as a list, optionally filtered.

    Positional names select nodes whose attribute is truthy; keyword
    pairs select nodes whose attribute equals the given value.
    """
    candidates = iter(self)
    if args or kwargs:
        candidates = self.filter(candidates, *args, **kwargs)
    return list(candidates)
def routers(self, *args, **kwargs):
    """Shortcut for nodes(), keeping only devices whose is_router() is true."""
    return [node for node in self.nodes(*args, **kwargs) if node.is_router()]
def switches(self, *args, **kwargs):
    """Shortcut for nodes(), keeping only nodes whose is_switch() is true."""
    result = self.nodes(*args, **kwargs)
    return [r for r in result if r.is_switch()]
def servers(self, *args, **kwargs):
    """Shortcut for nodes(), keeping only nodes whose is_server() is true."""
    result = self.nodes(*args, **kwargs)
    return [r for r in result if r.is_server()]
def l3devices(self, *args, **kwargs):
    """Shortcut for nodes(), keeping only nodes whose is_l3device() is true.

    (Original docstring said "server" — a copy-paste from servers() above.)
    """
    result = self.nodes(*args, **kwargs)
    return [r for r in result if r.is_l3device()]
def device(self, key):
    """Return the OverlayNode for *key* (programmatic access)."""
    return OverlayNode(self._anm, self._overlay_id, key)
def groupby(self, attribute, nodes=None):
    """Group nodes by the value of *attribute*.

    Returns a dict mapping each attribute value to the list of nodes
    carrying it (e.g. grouping by "asn" gives {asn: [nodes...]}).
    When *nodes* is falsy, all nodes of this overlay are used.
    """
    data = nodes if nodes else self.nodes()
    result = {}
    # Sorting first keeps each group's members in attribute-sorted order,
    # matching the original itertools.groupby-after-sort behaviour.
    for item in sorted(data, key=lambda x: x.get(attribute)):
        result.setdefault(item.get(attribute), []).append(item)
    return result
def filter(self, nbunch=None, *args, **kwargs):
    """Yield nodes from *nbunch* (default: all nodes) matching the criteria.

    Positional names must be truthy attributes of the node; keyword
    pairs must compare equal to the node attribute of the same name.
    """
    if not nbunch:
        nbunch = self.nodes()

    def _matches(node):
        """True when node satisfies every positional and keyword test."""
        return (all(getattr(node, key) for key in args)
                and all(getattr(node, key) == val
                        for (key, val) in kwargs.items()))

    return (node for node in nbunch if _matches(node))
def edges(
    self,
    src_nbunch=None,
    dst_nbunch=None,
    *args,
    **kwargs
):
    """Return OverlayEdge list, optionally restricted by endpoint and attribute.

    src_nbunch / dst_nbunch may each be a single node or an iterable of
    nodes; *args / **kwargs filter edges by attribute (truthy / equality).
    """
    # nbunch may be single node
    if src_nbunch:
        try:
            src_nbunch = src_nbunch.node_id
        except AttributeError:
            src_nbunch = (n.node_id for n in src_nbunch)
            # only store the id in overlay

    def filter_func(edge):
        """Filter based on args and kwargs"""
        return all(getattr(edge, key) for key in args) \
            and all(getattr(edge, key) == val for (key, val) in
                    kwargs.items())

    # networkx 1.x edges_iter; src_nbunch=None yields all edges
    valid_edges = ((src, dst) for (src, dst) in
                   self._graph.edges_iter(src_nbunch))
    if dst_nbunch:
        try:
            dst_nbunch = dst_nbunch.node_id
            dst_nbunch = set([dst_nbunch])
        except AttributeError:
            # faster membership test than other sequences
            dst_nbunch = (n.node_id for n in dst_nbunch)
            # only store the id in OverlayEdge
            # faster membership test than other sequences
            dst_nbunch = set(dst_nbunch)
        valid_edges = ((src, dst) for (src, dst) in valid_edges
                       if dst in dst_nbunch)
    if len(args) or len(kwargs):
        all_edges = iter(OverlayEdge(self._anm, self._overlay_id,
                                     src, dst) for (src, dst) in valid_edges)
        result = (edge for edge in all_edges if filter_func(edge))
    else:
        result = (OverlayEdge(self._anm, self._overlay_id, src,
                              dst) for (src, dst) in valid_edges)
    return list(result)
class OverlaySubgraph(OverlayBase):

    """A named subgraph view over an overlay."""

    def __init__(self, anm, overlay_id, graph, name=None):
        """Store the backing graph and an optional display name."""
        super(OverlaySubgraph, self).__init__(anm, overlay_id)
        self._graph = graph
        self._subgraph_name = name

    def __repr__(self):
        return self._subgraph_name or 'subgraph'
class OverlayGraph(OverlayBase):
"""API to interact with an overlay graph in ANM"""
@property
def anm(self):
    """Parent ANM object this overlay belongs to."""
    return self._anm
@property
def _graph(self):
    """The underlying networkx graph for this overlay (looked up in the ANM).

    (Original docstring said "for this OverlayNode" — copy-paste error.)
    """
    return self._anm.overlay_nx_graphs[self._overlay_id]
def _replace_graph(self, graph):
    """Swap in *graph* as this overlay's backing graph."""
    self._anm.overlay_nx_graphs[self._overlay_id] = graph
# these work similar to their nx counterparts: just need to strip the
# node_id
def add_nodes_from(
    self,
    nbunch,
    retain=None,
    update=False,
    **kwargs
):
    """Add nodes to this overlay.

    nbunch: iterable of overlay nodes or raw node ids.
    retain: attribute name (or list of names) copied from source nodes.
    update: when False, nodes already present are skipped, so existing
    data is not clobbered.
    """
    nbunch = list(nbunch)  # listify in case consumed in try/except
    if not retain:
        retain = []
    try:
        retain.lower()
        retain = [retain]  # was a string, put into list
    except AttributeError:
        pass  # already a list
    if not update:
        # filter out existing nodes
        nbunch = (n for n in nbunch if n not in self._graph)
    nbunch = list(nbunch)
    node_ids = list(nbunch)  # before appending retain data
    if len(retain):
        add_nodes = []
        for node in nbunch:
            data = dict((key, node.get(key)) for key in retain)
            add_nodes.append((node.node_id, data))
        nbunch = add_nodes
    else:
        try:
            # only store the id in overlay
            nbunch = [n.node_id for n in nbunch]
        except AttributeError:
            pass  # use nbunch directly as the node IDs
    self._graph.add_nodes_from(nbunch, **kwargs)
    for node in self._graph.nodes():
        node_data = self._graph.node[node]
        if "label" not in node_data:
            node_data["label"] = str(node)  # use node id
    self._init_interfaces(node_ids)
def add_node(
    self,
    node,
    retain=None,
    **kwargs
):
    """Adds node to overlay.

    retain: attribute name (or list of names) copied from the source node;
    extra keyword arguments become node attributes.
    """
    if not retain:
        retain = []
    try:
        retain.lower()
        retain = [retain]  # was a string, put into list
    except AttributeError:
        pass  # already a list
    try:
        node_id = node.id
    except AttributeError:
        node_id = node  # use the string node id
    data = {}
    if len(retain):
        data = dict((key, node.get(key)) for key in retain)
    kwargs.update(data)  # also use the retained data
    # NOTE(review): positional attribute dict is the networkx 1.x API
    # (networkx >= 2.0 requires add_node(node_id, **kwargs)) — confirm
    # the pinned networkx version before changing.
    self._graph.add_node(node_id, kwargs)
    self._init_interfaces([node_id])
def _init_interfaces(self, nbunch=None):
    """Initialises interfaces for the nodes in *nbunch* (default: all nodes).

    Copies interface structure from the physical graph when the node
    exists there, from the input graph when adding input->phy, and
    otherwise creates a single loopback interface.
    """
    # TODO: this needs a major refactor!
    # store the original bunch to check if going input->phy
    if nbunch is not None:
        nbunch = list(nbunch)  # listify generators
    original_nbunch = {}
    if nbunch is None:
        nbunch = [n for n in self._graph.nodes()]
    try:
        previous = list(nbunch)
        nbunch = list(unwrap_nodes(nbunch))
    except AttributeError:
        pass  # don't need to unwrap
    else:
        # record a dict of the new nbunch to the original
        for index, element in enumerate(nbunch):
            previous_element = previous[index]
            if previous_element is not None:
                original_nbunch[element] = previous[index]
    phy_graph = self._anm.overlay_nx_graphs['phy']
    initialised_nodes = []
    for node in nbunch:
        try:
            phy_interfaces = phy_graph.node[node]['_interfaces']
            interface_data = {'description': None,
                              'type': 'physical'}
            # need to do dict() to copy, otherwise all point to same memory
            # location -> clobber
            data = dict((key, dict(interface_data)) for key in
                        phy_interfaces)
            self._graph.node[node]['_interfaces'] = data
        except KeyError:
            # TODO: split this off into separate function
            # test if adding from input graph
            # Note: this has to be done on a node-by-node basis
            # as ANK allows adding nodes from multiple graphs at once
            # TODO: warn if adding from multiple overlays at once
            if self._overlay_id == "phy" and len(original_nbunch):
                # see if adding from input->phy,
                # overlay nodes were provided as input
                original_node = original_nbunch[node]
                if original_node.overlay_id == "input":
                    # are doing input->phy
                    # copy the interface data across
                    original_interfaces = original_node.get(
                        "_interfaces")
                    if original_interfaces is not None:
                        # Initialise with the keys
                        int_data = {k: {"description": v.get("description"), "type": v.get("type")}
                                    for k, v in original_interfaces.items()}
                        self._graph.node[node][
                            '_interfaces'] = int_data
            else:
                # no counterpart in physical graph, initialise
                # Can't do node log because node doesn't exist yet
                self._graph.node[node]['_interfaces'] = \
                    {0: {'description': 'loopback', 'type': 'loopback'}}
            initialised_nodes.append(node)
    if len(initialised_nodes):
        initialised_nodes = [OverlayNode(self.anm, self._overlay_id, n) for n in initialised_nodes]
        initialised_nodes = sorted([str(n) for n in initialised_nodes])
        self.log.debug("Initialised interfaces for %s" % ", ".join(initialised_nodes))
def allocate_interfaces(self):
"""allocates edges to interfaces"""
if self._overlay_id in ('input', 'phy'):
if all(len(node['input']._interfaces) > 0 for node in self) \
and all(len(edge['input']._interfaces) > 0 for edge in
self.edges()):
input_interfaces_allocated = True
else:
log.info('Automatically assigning input interfaces')
input_interfaces_allocated = False
if self._overlay_id == 'input':
# only return if allocated here
if input_interfaces_allocated:
return # already allocated
# int_counter = (n for n in itertools.count() if n not in
if self._overlay_id == 'phy':
# check if nodes added
nodes = list(self)
edges = list(self.edges())
if len(nodes) and len(edges):
# allocate called once physical graph populated
if input_interfaces_allocated:
for node in self:
input_interfaces = node['input']._interfaces
if len(input_interfaces):
node._interfaces = input_interfaces
for edge in self.edges():
edge._interfaces = edge['input']._interfaces
input_interfaces = edge['input']._interfaces
if len(input_interfaces):
edge._interfaces = input_interfaces
return
self._init_interfaces()
ebunch = sorted(self.edges())
for edge in ebunch:
src = edge.src
dst = edge.dst
dst = edge.dst
src_int_id = src._add_interface('%s to %s' % (src.label,
dst.label))
dst_int_id = dst._add_interface('%s to %s' % (dst.label,
src.label))
edge._interfaces = {}
edge._interfaces[src.id] = src_int_id
edge._interfaces[dst.id] = dst_int_id
def __delitem__(self, key):
"""Alias | |
values of
parameters before writing the file. Default is True.
clobber: Option to overwrite the file if it already exists.
Default is False. If False and file exists, raises an IOError.
data_compression: HDF5 filter to apply when writing the data_array. Default is
None (no filter/compression).
flags_compression: HDF5 filter to apply when writing the flags_array. Default is
the LZF filter.
nsample_compression: HDF5 filter to apply when writing the nsample_array. Default is
the LZF filter.
data_write_dtype: datatype of output visibility data. If 'None', then the same datatype
as data_array will be used. Otherwise, a numpy dtype object must be specified with
an 'r' field and an 'i' field for real and imaginary parts, respectively. See
uvh5.py for an example of defining such a datatype. Default is None.
Returns:
None
Notes:
The HDF5 library allows for the application of "filters" when writing data, which can
provide moderate to significant levels of compression for the datasets in question.
Testing has shown that for some typical cases of UVData objects (empty/sparse flag_array
objects, and/or uniform nsample_arrays), the built-in LZF filter provides significant
compression for minimal computational overhead.
Note that for typical HERA data files written after mid-2018, the bitshuffle filter was
applied to the data_array. Because of the lack of portability, it is not included as an
option here; in the future, it may be added. Note that as long as bitshuffle is installed
on the system in a way that h5py can find it, no action needs to be taken to _read_ a
data_array encoded with bitshuffle (or an error will be raised).
"""
if run_check:
self.check(check_extra=check_extra,
run_check_acceptability=run_check_acceptability)
if os.path.exists(filename):
if clobber:
print("File exists; clobbering")
else:
raise IOError("File exists; skipping")
# open file for writing
with h5py.File(filename, 'w') as f:
# write header
header = f.create_group("Header")
self._write_header(header)
# write out data, flags, and nsample arrays
dgrp = f.create_group("Data")
if data_write_dtype is None:
if self.data_array.dtype == 'complex64':
data_write_dtype = 'c8'
else:
data_write_dtype = 'c16'
if data_write_dtype not in ('c8', 'c16'):
_check_uvh5_dtype(data_write_dtype)
visdata = dgrp.create_dataset("visdata", self.data_array.shape, chunks=True,
compression=data_compression, dtype=data_write_dtype)
indices = (np.s_[:], np.s_[:], np.s_[:], np.s_[:])
_write_complex_astype(self.data_array, visdata, indices)
else:
visdata = dgrp.create_dataset("visdata", chunks=True,
data=self.data_array,
compression=data_compression,
dtype=data_write_dtype)
flags = dgrp.create_dataset("flags", chunks=True,
data=self.flag_array,
compression=flags_compression)
nsample_array = dgrp.create_dataset("nsamples", chunks=True,
data=self.nsample_array.astype(np.float32),
compression=nsample_compression)
return
def initialize_uvh5_file(self, filename, clobber=False, data_compression=None,
flags_compression="lzf", nsample_compression="lzf",
data_write_dtype=None):
"""Initialize a UVH5 file on disk to be written to in parts.
Args:
filename: The UVH5 file to write to.
clobber: Option to overwrite the file if it already exists.
Default is False. If False and file exists, raises an IOError.
data_compression: HDF5 filter to apply when writing the data_array. Default is
None (no filter/compression).
flags_compression: HDF5 filter to apply when writing the flags_array. Default is
the LZF filter.
nsample_compression: HDF5 filter to apply when writing the nsample_array. Default is
the LZF filter.
data_write_dtype: datatype of output visibility data. If 'None', then double-precision
floats will be used. The user may specify 'c8' for single-precision floats or 'c16'
for double-presicion. Otherwise, a numpy dtype object must be specified with
an 'r' field and an 'i' field for real and imaginary parts, respectively. See
uvh5.py for an example of defining such a datatype. Default is None.
Returns:
None
Notes:
When partially writing out data, this function should be called first to initialize the
file on disk. The data is then actually written by calling the write_uvh5_part method,
with the same filename as the one specified in this function. See the tutorial for a
worked example.
The HDF5 library allows for the application of "filters" when writing data, which can
provide moderate to significant levels of compression for the datasets in question.
Testing has shown that for some typical cases of UVData objects (empty/sparse flag_array
objects, and/or uniform nsample_arrays), the built-in LZF filter provides significant
compression for minimal computational overhead.
Note that for typical HERA data files written after mid-2018, the bitshuffle filter was
applied to the data_array. Because of the lack of portability, it is not included as an
option here; in the future, it may be added. Note that as long as bitshuffle is installed
on the system in a way that h5py can find it, no action needs to be taken to _read_ a
data_array encoded with bitshuffle (or an error will be raised).
"""
if os.path.exists(filename):
if clobber:
print("File exists; clobbering")
else:
raise IOError("File exists; skipping")
# write header and empty arrays to file
with h5py.File(filename, 'w') as f:
# write header
header = f.create_group("Header")
self._write_header(header)
# initialize the data groups on disk
data_size = (self.Nblts, self.Nspws, self.Nfreqs, self.Npols)
dgrp = f.create_group("Data")
if data_write_dtype is None:
# we don't know what kind of data we'll get--default to double-precision
data_write_dtype = 'c16'
if data_write_dtype not in ('c8', 'c16'):
# make sure the data type is correct
_check_uvh5_dtype(data_write_dtype)
visdata = dgrp.create_dataset("visdata", data_size, chunks=True,
dtype=data_write_dtype, compression=data_compression)
flags = dgrp.create_dataset("flags", data_size, chunks=True,
dtype='b1', compression=flags_compression)
nsample_array = dgrp.create_dataset("nsamples", data_size, chunks=True,
dtype='f4', compression=nsample_compression)
return
def _check_header(self, filename, run_check_acceptability=True):
"""
Check that the metadata present in a file header matches the object's metadata.
Args:
header: reference to an h5py data group that contains the header information.
run_check_acceptability: Option to check acceptable range of the values of
parameters after reading in the file. Default is True.
Returns:
None
Notes:
This function creates a new UVData object an reads in the header information saved
on disk to compare with the object in memory. Note that this adds some small
memory overhead, but this amount is typically much smaller than the size of the data.
"""
uvd_file = UVH5()
with h5py.File(filename, 'r') as f:
header = f['/Header']
uvd_file._read_header(header, filename, run_check_acceptability=run_check_acceptability)
# temporarily remove data, flag, and nsample arrays, so we only check metadata
if self.data_array is not None:
data_array = self.data_array
self.data_array = None
replace_data = True
else:
replace_data = False
if self.flag_array is not None:
flag_array = self.flag_array
self.flag_array = None
replace_flags = True
else:
replace_flags = False
if self.nsample_array is not None:
nsample_array = self.nsample_array
self.nsample_array = None
replace_nsamples = True
else:
replace_nsamples = False
if self != uvd_file:
raise AssertionError("The object metadata in memory and metadata on disk are different")
else:
# clean up after ourselves
if replace_data:
self.data_array = data_array
if replace_flags:
self.flag_array = flag_array
if replace_nsamples:
self.nsample_array = nsample_array
del uvd_file
return
def write_uvh5_part(self, filename, data_array, flags_array, nsample_array, check_header=True,
antenna_nums=None, antenna_names=None, ant_str=None, bls=None,
frequencies=None, freq_chans=None, times=None, polarizations=None,
blt_inds=None, run_check_acceptability=True, add_to_history=None):
"""
Write out a part of a UVH5 file that has been previously initialized.
Args:
filename: the file on disk to write data to. It must already exist,
and is assumed to have been initialized with initialize_uvh5_file.
data_array: the data to write to disk. A check is done to ensure that
the dimensions of the data passed in conform to the ones specified by
the "selection" arguments.
flags_array: the flags array to write to disk. A check is done to ensure
that the dimensions of the data passed in conform to the ones specified
by the "selection" arguments.
nsample_array: the nsample array to write to disk. A check is done to ensure
that the dimensions fo the data passed in conform to the ones specified
by the "selection" arguments.
check_header: option to check that the metadata present in the header
on disk matches that in the object. Default is True.
run_check_acceptability: If check_header, additional option to check
acceptable range of the values of parameters after reading in the file.
Default is True.
antenna_nums: The antennas numbers to include when writing data into
the object (antenna positions and names for the excluded antennas
will be retained). This cannot be provided if antenna_names is
also provided.
antenna_names: The antennas names to include when writing data into
the object (antenna positions and names for the excluded antennas
will be retained). This cannot be provided if antenna_nums is
also provided.
bls: A list of antenna number tuples | |
**callkwds):
"""
Parameters
----------
points : 2d (M,2) or 1d (N,)
M points in 2-dim space where to evalutae the interpolator
(only one in 1d case)
**callkwds : keywords passed to the interpolator's __call__ method
Returns
-------
Y : 1d array (M,)
interpolated values
"""
points = np.asarray(points)
if len(points.shape) == 1:
points = points[None,:]
return self.call(points, **callkwds)
def get_min(self, x0=None, **kwds):
"""Return [x,y] where z(x,y) = min(z) by minimizing z(x,y) w/
scipy.optimize.fmin().
Parameters
----------
x0 : sequence, length (2,), optional
Initial guess. If None then use the data grid point with the
smallest `z` value.
Returns
-------
[xmin, ymin]: 1d array (2,)
"""
_kwds = dict(disp=0, xtol=1e-12, ftol=1e-8, maxfun=1e4, maxiter=1e4)
_kwds.update(kwds)
if x0 is None:
idx0 = self.values.argmin()
x0 = [self.xx[idx0], self.yy[idx0]]
xopt = optimize.fmin(self, x0, **_kwds)
return xopt
def fempty(shape, dtype=np.float64):
    """Return an uninitialized array in Fortran (column-major) order.

    The previous default ``np.float`` was a deprecated alias for the builtin
    ``float`` and was removed in NumPy 1.24; ``np.float64`` is the same dtype.
    """
    return np.empty(shape, dtype=dtype, order='F')
def distsq(arrx, arry):
    r"""Squared distances between all points in `arrx` and `arry`:
    .. math::
        r_{ij}^2 = \sum_k (\texttt{arrx}[i,k] - \texttt{arry}[j,k])^2 \\
        i = 1..M_x \\
        j = 1..M_y \\
        k = 1..N
    This is like
        scipy.spatial.distance.cdist(arrx, arry)**2.0
    This is a wrapper for :func:`pwtools._flib.distsq`.
    Parameters
    ----------
    arrx, arry : ndarray (Mx,N), (My,N)
        Mx (My) points in N-dim space
    Returns
    -------
    2d array (Mx,My)
    """
    nx, ny = arrx.shape[0], arry.shape[0]
    # both point sets must live in the same N-dim space
    ndim = arrx.shape[1]
    assert ndim == arry.shape[1], ("ndimx (%s, shape: %s) != ndimy (%s, shape: %s)" \
                                   %(str(ndim),
                                     str(arrx.shape),
                                     str(arry.shape[1]),
                                     str(arry.shape)))
    # Allocating in F-order is essential for speed! For many points, this step
    # is actually the bottleneck, NOT the Fortran code! This is b/c if `dist`
    # is order='C' (numpy default), then the f2py wrapper makes a copy of the
    # array before starting to crunch numbers.
    dist = np.empty((nx, ny), dtype=arrx.dtype, order='F')
    return _flib.distsq(arrx, arry, dist, nx, ny, ndim)
class DataND:
    """
    Transform 2d array `a2` to nd array `an`. The 2d array's last column are
    values on a grid represented by the nd array. The 2d array is the
    "flattened" version of the nd array. Works only for ordered axes where `a2`
    was generated by a nested loop over ordered 1d sequences, i.e.
    >>> nx,ny,nz = len(x),len(y),len(z)
    >>> for ii in range(nx):
    ...     for jj in range(ny):
    ...         for kk in range(nz):
    ...             idx = ii*ny*nz + jj*nz + kk
    ...             a2[idx,0] = x[ii]
    ...             a2[idx,1] = y[jj]
    ...             a2[idx,2] = z[kk]
    ...             a2[idx,3] = <some value>
    >>> axes = [x,y,z]
    The `axes` are also extracted by numpy.unique() from `a2`'s columns,
    therefore only ordered axes work.
    The reverse operation `an` -> `a2` is not implemented ATM.
    Examples
    --------
    >>> from pwtools import num
    >>> # something to create grid values
    >>> a=iter(arange(1,100))
    >>> # Nested loop
    >>> a2=array([[x,y,z,next(a)] for x in [0,1,2] for y in [0,1] for z in [0,1,2,3]])
    >>> nd=num.DataND(a2=a2)
    >>> nd.an.shape
    (3,2,4)
    >>> # nd array an[ii,jj,kk]
    >>> nd.an
    array([[[ 1,  2,  3,  4],
            [ 5,  6,  7,  8]],
           [[ 9, 10, 11, 12],
            [13, 14, 15, 16]],
           [[17, 18, 19, 20],
            [21, 22, 23, 24]]])
    >>> nd.axes
    [array([0, 1, 2]), array([0, 1]), array([0, 1, 2, 3])]
    """
    def __init__(self, a2=None, an=None, axes=None):
        """
        Parameters
        ----------
        a2 : 2d array (nrows, ncols)
        an : nd array, optional
            Pre-computed nd array (alternative to `a2`).
        axes : list of 1d arrays, optional
            Pre-computed axes belonging to `an`.
        Attributes
        ----------
        an : nd array
        axes : list of 1d arrays
            The axes of the grid from np.unique()'ed ``ncols-1`` columns.
        """
        if an is None:
            self.a2 = a2
            self.an, self.axes = self.a2_to_an()
        else:
            # previously `an`/`axes` arguments were silently ignored;
            # store them so the documented alternative constructor works
            self.an = an
            self.axes = axes

    def a2_to_an(self):
        """Build the nd array and its axes from ``self.a2``."""
        axes = []
        dims = []
        for colidx in range(self.a2.shape[1]-1):
            a = np.unique(self.a2[:,colidx])
            axes.append(a)
            dims.append(len(a))
        # np.product was removed in NumPy 2.0; np.prod is the supported name
        assert np.prod(dims) == self.a2.shape[0]
        idx = itertools.product(*tuple(map(range, dims)))
        an = np.empty(dims, dtype=self.a2.dtype)
        # an[1,2,3] == an[(1,2,3)], need way to eliminate loop over index array
        for ii,_idx in enumerate(idx):
            an[tuple(_idx)] = self.a2[ii,-1]
        return an, axes
def rms(arr, nitems='all'):
    """RMS of all elements in a ndarray.
    Parameters
    ----------
    arr : ndarray
    nitems : {'all', float}
        normalization constant, the sum of squares is divided by this number,
        set to unity for no normalization, if 'all' then use nitems = number of
        elements in the array
    Returns
    -------
    rms : scalar
    """
    if nitems == 'all':
        # arr.size is the direct spelling of "number of elements"
        # (previously computed as arr.nbytes / arr.itemsize)
        nitems = float(arr.size)
    else:
        nitems = float(nitems)
    return np.sqrt((arr**2.0).sum() / nitems)
def rms3d(arr, axis=0, nitems='all'):
    """RMS of 3d array along `axis`. Sum all elements of all axes != axis.
    Parameters
    ----------
    arr : 3d array
    axis : int
        The axis along which the RMS of all sub-arrays is to be computed
        (usually time axis in MD).
    nitems : {'all', float}
        normalization constant, the sum of squares is divided by this number,
        set to unity for no normalization, if 'all' then use nitems = number of
        elements in each sub-array along `axis`
    Returns
    -------
    rms : 1d array, (arr.shape[axis],)
    """
    assert -1 <= axis <= 2, "allowed axis values: -1,0,1,2"
    assert arr.ndim == 3, "arr must be 3d array"
    if axis == -1:
        axis = arr.ndim - 1
    if nitems == 'all':
        # number of elements in each sub-array along `axis`
        nitems = float(arr.size / arr.shape[axis])
    else:
        nitems = float(nitems)
    # sum over every axis except `axis` in one call; equivalent to the
    # previous per-axis if/elif chain of chained .sum() calls
    sum_axes = tuple(ax for ax in range(arr.ndim) if ax != axis)
    return np.sqrt((arr**2.0).sum(axis=sum_axes) / nitems)
def inner_points_mask(points):
    """Mask array into `points` where ``points[msk]`` are all "inner" points,
    i.e. `points` with one level of edge points removed. For 1D, this is simply
    points[1:-1,:] (assuming ordered points). For ND, we calculate and remove
    the convex hull.
    Parameters
    ----------
    points : nd array (npoints, ndim)
    Returns
    -------
    msk : (npoints, ndim)
        Bool array.
    """
    npoints = points.shape[0]
    msk = np.ones((npoints,), dtype=bool)
    if points.shape[1] > 1:
        # ND: drop every point on the convex hull
        from scipy.spatial import Delaunay
        hull_idx = np.unique(Delaunay(points).convex_hull)
        msk[hull_idx] = False
    else:
        # 1D: only the two end points are "edge" points
        assert (np.diff(points[:,0]) >= 0.0).all(), ("points not monotonic")
        msk[0] = msk[-1] = False
    return msk
def poly_str(ndim, deg):
    """String representation of a `ndim`-poly of degree `deg`."""
    # one "a<i>*x0^p0*x1^p1*..." term per row of the powers array,
    # joined with " + "
    terms = []
    for ii, pwr in enumerate(poly_powers(ndim, deg)):
        factors = '*'.join('x%i^%i' % (i, n) for i, n in enumerate(pwr))
        terms.append('a%i*' % ii + factors)
    return ' + '.join(terms)
def poly_powers(ndim, deg):
    """Powers for building a n-dim polynomial and columns of the n-dim
    Vandermonde matrix.
    Parameters
    ----------
    ndim : number of dimensions of the poly (e.g. 2 for f(x1,x2))
    deg : degree of the poly
    Returns
    -------
    powers : 2d array ``((deg+1)**ndim, ndim)``
    Examples
    --------
    For one dim, we have data points (x_i,y_i) and the to-be-fitted poly of order
    k is::
        f(x) = a0*x^0 + a1*x^1 + a2*x^2 + ... + ak*x^k
    The Vandermonde matrix A consists of all powers of x (cols) for all data
    points (rows) and each row has the form of the poly::
        [[x_0^0 x_0^1 ... x_0^k],
         [x_1^0 x_1^1 ... x_1^k],
         ...
         [x_n^0 x_n^1 ... x_n^k]]
    To fit, we solve A . a = y, where a = [a0,...,ak]. Each row of the
    returned `powers` array holds the exponents of one poly term. For
    ndim=1 and order k::
        [[0], [1], ..., [k]]  ->  x^0, x^1, ..., x^k
    For ndim=2 and order 2, the poly has 9 terms a0*x0^0*x1^0 ... a8*x0^2*x1^2
    and ``powers.shape == (9, 2)``::
        [[0, 0], [0, 1], [0, 2],
         [1, 0], [1, 1], [1, 2],
         [2, 0], [2, 1], [2, 2]]
    where e.g. ``[1,2] -> x0^1*x1^2``.
    """
    # Cartesian product of the exponent range over every dimension
    exponent_range = range(deg + 1)
    combos = itertools.product(*([exponent_range] * ndim))
    return np.array([c for c in combos])
def vander(points, deg):
"""N-dim Vandermonde matrix for data `points` and a polynomial of degree
`deg`.
Parameters
----------
points : see polyfit()
deg : int
Degree of the poly (e.g. 3 for cubic).
Returns
-------
vander : 2d array (npoints, (deg+1)**ndim)
"""
powers | |
str format:
:return: DeviceDataSourceInstanceData
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['device_id', 'hds_id', 'id', 'period', 'start', 'end', 'datapoints', 'format'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_device_datasource_instance_data" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'device_id' is set
if ('device_id' not in params or
params['device_id'] is None):
raise ValueError("Missing the required parameter `device_id` when calling `get_device_datasource_instance_data`") # noqa: E501
# verify the required parameter 'hds_id' is set
if ('hds_id' not in params or
params['hds_id'] is None):
raise ValueError("Missing the required parameter `hds_id` when calling `get_device_datasource_instance_data`") # noqa: E501
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_device_datasource_instance_data`") # noqa: E501
if 'device_id' in params and not re.search('\d+', params['device_id'] if type(params['device_id']) is str else str(params['device_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `device_id` when calling `get_device_datasource_instance_data`, must conform to the pattern `/\d+/`") # noqa: E501
if 'hds_id' in params and not re.search('\d+', params['hds_id'] if type(params['hds_id']) is str else str(params['hds_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `hds_id` when calling `get_device_datasource_instance_data`, must conform to the pattern `/\d+/`") # noqa: E501
if 'id' in params and not re.search('\d+', params['id'] if type(params['id']) is str else str(params['id'])): # noqa: E501
raise ValueError("Invalid value for parameter `id` when calling `get_device_datasource_instance_data`, must conform to the pattern `/\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'device_id' in params:
path_params['deviceId'] = params['device_id'] # noqa: E501
if 'hds_id' in params:
path_params['hdsId'] = params['hds_id'] # noqa: E501
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'period' in params:
query_params.append(('period', params['period'])) # noqa: E501
if 'start' in params:
query_params.append(('start', params['start'])) # noqa: E501
if 'end' in params:
query_params.append(('end', params['end'])) # noqa: E501
if 'datapoints' in params:
query_params.append(('datapoints', params['datapoints'])) # noqa: E501
if 'format' in params:
query_params.append(('format', params['format'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['LMv1'] # noqa: E501
return self.api_client.call_api(
'/device/devices/{deviceId}/devicedatasources/{hdsId}/instances/{id}/data', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DeviceDataSourceInstanceData', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_device_datasource_instance_graph_data(self, device_id, hds_id, id, graph_id, **kwargs): # noqa: E501
"""get device instance graph data # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_device_datasource_instance_graph_data(device_id, hds_id, id, graph_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int device_id: (required)
:param int hds_id: The device-datasource ID (required)
:param int id: (required)
:param int graph_id: (required)
:param int start:
:param int end:
:param str format:
:return: GraphPlot
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_device_datasource_instance_graph_data_with_http_info(device_id, hds_id, id, graph_id, **kwargs) # noqa: E501
else:
(data) = self.get_device_datasource_instance_graph_data_with_http_info(device_id, hds_id, id, graph_id, **kwargs) # noqa: E501
return data
def get_device_datasource_instance_graph_data_with_http_info(self, device_id, hds_id, id, graph_id, **kwargs): # noqa: E501
"""get device instance graph data # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_device_datasource_instance_graph_data_with_http_info(device_id, hds_id, id, graph_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int device_id: (required)
:param int hds_id: The device-datasource ID (required)
:param int id: (required)
:param int graph_id: (required)
:param int start:
:param int end:
:param str format:
:return: GraphPlot
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['device_id', 'hds_id', 'id', 'graph_id', 'start', 'end', 'format'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_device_datasource_instance_graph_data" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'device_id' is set
if ('device_id' not in params or
params['device_id'] is None):
raise ValueError("Missing the required parameter `device_id` when calling `get_device_datasource_instance_graph_data`") # noqa: E501
# verify the required parameter 'hds_id' is set
if ('hds_id' not in params or
params['hds_id'] is None):
raise ValueError("Missing the required parameter `hds_id` when calling `get_device_datasource_instance_graph_data`") # noqa: E501
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_device_datasource_instance_graph_data`") # noqa: E501
# verify the required parameter 'graph_id' is set
if ('graph_id' not in params or
params['graph_id'] is None):
raise ValueError("Missing the required parameter `graph_id` when calling `get_device_datasource_instance_graph_data`") # noqa: E501
if 'device_id' in params and not re.search('\d+', params['device_id'] if type(params['device_id']) is str else str(params['device_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `device_id` when calling `get_device_datasource_instance_graph_data`, must conform to the pattern `/\d+/`") # noqa: E501
if 'hds_id' in params and not re.search('\d+', params['hds_id'] if type(params['hds_id']) is str else str(params['hds_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `hds_id` when calling `get_device_datasource_instance_graph_data`, must conform to the pattern `/\d+/`") # noqa: E501
if 'id' in params and not re.search('\d+', params['id'] if type(params['id']) is str else str(params['id'])): # noqa: E501
raise ValueError("Invalid value for parameter `id` when calling `get_device_datasource_instance_graph_data`, must conform to the pattern `/\d+/`") # noqa: E501
if 'graph_id' in params and not re.search('-?\d+', params['graph_id'] if type(params['graph_id']) is str else str(params['graph_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `graph_id` when calling `get_device_datasource_instance_graph_data`, must conform to the pattern `/-?\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'device_id' in params:
path_params['deviceId'] = params['device_id'] # noqa: E501
if 'hds_id' in params:
path_params['hdsId'] = params['hds_id'] # noqa: E501
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
if 'graph_id' in params:
path_params['graphId'] = params['graph_id'] # noqa: E501
query_params = []
if 'start' in params:
query_params.append(('start', params['start'])) # noqa: E501
if 'end' in params:
query_params.append(('end', params['end'])) # noqa: E501
if 'format' in params:
query_params.append(('format', params['format'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['LMv1'] # noqa: E501
return self.api_client.call_api(
'/device/devices/{deviceId}/devicedatasources/{hdsId}/instances/{id}/graphs/{graphId}/data', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GraphPlot', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_device_datasource_instance_group_by_id(self, device_id, device_ds_id, id, **kwargs): # noqa: E501
"""get device datasource instance group # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_device_datasource_instance_group_by_id(device_id, device_ds_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int device_id: (required)
:param int device_ds_id: The device-datasource ID you'd like to add an instance group for (required)
:param int id: (required)
:param str fields:
:return: DeviceDataSourceInstanceGroup
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_device_datasource_instance_group_by_id_with_http_info(device_id, device_ds_id, id, **kwargs) # noqa: E501
else:
(data) = self.get_device_datasource_instance_group_by_id_with_http_info(device_id, device_ds_id, id, **kwargs) # noqa: E501
return data
def get_device_datasource_instance_group_by_id_with_http_info(self, device_id, device_ds_id, id, **kwargs): # noqa: E501
"""get device datasource instance group # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_device_datasource_instance_group_by_id_with_http_info(device_id, device_ds_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int device_id: (required)
:param int device_ds_id: The device-datasource ID you'd like to add an instance group for (required)
:param int id: (required)
:param str fields:
:return: DeviceDataSourceInstanceGroup
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['device_id', 'device_ds_id', 'id', 'fields'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
| |
{3,S} {4,S}
2 Cd u0 {1,D}
3 Cb u0 {1,S}
4 O2s u0 {1,S}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([4.4,5.37,5.93,6.18,6.5,6.62,6.72],'cal/(mol*K)','+|-',[0.1,0.1,0.1,0.1,0.1,0.1,0.1]),
H298 = (1.5,'kcal/mol','+|-',0.2),
S298 = (-14.4,'cal/(mol*K)','+|-',0.1),
),
shortDesc = u"""Cd-OCb jwb need calc""",
longDesc =
u"""
""",
)
# NOTE(review): RMG-style thermochemistry group-additivity database entries.
# Each entry() binds an adjacency-list `group` pattern to thermo values:
# either explicit ThermoData (Tdata/Cpdata/H298/S298 tuples with units and
# "+|-" uncertainties) or a unicode string naming another entry's label
# whose thermo should be reused. entry()/ThermoData are supplied by the
# database loader that executes this file; they are not defined here.
# The adjacency-list strings and all numeric data below are preserved
# verbatim -- they are data, not logic.
entry(
    index = 182,
    label = "Cds-CddCdsOs",
    group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D}
3 [Cd,CO] u0 {1,S}
4 O2s u0 {1,S}
""",
    thermo = u'Cds-(Cdd-Cd)(Cds-Cds)O2s',
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = 183,
    label = "Cds-(Cdd-O2d)(Cds-O2d)O2s",
    group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 CO u0 {1,S} {6,D}
4 O2s u0 {1,S}
5 O2d u0 {2,D}
6 O2d u0 {3,D}
""",
    thermo = u'Cds-(Cdd-O2d)CsOs',
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = 184,
    label = "Cds-(Cdd-O2d)(Cds-Cd)O2s",
    group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Cd u0 {1,S} {6,D}
4 O2s u0 {1,S}
5 O2d u0 {2,D}
6 C u0 {3,D}
""",
    thermo = u'Cds-(Cdd-O2d)(Cds-Cds)O2s',
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = 185,
    label = "Cds-(Cdd-O2d)(Cds-Cds)O2s",
    group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Cd u0 {1,S} {6,D}
4 O2s u0 {1,S}
5 O2d u0 {2,D}
6 Cd u0 {3,D}
""",
    thermo = u'Cds-(Cdd-O2d)CsOs',
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = 186,
    label = "Cds-(Cdd-O2d)(Cds-Cdd)O2s",
    group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Cd u0 {1,S} {6,D}
4 O2s u0 {1,S}
5 O2d u0 {2,D}
6 Cdd u0 {3,D}
""",
    thermo = u'Cds-(Cdd-O2d)(Cds-Cdd-Cd)O2s',
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = 187,
    label = "Cds-(Cdd-O2d)(Cds-Cdd-O2d)O2s",
    group =
"""
1 * Cd u0 {2,S} {3,D} {5,S}
2 Cd u0 {1,S} {4,D}
3 Cdd u0 {1,D} {6,D}
4 Cdd u0 {2,D} {7,D}
5 O2s u0 {1,S}
6 O2d u0 {3,D}
7 O2d u0 {4,D}
""",
    thermo = ThermoData(
        Tdata = ([300,400,500,600,800,1000,1500],'K'),
        Cpdata = ([11.01,12.97,14.17,14.97,15.8,16.26,16.88],'cal/(mol*K)','+|-',[0.1,0.1,0.1,0.1,0.1,0.1,0.1]),
        H298 = (1.607,'kcal/mol','+|-',0.2),
        S298 = (17.73,'cal/(mol*K)','+|-',0.1),
    ),
    shortDesc = u"""{CCO/O/CCO} RAMAN & GREEN JPCA 2002, 106, 7937-7949""",
    longDesc =
u"""
""",
)
entry(
    index = 188,
    label = "Cds-(Cdd-O2d)(Cds-Cdd-Cd)O2s",
    group =
"""
1 * Cd u0 {2,S} {3,D} {5,S}
2 Cd u0 {1,S} {4,D}
3 Cdd u0 {1,D} {6,D}
4 Cdd u0 {2,D} {7,D}
5 O2s u0 {1,S}
6 O2d u0 {3,D}
7 C u0 {4,D}
""",
    thermo = u'Cds-(Cdd-O2d)(Cds-Cds)O2s',
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = 189,
    label = "Cds-(Cdd-Cd)(Cds-Cd)O2s",
    group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Cd u0 {1,S} {6,D}
4 O2s u0 {1,S}
5 C u0 {2,D}
6 C u0 {3,D}
""",
    thermo = u'Cds-(Cdd-Cd)(Cds-Cds)O2s',
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = 190,
    label = "Cds-(Cdd-Cd)(Cds-Cds)O2s",
    group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Cd u0 {1,S} {6,D}
4 O2s u0 {1,S}
5 C u0 {2,D}
6 Cd u0 {3,D}
""",
    thermo = u'Cds-Cds(Cds-Cds)O2s',
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = 191,
    label = "Cds-(Cdd-Cd)(Cds-Cdd)O2s",
    group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Cd u0 {1,S} {6,D}
4 O2s u0 {1,S}
5 C u0 {2,D}
6 Cdd u0 {3,D}
""",
    thermo = u'Cds-(Cdd-Cd)(Cds-Cdd-Cd)O2s',
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = 192,
    label = "Cds-(Cdd-Cd)(Cds-Cdd-O2d)O2s",
    group =
"""
1 * Cd u0 {2,S} {3,D} {5,S}
2 Cd u0 {1,S} {4,D}
3 Cdd u0 {1,D} {6,D}
4 Cdd u0 {2,D} {7,D}
5 O2s u0 {1,S}
6 C u0 {3,D}
7 O2d u0 {4,D}
""",
    thermo = u'Cds-Cds(Cds-Cdd-O2d)O2s',
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = 193,
    label = "Cds-(Cdd-Cd)(Cds-Cdd-Cd)O2s",
    group =
"""
1 * Cd u0 {2,S} {3,D} {5,S}
2 Cd u0 {1,S} {4,D}
3 Cdd u0 {1,D} {6,D}
4 Cdd u0 {2,D} {7,D}
5 O2s u0 {1,S}
6 C u0 {3,D}
7 C u0 {4,D}
""",
    thermo = u'Cds-(Cdd-Cd)(Cds-Cds)O2s',
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = 194,
    label = "Cds-CddCtOs",
    group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D}
3 Ct u0 {1,S}
4 O2s u0 {1,S}
""",
    thermo = u'Cds-(Cdd-Cd)CtOs',
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = 195,
    label = "Cds-(Cdd-O2d)CtOs",
    group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Ct u0 {1,S}
4 O2s u0 {1,S}
5 O2d u0 {2,D}
""",
    thermo = u'Cds-(Cdd-O2d)(Cds-Cds)O2s',
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = 196,
    label = "Cds-(Cdd-Cd)CtOs",
    group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Ct u0 {1,S}
4 O2s u0 {1,S}
5 C u0 {2,D}
""",
    thermo = u'Cds-CdsCtOs',
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = 197,
    label = "Cds-CddCbOs",
    group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D}
3 Cb u0 {1,S}
4 O2s u0 {1,S}
""",
    thermo = u'Cds-(Cdd-Cd)CbOs',
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = 198,
    label = "Cds-(Cdd-O2d)CbOs",
    group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Cb u0 {1,S}
4 O2s u0 {1,S}
5 O2d u0 {2,D}
""",
    thermo = u'Cds-(Cdd-O2d)(Cds-Cds)O2s',
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = 199,
    label = "Cds-(Cdd-Cd)CbOs",
    group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Cb u0 {1,S}
4 O2s u0 {1,S}
5 C u0 {2,D}
""",
    thermo = u'Cds-CdsCbOs',
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = 1995,
    label = "Cd-CdCsOs",
    group =
"""
1 * Cd u0 {2,S} {3,S} {4,D}
2 Cs u0 {1,S}
3 O2s u0 {1,S}
4 C u0 {1,D}
""",
    thermo = ThermoData(
        Tdata = ([300,400,500,600,800,1000,1500],'K'),
        Cpdata = ([12.79,15.86,19.67,22.91,26.55,27.85,28.45],'J/(mol*K)','+|-',[5.1,5.1,5.1,5.1,5.1,5.1,5.1]),
        H298 = (33,'kJ/mol','+|-',4.34),
        S298 = (-50.89,'J/(mol*K)','+|-',5.94),
    ),
    shortDesc = u"""\Derived from CBS-QB3 calculation with 1DHR treatment""",
    longDesc =
u"""
Derived using calculations at B3LYP/6-311G(d,p)/CBS-QB3 level of theory. 1DH-rotors
optimized at the B3LYP/6-31G(d).Paraskevas et al, Chem. Eur. J. 2013, 19, 16431-16452,
DOI: 10.1002/chem.201301381
""",
)
entry(
    index = 169,
    label = "Cds-CdsCsOs",
    group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cd u0 {1,D}
3 Cs u0 {1,S}
4 O2s u0 {1,S}
""",
    thermo = ThermoData(
        Tdata = ([300,400,500,600,800,1000,1500],'K'),
        Cpdata = ([3.59,4.56,5.04,5.3,5.84,6.07,6.16],'cal/(mol*K)','+|-',[0.1,0.1,0.1,0.1,0.1,0.1,0.1]),
        H298 = (3.03,'kcal/mol','+|-',0.2),
        S298 = (-12.32,'cal/(mol*K)','+|-',0.1),
    ),
    shortDesc = u"""Cd-OCs BOZZELLI-RADOM vin-oh and del (ccoh-ccohc)""",
    longDesc =
u"""
""",
)
entry(
    index = 179,
    label = "Cds-CddCsOs",
    group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D}
3 Cs u0 {1,S}
4 O2s u0 {1,S}
""",
    thermo = u'Cds-(Cdd-Cd)CsOs',
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = 180,
    label = "Cds-(Cdd-O2d)CsOs",
    group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Cs u0 {1,S}
4 O2s u0 {1,S}
5 O2d u0 {2,D}
""",
    thermo = ThermoData(
        Tdata = ([300,400,500,600,800,1000,1500],'K'),
        Cpdata = ([10.91,12.65,13.59,14.22,15,15.48,16.28],'cal/(mol*K)','+|-',[0.1,0.1,0.1,0.1,0.1,0.1,0.1]),
        H298 = (3.273,'kcal/mol','+|-',0.2),
        S298 = (18.58,'cal/(mol*K)','+|-',0.1),
    ),
    shortDesc = u"""{CCO/O/C} RAMAN & GREEN JPCA 2002, 106, 7937-7949""",
    longDesc =
u"""
""",
)
entry(
    index = 181,
    label = "Cds-(Cdd-Cd)CsOs",
    group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Cs u0 {1,S}
4 O2s u0 {1,S}
5 C u0 {2,D}
""",
    thermo = u'Cds-CdsCsOs',
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = -1,
    label = "Cds-CdCS",
    group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 C u0 {1,D}
3 C u0 {1,S}
4 S2s u0 {1,S}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = 1181,
    label = "Cds-CdsCsSs",
    group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cd u0 {1,D}
3 Cs u0 {1,S}
4 S2s u0 {1,S}
""",
    thermo = ThermoData(
        Tdata = ([300,400,500,600,800,1000,1500],'K'),
        Cpdata = ([4.23,4.63,4.97,5.29,5.83,6.17,6.53],'cal/(mol*K)'),
        H298 = (10.63,'kcal/mol'),
        S298 = (-12.76,'cal/(mol*K)'),
    ),
    shortDesc = u"""CBS-QB3 GA 1D-HR <NAME> 2010""",
    longDesc =
u"""
""",
)
entry(
    index = -1,
    label = "Cds-CdsCdsSs",
    group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cd u0 {1,D}
3 Cd u0 {1,S}
4 S2s u0 {1,S}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = -1,
    label = "Cds-Cds(Cds-Cd)S2s",
    group =
"""
1 * Cd u0 {2,S} {3,D} {4,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,D}
4 S2s u0 {1,S}
5 C u0 {2,D}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = -1,
    label = "Cds-Cds(Cds-Cds)S2s",
    group =
"""
1 * Cd u0 {2,S} {3,D} {4,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,D}
4 S2s u0 {1,S}
5 Cd u0 {2,D}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = -1,
    label = "Cds-Cds(Cds-Cdd)S2s",
    group =
"""
1 * Cd u0 {2,S} {3,D} {4,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,D}
4 S2s u0 {1,S}
5 Cdd u0 {2,D}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-Cds(Cds-Cdd-S2d)S2s",
group =
"""
1 * Cd u0 {2,S} {4,D} {5,S}
2 Cd u0 {1,S} {3,D}
3 Cdd u0 {2,D} {6,D}
4 Cd u0 {1,D}
5 S2s u0 {1,S}
6 S2d u0 {3,D}
""",
thermo | |
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
# Simulation grid: 10 s sampled at N points (dt = 1 ms).
N = 10001
Time = np.linspace(0,10,N)
dt = Time[1]-Time[0]
# Reference trajectory R(t) = 2*sin(2*pi*t) and its first two derivatives,
# used by the feedback-linearizing controller below.
R = lambda t: 2*np.sin(2*np.pi*t)
dR = lambda t: (2*np.pi)*2*np.cos(2*np.pi*t)
d2R = lambda t: -( (2*np.pi)**2)*2*np.sin(2*np.pi*t)
def g1(x1, x2):
    """Drift term of the first state: x1_dot = x1**2 + x2."""
    quadratic = x1 ** 2
    return quadratic + x2
def g2(x1, x2, u):
    """Second state derivative: x2_dot equals the control input u.

    x1 and x2 are accepted only to keep a uniform call signature with g1;
    they do not affect the result.
    """
    return u
def update_policy(t,x1,x2,u,dt):
    # One forward-Euler step, appended in place to the state-history lists
    # x1 and x2. Statement order matters: x2 is appended first, then x1 is
    # advanced using the PREVIOUS x2 value (x2[-2]) so both states step
    # from the same time instant. t is unused (kept for a uniform
    # callback signature).
    x2.append(x2[-1] + g2(x1[-1],x2[-1],u)*dt)
    x1.append(x1[-1] + g1(x1[-1],x2[-2])*dt)
def input(t,x1,x2,k1,k2):
    # Feedback-linearizing tracking control law: cancels the plant
    # nonlinearity g1 and places the tracking-error dynamics with gains
    # k1, k2, so x1 follows R(t).
    # NOTE(review): the name shadows the builtin input(); left unchanged
    # because the simulation loop below calls it under this name.
    u = (1+k1*k2)*R(t) + (k1 + k2)*dR(t) + d2R(t) \
        - (2*x1[-1] + k1 + k2)*g1(x1[-1],x2[-1]) - k1*k2*x1[-1]
    return(u)
# --- Run the feedback-linearized tracking simulation and plot results. ---
k1,k2 = 5,5
x1 = [0]
x2 = [0]
for t in Time[1:]:
    update_policy(t,x1,x2,input(t,x1,x2,k1,k2),dt)
# Output vs. reference trajectory.
plt.figure()
plt.title(r'$\dot{x}_{1} = x_{1}^{2} + x_{2}; \hspace{1em} \dot{x}_{2} = u$',\
    fontsize=16,color='gray')
plt.plot(Time,x1,'b',lw=2)
plt.plot(Time,R(Time),'r--')
plt.xlabel("Time (s)")
plt.ylabel("Desired Measure")
plt.legend([r"Output $y = x_{1}$",r"Reference $r(t) = 2\sin(2\pi t)$"])
# Tracking error over time.
plt.figure()
plt.title('Error vs. Time')
plt.plot(Time, R(Time)-x1,color='r')
plt.xlabel("Time (s)")
plt.ylabel("Error")
# plt.show()
# --- Parameters for the two-actuator mass-spring model below. ---
# k1,k2: spring constants (rebound); m1,m2: side masses; M: center mass;
# A,w: reference amplitude/frequency; b1..b4: backstepping gains.
k1,k2 = 50,50
m1,m2,M = 1,1,1
A,w = 0.10,0.5*np.pi
b1,b2,b3,b4 = 20,20,20,20
CocontractionIndex = 2
def dx1(t, X):
    """State derivative x1_dot: velocity of the center mass (X[1])."""
    velocity = X[1]
    return velocity
def dx2(t,X):
    # Center-mass acceleration: net spring force from both side masses
    # (spring constants k1, k2 and mass M are module-level globals).
    return(-(k1+k2)/M*X[0] + (k1/M)*X[2] + (k2/M)*X[3])
def dx3(t, X):
    """State derivative x3_dot: velocity of side mass 1 (X[4])."""
    return X[4]
def dx4(t, X):
    """State derivative x4_dot: velocity of side mass 2 (X[5])."""
    return X[5]
def dx5(t,X,U):
    # Side mass 1 acceleration: spring coupling to the center mass plus
    # its actuator force U[0] (k1, m1 are module-level globals).
    return(k1/m1*X[0] -k1/m1*X[2] + U[0]/m1)
def dx6(t,X,U):
    # Side mass 2 acceleration: spring coupling to the center mass minus
    # its actuator force U[1], which pulls in the opposite direction
    # (k2, m2 are module-level globals).
    return(k2/m2*X[0] -k2/m2*X[3] - U[1]/m2)
# Reference trajectory r(t) = A*sin(w*t) and its first four derivatives
# (the backstepping recursion below needs derivatives up to order 4).
r = lambda t: A*np.sin(w*t)
dr = lambda t: A*w*np.cos(w*t)
d2r = lambda t: -A*w**2*np.sin(w*t)
d3r = lambda t: -A*w**3*np.cos(w*t)
d4r = lambda t: A*w**4*np.sin(w*t)
def z1(t,X):
    # Backstepping error 1: position tracking error r(t) - x1.
    return(r(t) - X[0])
def dz1(t,X):
    # Time derivative of z1 (velocity error).
    return(dr(t) - X[1])
def d2z1(t,X):
    # Second derivative of z1, using the model acceleration dx2.
    return(d2r(t) - dx2(t,X))
def d3z1(t,X):
    # Third derivative of z1, with the derivative of dx2 expanded in
    # closed form (uses globals k1, k2, M).
    return(d3r(t) + (k1+k2)/M*X[1] - k1/M*X[4] - k2/M*X[5])
def a1(t,X):
    # Virtual control for backstepping step 1 (gain b1 on z1).
    return(dr(t) - b1*z1(t,X))
def da1(t,X):
    # Time derivative of the virtual control a1.
    return(d2r(t) - b1*dz1(t,X))
def z2(t,X):
    # Backstepping error 2: center-mass velocity minus virtual control a1.
    return(X[1] - a1(t,X))
def dz2(t,X):
    # Time derivative of z2.
    return(dx2(t,X) - da1(t,X))
def a2(t,X):
    # Virtual control for backstepping step 2: desired value of the
    # spring-force combination (gains b1, b2).
    return((k1+k2)/M*X[0] + (1 + b1*b2)*z1(t,X) + (b1+b2)*dz1(t,X) + d2r(t))
def da2(t,X):
    # First time derivative of the virtual control a2.
    return((k1+k2)/M*X[1] + (1 + b1*b2)*dz1(t,X) + (b1+b2)*d2z1(t,X) + d3r(t))
def d2a2(t,X):
    # Second time derivative of the virtual control a2.
    return((k1+k2)/M*dx2(t,X) + (1 + b1*b2)*d2z1(t,X) + (b1+b2)*d3z1(t,X) + d4r(t))
def z3(t,X):
    # Backstepping error 3: actual spring-force combination
    # (k1*x3 + k2*x4)/M minus its desired value a2.
    return(k1/M*X[2] + k2/M*X[3] - a2(t,X))
def dz3(t,X):
    # Time derivative of z3 (side-mass velocities vs. da2).
    return(k1/M*X[4] + k2/M*X[5] - da2(t,X))
def a3(t,X):
    # Virtual control for backstepping step 3 (gain b3 on z3).
    return(da2(t,X) - z2(t,X) -b3*z3(t,X))
def da3(t,X):
    # Time derivative of the virtual control a3.
    return(d2a2(t,X) - dz2(t,X) -b3*dz3(t,X))
def z4(t,X):
    # Backstepping error 4: spring-force rate minus virtual control a3.
    return(k1/M*X[4] + k2/M*X[5] - a3(t,X))
# def dz4(t,X):
# return(k1/M*dx5(t,X,U) + k2/M*dx6(t,X,U) - da3(t,X))
def c1(t,X):
    # Final backstepping target for the input-dependent term (gain b4 on
    # z4); return_U maps this scalar command onto the two actuators.
    return(-(k1**2*m2 + k2**2*m1)/(m1*m2*M)*X[0] + k1**2/(m1*M)*X[2] + k2**2/(m2*M)*X[3] + \
        da3(t,X) - z3(t,X) - b4*z4(t,X))
def return_U(t,X,e,Noise):
    # Allocate the scalar backstepping command c1 across the two
    # actuators. e is the cocontraction ratio between the secondary and
    # the primary input; the sign of c1 selects which actuator is
    # primary. Noise is a 2-vector of additive input noise applied after
    # the allocation (the cocontraction coupling uses the noise-free
    # command).
    if c1(t,X)<=0:
        u1 = ((m1*m2*M)/(k1*m2-e*k2*m1))*c1(t,X) + Noise[0]
        u2 = e*(u1-Noise[0]) + Noise[1]
    else:
        u2 = ((m1*m2*M)/(e*k1*m2-k2*m1))*c1(t,X) + Noise[1]
        u1 = e*(u2-Noise[1]) + Noise[0]
    return([u1,u2])
def animate_trajectory(response,Time,x1,x3,x4,u1,u2):
    """Animate the two-actuator mass-spring simulation.

    When ``response`` is True, build a matplotlib figure with the model
    drawing (ax1), both actuator forces (ax3/ax2), the center-mass
    trajectory vs. the reference (ax4) and the tracking error (ax5), and
    run a FuncAnimation over the supplied histories. When False, do
    nothing.

    Time -- time array; x1, x3, x4 -- position histories of the center
    and side masses; u1, u2 -- actuator force histories (lists).
    NOTE(review): assumes the histories have more than 5000 samples --
    indices >= 5000 are treated as post-transient for force/axis scaling
    (the u1[5000:] / u2[5000:] slices below); confirm with the caller.
    """
    assert type(response)==bool, "Input must be either True or False."
    if response == True:
        import numpy as np
        import matplotlib.pyplot as plt
        from matplotlib.patches import Ellipse
        import matplotlib.animation as animation
        import matplotlib.patches as patches
        from scipy import signal
        fig = plt.figure(figsize=(10,8))
        ax1 = plt.subplot2grid((3,4),(0,0),colspan=4)
        ax2 = plt.subplot2grid((3,4),(1,0),colspan=2)
        ax3 = plt.subplot2grid((3,4),(1,2),colspan=2)
        ax4 = plt.subplot2grid((3,4),(2,0),colspan=3)
        ax5 = plt.subplot2grid((3,4),(2,3))
        # NOTE(review): 'Fontsize' here and 'Color' below are capitalized;
        # matplotlib kwargs are case-sensitive in current releases --
        # confirm the matplotlib version this was written for.
        plt.suptitle("Underdetermined Mass-Spring System",Fontsize=28,y=0.95)
        # Model Drawing
        IdealBoxScalingFactor = 0.78533496170320571 # Calculated from w = np.pi
        CurrentTrialScalingFactor = max([max(x3)-min(x1),max(x1)-min(x4)])
        StraightLength = 0.05*CurrentTrialScalingFactor/IdealBoxScalingFactor
        RestingLength = max([max(x1)-min(x3),max(x4)-min(x1)])+2*StraightLength\
            +0.30*CurrentTrialScalingFactor/IdealBoxScalingFactor
        CenterBoxHalfWidth = 0.15*CurrentTrialScalingFactor/IdealBoxScalingFactor
        CenterBoxHalfHeight = 0.2*CurrentTrialScalingFactor/IdealBoxScalingFactor
        SideBoxHalfWidth = 0.1*CurrentTrialScalingFactor/IdealBoxScalingFactor
        SideBoxHalfHeight = 0.075*CurrentTrialScalingFactor/IdealBoxScalingFactor
        ForceScaling = 1*CurrentTrialScalingFactor/IdealBoxScalingFactor
        # Sawtooth profile used to draw the spring coils.
        Spring_array =\
            SideBoxHalfWidth\
            *np.abs(signal.sawtooth(5*2*np.pi*np.linspace(0,1,1001)-np.pi/2))\
            -(1/2)*SideBoxHalfWidth
        Spring1, =\
            ax1.plot(np.linspace(x1[0]+CenterBoxHalfWidth+StraightLength,\
                RestingLength+x3[0]-SideBoxHalfWidth-StraightLength,1001),\
                Spring_array,'k')
        Spring1_left, = ax1.plot([x1[0]+CenterBoxHalfWidth,x1[0]+CenterBoxHalfWidth+StraightLength],[0,0],'k')
        Spring1_right, = \
            ax1.plot([RestingLength+x3[0]-SideBoxHalfWidth-StraightLength,\
                RestingLength+x3[0]-SideBoxHalfWidth],\
                [0,0],'k')
        Spring2, =\
            ax1.plot(np.linspace(-RestingLength+x4[0]+SideBoxHalfWidth+StraightLength,\
                x1[0]-CenterBoxHalfWidth-StraightLength,1001),\
                Spring_array,'k')
        Spring2_left, = ax1.plot([x1[0]-CenterBoxHalfWidth-StraightLength,x1[0]-CenterBoxHalfWidth],[0,0],'k')
        Spring2_right, = \
            ax1.plot([-RestingLength+x4[0]+SideBoxHalfWidth,\
                -RestingLength+x4[0]+SideBoxHalfWidth+StraightLength],\
                [0,0],'k')
        ax1.get_xaxis().set_ticks([])
        ax1.get_yaxis().set_ticks([])
        ax1.set_frame_on(True)
        CenterMass = plt.Rectangle((-CenterBoxHalfWidth,-CenterBoxHalfHeight),\
            2*CenterBoxHalfWidth,2*CenterBoxHalfHeight,Color='#4682b4')
        ax1.add_patch(CenterMass)
        Mass1 = plt.Rectangle((-SideBoxHalfWidth+RestingLength,-SideBoxHalfHeight),\
            2*SideBoxHalfWidth,2*SideBoxHalfHeight,Color='#4682b4')
        ax1.add_patch(Mass1)
        Mass2 = plt.Rectangle((-SideBoxHalfWidth-RestingLength,-SideBoxHalfHeight),\
            2*SideBoxHalfWidth,2*SideBoxHalfHeight,Color='#4682b4')
        ax1.add_patch(Mass2)
        # Arrow marking the current center-mass position against the scale.
        PositionArrow, = ax1.plot([x1[0],x1[0]],[0,2*CenterBoxHalfHeight],'k')
        PositionArrowHead, = ax1.plot([x1[0]],[2*CenterBoxHalfHeight],'k^')
        PositionArrowTail, = ax1.plot([x1[0]],[0],'ko')
        Scale = ax1.plot([-1.1*A,1.1*A],\
            [2.75*CenterBoxHalfHeight,2.75*CenterBoxHalfHeight],\
            '0.60')
        Ticks = np.linspace(-A,A,5)
        TickHeights = [0.3*CenterBoxHalfHeight,\
            0.15*CenterBoxHalfHeight,\
            0.3*CenterBoxHalfHeight,\
            0.15*CenterBoxHalfHeight,\
            0.3*CenterBoxHalfHeight]
        [ax1.plot([Ticks[i],Ticks[i]],\
            [2.75*CenterBoxHalfHeight-TickHeights[i],2.75*CenterBoxHalfHeight],'0.60') \
            for i in range(5)]
        # Force arrows, scaled by the post-transient maximum force.
        Force1Arrow, = ax1.plot([RestingLength+x3[0]+(5/3)*SideBoxHalfWidth,\
            RestingLength + x3[0]+(5/3)*SideBoxHalfWidth\
            +ForceScaling*u1[0]/(max(u1[5000:]+u2[5000:]))],\
            [0,0],'g')
        Force1ArrowHead, = \
            ax1.plot([RestingLength + x3[0]+(5/3)*SideBoxHalfWidth\
                +ForceScaling*u1[0]/(max(u1[5000:]+u2[5000:]))],[0],'g>')
        Force2Arrow, =\
            ax1.plot([x4[0]-RestingLength-(5/3)*SideBoxHalfWidth\
                -ForceScaling*u2[0]/(max(u1[5000:]+u2[5000:])),\
                x4[0]-RestingLength-(5/3)*SideBoxHalfWidth],[0,0],'r')
        Force2ArrowHead, = \
            ax1.plot([x4[0]-RestingLength-(5/3)*SideBoxHalfWidth\
                -ForceScaling*u2[0]/(max(u1[5000:]+u2[5000:]))],[0],'r<')
        LowerBound = (np.array(x4[5001:])-RestingLength-(5/3)*SideBoxHalfWidth\
            -ForceScaling*np.array(u2[5000:])/(max(u1[5000:]+u2[5000:]))).min()
        UpperBound = (RestingLength + np.array(x3[5001:])+(5/3)*SideBoxHalfWidth\
            +ForceScaling*np.array(u1[5000:])/(max(u1[5000:]+u2[5000:]))).max()
        Bound = 1.05*np.array([-LowerBound,UpperBound]).max()
        ax1.set_xlim([-Bound,Bound])
        ax1.set_ylim([-1.5*CenterBoxHalfHeight,3.25*CenterBoxHalfHeight])
        ax1.set_aspect('equal')
        #Force 1
        Force1, = ax3.plot([0],[u1[0]],color = 'g')
        ax3.set_xlim(0,Time[-1])
        ax3.set_xticks(list(np.linspace(0,Time[-1],5)))
        ax3.set_xticklabels([str(0),'','','',str(Time[-1])])
        ax3.set_ylim(0,1.15*max(u1[5000:]+u2[5000:]))
        if np.linspace(0,np.floor(1.15*max(u1[5000:]+u2[5000:])),\
            int(np.floor(1.15*max(u1[5000:]+u2[5000:])))+1).shape[0] < 5:
            ax3.set_yticks(list(np.linspace(0,np.floor(1.15*max(u1[5000:]+u2[5000:])),\
                int(np.floor(1.15*max(u1[5000:]+u2[5000:])))+1)))
            ax3.set_yticklabels([""]*(int(np.floor(1.15*max(u1[5000:]+u2[5000:])))+1))
        else:
            NumTicks = np.floor(1.15*max(u1[5000:]+u2[5000:]))
            MaxTick = NumTicks - NumTicks%5
            TickStep = MaxTick/5
            Ticks = list(np.linspace(0,TickStep*5,6))
            ax3.set_yticks(Ticks)
            ax3.set_yticklabels([""]*len(Ticks))
        # ax3.set_yticklabels([str(int(el)) for el in \
        #                         list(np.linspace(0,\
        #                             np.ceil(max(u1[int(len(u1)/2):])*1.1) - \
        #                                 np.ceil(max(u1[int(len(u1)/2):])*1.1)%3,4))],\
        #                         fontsize=12)
        ax3.spines['right'].set_visible(False)
        ax3.spines['top'].set_visible(False)
        ax3.set_title("Force 1",fontsize=16,fontweight = 4,color = 'g',y = 0.95)
        # ax3.set_xlabel("Time (s)")
        #Force 2
        Force2, = ax2.plot([0],[u2[0]],color = 'r')
        ax2.set_xlim(0,Time[-1])
        ax2.set_xticks(list(np.linspace(0,Time[-1],5)))
        ax2.set_xticklabels([str(0),'','','',str(Time[-1])])
        ax2.set_ylim(0,1.15*max(u1[5000:]+u2[5000:]))
        ax2.set_yticks(list(np.linspace(0,np.floor(1.15*max(u1[5000:]+u2[5000:])),\
            int(np.floor(1.15*max(u1[5000:]+u2[5000:])))+1)))
        ax2.set_yticklabels([str(int(el)) for el in \
            list(np.linspace(0,np.floor(1.15*max(u1[5000:]+u2[5000:])),\
                int(np.floor(1.15*max(u1[5000:]+u2[5000:])))+1))],\
            fontsize=12)
        if np.linspace(0,np.floor(1.15*max(u1[5000:]+u2[5000:])),\
            int(np.floor(1.15*max(u1[5000:]+u2[5000:])))+1).shape[0] < 5:
            ax2.set_yticks(list(np.linspace(0,np.floor(1.15*max(u1[5000:]+u2[5000:])),\
                int(np.floor(1.15*max(u1[5000:]+u2[5000:])))+1)))
            ax2.set_yticklabels([str(int(el)) for el in \
                list(np.linspace(0,np.floor(1.15*max(u1[5000:]+u2[5000:])),\
                    int(np.floor(1.15*max(u1[5000:]+u2[5000:])))+1))],\
                fontsize=12)
        else:
            NumTicks = np.floor(1.15*max(u1[5000:]+u2[5000:]))
            MaxTick = NumTicks - NumTicks%5
            TickStep = MaxTick/5
            Ticks = list(np.linspace(0,TickStep*5,6))
            ax2.set_yticks(Ticks)
            ax2.set_yticklabels([str(tick) for tick in Ticks])
        ax2.spines['right'].set_visible(False)
        ax2.spines['top'].set_visible(False)
        ax2.set_title("Force 2",fontsize=16,fontweight = 4,color = 'r',y = 0.95)
        # ax2.set_xlabel("Time (s)")
        # Trajectory
        Predicted, = ax4.plot(Time,r(Time),'0.60',linestyle='--')
        Actual, = ax4.plot([0],[x1[0]],'b')
        ax4.set_xlim(0,Time[-1])
        ax4.set_xticks(list(np.linspace(0,Time[-1],5)))
        ax4.set_xticklabels([str(0),'','','',str(Time[-1])])
        ax4.set_ylim([-1.25*A,1.25*A])
        ax4.set_yticks([-A,0,A])
        ax4.set_xlabel("Time (s)")
        ax4.set_ylabel("Position of Center Mass (m)")
        ax4.spines['right'].set_visible(False)
        ax4.spines['top'].set_visible(False)
        # Error
        ErrorArray = x1-r(Time)
        Error, = ax5.plot([0],[ErrorArray[0]],'k')
        ax5.set_xlim(0,Time[-1])
        ax5.set_xticks(list(np.linspace(0,Time[-1],5)))
        ax5.set_xticklabels([str(0),'','','',str(Time[-1])])
        ax5.set_ylim([ErrorArray.min() - 0.1*(max(ErrorArray)-min(ErrorArray)),\
            ErrorArray.max() + 0.1*(max(ErrorArray)-min(ErrorArray))])
        ax5.set_xlabel("Time (s)")
        ax5.set_ylabel("Error (m)")
        ax5.yaxis.set_label_position("right")
        ax5.yaxis.tick_right()
        ax5.spines['left'].set_visible(False)
        ax5.spines['top'].set_visible(False)
        def animate(i):
            # Per-frame update: move every artist to the state at sample i.
            Spring1.set_xdata(np.linspace(x1[i]+CenterBoxHalfWidth+StraightLength,\
                RestingLength+x3[i]-SideBoxHalfWidth-StraightLength,1001))
            Spring1_left.set_xdata([x1[i]+CenterBoxHalfWidth,x1[i]+CenterBoxHalfWidth+StraightLength])
            Spring1_right.set_xdata([RestingLength+x3[i]-SideBoxHalfWidth-StraightLength,\
                RestingLength+x3[i]-SideBoxHalfWidth])
            Spring2.set_xdata(np.linspace(-RestingLength+x4[i]+SideBoxHalfWidth+StraightLength,\
                x1[i]-CenterBoxHalfWidth-StraightLength,1001))
            Spring2_left.set_xdata([x1[i]-CenterBoxHalfWidth-StraightLength,x1[i]-CenterBoxHalfWidth])
            Spring2_right.set_xdata([-RestingLength+x4[i]+SideBoxHalfWidth,\
                -RestingLength+x4[i]+SideBoxHalfWidth+StraightLength])
            CenterMass.xy = (-CenterBoxHalfWidth + x1[i],-CenterBoxHalfHeight)
            Mass1.xy = (-SideBoxHalfWidth+RestingLength + x3[i],-SideBoxHalfHeight)
            Mass2.xy = (-SideBoxHalfWidth-RestingLength + x4[i],-SideBoxHalfHeight)
            PositionArrow.set_xdata([x1[i],x1[i]])
            PositionArrowHead.set_xdata([x1[i]])
            PositionArrowTail.set_xdata([x1[i]])
            Force1Arrow.set_xdata([RestingLength+x3[i]+(5/3)*SideBoxHalfWidth,\
                RestingLength + x3[i]+(5/3)*SideBoxHalfWidth\
                +ForceScaling*u1[i]/(max(u1[5000:]+u2[5000:]))])
            Force1ArrowHead.set_xdata([RestingLength + x3[i]+(5/3)*SideBoxHalfWidth\
                +ForceScaling*u1[i]/(max(u1[5000:]+u2[5000:]))])
            Force2Arrow.set_xdata([x4[i]-RestingLength-(5/3)*SideBoxHalfWidth\
                -ForceScaling*u2[i]/(max(u1[5000:]+u2[5000:])),\
                x4[i]-RestingLength-(5/3)*SideBoxHalfWidth])
            Force2ArrowHead.set_xdata([x4[i]-RestingLength-(5/3)*SideBoxHalfWidth\
                -ForceScaling*u2[i]/(max(u1[5000:]+u2[5000:]))])
            Force1.set_xdata(Time[:i])
            Force1.set_ydata(u1[:i])
            Force2.set_xdata(Time[:i])
            Force2.set_ydata(u2[:i])
            Actual.set_xdata(Time[:i])
            Actual.set_ydata(x1[:i])
            Error.set_xdata(Time[:i])
            Error.set_ydata(ErrorArray[:i])
            return Spring1,Spring1_left,Spring1_right,Spring2,Spring2_left,Spring2_right,CenterMass,Mass1,Mass2,Force1,Force2,Actual,Error,PositionArrow,PositionArrowHead,PositionArrowTail,Force1Arrow,Force1ArrowHead,Force2Arrow,Force2ArrowHead,
        # Init only required for blitting to give a clean slate.
        def init():
            Spring1, =\
                ax1.plot(np.linspace(x1[0]+CenterBoxHalfWidth+StraightLength,\
                    RestingLength+x3[0]-SideBoxHalfWidth-StraightLength,1001),\
                    Spring_array,'k')
            Spring1_left, = \
                ax1.plot([x1[0]+CenterBoxHalfWidth,x1[0]+CenterBoxHalfWidth+StraightLength],\
                    [0,0],'k')
            Spring1_right, = \
                ax1.plot([RestingLength+x3[0]-SideBoxHalfWidth-StraightLength,\
                    RestingLength+x3[0]-SideBoxHalfWidth],[0,0],'k')
            Spring2, =\
                ax1.plot(np.linspace(-RestingLength+x4[0]+SideBoxHalfWidth+StraightLength,\
                    x1[0]-CenterBoxHalfWidth-StraightLength,1001),\
                    Spring_array,'k')
            Spring2_left, =\
                ax1.plot([x1[0]-CenterBoxHalfWidth-StraightLength,x1[0]-CenterBoxHalfWidth],\
                    [0,0],'k')
            Spring2_right, = \
                ax1.plot([-RestingLength+x4[0]+SideBoxHalfWidth,\
                    -RestingLength+x4[0]+SideBoxHalfWidth+StraightLength],[0,0],'k')
            CenterMass = plt.Rectangle((-CenterBoxHalfWidth,-CenterBoxHalfHeight),\
                2*CenterBoxHalfWidth,2*CenterBoxHalfHeight,Color='#4682b4')
            ax1.add_patch(CenterMass)
            Mass1 = plt.Rectangle((-SideBoxHalfWidth+RestingLength,-SideBoxHalfHeight),\
                2*SideBoxHalfWidth,2*SideBoxHalfHeight,Color='#4682b4')
            ax1.add_patch(Mass1)
            Mass2 = plt.Rectangle((-SideBoxHalfWidth-RestingLength,-SideBoxHalfHeight),\
                2*SideBoxHalfWidth,2*SideBoxHalfHeight,Color='#4682b4')
            ax1.add_patch(Mass2)
            PositionArrow, = ax1.plot([x1[0],x1[0]],[0,2*CenterBoxHalfHeight],'k')
            PositionArrowHead, = ax1.plot([x1[0]],[2*CenterBoxHalfHeight],'k^')
            PositionArrowTail, = ax1.plot([x1[0]],[0],'ko')
            Force1Arrow, = ax1.plot([RestingLength+x3[0]+(5/3)*SideBoxHalfWidth,\
                RestingLength + x3[0]+(5/3)*SideBoxHalfWidth\
                +ForceScaling*u1[0]/(max(u1[5000:]+u2[5000:]))],\
                [0,0],'g')
            Force1ArrowHead, = \
                ax1.plot([RestingLength + x3[0]+(5/3)*SideBoxHalfWidth\
                    +ForceScaling*u1[0]/(max(u1[5000:]+u2[5000:]))],[0],'g<')
            Force2Arrow, = ax1.plot([x4[0]-RestingLength-(5/3)*SideBoxHalfWidth\
                -ForceScaling*u2[0]/(max(u1[5000:]+u2[5000:])),\
                x4[0]-RestingLength-(5/3)*SideBoxHalfWidth],[0,0],'r')
            Force2ArrowHead, = \
                ax1.plot([x4[0]-RestingLength-(5/3)*SideBoxHalfWidth\
                    -ForceScaling*u2[0]/(max(u1[5000:]+u2[5000:]))],[0],'r>')
            Force1, = ax3.plot([0],[u1[0]],color = 'g')
            Force2, = ax2.plot([0],[u2[0]],color = 'r')
            Predicted, = ax4.plot(Time,r(Time),'0.60',linestyle='--')
            Actual, = ax4.plot([0],[x1[0]],'b')
            Error, = ax5.plot([0],[ErrorArray[0]],'k')
            Spring1.set_visible(False)
            Spring1_left.set_visible(False)
            Spring1_right.set_visible(False)
            Spring2.set_visible(False)
            Spring2_left.set_visible(False)
            Spring2_right.set_visible(False)
            CenterMass.set_visible(False)
            Mass1.set_visible(False)
            Mass2.set_visible(False)
            PositionArrow.set_visible(False)
            PositionArrowHead.set_visible(False)
            PositionArrowTail.set_visible(False)
            Force1.set_visible(False)
            Force2.set_visible(False)
            Predicted.set_visible(False)
            Actual.set_visible(False)
            Error.set_visible(False)
            Force1Arrow.set_visible(False)
            Force1ArrowHead.set_visible(False)
            Force2Arrow.set_visible(False)
            Force2ArrowHead.set_visible(False)
            return Spring1,Spring1_left,Spring1_right,Spring2,Spring2_left,Spring2_right,CenterMass,Mass1,Mass2,Force1,Force2,Actual,Error,PositionArrow,PositionArrowHead,PositionArrowTail,Force1Arrow,Force1ArrowHead,Force2Arrow,Force2ArrowHead,
        # Every 10th sample per frame; blit=False so all artists redraw.
        ani = animation.FuncAnimation(fig, animate, np.arange(1, len(Time),10), init_func=init,interval=1, blit=False)
        # if save_as_gif:
        # 	ani.save('test.gif', writer='imagemagick', fps=30)
        plt.show()
def plot_multiple_PDF_frames(response,Time,x1,x3,x4,u1,u2,FileName=None):
assert type(response)==bool, "Input must be either True or False."
if FileName != None: assert type(FileName)==str, "FileName must be a string"
if response == True:
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import matplotlib.patches as patches
from scipy import signal
from matplotlib.backends.backend_pdf import PdfPages
import os.path
def return_fig(i):
fig = plt.figure(figsize=(10,8))
ax1 = plt.subplot2grid((3,4),(0,0),colspan=4)
ax2 = plt.subplot2grid((3,4),(1,0),colspan=2)
ax3 = plt.subplot2grid((3,4),(1,2),colspan=2)
ax4 = plt.subplot2grid((3,4),(2,0),colspan=3)
ax5 = plt.subplot2grid((3,4),(2,3))
plt.suptitle("Underdetermined Mass-Spring System",Fontsize=28,y=0.95)
# Model Drawing
IdealBoxScalingFactor = 0.78533496170320571 # Calculated from w = np.pi
CurrentTrialScalingFactor = max([max(x3)-min(x1),max(x1)-min(x4)])
StraightLength = 0.05*CurrentTrialScalingFactor/IdealBoxScalingFactor
RestingLength = max([max(x1)-min(x3),max(x4)-min(x1)])+2*StraightLength\
+0.30*CurrentTrialScalingFactor/IdealBoxScalingFactor
CenterBoxHalfWidth = 0.15*CurrentTrialScalingFactor/IdealBoxScalingFactor
CenterBoxHalfHeight = 0.2*CurrentTrialScalingFactor/IdealBoxScalingFactor
SideBoxHalfWidth = 0.1*CurrentTrialScalingFactor/IdealBoxScalingFactor
SideBoxHalfHeight = 0.075*CurrentTrialScalingFactor/IdealBoxScalingFactor
ForceScaling = 1*CurrentTrialScalingFactor/IdealBoxScalingFactor
Spring_array =\
SideBoxHalfWidth\
*np.abs(signal.sawtooth(5*2*np.pi*np.linspace(0,1,1001)-np.pi/2))\
-(1/2)*SideBoxHalfWidth
Spring1, =\
ax1.plot(np.linspace(x1[i]+CenterBoxHalfWidth+StraightLength,\
RestingLength+x3[i]-SideBoxHalfWidth-StraightLength,1001),\
Spring_array,'k')
Spring1_left, = \
ax1.plot([x1[i]+CenterBoxHalfWidth,x1[i]+CenterBoxHalfWidth+StraightLength],\
[0,0],'k')
Spring1_right, = \
ax1.plot([RestingLength+x3[i]-SideBoxHalfWidth-StraightLength,\
RestingLength+x3[i]-SideBoxHalfWidth],\
[0,0],'k')
Spring2, =\
ax1.plot(np.linspace(-RestingLength+x4[i]+SideBoxHalfWidth+StraightLength,\
x1[i]-CenterBoxHalfWidth-StraightLength,1001),\
Spring_array,'k')
Spring2_left, = \
ax1.plot([x1[i]-CenterBoxHalfWidth-StraightLength,x1[i]-CenterBoxHalfWidth],\
[0,0],'k')
Spring2_right, = \
ax1.plot([-RestingLength+x4[i]+SideBoxHalfWidth,\
-RestingLength+x4[i]+SideBoxHalfWidth+StraightLength],\
[0,0],'k')
ax1.get_xaxis().set_ticks([])
ax1.get_yaxis().set_ticks([])
ax1.set_frame_on(True)
CenterMass = plt.Rectangle((-CenterBoxHalfWidth + x1[i],-CenterBoxHalfHeight),\
2*CenterBoxHalfWidth,2*CenterBoxHalfHeight,Color='#4682b4')
ax1.add_patch(CenterMass)
Mass1 = plt.Rectangle((-SideBoxHalfWidth+RestingLength + x3[i],-SideBoxHalfHeight),\
2*SideBoxHalfWidth,2*SideBoxHalfHeight,Color='#4682b4')
ax1.add_patch(Mass1)
Mass2 = plt.Rectangle((-SideBoxHalfWidth-RestingLength + x4[i],-SideBoxHalfHeight),\
2*SideBoxHalfWidth,2*SideBoxHalfHeight,Color='#4682b4')
ax1.add_patch(Mass2)
PositionArrow, = ax1.plot([x1[i],x1[i]],[0,2*CenterBoxHalfHeight],'k')
PositionArrowHead, = ax1.plot([x1[i]],[2*CenterBoxHalfHeight],'k^')
PositionArrowTail, = ax1.plot([x1[i]],[0],'ko')
Scale = ax1.plot([-1.1*A,1.1*A],\
[2.75*CenterBoxHalfHeight,2.75*CenterBoxHalfHeight],\
'0.60')
Ticks = np.linspace(-A,A,5)
TickHeights = [0.3*CenterBoxHalfHeight,\
0.15*CenterBoxHalfHeight,\
0.3*CenterBoxHalfHeight,\
0.15*CenterBoxHalfHeight,\
0.3*CenterBoxHalfHeight]
[ax1.plot([Ticks[i],Ticks[i]],\
[2.75*CenterBoxHalfHeight-TickHeights[i],2.75*CenterBoxHalfHeight],'0.60') \
for i in range(5)]
Force1Arrow, = ax1.plot([RestingLength+x3[i]+(5/3)*SideBoxHalfWidth,\
RestingLength + x3[i]+(5/3)*SideBoxHalfWidth\
+ForceScaling*u1[i]/(max(u1[5000:]+u2[5000:]))],\
[0,0],'g')
Force1ArrowHead, = \
ax1.plot([RestingLength + x3[i]+(5/3)*SideBoxHalfWidth\
+ForceScaling*u1[i]/(max(u1[5000:]+u2[5000:]))],[0],'g>')
Force2Arrow, =\
ax1.plot([x4[i]-RestingLength-(5/3)*SideBoxHalfWidth\
-ForceScaling*u2[i]/(max(u1[5000:]+u2[5000:])),\
x4[i]-RestingLength-(5/3)*SideBoxHalfWidth],[0,0],'r')
Force2ArrowHead, = \
ax1.plot([x4[i]-RestingLength-(5/3)*SideBoxHalfWidth\
-ForceScaling*u2[i]/(max(u1[5000:]+u2[5000:]))],[0],'r<')
LowerBound = (np.array(x4[5001:])-RestingLength-(5/3)*SideBoxHalfWidth\
-ForceScaling*np.array(u2[5000:])/(max(u1[5000:]+u2[5000:]))).min()
UpperBound = (RestingLength + np.array(x3[5001:])+(5/3)*SideBoxHalfWidth\
+ForceScaling*np.array(u1[5000:])/(max(u1[5000:]+u2[5000:]))).max()
Bound = 1.05*np.array([-LowerBound,UpperBound]).max()
ax1.set_xlim([-Bound,Bound])
ax1.set_ylim([-1.5*CenterBoxHalfHeight,3.25*CenterBoxHalfHeight])
ax1.set_aspect('equal')
#Force 1
Force1, = ax3.plot(Time[:i],u1[:i],color = 'g')
ax3.set_xlim(0,Time[-1])
ax3.set_xticks(list(np.linspace(0,Time[-1],5)))
ax3.set_xticklabels([str(0),'','','',str(Time[-1])])
ax3.set_ylim(0,1.15*max(u1[5000:]+u2[5000:]))
if np.linspace(0,np.floor(1.15*max(u1[5000:]+u2[5000:])),\
int(np.floor(1.15*max(u1[5000:]+u2[5000:])))+1).shape[0] < 5:
ax3.set_yticks(list(np.linspace(0,np.floor(1.15*max(u1[5000:]+u2[5000:])),\
int(np.floor(1.15*max(u1[5000:]+u2[5000:])))+1)))
ax3.set_yticklabels([""]*(int(np.floor(1.15*max(u1[5000:]+u2[5000:])))+1))
else:
NumTicks = np.floor(1.15*max(u1[5000:]+u2[5000:]))
MaxTick = NumTicks - NumTicks%5
TickStep = MaxTick/5
Ticks = list(np.linspace(0,TickStep*5,6))
ax3.set_yticks(Ticks)
ax3.set_yticklabels([""]*len(Ticks))
# ax3.set_yticklabels([str(int(el)) for el in \
# list(np.linspace(0,\
# np.ceil(max(u1[int(len(u1)/2):])*1.1) - \
# np.ceil(max(u1[int(len(u1)/2):])*1.1)%3,4))],\
# fontsize=12)
ax3.spines['right'].set_visible(False)
ax3.spines['top'].set_visible(False)
ax3.set_title("Force 1",fontsize=16,fontweight = 4,color = 'g',y = 0.95)
# ax3.set_xlabel("Time (s)")
#Force 2
Force2, = ax2.plot(Time[:i],u2[:i],color = 'r')
ax2.set_xlim(0,Time[-1])
ax2.set_xticks(list(np.linspace(0,Time[-1],5)))
ax2.set_xticklabels([str(0),'','','',str(Time[-1])])
ax2.set_ylim(0,1.15*max(u1[5000:]+u2[5000:]))
ax2.set_yticks(list(np.linspace(0,np.floor(1.15*max(u1[5000:]+u2[5000:])),\
int(np.floor(1.15*max(u1[5000:]+u2[5000:])))+1)))
ax2.set_yticklabels([str(int(el)) for el in \
list(np.linspace(0,np.floor(1.15*max(u1[5000:]+u2[5000:])),\
int(np.floor(1.15*max(u1[5000:]+u2[5000:])))+1))],\
fontsize=12)
if np.linspace(0,np.floor(1.15*max(u1[5000:]+u2[5000:])),\
int(np.floor(1.15*max(u1[5000:]+u2[5000:])))+1).shape[0] < 5:
ax2.set_yticks(list(np.linspace(0,np.floor(1.15*max(u1[5000:]+u2[5000:])),\
int(np.floor(1.15*max(u1[5000:]+u2[5000:])))+1)))
ax2.set_yticklabels([str(int(el)) for el in \
list(np.linspace(0,np.floor(1.15*max(u1[5000:]+u2[5000:])),\
int(np.floor(1.15*max(u1[5000:]+u2[5000:])))+1))],\
fontsize=12)
else:
NumTicks = np.floor(1.15*max(u1[5000:]+u2[5000:]))
MaxTick = NumTicks - NumTicks%5
TickStep = MaxTick/5
Ticks = list(np.linspace(0,TickStep*5,6))
ax2.set_yticks(Ticks)
ax2.set_yticklabels([str(tick) for tick in Ticks])
ax2.spines['right'].set_visible(False)
ax2.spines['top'].set_visible(False)
ax2.set_title("Force 2",fontsize=16,fontweight = 4,color = 'r',y = 0.95)
# ax2.set_xlabel("Time (s)")
# Trajectory
Predicted, = ax4.plot(Time,r(Time),'0.60',linestyle='--')
Actual, = ax4.plot(Time[:i],x1[:i],'b')
ax4.set_xlim(0,Time[-1])
| |
None,
operator: Union[Operator, str] = None,
active: bool = None,
pcallback: Union['Callback', int] = None,
registered_payload: Union[Payload, str] = None, # corresponding payload's UUID
payload_type: Union[PayloadType, str] = None, # corresponding payload's type
c2_profile: Union[C2Profile, str] = None, # corresponding payload's c2 profile
payload_description: str = None, # corresponding payload's description
integrity_level: int = None,
operation: Union[Operation, str] = None,
encryption_type: str = None,
decryption_key: str = None,
encryption_key: str = None,
tasks: List[Union['Task', Dict]] = None,
id: int = None
):
self._init_callback = init_callback
self._last_checkin = last_checkin
self._user = user
self._host = host
self._pid = pid
self._ip = ip
self._description = description
if isinstance(operator, Operator) or operator is None:
self._operator = operator
else:
self._operator = Operator(username=operation)
self._active = active
if isinstance(pcallback, Callback) or pcallback is None:
self._pcallback = pcallback
elif pcallback == 'null':
self._pcallback = None
else:
self._pcallback = Callback(id=pcallback)
if isinstance(registered_payload, Payload) or registered_payload is None:
self._registered_payload = registered_payload
else:
self._registered_payload = Payload(uuid=registered_payload)
if isinstance(payload_type, PayloadType) or payload_type is None:
self._payload_type = payload_type
else:
self._payload_type = PayloadType(ptype=payload_type)
if isinstance(c2_profile, C2Profile) or c2_profile is None:
self._c2_profile = c2_profile
else:
self._c2_profile = C2Profile(name=c2_profile)
self._payload_description = payload_description
self._integrity_level = integrity_level
if isinstance(operation, Operation) or operation is None:
self._operation = operation
else:
self._operation = Operation(name=operation)
self._encryption_type = encryption_type
self._decryption_key = decryption_key
self._encryption_key = encryption_key
if isinstance(tasks, List):
self._tasks = [Task(**x) if isinstance(x, Dict) else x for x in tasks]
elif tasks is None:
self._tasks = tasks
else:
self._tasks = [Task(**tasks) if isinstance(tasks, Dict) else tasks]
self._id = id
def to_json(self):
r = {}
for k in vars(self):
if getattr(self, k) is not None:
try:
r[k[1:]] = getattr(self, k)
except:
r[k[1:]] = json.dumps(getattr(self, k), default=lambda o: o.to_json())
return r
    def __str__(self):
        """Serialize the callback's non-None attributes as a JSON string."""
        return json.dumps(self.to_json())
    def __eq__(self, other):
        """Overrides the default implementation"""
        # Equality is based solely on the database id; two Callbacks whose ids
        # are both None also compare equal.
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable (Python sets __hash__ to None) -- confirm no caller puts
        # Callbacks in sets or uses them as dict keys.
        if isinstance(other, Callback):
            return self._id == other.id
        return False
    # ------------------------------------------------------------------
    # Property accessors.  Getters return the private backing field; setters
    # for the object-valued fields (operator, pcallback, registered_payload,
    # payload_type, c2_profile, operation, tasks) coerce raw API values
    # (usernames, ids, uuids, dicts) into the corresponding wrapper objects.
    # ------------------------------------------------------------------
    @property
    def init_callback(self) -> str:
        return self._init_callback
    @init_callback.setter
    def init_callback(self, init_callback):
        self._init_callback = init_callback
    @property
    def last_checkin(self) -> str:
        return self._last_checkin
    @last_checkin.setter
    def last_checkin(self, last_checkin):
        self._last_checkin = last_checkin
    @property
    def user(self) -> str:
        return self._user
    @user.setter
    def user(self, user):
        self._user = user
    @property
    def host(self) -> str:
        return self._host
    @host.setter
    def host(self, host):
        self._host = host
    @property
    def pid(self) -> int:
        return self._pid
    @pid.setter
    def pid(self, pid):
        self._pid = pid
    @property
    def ip(self) -> str:
        return self._ip
    @ip.setter
    def ip(self, ip):
        self._ip = ip
    @property
    def description(self) -> str:
        return self._description
    @description.setter
    def description(self, description):
        self._description = description
    @property
    def operator(self) -> Operator:
        return self._operator
    @operator.setter
    def operator(self, operator):
        if isinstance(operator, Operator) or operator is None:
            self._operator = operator
        else:
            self._operator = Operator(username=operator)
    @property
    def active(self) -> bool:
        return self._active
    @active.setter
    def active(self, active):
        self._active = active
    @property
    def pcallback(self) -> 'Callback':
        return self._pcallback
    @pcallback.setter
    def pcallback(self, pcallback):
        if isinstance(pcallback, Callback) or pcallback is None:
            self._pcallback = pcallback
        # the API sends the literal string 'null' for "no parent callback"
        elif pcallback == 'null':
            self._pcallback = None
        else:
            self._pcallback = Callback(id=pcallback)
    @property
    def registered_payload(self) -> Payload:
        return self._registered_payload
    @registered_payload.setter
    def registered_payload(self, registered_payload):
        if isinstance(registered_payload, Payload) or registered_payload is None:
            self._registered_payload = registered_payload
        else:
            self._registered_payload = Payload(uuid=registered_payload)
    @property
    def payload_type(self) -> PayloadType:
        return self._payload_type
    @payload_type.setter
    def payload_type(self, payload_type):
        if isinstance(payload_type, PayloadType) or payload_type is None:
            self._payload_type = payload_type
        else:
            self._payload_type = PayloadType(ptype=payload_type)
    @property
    def c2_profile(self) -> C2Profile:
        return self._c2_profile
    @c2_profile.setter
    def c2_profile(self, c2_profile):
        if isinstance(c2_profile, C2Profile) or c2_profile is None:
            self._c2_profile = c2_profile
        else:
            self._c2_profile = C2Profile(name=c2_profile)
    @property
    def payload_description(self) -> str:
        return self._payload_description
    @payload_description.setter
    def payload_description(self, payload_description):
        self._payload_description = payload_description
    @property
    def integrity_level(self) -> int:
        return self._integrity_level
    @integrity_level.setter
    def integrity_level(self, integrity_level):
        self._integrity_level = integrity_level
    @property
    def operation(self) -> Operation:
        return self._operation
    @operation.setter
    def operation(self, operation):
        if isinstance(operation, Operation) or operation is None:
            self._operation = operation
        else:
            self._operation = Operation(name=operation)
    @property
    def encryption_type(self) -> str:
        return self._encryption_type
    @encryption_type.setter
    def encryption_type(self, encryption_type):
        self._encryption_type = encryption_type
    @property
    def decryption_key(self) -> str:
        return self._decryption_key
    @decryption_key.setter
    def decryption_key(self, decryption_key):
        self._decryption_key = decryption_key
    @property
    def encryption_key(self) -> str:
        return self._encryption_key
    @encryption_key.setter
    def encryption_key(self, encryption_key):
        self._encryption_key = encryption_key
    @property
    def tasks(self) -> List['Task']:
        return self._tasks
    @tasks.setter
    def tasks(self, tasks):
        # accepts a list of Task/dict, None, or a single Task/dict
        if isinstance(tasks, List):
            self._tasks = [Task(**x) if isinstance(x, Dict) else x for x in tasks]
        elif tasks is None:
            self._tasks = tasks
        else:
            self._tasks = [Task(**tasks) if isinstance(tasks, Dict) else tasks]
    @property
    def id(self) -> int:
        return self._id
    @id.setter
    def id(self, id):
        self._id = id
class Task:
    """A single Mythic task issued to a callback.

    Raw API values (command name strings, operator usernames, callback ids,
    response dicts, ...) are coerced into their wrapper objects both in the
    constructor and in the corresponding property setters.
    """
    def __init__(self,
                 command: Union[Command, str] = None,
                 params: str = None,
                 timestamp: str = None,
                 callback: Union[Callback, int] = None,
                 operator: Union[Operator, str] = None,
                 status: str = None,
                 task_status: str = None,  # sometimes this is set to not conflict with overall status message
                 original_params: str = None,
                 comment: str = None,
                 comment_operator: Union[Operator, str] = None,
                 id: int = None,
                 responses: List[Union['Response', Dict]] = None,
                 test_command: bool = None):
        if isinstance(command, Command) or command is None:
            self._command = command
        else:
            self._command = Command(cmd=command)
        self._params = params
        self._timestamp = timestamp
        if isinstance(callback, Callback) or callback is None:
            self._callback = callback
        else:
            self._callback = Callback(id=callback)
        if isinstance(operator, Operator) or operator is None:
            self._operator = operator
        else:
            self._operator = Operator(username=operator)
        self._status = status
        self._original_params = original_params
        # An empty comment from the API means "no comment".
        if comment == "":
            self._comment = None
        else:
            self._comment = comment
        if isinstance(comment_operator, Operator) or comment_operator is None:
            self._comment_operator = comment_operator
        # the API sends the literal string 'null' for "no comment operator"
        elif comment_operator == 'null':
            self._comment_operator = None
        else:
            self._comment_operator = Operator(username=comment_operator)
        self._id = id
        if isinstance(responses, List):
            self._responses = [Response(**x) if isinstance(x, Dict) else x for x in responses]
        elif responses is None:
            self._responses = responses
        else:
            self._responses = [Response(**responses) if isinstance(responses, Dict) else Response(response=responses)]
        self._test_command = test_command
        # task_status, when provided, overrides status.
        if task_status is not None:
            self._status = task_status
    def to_json(self):
        """Return a dict of the non-None attributes, with the leading
        underscore stripped from each private name (values may still be
        nested wrapper objects)."""
        # The previous bare try/except around the assignment was dead code --
        # assigning an already-fetched attribute into a dict cannot raise.
        r = {}
        for k in vars(self):
            value = getattr(self, k)
            if value is not None:
                r[k[1:]] = value
        return r
    def __str__(self):
        """Serialize the task's non-None attributes as a JSON string."""
        return json.dumps(self.to_json())
    def __eq__(self, other):
        """Tasks are equal when their database ids match."""
        # NOTE(review): __eq__ without __hash__ leaves Task unhashable --
        # confirm no caller uses Tasks in sets/dict keys.
        if isinstance(other, Task):
            return self._id == other.id
        return False
    @property
    def command(self) -> Command:
        return self._command
    @command.setter
    def command(self, command):
        if isinstance(command, Command) or command is None:
            self._command = command
        else:
            self._command = Command(cmd=command)
    @property
    def params(self) -> str:
        return self._params
    @params.setter
    def params(self, params):
        self._params = params
    @property
    def timestamp(self) -> str:
        return self._timestamp
    @timestamp.setter
    def timestamp(self, timestamp):
        self._timestamp = timestamp
    @property
    def callback(self) -> Callback:
        return self._callback
    @callback.setter
    def callback(self, callback):
        # Fixed: also accept None (previously `task.callback = None` built a
        # bogus Callback(id=None)), matching __init__ and the other setters.
        if isinstance(callback, Callback) or callback is None:
            self._callback = callback
        else:
            self._callback = Callback(id=callback)
    @property
    def operator(self) -> Operator:
        return self._operator
    @operator.setter
    def operator(self, operator):
        if isinstance(operator, Operator) or operator is None:
            self._operator = operator
        else:
            self._operator = Operator(username=operator)
    @property
    def status(self) -> str:
        return self._status
    @status.setter
    def status(self, status):
        self._status = status
    @property
    def original_params(self) -> str:
        return self._original_params
    @original_params.setter
    def original_params(self, original_params):
        self._original_params = original_params
    @property
    def comment(self) -> str:
        return self._comment
    @comment.setter
    def comment(self, comment):
        # empty string normalizes to "no comment"
        if comment == "":
            self._comment = None
        else:
            self._comment = comment
    @property
    def comment_operator(self) -> Operator:
        return self._comment_operator
    @comment_operator.setter
    def comment_operator(self, comment_operator):
        if isinstance(comment_operator, Operator) or comment_operator is None:
            self._comment_operator = comment_operator
        elif comment_operator == 'null':
            self._comment_operator = None
        else:
            self._comment_operator = Operator(username=comment_operator)
    @property
    def responses(self) -> List['Response']:
        return self._responses
    @responses.setter
    def responses(self, responses):
        # accepts a list of Response/dict, None, or a single Response/dict/str
        if isinstance(responses, List):
            self._responses = [Response(**x) if isinstance(x, Dict) else x for x in responses]
        elif responses is None:
            self._responses = responses
        else:
            self._responses = [Response(**responses) if isinstance(responses, Dict) else Response(response=responses)]
    @property
    def id(self):
        return self._id
    @id.setter
    def id(self, id):
        self._id = id
    @property
    def test_command(self) -> bool:
        return self._test_command
    @test_command.setter
    def test_command(self, test_command):
        self._test_command = test_command
    @property
    def task_status(self) -> str:
        # task_status is an alias for status (see __init__).
        return self._status
    @task_status.setter
    def task_status(self, task_status):
        self._status = task_status
class Response:
    def __init__(self,
                 response: str = None,
                 timestamp: str = None,
                 task: Union[Task, int, Dict] = None,  # JSON string of the corresponding task
                 id: int = None):
        """A single chunk of output produced by a Task.

        `task` may be a Task instance, a dict of Task fields, or a bare
        task id; it is normalized to a Task (or None) on assignment.
        """
        self._response = response
        self._timestamp = timestamp
        if isinstance(task, Task) or task is None:
            self._task = task
        elif isinstance(task, Dict):
            self._task = Task(**task)
        else:
            # assume a bare integer task id
            self._task = Task(id=task)
        self._id = id
def to_json(self):
r = {}
for k in vars(self):
if getattr(self, k) is not None:
try:
| |
of YYYY-MM-DD formatted dates in history (e.g. 2000-01-01,2005-02-24).
Vintage dates are used to download data as it existed on these specified dates in history.
Vintage dates can be specified instead of a real-time period using realtime_start and realtime_end.
Sometimes it may be useful to enter a vintage date that is not a date when the data values were revised. For instance you may want to know the latest available revisions on 2001-09-11 (World Trade Center and Pentagon attacks) or as of a Federal Open Market Committee (FOMC) meeting date.
Entering a vintage date is also useful to compare series on different releases with different release dates.
## Description
https://fred.stlouisfed.org/docs/api/fred/series_observations.html
Get the observations or data values for an economic data series.
## API Request (HTTPS GET)
https://api.stlouisfed.org/fred/series/observations?series_id=GNPCA&api_key=abcdefghijklmnopqrstuvwxyz123456&file_type=json
## API Response
```json
{
"realtime_start": "2013-08-14",
"realtime_end": "2013-08-14",
"observation_start": "1776-07-04",
"observation_end": "9999-12-31",
"units": "lin",
"output_type": 1,
"file_type": "json",
"order_by": "observation_date",
"sort_order": "asc",
"count": 84,
"offset": 0,
"limit": 100000,
"observations": [
{
"realtime_start": "2013-08-14",
"realtime_end": "2013-08-14",
"date": "1929-01-01",
"value": "1065.9"
},
...
]
}
```
## Returns
`pystlouisfed.models.Series`
## Example
```python
>>> fred = FRED(api_key='<KEY>')
>>> fred.series_observations(series_id='GNPCA').head()
realtime_start realtime_end value
date
1929-01-01 2022-02-05 2022-02-05 1120.718
1930-01-01 2022-02-05 2022-02-05 1025.678
1931-01-01 2022-02-05 2022-02-05 958.927
1932-01-01 2022-02-05 2022-02-05 834.769
1933-01-01 2022-02-05 2022-02-05 823.628
```
```python
>>> from matplotlib import pyplot as plt
>>> fred = FRED(api_key='<KEY>')
>>> df = fred.series_observations(series_id='T10Y2Y')
>>> df.plot(y='value', grid=True)
>>> plt.show()
```
.. image:: T10Y2Y.png
"""
if units not in enums.Unit:
raise ValueError('Variable units ({}) is not one of the values: {}'.format(units, ', '.join(map(str, enums.Unit))))
if frequency is not None and frequency not in enums.Frequency:
raise ValueError('Variable frequency ({}) is not one of the values: {}'.format(frequency, ', '.join(map(str, enums.Frequency))))
if aggregation_method not in enums.AggregationMethod:
raise ValueError('Variable aggregation_method ({}) is not one of the values: {}'.format(aggregation_method, ', '.join(map(str, enums.AggregationMethod))))
if output_type not in enums.OutputType:
raise ValueError('Variable output_type ({}) is not one of the values: {}'.format(output_type, ', '.join(map(str, enums.OutputType))))
if realtime_start is not None and realtime_start < date(1776, 7, 4):
raise ValueError('Variable realtime_start ("{}") is before min date 1776-07-04.'.format(realtime_start))
if realtime_start is not None and realtime_end is not None and realtime_start > realtime_end:
raise ValueError('The date set by variable realtime_start ("{}") can not be after the date set by variable realtime_end ("{}").'.format(realtime_start, realtime_end))
df = pd.DataFrame(
self._client.get(
'/fred/series/observations',
'observations',
limit=100000,
series_id=series_id,
realtime_start=realtime_start,
realtime_end=realtime_end,
sort_order=sort_order,
observation_start=observation_start,
observation_end=observation_end,
units=units,
frequency=frequency,
aggregation_method=aggregation_method,
output_type=output_type,
vintage_dates=vintage_dates
)
)
date_columns = ['realtime_start', 'realtime_end', 'date']
if not df.empty:
df[date_columns] = df[date_columns].apply(pd.to_datetime, format='%Y-%m-%d')
df.value = df.value.replace(self.EMPTY_VALUE, np.nan)
df = df.astype(dtype={
'value': 'float'
}).set_index('date')
return df
def series_release(self, series_id: str, realtime_start: date = None, realtime_end: date = None) -> pd.DataFrame:
"""
## Parameters
`series_id`
The id for a series.
`realtime_start`
The start of the real-time period. For more information, see [Real-Time Periods](https://fred.stlouisfed.org/docs/api/fred/realtime_period.html).
`realtime_end`
The end of the real-time period. For more information, see [Real-Time Periods](https://fred.stlouisfed.org/docs/api/fred/realtime_period.html).
## Description
https://fred.stlouisfed.org/docs/api/fred/series_release.html
Get the release for an economic data series.
## API Request (HTTPS GET)
https://api.stlouisfed.org/fred/series/release?series_id=IRA&api_key=abcdefghijklmnopqrstuvwxyz123456&file_type=json
## API Response
```json
{
"realtime_start": "2013-08-14",
"realtime_end": "2013-08-14",
"releases": [
{
"id": 21,
"realtime_start": "2013-08-14",
"realtime_end": "2013-08-14",
"name": "H.6 Money Stock Measures",
"press_release": true,
"link": "http://www.federalreserve.gov/releases/h6/"
}
]
}
```
## Returns
`pandas.DataFrame`
## Example
```python
>>> fred = FRED(api_key='<KEY>')
>>> fred.series_release(series_id='IRA').head()
realtime_start realtime_end name press_release link
id
21 2022-02-05 2022-02-05 H.6 Money Stock Measures True http://www.federalreserve.gov/releases/h6/
```
"""
if realtime_start is not None and realtime_start < date(1776, 7, 4):
raise ValueError('Variable realtime_start ("{}") is before min date 1776-07-04.'.format(realtime_start))
if realtime_start is not None and realtime_end is not None and realtime_start > realtime_end:
raise ValueError('The date set by variable realtime_start ("{}") can not be after the date set by variable realtime_end ("{}").'.format(realtime_start, realtime_end))
df = pd.DataFrame(
self._client.get(
'/fred/series/release',
'releases',
series_id=series_id,
realtime_start=realtime_start,
realtime_end=realtime_end,
)
)
date_columns = ['realtime_start', 'realtime_end']
if not df.empty:
df[date_columns] = df[date_columns].apply(pd.to_datetime, format='%Y-%m-%d')
df = df.astype(dtype={
'name': 'string',
'link': 'string',
'press_release': 'bool'
}).set_index('id')
return df
def series_search(
self,
search_text: str,
search_type: enums.SearchType = enums.SearchType.full_text,
realtime_start: date = None,
realtime_end: date = None,
order_by: enums.OrderBy = None,
sort_order: enums.SortOrder = None,
filter_variable: enums.FilterVariable = None,
filter_value: enums.FilterValue = None,
tag_names: List[str] = None,
exclude_tag_names: List[str] = None
) -> pd.DataFrame:
"""
## Parameters
`search_text`
The words to match against economic data series.
`search_type`
Determines the type of search to perform.
`realtime_start`
The start of the real-time period. For more information, see [Real-Time Periods](https://fred.stlouisfed.org/docs/api/fred/realtime_period.html).
`realtime_end`
The end of the real-time period. For more information, see [Real-Time Periods](https://fred.stlouisfed.org/docs/api/fred/realtime_period.html).
`order_by`
Order results by values of the specified attribute.
`sort_order`
Sort results is ascending or descending order for attribute values specified by order_by.
`filter_variable`
The attribute to filter results by.
`filter_value`
The value of the filter_variable attribute to filter results by.
`tag_names`
A semicolon delimited list of tag names that series match all of.
`exclude_tag_names`
A semicolon delimited list of tag names that series match none of.
## Description
https://fred.stlouisfed.org/docs/api/fred/series_search.html
Get economic data series that match search text.
## API Request (HTTPS GET)
https://api.stlouisfed.org/fred/series/search?search_text=monetary+service+index&api_key=abcdefghijklmnopqrstuvwxyz123456&file_type=json
## API Response
```json
{
"realtime_start": "2017-08-01",
"realtime_end": "2017-08-01",
"order_by": "search_rank",
"sort_order": "desc",
"count": 32,
"offset": 0,
"limit": 1000,
"seriess": [
{
"id": "MSIM2",
"realtime_start": "2017-08-01",
"realtime_end": "2017-08-01",
"title": "Monetary Services Index: M2 (preferred)",
"observation_start": "1967-01-01",
"observation_end": "2013-12-01",
"frequency": "Monthly",
"frequency_short": "M",
"units": "Billions of Dollars",
"units_short": "Bil. of $",
"seasonal_adjustment": "Seasonally Adjusted",
"seasonal_adjustment_short": "SA",
"last_updated": "2014-01-17 07:16:44-06",
"popularity": 34,
"group_popularity": 33,
"notes": "The MSI measure the flow of monetary services received each period by households and firms from their holdings of monetary assets (levels of the indexes are sometimes referred to as Divisia monetary aggregates).\\nPreferred benchmark rate equals 100 basis points plus the largest rate in the set of rates.\\nAlternative benchmark rate equals the larger of the preferred benchmark rate and the Baa corporate bond yield.\\nMore information about the new MSI can be found at\\nhttp://research.stlouisfed.org/msi/index.html."
},
...
]
}
```
## Returns
`pandas.DataFrame`
## Example
```python
>>> fred = FRED(api_key='<KEY>')
>>> fred.series_search(search_text='monetary service index').head()
realtime_start realtime_end title observation_start observation_end frequency frequency_short units units_short seasonal_adjustment seasonal_adjustment_short last_updated popularity group_popularity notes
id
MSIMZMP 2022-02-05 2022-02-05 Monetary Services Index: MZM (preferred) 1967-01-01 2013-12-01 Monthly M Billions of Dollars Bil. of $ Seasonally Adjusted SA 2014-01-17 13:16:42+00:00 20 20 The MSI measure the flow of monetary services ...
MSIM2 2022-02-05 2022-02-05 Monetary Services Index: M2 (preferred) 1967-01-01 2013-12-01 Monthly M Billions of Dollars Bil. of $ Seasonally Adjusted SA 2014-01-17 13:16:44+00:00 16 16 The MSI measure the flow of monetary services ...
MSIALLP 2022-02-05 2022-02-05 Monetary Services Index: ALL Assets (preferred) 1967-01-01 2013-12-01 Monthly M Billions of Dollars Bil. of $ Seasonally Adjusted SA 2014-01-17 13:16:45+00:00 14 14 The MSI measure the flow of monetary services ...
MSIM1P 2022-02-05 2022-02-05 Monetary Services Index: M1 (preferred) 1967-01-01 2013-12-01 Monthly M Billions of Dollars Bil. of $ Seasonally Adjusted SA 2014-01-17 13:16:45+00:00 9 9 The MSI measure the flow of monetary services ...
MSIM2A 2022-02-05 2022-02-05 Monetary Services Index: M2 (alternative) 1967-01-01 2013-12-01 Monthly M Billions of Dollars Bil. of $ Seasonally Adjusted SA 2014-01-17 13:16:44+00:00 8 8 The MSI measure the flow of monetary services ...
```
"""
allowed_orders = [
enums.OrderBy.search_rank,
enums.OrderBy.series_id,
enums.OrderBy.title,
enums.OrderBy.units,
enums.OrderBy.frequency,
enums.OrderBy.seasonal_adjustment,
enums.OrderBy.realtime_start,
enums.OrderBy.realtime_end,
enums.OrderBy.last_updated,
enums.OrderBy.observation_start,
enums.OrderBy.observation_end,
enums.OrderBy.popularity,
enums.OrderBy.group_popularity
]
# If the value of search_type is 'full_text' then the default value of order_by is 'search_rank'.
if search_type == enums.SearchType.full_text and order_by is None:
order_by = enums.OrderBy.search_rank
# If the value of search_type is 'series_id' then the default value of order_by is 'series_id'.
elif search_text == enums.SearchType.series_id and order_by is None:
order_by = enums.OrderBy.series_id
# If order_by is equal to 'search_rank' or 'popularity', then the default value of sort_order is 'desc'. Otherwise, the default sort order is 'asc'.
if order_by == enums.OrderBy.search_rank or order_by == enums.OrderBy.popularity and sort_order is None:
sort_order = enums.SortOrder.desc
else:
sort_order = enums.SortOrder.asc
if order_by not in allowed_orders:
raise ValueError('Variable order_by ({}) is not one of the values: {}'.format(order_by, ', '.join(map(str, allowed_orders))))
if search_type | |
import collections
import dataclasses
import enum
import functools
import itertools
import operator
import random
import re
import typing
from typing import (
Any,
Mapping,
Union,
MutableMapping,
Optional,
TypeVar,
Callable,
NamedTuple,
Type,
Tuple,
Sequence,
)
import matplotlib.pyplot as plt
import numpy as np
import scipy
import scipy.signal
from playground import collector_lib
from playground import midi_lib
from playground import plot_lib
from playground.tests import helper as tests_helper
# some modules need access to sampling_frequency
SAMPLING_FREQUENCY = 44100
OUT_DTYPE = np.float32 # TODO
class ClockSignal(NamedTuple):
    """One tick's worth of time stamps handed to modules.

    Attributes:
        ts: (num_samples, num_channels) array of times in seconds.
        sample_indices: (num_samples,) absolute sample positions.
        sample_rate: samples per second.
    """
    ts: np.ndarray
    sample_indices: np.ndarray
    sample_rate: int

    def zeros(self):
        """An all-zero array with the same shape and dtype as `ts`."""
        return np.zeros_like(self.ts)

    @property
    def shape(self):
        return self.ts.shape

    @property
    def num_samples(self):
        return self.ts.shape[0]

    def add_channel_dim(self, signal):
        """Broadcast a 1-D per-sample signal to (num_samples, num_channels)."""
        assert len(signal.shape) == 1
        return np.broadcast_to(signal[:, np.newaxis], self.shape)


class Clock:
    """Produces successive ClockSignals, advancing an internal sample counter."""

    def __init__(self, num_samples=2048, num_channels=2, sample_rate=44100.):
        self.num_samples = num_samples
        self.num_channels = num_channels
        self.sample_rate = sample_rate
        self.i = 0  # absolute index of the first sample of the next tick
        self.arange = np.arange(self.num_samples, dtype=int)  # cached per-tick offsets

    def __call__(self) -> ClockSignal:
        """Return the current clock signal and advance by num_samples."""
        signal = self.get_current_clock_signal()
        self.i += self.num_samples
        return signal

    def get_current_clock_signal(self):
        """Clock signal for the current position, without advancing."""
        indices = self.i + self.arange
        times = indices / self.sample_rate
        # Broadcast times into (num_samples, num_channels).
        times = times[..., np.newaxis] * np.ones((self.num_channels,))
        return ClockSignal(times, indices, self.sample_rate)

    def get_until(self, num_samples):
        """Clock signal covering samples [0, num_samples), ignoring `i`."""
        indices = np.arange(num_samples, dtype=int)
        times = indices / self.sample_rate
        times = times[..., np.newaxis] * np.ones((self.num_channels,))
        return ClockSignal(times, indices, self.sample_rate)
class State:
    """A mutable value container where `None` means "unset".

    The `__special_name__` marker is what Module._get matches on (via
    getattr) when discovering State instances in a module graph.
    """
    __special_name__ = "State"

    def __init__(self, *, initial_value=None):
        self._value = initial_value

    @property
    def is_set(self) -> bool:
        return self._value is not None

    def get(self):
        """Return the current value (None when unset)."""
        return self._value

    def set(self, value):
        """Store `value` as the current value."""
        self._value = value
T = TypeVar("T")
def _is_module_subclass(instance):
    """True iff some class in the instance's MRO has Module's class name.

    Comparison is by class *name* rather than isinstance -- presumably to
    tolerate Module classes re-created by module reloads; confirm before
    tightening this to a real isinstance check.
    """
    try:
        bases = instance.__class__.__mro__
    except AttributeError:
        return False
    return any(c.__name__ == Module.__name__ for c in bases)
class Module:
    """
    Module :: Signal -> Signal, in particular:
    Module :: [Sampling_Times] -> [Samples]
    Modules can be called on a nparray of sampling times,
    and calculate an output of the same size according to
    the module graph defined in its constructor.
    A subclass should overwrite self.out, respecting its signature.
    """
    def __init__(self):
        # Swapped for a real Collector for the duration of collect_data().
        self.collect = collector_lib.FakeCollector()
    def collect_data(self, num_steps, clock: Clock,
                     warmup_num_steps=100) -> Tuple[np.ndarray, Sequence[Tuple[str, Sequence[Any]]]]:
        """Run the graph for `num_steps` ticks and gather collected values.

        Returns (concatenated ts of all ticks, list of (dotted name, values)).
        Raises ValueError if no submodule collected anything.
        """
        # Warmup.
        for _ in range(warmup_num_steps):
            clock_signal = clock()
            self(clock_signal)
        # Set collect to a useful instance.
        collectors = self._set("collect", factory=collector_lib.Collector)
        # Loop with `ts`
        all_ts = []
        for _ in range(num_steps):
            clock_signal = clock()
            self(clock_signal)
            all_ts.append(clock_signal.ts)
        # Reset back to fake collector
        self._set("collect", factory=collector_lib.FakeCollector)
        non_empty_collectors = {k: collector for k, collector in collectors.items() if collector}
        if not non_empty_collectors:
            raise ValueError("No module collected data!")
        output = []
        for k, collector in non_empty_collectors.items():
            for shift_collector_name, shift_collector_values in collector:
                full_k = (k + "." + shift_collector_name).strip(".")
                output.append((full_k, shift_collector_values))
        return np.concatenate(all_ts, axis=0), output
    def out_mean_int(self, clock_signal: ClockSignal) -> int:
        """Mean of out() rounded to the nearest int."""
        return round(np.mean(self.out(clock_signal)))
    def out_mean_float(self, clock_signal: ClockSignal) -> float:
        """Mean of out() as a float."""
        return np.mean(self.out(clock_signal))
    def out(self, clock_signal: ClockSignal) -> np.ndarray:
        # Subclasses must implement this.  NotImplementedError is a subclass
        # of Exception, so existing broad handlers still catch it.
        raise NotImplementedError("not implemented")
    def __mul__(self, other):
        """Implement module * scalar and module * module."""
        return _MathModule(operator.mul, self, other)
    def __rmul__(self, other):
        """Implement scalar * module."""
        return self * other
    def __truediv__(self, other):
        """Implement module / scalar and module / module"""
        return _MathModule(operator.truediv, self, other)
    def __rtruediv__(self, other):
        """Implement scalar / module.

        Fixed: the original defined __rtruediv__ twice with identical
        bodies; the first definition was dead (overridden by the second).
        """
        return _MathModule(operator.truediv, other, self)
    def __add__(self, other):
        """Implement module + scalar and module + module"""
        return _MathModule(operator.add, self, other)
    def __radd__(self, other):
        return self + other
    def __sub__(self, other):
        """Implement module - scalar and module - module"""
        return _MathModule(operator.sub, self, other)
    def __rsub__(self, other):
        return _MathModule(operator.sub, other, self)
    # __call__ is the public entry point: it delegates to out() and enforces
    # the shape contract (output shape must match the clock signal's shape).
    def __call__(self, clock_signal: ClockSignal) -> np.ndarray:
        out = self.out(clock_signal)
        if "TriggerSource" not in str(type(self)) and out.shape != clock_signal.shape:  # yes, a hack! because I violate the API in TriggerSource!
            raise ValueError(f"Failure at {self.__class__.__name__}. "
                             f"Shapes are {out.shape} vs. {clock_signal.shape}.")
        return out
    def get_params_by_name(self) -> MutableMapping[str, "Parameter"]:
        """All Parameter instances in the module graph, keyed by dotted path."""
        return self._get(Parameter)
    def get_states_by_name(self) -> MutableMapping[str, "State"]:
        """All State instances in the module graph, keyed by dotted path."""
        return self._get(State)
    def _get(self, cls: Type[T], prefix="", include_root=True) -> MutableMapping[str, T]:
        """Recursively find all instances of `cls`."""
        # NOTE(review): this reads cls.__special_name__; State defines it, but
        # Module (as used by _set below) does not here -- confirm Module gains
        # __special_name__ elsewhere or _get(Module, ...) raises AttributeError.
        result = {}
        if prefix == "" and include_root and isinstance(self, cls):
            result[""] = self
        for var_name, var_instance in vars(self).items():
            if getattr(var_instance, "__special_name__", None) == cls.__special_name__:
                full_name = f"{prefix}{var_name}"
                var_instance.name = full_name
                result[full_name] = var_instance
                continue
            # If it's a Module, we go into the recursion.
            if _is_module_subclass(var_instance):
                result.update(var_instance._get(cls, prefix=f"{var_name}."))
        return result
    def _set(self, var_name: str, factory: Callable[[], T]) -> Mapping[str, T]:
        """Recursively set `var_name` to `factory()` for all submodules."""
        modules = self._get(Module, include_root=True)
        outputs = {}
        for k, m in modules.items():
            value = factory()
            setattr(m, var_name, value)
            outputs[k] = value
        return outputs
    # NOTE: We need to take the params and state, as we cannot
    # find it anymore, since we have new classes when we call this!
    def copy_params_and_state_from(self, src_params, src_state):
        _copy(src=src_params, target=self.get_params_by_name())
        _copy(src=src_state, target=self.get_states_by_name())
    def sample(self, num_samples: int):
        """Render the first `num_samples` samples of this module."""
        # Fixed: Clock.get_until is an instance method; the original called it
        # on the class (TypeError: missing `self`).  A default Clock carries
        # the sample_rate/num_channels that get_until needs.
        clock_signal = Clock().get_until(num_samples)
        return self(clock_signal)
class Id(Module):
    """Every Monoid needs a neutral element ;)

    Installs the wrapped module itself as this instance's `out` attribute,
    so Module.__call__ delegates straight to it.
    """
    def __init__(self, inp: Module):
        # Fixed: call super().__init__() so Id carries the `collect`
        # attribute that the Module base class establishes for every
        # other module (kept consistent with all sibling subclasses).
        super().__init__()
        self.out = inp
class _MathModule(Module):
    """Implements the arithmetic operators of Module, see base class.

    Either operand may be a Module (evaluated on each clock signal) or a
    plain number (used as-is).
    """
    def __init__(self, op, left, right):
        super().__init__()
        self.op = op
        self.left = left
        self.right = right

    def out(self, clock_signal: ClockSignal):
        lhs = self._maybe_call(self.left, clock_signal)
        rhs = self._maybe_call(self.right, clock_signal)
        return self.op(lhs, rhs)

    @staticmethod
    def _maybe_call(module_or_number, clock_signal: ClockSignal):
        """Evaluate Modules on the clock; pass numbers through untouched."""
        if isinstance(module_or_number, Module):
            return module_or_number(clock_signal)
        return module_or_number
# TODO: Revisit the whole API.
# The idea of this class is to support modules that do not rely on a clock_signal.
# E.g. Envelope Generators.
class InputLessModule(Module):
    """Base class for modules that produce output without consuming a clock signal."""
    def get_output(self):
        """Return this module's output; subclasses override. Base implementation is a no-op."""
        pass
class GaussEnvelopeGenerator(InputLessModule):
    """Piecewise-linear envelope: attack / overshoot peak / hold / decay / trailing zeros.

    All segment lengths are derived from the `elen` parameter.
    """

    def __init__(self, elen):
        super().__init__()
        self.cache = None
        self.elen: Parameter = elen

    def get_output(self):
        """Build and return the envelope as a single 1-D numpy array."""
        elen = round(self.elen.get())
        segments = (
            np.linspace(0, 2, elen // 4),   # attack: ramp up past 1 to an overshoot of 2
            np.linspace(2, 1, elen // 10),  # peak: settle back down to 1
            np.ones(elen * 2),              # hold at level 1
            np.linspace(1, 0, elen),        # decay to silence
            np.zeros(elen * 4),             # trailing silence
        )
        return np.concatenate(segments, 0)
class EnvelopeGenerator(InputLessModule):
    """Piecewise-linear envelope: attack / slight peak / hold / long decay."""

    def __init__(self, elen):
        super().__init__()
        self.cache = None
        self.elen: Parameter = elen

    def get_output(self):
        """Build and return the envelope as a single 1-D numpy array."""
        elen = round(self.elen.get())
        segments = (
            np.linspace(0, 1, elen),        # attack: 0 -> 1
            np.linspace(1, 1.1, elen // 4), # peak: small overshoot
            np.ones(elen),                  # hold at 1
            np.linspace(1, 0, elen * 4),    # long decay to silence
        )
        return np.concatenate(segments, 0)
class ADSREnvelopeGenerator(InputLessModule):
    """Classic attack/decay/sustain/release envelope with an explicit hold phase.

    All constructor arguments are Parameter-like objects providing `get()`;
    `hold` may be None, in which case a default hold length of 100 is used.
    """

    def __init__(self, attack, decay, sustain, release, hold):
        super().__init__()
        self.attack = attack
        self.decay = decay
        self.sustain = sustain
        self.release = release
        self.hold = hold or Parameter(100)

    def get_output(self):
        """Build and return the ADSR curve as a single 1-D numpy array."""
        len_attack = round(self.attack.get())
        len_decay = round(self.decay.get())
        sustain_level = self.sustain.get()
        len_hold = round(self.hold.get())
        len_release = round(self.release.get())
        segments = (
            np.linspace(0, 1, len_attack),                  # attack: 0 -> 1
            np.linspace(1, sustain_level, len_decay),       # decay: 1 -> sustain level
            np.ones(len_hold) * sustain_level,              # hold at sustain level
            np.linspace(sustain_level, 0, len_release),     # release: sustain -> 0
        )
        return np.concatenate(segments, 0)
@tests_helper.mark_for_testing(value=lambda: 1)
class Constant(Module):
    """A module whose output is its (mutable) value, broadcast to the clock signal's shape."""

    def __init__(self, value):
        self.value = value

    def out(self, clock_signal: ClockSignal):
        # TODO: consider smoothing value changes across the buffer — an
        # earlier (removed) experiment interpolating from the previous value
        # sounded cool.
        return np.broadcast_to(self.value, clock_signal.shape)

    def __repr__(self):
        return f'{self.__class__.__name__}(value={self.value})'

    def set(self, value):
        """Overwrite the constant's value."""
        self.value = value

    def inc(self, diff):
        """Increment value by `diff`."""
        self.set(self.value + diff)

    def get(self):
        """Return the current value."""
        return self.value
@tests_helper.mark_for_testing(value=lambda: 1)
class Parameter(Constant):
__special_name__ = "Parameter"
def __init__(self,
value: float,
lo: Optional[float] = None,
hi: Optional[float] = None,
key: Optional[str] = None,
knob: Optional[midi_lib.KnobConvertible] = None,
shift_multiplier: float = 10,
clip: bool = False):
"""Create Parameter.
NOTES:
- `lo`, `hi` are always used for knobs, but only used for `key` if `clip=True`.
This is because knobs have a range of 0-127, and we use `lo`, `hi` to map to that range.
Args:
value: Initial value
lo: Lowest sane value. Defaults to 0.1 * value.
hi: Highest sane value. Defaults to 1.9 * value.
key: If given, a key on the keyboard that controls this parameter. Example: "f".
knob: If given, a knob on a Midi controller that controls this parameter.
shift_multiplier: Only used if `key` is set, in which case this sets how much
more we change the parameter if SHIFT is pressed on the keyboard.
clip: If True, clip to [lo, hi] in `set`.
"""
super().__init__(value)
if lo is None:
lo = 0.1 * value
if hi is None:
hi = 1.9 * value
if hi < lo:
raise ValueError
self.lo, self.hi = lo, hi
self.span = self.hi - self.lo
self.key = key
self.knob | |
= "%d"
j = scale_val(j)
num = locale.format(fmt, j, grouping=True)
if len(num) >= fieldlen:
num += " "
return num
    def print_header(self):
        """Print the column header row, widening each column to fit its formatted value."""
        pre = ""
        for v, j in zip(self.vals, self.headers):
            l = self.fieldlen(j)
            # Widen the column if the formatted value is wider than the default width.
            l = max(len(self.format_field(v, j, l)), l)
            print >>args.output, pre + j
            pre += "|" + " "*(l - 1)
            self.columns[j] = l
    def flush(self):
        """Emit one formatted data row (re-printing headers every HEADER_INTERVAL rows) and reset buffers."""
        if (self.num_output % HEADER_INTERVAL) == 0:
            self.print_header()
            self.over = 0
        out = ""
        for j, h in zip(self.vals, self.headers):
            fieldlen = self.fieldlen(h)
            num = self.format_field(j, h, fieldlen)
            # Left-align each value into its column width.
            out += "%-*s" % (fieldlen, num)
            self.update_column(h, len(num))
        print >>args.output, out
        self.vals = []
        self.headers = []
        self.num_output += 1
class OutputCSV(Output):
    """CSV version of Output."""
    def __init__(self, csv):
        Output.__init__(self)
        # `csv` is the separator string (e.g. ","), not the csv module.
        self.csv = csv
    def flush(self):
        """Write the header row before the first data row, then one CSV row per interval."""
        if self.num_output == 0:
            print >>args.output, self.csv.join(["timestamp"] + self.headers)
        scaled_vals = map(scale_val, [self.timestamp] + self.vals)
        print >>args.output, self.csv.join(map(str, scaled_vals))
        self.vals = []
        self.headers = []
        self.num_output += 1
# Tokens that mark equation groups in the internal event list:
# [ ] = plain (possibly multiplexed) equation, [[ ]] and { } = real perf event group.
groupings = ('[', ']', '{', '}', '[[', ']]')
# Global Output/OutputCSV instance; assigned elsewhere before measuring starts.
out = None
class PerfRun:
    """Control a perf process"""
    def __init__(self):
        # Popen handle of the running perf process, or None in mock mode.
        self.perf = None
    # for testing purposes
    def mock(self, logfile, evl):
        """Write a fake perf CSV log for the events in `evl` instead of running perf."""
        f = open(logfile, "w")
        for t in range(0, 5):
            num = 10000 + t
            for i in evl:
                if i in groupings:
                    continue
                i = i.replace("{","").replace("}","")
                o = "%s,%s" % (num, i)
                to = "%d," % (t)
                # Two fake sockets (S0/S1) per timestamp, matching --per-socket output.
                print >>f,to + "S0,1,"+o
                print >>f,to + "S1,1,"+o
                num += 10000
        f.close()
    def execute(self, s, logfile, evl):
        """Start perf with command line `s`, or write a mock log when args.mock is set."""
        if not args.quiet:
            # Echo the command, quoting group syntax and hiding the internal --log-fd plumbing.
            l = map(lambda x: "'" + x + "'" if x.find("{") >= 0 else x, s)
            i = l.index('--log-fd')
            del l[i:i+2]
            print >>args.output, " ".join(l)
        if args.mock:
            self.mock(logfile, evl)
            self.perf = None
        else:
            self.perf = subprocess.Popen(s, close_fds=False)
def perf_box(x):
    """Extract the uncore box name from a perf event string, or None for non-uncore events.

    NOTE(review): because `[^/]+` is greedy, the optional instance-number
    group never strips the suffix — "uncore_imc_0/..." yields "imc_0",
    not "imc". Confirm this is the intended behavior.
    """
    match = re.match(r"uncore_([^/]+)(_\d+)?/", x)
    return match.group(1) if match else None
def available_counters(box):
    """Return the number of programmable counters available on an uncore box."""
    return cpu_aux.limited_counters.get(box, cpu_aux.DEFAULT_COUNTERS)
def count_filter(ev):
    """Count how many known filter qualifiers (",<filter>") appear in an event string."""
    return sum(ev.count("," + flt) for flt in cpu_aux.filters)
def is_fixed(x):
    """True if the event uses the fixed clockticks counter."""
    return "/clockticks/" in x
# determine if equation can run in a group
def supports_group(evl, nl):
    """Decide whether the events of one equation can share a single perf event group.

    `evl` holds the perf event strings and `nl` the corresponding event
    names. Returns False if any box would need more counters than it has,
    if more than one filter is in use, or if an event's counter
    constraints are too complicated to reason about.
    """
    # Fixed-counter events never compete for programmable counters.
    evl = filter(lambda x: not is_fixed(x), evl)
    boxes = defaultdict(list)
    bnames = defaultdict(list)
    for j, n in zip(evl, nl):
        box = perf_box(j)
        if not box:
            continue
        boxes[box].append(j)
        bnames[box].append(n)
    for box in boxes:
        # some events have too complicated counter constraints for
        # this simple scheduler to decide if groups work or not. Just do
        # not do groups for them.
        for n in bnames[box]:
            ev = lookup_event(n)
            if complicated_counters(ev):
                return False
        evl = boxes[box]
        filters = sum(map(count_filter, evl))
        # A group may not need more counters than the box has, nor more than one filter.
        if len(evl) > available_counters(box) or filters > 1:
            return False
    return True
def count_box(box):
    """Return how many instances of the given uncore box exist on this system."""
    return len(find_boxes(box))
# run an equation
def evaluate(eq, EV):
    """Evaluate an equation string with Python eval().

    The local names defined below (SAMPLE_INTERVAL, KB/MB/GB, KILO/..., EV,
    ROUND, NUM_R3QPI) look unused but are referenced by the equation text.
    NOTE(review): eval() is only acceptable because equations come from the
    local event database, not from untrusted input — do not feed it
    user-supplied strings.
    """
    SAMPLE_INTERVAL = float(args.interval)*1000000
    ROUND = lambda x: round(x)
    KB = 1024
    MB = 1024*KB
    GB = 1024*MB
    KILO = 1000
    MEGA = 1000*KILO
    GIGA = 1000*MEGA
    NUM_R3QPI = count_box("r3qpi") # XXX add generic function
    dbg("evaluate", eq)
    try:
        return eval(eq)
    except NameError:
        # Equation referenced an event that was not collected.
        return "#EVAL"
    except ZeroDivisionError:
        return 0.0
def is_error(x):
    """True for error markers such as "#NA"/"#EVAL" (strings beginning with '#')."""
    if not is_str(x):
        return False
    return x.startswith("#")
# read perf output and output results
def gen_res(evl, res, evp, equations, evnames, timestamp):
    """Walk the event token stream in lockstep with perf's results and emit output rows.

    `evl` is the token list (events plus group markers), `res` the raw
    counter values, `evp` the event names as perf reported them,
    `equations` the equation strings still to evaluate, `evnames` the
    output column names.
    """
    dbg("evl", evl)
    dbg("res", res)
    dbg("evp", evp)
    dbg("equations", equations)
    cur_eq = None
    eql = equations
    for j in evl:
        if j == '[' or j == '[[':
            # Start of an equation group: collect its event values.
            cur_eq = eql[0]
            eq_events = dict()
        elif j == ']' or j == ']]':
            # End of group: propagate any event error, otherwise evaluate.
            r = None
            for x in eq_events:
                if is_error(eq_events[x]):
                    r = eq_events[x]
                    break
            if r is None:
                # Equations containing '/' work in floats; others in integers
                # (Python 2 `long`).
                if '/' in equations[0]:
                    EV = lambda x, n: float(eq_events[x])
                else:
                    EV = lambda x, n: long(eq_events[x])
                r = evaluate(equations[0], EV)
            dbg("result", r)
            out.out(evnames[0], r, timestamp)
            equations = equations[1:]
            cur_eq = None
            evnames = evnames[1:]
        elif cur_eq:
            # Event inside the current equation group.
            assert evp[0] == j
            eq_events[evp[0]] = res[0]
            res = res[1:]
            evp = evp[1:]
        elif j in ('{', '}'):
            continue
        else:
            # Plain event outside any equation: print its raw value.
            assert evp[0] == j
            if re.match(r"[0-9]+", res[0]):
                r = int(res[0])
            else:
                r = res[0]
            out.out(evnames[0], r, timestamp)
            evnames = evnames[1:]
            res = res[1:]
            evp = evp[1:]
    out.flush()
    # Every result and event name must have been consumed exactly once.
    assert len(res) == 0
    assert len(evp) == 0
# replace internal [] equation groups with perf format
def gen_events(evl):
    """Translate the internal event token list into a perf -e event string.

    Plain-group markers '[' / ']' are dropped, perf-group markers
    '[[' / ']]' become '{' / '}', and commas are inserted between tokens
    exactly where perf expects them.
    """
    group_tokens = ('[', ']', '{', '}', '[[', ']]')
    translate = {'[[': '{', ']]': '}'}
    result = ""
    prev = ""
    for token in evl:
        if token in ('[', ']'):
            continue  # plain groups have no perf syntax
        token = translate.get(token, token)
        sep = ""
        if prev:
            prev_is_marker = prev in group_tokens
            cur_is_marker = token in group_tokens
            # Comma between two events or two markers...
            if prev_is_marker == cur_is_marker:
                sep = ","
            # ...but never right after an opening brace...
            if prev in ('[', '{') and not cur_is_marker:
                sep = ""
            # ...and always after a closing brace or before an opening one.
            if prev in (']', '}'):
                sep = ","
            if not prev_is_marker and token in ('[', '{'):
                sep = ","
        result += sep + token
        prev = token
    return result
def concat(d):
    """Concatenate the dict's list values in sorted-key order into one list."""
    combined = []
    for key in sorted(d):
        combined.extend(d[key])
    return combined
def gennames(names, sockets):
    """Prefix every name with each socket id ("S0-name", ...), sockets in sorted order.

    An empty socket id (non-per-socket mode) leaves the names unprefixed.
    """
    result = []
    for sock in sorted(sockets):
        prefix = sock + "-" if sock != "" else ""
        result.extend(prefix + name for name in names)
    return result
def check_per_socket(s, warned):
    """Warn once if perf's --per-socket socket ids are not the expected S0..S(n-1) sequence.

    Returns the updated `warned` flag so the caller only warns a single time.
    NOTE(review): comparing a sorted list against range() only works on
    Python 2; under Python 3 this comparison is always unequal.
    """
    if (not warned and
        sorted(map(lambda x: int(x[1:]), s)) != range(0, len(s))):
        ucmsg.warning("perf --per-socket appears broken. Please update perf.")
        ucmsg.warning("Data on socket > 0 will be likely incorrect.")
        return True
    return warned
# Map perf's error placeholders onto the short "#.."-style markers used in output.
perf_errors = {
    "<not supported>": "#NS",
    "<not counted>": "#NC",
}
# run perf and output results
def measure(evl, argl, equations, evnames):
    """Run perf stat for the event list `evl` and stream its CSV output into results.

    `argl` is appended to the perf command line; `equations`/`evnames`
    describe how to post-process and label the counters. Results are
    grouped per socket and flushed whenever the timestamp changes.
    """
    warned = False
    all_events = gen_events(evl)
    ## use a pty because perf doesn't do enough fflush() otherwise
    outp, inp = pty.openpty()
    logfile = "ulog.%d" % (os.getpid())
    run = PerfRun()
    run.execute([perf, "stat", "--log-fd", "%d" % (inp), "-e", all_events] + argl, logfile, evl)
    prev_timestamp = None
    evp = defaultdict(list)
    res = defaultdict(list)
    socket = ""
    try:
        if args.mock:
            f = open(logfile, 'r')
        else:
            f = os.fdopen(outp, 'r')
            # Close our copy of the slave end so we see EOF when perf exits.
            os.close(inp)
        while True:
            try:
                # force line-by-line buffering
                l = f.readline()
                if not l:
                    break
            except (KeyboardInterrupt, exceptions.IOError):
                break
            l = l.strip()
            dbg("perf", l)
            if l.startswith('#') or l == "":
                continue
            if per_socket:
                # --per-socket rows carry "timestamp,socket,ncpus," first.
                ts, socket, _, rest = l.split(",", 3)
                l = ts + "," + rest
            # uncore// contains commas!
            m = re.match(r"([0-9.]+),([0-9]+|<.*>),?,(.*)$", l)
            if not m:
                print "PERF-UNREADABLE", l,
                continue
            timestamp = m.group(1)
            if timestamp != prev_timestamp:
                # New interval: flush everything collected for the previous one.
                if per_socket and not args.quiet:
                    warned = check_per_socket(res.keys(), warned)
                if evp:
                    num = len(res)
                    gen_res(evl*num, concat(res), concat(evp),
                            equations*num, gennames(evnames, res.keys()), timestamp)
                res = defaultdict(list)
                evp = defaultdict(list)
                prev_timestamp = timestamp
            r = m.group(2)
            if r.startswith("<"):
                # Translate perf's "<not counted>"-style markers.
                if r in perf_errors:
                    r = perf_errors[r]
                else:
                    r = "#NA"
            res[socket].append(r)
            p = m.group(3)
            # Strip trailing junk after the event's closing slash.
            if p.find("/,") >= 0:
                p = re.sub(r"/,.*", "", p) + "/"
            if p.startswith(","):
                p = p[1:]
            evp[socket].append(p)
        f.close()
        if args.mock:
            os.remove(logfile)
    except exceptions.IOError:
        # ptys always seem to end with EIO
        pass
    # Flush the final (partial) interval.
    if evp:
        num = len(res)
        gen_res(evl*num, concat(res), concat(evp), equations*num,
                gennames(evnames, res.keys()), timestamp)
    if run.perf:
        run.perf.wait()
# Monotonically increasing placeholder returned while collecting events, so
# that trial equation evaluation never divides by zero.
dummy_count = 1000
def ev_append(ovl, x, nl, n):
    """EV() callback used during event collection.

    Records event string `x` into `ovl` and its name `n` into `nl`
    (deduplicated), then returns a fresh dummy value.
    """
    if x not in ovl:
        ovl.append(x)
    if n not in nl:
        nl.append(n)
    global dummy_count
    dummy_count += 1
    return dummy_count # dummy value to avoid division by zero
class WarnOnce:
    """Emit a warning message at most once."""

    def __init__(self):
        self.warned = False

    def warn(self, msg):
        """Forward `msg` to ucmsg.warning, but only on the first call."""
        if self.warned:
            return
        ucmsg.warning(msg)
        self.warned = True
def add_group(x, nl, in_group, mw):
    """Wrap an equation's event list in group markers.

    Uses a real perf group ('[[' ... ']]') when the events can be
    scheduled together, otherwise a plain group ('[' ... ']') that may
    multiplex (warning once via `mw`).
    """
    if not x:
        return x
    if len(x) > 1 and not in_group:
        if supports_group(x, nl):
            return ['[['] + x + [']]']
        mw.warn("Equation will multiplex and may produce inaccurate results (see manual)")
    return ['['] + x + [']']
# convert an equation to perf form
def convert_equation(ev, qual, in_group, quiet=False):
    """Convert one equation event into its grouped perf event list plus equation strings.

    Returns (flat event list with group markers, list of equation strings).
    """
    mw = WarnOnce()
    nnl = []
    evl = []
    equations = []
    eql = format_equation(ev, qual, quiet)
    for p in eql:
        ovl = []
        nl = []
        # run equation to collect events (ev_append records them and
        # returns dummy values so the evaluation itself is discarded)
        r = evaluate(p, lambda x, n: ev_append(ovl, x, nl, n))
        if is_error(r) and not args.quiet:
            print >>sys.stderr, "Cannot evaluate equation", ev["Equation"]
        nnl.append(nl)
        evl.append(ovl)
        equations.append(p)
    flat_eql = []
    for j, nl in zip(evl, nnl):
        flat_eql += add_group(j, nl, in_group, mw)
    return flat_eql, equations
# Events perf understands natively; everything else goes through event lookup.
standard_events = ("cycles", "ref-cycles", "instructions")
# convert a single event to perf form
def convert_one(evn, evl, evnames, equations, qual, in_group):
ev = lookup_event(evn)
if not ev:
print >>sys.stderr, "unknown event", evn
sys.exit(1)
if "Equation" in ev:
nvl, neql = convert_equation(ev, qual, in_group)
equations += neql
num = len(neql)
| |
<filename>tests/frontend/workspace.py
#
# Copyright (C) 2018 Codethink Limited
# Copyright (C) 2018 Bloomberg Finance LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# Pylint doesn't play well with fixtures and dependency injection from pytest
# pylint: disable=redefined-outer-name
import os
import stat
import shutil
import tempfile
import pytest
from buildstream._testing import create_repo, ALL_REPO_KINDS
from buildstream._testing import cli # pylint: disable=unused-import
from buildstream import _yaml
from buildstream.exceptions import ErrorDomain, LoadErrorReason
from buildstream._workspaces import BST_WORKSPACE_FORMAT_VERSION
from tests.testutils import create_artifact_share, create_element_size, wait_for_cache_granularity
repo_kinds = ALL_REPO_KINDS
# Project directory
DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "project",)
BASE_FILENAME = os.path.basename(__file__)
class WorkspaceCreator:
    """Test helper that writes workspace test elements and opens workspaces via the CLI."""
    def __init__(self, cli, tmpdir, datafiles, project_path=None):
        self.cli = cli
        self.tmpdir = tmpdir
        self.datafiles = datafiles
        if not project_path:
            project_path = str(datafiles)
        else:
            # A custom project path gets its own copy of the test project.
            shutil.copytree(str(datafiles), project_path)
        self.project_path = project_path
        self.bin_files_path = os.path.join(project_path, "files", "bin-files")
        self.workspace_cmd = os.path.join(self.project_path, "workspace_cmd")
    def create_workspace_element(self, kind, suffix="", workspace_dir=None, element_attrs=None):
        """Create one import element of repo `kind` and return (element_name, element_path, workspace_dir)."""
        element_name = "workspace-test-{}{}.bst".format(kind, suffix)
        element_path = os.path.join(self.project_path, "elements")
        if not workspace_dir:
            workspace_dir = os.path.join(self.workspace_cmd, element_name)
            if workspace_dir[-4:] == ".bst":
                workspace_dir = workspace_dir[:-4]
        # Create our repo object of the given source type with
        # the bin files, and then collect the initial ref.
        # And ensure we store it in a suffix-specific directory, to avoid clashes
        # if using multiple times the same kind element here.
        repo = create_repo(kind, str(self.tmpdir), "repo-for-{}".format(element_name))
        with tempfile.TemporaryDirectory() as tempdir:
            dst_repo = os.path.join(tempdir, "repo")
            shutil.copytree(self.bin_files_path, dst_repo)
            # Touch a file with the element name in, to allow validating that this
            # is the correct repo
            # pylint: disable=consider-using-with
            open(os.path.join(dst_repo, element_name), "a", encoding="utf-8").close()
            ref = repo.create(os.path.join(tempdir, "repo"))
        # Write out our test target
        element = {"kind": "import", "sources": [repo.source_config(ref=ref)]}
        if element_attrs:
            element = {**element, **element_attrs}
        _yaml.roundtrip_dump(element, os.path.join(element_path, element_name))
        return element_name, element_path, workspace_dir
    def create_workspace_elements(self, kinds, suffixs=None, workspace_dir_usr=None, element_attrs=None):
        """Create one element per kind and return a list of (element_name, workspace_dir) tuples."""
        element_tuples = []
        if suffixs is None:
            suffixs = ["",] * len(kinds)
        else:
            assert len(suffixs) == len(kinds)
        for suffix, kind in zip(suffixs, kinds):
            element_name, _, workspace_dir = self.create_workspace_element(
                kind, suffix, workspace_dir_usr, element_attrs
            )
            element_tuples.append((element_name, workspace_dir))
        # Assert that a fetch is needed
        states = self.cli.get_element_states(self.project_path, [e for e, _ in element_tuples])
        assert not any(states[e] != "fetch needed" for e, _ in element_tuples)
        return element_tuples
    def open_workspaces(self, kinds, suffixs=None, workspace_dir=None, element_attrs=None, no_checkout=False):
        """Create the elements, run `bst workspace open` on them, and verify the result."""
        element_tuples = self.create_workspace_elements(kinds, suffixs, workspace_dir, element_attrs)
        os.makedirs(self.workspace_cmd, exist_ok=True)
        # Now open the workspace, this should have the effect of automatically
        # tracking & fetching the source from the repo.
        args = ["workspace", "open"]
        if no_checkout:
            args.append("--no-checkout")
        if workspace_dir is not None:
            assert len(element_tuples) == 1, "test logic error"
            _, workspace_dir = element_tuples[0]
            args.extend(["--directory", workspace_dir])
        args.extend([element_name for element_name, workspace_dir_suffix in element_tuples])
        result = self.cli.run(cwd=self.workspace_cmd, project=self.project_path, args=args)
        result.assert_success()
        if not no_checkout:
            # Assert that we are now buildable because the source is now cached.
            states = self.cli.get_element_states(self.project_path, [e for e, _ in element_tuples])
            assert not any(states[e] != "buildable" for e, _ in element_tuples)
            # Check that the executable hello file is found in each workspace
            for _, workspace in element_tuples:
                filename = os.path.join(workspace, "usr", "bin", "hello")
                assert os.path.exists(filename)
        return element_tuples
def open_workspace(
    cli,
    tmpdir,
    datafiles,
    kind,
    suffix="",
    workspace_dir=None,
    project_path=None,
    element_attrs=None,
    no_checkout=False,
):
    """Convenience wrapper: create and open exactly one workspace.

    Returns (element_name, project_path, workspace_dir).
    """
    creator = WorkspaceCreator(cli, tmpdir, datafiles, project_path)
    opened = creator.open_workspaces((kind,), (suffix,), workspace_dir, element_attrs, no_checkout)
    assert len(opened) == 1
    element_name, workspace = opened[0]
    return element_name, creator.project_path, workspace
@pytest.mark.datafiles(DATA_DIR)
def test_open_multi(cli, tmpdir, datafiles):
    """Opening one workspace per repo kind creates each workspace with its element inside."""
    creator = WorkspaceCreator(cli, tmpdir, datafiles)
    opened = creator.open_workspaces(repo_kinds)
    for (element_name, workspace), kind in zip(opened, repo_kinds):
        assert kind in element_name
        assert element_name in os.listdir(workspace)
@pytest.mark.skipif(os.geteuid() == 0, reason="root may have CAP_DAC_OVERRIDE and ignore permissions")
@pytest.mark.datafiles(DATA_DIR)
def test_open_multi_unwritable(cli, tmpdir, datafiles):
    """Opening workspaces into an unwritable directory must fail and name the unopened elements."""
    workspace_object = WorkspaceCreator(cli, tmpdir, datafiles)
    element_tuples = workspace_object.create_workspace_elements(repo_kinds, repo_kinds)
    os.makedirs(workspace_object.workspace_cmd, exist_ok=True)
    # Now open the workspace, this should have the effect of automatically
    # tracking & fetching the source from the repo.
    args = ["workspace", "open"]
    args.extend([element_name for element_name, workspace_dir_suffix in element_tuples])
    cli.configure({"workspacedir": workspace_object.workspace_cmd})
    cwdstat = os.stat(workspace_object.workspace_cmd)
    try:
        # Drop write permission so the workspace checkout fails.
        os.chmod(workspace_object.workspace_cmd, cwdstat.st_mode - stat.S_IWRITE)
        result = workspace_object.cli.run(project=workspace_object.project_path, args=args)
    finally:
        # Using this finally to make sure we always put things back how they should be.
        os.chmod(workspace_object.workspace_cmd, cwdstat.st_mode)
    result.assert_main_error(ErrorDomain.STREAM, "workspace-directory-failure")
    # Normally we avoid checking stderr in favour of using the machine readable result.assert_main_error
    # But Tristan was very keen that the names of the elements left needing workspaces were present in the output
    assert " ".join([element_name for element_name, workspace_dir_suffix in element_tuples[1:]]) in result.stderr
@pytest.mark.datafiles(DATA_DIR)
def test_open_multi_with_directory(cli, tmpdir, datafiles):
    """--directory cannot be combined with multiple elements; the open must fail."""
    workspace_object = WorkspaceCreator(cli, tmpdir, datafiles)
    element_tuples = workspace_object.create_workspace_elements(repo_kinds, repo_kinds)
    os.makedirs(workspace_object.workspace_cmd, exist_ok=True)
    # Now open the workspace, this should have the effect of automatically
    # tracking & fetching the source from the repo.
    args = ["workspace", "open"]
    args.extend(["--directory", "any/dir/should/fail"])
    args.extend([element_name for element_name, workspace_dir_suffix in element_tuples])
    result = workspace_object.cli.run(
        cwd=workspace_object.workspace_cmd, project=workspace_object.project_path, args=args
    )
    result.assert_main_error(ErrorDomain.STREAM, "directory-with-multiple-elements")
@pytest.mark.datafiles(DATA_DIR)
def test_open_defaultlocation(cli, tmpdir, datafiles):
    """The 'workspacedir' config option determines where a workspace is opened by default."""
    workspace_object = WorkspaceCreator(cli, tmpdir, datafiles)
    # pylint: disable=unbalanced-tuple-unpacking
    ((element_name, workspace_dir),) = workspace_object.create_workspace_elements(["git"], ["git"])
    os.makedirs(workspace_object.workspace_cmd, exist_ok=True)
    # Now open the workspace, this should have the effect of automatically
    # tracking & fetching the source from the repo.
    args = ["workspace", "open"]
    args.append(element_name)
    # In the other tests we set the cmd to workspace_object.workspace_cmd with the optional
    # argument, cwd for the workspace_object.cli.run function. But here we set the default
    # workspace location to workspace_object.workspace_cmd and run the cli.run function with
    # no cwd option so that it runs in the project directory.
    cli.configure({"workspacedir": workspace_object.workspace_cmd})
    result = workspace_object.cli.run(project=workspace_object.project_path, args=args)
    result.assert_success()
    assert cli.get_element_state(workspace_object.project_path, element_name) == "buildable"
    # Check that the executable hello file is found in the workspace
    # even though the cli.run function was not run with cwd = workspace_object.workspace_cmd
    # the workspace should be created in there as we used the 'workspacedir' configuration
    # option.
    filename = os.path.join(workspace_dir, "usr", "bin", "hello")
    assert os.path.exists(filename)
@pytest.mark.datafiles(DATA_DIR)
def test_open_defaultlocation_exists(cli, tmpdir, datafiles):
    """Opening into the default location must fail cleanly when a file already occupies it."""
    workspace_object = WorkspaceCreator(cli, tmpdir, datafiles)
    # pylint: disable=unbalanced-tuple-unpacking
    ((element_name, workspace_dir),) = workspace_object.create_workspace_elements(["git"], ["git"])
    os.makedirs(workspace_object.workspace_cmd, exist_ok=True)
    # Pre-create a plain file where the workspace directory would go.
    with open(workspace_dir, "w", encoding="utf-8") as fl:
        fl.write("foo")
    # Now open the workspace, this should have the effect of automatically
    # tracking & fetching the source from the repo.
    args = ["workspace", "open"]
    args.append(element_name)
    # In the other tests we set the cmd to workspace_object.workspace_cmd with the optional
    # argument, cwd for the workspace_object.cli.run function. But here we set the default
    # workspace location to workspace_object.workspace_cmd and run the cli.run function with
    # no cwd option so that it runs in the project directory.
    cli.configure({"workspacedir": workspace_object.workspace_cmd})
    result = workspace_object.cli.run(project=workspace_object.project_path, args=args)
    result.assert_main_error(ErrorDomain.STREAM, "bad-directory")
@pytest.mark.datafiles(DATA_DIR)
def test_open_track(cli, tmpdir, datafiles):
    """Smoke test: opening a workspace for a git element succeeds."""
    open_workspace(cli, tmpdir, datafiles, "git")
@pytest.mark.datafiles(DATA_DIR)
def test_open_noclose_open(cli, tmpdir, datafiles):
    """Opening an already-open workspace without closing it first must fail."""
    element_name, project, _ = open_workspace(cli, tmpdir, datafiles, "git")
    second_open = cli.run(project=project, args=["workspace", "open", element_name])
    second_open.assert_main_error(ErrorDomain.STREAM, None)
@pytest.mark.datafiles(DATA_DIR)
def test_open_force(cli, tmpdir, datafiles):
    """After closing a workspace whose directory still exists, --force reopen succeeds."""
    element_name, project, workspace = open_workspace(cli, tmpdir, datafiles, "git")
    # Close the workspace; this leaves the directory behind on disk.
    close_result = cli.run(project=project, args=["workspace", "close", element_name])
    close_result.assert_success()
    assert os.path.exists(workspace)
    # Reopening over the leftover directory works with --force.
    reopen_result = cli.run(project=project, args=["workspace", "open", "--force", "--directory", workspace, element_name])
    reopen_result.assert_success()
@pytest.mark.datafiles(DATA_DIR)
def test_open_force_open(cli, tmpdir, datafiles):
    """--force allows reopening into the directory left behind by a closed workspace."""
    element_name, project, workspace = open_workspace(cli, tmpdir, datafiles, "git")
    close_result = cli.run(project=project, args=["workspace", "close", element_name])
    close_result.assert_success()
    # The workspace directory survives the close.
    assert os.path.exists(workspace)
    reopen_result = cli.run(project=project, args=["workspace", "open", "--force", "--directory", workspace, element_name])
    reopen_result.assert_success()
# Regression test for #1086.
@pytest.mark.datafiles(DATA_DIR)
def test_open_force_open_no_checkout(cli, tmpdir, datafiles):
element_name, project, workspace = open_workspace(cli, tmpdir, datafiles, "git")
hello_path = os.path.join(workspace, "hello.txt")
# Assert the workspace dir exists
assert os.path.exists(workspace)
# Create a new file in the workspace
with open(hello_path, "w", encoding="utf-8") as f:
f.write("hello")
# Now open the workspace again with --force and --no-checkout
result = cli.run(
project=project, args=["workspace", "open", "--force", "--no-checkout", "--directory", workspace, element_name]
)
result.assert_success()
# Ensure that our files were not overwritten
assert os.path.exists(hello_path)
with open(hello_path, encoding="utf-8") as f:
assert | |
<reponame>The-Yak-Collective/gigayak
#simple gig manager using sqlite3
#database has one table: gigs and that table has the following fields:
# gigid INTEGER PRIMARY KEY - number of gig entry in table
# creatorid text - discord id of creator
# contents text - the contents of the gig - no internal formatting. consider showagig
# filled int - 0= unfilled 1= yes filled
# createdat int - timestamp of when gig created
# filledat int - timestamp of filling
#using same scheme, also support project bot, help wanted bot, agenda bot and newsletter bot
#use GIGAYAK_DISCORD_KEY as an env variable - the key for discord bot. needs read/write permission to channels
from discord.ext import tasks, commands
import discord
import asyncio
import os
import re
import subprocess
import time
import datetime
from dotenv import load_dotenv
import sqlite3 #consider , "check_same_thread = False" on sqlite.connect()
import logging
# Discord channel id of the gig channel; resolved to a channel object in on_ready.
GIG_CHAN=810196195246211092
gig_chan=0
from discord_gigayak import *
HOME_DIR="/home/yak/robot/gigayak/"
USER_DIR="/home/yak/"
conn=sqlite3.connect(HOME_DIR+'gigdatabase.db') #the connection should be global.
db_c = conn.cursor()
# Load GIGAYAK_DISCORD_KEY and friends from the bot user's .env file.
load_dotenv(USER_DIR+'.env')
@tasks.loop(seconds=3600.0*24) #once a day kill old gigs
async def test_tick():
    """Daily housekeeping: close gigs still unfilled after 30 days.

    For each stale gig, DM the creator, then mark the gig filled with a
    reason so it drops off the open-gig list; finally refresh the channel.
    """
    print("running tick")
    reason="went stale after 30 days"
    nowish=int(time.time())
    rows=db_c.execute('select * from gigs where filled=0 and createdat<?',(nowish-30*24*3600,)).fetchall()
    for row in rows:
        print("i would close gig id",row[0])
        try:
            tellto=await dmchan(int(row[1]),0)
            # BUG FIX: splitsend is a coroutine (awaited everywhere else in
            # this file) but was called here without await, so the stale-gig
            # notification DM was never actually sent.
            await splitsend(tellto,("closed gig id {} because it went stale after 30 days:\n"+row[2]).format(row[0]),False)
            db_c.execute('''UPDATE gigs set filled=1, filledat= ?, reason= ? where gigid=? ''',(int(nowish),reason,row[0]))
            conn.commit()
        except Exception as ex:
            # Best effort: a failed DM must not stop the cleanup loop.
            print("unable to notify re: ", row)
            print("because of: ", ex)
            print(logging.traceback.format_exc())
    if len(rows)>0:
        await update_gigchannel()
@client.event #needed since it takes time to connect to discord
async def on_ready():
    """Once connected: verify the DB schema, resolve the gig channel, refresh it, start the daily loop."""
    global gig_chan
    print('We have logged in as {0.user}'.format(client), client.guilds)
    checkon_database()
    gig_chan=client.guilds[0].get_channel(GIG_CHAN)
    await update_gigchannel()
    test_tick.start()
    return
def allowed(x,y): #is x allowed to play with item created by y
    """Permission check: may discord user `x` modify an item created by user `y`?

    True when they are the same user, or when `x` holds a privileged role.
    Callers pass y=0 to force the role check (0 never equals a real id).
    """
    if x==y: #same person. setting one to zero will force role check
        return True
    # BUG FIX: this previously looked up `message.author.id`, but `message`
    # is a local of on_message and undefined here, so reaching the role
    # check raised NameError. Look up the requesting user `x` instead —
    # every call site passes message.author.id as `x`.
    member=client.guilds[0].get_member(x)
    role_names=[role.name for role in member.roles]
    if 'yakshaver' in role_names or 'yakherder' in role_names: #for now, both roles are same permissions
        return True
    return False
@client.event
async def on_message(message):
    """Dispatch every incoming message to the list bots and the project-voting commands."""
    if message.author == client.user:
        return #ignore own messages to avoid loops
    dmtarget=await dmchan(message.author.id,message.channel) #build backchannel to user, so we do not answer in general channel
    #three bots that manage general lists
    #gigabot
    await try_bot("gig",message)
    #wantedbot
    await try_bot("wanted",message)
    #newsitem bot
    await try_bot("newsitem",message)
    #agendabot - agenda per channel
    await try_chan_bot('agenda',message)
    #agendabot - readinglist per channel
    await try_chan_bot('reading',message)
    #jagendalist
    if message.content.startswith("$jagendalist") or message.content.startswith("/jagendalist"):
        cont=message.content.split(maxsplit=2)
        chan_num=cont[1][2:-1]  # strip the "<#" and ">" of the channel mention
        w="agenda"
        s='list of {} items in this channel:\n\n'.format(w)+perchanlist(int(chan_num),w)
        await splitsend(message.channel,s,False)
        return
    #projbot - vote on projects
    if message.content.startswith("$projtest") or message.content.startswith("/projtest"):
        s='this is a test response from projbot'
        await splitsend(message.channel,s,False)
        return
    if message.content.startswith("$projlist") or message.content.startswith("/projlist"):
        s='list of open projects:\n\n'+projlist()
        await splitsend(message.channel,s,False)
        return
    if message.content.startswith("$projhelp") or message.content.startswith("/projhelp"):
        # BUG FIX: the "/proj-" line previously said "upvote".
        s='''
/projhelp this message
/projlist list of open projects
/projadd TEXT adds text as a new project. recommended format: short-name blurb roam-link
/projnewtext PJID TEXT changes the text of PJID
/proj+ PJID TEXT upvote this project; give a reason if you want
/proj- PJID TEXT downvote this project; give a reason if you want
/projvotes PJID lists all votes and reasons for project PJID
/projdrop PJID marks PJID as taken off agenda
go to https://roamresearch.com/#/app/ArtOfGig/page/DJVbvHE2_ to see how to add a new project
'''
        #$projset PJID FIELD VALUE sets field to value in pjid
        await splitsend(message.channel,s,True)
        return
    if message.content.startswith("$projadd") or message.content.startswith("/projadd"):
        conts=message.content.split(maxsplit=1)[1]
        db_c.execute('''insert into projects values (NULL,?,?,?,?,?,?,?,?)''',(str(message.author.id),conts,0,int(time.time()),0,0,0,""))
        conn.commit()
        s='new project id: ' +str(db_c.lastrowid)
        await splitsend(message.channel,s,False)
        return
    if message.content.startswith("$projset") or message.content.startswith("/projset"): #hidden feature
        cmd=message.content.split(maxsplit=3)
        # BUG FIX: the command needs 4 parts (command, PJID, FIELD, VALUE);
        # the old len(cmd)<3 check let "$projset x y" through to crash on cmd[3].
        if len(cmd)<4:
            return
        if not allowed(message.author.id,0):
            # BUG FIX: splitsend is a coroutine and was not awaited here.
            await splitsend(dmtarget,'no permission to do this',False)
            return
        pjset(cmd[1],cmd[2],cmd[3])
        # BUG FIX: this message referenced undefined names pid/field/value.
        s='set {}.{} to {}: '.format(cmd[1],cmd[2],cmd[3])
        await splitsend(message.channel,s,False)
        return
    if message.content.startswith("$projnewtext") or message.content.startswith("/projnewtext"): #instead of existing text
        cmd=message.content.split(maxsplit=2)
        if len(cmd)<3:
            return
        if not allowed(message.author.id,int(db_c.execute('''select creatorid from projects where pjid=?''',(cmd[1],)).fetchone()[0])):
            # BUG FIX: splitsend is a coroutine and was not awaited here.
            await splitsend(dmtarget,'no permission to do this',False)
            return
        pjset(cmd[1],"contents",cmd[2])
        s='new text put in project: ' +str(db_c.lastrowid)
        await splitsend(message.channel,s,False)
        return
    if message.content.startswith("$proj+") or message.content.startswith("/proj+"):
        cmd=message.content.split(maxsplit=2)
        if len(cmd)<2:
            return
        if len(cmd)==2:
            cmd.append("no reason given")
        val=int(db_c.execute('''select upvotes from projects where pjid=?''',(cmd[1],)).fetchone()[0])
        pjset(cmd[1],"upvotes",val+1)
        s='upvoted project: ' +cmd[1]
        await splitsend(message.channel,s,False)
        db_c.execute('''insert into votes values (NULL,?,?,?,?,?)''',(str(message.author.id),cmd[1],+1,cmd[2],int(time.time())))
        conn.commit()
        return
    if message.content.startswith("$proj-") or message.content.startswith("/proj-"):
        cmd=message.content.split(maxsplit=2)
        if len(cmd)<2:
            return
        if len(cmd)==2:
            cmd.append("no reason given")
        val=int(db_c.execute('''select downvotes from projects where pjid=?''',(cmd[1],)).fetchone()[0])
        pjset(cmd[1],"downvotes",val-1)
        s='downvoted project: ' +cmd[1]
        await splitsend(message.channel,s,False)
        db_c.execute('''insert into votes values (NULL,?,?,?,?,?)''',(str(message.author.id),cmd[1],-1,cmd[2],int(time.time())))
        conn.commit()
        return
    if message.content.startswith("$projvotes") or message.content.startswith("/projvotes"):
        cmd=message.content.split(maxsplit=1)
        if len(cmd)<2:
            return
        s='votes for project {}'.format(cmd[1]) +votelist(cmd[1])
        await splitsend(message.channel,s,False)
        return
    if message.content.startswith("$projdrop") or message.content.startswith("/projdrop"):
        conts=int(message.content.split(maxsplit=1)[1])
        db_c.execute('''UPDATE projects set filled=1, filledat= ? where pjid=? ''',(int(time.time()),conts))
        conn.commit()
        s='removed from project list: ' +str(conts)
        await splitsend(message.channel,s,False)
        return
#function which provides functionality for a per-channel list-based bot "w"
async def try_chan_bot(w, message):
    """Dispatch per-channel list-bot commands for the list named *w*.

    Commands (each accepted with a "$" or "/" prefix): {w}test, {w}list,
    {w}out, {w}show, {w}alldrop, {w}all, {w}help, {w}add, {w}drop.
    Replies are sent back to message.channel via splitsend(); rows live in
    the sqlite table named *w* (via the module-level db_c/conn).

    Fixes vs. previous version:
    - "{w}alldrop" is now checked BEFORE "{w}all": the "all" prefix test
      used to match "...alldrop" first, so alldrop could never run.
    - alldrop no longer parses an (unused) argument, which raised
      IndexError when none was given.
    - "{w}out" waits for the export script to finish before sending the
      CSV, so the file is no longer stale or incomplete.
    - "{w}show" no longer debug-prints q[0], which raised IndexError on an
      empty table.
    - "{w}help" also accepts the "/" prefix, as its own text advertises.
    - "{w}add"/"{w}drop" with no argument return instead of raising.
    """
    if message.content.startswith("${}test".format(w)) or message.content.startswith("/{}test".format(w)):
        s = 'this is a test response from {}bot'.format(w)
        await splitsend(message.channel, s, False)
        return
    if message.content.startswith("${}list".format(w)) or message.content.startswith("/{}list".format(w)):
        s = 'list of {} items in this channel:\n\n'.format(w) + perchanlist(message.channel.id, w)
        # Wrap bare URLs in <...> so Discord suppresses link previews.
        s = re.sub('(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*(),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)', r'<\1>', s)
        await splitsend(message.channel, s, False)
        return
    if message.content.startswith("${}out".format(w)) or message.content.startswith("/{}out".format(w)):
        thestringlist = ['/bin/bash', 'makethelist.bash', w]
        out = subprocess.Popen(thestringlist,
                               cwd=HOME_DIR,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
        # Fix: wait for the export script to finish (and drain its output
        # pipe) before sending the file it produces.
        out.communicate()
        s = 'list of {} items in all channels coming up in next message'.format(w)
        await splitsend(message.channel, s, False)
        await message.channel.send("actual file:", file=discord.File(HOME_DIR + "thelist.csv"))
        return
    if message.content.startswith("${}show".format(w)) or message.content.startswith("/{}show".format(w)):
        conts = message.content.split(maxsplit=1)
        nod = 0
        if len(conts) > 1:
            nod = int(conts[1])
        if nod == 0:
            nod = 1000  # no DAYSBACK given: look back ~3 years, i.e. "everything"
        q = tabledump(w)
        now = datetime.datetime.utcnow()
        wh = now - datetime.timedelta(days=nod)
        thresh = int(wh.timestamp())
        # Column 4 holds the creation timestamp written by {w}add; keep
        # only rows newer than the threshold.
        q1 = [str(x) for x in q if int(x[4]) > thresh]
        s = "\n".join(q1)
        if not s:
            s = "no agenda items to show\n"
        await splitsend(message.channel, s, False)
        return
    # NOTE: alldrop must be tested before "{w}all", whose prefix it shares.
    if message.content.startswith("${}alldrop".format(w)) or message.content.startswith("/{}alldrop".format(w)):
        db_c.execute('''UPDATE {0} set filled=1, filledat= ? where chan=?'''.format(w), (int(time.time()), message.channel.id))
        conn.commit()
        s = 'removed from {}: '.format(w) + str("all items")
        await splitsend(message.channel, s, False)
        return
    if message.content.startswith("${}all".format(w)) or message.content.startswith("/{}all".format(w)):  # hidden feature. for testing
        s = 'list of {} items in all channels:\n\n'.format(w) + perchanlistall()
        await splitsend(message.channel, s, False)
        return
    if message.content.startswith("${}help".format(w)) or message.content.startswith("/{}help".format(w)):
        s = '''
/{0}help this message
/{0}list list of {0} items
/{0}add TEXT adds text as a new item for {0} for THIS channel
/{0}drop ID marks id as taken off {0}
/{0}alldrop marks all ids as taken off {0}
/{0}out output a csv file with all items in sqlite3 table
/{0}show [DAYSBACK] output as a message, all items (optionally only DAYSBACK) in sqlitetable
'''. format(w)
        await splitsend(message.channel, s, True)
        return
    if message.content.startswith("${}add".format(w)) or message.content.startswith("/{}add".format(w)):
        parts = message.content.split(maxsplit=1)
        if len(parts) < 2:
            # Fix: a bare "${w}add" used to raise IndexError.
            return
        conts = parts[1]
        db_c.execute('''insert into {} values (NULL,?,?,?,?,?,?,?)'''.format(w), (str(message.author.id), conts, 0, int(time.time()), 0, message.channel.id, message.jump_url))
        conn.commit()
        s = 'new {} item id: '.format(w) + str(db_c.lastrowid)
        await splitsend(message.channel, s, False)
        return
    if message.content.startswith("${}drop".format(w)) or message.content.startswith("/{}drop".format(w)):
        parts = message.content.split(maxsplit=2)  # consider adding reason option here
        if len(parts) < 2:
            # Fix: a bare "${w}drop" used to raise IndexError.
            return
        conts = int(parts[1])
        db_c.execute('''UPDATE {0} set filled=1, filledat= ? where {0}id=? '''.format(w), (int(time.time()), conts))
        conn.commit()
        s = 'removed from {}: '.format(w) + str(conts)
        await splitsend(message.channel, s, False)
        return
#function which provides functionality for a list-based bot "w"
async def try_bot(w, message):
    """Dispatch global (not per-channel) list-bot commands for *w*.

    Commands ("$" or "/" prefix): {w}test, {w}list, {w}help, {w}show,
    {w}add, {w}drop.  Rows live in the sqlite table named "{w}s".  For the
    special list "gig" the dedicated gig channel is refreshed after
    add/drop.

    Fixes vs. previous version: the "{w}show" branch now returns instead
    of falling through to the remaining prefix tests, and "{w}add"/"{w}drop"
    with no argument return instead of raising IndexError.
    """
    if message.content.startswith("${}test".format(w)) or message.content.startswith("/{}test".format(w)):
        s = 'this is a test response from {}bot'.format(w)
        await splitsend(message.channel, s, False)
        return
    if message.content.startswith("${}list".format(w)) or message.content.startswith("/{}list".format(w)):
        s = 'list of outstanding {}s:\n\n'.format(w) + "\n\n".join(thelist(w))
        await splitsend(message.channel, s, False)
        return
    if message.content.startswith("${}help".format(w)) or message.content.startswith("/{}help".format(w)):
        s = '''
/{0}help this message
/{0}list lists available {0}s
/{0}add TEXT adds text as a new {0} and returns a {0}id
/{0}drop {0}ID [REASON] marks {0}id as closed. give optional reason
/{0}show message with table contents dump
'''.format(w)
        await splitsend(message.channel, s, True)
        return
    if message.content.startswith("${}show".format(w)) or message.content.startswith("/{}show".format(w)):
        q = tabledump(w + 's')
        q1 = [str(x) for x in q]
        s = "\n".join(q1)
        await splitsend(message.channel, s, False)
        # Fix: previously fell through and kept testing the other commands.
        return
    if message.content.startswith("${}add".format(w)) or message.content.startswith("/{}add".format(w)):
        parts = message.content.split(maxsplit=1)
        if len(parts) < 2:
            # Fix: a bare "${w}add" used to raise IndexError.
            return
        conts = parts[1]
        db_c.execute('''insert into {}s values (NULL,?,?,?,?,?,?)'''.format(w), (str(message.author.id), conts, 0, int(time.time()), 0, ""))
        conn.commit()
        specialstring = ""
        if message.content.startswith("$gig") or message.content.startswith("/gig"):
            specialstring = " . new gigs will be declared stale after 30 days and deleted."
        s = 'new {} id: '.format(w) + str(db_c.lastrowid) + specialstring
        await splitsend(message.channel, s, False)
        if message.content.startswith("$gig") or message.content.startswith("/gig"):
            await update_gigchannel()  # later make general, if others have channels...
        return
    if message.content.startswith("${}drop".format(w)) or message.content.startswith("/{}drop".format(w)):
        thetmp = message.content.split(maxsplit=2)
        if len(thetmp) < 2:
            # Fix: a bare "${w}drop" used to raise IndexError.
            return
        conts = int(thetmp[1])
        reason = "none given"
        remark = "you may add a reason for marking as filled by typing ${}drop ID REASON".format(w)
        if len(thetmp) > 2:
            reason = thetmp[2]
            remark = ""
        db_c.execute('''UPDATE {0}s set filled=1, filledat= ?, reason= ? where {0}id=? '''.format(w), (int(time.time()), reason, conts))
        conn.commit()
        s = 'marked as filled: ' + str(conts) + " " + reason + "\n" + remark
        await splitsend(message.channel, s, False)
        if message.content.startswith("$gig") or message.content.startswith("/gig"):
            await update_gigchannel()  # later make general, if others have channels...
        return
def pjset(pid, field, value):
    """Set column *field* of the projects row with pjid == *pid* to *value*.

    NOTE: *field* is interpolated into the SQL text because sqlite's "?"
    placeholders can only bind values, not column names -- callers must
    pass trusted, hard-coded field names only.
    """
    statement = '''UPDATE projects set {}=? where pjid=? '''.format(field)
    db_c.execute(statement, (value, pid))
    conn.commit()
async def delete_all_gig_messages():
    """Purge this bot's own messages from the gig channel (up to 100).

    Only messages authored by the bot account are removed; other users'
    messages in gig_chan are left untouched.
    """
    def is_me(m):
        # Keep only messages the bot itself posted.
        return m.author == client.user
    # purge() returns the deleted messages; the result is not needed here
    # (the previous version bound it to an unused variable).
    await gig_chan.purge(limit=100, check=is_me)
async def update_gigchannel():  # later make it for multipel channels
    """Rebuild the gig channel: delete the bot's old messages, then repost
    every open gig as one embed each.

    Entry format comes from thelist("gig"): a fixed 6-character prefix,
    then the gig id terminated by '**)', then the gig text -- TODO confirm
    against thelist()'s formatting if that ever changes.
    """
    await delete_all_gig_messages()
    listofgigs = thelist("gig")
    for entry in listofgigs:
        embed = discord.Embed(color=0xd12323)
        tpos = entry.index('**)')
        gig_id = entry[6:tpos]          # renamed from "id" (shadowed the builtin)
        body = entry[tpos + 4:]
        # The first field carries the gig id as its name; continuation
        # fields get a zero-width space (embed field names may not be empty).
        field_name = gig_id
        if len(body) > 1000:  # embed field values are capped at 1024 chars
            for chunk in cutup(body, 1000):
                embed.add_field(name=field_name, value=chunk, inline=False)
                field_name = "\u200B"
        else:
            embed.add_field(name=field_name, value=body, inline=False)
        await gig_chan.send(embed=embed)
#series of functions which generate formatted lists from the DB
def thelist(w):
q=[]
rows=db_c.execute('select * from {}s | |
import os
import sys
import re
# Section keywords recognized at the beginning of a line inside a
# /** ... */ documentation comment block.
keywords = [
    'Function:',
    'Class:',
    'Enum:',
    'Values:',
    'Description:',
    'Parameters:',
    'Returns:',
    'Example:'
]
# Separator for "Class.Function" style names.
classFunctionSeparator = '.'
# Named aliases for the entries of *keywords* above (same order).
functionKeyword = keywords[0]
classKeyword = keywords[1]
enumKeyword = keywords[2]
enumValuesKeyword = keywords[3]
descriptionKeyword = keywords[4]
parametersKeyword = keywords[5]
returnsKeyword = keywords[6]
exampleKeyword = keywords[7]
class SourceFileParser:
    """Extracts structured documentation from ``/** ... */`` comment blocks.

    Each block is split into sections introduced by one of the module-level
    ``keywords`` (``Function:``, ``Description:``, ``Parameters:``, ...).

    Fixes vs. previous version: the regex is a raw string (the old
    ``'/\\*\\*...'`` literal relied on invalid escape sequences), the source
    file is closed via a context manager, and a doc line consisting of a
    bare ``*`` no longer raises IndexError during normalization.
    """

    def __init__ (self, fileName):
        # Path of the source file to parse.
        self.fileName = fileName

    def Parse (self):
        """Read the file and return the parsed documentation parts."""
        # "with" guarantees the handle is closed even if read() raises.
        with open (self.fileName, 'r') as sourceFile:
            content = sourceFile.read ()
        regexp = re.compile (r'/\*\*(.*?)\*/', re.DOTALL)
        docStrings = regexp.findall (content)
        return self.ParseDocStrings (docStrings)

    def ParseDocStrings (self, docStrings):
        """Classify each doc comment as a function, class or enum part.

        NOTE(review): a comment with none of those keywords is still
        appended, but without a 'partType' key -- downstream code must
        tolerate that (preserved from the original behavior).
        """
        docParts = []
        for docString in docStrings:
            docPart = self.ParseDocString (docString)
            current = {}
            if functionKeyword in docPart.keys ():
                current['partType'] = functionKeyword
            elif classKeyword in docPart.keys ():
                current['partType'] = classKeyword
            elif enumKeyword in docPart.keys ():
                current['partType'] = enumKeyword
            current['partContent'] = docPart
            docParts.append (current)
        return docParts

    def ParseDocString (self, docString):
        """Split one doc comment into a {keyword : content} dictionary."""

        def ProcessParameterLine (keyword, line, lines, index, sections):
            # Collects "{type} description" or "name {type} description"
            # rows until the next keyword line; stores a list of lists.
            sectionContent = []
            index = index + 1
            while index < len (lines):
                sectionLine = lines[index].strip ()
                if self.GetKeyword (sectionLine) != None:
                    # Step back so the outer loop re-sees the keyword line.
                    index = index - 1
                    break
                bracketCount = sectionLine.count ('{') + sectionLine.count ('}')
                if bracketCount == 2:
                    firstBracket = sectionLine.find ('{')
                    secondBracket = sectionLine.find ('}')
                    if firstBracket != -1 and secondBracket != -1:
                        parameters = []
                        if firstBracket == 0:
                            # "{type} description" -- no leading name part.
                            parameters = [
                                sectionLine[firstBracket + 1 : secondBracket].strip (),
                                sectionLine[secondBracket + 1 :].strip (),
                            ]
                        else:
                            # "name {type} description"
                            parameters = [
                                sectionLine[: firstBracket - 1].strip (),
                                sectionLine[firstBracket + 1 : secondBracket].strip (),
                                sectionLine[secondBracket + 1 :].strip (),
                            ]
                        sectionContent.append (parameters)
                index = index + 1
            sections[keyword] = sectionContent
            return index

        def ProcessExampleLine (keyword, line, lines, index, sections):
            # Example blocks keep their line structure: tabs become "\t"
            # and lines are joined with literal "\n" markers.
            endOfLine = '\\n'
            sectionContent = ''
            index = index + 1
            while index < len (lines):
                sectionLine = lines[index].replace ('\t', '\\t')
                if self.GetKeyword (sectionLine) != None:
                    index = index - 1
                    break
                sectionContent += sectionLine + endOfLine
                index = index + 1
            sectionContent = sectionContent.strip (endOfLine)
            sections[keyword] = sectionContent
            return index

        def ProcessNormalLine (keyword, line, lines, index, sections):
            # Free text after the keyword, continued on following lines
            # until the next keyword; joined with single spaces.
            sectionContent = line[len (keyword) :].strip () + ' '
            index = index + 1
            while index < len (lines):
                sectionLine = lines[index].strip ()
                if self.GetKeyword (sectionLine) != None:
                    index = index - 1
                    break
                sectionContent += sectionLine + ' '
                index = index + 1
            sectionContent = sectionContent.strip ()
            sections[keyword] = sectionContent
            return index

        # Normalize: strip whitespace, drop the leading '*' decoration and
        # one tab of indentation that may follow it.
        originalLines = docString.split ('\n')
        lines = []
        for line in originalLines:
            line = line.strip ()
            if len (line) == 0:
                continue
            if line[0] == '*':
                line = line [1 :]
            if len (line) == 0:
                # Fix: a bare "*" line used to raise IndexError below.
                continue
            if line[0] == '\t':
                line = line [1 :]
            lines.append (line)
        sections = {}
        i = 0
        while i < len (lines):
            line = lines[i].strip ()
            keyword = self.GetKeyword (line)
            if keyword != None:
                if keyword == parametersKeyword or keyword == returnsKeyword or keyword == enumValuesKeyword:
                    i = ProcessParameterLine (keyword, line, lines, i, sections)
                elif keyword == exampleKeyword:
                    i = ProcessExampleLine (keyword, line, lines, i, sections)
                else:
                    i = ProcessNormalLine (keyword, line, lines, i, sections)
            i = i + 1
        return sections

    def GetKeyword (self, line):
        """Return the keyword *line* starts with, or None."""
        for keyword in keywords:
            if line.startswith (keyword):
                return keyword
        return None
class JSONFile:
    """Minimal helper that writes tab-indented JSON text line by line."""

    def __init__ (self, fileName):
        self.file = None
        self.fileName = fileName

    def Open (self):
        """Open the target file for writing (truncates existing content)."""
        self.file = open (self.fileName, 'w')

    def Close (self):
        """Close the underlying file handle."""
        self.file.close ()

    def Write (self, tabs, text, comma):
        """Write one line: *tabs* tab characters, *text*, an optional
        trailing comma, then a newline."""
        line = '\t' * tabs + text
        if comma:
            line += ','
        self.file.write (line + '\n')
class Parameter:
    """One documented function parameter: name, type and description."""

    def __init__ (self, name, type, description):
        self.name = name
        self.type = type
        self.description = description

    def WriteJSON (self, tabs, file, comma):
        """Emit ["name", "type", "description"] as one line; *comma*
        appends a trailing comma inside the written text."""
        joined = '", "'.join ([self.name, self.type, self.description])
        text = '["' + joined + '"]' + (',' if comma else '')
        file.Write (tabs, text, False)
class Return:
    """One documented return value: type and description."""

    def __init__ (self, type, description):
        self.type = type
        self.description = description

    def WriteJSON (self, tabs, file, comma):
        """Emit ["type", "description"] as one line; *comma* appends a
        trailing comma inside the written text."""
        text = '["' + '", "'.join ([self.type, self.description]) + '"]'
        if comma:
            text = text + ','
        file.Write (tabs, text, False)
class Value:
    """One documented enum value: value literal and description."""

    def __init__ (self, value, description):
        self.value = value
        self.description = description

    def WriteJSON (self, tabs, file, comma):
        """Emit ["value", "description"] as one line; *comma* appends a
        trailing comma inside the written text."""
        text = '["' + '", "'.join ([self.value, self.description]) + '"]'
        if comma:
            text = text + ','
        file.Write (tabs, text, False)
class Function:
    """Documentation of one function: description, parameters, return
    values and an optional example snippet."""

    def __init__ (self, name):
        self.name = name
        self.description = ''
        self.parameters = []
        self.returns = []
        self.example = ''

    def GetName (self):
        return self.name

    def HasDescription (self):
        return self.description != ''

    def HasParameters (self):
        return len (self.parameters) > 0

    def HasReturns (self):
        return len (self.returns) > 0

    def HasExample (self):
        return self.example != ''

    def SetDescription (self, description):
        self.description = description

    def AddParameter (self, parameter):
        self.parameters.append (parameter)

    def AddReturn (self, retVal):
        self.returns.append (retVal)

    def SetExample (self, example):
        self.example = example

    def WriteJSON (self, tabs, file, comma):
        """Serialize this function as a JSON object; *comma* appends a
        trailing comma after the closing brace.  Each section decides its
        own trailing comma by checking which sections follow it."""
        file.Write (tabs, '"' + self.name + '" : {', False)
        if self.HasDescription ():
            moreAfterDescription = self.HasParameters () or self.HasReturns () or self.HasExample ()
            file.Write (tabs + 1, '"description" : "' + self.description + '"', moreAfterDescription)
        if self.HasParameters ():
            file.Write (tabs + 1, '"parameters" : [', False)
            lastIndex = len (self.parameters) - 1
            for position, entry in enumerate (self.parameters):
                entry.WriteJSON (tabs + 2, file, position < lastIndex)
            file.Write (tabs + 1, ']', self.HasReturns () or self.HasExample ())
        if self.HasReturns ():
            file.Write (tabs + 1, '"returns" : [', False)
            lastIndex = len (self.returns) - 1
            for position, entry in enumerate (self.returns):
                entry.WriteJSON (tabs + 2, file, position < lastIndex)
            file.Write (tabs + 1, ']', self.HasExample ())
        if self.HasExample ():
            file.Write (tabs + 1, '"example" : "' + self.example + '"', False)
        file.Write (tabs, '}', comma)
class Class:
    """Documentation of one class: description, constructor parameters,
    member functions and an optional example snippet."""

    def __init__ (self, name):
        self.name = name
        self.description = ''
        self.parameters = []
        self.functions = []
        self.example = ''

    def GetName (self):
        return self.name

    def HasDescription (self):
        return self.description != ''

    def HasParameters (self):
        return len (self.parameters) > 0

    def HasFunctions (self):
        return len (self.functions) > 0

    def HasExample (self):
        return self.example != ''

    def SetDescription (self, description):
        self.description = description

    def AddParameter (self, parameter):
        self.parameters.append (parameter)

    def AddFunction (self, function):
        self.functions.append (function)

    def SetExample (self, example):
        self.example = example

    def WriteJSON (self, tabs, file, comma):
        """Serialize this class as a JSON object; *comma* appends a
        trailing comma after the closing brace.  Parameters form a JSON
        array, member functions a JSON object."""
        file.Write (tabs, '"' + self.name + '" : {', False)
        if self.HasDescription ():
            moreAfterDescription = self.HasParameters () or self.HasFunctions () or self.HasExample ()
            file.Write (tabs + 1, '"description" : "' + self.description + '"', moreAfterDescription)
        if self.HasParameters ():
            file.Write (tabs + 1, '"parameters" : [', False)
            lastIndex = len (self.parameters) - 1
            for position, entry in enumerate (self.parameters):
                entry.WriteJSON (tabs + 2, file, position < lastIndex)
            file.Write (tabs + 1, ']', self.HasFunctions () or self.HasExample ())
        if self.HasFunctions ():
            file.Write (tabs + 1, '"functions" : {', False)
            lastIndex = len (self.functions) - 1
            for position, entry in enumerate (self.functions):
                entry.WriteJSON (tabs + 2, file, position < lastIndex)
            file.Write (tabs + 1, '}', self.HasExample ())
        if self.HasExample ():
            file.Write (tabs + 1, '"example" : "' + self.example + '"', False)
        file.Write (tabs, '}', comma)
class Enum:
    """Documentation of one enum: description, values and an optional
    example snippet."""

    def __init__ (self, name):
        self.name = name
        self.description = ''
        self.values = []
        self.example = ''

    def GetName (self):
        return self.name

    def HasDescription (self):
        return self.description != ''

    def HasValues (self):
        return len (self.values) > 0

    def HasExample (self):
        return self.example != ''

    def SetDescription (self, description):
        self.description = description

    def AddValue (self, value):
        self.values.append (value)

    def SetExample (self, example):
        self.example = example

    def WriteJSON (self, tabs, file, comma):
        """Serialize this enum as a JSON object; *comma* appends a
        trailing comma after the closing brace."""
        file.Write (tabs, '"' + self.name + '" : {', False)
        if self.HasDescription ():
            moreAfterDescription = self.HasValues () or self.HasExample ()
            file.Write (tabs + 1, '"description" : "' + self.description + '"', moreAfterDescription)
        if self.HasValues ():
            file.Write (tabs + 1, '"values" : [', False)
            lastIndex = len (self.values) - 1
            for position, entry in enumerate (self.values):
                entry.WriteJSON (tabs + 2, file, position < lastIndex)
            file.Write (tabs + 1, ']', self.HasExample ())
        if self.HasExample ():
            file.Write (tabs + 1, '"example" : "' + self.example + '"', False)
        file.Write (tabs, '}', comma)
class Module:
    """Aggregates the documented functions, classes and enums of one module."""

    def __init__ (self, name):
        self.name = name
        self.functions = []
        self.classes = []
        self.enums = []

    def IsEmpty (self):
        """Return True when the module contains no documented entities.

        Fix: enums were previously ignored here, so a module containing
        only enums was wrongly reported as empty."""
        if len (self.functions) > 0:
            return False
        if len (self.classes) > 0:
            return False
        if len (self.enums) > 0:
            return False
        return True

    def HasFunctions (self):
        return len (self.functions) > 0

    def HasClasses (self):
        return len (self.classes) > 0

    def HasEnums (self):
        return len (self.enums) > 0

    def AddFunction (self, function):
        self.functions.append (function)

    def AddClass (self, classVal):
        self.classes.append (classVal)

    def AddClassFunction (self, className, function):
        """Attach *function* to the already-added class named *className*.

        Silently does nothing when no class with that name was added."""
        for classVal in self.classes:
            if classVal.GetName () == className:
                classVal.AddFunction (function)
                break

    def AddEnum (self, classVal):
        self.enums.append (classVal)

    def WriteJSON (self, tabs, file, comma):
        """Serialize the module with its functions/classes/enums maps;
        each section's trailing comma depends on which sections follow."""
        file.Write (tabs, '"' + self.name + '" : {', False)
        if self.HasFunctions ():
            file.Write (tabs + 1, '"functions" : {', False)
            for i in range (0, len (self.functions)):
                function = self.functions[i]
                function.WriteJSON (tabs + 2, file, i < len (self.functions) - 1)
            file.Write (tabs + 1, '}', self.HasClasses () or self.HasEnums ())
        if self.HasClasses ():
            file.Write (tabs + 1, '"classes" : {', False)
            for i in range (0, len (self.classes)):
                classVal = self.classes[i]
                classVal.WriteJSON (tabs + 2, file, i < len (self.classes) - 1)
            file.Write (tabs + 1, '}', self.HasEnums ())
        if self.HasEnums ():
            file.Write (tabs + 1, '"enums" : {', False)
            for i in range (0, len (self.enums)):
                enumVal = self.enums[i]
                enumVal.WriteJSON (tabs + 2, file, i < len (self.enums) - 1)
            file.Write (tabs + 1, '}', False)
        file.Write (tabs, '}', comma)
class Documentation:
def __init__ (self, projectName):
self.modules = []
self.projectName = projectName
def AddModule (self, moduleName, sourceFiles):
module = Module (moduleName)
for sourceFile in sourceFiles:
self.ProcessSourceFile (sourceFile, module)
self.modules.append (module)
def ProcessSourceFile (self, fileName, module):
parser = SourceFileParser (fileName)
currentDocParts = parser.Parse ()
self.ProcessDocParts (currentDocParts, module)
def ProcessDocParts (self, docParts, module):
currentModule = Module (None)
for docPart in docParts:
partType = docPart['partType']
partContent = docPart['partContent']
partName = partContent[partType]
if partType == functionKeyword:
className = ''
functionName = ''
if partName.find (classFunctionSeparator) == -1:
functionName = partName
else:
splitted = partName.split (classFunctionSeparator)
if len (splitted) == 2:
className = splitted[0]
functionName = splitted[1]
if len (functionName) == 0:
continue
theFunction = Function (functionName)
if descriptionKeyword in partContent.keys ():
theFunction.SetDescription (partContent[descriptionKeyword])
if parametersKeyword in partContent.keys ():
for parameters in partContent[parametersKeyword]:
theParameter = Parameter (parameters[0], parameters[1], parameters[2])
theFunction.AddParameter (theParameter)
if returnsKeyword in partContent.keys ():
for parameters in partContent[returnsKeyword]:
theReturn = Return (parameters[0], parameters[1])
theFunction.AddReturn (theReturn)
if exampleKeyword in partContent.keys ():
theFunction.SetExample (partContent[exampleKeyword])
if len (className) | |
<filename>azure/mgmt/compute/v2017_03_30/operations/virtual_machines_operations.py<gh_stars>1-10
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class VirtualMachinesOperations(object):
"""VirtualMachinesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2017-03-30".
"""
    def __init__(self, client, config, serializer, deserializer):
        # Service client used to build and send every HTTP request.
        self._client = client
        # msrest serializer/deserializer pair for request bodies and responses.
        self._serialize = serializer
        self._deserialize = deserializer
        # Client Api Version sent with every request (constant for this API).
        self.api_version = "2017-03-30"
        self.config = config
    def capture(
            self, resource_group_name, vm_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Captures the VM by copying virtual hard disks of the VM and outputs a
        template that can be used to create similar VMs.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine.
        :type vm_name: str
        :param parameters: Parameters supplied to the Capture Virtual Machine
         operation.
        :type parameters: :class:`VirtualMachineCaptureParameters
         <azure.mgmt.compute.v2017_03_30.models.VirtualMachineCaptureParameters>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :return:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns :class:`VirtualMachineCaptureResult
         <azure.mgmt.compute.v2017_03_30.models.VirtualMachineCaptureResult>`
         or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
         raw=true
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/capture'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'vmName': self._serialize.url("vm_name", vm_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Per-request client id header so the call can be correlated.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'VirtualMachineCaptureParameters')

        # Construct and send request
        def long_running_send():
            # Initial POST that starts the long-running capture operation.
            request = self._client.post(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Polls the service-provided status link until the operation ends.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # 200 and 202 are expected; only a 200 response carries a
            # deserializable VirtualMachineCaptureResult body.
            if response.status_code not in [200, 202]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            deserialized = None

            if response.status_code == 200:
                deserialized = self._deserialize('VirtualMachineCaptureResult', response)

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response

            return deserialized

        if raw:
            # raw=True bypasses the poller: one request, immediate response.
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def create_or_update(
            self, resource_group_name, vm_name, parameters, custom_headers=None, raw=False, **operation_config):
        """The operation to create or update a virtual machine.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine.
        :type vm_name: str
        :param parameters: Parameters supplied to the Create Virtual Machine
         operation.
        :type parameters: :class:`VirtualMachine
         <azure.mgmt.compute.v2017_03_30.models.VirtualMachine>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :return:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns :class:`VirtualMachine
         <azure.mgmt.compute.v2017_03_30.models.VirtualMachine>` or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
         raw=true
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'vmName': self._serialize.url("vm_name", vm_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Per-request client id header so the call can be correlated.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'VirtualMachine')

        # Construct and send request
        def long_running_send():
            # Initial PUT that starts the long-running create/update.
            request = self._client.put(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Polls the service-provided status link until the operation ends.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # 200 (updated) and 201 (created) both carry a VirtualMachine body.
            if response.status_code not in [200, 201]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            deserialized = None

            if response.status_code == 200:
                deserialized = self._deserialize('VirtualMachine', response)
            if response.status_code == 201:
                deserialized = self._deserialize('VirtualMachine', response)

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response

            return deserialized

        if raw:
            # raw=True bypasses the poller: one request, immediate response.
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def delete(
            self, resource_group_name, vm_name, custom_headers=None, raw=False, **operation_config):
        """The operation to delete a virtual machine.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine.
        :type vm_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :return:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns :class:`OperationStatusResponse
         <azure.mgmt.compute.v2017_03_30.models.OperationStatusResponse>` or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
         raw=true
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'vmName': self._serialize.url("vm_name", vm_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Per-request client id header so the call can be correlated.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        def long_running_send():
            # Initial DELETE that starts the long-running delete (no body).
            request = self._client.delete(url, query_parameters)
            return self._client.send(request, header_parameters, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Polls the service-provided status link until the operation ends.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # 200, 202 and 204 are expected; only a 200 response carries a
            # deserializable OperationStatusResponse body.
            if response.status_code not in [200, 202, 204]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            deserialized = None

            if response.status_code == 200:
                deserialized = self._deserialize('OperationStatusResponse', response)

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response

            return deserialized

        if raw:
            # raw=True bypasses the poller: one request, immediate response.
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
def get(
        self, resource_group_name, vm_name, expand=None, custom_headers=None, raw=False, **operation_config):
    """Retrieves information about the model view or the instance view of a
    virtual machine.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param vm_name: The name of the virtual machine.
    :type vm_name: str
    :param expand: The expand expression to apply on the operation.
     Possible values include: 'instanceView'
    :type expand: str or InstanceViewTypes
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: operation configuration overrides
    :return: VirtualMachine, or ClientRawResponse if raw=true
    :raises: CloudError
    """
    # Build the request URL from the route template and serialized path args.
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'vmName': self._serialize.url("vm_name", vm_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_args)

    # Query string: optional $expand plus the mandatory api-version.
    query_parameters = {}
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query("expand", expand, 'InstanceViewTypes')
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Request headers; caller-supplied headers may override the defaults.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Send the GET and validate the status code.
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, **operation_config)
    if response.status_code != 200:
        err = CloudError(response)
        err.request_id = response.headers.get('x-ms-request-id')
        raise err

    # Status is guaranteed 200 here, so the body always deserializes.
    deserialized = self._deserialize('VirtualMachine', response)
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def instance_view(
self, resource_group_name, vm_name, custom_headers=None, raw=False, **operation_config):
"""Retrieves information about the run-time state of a virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: :class:`VirtualMachineInstanceView
<azure.mgmt.compute.v2017_03_30.models.VirtualMachineInstanceView>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype: :class:`VirtualMachineInstanceView
<azure.mgmt.compute.v2017_03_30.models.VirtualMachineInstanceView>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/instanceView'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmName': self._serialize.url("vm_name", vm_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == | |
import cv2 as cv
import numpy as np
import numpy as np
import math
import imutils
import os
beta = 0.75
class BodyPart():
    """A rectangular body-part node in a kinematic tree.

    Stores the part's pose (x, y, theta), size (l, w), the four rectangle
    corners derived from that pose, and links to a parent and children.
    """

    def __init__(self, name='part'):
        self.name = name
        # Pose and size parameters (all start at zero).
        self.x = 0
        self.y = 0
        self.theta = 0
        self.l = 0
        self.w = 0
        # Kinematic-tree links.
        self.children = []
        self.parent = None
        # Rectangle corners; populated by setCorners().
        self.left_upper_corner = None
        self.right_upper_corner = None
        self.left_lower_corner = None
        self.right_lower_corner = None
        # Search bookkeeping.
        self.priority = 0
        self.area = 0
        self.Si = 0       # intersection of synthesized body part with foreground
        self.visited = 0  # number of times this body part was updated

    def setData(self, x, y, theta, l, w):
        """Set the full pose/size state and refresh the corners."""
        self.x, self.y, self.theta = x, y, theta
        self.l, self.w = l, w
        self.area = l * w
        self.setCorners()

    def updateValue(self, indx, lamda):
        """Perturb one parameter (0:x, 1:y, 2:theta, 3:l, other:w) by lamda.

        Positions and sizes are perturbed by int(lamda); the angle keeps the
        raw (possibly fractional) value. Corners are always recomputed.
        """
        if indx == 2:
            self.theta += lamda
        elif indx == 0:
            self.x += int(lamda)
        elif indx == 1:
            self.y += int(lamda)
        elif indx == 3:
            self.l += int(lamda)
            self.area = self.l * self.w
        else:
            self.w += int(lamda)
            self.area = self.l * self.w
        self.setCorners()

    def addChildren(self, children):
        """Append every part in `children` to this part's child list."""
        self.children.extend(children)

    def setParent(self, parent):
        self.parent = parent

    def getData(self):
        """Return the (x, y, theta, l, w) tuple."""
        return (self.x, self.y, self.theta, self.l, self.w)

    def setCorners(self):
        """Recompute the four rectangle corners from the current pose."""
        # Only the torso is parameterized by its center point.
        center = self.name == 'Torso'
        args = (self.x, self.y, self.theta, self.l, self.w, center)
        self.left_upper_corner = get_left_upper_corner(*args)
        self.right_upper_corner = get_right_upper_corner(*args)
        self.left_lower_corner = get_left_lower_corner(*args)
        self.right_lower_corner = get_right_lower_corner(*args)
# input : frame , initial background with no human in it
# output: binary image
def segmentation (frame, background):
    """Foreground mask by background subtraction with a fixed threshold.

    Pixels whose absolute grayscale difference exceeds 35 become 255
    (foreground); all others become 0.
    """
    # Collapse color inputs to a single channel before differencing.
    if len(frame.shape) > 2:
        frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    if len(background.shape) > 2:
        background = cv.cvtColor(background, cv.COLOR_BGR2GRAY)
    mask = cv.absdiff(frame, background)
    # Precompute the mask once, then binarize in place.
    foreground = mask > 35
    mask[foreground] = 255
    mask[~foreground] = 0
    return mask
def get_body_height(fore):
    """Return the vertical extent (in rows) of the body silhouette.

    A row belongs to the body iff it contains at least one pixel equal to
    255 (the foreground label). The height is the row distance between the
    first and last such rows; 0 when the mask is empty or the silhouette
    occupies a single row.

    Replaces the original per-row ``argmax`` scan (O(rows*cols) Python
    loop) with one vectorized pass; results are identical for uint8 masks.
    """
    body_rows = np.nonzero((fore == 255).any(axis=1))[0]
    if body_rows.size == 0:
        return 0
    return int(body_rows[-1] - body_rows[0])
def get_torso_center(foreImage):
    """Torso center = foreground pixel farthest from any background pixel."""
    dist = cv.distanceTransform(foreImage, cv.DIST_L2, 5)
    ys, xs = np.where(dist == np.amax(dist))
    # np.where may return several maxima; keep the first, as (x, y).
    return (xs[0], ys[0])
####################length of torso#############################
def get_torso_length(foreImage, lBody):
    """Sample a torso length from the body-height prior.

    Draws from N(0.33 * lBody, 0.001 * lBody**2). ``foreImage`` is accepted
    for interface symmetry with the other model functions but is unused.
    """
    mean_ratio, var_ratio = .33, .001
    return np.random.normal(mean_ratio * lBody,
                            np.sqrt(var_ratio * (lBody ** 2)))
##################################################################
####################width of torso#############################
def get_torso_width(foreImage, wBody):
    """Sample a torso width from the body-width prior.

    Draws from N(1.0 * wBody, 0.001 * wBody**2). ``foreImage`` is accepted
    for interface symmetry with the other model functions but is unused.
    """
    mean_ratio, var_ratio = 1, .001
    return np.random.normal(mean_ratio * wBody,
                            np.sqrt(var_ratio * (wBody ** 2)))
##################################################################
def get_torso_angle(foreImage):
    """Estimate the torso orientation from the foreground mask.

    Locates two well-inside-the-silhouette anchor points via distance
    transforms, computes the angle of the line joining them — and then,
    NOTE(review): discards that result and returns the constant 360
    (see the commented-out ``return`` at the bottom).
    """
    fore = foreImage.copy()
    # get horizontal histogram
    num_rows = foreImage.shape[0]
    # First anchor: foreground pixel with the largest distance to background.
    distMap= cv.distanceTransform(foreImage, cv.DIST_L2, 5)
    (yFirst, xFirst) = np.where(distMap == np.amax(distMap))
    xFirst = int(xFirst[0])
    yFirst = int(yFirst[0])
    # Second anchor: repeat the search on the region below the first anchor.
    cropped_image = fore[min(yFirst + 5, num_rows - 1):, ]
    distMap= cv.distanceTransform(cropped_image, cv.DIST_L2, 5)
    (ySecond, xSecond) = np.where(distMap == np.amax(distMap))
    xSecond = int(xSecond[0])
    # Translate the cropped-image row back into full-image coordinates.
    ySecond = int(ySecond[0]) + min(yFirst + 5, num_rows - 1)
    # If the two anchors are nearly on the same row, retry above the first.
    if abs(ySecond - yFirst) < 30:
        cropped_image = fore[0:max(yFirst - 5, 0), ]
        distMap = cv.distanceTransform(cropped_image, cv.DIST_L2, 5)
        # NOTE(review): distanceTransform does not return None; this guard
        # looks ineffective — an empty crop would raise instead. Verify.
        if not distMap is None:
            (ySecond, xSecond) = np.where(distMap == np.amax(distMap))
            xSecond = int(xSecond[0])
            ySecond = int(ySecond[0])
    # Angle of the anchor-to-anchor line, in degrees (90 for a vertical line).
    deltaY = ySecond - yFirst
    deltaX = xSecond - xFirst
    if deltaX != 0:
        theta = np.arctan(deltaY/deltaX) * 180.0 / np.pi
    else:
        theta = 90.0
    # NOTE(review): this hardcoded return makes everything above dead code;
    # the intended value appears to be abs(90 - theta) — confirm with callers.
    return 360
    #return abs(90 - theta)
def get_torso_model(image_R, face, img):
    """Build the (x, y, theta, l, w) torso hypothesis from the silhouette.

    Call order is significant: the length/width samplers consume the shared
    numpy RNG stream in a fixed sequence.
    """
    body_height = get_body_height(img)
    body_width = 0.17 * body_height
    torso_l = get_torso_length(img, body_height)
    torso_w = get_torso_width(img, body_width)
    # Anchor point from the detected face (helper defined elsewhere).
    torso_x, torso_y = get_TFH(image_R, face, torso_l)
    torso_theta = get_torso_angle(img)
    return (torso_x, torso_y, torso_theta, torso_l, torso_w)
def get_right_upper_arm_model(torso_center_x, torso_center_y, torso_theta, torso_height, torso_w):
    """Sample (x, y, theta, height, width) for the right upper arm.

    The arm is anchored at the torso rectangle's right-upper corner,
    shifted down by half the sampled arm width. Removes the original's
    unused ``sigma_x`` local and redundant coordinate aliasing; the random
    draw order (height, width, theta) is unchanged.
    """
    # Length ~ N(0.55 * torso_height, 0.02); width ~ N(0.2 * torso_w, 0.1).
    height = np.random.normal(.55 * torso_height, .02)
    width = np.random.normal(.2 * torso_w, .1)
    # Torso coordinates describe its center, hence the True flag.
    (right_x, right_y) = get_right_upper_corner(
        torso_center_x, torso_center_y, torso_theta, torso_height, torso_w, True)
    right_y = right_y + (.5 * width)
    # Angular prior for the right arm, in degrees.
    theta = np.random.normal(45, 10)
    return right_x, right_y, theta, height, width
def get_left_upper_arm_model(torso_center_x, torso_center_y, torso_theta, torso_height, torso_w):
    """Sample (x, y, theta, height, width) for the left upper arm.

    The arm is anchored at the torso rectangle's left-upper corner,
    shifted down by half the sampled arm width. Removes the original's
    unused ``sigma_x`` local and redundant coordinate aliasing; the random
    draw order (height, width, theta) is unchanged.
    """
    # Length ~ N(0.55 * torso_height, 0.02); width ~ N(0.2 * torso_w, 0.1).
    height = np.random.normal(.55 * torso_height, .02)
    width = np.random.normal(.2 * torso_w, .1)
    # Torso coordinates describe its center, hence the True flag.
    (left_x, left_y) = get_left_upper_corner(
        torso_center_x, torso_center_y, torso_theta, torso_height, torso_w, True)
    left_y = left_y + (.5 * width)
    # Angular prior for the left arm, in degrees.
    theta = np.random.normal(125, 10)
    return left_x, left_y, theta, height, width
def get_right_lower_arm_model(end_x, end_y, torso_height, torso_w):
    """Sample the right forearm, attached at the upper arm's end point.

    Returns (x, y, theta, height, width); the attachment point is jittered
    with a std of 1 pixel.
    """
    length = np.random.normal(.55 * torso_height, .02)
    width = np.random.normal(.2 * torso_w, .1)
    jitter = 1  # positional noise (std) around the attachment point
    x = np.random.normal(end_x, jitter)
    y = np.random.normal(end_y, jitter)
    angle = np.random.normal(45, 10)
    return x, y, angle, length, width
def get_left_lower_arm_model(end_x, end_y, torso_height, torso_w):
    """Sample the left forearm, attached at the upper arm's end point.

    Returns (x, y, theta, height, width); the attachment point is jittered
    with a std of 3 pixels.
    """
    length = np.random.normal(.55 * torso_height, .02)
    width = np.random.normal(.2 * torso_w, .1)
    jitter = 3  # positional noise (std) around the attachment point
    x = np.random.normal(end_x, jitter)
    y = np.random.normal(end_y, jitter)
    angle = np.random.normal(125, 10)
    return x, y, angle, length, width
def get_left_upper_leg_model(torso_center_x, torso_center_y, torso_theta, torso_height, torso_w):
    """Sample the left thigh, anchored at the torso's left-lower corner."""
    length = np.random.normal(.7 * torso_height, .01)
    width = np.random.normal(.35 * torso_w, .1)
    (anchor_x, anchor_y) = get_left_lower_corner(
        torso_center_x, torso_center_y, torso_theta, torso_height, torso_w, True)
    anchor_x = anchor_x + (.5 * width)
    # Zero-sigma draws collapse to the anchor point, but are kept so the
    # shared RNG stream stays aligned with the original code.
    x = np.random.normal(anchor_x, 0)
    y = np.random.normal(anchor_y, 0)
    angle = np.random.normal(100, 10)
    return x, y, angle, length, width
def get_right_upper_leg_model(torso_center_x, torso_center_y, torso_theta, torso_height, torso_w):
    """Sample the right thigh, anchored at the torso's right-lower corner."""
    length = np.random.normal(.7 * torso_height, .01)
    width = np.random.normal(.34 * torso_w, .1)
    (anchor_x, anchor_y) = get_right_lower_corner(
        torso_center_x, torso_center_y, torso_theta, torso_height, torso_w, True)
    anchor_x = anchor_x - (.5 * width)
    # Zero-sigma draws collapse to the anchor point, but are kept so the
    # shared RNG stream stays aligned with the original code.
    x = np.random.normal(anchor_x, 0)
    y = np.random.normal(anchor_y, 0)
    angle = np.random.normal(80, 10)
    return x, y, angle, length, width
def get_left_lower_leg_model(end_x, end_y, torso_height, torso_w):
    """Sample the left shin, attached at the thigh's end point."""
    length = np.random.normal(.7 * torso_height, .01)
    width = np.random.normal(.35 * torso_w, .1)
    # Zero-sigma draws: exactly the attachment point, kept for RNG parity.
    x = np.random.normal(end_x, 0)
    y = np.random.normal(end_y, 0)
    angle = np.random.normal(110, 10)
    return x, y, angle, length, width
def get_right_lower_leg_model(end_x, end_y, torso_height, torso_w):
    """Sample the right shin, attached at the thigh's end point."""
    length = np.random.normal(.7 * torso_height, .01)
    width = np.random.normal(.34 * torso_w, .1)
    # Zero-sigma draws: exactly the attachment point, kept for RNG parity.
    x = np.random.normal(end_x, 0)
    y = np.random.normal(end_y, 0)
    angle = np.random.normal(70, 10)
    return x, y, angle, length, width
def get_head_model(torso_center_x, torso_center_y, torso_height, torso_w):
    """Sample the head box, sitting at the middle of the torso's top edge."""
    length = np.random.normal(.35 * torso_height, .1)
    width = np.random.normal(.5 * torso_w, .1)
    # Deterministic anchor: horizontally centered, half a torso height up.
    x = torso_center_x
    y = torso_center_y - (.5 * torso_height)
    angle = np.random.normal(270, 5)  # roughly straight up
    return x, y, angle, length, width
def get_body_data(torso_center_x, torso_center_y, torso_theta, torso_height, torso_w):
############################## draw upper legs#####################################
xll, yll, thetall, hll, wll = get_left_upper_leg_model(torso_center_x, torso_center_y, torso_theta,torso_height, torso_w)
left_upper_leg_data = (xll, yll, thetall, hll, wll)
endy_left_top_leg = yll + (hll * math.sin(math.radians(thetall)))
endx_left_top_leg = xll + (hll * math.cos(math.radians(thetall)))
xrl, yrl, thetarl, hrl, wrl = get_right_upper_leg_model(torso_center_x, torso_center_y,torso_theta, torso_height, torso_w)
right_upper_leg_data = (xrl, yrl, thetarl, hrl, wrl)
endy_right_top_leg = yrl + (hrl * math.sin(math.radians(thetarl)))
endx_right_top_leg = xrl + (hrl * math.cos(math.radians(thetarl)))
############################## draw lower legs#######################################
xlll, ylll, thetalll, hlll, wlll = get_left_lower_leg_model(endx_left_top_leg, endy_left_top_leg, torso_height, torso_w)
left_lower_leg_data = (xlll, ylll, thetalll, hlll, wlll)
xrll, yrll, thetarll, hrll, wrll = get_right_lower_leg_model(endx_right_top_leg, endy_right_top_leg, torso_height, torso_w)
right_lower_leg_data = (xrll, yrll, thetarll, hrll, wrll)
########################draw upper arms####################################
xla, yla, thetala, hla, wla = get_left_upper_arm_model(torso_center_x, torso_center_y,torso_theta, torso_height, torso_w)
left_upper_arm_data = (xla, yla, thetala, hla, wla)
endy_left_top_arm = yla + (hla * math.sin(math.radians(thetala)))
endx_left_top_arm = xla + (hla * math.cos(math.radians(thetala)))
xra, yra, thetara, hra, wra = get_right_upper_arm_model(torso_center_x, torso_center_y,torso_theta, torso_height, torso_w)
right_upper_arm_data = (xra, yra, thetara, hra, wra)
endy_right_top_arm = yra + (hra * math.sin(math.radians(thetara)))
endx_right_top_arm = xra + (hra * math.cos(math.radians(thetara)))
###########################draw lower arms####################################
xrla, yrla, thetarla, hrla, wrla |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.