text stringlengths 957 885k |
|---|
<reponame>gnzlbg/nmp
import subprocess
import os
import shutil
import copy
import sys
from functools import partial
from operator import itemgetter, attrgetter
def copy_and_overwrite(from_path, to_path):
    """Recursively copy ``from_path`` to ``to_path``, overwriting existing files.

    Uses an argument list instead of a shell string (the original used
    ``os.system("cp -rf " + ...)``) so paths containing spaces or shell
    metacharacters cannot break, or inject into, the command.
    """
    subprocess.check_call(["cp", "-rf", from_path, to_path])
def get_directory_structure(rootdir):
    """
    Creates a nested dictionary that represents the folder structure of rootdir.

    Each directory maps to a dict of its children; files map to None.
    """
    tree = {}  # renamed from `dir`, which shadowed the builtin
    rootdir = rootdir.rstrip(os.sep)
    start = rootdir.rfind(os.sep) + 1
    for path, dirs, files in os.walk(rootdir):
        folders = path[start:].split(os.sep)
        subdir = dict.fromkeys(files)
        # Walk down to the parent dict. The original used the bare `reduce`
        # builtin, which only exists on Python 2; an explicit loop works on
        # both 2 and 3. os.walk yields parents before children, so every
        # intermediate key already exists.
        parent = tree
        for folder in folders[:-1]:
            parent = parent[folder]
        parent[folders[-1]] = subdir
    return tree
def set_up_paths(args):
    """Resolve source/build directories to absolute paths and stash them,
    plus the exclusion list, into ``args``."""
    src = os.path.abspath(args['<source_dir>'])
    build = os.path.abspath(args['<build_dir>'])
    dirs = {
        'src_dir': src,
        'build_dir': build,
        'src_site_dir': os.path.join(src, 'site/'),
        'build_site_dir': os.path.join(build, 'site/'),
    }
    check_paths(dirs)
    args['dirs'] = dirs
    # Paths skipped when scanning for markdown sources
    args['exclude_paths'] = ['site', dirs['build_dir'], '.git']
def check_paths(dirs):
    """Validate that every configured directory exists.

    Site directories are created on demand; any other missing directory
    aborts the program via sys.exit.
    """
    # .items() works on both Python 2 and 3 (the original .iteritems()
    # is Python-2-only and raises AttributeError on 3)
    for name, path in dirs.items():
        if not os.path.exists(path):
            if 'site' not in path:
                sys.exit('[E] ' + name + ' directory doesn\'t exist!')
            else:
                print('[M] ' + name + ' directory doesn\'t exist! -> Created')
                os.makedirs(path)
def check_directory_structure(args):
    """Abort unless every expected relative directory exists under src_dir."""
    base = args['dirs']['src_dir']
    for rel_dir in args['src_dir_structure']:
        candidate = os.path.join(base, rel_dir)
        if os.path.exists(candidate):
            continue
        sys.exit('[E] Error invalid source dir <= Path:\n'
                 + candidate + '\n doesn\'t exist!')
def src_md_files(args):
    """Return the paths of all .md files under src_dir, skipping any
    directory whose path contains an excluded marker."""
    excluded = args['exclude_paths']
    found = []
    for folder, _subdirs, filenames in os.walk(args['dirs']['src_dir']):
        if any(marker in folder for marker in excluded):
            continue
        found.extend(os.path.join(folder, name)
                     for name in filenames if name.endswith('.md'))
    return found
def src_to_build_md(src_md_file_path, args):
    """Map a source .md path to its destination inside build_site_dir.

    Strips a leading 'include/' directory component and renames 'readme'
    files to 'index'.
    """
    rel = os.path.relpath(src_md_file_path, args['dirs']['src_dir'])
    rel_dir = os.path.dirname(rel).replace('include/', '')
    base = os.path.basename(src_md_file_path).replace('readme', 'index')
    return os.path.join(args['dirs']['build_site_dir'], rel_dir, base)
def toc_to_file(toc):
    """Render TOC entries as a nested markdown bullet list.

    Also echoes the raw entries and the rendered text to stdout, matching
    the original's debug output.
    """
    print(toc)
    lines = []
    for _src, path, name, _tree, indent in toc:
        depth = indent.count('#')
        link = path.split('site')[-1][1:]
        lines.append('  ' * depth + '- [' + name.replace('#', '') + '](' + link + ')\n')
    rendered = ''.join(lines)
    print(rendered)
    return rendered
def create_and_write_toc(args):
    """Build the table of contents from the source .md files, store it in
    ``args['toc']``, and write its markdown rendering to
    <build_site_dir>/_includes/toc.
    """
    toc = []
    for src_md_file in src_md_files(args):
        build_md_file = src_to_build_md(src_md_file, args)
        d = os.path.dirname(build_md_file)
        site_d = os.path.dirname(args['dirs']['build_site_dir'])
        if d == site_d:
            # files that land directly in the site root are not listed
            continue
        toc_name = build_md_file.split('site')[1].replace('.md', '')
        toc_path = ''
        toc_indent = ''
        if toc_name == '/nmp/index':
            toc_name = toc_name.replace('/nmp/index', 'NMP')
        else:
            toc_path = os.path.dirname(toc_name).replace('/nmp/', '')
            # one level of '#' per path component below /nmp/
            toc_indent = '##' + ('#' * toc_path.count('/'))
            if 'index' not in build_md_file:
                toc_indent = toc_indent + '#'
            toc_name = toc_name.split('/')[-1]
            toc_name = toc_name.replace('index', toc_path.split('/')[-1])
            toc_path = toc_path.replace('/', '')
        toc.append((src_md_file, build_md_file, toc_name, toc_path, toc_indent))
    # stable sorts: primary key name, secondary key path
    toc = sorted(toc, key=lambda x: x[3])
    toc = sorted(toc, key=lambda x: x[2])
    args['toc'] = toc
    site_includes_dir = os.path.join(args['dirs']['build_site_dir'], '_includes/')
    if not os.path.exists(site_includes_dir):
        # was `shutils.mkdir(...)`: `shutils` is undefined (the module is
        # `shutil`, which has no mkdir) and raised NameError
        os.makedirs(site_includes_dir)
    # open() with a context manager replaces the Python-2-only file() builtin
    with open(site_includes_dir + 'toc', 'w') as modified:
        modified.write(toc_to_file(toc))
def create_site_files(args):
    """Copy every markdown file referenced by the TOC into the build site,
    prepending empty Jekyll front matter ("---\\n---\\n")."""
    toc = args['toc']
    for src_path, site_path, _, _, _ in toc:
        site_dir = os.path.dirname(site_path)
        if not os.path.exists(site_dir):
            os.makedirs(site_dir)
        # open() with context managers replaces the Python-2-only file() builtin
        with open(src_path, 'r') as original:
            data = original.read()
        with open(site_path, 'w') as modified:
            modified.write("---\n---\n" + data)
    # Copy root dir files. NOTE(review): `f` is a bare filename, so these
    # reads resolve against the current working directory -- this assumes the
    # script runs from the source dir; TODO confirm.
    for f in os.listdir(args['dirs']['src_dir']):
        if f.endswith(".md"):
            target_file = src_to_build_md(f, args)
            with open(f, 'r') as original:
                data = original.read()
            with open(target_file, 'w') as modified:
                modified.write("---\n---\n" + data)
def update_links(args):
    """Rewrite links inside generated site files: '.md' -> '.html' and
    'include/' -> 'nmp/'.

    Uses open() context managers instead of the Python-2-only file()
    builtin; the per-file debug prints were removed.
    """
    for subdir, dirs, files in os.walk(args['dirs']['build_site_dir']):
        for f in files:
            if '.md' in f or '_includes' in subdir:
                fp = os.path.join(subdir, f)
                with open(fp, 'r') as original:
                    data = original.read()
                data = data.replace('.md', '.html').replace('include/', 'nmp/')
                with open(fp, 'w') as modified:
                    modified.write(data)
def generate_site(args):
    """Generates site, then terminates the process via sys.exit()."""
    print('Generating ' + args['project_name'] + '\'s website...')
    verbose = args['--verbose']  # unused, but keeps the required CLI flag validated
    # Check that the paths are correct and create necessary directories:
    set_up_paths(args)
    check_directory_structure(args)
    # Copy the src site to the build directory:
    copy_and_overwrite(args['dirs']['src_site_dir'], args['dirs']['build_site_dir'])
    # Create table of contents
    create_and_write_toc(args)
    # Copy md files to site_build_dir:
    create_site_files(args)
    # Update links
    update_links(args)
    print('...' + args['project_name'] + '\'s website generated!')
    # NOTE: sys.exit() made everything after it unreachable; the dead calls
    # to the undefined md_files_in/generate_site_files were removed.
    sys.exit()
|
<gh_stars>1-10
import os.path
import re
import pickle, hashlib
from sklearn.externals import joblib
from logging import debug, info
from ngram import get_ngrams
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
import numpy as np
def identity(x):
    """Return the argument unchanged.

    Passed as the TfidfVectorizer analyzer because a lambda would break
    saving (pickling) of the vectorizer.
    """
    return x
def preprocess(docs, c_ngmin=1, c_ngmax=1,
               w_ngmin=1, w_ngmax=1, lowercase=None):
    """Convert docs to combined character/word n-gram feature lists.

    ``lowercase`` selects optional case normalization: 'char' lowercases
    input for the character n-grams, 'word' for the word n-grams. This
    would ideally be the analyzer parameter of the vectorizer, but that
    requires a lambda, which breaks saving.
    """
    features = []
    for doc in docs:
        char_src = doc.lower() if lowercase == 'char' else doc
        word_src = doc.lower() if lowercase == 'word' else doc
        # character n-grams
        docfeat = get_ngrams(char_src,
                             ngmax=c_ngmax, ngmin=c_ngmin,
                             tokenizer=list)
        # word n-grams
        docfeat.extend(get_ngrams(word_src,
                                  ngmax=w_ngmax, ngmin=w_ngmin,
                                  append="W"))
        features.append(docfeat)
    return features
def doc_to_ngrams(docs, use_cached=True, cache=True,
                  cache_dir='.cache', **kwargs):
    """ Return bag-of-n-grams features for the give document set

    Returns (vectors, vectorizer, svd); svd is the fitted TruncatedSVD
    transformer when 'dim_reduce' is set, otherwise None.
    """
    param = {
        'c_ngmax': 1, 'c_ngmin': 1, 'w_ngmax': 1, 'w_ngmin': 1,
        'min_df': 1,
        'sublinear': True,
        'norm': 'l2',
        'max_features': None,
        'input_name': None,
        'lowercase': None,
        'dim_reduce': None
    }
    for k, v in kwargs.items():
        param[k] = v
    # cachefn starts as None so the lookup below is safe even when caching
    # is fully disabled (the original could hit an unbound cachefn)
    cachefn = None
    if param['input_name'] and use_cached or cache:
        os.makedirs(cache_dir, exist_ok=True)
        paramstr = ','.join([k + '=' + str(param[k]) for k in sorted(param)])
        cachefn = 'vectorizer-' + \
            hashlib.sha224(paramstr.encode('utf-8')).hexdigest() + '.z'
        cachefn = os.path.join(cache_dir, cachefn)
    if use_cached and cachefn is not None and os.path.exists(cachefn):
        info('Using cached vectorizer: {}'.format(cachefn))
        # joblib.load takes a filename directly; the unused open()/close()
        # pair the original wrapped around these calls was removed
        v = joblib.load(cachefn)
        vectors = joblib.load(cachefn.replace('vectorizer-', 'vectors-'))
    else:
        features = preprocess(docs, c_ngmin=param['c_ngmin'],
                              c_ngmax=param['c_ngmax'], w_ngmin=param['w_ngmin'],
                              w_ngmax=param['w_ngmax'], lowercase=param['lowercase'])
        v = TfidfVectorizer(analyzer=identity,
                            lowercase=(param['lowercase'] == 'all'),
                            sublinear_tf=param['sublinear'],
                            min_df=param['min_df'],
                            norm=param['norm'],
                            max_features=param['max_features'])
        vectors = v.fit_transform(features)
        if cache and param['input_name']:
            info('Saving vectorizer: {}'.format(cachefn))
            joblib.dump(v, cachefn, compress=True)
            joblib.dump(vectors,
                        cachefn.replace('vectorizer-', 'vectors-'),
                        compress=True)
    svd = None
    if param['dim_reduce']:
        info("reducing dimentionality {} -> {}".format(
            len(v.vocabulary_), param['dim_reduce']))
        svd = TruncatedSVD(n_components=param['dim_reduce'], n_iter=10)
        svd.fit(vectors)
        info("explained variance: {}".format(
            svd.explained_variance_ratio_.sum()))
        vectors = svd.transform(vectors)
    # return the fitted svd (the original hard-coded None here, discarding
    # the transformer it had just fitted)
    return vectors, v, svd
# word tokenizer: runs of word chars, or runs of non-space non-word chars
w_tokenizer = re.compile(r"\w+|[^ \t\n\r\f\v\w]+").findall

def doc_to_numseq(doc, vocab, tokenizer="char", pad=None):
    """ Transform given sequence of labels to numeric values

    Index 0 is reserved for padding, 1 for out-of-vocabulary, 2/3 for
    start/end markers; vocabulary entries start at 4. Returns (X, maxlen).
    """
    oov_char = 1
    start_char = 2
    end_char = 3
    features = {k: v + 4 for v, k in enumerate(vocab.keys())}
    # Resolve the tokenizer once, outside the loop (the original rebound the
    # `tokenizer` parameter inside the loop, relying on the string checks
    # failing on later iterations).
    if tokenizer == "word":
        tokenize = w_tokenizer
    elif tokenizer == "char":
        tokenize = list
    else:
        tokenize = tokenizer  # assume a callable was passed -- TODO confirm
    X = []
    maxlen = 0
    for d in doc:
        x = [start_char]
        for c in tokenize(d):
            x.append(features.get(c, oov_char))
        x.append(end_char)
        maxlen = max(maxlen, len(x))
        X.append(x)
    # rows are ragged, so dtype=object is required on numpy >= 1.24
    X = np.array(X, dtype=object)
    if pad:
        # imported lazily so keras is only required when padding is requested
        from keras.preprocessing.sequence import pad_sequences
        X = pad_sequences(X, maxlen=pad)
    return X, maxlen
|
#!/usr/bin/env python
# encoding: utf-8
#
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
from waflib import Logs, Utils, Errors, Build
import os
import collections
import json
# The Visual Code schema version for launch tasks that we are basing on
LAUNCH_JSON_VERSION = '0.2.0'
# The Visual Code schema version for (build) tasks that we are basing on
TASK_JSON_VERSION = '2.0.0'
# Map of unversioned host platforms to platform specific values and format strings that we will use during construction
# of the visual code json configuration files
#   exePattern:        format string turning a target name into an executable name
#   scriptPattern:     format string turning a script name into a runnable script
#   dbgType:           VSCode debugger type used in launch configurations
#   problemMatcher:    VSCode problem matcher applied to build-task output
#   wafPlatformPrefix: prefix used to select waf target platforms for this host
VSCODE_PLATFORM_SETTINGS_MAP = {
    'win32': dict(exePattern="%s.exe",
                  scriptPattern="%s.bat",
                  dbgType="cppvsdbg",
                  problemMatcher='$msCompile',
                  wafPlatformPrefix="win_x64"),
    'darwin': dict(exePattern="./%s",
                   scriptPattern="./%s.sh",
                   dbgType="cppdbg",
                   problemMatcher="$gcc",
                   wafPlatformPrefix="darwin_"),
    'linux': dict(exePattern="./%s",
                  scriptPattern="./%s.sh",
                  dbgType="cppdbg",
                  problemMatcher="$gcc",
                  wafPlatformPrefix="linux_")
}
class GenerateVSCodeWorkspace(Build.BuildContext):
    """Waf build context that generates a Visual Studio Code workspace
    (tasks.json, launch.json, settings.json) under <project root>/.vscode."""
    cmd = 'vscode'
    fun = 'build'
    # build configuration chosen for the default build task
    default_build_configuration = 'profile'
    # launch targets always offered, in addition to enabled game projects
    default_launch_targets = ['Editor', 'AssetProcessor']

    def __init__(self):
        Build.BuildContext.__init__(self)
        # Get the host-platform specific values for this visual code generation and initialize them as
        # object variables to this build class
        host_name = Utils.unversioned_sys_platform()
        try:
            platform_settings = VSCODE_PLATFORM_SETTINGS_MAP[host_name]
        except KeyError:
            raise Errors.WafError("Unsupported platform '{}' for VSCode".format(host_name))
        try:
            self.exePattern = platform_settings['exePattern']
            self.dbgType = platform_settings['dbgType']
            self.wafPlatformPrefix = platform_settings['wafPlatformPrefix']
            self.scriptPattern = platform_settings['scriptPattern']
            self.problemMatcher = platform_settings['problemMatcher']
        except KeyError as err:
            # str(err) instead of err.message: BaseException.message was
            # removed in Python 3 and would raise AttributeError here
            raise Errors.WafError("VSCode settings '{}' for platform '{}' is missing from the definition table ({})".format(str(err), host_name, __file__))

    @staticmethod
    def write_vscode_node(node, json_obj):
        # serialize with stable 4-space indentation so generated files diff cleanly
        node.write(json.dumps(json_obj, indent=4, separators=(',', ': ')))

    def execute(self):
        # restore the environments
        self.restore()
        if not self.all_envs:
            self.load_envs()
        self.load_user_settings()
        self.generate_vscode_project()

    def generate_build_tasks_json(self, vscode_dir):
        """
        Generate the build tasks (task.json) file for Visual Code
        :param vscode_dir: The base folder node to save the task.json file
        """
        # Collect all the specs that are specified in 'specs_to_include_in_project_generation'
        enabled_specs = [spec_name.strip() for spec_name in self.options.specs_to_include_in_project_generation.split(',')]
        # Collect all the enabled platforms
        enabled_platforms = self.get_enabled_target_platform_names()
        task_list = []
        # Prepare the command line for this build task
        lmbr_waf_command = os.path.join('${workspaceFolder}', self.scriptPattern % 'lmbr_waf')
        default_build_set = False
        for platform in enabled_platforms:
            # The top level hierarchy will be the 'filtered' platforms
            if not platform.startswith(self.wafPlatformPrefix):
                # Skip if it doesnt match the platform prefix
                continue
            # Iterate through the specs
            for spec_name in enabled_specs:
                # The second level heirarchy will be based on the spec name
                configurations_for_platform = self.get_supported_configurations(platform)
                for configuration in configurations_for_platform:
                    # The third level heirarchy will be based on the configuration name
                    # Set a 'default' build tasks from the first one that matches the desired default build
                    # configuration
                    if not default_build_set and configuration == GenerateVSCodeWorkspace.default_build_configuration:
                        default_build_set = True
                        is_default_build = True
                    else:
                        is_default_build = False
                    task = collections.OrderedDict()
                    task['label'] = '[{}] {} : ({})'.format(spec_name, configuration, platform)
                    task['type'] = 'shell'
                    task['command'] = lmbr_waf_command
                    task['args'] = ['build_{}_{}'.format(platform, configuration),
                                    '-p',
                                    spec_name]
                    task['problemMatcher'] = {
                        "base": self.problemMatcher,
                        "fileLocation": "absolute"
                    }
                    if is_default_build:
                        # Default build tasks specifically requires another dictionary
                        task['group'] = {
                            'kind': 'build',
                            'isDefault': True
                        }
                    else:
                        task['group'] = 'build'
                    task_list.append(task)
        tasks_json = collections.OrderedDict()
        tasks_json['version'] = TASK_JSON_VERSION
        tasks_json['tasks'] = task_list
        tasks_json_node = vscode_dir.make_node('tasks.json')
        GenerateVSCodeWorkspace.write_vscode_node(tasks_json_node, tasks_json)

    def generate_launch_json(self, vscode_dir):
        """
        Generate the launch configurations based on specific exe's and the enabled game project's game launchers
        :param vscode_dir: The base folder node to save the task.json file
        """
        # Collect the possible launch targets
        launcher_targets = [default_launch_target for default_launch_target in GenerateVSCodeWorkspace.default_launch_targets]
        enabled_game_projects = [game_name.strip() for game_name in self.options.enabled_game_projects.split(',')]
        launcher_targets.extend(enabled_game_projects)
        # Collect the enabled platforms for this host platform
        enabled_platforms = self.get_enabled_target_platform_names()
        launch_configurations = []
        # Follow the same logic as the build tasks to filter the platforms
        for platform in enabled_platforms:
            if not platform.startswith(self.wafPlatformPrefix):
                # Skip if it doesnt match the platform prefix
                continue
            # Iterate through the configurations for the current platform
            configurations_for_platform = self.get_supported_configurations(platform)
            for configuration in configurations_for_platform:
                is_dedicated = '_dedicated' in configuration
                bin_folder = self.get_output_folders(platform, configuration)[0]
                for launcher_target in launcher_targets:
                    # For dedicated platforms, the name of the launcher will be different, so we need to adjust the
                    # name for these configurations. For non-game launcher targets, they do not exist at all,
                    # so dont create them
                    if is_dedicated:
                        if launcher_target in enabled_game_projects:
                            exename = self.exePattern % ('{}Launcher_Server'.format(launcher_target))
                        else:
                            # This is not a game launcher, so for dedicated configurations skip
                            continue
                    else:
                        if launcher_target in enabled_game_projects:
                            exename = self.exePattern % ('{}Launcher'.format(launcher_target))
                        else:
                            exename = self.exePattern % (launcher_target)
                    working_path = os.path.join('${workspaceFolder}', bin_folder.name)
                    program = os.path.join(working_path, exename)
                    launch_config = collections.OrderedDict()
                    launch_config['name'] = '{} ({})'.format(launcher_target, configuration)
                    launch_config['type'] = self.dbgType
                    launch_config['request'] = 'launch'
                    launch_config['program'] = program
                    launch_config['args'] = []
                    launch_config['cwd'] = working_path
                    launch_config['environment'] = []
                    launch_config['externalConsole'] = False
                    launch_configurations.append(launch_config)
        launch_json = collections.OrderedDict()
        launch_json['version'] = LAUNCH_JSON_VERSION
        launch_json['configurations'] = launch_configurations
        launch_json_node = vscode_dir.make_node('launch.json')
        GenerateVSCodeWorkspace.write_vscode_node(launch_json_node, launch_json)

    def generate_settings_json(self, vscode_dir):
        """
        Generate the additional (environment) visual code settings
        :param vscode_dir: The base folder node to save the task.json file
        """
        settings_json = {
            "files.exclude": {
                "Bin*": True,
                "Cache": True,
                "AssetProcessorTemp": True,
                "Solutions": True
            },
            "files.associations": {
                "wscript": "python",
                "*.py": "python"
            },
        }
        settings_json_node = vscode_dir.make_node('settings.json')
        GenerateVSCodeWorkspace.write_vscode_node(settings_json_node, settings_json)

    def generate_vscode_project(self):
        """
        Generate all of the visual code settings
        """
        # ensure that the .vscode dir exists
        vscode_dir = self.srcnode.make_node('.vscode')
        vscode_dir.mkdir()
        self.generate_build_tasks_json(vscode_dir)
        self.generate_launch_json(vscode_dir)
        self.generate_settings_json(vscode_dir)
|
import sys;
import abc;
import math;
import multiprocessing;
import psutil;
import numpy as np;
from scipy.stats import t, f;
import DataHelper;
class LinearRegression:
    """Ordinary/weighted least squares with optional basis-function expansion.

    Exposes model statistics (R^2, Cp, AIC, BIC, F-test, per-coefficient
    t-tests) plus best-subset / stepwise selection and cross-validation
    helpers. Inputs are numpy matrices (np.mat); fit() prepends an
    intercept column automatically.
    """
    __DEFAULT_SIG_LEVEL = 0.05

    @staticmethod
    def calcVIF(X):
        """Variance inflation factor for each column of X."""
        if X is None:
            raise ValueError("matrix X is None")
        return [1 / (1 - LinearRegression().fit(np.delete(X, i, 1), X[:, i]).r2) for i in range(0, X.shape[1])]

    @staticmethod
    def _optimalSubsetsCore(X, y, indices):
        # helper kept as a staticmethod so it can be dispatched to a process pool
        return indices, LinearRegression().fit(X[:, indices], y)

    @staticmethod
    def optimalSubsets(X, y, m=None):
        """Best-subset selection: the lowest-RSS model for each subset size
        (or only size m when given). Result items are (indices, model)."""
        if X is None or y is None:
            raise ValueError("matrix X or vector y is None")
        result = []
        p = X.shape[1]
        if m is not None and (m < 1 or m > p):
            raise ValueError("m must be between 1 and column numbers of X")
        # number of models when m is null: 2^p
        if p <= 14 or m is not None:
            result.extend([min([LinearRegression._optimalSubsetsCore(X, y, indices) for indices in DataHelper.combinations(p, k)], key=lambda item: item[1].rss) for k in (range(1, p + 1) if m is None else range(m, m + 1))])
        else:
            data, models = [], None
            for k in range(1, p + 1):
                data.extend([(X, y, indices) for indices in DataHelper.combinations(p, k)])
            # `object` replaces `np.object`, which was removed from numpy
            data = list(map(tuple, np.array(data, object)[DataHelper.randomArrangement(len(data)), :].tolist()))
            with multiprocessing.Pool(max(1, psutil.cpu_count(False) - 2)) as pool:
                models = pool.starmap(LinearRegression._optimalSubsetsCore, data)
            for k in range(1, p + 1):
                result.append(min([item for item in models if len(item[0]) == k], key=lambda item: item[1].rss))
        # result item format: (indices, model)
        return result

    @staticmethod
    def forwardSelection(X, y):
        """Greedy forward stepwise selection; result items are (indices, model)."""
        if X is None or y is None:
            raise ValueError("matrix X or vector y is None")
        result = []
        p = X.shape[1]
        current, leftover = [], list(range(0, p))
        # number of models: p * (p + 1) / 2
        for k in range(1, p + 1):
            result.append(min([(current + [i], LinearRegression().fit(X[:, current + [i]], y)) for i in leftover], key=lambda item: item[1].rss))
            current = result[len(result) - 1][0]
            leftover.remove(current[len(current) - 1])
        # result item format: (indices, model)
        return result

    @staticmethod
    def backwardSelection(X, y):
        """Greedy backward stepwise selection; result items are (indices, model)."""
        if X is None or y is None:
            raise ValueError("matrix X and vector y is None")
        result = []
        p = X.shape[1]
        leftover = set(range(0, p))
        # number of models: p * (p + 1) / 2
        result.append((list(leftover), LinearRegression().fit(X, y)))
        for k in range(2, p + 1):
            result.append(min([(list(leftover - {i}), LinearRegression().fit(X[:, list(leftover - {i})], y)) for i in leftover], key=lambda item: item[1].rss))
            leftover = set(result[len(result) - 1][0])
        # result item format: (indices, model)
        return result

    @staticmethod
    def crossValidation(X, y, k):
        """k-fold CV over subset sizes; returns the best (indices, model)."""
        if X is None or y is None:
            raise ValueError("matrix X and vector y is None")
        mse = np.mat([list(map(lambda item: item[1].calcMse(testX[:, item[0]], testY), LinearRegression.optimalSubsets(trainX, trainY))) for trainX, trainY, testX, testY in DataHelper.foldOutSampling(X, y, k)])
        return LinearRegression.optimalSubsets(X, y, np.argmin(mse.mean(0).A.flatten()) + 1)[0]

    def __init__(self):
        # all statistics are populated by fit()
        self.__basisFunctions = None
        self.__n = None
        self.__p = None
        self.__beta = None
        self.__sigma = None
        self.__residual = None
        self.__rss = None
        self.__r2 = None
        self.__cp = None
        self.__aic = None
        self.__bic = None
        self.__adjustedR2 = None
        self.__c = None
        self.__allF = None
        self.__allP = None
        self.__betaStd = None
        self.__betaT = None
        self.__betaP = None
        self.__betaValue = None
        self.__sigLevel = None

    def __repr__(self):
        return "y = {0}{1}".format(
            self.__beta[0, 0],
            "".join([" {0} {1} * x{2:.0f}".format("+" if item[0] >= 0 else "-", math.fabs(item[0]), item[1])
                     for item in
                     np.hstack((self.__betaValue, np.mat(range(1, self.__p)).T)).tolist()])
        )

    def __str__(self):
        return "y = β0{0}\r\n{1}\r\n{2}".format(
            "".join(
                [" + β{0:.0f} * x{0:.0f}".format(item) for item in list(range(1, self.__p))]),
            "\r\n".join(
                ["β{0:.0f} = {1}, std = {2}, t-value = {3}, p-value = {4}".format(*item)
                 for item in
                 np.hstack((np.mat(range(0, self.__p)).T, self.__beta, self.__betaStd, self.__betaT, self.__betaP)).tolist()]),
            "σ = {0}, R^2 = {1}, Cp = {2}, AIC = {3}, BIC = {4}, adjusted R^2 = {5}, F-value = {6}, F p-value = {7}".format(self.__sigma, self.__r2, self.__cp, self.__aic, self.__bic, self.__adjustedR2, self.__allF, self.__allP)
        )

    @property
    def beta(self):
        return self.__betaValue

    @property
    def betaP(self):
        return self.__betaP

    @property
    def sigma(self):
        return self.__sigma

    @property
    def residual(self):
        return self.__residual

    @property
    def rss(self):
        return self.__rss

    @property
    def rssDf(self):
        return self.__n - self.__p

    @property
    def mse(self):
        return self.__rss / self.__n

    @property
    def r2(self):
        return self.__r2

    @property
    def cp(self):
        return self.__cp

    @property
    def aic(self):
        return self.__aic

    @property
    def bic(self):
        return self.__bic

    @property
    def adjustedR2(self):
        return self.__adjustedR2

    @property
    def sigLevel(self):
        return self.__sigLevel

    @sigLevel.setter
    def sigLevel(self, value):
        # zero out coefficients that are not significant at the given level
        if self.__betaP is None or self.__allP is None:
            return
        if value is None:
            value = LinearRegression.__DEFAULT_SIG_LEVEL
        self.__sigLevel = value
        self.__betaValue[(self.__betaP >= value).A.flatten(), :] = 0
        if self.__allP >= value:
            self.__betaValue[:, :] = 0

    def __getX(self, X):
        """Expand X through the basis functions (if any) and prepend the intercept column."""
        dataSet = X
        if self.__basisFunctions is not None and any(self.__basisFunctions):
            sets = tuple([item for item in [self.__basisFunctions[j].getX(X[:, j]) if self.__basisFunctions[j] is not None else X[:, j] for j in range(X.shape[1])] if item is not None])
            if len(sets) > 0:
                dataSet = np.hstack(sets)
            else:
                dataSet = np.mat(np.empty((X.shape[0], 0)))
        return np.hstack((np.mat(np.ones((dataSet.shape[0], 1))), dataSet))

    def __getP(self, value, degree):
        # two-sided p-value from the t distribution
        if isinstance(value, np.matrix):
            return np.mat(2 * (1 - t.cdf(np.abs(value.A.flatten()), degree))).T
        else:
            return 2 * (1 - t.cdf(math.fabs(value), degree))

    def __predict(self, X):
        return X * self.__betaValue

    def fit(self, X, y, baseFunctions=None, w=None):
        """Fit (weighted) least squares; w is the weight vector. Returns self."""
        if X is None or y is None:
            raise ValueError("matrix X or vector y is None")
        if baseFunctions is not None and len(baseFunctions) != X.shape[1]:
            raise ValueError("the length of base functions must be equals to column numbers of X")
        if w is not None and w.shape[0] != X.shape[0]:
            raise ValueError("the length of weight vector must be equals to row numbers of X")
        self.__basisFunctions = baseFunctions
        X = self.__getX(X)
        n, p = X.shape
        W = np.diag(w) if w is not None else np.identity(n)
        A = X.T * W * X
        # fall back to the pseudo-inverse when the normal matrix is singular
        if np.linalg.matrix_rank(A, tol=1e-8) == A.shape[0]:
            C = A.I
        else:
            C = np.linalg.pinv(A)
        self.__n = n
        self.__p = p
        self.__beta = C * X.T * W * y
        centralizedY = y - y.mean()
        residual = y - X * self.__beta
        rss = (residual.T * residual)[0, 0]
        tss = (centralizedY.T * centralizedY)[0, 0]
        sigma2 = rss / (n - p)
        self.__sigma = math.sqrt(sigma2)
        self.__residual = residual
        self.__rss = rss
        self.__r2 = 1 - rss / tss if tss != 0 else 0
        self.__cp = (rss + 2 * (p - 1) * sigma2) / n
        # self.__aic = (rss + 2 * (p - 1) * sigma2) / (n * sigma2) + math.log(2 * math.pi * sigma2);
        # self.__bic = (rss + math.log(n) * (p - 1) * sigma2) / (n * sigma2) + math.log(2 * math.pi * sigma2);
        self.__aic = 2 * (p - 1) + n - p + n * math.log(2 * math.pi * sigma2) if sigma2 > 0 else -sys.maxsize
        self.__bic = math.log(n) * (p - 1) + n - p + n * math.log(2 * math.pi * sigma2) if sigma2 > 0 else -sys.maxsize
        self.__adjustedR2 = 1 - (rss / (n - p)) / (tss / (n - 1)) if tss != 0 else 0
        self.__c = C
        self.__betaStd = self.__sigma * np.sqrt(C.diagonal().T)
        self.__betaT = np.divide(self.__beta, self.__betaStd) if self.__sigma != 0 else np.ones_like(self.__beta) * sys.maxsize
        self.__betaP = self.__getP(self.__betaT, n - p)
        self.__betaValue = self.__beta.copy()
        self.__allF = ((tss - rss) / (p - 1) / sigma2 if sigma2 != 0 else sys.maxsize) if p > 1 else 0
        self.__allP = 1 - f.cdf(self.__allF, p - 1, n - p) if p > 1 else 1
        return self

    def predictValue(self, X):
        """Predicted response for new data X."""
        if X is None:
            raise ValueError("matrix X is None")
        return self.__predict(self.__getX(X))

    def predictInterval(self, X, confidence=None, prediction=True):
        """Prediction (or confidence, when prediction=False) intervals:
        columns are (lower, value, upper)."""
        if X is None:
            raise ValueError("matrix X is None")
        if confidence is not None and (confidence <= 0 or confidence >= 1):
            raise ValueError("the confidence must be between 0 and 1")
        X = self.__getX(X)
        alpha = 1 - confidence if confidence is not None else LinearRegression.__DEFAULT_SIG_LEVEL
        tValue = t.ppf(1 - alpha / 2, self.__n - self.__p)
        interval = np.sqrt((1 if prediction else 0) + np.multiply(X * self.__c, X).sum(1)) * self.__sigma * tValue
        value = self.__predict(X)
        return np.mat(np.hstack((value - interval, value, value + interval)))

    def calcRss(self, X, y):
        """Residual sum of squares of this model on (X, y)."""
        if X is None or y is None:
            raise ValueError("matrix X or vector y is None")
        residual = y - self.predictValue(X)
        return (residual.T * residual)[0, 0]

    def calcMse(self, X, y):
        """Mean squared error of this model on (X, y)."""
        if X is None or y is None:
            raise ValueError("matrix X or vector y is None")
        return self.calcRss(X, y) / X.shape[0]
class IBasisFunction(metaclass = abc.ABCMeta):
    # Interface for basis-function expansions consumed by LinearRegression.fit.
    @property
    @abc.abstractmethod
    def df(self):
        # degrees of freedom contributed by this basis (excluding intercept)
        pass;
    @abc.abstractmethod
    def getX(self, x):
        # expand column vector x into its design-matrix columns (None if empty)
        pass;
'''
d
f(x) = Σ βj * x^d
j=0
degree of freedom = d + 1
'''
class PolynomialFunction(IBasisFunction):
    """Polynomial basis x, x^2, ..., x^df; df excludes the intercept."""
    def __init__(self, df):
        if df < 0:
            raise ValueError("df is at least 0");
        self.__d = df;
    @property
    def df(self):
        return self.__d;
    def getX(self, x):
        if x is None:
            raise ValueError("vector x is None");
        if self.__d <= 0:
            return None;
        return DataHelper.vectorPoly(x, self.__d);
class KnottedFunction(IBasisFunction, metaclass = abc.ABCMeta):
    # Base class for basis functions parameterized by knot locations. Either
    # an explicit knot list or a knot count k must be given; with only k, the
    # knots are placed at evenly spaced quantiles of the data on first use.
    def __init__(self, k = None, knots = None):
        if k is None and knots is None:
            raise ValueError("at least one of k and knots cannot be None");
        if k is not None and k < 0:
            raise ValueError("k is at least 0");
        if knots is not None and not isinstance(knots, list):
            raise ValueError("knots must be a list");
        if knots is not None:
            self._K = len(knots);
            self._knots = knots;
        else:
            self._K = k;
            self._knots = None;  # resolved lazily in getX via _findKnots
    @property
    def knots(self):
        return self._knots;
    def _findKnots(self, x):
        # knot j sits at the j/(K+1) quantile of x; np.quantile(..., 0) takes
        # the quantile along axis 0 -- assumes x is a column matrix, TODO confirm
        self._knots = [np.quantile(x, k / (self._K + 1), 0)[0] for k in range(1, self._K + 1)] if self._K > 0 else [];
    @abc.abstractmethod
    def _getX(self, x):
        # subclass hook: expand x into design-matrix columns using self._knots
        pass;
    def getX(self, x):
        if x is None:
            raise ValueError("vector x is None");
        if self._knots is None:
            self._findKnots(x);
        return self._getX(x);
'''
K
f(x) = β0 + Σβk * I(Ck < x <= Ck+1)
k=1
degree of freedom = K + 1
'''
class StepFunction(KnottedFunction):
    """Piecewise-constant (step) basis on knot intervals.

    df excludes the intercept.
    """
    def __init__(self, df = None, knots = None):
        if df is not None and df < 0:
            # was `raise Value(...)`: `Value` is undefined and would raise
            # NameError instead of the intended ValueError
            raise ValueError("df is at least 0");
        super().__init__(df, knots);
    @property
    def df(self):
        return self._K;
    def _getX(self, x):
        # one indicator column per interval; `- 0` casts the booleans to ints
        return np.hstack(tuple([(np.logical_and(x > self._knots[i], x <= self._knots[i + 1]) if i < len(self._knots) - 1 else x > self._knots[i]) - 0 for i in range(0, self._K)])) if self._K > 0 else None;
'''
M-1 K
f(x) = Σ βj * x^(j-1) + Σ θk * (x-ξk)+^(M-1)
j=0 k=1
f, f', f'', ... d^(M-2)f is continuous at ξk, k = 1, 2, ..., K
degree of freedom = K + M
the default is cubic spline with M = 4.
'''
class RegressionSplineFunction(KnottedFunction):
    # df excludes intercept
    def __init__(self, df = None, m = 4, knots = None):
        if m < 1:
            raise ValueError("m is at least 1");
        # K = df + 1 - m knots (clamped at 0), so total df = K + (m - 1)
        # matches the requested df when feasible
        super().__init__(max(df + 1 - m, 0) if df is not None else None, knots);
        self.__d = m - 1;  # polynomial degree of the spline pieces
    @property
    def df(self):
        return self._K + self.__d;
    def _getX(self, x):
        # truncated-power basis: plain polynomial terms plus (x - knot)+^d columns
        if self._K > 0:
            Y = np.hstack(tuple([DataHelper.truncatedPower(x, self._knots[k], self.__d) for k in range(0, self._K)]));
            return np.hstack((DataHelper.vectorPoly(x, self.__d), Y)) if self.__d > 0 else Y;
        else:
            return DataHelper.vectorPoly(x, self.__d) if self.__d > 0 else None;
'''
K-2
f = β0 + β1x + Σ θj * (ξK - ξj) * [d(j, x) - d(K-1, x)]
j=1
d(j, x) = [(x - ξj)+^3 - (x - ξK)+^3] / (ξK - ξj)
f''(x) = 0, when x ∈ (-∞, ξ1] ∪ [ξK, ∞)
degree of freedom = K
when K = 1 and 2, f(x) = β0 + β1x.
'''
class NatureCubicSplineFunction(KnottedFunction):
    # df excludes intercept
    def __init__(self, df = None, knots = None):
        # K = df + 1 knots, so the basis contributes df = K - 1 columns
        super().__init__(max(df + 1, 0) if df is not None else None, knots);
    @property
    def df(self):
        return self._K - 1;
    def __d(self, k, x):
        # d(k, x) term from the header formula (truncated cubic differences)
        return (DataHelper.truncatedPower(x, self._knots[k], 3) - DataHelper.truncatedPower(x, self._knots[self._K - 1], 3)) / (self._knots[self._K - 1] - self._knots[k]);
    def _getX(self, x):
        # with fewer than 3 knots the spline degenerates to the linear term x
        if self._K > 2:
            dK_1 = self.__d(self._K - 2, x);
            return np.hstack(tuple([x] + [(self._knots[self._K - 1] - self.knots[k]) * (self.__d(k, x) - dK_1) for k in range(0, self._K - 2)]));
        else:
            return x;
|
<reponame>webdevhub42/Lambda<filename>0-notes/job-search/SamplesDSAlgos/data_structures/datastructures-hashtable.py
# HASH TABLE
# array with elements indexed by hashed key
# associative arrays and dictionaries
# objects
# caches (memcached)
# dynamic programming, memoization
# send key through hashing function (MD5, SHA1, etc.), which converts to addressable space (index)
# powerful for maps because now our key points to where our object is being stored
# powerful for sets because we can check where if anything exists at that memory address and, if
# so, then it exists; if not, then key is not in set
# no look-up cost when deleting or adding
# not useful for something with an order
# need sufficiently large amount of memory to store all objects without collisions
# can balloon quickly
# need good hashing algorithm that spits out viable table address
# needs several qualities:
# idempotent (critical), good distribution of values, performant
# key/value data storage & retrieval
# data stored as array
# key converted to integer via hash functino
# hashed key converted to array index via modulo function
# hash function: one-way mapping from arbitrary data to fixed data size & type
# different hash functions with different attributes:
# deterministic
# uniform distribution
# non-invertible
# continuous versus non-continuous
# hash-table collision: when two keys hash to same index
# collisions are unavoidable
# open addressing & linked-list chaining to avoid collisions
# linked-list chaining: elements in hash table are stored as linked lists
# when retrieving a value, traverse down linked list until you find matching key
# hash-table resizing: can occur when load factor passes certain threshhold
# create new hash table with double capacity
# copy elements from old to new one at a time
# resizing = O(n) & occurs at O(log (n)) frequency
# load factor: number of entries / hash-table capacity
# time complexity: Avg | Worst
# Access: N/A | N/A
# Search: O(1) | O(n)
# Insertion: O(1) | O(n)
# Deletion: O(1) | O(n)
# space complexity: O(n)
class HashTableEntry:
    """
    Linked List hash table key/value pair.

    A node in a singly linked list; used for hash-bucket chaining.
    """
    def __init__(self, key, value):
        self.key = key
        self.value = value
        self.next = None  # next entry in the same bucket's chain

    def __repr__(self):
        # Aids debugging of bucket chains.
        return "HashTableEntry(%r, %r)" % (self.key, self.value)


# Hash table can't have fewer than this many slots
MIN_CAPACITY = 8


class HashTable:
    """
    A hash table with `capacity` buckets that accepts string keys.

    Hash collisions are handled with linked-list chaining: ``storage[i]``
    holds the head ``HashTableEntry`` of the chain for bucket ``i``.
    (The original implementation stored every entry on a single
    ``self.head`` chain, so the buckets were never used; ``put``/``get``/
    ``delete``/``resize`` below implement real chaining.)
    """

    def __init__(self, capacity):
        self.capacity = capacity
        self.storage = [None] * capacity
        self.entry_count = 0  # number of key/value pairs currently stored

    # return number of slots in hash table
    def get_num_slots(self):
        """Return the number of slots (buckets) in the table."""
        self.length = len(self.storage)  # attribute kept for backward compatibility
        return self.length

    # return the load factor for this hash table.
    def get_load_factor(self):
        """Return entries / capacity (the definition in the file header).

        The original returned len(storage) / capacity, which is always 1.0.
        """
        return self.entry_count / self.capacity

    # adjust this hash table's load factor
    def adjust_load_factor(self):
        """Grow (> 0.7) or shrink (< 0.2, floored at MIN_CAPACITY) the table."""
        load_factor = self.get_load_factor()
        if load_factor > 0.7:
            # automatically rehash the table to double its previous size
            # (the original passed an extra `self` to resize()).
            self.resize(self.capacity * 2)
        elif load_factor < 0.2:
            # halve, down to a minimum of MIN_CAPACITY slots; // keeps the
            # capacity an int (the original's / produced a float).
            self.resize(max(MIN_CAPACITY, self.capacity // 2))

    # hash data FNV-1 Hash, 64-bit
    def fnv1(self, key):
        """
        FNV-1 Hash, 64-bit.

        algorithm fnv-1 is
            hash := FNV_offset_basis
            for each byte_of_data to be hashed
                hash := hash x FNV_prime   (truncated to 64 bits)
                hash := hash XOR byte_of_data
            return hash

        The original raised the hash to the power of each byte, which is
        not FNV-1 and produced astronomically large numbers.
        """
        FNV_offset_basis = 14695981039346656037
        FNV_prime = 1099511628211
        hash = FNV_offset_basis
        for byte in key.encode():
            hash = (hash * FNV_prime) & 0xFFFFFFFFFFFFFFFF  # keep 64-bit
            hash ^= byte
        return hash

    # hash data DJB2 hash, 32-bit
    def djb2(self, key):
        """
        DJB2 hash.

        Grows as an unbounded Python int; hash_index() reduces it modulo
        the capacity, so no masking is needed.
        """
        hash = 5381
        for char in key:
            hash = (hash * 33) + ord(char)
        return hash

    # return hash table index for submitted key
    def hash_index(self, key):
        """Map a key to a bucket index within the current capacity."""
        # return self.fnv1(key) % self.capacity
        return self.djb2(key) % self.capacity

    # add value w/ key to hash table
    def put(self, key, value):
        """
        Store the value with the given key, replacing any existing value.

        Hash collisions are handled with linked-list chaining.  Returns the
        entry node holding the pair.
        """
        index = self.hash_index(key)
        node = self.storage[index]
        while node:
            if node.key == key:
                node.value = value  # key already present: overwrite in place
                return node
            node = node.next
        new_node = HashTableEntry(key, value)
        new_node.next = self.storage[index]  # prepend to the bucket's chain
        self.storage[index] = new_node
        self.entry_count += 1
        return new_node

    # delete value w/ key from hash table
    def delete(self, key):
        """
        Remove the value stored with the given key.

        Prints a warning if the key is not found (original contract).
        """
        index = self.hash_index(key)
        node = self.storage[index]
        prev = None
        while node:
            if node.key == key:
                if prev is None:
                    self.storage[index] = node.next  # unlink chain head
                else:
                    prev.next = node.next
                self.entry_count -= 1
                return None
            prev, node = node, node.next
        print("No keys found for that value.")
        return None

    # get value w/ key to hash table
    def get(self, key):
        """
        Retrieve the value stored with the given key.

        Returns None if the key is not found.
        """
        node = self.storage[self.hash_index(key)]
        while node:
            if node.key == key:
                return node.value
            node = node.next
        return None

    # resize hash table
    def resize(self, new_capacity):
        """Rehash every entry into a table with `new_capacity` slots."""
        old_storage = self.storage
        self.capacity = int(new_capacity)
        self.storage = [None] * self.capacity
        self.entry_count = 0  # put() below re-counts the entries
        for node in old_storage:
            # walk each bucket's chain and re-insert under the new capacity
            while node:
                self.put(node.key, node.value)
                node = node.next
if __name__ == "__main__":
    # Smoke test: store 12 lines of "Jabberwocky", read them back, double
    # the table size, and verify the data survives the resize.
    ht = HashTable(8)
    ht.put("line_1", "'Twas brillig, and the slithy toves")
    ht.put("line_2", "Did gyre and gimble in the wabe:")
    ht.put("line_3", "All mimsy were the borogoves,")
    ht.put("line_4", "And the mome raths outgrabe.")
    ht.put("line_5", '"Beware the Jabberwock, my son!')
    ht.put("line_6", "The jaws that bite, the claws that catch!")
    ht.put("line_7", "Beware the Jubjub bird, and shun")
    ht.put("line_8", 'The frumious Bandersnatch!"')
    ht.put("line_9", "He took his vorpal sword in hand;")
    ht.put("line_10", "Long time the manxome foe he sought--")
    ht.put("line_11", "So rested he by the Tumtum tree")
    ht.put("line_12", "And stood awhile in thought.")
    print("")
    # Test storing beyond capacity (12 entries in an 8-slot table forces chaining)
    for i in range(1, 13):
        print(ht.get(f"line_{i}"))
    # Test resizing
    old_capacity = ht.get_num_slots()
    ht.resize(ht.capacity * 2)
    new_capacity = ht.get_num_slots()
    print(f"\nResized from {old_capacity} to {new_capacity}.\n")
    # Test if data intact after resizing
    for i in range(1, 13):
        print(ht.get(f"line_{i}"))
    print("")
|
from django.test import TestCase
from .models import Neighbourhood,Profile,Post,Business
# Create your tests here.
#profile test
class ProfileTestClass(TestCase):
    """Unit tests for the Profile model's helper methods."""

    # set up method
    def setUp(self):
        # Fixed primary key keeps the fixture deterministic.
        self.naiyoma = Profile(id=9000, username='naiyoma')

    # testing instance
    def test_instance(self):
        self.assertTrue(isinstance(self.naiyoma, Profile))

    # testing save method
    def test_save_method(self):
        self.naiyoma.save_profile()
        profiles = Profile.objects.all()
        self.assertTrue(len(profiles) > 0)

    # testing delete method
    def test_delete_method(self):
        self.naiyoma.save_profile()
        self.naiyoma.delete_profile()
        profile = Profile.objects.all()
        # After deleting the only saved profile the table must be empty.
        # (The original asserted `> 0`, which can only pass if delete fails.)
        self.assertTrue(len(profile) == 0)

    # testing update method
    def test_update_metod(self):
        self.naiyoma.save_profile()
        self.naiyoma.update_description()
        profile = Profile.objects.all()
        self.assertTrue(len(profile) > 0)

    # testing profile creation
    def test_creation_method(self):
        self.naiyoma.create_profile()
        profile = Profile.objects.all()
        self.assertTrue(len(profile) > 0)
class PostTestClass(TestCase):
    """Unit tests for the Post model's helper methods."""

    # set up method
    def setUp(self):
        self.image = Post(image='cool')

    # testing instance
    def test_instance(self):
        self.assertTrue(isinstance(self.image, Post))

    # testing save method
    def test_save_method(self):
        self.image.save_image()
        image = Post.objects.all()
        self.assertTrue(len(image) > 0)

    # testing delete method
    def test_delete_method(self):
        self.image.save_image()
        self.image.delete_image()
        image = Post.objects.all()
        # After deleting the only saved post the table must be empty.
        # (The original asserted `> 0`, which can only pass if delete fails.)
        self.assertTrue(len(image) == 0)

    # testing caption update
    def test_update_metod(self):
        self.image.save_image()
        self.image.update_caption()
        image = Post.objects.all()
        self.assertTrue(len(image) > 0)

    # testing post creation
    def test_creation_method(self):
        self.image.create_post()
        post = Post.objects.all()
        self.assertTrue(len(post) > 0)
class BusinessTestClass(TestCase):
    """Unit tests for the Business model's helper methods."""

    # set up method
    def setUp(self):
        self.business = Business(id=9000, business_name='naiyoma')

    # testing instance
    def test_instance(self):
        self.assertTrue(isinstance(self.business, Business))

    # testing save method
    def test_save_method(self):
        self.business.save_business()
        business = Business.objects.all()
        self.assertTrue(len(business) > 0)

    # testing delete method
    def test_delete_method(self):
        self.business.save_business()
        self.business.delete_business()
        business = Business.objects.all()
        # After deleting the only saved business the table must be empty.
        # (The original asserted `> 0`, which can only pass if delete fails.)
        self.assertTrue(len(business) == 0)

    # testing update method
    def test_update_metod(self):
        self.business.save_business()
        self.business.update_business()
        business = Business.objects.all()
        self.assertTrue(len(business) > 0)

    # #testing business creation (kept disabled as in the original)
    # def test_creation_method(self):
    #     self.business.create_business()
    #     business = Business.objects.all()
    #     self.assertTrue(len(business) > 0)
#testing neighbourhood
class NeighbourhoodTestClass(TestCase):
    """Unit tests covering the Neighbourhood model helpers."""

    def setUp(self):
        # Deterministic fixture with a fixed primary key.
        self.neighbourhood = Neighbourhood(id=9000, neighbourhood_name='naiyoma')

    def test_instance(self):
        # The fixture must be a Neighbourhood instance.
        self.assertTrue(isinstance(self.neighbourhood, Neighbourhood))

    def test_save_method(self):
        # Saving should make the record visible in the table.
        self.neighbourhood.save_neighbourhood()
        saved = Neighbourhood.objects.all()
        self.assertTrue(len(saved) > 0)

    def test_delete_method(self):
        # Deleting the only saved record should leave the table empty.
        self.neighbourhood.save_neighbourhood()
        self.neighbourhood.delete_neighbourhood()
        remaining = Neighbourhood.objects.all()
        self.assertTrue(len(remaining) < 1)

    def test_update_metod(self):
        # Updating must keep the record present.
        self.neighbourhood.save_neighbourhood()
        self.neighbourhood.update_neighbourhood()
        updated = Neighbourhood.objects.all()
        self.assertTrue(len(updated) > 0)

    def test_creation_method(self):
        # create_neighbourhood should persist a record.
        self.neighbourhood.create_neighbourhood()
        created = Neighbourhood.objects.all()
        self.assertTrue(len(created) > 0)
<gh_stars>1-10
"""
Main function to build PHIQnet.
"""
from image_quality.layers.fusion import fusion_layer, no_fusion
from backbone.ResNest import ResNest
from tensorflow.keras.layers import Input, Dense, Average, GlobalAveragePooling2D, Concatenate
from tensorflow.keras.models import Model
from image_quality.models.prediction_model_contrast_sensitivity import channel_spatial_attention
from backbone.resnet50 import ResNet50
from backbone.resnet_family import ResNet18
from backbone.resnet_feature_maps import ResNet152v2, ResNet152
from backbone.vgg16 import VGG16
from backbone.densenet import DenseNet121
from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2
import tensorflow as tf
def phiq_net(n_quality_levels, input_shape=(None, None, 3), naive_backbone=False, backbone='resnet50', feature_fusion=True,
             attention_module=True):
    """
    Build PHIQnet: backbone -> (optional) feature fusion neck -> per-level heads.

    :param n_quality_levels: 1 for MOS prediction and 5 for score distribution
    :param input_shape: image input shape; keep as unspecified (None, None, 3) for variable-size input
    :param naive_backbone: flag to use backbone only, i.e., without neck and head, if set to True
    :param backbone: backbone networks (resnet50/18/152v2, resnest, vgg16, etc.)
    :param feature_fusion: flag to use or not feature fusion
    :param attention_module: flag to use or not attention module
    :return: PHIQnet model
    """
    inputs = Input(shape=input_shape)
    # Default: no classification head, expose intermediate feature maps for the neck.
    n_classes = None
    return_feature_maps = True
    if naive_backbone:
        # Backbone-only mode: single regression output, no feature maps.
        n_classes = 1
        return_feature_maps = False
    fc_activation = None
    verbose = False
    if backbone == 'resnest50':
        backbone_model = ResNest(verbose=verbose,
                                 n_classes=n_classes, dropout_rate=0, fc_activation=fc_activation,
                                 blocks_set=[3, 4, 6, 3], radix=2, groups=1, bottleneck_width=64, deep_stem=True,
                                 stem_width=32, avg_down=True, avd=True, avd_first=False,
                                 return_feature_maps=return_feature_maps).build(inputs)
    elif backbone == 'resnest34':
        backbone_model = ResNest(verbose=verbose,
                                 n_classes=n_classes, dropout_rate=0, fc_activation=fc_activation,
                                 blocks_set=[3, 4, 6, 3], radix=2, groups=1, bottleneck_width=64, deep_stem=True,
                                 stem_width=16, avg_down=True, avd=True, avd_first=False, using_basic_block=True,
                                 return_feature_maps=return_feature_maps).build(inputs)
    elif backbone == 'resnest18':
        backbone_model = ResNest(verbose=verbose,
                                 n_classes=n_classes, dropout_rate=0, fc_activation=fc_activation,
                                 blocks_set=[2, 2, 2, 2], radix=2, groups=1, bottleneck_width=64, deep_stem=True,
                                 stem_width=16, avg_down=True, avd=True, avd_first=False, using_basic_block=True,
                                 return_feature_maps=return_feature_maps).build(inputs)
    elif backbone == 'resnet50':
        backbone_model = ResNet50(inputs,
                                  return_feature_maps=return_feature_maps)
    elif backbone == 'resnet18':
        backbone_model = ResNet18(input_tensor=inputs,
                                  weights=None,
                                  include_top=False)
    elif backbone == 'resnet152v2':
        backbone_model = ResNet152v2(inputs)
    elif backbone == 'resnet152':
        backbone_model = ResNet152(inputs)
    elif backbone == 'vgg16':
        backbone_model = VGG16(inputs)
    elif backbone == 'densnet121':
        backbone_model = DenseNet121(inputs, return_feature_maps=return_feature_maps)
    else:
        raise NotImplementedError
    if naive_backbone:
        backbone_model.summary()
        return backbone_model
    # Multi-scale feature maps from the backbone (C2 shallowest .. C5 deepest).
    C2, C3, C4, C5 = backbone_model.outputs
    pyramid_feature_size = 256
    if feature_fusion:
        fpn_features = fusion_layer(C2, C3, C4, C5, feature_size=pyramid_feature_size)
    else:
        fpn_features = no_fusion(C2, C3, C4, C5, feature_size=pyramid_feature_size)
    # One prediction head per pyramid level; the heads are averaged below.
    PF = []
    for i, P in enumerate(fpn_features):
        if attention_module:
            PF.append(channel_spatial_attention(P, n_quality_levels, 'P{}'.format(i)))
        else:
            outputs = GlobalAveragePooling2D(name='avg_pool_{}'.format(i))(P)
            if n_quality_levels > 1:
                # Score-distribution head: softmax over quality levels.
                outputs = Dense(n_quality_levels, activation='softmax', name='fc_prediction_{}'.format(i))(outputs)
            else:
                # Single-MOS head: linear regression output.
                outputs = Dense(n_quality_levels, activation='linear', name='fc_prediction_{}'.format(i))(outputs)
            PF.append(outputs)
    outputs = Average(name='PF_average')(PF)
    model = Model(inputs=inputs, outputs=outputs)
    model.summary()
    return model
if __name__ == '__main__':
    # Pin execution to the first visible GPU.
    gpus = tf.config.experimental.list_physical_devices('GPU')
    tf.config.experimental.set_visible_devices(gpus[0], 'GPU')
    # Alternative input resolutions kept for experimentation:
    # input_shape = [None, None, 3]
    input_shape = [768, 1024, 3]
    # input_shape = [500, 500, 3]
    # model = phiq_net(n_quality_levels=5, input_shape=input_shape, backbone='resnet152v2')
    model = phiq_net(n_quality_levels=5, input_shape=input_shape, backbone='resnet50')
    # model = phiq_net(n_quality_levels=5, input_shape=input_shape, backbone='vgg16')
    # model = adiq_net(n_quality_levels=5, input_shape=input_shape, backbone='resnet18', attention_module=True)
|
<filename>src/uproot/behaviors/TProfile3D.py
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE
"""
This module defines the behavior of ``TProfile3D``.
"""
import numpy
import uproot
import uproot.behaviors.TH3
import uproot.behaviors.TProfile
from uproot.behaviors.TH1 import boost_axis_metadata, boost_metadata
class TProfile3D(uproot.behaviors.TProfile.Profile):
    """
    Behaviors for three-dimensional profiles: ROOT's ``TProfile3D``.
    """

    no_inherit = (uproot.behaviors.TH3.TH3,)

    @property
    def axes(self):
        """The x, y, and z axes as a 3-tuple."""
        return (self.member("fXaxis"), self.member("fYaxis"), self.member("fZaxis"))

    def axis(self, axis):
        """Return one axis, selected by index (0/1/2, negatives count from
        the end) or by name ("x"/"y"/"z")."""
        if axis == 0 or axis == -3 or axis == "x":
            return self.member("fXaxis")
        elif axis == 1 or axis == -2 or axis == "y":
            return self.member("fYaxis")
        elif axis == 2 or axis == -1 or axis == "z":
            return self.member("fZaxis")
        else:
            raise ValueError(
                "axis must be 0 (-3), 1 (-2), 2 (-1) or 'x', 'y', 'z' for a TProfile3D"
            )

    @property
    def weighted(self):
        """True if the profile was filled with weights.

        Fixed: the original returned the negated condition and applied
        ``len()`` to ``fNcells`` (an integer bin count), which raised
        TypeError whenever ``fBinSumw2`` was present.
        """
        fBinSumw2 = self.member("fBinSumw2", none_if_missing=True)
        return fBinSumw2 is not None and len(fBinSumw2) == self.member("fNcells")

    def counts(self, flow=False):
        """Effective entry counts per bin; include under/overflow when ``flow``."""
        fBinEntries = numpy.asarray(self.member("fBinEntries"))
        out = uproot.behaviors.TProfile._effective_counts_1d(
            fBinEntries.reshape(-1),
            numpy.asarray(self.member("fBinSumw2")).reshape(-1),
            self.member("fNcells"),
        )
        out = out.reshape(fBinEntries.shape)
        if flow:
            return out
        else:
            # Strip the under/overflow bins on every axis.
            return out[1:-1, 1:-1, 1:-1]

    def values(self, flow=False):
        """Mean profile values as an (x, y, z)-indexed array (cached)."""
        if hasattr(self, "_values"):
            values = self._values
        else:
            (root_cont,) = self.base(uproot.models.TArray.Model_TArray)
            root_cont = numpy.asarray(root_cont, dtype=numpy.float64)
            values = uproot.behaviors.TProfile._values_1d(
                numpy.asarray(self.member("fBinEntries")).reshape(-1),
                root_cont.reshape(-1),
            )
            xaxis_fNbins = self.member("fXaxis").member("fNbins")
            yaxis_fNbins = self.member("fYaxis").member("fNbins")
            zaxis_fNbins = self.member("fZaxis").member("fNbins")
            # ROOT stores cells z-major; transpose to (x, y, z) order.
            # "+ 2" accounts for the under/overflow bins on each axis.
            values = numpy.transpose(
                values.reshape(zaxis_fNbins + 2, yaxis_fNbins + 2, xaxis_fNbins + 2)
            )
            self._values = values
        if flow:
            return values
        else:
            return values[1:-1, 1:-1, 1:-1]

    def _values_errors(self, flow, error_mode):
        """Return (values, errors) under ``error_mode``; both cached per mode."""
        attr = "_errors" + uproot.behaviors.TProfile._error_mode_str(error_mode)
        if hasattr(self, attr):
            values = self._values
            errors = getattr(self, attr)
        else:
            (root_cont,) = self.base(uproot.models.TArray.Model_TArray)
            root_cont = numpy.asarray(root_cont, dtype=numpy.float64)
            fSumw2 = self.member("fSumw2", none_if_missing=True)
            if fSumw2 is not None:
                fSumw2 = numpy.asarray(fSumw2).reshape(-1)
            values, errors = uproot.behaviors.TProfile._values_errors_1d(
                error_mode,
                numpy.asarray(self.member("fBinEntries")).reshape(-1),
                root_cont.reshape(-1),
                fSumw2,
                self.member("fNcells"),
                numpy.asarray(self.member("fBinSumw2")).reshape(-1),
            )
            xaxis_fNbins = self.member("fXaxis").member("fNbins")
            yaxis_fNbins = self.member("fYaxis").member("fNbins")
            zaxis_fNbins = self.member("fZaxis").member("fNbins")
            # Same z-major -> (x, y, z) reordering as in values().
            values = numpy.transpose(
                values.reshape(zaxis_fNbins + 2, yaxis_fNbins + 2, xaxis_fNbins + 2)
            )
            errors = numpy.transpose(
                errors.reshape(zaxis_fNbins + 2, yaxis_fNbins + 2, xaxis_fNbins + 2)
            )
            self._values = values
            setattr(self, attr, errors)
        if flow:
            return values, errors
        else:
            return values[1:-1, 1:-1, 1:-1], errors[1:-1, 1:-1, 1:-1]

    def to_boost(self, metadata=boost_metadata, axis_metadata=boost_axis_metadata):
        raise NotImplementedError("FIXME @henryiii: this one kinda doesn't exist")
|
<filename>vect/vector.py
import showRepresentation
import copy
class array:
    """
    Array class

    A 1-D vector wrapping a plain list.  Binary arithmetic broadcasts
    cyclically: the result has the length of the longer operand and the
    shorter operand repeats (its index is taken modulo its length).
    """
    def __init__(self, v):
        # v: backing list; l caches its length for the modulo broadcasting.
        self.vector = v
        self.l = len(v)
    #Show vector (raw, REPL-style representation)
    def __repr__(self):
        return showRepresentation.vector(self, True)
    # (with print)
    def __str__(self):
        return showRepresentation.vector(self)
    #Get and set delegate to the backing list
    def __getitem__(self,index):
        return self.vector[index]
    def __setitem__(self,index,value):
        self.vector[index] = value
        return
    #Length of vector
    def __len__(self):
        return len(self.vector)
    #Math Operations: element-wise primitives stored as class attributes;
    #the `operations` decorator factory below reads them while the class
    #body is still executing.
    add = lambda x,y: x+y
    sub = lambda x,y: x-y
    mul = lambda x,y: x*y
    div = lambda x,y: x/y
    fdiv = lambda x,y: x//y
    mod = lambda x,y: x%y
    exp = lambda x,y: x**y
    def operations(ope, reverse = False):
        # Decorator factory (runs at class-definition time, hence no `self`):
        # wraps a dunder so it applies `ope` element-wise with cyclic
        # broadcasting; `reverse` swaps operand order for the reflected
        # (r-prefixed) operators.
        def function(func):
            def wrapper(self, other):
                r = []
                if type(other) != array:
                    other = array([other])  # promote scalars to 1-element arrays
                for i in range(max(len(self.vector),len(other.vector))):
                    if reverse:
                        r.append(ope(other.vector[i%other.l], self.vector[i%self.l]))
                    else:
                        r.append(ope(self.vector[i%self.l],other.vector[i%other.l]))
                return array(r)
            return wrapper
        return function
    @operations(add)
    def __add__(self, other):
        return
    @operations(add, True)
    def __radd__(self, other):
        return
    @operations(sub)
    def __sub__(self, other):
        return
    @operations(sub, True)
    def __rsub__(self, other):
        return
    @operations(mul)
    def __mul__(self, other):
        return
    @operations(mul, True)
    def __rmul__(self, other):
        return
    @operations(div)
    def __truediv__(self, other):
        return
    @operations(div, True)
    def __rtruediv__(self, other):
        return
    @operations(fdiv)
    def __floordiv__(self, other):
        return
    @operations(fdiv, True)
    def __rfloordiv__(self, other):
        return
    @operations(exp)
    def __pow__(self, other):
        return
    @operations(exp, True)
    def __rpow__(self, other):
        return
    @operations(mod)
    def __mod__(self, other):
        return
    @operations(mod, True)
    def __rmod__(self, other):
        return
    def __divmod__(self, other):
        # NOTE(review): uses true division, so divmod(a, b) here is
        # (a / b, a % b), not the floor-division pair Python's built-in
        # divmod returns -- confirm this is intended.
        division = []
        module = []
        if type(other) != array:
            other = array([other])
        for i in range(max(len(self.vector),len(other.vector))):
            division.append(array.div(self.vector[i%self.l],other.vector[i%other.l]))
            module.append(array.mod(self.vector[i%self.l],other.vector[i%other.l]))
        return [array(division), array(module)]
    #Matrix Multiplication
    def __matmul__(self, other):
        # NOTE(review): references self.w / other.w, which array never
        # defines -- any call raises AttributeError.  The body looks copied
        # from a 2-D matrix type; confirm whether this operator is ever used.
        if self.w != other.l:
            raise Exception("The columns in the first matrix must be the same as the rows in the second")
        product = [[0 for i in range(other.w)] for j in range(self.l)]
        for i in range(self.l):
            for j in range(other.w):
                for k in range(self.w):
                    product[i][j] = product[i][j] + self.vector[i][k] * other.vector[k][j]
        return product
    def __round__(self, ndigits = 2):
        # Returns a plain list, not an array.
        return [round(i,ndigits) for i in self]
    def __lshift__(self, other):
        # Rotate left by `other` positions; returns a plain list.
        if type(other) != int:
            raise Exception("This function only works with an int")
        else:
            return self.vector[other%self.l:]+self.vector[:other%self.l]
    def __rshift__(self,other):
        # Rotate right by `other` positions; returns a plain list.
        if type(other) != int:
            raise Exception("This function only works with an int")
        else:
            return self.vector[self.l-other%self.l:]+self.vector[:self.l-other%self.l]
    def __abs__(self):
        # Element-wise absolute value as a new array.
        return array([abs(self[i]) for i in range(self.l)])
    def __version__(self):
        return "This library was created for ARINS project, January 2021"
####################
#START MATRIX CLASS#
####################
class matrix:
    """Matrix class: a list-of-rows 2-D matrix with element-wise and
    linear-algebra operations (det, transpose, adjugate, inverse)."""
    def __init__(self, mat):
        self.mat = mat
        self.rows = self.len()[0]
        self.columns = self.len()[1]
    # Get and set delegate to the underlying row list.
    def __getitem__(self, index):
        return self.mat[index]
    def __setitem__(self, index, value):
        self.mat[index] = value
    def __len__(self):
        # Ambiguous for a 2-D matrix, so deliberately disabled.
        raise Exception("This function is unavailable, use insted len method")
    def len(self):
        """Return the shape as (rows, columns)."""
        return (len(self.mat), len(self.mat[0]))
    def col(self, ind):
        """Return column `ind` (int) or an array of columns (list/range); indices wrap."""
        if type(ind) == int:
            return [self.mat[i][ind % self.columns] for i in range(self.rows)]
        if type(ind) == list or type(ind) == range:
            return array([[self.mat[i][j % self.columns] for i in range(self.rows)] for j in ind])
    def row(self, ind):
        """Return row `ind` (int) or an array of rows (list/range); indices wrap."""
        if type(ind) == int:
            return self.mat[ind % self.len()[0]]
        if type(ind) == list or type(ind) == range:
            return array([self.mat[i % self.len()[0]] for i in ind])
    def __str__(self):
        return showRepresentation.matrix(self)
    def __repr__(self):
        return showRepresentation.matrix(self, True)
    # Element-wise primitives, read by the decorator factories below while
    # the class body is still executing.
    adds = lambda x, y: x + y
    subs = lambda x, y: x - y
    muls = lambda x, y: x * y
    divs = lambda x, y: x / y
    fdivs = lambda x, y: x // y
    mods = lambda x, y: x % y
    exps = lambda x, y: x ** y
    def AddOperations(ope):
        """Decorator factory: apply `ope` element-wise to two same-shape matrices."""
        def function(func):
            def wrapper(self, other):
                if self.len() != other.len():
                    raise Exception("Two matrices must have the same shape to be added, if you want to add an int or a float to all elements use the sum method")
                else:
                    return matrix([[ope(self.mat[i][j], other.mat[i][j]) for j in range(self.columns)] for i in range(self.rows)])
            return wrapper
        return function
    def ProductOperations(ope):
        """Decorator factory: matrix product accumulating `ope(a, b)` terms,
        each result cell rounded to 4 decimals."""
        def function(func):
            def wrapper(self, other):
                if self.columns != other.rows:
                    raise Exception("The columns in the first matrix must be the same as the rows in the second")
                product = [[0 for i in range(other.columns)] for j in range(self.rows)]
                for i in range(self.rows):
                    for j in range(other.columns):
                        for k in range(self.columns):
                            product[i][j] = product[i][j] + ope(self.mat[i][k], other.mat[k][j])
                        product[i][j] = round(product[i][j], 4)
                return matrix(product)
            return wrapper
        return function
    @AddOperations(adds)
    def __add__(self, other):
        return
    @AddOperations(subs)
    def __sub__(self, other):
        return
    @ProductOperations(muls)
    def __mul__(self, other):
        return
    @ProductOperations(muls)
    def __matmul__(self, other):
        return
    @AddOperations(mods)
    def __mod__(self, other):
        return
    @AddOperations(exps)
    def __pow__(self, other):
        return
    def __truediv__(self, other):
        """A / B is computed as A * B^-1."""
        return matrix(self.mat) * other.inv()
    def operations(ope):
        """Decorator factory: apply `ope(element, scalar)` to every element.

        Fixed: the original indexed self.mat[j][i] while iterating j over the
        column count, which transposed square matrices and raised IndexError
        on non-square ones.
        """
        def function(func):
            def wrapper(self, value):
                return matrix([[round(ope(self.mat[i][j], value), 4) for j in range(self.columns)] for i in range(self.rows)])
            return wrapper
        return function
    @operations(adds)
    def sum(self, value):
        return
    @operations(subs)
    def sub(self, value):
        return
    @operations(muls)
    def mul(self, value):
        return
    @operations(divs)
    def div(self, value):
        return
    @operations(fdivs)
    def fdiv(self, value):
        return
    @operations(mods)
    def mod(self, value):
        return
    @operations(exps)
    def pow(self, value):
        return
    def det(self):
        """Determinant via Gaussian elimination, rounded to 4 decimals.

        NOTE(review): no pivoting -- a zero on the diagonal raises
        ZeroDivisionError; confirm callers only pass matrices that do not
        need row swaps.
        """
        # deepcopy, not copy: elimination writes into the rows, and the
        # original's shallow copy shared them with self.mat, silently
        # corrupting the matrix as a side effect of computing det().
        resMat = copy.deepcopy(self.mat)
        for i in range(self.rows):
            for j in range(i + 1, self.rows):
                factor = resMat[j][i] / resMat[i][i]
                for k in range(self.rows):
                    resMat[j][k] = resMat[j][k] - factor * resMat[i][k]
        product = 1
        for i in range(self.rows):
            product *= resMat[i][i]
        return round(product, 4)
    def transpose(self):
        """Return a new matrix with rows and columns swapped."""
        resMat = copy.copy(self.mat)
        return matrix([[resMat[i][j] for i in range(self.rows)] for j in range(self.columns)])
    def cancel(self, row, column):
        """Return the minor: this matrix without `row` and `column`."""
        rows = list(range(self.rows))
        rows.pop(row)
        columns = list(range(self.columns))
        columns.pop(column)
        return matrix([[self.mat[j][i] for i in columns] for j in rows])
    def adj(self):
        """Return the cofactor matrix (adjugate before transposition)."""
        resMat = create(self.rows, self.columns)
        for i in range(self.rows):
            for j in range(self.columns):
                noCancel = self.cancel(i, j)
                resMat[i][j] = (-1) ** (i + j) * (noCancel.det())
        return resMat
    def inv(self):
        """Return the inverse: adj(A)^T / det(A)."""
        return ((self.adj()).transpose()).div(self.det())
#Create matrix
def create(rows, columns, content = 0):
    """Return a rows x columns matrix with every cell set to `content`."""
    # Each row is a fresh list; cells share the same `content` reference,
    # exactly as the original loop-built version did.
    return matrix([[content for _ in range(columns)] for _ in range(rows)])
|
import sys
import json
import datetime
import logging
import yagmail
import pywhatkit
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
def readCredentialsJSON(key, file_name="credentials.json"):
    """Return the credentials entry for `key` from a JSON file.

    Uses a context manager so the file handle is closed deterministically
    (the original leaked the handle from json.load(open(...))).

    :param key: top-level key to look up (e.g. "apna-complex", "gmail")
    :param file_name: path to the JSON credentials file
    :raises KeyError: if `key` is not present in the file
    """
    with open(file_name) as cred_file:
        creds = json.load(cred_file)
    return (creds[key])
def bookTennisSlots(date_delta=1, slot_hour=None, court_num=None):
    """Book a tennis-court slot on ApnaComplex.

    :param date_delta: days ahead of today to book (default: tomorrow)
    :param slot_hour: hour of the slot; None means the current hour
    :param court_num: specific court number; None accepts any tennis court
    :return: True if a slot was booked, False otherwise
    """
    slot_booked = False
    booking_stage = "Initializing ApnaComplex driver"
    apna_complex_creds = readCredentialsJSON(key="apna-complex")
    driver = get_apnacomplex_driver(creds=apna_complex_creds)
    delay = 60  # seconds to wait for each page element
    try:
        booking_stage = "Waiting for facilities list"
        facilities_table = WebDriverWait(driver, delay).until(EC.presence_of_element_located((By.ID, "facilities")))
        all_facility_rows = facilities_table.find_elements_by_xpath(".//tbody//tr")
        # Reversed so higher-numbered courts are attempted first.
        all_facility_rows.reverse()
        for row in all_facility_rows:
            all_cells = row.find_elements_by_xpath(".//td")
            booking_stage = "Searching for valid court"
            if is_valid_court(all_cells, court_num):
                # The last cell of a facility row holds the action links.
                booking_links = all_cells[-1]
                all_links = booking_links.find_elements_by_xpath(".//a")
                booking_stage = "Iterating all facility links"
                for booking_link in all_links:
                    image_title = booking_link.find_elements_by_xpath(".//img")[0].get_attribute("title")
                    booking_stage = "Identifying booking link"
                    if image_title == "Make a booking for this facility":
                        booking_url = booking_link.get_attribute("href")
                        booking_stage = "Initializing a new booking"
                        slot_booked = make_booking(creds=apna_complex_creds, booking_url=booking_url, \
                                                   date_delta=date_delta, slot_hour=slot_hour, delay=delay)
                        break
            if slot_booked:
                booking_stage = "Booking completed successfully"
                break
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
        # propagate; logging.exception also records the traceback.
        logging.exception("Booking failed at stage: %s" % booking_stage)
    driver.quit()
    return (slot_booked)
def get_apnacomplex_driver(creds, url=None):
    """Start a Chrome driver, open `url` (default: creds["url"]) and log in
    with the email/password from `creds`; return the logged-in driver.

    NOTE(review): the Chrome binary and chromedriver paths are hard-coded
    for a Windows layout -- confirm they match the deployment machine.
    """
    options = Options()
    options.binary_location = "C:/Program Files/Google/Chrome/Application/chrome.exe"
    driver = webdriver.Chrome(options=options, executable_path="chrome-driver/chromedriver.exe")
    if url is None:
        url = creds["url"]
    # Navigate to url
    driver.get(url)
    # Enter email
    email_box = driver.find_element(by=By.ID, value="email")
    email_box.send_keys(creds["email"])
    # Enter password
    pwd_box = driver.find_element(by=By.ID, value="password")
    pwd_box.send_keys(creds["password"])
    # Submit login form
    pwd_box.submit()
    return (driver)
def get_booking_date(date_delta):
    """Return today + `date_delta` days, formatted as dd/mm/YYYY."""
    target_day = datetime.date.today() + datetime.timedelta(days=date_delta)
    return target_day.strftime("%d/%m/%Y")
def get_booking_time_slot(slot_hour):
    """Return the 45-minute slot label for an hour, e.g. "09:00 - 09:45"."""
    if slot_hour is None:
        # Default to the current hour ("%H" is already zero-padded).
        slot_hour = datetime.datetime.now().strftime("%H")
    hour_text = str(slot_hour).zfill(2)
    return "{0}:00 - {0}:45".format(hour_text)
def make_booking(creds, booking_url, date_delta, slot_hour, delay):
    """Fill and confirm the booking form at `booking_url`.

    :return: True when the site confirms "Booking completed successfully."
    """
    slot_booked = False
    # Defined before the try so the finally block can't hit a NameError when
    # get_apnacomplex_driver itself raises (a bug in the original).
    driver = None
    try:
        driver = get_apnacomplex_driver(creds=creds, url=booking_url)
        # Check instructions checkbox
        instructions_checkbox = WebDriverWait(driver, delay).until(EC.element_to_be_clickable((By.ID, "read_instructions")))
        instructions_checkbox.click()
        # Set booking date (today + date_delta)
        date_selector = driver.find_element(by=By.NAME, value="booking_date")
        date_selector.send_keys(get_booking_date(date_delta=date_delta))
        # Set time slot
        slot_selector = driver.find_element_by_xpath("//select[@name='facility_time_slot_id']/option[text()='" + get_booking_time_slot(slot_hour=slot_hour) + "']")
        slot_selector.click()
        # Submit form
        submit_button = driver.find_element(by=By.NAME, value="make_booking")
        submit_button.submit()
        # Confirm submission
        confirm_button = WebDriverWait(driver, delay).until(EC.element_to_be_clickable((By.ID, "confirm")))
        confirm_button.click()
        # Verify confirmation message
        status_message = WebDriverWait(driver, delay).until(EC.presence_of_element_located((By.ID, "status_message")))
        slot_booked = (status_message.text == "Booking completed successfully.")
    except TimeoutException:
        logging.error("Booking page did not load correctly.")
        slot_booked = False
    except Exception:
        # Narrowed from a bare `except:`; keep the traceback in the log.
        logging.exception("Unknown error occured during booking.")
        slot_booked = False
    finally:
        if driver is not None:
            driver.quit()
    return (slot_booked)
def is_valid_court(all_cells, court_num):
    """Return True when the row's first cell names a tennis court matching
    `court_num` (None accepts any court number)."""
    facility_name = all_cells[0].text
    if not facility_name.startswith("Tennis Court"):
        return False
    return court_num is None or facility_name.endswith(str(court_num))
def send_status_email(msg_text):
    """Email `msg_text` to the configured Gmail account (self-addressed;
    the message text doubles as the subject)."""
    gmail_creds = readCredentialsJSON(key="gmail")
    yag = yagmail.SMTP(user=gmail_creds["id"], password=gmail_creds["password"])
    # Recipient is the sender itself; subject and contents are both msg_text.
    yag.send(gmail_creds["id"], msg_text, msg_text)
def send_status_whatsapp(msg_text):
    """Send `msg_text` via WhatsApp, scheduled one minute from now."""
    # Defined before the try so the except block can't hit a NameError when
    # reading the credentials fails (a bug in the original handler).
    mobile = None
    try:
        whatsapp_creds = readCredentialsJSON(key="whatsapp")
        mobile = whatsapp_creds["mobile"]
        # timedelta handles the minute-59 rollover; the original's
        # `minute + 1` produced an invalid minute of 60 at the top of the hour.
        send_at = datetime.datetime.now() + datetime.timedelta(minutes=1)
        logging.info("Sending confirmation msg to %s" % mobile)
        pywhatkit.sendwhatmsg(mobile, msg_text, send_at.hour, send_at.minute, tab_close=True)
    except Exception:
        logging.exception("Error sending whatsapp msg to %s" % mobile)
def main():
    """Parse CLI args (date_delta, slot_hour, court_num) and run the booking."""
    logging.basicConfig(filename="tennis-booking.log", level=logging.INFO)
    # Defaults are assigned before parsing so a bad argument cannot leave the
    # variables undefined (the original assigned them inside the try and then
    # raised NameError right after logging the parse failure).
    date_delta, slot_hour, court_num = 1, None, None
    try:
        if len(sys.argv) > 1:
            date_delta = int(sys.argv[1])
        if len(sys.argv) > 2:
            slot_hour = int(sys.argv[2])
        if len(sys.argv) > 3:
            court_num = int(sys.argv[3])
    except ValueError:
        logging.error("Invalid arguments provided!")
    logging.info("Initiating booking for %s at %s" % (get_booking_date(date_delta=date_delta), get_booking_time_slot(slot_hour=slot_hour)))
    result = bookTennisSlots(date_delta=date_delta, slot_hour=slot_hour, court_num=court_num)
    if result:
        success_msg = "Booking successfully completed for %s at %s" % (get_booking_date(date_delta=date_delta), get_booking_time_slot(slot_hour=slot_hour))
        logging.info(success_msg)
        # send_status_email(msg_text=success_msg)
        # send_status_whatsapp(msg_text=success_msg)
    else:
        # Send failure email
        failure_msg = "Booking unsuccessful for %s at %s" % (get_booking_date(date_delta=date_delta), get_booking_time_slot(slot_hour=slot_hour))
        logging.warning(failure_msg)  # logging.warn is deprecated
        # send_status_email(msg_text=failure_msg)
        # send_status_whatsapp(msg_text=failure_msg)


if __name__ == "__main__":
    main()
import tensorflow as tf
from tensorflow import keras
import keras.backend as K
from tensorflow.keras.layers import Conv3D, Activation, MaxPooling3D, Conv3DTranspose, Add,BatchNormalization, Dropout
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
from tensorflow.keras.models import model_from_json
import numpy as np
import os
import math
from glob import glob
from random import shuffle, randint
#import random
#from metrics import *
from numpy import linalg as LA
import json
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import nibabel as nib
import medpy.metric.binary as medpy_metrics
from numpy import linalg as LA
from sklearn.preprocessing import label_binarize
from sklearn.utils.validation import column_or_1d
from sklearn.metrics import brier_score_loss
from sklearn.calibration import calibration_curve
from datetime import datetime
from skimage import io
#import sys
#sys.settrace
from nibabel import load as load_nii
#from scipy.stats import boxcox
def save_model(modelInput, modelNameInput='model'):
    """Write a Keras model's architecture to <name>.json and its weights
    to <name>.h5."""
    architecture = json.loads(modelInput.to_json())
    architecture['class_name'] = 'Model'  # this attribute sometimes is not properly set
    with open(modelNameInput + ".json", "w") as json_file:
        json_file.write(json.dumps(architecture))
    modelInput.save_weights(modelNameInput + ".h5")
    print("Saved " + modelNameInput)
def load_model(modelNameInput = 'model'):
    """Load a Keras model whose architecture (<name>.json) and weights
    (<name>.h5) were written by save_model(); return the model."""
    # `with` closes the handle deterministically (the original left it to GC).
    with open(modelNameInput + '.json', 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json)
    loaded_model.load_weights(modelNameInput + ".h5")
    # This message sat after the `return` in the original and never ran.
    print("Loaded model from disk")
    return loaded_model
def ensure_dir(directory):
    """Create `directory` (including parents) if it does not already exist.

    Uses os.makedirs(..., exist_ok=True) instead of the original
    exists()/makedirs() pair, which had a check-then-create race.
    """
    os.makedirs(directory, exist_ok=True)
def add_padding_z(img, depth_with_padding):
    """Pad a 3-D volume along the z (last) axis to `depth_with_padding`.

    The image is centred (the extra slice goes to the back when the pad is
    odd) and the border is filled with the corner value img[0][0][0],
    presumably background — TODO confirm against callers.
    Returns a new float array of shape (x, y, depth_with_padding).

    Fixes: the original raised on an empty-slice assignment when
    depth_with_padding == img.shape[2]; negative padding now raises.
    """
    pad = depth_with_padding - img.shape[2]
    if pad < 0:
        raise ValueError('depth_with_padding smaller than image depth')
    if pad == 0:
        return img.copy()
    front = pad // 2
    pad_value = img[0][0][0]
    image_padded = np.empty((img.shape[0], img.shape[1], depth_with_padding))
    image_padded.fill(pad_value)
    image_padded[:, :, front:front + img.shape[2]] = img
    return image_padded
def add_padding_x(img, depth_with_padding):
    """Pad a 3-D volume along the x (first) axis to `depth_with_padding`.

    Centred placement; border filled with the corner value img[0][0][0],
    presumably background — TODO confirm against callers.
    Returns a new float array of shape (depth_with_padding, y, z).

    Fixes: the original raised on an empty-slice assignment when
    depth_with_padding == img.shape[0]; negative padding now raises.
    """
    pad = depth_with_padding - img.shape[0]
    if pad < 0:
        raise ValueError('depth_with_padding smaller than image width')
    if pad == 0:
        return img.copy()
    front = pad // 2
    pad_value = img[0][0][0]
    image_padded = np.empty((depth_with_padding, img.shape[1], img.shape[2]))
    image_padded.fill(pad_value)
    image_padded[front:front + img.shape[0], :, :] = img
    return image_padded
def add_padding_y(img, depth_with_padding):
    """Pad a 3-D volume along the y (middle) axis to `depth_with_padding`.

    Centred placement; border filled with the corner value img[0][0][0],
    presumably background — TODO confirm against callers.
    Returns a new float array of shape (x, depth_with_padding, z).

    Fixes: the original raised on an empty-slice assignment when
    depth_with_padding == img.shape[1]; negative padding now raises.
    """
    pad = depth_with_padding - img.shape[1]
    if pad < 0:
        raise ValueError('depth_with_padding smaller than image height')
    if pad == 0:
        return img.copy()
    front = pad // 2
    pad_value = img[0][0][0]
    image_padded = np.empty((img.shape[0], depth_with_padding, img.shape[2]))
    image_padded.fill(pad_value)
    image_padded[:, front:front + img.shape[1], :] = img
    return image_padded
def multi_class_prediction(prediction):
    """Split a one-hot model output into one NIfTI image per class channel.

    prediction: 5-D array, presumably (batch, x, y, z, n_classes) — only
    the first batch element is used; TODO confirm against callers.
    Returns a list of nib.Nifti1Image, one per class channel.

    Fix: the original built the list but never returned it, so calling
    this function had no observable effect.
    """
    prediction_images = []
    for i in range(prediction.shape[4]):
        # Affine is None: the caller must attach spatial information.
        prediction_images.append(nib.Nifti1Image(prediction[0,:,:,:, i],None))
    return prediction_images
def one_hot_labels(data, n_labels, labels=None):
    """One-hot encode a labelled volume.

    data: 5-D integer array (batch, x, y, z, 1) of label values.
    n_labels: number of output channels.
    labels: explicit label value per channel; defaults to 0..n_labels-1.
    Returns an int8 array of shape (batch, x, y, z, n_labels).

    Fix: with labels=None the original skipped the assignment entirely
    and returned all zeros; the channel index is now used as the label
    value in that case (backward-compatible for explicit labels).
    """
    if labels is None:
        labels = list(range(n_labels))
    new_shape = [data.shape[0], data.shape[1], data.shape[2], data.shape[3], n_labels]
    y = np.zeros(new_shape, np.int8)
    for label_index in range(n_labels):
        y[:, :, :, :, label_index][data[:, :, :, :, 0] == labels[label_index]] = 1
    return y
def set2to0(matrix):
    """Zero out every entry greater than 1.0, in place, and return the array."""
    above_one = matrix > 1.0
    matrix[above_one] = 0.0
    return matrix
|
"""
isicarchive.sampler (Sampler)
This module provides the Sampler helper class and doesn't have to be
imported from outside the main package functionality (IsicApi).
"""
# specific version for file
__version__ = '0.4.8'
# imports (needed for majority of functions)
from typing import Any, List, Union
import warnings
from numba import float64, int64, jit, prange
import numpy
# fix value to -1.0 ... 1.0
def _frone(v):
return min(1.0, max(-1.0, v))
# Gaussian (smoothing) kernel
def _gauss_kernel(fwhm:numpy.float64 = 2.0) -> numpy.ndarray:
if fwhm <= 0.29:
return numpy.asarray([0,1,0]).astype(numpy.float64)
fwhm = fwhm / numpy.sqrt(8.0 * numpy.log(2.0))
if fwhm < 2.0:
md = numpy.trunc(0.5 + 6.0 * fwhm)
else:
md = numpy.trunc(0.5 + 6.0 * numpy.log2(fwhm) * fwhm)
k = numpy.exp(-((numpy.arange(-md,md+1.0,1.0) ** 2) / (2.0 * fwhm * fwhm)))
k = k[k >= 0.00000000001]
return k / numpy.sum(k)
# sample grid
@jit([
    'f8[:,:](f8[:,:],f8[:],f8[:],f8[:],i8)', #output(array, crd0, crd1, kernel, ksize)
    'f8[:,:](f4[:,:],f8[:],f8[:],f8[:],i8)',
    'f8[:,:](u1[:,:],f8[:],f8[:],f8[:],i8)',
    ], nopython=True)
def _sample_grid_2d(
        a:numpy.ndarray,
        c0:numpy.ndarray,
        c1:numpy.ndarray,
        k:numpy.ndarray,
        ks:int64) -> numpy.ndarray:
    """Sample 2-D array `a` on the separable grid c0 x c1 with kernel `k`.

    `k` is a symmetric interpolation kernel sampled at `ks` sub-steps per
    pixel, so (k.size - 1) must be an even multiple of ks.  NaN values in
    `a` are excluded and the weights renormalized.  Returns a float64
    array of shape (c0.size, c1.size).
    """
    nk = k.size - 1
    # kernel half-width in pixels; must be integral for a valid kernel
    kl = float(nk) / float(2 * ks)
    if kl != numpy.trunc(kl):
        raise ValueError('Invalid kernel.')
    ikl = int(kl)
    mks = ikl * ks
    fks = float(ks)
    as0 = a.shape[0]
    as1 = a.shape[1]
    nc0 = c0.size
    nc1 = c1.size
    # restrict work to the column window actually covered by c1 (+ kernel)
    mn1 = int(numpy.amin(c1) - (kl + 1.0))
    if mn1 < 0:
        mn1 = 0
    mx1 = int(numpy.amax(c1) + (kl + 1.0))
    if mx1 > as1:
        mx1 = as1
    l1 = mx1 - mn1
    out = numpy.zeros(nc0 * nc1, dtype=numpy.float64).reshape((nc0,nc1,))
    for i0 in prange(nc0): #pylint: disable=not-an-iterable
        # first pass: collapse rows around c0[i0] into one weighted row
        c0c = c0[i0]
        c0b = int(c0c + 0.5)
        c0o = c0c - float(c0b)
        row = numpy.zeros(l1, dtype=numpy.float64).reshape((1,l1,))
        rw = 0.0
        for ri in range(c0b-ikl, c0b+ikl+1):
            if ri < 0 or ri >= as0:
                continue
            wi = mks + (ri-c0b) * ks - int(c0o * fks)
            if wi > 0 and wi < nk:
                kwi = k[wi]
                row += kwi * a[ri, mn1:mx1].astype(numpy.float64)
                rw += kwi
        if rw == 0.0:
            rw = 1.0
        row /= rw
        ascol = row.reshape(l1,)
        # second pass: interpolate along the collapsed row for each c1
        for i1 in range(nc1):
            c1c = c1[i1]
            c1b = int(c1c + 0.5 - mn1)
            c1o = c1c - float(c1b)
            val = 0.0
            vw = 0.0
            for ci in range(c1b-ikl, c1b+ikl+1):
                if ci < 0 or ci >= l1:
                    continue
                cwi = mks + (ci-c1b) * ks - int(c1o * fks)
                if cwi > 0 and cwi < nk:
                    kwi = k[cwi]
                    valp = ascol[ci]
                    # skip NaNs so missing data does not poison the sum
                    if not numpy.isnan(valp):
                        val += kwi * valp
                        vw += kwi
            if vw == 0.0:
                vw = 1.0
            out[i0,i1] = val / vw
    return out
# sample grid coordinates
@jit([
    'f8[:](f8[:,:],f8[:,:],f8[:],i8)', #output(array, crd, kernel, ksize)
    'f8[:](f4[:,:],f8[:,:],f8[:],i8)',
    'f8[:](u1[:,:],f8[:,:],f8[:],i8)',
    ], nopython=True)
def _sample_grid_coords(
        a:numpy.ndarray,
        c:numpy.ndarray,
        k:numpy.ndarray,
        ks:int64) -> numpy.ndarray:
    """Sample 2-D array `a` at an arbitrary list of (row, col) coordinates.

    `c` has shape (n, 2); `k` is a symmetric kernel sampled at `ks`
    sub-steps per pixel.  The kernel offset is truncated to the nearest
    sub-step (see _sample_grid_coords_fine for the interpolated variant).
    NaNs in `a` are excluded and weights renormalized.  Returns a
    float64 vector of length n.
    """
    nk = k.size - 1
    # kernel half-width in pixels; must be integral for a valid kernel
    kl = float(nk) / float(2 * ks)
    if kl != numpy.trunc(kl):
        raise ValueError('Invalid kernel.')
    ikl = int(kl)
    mks = ikl * ks
    fks = float(ks)
    as0 = a.shape[0]
    as1 = a.shape[1]
    nc = c.shape[0]
    if c.shape[1] != 2:
        raise ValueError('Invalid coordinate list.')
    out = numpy.zeros(nc, dtype=numpy.float64).reshape((nc,))
    for i in prange(nc): #pylint: disable=not-an-iterable
        # split each coordinate into nearest-pixel base + fractional offset
        c0c = c[i,0]
        c0b = int(c0c + 0.5)
        c0o = c0c - float(c0b)
        c1c = c[i,1]
        c1b = int(c1c + 0.5)
        c1o = c1c - float(c1b)
        val = 0.0
        vw = 0.0
        for ri in range(c0b-ikl, c0b+ikl+1):
            if ri < 0 or ri >= as0:
                continue
            wi = mks + (ri-c0b) * ks - int(c0o * fks)
            if wi < 0 or wi >= nk:
                continue
            kwi0 = k[wi]
            for ci in range(c1b-ikl, c1b+ikl+1):
                if ci < 0 or ci >= as1:
                    continue
                cwi = mks + (ci-c1b) * ks - int(c1o * fks)
                if cwi < 0 or cwi >= nk:
                    continue
                # separable 2-D weight = row weight * column weight
                kwi = kwi0 * k[cwi]
                valp = a[ri,ci]
                if not numpy.isnan(valp):
                    val += kwi * valp
                    vw += kwi
        if vw == 0.0:
            vw = 1.0
        out[i] = val / vw
    return out
# sample grid coordinates
@jit([
    'f8[:](f8[:,:],f8[:,:],f8[:],i8)', #output(array, crd, kernel, ksize)
    'f8[:](f4[:,:],f8[:,:],f8[:],i8)',
    'f8[:](u1[:,:],f8[:,:],f8[:],i8)',
    ], nopython=True)
def _sample_grid_coords_fine(
        a:numpy.ndarray,
        c:numpy.ndarray,
        k:numpy.ndarray,
        ks:int64) -> numpy.ndarray:
    """Like _sample_grid_coords, but linearly interpolates between the two
    nearest kernel sub-steps instead of truncating the kernel offset —
    slightly more accurate, slightly slower.

    `c` has shape (n, 2); returns a float64 vector of length n.  NaNs in
    `a` are excluded and weights renormalized.
    """
    nk = k.size -1
    # kernel half-width in pixels; must be integral for a valid kernel
    kl = float(nk) / float(2 * ks)
    if kl != numpy.trunc(kl):
        raise ValueError('Invalid kernel.')
    ikl = int(kl)
    mks = ikl * ks
    fks = float(ks)
    as0 = a.shape[0]
    as1 = a.shape[1]
    nc = c.shape[0]
    if c.shape[1] != 2:
        raise ValueError('Invalid coordinate list.')
    out = numpy.zeros(nc, dtype=numpy.float64).reshape((nc,))
    for i in prange(nc): #pylint: disable=not-an-iterable
        c0c = c[i,0]
        c0b = int(c0c + 0.5)
        c0o = c0c - float(c0b)
        c1c = c[i,1]
        c1b = int(c1c + 0.5)
        c1o = c1c - float(c1b)
        val = 0.0
        vw = 0.0
        for ri in range(c0b-ikl, c0b+ikl+1):
            if ri < 0 or ri >= as0:
                continue
            # split the sub-step offset into integer part + remainder
            wf = c0o * fks
            wfi = int(wf)
            wfp = wf - float(wfi)
            wi = mks + (ri-c0b) * ks - wfi
            if wi <= 0 or wi >= nk:
                continue
            # linear interpolation between adjacent kernel samples
            kwi0 = (1.0 - wfp) * k[wi] + wfp * k[wi-1]
            for ci in range(c1b-ikl, c1b+ikl+1):
                if ci < 0 or ci >= as1:
                    continue
                wf = c1o * fks
                wfi = int(wf)
                wfp = wf - float(wfi)
                cwi = mks + (ci-c1b) * ks - wfi
                if cwi <= 0 or cwi >= nk:
                    continue
                kwi = kwi0 * ((1.0 - wfp) * k[cwi] + wfp * k[cwi-1])
                valp = a[ri,ci]
                if not numpy.isnan(valp):
                    val += kwi * valp
                    vw += kwi
        if vw == 0.0:
            vw = 1.0
        out[i] = val / vw
    return out
# sample values
@jit([
    'f8[:](f8[:],f8[:],f8[:],i8)', #output(vector, crd0, kernel, ksize)
    'f8[:](f4[:],f8[:],f8[:],i8)',
    'f8[:](u1[:],f8[:],f8[:],i8)',
    ], nopython=True)
def _sample_values(
        a:numpy.ndarray,
        c:numpy.ndarray,
        k:numpy.ndarray,
        ks:int64) -> numpy.ndarray:
    """Sample 1-D array `a` at fractional coordinates `c` with kernel `k`.

    `k` is a symmetric kernel sampled at `ks` sub-steps per element, so
    (k.size - 1) must be an even multiple of ks.  NaNs in `a` are
    excluded and weights renormalized.  Returns a float64 vector of
    length c.size.
    """
    nk = k.size - 1
    # kernel half-width in elements; must be integral for a valid kernel
    kl = float(nk) / float(2 * ks)
    if kl != numpy.trunc(kl):
        raise ValueError('Invalid kernel.')
    ikl = int(kl)
    mks = ikl * ks
    fks = float(ks)
    nc = c.size
    al = a.size
    v = numpy.zeros(nc, dtype=numpy.float64)
    for i0 in prange(nc): #pylint: disable=not-an-iterable
        # nearest-element base + fractional offset of the coordinate
        c0c = c[i0]
        c0b = int(c0c + 0.5)
        c0o = c0c - float(c0b)
        val = 0.0
        vw = 0.0
        for ci in range(c0b-ikl, c0b+ikl+1):
            if ci < 0 or ci >= al:
                continue
            cwi = mks + (ci-c0b) * ks - int(c0o * fks)
            if cwi > 0 and cwi < nk:
                kwi = k[cwi]
                valp = a[ci]
                if not numpy.isnan(valp):
                    val += kwi * valp
                    vw += kwi
        if vw == 0.0:
            vw = 1.0
        v[i0] = val / vw
    return v
def trans_matrix(m:Union[list, dict, tuple]) -> numpy.ndarray:
    """Build a homogeneous affine transformation matrix.

    Parameters
    ----------
    m : list, dict or tuple
        One transform step (dict) or several (list of dicts), each with
        optional fields 'trans', 'rotate', 'scale', 'shear', 'origin'.
        A tuple is interpreted positionally in that order.

    Returns
    -------
    ndarray
        (nd+1) x (nd+1) homogeneous matrix (nd = 2 or 3, inferred from
        the field lengths).  Steps are composed so the LAST list entry
        is applied first; each step is T * O * R * S * O^-1 * H.
    """
    # positional tuple -> keyword dict
    if isinstance(m, tuple):
        mt = m
        m = dict()
        if len(mt) > 0:
            m['trans'] = mt[0]
        if len(mt) > 1:
            m['rotate'] = mt[1]
        if len(mt) > 2:
            m['scale'] = mt[2]
        if len(mt) > 3:
            m['shear'] = mt[3]
        if len(mt) > 4:
            m['origin'] = mt[4]
    # normalize to a list of step dicts
    if isinstance(m, dict):
        m = [m]
    elif not isinstance(m, list):
        raise ValueError('Invalid input parameter.')
    elif len(m) < 1:
        raise ValueError('Invalid input parameter.')
    nd = 0
    try:
        # first pass: infer dimensionality nd and fill missing fields
        # with identity defaults (zeros for trans/rotate/shear, ones for
        # scale); mutates the input dicts in place
        for m_in in m:
            if not isinstance(m_in, dict):
                raise ValueError('Invalid input parameter.')
            origin = m_in.get('origin', None)
            if not origin is None:
                lt = len(origin)
                if nd > 0 and nd != lt:
                    raise ValueError('Invalid origin field in input parameter.')
                elif nd == 0:
                    nd = lt
            else:
                if nd == 0:
                    m_in['origin'] = None
                else:
                    m_in['origin'] = numpy.zeros(nd, numpy.float64)
            trans = m_in.get('trans', None)
            if not trans is None:
                lt = len(trans)
                if nd > 0 and nd != lt:
                    raise ValueError('Invalid origin field in input parameter.')
                elif nd == 0:
                    nd = lt
            else:
                if nd == 0:
                    m_in['trans'] = None
                else:
                    m_in['trans'] = numpy.zeros(nd, numpy.float64)
            # rotation: 1 angle in 2-D, 3 angles in 3-D
            rotate = m_in.get('rotate', None)
            if not rotate is None:
                lt = len(rotate)
                if not lt in [1, 3]:
                    raise ValueError('Invalid rotate field in input parameter.')
                elif lt == 1:
                    lt = 2
                if nd > 0 and nd != lt:
                    raise ValueError('Invalid rotate field in input parameter.')
                elif nd == 0:
                    nd = lt
            else:
                if nd == 2:
                    m_in['rotate'] = numpy.zeros(1, numpy.float64)
                elif nd == 3:
                    m_in['rotate'] = numpy.zeros(3, numpy.float64)
                else:
                    m_in['rotate'] = None
            # scale: scalar is broadcast to all nd axes
            scale = m_in.get('scale', None)
            if not scale is None:
                lt = len(scale)
                if lt > 1:
                    if nd > 0 and nd != lt:
                        raise ValueError('Invalid scale field in input parameter.')
                    elif nd == 0:
                        nd = lt
                elif nd > 0:
                    m_in['scale'] = scale * numpy.ones(nd, numpy.float64)
            else:
                if nd == 0:
                    m_in['scale'] = None
                else:
                    m_in['scale'] = numpy.ones(nd, numpy.float64)
            # shear: 1 value in 2-D, 3 values in 3-D
            shear = m_in.get('shear', None)
            if not shear is None:
                lt = len(shear)
                if not lt in [1, 3]:
                    raise ValueError('Invalid shear field in input parameter.')
                elif lt == 1:
                    lt = 2
                if nd > 0 and nd != lt:
                    raise ValueError('Invalid rotate field in input parameter.')
                elif nd == 0:
                    nd = lt
            else:
                if nd == 2:
                    m_in['shear'] = numpy.zeros(1, numpy.float64)
                elif nd == 3:
                    m_in['shear'] = numpy.zeros(3, numpy.float64)
                else:
                    m_in['shear'] = None
        if not nd in [2,3]:
            raise ValueError('Invalid input parameter (dimensions not inferred).')
        # start from the identity; m_p stays an identity prototype
        m_out = numpy.zeros((nd+1, nd+1,), numpy.float64)
        for n in range(nd+1):
            m_out[n,n] = 1.0
        m_p = m_out.copy()
        # second pass: compose the steps (last entry applied first)
        for m_in in reversed(m):
            origin = m_in['origin']
            if origin is None:
                origin = numpy.zeros(nd, numpy.float64)
            trans = m_in['trans']
            if trans is None:
                trans = numpy.zeros(nd, numpy.float64)
            rotate = m_in['rotate']
            if rotate is None:
                if nd == 2:
                    rotate = numpy.zeros(1, numpy.float64)
                else:
                    rotate = numpy.zeros(3, numpy.float64)
            rs = numpy.sin(rotate)
            rc = numpy.cos(rotate)
            scale = m_in['scale']
            if scale is None:
                scale = numpy.ones(nd, numpy.float64)
            shear = m_in['shear']
            if shear is None:
                if nd == 2:
                    shear = numpy.zeros(1, numpy.float64)
                else:
                    shear = numpy.zeros(3, numpy.float64)
            # origin shift (and its inverse) so rotate/scale/shear pivot
            # about 'origin' rather than the coordinate origin
            m_o = m_p.copy()
            m_o[:nd,-1] = origin
            m_ob = m_p.copy()
            m_ob[:nd,-1] = -origin
            m_t = m_p.copy()
            m_t[:nd,-1] = trans
            if nd == 2:
                m_r = m_p.copy()
                m_r[0:2,0:2] = numpy.asarray([[rc[0], rs[0]], [-rs[0], rc[0]]])
            else:
                # 3-D rotation as the product of the three axis rotations
                m_r1 = m_p.copy()
                m_r1[1:3,1:3] = numpy.asarray([[rc[0], rs[0]], [-rs[0], rc[0]]])
                m_r2 = m_p.copy()
                m_r2[0,0] = rc[1]
                m_r2[0,2] = rs[1]
                m_r2[2,0] = -rs[1]
                m_r2[2,2] = rc[1]
                m_r3 = m_p.copy()
                m_r3[0:2,0:2] = numpy.asarray([[rc[2], rs[2]], [-rs[2], rc[2]]])
                m_r = numpy.matmul(numpy.matmul(m_r1, m_r2), m_r3)
            m_s = m_p.copy()
            for n in range(nd):
                m_s[n,n] = scale[n]
            m_h = m_p.copy()
            m_h[0,1] = shear[0]
            if nd == 3:
                m_h[0,2] = shear[1]
                m_h[1,2] = shear[2]
            # step matrix: translate * origin * rotate * scale * origin^-1 * shear
            m_c = numpy.matmul(
                m_t, numpy.matmul(
                m_o, numpy.matmul(
                m_r, numpy.matmul(
                m_s, numpy.matmul(
                m_ob,
                m_h)))))
            m_out = numpy.matmul(m_c, m_out)
    except:
        # re-raise unchanged; try/except only marks the fallible region
        raise
    return m_out
def trans_matrix_inv(m:numpy.ndarray):
    """
    Decompose transformation matrix into parts
    Parameters
    ----------
    m : ndarray
        Homogeneous transformation matrix (3x3 for 2-D, 4x4 for 3-D)
    Returns
    -------
    trans : ndarray
        2- or 3-element translation
    rotate : ndarray
        1- or 3-element rotation angles
    scale : ndarray
        2- or 3-element scaling factors
    shear : ndarray
        1- or 3-element shear components
    """
    # promote a 2-D (3x3) matrix to 3-D form so one code path suffices
    was2d = False
    if m.shape[1] == 3:
        was2d = True
        m = numpy.asarray([
            [1.0, 0.0, 0.0, 0.0],
            [0.0, m[0,0], m[0,1], m[0,2]],
            [0.0, m[1,0], m[1,1], m[1,2]],
            [0.0, 0.0, 0.0, 1.0]], numpy.float64)
    trans = m[0:3,3]
    rotate = numpy.zeros(3, numpy.float64)
    r = m[0:3,0:3]
    # Cholesky of R^T R yields an upper-triangular factor whose diagonal
    # carries the scales and whose off-diagonal carries the shears
    rc = numpy.linalg.cholesky(numpy.matmul(r.T, r)).T
    scale = numpy.diagonal(rc)
    # NOTE(review): numpy.diagonal returns a read-only view on current
    # NumPy; the in-place sign flip below may raise — verify.
    if numpy.linalg.det(r) < 0.0:
        scale[0] *= -1.0
    rcd = rc * numpy.eye(3, dtype=numpy.float64)
    rc = numpy.linalg.solve(rcd, rc)
    shear = numpy.asarray([rc[0,1], rc[0,2], rc[1,2]], numpy.float64)
    # remove scale+shear, leaving a pure rotation to extract angles from
    r0 = trans_matrix({'rotate': rotate, 'scale': scale, 'shear': shear})[0:3,0:3]
    r0 = numpy.linalg.solve(numpy.linalg.inv(r), numpy.linalg.inv(r0))
    rotate[1] = numpy.arcsin(_frone(r0[0,2]))
    # gimbal-lock branch: pitch at +/- pi/2 makes roll/yaw degenerate
    if numpy.abs((numpy.abs(rotate[1]) - (numpy.pi / 2.0))) < 1.0e-6:
        rotate[0] = 0.0
        rotate[2] = numpy.arctan2(-_frone(r0[1,0]), _frone(-r0[2,0] / r0[0,2]))
    else:
        rc = numpy.cos(rotate[1])
        rotate[0] = numpy.arctan2(_frone(r0[1,2] / rc), _frone(r0[2,2] / rc))
        rotate[2] = numpy.arctan2(_frone(r0[0,1] / rc), _frone(r0[0,0] / rc))
    # restrict back to the 2-D components if the input was 2-D
    if was2d:
        trans = trans[1:]
        rotate = rotate[0:1]
        scale = scale[1:]
        shear = shear[2:3]
    return (trans, rotate, scale, shear)
class Sampler(object):
    """Image/value resampler with a shared cache of interpolation kernels.

    Kernels are stored as [kernel_array, sub_steps_per_pixel] pairs in a
    class-level dict, so all instances share one cache.
    """
    __kernels = {}
    def __init__(self):
        """Populate the shared kernel cache on first instantiation."""
        # alias the class-level cache; later instances reuse it
        self._kernels = self.__kernels
        # prepare some kernels
        if not self._kernels:
            # nearest-neighbour: box kernel with 0.5 at the edges
            ks = 4096
            kn = numpy.zeros(2*ks+1, dtype=numpy.float64)
            kn[ks//2+1:ks+ks//2] = 1.0
            kn[ks//2] = 0.5
            kn[ks+ks//2] = 0.5
            self._kernels['nearest'] = [kn, ks]
            # linear: symmetric triangle kernel
            kn = numpy.zeros(2*ks+1, dtype=numpy.float64)
            kn[0:ks] = numpy.arange(0.0, 1.0, 1.0 / float(ks))
            kn[ks:2*ks] = numpy.arange(1.0, 0.0, -1.0 / float(ks))
            self._kernels['linear'] = [kn, ks]
            # cubic: piecewise Catmull-Rom-style polynomial over [-2, 2]
            k21 = [v for v in range(0,ks)]
            k22 = [v for v in range(ks+1,3*ks)]
            k23 = [v for v in range(3*ks+1,4*ks)]
            k = numpy.abs(numpy.arange(-2.0, 2.0+0.5/float(ks), 1.0/float(ks)).astype(numpy.float64))
            k[k21] = -0.5 * (k[k21] ** 3) + 2.5 * (k[k21] * k[k21]) - 4.0 * k[k21] + 2.0
            k[k22] = 1.5 * (k[k22] ** 3) - 2.5 * (k[k22] * k[k22]) + 1.0
            k[k23] = -0.5 * (k[k23] ** 3) + 2.5 * (k[k23] * k[k23]) - 4.0 * k[k23] + 2.0
            k[0::ks] = 0.0
            k[2*ks] = 1.0
            self._kernels['cubic'] = [k, ks]
            # lanczos2..lanczos9: windowed sinc; wider kernels get fewer
            # sub-steps to bound memory
            kss = [0, 0, 8192, 8192, 4096, 4096, 2048, 2048, 2048, 2048]
            # Fix: numpy.float was removed in NumPy 1.24; use numpy.float64.
            math_pi = numpy.float64(3.1415926535897931)
            for kc in range(2,10):
                ks = kss[kc]
                k = numpy.arange(-float(kc), float(kc) + 0.5/float(ks), 1.0/float(ks)).astype(numpy.float64)
                # avoid 0/0 at the center before dividing by (pi*k)^2
                k[kc*ks] = 1.0
                pi_k = math_pi * k
                ksin = numpy.sin(math_pi * k) / (pi_k * pi_k)
                ksin[0::ks] = 0.0
                ksin = (kc * ksin) * numpy.sin((math_pi / float(kc)) * k)
                ksin[kc*ks] = 1.0
                self._kernels['lanczos' + str(kc)] = [ksin, ks]
    # sample values
    def sample_values(self,
            a:numpy.ndarray,
            s:Union[numpy.ndarray,list,tuple,int,float],
            k:Union[str,tuple] = 'resample',
            out_type:str = 'float64',
            ) -> numpy.ndarray:
        """Resample a 1-D array (or each column of a 2-D/3-D array).

        a : array to sample along axis 0.
        s : sampling spec — int (target length), float (factor),
            3-element ndarray (arange start/stop/step) or a 1-element
            list/tuple of any of those / explicit coordinates.
        k : kernel name ('resample' auto-picks a smoothing kernel for
            downsampling; 'gauss<FWHM>' builds a Gaussian) or an explicit
            (kernel_array, sub_steps) pair.
        out_type : 'float64' (default), 'uint8', 'float32', 'int16' or
            'int32'; integer types are clipped to their range.
        """
        if not isinstance(a, numpy.ndarray):
            raise ValueError('Invalid array a to sample.')
        ad = a.ndim
        ash = a.shape
        # normalize the sampling spec to a float64 coordinate vector
        if isinstance(s, int):
            s = float(s) / float(ash[0])
        if isinstance(s, float):
            s = [int(s * (float(ash[0]) + 0.5))]
        if isinstance(s, numpy.ndarray):
            if s.ndim != 1 or s.shape[0] != 3:
                raise ValueError('Invalid sampling specification.')
            s = numpy.arange(s[0], s[1], s[2]).astype(numpy.float64)
        elif (not isinstance(s, list) and not isinstance(s, tuple)) or len(s) > 1:
            raise ValueError('Invalid sampling specification.')
        else:
            s = s[0]
        try:
            if isinstance(s, int):
                # int target length -> evenly spaced source coordinates
                sf = float(ash[0]) / float(s)
                s = numpy.arange(sf/2.0-0.5, float(ash[0])-0.5, sf)
            elif not isinstance(s, numpy.ndarray):
                s = numpy.asarray(s).astype(numpy.float64)
        except:
            raise
        # resolve the kernel: by name (with caching) or explicit pair
        if isinstance(k, str):
            if k == 'resample':
                # average sampling distance decides whether smoothing is
                # needed (downsampling); smoothed kernels are cached
                fs = numpy.mean(numpy.diff(s))
                fm = 0.1 * numpy.trunc(10.0 * fs)
                if fm <= 1.0:
                    k = self._kernels['cubic']
                else:
                    fms = 'rs_{0:.1f}'.format(fm)
                    if fms in self._kernels:
                        k = self._kernels[fms]
                    else:
                        # convolve the cubic kernel with a matching
                        # Gaussian, then trim negligible tails
                        kc = self._kernels['cubic']
                        sk = _gauss_kernel(fm * float(kc[1])).astype(numpy.float64)
                        skl = sk.size
                        skr = (skl - 1) // (2 * kc[1])
                        skr = 2 * kc[1] * skr + 1
                        skd = (skl - skr) // 2
                        sk = sk[skd:skr+skd]
                        sk = sk / numpy.sum(sk)
                        ksk = numpy.convolve(kc[0], sk)
                        while numpy.sum(ksk[0:kc[1]]) < 0.01:
                            ksk = ksk[kc[1]:-kc[1]]
                        k = [ksk, kc[1]]
                        self._kernels[fms] = k
            elif len(k) > 5 and k[0:5] == 'gauss':
                # e.g. 'gauss2.0' -> Gaussian kernel with FWHM 2.0
                try:
                    fwhm = 0.1 * float(int(0.5 + 10 * float(k[5:])))
                    fms = 'g_{0:.1f}'.format(fwhm)
                    if fms in self._kernels:
                        k = self._kernels[fms]
                    else:
                        sk = _gauss_kernel(fwhm * float(1024))
                        skr = (sk.size - 1) // 2048
                        skr = 2048 * skr + 1
                        skd = (sk.size - skr) // 2
                        sk = sk[skd:skr+skd]
                        sk = sk / numpy.sum(sk)
                        k = [sk, 1024]
                        self._kernels[fms] = k
                except:
                    raise ValueError('Invalid gaussian kernel requested.')
            elif not k in self._kernels:
                raise ValueError('Kernel ' + k + ' not available.')
            else:
                k = self._kernels[k]
        elif not isinstance(k, tuple) or len(k) != 2 or (
            not isinstance(k[0], numpy.ndarray) or len(k[1]) != 1 or
            (float(k[0].size - 1) % float(k[1])) != 0.0):
            # NOTE(review): len(k[1]) looks wrong for an int sub-step
            # count (len() of an int raises TypeError) — verify intent.
            raise ValueError('Invalid kernel k.')
        if ad == 1:
            out = _sample_values(a, s, k[0], k[1])
        else:
            # multi-column data: sample column 0, then fill the rest
            as0 = ash[0]
            if ad > 3:
                raise ValueError('Invalid data provided for column-based sampling.')
            out = _sample_values(a[:,0].reshape(as0), s, k[0], k[1])
            out.shape = (out.size,1,)
            # NOTE(review): for ad == 3 the repeat over axis=2 operates
            # on a 2-D intermediate — confirm this path is exercised.
            for d in range(1, ad):
                out = numpy.repeat(out, ash[d], axis=d)
            if ad == 2:
                for d in range(1, ash[1]):
                    out[:,d] = _sample_values(a[:,d].reshape(as0), s, k[0], k[1])
            else:
                for d1 in range(ash[1]):
                    for d2 in range(ash[2]):
                        out[:,d1,d2] = _sample_values(a[:,d1,d2].reshape(as0), s, k[0],k[1])
        # optional output conversion with clipping for integer types
        if out_type != 'float64':
            if out_type == 'uint8':
                out = numpy.minimum(numpy.maximum(out, 0.0), 255.0).astype(numpy.uint8)
            elif out_type == 'float32':
                out = out.astype(numpy.float32)
            elif out_type == 'int16':
                out = numpy.minimum(numpy.maximum(out, -32768.0), 32767.0).astype(numpy.int16)
            elif out_type == 'int32':
                out = out.astype(numpy.int32)
            else:
                warnings.warn('Output of type ' + out_type + ' not supported; returning float64.')
        return out
    # sample 2D grid
    def sample_grid(self,
            a:numpy.ndarray,
            s:Union[numpy.ndarray,list,tuple,int,float],
            k:Union[str,tuple] = 'resample',
            out_type:str = 'float64',
            m:Union[list,dict,numpy.ndarray] = None,
            fine:bool = False,
            ) -> numpy.ndarray:
        """Resample a 2-D array (or each plane of a 3-D array) on a grid.

        a : 2-D image or 3-D (image planes stacked along axis 2).
        s : per-axis sampling spec — int/float (size or factor), a
            3-row ndarray (arange specs), or a list/tuple of per-axis
            int/float/coordinate vectors.
        k : kernel name or (kernel_array, sub_steps) pair; see
            sample_values for the naming rules.
        m : optional 2-D affine (as matrix, or dict/list for
            trans_matrix) applied to the grid coordinates.
        fine : use sub-step interpolated kernel lookups (slower,
            slightly more accurate); only relevant when m is given.
        out_type : 'float64', 'uint8', 'float32', 'int16' or 'int32'.
        """
        if not isinstance(a, numpy.ndarray):
            raise ValueError('Invalid array a to sample.')
        ad = a.ndim
        ash = a.shape
        # normalize s into a list of per-axis coordinate vectors
        if isinstance(s, int):
            s = float(s) / float(max(ash[0], ash[1]))
        if isinstance(s, float):
            sf = s
            s = []
            for d in range(min(2,ad)):
                s.append(int(sf * (float(ash[d]) + 0.5)))
        if isinstance(s, numpy.ndarray):
            if s.ndim != 2 or s.shape[0] != 3 or s.shape[1] > ad:
                raise ValueError('Invalid sampling specification.')
            sl = []
            # NOTE(review): iterating `for d in s.shape[1]` over an int
            # raises TypeError — probably meant range(s.shape[1]); verify.
            for d in s.shape[1]:
                sl.append(numpy.arange(s[0,d], s[1,d], s[2,d]).astype(numpy.float64))
            s = sl
        elif (not isinstance(s, list) and not isinstance(s, tuple)) or len(s) > ad:
            raise ValueError('Invalid sampling specification.')
        else:
            s = s[:]
        try:
            for d in range(len(s)):
                if isinstance(s[d], int):
                    # target size -> evenly spaced source coordinates
                    sf = float(ash[d]) / float(s[d])
                    s[d] = numpy.arange(sf/2.0-0.5, float(ash[d])-0.5, sf)
                elif isinstance(s[d], float):
                    # sampling factor -> spacing 1/factor
                    sf = 1.0 / s[d]
                    s[d] = numpy.arange(sf/2.0-0.5, float(ash[d])-0.5, sf)
                elif not isinstance(s[d], numpy.ndarray):
                    s[d] = numpy.asarray(s[d]).astype(numpy.float64)
        except:
            raise
        # resolve the kernel by name (cached) or accept an explicit pair
        if isinstance(k, str):
            if k == 'resample':
                # mean sampling distance over all axes picks the amount
                # of pre-smoothing needed when downsampling
                fs = []
                for d in range(len(s)):
                    fs.append(numpy.mean(numpy.diff(s[d])))
                fm = 0.1 * numpy.trunc(10.0 * numpy.mean(fs))
                if fm <= 1.0:
                    k = self._kernels['cubic']
                else:
                    fms = 'rs_{0:.1f}'.format(fm)
                    if fms in self._kernels:
                        k = self._kernels[fms]
                    else:
                        # cubic kernel convolved with a matching Gaussian
                        kc = self._kernels['cubic']
                        sk = _gauss_kernel(fm * float(kc[1])).astype(numpy.float64)
                        skl = sk.size
                        skr = (skl - 1) // (2 * kc[1])
                        skr = 2 * kc[1] * skr + 1
                        skd = (skl - skr) // 2
                        sk = sk[skd:skr+skd]
                        sk = sk / numpy.sum(sk)
                        ksk = numpy.convolve(kc[0], sk)
                        while numpy.sum(ksk[0:kc[1]]) < 0.01:
                            ksk = ksk[kc[1]:-kc[1]]
                        k = [ksk, kc[1]]
                        self._kernels[fms] = k
            elif len(k) > 5 and k[0:5] == 'gauss':
                # e.g. 'gauss2.0' -> Gaussian kernel with FWHM 2.0
                try:
                    fwhm = 0.1 * float(int(0.5 + 10 * float(k[5:])))
                    fms = 'g_{0:.1f}'.format(fwhm)
                    if fms in self._kernels:
                        k = self._kernels[fms]
                    else:
                        sk = _gauss_kernel(fwhm * float(1024))
                        skr = (sk.size - 1) // 2048
                        skr = 2048 * skr + 1
                        skd = (sk.size - skr) // 2
                        sk = sk[skd:skr+skd]
                        sk = sk / numpy.sum(sk)
                        k = [sk, 1024]
                        self._kernels[fms] = k
                except:
                    raise ValueError('Invalid gaussian kernel requested.')
            elif not k in self._kernels:
                raise ValueError('Kernel ' + k + ' not available.')
            else:
                k = self._kernels[k]
        elif not isinstance(k, tuple) or len(k) != 2 or (
            not isinstance(k[0], numpy.ndarray) or len(k[1]) != 1 or
            (float(k[0].size - 1) % float(k[1])) != 0.0):
            # NOTE(review): len(k[1]) on an int sub-step count raises
            # TypeError — verify intent.
            raise ValueError('Invalid kernel k.')
        ls = len(s)
        if ls == 2:
            # dict/list transform spec -> matrix
            if isinstance(m, list) or isinstance(m, dict):
                try:
                    m = trans_matrix(m)
                except:
                    raise
            if m is None or not isinstance(m, numpy.ndarray):
                # axis-aligned (separable) grid sampling
                if ad == 2:
                    out = _sample_grid_2d(a, s[0], s[1], k[0], k[1])
                elif ad == 3:
                    # sample plane 0, allocate the stack, fill the rest
                    out = _sample_grid_2d(a[:,:,0].reshape((ash[0], ash[1],)),
                        s[0], s[1], k[0], k[1])
                    outsh = out.shape
                    out = numpy.repeat(out.reshape((outsh[0], outsh[1], 1)),
                        ash[2], axis=2)
                    for p in range(1, ash[2]):
                        out[:,:,p] = _sample_grid_2d(a[:,:,p].reshape((ash[0], ash[1])),
                            s[0], s[1], k[0], k[1]).reshape((outsh[0], outsh[1],))
                else:
                    raise ValueError('Sampling 2D grid of 4D data not supported.')
            else:
                # affine-transformed grid: map the grid coordinates
                # through m, then sample at scattered coordinates
                if m.dtype != numpy.float64 or m.shape[1] != 3 or m.shape[0] < 2:
                    raise ValueError('Invalid transformation matrix m.')
                s0 = s[0].size
                s1 = s[1].size
                (c1, c0) = numpy.meshgrid(s[1], s[0])
                c0.shape = (c0.size,1,)
                c1.shape = (c1.size,1,)
                c01 = numpy.concatenate(
                    (m[0,0]*c0+m[0,1]*c1+m[0,2], m[1,0]*c0+m[1,1]*c1+m[1,2]), axis=1)
                if ad == 2:
                    if fine:
                        out = _sample_grid_coords_fine(
                            a, c01, k[0], k[1]).reshape((s0,s1,))
                    else:
                        out = _sample_grid_coords(
                            a, c01, k[0], k[1]).reshape((s0,s1,))
                elif ad == 3:
                    # per-plane sampling, same coordinates for each plane
                    outsh = (s0,s1,1,)
                    if fine:
                        out = _sample_grid_coords_fine(
                            a[:,:,0].reshape((ash[0], ash[1],)),
                            c01, k[0], k[1]).reshape(outsh)
                        out = numpy.repeat(out, ash[2], axis=2)
                        for p in range(1, ash[2]):
                            out[:,:,p] = _sample_grid_coords_fine(
                                a[:,:,p].reshape((ash[0], ash[1],)),
                                c01, k[0], k[1]).reshape((s0,s1,))
                    else:
                        out = _sample_grid_coords(
                            a[:,:,0].reshape((ash[0], ash[1],)),
                            c01, k[0], k[1]).reshape(outsh)
                        out = numpy.repeat(out, ash[2], axis=2)
                        for p in range(1, ash[2]):
                            out[:,:,p] = _sample_grid_coords(
                                a[:,:,p].reshape((ash[0], ash[1],)),
                                c01, k[0], k[1]).reshape((s0,s1,))
        elif ls == 1:
            out = _sample_values(a, s[0], k[0], k[1])
        elif ls == 3:
            raise NotImplementedError('3D interpolation not yet implemented.')
        else:
            raise NotImplementedError('Higher dim interpolation not yet implemented.')
        # optional output conversion with clipping for integer types
        if out_type != 'float64':
            if out_type == 'uint8':
                out = numpy.minimum(numpy.maximum(out, 0.0), 255.0).astype(numpy.uint8)
            elif out_type == 'float32':
                out = out.astype(numpy.float32)
            elif out_type == 'int16':
                out = numpy.minimum(numpy.maximum(out, -32768.0), 32767.0).astype(numpy.int16)
            elif out_type == 'int32':
                out = out.astype(numpy.int32)
            else:
                warnings.warn('Output of type ' + out_type + ' not supported; returning float64.')
        return out
    # sample 2D grid
    def sample_radial(self,
            a:numpy.ndarray,
            cx:float,
            cy:float,
            step:float,
            steps:int,
            astep:float,
            k:Union[str,tuple] = 'cubic',
            out_type:str = 'float64',
            fine:bool = False,
            ) -> numpy.ndarray:
        """Radially sample array `a` around center (cx, cy).

        NOTE(review): this method appears to be an unfinished copy of
        sample_grid — it validates cx/cy/step/steps/astep but then
        references the undefined names `s` and `m`, so calling it raises
        NameError.  The computed `astep` angle array is never used and
        the bare `steps` expression below is a no-op.  The intended
        radial-coordinate construction is not recoverable from this
        code; the body is left unchanged and only annotated.
        """
        if not isinstance(a, numpy.ndarray):
            raise ValueError('Invalid array a to sample.')
        ad = a.ndim
        ash = a.shape
        if not isinstance(cx, float) or not isinstance(cy, float):
            raise ValueError('Invalid center coordinate.')
        if not isinstance(step, float) or step <= 0.0:
            raise ValueError('Invalid step size.')
        if not isinstance(steps, int) or steps <= 0:
            raise ValueError('Invalid number of steps.')
        # NOTE(review): bare expression, has no effect
        steps
        if not isinstance(astep, float) or astep <= 0.0:
            raise ValueError('Invalid angular step size.')
        # NOTE(review): this angle array is computed but never used below
        astep = numpy.arange(0.0, numpy.pi/2.0, astep)
        # NOTE(review): everything from here on references `s` (and later
        # `m`), which are never defined in this method (copied from
        # sample_grid) — NameError at runtime.
        if isinstance(s, int):
            s = float(s) / float(max(ash[0], ash[1]))
        if isinstance(s, float):
            sf = s
            s = []
            for d in range(min(2,ad)):
                s.append(int(sf * (float(ash[d]) + 0.5)))
        if isinstance(s, numpy.ndarray):
            if s.ndim != 2 or s.shape[0] != 3 or s.shape[1] > ad:
                raise ValueError('Invalid sampling specification.')
            sl = []
            for d in s.shape[1]:
                sl.append(numpy.arange(s[0,d], s[1,d], s[2,d]).astype(numpy.float64))
            s = sl
        elif (not isinstance(s, list) and not isinstance(s, tuple)) or len(s) > ad:
            raise ValueError('Invalid sampling specification.')
        else:
            s = s[:]
        try:
            for d in range(len(s)):
                if isinstance(s[d], int):
                    sf = float(ash[d]) / float(s[d])
                    s[d] = numpy.arange(sf/2.0-0.5, float(ash[d])-0.5, sf)
                elif isinstance(s[d], float):
                    sf = 1.0 / s[d]
                    s[d] = numpy.arange(sf/2.0-0.5, float(ash[d])-0.5, sf)
                elif not isinstance(s[d], numpy.ndarray):
                    s[d] = numpy.asarray(s[d]).astype(numpy.float64)
        except:
            raise
        # kernel resolution: identical to sample_grid
        if isinstance(k, str):
            if k == 'resample':
                fs = []
                for d in range(len(s)):
                    fs.append(numpy.mean(numpy.diff(s[d])))
                fm = 0.1 * numpy.trunc(10.0 * numpy.mean(fs))
                if fm <= 1.0:
                    k = self._kernels['cubic']
                else:
                    fms = 'rs_{0:.1f}'.format(fm)
                    if fms in self._kernels:
                        k = self._kernels[fms]
                    else:
                        kc = self._kernels['cubic']
                        sk = _gauss_kernel(fm * float(kc[1])).astype(numpy.float64)
                        skl = sk.size
                        skr = (skl - 1) // (2 * kc[1])
                        skr = 2 * kc[1] * skr + 1
                        skd = (skl - skr) // 2
                        sk = sk[skd:skr+skd]
                        sk = sk / numpy.sum(sk)
                        ksk = numpy.convolve(kc[0], sk)
                        while numpy.sum(ksk[0:kc[1]]) < 0.01:
                            ksk = ksk[kc[1]:-kc[1]]
                        k = [ksk, kc[1]]
                        self._kernels[fms] = k
            elif len(k) > 5 and k[0:5] == 'gauss':
                try:
                    fwhm = 0.1 * float(int(0.5 + 10 * float(k[5:])))
                    fms = 'g_{0:.1f}'.format(fwhm)
                    if fms in self._kernels:
                        k = self._kernels[fms]
                    else:
                        sk = _gauss_kernel(fwhm * float(1024))
                        skr = (sk.size - 1) // 2048
                        skr = 2048 * skr + 1
                        skd = (sk.size - skr) // 2
                        sk = sk[skd:skr+skd]
                        sk = sk / numpy.sum(sk)
                        k = [sk, 1024]
                        self._kernels[fms] = k
                except:
                    raise ValueError('Invalid gaussian kernel requested.')
            elif not k in self._kernels:
                raise ValueError('Kernel ' + k + ' not available.')
            else:
                k = self._kernels[k]
        elif not isinstance(k, tuple) or len(k) != 2 or (
            not isinstance(k[0], numpy.ndarray) or len(k[1]) != 1 or
            (float(k[0].size - 1) % float(k[1])) != 0.0):
            raise ValueError('Invalid kernel k.')
        ls = len(s)
        if ls == 2:
            # NOTE(review): `m` is undefined in this method
            if isinstance(m, list) or isinstance(m, dict):
                try:
                    m = trans_matrix(m)
                except:
                    raise
            if m is None or not isinstance(m, numpy.ndarray):
                if ad == 2:
                    out = _sample_grid_2d(a, s[0], s[1], k[0], k[1])
                elif ad == 3:
                    out = _sample_grid_2d(a[:,:,0].reshape((ash[0], ash[1],)),
                        s[0], s[1], k[0], k[1])
                    outsh = out.shape
                    out = numpy.repeat(out.reshape((outsh[0], outsh[1], 1)),
                        ash[2], axis=2)
                    for p in range(1, ash[2]):
                        out[:,:,p] = _sample_grid_2d(a[:,:,p].reshape((ash[0], ash[1])),
                            s[0], s[1], k[0], k[1]).reshape((outsh[0], outsh[1],))
                else:
                    raise ValueError('Sampling 2D grid of 4D data not supported.')
            else:
                if m.dtype != numpy.float64 or m.shape[1] != 3 or m.shape[0] < 2:
                    raise ValueError('Invalid transformation matrix m.')
                s0 = s[0].size
                s1 = s[1].size
                (c1, c0) = numpy.meshgrid(s[1], s[0])
                c0.shape = (c0.size,1,)
                c1.shape = (c1.size,1,)
                c01 = numpy.concatenate(
                    (m[0,0]*c0+m[0,1]*c1+m[0,2], m[1,0]*c0+m[1,1]*c1+m[1,2]), axis=1)
                if ad == 2:
                    if fine:
                        out = _sample_grid_coords_fine(
                            a, c01, k[0], k[1]).reshape((s0,s1,))
                    else:
                        out = _sample_grid_coords(
                            a, c01, k[0], k[1]).reshape((s0,s1,))
                elif ad == 3:
                    outsh = (s0,s1,1,)
                    if fine:
                        out = _sample_grid_coords_fine(
                            a[:,:,0].reshape((ash[0], ash[1],)),
                            c01, k[0], k[1]).reshape(outsh)
                        out = numpy.repeat(out, ash[2], axis=2)
                        for p in range(1, ash[2]):
                            out[:,:,p] = _sample_grid_coords_fine(
                                a[:,:,p].reshape((ash[0], ash[1],)),
                                c01, k[0], k[1]).reshape((s0,s1,))
                    else:
                        out = _sample_grid_coords(
                            a[:,:,0].reshape((ash[0], ash[1],)),
                            c01, k[0], k[1]).reshape(outsh)
                        out = numpy.repeat(out, ash[2], axis=2)
                        for p in range(1, ash[2]):
                            out[:,:,p] = _sample_grid_coords(
                                a[:,:,p].reshape((ash[0], ash[1],)),
                                c01, k[0], k[1]).reshape((s0,s1,))
        elif ls == 1:
            out = _sample_values(a, s[0], k[0], k[1])
        elif ls == 3:
            raise NotImplementedError('3D interpolation not yet implemented.')
        else:
            raise NotImplementedError('Higher dim interpolation not yet implemented.')
        # optional output conversion with clipping for integer types
        if out_type != 'float64':
            if out_type == 'uint8':
                out = numpy.minimum(numpy.maximum(out, 0.0), 255.0).astype(numpy.uint8)
            elif out_type == 'float32':
                out = out.astype(numpy.float32)
            elif out_type == 'int16':
                out = numpy.minimum(numpy.maximum(out, -32768.0), 32767.0).astype(numpy.int16)
            elif out_type == 'int32':
                out = out.astype(numpy.int32)
            else:
                warnings.warn('Output of type ' + out_type + ' not supported; returning float64.')
        return out
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from numpy import linalg as LA
import math
from matplotlib.colors import ListedColormap
from BayesClassifier import BayesClassifier
from GlobalClassifier import GlobalClassifier
# data preprocessing for different datasets
"""
x = pd.read_csv('dataset2', sep=" ", header = None)
x1 = x.iloc[:500, :2].values
x2 = x.iloc[500:1000, :2].values
x3 = x.iloc[1000:, :2].values
y1 = np.full(500, 1) # class-1
y2 = np.full(500, 2) # class-2
y3 = np.full(1000, 3) # class-3
total_size = 1500
x_one = pd.read_csv('1Class1.txt', sep=" ", header = None)
x_two = pd.read_csv('1Class2.txt', sep=" ", header = None)
x_three = pd.read_csv('1Class3.txt', sep=" ", header = None)
x1 = x_one.iloc[:, :2].values
x2 = x_two.iloc[:, :2].values
x3 = x_three.iloc[:, :2].values
y1 = np.full(500, 1) # class-1
y2 = np.full(500, 2) # class-2
y3 = np.full(500, 3) # class-3
total_size = 1125
"""
# Load the three-class dataset: one whitespace-separated file per class;
# the first two columns are the features.
x_one = pd.read_csv('2class1.txt', sep=" ", header = None)
x_two = pd.read_csv('2class2.txt', sep=" ", header = None)
x_three = pd.read_csv('2class3.txt', sep=" ", header = None)
x1 = x_one.iloc[:, :2].values
x2 = x_two.iloc[:, :2].values
x3 = x_three.iloc[:, :2].values
# Hard-coded per-class sample counts — must match the files' row counts
# (TODO confirm; safer would be len(x1) etc.).
y1 = np.full(2454, 1) # class-1
y2 = np.full(2488, 2) # class-2
y3 = np.full(2291, 3) # class-3
total_size = 5424
# Randomly split each class into 75% training / 25% testing.
# Fix: sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
x1_train, x1_test, y1_train, y1_test = train_test_split(x1, y1, test_size = 0.25)
x2_train, x2_test, y2_train, y2_test = train_test_split(x2, y2, test_size = 0.25)
x3_train, x3_test, y3_train, y3_test = train_test_split(x3, y3, test_size = 0.25)
# plotting the dataset
# Pairwise scatter plots of the training data, one figure per class pair
# (1 vs 2, 2 vs 3, 3 vs 1). Colours: class-1 red, class-2 green, class-3 blue.
plt.scatter(x1_train[:, 0], x1_train[:, 1], color = 'red', alpha = 0.4)
plt.scatter(x2_train[:, 0], x2_train[:, 1], color = 'green', alpha = 0.4)
plt.xlabel('x1 feature')
plt.ylabel('x2 feature')
plt.show()
plt.scatter(x2_train[:, 0], x2_train[:, 1], color = 'green', alpha = 0.4)
plt.scatter(x3_train[:, 0], x3_train[:, 1], color = 'blue', alpha = 0.4)
plt.xlabel('x1 feature')
plt.ylabel('x2 feature')
plt.show()
plt.scatter(x3_train[:, 0], x3_train[:, 1], color = 'blue', alpha = 0.4)
plt.scatter(x1_train[:, 0], x1_train[:, 1], color = 'red', alpha = 0.4)
plt.xlabel('x1 feature')
plt.ylabel('x2 feature')
plt.show()
# declaring classifier handling objects
# Each BayesClassifier handles one ordered pair of classes: (1,2), (2,3), (3,1).
obj_1 = BayesClassifier(1, 2, x1_train, y1_train, x2_train, y2_train, total_size)
obj_2 = BayesClassifier(2, 3, x2_train, y2_train, x3_train, y3_train, total_size)
obj_3 = BayesClassifier(3, 1, x3_train, y3_train, x1_train, y1_train, total_size)
# preparing the meshgrid to plot the decision boundary
# Note: that for dataset 1, 2 use step = 0.1 and for dataset 3 use step = 5
# x_set stacks all training points so the grid spans the whole data range
# (padded by 1 on each side).
x_set = np.concatenate((x1_train, x2_train, x3_train), axis = 0)
X1, X2 = np.meshgrid(np.arange(start = x_set[:, 0].min() - 1,
                               stop = x_set[:, 0].max() + 1, step = 5),
                     np.arange(start = x_set[:, 1].min() - 1, stop = x_set[:, 1].max() + 1,
                               step = 5))
# plotting the decision surface between two classes
# choose the classifier according to the four cases of covariance matrix
# by default classifier_one is used in the below code
# Every grid point is classified (grid flattened to an (N, 2) array, result
# reshaped back onto the grid) and the training points are overlaid.
plt.contourf(X1, X2, obj_1.classifier_one(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha = 0.4, cmap = ListedColormap(('red', 'green')))
plt.scatter(x1_train[:, 0], x1_train[:, 1], color = 'red', alpha = 1)
plt.scatter(x2_train[:, 0], x2_train[:, 1], color = 'green', alpha = 1)
plt.xlabel('x1 feature')
plt.ylabel('x2 feature')
plt.show()
plt.contourf(X1, X2, obj_2.classifier_one(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha = 0.4, cmap = ListedColormap(('green', 'blue')))
plt.scatter(x2_train[:, 0], x2_train[:, 1], color = 'green', alpha = 1)
plt.scatter(x3_train[:, 0], x3_train[:, 1], color = 'blue', alpha = 1)
plt.xlabel('x1 feature')
plt.ylabel('x2 feature')
plt.show()
plt.contourf(X1, X2, obj_3.classifier_one(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha = 0.4, cmap = ListedColormap(('red', 'blue')))
plt.scatter(x3_train[:, 0], x3_train[:, 1], color = 'blue', alpha = 1)
plt.scatter(x1_train[:, 0], x1_train[:, 1], color = 'red', alpha = 1)
plt.xlabel('x1 feature')
plt.ylabel('x2 feature')
plt.show()
# plotting global decision boundary between all the three classes together with
# their contour plots superimposed on them
# One figure per covariance-matrix case (global_classifier_one ... four);
# the contour_func_* calls overlay density contours of the class Gaussians.
glob_obj = GlobalClassifier(x1_train, y1_train, x2_train, y2_train, x3_train, y3_train, total_size)
plt.contourf(X1, X2, glob_obj.global_classifier_one(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha = 0.4, cmap = ListedColormap(('red', 'green', 'blue')))
plt.scatter(x1_train[:, 0], x1_train[:, 1], color = 'red', alpha = 1)
plt.scatter(x2_train[:, 0], x2_train[:, 1], color = 'green', alpha = 1)
plt.scatter(x3_train[:, 0], x3_train[:, 1], color = 'blue', alpha = 1)
plt.contour(X1, X2, obj_1.contour_func_i(X1, X2, 1), [0.3, 0.4, 0.5, 0.6])
plt.contour(X1, X2, obj_1.contour_func_j(X1, X2, 1), [0.3, 0.4, 0.5, 0.6])
plt.contour(X1, X2, obj_2.contour_func_j(X1, X2, 1), [0.3, 0.4, 0.5, 0.6])
plt.xlabel('x1 feature')
plt.ylabel('x2 feature')
plt.show()
plt.contourf(X1, X2, glob_obj.global_classifier_two(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha = 0.4, cmap = ListedColormap(('red', 'green', 'blue')))
plt.scatter(x1_train[:, 0], x1_train[:, 1], color = 'red', alpha = 1)
plt.scatter(x2_train[:, 0], x2_train[:, 1], color = 'green', alpha = 1)
plt.scatter(x3_train[:, 0], x3_train[:, 1], color = 'blue', alpha = 1)
plt.contour(X1, X2, obj_1.contour_func_i(X1, X2, 3), [0.6, 0.7, 0.8, 0.9])
plt.contour(X1, X2, obj_1.contour_func_j(X1, X2, 3), [0.6, 0.7, 0.8, 0.9])
plt.contour(X1, X2, obj_2.contour_func_j(X1, X2, 3), [0.3, 0.4, 0.5, 0.6])
plt.xlabel('x1 feature')
plt.ylabel('x2 feature')
plt.show()
plt.contourf(X1, X2, glob_obj.global_classifier_three(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha = 0.4, cmap = ListedColormap(('red', 'green', 'blue')))
plt.scatter(x1_train[:, 0], x1_train[:, 1], color = 'red', alpha = 1)
plt.scatter(x2_train[:, 0], x2_train[:, 1], color = 'green', alpha = 1)
plt.scatter(x3_train[:, 0], x3_train[:, 1], color = 'blue', alpha = 1)
plt.contour(X1, X2, obj_1.contour_func_i(X1, X2, 3), [0.6, 0.7, 0.8, 0.9])
plt.contour(X1, X2, obj_1.contour_func_j(X1, X2, 3), [0.6, 0.7, 0.8, 0.9])
plt.contour(X1, X2, obj_2.contour_func_j(X1, X2, 3), [0.3, 0.4, 0.5, 0.6])
plt.xlabel('x1 feature')
plt.ylabel('x2 feature')
plt.show()
plt.contourf(X1, X2, glob_obj.global_classifier_four(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha = 0.4, cmap = ListedColormap(('red', 'green', 'blue')))
plt.scatter(x1_train[:, 0], x1_train[:, 1], color = 'red', alpha = 1)
plt.scatter(x2_train[:, 0], x2_train[:, 1], color = 'green', alpha = 1)
plt.scatter(x3_train[:, 0], x3_train[:, 1], color = 'blue', alpha = 1)
plt.contour(X1, X2, obj_1.contour_func_i(X1, X2, 4), [0.6, 0.7, 0.8, 0.9])
plt.contour(X1, X2, obj_1.contour_func_j(X1, X2, 4), [0.6, 0.7, 0.8, 0.9])
plt.contour(X1, X2, obj_2.contour_func_j(X1, X2, 4), [0.3, 0.4, 0.5, 0.6])
plt.xlabel('x1 feature')
plt.ylabel('x2 feature')
plt.show()
from sklearn.metrics import confusion_matrix
# NOTE(review): x_set holds the *training* features and y_true the training
# labels, so this confusion matrix measures training (not test) performance.
y_true = np.concatenate((y1_train, y2_train, y3_train), axis = 0)
y_pred = glob_obj.global_classifier_four(x_set)
print(confusion_matrix(y_true, y_pred))
|
import base64
import csv
import ctypes
import json
from mmt_retrieval.model.models import OSCAR, ClassificationHead
from mmt_retrieval import MultimodalTransformer
import torch
import os
import numpy as np
def convert_finetuned_oscar(oscar_model_folder, model_args=None):
    """
    Convert a fine-tuned OSCAR model downloaded from https://github.com/microsoft/Oscar/blob/master/MODEL_ZOO.md
    to a model in our format.
    :param oscar_model_folder: Folder where the fine-tuned model is stored
    :param model_args: Arguments for the created OSCAR model. See mmt_retrieval.model.models.OSCAR
    :return: A MMEMBTransformer with the OSCAR model and a classification head matching the fine-tuned model
    """
    # BUG FIX: a mutable default argument ({}) would be shared across calls;
    # use None as the sentinel and create a fresh dict per call.
    model_args = {} if model_args is None else model_args
    oscar = OSCAR(model_path=oscar_model_folder, **model_args)
    classifier_state_dict = torch.load(os.path.join(oscar_model_folder, "pytorch_model.bin"))
    # Close the config file deterministically instead of leaking the handle.
    with open(os.path.join(oscar_model_folder, "config.json")) as config_file:
        config = json.load(config_file)
    classifier = ClassificationHead(num_labels=config.get("num_labels", 2), input_key="pooled_cls_token_embeddings",
                                    input_dim=config["hidden_size"], classifier_type=config.get("classifier", "linear"),
                                    scaling_factor=config.get("cls_hidden_scale", 2), dropout=config["hidden_dropout_prob"])
    # strict=False: the checkpoint contains more than the head's parameters
    # (presumably the full transformer state) -- only matching keys are loaded.
    classifier.load_state_dict(classifier_state_dict, strict=False)
    model = MultimodalTransformer(modules=[oscar, classifier])
    return model
def split_oscar_image_feature_file_to_npz(oscar_image_feature_file, out_folder):
    """
    Split the Image Features downloaded from here: https://github.com/microsoft/Oscar/blob/master/DOWNLOAD.MD
    to the .npz single file format that we support.
    Intended for just-in-time loading support.
    :param oscar_image_feature_file: The downloaded .pt file with the image features
    :param out_folder: Folder where the .npz files will be saved
    """
    os.makedirs(out_folder, exist_ok=True)
    all_features = torch.load(oscar_image_feature_file)
    for image_id, tensor in all_features.items():
        # Columns 0..2047 are the region features, 2048..2051 the box coords.
        region_features = tensor[:, 0:2048].numpy()
        region_boxes = tensor[:, 2048:2052].numpy()
        np.savez_compressed(
            os.path.join(out_folder, f"{image_id}.npz"),
            x=region_features,
            bbox=region_boxes,
            num_bbox=tensor.shape[0],
            image_h=1.0,
            image_w=1.0,
        )
def split_tsv_features_to_npz(tsv_image_feature_file, out_folder):
    """
    Split the Image Features generated in the .tsv format from https://github.com/peteanderson80/bottom-up-attention
    to the .npz single file format that we support.
    Intended for just-in-time loading support.
    :param tsv_image_feature_file: the .tsv file with the image features
    :param out_folder: Folder where the .npz files will be saved
    """
    fieldnames = ["img_id", "img_w", "img_h", "num_boxes", "boxes",
                  "features"]
    # Base64 fields can be huge; raise the csv field limit to the platform max.
    csv.field_size_limit(int(ctypes.c_ulong(-1).value // 2))
    os.makedirs(out_folder, exist_ok=True)
    with open(tsv_image_feature_file, encoding="utf-8") as tsv_file:
        for row in csv.DictReader(tsv_file, fieldnames, delimiter="\t"):
            img_w = int(row["img_w"])
            img_h = int(row["img_h"])
            n_boxes = int(row["num_boxes"])
            # Each array field is a base64-encoded float32 buffer; decode and
            # reshape, then mark read-only (frombuffer shares the buffer).
            decoded = {}
            for field, shape in (("boxes", (n_boxes, 4)), ("features", (n_boxes, -1))):
                arr = np.frombuffer(base64.b64decode(row[field]), dtype=np.float32)
                arr = arr.reshape(shape)
                arr.setflags(write=False)
                decoded[field] = arr
            output_file = os.path.join(out_folder, f"{row['img_id']}.npz")
            np.savez_compressed(output_file, x=decoded["features"], bbox=decoded["boxes"],
                                num_bbox=n_boxes, image_h=img_h, image_w=img_w)
<reponame>alex4200/PyBlock
# Main Code for editing blocks
import glob
import logging
from pathlib import Path
from .block import Block
from .region import Region
from .tools import block_to_region_chunk
from . import converter as conv
from .maze import Maze
L = logging.getLogger("pyblock")
class MCEditor:
    """High-level editor for a Minecraft world.

    Blocks set via :meth:`set_block` are only recorded in memory;
    :meth:`done` groups them by region/chunk and writes them to disk.
    """
    __slots__ = ("path", "blocks_map", "local_chunks")

    def __init__(self, path):
        """Initialize the editor with the path to the world.

        Args:
            path (str): Path to the world folder.
        """
        # Region files live in the 'region' subfolder of the world.
        self.path = Path(path) / "region"
        # region coord -> {chunk coord -> [(block, x, y, z), ...]} pending edits
        self.blocks_map = {}
        # Cache of chunks already read, for faster repeated 'get_block' calls.
        self.local_chunks = {}

    def set_verbosity(self, verbose):
        """Sets the verbosity level. Possible values are 0, 1 or 2."""
        level = (logging.WARNING, logging.INFO, logging.DEBUG)[min(verbose, 2)]
        L.setLevel(level)

    def set_block(self, block, x, y, z):
        """
        Records position and block to be modified.

        Args:
            block (Block): Minecraft Block
            x, y, z (int): World Coordinates
        """
        # Get the region and the chunk coordinates where to set the block.
        region_coord, chunk_coord, block_coord = conv.block_to_region_chunk(x, z)
        # The location to change: block and its coordinates inside the chunk.
        loc = (block, block_coord[0], y, block_coord[1])
        # Nested setdefault replaces the previous three-branch insert.
        self.blocks_map.setdefault(region_coord, {}).setdefault(chunk_coord, []).append(loc)

    def get_block(self, x, y, z):
        """Returns the block at the given absolute coordinates."""
        # NOTE(review): this uses tools.block_to_region_chunk while set_block
        # uses converter.block_to_region_chunk -- confirm both are equivalent.
        region_coord, chunk_coord, block_coord = block_to_region_chunk(x, z)
        # Serve the chunk from the local cache when possible.
        chunk_id = (*region_coord, *chunk_coord)
        if chunk_id in self.local_chunks:
            chunk = self.local_chunks[chunk_id]
        else:
            region = Region(self.path, *region_coord)
            chunk = region.get_chunk(*chunk_coord)
            self.local_chunks[chunk_id] = chunk
        coords = block_coord[0], y, block_coord[1]
        return chunk.get_block(*coords)

    def place_piece(
        self, x1, y1, z1, block_floor, block_fill, block_ceil, height=4, mag=1
    ):
        """Places an element of the maze.

        Args:
            x1 (int): x coordinate
            y1 (int): y coordinate
            z1 (int): z coordinate
            block_floor: minecraft block for the floor
            block_fill: minecraft block to fill
            block_ceil: minecraft block for the ceiling
            height (int): The height of the walls
            mag (int): Magnifier of the maze. 1 means 1:1 size, 2 means all walls, ways
                are double size etc.
        """
        for dx in range(mag):
            for dz in range(mag):
                x = x1 + dx
                z = z1 + dz
                # Floor at y1, fill from y1+1 up to y1+height,
                # ceiling at y1+height+1.
                self.set_block(block_floor, x, y1, z)
                for y in range(y1, y1 + height):
                    self.set_block(block_fill, x, y + 1, z)
                self.set_block(block_ceil, x, y1 + height + 1, z)

    def create_maze(self, maze, coord, blocks, height=4, mag=1):
        """Creates and places a maze at the given start coordinates.

        Args:
            maze (Maze): Maze object providing the 0/1 wall matrix
            coord (int, int, int): Start coordinates of the maze
            blocks (string, string, string): Names for the blocks for floor, wall and ceiling
            height (int): Height of the inside of the maze (default: 4)
            mag (int): Magnifier number of the maze (thickness of path/walls)
        """
        # Get the maze as a simple 0/1 matrix (1 = wall, 0 = way).
        matrix = maze.get_matrix()
        # Define the blocks.
        block_floor = Block("minecraft", blocks[0])
        block_wall = Block("minecraft", blocks[1])
        block_ceil = Block("minecraft", blocks[2])
        block_air = Block("minecraft", "air")
        # Get the coordinates.
        x0 = coord[0]
        y0 = coord[1]
        z0 = coord[2]
        # Place the walls, floor and ceiling.
        for row, lines in enumerate(matrix):
            for col, block in enumerate(lines):
                x = x0 + mag * row
                z = z0 + mag * col
                if block:
                    self.place_piece(
                        x, y0, z, block_floor, block_wall, block_ceil, mag=mag
                    )
                else:
                    self.place_piece(
                        x, y0, z, block_floor, block_air, block_ceil, mag=mag
                    )

    def _read_map_file(self, map_file):
        """Returns a block dict (symbol -> block name) from the given filename."""
        m = {}
        with open(map_file) as filein:
            # Iterate the file object directly instead of readlines().
            for line in filein:
                t = line.strip().split()
                m[t[0]] = t[1]
        return m

    def from_map(self, path_file, coord, direction="y", repetition=1):
        """Reads a basic template from files and builds it at the given coordinates
        repetition times in the specified direction.

        Args:
            path_file (string): Path to the files(s)
            coord (int,int,int): Starting coordinates
            direction (string): Direction in which the template is repeated
            repetition (int): Number of repetitions
        """
        # Read the block map and find the template files.
        block_map = self._read_map_file(path_file + ".txt")
        tmp_files = glob.glob(path_file + "_*")
        # Get basic coordinates.
        x0, y0, z0 = coord
        # Loop over the repetition; each repetition stacks the template
        # len(tmp_files) levels higher.
        for rep in range(repetition):
            y = y0 + rep * len(tmp_files)
            for level in range(len(tmp_files)):
                tmp_file = f"{path_file}_{level:03d}.txt"
                with open(tmp_file) as template:
                    for dx, line in enumerate(template.readlines()):
                        for dz, b in enumerate(line.strip()):
                            block = Block("minecraft", block_map[b])
                            self.set_block(block, x0 + dx, y + level, z0 + dz)

    def done(self):
        """
        Modify the world with the recorded blocks.
        """
        # Loop over all regions that are affected.
        for region_coord, chunks in self.blocks_map.items():
            L.info(
                f"Modifying {len(chunks)} chunks in region {region_coord[0]}/{region_coord[1]}/"
            )
            region = Region(self.path, *region_coord)
            update_chunks = region.update_chunks(chunks)
            region.write(update_chunks)
|
"""Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
from datetime import datetime,timedelta
from time import daylight
from django.conf import settings
import pytz
months = ['jan','feb','mar','apr','may','jun','jul','aug','sep','oct','nov','dec']
weekdays = ['sun','mon','tue','wed','thu','fri','sat']
def parseATTime(s, tzinfo=None):
    """Parse an at-style time spec (e.g. 'noon+5min', '20120101', 'monday-1d',
    epoch seconds) into an aware datetime in *tzinfo* (default: settings.TIME_ZONE).
    """
    if tzinfo is None:
        tzinfo = pytz.timezone(settings.TIME_ZONE)
    # Normalise: lowercase, drop underscores, commas and spaces.
    s = s.strip().lower().replace('_','').replace(',','').replace(' ','')
    if s.isdigit():
        if len(s) == 8 and int(s[:4]) > 1900 and int(s[4:6]) < 13 and int(s[6:]) < 32:
            pass #Fall back because its not a timestamp, its YYYYMMDD form
        else:
            # Plain epoch timestamp.
            return datetime.fromtimestamp(int(s),tzinfo)
    elif ':' in s:
        # HH:MM_YYYYMMDD form (the separators were stripped above).
        return datetime.strptime(s,'%H:%M%Y%m%d')
    # Split into reference part and signed offset part.
    if '+' in s:
        ref,offset = s.split('+',1)
        offset = '+' + offset
    elif '-' in s:
        ref,offset = s.split('-',1)
        offset = '-' + offset
    else:
        ref,offset = s,''
    # NOTE(review): `daylight` is time.daylight (an int flag), passed here as
    # pytz.localize's is_dst argument -- looks dubious; confirm intended behaviour.
    return tzinfo.localize(parseTimeReference(ref), daylight) + parseTimeOffset(offset)
def parseTimeReference(ref):
    """Parse the reference portion of an at-style time ('now', 'noon', '8:50am',
    'MM/DD/YY[YY]', 'YYYYMMDD', month-name + day, weekday name) into a naive datetime.
    """
    if not ref or ref == 'now': return datetime.now()
    #Time-of-day reference
    i = ref.find(':')
    hour,min = 0,0
    if i != -1:
        hour = int( ref[:i] )
        min = int( ref[i+1:i+3] )
        ref = ref[i+3:]
        if ref[:2] == 'am': ref = ref[2:]
        elif ref[:2] == 'pm':
            hour = (hour + 12) % 24
            ref = ref[2:]
    if ref.startswith('noon'):
        hour,min = 12,0
        ref = ref[4:]
    elif ref.startswith('midnight'):
        hour,min = 0,0
        ref = ref[8:]
    elif ref.startswith('teatime'):
        hour,min = 16,0
        ref = ref[7:]
    refDate = datetime.now().replace(hour=hour,minute=min,second=0)
    #Day reference
    if ref in ('yesterday','today','tomorrow'): #yesterday, today, tomorrow
        if ref == 'yesterday':
            refDate = refDate - timedelta(days=1)
        if ref == 'tomorrow':
            refDate = refDate + timedelta(days=1)
    elif ref.count('/') == 2: #MM/DD/YY[YY]
        m,d,y = map(int,ref.split('/'))
        # Two-digit years: first lift to 19xx, then anything pre-1970 to 20xx.
        if y < 1900: y += 1900
        if y < 1970: y += 100
        refDate = refDate.replace(year=y)
        # Replace month/day in whichever order is valid for the current date
        # (e.g. moving off the 31st into a 30-day month). Fix for Bug #551771.
        try:
            refDate = refDate.replace(month=m)
            refDate = refDate.replace(day=d)
        except ValueError:  # narrowed from a bare except
            refDate = refDate.replace(day=d)
            refDate = refDate.replace(month=m)
    elif len(ref) == 8 and ref.isdigit(): #YYYYMMDD
        refDate = refDate.replace(year= int(ref[:4]))
        try: # Fix for Bug #551771
            refDate = refDate.replace(month= int(ref[4:6]))
            refDate = refDate.replace(day= int(ref[6:8]))
        except ValueError:  # narrowed from a bare except
            refDate = refDate.replace(day= int(ref[6:8]))
            refDate = refDate.replace(month= int(ref[4:6]))
    elif ref[:3] in months: #MonthName DayOfMonth
        refDate = refDate.replace(month= months.index(ref[:3]) + 1)
        if ref[-2:].isdigit():
            refDate = refDate.replace(day= int(ref[-2:]))
        elif ref[-1:].isdigit():
            refDate = refDate.replace(day= int(ref[-1:]))
        else:
            # Call-form raise works on both Python 2 and 3; the original
            # 'raise E, msg' statement is Python-2-only syntax.
            raise Exception("Day of month required after month name")
    elif ref[:3] in weekdays: #DayOfWeek (Monday, etc)
        todayDayName = refDate.strftime("%a").lower()[:3]
        today = weekdays.index( todayDayName )
        twoWeeks = weekdays * 2
        dayOffset = today - twoWeeks.index(ref[:3])
        if dayOffset < 0: dayOffset += 7
        refDate -= timedelta(days=dayOffset)
    elif ref:
        raise Exception("Unknown day reference")
    return refDate
def parseTimeOffset(offset):
    """Parse an offset string like '+5min2h' into a timedelta.

    The leading '+'/'-' (a bare digit implies '+') sets the sign for every
    unit in the string; months/years are approximated as 30/365 days.
    """
    if not offset:
        return timedelta()
    t = timedelta()
    if offset[0].isdigit():
        # No explicit sign: treat as positive.
        sign = 1
    else:
        sign = { '+' : 1, '-' : -1 }[offset[0]]
        offset = offset[1:]
    while offset:
        # Consume one run of digits (the amount)...
        i = 1
        while offset[:i].isdigit() and i <= len(offset): i += 1
        num = int(offset[:i-1])
        offset = offset[i-1:]
        # ...then one run of letters (the unit name).
        i = 1
        while offset[:i].isalpha() and i <= len(offset): i += 1
        unit = offset[:i-1]
        offset = offset[i-1:]
        unitString = getUnitString(unit)
        # timedelta has no month/year units; approximate them in days.
        if unitString == 'months':
            unitString = 'days'
            num = num * 30
        if unitString == 'years':
            unitString = 'days'
            num = num * 365
        t += timedelta(**{ unitString : sign * num})
    return t
def getUnitString(s):
    """Map a unit prefix ('s', 'min', 'h', 'd', 'w', 'mon', 'y') to the
    corresponding unit name used by parseTimeOffset.

    Order matters: 'min' is tested before 'mon' so that minutes win for 'm...'
    strings starting with 'min'.
    """
    if s.startswith('s'): return 'seconds'
    if s.startswith('min'): return 'minutes'
    if s.startswith('h'): return 'hours'
    if s.startswith('d'): return 'days'
    if s.startswith('w'): return 'weeks'
    if s.startswith('mon'): return 'months'
    if s.startswith('y'): return 'years'
    # Call-form raise works on both Python 2 and 3; the original
    # 'raise E, msg' statement is Python-2-only syntax.
    raise Exception("Invalid offset unit '%s'" % s)
|
<reponame>abeja-inc/platform-template-image-segmentation
import http
import os
import traceback
from io import BytesIO
import torch
import torchvision.transforms as T
from PIL import Image
import numpy as np
import json
import base64
import tempfile
from abeja.datasets import Client as DatasetsClient
import train
import parameters
from utils import create_colormap
def bitget(number, pos):
    """Return bit *pos* (0-based, least-significant first) of *number* as 0 or 1."""
    shifted = number >> pos
    return shifted & 1
def load_model(training_dir, device):
    """Build the segmentation model described by parameters.json, load its
    weights from model.pth, and return (model_in_eval_mode, num_classes)."""
    with open(os.path.join(training_dir, 'parameters.json'), 'r') as params_file:
        params = json.load(params_file)
    num_classes = int(params['NUM_CLASSES'])
    net = train.create_model(num_classes=num_classes, model_name=params['SEGMENTATION_MODEL'])
    net.load_state_dict(torch.load(os.path.join(training_dir, 'model.pth')))
    net.to(device)
    return net.eval(), num_classes
def get_dataset_labels(dataset_ids):
    """Return the label definitions of the first dataset id; [] when none given.

    Only the first dataset is consulted (the original loop broke after one
    iteration); any further ids are ignored.
    """
    datasets_client = DatasetsClient()
    labels = []
    if dataset_ids:
        first = datasets_client.get_dataset(dataset_ids[0])
        labels = first.props['categories'][0]['labels']
    return labels
def decode_segmap(out, label_colors):
    """Convert per-pixel class scores into an RGB image.

    *out* holds the raw network scores; per pixel the arg-max class index is
    looked up in *label_colors* (sequence of (r, g, b) per class index).
    Returns an (H, W, 3) uint8 array.
    """
    om = torch.argmax(out.squeeze(), dim=0).detach().cpu().numpy()
    print('Argmax prediction result: ', om)
    # One uint8 plane per colour channel, filled class by class.
    channels = [np.zeros_like(om).astype(np.uint8) for _ in range(3)]
    for label, color in enumerate(label_colors):
        mask = om == label
        for plane, value in zip(channels, color):
            plane[mask] = value
    return np.stack(channels, axis=2)
# --- Module-level initialisation (runs once at import time) ---
# Directory holding the training artefacts (model.pth, parameters.json).
training_dir = os.environ.get('ABEJA_TRAINING_RESULT_DIR', '.')
# Dataset ids used during training; labels/colours come from the first one.
dataset_ids = os.environ.get('TRAINING_JOB_DATASET_IDS', '').split(',')
# Use the configured device only when CUDA is actually available.
device_name = parameters.DEVICE if torch.cuda.is_available() else 'cpu'
device = torch.device(device_name)
# Whether the predicted map is resized back to the input image size.
resize_to_original = parameters.RESIZE_TO_ORIGINAL
model, num_classes = load_model(training_dir, device)
dataset_labels = get_dataset_labels(dataset_ids)
color_map = create_colormap(dataset_labels)
def segmentation(img):
    """Segment a PIL RGB image and return the colour-coded map as a PIL image.

    Uses the module-level model/device/color_map; optionally resizes the
    result back to the input size (resize_to_original).
    """
    preprocess = T.Compose([T.Resize(520),
                            T.ToTensor(),
                            T.Normalize(mean=[0.485, 0.456, 0.406],
                                        std=[0.229, 0.224, 0.225])])
    batch = preprocess(img).unsqueeze(0).to(device)
    scores = model(batch)['out']
    print('Predict result: ', scores)
    result = Image.fromarray(decode_segmap(scores, color_map))
    if resize_to_original:
        result = result.resize(img.size)
    return result
def handler(request, context):
    """ABEJA Platform prediction handler.

    Reads a raw image from the request body and returns the dataset labels
    together with the segmentation map as a base64-encoded PNG. Returns a
    BAD_REQUEST response for unsupported runtimes and None when prediction
    fails (the traceback is logged).
    """
    print('Start predict handler.')
    if 'http_method' not in request:
        message = 'Error: Support only "abeja/all-cpu:19.04" or "abeja/all-gpu:19.04".'
        print(message)
        return {
            'status_code': http.HTTPStatus.BAD_REQUEST,
            'content_type': 'application/json; charset=utf8',
            'content': {'message': message}
        }
    try:
        data = request.read()
        img = BytesIO(data)
        rgbimg = Image.open(img).convert('RGB')
        segmap = segmentation(rgbimg)
        with tempfile.NamedTemporaryFile(suffix=".png") as tf:
            save_file = tf.name
            segmap.save(save_file)
            # BUG FIX: close the PNG file handle deterministically
            # (the original left an open() handle to the GC).
            with open(save_file, 'rb') as png_file:
                b64 = base64.b64encode(png_file.read()).decode('ascii')
        return {
            'status_code': http.HTTPStatus.OK,
            'content_type': 'application/json; charset=utf8',
            'content': {'labels': dataset_labels, 'result': b64}
        }
    except Exception as e:
        # Best-effort error reporting; None signals a failed prediction.
        print(str(e))
        print(traceback.format_exc())
        return None
def parse_args():
    """Parse the command-line arguments for the standalone prediction mode."""
    import argparse
    arg_parser = argparse.ArgumentParser(description='Abeja Segmentation Template for Prediction')
    arg_parser.add_argument('--src', default='', help='source image file path')
    arg_parser.add_argument('--dst', default='', help='target image file path to save')
    return arg_parser.parse_args()
if __name__ == '__main__':
    # Standalone CLI mode: segment a single image file and save the result.
    args = parse_args()
    img = Image.open(args.src).convert('RGB')
    seg_img = segmentation(img)
    seg_img.save(args.dst)
|
from collections import deque
def pawn(pawn_r, num_of_rows):
    """Return, per row index, the move number at which the pawn occupies that
    row (1 on its start row, increasing as it advances; 0 for rows behind it)."""
    reach = [0] * num_of_rows
    reach[pawn_r] = 1
    for row in range(num_of_rows - 1):
        if reach[row]:
            reach[row + 1] = reach[row] + 1
    return reach
def col_extract(visited, pawn_c):
    """Extract column pawn_c (1-based) from the visited matrix as a list.

    Relies on the module-level global num_of_rows for the row count.
    """
    return [visited[row][pawn_c - 1] for row in range(num_of_rows)]
def bfs(board, start_r, start_c):
    """Breadth-first knight search from (start_r, start_c).

    Returns a num_of_rows x num_of_cols matrix where each reached square holds
    its BFS depth + 1 (start square = 1) and unreached squares hold 0. The
    *board* argument is accepted for interface compatibility but unused; the
    dimensions come from the module-level globals num_of_rows/num_of_cols.
    """
    knight_moves = ((2, 1), (2, -1), (-2, 1), (-2, -1),
                    (1, 2), (1, -2), (-1, 2), (-1, -2))
    visited = [[0] * num_of_cols for _ in range(num_of_rows)]
    visited[start_r][start_c] = 1
    queue = deque([(start_r, start_c)])
    filled = 0
    while queue:
        cur_r, cur_c = queue.popleft()
        # Early exit once every other square has been assigned a depth.
        if filled == num_of_cols * num_of_rows - 1:
            return visited
        for move_r, move_c in knight_moves:
            new_r = cur_r + move_r
            new_c = cur_c + move_c
            if not (0 <= new_r < num_of_rows and 0 <= new_c < num_of_cols):
                continue
            if visited[new_r][new_c]:
                continue
            visited[new_r][new_c] = visited[cur_r][cur_c] + 1
            filled += 1
            queue.append((new_r, new_c))
    return visited
# Main loop: one knight-vs-pawn game per test case read from stdin.
for i in range(int(input())):
    num_of_rows, num_of_cols = int(input()), int(input())
    pawn_r, pawn_c = int(input()), int(input())
    knight_r, knight_c = int(input()), int(input())
    win, lose = False, True
    board = [["." for i in range(num_of_cols)] for j in range(num_of_rows)]
    # Move number at which the pawn occupies each row at/after its start row.
    pawn_move = pawn(pawn_r-1, num_of_rows)[pawn_r-1:]
    # Knight BFS depths down the pawn's column, aligned with pawn_move.
    knight_move = col_extract(
        bfs(board, knight_r-1, knight_c-1), pawn_c)[pawn_r-1:]
    # If the board has width or length 3 or more
    # NOTE(review): the condition below parses as
    # ((pawn_move[i] != knight_move[i] != 0 and num_of_cols != 2)
    #  or num_of_rows != 2) -- likely intended
    # (num_of_cols != 2 and num_of_rows != 2); confirm.
    for i in range(len(knight_move)):
        if pawn_move[i] != knight_move[i] != 0 and num_of_cols != 2 or num_of_rows != 2:
            if pawn_move[i] == knight_move[i]:
                print(f"Win in {i} knight move(s).")
                win, lose = True, False
                break
    # If the board has a dimention of 2
    else:
        if num_of_cols == 2 or num_of_rows == 2:
            for i in range(len(knight_move)):
                if pawn_move[i] != knight_move[i] != 0:
                    # %, when the knight visits a visited square
                    # This is not needed when dimentions are >= 3
                    if pawn_move[i] % knight_move[i] == 0:
                        print(f"Win in {i} knight move(s).")
                        win, lose = True, False
                        break
    if not win:
        # No capture found: check for a stalemate (knight reaches the square
        # just after the pawn); otherwise the knight loses.
        for i in range(len(knight_move)-1):
            if pawn_move[i] != 0 or knight_move[i+1] != 0:
                if pawn_move[i] == knight_move[i+1]:
                    print(f"Stalemate in {i} knight move(s).")
                    break
        # NOTE(review): indentation was lost in this dump; this 'else' is
        # reconstructed as a for-else (runs when no stalemate broke the loop).
        else:
            print(f"Loss in {i} knight move(s).")
|
<filename>first_order/ig.py
import math

import numpy as np

from optimizer import Optimizer
class Ig(Optimizer):
    """
    Incremental gradient descent (IG) with decreasing or constant learning rate.
    For a formal description and convergence guarantees, see Section 10 in
    https://arxiv.org/abs/2006.05988
    The method is sensitive to finishing the final epoch, so it will terminate earlier
    than it_max if it_max is not divisible by the number of steps per epoch.

    Arguments:
        prox_every_it (bool, optional): whether to use proximal operation every iteration
            or only at the end of an epoch. Theory supports the latter. Only used if the loss includes
            a proximal regularizer (default: False)
        lr0 (float, optional): an estimate of the inverse smoothness constant, this step-size
            is used for the first epoch_start_decay epochs. If not given, it will be set
            with the value in the loss.
        lr_max (float, optional): a maximal step-size never to be exceeded (default: np.inf)
        lr_decay_coef (float, optional): the coefficient in front of the number of finished epochs
            in the denominator of step-size. For strongly convex problems, a good value
            is mu/3, where mu is the strong convexity constant
        lr_decay_power (float, optional): the power to exponentiate the number of finished epochs
            in the denominator of step-size. For strongly convex problems, a good value is 1 (default: 1)
        epoch_start_decay (int, optional): how many epochs the step-size is kept constant
            By default, will be set to have about 2.5% of iterations with the step-size equal to lr0
        batch_size (int, optional): the number of samples from the function to be used at each iteration
        update_trace_at_epoch_end (bool, optional): save progress only at the end of an epoch, which
            avoids bad iterates
    """
    def __init__(self, prox_every_it=False, lr0=None, lr_max=np.inf, lr_decay_coef=0, lr_decay_power=1,
                 epoch_start_decay=None, batch_size=1, update_trace_at_epoch_end=True, *args, **kwargs):
        super(Ig, self).__init__(*args, **kwargs)
        self.prox_every_it = prox_every_it
        self.lr0 = lr0
        self.lr_max = lr_max
        self.lr_decay_coef = lr_decay_coef
        self.lr_decay_power = lr_decay_power
        self.epoch_start_decay = epoch_start_decay
        self.batch_size = batch_size
        self.update_trace_at_epoch_end = update_trace_at_epoch_end
        if epoch_start_decay is None and np.isfinite(self.epoch_max):
            # Default: keep the step-size constant for ~2.5% of the run.
            self.epoch_start_decay = 1 + self.epoch_max // 40
        elif epoch_start_decay is None:
            self.epoch_start_decay = 1
        # BUG FIX: `math` was used here without being imported at module level.
        self.steps_per_epoch = math.ceil(self.loss.n / batch_size)

    def step(self):
        """Take one IG step on the next (cyclically chosen) mini-batch."""
        i_max = min(self.loss.n, self.i + self.batch_size)
        idx = np.arange(self.i, i_max)
        self.i += self.batch_size
        if self.i >= self.loss.n:
            self.i = 0
        # Rescale so that one full epoch approximates the full gradient.
        normalization = self.loss.n / self.steps_per_epoch
        self.grad = self.loss.stochastic_gradient(self.x, idx=idx, normalization=normalization)
        # Decayed step-size: constant for the first epoch_start_decay epochs,
        # then 1 / (1/lr0 + coef * iterations^power), capped at lr_max.
        denom_const = 1 / self.lr0
        it_decrease = self.steps_per_epoch * max(0, self.finished_epochs - self.epoch_start_decay)
        lr_decayed = 1 / (denom_const + self.lr_decay_coef * it_decrease**self.lr_decay_power)
        self.lr = min(lr_decayed, self.lr_max)
        self.x -= self.lr * self.grad
        end_of_epoch = self.i == 0
        self.finished_epochs += end_of_epoch
        if self.prox_every_it and self.use_prox:
            self.x = self.loss.regularizer.prox(self.x, self.lr)
        elif end_of_epoch and self.use_prox:
            # Theory supports one prox per epoch with the aggregated step-size.
            self.x = self.loss.regularizer.prox(self.x, self.lr * self.steps_per_epoch)

    def init_run(self, *args, **kwargs):
        """Reset counters and derive lr0 from the loss smoothness if unset."""
        super(Ig, self).init_run(*args, **kwargs)
        self.finished_epochs = 0
        if self.lr0 is None:
            # BUG FIX: was `batch_size` (undefined name here); use the attribute.
            self.lr0 = 1 / self.loss.batch_smoothness(self.batch_size)
        self.i = 0
|
# -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
GraphSAGE tests
"""
from tensorflow import keras
from tensorflow.keras import initializers, regularizers
import numpy as np
import pytest
from stellargraph.mapper import GraphSAGENodeGenerator
from stellargraph.layer.graphsage import (
GraphSAGE,
MeanAggregator,
MaxPoolingAggregator,
MeanPoolingAggregator,
AttentionalAggregator,
)
from ..test_utils.graphs import example_graph
from .. import test_utils
pytestmark = test_utils.ignore_stellargraph_experimental_mark
# Mean aggregator tests
def test_mean_agg_constructor():
    """Default MeanAggregator: given output dim, no bias, relu activation."""
    aggregator = MeanAggregator(2)
    assert aggregator.output_dim == 2
    assert not aggregator.has_bias
    # The serialised config must reflect the constructor defaults.
    cfg = aggregator.get_config()
    assert cfg["act"] == "relu"
    assert cfg["bias"] is False
    assert cfg["output_dim"] == 2
def test_mean_agg_constructor_1():
    """MeanAggregator with an explicit bias and a custom callable activation."""
    aggregator = MeanAggregator(output_dim=4, bias=True, act=lambda v: v + 1)
    assert aggregator.has_bias
    assert aggregator.output_dim == 4
    # The custom activation is stored and applied as-is.
    assert aggregator.act(2) == 3
def test_mean_agg_apply():
    """Numerical check of MeanAggregator with all-ones kernel and identity act."""
    agg = MeanAggregator(5, bias=True, act=lambda x: x, kernel_initializer="ones")
    # Self features: (batch, head nodes, feature dim).
    inp1 = keras.Input(shape=(1, 2))
    # Neighbour features: (batch, head nodes, neighbours, feature dim).
    inp2 = keras.Input(shape=(1, 2, 2))
    out = agg([inp1, inp2])
    # Output dim 5 is split between the self part (3) and neighbour part (2).
    assert agg.weight_dims == [3, 2]
    model = keras.Model(inputs=[inp1, inp2], outputs=out)
    x1 = np.array([[[1, 1]]])
    x2 = np.array([[[[2, 2], [3, 3]]]])
    actual = model.predict([x1, x2])
    # Self: [1,1] . ones -> 2 in each of 3 columns;
    # neighbours: mean([2,2],[3,3]) = [2.5,2.5], . ones -> 5 in each of 2 columns.
    expected = np.array([[[2, 2, 2, 5, 5]]])
    assert expected == pytest.approx(actual)
def test_mean_agg_apply_groups():
    """MeanAggregator with two neighbour groups splits the output 5/3/3."""
    agg = MeanAggregator(11, bias=True, act=lambda x: x, kernel_initializer="ones")
    inp1 = keras.Input(shape=(1, 2))
    inp2 = keras.Input(shape=(1, 2, 2))
    inp3 = keras.Input(shape=(1, 2, 2))
    out = agg([inp1, inp2, inp3])
    # Output dim 11 split as self (5) + group 1 (3) + group 2 (3).
    assert agg.weight_dims == [5, 3, 3]
    model = keras.Model(inputs=[inp1, inp2, inp3], outputs=out)
    x1 = np.array([[[1, 1]]])
    x2 = np.array([[[[2, 2], [3, 3]]]])
    x3 = np.array([[[[5, 5], [4, 4]]]])
    actual = model.predict([x1, x2, x3])
    print(actual)
    # Self sum = 2; group 1 mean [2.5,2.5] -> 5; group 2 mean [4.5,4.5] -> 9.
    expected = np.array([[[2] * 5 + [5] * 3 + [9] * 3]])
    assert expected == pytest.approx(actual)
def test_mean_agg_zero_neighbours():
    """With an empty neighbourhood only the self features contribute."""
    agg = MeanAggregator(4, bias=False, act=lambda x: x, kernel_initializer="ones")
    inp1 = keras.Input(shape=(1, 2))
    # Zero neighbours: the neighbour axis has length 0.
    inp2 = keras.Input(shape=(1, 0, 2))
    out = agg([inp1, inp2])
    model = keras.Model(inputs=[inp1, inp2], outputs=out)
    x1 = np.array([[[1, 1]]])
    x2 = np.zeros((1, 1, 0, 2))
    actual = model.predict([x1, x2])
    # Self sum 1+1 = 2 replicated over all 4 output columns.
    expected = np.array([[[2, 2, 2, 2]]])
    assert expected == pytest.approx(actual)
# MaxPooling aggregator tests
def test_maxpool_agg_constructor():
    """MaxPoolingAggregator defaults: relu activations, no bias."""
    agg = MaxPoolingAggregator(2, bias=False)
    assert agg.output_dim == 2
    assert agg.hidden_dim == 2
    assert not agg.has_bias
    assert agg.act.__name__ == "relu"
    assert agg.hidden_act.__name__ == "relu"
    # Check config
    config = agg.get_config()
    assert config["output_dim"] == 2
    # Identity comparison for the boolean flag, consistent with the other
    # constructor tests in this module (previously `== False`).
    assert config["bias"] is False
    assert config["act"] == "relu"
def test_maxpool_agg_constructor_1():
    """Keyword arguments are stored on the aggregator."""
    aggregator = MaxPoolingAggregator(output_dim=4, bias=True, act=lambda v: v + 1)
    assert aggregator.has_bias
    assert aggregator.output_dim == 4
    assert aggregator.hidden_dim == 4
    assert aggregator.act(2) == 3
def test_maxpool_agg_apply_hidden_bias():
    # Specifying bias_initializer="ones" initialises all bias terms to ones;
    # using bias=False turns off the outer bias but retains the hidden bias.
    agg = MaxPoolingAggregator(
        2, bias=False, act="linear", kernel_initializer="ones", bias_initializer="ones"
    )
    assert agg.get_config()["kernel_initializer"]["class_name"] == "Ones"
    assert agg.get_config()["bias_initializer"]["class_name"] == "Ones"
    # Self features
    inp1 = keras.Input(shape=(1, 2))
    # Neighbour features
    inp2 = keras.Input(shape=(1, 2, 2))
    out = agg([inp1, inp2])
    # Check sizes
    assert agg.weight_dims == [1, 1]
    # Numerical test values
    x1 = np.array([[[1, 1]]])
    x2 = np.array([[[[2, 2], [3, 3]]]])
    # Agg output:
    # neigh_agg = max(relu(x2 · ones(2x2)) + ones(2)), axis=1) = max([[5,5],[7,7]]) = [[7,7]]
    # from_self = K.dot(x1, ones) = [[2]]
    # from_neigh = K.dot(neigh_agg, ones) = [[14]]
    model = keras.Model(inputs=[inp1, inp2], outputs=out)
    actual = model.predict([x1, x2])
    expected = np.array([[[2, 14]]])
    assert expected == pytest.approx(actual)
def test_maxpool_agg_apply_no_bias():
    """Same numerical check as above but with default zero bias initialisers."""
    # By default, bias_initializers="zeros", so all bias terms are initialised to zeros.
    agg = MaxPoolingAggregator(2, act="linear", kernel_initializer="ones")
    assert agg.get_config()["kernel_initializer"]["class_name"] == "Ones"
    assert agg.get_config()["bias_initializer"]["class_name"] == "Zeros"
    # Self features
    inp1 = keras.Input(shape=(1, 2))
    # Neighbour features
    inp2 = keras.Input(shape=(1, 2, 2))
    out = agg([inp1, inp2])
    # Check sizes
    assert agg.weight_dims == [1, 1]
    # Numerical test values
    x1 = np.array([[[1, 1]]])
    x2 = np.array([[[[2, 2], [3, 3]]]])
    # Agg output:
    # neigh_agg = max(relu(x2 · ones(2x2)) + zeros(2)), axis=1) = max([[4,4],[6,6]]) = [[6,6]]
    # from_self = K.dot(x1, ones) = [[2]]
    # from_neigh = K.dot(neigh_agg, ones) = [[12]]
    model = keras.Model(inputs=[inp1, inp2], outputs=out)
    actual = model.predict([x1, x2])
    expected = np.array([[[2, 12]]])
    assert expected == pytest.approx(actual)
def test_maxpool_agg_zero_neighbours():
    """Zero-sized neighbour input: output comes from the self features only."""
    agg = MaxPoolingAggregator(4, bias=False, act="linear", kernel_initializer="ones")
    inp1 = keras.Input(shape=(1, 2))
    inp2 = keras.Input(shape=(1, 0, 2))
    out = agg([inp1, inp2])
    model = keras.Model(inputs=[inp1, inp2], outputs=out)
    x1 = np.array([[[1, 1]]])
    x2 = np.zeros((1, 1, 0, 2))
    actual = model.predict([x1, x2])
    expected = np.array([[[2, 2, 2, 2]]])
    assert expected == pytest.approx(actual)
# MeanPooling aggregator tests
def test_meanpool_agg_constructor():
    """MeanPoolingAggregator defaults: relu activations, no bias."""
    aggregator = MeanPoolingAggregator(2, bias=False)
    assert aggregator.output_dim == 2
    assert aggregator.hidden_dim == 2
    assert not aggregator.has_bias
    assert aggregator.act.__name__ == "relu"
    assert aggregator.hidden_act.__name__ == "relu"
    # Serialised config must round-trip the constructor arguments.
    cfg = aggregator.get_config()
    assert cfg["act"] == "relu"
    assert cfg["bias"] is False
    assert cfg["output_dim"] == 2
def test_meanpool_agg_constructor_1():
    """Constructor keyword arguments are stored on the aggregator."""
    agg = MeanPoolingAggregator(output_dim=4, bias=True, act=lambda x: x + 1)
    assert agg.output_dim == 4
    assert agg.hidden_dim == 4
    assert agg.has_bias
    assert agg.act(2) == 3
def test_meanpool_agg_apply_hidden_bias():
    # Specifying bias_initializer="ones" initialises all bias terms to ones;
    # using bias=False turns off the outer bias but retains the hidden bias.
    agg = MeanPoolingAggregator(
        2, bias=False, act="linear", kernel_initializer="ones", bias_initializer="ones"
    )
    assert agg.get_config()["kernel_initializer"]["class_name"] == "Ones"
    assert agg.get_config()["bias_initializer"]["class_name"] == "Ones"
    # Self features
    inp1 = keras.Input(shape=(1, 2))
    # Neighbour features
    inp2 = keras.Input(shape=(1, 2, 2))
    out = agg([inp1, inp2])
    # Check sizes
    assert agg.weight_dims == [1, 1]
    # Numerical test values
    x1 = np.array([[[1, 1]]])
    x2 = np.array([[[[2, 2], [3, 3]]]])
    # Agg output:
    # neigh_agg = mean(relu(x2 · ones(2x2) + ones(2)), axis=1)
    #           = mean([[5,5],[7,7]]) = [[6,6]]
    # from_self = K.dot(x1, ones) = [[2]]
    # from_neigh = K.dot(neigh_agg, ones(2x1)) = [[12]]
    model = keras.Model(inputs=[inp1, inp2], outputs=out)
    actual = model.predict([x1, x2])
    expected = np.array([[[2, 12]]])
    assert expected == pytest.approx(actual)
def test_meanpool_agg_apply_no_bias():
    """Same numerical check as above but with default zero bias initialisers."""
    # By default, bias_initializers="zeros", so all bias terms are initialised to zeros.
    agg = MeanPoolingAggregator(2, act="linear", kernel_initializer="ones")
    assert agg.get_config()["kernel_initializer"]["class_name"] == "Ones"
    assert agg.get_config()["bias_initializer"]["class_name"] == "Zeros"
    # Self features
    inp1 = keras.Input(shape=(1, 2))
    # Neighbour features
    inp2 = keras.Input(shape=(1, 2, 2))
    out = agg([inp1, inp2])
    # Check sizes
    assert agg.weight_dims == [1, 1]
    # Numerical test values
    x1 = np.array([[[1, 1]]])
    x2 = np.array([[[[2, 2], [3, 3]]]])
    # Agg output:
    # neigh_agg = mean(relu(x2 · ones(2x2) + zeros(2)), axis=1)
    #           = mean([[4,4],[6,6]]) = [[5,5]]
    # from_self = K.dot(x1, ones) = [[2]]
    # from_neigh = K.dot(neigh_agg, ones) = [[10]]
    model = keras.Model(inputs=[inp1, inp2], outputs=out)
    actual = model.predict([x1, x2])
    expected = np.array([[[2, 10]]])
    assert expected == pytest.approx(actual)
def test_meanpool_agg_zero_neighbours():
    """Zero-sized neighbour input: all output dims come from the head node."""
    agg = MeanPoolingAggregator(4, bias=False, act="linear", kernel_initializer="ones")
    inp1 = keras.Input(shape=(1, 2))
    inp2 = keras.Input(shape=(1, 0, 2))
    out = agg([inp1, inp2])
    # With a zero-sized neighbour input, the first (self) group takes all
    # output dimensions and the neighbour group gets none.
    # NOTE(review): the original comment here referred to "the attention
    # model" — this is the mean-pooling aggregator, so that comment appears
    # copy-pasted from the attentional test; confirm.
    assert agg.weight_dims == [4, 0]
    model = keras.Model(inputs=[inp1, inp2], outputs=out)
    x1 = np.array([[[1, 1]]])
    x2 = np.zeros((1, 1, 0, 2))
    actual = model.predict([x1, x2])
    expected = np.array([[[2, 2, 2, 2]]])
    assert expected == pytest.approx(actual)
# Attentional aggregator tests
def test_attn_agg_constructor():
    """AttentionalAggregator defaults: relu activation, no bias."""
    agg = AttentionalAggregator(2, bias=False)
    assert agg.output_dim == 2
    assert not agg.has_bias
    assert agg.act.__name__ == "relu"
    # assert agg.attn_act.__name__ == "relu"
    # Check config
    config = agg.get_config()
    assert config["output_dim"] == 2
    assert config["bias"] is False
    assert config["act"] == "relu"
def test_attn_agg_constructor_1():
    """Keyword arguments are stored on the aggregator."""
    aggregator = AttentionalAggregator(output_dim=4, bias=True, act=lambda v: v + 1)
    assert aggregator.has_bias
    assert aggregator.output_dim == 4
    assert aggregator.act(2) == 3
def test_attn_agg_apply():
    """Numerical check of the attentional aggregator with ones kernels and
    linear activations throughout."""
    agg = AttentionalAggregator(2, bias=False, act="linear", kernel_initializer="ones")
    agg.attn_act = keras.activations.get("linear")
    # Self features
    inp1 = keras.Input(shape=(1, 2))
    # Neighbour features
    inp2 = keras.Input(shape=(1, 2, 2))
    out = agg([inp1, inp2])
    # The AttentionalAggregator implmentation is a hack at the moment, it doesn't
    # assign any dimensions in the output to head-node features.
    assert agg.weight_dims == [0, 2]
    # Numerical test values
    x1 = np.array([[[1, 1]]])
    x2 = np.array([[[[2, 2], [3, 3]]]])
    # Agg output:
    # hs = relu(x1 · ones(2x2)) = [2,2]
    # hn = relu(x2 · ones(2x2)) = [[2,2], [4,4], [6,6]]
    # attn_u = ones(2) · hs + ones(2) · hn = [8, 12, 16]
    # attn = softmax(attn_u) = [3.3e-4, 1.8e-2, 9.81e-1]
    # (the middle softmax value was previously mis-stated as 1.8e-4; the
    # expected output 5.963 below is consistent with 1.8e-2)
    # hout = attn · hn = [5.96, 5.96]
    model = keras.Model(inputs=[inp1, inp2], outputs=out)
    actual = model.predict([x1, x2])
    expected = np.array([[[5.963, 5.963]]])
    assert expected == pytest.approx(actual, rel=1e-4)
def test_attn_agg_zero_neighbours():
    """Zero-sized neighbour input: output comes from the self features only."""
    agg = AttentionalAggregator(4, bias=False, act="linear", kernel_initializer="ones")
    inp1 = keras.Input(shape=(1, 2))
    inp2 = keras.Input(shape=(1, 0, 2))
    out = agg([inp1, inp2])
    model = keras.Model(inputs=[inp1, inp2], outputs=out)
    x1 = np.array([[[1, 1]]])
    x2 = np.zeros((1, 1, 0, 2))
    actual = model.predict([x1, x2])
    expected = np.array([[[2, 2, 2, 2]]])
    assert expected == pytest.approx(actual)
def test_graphsage_constructor():
    """GraphSAGE construction: explicit args, invalid normalisers, generators."""
    gs = GraphSAGE(
        layer_sizes=[4], n_samples=[2], input_dim=2, normalize="l2", multiplicity=1
    )
    assert gs.dims == [2, 4]
    assert gs.n_samples == [2]
    assert gs.max_hops == 1
    assert gs.bias
    assert len(gs._aggs) == 1
    # Check incorrect normalization flag
    with pytest.raises(ValueError):
        GraphSAGE(
            layer_sizes=[4],
            n_samples=[2],
            input_dim=2,
            normalize=lambda x: x,
            multiplicity=1,
        )
    with pytest.raises(ValueError):
        GraphSAGE(
            layer_sizes=[4],
            n_samples=[2],
            input_dim=2,
            normalize="unknown",
            multiplicity=1,
        )
    # Check requirement for generator or n_samples
    with pytest.raises(KeyError):
        GraphSAGE(layer_sizes=[4])
    # Construction from generator
    G = example_graph(feature_size=3)
    gen = GraphSAGENodeGenerator(G, batch_size=2, num_samples=[2, 2])
    gs = GraphSAGE(layer_sizes=[4, 8], generator=gen, bias=True)
    # The GraphSAGE should no longer accept a Sequence
    t_gen = gen.flow([1, 2])
    with pytest.raises(TypeError):
        gs = GraphSAGE(layer_sizes=[4, 8], generator=t_gen, bias=True)
    # The failed construction above does not rebind `gs`, so the assertions
    # below still check the generator-built model from two statements up.
    assert gs.dims == [3, 4, 8]
    assert gs.n_samples == [2, 2]
    assert gs.max_hops == 2
    assert gs.bias
    assert len(gs._aggs) == 2
def test_graphsage_constructor_passing_aggregator():
    """An aggregator class may be passed in; non-class values raise TypeError."""
    gs = GraphSAGE(
        layer_sizes=[4],
        n_samples=[2],
        input_dim=2,
        multiplicity=1,
        aggregator=MeanAggregator,
    )
    assert gs.dims == [2, 4]
    assert gs.n_samples == [2]
    assert gs.max_hops == 1
    assert gs.bias
    assert len(gs._aggs) == 1
    with pytest.raises(TypeError):
        GraphSAGE(
            layer_sizes=[4], n_samples=[2], input_dim=2, multiplicity=1, aggregator=1
        )
def test_graphsage_constructor_1():
    """Three-layer construction records dims, samples and hop count."""
    model = GraphSAGE(
        layer_sizes=[4, 6, 8],
        n_samples=[2, 4, 6],
        input_dim=2,
        multiplicity=1,
        bias=True,
        dropout=0.5,
    )
    assert model.bias
    assert model.dims == [2, 4, 6, 8]
    assert model.n_samples == [2, 4, 6]
    assert model.max_hops == 3
    assert len(model._aggs) == 3
def test_graphsage_apply():
    """A one-layer GraphSAGE with all-ones kernels produces a known output.

    Previously this test only built the model without asserting anything.
    It now also checks the numerical output, which matches
    test_graphsage_serialize (same configuration with all-ones weights).
    """
    gs = GraphSAGE(
        layer_sizes=[4],
        n_samples=[2],
        bias=False,
        input_dim=2,
        multiplicity=1,
        normalize=None,
        kernel_initializer="ones",
    )
    inp1 = keras.Input(shape=(1, 2))
    inp2 = keras.Input(shape=(2, 2))
    out = gs([inp1, inp2])
    model = keras.Model(inputs=[inp1, inp2], outputs=out)
    # from_self = x1 . ones = [2, 2]; neighbour mean [2.5, 2.5] . ones = [5, 5]
    x1 = np.array([[[1, 1]]])
    x2 = np.array([[[2, 2], [3, 3]]])
    actual = model.predict([x1, x2])
    expected = np.array([[2, 2, 5, 5]])
    assert expected == pytest.approx(actual)
def test_graphsage_apply_1():
    """Three-layer numerical check, plus the same model built via build()."""
    gs = GraphSAGE(
        layer_sizes=[2, 2, 2],
        n_samples=[2, 2, 2],
        bias=False,
        input_dim=2,
        multiplicity=1,
        normalize=None,
        kernel_initializer="ones",
    )
    # Inputs per hop: 1 head node, then 2, 4, 8 sampled neighbours.
    inp = [keras.Input(shape=(i, 2)) for i in [1, 2, 4, 8]]
    out = gs(inp)
    model = keras.Model(inputs=inp, outputs=out)
    x = [
        np.array([[[1, 1]]]),
        np.array([[[2, 2], [2, 2]]]),
        np.array([[[3, 3], [3, 3], [3, 3], [3, 3]]]),
        np.array([[[4, 4], [4, 4], [4, 4], [4, 4], [5, 5], [5, 5], [5, 5], [5, 5]]]),
    ]
    expected = np.array([[16, 25]])
    actual = model.predict(x)
    assert expected == pytest.approx(actual)
    # Use the node model:
    xinp, xout = gs.build()
    model2 = keras.Model(inputs=xinp, outputs=xout)
    assert pytest.approx(expected) == model2.predict(x)
def test_graphsage_serialize():
    """Round-trip a GraphSAGE model through Keras JSON serialisation."""
    gs = GraphSAGE(
        layer_sizes=[4],
        n_samples=[2],
        bias=False,
        input_dim=2,
        multiplicity=1,
        normalize=None,
    )
    inp1 = keras.Input(shape=(1, 2))
    inp2 = keras.Input(shape=(2, 2))
    out = gs([inp1, inp2])
    model = keras.Model(inputs=[inp1, inp2], outputs=out)
    # Save model
    model_json = model.to_json()
    # Set all weights to one
    model_weights = [np.ones_like(w) for w in model.get_weights()]
    # Load model from json & set all weights
    model2 = keras.models.model_from_json(
        model_json, custom_objects={"MeanAggregator": MeanAggregator}
    )
    model2.set_weights(model_weights)
    # Test loaded model
    x1 = np.array([[[1, 1]]])
    x2 = np.array([[[2, 2], [3, 3]]])
    expected = np.array([[2, 2, 5, 5]])
    actual = model2.predict([x1, x2])
    assert expected == pytest.approx(actual)
def test_graphsage_zero_neighbours():
    """Two layers with zero samples per hop still produce self-only output."""
    gs = GraphSAGE(
        layer_sizes=[2, 2],
        n_samples=[0, 0],
        bias=False,
        input_dim=2,
        multiplicity=1,
        normalize="none",
        kernel_initializer="ones",
    )
    inp = [keras.Input(shape=(i, 2)) for i in [1, 0, 0]]
    out = gs(inp)
    model = keras.Model(inputs=inp, outputs=out)
    x = [np.array([[[1.5, 1]]]), np.zeros((1, 0, 2)), np.zeros((1, 0, 2))]
    actual = model.predict(x)
    expected = np.array([[5, 5]])
    assert actual == pytest.approx(expected)
def test_graphsage_passing_activations():
    """Default activations are relu for hidden layers and linear for the last;
    wrong-length or unknown activation lists raise ValueError."""
    gs = GraphSAGE(layer_sizes=[4], n_samples=[2], input_dim=2, multiplicity=1)
    assert gs.activations == ["linear"]
    gs = GraphSAGE(layer_sizes=[4, 4], n_samples=[2, 2], input_dim=2, multiplicity=1)
    assert gs.activations == ["relu", "linear"]
    gs = GraphSAGE(
        layer_sizes=[4, 4, 4], n_samples=[2, 2, 2], input_dim=2, multiplicity=1
    )
    assert gs.activations == ["relu", "relu", "linear"]
    # Too few activations for the number of layers:
    with pytest.raises(ValueError):
        GraphSAGE(
            layer_sizes=[4, 4, 4],
            n_samples=[2, 2, 2],
            input_dim=2,
            multiplicity=1,
            activations=["relu"],
        )
    with pytest.raises(ValueError):
        GraphSAGE(
            layer_sizes=[4, 4, 4],
            n_samples=[2, 2, 2],
            input_dim=2,
            multiplicity=1,
            activations=["relu"] * 2,
        )
    # Unknown activation names:
    with pytest.raises(ValueError):
        GraphSAGE(
            layer_sizes=[4, 4, 4],
            n_samples=[2, 2, 2],
            input_dim=2,
            multiplicity=1,
            activations=["fred", "wilma", "barney"],
        )
    gs = GraphSAGE(
        layer_sizes=[4, 4, 4],
        n_samples=[2, 2, 2],
        input_dim=2,
        multiplicity=1,
        activations=["linear"] * 3,
    )
    assert gs.activations == ["linear"] * 3
def test_graphsage_passing_regularisers():
    """Initializers/regularizers are validated: names, instances or errors."""
    # Unknown initializer name raises:
    with pytest.raises(ValueError):
        GraphSAGE(
            layer_sizes=[4],
            n_samples=[2],
            input_dim=2,
            multiplicity=1,
            kernel_initializer="fred",
        )
    # Valid initializer by name:
    GraphSAGE(
        layer_sizes=[4],
        n_samples=[2],
        input_dim=2,
        multiplicity=1,
        kernel_initializer="ones",
    )
    # Valid initializer instance:
    GraphSAGE(
        layer_sizes=[4],
        n_samples=[2],
        input_dim=2,
        multiplicity=1,
        kernel_initializer=initializers.ones(),
    )
    # Valid regularizer instance:
    GraphSAGE(
        layer_sizes=[4],
        n_samples=[2],
        input_dim=2,
        multiplicity=1,
        kernel_regularizer=regularizers.l2(0.01),
    )
    # Unknown regularizer name raises:
    with pytest.raises(ValueError):
        GraphSAGE(
            layer_sizes=[4],
            n_samples=[2],
            input_dim=2,
            multiplicity=1,
            kernel_regularizer="wilma",
        )
|
"""
GPSDataTools.py: Utilities and class definitions for dealing with raw GPS
tracking data.
In general one is only interested in the Route class, which loads GPS data
from the database for a particular route and automatically turns it into
individual trips.
"""
# Copyright (c) 2010 <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import dbqueries as db
import datetime
from time import time
from sys import argv
from kml_objects import *
import math
from GTFSBusTrack import GTFSBusSchedule,GTFSBusTrack
import gisutils as gis
# Degrees -> radians conversion factor.
rad = math.pi / 180.0
# Segments whose start lies further than this from the GTFS shape start
# are marked invalid (see Route.filter_by_endpoint).
THRESH_SEG_ENDPOINT_TO_SHAPE_ENDPOINT = 50 #meters
# Maximum allowed gap between consecutive GPS reports within one trip.
THRESH_TIME_BETWEEN_REPORTS = 300 # seconds
# Candidate segments with this many reports or fewer are dropped.
THRESH_MINIMUM_REPORTS = 10
def now():
    """Return the current Unix time, truncated to whole seconds, as a string."""
    seconds = int(time())
    return str(seconds)
class VehicleReport(object):
    """
    POD structure representing a single row in the GPS log.
    """
    def __init__(self, id, lat, lon, routetag, dirtag, reported_update_time):
        # Store the raw log fields verbatim.
        self.vehicle_id = id
        self.lat = lat
        self.lon = lon
        self.route_tag = routetag
        self.dirtag = dirtag
        self.reported_update_time = reported_update_time
    def __str__(self):
        fields = (self.vehicle_id, self.route_tag, self.dirtag,
                  self.lat, self.lon, self.reported_update_time)
        return """\
vehicle %s on route %s, dirtag %s: %s, %s, time %s
""" % fields
    def __eq__(self, other):
        # Two reports are equal when every logged field matches.
        mine = (self.vehicle_id, self.lat, self.lon, self.route_tag,
                self.dirtag, self.reported_update_time)
        theirs = (other.vehicle_id, other.lat, other.lon, other.route_tag,
                  other.dirtag, other.reported_update_time)
        return mine == theirs
    def dayOfWeek(self):
        """
        0123456 = MTWThFSSu
        """
        return self.reported_update_time.weekday()
    def timeInSecondsIntoDay(self):
        """Seconds elapsed since midnight of the report's timestamp."""
        stamp = self.reported_update_time
        return stamp.second + 60 * (stamp.minute + 60 * stamp.hour)
class VehicleSegment(object):
    """
    A list of VehicleReports, representing a single trip made by
    a vehicle.
    """
    def __init__(self,reports):
        self.reports = reports
        # Trip-level tags are taken from the last report in the segment.
        self.dirtag = reports[-1].dirtag
        self.routetag = reports[-1].route_tag
        self.lations = [[r.lat,r.lon] for r in reports]
        self.shape = None
        self.valid = True
    def getGTFSRouteInfo(self):
        """
        Returns (routeID,directionID) for this trip.
        """
        route_id = db.get_route_for_dirtag(self.dirtag, routetag = self.routetag);
        dir_id = db.get_direction_for_dirtag(self.dirtag);
        print "Dirtag",self.dirtag,"routetag",self.routetag,"matched to",
        print route_id,dir_id
        return (route_id,dir_id);
    def export_segment(self):
        """
        Exports this segment to the database.
        Returns ( segment_id, (trip_id,offset,error) ),
        where segment_id is the tracked_vehicle_segment ID as exported
        into the database,
        and (trip_id,offset,error) are the gtfs matchup info as returned
        by GPSBusTrack.getMatchingGTFSTripID().
        If no match is found, returns None.
        """
        # Local import avoids a circular dependency with GPSBusTrack.
        from GPSBusTrack import GPSBusTrack
        #segID = db.getMaxSegID()+1;
        bt = GPSBusTrack(self);
        tinfo = bt.getMatchingGTFSTripID();
        if tinfo is None:
            print "No GTFS trip found (%d interp pts)" % (len(self.reports),)
            trip_id,offset,error=None,0,0
        else:
            trip_id,offset,error = tinfo
        trip_date = self.reports[0].reported_update_time
        rows=[(r.lat,r.lon,r.reported_update_time) for r in self.reports]
        veh_id = self.reports[0].vehicle_id;
        segID = db.export_gps_route(trip_id, trip_date, veh_id, error, offset, rows);
        return segID, tinfo
class TrackedVehicleSegment(object):
    """
    A variation on the VehicleSegment class for segments which have
    already been identified with a GTFS trip ID and all the associated
    information.
    """
    def __init__(self,segment_id,useCorrectedGTFS=True):
        self.segment_id = segment_id;
        self.trip_id, self.trip_date, self.vehicle_id, self.schedule_error, \
            self.offset, self.route = db.load_gps_route(segment_id);
        # Rebuild VehicleReport objects from the stored (lat, lon, time)
        # rows; route/dir tags are not stored in the DB row, hence None.
        self.reports = map(lambda llr: VehicleReport(self.vehicle_id,llr[0],llr[1],
                                                     None,None,llr[2]),
                           self.route);
        if self.trip_id is not None:
            if useCorrectedGTFS:
                self.schedule = GTFSBusTrack(self.trip_id, offset=self.offset,
                                             use_shape=False);
            else:
                self.schedule = GTFSBusSchedule(self.trip_id,offset=self.offset);
            self.route_id = self.schedule.route_id;
            self.dir_id = self.schedule.direction_id;
        else: #self.trip_id is None
            self.route_id = None
            self.dir_id = None
            self.schedule = None
        self.shape = None;
        self.min_time = self.reports[0].timeInSecondsIntoDay();
        self.max_time = self.reports[-1].timeInSecondsIntoDay();
        # Trips that cross midnight wrap; push max_time into the next day.
        if self.max_time < self.min_time: self.max_time += 86400
    def getGTFSRouteInfo(self):
        """
        Returns (routeID,directionID) for this trip.
        """
        return self.route_id,self.dir_id;
class Vehicle(object):
    """
    A unique transit vehicle, as defined by its ID in the
    vehicle_track table.
    """
    def __init__(self, vehicle_id):
        # Raw GPS reports and the trip segments derived from them.
        self.vehicle_id = vehicle_id
        self.reports = []
        self.segments = []
class GShape(object):
    """A GTFS shape: an ordered list of [lat, lon] points plus its dirtag."""
    def __init__(self, id):
        self.id = id
        self.points = []
        self.dirtag = ''
class Route(object):
    """
    Represents the set of vehicle trips belonging to a particular route.
    Upon initialization, all VehicleReports are found for that route,
    and subsequently segmented into appropriate VehicleSegments.
    """
    def __init__(self,route_short_name,tzdiff=0):
        # tzdiff: timezone offset passed through to the vehicle-report query.
        self.route_short_name = str(route_short_name)
        self.dirtags=[]
        self.shapes = {}
        self._vehicles = {}
        print "Loading Dirtags..."
        self.load_route_dirtags()
        print "\t%s"% '\n\t'.join(self.dirtags)
        #print "\t%s" % ' '.join(self.dirtags)
        print "Loading Shapes..."
        self.load_shapes()
        print "\tLoaded %s shapes: %s" % (len(self.shapes),', '.join([ shape_id for shape_id,shape in self.shapes.items()]))
        self.load_vehicle_reports(tzdiff)
        print "\tFound %s vehicles" % len(self._vehicles)
        print "Finding route segments..."
        self.find_segments()
        if self.shapes:
            self.filter_by_endpoint()
        else:
            print "No shapes found, skipping shape check"
        self.filter_by_report_time()
    def load_route_dirtags(self):
        # Direction tags registered for this route's short name.
        self.dirtags.extend(db.get_route_dirtags(self.route_short_name));
    def load_vehicle_reports(self,tzdiff):
        print "Loading vehicle reports..."
        rows = db.get_vehicle_reports(self.dirtags,tzdiff);
        print "\tDB fetch complete (%d rows). Sorting into objects.." % (len(rows),)
        def helper(row):
            # Group each report row under its (possibly new) Vehicle.
            vehicle_id = row['id']
            vehicle = self._vehicles.get(vehicle_id);
            if vehicle is None:
                vehicle = Vehicle(vehicle_id);
                self._vehicles[vehicle_id] = vehicle;
            vehicle.reports.append(VehicleReport(*row))
        map( helper, rows );
    def load_shapes(self):
        # Build GShape objects from the GTFS shape points for this route,
        # registering any dirtags not already known.
        rows = db.get_shapes_for_route(self.route_short_name);
        for row in rows:
            shape_id = row['shape_id']
            dirtag = row['dirtag']
            gshape = self.shapes.get(shape_id);
            if gshape is None:
                gshape = GShape(shape_id);
                gshape.dirtag = dirtag
                self.shapes[shape_id] = gshape
                self.dirtags.append(dirtag)
            gshape.points.append([row['shape_pt_lat'],row['shape_pt_lon']])
    def find_segments(self):
        # Split each vehicle's report stream into trips: a new segment
        # starts whenever the dirtag changes or the time between reports
        # exceeds THRESH_TIME_BETWEEN_REPORTS.
        dropped = 0
        for vehicle in self.vehicles():
            #print "\tSegmenting Vehicle %s..." % vehicle.vehicle_id
            last_report = vehicle.reports[0]
            reports=[last_report]
            for report in vehicle.reports[1:]:
                report_delay = report.reported_update_time - last_report.reported_update_time
                report_delay_seconds = 86400*report_delay.days + report_delay.seconds
                if report.dirtag != last_report.dirtag \
                       or report_delay_seconds > THRESH_TIME_BETWEEN_REPORTS:
                    if len(reports) > THRESH_MINIMUM_REPORTS:
                        seg = VehicleSegment(reports);
                        seg.shape = self.shape_for_dirtag(seg.dirtag)
                        vehicle.segments.append(seg);
                    else:
                        dropped += 1
                    reports=[]
                reports.append(report)
                last_report = report
            # NOTE(review): reports accumulated after the final boundary are
            # discarded rather than emitted as a trailing segment -- confirm
            # whether the last trip of each vehicle should also be kept.
            #print "\t\t%s segments found" % len(vehicle.segments)
        print "\tFound %d segments" % len([s for s in self.segments()])
        print "\tDropped %d segments for being too short" % (dropped,)
        print "\tRemoving vehicles that have no segments..."
        c=0
        for vehicle in self.vehicles():
            if not vehicle.segments:
                c+=1
                del self._vehicles[vehicle.vehicle_id]
        print "\tRemoved %d vehicles"%c
    def filter_by_endpoint(self):
        # Mark segments invalid when their start point is too far from the
        # matching GTFS shape's start point.
        print "Filtering segments by comparing segment endpoints to possible gtf_shape(s)..."
        c=0
        for seg in self.segments(return_valid_only=True):
            seg_start_lation = seg.lations[0]
            seg_end_lation = seg.lations[-1]
            s = seg.shape #self.shape_for_dirtag(seg.dirtag)
            if s is None:
                continue
            shape_start_lation = s.points[0]#.lation
            shape_end_lation = s.points[-1]#.lation
            start_point_distance = calcDistance(shape_start_lation,seg_start_lation)
            end_point_distance = calcDistance(shape_end_lation,seg_end_lation)
            # NOTE(review): end_point_distance is computed but never tested;
            # only the start-point distance decides validity -- confirm intent.
            if start_point_distance > THRESH_SEG_ENDPOINT_TO_SHAPE_ENDPOINT:
                seg.valid = False
                c+=1
            else:
                seg.valid = True
        print "\t%s marked as invalid" % c
    def filter_by_report_time(self):
        # Invalidate segments containing a gap between consecutive reports
        # longer than THRESH_TIME_BETWEEN_REPORTS seconds.
        print "Filtering by comparing times between reports..."
        c=0
        for seg in self.segments(return_valid_only=True):
            last = seg.reports[0]
            avg=[]
            for r in seg.reports[1:]:
                t=int((r.reported_update_time - last.reported_update_time).seconds)
                avg.append(t)
                if t > THRESH_TIME_BETWEEN_REPORTS:
                    seg.valid = False
                    dist = calcDistance( (last.lat,last.lon) , (r.lat,r.lon) )
                    print "Distance:",dist
                last = r
            if not seg.valid:
                c+=1
                print "Invalid, max delay:",max(avg)
        print "\t%s marked invalid" % c
    def segments(self,return_valid_only=False):
        # Yield segments across all vehicles, in sorted vehicle-id order.
        sorted_vehicles = self._vehicles.items()
        sorted_vehicles.sort()
        for vid,vehicle in sorted_vehicles:
            for seg in vehicle.segments:
                if return_valid_only and seg.valid == False:
                    continue
                else:
                    yield seg
    def shape_for_dirtag(self,dirtag):
        # First shape whose dirtag matches; implicitly None when no match.
        for shape_id,shape in self.shapes.items():
            if shape.dirtag == dirtag:
                return shape
    def vehicles(self):
        # Yield Vehicle objects in sorted vehicle-id order.
        sorted_vehicles = self._vehicles.items()
        sorted_vehicles.sort()
        for vid,vehicle in sorted_vehicles:
            yield vehicle
    def clear_filters(self):
        # Reset every segment (including invalid ones) back to valid.
        for seg in self.segments():
            seg.valid = True
    def export_segments(self,valid_only=True):
        segs = list(self.segments(valid_only));
        for i,seg in enumerate(segs):
            print "Exporting (%d/%d)..."%(i+1,len(segs))
            seg.export_segment();
def calcDistance(lation1,lation2):
    """
    Calculate the distance between two (lat, lon) pairs in meters.
    """
    return gis.distance_meters( map(float,lation1),
                                map(float,lation2) )
def gen_kml(route,dopoints=False,dotimestamps=False):
    """Build and write a KML document visualising a Route's segments.

    dopoints: also emit one (hidden) placemark per GPS report.
    dotimestamps: label segment endpoints with their report times.
    """
    print "Building KML.."
    # Prepare dirtag folders
    dirTagFolders = {}
    for tag in route.dirtags:
        dirTagFolders[tag] = {}#KFolder(tag)
    invalid_paths = KFolder('INVALID')
    # NOTE(review): invalid_paths is populated below but never added to
    # main_document, so invalid segments do not appear in the output KML
    # -- confirm whether that is intentional.
    for vehicle in route.vehicles():
        vehicle_folder = KFolder(vehicle.vehicle_id)
        for seg in vehicle.segments:
            if dopoints:
                point_folder = KFolder("points")
                point_folder.visibility = False
            folder = KFolder()
            folder.name = "#%03d %s - %s " % (vehicle.segments.index(seg),vehicle.vehicle_id,seg.dirtag)
            path = KPath()
            for r in seg.reports:
                l = [r.lat,r.lon]
                path.add(l)
                if dopoints:
                    p=KPlacemark(KPoint(l),name=r.reported_update_time)
                    p.visibility=False
                    point_folder.add(p)
            folder.add(KPlacemark(path,folder.name,style_url='segmentLine'))
            if dopoints:
                folder.add(point_folder)
            # Mark the segment's start/end with shaded dots, optionally
            # labelled with the first/last report times.
            if dotimestamps:
                folder.add(KPlacemark(KPoint(seg.lations[0]),name=seg.reports[0].reported_update_time,style_url='map_shaded_dot_true'))
                folder.add(KPlacemark(KPoint(seg.lations[-1]),name=seg.reports[-1].reported_update_time,style_url='map_shaded_dot_false'))
            else:
                folder.add(KPlacemark(KPoint(seg.lations[0]),name='',style_url='map_shaded_dot_true'))
                folder.add(KPlacemark(KPoint(seg.lations[-1]),name='',style_url='map_shaded_dot_false'))
            if seg.valid is True:
                # Valid segments are grouped by dirtag, then by vehicle id.
                if not dirTagFolders[seg.dirtag].has_key(vehicle.vehicle_id):
                    dirTagFolders[seg.dirtag][vehicle.vehicle_id] = KFolder(vehicle.vehicle_id)
                dirTagFolders[seg.dirtag][vehicle.vehicle_id].add(folder)
            else:
                folder.visibility = False
                invalid_paths.add(folder)
    dir_folder = KFolder('Directions')
    sorted_dirs = dirTagFolders.items()
    sorted_dirs.sort()
    for dirtag,vfolders in sorted_dirs:
        dirFolder = KFolder(dirtag)
        for vid,vfolder in vfolders.items():
            dirFolder.add(vfolder)
        dir_folder.add(dirFolder)
    main_document = KFolder('Route %s'%route.route_short_name)
    main_document.add(dir_folder)
    # Create a folder which draws the gtf_shape definitions
    shape_folder = KFolder('Shapes')
    if route.shapes:
        for shape_id,shape in route.shapes.items():
            path = KPath()
            path.lations = [ l for l in shape.points]
            shape_folder.add(KPlacemark(path,name=shape_id,style_url='gShapeLine'))
    main_document.add(shape_folder)
    kml_doc = KDocument(template_path='kml/document.kml',
                        docname="Test %s" % now(),
                        fname="%s_segments.kml" % route.route_short_name,
                        top_object=main_document,
                        style_doc='kml/segment_find_styles.kml')
    print "Writing..."
    kml_doc.write()
if __name__ == "__main__":
    # Usage: python GPSDataTools.py <route_short_name>
    route = Route(argv[1])
    gen_kml(route)
|
<gh_stars>0
import pandas as pd
import streamlit as st
import matplotlib.pyplot as plt
import seaborn as sns
from nltk.stem import WordNetLemmatizer
import re
from nltk.corpus import stopwords
import nltk
#from nltk.probability import FreqDist
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from sklearn.feature_extraction.text import CountVectorizer
# Load the pre-cleaned tweet data and parse the timestamp columns up front.
tweets = pd.read_csv('tweets_EDA_clean.csv', encoding='utf-8', index_col=0)
tweets['date'] = pd.to_datetime(tweets['date'])
tweets['created_at'] = pd.to_datetime(tweets['created_at'])
def app():
    """Render the Streamlit page: a sentiment heatmap of Covid-19 tweets.

    Fixes applied: the sentiment category label was misspelled "Postive";
    `DataFrame.append` (removed in pandas 2.0) replaced with `pd.concat`;
    deprecated positional `DataFrame.pivot` arguments made keyword args.
    """
    sentimentAnalyser = SentimentIntensityAnalyzer()

    def calculate_sentiment(text):
        # Run VADER on the text and return its compound score.
        scores = sentimentAnalyser.polarity_scores(text)
        compound_score = scores['compound']
        return compound_score

    # Categorize a compound score as Positive, Negative, or Neutral.
    def getCategory(score):
        if score > 0.05:
            return 'Positive'  # fixed typo: was 'Postive'
        elif score < -0.05:
            return 'Negative'
        else:
            return 'Neutral'

    #######################################
    def clean_text(tweet):
        # function to clean tweets
        temp = tweet.lower()
        temp = re.sub(r'\\n', " ", temp)  # removing \n -newline, replacing with a space
        temp = re.sub(r'&\S+', " ", temp)  # remove &amp;, &gt;
        temp = re.sub("@[a-z0-9_]+", " ", temp)  # mentions
        temp = re.sub("#[a-z0-9_]+", " ", temp)  # hashtags
        temp = re.sub(r'http\S+', " ", temp)  # URLs
        # Collapse covid/vaccine synonyms so n-gram counts aggregate.
        temp = re.sub(r'covid19|covid-19|coronavirus|virus', "covid", temp)
        temp = re.sub(r'vaccine|vaccination|vaccines|vaccinations', "vaccine", temp)
        temp = re.sub(r'covid\s+vaccine', "vaccine", temp)
        temp = re.sub('[()!?]', ' ', temp)
        temp = re.sub('\[.*?\]', ' ', temp)
        temp = re.sub("[^a-z0-9]", " ", temp)  # remove \ - _
        # Remove stop words from the twitter texts
        stop_words = stopwords.words('english')
        temp = temp.split()
        temp = [w for w in temp if not w in stop_words]
        temp = " ".join(word for word in temp)
        return temp

    # Clean data
    tweets['pre_cleaned_text'] = tweets['text'].apply(clean_text)
    wordnet_lemmatizer = WordNetLemmatizer()
    tweets['cleaned_text'] = tweets['pre_cleaned_text'].apply(lambda x: " ".join(wordnet_lemmatizer.lemmatize(word, pos='v') for word in x.split()))
    ##########################################
    tweets['sentiment_score'] = tweets['text'].apply(calculate_sentiment)
    tweets['analysis'] = tweets['sentiment_score'].apply(getCategory)
    # use groupby on hour and variant to aggregate the sentiment score
    tweets['variant'] = tweets['variant'].astype('category')
    # Vocabulary counts over uni/bi/tri-grams for the top-500 word picker.
    vect = CountVectorizer(ngram_range=(1, 3))
    vect.fit(tweets['cleaned_text'])
    # Transform the review column
    X_text = vect.transform(tweets['cleaned_text'])
    X_text = X_text.toarray().sum(axis=0)
    ####################
    words1 = []
    tagged = nltk.pos_tag(vect.get_feature_names())
    for (word, tag) in tagged:
        if tag == 'NNP':  # If the word is a proper noun
            words1.append(word)
    X_df = pd.DataFrame(X_text, index=vect.get_feature_names(), columns=['number'])
    X_df = X_df.sort_values(by=['number'], ascending=False)
    top_words = list(X_df.index[:500])
    #########################
    title1 = '<p style="color:Blue; font-size: 40px;">Covid-19 Sentiment on Twitter</p>'
    st.markdown(title1, unsafe_allow_html=True)
    title2 = '<p> This figure displays a heatmap of the average <code>sentiment_score</code> across <code>hour</code>\
    (x-axis) and <code>variant</code> (y-axis). The user can decide whether to see an entire aggregate of text\
    (Yes) or the top 500 words (No) using the radio buttons. </p>'
    st.markdown(title2, unsafe_allow_html=True)
    title3 = '<p style="color:Blue; font-size: 32px;">Heatmap:</p>'
    st.markdown(title3, unsafe_allow_html=True)
    select_all = st.radio("Select All:", ["Yes", "No"])
    if select_all == "No":
        temp_tweets = pd.DataFrame()
        select_text = st.selectbox('input text:', top_words)
        for idx in range(len(tweets)):
            if select_text in tweets.loc[idx, 'cleaned_text']:
                # DataFrame.append was removed in pandas 2.0; concatenate a
                # one-row slice instead.
                temp_tweets = pd.concat([temp_tweets, tweets.loc[[idx]]])
    else:
        temp_tweets = tweets.copy()
    temp_tweets['hour'] = temp_tweets['hour'].astype('int8')
    heat_df = temp_tweets.groupby(['hour', 'variant'])['sentiment_score'].mean().reset_index()
    # Keyword arguments: positional DataFrame.pivot args are deprecated.
    heat_pivot = heat_df.pivot(index='variant', columns='hour', values='sentiment_score')
    plt.figure(figsize=(20, 10))
    sns.heatmap(heat_pivot, cmap="icefire", linewidths=.7, annot=True)
    plt.tight_layout()
    plt.show()
    st.set_option('deprecation.showPyplotGlobalUse', False)
    st.pyplot()
#"YlGnBu" |
import copy
import os
import re
from io import StringIO

import numpy as np
import pandas as pd
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.table import Table
class Data:
    """
    Base class for microlensing data from various observatories.
    Subclasses should overload the :func:`Data.__load_data` method. The time
    series data is stored as a list of Astropy tables, one for each photometric
    filter. Available subclasses for specific data sources are ``OGLEData``,
    ``MOAData``, ``KMTData``, ``NASAExoArchiveData``.
    Example usage:
    .. code-block:: python
        event = caustic.data.OGLEData("path_to_dir")
        coords = event.coordinates  # Get coordinates of the event
        name = event.event_name  # Get event name(s)
        event.units = "fluxes"  # Change units from magitudes to fluxes
        event.units = "magnitudes"  # Change units from fluxes to magnitudes
    Parameters
    ----------
    event_dir : str
        Path to the directory containing microlensing data.
    """

    def __init__(self, event_dir=None):
        self.__tables = []  # list of astropy Tables, one per band
        self.__event_name = ""
        self.__coordinates = None  # astropy SkyCoord, set by subclasses
        self.__units = "magnitudes"  # units have to be the same across bands

    def __str__(self):
        # BUG FIX: the original printed the tables and returned None, so
        # str(data) raised a TypeError. __str__ must return a string.
        return str(self.__tables)

    def __add__(self, other):
        """
        Defines an addition operation between datasets. Given multiple
        observations of the same event, one can load each dataset seperately
        and add them together.
        """
        result = Data()
        # NOTE: this converts both operands to fluxes *in place* — callers
        # should be aware of the side effect.
        self.units = "fluxes"
        other.units = "fluxes"
        result.units = "fluxes"
        result.light_curves = self.light_curves + other.light_curves
        if (
            self.event_coordinates is not None
            and other.event_coordinates is not None
        ):
            if self.event_coordinates == other.event_coordinates:
                # BUG FIX: the original assigned ``result.coordinates``, an
                # attribute nothing reads; use the ``event_coordinates``
                # property so the value is visible to callers.
                result.event_coordinates = self.event_coordinates
            else:
                raise ValueError(
                    "Coordinates of the two events need to match."
                )
        elif self.event_coordinates is not None:
            result.event_coordinates = self.event_coordinates
        elif other.event_coordinates is not None:
            result.event_coordinates = other.event_coordinates
        result.event_name = self.event_name + other.event_name
        return result

    def __load_data(self, event_dir):
        """
        Loads raw time series data for each survey into Astropy tables,
        stores it in `tables` class atrribute. Overloaded by subclasses;
        the base implementation is a no-op.
        """

    def __convert_data_to_fluxes(self):
        """
        If the light curves stored in the ``__tables`` attribute are expressed in
        magnitudes, calling this function will convert them to fluxes.
        """
        if self.__units == "fluxes":
            pass
        else:
            for table in self.__tables:
                F, F_err = self.__magnitudes_to_fluxes(
                    table["mag"], table["mag_err"]
                )
                table.rename_column("mag", "flux")
                table.rename_column("mag_err", "flux_err")
                table["flux"] = F
                table["flux_err"] = F_err
            self.__units = "fluxes"

    def __convert_data_to_magnitudes(self):
        """
        If the light curves stored in the ``__tables`` attribute are expressed in
        fluxes, calling this function will convert them to magnitudes.
        """
        if self.__units == "magnitudes":
            pass
        else:
            for table in self.__tables:
                m, m_err = self.__fluxes_to_magnitudes(
                    table["flux"], table["flux_err"]
                )
                table.rename_column("flux", "mag")
                table.rename_column("flux_err", "mag_err")
                table["mag"] = m
                table["mag_err"] = m_err
            self.__units = "magnitudes"

    def __magnitudes_to_fluxes(self, m, sig_m, zero_point=22.0):
        """
        Given the mean and the standard deviation of a astronomical magnitude
        which is assumed to be normally distributed, and a reference magnitude,
        this function returns the mean and the standard deviation of a flux,
        which is log-normally distributed.
        Parameters
        ----------
        m : ndarray
            Array of magnitude values.
        sig_m : ndarray
            Array of standard deviations associated with the magnitude array,
            assumed to be normally distributed. NOTE: modified in place by the
            error-bar truncation below.
        zero_point : float, optional
            Magnitude at which flux is defined to be 1. (the default is 22.)
        Returns
        -------
        tuple
            Tuple of ndarrays ``(mu_F, sig_F)`` where ``mu_F`` is the mean of the
            log-normal distributed flux, and ``sig_F`` is the corresponding
            square root variance.
        """
        # Truncate errorbars greater than 1 mag for numerical stability reasons
        sig_m[sig_m > 1] = 1
        # Calculate the mean and std. deviation for lnF which is assumed to be
        # normally distributed
        e = np.exp(1)
        mu_lnF = (zero_point - m) / (2.5 * np.log10(e))
        sig_lnF = sig_m / (2.5 * np.log10(e))
        # If lnF is normally distributed, F is log-normaly distributed with a mean
        # and root-variance given by
        mu_F = np.exp(mu_lnF + 0.5 * sig_lnF ** 2)
        sig_F = np.sqrt(
            (np.exp(sig_lnF ** 2) - 1) * np.exp(2 * mu_lnF + sig_lnF ** 2)
        )
        return mu_F, sig_F

    def __fluxes_to_magnitudes(self, F, sig_F, zero_point=22.0):
        """
        Given the mean and the standard deviation of a measured flux
        which is assumed to be log-normal distributed, and a reference magnitude,
        this function returns the mean and the standard deviation of an
        astronomical magnitude, which is normally distributed. This function
        is the inverse of :func:`__magnitudes_to_fluxes`.
        Parameters
        ----------
        F : ndarray
            Array of flux values.
        sig_F : ndarray
            Array of standard deviations associated with the flux array,
            assumed to be log-normal distributed.
        zero_point : float, optional
            Magnitude at which flux is defined to be 1. (the default is 22.)
        Returns
        -------
        tuple
            Tuple of ndarrays ``(mu_m, sig_m)`` where ``mu_m`` is the mean of
            the normally distributed magnitude, and ``sig_m`` is the
            corresponding square root variance.
        """
        e = np.exp(1)
        sig_m = 2.5 * np.log10(e) * np.sqrt(np.log(sig_F ** 2 / F ** 2 + 1))
        mu_m = zero_point - 2.5 * np.log10(e) * (
            np.log(F) - 0.5 * np.log(1 + sig_F ** 2 / F ** 2)
        )
        return mu_m, sig_m

    def get_standardized_data(self, rescale=True):
        """
        This function returns the light curves as a list of astropy tables in
        a format more suitable for modeling. In particular, the value of
        2450000 is subtracted from the time axis, and if any sort of masks
        are specified for each light curve, the masked data is returned.
        The conversion from fluxes to magnitudes defines a flux of 1
        to correspond to magnitude 22.
        Parameters
        ----------
        rescale : bool
            If true, the flux for all light curves is independently rescaled
            to zero median and unit variance. By default ``True``.
        Returns
        -------
        list
            List of :class:``astropy.table.table.Table()`` tables, each table
            corresponding to a given observing band.
        """
        # Work on a deep copy so the unit conversion doesn't mutate self.
        tmp_data = copy.deepcopy(self)
        if not tmp_data.units == "fluxes":
            tmp_data.units = "fluxes"
        standardized_data = []
        if rescale is True:
            # Subtract the median from the data such that baseline is at approx
            # zero, rescale the data such that it has unit variance
            for i, table in enumerate(tmp_data.light_curves):
                mask = table["mask"]
                table_std = Table()
                table_std.meta = table.meta
                table_std["HJD"] = table["HJD"][mask] - 2450000
                table_std["flux"] = (
                    table["flux"][mask] - np.median(table["flux"][mask])
                ) / np.std(table["flux"][mask])
                table_std["flux_err"] = table["flux_err"][mask] / np.std(
                    table["flux"][mask]
                )
                standardized_data.append(table_std)
        else:
            for i, table in enumerate(tmp_data.light_curves):
                mask = table["mask"]
                table_std = Table()
                table_std.meta = table.meta
                table_std["flux"] = table["flux"][mask]
                table_std["flux_err"] = table["flux_err"][mask]
                table_std["HJD"] = table["HJD"][mask] - 2450000
                standardized_data.append(table_std)
        return standardized_data

    def plot(self, ax):
        """
        Plots raw data.
        Parameters
        ----------
        ax : Matplotlib axes object
        """
        if self.__units == "fluxes":
            unit = "flux"
        else:
            unit = "mag"
        for i, table in enumerate(self.__tables):
            mask = table["mask"]
            # Plot data
            ax.errorbar(
                table["HJD"][mask] - 2450000,
                table[unit][mask],
                table[unit + "_err"][mask],
                fmt="o",
                color="C" + str(i),
                label=table.meta["observatory"] + " " + table.meta["filter"],
                ecolor="C" + str(i),
                alpha=0.2,
            )
            ax.set_ylabel(unit)
            # Plot masked data (fainter) if any points are masked out
            if np.any(table["HJD"][~mask]):
                ax.errorbar(
                    table["HJD"][~mask] - 2450000,
                    table[unit][~mask],
                    table[unit + "_err"][~mask],
                    fmt="o",
                    color="C" + str(i),
                    label=table.meta["observatory"]
                    + " "
                    + table.meta["filter"]
                    + " masked",
                    ecolor="C" + str(i),
                    alpha=0.05,
                )
        # Magnitudes increase downwards by convention
        if self.__units == "magnitudes":
            ax.invert_yaxis()
        ax.set_xlabel("HJD - 2450000")
        ax.set_title(self.__event_name)
        ax.grid(True)
        ax.legend(prop={"size": 16})

    def plot_standardized_data(self, ax, rescale=True):
        """
        Plots data in standardized modeling format.
        Parameters
        ----------
        ax : Matplotlib axes object
        rescale : bool
            If true, the flux for all light curves is independently rescaled
            to zero median and unit variance. By default ``True``.
        """
        if rescale is True:
            std_tables = self.get_standardized_data()
            label = "Flux (normalized)"
        else:
            std_tables = self.get_standardized_data(rescale=False)
            label = "Flux"
        # Plot masked data
        for i, table in enumerate(std_tables):
            # Plot data
            ax.errorbar(
                table["HJD"],
                table["flux"],
                table["flux_err"],
                fmt="o",
                color="C" + str(i),
                label=table.meta["observatory"] + " " + table.meta["filter"],
                ecolor="C" + str(i),
                alpha=0.2,
            )
        ax.grid(True)
        ax.set_title(self.__event_name)
        ax.set_xlabel("HJD - 2450000")
        ax.set_ylabel(label)
        ax.legend(prop={"size": 16})

    def remove_worst_outliers(self, window_size=11, mad_cutoff=5):
        """
        Masks points deviating from a rolling median by more than
        ``mad_cutoff`` robust standard deviations (1.4826 * MAD).
        Parameters
        ----------
        window_size : int
            Size of the centered rolling window for the median/MAD estimates.
        mad_cutoff : float
            Outlier threshold in units of the robust standard deviation.
        """
        if not (self.__units == "fluxes"):
            self.__convert_data_to_fluxes()
        for i, table in enumerate(self.__tables):
            series = pd.Series(table["flux"])
            mad = lambda x: 1.4826 * np.median(np.abs(x - np.median(x)))
            rolling_mad = np.array(
                series.rolling(window_size, center=True).apply(mad, raw=True)
            )
            # The centered window is undefined at the edges; pad with the
            # nearest defined value.
            rolling_mad[-window_size // 2 :] = rolling_mad[-window_size // 2]
            rolling_mad[: window_size // 2] = rolling_mad[window_size // 2]
            rolling_median = np.array(
                series.rolling(window_size, center=True).median()
            )
            rolling_median[-window_size // 2 :] = rolling_median[
                -window_size // 2
            ]
            rolling_median[: window_size // 2] = rolling_median[
                window_size // 2
            ]
            array = np.abs(
                (np.array(table["flux"]) - rolling_median) / rolling_mad
            )
            # BUG FIX: use the ``mad_cutoff`` parameter; the original
            # hard-coded 5 so the parameter had no effect.
            mask = array > mad_cutoff
            # Update masks
            self.__tables[i]["mask"] = ~mask

    def reset_masks(self):
        """Re-enable every data point by setting all masks to True."""
        for i in range(len(self.__tables)):
            self.__tables[i]["mask"] = np.ones(
                len(self.__tables[i]["HJD"]), dtype=bool
            )

    @property
    def light_curves(self):
        """list of astropy Tables, one per photometric band."""
        return self.__tables

    @light_curves.setter
    def light_curves(self, tables):
        for table in tables:
            if not isinstance(table, Table):
                raise ValueError(
                    "You need to provide a list of Astropy Tables."
                )
        self.__tables = tables

    @property
    def event_name(self):
        """str: name(s) of the microlensing event."""
        return self.__event_name

    @event_name.setter
    def event_name(self, value):
        if isinstance(value, str):
            self.__event_name = value
        else:
            raise ValueError("Event name has to be a string.")

    @property
    def event_coordinates(self):
        """astropy.coordinates.SkyCoord or None: sky position of the event."""
        return self.__coordinates

    @event_coordinates.setter
    def event_coordinates(self, coordinates):
        if isinstance(coordinates, SkyCoord):
            self.__coordinates = coordinates
        else:
            raise ValueError(
                "Event coordinates must be passed as an"
                "astropy.coordinates.SkyCoord object."
            )

    @property
    def units(self):
        """str: either ``"magnitudes"`` or ``"fluxes"``; assigning converts
        the stored light curves accordingly."""
        return self.__units

    @units.setter
    def units(self, value):
        if value == "magnitudes":
            self.__convert_data_to_magnitudes()
        elif value == "fluxes":
            self.__convert_data_to_fluxes()
        else:
            # BUG FIX: original message read "The only to options ...
            # 'magnitudes'or 'fluxes'." (typo + missing space from implicit
            # string concatenation).
            raise ValueError(
                "The only two options for units are 'magnitudes' or 'fluxes'."
            )
class OGLEData(Data):
    """
    Subclass of :class:`Data` for loading OGLE photometry from an event
    directory containing ``params.dat`` and ``phot.dat``.
    """

    def __init__(self, event_dir=None):
        super(OGLEData, self).__init__(event_dir)
        # Event name and coordinates live in params.dat at fixed positions.
        with open(event_dir + "/params.dat") as params_file:
            param_lines = params_file.readlines()
            self._Data__event_name = param_lines[0][:-1]
            right_ascension = param_lines[4][15:-1]
            declination = param_lines[5][15:-1]
            self._Data__coordinates = SkyCoord(
                right_ascension,
                declination,
                unit=(u.hourangle, u.deg, u.arcminute),
            )
            self.__load_data(event_dir)

    def __load_data(self, event_dir):
        """Read ``phot.dat`` and append it to the base-class table list."""
        photometry = Table.read(event_dir + "/phot.dat", format="ascii")
        # Keep only time, magnitude and magnitude error, renamed canonically.
        photometry.columns[0].name = "HJD"
        photometry.columns[1].name = "mag"
        photometry.columns[2].name = "mag_err"
        photometry.keep_columns(("HJD", "mag", "mag_err"))
        # All points start unmasked.
        mask_column = Table.Column(
            np.ones(len(photometry["HJD"]), dtype=bool), name="mask", dtype=bool
        )
        photometry.add_column(mask_column)
        # Normalize truncated Julian dates to full HJD.
        if photometry["HJD"][0] < 2450000:
            photometry["HJD"] += 2450000
        photometry.meta = {"filter": "I", "observatory": "OGLE"}
        self._Data__tables.append(photometry)
class MOAData(Data):
    """
    Subclass of :class:`Data` for handling MOA datasets.
    Parameters
    ----------
    event_path : str
        Path to the MOA photometry file.
    index_path : str
        Path to the index file mapping MOA event codes to event names.
    """

    def __init__(self, event_path=None, index_path=None):
        super(MOAData, self).__init__(event_path)
        self._Data__units = "fluxes"
        # Grabbing the event name is anything but trivial: normalize the
        # whitespace-separated header into CSV and read the code fields.
        with open(event_path) as f:
            contents = f.readlines()
        processed = ""
        for i in range(len(contents)):
            processed += re.sub(r"\s+", ",", contents[i].strip()) + "\n"
        processed = StringIO(processed)
        table = pd.read_csv(
            processed, sep=",", header=None, skiprows=2, nrows=5
        )
        event_code = (
            table[2].loc[0]
            + "-"
            + table[2].loc[1]
            + "-"
            + table[2].loc[2]
            + "-"
            + table[2].loc[3]
        )
        # Load index file, find real name of the event
        index = pd.read_csv(
            index_path,
            sep=" ",
            header=None,
            usecols=(0, 1),
            names=["Name", "code"],
        )
        key_value = index[index["code"].str.match(event_code)].iloc[0]
        self._Data__event_name = "MOA-" + key_value.iloc[0]
        # BUG FIX: the original called ``self._Data__load_data`` which is the
        # *empty* base-class loader, so no photometry was ever loaded. Calling
        # ``self.__load_data`` mangles to ``_MOAData__load_data`` as intended.
        self.__load_data(event_path)

    def __load_data(self, event_path):
        """Load the MOA photometry file into an Astropy table."""
        # I'm not sure that time for MOA data is in HJD
        with open(event_path) as f:
            contents = f.readlines()
        processed = ""
        for i in range(len(contents)):
            processed += re.sub(r"\s+", ",", contents[i].strip()) + "\n"
        t = Table.read(processed, format="ascii")
        t.keep_columns(["col1", "col2", "col3"])
        t.rename_column("col1", "HJD")
        t.rename_column("col2", "flux")
        t.rename_column("col3", "flux_err")
        t.meta = {"filter": "I", "observatory": "MOA"}
        # Remove the random rows with zero time and negative time
        t = t[t["HJD"] > 0]
        # Add mask column
        mask = Table.Column(
            np.ones(len(t["HJD"]), dtype=bool), name="mask", dtype=bool
        )
        t.add_column(mask)  # Insert before the first table column
        self._Data__tables.append(t)
class KMTData(Data):
    """Subclass of :class:`Data` for loading KMTNet photometry from the
    three observing sites (KMTA, KMTC, KMTS)."""

    def __init__(self, event_dir=None):
        super(KMTData, self).__init__(event_dir)
        self.__load_data(event_dir)
        self._Data__units = "fluxes"

    def __load_data(self, event_dir):
        """Load one light curve per KMTNet site into the table list."""
        # The three site files share an identical layout, so load them
        # in a single loop; order (A, C, S) matches the original listing.
        site_files = (
            ("KMTA01_I.diapl", "KMTA"),
            ("KMTC01_I.diapl", "KMTC"),
            ("KMTS01_I.diapl", "KMTS"),
        )
        loaded_tables = []
        for file_name, observatory in site_files:
            tbl = Table.read(event_dir + "/" + file_name, format="ascii")
            # Times are stored as truncated Julian dates.
            tbl["col1"] += 2450000
            tbl.keep_columns(("col1", "col2", "col3"))
            tbl.rename_column("col1", "HJD")
            tbl.rename_column("col2", "flux")
            tbl.rename_column("col3", "flux_err")
            tbl.meta = {"filter": "I", "observatory": observatory}
            # Every point starts unmasked.
            mask_column = Table.Column(
                np.ones(len(tbl["HJD"]), dtype=bool), name="mask", dtype=bool
            )
            tbl.add_column(mask_column)
            loaded_tables.append(tbl)
        self._Data__tables = loaded_tables
class NASAExoArchiveData(Data):
    """Subclass of :class:`Data` for dealing with data from the NASA
    Exoplanet Archive: loads every ``.tbl`` file found in ``event_dir``."""

    def __init__(self, event_dir=None):
        super(NASAExoArchiveData, self).__init__(event_dir)
        self.__load_data(event_dir)
        self._Data__units = "magnitudes"

    def __load_data(self, event_dir):
        """Load all ``.tbl`` tables, validating coordinates, event name,
        equinox and time reference frame across datasets."""
        count = 0
        for file in os.listdir(event_dir):
            if file.endswith(".tbl"):
                t = Table.read(os.path.join(event_dir, file), format="ascii")
                if t.colnames[0] == "JD":
                    t.rename_column("JD", "HJD")
                elif t.colnames[0] == "HJD":
                    pass
                else:
                    raise ValueError("No column named HJD or JD.")
                if t.colnames[1] == "Relative_Flux":
                    # BUG FIX: ``self.__fluxes_to_magnitudes`` mangles to
                    # ``_NASAExoArchiveData__fluxes_to_magnitudes`` which does
                    # not exist (AttributeError); the helper is defined on the
                    # base class, so address it through its mangled name.
                    m, m_err = self._Data__fluxes_to_magnitudes(
                        t["Relative_Flux"], t["Relative_Flux_Uncertainty"]
                    )
                    t["Relative_Flux"] = m
                    t["Relative_Flux_Uncertainty"] = m_err
                    t.rename_column("Relative_Flux", "mag")
                    t.rename_column("Relative_Flux_Uncertainty", "mag_err")
                    t.keep_columns(["HJD", "mag", "mag_err"])
                elif t.colnames[1] == "RELATIVE_MAGNITUDE":
                    t.rename_column("RELATIVE_MAGNITUDE", "mag")
                    t.rename_column("MAGNITUDE_UNCERTAINTY", "mag_err")
                    t.keep_columns(["HJD", "mag", "mag_err"])
                else:
                    raise ValueError(
                        "No columns specifying flux or magnitude."
                    )
                info = t.meta["keywords"]
                # Save coordinates of event, check they're consistent between
                # datasets
                if count == 0:
                    ra = info["RA"]["value"]
                    dec = info["DEC"]["value"]
                    # BUG FIX: store on the base-class attribute
                    # (``_Data__coordinates``); ``self.__coordinates`` mangled
                    # to a subclass attribute the ``event_coordinates``
                    # property never reads.
                    self._Data__coordinates = SkyCoord(ra, dec)
                elif ra != info["RA"]["value"] or dec != info["DEC"]["value"]:
                    raise ValueError(
                        "Event coordinates don't match between"
                        "different datasets. "
                    )
                # Save event name (same mangling fix as for coordinates)
                if count == 0:
                    self._Data__event_name = info["STAR_ID"]["value"]
                elif self._Data__event_name != info["STAR_ID"]["value"]:
                    # BUG FIX: ``info`` *is* the keywords dict already; the
                    # original indexed ``info["keywords"]`` a second time,
                    # which raises KeyError.
                    self._Data__event_name += info["STAR_ID"]["value"]
                # Check that all times are HJD in epoch J2000.0
                if info["EQUINOX"]["value"] != "J2000.0":
                    raise ValueError(
                        "Equinox for the dataset ",
                        info["OBSERVATORY_SITE"]["value"],
                        "is not J2000.",
                    )
                if info["TIME_REFERENCE_FRAME"]["value"] != "Heliocentric JD":
                    raise ValueError(
                        "Time reference frame for ",
                        info["OBSERVATORY_SITE"]["value"],
                        "is not HJD.",
                    )
                # Save information about observatory name and filter used
                t.meta = {
                    "observatory": info["OBSERVATORY_SITE"]["value"],
                    "filter": info["TIME_SERIES_DATA_FILTER"]["value"],
                }
                t = Table(t, masked=False)
                # Add mask column
                mask = Table.Column(
                    np.ones(len(t["HJD"]), dtype=bool), name="mask", dtype=bool
                )
                t.add_column(mask)  # Insert before the first table column
                self._Data__tables.append(t)
                count = count + 1
|
# importing the Kratos Library
import KratosMultiphysics as KM
import KratosMultiphysics.ShallowWaterApplication as SW
## Import base class file
from KratosMultiphysics.ShallowWaterApplication.shallow_water_base_solver import ShallowWaterBaseSolver
def CreateSolver(model, custom_settings):
    """Kratos factory entry point: build and return the Boussinesq solver."""
    solver = BoussinesqSolver(model, custom_settings)
    return solver
class BoussinesqSolver(ShallowWaterBaseSolver):
    """Shallow-water solver using the Boussinesq element/condition pair with
    an Adams-Moulton time scheme."""

    def __init__(self, model, settings):
        super().__init__(model, settings)
        self.element_name = "BoussinesqElement"
        self.condition_name = "BoussinesqCondition"
        # The Adams-Moulton scheme needs 4 steps in the solution buffer.
        self.min_buffer_size = 4

    def AddDofs(self):
        """Register velocity and free-surface-elevation degrees of freedom."""
        KM.VariableUtils().AddDof(KM.VELOCITY_X, self.main_model_part)
        KM.VariableUtils().AddDof(KM.VELOCITY_Y, self.main_model_part)
        KM.VariableUtils().AddDof(SW.FREE_SURFACE_ELEVATION, self.main_model_part)
        KM.Logger.PrintInfo(self.__class__.__name__, "Boussinesq equations DOFs added correctly.")

    def AddVariables(self):
        """Register the nodal solution-step variables the scheme needs."""
        super().AddVariables()
        self.main_model_part.AddNodalSolutionStepVariable(KM.ACCELERATION)
        self.main_model_part.AddNodalSolutionStepVariable(SW.VERTICAL_VELOCITY)
        self.main_model_part.AddNodalSolutionStepVariable(KM.VELOCITY_LAPLACIAN)  # Intermediate field
        self.main_model_part.AddNodalSolutionStepVariable(SW.VELOCITY_H_LAPLACIAN)  # Intermediate field
        self.main_model_part.AddNodalSolutionStepVariable(KM.RHS)  # This is used by the predictor
        self.main_model_part.AddNodalSolutionStepVariable(KM.NODAL_AREA)  # This is used to assemble the RHS by the predictor
        self.main_model_part.AddNodalSolutionStepVariable(SW.FIRST_DERIVATIVE_WEIGHTS)  # Gradient recovery
        self.main_model_part.AddNodalSolutionStepVariable(SW.SECOND_DERIVATIVE_WEIGHTS)  # Laplacian recovery

    def AdvanceInTime(self, current_time):
        """Advance the time step, warning if the time step is not constant."""
        current_time = super().AdvanceInTime(current_time)
        if self._TimeBufferIsInitialized():
            current_time_step = self.GetComputingModelPart().ProcessInfo.GetValue(KM.DELTA_TIME)
            previous_time_step = self.GetComputingModelPart().ProcessInfo.GetPreviousTimeStepInfo().GetValue(KM.DELTA_TIME)
            # BUG FIX: compare with abs() so a *decrease* of the time step is
            # also reported; the original condition only detected increases,
            # but the Adams-Moulton scheme requires a constant step either way.
            if abs(current_time_step - previous_time_step) > 1e-10:
                KM.Logger.PrintWarning(self.__class__.__name__, "The Adams Moulton scheme requires a constant time step.")
        return current_time

    def FinalizeSolutionStep(self):
        """Post-step update: recompute water height from the free surface."""
        super().FinalizeSolutionStep()
        SW.ShallowWaterUtilities().ComputeHeightFromFreeSurface(self.main_model_part)

    def _SetProcessInfo(self):
        """Propagate solver settings into the model-part ProcessInfo."""
        super()._SetProcessInfo()
        self.main_model_part.ProcessInfo.SetValue(SW.RELATIVE_DRY_HEIGHT, self.settings["relative_dry_height"].GetDouble())
        self.main_model_part.ProcessInfo.SetValue(KM.STABILIZATION_FACTOR, self.settings["stabilization_factor"].GetDouble())
        self.main_model_part.ProcessInfo.SetValue(SW.SHOCK_STABILIZATION_FACTOR, self.settings["shock_stabilization_factor"].GetDouble())

    def _CreateScheme(self):
        return SW.ResidualBasedAdamsMoultonScheme()

    @classmethod
    def GetDefaultParameters(cls):
        """Defaults for this solver, merged with the base-class defaults."""
        default_settings = KM.Parameters("""{
            "relative_dry_height"        : 0.1,
            "stabilization_factor"       : 0.01,
            "shock_stabilization_factor" : 0.0
        }""")
        default_settings.AddMissingParameters(super().GetDefaultParameters())
        return default_settings
|
<gh_stars>0
#!/usr/bin/env python3
# coding=utf-8
#
# Copyright (c) 2020 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
from core.constants import SchedulerType
from xdevice import Plugin
from xdevice import get_plugin
from xdevice import platform_logger
from core.command.parameter import Parameter
from core.testcase.testcase_manager import TestCaseManager
from core.config.config_manager import UserConfigManager
class Run(object):
    """Drives a test run: validates parameters, builds test cases, collects
    test files and dispatches the run to the scheduler plugin."""

    run_log = platform_logger("Run")

    def process_command_run(self, command, options):
        """Validate ``options``, build and collect test cases, then hand the
        run over to the scheduler plugin. Logs and returns early on any
        validation or build failure."""
        para = Parameter()
        test_type_list = para.get_testtype_list(options.testtype)
        if len(test_type_list) == 0:
            self.run_log.error("The testtype parameter is incorrect.")
            return
        options.testtype = test_type_list

        self.run_log.info("")
        self.run_log.info("------------------------------------")
        self.run_log.info("Input parameter:")
        self.run_log.info("productform = %s" % options.productform)
        self.run_log.info("testtype = %s" % options.testtype)
        self.run_log.info("subsystem = %s" % options.subsystem)
        self.run_log.info("testmodule = %s" % options.testmodule)
        self.run_log.info("testsuit = %s" % options.testsuit)
        self.run_log.info("testcase = %s" % options.testcase)
        self.run_log.info("testlevel = %s" % options.testlevel)
        self.run_log.info("------------------------------------")
        self.run_log.info("")

        if not para.check_run_parameter(options):
            self.run_log.error("Input parameter is incorrect.")
            return
        if not self._build_test_cases(options):
            self.run_log.error("Build test cases failed.")
            return

        test_case_path = self._get_tests_out_path(
            options.productform, options.build_variant)
        if not os.path.exists(test_case_path):
            self.run_log.error("%s is not exist." % test_case_path)
            return

        test_dictionary = TestCaseManager().get_test_files(test_case_path,
                                                           options)
        if not self._check_test_dictionary(test_dictionary):
            self.run_log.error("The test file list is empty.")
            return

        setattr(options, "testdriver", "LiteUnitTest")
        options.testdict = test_dictionary
        options.target_outpath = self.get_target_out_path(
            options.productform, options.build_variant)

        scheduler = get_plugin(plugin_type=Plugin.SCHEDULER,
                               plugin_id=SchedulerType.SCHEDULER)[0]
        if scheduler is None:
            self.run_log.error("Can not find the scheduler plugin.")
        else:
            scheduler.exec_command(command, options)
        return

    ##############################################################
    ##############################################################

    @classmethod
    def get_target_out_path(cls, product_form, build_variant):
        """Return the absolute output path for built packages, from the user
        config if set, otherwise derived from the source tree layout."""
        target_out_path = UserConfigManager().get_user_config(
            "test_cases").get("dir", "")
        if target_out_path == "":
            target_out_path = os.path.join(
                sys.source_code_root_path,
                "out",
                build_variant,
                "packages",
                product_form)
        target_out_path = os.path.abspath(target_out_path)
        return target_out_path

    def _build_test_cases(self, options):
        """Build the test cases unless running a coverage build; returns True
        on success or when no build is required."""
        if options.coverage == "coverage":
            self.run_log.info("Coverage testing, no need to compile testcases")
            return True
        project_root_path = sys.source_code_root_path
        if "testcase" in options.build and project_root_path != "":
            from core.build.build_manager import BuildManager
            build_manager = BuildManager()
            return build_manager.build_testcases(project_root_path, options)
        else:
            return True

    @classmethod
    def _check_test_dictionary(cls, test_dictionary):
        """Return True if any entry of ``test_dictionary`` has test files."""
        is_valid_status = False
        key_list = sorted(test_dictionary.keys())
        for key in key_list:
            file_list = test_dictionary[key]
            if len(file_list) > 0:
                is_valid_status = True
                break
        return is_valid_status

    @classmethod
    def _get_tests_out_path(cls, product_form, build_variant):
        """Return the directory holding built test binaries."""
        # BUG FIX + consistency: use ``.get("dir", "")`` like
        # ``get_target_out_path`` does; a bare ``.get("dir")`` returns None
        # when the key is missing, which fails the ``== ""`` check and then
        # crashes ``os.path.join``.
        tests_out_path = UserConfigManager().get_user_config(
            "test_cases").get("dir", "")
        if tests_out_path == "":
            tests_out_path = os.path.abspath(os.path.join(
                sys.source_code_root_path,
                "out",
                build_variant,
                "packages",
                product_form,
                "tests"))
        return tests_out_path
|
<reponame>leopd/MonsterMirror
"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license
(https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import os
import yaml
import time
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
import torchvision.utils as vutils
from .data import ImageLabelFilelist
def update_average(model_tgt, model_src, beta=0.999):
    """Blend ``model_src`` parameters into ``model_tgt`` in place:
    ``tgt = beta * tgt + (1 - beta) * src`` (moving-average style update)."""
    with torch.no_grad():
        src_params = dict(model_src.named_parameters())
        for name, target_param in model_tgt.named_parameters():
            source_param = src_params[name]
            # The two models must not share parameter tensors.
            assert source_param is not target_param
            target_param.copy_(
                beta * target_param + (1.0 - beta) * source_param)
def loader_from_list(
        root,
        file_list,
        batch_size,
        new_size=None,
        height=128,
        width=128,
        crop=True,
        num_workers=4,
        shuffle=True,
        center_crop=False,
        return_paths=False,
        drop_last=True):
    """Build a DataLoader over an image/label file list.

    The transform pipeline is assembled front-to-back as:
    [flip?][resize?][crop?] -> ToTensor -> Normalize, where flip is applied
    only in non-center-crop (training) mode.
    """
    pipeline = [transforms.ToTensor(),
                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
    if crop:
        if center_crop:
            pipeline.insert(0, transforms.CenterCrop((height, width)))
        else:
            pipeline.insert(0, transforms.RandomCrop((height, width)))
    if new_size is not None:
        pipeline.insert(0, transforms.Resize(new_size))
    if not center_crop:
        pipeline.insert(0, transforms.RandomHorizontalFlip())
    composed = transforms.Compose(pipeline)
    dataset = ImageLabelFilelist(root,
                                 file_list,
                                 composed,
                                 return_paths=return_paths)
    return DataLoader(dataset,
                      batch_size,
                      shuffle=shuffle,
                      drop_last=drop_last,
                      num_workers=num_workers)
def get_evaluation_loaders(conf, shuffle_content=False):
    """Build the (content, class) evaluation loaders described by ``conf``.
    Both use center-cropping, return paths, and keep partial final batches."""
    shared_kwargs = dict(
        new_size=conf['new_size'],
        height=conf['crop_image_height'],
        width=conf['crop_image_width'],
        crop=True,
        center_crop=True,
        return_paths=True,
        drop_last=False)
    content_loader = loader_from_list(
        root=conf['data_folder_train'],
        file_list=conf['data_list_train'],
        batch_size=conf['batch_size'],
        num_workers=conf['num_workers'],
        shuffle=shuffle_content,
        **shared_kwargs)
    # The class loader groups k_shot examples per content batch element.
    class_loader = loader_from_list(
        root=conf['data_folder_test'],
        file_list=conf['data_list_test'],
        batch_size=conf['batch_size'] * conf['k_shot'],
        num_workers=1,
        shuffle=False,
        **shared_kwargs)
    return content_loader, class_loader
def get_train_loaders(conf):
    """Build the four training/testing loaders described by ``conf``.

    Returns
    -------
    tuple
        ``(train_content_loader, train_class_loader, test_content_loader,
        test_class_loader)``.
    """
    def _make_loader(split, num_workers):
        # All four loaders differ only in the data split and worker count.
        return loader_from_list(
            root=conf['data_folder_' + split],
            file_list=conf['data_list_' + split],
            batch_size=conf['batch_size'],
            new_size=conf['new_size'],
            height=conf['crop_image_height'],
            width=conf['crop_image_width'],
            crop=True,
            num_workers=num_workers)

    train_content_loader = _make_loader('train', conf['num_workers'])
    train_class_loader = _make_loader('train', conf['num_workers'])
    test_content_loader = _make_loader('test', 1)
    test_class_loader = _make_loader('test', 1)
    return (train_content_loader, train_class_loader, test_content_loader,
            test_class_loader)
def get_config(config):
    """Parse the YAML file at path ``config`` and return its contents."""
    with open(config, 'r') as stream:
        parsed = yaml.load(stream, Loader=yaml.FullLoader)
    return parsed
def make_result_folders(output_directory):
    """Ensure the ``images`` and ``checkpoints`` subfolders exist.

    Parameters
    ----------
    output_directory : str
        Root directory for experiment outputs.

    Returns
    -------
    tuple
        ``(checkpoint_directory, image_directory)``.
    """
    image_directory = os.path.join(output_directory, 'images')
    if not os.path.exists(image_directory):
        print("Creating directory: {}".format(image_directory))
    # exist_ok avoids the TOCTOU race between the existence check above and
    # the creation (the check is kept only for the log message).
    os.makedirs(image_directory, exist_ok=True)
    checkpoint_directory = os.path.join(output_directory, 'checkpoints')
    if not os.path.exists(checkpoint_directory):
        print("Creating directory: {}".format(checkpoint_directory))
    os.makedirs(checkpoint_directory, exist_ok=True)
    return checkpoint_directory, image_directory
def __write_images(im_outs, dis_img_n, file_name):
    """Tile the first ``dis_img_n`` images of each output batch into one
    normalized grid and save it to ``file_name``."""
    # Expand single-channel images to 3 channels so all batches concatenate.
    expanded = [batch.expand(-1, 3, -1, -1) for batch in im_outs]
    stacked = torch.cat([batch[:dis_img_n] for batch in expanded], 0)
    grid = vutils.make_grid(
        stacked.data, nrow=dis_img_n, padding=0, normalize=True)
    vutils.save_image(grid, file_name, nrow=1)
def write_1images(image_outputs, image_directory, postfix):
    """Save one image grid for ``image_outputs`` tagged with ``postfix``."""
    num_display = image_outputs[0].size(0)
    out_path = '%s/gen_%s.jpg' % (image_directory, postfix)
    __write_images(image_outputs, num_display, out_path)
def _write_row(html_file, it, fn, all_size):
    """Append one gallery row (iteration header + linked image) to an open
    HTML file. ``fn`` is the image path, ``all_size`` the display width in px."""
    html_file.write("<h3>iteration [%d] (%s)</h3>" % (it, fn.split('/')[-1]))
    html_file.write("""
        <p><a href="%s">
          <img src="%s" style="width:%dpx">
        </a><br>
        <p>
        """ % (fn, fn, all_size))
    return
def write_html(filename, it, img_save_it, img_dir, all_size=1536):
    """Write an auto-refreshing HTML gallery of saved training images.

    Parameters
    ----------
    filename : str
        Output HTML file path.
    it : int
        Current iteration (listed first, then counting down).
    img_save_it : int
        Oldest iteration to include.
    img_dir : str
        Directory holding the ``gen_train_*.jpg`` images.
    all_size : int
        Display width of each image in pixels.
    """
    # BUG FIX: use a context manager so the file is closed even when a
    # write raises; the original left the handle open on error.
    with open(filename, "w") as html_file:
        html_file.write('''
    <!DOCTYPE html>
    <html>
    <head>
      <title>Experiment name = %s</title>
      <meta http-equiv="refresh" content="30">
    </head>
    <body>
    ''' % os.path.basename(filename))
        html_file.write("<h3>current</h3>")
        _write_row(html_file, it, '%s/gen_train_current.jpg' % img_dir, all_size)
        for j in range(it, img_save_it - 1, -1):
            _write_row(html_file, j, '%s/gen_train_%08d.jpg' % (img_dir, j),
                       all_size)
        html_file.write("</body></html>")
def write_loss(iterations, trainer, train_writer):
    """Log every non-callable, non-dunder attribute of ``trainer`` whose name
    mentions a tracked metric ('loss', 'grad', 'nwd' or 'accuracy')."""
    tracked_keys = ('loss', 'grad', 'nwd', 'accuracy')
    for attr_name in dir(trainer):
        if attr_name.startswith("__"):
            continue
        if callable(getattr(trainer, attr_name)):
            continue
        if any(key in attr_name for key in tracked_keys):
            train_writer.add_scalar(
                attr_name, getattr(trainer, attr_name), iterations + 1)
class Timer:
    """Context manager that prints ``msg % elapsed_seconds`` on exit.

    Example: ``with Timer("step took %.2fs"): work()``.
    """

    def __init__(self, msg):
        self.msg = msg
        self.start_time = None

    def __enter__(self):
        self.start_time = time.time()

    def __exit__(self, exc_type, exc_value, exc_tb):
        elapsed = time.time() - self.start_time
        print(self.msg % elapsed)
|
<filename>kmip/tests/unit/core/objects/test_objects.py
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six import string_types
import testtools
from testtools import TestCase
from kmip.core import attributes
from kmip.core import enums
from kmip.core.enums import AttributeType
from kmip.core.enums import BlockCipherMode
from kmip.core.enums import HashingAlgorithm as HashingAlgorithmEnum
from kmip.core.enums import KeyRoleType
from kmip.core.enums import PaddingMethod
from kmip.core.enums import Tags
from kmip.core.factories.attributes import AttributeValueFactory
from kmip.core import objects
from kmip.core.objects import Attribute
from kmip.core.objects import ExtensionName
from kmip.core.objects import ExtensionTag
from kmip.core.objects import ExtensionType
from kmip.core.objects import KeyMaterialStruct
from kmip.core import utils
from kmip.core.utils import BytearrayStream
class TestAttributeClass(TestCase):
    """
    A test suite for the Attribute class
    """

    def setUp(self):
        super(TestAttributeClass, self).setUp()
        # Two distinct names/values/indices are built so the equality tests
        # below can vary exactly one Attribute field at a time.
        name_a = 'CRYPTOGRAPHIC PARAMETERS'
        name_b = 'CRYPTOGRAPHIC ALGORITHM'
        self.attribute_name_a = Attribute.AttributeName(name_a)
        self.attribute_name_b = Attribute.AttributeName(name_b)
        self.factory = AttributeValueFactory()
        # Value A and value B differ only in their block cipher mode
        # (CBC vs CCM).
        self.attribute_value_a = self.factory.create_attribute_value(
            AttributeType.CRYPTOGRAPHIC_PARAMETERS,
            {'block_cipher_mode': BlockCipherMode.CBC,
             'padding_method': PaddingMethod.PKCS5,
             'hashing_algorithm': HashingAlgorithmEnum.SHA_1,
             'key_role_type': KeyRoleType.BDK})
        self.attribute_value_b = self.factory.create_attribute_value(
            AttributeType.CRYPTOGRAPHIC_PARAMETERS,
            {'block_cipher_mode': BlockCipherMode.CCM,
             'padding_method': PaddingMethod.PKCS5,
             'hashing_algorithm': HashingAlgorithmEnum.SHA_1,
             'key_role_type': KeyRoleType.BDK})
        index_a = 2
        index_b = 3
        self.attribute_index_a = Attribute.AttributeIndex(index_a)
        self.attribute_index_b = Attribute.AttributeIndex(index_b)
        # Object A is the baseline; B, C, and D each differ from A in
        # exactly one component (name, value, and index respectively).
        self.attributeObj_a = Attribute(
            attribute_name=self.attribute_name_a,
            attribute_value=self.attribute_value_a,
            attribute_index=self.attribute_index_a)
        self.attributeObj_b = Attribute(
            attribute_name=self.attribute_name_b,
            attribute_value=self.attribute_value_a,
            attribute_index=self.attribute_index_a)
        self.attributeObj_c = Attribute(
            attribute_name=self.attribute_name_a,
            attribute_value=self.attribute_value_b,
            attribute_index=self.attribute_index_a)
        self.attributeObj_d = Attribute(
            attribute_name=self.attribute_name_a,
            attribute_value=self.attribute_value_a,
            attribute_index=self.attribute_index_b)
        # TTLV encoding of an Attribute carrying the cryptographic
        # parameters above; used for the read/write round-trip tests.
        self.key_req_with_crypt_params = BytearrayStream((
            b'\x42\x00\x08\x01\x00\x00\x00\x78\x42\x00\x0a\x07\x00\x00\x00\x18'
            b'\x43\x52\x59\x50\x54\x4f\x47\x52\x41\x50\x48\x49\x43\x20\x50\x41'
            b'\x52\x41\x4d\x45\x54\x45\x52\x53'
            b'\x42\x00\x09\x02\x00\x00\x00\x04\x00\x00\x00\x02\x00\x00\x00\x00'
            b'\x42\x00\x0b\x01\x00\x00\x00\x40'
            b'\x42\x00\x11\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
            b'\x42\x00\x5f\x05\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00\x00'
            b'\x42\x00\x38\x05\x00\x00\x00\x04\x00\x00\x00\x04\x00\x00\x00\x00'
            b'\x42\x00\x83\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
        ))

    def tearDown(self):
        super(TestAttributeClass, self).tearDown()

    def test_read(self):
        """
        Test that decoding the reference TTLV stream reproduces the
        baseline Attribute object.
        """
        attrObj = Attribute()
        attrObj.read(self.key_req_with_crypt_params)
        self.assertEqual(self.attributeObj_a, attrObj)

    def test_write(self):
        """
        Test that encoding the baseline Attribute reproduces the reference
        TTLV stream.
        """
        # NOTE(review): arguments are passed positionally here with the
        # index before the value, while setUp constructs Attributes with
        # keywords in name/value/index order — confirm this matches
        # Attribute.__init__'s actual parameter order.
        attrObj = Attribute(self.attribute_name_a, self.attribute_index_a,
                            self.attribute_value_a)
        ostream = BytearrayStream()
        attrObj.write(ostream)
        self.assertEqual(self.key_req_with_crypt_params, ostream)

    def test_equal_on_equal(self):
        """
        Despite its name, this test verifies that Attributes differing in
        exactly one component (name, value, or index) compare unequal.
        """
        self.assertFalse(self.attributeObj_a == self.attributeObj_b)
        self.assertFalse(self.attributeObj_a == self.attributeObj_c)
        self.assertFalse(self.attributeObj_a == self.attributeObj_d)

    def test_not_equal_on_not_equal(self):
        """
        Test that the inequality operator reports True for Attributes with
        different names.
        """
        self.assertTrue(self.attributeObj_a != self.attributeObj_b)
class TestKeyMaterialStruct(TestCase):
    """
    Placeholder tests for the KeyMaterialStruct wrapper.

    Delete this suite once KeyMaterialStruct itself is removed from the
    code base.
    """

    def setUp(self):
        super(TestKeyMaterialStruct, self).setUp()

    def tearDown(self):
        super(TestKeyMaterialStruct, self).tearDown()

    def test_valid_tag(self):
        """
        Check that a newly built KeyMaterialStruct is tagged KEY_MATERIAL.
        """
        key_material = KeyMaterialStruct()
        self.assertEqual(Tags.KEY_MATERIAL, key_material.tag)
class TestExtensionName(TestCase):
    """
    A test suite for the ExtensionName class.

    ExtensionName is a thin wrapper around the TextString primitive, so
    only construction behavior needs coverage.
    """

    def setUp(self):
        super(TestExtensionName, self).setUp()

    def tearDown(self):
        super(TestExtensionName, self).tearDown()

    def _test_init(self, value):
        # Non-string, non-None inputs must be rejected with a TypeError;
        # everything else is expected to construct successfully, with None
        # defaulting to the empty string.
        if not (isinstance(value, string_types) or value is None):
            self.assertRaises(TypeError, ExtensionName, value)
            return
        extension_name = ExtensionName(value)
        expected = '' if value is None else value
        msg = "expected {0}, observed {1}".format(
            expected, extension_name.value)
        self.assertEqual(expected, extension_name.value, msg)

    def test_init_with_none(self):
        """
        Verify that construction succeeds when no value is supplied.
        """
        self._test_init(None)

    def test_init_with_valid(self):
        """
        Verify that construction succeeds with a valid string value.
        """
        self._test_init("valid")

    def test_init_with_invalid(self):
        """
        Verify that a TypeError is raised for a non-string value.
        """
        self._test_init(0)
class TestExtensionTag(TestCase):
    """
    A test suite for the ExtensionTag class.

    ExtensionTag is a thin wrapper around the Integer primitive, so only
    construction behavior needs coverage.
    """

    def setUp(self):
        super(TestExtensionTag, self).setUp()

    def tearDown(self):
        super(TestExtensionTag, self).tearDown()

    def _test_init(self, value):
        # Non-integer, non-None inputs must be rejected with a TypeError;
        # everything else is expected to construct successfully, with None
        # defaulting to zero.
        if not (isinstance(value, int) or value is None):
            self.assertRaises(TypeError, ExtensionTag, value)
            return
        extension_tag = ExtensionTag(value)
        expected = 0 if value is None else value
        msg = "expected {0}, observed {1}".format(
            expected, extension_tag.value)
        self.assertEqual(expected, extension_tag.value, msg)

    def test_init_with_none(self):
        """
        Verify that construction succeeds when no value is supplied.
        """
        self._test_init(None)

    def test_init_with_valid(self):
        """
        Verify that construction succeeds with a valid integer value.
        """
        self._test_init(0)

    def test_init_with_invalid(self):
        """
        Verify that a TypeError is raised for a non-integer value.
        """
        self._test_init("invalid")
class TestExtensionType(TestCase):
    """
    A test suite for the ExtensionType class.

    ExtensionType is a thin wrapper around the Integer primitive, so only
    construction behavior needs coverage.
    """

    def setUp(self):
        super(TestExtensionType, self).setUp()

    def tearDown(self):
        super(TestExtensionType, self).tearDown()

    def _test_init(self, value):
        # Non-integer, non-None inputs must be rejected with a TypeError;
        # everything else is expected to construct successfully, with None
        # defaulting to zero.
        if not (isinstance(value, int) or value is None):
            self.assertRaises(TypeError, ExtensionType, value)
            return
        extension_type = ExtensionType(value)
        expected = 0 if value is None else value
        msg = "expected {0}, observed {1}".format(
            expected, extension_type.value)
        self.assertEqual(expected, extension_type.value, msg)

    def test_init_with_none(self):
        """
        Verify that construction succeeds when no value is supplied.
        """
        self._test_init(None)

    def test_init_with_valid(self):
        """
        Verify that construction succeeds with a valid integer value.
        """
        self._test_init(0)

    def test_init_with_invalid(self):
        """
        Verify that a TypeError is raised for a non-integer value.
        """
        self._test_init("invalid")
class TestEncryptionKeyInformation(testtools.TestCase):
    """
    Test suite for the EncryptionKeyInformation struct.

    Covers construction, attribute validation, TTLV read/write round-trips
    (full, partial, and invalid encodings), equality/inequality operators,
    and the repr/str representations.
    """

    def setUp(self):
        super(TestEncryptionKeyInformation, self).setUp()
        # Encoding obtained from the KMIP 1.1 testing document, Section 14.1.
        #
        # This encoding matches the following set of values:
        # Unique Identifier - 100182d5-72b8-47aa-8383-4d97d512e98a
        # Cryptographic Parameters
        #     Block Cipher Mode - NIST_KEY_WRAP
        self.full_encoding = BytearrayStream(
            b'\x42\x00\x36\x01\x00\x00\x00\x48'
            b'\x42\x00\x94\x07\x00\x00\x00\x24'
            b'\x31\x30\x30\x31\x38\x32\x64\x35\x2D\x37\x32\x62\x38\x2D\x34\x37'
            b'\x61\x61\x2D\x38\x33\x38\x33\x2D\x34\x64\x39\x37\x64\x35\x31\x32'
            b'\x65\x39\x38\x61\x00\x00\x00\x00'
            b'\x42\x00\x2B\x01\x00\x00\x00\x10'
            b'\x42\x00\x11\x05\x00\x00\x00\x04\x00\x00\x00\x0D\x00\x00\x00\x00'
        )
        # Adapted from the full encoding above. This encoding matches the
        # following set of values:
        # Unique Identifier - 100182d5-72b8-47aa-8383-4d97d512e98a
        self.partial_encoding = BytearrayStream(
            b'\x42\x00\x36\x01\x00\x00\x00\x30'
            b'\x42\x00\x94\x07\x00\x00\x00\x24'
            b'\x31\x30\x30\x31\x38\x32\x64\x35\x2D\x37\x32\x62\x38\x2D\x34\x37'
            b'\x61\x61\x2D\x38\x33\x38\x33\x2D\x34\x64\x39\x37\x64\x35\x31\x32'
            b'\x65\x39\x38\x61\x00\x00\x00\x00'
        )
        # A struct header with a zero-length payload; missing the required
        # unique identifier field.
        self.empty_encoding = BytearrayStream(
            b'\x42\x00\x36\x01\x00\x00\x00\x00'
        )

    def tearDown(self):
        super(TestEncryptionKeyInformation, self).tearDown()

    def test_init(self):
        """
        Test that an EncryptionKeyInformation struct can be constructed with
        no arguments.
        """
        encryption_key_information = objects.EncryptionKeyInformation()
        self.assertEqual(None, encryption_key_information.unique_identifier)
        self.assertEqual(
            None,
            encryption_key_information.cryptographic_parameters
        )

    def test_init_with_args(self):
        """
        Test that an EncryptionKeyInformation struct can be constructed with
        valid values.
        """
        cryptographic_parameters = attributes.CryptographicParameters(
            block_cipher_mode=enums.BlockCipherMode.CTR)
        encryption_key_information = objects.EncryptionKeyInformation(
            unique_identifier="00000000-1111-2222-3333-444444444444",
            cryptographic_parameters=cryptographic_parameters
        )
        self.assertEqual(
            "00000000-1111-2222-3333-444444444444",
            encryption_key_information.unique_identifier
        )
        self.assertIsInstance(
            encryption_key_information.cryptographic_parameters,
            attributes.CryptographicParameters
        )
        parameters = encryption_key_information.cryptographic_parameters
        self.assertEqual(
            enums.BlockCipherMode.CTR,
            parameters.block_cipher_mode
        )

    def test_invalid_unique_identifier(self):
        """
        Test that a TypeError is raised when an invalid value is used to set
        the unique identifier of an EncryptionKeyInformation struct.
        """
        # Both the constructor and direct attribute assignment must reject
        # non-string identifiers.
        # NOTE(review): assertRaisesRegexp is the deprecated alias of
        # assertRaisesRegex in modern unittest — confirm testtools still
        # provides it before upgrading.
        kwargs = {'unique_identifier': 0}
        self.assertRaisesRegexp(
            TypeError,
            "Unique identifier must be a string.",
            objects.EncryptionKeyInformation,
            **kwargs
        )
        encryption_key_information = objects.EncryptionKeyInformation()
        args = (encryption_key_information, 'unique_identifier', 0)
        self.assertRaisesRegexp(
            TypeError,
            "Unique identifier must be a string.",
            setattr,
            *args
        )

    def test_invalid_cryptographic_parameters(self):
        """
        Test that a TypeError is raised when an invalid value is used to set
        the cryptographic parameters of an EncryptionKeyInformation struct.
        """
        # Both the constructor and direct attribute assignment must reject
        # values that are not CryptographicParameters structs.
        kwargs = {'cryptographic_parameters': 'invalid'}
        self.assertRaisesRegexp(
            TypeError,
            "Cryptographic parameters must be a CryptographicParameters "
            "struct.",
            objects.EncryptionKeyInformation,
            **kwargs
        )
        encryption_key_information = objects.EncryptionKeyInformation()
        args = (
            encryption_key_information,
            'cryptographic_parameters',
            'invalid'
        )
        self.assertRaisesRegexp(
            TypeError,
            "Cryptographic parameters must be a CryptographicParameters "
            "struct.",
            setattr,
            *args
        )

    def test_read(self):
        """
        Test that an EncryptionKeyInformation struct can be read from a data
        stream.
        """
        encryption_key_information = objects.EncryptionKeyInformation()
        self.assertEqual(None, encryption_key_information.unique_identifier)
        self.assertEqual(
            None,
            encryption_key_information.cryptographic_parameters
        )
        encryption_key_information.read(self.full_encoding)
        self.assertEqual(
            "100182d5-72b8-47aa-8383-4d97d512e98a",
            encryption_key_information.unique_identifier
        )
        self.assertIsInstance(
            encryption_key_information.cryptographic_parameters,
            attributes.CryptographicParameters
        )
        cryptographic_parameters = \
            encryption_key_information.cryptographic_parameters
        self.assertEqual(
            enums.BlockCipherMode.NIST_KEY_WRAP,
            cryptographic_parameters.block_cipher_mode
        )

    def test_read_partial(self):
        """
        Test that an EncryptionKeyInformation struct can be read from a partial
        data stream.
        """
        # The optional cryptographic parameters should stay None when absent
        # from the encoding.
        encryption_key_information = objects.EncryptionKeyInformation()
        self.assertEqual(None, encryption_key_information.unique_identifier)
        self.assertEqual(
            None,
            encryption_key_information.cryptographic_parameters
        )
        encryption_key_information.read(self.partial_encoding)
        self.assertEqual(
            "100182d5-72b8-47aa-8383-4d97d512e98a",
            encryption_key_information.unique_identifier
        )
        self.assertEqual(
            None,
            encryption_key_information.cryptographic_parameters
        )

    def test_read_invalid(self):
        """
        Test that a ValueError gets raised when a required
        EncryptionKeyInformation field is missing from the struct encoding.
        """
        encryption_key_information = objects.EncryptionKeyInformation()
        args = (self.empty_encoding,)
        self.assertRaisesRegexp(
            ValueError,
            "Invalid struct missing the unique identifier attribute.",
            encryption_key_information.read,
            *args
        )

    def test_write(self):
        """
        Test that an EncryptionKeyInformation struct can be written to a data
        stream.
        """
        cryptographic_parameters = attributes.CryptographicParameters(
            block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
        )
        encryption_key_information = objects.EncryptionKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
            cryptographic_parameters=cryptographic_parameters
        )
        stream = BytearrayStream()
        encryption_key_information.write(stream)
        self.assertEqual(len(self.full_encoding), len(stream))
        self.assertEqual(str(self.full_encoding), str(stream))

    def test_write_partial(self):
        """
        Test that a partially defined EncryptionKeyInformation struct can be
        written to a data stream.
        """
        encryption_key_information = objects.EncryptionKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a"
        )
        stream = BytearrayStream()
        encryption_key_information.write(stream)
        self.assertEqual(len(self.partial_encoding), len(stream))
        self.assertEqual(str(self.partial_encoding), str(stream))

    def test_write_invalid(self):
        """
        Test that a ValueError gets raised when a required
        EncryptionKeyInformation field is missing when encoding the struct.
        """
        encryption_key_information = objects.EncryptionKeyInformation()
        stream = utils.BytearrayStream()
        args = (stream,)
        self.assertRaisesRegexp(
            ValueError,
            "Invalid struct missing the unique identifier attribute.",
            encryption_key_information.write,
            *args
        )

    def test_equal_on_equal(self):
        """
        Test that the equality operator returns True when comparing two
        EncryptionKeyInformation structs with the same data.
        """
        a = objects.EncryptionKeyInformation()
        b = objects.EncryptionKeyInformation()
        self.assertTrue(a == b)
        self.assertTrue(b == a)
        a = objects.EncryptionKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.CBC
            )
        )
        b = objects.EncryptionKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.CBC
            )
        )
        self.assertTrue(a == b)
        self.assertTrue(b == a)

    def test_equal_on_not_equal_unique_identifier(self):
        """
        Test that the equality operator returns False when comparing two
        EncryptionKeyInformation structs with different unique identifiers.
        """
        a = objects.EncryptionKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a"
        )
        b = objects.EncryptionKeyInformation(
            unique_identifier="00000000-1111-2222-3333-444444444444"
        )
        self.assertFalse(a == b)
        self.assertFalse(b == a)

    def test_equal_on_not_equal_cryptographic_parameters(self):
        """
        Test that the equality operator returns False when comparing two
        EncryptionKeyInformation structs with different cryptographic
        parameters.
        """
        a = objects.EncryptionKeyInformation(
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.CBC
            )
        )
        b = objects.EncryptionKeyInformation(
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.GCM
            )
        )
        self.assertFalse(a == b)
        self.assertFalse(b == a)

    def test_equal_on_type_mismatch(self):
        """
        Test that the equality operator returns False when comparing two
        EncryptionKeyInformation structs with different types.
        """
        a = objects.EncryptionKeyInformation()
        b = 'invalid'
        self.assertFalse(a == b)
        self.assertFalse(b == a)

    def test_not_equal_on_equal(self):
        """
        Test that the inequality operator returns False when comparing two
        EncryptionKeyInformation structs with the same data.
        """
        a = objects.EncryptionKeyInformation()
        b = objects.EncryptionKeyInformation()
        self.assertFalse(a != b)
        self.assertFalse(b != a)
        a = objects.EncryptionKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.CBC
            )
        )
        b = objects.EncryptionKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.CBC
            )
        )
        self.assertFalse(a != b)
        self.assertFalse(b != a)

    def test_not_equal_on_not_equal_unique_identifier(self):
        """
        Test that the inequality operator returns True when comparing two
        EncryptionKeyInformation structs with different unique identifiers.
        """
        a = objects.EncryptionKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a"
        )
        b = objects.EncryptionKeyInformation(
            unique_identifier="00000000-1111-2222-3333-444444444444"
        )
        self.assertTrue(a != b)
        self.assertTrue(b != a)

    def test_not_equal_on_not_equal_cryptographic_parameters(self):
        """
        Test that the inequality operator returns True when comparing two
        EncryptionKeyInformation structs with different cryptographic
        parameters.
        """
        a = objects.EncryptionKeyInformation(
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.CBC
            )
        )
        b = objects.EncryptionKeyInformation(
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.GCM
            )
        )
        self.assertTrue(a != b)
        self.assertTrue(b != a)

    def test_not_equal_on_type_mismatch(self):
        """
        Test that the inequality operator returns True when comparing two
        EncryptionKeyInformation structs with different types.
        """
        a = objects.EncryptionKeyInformation()
        b = 'invalid'
        self.assertTrue(a != b)
        self.assertTrue(b != a)

    def test_repr(self):
        """
        Test that repr can be applied to an EncryptionKeyInformation struct.
        """
        encryption_key_information = objects.EncryptionKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.CBC
            )
        )
        # The expected string pins the full CryptographicParameters field
        # list; it must be updated whenever that struct grows a field.
        expected = (
            "EncryptionKeyInformation("
            "unique_identifier='100182d5-72b8-47aa-8383-4d97d512e98a', "
            "cryptographic_parameters=CryptographicParameters("
            "block_cipher_mode=BlockCipherMode.CBC, "
            "padding_method=None, "
            "hashing_algorithm=None, "
            "key_role_type=None, "
            "digital_signature_algorithm=None, "
            "cryptographic_algorithm=None, "
            "random_iv=None, "
            "iv_length=None, "
            "tag_length=None, "
            "fixed_field_length=None, "
            "invocation_field_length=None, "
            "counter_length=None, "
            "initial_counter_value=None))"
        )
        observed = repr(encryption_key_information)
        self.assertEqual(expected, observed)

    def test_str(self):
        """
        Test that str can be applied to an EncryptionKeyInformation struct.
        """
        cryptographic_parameters = attributes.CryptographicParameters(
            block_cipher_mode=enums.BlockCipherMode.CBC
        )
        encryption_key_information = objects.EncryptionKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
            cryptographic_parameters=cryptographic_parameters
        )
        expected = str({
            'unique_identifier': "100182d5-72b8-47aa-8383-4d97d512e98a",
            'cryptographic_parameters': cryptographic_parameters
        })
        observed = str(encryption_key_information)
        self.assertEqual(expected, observed)
class TestMACSignatureKeyInformation(testtools.TestCase):
    """
    Test suite for the MACSignatureKeyInformation struct.

    Covers construction, attribute validation, TTLV read/write round-trips
    (full, partial, and invalid encodings), equality/inequality operators,
    and the repr/str representations.
    """

    def setUp(self):
        super(TestMACSignatureKeyInformation, self).setUp()
        # Encoding obtained in part from the KMIP 1.1 testing document,
        # Section 14.1. The rest of the encoding was built by hand.
        #
        # This encoding matches the following set of values:
        # Unique Identifier - 100182d5-72b8-47aa-8383-4d97d512e98a
        # Cryptographic Parameters
        #     Block Cipher Mode - NIST_KEY_WRAP
        self.full_encoding = BytearrayStream(
            b'\x42\x00\x4E\x01\x00\x00\x00\x48'
            b'\x42\x00\x94\x07\x00\x00\x00\x24'
            b'\x31\x30\x30\x31\x38\x32\x64\x35\x2D\x37\x32\x62\x38\x2D\x34\x37'
            b'\x61\x61\x2D\x38\x33\x38\x33\x2D\x34\x64\x39\x37\x64\x35\x31\x32'
            b'\x65\x39\x38\x61\x00\x00\x00\x00'
            b'\x42\x00\x2B\x01\x00\x00\x00\x10'
            b'\x42\x00\x11\x05\x00\x00\x00\x04\x00\x00\x00\x0D\x00\x00\x00\x00'
        )
        # Adapted from the full encoding above. This encoding matches the
        # following set of values:
        # Unique Identifier - 100182d5-72b8-47aa-8383-4d97d512e98a
        self.partial_encoding = BytearrayStream(
            b'\x42\x00\x4E\x01\x00\x00\x00\x30'
            b'\x42\x00\x94\x07\x00\x00\x00\x24'
            b'\x31\x30\x30\x31\x38\x32\x64\x35\x2D\x37\x32\x62\x38\x2D\x34\x37'
            b'\x61\x61\x2D\x38\x33\x38\x33\x2D\x34\x64\x39\x37\x64\x35\x31\x32'
            b'\x65\x39\x38\x61\x00\x00\x00\x00'
        )
        # A struct header with a zero-length payload; missing the required
        # unique identifier field.
        self.empty_encoding = BytearrayStream(
            b'\x42\x00\x4E\x01\x00\x00\x00\x00'
        )

    def tearDown(self):
        super(TestMACSignatureKeyInformation, self).tearDown()

    def test_init(self):
        """
        Test that a MACSignatureKeyInformation struct can be constructed with
        no arguments.
        """
        mac_signature_key_information = objects.MACSignatureKeyInformation()
        self.assertEqual(
            None,
            mac_signature_key_information.unique_identifier
        )
        self.assertEqual(
            None,
            mac_signature_key_information.cryptographic_parameters
        )

    def test_init_with_args(self):
        """
        Test that a MACSignatureKeyInformation struct can be constructed with
        valid values.
        """
        cryptographic_parameters = attributes.CryptographicParameters(
            block_cipher_mode=enums.BlockCipherMode.CTR)
        mac_signature_key_information = objects.MACSignatureKeyInformation(
            unique_identifier="00000000-1111-2222-3333-444444444444",
            cryptographic_parameters=cryptographic_parameters
        )
        self.assertEqual(
            "00000000-1111-2222-3333-444444444444",
            mac_signature_key_information.unique_identifier
        )
        self.assertIsInstance(
            mac_signature_key_information.cryptographic_parameters,
            attributes.CryptographicParameters
        )
        parameters = mac_signature_key_information.cryptographic_parameters
        self.assertEqual(
            enums.BlockCipherMode.CTR,
            parameters.block_cipher_mode
        )

    def test_invalid_unique_identifier(self):
        """
        Test that a TypeError is raised when an invalid value is used to set
        the unique identifier of a MACSignatureKeyInformation struct.
        """
        # Both the constructor and direct attribute assignment must reject
        # non-string identifiers.
        # NOTE(review): assertRaisesRegexp is the deprecated alias of
        # assertRaisesRegex in modern unittest — confirm testtools still
        # provides it before upgrading.
        kwargs = {'unique_identifier': 0}
        self.assertRaisesRegexp(
            TypeError,
            "Unique identifier must be a string.",
            objects.MACSignatureKeyInformation,
            **kwargs
        )
        args = (objects.MACSignatureKeyInformation(), 'unique_identifier', 0)
        self.assertRaisesRegexp(
            TypeError,
            "Unique identifier must be a string.",
            setattr,
            *args
        )

    def test_invalid_cryptographic_parameters(self):
        """
        Test that a TypeError is raised when an invalid value is used to set
        the cryptographic parameters of a MACSignatureKeyInformation struct.
        """
        # Both the constructor and direct attribute assignment must reject
        # values that are not CryptographicParameters structs.
        kwargs = {'cryptographic_parameters': 'invalid'}
        self.assertRaisesRegexp(
            TypeError,
            "Cryptographic parameters must be a CryptographicParameters "
            "struct.",
            objects.MACSignatureKeyInformation,
            **kwargs
        )
        args = (
            objects.MACSignatureKeyInformation(),
            'cryptographic_parameters',
            'invalid'
        )
        self.assertRaisesRegexp(
            TypeError,
            "Cryptographic parameters must be a CryptographicParameters "
            "struct.",
            setattr,
            *args
        )

    def test_read(self):
        """
        Test that a MACSignatureKeyInformation struct can be read from a data
        stream.
        """
        mac_signature_key_information = objects.MACSignatureKeyInformation()
        self.assertEqual(
            None,
            mac_signature_key_information.unique_identifier
        )
        self.assertEqual(
            None,
            mac_signature_key_information.cryptographic_parameters
        )
        mac_signature_key_information.read(self.full_encoding)
        self.assertEqual(
            "100182d5-72b8-47aa-8383-4d97d512e98a",
            mac_signature_key_information.unique_identifier
        )
        self.assertIsInstance(
            mac_signature_key_information.cryptographic_parameters,
            attributes.CryptographicParameters
        )
        cryptographic_parameters = \
            mac_signature_key_information.cryptographic_parameters
        self.assertEqual(
            enums.BlockCipherMode.NIST_KEY_WRAP,
            cryptographic_parameters.block_cipher_mode
        )

    def test_read_partial(self):
        """
        Test that a MACSignatureKeyInformation struct can be read from a
        partial data stream.
        """
        # The optional cryptographic parameters should stay None when absent
        # from the encoding.
        mac_signature_key_information = objects.MACSignatureKeyInformation()
        self.assertEqual(
            None,
            mac_signature_key_information.unique_identifier
        )
        self.assertEqual(
            None,
            mac_signature_key_information.cryptographic_parameters
        )
        mac_signature_key_information.read(self.partial_encoding)
        self.assertEqual(
            "100182d5-72b8-47aa-8383-4d97d512e98a",
            mac_signature_key_information.unique_identifier
        )
        self.assertEqual(
            None,
            mac_signature_key_information.cryptographic_parameters
        )

    def test_read_invalid(self):
        """
        Test that a ValueError gets raised when a required
        MACSignatureKeyInformation field is missing from the struct encoding.
        """
        mac_signature_key_information = objects.MACSignatureKeyInformation()
        args = (self.empty_encoding,)
        self.assertRaisesRegexp(
            ValueError,
            "Invalid struct missing the unique identifier attribute.",
            mac_signature_key_information.read,
            *args
        )

    def test_write(self):
        """
        Test that a MACSignatureKeyInformation struct can be written to a data
        stream.
        """
        cryptographic_parameters = attributes.CryptographicParameters(
            block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
        )
        mac_signature_key_information = objects.MACSignatureKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
            cryptographic_parameters=cryptographic_parameters
        )
        stream = BytearrayStream()
        mac_signature_key_information.write(stream)
        self.assertEqual(len(self.full_encoding), len(stream))
        self.assertEqual(str(self.full_encoding), str(stream))

    def test_write_partial(self):
        """
        Test that a partially defined MACSignatureKeyInformation struct can be
        written to a data stream.
        """
        mac_signature_key_information = objects.MACSignatureKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a"
        )
        stream = BytearrayStream()
        mac_signature_key_information.write(stream)
        self.assertEqual(len(self.partial_encoding), len(stream))
        self.assertEqual(str(self.partial_encoding), str(stream))

    def test_write_invalid(self):
        """
        Test that a ValueError gets raised when a required
        MACSignatureKeyInformation field is missing when encoding the struct.
        """
        mac_signature_key_information = objects.MACSignatureKeyInformation()
        stream = utils.BytearrayStream()
        args = (stream,)
        self.assertRaisesRegexp(
            ValueError,
            "Invalid struct missing the unique identifier attribute.",
            mac_signature_key_information.write,
            *args
        )

    def test_equal_on_equal(self):
        """
        Test that the equality operator returns True when comparing two
        MACSignatureKeyInformation structs with the same data.
        """
        a = objects.MACSignatureKeyInformation()
        b = objects.MACSignatureKeyInformation()
        self.assertTrue(a == b)
        self.assertTrue(b == a)
        a = objects.MACSignatureKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.CBC
            )
        )
        b = objects.MACSignatureKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.CBC
            )
        )
        self.assertTrue(a == b)
        self.assertTrue(b == a)

    def test_equal_on_not_equal_unique_identifier(self):
        """
        Test that the equality operator returns False when comparing two
        MACSignatureKeyInformation structs with different unique identifiers.
        """
        a = objects.MACSignatureKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a"
        )
        b = objects.MACSignatureKeyInformation(
            unique_identifier="00000000-1111-2222-3333-444444444444"
        )
        self.assertFalse(a == b)
        self.assertFalse(b == a)

    def test_equal_on_not_equal_cryptographic_parameters(self):
        """
        Test that the equality operator returns False when comparing two
        MACSignatureKeyInformation structs with different cryptographic
        parameters.
        """
        a = objects.MACSignatureKeyInformation(
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.CBC
            )
        )
        b = objects.MACSignatureKeyInformation(
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.GCM
            )
        )
        self.assertFalse(a == b)
        self.assertFalse(b == a)

    def test_equal_on_type_mismatch(self):
        """
        Test that the equality operator returns False when comparing two
        MACSignatureKeyInformation structs with different types.
        """
        a = objects.MACSignatureKeyInformation()
        b = 'invalid'
        self.assertFalse(a == b)
        self.assertFalse(b == a)

    def test_not_equal_on_equal(self):
        """
        Test that the inequality operator returns False when comparing two
        MACSignatureKeyInformation structs with the same data.
        """
        a = objects.MACSignatureKeyInformation()
        b = objects.MACSignatureKeyInformation()
        self.assertFalse(a != b)
        self.assertFalse(b != a)
        a = objects.MACSignatureKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.CBC
            )
        )
        b = objects.MACSignatureKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.CBC
            )
        )
        self.assertFalse(a != b)
        self.assertFalse(b != a)

    def test_not_equal_on_not_equal_unique_identifier(self):
        """
        Test that the inequality operator returns True when comparing two
        MACSignatureKeyInformation structs with different unique identifiers.
        """
        a = objects.MACSignatureKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a"
        )
        b = objects.MACSignatureKeyInformation(
            unique_identifier="00000000-1111-2222-3333-444444444444"
        )
        self.assertTrue(a != b)
        self.assertTrue(b != a)

    def test_not_equal_on_not_equal_cryptographic_parameters(self):
        """
        Test that the inequality operator returns True when comparing two
        MACSignatureKeyInformation structs with different cryptographic
        parameters.
        """
        a = objects.MACSignatureKeyInformation(
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.CBC
            )
        )
        b = objects.MACSignatureKeyInformation(
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.GCM
            )
        )
        self.assertTrue(a != b)
        self.assertTrue(b != a)

    def test_not_equal_on_type_mismatch(self):
        """
        Test that the inequality operator returns True when comparing two
        MACSignatureKeyInformation structs with different types.
        """
        a = objects.MACSignatureKeyInformation()
        b = 'invalid'
        self.assertTrue(a != b)
        self.assertTrue(b != a)

    def test_repr(self):
        """
        Test that repr can be applied to a MACSignatureKeyInformation struct.
        """
        mac_signature_key_information = objects.MACSignatureKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.CBC
            )
        )
        # The expected string pins the full CryptographicParameters field
        # list; it must be updated whenever that struct grows a field.
        expected = (
            "MACSignatureKeyInformation("
            "unique_identifier='100182d5-72b8-47aa-8383-4d97d512e98a', "
            "cryptographic_parameters=CryptographicParameters("
            "block_cipher_mode=BlockCipherMode.CBC, "
            "padding_method=None, "
            "hashing_algorithm=None, "
            "key_role_type=None, "
            "digital_signature_algorithm=None, "
            "cryptographic_algorithm=None, "
            "random_iv=None, "
            "iv_length=None, "
            "tag_length=None, "
            "fixed_field_length=None, "
            "invocation_field_length=None, "
            "counter_length=None, "
            "initial_counter_value=None))"
        )
        observed = repr(mac_signature_key_information)
        self.assertEqual(expected, observed)

    def test_str(self):
        """
        Test that str can be applied to a MACSignatureKeyInformation struct.
        """
        cryptographic_parameters = attributes.CryptographicParameters(
            block_cipher_mode=enums.BlockCipherMode.CBC
        )
        mac_signature_key_information = objects.MACSignatureKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
            cryptographic_parameters=cryptographic_parameters
        )
        expected = str({
            'unique_identifier': "100182d5-72b8-47aa-8383-4d97d512e98a",
            'cryptographic_parameters': cryptographic_parameters
        })
        observed = str(mac_signature_key_information)
        self.assertEqual(expected, observed)
class TestKeyWrappingData(testtools.TestCase):
    """
    Test suite for the KeyWrappingData struct.

    Covers construction, attribute validation, TTLV encode/decode round
    trips against known-good byte encodings, equality/inequality, and the
    repr/str representations of the struct.
    """

    def setUp(self):
        super(TestKeyWrappingData, self).setUp()

        # Encoding obtained in part from the KMIP 1.1 testing document,
        # Sections 14.1. The rest was built by hand.
        #
        # This encoding matches the following set of values:
        #
        # Wrapping Method - ENCRYPT
        # Encryption Key Information
        #     Unique Identifier - 100182d5-72b8-47aa-8383-4d97d512e98a
        #     Cryptographic Parameters
        #         Block Cipher Mode - NIST_KEY_WRAP
        # MAC/Signature Key Information
        #     Unique Identifier - 100182d5-72b8-47aa-8383-4d97d512e98a
        #     Cryptographic Parameters
        #         Block Cipher Mode - NIST_KEY_WRAP
        # MAC/Signature - 0x0123456789ABCDEF
        # IV/Counter/Nonce - 0x01
        # Encoding Option - NO_ENCODING
        self.full_encoding = BytearrayStream(
            b'\x42\x00\x46\x01\x00\x00\x00\xE0'
            b'\x42\x00\x9E\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
            b'\x42\x00\x36\x01\x00\x00\x00\x48'
            b'\x42\x00\x94\x07\x00\x00\x00\x24'
            b'\x31\x30\x30\x31\x38\x32\x64\x35\x2D\x37\x32\x62\x38\x2D\x34\x37'
            b'\x61\x61\x2D\x38\x33\x38\x33\x2D\x34\x64\x39\x37\x64\x35\x31\x32'
            b'\x65\x39\x38\x61\x00\x00\x00\x00'
            b'\x42\x00\x2B\x01\x00\x00\x00\x10'
            b'\x42\x00\x11\x05\x00\x00\x00\x04\x00\x00\x00\x0D\x00\x00\x00\x00'
            b'\x42\x00\x4E\x01\x00\x00\x00\x48'
            b'\x42\x00\x94\x07\x00\x00\x00\x24'
            b'\x31\x30\x30\x31\x38\x32\x64\x35\x2D\x37\x32\x62\x38\x2D\x34\x37'
            b'\x61\x61\x2D\x38\x33\x38\x33\x2D\x34\x64\x39\x37\x64\x35\x31\x32'
            b'\x65\x39\x38\x61\x00\x00\x00\x00'
            b'\x42\x00\x2B\x01\x00\x00\x00\x10'
            b'\x42\x00\x11\x05\x00\x00\x00\x04\x00\x00\x00\x0D\x00\x00\x00\x00'
            b'\x42\x00\x4D\x08\x00\x00\x00\x08\x01\x23\x45\x67\x89\xAB\xCD\xEF'
            b'\x42\x00\x3D\x08\x00\x00\x00\x01\x01\x00\x00\x00\x00\x00\x00\x00'
            b'\x42\x00\xA3\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
        )

        # Encoding obtained from the KMIP 1.1 testing document, Section 14.1.
        # This encoding matches the following set of values:
        #
        # Wrapping Method - ENCRYPT
        # Encryption Key Information
        #     Unique Identifier - 100182d5-72b8-47aa-8383-4d97d512e98a
        #     Cryptographic Parameters
        #         Block Cipher Mode - NIST_KEY_WRAP
        # Encoding Option - NO_ENCODING
        self.partial_encoding = BytearrayStream(
            b'\x42\x00\x46\x01\x00\x00\x00\x70'
            b'\x42\x00\x9E\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
            b'\x42\x00\x36\x01\x00\x00\x00\x48'
            b'\x42\x00\x94\x07\x00\x00\x00\x24'
            b'\x31\x30\x30\x31\x38\x32\x64\x35\x2D\x37\x32\x62\x38\x2D\x34\x37'
            b'\x61\x61\x2D\x38\x33\x38\x33\x2D\x34\x64\x39\x37\x64\x35\x31\x32'
            b'\x65\x39\x38\x61\x00\x00\x00\x00'
            b'\x42\x00\x2B\x01\x00\x00\x00\x10'
            b'\x42\x00\x11\x05\x00\x00\x00\x04\x00\x00\x00\x0D\x00\x00\x00\x00'
            b'\x42\x00\xA3\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
        )

        # A bare KeyWrappingData TTLV header with an empty (zero-length)
        # payload; used to trigger missing-required-field errors.
        self.empty_encoding = BytearrayStream(
            b'\x42\x00\x46\x01\x00\x00\x00\x00'
        )

    def tearDown(self):
        super(TestKeyWrappingData, self).tearDown()

    def test_init(self):
        """
        Test that a KeyWrappingData struct can be constructed with no
        arguments.
        """
        key_wrapping_data = objects.KeyWrappingData()

        self.assertEqual(None, key_wrapping_data.wrapping_method)
        self.assertEqual(None, key_wrapping_data.encryption_key_information)
        self.assertEqual(None, key_wrapping_data.mac_signature_key_information)
        self.assertEqual(None, key_wrapping_data.mac_signature)
        self.assertEqual(None, key_wrapping_data.iv_counter_nonce)
        self.assertEqual(None, key_wrapping_data.encoding_option)

    def test_init_with_args(self):
        """
        Test that a KeyWrappingData struct can be constructed with valid
        values.
        """
        key_wrapping_data = objects.KeyWrappingData(
            wrapping_method=enums.WrappingMethod.ENCRYPT,
            encryption_key_information=objects.EncryptionKeyInformation(
                unique_identifier="12345678-9012-3456-7890-123456789012",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.CTR
                )
            ),
            mac_signature_key_information=objects.MACSignatureKeyInformation(
                unique_identifier="00000000-1111-2222-3333-444444444444",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            mac_signature=b'\x01',
            iv_counter_nonce=b'\x02',
            encoding_option=enums.EncodingOption.TTLV_ENCODING
        )

        self.assertEqual(
            enums.WrappingMethod.ENCRYPT,
            key_wrapping_data.wrapping_method
        )
        self.assertIsInstance(
            key_wrapping_data.encryption_key_information,
            objects.EncryptionKeyInformation
        )
        e = key_wrapping_data.encryption_key_information
        self.assertEqual(
            "12345678-9012-3456-7890-123456789012",
            e.unique_identifier
        )
        self.assertIsInstance(
            e.cryptographic_parameters,
            attributes.CryptographicParameters
        )
        self.assertEqual(
            enums.BlockCipherMode.CTR,
            e.cryptographic_parameters.block_cipher_mode
        )
        self.assertIsInstance(
            key_wrapping_data.mac_signature_key_information,
            objects.MACSignatureKeyInformation
        )
        m = key_wrapping_data.mac_signature_key_information
        self.assertEqual(
            "00000000-1111-2222-3333-444444444444",
            m.unique_identifier
        )
        self.assertIsInstance(
            m.cryptographic_parameters,
            attributes.CryptographicParameters
        )
        self.assertEqual(
            enums.BlockCipherMode.NIST_KEY_WRAP,
            m.cryptographic_parameters.block_cipher_mode
        )
        self.assertEqual(b'\x01', key_wrapping_data.mac_signature)
        self.assertEqual(b'\x02', key_wrapping_data.iv_counter_nonce)
        self.assertEqual(
            enums.EncodingOption.TTLV_ENCODING,
            key_wrapping_data.encoding_option
        )

    def test_invalid_wrapping_method(self):
        """
        Test that a TypeError is raised when an invalid value is used to set
        the wrapping method of a KeyWrappingData struct.
        """
        # Validation must fire both at construction time and on attribute
        # assignment.
        kwargs = {'wrapping_method': 'invalid'}
        self.assertRaisesRegexp(
            TypeError,
            "Wrapping method must be a WrappingMethod enumeration.",
            objects.KeyWrappingData,
            **kwargs
        )

        args = (objects.KeyWrappingData(), 'wrapping_method', 0)
        self.assertRaisesRegexp(
            TypeError,
            "Wrapping method must be a WrappingMethod enumeration.",
            setattr,
            *args
        )

    def test_invalid_encryption_key_information(self):
        """
        Test that a TypeError is raised when an invalid value is used to set
        the encryption key information of a KeyWrappingData struct.
        """
        kwargs = {'encryption_key_information': 'invalid'}
        self.assertRaisesRegexp(
            TypeError,
            "Encryption key information must be an EncryptionKeyInformation "
            "struct.",
            objects.KeyWrappingData,
            **kwargs
        )

        args = (
            objects.KeyWrappingData(),
            'encryption_key_information',
            'invalid'
        )
        self.assertRaisesRegexp(
            TypeError,
            "Encryption key information must be an EncryptionKeyInformation "
            "struct.",
            setattr,
            *args
        )

    def test_invalid_mac_signature_key_information(self):
        """
        Test that a TypeError is raised when an invalid value is used to set
        the MAC/signature key information of a KeyWrappingData struct.
        """
        kwargs = {'mac_signature_key_information': 'invalid'}
        self.assertRaisesRegexp(
            TypeError,
            "MAC/signature key information must be an "
            "MACSignatureKeyInformation struct.",
            objects.KeyWrappingData,
            **kwargs
        )

        args = (
            objects.KeyWrappingData(),
            'mac_signature_key_information',
            'invalid'
        )
        self.assertRaisesRegexp(
            TypeError,
            "MAC/signature key information must be an "
            "MACSignatureKeyInformation struct.",
            setattr,
            *args
        )

    def test_invalid_mac_signature(self):
        """
        Test that a TypeError is raised when an invalid value is used to set
        the MAC/signature of a KeyWrappingData struct.
        """
        kwargs = {'mac_signature': 0}
        self.assertRaisesRegexp(
            TypeError,
            "MAC/signature must be bytes.",
            objects.KeyWrappingData,
            **kwargs
        )

        args = (
            objects.KeyWrappingData(),
            'mac_signature',
            0
        )
        self.assertRaisesRegexp(
            TypeError,
            "MAC/signature must be bytes.",
            setattr,
            *args
        )

    def test_invalid_iv_counter_nonce(self):
        """
        Test that a TypeError is raised when an invalid value is used to set
        the IV/counter/nonce of a KeyWrappingData struct.
        """
        kwargs = {'iv_counter_nonce': 0}
        self.assertRaisesRegexp(
            TypeError,
            "IV/counter/nonce must be bytes.",
            objects.KeyWrappingData,
            **kwargs
        )

        args = (
            objects.KeyWrappingData(),
            'iv_counter_nonce',
            0
        )
        self.assertRaisesRegexp(
            TypeError,
            "IV/counter/nonce must be bytes.",
            setattr,
            *args
        )

    def test_invalid_encoding_option(self):
        """
        Test that a TypeError is raised when an invalid value is used to set
        the encoding option of a KeyWrappingData struct.
        """
        kwargs = {'encoding_option': 'invalid'}
        self.assertRaisesRegexp(
            TypeError,
            "Encoding option must be an EncodingOption enumeration.",
            objects.KeyWrappingData,
            **kwargs
        )

        args = (
            objects.KeyWrappingData(),
            'encoding_option',
            'invalid'
        )
        self.assertRaisesRegexp(
            TypeError,
            "Encoding option must be an EncodingOption enumeration.",
            setattr,
            *args
        )

    def test_read(self):
        """
        Test that a KeyWrappingData struct can be read from a data stream.
        """
        key_wrapping_data = objects.KeyWrappingData()

        # Verify the struct starts out completely unset before decoding.
        self.assertEqual(None, key_wrapping_data.wrapping_method)
        self.assertEqual(None, key_wrapping_data.encryption_key_information)
        self.assertEqual(None, key_wrapping_data.mac_signature_key_information)
        self.assertEqual(None, key_wrapping_data.mac_signature)
        self.assertEqual(None, key_wrapping_data.iv_counter_nonce)
        self.assertEqual(None, key_wrapping_data.encoding_option)

        key_wrapping_data.read(self.full_encoding)

        self.assertEqual(
            enums.WrappingMethod.ENCRYPT,
            key_wrapping_data.wrapping_method
        )
        self.assertIsInstance(
            key_wrapping_data.encryption_key_information,
            objects.EncryptionKeyInformation
        )
        e = key_wrapping_data.encryption_key_information
        self.assertEqual(
            "100182d5-72b8-47aa-8383-4d97d512e98a",
            e.unique_identifier
        )
        self.assertIsInstance(
            e.cryptographic_parameters,
            attributes.CryptographicParameters
        )
        self.assertEqual(
            enums.BlockCipherMode.NIST_KEY_WRAP,
            e.cryptographic_parameters.block_cipher_mode
        )
        self.assertIsInstance(
            key_wrapping_data.mac_signature_key_information,
            objects.MACSignatureKeyInformation
        )
        m = key_wrapping_data.mac_signature_key_information
        self.assertEqual(
            "100182d5-72b8-47aa-8383-4d97d512e98a",
            m.unique_identifier
        )
        self.assertIsInstance(
            m.cryptographic_parameters,
            attributes.CryptographicParameters
        )
        self.assertEqual(
            enums.BlockCipherMode.NIST_KEY_WRAP,
            m.cryptographic_parameters.block_cipher_mode
        )
        self.assertEqual(
            b'\x01\x23\x45\x67\x89\xAB\xCD\xEF',
            key_wrapping_data.mac_signature
        )
        self.assertEqual(
            b'\x01',
            key_wrapping_data.iv_counter_nonce
        )
        self.assertEqual(
            enums.EncodingOption.NO_ENCODING,
            key_wrapping_data.encoding_option
        )

    def test_read_partial(self):
        """
        Test that a KeyWrappingData struct can be read from a partial data
        stream.
        """
        key_wrapping_data = objects.KeyWrappingData()

        self.assertEqual(None, key_wrapping_data.wrapping_method)
        self.assertEqual(None, key_wrapping_data.encryption_key_information)
        self.assertEqual(None, key_wrapping_data.mac_signature_key_information)
        self.assertEqual(None, key_wrapping_data.mac_signature)
        self.assertEqual(None, key_wrapping_data.iv_counter_nonce)
        self.assertEqual(None, key_wrapping_data.encoding_option)

        key_wrapping_data.read(self.partial_encoding)

        self.assertEqual(
            enums.WrappingMethod.ENCRYPT,
            key_wrapping_data.wrapping_method
        )
        self.assertIsInstance(
            key_wrapping_data.encryption_key_information,
            objects.EncryptionKeyInformation
        )
        e = key_wrapping_data.encryption_key_information
        self.assertEqual(
            "100182d5-72b8-47aa-8383-4d97d512e98a",
            e.unique_identifier
        )
        self.assertIsInstance(
            e.cryptographic_parameters,
            attributes.CryptographicParameters
        )
        self.assertEqual(
            enums.BlockCipherMode.NIST_KEY_WRAP,
            e.cryptographic_parameters.block_cipher_mode
        )
        # Fields absent from the partial encoding must remain unset.
        self.assertIsNone(key_wrapping_data.mac_signature_key_information)
        self.assertIsNone(key_wrapping_data.mac_signature)
        self.assertIsNone(key_wrapping_data.iv_counter_nonce)
        self.assertEqual(
            enums.EncodingOption.NO_ENCODING,
            key_wrapping_data.encoding_option
        )

    def test_read_invalid(self):
        """
        Test that a ValueError gets raised when a required KeyWrappingData
        field is missing from the struct encoding.
        """
        key_wrapping_data = objects.KeyWrappingData()
        args = (self.empty_encoding,)
        self.assertRaisesRegexp(
            ValueError,
            "Invalid struct missing the wrapping method attribute.",
            key_wrapping_data.read,
            *args
        )

    def test_write(self):
        """
        Test that a KeyWrappingData struct can be written to a data stream.
        """
        key_wrapping_data = objects.KeyWrappingData(
            wrapping_method=enums.WrappingMethod.ENCRYPT,
            encryption_key_information=objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            mac_signature_key_information=objects.MACSignatureKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            mac_signature=b'\x01\x23\x45\x67\x89\xAB\xCD\xEF',
            iv_counter_nonce=b'\x01',
            encoding_option=enums.EncodingOption.NO_ENCODING
        )
        stream = BytearrayStream()
        key_wrapping_data.write(stream)

        self.assertEqual(len(self.full_encoding), len(stream))
        self.assertEqual(str(self.full_encoding), str(stream))

    def test_write_partial(self):
        """
        Test that a partially defined KeyWrappingData struct can be written to
        a data stream.
        """
        key_wrapping_data = objects.KeyWrappingData(
            wrapping_method=enums.WrappingMethod.ENCRYPT,
            encryption_key_information=objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            encoding_option=enums.EncodingOption.NO_ENCODING
        )
        stream = BytearrayStream()
        key_wrapping_data.write(stream)

        self.assertEqual(len(self.partial_encoding), len(stream))
        self.assertEqual(str(self.partial_encoding), str(stream))

    def test_write_invalid(self):
        """
        Test that a ValueError gets raised when a required KeyWrappingData
        field is missing when encoding the struct.
        """
        key_wrapping_data = objects.KeyWrappingData()
        # Use the directly-imported BytearrayStream for consistency with the
        # other tests in this class (previously utils.BytearrayStream).
        stream = BytearrayStream()
        args = (stream,)
        self.assertRaisesRegexp(
            ValueError,
            "Invalid struct missing the wrapping method attribute.",
            key_wrapping_data.write,
            *args
        )

    def test_equal_on_equal(self):
        """
        Test that the equality operator returns True when comparing two
        KeyWrappingData structs with the same data.
        """
        a = objects.KeyWrappingData()
        b = objects.KeyWrappingData()

        self.assertTrue(a == b)
        self.assertTrue(b == a)

        a = objects.KeyWrappingData(
            wrapping_method=enums.WrappingMethod.ENCRYPT,
            encryption_key_information=objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            mac_signature_key_information=objects.MACSignatureKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            mac_signature=b'\x01\x01\x01\x01\x01\x01\x01\x01',
            iv_counter_nonce=b'\x01',
            encoding_option=enums.EncodingOption.NO_ENCODING
        )
        b = objects.KeyWrappingData(
            wrapping_method=enums.WrappingMethod.ENCRYPT,
            encryption_key_information=objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            mac_signature_key_information=objects.MACSignatureKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            mac_signature=b'\x01\x01\x01\x01\x01\x01\x01\x01',
            iv_counter_nonce=b'\x01',
            encoding_option=enums.EncodingOption.NO_ENCODING
        )

        self.assertTrue(a == b)
        self.assertTrue(b == a)

    def test_equal_on_not_equal_wrapping_method(self):
        """
        Test that the equality operator returns False when comparing two
        KeyWrappingData structs with different wrapping methods.
        """
        a = objects.KeyWrappingData(
            wrapping_method=enums.WrappingMethod.ENCRYPT
        )
        b = objects.KeyWrappingData(
            wrapping_method=enums.WrappingMethod.MAC_SIGN
        )

        self.assertFalse(a == b)
        self.assertFalse(b == a)

    def test_equal_on_not_equal_encryption_key_information(self):
        """
        Test that the equality operator returns False when comparing two
        KeyWrappingData structs with different encryption key information.
        """
        a = objects.KeyWrappingData(
            encryption_key_information=objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.CBC
                )
            )
        )
        b = objects.KeyWrappingData(
            encryption_key_information=objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            )
        )

        self.assertFalse(a == b)
        self.assertFalse(b == a)

    def test_equal_on_not_equal_mac_signature_key_information(self):
        """
        Test that the equality operator returns False when comparing two
        KeyWrappingData structs with different MAC/signature key information.
        """
        a = objects.KeyWrappingData(
            mac_signature_key_information=objects.MACSignatureKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            )
        )
        b = objects.KeyWrappingData(
            mac_signature_key_information=objects.MACSignatureKeyInformation(
                unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.CBC
                )
            )
        )

        self.assertFalse(a == b)
        self.assertFalse(b == a)

    def test_equal_on_not_equal_mac_signatures(self):
        """
        Test that the equality operator returns False when comparing two
        KeyWrappingData structs with different MAC/signatures.
        """
        a = objects.KeyWrappingData(mac_signature=b'\x01')
        b = objects.KeyWrappingData(mac_signature=b'\x10')

        self.assertFalse(a == b)
        self.assertFalse(b == a)

    def test_equal_on_not_equal_iv_counter_nonce(self):
        """
        Test that the equality operator returns False when comparing two
        KeyWrappingData structs with different IV/counter/nonces.
        """
        a = objects.KeyWrappingData(iv_counter_nonce=b'\x01')
        b = objects.KeyWrappingData(iv_counter_nonce=b'\x10')

        self.assertFalse(a == b)
        self.assertFalse(b == a)

    def test_equal_on_not_equal_encoding_option(self):
        """
        Test that the equality operator returns False when comparing two
        KeyWrappingData structs with different encoding options.
        """
        a = objects.KeyWrappingData(
            encoding_option=enums.EncodingOption.NO_ENCODING
        )
        b = objects.KeyWrappingData(
            encoding_option=enums.EncodingOption.TTLV_ENCODING
        )

        self.assertFalse(a == b)
        self.assertFalse(b == a)

    def test_equal_on_type_mismatch(self):
        """
        Test that the equality operator returns False when comparing two
        KeyWrappingData structs with different types.
        """
        a = objects.KeyWrappingData()
        b = 'invalid'

        self.assertFalse(a == b)
        self.assertFalse(b == a)

    def test_not_equal_on_equal(self):
        """
        Test that the inequality operator returns False when comparing two
        KeyWrappingData structs with the same data.
        """
        a = objects.KeyWrappingData()
        b = objects.KeyWrappingData()

        self.assertFalse(a != b)
        self.assertFalse(b != a)

        a = objects.KeyWrappingData(
            wrapping_method=enums.WrappingMethod.ENCRYPT,
            encryption_key_information=objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            mac_signature_key_information=objects.MACSignatureKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            mac_signature=b'\x01\x01\x01\x01\x01\x01\x01\x01',
            iv_counter_nonce=b'\x01',
            encoding_option=enums.EncodingOption.NO_ENCODING
        )
        b = objects.KeyWrappingData(
            wrapping_method=enums.WrappingMethod.ENCRYPT,
            encryption_key_information=objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            mac_signature_key_information=objects.MACSignatureKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            mac_signature=b'\x01\x01\x01\x01\x01\x01\x01\x01',
            iv_counter_nonce=b'\x01',
            encoding_option=enums.EncodingOption.NO_ENCODING
        )

        self.assertFalse(a != b)
        self.assertFalse(b != a)

    def test_not_equal_on_not_equal_wrapping_method(self):
        """
        Test that the inequality operator returns True when comparing two
        KeyWrappingData structs with different wrapping methods.
        """
        a = objects.KeyWrappingData(
            wrapping_method=enums.WrappingMethod.ENCRYPT
        )
        b = objects.KeyWrappingData(
            wrapping_method=enums.WrappingMethod.MAC_SIGN
        )

        self.assertTrue(a != b)
        self.assertTrue(b != a)

    def test_not_equal_on_not_equal_encryption_key_information(self):
        """
        Test that the inequality operator returns True when comparing two
        KeyWrappingData structs with different encryption key information.
        """
        a = objects.KeyWrappingData(
            encryption_key_information=objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.CBC
                )
            )
        )
        b = objects.KeyWrappingData(
            encryption_key_information=objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            )
        )

        self.assertTrue(a != b)
        self.assertTrue(b != a)

    def test_not_equal_on_not_equal_mac_signature_key_information(self):
        """
        Test that the inequality operator returns True when comparing two
        KeyWrappingData structs with different MAC/signature key information.
        """
        a = objects.KeyWrappingData(
            mac_signature_key_information=objects.MACSignatureKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            )
        )
        b = objects.KeyWrappingData(
            mac_signature_key_information=objects.MACSignatureKeyInformation(
                unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.CBC
                )
            )
        )

        self.assertTrue(a != b)
        self.assertTrue(b != a)

    def test_not_equal_on_not_equal_mac_signatures(self):
        """
        Test that the inequality operator returns True when comparing two
        KeyWrappingData structs with different MAC/signatures.
        """
        a = objects.KeyWrappingData(mac_signature=b'\x01')
        b = objects.KeyWrappingData(mac_signature=b'\x10')

        self.assertTrue(a != b)
        self.assertTrue(b != a)

    def test_not_equal_on_not_equal_iv_counter_nonce(self):
        """
        Test that the inequality operator returns True when comparing two
        KeyWrappingData structs with different IV/counter/nonces.
        """
        a = objects.KeyWrappingData(iv_counter_nonce=b'\x01')
        b = objects.KeyWrappingData(iv_counter_nonce=b'\x10')

        self.assertTrue(a != b)
        self.assertTrue(b != a)

    def test_not_equal_on_not_equal_encoding_option(self):
        """
        Test that the inequality operator returns True when comparing two
        KeyWrappingData structs with different encoding options.
        """
        a = objects.KeyWrappingData(
            encoding_option=enums.EncodingOption.NO_ENCODING
        )
        b = objects.KeyWrappingData(
            encoding_option=enums.EncodingOption.TTLV_ENCODING
        )

        self.assertTrue(a != b)
        self.assertTrue(b != a)

    def test_not_equal_on_type_mismatch(self):
        """
        Test that the inequality operator returns True when comparing two
        KeyWrappingData structs with different types.
        """
        a = objects.KeyWrappingData()
        b = 'invalid'

        self.assertTrue(a != b)
        self.assertTrue(b != a)

    def test_repr(self):
        """
        Test that repr can be applied to a KeyWrappingData struct.
        """
        key_wrapping_data = objects.KeyWrappingData(
            wrapping_method=enums.WrappingMethod.ENCRYPT,
            encryption_key_information=objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            mac_signature_key_information=objects.MACSignatureKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.CBC
                )
            ),
            mac_signature=b'\x01\x01\x02\x02\x03\x03\x04\x04',
            iv_counter_nonce=b'\xFF',
            encoding_option=enums.EncodingOption.TTLV_ENCODING
        )

        # The bytes fields are formatted into the expected string so their
        # platform-specific repr (b'...' vs '...') matches at runtime.
        expected = (
            "KeyWrappingData("
            "wrapping_method=WrappingMethod.ENCRYPT, "
            "encryption_key_information=EncryptionKeyInformation("
            "unique_identifier='100182d5-72b8-ffff-8383-4d97d512e98a', "
            "cryptographic_parameters=CryptographicParameters("
            "block_cipher_mode=BlockCipherMode.NIST_KEY_WRAP, "
            "padding_method=None, "
            "hashing_algorithm=None, "
            "key_role_type=None, "
            "digital_signature_algorithm=None, "
            "cryptographic_algorithm=None, "
            "random_iv=None, "
            "iv_length=None, "
            "tag_length=None, "
            "fixed_field_length=None, "
            "invocation_field_length=None, "
            "counter_length=None, "
            "initial_counter_value=None)), "
            "mac_signature_key_information=MACSignatureKeyInformation("
            "unique_identifier='100182d5-72b8-47aa-8383-4d97d512e98a', "
            "cryptographic_parameters=CryptographicParameters("
            "block_cipher_mode=BlockCipherMode.CBC, "
            "padding_method=None, "
            "hashing_algorithm=None, "
            "key_role_type=None, "
            "digital_signature_algorithm=None, "
            "cryptographic_algorithm=None, "
            "random_iv=None, "
            "iv_length=None, "
            "tag_length=None, "
            "fixed_field_length=None, "
            "invocation_field_length=None, "
            "counter_length=None, "
            "initial_counter_value=None)), "
            "mac_signature={0}, "
            "iv_counter_nonce={1}, "
            "encoding_option=EncodingOption.TTLV_ENCODING)".format(
                b'\x01\x01\x02\x02\x03\x03\x04\x04',
                b'\xFF'
            )
        )
        observed = repr(key_wrapping_data)

        self.assertEqual(expected, observed)

    def test_str(self):
        """
        Test that str can be applied to a KeyWrappingData struct.
        """
        key_wrapping_data = objects.KeyWrappingData(
            wrapping_method=enums.WrappingMethod.ENCRYPT,
            encryption_key_information=objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            mac_signature_key_information=objects.MACSignatureKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.CBC
                )
            ),
            mac_signature=b'\x01\x01\x02\x02\x03\x03\x04\x04',
            iv_counter_nonce=b'\xFF',
            encoding_option=enums.EncodingOption.TTLV_ENCODING
        )

        expected = str({
            'wrapping_method': enums.WrappingMethod.ENCRYPT,
            'encryption_key_information': objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            'mac_signature_key_information':
                objects.MACSignatureKeyInformation(
                    unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                    cryptographic_parameters=attributes.CryptographicParameters(
                        block_cipher_mode=enums.BlockCipherMode.CBC
                    )
                ),
            'mac_signature': b'\x01\x01\x02\x02\x03\x03\x04\x04',
            'iv_counter_nonce': b'\xFF',
            'encoding_option': enums.EncodingOption.TTLV_ENCODING
        })
        observed = str(key_wrapping_data)

        self.assertEqual(expected, observed)
class TestKeyWrappingSpecification(testtools.TestCase):
"""
Test suite for the KeyWrappingSpecification struct.
"""
def setUp(self):
super(TestKeyWrappingSpecification, self).setUp()
# Encoding obtained in part from the KMIP 1.1 testing document,
# Sections 14.1 and 14.2. The rest was built by hand.
#
# This encoding matches the following set of values:
#
# Wrapping Method - Encrypt
# Encryption Key Information
# Unique Identifier - 100182d5-72b8-47aa-8383-4d97d512e98a
# Cryptographic Parameters
# Block Cipher Mode - NIST_KEY_WRAP
# MAC/Signature Key Information
# Unique Identifier - 100182d5-72b8-47aa-8383-4d97d512e98a
# Cryptographic Parameters
# Block Cipher Mode - NIST_KEY_WRAP
# Attribute Names
# Cryptographic Usage Mask
# Encoding Option - NO_ENCODING
self.full_encoding = BytearrayStream(
b'\x42\x00\x47\x01\x00\x00\x00\xE0'
b'\x42\x00\x9E\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
b'\x42\x00\x36\x01\x00\x00\x00\x48'
b'\x42\x00\x94\x07\x00\x00\x00\x24'
b'\x31\x30\x30\x31\x38\x32\x64\x35\x2D\x37\x32\x62\x38\x2D\x34\x37'
b'\x61\x61\x2D\x38\x33\x38\x33\x2D\x34\x64\x39\x37\x64\x35\x31\x32'
b'\x65\x39\x38\x61\x00\x00\x00\x00'
b'\x42\x00\x2B\x01\x00\x00\x00\x10'
b'\x42\x00\x11\x05\x00\x00\x00\x04\x00\x00\x00\x0D\x00\x00\x00\x00'
b'\x42\x00\x4E\x01\x00\x00\x00\x48'
b'\x42\x00\x94\x07\x00\x00\x00\x24'
b'\x31\x30\x30\x31\x38\x32\x64\x35\x2D\x37\x32\x62\x38\x2D\x34\x37'
b'\x61\x61\x2D\x38\x33\x38\x33\x2D\x34\x64\x39\x37\x64\x35\x31\x32'
b'\x65\x39\x38\x61\x00\x00\x00\x00'
b'\x42\x00\x2B\x01\x00\x00\x00\x10'
b'\x42\x00\x11\x05\x00\x00\x00\x04\x00\x00\x00\x0D\x00\x00\x00\x00'
b'\x42\x00\x0A\x07\x00\x00\x00\x18'
b'\x43\x72\x79\x70\x74\x6F\x67\x72\x61\x70\x68\x69\x63\x20\x55\x73'
b'\x61\x67\x65\x20\x4D\x61\x73\x6B'
b'\x42\x00\xA3\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
)
# Adapted from the full encoding above. This encoding matches the
# following set of values:
#
# Wrapping Method - Encrypt
# Encryption Key Information
# Unique Identifier - 100182d5-72b8-47aa-8383-4d97d512e98a
# Cryptographic Parameters
# Block Cipher Mode - NIST_KEY_WRAP
self.partial_encoding = BytearrayStream(
b'\x42\x00\x47\x01\x00\x00\x00\x60'
b'\x42\x00\x9E\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
b'\x42\x00\x36\x01\x00\x00\x00\x48'
b'\x42\x00\x94\x07\x00\x00\x00\x24'
b'\x31\x30\x30\x31\x38\x32\x64\x35\x2D\x37\x32\x62\x38\x2D\x34\x37'
b'\x61\x61\x2D\x38\x33\x38\x33\x2D\x34\x64\x39\x37\x64\x35\x31\x32'
b'\x65\x39\x38\x61\x00\x00\x00\x00'
b'\x42\x00\x2B\x01\x00\x00\x00\x10'
b'\x42\x00\x11\x05\x00\x00\x00\x04\x00\x00\x00\x0D\x00\x00\x00\x00'
)
self.empty_encoding = BytearrayStream(
b'\x42\x00\x47\x01\x00\x00\x00\x00'
)
def tearDown(self):
super(TestKeyWrappingSpecification, self).tearDown()
def test_init(self):
"""
Test that a KeyWrappingSpecification struct can be constructed with
no arguments.
"""
key_wrapping_specification = objects.KeyWrappingSpecification()
self.assertEqual(None, key_wrapping_specification.wrapping_method)
self.assertEqual(
None,
key_wrapping_specification.encryption_key_information
)
self.assertEqual(
None,
key_wrapping_specification.mac_signature_key_information
)
self.assertEqual(None, key_wrapping_specification.attribute_names)
self.assertEqual(None, key_wrapping_specification.encoding_option)
def test_init_with_args(self):
"""
Test that a KeyWrappingSpecification struct can be constructed with
valid values.
"""
encryption_key_information = objects.EncryptionKeyInformation(
unique_identifier="12345678-9012-3456-7890-123456789012",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CTR
)
)
mac_signature_key_information = objects.MACSignatureKeyInformation(
unique_identifier="00000000-1111-2222-3333-444444444444",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
)
key_wrapping_specification = objects.KeyWrappingSpecification(
wrapping_method=enums.WrappingMethod.ENCRYPT,
encryption_key_information=encryption_key_information,
mac_signature_key_information=mac_signature_key_information,
attribute_names=[
'Cryptographic Algorithm',
'Cryptographic Length',
'Cryptographic Usage Mask'
],
encoding_option=enums.EncodingOption.TTLV_ENCODING
)
self.assertEqual(
enums.WrappingMethod.ENCRYPT,
key_wrapping_specification.wrapping_method
)
self.assertIsInstance(
key_wrapping_specification.encryption_key_information,
objects.EncryptionKeyInformation
)
e = key_wrapping_specification.encryption_key_information
self.assertEqual(
"12345678-9012-3456-7890-123456789012",
e.unique_identifier
)
self.assertIsInstance(
e.cryptographic_parameters,
attributes.CryptographicParameters
)
self.assertEqual(
enums.BlockCipherMode.CTR,
e.cryptographic_parameters.block_cipher_mode
)
self.assertIsInstance(
key_wrapping_specification.mac_signature_key_information,
objects.MACSignatureKeyInformation
)
m = key_wrapping_specification.mac_signature_key_information
self.assertEqual(
"00000000-1111-2222-3333-444444444444",
m.unique_identifier
)
self.assertIsInstance(
m.cryptographic_parameters,
attributes.CryptographicParameters
)
self.assertEqual(
enums.BlockCipherMode.NIST_KEY_WRAP,
m.cryptographic_parameters.block_cipher_mode
)
self.assertIsInstance(
key_wrapping_specification.attribute_names,
list
)
self.assertEqual(3, len(key_wrapping_specification.attribute_names))
self.assertEqual(
'Cryptographic Algorithm',
key_wrapping_specification.attribute_names[0]
)
self.assertEqual(
'Cryptographic Length',
key_wrapping_specification.attribute_names[1]
)
self.assertEqual(
'Cryptographic Usage Mask',
key_wrapping_specification.attribute_names[2]
)
self.assertEqual(
enums.EncodingOption.TTLV_ENCODING,
key_wrapping_specification.encoding_option
)
def test_invalid_wrapping_method(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the wrapping method of a KeyWrappingSpecification struct.
"""
kwargs = {'wrapping_method': 'invalid'}
self.assertRaisesRegexp(
TypeError,
"Wrapping method must be a WrappingMethod enumeration.",
objects.KeyWrappingSpecification,
**kwargs
)
args = (objects.KeyWrappingSpecification(), 'wrapping_method', 0)
self.assertRaisesRegexp(
TypeError,
"Wrapping method must be a WrappingMethod enumeration.",
setattr,
*args
)
def test_invalid_encryption_key_information(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the encryption key information of a KeyWrappingSpecification struct.
"""
kwargs = {'encryption_key_information': 'invalid'}
self.assertRaisesRegexp(
TypeError,
"Encryption key information must be an EncryptionKeyInformation "
"struct.",
objects.KeyWrappingSpecification,
**kwargs
)
args = (
objects.KeyWrappingSpecification(),
'encryption_key_information',
'invalid'
)
self.assertRaisesRegexp(
TypeError,
"Encryption key information must be an EncryptionKeyInformation "
"struct.",
setattr,
*args
)
def test_invalid_mac_signature_key_information(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the MAC/signature key information of a KeyWrappingSpecification
struct.
"""
kwargs = {'mac_signature_key_information': 'invalid'}
self.assertRaisesRegexp(
TypeError,
"MAC/signature key information must be an "
"MACSignatureKeyInformation struct.",
objects.KeyWrappingSpecification,
**kwargs
)
args = (
objects.KeyWrappingSpecification(),
'mac_signature_key_information',
'invalid'
)
self.assertRaisesRegexp(
TypeError,
"MAC/signature key information must be an "
"MACSignatureKeyInformation struct.",
setattr,
*args
)
def test_invalid_attribute_names(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the attribute names of a KeyWrappingSpecification struct.
"""
kwargs = {'attribute_names': 'invalid'}
self.assertRaisesRegexp(
TypeError,
"Attribute names must be a list of strings.",
objects.KeyWrappingSpecification,
**kwargs
)
args = (
objects.KeyWrappingSpecification(),
'attribute_names',
['valid', 0]
)
self.assertRaisesRegexp(
TypeError,
"Attribute names must be a list of strings.",
setattr,
*args
)
def test_invalid_encoding_option(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the encoding option of a KeyWrappingSpecification struct.
"""
kwargs = {'encoding_option': 'invalid'}
self.assertRaisesRegexp(
TypeError,
"Encoding option must be an EncodingOption enumeration.",
objects.KeyWrappingSpecification,
**kwargs
)
args = (
objects.KeyWrappingSpecification(),
'encoding_option',
'invalid'
)
self.assertRaisesRegexp(
TypeError,
"Encoding option must be an EncodingOption enumeration.",
setattr,
*args
)
def test_read(self):
"""
Test that a KeyWrappingSpecification struct can be read from a data
stream.
"""
key_wrapping_specification = objects.KeyWrappingSpecification()
self.assertEqual(None, key_wrapping_specification.wrapping_method)
self.assertEqual(
None,
key_wrapping_specification.encryption_key_information
)
self.assertEqual(
None,
key_wrapping_specification.mac_signature_key_information
)
self.assertEqual(None, key_wrapping_specification.attribute_names)
self.assertEqual(None, key_wrapping_specification.encoding_option)
key_wrapping_specification.read(self.full_encoding)
self.assertEqual(
enums.WrappingMethod.ENCRYPT,
key_wrapping_specification.wrapping_method
)
self.assertIsInstance(
key_wrapping_specification.encryption_key_information,
objects.EncryptionKeyInformation
)
e = key_wrapping_specification.encryption_key_information
self.assertEqual(
"100182d5-72b8-47aa-8383-4d97d512e98a",
e.unique_identifier
)
self.assertIsInstance(
e.cryptographic_parameters,
attributes.CryptographicParameters
)
self.assertEqual(
enums.BlockCipherMode.NIST_KEY_WRAP,
e.cryptographic_parameters.block_cipher_mode
)
self.assertIsInstance(
key_wrapping_specification.mac_signature_key_information,
objects.MACSignatureKeyInformation
)
m = key_wrapping_specification.mac_signature_key_information
self.assertEqual(
"100182d5-72b8-47aa-8383-4d97d512e98a",
m.unique_identifier
)
self.assertIsInstance(
m.cryptographic_parameters,
attributes.CryptographicParameters
)
self.assertEqual(
enums.BlockCipherMode.NIST_KEY_WRAP,
m.cryptographic_parameters.block_cipher_mode
)
self.assertIsInstance(
key_wrapping_specification.attribute_names,
list
)
self.assertEqual(
'Cryptographic Usage Mask',
key_wrapping_specification.attribute_names[0]
)
self.assertEqual(
enums.EncodingOption.NO_ENCODING,
key_wrapping_specification.encoding_option
)
def test_read_partial(self):
"""
Test that a KeyWrappingSpecification struct can be read from a
partial data stream.
"""
key_wrapping_specification = objects.KeyWrappingSpecification()
self.assertEqual(None, key_wrapping_specification.wrapping_method)
self.assertEqual(
None,
key_wrapping_specification.encryption_key_information
)
self.assertEqual(
None,
key_wrapping_specification.mac_signature_key_information
)
self.assertEqual(None, key_wrapping_specification.attribute_names)
self.assertEqual(None, key_wrapping_specification.encoding_option)
key_wrapping_specification.read(self.partial_encoding)
self.assertEqual(
enums.WrappingMethod.ENCRYPT,
key_wrapping_specification.wrapping_method
)
self.assertIsInstance(
key_wrapping_specification.encryption_key_information,
objects.EncryptionKeyInformation
)
e = key_wrapping_specification.encryption_key_information
self.assertEqual(
"100182d5-72b8-47aa-8383-4d97d512e98a",
e.unique_identifier
)
self.assertIsInstance(
e.cryptographic_parameters,
attributes.CryptographicParameters
)
self.assertEqual(
enums.BlockCipherMode.NIST_KEY_WRAP,
e.cryptographic_parameters.block_cipher_mode
)
self.assertIsNone(
key_wrapping_specification.mac_signature_key_information
)
self.assertIsNone(
key_wrapping_specification.attribute_names
)
self.assertIsNone(
key_wrapping_specification.encoding_option
)
def test_read_invalid(self):
"""
Test that a ValueError gets raised when a required
MACSignatureKeyInformation field is missing from the struct encoding.
"""
key_wrapping_specification = objects.KeyWrappingSpecification()
args = (self.empty_encoding,)
self.assertRaisesRegexp(
ValueError,
"Invalid struct missing the wrapping method attribute.",
key_wrapping_specification.read,
*args
)
def test_write(self):
"""
Test that a KeyWrappingSpecification struct can be written to a data
stream.
"""
key_wrapping_specification = objects.KeyWrappingSpecification(
wrapping_method=enums.WrappingMethod.ENCRYPT,
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
mac_signature_key_information=objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
attribute_names=['Cryptographic Usage Mask'],
encoding_option=enums.EncodingOption.NO_ENCODING
)
stream = BytearrayStream()
key_wrapping_specification.write(stream)
self.assertEqual(len(self.full_encoding), len(stream))
self.assertEqual(str(self.full_encoding), str(stream))
def test_write_partial(self):
"""
Test that a partially defined KeyWrappingSpecification struct can be
written to a data stream.
"""
key_wrapping_specification = objects.KeyWrappingSpecification(
wrapping_method=enums.WrappingMethod.ENCRYPT,
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
)
)
stream = BytearrayStream()
key_wrapping_specification.write(stream)
self.assertEqual(len(self.partial_encoding), len(stream))
self.assertEqual(str(self.partial_encoding), str(stream))
def test_write_invalid(self):
"""
Test that a ValueError gets raised when a required
KeyWrappingSpecification field is missing when encoding the struct.
"""
key_wrapping_specification = objects.KeyWrappingSpecification()
stream = utils.BytearrayStream()
args = (stream,)
self.assertRaisesRegexp(
ValueError,
"Invalid struct missing the wrapping method attribute.",
key_wrapping_specification.write,
*args
)
def test_equal_on_equal(self):
"""
Test that the equality operator returns True when comparing two
KeyWrappingSpecification structs with the same data.
"""
a = objects.KeyWrappingSpecification()
b = objects.KeyWrappingSpecification()
self.assertTrue(a == b)
self.assertTrue(b == a)
a = objects.KeyWrappingSpecification(
wrapping_method=enums.WrappingMethod.ENCRYPT,
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
attribute_names=['Cryptographic Usage Mask'],
encoding_option=enums.EncodingOption.NO_ENCODING
)
b = objects.KeyWrappingSpecification(
wrapping_method=enums.WrappingMethod.ENCRYPT,
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
attribute_names=['Cryptographic Usage Mask'],
encoding_option=enums.EncodingOption.NO_ENCODING
)
self.assertTrue(a == b)
self.assertTrue(b == a)
def test_equal_on_not_equal_wrapping_method(self):
"""
Test that the equality operator returns False when comparing two
KeyWrappingSpecification structs with different wrapping methods.
"""
a = objects.KeyWrappingSpecification(
wrapping_method=enums.WrappingMethod.ENCRYPT
)
b = objects.KeyWrappingSpecification(
wrapping_method=enums.WrappingMethod.MAC_SIGN
)
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_equal_on_not_equal_encryption_key_information(self):
"""
Test that the equality operator returns False when comparing two
KeyWrappingSpecification structs with different encryption key
information.
"""
a = objects.KeyWrappingSpecification(
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
)
)
b = objects.KeyWrappingSpecification(
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
)
)
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_equal_on_not_equal_mac_signature_key_information(self):
"""
Test that the equality operator returns False when comparing two
KeyWrappingSpecification structs with different MAC/signature key
information.
"""
a = objects.KeyWrappingSpecification(
mac_signature_key_information=objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
)
)
b = objects.KeyWrappingSpecification(
mac_signature_key_information=objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
)
)
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_equal_on_not_equal_attribute_names(self):
"""
Test that the equality operator returns False when comparing two
KeyWrappingSpecification structs with different attribute names.
"""
a = objects.KeyWrappingSpecification(
attribute_names=[
'Cryptographic Algorithm',
'Cryptographic Length'
]
)
b = objects.KeyWrappingSpecification(
attribute_names=['Cryptographic Usage Mask']
)
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_equal_on_not_equal_encoding_option(self):
"""
Test that the equality operator returns False when comparing two
KeyWrappingSpecification structs with different encoding options.
"""
a = objects.KeyWrappingSpecification(
encoding_option=enums.EncodingOption.NO_ENCODING
)
b = objects.KeyWrappingSpecification(
encoding_option=enums.EncodingOption.TTLV_ENCODING
)
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_equal_on_type_mismatch(self):
"""
Test that the equality operator returns False when comparing two
KeyWrappingSpecification structs with different types.
"""
a = objects.KeyWrappingSpecification()
b = 'invalid'
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_not_equal_on_equal(self):
"""
Test that the inequality operator returns False when comparing two
KeyWrappingSpecification structs with the same data.
"""
a = objects.KeyWrappingSpecification()
b = objects.KeyWrappingSpecification()
self.assertFalse(a != b)
self.assertFalse(b != a)
a = objects.KeyWrappingSpecification(
wrapping_method=enums.WrappingMethod.ENCRYPT,
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
attribute_names=['Cryptographic Usage Mask'],
encoding_option=enums.EncodingOption.NO_ENCODING
)
b = objects.KeyWrappingSpecification(
wrapping_method=enums.WrappingMethod.ENCRYPT,
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
attribute_names=['Cryptographic Usage Mask'],
encoding_option=enums.EncodingOption.NO_ENCODING
)
self.assertFalse(a != b)
self.assertFalse(b != a)
def test_not_equal_on_not_equal_wrapping_method(self):
"""
Test that the inequality operator returns True when comparing two
KeyWrappingSpecification structs with different wrapping methods.
"""
a = objects.KeyWrappingSpecification(
wrapping_method=enums.WrappingMethod.ENCRYPT
)
b = objects.KeyWrappingSpecification(
wrapping_method=enums.WrappingMethod.MAC_SIGN
)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_not_equal_encryption_key_information(self):
"""
Test that the inequality operator returns True when comparing two
KeyWrappingSpecification structs with different encryption key
information.
"""
a = objects.KeyWrappingSpecification(
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
)
)
b = objects.KeyWrappingSpecification(
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
)
)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_not_equal_mac_signature_key_information(self):
"""
Test that the inequality operator returns True when comparing two
KeyWrappingSpecification structs with different MAC/signature key
information.
"""
a = objects.KeyWrappingSpecification(
mac_signature_key_information=objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
)
)
b = objects.KeyWrappingSpecification(
mac_signature_key_information=objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
)
)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_not_equal_attribute_names(self):
"""
Test that the inequality operator returns True when comparing two
KeyWrappingSpecification structs with different attribute names.
"""
a = objects.KeyWrappingSpecification(
attribute_names=[
'Cryptographic Algorithm',
'Cryptographic Length'
]
)
b = objects.KeyWrappingSpecification(
attribute_names=['Cryptographic Usage Mask']
)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_not_equal_encoding_option(self):
"""
Test that the inequality operator returns True when comparing two
KeyWrappingSpecification structs with different encoding options.
"""
a = objects.KeyWrappingSpecification(
encoding_option=enums.EncodingOption.NO_ENCODING
)
b = objects.KeyWrappingSpecification(
encoding_option=enums.EncodingOption.TTLV_ENCODING
)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_type_mismatch(self):
"""
Test that the inequality operator returns True when comparing two
KeyWrappingSpecification structs with different types.
"""
a = objects.KeyWrappingSpecification()
b = 'invalid'
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_repr(self):
"""
Test that repr can be applied to an KeyWrappingSpecification struct.
"""
key_wrapping_specification = objects.KeyWrappingSpecification(
wrapping_method=enums.WrappingMethod.ENCRYPT,
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
mac_signature_key_information=objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
),
attribute_names=[
'Cryptographic Algorithm',
'Cryptographic Length'
],
encoding_option=enums.EncodingOption.TTLV_ENCODING
)
expected = (
"KeyWrappingSpecification("
"wrapping_method=WrappingMethod.ENCRYPT, "
"encryption_key_information=EncryptionKeyInformation("
"unique_identifier='100182d5-72b8-ffff-8383-4d97d512e98a', "
"cryptographic_parameters=CryptographicParameters("
"block_cipher_mode=BlockCipherMode.NIST_KEY_WRAP, "
"padding_method=None, "
"hashing_algorithm=None, "
"key_role_type=None, "
"digital_signature_algorithm=None, "
"cryptographic_algorithm=None, "
"random_iv=None, "
"iv_length=None, "
"tag_length=None, "
"fixed_field_length=None, "
"invocation_field_length=None, "
"counter_length=None, "
"initial_counter_value=None)), "
"mac_signature_key_information=MACSignatureKeyInformation("
"unique_identifier='100182d5-72b8-47aa-8383-4d97d512e98a', "
"cryptographic_parameters=CryptographicParameters("
"block_cipher_mode=BlockCipherMode.CBC, "
"padding_method=None, "
"hashing_algorithm=None, "
"key_role_type=None, "
"digital_signature_algorithm=None, "
"cryptographic_algorithm=None, "
"random_iv=None, "
"iv_length=None, "
"tag_length=None, "
"fixed_field_length=None, "
"invocation_field_length=None, "
"counter_length=None, "
"initial_counter_value=None)), "
"attribute_names=["
"'Cryptographic Algorithm', 'Cryptographic Length'], "
"encoding_option=EncodingOption.TTLV_ENCODING)"
)
observed = repr(key_wrapping_specification)
self.assertEqual(expected, observed)
def test_str(self):
"""
Test that str can be applied to a KeyWrappingSpecification struct.
"""
key_wrapping_specification = objects.KeyWrappingSpecification(
wrapping_method=enums.WrappingMethod.ENCRYPT,
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
mac_signature_key_information=objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
),
attribute_names=[
'Cryptographic Algorithm',
'Cryptographic Length'
],
encoding_option=enums.EncodingOption.TTLV_ENCODING
)
expected = str({
'wrapping_method': enums.WrappingMethod.ENCRYPT,
'encryption_key_information': objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
'mac_signature_key_information':
objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
),
'attribute_names': [
'Cryptographic Algorithm',
'Cryptographic Length'
],
'encoding_option': enums.EncodingOption.TTLV_ENCODING
})
observed = str(key_wrapping_specification)
self.assertEqual(expected, observed)
|
import argparse
import copy
import json
import os
import pathlib
import queue
import subprocess
import sys
import threading
import time
import traceback
from configparser import ConfigParser
from dataclasses import dataclass, field
from datetime import datetime
from typing import List
import yaml
from mako.template import Template
# Variable-set names: add_variable_sets() always loads "default" first and
# layers "user" (if its YAML file exists) on top of it.
DEFAULT_VARSET = 'default'
USER_VARSET = 'user'
# Scratch directory where per-run working directories are created.
RAY_WORKING_DIR = pathlib.Path('./.tmp_pipeline_ray/')
# Name of the generated file listing the scene files for a run.
SCENE_LIST_FILENAME = "scenes_single_scene.txt"
# Timestamp format used for sortable working-directory names.
DATE_FORMAT = '%Y%m%d-%H%M%S'
# Periods in seconds; presumably consumed by code outside this chunk
# (resume-state saving and status printing) — confirm against callers.
RESUME_SAVE_PERIOD_SECONDS = 5*60
STATUS_PRINT_PERIOD_SECONDS = 30
@dataclass
class EvalRunStatus:
    """Scene counts and per-file statuses for a single eval run."""
    total_scenes: int
    # Scenes that have finished successfully / unsuccessfully so far.
    success_scenes: int = 0
    failed_scenes: int = 0
    # Maps scene file name -> status string (e.g. "SUCCESS").
    files: dict = field(default_factory=dict)
@dataclass
class EvalParams:
    """Parameters describing one eval run: variable sets, scenes, overrides."""
    varset: List[str]
    scene_dir: str
    metadata: str = "level2"
    # Per-run overrides layered on top of the variable-set values.
    override: dict = field(default_factory=dict)
    # Populated while the run executes; None until then.
    status: EvalRunStatus = None
    # Explicit scene file names; empty means "all files in scene_dir".
    file_names: List[str] = field(default_factory=list)

    def __post_init__(self):
        # "default" is always loaded implicitly (see add_variable_sets), so
        # strip it to avoid listing it twice.  Narrowed from a bare except:
        # ValueError is what list.remove raises when the item is absent.
        try:
            self.varset.remove("default")
        except ValueError:
            pass

    def get_key(self):
        """Return a key identifying this run configuration."""
        varset_str = "-".join(self.varset)
        return f"{self.scene_dir}-{varset_str}-{self.metadata}"

    def get_resume_eval_params(self):
        """Return a copy listing only the scenes that still need to run.

        Returns None when there is a status and every scene succeeded.
        """
        ep = EvalParams(
            copy.deepcopy(self.varset),
            self.scene_dir,
            self.metadata,
            copy.deepcopy(self.override),
        )
        if self.status:
            file_list = []
            if self.file_names:
                file_list = self.file_names
            else:
                # No explicit list: consider every regular file in scene_dir.
                # (renamed from `all`, which shadowed the builtin)
                entries = os.listdir(self.scene_dir)
                for file in entries:
                    if os.path.isfile(os.path.join(self.scene_dir, file)):
                        file_list.append(file)
            for filename in file_list:
                # Re-run anything that never ran or did not succeed.
                if (
                    filename not in self.status.files
                    or self.status.files.get(filename, "") != "SUCCESS"
                ):
                    ep.file_names.append(filename)
            if not ep.file_names:
                ep = None
        return ep

    def get_yaml_dict(self):
        """Returns dictionary to write as yaml"""
        # for some reason deep copy fixes a yaml issue
        return {
            "varset": copy.deepcopy(self.varset),
            "metadata": [self.metadata],
            "dirs": [self.scene_dir],
            "override": self.override or {},
            "files": self.file_names or [],
        }
@dataclass
class EvalGroupsStatus:
    """Aggregate status across eval groups, with progress/ETA helpers."""
    total_groups: int
    finished_groups: int = 0
    # Maps run key -> EvalRunStatus.  Declared as a dataclass field so each
    # instance gets its own dict; the original class-level `run_statuses = {}`
    # was shared by every instance (mutable class-attribute bug).
    run_statuses: dict = field(default_factory=dict)

    def __post_init__(self):
        # Start time, used to estimate the remaining time.
        self._start = datetime.now()

    def update_run_status(self, key: str, run_status: EvalRunStatus):
        """Record (or replace) the status of a single run."""
        self.run_statuses[key] = run_status

    def update_dry_run_status(self, key):
        """Mark every scene of the given run as succeeded (dry runs only)."""
        self.run_statuses[key].success_scenes = (
            self.run_statuses[key].total_scenes
        )

    def get_progress_string(self):
        """Return a human-readable group/scene progress summary."""
        success = 0
        failed = 0
        total = 0
        for s in self.run_statuses.values():
            success += s.success_scenes
            failed += s.failed_scenes
            total += s.total_scenes
        fg = self.finished_groups
        fs = success + failed
        tg = self.total_groups
        ts = total
        # Guard against division by zero before any run is registered.
        gp = "{:.1%}".format(fg / tg) if tg else "n/a"
        sp = "{:.1%}".format(fs / ts) if ts else "n/a"
        # "{:.1%}" already appends the percent sign; the original added a
        # second one (printing e.g. "50.0%%").
        return f"Groups: {fg}/{tg} ({gp}) - scenes: {fs}/{ts} ({sp})"

    def get_timing_string(self):
        """Return elapsed time, per-scene rate, and estimated time left."""
        finished = 0
        total = 0
        for s in self.run_statuses.values():
            finished += s.success_scenes
            finished += s.failed_scenes
            total += s.total_scenes
        elapsed = datetime.now() - self._start
        scenes_left = total - finished
        if finished == 0:
            # Nothing has finished yet, so no rate can be computed.
            sec_per_scene = "unknown"
            time_left = "unknown"
        else:
            sec_per_scene = elapsed / finished
            time_left = scenes_left * sec_per_scene
        return f"Elapsed: {elapsed} seconds per scene: {sec_per_scene} time left: {time_left}"
class LogTailer:
    """Reads a log file as it is written, optionally echoing each line and
    firing callbacks when registered trigger substrings appear."""
    # based off code and comments from
    # https://stackoverflow.com/questions/12523044/how-can-i-tail-a-log-file-in-python
    _file = None
    _terminate = False
    _thread = None
    _triggers = []

    def __init__(self, file, log_prefix="", print_logs=False, id=""):
        # (trigger_substring, callback) pairs; set per instance so instances
        # never share the class-level default list.
        self._triggers = []
        self._file = file
        self._log_prefix = log_prefix
        self._print_logs = print_logs
        self._id = f"{id}-" if id else ""

    def add_trigger(self, trigger_str, callback):
        """Invoke callback(line) for every tailed line containing trigger_str."""
        self._triggers.append((trigger_str, callback))

    def _check_triggers(self, line):
        if not isinstance(line, str):
            return
        for trigger in self._triggers:
            try:
                if trigger[0] in line:
                    trigger[1](line)
            except Exception:
                # A failing callback must not kill the tailing loop.  Narrowed
                # from a bare `except:`, which would also swallow
                # SystemExit/KeyboardInterrupt.
                print(f"Failed to run trigger: {trigger}")
                traceback.print_exc()

    def stop(self):
        """Will stop the tailing and end the thread if non-blocking"""
        self._terminate = True
        if self._thread:
            self._thread.join()

    def tail_non_blocking(self):
        """Tails a file without blocking by using a thread. Can only be called once per instance."""
        if not self._thread:
            self._terminate = False
            self._thread = threading.Thread(
                target=self.tail_blocking,
                daemon=True,
                name=f"tail-{self._id}{self._file}",
            )
            self._thread.start()

    def tail_blocking(self):
        """Tails a file by blocking the calling thread."""
        for line in self._get_tail_lines(self._file):
            # sys.stdout.write works better with new lines
            self._check_triggers(line)
            if self._print_logs:
                sys.stdout.write(f"{self._log_prefix}{line}")

    def _get_tail_lines(self, file):
        # Yield lines as they appear; at EOF, poll every 0.1s until stop()
        # sets _terminate.
        with open(file, "r") as f:
            while True:
                line = f.readline()
                if line:
                    yield line
                elif self._terminate:
                    break
                else:
                    time.sleep(0.1)
def get_now_str() -> str:
    """Returns a date as a string in a sortable format."""
    current = datetime.now()
    return current.strftime(DATE_FORMAT)
def add_variable_sets(varsets, varset_directory):
    """Load and merge YAML variable sets; later sets override earlier ones.

    The "default" set is always loaded first, then the "user" set (if its
    YAML file exists), then the sets the caller requested, in order.

    Args:
        varsets: list of variable-set names (without the .yaml suffix).
        varset_directory: directory containing the YAML files; assumed to
            end with a path separator.

    Returns:
        dict of merged variables.
    """
    varsets = copy.deepcopy(varsets)
    # Renamed from `vars`, which shadowed the builtin.
    merged = {}
    if USER_VARSET not in varsets and os.path.exists(f'{varset_directory}{USER_VARSET}.yaml'):
        varsets.insert(0, USER_VARSET)
    if DEFAULT_VARSET not in varsets:
        varsets.insert(0, DEFAULT_VARSET)
    for varset in varsets:
        with open(f"{varset_directory}{varset}.yaml") as def_file:
            new_vars = yaml.safe_load(def_file)
            merged = {**merged, **new_vars}
    return merged
def execute_shell(cmd, log_file=None):
    """Run `cmd` through the shell, timestamping its output via `ts`.

    By passing all the commands into this function, the method of
    executing the shell can easily be changed later. This could be useful
    if we want to capture the logging.

    Args:
        cmd: shell command line to run.
        log_file: if given, append combined stdout/stderr to this file;
            otherwise output goes to the parent's stdout.
    """
    # `unbuffer` defeats stdout buffering; `ts -S` prefixes timestamps.
    cmd = f"unbuffer {cmd} 2>&1 | ts -S"
    if log_file:
        with open(log_file, "a") as f:
            # Fixed: the original passed [cmd, "|", "ts"] with shell=True.
            # With a list plus shell=True, the extra items become shell
            # positional parameters rather than a pipeline — and `cmd`
            # already pipes through `ts -S` above.
            subprocess.run(
                cmd, stdout=f, stderr=subprocess.STDOUT, shell=True
            )
    else:
        subprocess.run(cmd, shell=True)
class RayJobRunner:
    """Thin wrapper around the `ray` CLI (up / rsync_up / exec / submit)."""
    _config_file = None
    _log_file = None

    def __init__(self, config_file: pathlib.Path, log_file=None) -> None:
        self._log_file = log_file
        # Accept either a string path or a pathlib.Path.
        if isinstance(config_file, pathlib.Path):
            self._config_file = config_file
        else:
            self._config_file = pathlib.Path(config_file)

    def _run(self, cmd):
        # Every CLI invocation funnels through execute_shell with our log.
        execute_shell(cmd, self._log_file)

    def up(self):
        """Start the Ray cluster described by the config file."""
        self._run(f"ray up -y {self._config_file.as_posix()}")

    def rsync_up(self, source, dest):
        """Copy local files up to the cluster."""
        self._run(
            f"ray rsync_up -v {self._config_file.as_posix()} {source} '{dest}'"
        )

    def exec(self, cmd):
        """Run a shell command on the cluster head node."""
        self._run(f'ray exec {self._config_file.as_posix()} "{cmd}"')

    def submit(self, file, *args):
        """Submit a Python script to the cluster with the given arguments."""
        params = " ".join(args)
        self._run(
            f"ray submit {self._config_file.as_posix()} {file} {params}"
        )
class EvalRun:
dry_run = False
ray_cfg_file = None
mcs_cfg_file = None
submit_params = ""
remote_scene_location = None
remote_scene_list = None
scene_list_file = None
ray_locations_config = None
team = None
metadata = "level2"
local_scene_dir = None
set_status_holder = None
def __init__(
self,
eval,
disable_validation=False,
dev_validation=False,
log_file=None,
cluster="",
output_logs=False,
dry_run=False,
base_dir="mako",
group_working_dir=RAY_WORKING_DIR,
) -> pathlib.Path:
self.eval = eval
self.status = self.eval.status
override_params = eval.override
self.dry_run = dry_run
self.metadata = eval.metadata
self.log_file = log_file
self.local_scene_dir = eval.scene_dir
varset = eval.varset
self.key = eval.get_key()
# Get Variables
varset_directory = f"{base_dir}/variables/"
vars = add_variable_sets(varset, varset_directory=varset_directory)
vars = {**vars, **override_params}
vars["metadata"] = self.metadata
# Setup Tail
if log_file:
self.log_trailer = LogTailer(
log_file, f"c{cluster}: ", print_logs=output_logs, id=cluster
)
self.log_trailer.add_trigger("JSONSTATUS:", self.parse_status)
self.log_trailer.tail_non_blocking()
# Setup working directory
now = get_now_str()
team = vars.get("team", "none")
self.team = team
suffix = f"-{cluster}" if cluster else ""
working_name = f"{now}-{team}{suffix}"
group_working_dir.mkdir(exist_ok=True, parents=True)
working = group_working_dir / working_name
working.mkdir()
self.scene_list_file = working / SCENE_LIST_FILENAME
self.working_dir = working
self.ray_locations_config = f"configs/{team}_aws.ini"
# Generate Ray Config
ray_cfg_template = Template(
filename=f"{base_dir}/templates/ray_template_aws.yaml"
)
ray_cfg = ray_cfg_template.render(**vars)
ray_cfg_file = working / f"ray_{team}_aws.yaml"
ray_cfg_file.write_text(ray_cfg)
self.ray_cfg_file = ray_cfg_file
# Generate MCS config
mcs_cfg_template = Template(
filename=f"{base_dir}/templates/mcs_config_template.ini"
)
mcs_cfg = mcs_cfg_template.render(**vars)
mcs_cfg_file = working / f"mcs_config_{team}_{self.metadata}.ini"
mcs_cfg_file.write_text(mcs_cfg)
self.mcs_cfg_file = mcs_cfg_file
# Find and read Ray locations config file
# source aws_scripts/load_ini.sh $RAY_LOCATIONS_CONFIG
parser = ConfigParser()
parser.read(self.ray_locations_config)
self.remote_scene_location = parser.get("MCS", "scene_location")
self.remote_scene_list = parser.get("MCS", "scene_list")
# Create list of scene files
files = os.listdir(self.local_scene_dir)
with open(self.scene_list_file, "w") as scene_list_writer:
for file in files:
# does file exist and is it in the file list (if we have a list)
if os.path.isfile(
os.path.join(self.local_scene_dir, file)
) and (not eval.file_names or file in eval.file_names):
scene_list_writer.write(file)
scene_list_writer.write("\n")
scene_list_writer.close()
self.submit_params = (
"--disable_validation" if disable_validation else ""
)
self.submit_params += " --dev" if dev_validation else ""
def parse_status(self, line):
json_str = line.split("JSONSTATUS:")[-1]
status = json.loads(json_str)
succ = status["Succeeded"]
fail = status["Failed"]
total = status["Total"]
files = {}
file_statuses = status["statuses"]
for fs in file_statuses:
file = fs["scene_file"]
files[file] = fs["status"]
self.status.files = files
self.status.total_scenes = total
self.status.success_scenes = succ
self.status.failed_scenes = fail
complete_percent = "{:.1%}".format((succ + fail) / total)
sp = "{:.1%}".format(succ / total)
fp = "{:.1%}".format(fail / total)
print(
f"Group Status - {self.local_scene_dir}-{self.metadata}:"
f"\n Finished {succ + fail}/{total} ({complete_percent}) "
f"\n Success: {succ}/{total} ({sp}) Failed: {fail}/{total} ({fp})"
)
(self.working_dir / "status.txt").write_text(json_str)
if self.status_holder:
self.status_holder.update_run_status(self.key, self.status)
def set_status_holder(self, holder):
self.status_holder = holder
    def run_eval(self):
        """Run this eval group, either as a dry run or on a real Ray cluster.

        For a real run: brings the cluster up, rsyncs code/configs/scenes to
        the head node, then submits the pipeline script. The order of the
        rsync/exec calls matters (the remote scene dir must exist before the
        scenes are copied into it).
        """
        log_file = self.log_file
        # should probably only run once?
        if self.dry_run:
            # currently we need to sleep just so the timestamp isn't the same
            execute_shell("sleep 2", log_file=log_file)
            if self.status_holder:
                self.status_holder.update_dry_run_status(self.key)
        else:
            # Start Ray and run ray commands
            ray = RayJobRunner(self.ray_cfg_file, log_file=log_file)
            # Create config file
            # metadata level
            ray.up()
            # Push local code and per-team deploy files to the head node.
            ray.rsync_up("pipeline", "~")
            ray.rsync_up(f"deploy_files/{self.team}/", "~")
            ray.rsync_up("configs/", "~/configs/")
            ray.rsync_up(self.mcs_cfg_file.as_posix(), "~/configs/")
            # Ensure the remote scene directory exists before copying scenes.
            ray.exec(f"mkdir -p {self.remote_scene_location}")
            ray.rsync_up(f"{self.local_scene_dir}/",
                         self.remote_scene_location)
            ray.rsync_up(
                self.scene_list_file.as_posix(), self.remote_scene_list
            )
            remote_cfg_path = f"configs/{self.mcs_cfg_file.name}"
            ray.submit(
                "ray_scripts/pipeline_ray.py",
                self.ray_locations_config,
                remote_cfg_path,
                self.submit_params,
            )
        # Stop tailing the log whether or not this was a dry run.
        if self.log_trailer:
            self.log_trailer.stop()
def create_eval_set_from_folder(
    varset: List[str],
    base_dir: str,
    metadata: str = "level2",
    override: dict = None,
):
    """Create one EvalParams per scene sub-directory of base_dir.

    :param varset: variable-set names applied to every created EvalParams
    :param base_dir: directory whose immediate sub-directories hold scenes
    :param metadata: metadata level used for every run (default "level2")
    :param override: extra override values copied into each run; a per-run
        "log_name" is always added. Defaults to an empty dict.
    :return: list of EvalParams, one per sub-directory
    """
    # BUG FIX: the original used a mutable default (override={}), which is
    # shared across calls and was mutated via my_override's "log_name" only
    # by luck of the deepcopy; use the None-sentinel idiom instead.
    override = {} if override is None else override
    eval_set = []
    for entry in os.listdir(base_dir):
        scene_dir = os.path.join(base_dir, entry)
        if not os.path.isdir(scene_dir):
            continue
        my_override = copy.deepcopy(override)
        my_override["log_name"] = f"{entry}-{metadata}.log"
        eval_set.append(
            EvalParams(
                varset, scene_dir, metadata=metadata, override=my_override
            )
        )
    return eval_set
def set_status_for_set(eval_set):
    """Create per-run EvalRunStatus objects and a group-level status.

    Counts the scenes each run will process: either the explicitly listed
    files that exist on disk, or every .json file in the scene directory.
    """
    total_scenes = 0
    group_status = EvalGroupsStatus(len(eval_set))
    for run in eval_set:
        scene_dir = run.scene_dir
        if run.file_names:
            # Only count listed files that actually exist; warn otherwise.
            run_scenes = 0
            for name in run.file_names:
                path = os.path.join(scene_dir, name)
                if os.path.exists(path):
                    run_scenes += 1
                else:
                    print(
                        f"Failed to find file: {path}. Skipping file."
                    )
        else:
            # No explicit list: every JSON file in the directory is a scene.
            run_scenes = sum(
                1
                for name in os.listdir(scene_dir)
                if name.endswith(".json")
                and os.path.isfile(os.path.join(scene_dir, name))
            )
        total_scenes += run_scenes
        run.status = EvalRunStatus(run_scenes)
        group_status.update_run_status(run.get_key(), run.status)
    return group_status
def print_status_periodically(status: EvalGroupsStatus, periodic_seconds):
    """Daemon loop: print group progress and timing every periodic_seconds."""
    while True:
        time.sleep(periodic_seconds)
        for line in (
            "Status:",
            f" {status.get_progress_string()}",
            f" {status.get_timing_string()}",
        ):
            print(line)
def save_config_periodically(
    eval_set: List[EvalParams], periodic_seconds, working_dir
):
    """Daemon loop: periodically dump a resume config for unfinished runs.

    The resume.yaml written here can be fed back into the script to restart
    only the runs that have not completed yet.
    """
    while True:
        time.sleep(periodic_seconds)
        pending = [run.get_resume_eval_params() for run in eval_set]
        entries = [run.get_yaml_dict() for run in pending if run]
        resume_file = working_dir / "resume.yaml"
        with open(resume_file, "w") as handle:
            yaml.dump({"eval-groups": entries}, handle)
        print(f"wrote resume file {resume_file.as_posix()}")
def run_evals(
    eval_set: List[EvalParams],
    num_clusters=3,
    dev=False,
    disable_validation=False,
    output_logs=False,
    dry_run=False,
    base_dir="mako",
):
    """Run every eval in eval_set, distributing them across num_clusters
    worker threads (one Ray cluster per thread).

    Also starts two daemon threads: one printing group status periodically
    and one saving a resume config periodically.
    """
    # Work queue shared by all cluster-runner threads.
    q = queue.Queue()
    for eval in eval_set:
        q.put(eval)
    group_working_dir = RAY_WORKING_DIR / get_now_str()
    all_status = set_status_for_set(eval_set)
    t = threading.Thread(
        target=print_status_periodically,
        args=(all_status, STATUS_PRINT_PERIOD_SECONDS),
        daemon=True,
        name="status-printer",
    )
    t.start()
    t = threading.Thread(
        target=save_config_periodically,
        args=(eval_set, RESUME_SAVE_PERIOD_SECONDS, group_working_dir),
        daemon=True,
        name="status-saver",
    )
    t.start()
    def run_eval_from_queue(num, dev=False):
        # Worker body: drain the shared queue, running one eval at a time
        # on cluster `num`.
        log_dir_path = "logs-test"
        log_dir = pathlib.Path(log_dir_path)
        log_dir.mkdir(parents=True, exist_ok=True)
        last_config_file = None
        while not q.empty():
            eval = q.get()
            override = eval.override
            override["clusterSuffix"] = f"-{num}"
            print(
                f"Starting eval from {eval.scene_dir} at {eval.metadata} in cluster {num}"
            )
            # NOTE(review): if override has no "log_name", log_file is never
            # bound and the execute_shell call below would raise — confirm
            # every eval carries a log_name.
            log_file_name = override.get("log_name")
            if log_file_name:
                log_file = log_dir / pathlib.Path(log_file_name)
                log_file.unlink(missing_ok=True)
            execute_shell("echo Starting `date`", log_file)
            eval_run = EvalRun(
                eval,
                log_file=log_file,
                cluster=num,
                disable_validation=disable_validation,
                dev_validation=dev,
                output_logs=output_logs,
                dry_run=dry_run,
                base_dir=base_dir,
                group_working_dir=group_working_dir,
            )
            eval_run.set_status_holder(all_status)
            eval_run.run_eval()
            last_config_file = eval_run.ray_cfg_file
            all_status.finished_groups += 1
            # all_status.finished_scenes += eval.status.total_scenes
            execute_shell("echo Finishing `date`", log_file)
            print(
                f"Finished eval from {eval.scene_dir} at {eval.metadata} in cluster {num}"
            )
            print(f"  {all_status.get_progress_string()}")
            print(f"  {all_status.get_timing_string()}")
        print(f"Finished with cluster {num}")
        # Tear down the last cluster config used by this worker.
        execute_shell(f"ray down -y {last_config_file.as_posix()}", log_file)
    threads = []
    for i in range(num_clusters):
        t = threading.Thread(
            target=run_eval_from_queue,
            args=((i + 1), dev),
            name=f"runner-cluster-{i+1}",
        )
        t.start()
        threads.append(t)
    # Block until every cluster-runner thread has drained the queue.
    for t in threads:
        t.join()
def force_array(val):
    """Return val unchanged when it is already a list; otherwise wrap it
    in a single-element list."""
    if isinstance(val, list):
        return val
    return [val]
def get_array(group, base, field):
    """Look up `field` with group values taking precedence over base values.

    Falls back to an empty list when neither mapping has the field, and the
    result is always coerced to a list via force_array.
    """
    value = group.get(field, base.get(field, []))
    return force_array(value)
def create_eval_set_from_file(cfg_file: str, super_override: dict = None) -> List[EvalParams]:
    """Creates an array of EvalParams to run an eval from a configuration
    file. See Readme for details of config file.

    Args:
        cfg_file (str): config file
        super_override (dict, optional): Adds to and overrides override from
            files. Defaults to None (treated as an empty dict).

    Returns:
        (list(EvalParams)): List of parameters for eval runs
    """
    # None-sentinel instead of a shared mutable default dict.
    super_override = {} if super_override is None else super_override
    with open(cfg_file, "r") as reader:
        cfg = yaml.safe_load(reader)
    base = cfg.get("base", {})
    eval_groups = force_array(cfg.get("eval-groups", []))
    evals = []
    for group in eval_groups:
        my_base = copy.deepcopy(base)
        varset = copy.deepcopy(get_array(group, my_base, "varset"))
        metadata_list = get_array(group, my_base, "metadata")
        # BUG FIX: the original read `group.get(field, ...)` where `field`
        # is undefined (NameError at runtime); the key is "override".
        override = group.get("override", my_base.get("override", {}))
        # apply super override
        if super_override:
            for key, value in super_override.items():
                override[key] = value
        files = get_array(group, base, "files")
        for metadata in metadata_list:
            parents = get_array(group, my_base, "parent-dir")
            dirs = get_array(group, my_base, "dirs")
            for dir in dirs:
                log_dir = dir.split("/")[-1]
                my_override = copy.deepcopy(override) if override else {}
                my_override["log_name"] = f"{log_dir}-{metadata}.log"
                eval = EvalParams(varset, dir, metadata, override=my_override)
                if files:
                    eval.file_names = files
                evals.append(eval)
            for parent in parents:
                new_evals = create_eval_set_from_folder(
                    varset, parent, metadata, override
                )
                evals += new_evals
    return evals
def _args_to_override(args) -> dict:
override = {}
if args.num_workers and args.num_workers > 0:
override['workers'] = args.num_workers
if args.cluster_user:
override['clusterUser'] = f"-{args.cluster_user}"
return override
def run_from_config_file(args):
    """Load the eval set described by args.config_file and run it with the
    options taken from the parsed CLI arguments."""
    overrides = _args_to_override(args)
    eval_set = create_eval_set_from_file(args.config_file, overrides)
    run_evals(
        eval_set,
        dev=args.dev_validation,
        disable_validation=args.disable_validation,
        num_clusters=args.num_clusters,
        output_logs=args.redirect_logs,
        dry_run=args.dry_run,
        base_dir=args.base_dir,
    )
def parse_args():
    """Define and parse the command line interface for the eval runner."""
    parser = argparse.ArgumentParser(
        description="Run multiple eval sets containing scenes using ray."
    )
    # Each entry is (flags, keyword arguments) handed to add_argument.
    # --config_file is required but still needs tag because varset can have variable parameters
    arg_specs = [
        (
            ("--config_file", "-c"),
            dict(
                required=True,
                help="Path to config file which contains details on how exactly to run a series of eval runs."
                + "for the eval files to run.",
            ),
        ),
        (
            ("--base_dir", "-b"),
            dict(
                default="mako",
                help="Base directory that should contain a templates directory and variables directory.",
            ),
        ),
        (
            ("--dev_validation", "-d"),
            dict(
                default=False,
                action="store_true",
                help="Whether or not to validate for development instead of production",
            ),
        ),
        (
            ("--disable_validation",),
            dict(
                default=False,
                action="store_true",
                help="Whether or not to skip validatation of MCS config file",
            ),
        ),
        (
            ("--dry_run",),
            dict(
                default=False,
                action="store_true",
                help="If set, do not actually run anything in ray. Just creates and parses config files.",
            ),
        ),
        (
            ("--redirect_logs", "-r"),
            dict(
                default=False,
                action="store_true",
                help="Whether or not to copy output logs to stdout",
            ),
        ),
        (
            ("--num_clusters", "-n"),
            dict(
                type=int,
                default=1,
                help="How many simultanous clusters should be used.",
            ),
        ),
        (
            ("--num_workers", "-w"),
            dict(
                type=int,
                default=None,
                help="How many simultanous workers for each cluster. This will override any value in varsets.",
            ),
        ),
        (
            ("--cluster_user", "-u"),
            dict(
                type=str,
                default=None,
                help="Tags the cluster with a the username provided with this parameter.",
            ),
        ),
    ]
    for flags, kwargs in arg_specs:
        parser.add_argument(*flags, **kwargs)
    return parser.parse_args()
if __name__ == "__main__":
    # CLI entry point: parse arguments, then build and run the eval sets.
    args = parse_args()
    run_from_config_file(args)
|
import itertools
import os
from random import randint, uniform
import numpy as np
import copy as cp
import pandas as pd
from skmultiflow.core import BaseSKMObject, MetaEstimatorMixin, ClassifierMixin
from skmultiflow.data import RandomTreeGenerator, DataStream
from skmultiflow.evaluation import EvaluatePrequential
from skmultiflow.trees import HoeffdingTreeClassifier
from skmultiflow.metrics import ClassificationPerformanceEvaluator
from skmultiflow.utils import get_dimensions, normalize_values_in_dict
from skmultiflow.data import RandomTreeGenerator, SEAGenerator
from skmultiflow.trees import HoeffdingAdaptiveTreeClassifier
from skmultiflow.meta import AdaptiveRandomForestClassifier, \
StreamingRandomPatchesClassifier
from skmultiflow.meta.adaptive_random_forests import ARFBaseLearner
from tests import TEST_DIRECTORY
class ExtendedHoeffdingAdaptiveTreeClassifier(HoeffdingAdaptiveTreeClassifier):
    """Hoeffding adaptive tree that can clone itself via new_instance()."""

    def __init__(self, max_byte_size, memory_estimate_period, grace_period,
                 split_criterion, split_confidence, tie_threshold,
                 binary_split, stop_mem_management,
                 no_preprune, leaf_prediction, nb_threshold,
                 nominal_attributes
                 ):
        super().__init__(max_byte_size, memory_estimate_period, grace_period,
                         split_criterion, split_confidence, tie_threshold,
                         binary_split, stop_mem_management,
                         no_preprune, leaf_prediction, nb_threshold,
                         nominal_attributes)

    def new_instance(self):
        """Return a fresh, untrained classifier configured like this one."""
        params = dict(
            max_byte_size=self.max_byte_size,
            memory_estimate_period=self.memory_estimate_period,
            grace_period=self.grace_period,
            split_criterion=self.split_criterion,
            split_confidence=self.split_confidence,
            tie_threshold=self.tie_threshold,
            binary_split=self.binary_split,
            stop_mem_management=self.stop_mem_management,
            no_preprune=self.no_preprune,
            leaf_prediction=self.leaf_prediction,
            nb_threshold=self.nb_threshold,
            nominal_attributes=self.nominal_attributes,
        )
        return ExtendedHoeffdingAdaptiveTreeClassifier(**params)
class HoeffdingForestClassifier(AdaptiveRandomForestClassifier):
    """Adaptive random forest whose base learners are extended
    Hoeffding adaptive trees."""

    def __init__(self, n_estimators, classes):
        super().__init__(n_estimators, classes)
        # Running-accuracy bookkeeping, updated by the cascade learner.
        self.accuracy_per_sample = []
        self.number_of_correct_predictions = 0

    def _init_ensemble(self, X):
        """Build the base-learner pool once the feature count is known."""
        self._set_max_features(get_dimensions(X)[1])

        def _new_tree():
            # Propagate every forest hyper-parameter to the tree learner.
            return ExtendedHoeffdingAdaptiveTreeClassifier(
                max_byte_size=self.max_byte_size,
                memory_estimate_period=self.memory_estimate_period,
                grace_period=self.grace_period,
                split_criterion=self.split_criterion,
                split_confidence=self.split_confidence,
                tie_threshold=self.tie_threshold,
                binary_split=self.binary_split,
                stop_mem_management=self.stop_mem_management,
                no_preprune=self.no_preprune,
                leaf_prediction=self.leaf_prediction,
                nb_threshold=self.nb_threshold,
                nominal_attributes=self.nominal_attributes,
            )

        self.ensemble = [
            ARFBaseLearner(
                index_original=i,
                classifier=_new_tree(),
                instances_seen=self.instances_seen,
                drift_detection_method=self.drift_detection_method,
                warning_detection_method=self.warning_detection_method,
                is_background_learner=False,
            )
            for i in range(self.n_estimators)
        ]
class DeepStreamLearner(BaseSKMObject, ClassifierMixin, MetaEstimatorMixin):
    """Two-layer cascade of Hoeffding forests for streaming classification.

    The first layer trains on the raw features; the second layer trains on
    the raw features concatenated with the first layer's class-probability
    output (a streaming variant of deep-forest cascading).
    """

    def __init__(self, n_ensembel_estimators=10, n_ensembels=2, classes=None):
        # Prototype ensemble that is deep-copied for each cascade layer.
        self.base_ensemble_learner = HoeffdingForestClassifier(
            n_ensembel_estimators,
            classes
        )
        # NOTE(review): stored but not used anywhere yet — confirm intent.
        self.n_ensembles = n_ensembels
        self.ensembel_learners = None
        self.last_layer_cascade = None
        self.number_of_samples = 0
        self.accuracy = []
        self.number_of_correct_predictions = 0
        self.classes = classes

    def _init_cascades(self, X, y):
        """Create and seed both cascade layers from the first batch."""
        first_cascade = cp.deepcopy(self.base_ensemble_learner)
        first_cascade.partial_fit(X, y, self.classes)
        first_layer_class_distribution = first_cascade.predict_proba(X)
        extended_features = np.concatenate(
            (X, first_layer_class_distribution),
            axis=1
        )
        second_cascade = cp.deepcopy(self.base_ensemble_learner)
        second_cascade.partial_fit(extended_features, y, self.classes)
        self.ensembel_learners = [first_cascade, second_cascade]
        self.first_layer_cascade = first_cascade
        self.last_layer_cascade = second_cascade

    def partial_fit(self, X, y=None, classes=None, sample_weight=None):
        """Incrementally train both cascade layers on the batch (X, y)."""
        self.number_of_samples += np.shape(X)[0]
        if self.ensembel_learners is None:
            self._init_cascades(X, y)
        else:
            self.first_layer_cascade.partial_fit(X, y, self.classes)
            first_layer_prediction = self.first_layer_cascade.predict_proba(X)
            extended_features = np.concatenate((X, first_layer_prediction), axis=1)
            self.last_layer_cascade.partial_fit(extended_features, y, self.classes)
        return self

    def predict(self, X, y=None):
        """Predict class labels; also tracks running accuracy when y is given."""
        y_proba = self.predict_proba(X, y)
        y_pred = np.argmax(y_proba, axis=1).astype(int)
        # BUG FIX: the original `if y_pred == y:` only works for one-sample
        # batches and raises ValueError (ambiguous truth value) otherwise;
        # count matches element-wise instead.
        if y is not None:
            self.number_of_correct_predictions += int(
                np.sum(y_pred == np.asarray(y))
            )
        self.accuracy.append(
            self.number_of_correct_predictions / self.number_of_samples
        )
        return y_pred

    def predict_proba(self, X, y=None):
        """Return the last layer's class probabilities for X.

        When y is provided, per-layer running accuracies are updated as a
        side effect (used to analyze the individual cascade layers).
        """
        first_layer_predict_proba = self.first_layer_cascade.predict_proba(X)
        # Track first-layer accuracy per sample (element-wise, batch-safe).
        first_layer_prediction = self.first_layer_cascade.predict(X)
        if y is not None:
            self.first_layer_cascade.number_of_correct_predictions += int(
                np.sum(first_layer_prediction == np.asarray(y))
            )
        self.first_layer_cascade.accuracy_per_sample.append(
            self.first_layer_cascade.number_of_correct_predictions / self.number_of_samples
        )
        extended_features = np.concatenate(
            (X, first_layer_predict_proba),
            axis=1
        )
        second_layer_predict_proba = self.last_layer_cascade.predict_proba(
            extended_features
        )
        second_layer_prediction = self.last_layer_cascade.predict(
            extended_features
        )
        if y is not None:
            self.last_layer_cascade.number_of_correct_predictions += int(
                np.sum(second_layer_prediction == np.asarray(y))
            )
        self.last_layer_cascade.accuracy_per_sample.append(
            self.last_layer_cascade.number_of_correct_predictions / self.number_of_samples
        )
        # Averaging both layers was explored but the second layer alone is
        # returned; kept commented for reference.
        # average_proba = (
        #     first_layer_predict_proba + second_layer_predict_proba
        # ) / 2
        return second_layer_predict_proba
def test_adaptive_forest():
    """Smoke test: train a DeepStreamLearner over the whole weather stream.

    Reaching the end without an exception means the cascade trained and
    predicted on every sample.
    """
    test_data_directory = os.path.join(TEST_DIRECTORY, 'data')
    test_file = os.path.join(
        test_data_directory,
        'test_data/weather.csv'
    )
    raw_data = pd.read_csv(test_file)
    stream1 = DataStream(raw_data, name='Test')
    learner5 = DeepStreamLearner(
        n_ensembel_estimators=3,
        classes=stream1.target_values
    )
    # BUG FIX: removed the leftover `import pudb; pudb.set_trace()`
    # breakpoint which hung any automated test run, plus the dead
    # `assert 1 == 1` placeholders and unused second stream.
    stream5_learner = calculate_accuracy(learner5, stream1, stream1.n_samples)
    assert stream5_learner is learner5
    with open(
        os.path.join(test_data_directory, 'test_data/adaptive_test_result.txt'),
        '+w'
    ) as f:
        f.write('stream2 average_accuracy:')
def calculate_accuracy(learner, stream, max_samples=0):
    """Prequential (test-then-train) loop over a data stream.

    :param learner: classifier implementing predict(X, y) and partial_fit
    :param stream: stream with has_more_samples()/next_sample()/target_values
    :param max_samples: upper bound on the number of samples to process
    :return: the (now trained) learner
    """
    # Removed from the original: the no-op `max_samples = max_samples`,
    # the dead `wait_samples` branch that only executed `pass`, and the
    # unused proba_predictions list.
    cnt = 0
    correct_predictions = 0
    while stream.has_more_samples() and cnt < max_samples:
        X, y = stream.next_sample()
        # Test first (one-sample batches, so the array comparison is safe)...
        y_pred = learner.predict(X, y)
        if y_pred == y:
            correct_predictions += 1
        # ...then train on the same sample.
        learner.partial_fit(X, y, classes=stream.target_values)
        cnt += 1
    return learner
|
<gh_stars>0
from conf import *
from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler
import torch
import albumentations as A
import multiprocessing as mp
import numpy as np
import cv2
def collate_fn(batch):
    """Collate a list of per-sample dicts into batched input/target dicts.

    'input' keeps its original dtype; 'idx' and 'target' are cast to long.
    """
    inputs = {"input": torch.stack([sample["input"] for sample in batch])}
    inputs["idx"] = torch.stack([sample["idx"] for sample in batch]).long()
    targets = {
        "target": torch.stack([sample["target"] for sample in batch]).long()
    }
    return inputs, targets
class GLRDataset(Dataset):
    """Landmark-recognition image dataset.

    Reads images from a nested folder layout (the first three characters of
    an image id are sub-directories), optionally caches them in memory, and
    applies augmentation plus one of several normalization schemes.
    """

    def __init__(self, df, suffix='.jpg', preload=False, aug=None, normalization='simple'):
        self.df = df
        self.aug = aug
        self.normalization = normalization
        self.labels = self.df.target.values
        self.img_folder = self.df.img_folder.values
        self.suffix = suffix
        self.image_names = self.df.id.values
        # self.image_names = self.df.images.values # SLY CODE
        self.images_cache = {}
        self.images_in_cache = False
        # Guards against division by zero in the normalization schemes;
        # set before preload so helpers can rely on it.
        self.eps = 1e-6
        if preload:
            self.preload()
            self.images_in_cache = True

    def get_original_item(self, idx):
        """Like __getitem__ but without augmentation or normalization."""
        id_ = self.image_names[idx]
        img_folder_ = self.img_folder[idx]
        if self.images_in_cache:
            img = self.images_cache[id_]
        else:
            img = self.load_one(id_, img_folder_)
        img = img.astype(np.float32)
        tensor = self.to_torch_tensor(img)
        target = torch.tensor(self.labels[idx])
        feature_dict = {'idx': torch.tensor(idx).long(),
                        'input': tensor,
                        'target': target.float()}
        return feature_dict

    def __getitem__(self, idx):
        """Return {'idx', 'input', 'target'} with augmentation/normalization."""
        id_ = self.image_names[idx]
        img_folder_ = self.img_folder[idx]
        if self.images_in_cache:
            img = self.images_cache[id_]
        else:
            img = self.load_one(id_, img_folder_)
        if self.aug:
            img = self.augment(img)
        img = img.astype(np.float32)
        if self.normalization:
            img = self.normalize_img(img)
        tensor = self.to_torch_tensor(img)
        target = torch.tensor(self.labels[idx])
        feature_dict = {'idx': torch.tensor(idx).long(),
                        'input': tensor,
                        'target': target.float()}
        return feature_dict

    def __len__(self):
        return len(self.image_names)

    def preload(self):
        """Load every image into the in-memory cache.

        BUG FIX: the original referenced undefined attributes
        (self.n_threads, self.id), called load_one without its folder
        argument and used the unimported tqdm — it crashed whenever
        preload=True. Load sequentially with the correct arguments.
        """
        for id_, folder in zip(self.image_names, self.img_folder):
            self.images_cache[id_] = self.load_one(id_, folder)

    def load_one(self, id_, img_folder_):
        """Read one image as RGB; on failure return a black 512x512 stub."""
        path = img_folder_ + f'{id_[0]}/{id_[1]}/{id_[2]}/{id_}{self.suffix}'
        try:
            img = cv2.imread(path)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt escapes.
            print("FAIL READING IMG", path)
            img = np.zeros((512, 512, 3), dtype=np.int8)
        return img

    def augment(self, img):
        """Apply the configured augmentation pipeline to the image."""
        img_aug = self.aug(image=img)['image']
        return img_aug.astype(np.float32)

    def normalize_img(self, img):
        """Normalize the image according to self.normalization.

        Supported modes: 'channel', 'channel_mean', 'image', 'simple',
        'inception', 'imagenet'; any other value leaves the image unchanged.
        """
        if self.normalization == 'channel':
            pixel_mean = img.mean((0, 1))
            pixel_std = img.std((0, 1)) + self.eps
            img = (img - pixel_mean[None, None, :]) / pixel_std[None, None, :]
            img = img.clip(-20, 20)
        elif self.normalization == 'channel_mean':
            pixel_mean = img.mean((0, 1))
            img = (img - pixel_mean[None, None, :])
            img = img.clip(-20, 20)
        elif self.normalization == 'image':
            # BUG FIX: the original wrote `/ img.std() + self.eps`, adding
            # eps to the result instead of the divisor, so constant images
            # divided by zero; eps belongs inside the denominator.
            img = (img - img.mean()) / (img.std() + self.eps)
            img = img.clip(-20, 20)
        elif self.normalization == 'simple':
            img = img / 255
        elif self.normalization == 'inception':
            mean = np.array([0.5, 0.5, 0.5], dtype=np.float32)
            std = np.array([0.5, 0.5, 0.5], dtype=np.float32)
            img = img.astype(np.float32)
            img = img / 255.
            img -= mean
            img *= np.reciprocal(std, dtype=np.float32)
        elif self.normalization == 'imagenet':
            mean = np.array([123.675, 116.28, 103.53], dtype=np.float32)
            std = np.array([58.395, 57.120, 57.375], dtype=np.float32)
            img = img.astype(np.float32)
            img -= mean
            img *= np.reciprocal(std, dtype=np.float32)
        else:
            pass
        return img

    def to_torch_tensor(self, img):
        """Convert an HWC numpy image to a CHW torch tensor."""
        return torch.from_numpy(img.transpose((2, 0, 1)))
|
<gh_stars>0
import numpy as np
import pandas as pd
import logging
from ML_Bot_Func.common import *
logger = logging.getLogger('data_parsing')
logger.setLevel(logging.INFO)
def angle(x, y):
    """Return the direction of vector (x, y) in whole degrees in [0, 360)."""
    theta = math.atan2(y, x)
    if theta < 0:
        theta += 2 * math.pi
    return round(theta / math.pi * 180)
def find_winner(data):
    """Return the player whose rank is 1, or -1 if no winner is recorded."""
    winners = (player for player, stats in data['stats'].items()
               if stats['rank'] == 1)
    return next(winners, -1)
def angle_dist(a1, a2):
    """Return the angular difference a1 - a2 wrapped into [0, 360) degrees."""
    delta = a1 - a2
    return (delta + 360) % 360
def find_target_planet(bot_id, current_frame, planets, move):
    """
    Find a planet which the ship tried to go to. We try to find it by looking at the angle that the ship moved
    with and the angle between the ship and the planet.
    :param bot_id: id of bot to imitate
    :param current_frame: current frame
    :param planets: planets data
    :param move: current move to analyze
    :return: id of the planet that ship was moving towards
    """
    if move['type'] == 'dock':
        # If the move was to dock, we know the planet we wanted to move towards
        return move['planet_id']
    if move['type'] != 'thrust':
        # If the move was not "thrust" (i.e. it was "undock"), there is no angle to analyze
        return -1
    ship_angle = move['angle']
    ship_data = current_frame['ships'][bot_id][str(move['shipId'])]
    ship_x = ship_data['x']
    ship_y = ship_data['y']
    # Track the best candidate so far. (NOTE: `optimal_distnace` /
    # `currenet_distance` are typos kept for byte-compatibility.)
    optimal_planet = -1
    optimal_angle = -1
    optimal_distnace = 1000
    for planet_data in planets:
        planet_id = str(planet_data['id'])
        # Skip planets that no longer exist or are destroyed in this frame.
        if planet_id not in current_frame['planets'] or current_frame['planets'][planet_id]['health'] <= 0:
            continue
        planet_x = planet_data['x']
        planet_y = planet_data['y']
        a = angle(planet_x - ship_x, planet_y - ship_y)
        currenet_distance = distance(ship_x, ship_y , planet_x,planet_y)
        # We try to find the planet with minimal angle distance
        if optimal_planet == -1:
            # First live planet becomes the initial candidate.
            optimal_distnace = distance(ship_x, ship_y, planet_x, planet_y)
            optimal_planet = planet_id
            optimal_angle = a
        elif angle_dist(ship_angle, a) < angle_dist(ship_angle, optimal_angle) and not \
                ((angle_dist(ship_angle, optimal_angle) - angle_dist(ship_angle, a) < 7) and
                 (optimal_distnace < currenet_distance)):
            # Prefer the smaller angular distance, but when the angle
            # improvement is insignificant (< 7 degrees) keep the closer
            # planet instead of switching.
            optimal_distnace = distance(ship_x, ship_y, planet_x, planet_y)
            optimal_planet = planet_id
            optimal_angle = a
    return optimal_planet
def format_data_for_training(data):
    """
    Create numpy array with planet features ready to feed to the neural net.
    :param data: parsed features
    :return: numpy array of shape (number of frames, PLANET_MAX_NUM, PER_PLANET_FEATURES)
    """
    training_input = []
    training_output = []
    for features, expected_output in data:
        # Frames with no allocations carry no training signal.
        if not expected_output.values():
            continue
        # Dense feature matrix: zero-pad planets missing from this frame.
        feature_rows = [
            features[str(planet_id)] if str(planet_id) in features
            else [0] * PER_PLANET_FEATURES
            for planet_id in range(PLANET_MAX_NUM)
        ]
        # Dense allocation vector indexed by planet id.
        allocation_vector = [0] * PLANET_MAX_NUM
        for planet_id, fraction in expected_output.items():
            allocation_vector[int(planet_id)] = fraction
        training_input.append(np.array(feature_rows))
        training_output.append(np.array(allocation_vector))
    return np.array(training_input), np.array(training_output)
def serialize_data(data, dump_features_location):
    """
    Serialize all the features into .h5 file.
    :param data: data to serialize
    :param dump_features_location: path to .h5 file where the features should be saved
    """
    # Flatten the nested structure into (game, frame, planet) -> features.
    flat = {}
    for game_id, frame in enumerate(data):
        for frame_id, planets in enumerate(frame):
            for planet_id, planet_features in planets[0].items():
                flat[(game_id, frame_id, planet_id)] = planet_features
    table = pd.DataFrame.from_dict(flat, orient="index")
    table.columns = FEATURE_NAMES
    table.index = pd.MultiIndex.from_tuples(
        table.index, names=["game", "frame", "planet"]
    )
    table.to_hdf(dump_features_location, "training_data")
def parse(all_games_json_data, bot_to_imitate=None, dump_features_location=None):
    """
    Parse the games to compute features. This method computes PER_PLANET_FEATURES features for each planet in each frame
    in each game the bot we're imitating played.
    :param all_games_json_data: list of json dictionaries describing games
    :param bot_to_imitate: name of the bot to imitate or None if we want to imitate the bot who won the most games
    :param dump_features_location: location where to serialize the features
    :return: data ready for training
    """
    print("Parsing data...")
    parsed_games = 0
    training_data = []
    if bot_to_imitate is None:
        print("No bot name provided, choosing the bot with the highest number of games won...")
        # Tally wins per player name across all games.
        players_games_count = {}
        for json_data in all_games_json_data:
            w = find_winner(json_data)
            p = json_data['player_names'][int(w)]
            if p not in players_games_count:
                players_games_count[p] = 0
            players_games_count[p] += 1
        bot_to_imitate = max(players_games_count, key=players_games_count.get)
    print("Bot to imitate: {}.".format(bot_to_imitate))
    for json_data in all_games_json_data:
        frames = json_data['frames']
        moves = json_data['moves']
        width = json_data['width']
        height = json_data['height']
        # For each game see if bot_to_imitate played in it
        if bot_to_imitate not in set(json_data['player_names']):
            continue
        # We train on all the games of the bot regardless whether it won or not.
        bot_to_imitate_id = str(json_data['player_names'].index(bot_to_imitate))
        player_num = len(json_data['player_names'])
        parsed_games = parsed_games + 1
        game_training_data = []
        # Ignore the last frame, no decision to be made there
        for idx in range(len(frames) - 1):
            # previous_frame/previous_planet are only bound for idx > 0 and
            # are only read inside `if idx > 0` blocks below.
            if idx > 0:
                previous_frame = frames[idx-1]
                previous_planet = frames[idx-1]['planets']
            current_moves = moves[idx]
            current_frame = frames[idx]
            # Skip frames where the imitated bot has no ships alive.
            if bot_to_imitate_id not in current_frame['ships'] or len(current_frame['ships'][bot_to_imitate_id]) == 0:
                continue
            planet_features = {} # planet_id -> list of features per ship per planet
            current_planets = current_frame['planets']
            # find % allocation for all ships
            all_moving_ships = 0
            allocations = {}
            # for each planet we want to find how many ships are being moved towards it now
            for ship_id, ship_data in current_frame['ships'][bot_to_imitate_id].items():
                if ship_id in current_moves[bot_to_imitate_id][0]:
                    p = find_target_planet(bot_to_imitate_id, current_frame,
                                           json_data['planets'],
                                           current_moves[bot_to_imitate_id][0][ship_id],
                                           )
                    planet_id = int(p)
                    if planet_id < 0 or planet_id >= PLANET_MAX_NUM:
                        continue
                    if p not in allocations:
                        allocations[p] = 0
                    allocations[p] = allocations[p] + 1
                    all_moving_ships = all_moving_ships + 1
            # Frames with no moving ships provide no allocation target.
            if all_moving_ships == 0:
                continue
            # Compute what % of the ships should be sent to given planet
            for planet_id, allocated_ships in allocations.items():
                allocations[planet_id] = allocated_ships / all_moving_ships
            # Compute features
            for planet_id in range(PLANET_MAX_NUM):
                if str(planet_id) not in current_planets:
                    continue
                planet_data = current_planets[str(planet_id)]
                gravity = 0
                planet_base_data = json_data['planets'][planet_id]
                closest_friendly_ship_distance = 10000
                closest_enemy_ship_distance = 10000
                # ownership: +1 ours, -1 enemy, 0 unowned.
                ownership = 0
                if str(planet_data['owner']) == bot_to_imitate_id:
                    ownership = 1
                elif planet_data['owner'] is not None:
                    ownership = -1
                # calculate the current docking ships' health
                docked_health = 0
                previous_docked_health = 0
                previous_ownership = 0
                docked_ships = current_planets[str(planet_id)]['docked_ships']
                if len(docked_ships) > 0:
                    owner_id = current_planets[str(planet_id)]['owner']
                    for items in docked_ships:
                        docked_health += ownership * current_frame['ships'][str(owner_id)][str(items)]['health']
                # Same docked-health computation for the previous frame.
                if idx > 0:
                    if str(previous_planet[str(planet_id)]['owner']) == bot_to_imitate_id:
                        previous_ownership = 1
                    elif previous_planet[str(planet_id)]['owner'] is not None:
                        previous_ownership = -1
                    previous_docked_ships = previous_planet[str(planet_id)]['docked_ships']
                    if len(previous_docked_ships) > 0:
                        owner_id = previous_planet[str(planet_id)]['owner']
                        for items in previous_docked_ships:
                            # logger.error('planet id %f, previous owner %f, current ship hleath %f'
                            #              % (planet_id,
                            #                 owner_id,
                            #                 previous_frame['ships'][str(owner_id)][str(items)]['health']))
                            previous_docked_health += previous_ownership \
                                * previous_frame['ships'][str(owner_id)][str(items)]['health']
                average_distance = 0
                my_ships_health = 0
                range70_enemy = 0
                range25_enemy = 0
                previous70_enemy = 0
                previous25_enemy = 0
                previous_gravity = 0
                # Aggregate over every ship in the current frame: signed
                # health/distance^2 "gravity", closest distances, and enemy
                # counts within 25 and 70 units.
                for player_id, ships in current_frame['ships'].items():
                    for ship_id, ship_data in ships.items():
                        is_bot_to_imitate = 1 if player_id == bot_to_imitate_id else -1
                        dist2 = distance2(planet_base_data['x'], planet_base_data['y'], ship_data['x'], ship_data['y'])
                        dist = math.sqrt(dist2)
                        gravity = gravity + is_bot_to_imitate * ship_data['health'] / dist2
                        if is_bot_to_imitate == 1:
                            closest_friendly_ship_distance = min(closest_friendly_ship_distance, dist)
                            average_distance = average_distance + dist * ship_data['health']
                            my_ships_health = my_ships_health + ship_data['health']
                        else:
                            closest_enemy_ship_distance = min(closest_enemy_ship_distance, dist)
                            if dist <= 25:
                                range25_enemy += 1
                                range70_enemy += 1
                            elif (dist > 25) and (dist <= 70):
                                range70_enemy += 1
                # Same aggregation for the previous frame (enemy counts only).
                if idx > 0:
                    for player_id, ships in previous_frame['ships'].items():
                        for ship_id, ship_data in ships.items():
                            is_bot_to_imitate = 1 if player_id == bot_to_imitate_id else -1
                            dist2 = distance2(planet_base_data['x'], planet_base_data['y'], ship_data['x'],
                                              ship_data['y'])
                            dist = math.sqrt(dist2)
                            previous_gravity = previous_gravity + is_bot_to_imitate * ship_data['health'] / dist2
                            if is_bot_to_imitate != 1:
                                # closest_friendly_ship_distance = min(closest_friendly_ship_distance, dist)
                                # average_distance = average_distance + dist * ship_data['health']
                                # my_ships_health = my_ships_health + ship_data['health']
                                # else:
                                # closest_enemy_ship_distance = min(closest_enemy_ship_distance, dist)
                                if dist <= 25:
                                    previous25_enemy += 1
                                    previous70_enemy += 1
                                elif (dist > 25) and (dist <= 70):
                                    previous70_enemy += 1
                distance_from_center = distance(planet_base_data['x'], planet_base_data['y'], width / 2, height / 2)
                # Health-weighted mean distance of our ships to this planet.
                average_distance = average_distance / my_ships_health
                is_active = 1.0 if planet_base_data['docking_spots'] > len(
                    planet_data['docked_ships']) or ownership != 1 else 0.0
                signed_current_production = planet_data['current_production'] * ownership
                # Features of the planet are inserted into the vector in the order described by FEATURE_NAMES
                planet_features[str(planet_id)] = [
                    player_num,
                    planet_data['health'],
                    planet_base_data['docking_spots'] - len(planet_data['docked_ships']),
                    planet_data['remaining_production'],
                    signed_current_production,
                    gravity,
                    closest_friendly_ship_distance,
                    closest_enemy_ship_distance,
                    ownership,
                    distance_from_center,
                    average_distance,
                    is_active,
                    range25_enemy,
                    range70_enemy,
                    previous25_enemy,
                    previous70_enemy,
                    docked_health,
                    previous_docked_health]
            game_training_data.append((planet_features, allocations))
        training_data.append(game_training_data)
    if parsed_games == 0:
        raise Exception("Didn't find any matching games. Try different bot.")
    if dump_features_location is not None:
        serialize_data(training_data, dump_features_location)
    flat_training_data = [item for sublist in training_data for item in sublist]
    print("Data parsed, parsed {} games, total frames: {}".format(parsed_games, len(flat_training_data)))
    return format_data_for_training(flat_training_data)
|
<reponame>tim-we/py-radio
import glob
import os
import random
import time
from threading import Thread
from itertools import chain
from more_itertools import peekable
import re
from typing import List, Iterable
class ClipLibrary:
    """Indexes audio clips on disk, split into pools (hosts, music, night,
    other), with search and optional periodic background rescanning."""

    def __init__(self, folder: str, log: bool = True, auto_update: bool = True):
        """Build the library from ``folder``.

        :param folder: root directory containing ``hosts``, ``music`` and
            ``night`` subdirectories.
        :param log: print progress and statistics to stdout.
        :param auto_update: rescan the folders every 30 minutes in a
            background daemon thread.
        """
        if log:
            print("Building clip library...")
        self.hosts = ClipPool(os.path.join(folder, "hosts"))
        self.music = ClipPool(os.path.join(folder, "music"))
        self.night = ClipPool(os.path.join(folder, "night"))
        self.other = ClipPool(folder)
        self.folder = folder
        self.abs_path = os.path.abspath(folder)
        if log:
            print(" ->", self.music.size() + self.night.size(), "songs")
            print(" ->", self.hosts.size(), "host clips")
        if auto_update:
            Thread(target=self._update_thread, name="LibUpdateThread", daemon=True).start()

    def update(self) -> None:
        """Rescan all pools to pick up added/removed files."""
        print("Updating library...")
        self.hosts.scan()
        self.music.scan()
        self.night.scan()
        self.other.scan()

    def _update_thread(self) -> None:
        # Runs forever in a daemon thread; exits with the process.
        while True:
            # wait 30min between rescans
            time.sleep(30 * 60)
            # update library
            self.update()

    def _filter(self, search: str) -> Iterable[str]:
        """Lazily yield all clip paths (music, night, other) matching ``search``."""
        return chain(
            self.music.filter(search),
            self.night.filter(search),
            self.other.filter(search)
        )

    def search_clips(self, search: str, short_path: bool = False) -> List[str]:
        """Return clip paths matching ``search``; falls back to an AND-search
        over the delimiter-separated parts of the term when the verbatim
        search yields nothing.

        :param short_path: strip the library folder prefix from the results.
        """
        # get all paths matching the search term
        raw_results = peekable(self._filter(search))
        # do extended search if there are no matches
        if raw_results.peek(None) is None:
            delimiters = [".", " ", "-", "_"]
            search_parts = list(filter(
                lambda s: len(s.strip()) > 0,
                re.split("|".join(map(re.escape, delimiters)), search)
            ))
            # BUG FIX: compare by value (!=), not identity (is not), to decide
            # whether splitting actually changed the search term.
            if len(search_parts) > 0 and (search != search_parts[0]):
                parts = iter(search_parts)
                results = self._filter(next(parts))
                for search_part in parts:
                    # BUG FIX: bind the current part as a default argument.
                    # The filters are consumed lazily AFTER this loop ends, so
                    # a plain closure would see only the LAST search_part in
                    # every stage (late-binding closure bug).
                    results = filter(
                        lambda x, part=search_part: part in x.lower(),
                        results
                    )
                raw_results = peekable(results)
        # return only relative paths if short_path is true
        n = 0
        if short_path:
            n = len(self.folder)
            # also remove the path separator unless folder already ends with it
            if not self.folder[-1] == os.sep:
                n += 1
        clean_results = map(lambda x: x[n:], raw_results)
        return list(clean_results)
class ClipPool:
    """A pool of audio clips from one folder with history-aware random picks."""

    def __init__(self, folder: str):
        assert os.path.exists(folder), "The folder for this ClipPool does not exist"
        self.clips: List[str] = []
        self._history: List[int] = []
        self._history_len: int = 0
        self.folder = folder
        self.scan()

    def empty(self) -> bool:
        """True when the pool holds no clips."""
        return not self.clips

    def next(self) -> str:
        """Pick a random clip that has not been played recently."""
        assert not self.empty(), "Cannot pick clip from empty pool"
        num_clips = len(self.clips)
        # draw until the index falls outside the recent history
        choice = random.randrange(0, num_clips)
        while choice in self._history:
            choice = random.randrange(0, num_clips)
        # remember the pick, dropping the oldest entry once the history is full
        self._history.append(choice)
        if len(self._history) > self._history_len:
            del self._history[0]
        return self.clips[choice]

    def filter(self, search: str) -> Iterable[str]:
        """Lazily yield clips whose path contains ``search`` (case-insensitive)."""
        needle = search.lower()
        return (clip for clip in self.clips if needle in clip.lower())

    def size(self) -> int:
        """Number of clips currently in the pool."""
        return len(self.clips)

    def scan(self) -> None:
        """Re-list the folder contents and reset the pick history."""
        self.clips = glob.glob(os.path.join(self.folder, "*.*"))
        count = len(self.clips)
        # history covers roughly 10% of the pool, clamped to [10, 42],
        # but always leaves at least one clip eligible for picking
        self._history_len = min(count - 1, min(max(count // 10, 10), 42))
        self._history = []
|
<reponame>MichaelWS/vix_utils
"""
This module provides both the command line program and a Python interface to provide
the VIX futures term structure, the VIX continuous maturity
term structure, and the VIX cash term structure.
"""
import argparse
import vix_utils.vix_futures_term_struture as v
import vix_utils.vix_cash_term_structure as cash
import pandas as pd
import logging as logging
import asyncio
import os.path as ospath
import pathlib
import configparser
from pathlib import Path
from vix_utils.futures_utils import timeit
# Placeholder API key; replaced by the value from the config file or the
# -q command line option before any Quandl download is attempted.
quandl_api_key = f"This is not a valid quandle key {__file__}"
# When truthy (set via _set_config_path) overrides the default ~/.vixutil
# data directory.
_override_data_path = False
def _set_config_path(str_path):
    """Override the directory used for the config file and cached data files."""
    global _override_data_path
    _override_data_path = str_path
def _vix_util_data_path():
"""
:return: the path where VIX term structure and calendar data are stored.
"""
if _override_data_path:
return Path(_override_data_path)
user_path = Path.home()
vixutil_path = user_path / ".vixutil"
vixutil_path.mkdir(exist_ok=True)
return vixutil_path
def _needs_update(output_file):
# use the cached version if the code hasn't changed. This makes development much
# easier.
return not ospath.exists(output_file) or ospath.getmtime(output_file) < ospath.getmtime(v.__file__)
# File names of the cached pickles inside the data directory.
# (Note: "struture" spelling is kept — it is part of the on-disk file name.)
_vix_futures_constant_maturity_term_structure_file = "vix_futures_constant_maturity_term_structure.pkl"
_vix_futures_term_structure_file = "vix_futures_term_struture.pkl"
_vix_cash_file = "vix_cash_term_structure.pkl"
class VixUtilsApi:
    """
    Python API for the VIX term structure data.

    Attributes:
        data_path: the folder holding the downloaded and pre-prepared data files.
    """

    def __init__(self, *data_dir):
        """
        :param data_dir: optional override of the default data dir where
            files are stored by the library. The default is the .vixutil
            subdirectory of the user's home directory.
        """
        self.data_path = _vix_util_data_path() if len(data_dir) == 0 else data_dir[0]

    async def rebuild(self):
        """
        Downloads the files for vix term structures, and generates a map from
        trade days to each future settlement date.
        Most users will prefer to use the command line program to perform this.
        :return: nothing
        """
        # blocking downloads run in worker threads so they overlap with the
        # async cash-history download
        download_quandl_coro = asyncio.to_thread(v.download_quandle_data, quandl_api_key, self.data_path)
        ch = asyncio.create_task(cash.get_vix_index_histories(self.data_path))
        wide_vix_calendar_coro = asyncio.to_thread(v.vix_futures_trade_dates_and_settlement_dates)
        (cash_vix, _, wide_vix_calendar) = await asyncio.gather(ch, download_quandl_coro, wide_vix_calendar_coro)
        wide_vix_calendar.to_pickle(self.data_path / "wide_vix_calendar.pkl")
        cash_vix.to_pickle(self.data_path / _vix_cash_file)

    def get_cash_vix_term_structure(self):
        """Return the cash vix term structure. """
        return pd.read_pickle(self.data_path / _vix_cash_file)

    def get_vix_continuous_future_weights(self):
        """Weights of each futures tenor used for continuous-maturity interpolation."""
        return v.vix_constant_maturity_weights(self.get_vix_trade_and_future_settlements())

    def get_vix_trade_and_future_settlements(self):
        """Map from trade dates to the settlement dates of each future."""
        return pd.read_pickle(self.data_path / "wide_vix_calendar.pkl")

    def _make_vix_futures_term_structure(self):
        # build the term structure from the downloaded data and cache it as a pickle
        wide_vix_calendar = self.get_vix_trade_and_future_settlements()
        vt = v.vix_futures_term_structure(self.data_path, wide_vix_calendar)
        vt.to_pickle(self.data_path / _vix_futures_term_structure_file)
        return vt

    @staticmethod
    def get_or_make_helper(filename, make_callable):
        """Read the cached pickle, or regenerate it via ``make_callable`` when stale."""
        if _needs_update(filename):
            df = make_callable()
            df.to_pickle(filename)
        else:
            df = pd.read_pickle(filename)
        return df

    def get_vix_futures_term_structure(self):
        """Return the vix futures term structure (cached on disk)."""
        f = self.data_path / _vix_futures_term_structure_file
        return self.get_or_make_helper(f, self._make_vix_futures_term_structure)

    @timeit()
    def get_vix_futures_constant_maturity_weights(self):
        """Return the per-tenor interpolation weights (cached on disk)."""
        f = self.data_path / "vix_futures_constant_maturity_weights.pkl"
        def make_weights(): return v.vix_constant_maturity_weights(self.get_vix_trade_and_future_settlements())
        return self.get_or_make_helper(f, make_weights)

    @timeit()
    def get_vix_futures_constant_maturity_term_structure(self):
        """Return the continuous (constant) maturity term structure (cached on disk)."""
        f = self.data_path / _vix_futures_constant_maturity_term_structure_file
        def _make_vix_futures_constant_maturity_term_structure():
            return v.vix_continuous_maturity_term_structure(self.get_vix_trade_and_future_settlements(),
                                                            self.get_vix_futures_term_structure())
        return self.get_or_make_helper(f, _make_vix_futures_constant_maturity_term_structure)
# Supported output file types; write_frame maps each onto the matching pandas
# writer (to_csv, to_pickle, to_excel, to_html) in the same order.
extensions = [".csv", ".pkl", ".xlsx", ".html"]  # supported output file types

# Command line interface definition.
parser = argparse.ArgumentParser()
output_format_help = f"""The file extension determines the file type. Valid extensions are: {extensions}.
\n Python programmers may prefer to use the API """
parser.add_argument("--config_dir", dest="config_dir",
                    help="store the config file and other files in this folder instead of the default.")
parser.add_argument("-i", help="information about where the data is stored", dest='info', action='store_true')
parser.add_argument("-s", help='Store Quandl API Key supplied with -q in config file ', dest="store_quandle_api_key",
                    action='store_true')
parser.add_argument("-q", help='Quandl API Key', dest='quandl_api_key')
parser.add_argument("-r",
                    help="""download the data from Quandl and CBOE and rebuild the vix futures term
structure and vix cash term structure""",
                    action="store_true", dest='rebuild')
parser.add_argument("-t", dest="term_structure",
                    help=f"""output the vix futures term structure to a file. {output_format_help}""")
parser.add_argument("-m", dest="continuous", help=f"""output the vix continuous maturity (i.e. interpolated)
futures term structure to a file.
{output_format_help}""")
parser.add_argument("-w", dest="continuous_weights", help=f"""output the weights of the various vix futures tenors
required to interpolate vix continuous maturity futures.
Note the weights are as of the beginning of the trading day. {output_format_help}""")
parser.add_argument("-c", dest="cash", help=f"""output the vix cash term structure a file.
{output_format_help}. Some other indexes from CBOE
will also be included. {output_format_help} """)
parser.add_argument("--calendar", dest="calendar", help="settlement dates for vix futures for a given trade date")
parser.add_argument("--start_date", dest="start_date", help="iso format date YYYY-MM-DD, exclude any dates prior")
parser.add_argument("--end_date", dest="end_date", help="iso format date, YYYY-MM-DD exclude any dates after")
def read_config_file():
    """Load the Quandl API key from the config file in the data dir, if stored."""
    global quandl_api_key
    cfg = configparser.ConfigParser()
    cfg.read(_vix_util_data_path() / 'vixutil.config')
    if "QUANDLE" in cfg:
        quandl_api_key = cfg['QUANDLE']['QUANDLE_API_KEY']
def write_config_file():
    """Persist the current Quandl API key to the config file in the data dir."""
    # FIX: removed the unnecessary "global quandl_api_key" statement —
    # the global is only read here, never assigned.
    config_file_path = _vix_util_data_path() / 'vixutil.config'
    cp = configparser.ConfigParser()
    cp['QUANDLE'] = {'QUANDLE_API_KEY': quandl_api_key}
    with open(config_file_path, 'w') as configfile:
        cp.write(configfile)
def write_frame_ex(frame, ofile, functions):
    """Dispatch ``ofile`` to the writer callable matching its extension.

    :param frame: unused here; kept for interface symmetry with write_frame
        (the ``functions`` are typically bound methods of the frame).
    :param functions: writer callables, one per entry in the module-level
        ``extensions`` list, in the same order.
    """
    writers = dict(zip(extensions, functions))
    writer = writers.get(pathlib.Path(ofile).suffix)
    if writer is None:
        print(f"Unsupported extension, only {extensions} are supported")
    else:
        writer(ofile)
def write_frame(frame, ofile):
    """Write ``frame`` to ``ofile``; the format is chosen from the extension."""
    writers = [frame.to_csv, frame.to_pickle, frame.to_excel, frame.to_html]
    return write_frame_ex(frame, ofile, writers)
def main():
    """Command line entry point.

    Reads the config, optionally stores the Quandl key, rebuilds the data
    files and writes the requested term structures to files.

    :return: process exit code (0 on success).
    """
    global quandl_api_key
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    args = parser.parse_args()
    # optional date window applied to every output frame
    start_date = pd.to_datetime(start_date_str) if (start_date_str := args.start_date) else None
    end_date = pd.to_datetime(end_date_str) if (end_date_str := args.end_date) else None
    selection = slice(start_date, end_date)  # slice(None, None) selects everything

    # this must happen before reading the configuration file.
    if args.config_dir:
        o_data_path = args.config_dir
        _set_config_path(o_data_path)
    read_config_file()

    if args.info:
        print(f"Data and config file are stored in {_vix_util_data_path()}")
    if args.quandl_api_key:
        quandl_api_key = args.quandl_api_key
    if args.store_quandle_api_key:
        write_config_file()

    vutils = VixUtilsApi()
    if args.rebuild:
        print("Rebuilding data files from Quandl and CBOE")
        timeit(logging.INFO)(asyncio.run)(vutils.rebuild())
        print("Rebuilt Files")
    if ofile := args.term_structure:
        fts = vutils.get_vix_futures_term_structure()[selection]
        write_frame(fts, ofile)
    if ofile := args.continuous:
        cmt = vutils.get_vix_futures_constant_maturity_term_structure()[selection]
        write_frame(cmt, ofile)
    if ofile := args.cash:
        cash_term_structure = vutils.get_cash_vix_term_structure()[selection]
        write_frame(cash_term_structure, ofile)
    if ofile := args.calendar:
        calendar = vutils.get_vix_trade_and_future_settlements()[selection]
        write_frame(calendar, ofile)
    if ofile := args.continuous_weights:
        weights = vutils.get_vix_futures_constant_maturity_weights()[selection]
        write_frame(weights, ofile)
    return 0


# BUG FIX: main() previously ran unconditionally at import time and its exit
# code was discarded. Guard it so importing this module has no side effects
# and propagate the exit code to the shell.
if __name__ == '__main__':
    import sys
    sys.exit(main())
|
import os
from typing import List
from enum import IntEnum
import cv2 as cv
import numpy as np
from pydicom import dcmread
from pydicom.dataset import Dataset
from pydicom.sequence import Sequence
from rt_utils.utils import ROIData, SOPClassUID
def load_sorted_image_series(dicom_series_path: str):
    """Load all DICOM images from a series directory, sorted along the z axis.

    (The enclosing module contains helper methods for loading / formatting
    DICOM images and contours; the original docstring here described the
    module rather than this function.)

    :raises Exception: when the directory contains no DICOM images.
    """
    series_data = load_dcm_images_from_path(dicom_series_path)

    if len(series_data) == 0:
        raise Exception("No DICOM Images found in input path")

    # Sort slices in ascending order by patient z position
    series_data.sort(key=lambda ds: ds.ImagePositionPatient[2], reverse=False)

    return series_data
def load_dcm_images_from_path(dicom_series_path: str) -> List[Dataset]:
    """Recursively read every valid DICOM image file under the given path."""
    series_data = []
    for root, _, files in os.walk(dicom_series_path):
        for name in files:
            try:
                ds = dcmread(os.path.join(root, name))
                # keep only datasets that actually carry image data
                if hasattr(ds, 'pixel_array'):
                    series_data.append(ds)
            except Exception:
                # Not a valid DICOM file
                continue
    return series_data
def get_contours_coords(mask_slice: np.ndarray, series_slice: Dataset, roi_data: ROIData):
    """Extract the contours of one mask slice and convert them to DICOM
    ContourData format (flat [x, y, z, ...] lists in patient coordinates).

    :param roi_data: ROI options; use_pin_hole and approximate_contours are read here.
    """
    # Create pin hole mask if specified (lets a holed region be one contour)
    if roi_data.use_pin_hole:
        mask_slice = create_pin_hole_mask(mask_slice, roi_data.approximate_contours)

    # Get contours from mask
    contours, _ = find_mask_contours(mask_slice, roi_data.approximate_contours)
    validate_contours(contours)

    # Format for DICOM
    formatted_contours = []
    for contour in contours:
        contour = np.array(contour)  # Type cannot be a list
        translated_contour = translate_contour_to_data_coordinants(contour, series_slice)
        dicom_formatted_contour = format_contour_for_dicom(translated_contour, series_slice)
        formatted_contours.append(dicom_formatted_contour)

    return formatted_contours
def find_mask_contours(mask: np.ndarray, approximate_contours: bool):
    """Run OpenCV contour detection on a mask.

    :return: (contours, hierarchy) where each contour is a list of [x, y]
        pixel pairs and hierarchy is OpenCV's per-contour relation array
        (indexed via the Hierarchy enum).
    """
    approximation_method = cv.CHAIN_APPROX_SIMPLE if approximate_contours else cv.CHAIN_APPROX_NONE
    contours, hierarchy = cv.findContours(mask.astype(np.uint8), cv.RETR_TREE, approximation_method)
    # BUG FIX: OpenCV >= 4 returns contours as a tuple, which does not support
    # item assignment; convert to a list before reformatting in place.
    contours = list(contours)
    # Format extra array out of data
    for i, contour in enumerate(contours):
        contours[i] = [[pos[0][0], pos[0][1]] for pos in contour]
    hierarchy = hierarchy[0]  # Format extra array out of data
    return contours, hierarchy
def create_pin_hole_mask(mask: np.ndarray, approximate_contours: bool):
    """
    Creates masks with pin holes added to contour regions with holes.
    This is done so that a given region can be represented by a single contour.
    """
    contours, hierarchy = find_mask_contours(mask, approximate_contours)
    pin_hole_mask = mask.copy()

    # Iterate through the hierarchy, for child nodes, draw a line upwards from the first point
    for i, array in enumerate(hierarchy):
        parent_contour_index = array[Hierarchy.parent_node]
        if parent_contour_index == -1: continue  # Contour is not a child (no hole)

        child_contour = contours[i]
        line_start = tuple(child_contour[0])

        # erasing a thin line from the hole outwards turns the ring into a
        # single connected contour
        pin_hole_mask = draw_line_upwards_from_point(pin_hole_mask, line_start, fill_value=0)
    return pin_hole_mask
def draw_line_upwards_from_point(mask: np.ndarray, start, fill_value: int) -> np.ndarray:
    """Draw a vertical line upward from ``start`` until a pixel already equal
    to ``fill_value`` is reached, then return the mask as bool.

    NOTE(review): assumes the walk reaches a ``fill_value`` pixel before
    leaving the image; if the region touches the top edge, the negative index
    in ``mask[end]`` wraps around (numpy semantics) — confirm callers
    guarantee termination.
    """
    line_width = 2
    end = (start[0], start[1] - 1)
    mask = mask.astype(np.uint8)  # Type that OpenCV expects

    # Draw one point at a time until we hit a point that already has the desired value
    while mask[end] != fill_value:
        cv.line(mask, start, end, fill_value, line_width)

        # Update start and end to the next positions
        start = end
        end = (start[0], start[1] - line_width)
    return mask.astype(bool)
def validate_contours(contours: list):
    """Raise when contour extraction produced nothing for a non-empty mask."""
    if not contours:
        raise Exception("Unable to find contour in non empty mask, please check your mask formatting")
def translate_contour_to_data_coordinants(contour, series_slice: Dataset):
    """Convert contour pixel indices into patient-space coordinates, using the
    slice origin (ImagePositionPatient) and PixelSpacing. Modifies ``contour``
    in place and returns it."""
    origin = series_slice.ImagePositionPatient
    pixel_spacing = series_slice.PixelSpacing
    contour[:, 0] = contour[:, 0] * pixel_spacing[0] + origin[0]
    contour[:, 1] = contour[:, 1] * pixel_spacing[1] + origin[1]
    return contour
def translate_contour_to_pixel_coordinants(contour, series_slice: Dataset):
    """Inverse of translate_contour_to_data_coordinants: patient-space
    coordinates back to fractional pixel indices. Modifies ``contour`` in
    place and returns it."""
    offset = series_slice.ImagePositionPatient
    spacing = series_slice.PixelSpacing
    contour[:, 0] = (contour[:, 0] - offset[0]) / spacing[0]
    # TYPO FIX: the original read "(contour[:, 1] - + offset[1])"; the stray
    # unary plus evaluated to the same subtraction, written plainly here.
    contour[:, 1] = (contour[:, 1] - offset[1]) / spacing[1]
    return contour
def format_contour_for_dicom(contour, series_slice: Dataset):
    """Flatten an (N, 2) contour into DICOM's 1-D [x, y, z, x, y, z, ...] list,
    using the slice's SliceLocation as the z coordinate for every point."""
    # DICOM uses a 1d array of x, y, z coords
    z_values = np.ones((contour.shape[0], 1)) * series_slice.SliceLocation
    stacked = np.concatenate((contour, z_values), axis=1)
    return np.ravel(stacked).tolist()
def create_series_mask_from_contour_sequence(series_data, contour_sequence: Sequence):
    """Build a 3-D boolean mask for the whole series from an RTSTRUCT contour sequence."""
    mask = create_empty_series_mask(series_data)

    # Iterate through each slice of the series, If it is a part of the contour, add the contour mask
    for i, series_slice in enumerate(series_data):
        slice_contour_data = get_slice_contour_data(series_slice, contour_sequence)
        if len(slice_contour_data):
            mask[:, :, i] = get_slice_mask_from_slice_contour_data(series_slice, slice_contour_data)
    return mask
def get_slice_contour_data(series_slice: Dataset, contour_sequence: Sequence):
    """Collect the ContourData entries whose referenced image is this slice
    (matched by SOPInstanceUID); one entry per matching contour image."""
    return [
        contour.ContourData
        for contour in contour_sequence
        for contour_image in contour.ContourImageSequence
        if contour_image.ReferencedSOPInstanceUID == series_slice.SOPInstanceUID
    ]
def get_slice_mask_from_slice_contour_data(series_slice: Dataset, slice_contour_data):
    """Rasterise all contours belonging to one slice into a boolean mask."""
    slice_mask = create_empty_slice_mask(series_slice)
    for contour_coords in slice_contour_data:
        fill_mask = get_contour_fill_mask(series_slice, contour_coords)
        # Invert values in the region to be filled. This will create holes where needed if contours are stacked on top of each other
        slice_mask[fill_mask == 1] = np.invert(slice_mask[fill_mask == 1])
    return slice_mask
def get_contour_fill_mask(series_slice: Dataset, contour_coords):
    """Fill the polygon described by one flat [x, y, z, ...] contour into a
    uint8 mask (1 inside the ROI, 0 elsewhere)."""
    # Format data: flat triplets -> (N, 3), patient space -> rounded pixel indices
    reshaped_contour_data = np.reshape(contour_coords, [len(contour_coords) // 3, 3])
    translated_contour_data = translate_contour_to_pixel_coordinants(reshaped_contour_data, series_slice)
    translated_contour_data = np.around(translated_contour_data)
    polygon = [np.array([translated_contour_data[:, :2]], dtype=np.int32)]

    # Create mask for the region. Fill with 1 for ROI
    fill_mask = create_empty_slice_mask(series_slice).astype(np.uint8)
    cv.fillPoly(img=fill_mask, pts=polygon, color=1)
    return fill_mask
def create_empty_series_mask(series_data):
    """Allocate an all-False boolean mask shaped (columns, rows, num_slices),
    taking the in-plane dimensions from the first slice of the series."""
    reference = series_data[0]
    shape = (int(reference.Columns), int(reference.Rows), len(series_data))
    return np.zeros(shape).astype(bool)
def create_empty_slice_mask(series_slice):
    """Allocate an all-False boolean mask shaped (columns, rows) for one slice."""
    shape = (int(series_slice.Columns), int(series_slice.Rows))
    return np.zeros(shape).astype(bool)
class Hierarchy(IntEnum):
    """
    Enum class for what the positions in the OpenCV hierarchy array mean
    (column indices into each row of the hierarchy returned by
    cv.findContours with RETR_TREE; -1 means "none").
    """
    next_node = 0
    previous_node = 1
    first_child = 2
    parent_node = 3
|
#!/usr/bin/python
import argparse
import io
import json
import logging
import os
import re
import sys
import bs4
import docker
import docker.errors
import markdown
import requests
# Shared Docker clients: the high-level client for pull/run, and its
# low-level API for streaming build/push output.
docker_hl_client = docker.from_env()
docker_client = docker_hl_client.api
def parse_cmdline():
    """Parse the command line; every positional IMAGE must look like repo:tag
    with an optional "@<dockerfile-url>" suffix."""
    def check_docker_tag(value):
        # validate only the part before an optional "@<url>" suffix
        image = value.split('@')[0]
        if image.count(':') != 1 or image.startswith(':') or image.endswith(':'):
            raise argparse.ArgumentTypeError('%s is an invalid Docker tag' % value)
        return value

    parser = argparse.ArgumentParser()
    parser.add_argument('--push', action='store_true')
    parser.add_argument('--override-env', action='append', default=[])
    parser.add_argument('--override-from')
    parser.add_argument('--add-gnupg-curl', action='store_true')
    parser.add_argument('--fix-lets-encrypt', action='store_true')
    parser.add_argument('images', metavar='IMAGE', type=check_docker_tag, nargs='+')
    return parser.parse_args()
class DockerImageError(Exception):
    """Raised when an image or its Dockerfile cannot be located or downloaded."""
    pass
class DockerBuildError(Exception):
    """Raised when generating or building the combined Dockerfile fails."""
    pass
class DockerImage(object):
    """Wraps a Docker image reference ("repo:tag" or "repo:tag@dockerfile-url")
    and lazily resolves its build time and Dockerfile contents."""

    def __init__(self, image):
        if '@' in image:
            # explicit Dockerfile URL supplied after "@"
            self.image, dockerfile_url = image.split('@')
            dockerfile_req = requests.get(dockerfile_url)
            if dockerfile_req.status_code != 200:
                raise DockerImageError('Error downloading Dockerfile (%s)' % dockerfile_url)
            self._dockerfile = dockerfile_req.text
        else:
            self.image = image
            self._dockerfile = None
        self._build_time = None

    @property
    def user(self):
        """Image namespace, or '_' for official library images."""
        if '/' in self.image:
            return self.image.split('/')[0]
        return '_'

    @property
    def repo(self):
        """Repository name without namespace or tag."""
        return self.image.split(':')[0].split('/')[-1]

    @property
    def tag(self):
        """Tag part of the image reference."""
        return self.image.split(':')[1]

    @property
    def build_time(self):
        """Creation timestamp of the image (pulls the image on first access)."""
        if self._build_time:
            return self._build_time
        # TODO get details without pulling
        logging.info('Pulling %s', self.image)
        try:
            image = docker_hl_client.images.pull(self.image)
        except docker.errors.NotFound:
            # BUG FIX: the original passed logging-style args to the exception
            # ('%s not found', self.image); exceptions do not %-format their
            # arguments, so the message must be formatted explicitly.
            raise DockerImageError('%s not found' % self.image)
        self._build_time = image.attrs['Created']
        logging.info('%s was last built on %s', self.image, self._build_time)
        return self._build_time

    @property
    def dockerfile(self):
        """Dockerfile text, scraped from the Docker Hub description if not
        supplied via the "@url" suffix."""
        if self._dockerfile:
            return self._dockerfile
        # TODO there must be a better way...
        if self.user != '_':
            raise DockerImageError('Unable to pull Dockerfile from non-product image on new hub')
        url = 'https://hub.docker.com/api/content/v1/products/images/%s' % (self.repo,)
        hub_req = requests.get(url)
        if hub_req.status_code != 200:
            raise DockerImageError('Error connecting to hub (%s)' % hub_req.text)
        description = hub_req.json().get('full_description', '')
        description_html = markdown.markdown(description)
        soup = bs4.BeautifulSoup(description_html, 'html.parser')
        # find a link whose <code> text equals the tag; prefer non-Windows
        # Dockerfiles when several candidates are listed
        for node in soup(text=self.tag):
            dockerfile_url = None
            if node.parent.name == 'code' and node.parent.parent.name == 'a':
                dockerfile_url = node.parent.parent.get('href')
            if node.parent.name == 'code' and node.parent.parent.name == 'li':
                dockerfile_urls = [a.get('href') for a in node.parent.parent.find_all('a')]
                dockerfile_urls = [u for u in dockerfile_urls if 'windowsservercore' not in u]
                if len(dockerfile_urls) == 1:
                    dockerfile_url = dockerfile_urls[0]
            if dockerfile_url:
                dockerfile_url = dockerfile_url.replace('github.com', 'raw.githubusercontent.com').replace('/blob/',
                                                                                                           '/')
                dockerfile_req = requests.get(dockerfile_url)
                if dockerfile_req.status_code != 200:
                    raise DockerImageError('Error downloading Dockerfile (%s)' % dockerfile_url)
                self._dockerfile = dockerfile_req.text
                return self._dockerfile
        raise DockerImageError('Unable to find Dockerfile for %s in %s' % (self.tag, url))
def get_from_line(dockerfile):
    """Return the first FROM line of a Dockerfile (un-stripped), or None."""
    return next(
        (line for line in dockerfile.splitlines() if line.strip().startswith('FROM')),
        None,
    )
def is_compatible_from_lines(images):
    """True when all images can share one base: identical FROM lines, or all
    based on buildpack-deps (whose versions interoperate)."""
    from_lines = [get_from_line(image.dockerfile) for image in images]
    first = from_lines[0]
    if all(line == first for line in from_lines):
        return True
    base_names = [line.split(' ')[-1].split(':')[0] for line in from_lines]
    if all(base == 'buildpack-deps' for base in base_names):
        logging.info('Images using FROM buildpack-deps (%s) which are different versions but still compatible',
                     ', '.join(from_lines))
        return True
    logging.info('%s', from_lines)
    return False
def should_rebuild(combo_image, base_images):
    """True when any base image is newer than the combo image, or the combo
    image has never been built. Build times are ISO-8601 strings, so plain
    string comparison orders them chronologically."""
    try:
        combo_time = combo_image.build_time
    except DockerImageError:
        logging.info('Combo image not built yet')
        combo_time = ''  # empty string sorts before every timestamp
    return any(combo_time < base.build_time for base in base_images)
def combine_image_name_and_tag(images):
    """Build the combo reference: image names joined by '_' under combos/,
    tags joined by '_'."""
    names, tags = zip(*(image.image.split(':') for image in images))
    return f"combos/{'_'.join(names)}:{'_'.join(tags)}"
def log_stream(stream):
    """Forward a Docker build/push JSON stream to the log; raise on errors."""
    for chunk in stream:
        for raw in chunk.decode('utf-8').strip().split('\n'):
            payload = json.loads(raw)
            detail = payload.get('errorDetail')
            if detail:
                raise DockerBuildError(detail.get('message', str(payload)))
            logging.info('%s', payload.get('stream', str(payload)).strip('\n'))
class DockerfileBuilder(object):
    """Assembles a combined Dockerfile from several images' Dockerfiles.

    FROM comes from the first image (or the explicit override); CMD and
    ENTRYPOINT lines are dropped; COPY lines are rewritten to copy the built
    file out of the source image; ENV values may be overridden.
    """

    def __init__(self, from_override, env_overrides):
        self.dockerfile = ''
        self._use_from = True  # should the next image's FROM line be emitted?
        self._env_overrides = env_overrides
        if from_override:
            self._use_from = False
            self.dockerfile += f'FROM {from_override}\n'

    def add_image(self, image):
        """Append one image's Dockerfile, rewriting lines as described above.

        :raises DockerBuildError: on multi-stage Dockerfiles or COPY lines
            that cannot be parsed.
        """
        saw_from = False
        for line in image.dockerfile.splitlines():
            line = line.strip()
            if line.upper().startswith('FROM '):
                if self._use_from:
                    self.dockerfile += line + '\n'
                if saw_from:
                    raise DockerBuildError('multi-stage not supported yet')
                saw_from = True
            elif line.upper().startswith('COPY'):
                if line.endswith('\\'):
                    raise DockerBuildError('multi-line COPY commands not supported yet')
                m = re.match(r'^COPY[ \t]+([^ \t]+)[ \t]+([^ \t]+)$', line, re.I)
                if not m:
                    raise DockerBuildError('unable to parse COPY line: ' + line)
                copy_from, copy_to = m.groups()
                if copy_to.endswith('/'):
                    path = copy_to + os.path.basename(copy_from)
                else:
                    path = copy_to
                # copy the already-built file out of the source image instead
                self.dockerfile += 'COPY --from=%s %s %s\n' % (image.image, path, path)
            elif line.upper().startswith('CMD ') or line.upper().startswith('ENTRYPOINT '):
                continue  # the combo image defines no entry point
            elif line.upper().startswith('ENV '):
                # FIX: pass maxsplit by keyword — positional maxsplit for
                # re.split is deprecated (removed in Python 3.13).
                _, name, value = re.split('[ \t]+', line, maxsplit=2)
                if name in self._env_overrides:
                    self.dockerfile += f'ENV {name} {self._env_overrides[name]}\n'
                else:
                    self.dockerfile += line + '\n'
            else:
                self.dockerfile += line + '\n'
        # only the first image contributes the FROM line
        self._use_from = False

    @property
    def file(self):
        """The Dockerfile as a bytes file object, ready for docker build."""
        return io.BytesIO(self.dockerfile.encode('utf-8'))
def main():
    """Build (and optionally push) a combined Docker image from the given images.

    :return: process exit code (0 on success, 1 on incompatible FROM lines).
    """
    logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(levelname)s %(message)s')
    args = parse_cmdline()
    images = [DockerImage(i) for i in args.images]
    # --override-env entries are NAME=VALUE pairs
    override_env = dict(i.split('=', 1) for i in args.override_env)
    if not args.override_from:
        # without an explicit base, all images must agree on their FROM line
        if not is_compatible_from_lines(images):
            logging.error('%s do not use the same FROM line', ' and '.join(i.image for i in images))
            return 1
    combo_image = DockerImage(combine_image_name_and_tag(images))
    if not should_rebuild(combo_image, images):
        logging.info('Up-to-date')
        return 0
    logging.info('Generating Dockerfile...')
    dockerfile = DockerfileBuilder(args.override_from, override_env)
    if args.add_gnupg_curl:
        dockerfile.dockerfile += 'RUN apt-get update && ' \
                                 'apt-get install -y --no-install-recommends gnupg-curl && ' \
                                 'rm -rf /var/lib/apt/lists/*\n'
    if args.fix_lets_encrypt:
        dockerfile.dockerfile += "RUN sed -ie 's#mozilla/DST_Root_CA_X3.crt#!mozilla/DST_Root_CA_X3.crt#' " \
                                 "/etc/ca-certificates.conf && update-ca-certificates\n"
    for i in images:
        dockerfile.add_image(i)
    # sks servers are deprecated https://sks-keyservers.net/
    dockerfile.dockerfile = dockerfile.dockerfile.replace("p80.pool.sks-keyservers.net", "keys.openpgp.org")
    dockerfile.dockerfile = dockerfile.dockerfile.replace("ipv4.pool.sks-keyservers.net", "keyserver.ubuntu.com")
    logging.info('Rebuilding...')
    build_stream = docker_client.build(fileobj=dockerfile.file, tag=combo_image.image)
    log_stream(build_stream)
    logging.info('Testing image...')
    for i in images:
        test_image(combo_image, i)
    if args.push:
        logging.info('Pushing image...')
        docker_client.login(os.getenv('DOCKER_USERNAME'), os.getenv('DOCKER_PASSWORD'))
        push_stream = docker_client.push('%s/%s' % (combo_image.user, combo_image.repo), combo_image.tag, stream=True)
        log_stream(push_stream)
    return 0
def test_image(combo_image, image):
    """Smoke-test the combo image: run each base image's CLI with its version
    flag inside the freshly built container and log the output."""
    cli = image.repo
    version = '--version'
    # java images respond to "java -version" rather than "<repo> --version"
    if image.repo == 'java' or image.repo == 'openjdk':
        cli = 'java'
        version = '-version'
    logging.info(f'{cli} {version}: %s',
                 docker_hl_client.containers.run(
                     combo_image.image, [cli, version], remove=True, stderr=True).decode('utf-8').strip())
# Script entry point: exit with main()'s return code.
if __name__ == '__main__':
    sys.exit(main())
|
<reponame>ArenasGuerreroJulian/morph-kgc<filename>src/morph_kgc/mapping/mapping_constants.py
__author__ = "<NAME>"
__credits__ = ["<NAME>"]
__license__ = "Apache-2.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
##############################################################################
#######################    MAPPING DATAFRAME COLUMNS    ######################
##############################################################################

# Canonical column set of the internal mappings DataFrame; the parsing
# queries below populate one row per (triples map, predicate-object map).
MAPPINGS_DATAFRAME_COLUMNS = [
    'source_name', 'triples_map_id', 'triples_map_type', 'data_source',
    'subject_map', 'object_map', 'iterator', 'tablename', 'query',
    'subject_template', 'subject_reference', 'subject_constant', 'subject_quoted', 'subject_termtype',
    'graph_constant', 'graph_reference', 'graph_template',
    'predicate_constant', 'predicate_template', 'predicate_reference',
    'object_termtype', 'object_datatype', 'object_language',
    'object_constant', 'object_template', 'object_reference', 'object_quoted',
    'object_parent_triples_map', 'subject_join_conditions', 'object_join_conditions'
]
##############################################################################
########################    MAPPING PARSING QUERIES    ######################
##############################################################################

# SPARQL query run against the parsed [R2]RML mapping graph; each OPTIONAL
# pulls one possible way a term map can be expressed (constant / template /
# reference / quoted / parent triples map), yielding one solution row per
# predicate-object combination.
MAPPING_PARSING_QUERY = """
# This query has been reused from SDM-RDFizer (https://github.com/SDM-TIB/SDM-RDFizer). SDM-RDFizer has been
# developed by members of the Scientific Data Management Group at TIB. Its development has been coordinated and
# supervised by <NAME>. The implementation has been done by <NAME> and <NAME>
# under the supervision of <NAME>, <NAME>, and <NAME>.
# It has been modified by <NAME>, PhD student at the Ontology Engineering Group (OEG)
# in Universidad Politécnica de Madrid (UPM).
prefix rr: <http://www.w3.org/ns/r2rml#>
prefix rml: <http://semweb.mmlab.be/ns/rml#>
SELECT DISTINCT
?triples_map_id ?triples_map_type ?data_source ?iterator ?tablename ?query ?subject_map ?object_map
?subject_template ?subject_reference ?subject_constant ?subject_quoted ?subject_termtype
?graph_constant ?graph_reference ?graph_template
?predicate_constant ?predicate_template ?predicate_reference
?object_constant ?object_template ?object_reference ?object_quoted
?object_termtype ?object_datatype ?object_language
?object_parent_triples_map
WHERE {
?triples_map_id rml:logicalSource ?_source .
?triples_map_id a ?triples_map_type .
OPTIONAL { ?_source rml:source ?data_source . }
OPTIONAL { ?_source rml:iterator ?iterator . }
OPTIONAL { ?_source rr:tableName ?tablename . }
OPTIONAL { ?_source rml:query ?query . }
# Subject -------------------------------------------------------------------------
?triples_map_id rml:subjectMap ?subject_map .
OPTIONAL { ?subject_map rr:template ?subject_template . }
OPTIONAL { ?subject_map rml:reference ?subject_reference . }
OPTIONAL { ?subject_map rr:constant ?subject_constant . }
OPTIONAL { ?subject_map rml:quotedTriplesMap ?subject_quoted . }
OPTIONAL { ?subject_map rr:termType ?subject_termtype . }
# Predicate -----------------------------------------------------------------------
OPTIONAL {
?triples_map_id rr:predicateObjectMap ?_predicate_object_map .
OPTIONAL {
?_predicate_object_map rr:predicateMap ?_predicate_map .
?_predicate_map rr:constant ?predicate_constant .
}
OPTIONAL {
?_predicate_object_map rr:predicateMap ?_predicate_map .
?_predicate_map rr:template ?predicate_template .
}
OPTIONAL {
?_predicate_object_map rr:predicateMap ?_predicate_map .
?_predicate_map rml:reference ?predicate_reference .
}
# Object --------------------------------------------------------------------------
OPTIONAL {
?_predicate_object_map rml:objectMap ?object_map .
}
OPTIONAL {
?_predicate_object_map rml:objectMap ?object_map .
?object_map rml:quotedTriplesMap ?object_quoted .
OPTIONAL { ?object_map rr:termType ?object_termtype . }
}
OPTIONAL {
?_predicate_object_map rml:objectMap ?object_map .
?object_map rr:constant ?object_constant .
OPTIONAL { ?object_map rr:termType ?object_termtype . }
OPTIONAL { ?object_map rr:datatype ?object_datatype . }
OPTIONAL { ?object_map rr:language ?object_language . }
}
OPTIONAL {
?_predicate_object_map rml:objectMap ?object_map .
?object_map rr:template ?object_template .
OPTIONAL { ?object_map rr:termType ?object_termtype . }
OPTIONAL { ?object_map rr:datatype ?object_datatype . }
OPTIONAL { ?object_map rr:language ?object_language . }
}
OPTIONAL {
?_predicate_object_map rml:objectMap ?object_map .
?object_map rml:reference ?object_reference .
OPTIONAL { ?object_map rr:termType ?object_termtype . }
OPTIONAL { ?object_map rr:datatype ?object_datatype . }
OPTIONAL { ?object_map rr:language ?object_language . }
}
OPTIONAL {
?_predicate_object_map rml:objectMap ?object_map .
?object_map rr:parentTriplesMap ?object_parent_triples_map .
OPTIONAL { ?object_map rr:termType ?object_termtype . }
}
OPTIONAL {
?_predicate_object_map rr:graphMap ?_graph_structure .
?_graph_structure rr:constant ?graph_constant .
}
OPTIONAL {
?_predicate_object_map rr:graphMap ?_graph_structure .
?_graph_structure rr:template ?graph_template .
}
OPTIONAL {
?_predicate_object_map rr:graphMap ?_graph_structure .
?_graph_structure rr:reference ?graph_reference .
}
}
}
"""
# SPARQL query extracting rr:joinCondition child/parent column pairs for
# each term map (used to fill the *_join_conditions DataFrame columns).
JOIN_CONDITION_PARSING_QUERY = """
prefix rr: <http://www.w3.org/ns/r2rml#>
SELECT DISTINCT ?term_map ?join_condition ?child_value ?parent_value
WHERE {
?term_map rr:joinCondition ?join_condition .
?join_condition rr:child ?child_value;
rr:parent ?parent_value.
}
""" |
"""Unit tests for the :mod:`networkx.algorithms.bipartite.matching` module."""
import itertools
import networkx as nx
import pytest
from networkx.algorithms.bipartite.matching import eppstein_matching
from networkx.algorithms.bipartite.matching import hopcroft_karp_matching
from networkx.algorithms.bipartite.matching import maximum_matching
from networkx.algorithms.bipartite.matching import minimum_weight_full_matching
from networkx.algorithms.bipartite.matching import to_vertex_cover
class TestMatching:
    """Tests for bipartite matching algorithms."""

    def setup_method(self):
        """Create the bipartite graphs used by the matching tests.

        (Renamed from ``setup``: the nose-style ``setup`` hook is
        deprecated in pytest; ``setup_method`` is the supported spelling.)

        ``self.graph`` has a maximum cardinality matching that leaves
        vertex 1 and vertex 10 unmatched. The first six numbers are the left
        vertices and the next six numbers are the right vertices.
        """
        self.simple_graph = nx.complete_bipartite_graph(2, 3)
        self.simple_solution = {0: 2, 1: 3, 2: 0, 3: 1}
        edges = [(0, 7), (0, 8), (2, 6), (2, 9), (3, 8), (4, 8), (4, 9), (5, 11)]
        self.top_nodes = set(range(6))
        self.graph = nx.Graph()
        self.graph.add_nodes_from(range(12))
        self.graph.add_edges_from(edges)
        # Example bipartite graph from issue 2127.  It contains isolated
        # nodes, for which the matching algorithms raise
        # ``nx.AmbiguousSolution`` (exercised in the *_disconnected tests).
        G = nx.Graph()
        G.add_nodes_from(
            [
                (1, "C"),
                (1, "B"),
                (0, "G"),
                (1, "F"),
                (1, "E"),
                (0, "C"),
                (1, "D"),
                (1, "I"),
                (0, "A"),
                (0, "D"),
                (0, "F"),
                (0, "E"),
                (0, "H"),
                (1, "G"),
                (1, "A"),
                (0, "I"),
                (0, "B"),
                (1, "H"),
            ]
        )
        G.add_edges_from(
            [
                ((1, "C"), (0, "A")),
                ((1, "B"), (0, "A")),
                ((0, "G"), (1, "I")),
                ((0, "G"), (1, "H")),
                ((1, "F"), (0, "A")),
                ((1, "F"), (0, "C")),
                ((1, "F"), (0, "E")),
                ((1, "E"), (0, "A")),
                ((1, "E"), (0, "C")),
                ((0, "C"), (1, "D")),
                ((0, "C"), (1, "I")),
                ((0, "C"), (1, "G")),
                ((0, "C"), (1, "H")),
                ((1, "D"), (0, "A")),
                ((1, "I"), (0, "A")),
                ((1, "I"), (0, "E")),
                ((0, "A"), (1, "G")),
                ((0, "A"), (1, "H")),
                ((0, "E"), (1, "G")),
                ((0, "E"), (1, "H")),
            ]
        )
        self.disconnected_graph = G

    def check_match(self, matching):
        """Asserts that the matching is what we expect from the bipartite graph
        constructed in the :meth:`setup_method` fixture.
        """
        # For the sake of brevity, rename `matching` to `M`.
        M = matching
        matched_vertices = frozenset(itertools.chain(*M.items()))
        # Assert that the maximum number of vertices (10) is matched.
        assert matched_vertices == frozenset(range(12)) - {1, 10}
        # Assert that the matching is symmetric: if u is matched to v then
        # v is matched to u (both directions appear in the dictionary).
        assert all(u == M[M[u]] for u in range(12) if u in M)

    def check_vertex_cover(self, vertices):
        """Asserts that the given set of vertices is the vertex cover we
        expected from the bipartite graph constructed in the
        :meth:`setup_method` fixture.
        """
        # By Konig's theorem, the number of edges in a maximum matching equals
        # the number of vertices in a minimum vertex cover.
        assert len(vertices) == 5
        # Assert that the set is truly a vertex cover.
        for (u, v) in self.graph.edges():
            assert u in vertices or v in vertices
        # TODO Assert that the vertices are the correct ones.

    def test_eppstein_matching(self):
        """Tests that David Eppstein's implementation of the Hopcroft--Karp
        algorithm produces a maximum cardinality matching.
        """
        self.check_match(eppstein_matching(self.graph, self.top_nodes))

    def test_hopcroft_karp_matching(self):
        """Tests that the Hopcroft--Karp algorithm produces a maximum
        cardinality matching in a bipartite graph.
        """
        self.check_match(hopcroft_karp_matching(self.graph, self.top_nodes))

    def test_to_vertex_cover(self):
        """Test for converting a maximum matching to a minimum vertex cover."""
        matching = maximum_matching(self.graph, self.top_nodes)
        vertex_cover = to_vertex_cover(self.graph, matching, self.top_nodes)
        self.check_vertex_cover(vertex_cover)

    def test_eppstein_matching_simple(self):
        match = eppstein_matching(self.simple_graph)
        assert match == self.simple_solution

    def test_hopcroft_karp_matching_simple(self):
        match = hopcroft_karp_matching(self.simple_graph)
        assert match == self.simple_solution

    def test_eppstein_matching_disconnected(self):
        # Only the raised exception matters; the unused `match = ...`
        # assignment was dropped.
        with pytest.raises(nx.AmbiguousSolution):
            eppstein_matching(self.disconnected_graph)

    def test_hopcroft_karp_matching_disconnected(self):
        with pytest.raises(nx.AmbiguousSolution):
            hopcroft_karp_matching(self.disconnected_graph)

    def test_issue_2127(self):
        """Test from issue 2127"""
        # Build the example DAG
        G = nx.DiGraph()
        G.add_edges_from(
            [
                ("A", "C"),
                ("A", "B"),
                ("C", "E"),
                ("C", "D"),
                ("E", "G"),
                ("E", "F"),
                ("G", "I"),
                ("G", "H"),
            ]
        )
        tc = nx.transitive_closure(G)
        btc = nx.Graph()
        # Create a bipartite graph based on the transitive closure of G
        for v in tc.nodes():
            btc.add_node((0, v))
            btc.add_node((1, v))
        for u, v in tc.edges():
            btc.add_edge((0, u), (1, v))
        top_nodes = {n for n in btc if n[0] == 0}
        matching = hopcroft_karp_matching(btc, top_nodes)
        vertex_cover = to_vertex_cover(btc, matching, top_nodes)
        independent_set = set(G) - {v for _, v in vertex_cover}
        assert {"B", "D", "F", "I", "H"} == independent_set

    def test_vertex_cover_issue_2384(self):
        G = nx.Graph([(0, 3), (1, 3), (1, 4), (2, 3)])
        matching = maximum_matching(G)
        vertex_cover = to_vertex_cover(G, matching)
        for u, v in G.edges():
            assert u in vertex_cover or v in vertex_cover

    def test_unorderable_nodes(self):
        # Arbitrary objects have no total order; the algorithms must not
        # rely on being able to sort or compare nodes.
        a = object()
        b = object()
        c = object()
        d = object()
        e = object()
        G = nx.Graph([(a, d), (b, d), (b, e), (c, d)])
        matching = maximum_matching(G)
        vertex_cover = to_vertex_cover(G, matching)
        for u, v in G.edges():
            assert u in vertex_cover or v in vertex_cover
def test_eppstein_matching():
    """Test in accordance to issue #1927"""
    # Mixed-type node labels exercise the unorderable-node code path.
    G = nx.Graph()
    G.add_nodes_from(["a", 2, 3, 4], bipartite=0)
    G.add_nodes_from([1, "b", "c"], bipartite=1)
    G.add_edges_from([("a", 1), ("a", "b"), (2, "b"), (2, "c"), (3, "c"), (4, 1)])
    matching = eppstein_matching(G)
    # The matching must have maximum cardinality and be symmetric:
    # every matched partner appears as a key itself.
    assert len(matching) == len(maximum_matching(G))
    keys = set(matching)
    assert all(partner in keys for partner in matching.values())
class TestMinimumWeightFullMatching:
    """Tests for :func:`minimum_weight_full_matching`."""

    @classmethod
    def setup_class(cls):
        # The implementation delegates to SciPy; skip the class without it.
        pytest.importorskip("scipy")

    def test_minimum_weight_full_matching_incomplete_graph(self):
        B = nx.Graph()
        B.add_nodes_from([1, 2], bipartite=0)
        B.add_nodes_from([3, 4], bipartite=1)
        B.add_weighted_edges_from([(1, 4, 100), (2, 3, 100), (2, 4, 50)])
        matching = minimum_weight_full_matching(B)
        assert matching == {1: 4, 2: 3, 4: 1, 3: 2}

    def test_minimum_weight_full_matching_with_no_full_matching(self):
        B = nx.Graph()
        B.add_nodes_from([1, 2, 3], bipartite=0)
        B.add_nodes_from([4, 5, 6], bipartite=1)
        B.add_weighted_edges_from(
            [(1, 4, 100), (2, 4, 100), (3, 4, 50), (3, 5, 50), (3, 6, 50)]
        )
        # Nodes 1 and 2 both have node 4 as their only neighbour, so no
        # full matching can exist.
        with pytest.raises(ValueError):
            minimum_weight_full_matching(B)

    def test_minimum_weight_full_matching_square(self):
        G = nx.complete_bipartite_graph(3, 3)
        G.add_weighted_edges_from(
            [
                (0, 3, 400), (0, 4, 150), (0, 5, 400),
                (1, 3, 400), (1, 4, 450), (1, 5, 600),
                (2, 3, 300), (2, 4, 225), (2, 5, 300),
            ]
        )
        matching = minimum_weight_full_matching(G)
        assert matching == {0: 4, 1: 3, 2: 5, 4: 0, 3: 1, 5: 2}

    def test_minimum_weight_full_matching_smaller_left(self):
        G = nx.complete_bipartite_graph(3, 4)
        G.add_weighted_edges_from(
            [
                (0, 3, 400), (0, 4, 150), (0, 5, 400), (0, 6, 1),
                (1, 3, 400), (1, 4, 450), (1, 5, 600), (1, 6, 2),
                (2, 3, 300), (2, 4, 225), (2, 5, 290), (2, 6, 3),
            ]
        )
        matching = minimum_weight_full_matching(G)
        assert matching == {0: 4, 1: 6, 2: 5, 4: 0, 5: 2, 6: 1}

    def test_minimum_weight_full_matching_smaller_top_nodes_right(self):
        G = nx.complete_bipartite_graph(3, 4)
        G.add_weighted_edges_from(
            [
                (0, 3, 400), (0, 4, 150), (0, 5, 400), (0, 6, 1),
                (1, 3, 400), (1, 4, 450), (1, 5, 600), (1, 6, 2),
                (2, 3, 300), (2, 4, 225), (2, 5, 290), (2, 6, 3),
            ]
        )
        # Passing the right-hand side as top_nodes gives the same matching.
        matching = minimum_weight_full_matching(G, top_nodes=[3, 4, 5, 6])
        assert matching == {0: 4, 1: 6, 2: 5, 4: 0, 5: 2, 6: 1}

    def test_minimum_weight_full_matching_smaller_right(self):
        G = nx.complete_bipartite_graph(4, 3)
        G.add_weighted_edges_from(
            [
                (0, 4, 400), (0, 5, 400), (0, 6, 300),
                (1, 4, 150), (1, 5, 450), (1, 6, 225),
                (2, 4, 400), (2, 5, 600), (2, 6, 290),
                (3, 4, 1), (3, 5, 2), (3, 6, 3),
            ]
        )
        matching = minimum_weight_full_matching(G)
        assert matching == {1: 4, 2: 6, 3: 5, 4: 1, 5: 3, 6: 2}

    def test_minimum_weight_full_matching_negative_weights(self):
        G = nx.complete_bipartite_graph(2, 2)
        G.add_weighted_edges_from([(0, 2, -2), (0, 3, 0.2), (1, 2, -2), (1, 3, 0.3)])
        matching = minimum_weight_full_matching(G)
        assert matching == {0: 3, 1: 2, 2: 1, 3: 0}

    def test_minimum_weight_full_matching_different_weight_key(self):
        G = nx.complete_bipartite_graph(2, 2)
        G.add_weighted_edges_from(
            [(0, 2, 2), (0, 3, 0.2), (1, 2, 1), (1, 3, 2)], weight="mass"
        )
        matching = minimum_weight_full_matching(G, weight="mass")
        assert matching == {0: 3, 1: 2, 2: 1, 3: 0}
#!/bin/python3
"""
Copyright kubeinit contributors.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
"""KubeInit's CI utils."""
import os
import random
import re
from google.cloud import storage
from jinja2 import Environment, FileSystemLoader
import requests
def render_index(gc_token_path):
    """Render and upload the CI jobs index page.

    Lists every job folder under the ``jobs/`` prefix of the ``kubeinit-ci``
    Google Cloud Storage bucket, parses each folder name into the job's
    attributes, renders the Jinja2 index template and uploads the result to
    the bucket as ``index.html``.

    :param gc_token_path: path to the Google Cloud service-account
        credentials file (exported via GOOGLE_APPLICATION_CREDENTIALS).
    """
    # Google cloud Storage init
    os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = gc_token_path
    bucket_name = "kubeinit-ci"
    client = storage.Client()
    jobs = []
    print("'kubeinit_ci_utils.py' ==> Rendering CI jobs index page")
    prefix = 'jobs/'
    delimiter = None
    root_blobs = list(client.list_blobs(bucket_name,
                                        prefix=prefix,
                                        delimiter=delimiter))
    # Deduplicate down to the unique top-level folder names under 'jobs/'
    # (dict.fromkeys preserves first-seen order).
    filtered = list(dict.fromkeys([re.sub('/.*', '', sub.name.replace(prefix, '')) for sub in root_blobs]))
    print("'kubeinit_ci_utils.py' ==> Filtered blobs")
    print(filtered)
    print("'kubeinit_ci_utils.py' ==> Rendering page indexes")
    for idx, blob in enumerate(filtered):
        print(str(blob))
        # Folder name layout (dash separated), per the jobs.append() mapping
        # below: distro-driver-masters-workers-hypervisors-services_type-
        # launch_from-job_type-id-date-status
        fields = blob.split("-")
        stat = fields[10]
        if stat == '0':
            status = 'Passed'
            badge = 'success'
        elif stat == '1':
            status = 'Failed'
            badge = 'danger'
        elif stat == 'u':
            status = 'Periodic'
            badge = 'primary'
        else:
            status = 'Running'
            badge = 'warning'
        # For periodic jobs, scrape the real date and GitLab job id from the
        # first execution record instead of trusting the folder name.
        extra_data_date_url = 'https://storage.googleapis.com/kubeinit-ci/jobs/' + blob + '/records/1.html'
        resp = requests.get(url=extra_data_date_url)
        m = re.search("[0-9][0-9][0-9][0-9]\\.[0-9][0-9]\\.[0-9][0-9]\\.[0-9][0-9]\\.[0-9][0-9]\\.[0-9][0-9]", resp.text)
        if m and status == "Periodic":
            date = m.group(0)
        else:
            date = fields[9]
        m = re.search("https:\\/\\/gitlab\\.com\\/kubeinit\\/kubeinit\\/-\\/jobs\\/[0-9]+", resp.text)
        if m and status == "Periodic":
            job_id = m.group(0).split('/')[-1]
        else:
            job_id = fields[8]
        jobs.append({'status': status,
                     'index': idx,
                     'distro': fields[0],
                     'driver': fields[1],
                     'masters': fields[2],
                     'workers': fields[3],
                     'hypervisors': fields[4],
                     'services_type': fields[5],
                     'launch_from': fields[6],
                     'job_type': fields[7],
                     'id': job_id,
                     'date': date,
                     'badge': badge,
                     'url': 'https://storage.googleapis.com/kubeinit-ci/jobs/' + blob + '/index.html'})
    # Render the Jinja2 template that lives next to this file.
    path = os.path.join(os.path.dirname(__file__))
    file_loader = FileSystemLoader(searchpath=path)
    env = Environment(loader=file_loader)
    template_index = "kubeinit_ci_logs.html.j2"
    print("'kubeinit_ci_utils.py' ==> The path for the template is: " + path)
    template = env.get_template(template_index)
    output = template.render(jobs=jobs)
    bucket = client.get_bucket(bucket_name)
    blob = bucket.blob('index.html')
    blob.upload_from_string(output, content_type='text/html')
def upload_logs_to_google_cloud(job_path, gc_token_path):
    """Upload the CI results to Google Cloud Storage.

    Walks ``job_path`` (relative to the current working directory) and
    uploads every file found to the ``jobs/`` prefix of the ``kubeinit-ci``
    bucket, preserving the relative directory layout.

    :param job_path: directory containing the job artifacts, relative
        to the current working directory.
    :param gc_token_path: path to the Google Cloud service-account
        credentials file.
    :return: 0 on success, 1 if any file (or the setup) failed.
    """
    return_code = 0
    try:
        os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = gc_token_path
        bucket_name = "kubeinit-ci"
        bucket = storage.Client().get_bucket(bucket_name)
        print("'kubeinit_ci_utils.py' ==> ----Uploading logs----")
        print("'kubeinit_ci_utils.py' ==> Path at terminal when executing this file")
        print(os.getcwd() + "\n")
        print("'kubeinit_ci_utils.py' ==> This file path, relative to os.getcwd()")
        print(__file__ + "\n")
        file_list = []
        path_to_upload = os.path.join(os.getcwd(), job_path)
        print("'kubeinit_ci_utils.py' ==> Path to upload: " + path_to_upload)
        for root, _dirs, files in os.walk(path_to_upload):
            for file_name in files:
                file_list.append(os.path.join(root, file_name))
        # Strip the CWD so blob names become relative paths under 'jobs/'.
        prefix_path = os.getcwd() + '/'
        print("'kubeinit_ci_utils.py' ==> The initial path: " + prefix_path + " will be removed")
        for entry in file_list:
            try:
                blob = bucket.blob('jobs/' + entry.replace(prefix_path, ''))
                blob.upload_from_filename(entry)
            except Exception as e:
                # Best effort: keep uploading the remaining files, just
                # record the failure.  (Typo "hapened" fixed in messages.)
                print("'kubeinit_ci_utils.py' ==> An exception happened adding the initial log files, some files could not be added")
                print(e)
                return_code = 1
    except Exception as e:
        print("'kubeinit_ci_utils.py' ==> An exception happened uploading files to Google Cloud Storage")
        print(e)
        return_code = 1
    return return_code
def remove_label(the_label, pr, repo):
    """Remove *the_label* from pull request *pr*.

    BUG FIX: the previous version *created* the label in the repository
    when it did not exist, only to immediately remove it from the PR.
    A label that is missing from the repository cannot be attached to
    the PR, so there is nothing to remove and creation is skipped.

    :param the_label: label name to remove.
    :param pr: pull request object.
    :param repo: repository object the label belongs to.
    """
    labels = list(repo.get_labels())
    if any(label.name == the_label for label in labels):
        r_label = repo.get_label(the_label)
        pr.remove_from_labels(r_label)
def add_label(the_label, pr, repo):
    """Assign a label to a pull request, creating it in the repo if needed."""
    existing_names = {label.name for label in repo.get_labels()}
    if the_label in existing_names:
        new_label = repo.get_label(the_label)
    else:
        # "32CD32" is the lime-green colour used for kubeinit CI labels.
        new_label = repo.create_label(the_label, "32CD32")
    pr.add_to_labels(new_label)
def get_periodic_jobs_labels(distro='all'):
    """Return the CI job labels to run for *distro*.

    :param distro: one of
        - 'all': every known scenario,
        - 'random': four randomly sampled scenarios,
        - a comma-separated list of exact scenario labels,
        - a string naming one or more distros ('okd', 'rke', 'kid',
          'eks', 'cdk', 'k8s', 'okd.rke').
    :return: a list of labels (a set when exact labels were requested).
    :raises Exception: if explicitly requested labels are not all known.
    """
    # DISTRO-DRIVER-CONTROLLERS-COMPUTES-HYPERVISORS-[VIRTUAL_SERVICES|CONTAINERIZED_SERVICES]-[LAUNCH_FROM_CONTAINER|LAUNCH_FROM_HOST]
    cdk_configs = ["cdk-libvirt-3-1-3-v-c",
                   "cdk-libvirt-3-0-1-c-h",
                   "cdk-libvirt-1-1-1-c-c",
                   "cdk-libvirt-1-0-1-v-h"]
    okd_configs = ["okd-libvirt-3-1-1-c-h",
                   "okd-libvirt-3-0-3-v-c",
                   "okd-libvirt-1-1-1-v-h",
                   "okd-libvirt-1-0-1-c-c"]
    rke_configs = ["rke-libvirt-3-1-3-c-h",
                   "rke-libvirt-3-0-1-v-h",
                   "rke-libvirt-1-1-1-c-c",
                   "rke-libvirt-1-0-1-v-c"]
    k8s_configs = ["k8s-libvirt-3-1-1-v-h",
                   "k8s-libvirt-3-0-3-c-c",
                   "k8s-libvirt-1-1-1-c-h",
                   "k8s-libvirt-1-0-1-v-c"]
    eks_configs = ["eks-libvirt-3-1-3-v-c",
                   "eks-libvirt-3-0-1-v-h",
                   "eks-libvirt-1-1-1-c-h",
                   "eks-libvirt-1-0-1-c-c"]
    kid_configs = ["kid-libvirt-3-1-1-v-h",
                   "kid-libvirt-3-0-3-c-h",
                   "kid-libvirt-1-1-1-v-c",
                   "kid-libvirt-1-0-1-c-c"]
    okd_rke_configs = ["okd.rke-libvirt-1-2-1-v-c",
                       "okd.rke-libvirt-3-1-1-v-h"]
    all_configs = (okd_configs + kid_configs + eks_configs + rke_configs
                   + cdk_configs + k8s_configs + okd_rke_configs)
    # BUG FIX: inside a regex character class '|' is a literal pipe, so the
    # old pattern "[a-z|0-9|\.]...[v|c]-[c|h]" also accepted labels that
    # contained '|'. The classes below spell only the intended alphabets.
    if re.match(r"([a-z0-9.]+-[a-z]+-[1-9]-[0-9]-[1-9]-[vc]-[ch],?)+", distro):
        print("'kubeinit_ci_utils.py' ==> We are requesting specific job labels")
        req_labels = set(distro.split(","))
        all_labels = set(all_configs)
        if req_labels.issubset(all_labels):
            print("'kubeinit_ci_utils.py' ==> The requested labels are defined correctly")
            return req_labels
        else:
            print("'kubeinit_ci_utils.py' ==> The requested labels are not a subset of the allowed labels")
            raise Exception("'kubeinit_ci_utils.py' ==> STOP!")
    elif distro == 'random':
        print("'kubeinit_ci_utils.py' ==> Returning 4 random scenarios to test")
        # If the distro parameter is random we return 4 random distros to test
        return random.sample(all_configs, 4)
    elif distro == "all":
        print("'kubeinit_ci_utils.py' ==> Appending all configs")
        return all_configs
    else:
        configs = []
        # 'okd.rke' contains both 'okd' and 'rke' as substrings, hence the
        # explicit exclusions in the first two branches.
        if 'okd' in distro and 'okd.rke' not in distro:
            print("'kubeinit_ci_utils.py' ==> Appending OKD configs")
            configs = configs + okd_configs
        if 'rke' in distro and 'okd.rke' not in distro:
            print("'kubeinit_ci_utils.py' ==> Appending RKE configs")
            configs = configs + rke_configs
        if 'kid' in distro:
            print("'kubeinit_ci_utils.py' ==> Appending KID configs")
            configs = configs + kid_configs
        if 'eks' in distro:
            print("'kubeinit_ci_utils.py' ==> Appending EKS configs")
            configs = configs + eks_configs
        if 'cdk' in distro:
            print("'kubeinit_ci_utils.py' ==> Appending CDK configs")
            configs = configs + cdk_configs
        if 'k8s' in distro:
            print("'kubeinit_ci_utils.py' ==> Appending K8S configs")
            configs = configs + k8s_configs
        if 'okd.rke' in distro:
            print("'kubeinit_ci_utils.py' ==> Appending OKD.RKE configs")
            configs = configs + okd_rke_configs
        return configs
# Repository: AliciaCurth/CATENets
import abc
import copy
from typing import Any, Optional, Tuple
import numpy as np
import torch
from sklearn.model_selection import StratifiedKFold
from torch import nn
from catenets.models.constants import (
DEFAULT_BATCH_SIZE,
DEFAULT_CF_FOLDS,
DEFAULT_LAYERS_OUT,
DEFAULT_LAYERS_OUT_T,
DEFAULT_LAYERS_R,
DEFAULT_LAYERS_R_T,
DEFAULT_N_ITER,
DEFAULT_N_ITER_MIN,
DEFAULT_N_ITER_PRINT,
DEFAULT_NONLIN,
DEFAULT_PATIENCE,
DEFAULT_PENALTY_L2,
DEFAULT_SEED,
DEFAULT_STEP_SIZE,
DEFAULT_STEP_SIZE_T,
DEFAULT_UNITS_OUT,
DEFAULT_UNITS_OUT_T,
DEFAULT_UNITS_R,
DEFAULT_UNITS_R_T,
DEFAULT_VAL_SPLIT,
)
from catenets.models.torch.base import (
DEVICE,
BaseCATEEstimator,
BasicNet,
PropensityNet,
)
from catenets.models.torch.utils.model_utils import predict_wrapper, train_wrapper
from catenets.models.torch.utils.transformations import (
dr_transformation_cate,
pw_transformation_cate,
ra_transformation_cate,
u_transformation_cate,
)
class PseudoOutcomeLearner(BaseCATEEstimator):
"""
Class implements TwoStepLearners based on pseudo-outcome regression as discussed in
Curth & van der Schaar (2021): RA-learner, PW-learner and DR-learner
Parameters
----------
n_unit_in: int
Number of features
binary_y: bool, default False
Whether the outcome is binary
po_estimator: sklearn/PyTorch model, default: None
Custom potential outcome model. If this parameter is set, the rest of the parameters are ignored.
te_estimator: sklearn/PyTorch model, default: None
Custom treatment effects model. If this parameter is set, the rest of the parameters are ignored.
n_folds: int, default 1
Number of cross-fitting folds. If 1, no cross-fitting
n_layers_out: int
First stage Number of hypothesis layers (n_layers_out x n_units_out + 1 x Linear layer)
n_units_out: int
First stage Number of hidden units in each hypothesis layer
n_layers_r: int
Number of shared & private representation layers before hypothesis layers
n_units_r: int
Number of hidden units in representation shared before the hypothesis layers.
n_layers_out_t: int
Second stage Number of hypothesis layers (n_layers_out x n_units_out + 1 x Linear layer)
n_units_out_t: int
Second stage Number of hidden units in each hypothesis layer
n_layers_out_prop: int
Number of hypothesis layers for propensity score(n_layers_out x n_units_out + 1 x Dense
layer)
n_units_out_prop: int
Number of hidden units in each propensity score hypothesis layer
weight_decay: float
First stage l2 (ridge) penalty
weight_decay_t: float
Second stage l2 (ridge) penalty
lr: float
First stage learning rate for optimizer
    lr_t: float
Second stage learning rate for optimizer
n_iter: int
Maximum number of iterations
batch_size: int
Batch size
val_split_prop: float
Proportion of samples used for validation split (can be 0)
n_iter_print: int
Number of iterations after which to print updates
seed: int
Seed used
nonlin: string, default 'elu'
Nonlinearity to use in NN. Can be 'elu', 'relu', 'selu' or 'leaky_relu'.
weighting_strategy: str, default "prop"
Weighting strategy. Can be "prop" or "1-prop".
patience: int
Number of iterations to wait before early stopping after decrease in validation loss
n_iter_min: int
Minimum number of iterations to go through before starting early stopping
"""
def __init__(
self,
n_unit_in: int,
binary_y: bool,
po_estimator: Any = None,
te_estimator: Any = None,
n_folds: int = DEFAULT_CF_FOLDS,
n_layers_out: int = DEFAULT_LAYERS_OUT,
n_layers_out_t: int = DEFAULT_LAYERS_OUT_T,
n_units_out: int = DEFAULT_UNITS_OUT,
n_units_out_t: int = DEFAULT_UNITS_OUT_T,
n_units_out_prop: int = DEFAULT_UNITS_OUT,
n_layers_out_prop: int = 0,
weight_decay: float = DEFAULT_PENALTY_L2,
weight_decay_t: float = DEFAULT_PENALTY_L2,
lr: float = DEFAULT_STEP_SIZE,
lr_t: float = DEFAULT_STEP_SIZE_T,
n_iter: int = DEFAULT_N_ITER,
batch_size: int = DEFAULT_BATCH_SIZE,
val_split_prop: float = DEFAULT_VAL_SPLIT,
n_iter_print: int = DEFAULT_N_ITER_PRINT,
seed: int = DEFAULT_SEED,
nonlin: str = DEFAULT_NONLIN,
weighting_strategy: Optional[str] = "prop",
patience: int = DEFAULT_PATIENCE,
n_iter_min: int = DEFAULT_N_ITER_MIN,
batch_norm: bool = True,
early_stopping: bool = True
):
super(PseudoOutcomeLearner, self).__init__()
self.n_unit_in = n_unit_in
self.binary_y = binary_y
self.n_layers_out = n_layers_out
self.n_units_out = n_units_out
self.n_units_out_prop = n_units_out_prop
self.n_layers_out_prop = n_layers_out_prop
self.weight_decay_t = weight_decay_t
self.weight_decay = weight_decay
self.weighting_strategy = weighting_strategy
self.lr = lr
self.lr_t = lr_t
self.n_iter = n_iter
self.batch_size = batch_size
self.val_split_prop = val_split_prop
self.n_iter_print = n_iter_print
self.seed = seed
self.nonlin = nonlin
self.n_folds = n_folds
self.patience = patience
self.n_iter_min = n_iter_min
self.n_layers_out_t = n_layers_out_t
self.n_units_out_t = n_units_out_t
self.n_layers_out = n_layers_out
self.n_units_out = n_units_out
self.batch_norm = batch_norm
self.early_stopping = early_stopping
# set estimators
self._te_template = te_estimator
self._po_template = po_estimator
self._te_estimator = self._generate_te_estimator()
self._po_estimator = self._generate_te_estimator()
if weighting_strategy is not None:
self._propensity_estimator = self._generate_propensity_estimator()
    def _generate_te_estimator(self, name: str = "te_estimator") -> nn.Module:
        """Build a fresh second-stage (treatment effect) regression net.

        Returns a deep copy of the user-supplied template when one was given;
        otherwise constructs a BasicNet with the second-stage (``*_t``)
        hyperparameters.  ``binary_y`` is fixed to False: the regression
        target here is a pseudo-outcome, which is continuous.
        """
        if self._te_template is not None:
            return copy.deepcopy(self._te_template)
        return BasicNet(
            name,
            self.n_unit_in,
            binary_y=False,
            n_layers_out=self.n_layers_out_t,
            n_units_out=self.n_units_out_t,
            weight_decay=self.weight_decay_t,
            lr=self.lr_t,
            n_iter=self.n_iter,
            batch_size=self.batch_size,
            val_split_prop=self.val_split_prop,
            n_iter_print=self.n_iter_print,
            seed=self.seed,
            nonlin=self.nonlin,
            patience=self.patience,
            n_iter_min=self.n_iter_min,
            batch_norm=self.batch_norm,
            early_stopping=self.early_stopping
        ).to(DEVICE)
    def _generate_po_estimator(self, name: str = "po_estimator") -> nn.Module:
        """Build a fresh first-stage (potential outcome) net.

        Returns a deep copy of the user-supplied template when one was given;
        otherwise constructs a BasicNet with the first-stage hyperparameters
        and the outcome type (``binary_y``) declared at construction.
        """
        if self._po_template is not None:
            return copy.deepcopy(self._po_template)
        return BasicNet(
            name,
            self.n_unit_in,
            binary_y=self.binary_y,
            n_layers_out=self.n_layers_out,
            n_units_out=self.n_units_out,
            weight_decay=self.weight_decay,
            lr=self.lr,
            n_iter=self.n_iter,
            batch_size=self.batch_size,
            val_split_prop=self.val_split_prop,
            n_iter_print=self.n_iter_print,
            seed=self.seed,
            nonlin=self.nonlin,
            patience=self.patience,
            n_iter_min=self.n_iter_min,
            batch_norm=self.batch_norm,
            early_stopping=self.early_stopping
        ).to(DEVICE)
    def _generate_propensity_estimator(
        self, name: str = "propensity_estimator"
    ) -> nn.Module:
        """Build a fresh PropensityNet for the binary treatment.

        Raises
        ------
        ValueError
            If ``weighting_strategy`` is None (no propensity model is
            configured for this learner).
        """
        if self.weighting_strategy is None:
            raise ValueError("Invalid weighting_strategy for PropensityNet")
        return PropensityNet(
            name,
            self.n_unit_in,
            2,  # number of treatments
            self.weighting_strategy,
            n_units_out_prop=self.n_units_out_prop,
            n_layers_out_prop=self.n_layers_out_prop,
            weight_decay=self.weight_decay,
            lr=self.lr,
            n_iter=self.n_iter,
            batch_size=self.batch_size,
            n_iter_print=self.n_iter_print,
            seed=self.seed,
            nonlin=self.nonlin,
            val_split_prop=self.val_split_prop,
            batch_norm=self.batch_norm,
            early_stopping=self.early_stopping
        ).to(DEVICE)
def train(
self, X: torch.Tensor, y: torch.Tensor, w: torch.Tensor
) -> "PseudoOutcomeLearner":
"""
Train treatment effects nets.
Parameters
----------
X: array-like of shape (n_samples, n_features)
Train-sample features
y: array-like of shape (n_samples,)
Train-sample labels
w: array-like of shape (n_samples,)
Train-sample treatments
"""
X = self._check_tensor(X).float()
y = self._check_tensor(y).squeeze().float()
w = self._check_tensor(w).squeeze().float()
n = len(y)
# STEP 1: fit plug-in estimators via cross-fitting
if self.n_folds == 1:
pred_mask = np.ones(n, dtype=bool)
# fit plug-in models
mu_0_pred, mu_1_pred, p_pred = self._first_step(
X, y, w, pred_mask, pred_mask
)
else:
mu_0_pred, mu_1_pred, p_pred = (
torch.zeros(n).to(DEVICE),
torch.zeros(n).to(DEVICE),
torch.zeros(n).to(DEVICE),
)
# create folds stratified by treatment assignment to ensure balance
splitter = StratifiedKFold(
n_splits=self.n_folds, shuffle=True, random_state=self.seed
)
for train_index, test_index in splitter.split(X.cpu(), w.cpu()):
# create masks
pred_mask = torch.zeros(n, dtype=bool).to(DEVICE)
pred_mask[test_index] = 1
# fit plug-in te_estimator
(
mu_0_pred[pred_mask],
mu_1_pred[pred_mask],
p_pred[pred_mask],
) = self._first_step(X, y, w, ~pred_mask, pred_mask)
# use estimated propensity scores
if self.weighting_strategy is not None:
p = p_pred
# STEP 2: direct TE estimation
self._second_step(X, y, w, p, mu_0_pred, mu_1_pred)
return self
def predict(self, X: torch.Tensor, return_po: bool = False) -> torch.Tensor:
"""
Predict treatment effects
Parameters
----------
X: array-like of shape (n_samples, n_features)
Test-sample features
Returns
-------
te_est: array-like of shape (n_samples,)
Predicted treatment effects
"""
if return_po:
raise NotImplementedError(
"PseudoOutcomeLearners have no Potential outcome predictors."
)
X = self._check_tensor(X).float()
return predict_wrapper(self._te_estimator, X)
    @abc.abstractmethod
    def _first_step(
        self,
        X: torch.Tensor,
        y: torch.Tensor,
        w: torch.Tensor,
        fit_mask: torch.Tensor,
        pred_mask: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Fit the nuisance models on the ``fit_mask`` rows and return
        ``(mu_0, mu_1, p)`` predictions for the ``pred_mask`` rows.
        Concrete learners may return ``np.nan`` for entries they do not
        need (e.g. the PW-learner returns nan potential outcomes)."""
        pass
    @abc.abstractmethod
    def _second_step(
        self,
        X: torch.Tensor,
        y: torch.Tensor,
        w: torch.Tensor,
        p: torch.Tensor,
        mu_0: torch.Tensor,
        mu_1: torch.Tensor,
    ) -> None:
        """Fit the second-stage treatment-effect estimator from the
        first-stage predictions: propensity ``p`` and potential-outcome
        estimates ``mu_0``/``mu_1``."""
        pass
    def _impute_pos(
        self,
        X: torch.Tensor,
        y: torch.Tensor,
        w: torch.Tensor,
        fit_mask: torch.Tensor,
        pred_mask: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Fit one outcome model per treatment arm on the ``fit_mask`` rows
        and predict both potential outcomes on the ``pred_mask`` rows.

        Returns ``(mu_0_pred, mu_1_pred)``: predictions of the untreated and
        treated outcome models on ``X[pred_mask]``.
        """
        # split sample
        X_fit, Y_fit, W_fit = X[fit_mask, :], y[fit_mask], w[fit_mask]

        # fit two separate (standard) models
        # untreated model
        temp_model_0 = self._generate_po_estimator("po_estimator_0_impute_pos")
        train_wrapper(temp_model_0, X_fit[W_fit == 0], Y_fit[W_fit == 0])

        # treated model
        temp_model_1 = self._generate_po_estimator("po_estimator_1_impute_pos")
        train_wrapper(temp_model_1, X_fit[W_fit == 1], Y_fit[W_fit == 1])

        mu_0_pred = predict_wrapper(temp_model_0, X[pred_mask, :])
        mu_1_pred = predict_wrapper(temp_model_1, X[pred_mask, :])

        return mu_0_pred, mu_1_pred
    def _impute_propensity(
        self,
        X: torch.Tensor,
        w: torch.Tensor,
        fit_mask: torch.Tensor,  # annotation fixed: was lowercase `torch.tensor`
        pred_mask: torch.Tensor,
    ) -> torch.Tensor:
        """Fit a propensity model on the ``fit_mask`` rows and return its
        importance weights for the ``pred_mask`` rows."""
        # split sample
        X_fit, W_fit = X[fit_mask, :], w[fit_mask]

        # fit propensity estimator
        temp_propensity_estimator = self._generate_propensity_estimator(
            "prop_estimator_impute_propensity"
        )
        train_wrapper(temp_propensity_estimator, X_fit, W_fit)

        # predict propensity on hold out
        return temp_propensity_estimator.get_importance_weights(
            X[pred_mask, :], w[pred_mask]
        )
    def _impute_unconditional_mean(
        self,
        X: torch.Tensor,
        y: torch.Tensor,
        fit_mask: torch.Tensor,
        pred_mask: torch.Tensor,
    ) -> torch.Tensor:
        """Fit an outcome model on all ``fit_mask`` rows (ignoring the
        treatment indicator) and predict the unconditional mean outcome on
        the ``pred_mask`` rows."""
        # R-learner and U-learner need to impute unconditional mean
        X_fit, Y_fit = X[fit_mask, :], y[fit_mask]

        # fit model
        temp_model = self._generate_po_estimator("po_est_impute_unconditional_mean")
        train_wrapper(temp_model, X_fit, Y_fit)

        return predict_wrapper(temp_model, X[pred_mask, :])
class DRLearner(PseudoOutcomeLearner):
    """
    DR-learner for CATE estimation, based on doubly robust AIPW pseudo-outcome
    """

    def _first_step(
        self,
        X: torch.Tensor,
        y: torch.Tensor,
        w: torch.Tensor,
        fit_mask: torch.Tensor,
        pred_mask: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        # The DR-learner needs all three nuisance estimates: both potential
        # outcomes and the propensity score.
        mu0_pred, mu1_pred = self._impute_pos(X, y, w, fit_mask, pred_mask)
        p_pred = self._impute_propensity(X, w, fit_mask, pred_mask).squeeze()
        return mu0_pred.squeeze(), mu1_pred.squeeze(), p_pred

    def _second_step(
        self,
        X: torch.Tensor,
        y: torch.Tensor,
        w: torch.Tensor,
        p: torch.Tensor,
        mu_0: torch.Tensor,
        mu_1: torch.Tensor,
    ) -> None:
        # Regress the AIPW pseudo-outcome on X; detach so no gradients flow
        # back into the first-stage models.
        pseudo_outcome = dr_transformation_cate(y, w, p, mu_0, mu_1)
        train_wrapper(self._te_estimator, X, pseudo_outcome.detach())
class PWLearner(PseudoOutcomeLearner):
    """
    PW-learner for CATE estimation, based on singly robust Horvitz Thompson pseudo-outcome
    """

    def _first_step(
        self,
        X: torch.Tensor,
        y: torch.Tensor,
        w: torch.Tensor,
        fit_mask: torch.Tensor,
        pred_mask: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        # Only the propensity score is required; potential-outcome slots are
        # filled with nan placeholders.
        mu0_pred, mu1_pred = np.nan, np.nan  # not needed
        p_pred = self._impute_propensity(X, w, fit_mask, pred_mask).squeeze()
        return mu0_pred, mu1_pred, p_pred

    def _second_step(
        self,
        X: torch.Tensor,
        y: torch.Tensor,
        w: torch.Tensor,
        p: torch.Tensor,
        mu_0: torch.Tensor,
        mu_1: torch.Tensor,
    ) -> None:
        # Regress the Horvitz-Thompson pseudo-outcome (built from y, w, p
        # only) on X.
        pseudo_outcome = pw_transformation_cate(y, w, p)
        train_wrapper(self._te_estimator, X, pseudo_outcome.detach())
class RALearner(PseudoOutcomeLearner):
    """
    RA-learner for CATE estimation, based on singly robust regression-adjusted pseudo-outcome
    """

    def _first_step(
        self,
        X: torch.Tensor,
        y: torch.Tensor,
        w: torch.Tensor,
        fit_mask: torch.Tensor,
        pred_mask: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        # Only the potential-outcome models are required; the propensity slot
        # is a nan placeholder.
        mu0_pred, mu1_pred = self._impute_pos(X, y, w, fit_mask, pred_mask)
        p_pred = np.nan  # not needed
        return mu0_pred.squeeze(), mu1_pred.squeeze(), p_pred

    def _second_step(
        self,
        X: torch.Tensor,
        y: torch.Tensor,
        w: torch.Tensor,
        p: torch.Tensor,
        mu_0: torch.Tensor,
        mu_1: torch.Tensor,
    ) -> None:
        # Regress the regression-adjusted pseudo-outcome on X.
        pseudo_outcome = ra_transformation_cate(y, w, p, mu_0, mu_1)
        train_wrapper(self._te_estimator, X, pseudo_outcome.detach())
class ULearner(PseudoOutcomeLearner):
    """
    U-learner for CATE estimation. Based on pseudo-outcome (Y-mu(x))/(w-pi(x))
    """

    def _first_step(
        self,
        X: torch.Tensor,
        y: torch.Tensor,
        w: torch.Tensor,
        fit_mask: torch.Tensor,
        pred_mask: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        # The unconditional mean mu(x) is returned in the mu_0 slot (the only
        # outcome nuisance the U-learner needs) alongside the propensity.
        mu_pred = self._impute_unconditional_mean(X, y, fit_mask, pred_mask).squeeze()
        mu1_pred = np.nan  # only have one thing to impute here
        p_pred = self._impute_propensity(X, w, fit_mask, pred_mask).squeeze()
        return mu_pred, mu1_pred, p_pred

    def _second_step(
        self,
        X: torch.Tensor,
        y: torch.Tensor,
        w: torch.Tensor,
        p: torch.Tensor,
        mu_0: torch.Tensor,
        mu_1: torch.Tensor,
    ) -> None:
        # mu_0 carries the unconditional mean from _first_step; mu_1 is unused.
        pseudo_outcome = u_transformation_cate(y, w, p, mu_0)
        train_wrapper(self._te_estimator, X, pseudo_outcome.detach())
class RLearner(PseudoOutcomeLearner):
    """
    R-learner for CATE estimation. Based on pseudo-outcome (Y-mu(x))/(w-pi(x)) and sample weight
    (w-pi(x))^2 -- can only be implemented if .fit of te_estimator takes argument 'sample_weight'.
    """

    def _first_step(
        self,
        X: torch.Tensor,
        y: torch.Tensor,
        w: torch.Tensor,
        fit_mask: torch.Tensor,
        pred_mask: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        # Same nuisance estimates as the U-learner: unconditional mean (in
        # the mu_0 slot) and propensity score.
        mu_pred = self._impute_unconditional_mean(X, y, fit_mask, pred_mask).squeeze()
        mu1_pred = np.nan  # only have one thing to impute here
        p_pred = self._impute_propensity(X, w, fit_mask, pred_mask).squeeze()
        return mu_pred, mu1_pred, p_pred

    def _second_step(
        self,
        X: torch.Tensor,
        y: torch.Tensor,
        w: torch.Tensor,
        p: torch.Tensor,
        mu_0: torch.Tensor,
        mu_1: torch.Tensor,
    ) -> None:
        # Same pseudo-outcome as the U-learner, but each sample is weighted
        # by (w - pi(x))^2 in the second-stage regression.
        pseudo_outcome = u_transformation_cate(y, w, p, mu_0)
        train_wrapper(
            self._te_estimator, X, pseudo_outcome.detach(), weight=(w - p) ** 2
        )
class XLearner(PseudoOutcomeLearner):
    """X-learner for CATE estimation.

    Combines two CATE estimates via a weighting function g(x):
        tau(x) = g(x) tau_0(x) + (1 - g(x)) tau_1(x)
    """

    def __init__(
        self,
        *args: Any,
        weighting_strategy: str = "prop",
        **kwargs: Any,
    ) -> None:
        super().__init__(*args, **kwargs)
        # How to compute g(x); "prop" uses the fitted propensity estimator.
        self.weighting_strategy = weighting_strategy

    def _first_step(
        self,
        X: torch.Tensor,
        y: torch.Tensor,
        w: torch.Tensor,
        fit_mask: torch.Tensor,
        pred_mask: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        # Impute both potential outcomes; no propensity is needed here,
        # so the last slot is a NaN placeholder.
        mu0_pred, mu1_pred = self._impute_pos(X, y, w, fit_mask, pred_mask)
        return mu0_pred.squeeze(), mu1_pred.squeeze(), np.nan

    def _second_step(
        self,
        X: torch.Tensor,
        y: torch.Tensor,
        w: torch.Tensor,
        p: torch.Tensor,
        mu_0: torch.Tensor,
        mu_1: torch.Tensor,
    ) -> None:
        """Fit one CATE model per treatment group plus the weighting model."""
        control = w == 0
        treated = w == 1
        # Control group: tau_0 regresses mu_1(x) - Y on untreated samples.
        self._te_estimator_0 = self._generate_te_estimator("te_estimator_0_xnet")
        train_wrapper(self._te_estimator_0, X[control], (mu_1[control] - y[control]).detach())
        # Treated group: tau_1 regresses Y - mu_0(x) on treated samples.
        self._te_estimator_1 = self._generate_te_estimator("te_estimator_1_xnet")
        train_wrapper(self._te_estimator_1, X[treated], (y[treated] - mu_0[treated]).detach())
        # Weighting function g(x): propensity model fit on all samples.
        train_wrapper(self._propensity_estimator, X, w)

    def predict(self, X: torch.Tensor, return_po: bool = False) -> torch.Tensor:
        """
        Predict treatment effects

        Parameters
        ----------
        X: array-like of shape (n_samples, n_features)
            Test-sample features
        return_po: bool, default False
            Whether to return potential outcome predictions. Placeholder, can only accept False.

        Returns
        -------
        te_est: array-like of shape (n_samples,)
            Predicted treatment effects
        """
        if return_po:
            raise NotImplementedError(
                "PseudoOutcomeLearners have no Potential outcome predictors."
            )
        features = self._check_tensor(X).float().to(DEVICE)
        tau_control = predict_wrapper(self._te_estimator_0, features)
        tau_treated = predict_wrapper(self._te_estimator_1, features)
        g = self._propensity_estimator.get_importance_weights(features)
        return g * tau_control + (1 - g) * tau_treated
|
<filename>tgbot/receivers.py
import multiprocessing
import signal
import requests
import ssl
import json
import BaseHTTPServer
import subprocess
import os
import sys
import time
import logging
from telegram import BotAPI
class ReceiveProcess(multiprocessing.Process):
    """Base class for worker processes that push Telegram updates onto a queue."""

    def __init__(self, token, q):
        multiprocessing.Process.__init__(self)
        self.token = token
        self.q = q
        self.api = BotAPI(token)

    def setup(self):
        """Hook executed before the process starts; subclasses may override."""
        pass

    def run(self):
        raise NotImplementedError()
class APIReceiver(ReceiveProcess):
    """Receives updates by long-polling the Telegram getUpdates API."""

    def __init__(self, token, q):
        ReceiveProcess.__init__(self, token, q)
        # Offset of the next update to request; None until the first batch.
        self.offset = None

    def setup(self):
        # Long polling and webhooks are mutually exclusive in the Bot API.
        self.api.remove_webhook()

    def fetch_updates(self):
        """Long-poll once and enqueue every received update."""
        # BUG FIX: logging.log() requires a numeric level as its first
        # argument; logging.log("Fetching updates") raised a TypeError.
        logging.info("Fetching updates")
        if self.offset is not None:
            response = self.api.get_updates(offset = self.offset, timeout = 120)
        else:
            response = self.api.get_updates(timeout = 120)
        updates = response["result"]
        if updates:
            # Acknowledge everything received so far so it is not re-sent.
            self.offset = updates[-1]["update_id"] + 1
        for update in updates:
            self.q.put(update)

    def run(self):
        signal.signal(signal.SIGINT, signal.SIG_IGN) # Make sure KeyboardInterrupts are not sent to this process
        while True:
            try:
                self.fetch_updates()
            except Exception as e:
                logging.error("An exception occurred inside recv process:\n" + str(e))
                time.sleep(120) # Prevent making unnecessary fetches when Telegram API is down
class WebhookRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Handles HTTP(S) requests from Telegram's webhook delivery."""

    def do_GET(self):
        # Simple health-check page.
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.end_headers()
        self.wfile.write("tgbot running <b style='color:green;'>OK</b>")

    def do_POST(self):
        """Validate, parse and enqueue an update POSTed by Telegram."""
        # The webhook path embeds the bot token; reject anything else.
        if self.path != self.server.receiver.webhook_path():
            self.send_error(404)
            return
        try:
            content_len = int(self.headers.getheader("Content-Length", 0))
        # BUG FIX: narrowed the former bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit) to the errors int()/getheader can raise.
        except (TypeError, ValueError):
            self.send_error(400)
            return
        update_json = self.rfile.read(content_len)
        try:
            update = json.loads(update_json)
        except ValueError:
            logging.error("Could not parse JSON sent to webhook by Telegram")
            self.send_error(400)
            return
        # BUG FIX: logging.log() requires a numeric level as its first
        # argument; use logging.info() instead.
        logging.info("Webhook server received update from Telegram")
        self.server.receiver.q.put(update)
        self.send_response(200)
        self.send_header("Content-type", "application/json")
        self.end_headers()

    def log_error(self, *args, **kwargs):
        return # Stop debug printing

    def log_message(self, *args, **kwargs):
        return # Stop debug printing
class WebhookServer(BaseHTTPServer.HTTPServer):
    """HTTP server that hands webhook POSTs to its owning WebhookReceiver."""

    def __init__(self, receiver, port):
        BaseHTTPServer.HTTPServer.__init__(self, ("", port), WebhookRequestHandler)
        self.receiver = receiver

    def upgrade_ssl(self, certificate_path, key_path):
        """Wrap the listening socket with TLS using the given cert/key pair."""
        self.socket = ssl.wrap_socket(
            self.socket,
            certfile = certificate_path,
            keyfile = key_path,
            server_side = True,
        )
class WebhookReceiver(ReceiveProcess):
    """Receives updates via a self-hosted HTTPS webhook endpoint."""

    def __init__(self, token, q, port = 8443, openssl_command = "openssl"):
        ReceiveProcess.__init__(self, token, q)
        self.port = port
        self.ip = None  # public IP address, resolved lazily in setup()
        self.openssl_command = openssl_command

    def fetch_public_ip(self):
        """Discover this machine's public IP via an external service."""
        # BUG FIX: logging.log() requires a numeric level; use logging.info().
        logging.info("Getting public IP address")
        response = requests.get("http://ip.42.pl/raw")
        self.ip = response.text.strip()
        if response.status_code != 200 or len(self.ip) == 0:
            raise Exception("Could not find public IP")
        logging.info("Found IP " + self.ip)

    def webhook_path(self):
        # Using the bot token as the URL path keeps the endpoint unguessable.
        return "/" + self.token

    def webhook_url(self):
        return "https://{0}:{1}{2}".format(self.ip, self.port, self.webhook_path())

    def working_dir(self):
        return os.getcwd()

    def private_key_path(self):
        return self.working_dir() + "/tgbot_priv.key"

    def public_key_path(self):
        return self.working_dir() + "/tgbot_pub.pem"

    def gen_certificate(self):
        """Generate a self-signed certificate pair accepted by Telegram."""
        logging.info("Generating self-signed certificate pair")
        error = False
        try:
            # BUG FIX: subprocess.call() was given one big command string
            # without shell=True, so the entire string was treated as the
            # program name and the call always failed. Pass an argument list
            # instead (this also avoids shell injection through self.ip).
            subprocess.call([
                self.openssl_command, "req", "-newkey", "rsa:2048", "-sha256",
                "-nodes", "-keyout", self.private_key_path(), "-x509",
                "-days", "365", "-out", self.public_key_path(),
                "-subj", "/CN={0}".format(self.ip),
            ])
        except Exception:
            error = True
        if error or not os.path.exists(self.private_key_path()) or not os.path.exists(self.public_key_path()):
            raise Exception("Could not generate certificate pair, make sure command '{0}' can be run on your machine and {1}, {2} are writable".format(self.openssl_command, self.private_key_path(), self.public_key_path()))

    def setup(self):
        logging.info("Setting up webhook")
        self.fetch_public_ip()
        if not os.path.exists(self.private_key_path()) or not os.path.exists(self.public_key_path()):
            self.gen_certificate()
        self.api.remove_webhook()
        # BUG FIX: close the certificate file once Telegram has read it
        # (it was previously left open for the life of the process).
        cert_file = open(self.public_key_path(), "rb")
        try:
            self.api.set_webhook(url = self.webhook_url(), certificate = cert_file)
        finally:
            cert_file.close()

    def run(self):
        signal.signal(signal.SIGINT, signal.SIG_IGN) # Make sure KeyboardInterrupts are not sent to this process
        self.server = WebhookServer(self, self.port)
        self.server.upgrade_ssl(self.public_key_path(), self.private_key_path())
        logging.info("Webhook server started listening to Telegram on " + self.webhook_url())
        self.server.serve_forever()
|
<reponame>mberkanbicer/software<gh_stars>1-10
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Chapter10\BackProjectionBH.ui'
#
# Created by: PyQt5 UI code generator 5.10
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Auto-generated UI for the Back Projection window.

    Generated by pyuic5 from 'Chapter10\\BackProjectionBH.ui'; regenerating
    the form will overwrite any hand edits, so only comments are added here.
    """

    def setupUi(self, MainWindow):
        """Build all widgets and lay them out inside MainWindow."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.setWindowModality(QtCore.Qt.NonModal)
        MainWindow.setEnabled(True)
        MainWindow.resize(1200, 680)
        font = QtGui.QFont()
        font.setPointSize(10)
        MainWindow.setFont(font)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(":/icon/python_icon.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        MainWindow.setWindowIcon(icon)
        MainWindow.setAutoFillBackground(False)
        MainWindow.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.centralwidget)
        self.horizontalLayout.setObjectName("horizontalLayout")
        # Left-hand input form: parameter labels + line edits / combo boxes.
        self.formLayout = QtWidgets.QFormLayout()
        self.formLayout.setObjectName("formLayout")
        # "Input" section header label.
        self.label_6 = QtWidgets.QLabel(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.label_6.sizePolicy().hasHeightForWidth())
        self.label_6.setSizePolicy(sizePolicy)
        font = QtGui.QFont()
        font.setPointSize(14)
        font.setBold(True)
        font.setWeight(75)
        self.label_6.setFont(font)
        self.label_6.setObjectName("label_6")
        self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_6)
        # Horizontal separator under the section header.
        self.line = QtWidgets.QFrame(self.centralwidget)
        self.line.setFrameShape(QtWidgets.QFrame.HLine)
        self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line.setObjectName("line")
        self.formLayout.setWidget(1, QtWidgets.QFormLayout.SpanningRole, self.line)
        # Dynamic range (dB) row.
        self.label_12 = QtWidgets.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(13)
        self.label_12.setFont(font)
        self.label_12.setObjectName("label_12")
        self.formLayout.setWidget(9, QtWidgets.QFormLayout.LabelRole, self.label_12)
        self.dynamic_range = QtWidgets.QLineEdit(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.dynamic_range.sizePolicy().hasHeightForWidth())
        self.dynamic_range.setSizePolicy(sizePolicy)
        font = QtGui.QFont()
        font.setPointSize(13)
        self.dynamic_range.setFont(font)
        self.dynamic_range.setObjectName("dynamic_range")
        self.formLayout.setWidget(9, QtWidgets.QFormLayout.FieldRole, self.dynamic_range)
        # Polarization row (VV / HH / HV combo box).
        self.label_4 = QtWidgets.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(13)
        self.label_4.setFont(font)
        self.label_4.setObjectName("label_4")
        self.formLayout.setWidget(10, QtWidgets.QFormLayout.LabelRole, self.label_4)
        self.polarization = QtWidgets.QComboBox(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.polarization.sizePolicy().hasHeightForWidth())
        self.polarization.setSizePolicy(sizePolicy)
        font = QtGui.QFont()
        font.setPointSize(13)
        self.polarization.setFont(font)
        self.polarization.setObjectName("polarization")
        self.polarization.addItem("")
        self.polarization.addItem("")
        self.polarization.addItem("")
        self.formLayout.setWidget(10, QtWidgets.QFormLayout.FieldRole, self.polarization)
        # Elevation start/end (deg) row.
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(13)
        self.label_2.setFont(font)
        self.label_2.setObjectName("label_2")
        self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_2)
        self.el_start_end = QtWidgets.QLineEdit(self.centralwidget)
        self.el_start_end.setEnabled(True)
        font = QtGui.QFont()
        font.setPointSize(13)
        self.el_start_end.setFont(font)
        self.el_start_end.setObjectName("el_start_end")
        self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.el_start_end)
        # Azimuth start/end (deg) row.
        self.az_start_end = QtWidgets.QLineEdit(self.centralwidget)
        self.az_start_end.setEnabled(True)
        font = QtGui.QFont()
        font.setPointSize(13)
        self.az_start_end.setFont(font)
        self.az_start_end.setObjectName("az_start_end")
        self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.az_start_end)
        self.label = QtWidgets.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(13)
        self.label.setFont(font)
        self.label.setObjectName("label")
        self.formLayout.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label)
        # Frequency range (GHz) row; disabled in the designer file.
        self.frequency = QtWidgets.QLineEdit(self.centralwidget)
        self.frequency.setEnabled(False)
        font = QtGui.QFont()
        font.setPointSize(13)
        self.frequency.setFont(font)
        self.frequency.setObjectName("frequency")
        self.formLayout.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.frequency)
        self.label_3 = QtWidgets.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(13)
        self.label_3.setFont(font)
        self.label_3.setObjectName("label_3")
        self.formLayout.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.label_3)
        # Image span rows: X, Y, Z extents and pixel counts.
        self.x_span = QtWidgets.QLineEdit(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(13)
        self.x_span.setFont(font)
        self.x_span.setObjectName("x_span")
        self.formLayout.setWidget(5, QtWidgets.QFormLayout.FieldRole, self.x_span)
        self.label_5 = QtWidgets.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(13)
        self.label_5.setFont(font)
        self.label_5.setObjectName("label_5")
        self.formLayout.setWidget(5, QtWidgets.QFormLayout.LabelRole, self.label_5)
        self.y_span = QtWidgets.QLineEdit(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(13)
        self.y_span.setFont(font)
        self.y_span.setObjectName("y_span")
        self.formLayout.setWidget(6, QtWidgets.QFormLayout.FieldRole, self.y_span)
        self.z_span = QtWidgets.QLineEdit(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(13)
        self.z_span.setFont(font)
        self.z_span.setObjectName("z_span")
        self.formLayout.setWidget(7, QtWidgets.QFormLayout.FieldRole, self.z_span)
        self.nx_ny_nz = QtWidgets.QLineEdit(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(13)
        self.nx_ny_nz.setFont(font)
        self.nx_ny_nz.setObjectName("nx_ny_nz")
        self.formLayout.setWidget(8, QtWidgets.QFormLayout.FieldRole, self.nx_ny_nz)
        self.label_7 = QtWidgets.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(13)
        self.label_7.setFont(font)
        self.label_7.setObjectName("label_7")
        self.formLayout.setWidget(6, QtWidgets.QFormLayout.LabelRole, self.label_7)
        self.label_8 = QtWidgets.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(13)
        self.label_8.setFont(font)
        self.label_8.setObjectName("label_8")
        self.formLayout.setWidget(7, QtWidgets.QFormLayout.LabelRole, self.label_8)
        self.label_9 = QtWidgets.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(13)
        self.label_9.setFont(font)
        self.label_9.setObjectName("label_9")
        self.formLayout.setWidget(8, QtWidgets.QFormLayout.LabelRole, self.label_9)
        # Window type row (Rectangular / Hanning / Hamming combo box).
        self.window_type = QtWidgets.QComboBox(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(13)
        self.window_type.setFont(font)
        self.window_type.setObjectName("window_type")
        self.window_type.addItem("")
        self.window_type.addItem("")
        self.window_type.addItem("")
        self.formLayout.setWidget(11, QtWidgets.QFormLayout.FieldRole, self.window_type)
        self.label_10 = QtWidgets.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(13)
        self.label_10.setFont(font)
        self.label_10.setObjectName("label_10")
        self.formLayout.setWidget(11, QtWidgets.QFormLayout.LabelRole, self.label_10)
        self.horizontalLayout.addLayout(self.formLayout)
        # Spacer + right-hand vertical layout (plot area filled elsewhere).
        spacerItem = QtWidgets.QSpacerItem(14, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setSizeConstraint(QtWidgets.QLayout.SetFixedSize)
        self.verticalLayout.setSpacing(0)
        self.verticalLayout.setObjectName("verticalLayout")
        self.horizontalLayout.addLayout(self.verticalLayout)
        self.horizontalLayout.setStretch(1, 1)
        self.horizontalLayout.setStretch(2, 20)
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 1200, 23))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Apply all user-visible strings (supports Qt translation)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Back Projection"))
        self.label_6.setText(_translate("MainWindow", "Input"))
        self.label_12.setText(_translate("MainWindow", "Dynamic Range (dB)"))
        self.dynamic_range.setText(_translate("MainWindow", "50"))
        self.label_4.setText(_translate("MainWindow", "Polarization"))
        self.polarization.setItemText(0, _translate("MainWindow", "VV"))
        self.polarization.setItemText(1, _translate("MainWindow", "HH"))
        self.polarization.setItemText(2, _translate("MainWindow", "HV"))
        self.label_2.setText(_translate("MainWindow", "Elevation (deg)"))
        self.el_start_end.setText(_translate("MainWindow", "18, 23"))
        self.az_start_end.setText(_translate("MainWindow", "66, 71"))
        self.label.setText(_translate("MainWindow", "Azimuth (deg)"))
        self.frequency.setText(_translate("MainWindow", "7, 13"))
        self.label_3.setText(_translate("MainWindow", "Frequency (GHz)"))
        self.x_span.setText(_translate("MainWindow", "10"))
        self.label_5.setText(_translate("MainWindow", "Image Span-X (m)"))
        self.y_span.setText(_translate("MainWindow", "10"))
        self.z_span.setText(_translate("MainWindow", "10"))
        self.nx_ny_nz.setText(_translate("MainWindow", "10, 10, 10"))
        self.label_7.setText(_translate("MainWindow", "Image Span-Y (m)"))
        self.label_8.setText(_translate("MainWindow", "Image Span-Z (m)"))
        self.label_9.setText(_translate("MainWindow", "Number of pixels (nx, ny, nz)"))
        self.window_type.setItemText(0, _translate("MainWindow", "Rectangular"))
        self.window_type.setItemText(1, _translate("MainWindow", "Hanning"))
        self.window_type.setItemText(2, _translate("MainWindow", "Hamming"))
        self.label_10.setText(_translate("MainWindow", "Window Type"))
import Libs.resources_rc
|
<reponame>stevenzim/lrec-2018<gh_stars>1-10
from gensim.models.keyedvectors import KeyedVectors
from sklearn.metrics import confusion_matrix, f1_score
from sklearn.model_selection import train_test_split
from src import nlp, helper, evaluation
from src import evaluation
# Output file for ensemble evaluation results (appended to on every run).
REPORT_FILE_NAME = 'resources/results/waseem_evaluation_ensemble.results'
# DETERMINES IF EMBEDDING VECS ARE AVERAGED OR SUMMED FOR DOCUMENT
SUM_OR_AVG = 'sum'
#
# -------EMBEDDING MODELS -------------
# Load tweet embeddings lookup
# ----- GODIN ------------
# NOTE(review): tokens are looked up case-sensitively when this is False —
# presumably because the Godin twitter embeddings are cased; confirm.
LOWER_CASE_TOKENS = False
word_vectors = KeyedVectors.load_word2vec_format('resources/pre_trained_models/word2vec_twitter_model.bin', binary=True,
                                                 unicode_errors='ignore')
# ----- WASEEM HOVY DATA (16208 TOTAL) -------
def get_cnn_embeddings(word_embeddings, tweets_tokenized, max_tokens = 50, embedding_dim = 400):
    '''Look up per-token embedding vectors for each tokenized tweet.

    Each tweet becomes a (max_tokens x embedding_dim) list of lists: row i is
    the embedding of token i; rows stay zero for short tweets and for tokens
    missing from the embedding model; tweets longer than max_tokens are
    truncated.

    :param word_embeddings: mapping from token to a numpy embedding vector
        (e.g. gensim KeyedVectors); missing tokens raise KeyError on lookup
    :param tweets_tokenized: iterable of token lists, one per tweet
    :param max_tokens: fixed number of token rows per tweet
    :param embedding_dim: dimensionality of each embedding vector
        (default 400, matching the Godin twitter word2vec model)
    :return: list of per-tweet embedding matrices (lists of lists of floats)
    '''
    corpus_vecs = []
    for tweet in tweets_tokenized:
        tweet_vecs = [[0.0] * embedding_dim for _ in range(max_tokens)]
        for cnt, token in enumerate(tweet):
            # BUG FIX: truncate long tweets explicitly instead of relying on
            # an IndexError being swallowed by a bare `except`.
            if cnt >= max_tokens:
                break
            try:
                tweet_vecs[cnt] = word_embeddings[token].tolist()
            except KeyError:
                continue  # out-of-vocabulary token keeps its zero row
        corpus_vecs.append(tweet_vecs)
    return corpus_vecs
# Load the Waseem-Hovy corpus and turn each tweet into embedding features.
# NOTE: Python 2 — map() returns a list here, which get_cnn_embeddings expects.
hate_corpus = helper.load_json_from_file('resources/hate_speech_corps/NAACL_SRW_2016_DOWNLOADED.json')
print ('Extracting features')
X_train = get_cnn_embeddings(word_vectors,
                             map(lambda y: nlp.replace_tokens(y),
                                 nlp.tokenize_tweets(map(lambda x: x['text'], hate_corpus),
                                                     lower_case=LOWER_CASE_TOKENS)), max_tokens=50)
Y_train = map(lambda x: x['class'], hate_corpus)
print '-----CNN -----------'
# Fixed 85/15 split with a constant random_state so runs are comparable.
from sklearn.model_selection import train_test_split
X_train_split, X_test_split, Y_train_split, Y_test_split = train_test_split(
    X_train, Y_train, test_size=0.15, random_state=7)
'''
Hate speech with CNN's
'''
# CNN for the IMDB problem
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import Flatten
from keras.layers import Dropout
from keras.layers.convolutional import Conv1D
from keras.layers.pooling import GlobalMaxPooling1D
import numpy as np
# fix random seed for reproducibility
# NOTE: cnn_model() re-seeds numpy with its own random_seed argument on every
# call, so this global seed mainly covers randomness before model creation.
seed = 21
np.random.seed(seed)
# create the model
def cnn_model(tokens = 50, random_seed = 21):
    """Build and compile the 1-D CNN used for 3-class hate-speech labeling.

    :param tokens: sequence length (number of token positions per tweet)
    :param random_seed: numpy seed applied before weight initialisation so
        each ensemble member starts from different but reproducible weights
    :return: a compiled Keras model expecting input of shape (tokens, 400)
    """
    # FIX: removed the unused `from keras import initializers` import and the
    # redundant `seed` local; behavior is unchanged.
    np.random.seed(random_seed)
    model = Sequential()
    # Conv over token windows of 3, then global max pooling across positions.
    model.add(Conv1D(filters=150, kernel_size=3, padding='same', activation='relu', input_shape=(tokens, 400)))
    model.add(GlobalMaxPooling1D())
    model.add(Dense(250, activation='relu'))
    model.add(Dense(3, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
def waseem_hovy_cnn(epochs=5, batch_size=50, tokens=50, random_seed= 21):
    """Train one CNN on the module-level train split and evaluate it.

    Prints F1 / confusion-matrix reports, appends the macro F1 to
    REPORT_FILE_NAME, and returns the raw class-probability predictions, the
    label encoder and the held-out gold labels so callers can ensemble runs.
    """
    Y_hate_encoder, one_hot_y_hate, encoded_Y_hate = helper.encode_ys_categorical(Y_train_split)
    # BUILD MODEL
    # np.random.seed(random_seed)
    m = cnn_model(tokens=tokens, random_seed=random_seed)
    m.fit(X_train_split, one_hot_y_hate, epochs=epochs, batch_size=batch_size)
    # Get Predictions
    c = m.predict(np.array(X_test_split))
    encoded_preds = c.argmax(axis=1)
    decoded_preds = Y_hate_encoder.inverse_transform(encoded_preds)
    # output evaluation data
    print '----------F-1/precision/recall report-----------'
    print 'MACRO F1:'
    print f1_score(Y_test_split, decoded_preds, average='macro')
    print 'F1 Matrix'
    print evaluation.evaluate_results(Y_test_split, decoded_preds)
    # Append this single model's macro F1 to the running report file.
    o_file = open(REPORT_FILE_NAME, 'a')
    o_file.write(str(f1_score(Y_test_split, decoded_preds, average='macro')) + '\n')
    o_file.close()
    # first generate with specified labels
    labels = ['none', 'racism', 'sexism']
    cm = confusion_matrix(Y_test_split, decoded_preds, labels)
    # then print it in a pretty way
    print '-------confusion matrix---------'
    print evaluation.print_cm(cm, labels)
    Y_soft_max = c
    return Y_soft_max , Y_hate_encoder, Y_test_split
def run_ensemble(epochs = 5, batch_size=50, random_int=100):
    """Train RANGE CNNs with different seeds and score their mean prediction.

    Appends the run's hyper-parameters, the per-model macro F1 scores (written
    by waseem_hovy_cnn) and the ensemble macro F1 to REPORT_FILE_NAME.
    """
    # Record the hyper-parameters of this ensemble run.
    o_file = open(REPORT_FILE_NAME, 'a')
    o_file.write(str(random_int) + '\n')
    o_file.write(str(epochs) + '\n')
    o_file.write(str(batch_size) + '\n')
    o_file.close()
    y_s_list = []
    encoder = None
    test_split = None
    RANGE = 10  # number of ensemble members
    for i in list(range(RANGE)):
        # Each member gets a distinct, deterministic seed.
        Y_soft_max, Y_hate_encoder, Y_test_split = waseem_hovy_cnn(epochs=epochs, batch_size=batch_size, random_seed=int(random_int*(i+1)))
        y_s_list.append(Y_soft_max)
        encoder = Y_hate_encoder
        test_split = Y_test_split
    # ADDD ALL RESULTS
    # NOTE(review): `+=` on a numpy array mutates y_s_list[0] in place;
    # harmless here because y_s_list is discarded afterwards.
    sum_of_ys = y_s_list[0]
    for i in [x + 1 for x in range(RANGE - 1)]:
        sum_of_ys += y_s_list[i]
    # DIVIDE BY RANGE FOR MEAN
    sum_of_ys /= RANGE
    # ENCODE PREDS
    encoded_preds = sum_of_ys.argmax(axis=1)
    decoded_preds = encoder.inverse_transform(encoded_preds)
    print '----------F-1/precision/recall report-----------'
    print 'MACRO F1:'
    print f1_score(test_split, decoded_preds, average='macro')
    print 'F1 Matrix'
    print evaluation.evaluate_results(test_split, decoded_preds)
    # o_file.write('ensemble,')
    # o_file.write(str(f1_score(Y_test_2013, decoded_preds, average='macro')) + ',')
    o_file = open(REPORT_FILE_NAME, 'a')
    o_file.write('waseem----ensemble--->\n')
    o_file.write(str(f1_score(test_split, decoded_preds, average='macro')) + '\n')
    o_file.write('$$$$$$$$$$$$$$$$ END $$$$$$$$$$$$$$$$$$$$$$$$$\n')
    o_file.close()
# Grid search over training hyper-parameters; each combination trains a
# 10-model ensemble and appends its scores to REPORT_FILE_NAME.
batch_size = [10, 25, 50, 100]
epochs = [3, 5, 10]
random_int = [87, 100, 101]
for bs in batch_size:
    for epoch in epochs:
        for i in random_int:
            # NOTE(review): this handle stays open across run_ensemble(),
            # which reopens the same file in append mode — works, but the
            # interleaved writes depend on append-mode semantics.
            o_file = open(REPORT_FILE_NAME, 'a')
            o_file.write('$$$$$$$$$$$$$$$$ START $$$$$$$$$$$$$$$$$$$$$$$$$\n')
            run_ensemble(epochs = epoch, batch_size=bs, random_int=i)
            o_file.close()
|
import argparse
import json
import logging
import os
from os import listdir
from os.path import isfile, join
from collections import Counter
from nlp.data import load_text_file
from nlp.preprocessing import prepareText, frequencies
from echr.utils.folders import make_build_folder
from echr.utils.logger import getlogger
from echr.utils.cli import TAB
from echr.utils.config import config
from rich.markdown import Markdown
from rich.console import Console
from rich.progress import (
Progress,
BarColumn,
TimeRemainingColumn,
)
log = getlogger()
__console = Console(record=True)
def normalized_step(tokens, path='./', force=False, lemmatization=True):
    """
    Normalize the tokens

    :param tokens: list of strings
    :type tokens: [str]
    :param path: path to write the output in (currently unused; kept for
        interface compatibility with the other step functions)
    :type path: str
    :param force: unused; kept for interface compatibility
    :type force: bool
    :param lemmatization: whether to lemmatize the tokens
    :type lemmatization: bool
    :return: normalized tokens
    :rtype: [str]
    """
    # prepareText yields (token, metadata) pairs; keep only the token part.
    # (Removed a stale commented-out debug print.)
    return [t[0] for t in prepareText(tokens, lemmatization)]
def ngram_step(original_tokens, freq=None, path='./', force=False):
    """
    Calculate the ngrams

    :param original_tokens: list of tokens
    :type original_tokens: [[str]]
    :param freq: rules to extract and filter ngrams
    :type freq: dict
    :param path: path to write the output in
    :type path: str
    :param force: if False, refuse to run when a per-n output file already
        exists under path
    :type force: bool
    :return: dictionary of ngrams indexed by n
    :rtype: dict
    """
    if freq is None:
        logging.info('No configuration specified, uses the default one')
        freq = {1: 1, 2: 1, 3: 1, 4: 1}
    # Guard: bail out if any 'tokens_<n>grams.txt' already exists (unless
    # force). NOTE(review): the files are only checked here, never written
    # in this function — presumably a later step writes them; confirm.
    for k in freq:
        output_file = 'tokens_{}grams.txt'.format(k)
        p = os.path.join(path, output_file)
        if not force:
            if os.path.isfile(p):
                raise Exception("The file {} already exists!".format(p))
    # n is the highest ngram order; minlimits filters by minimum frequency.
    allgrams = frequencies(original_tokens, n=len(freq), minlimits=freq)
    return allgrams
def run(console, build, title, force=False, update=False):
    """Normalize preprocessed documents and build the ngram dictionary.

    Reads '*_text_without_conclusion.txt' files from <build>/raw/
    preprocessed_documents, normalizes and ngram-izes them, writes one
    '<doc_id>_normalized.txt' per document plus 'full_dictionary.txt' into
    <build>/raw/normalized_documents.

    :param console: rich Console used for all output
    :param build: path to the build folder
    :param title: unused in this step (accepted for CLI uniformity)
    :param force: overwrite/ignore existing outputs
    :param update: reuse documents that were already normalized on disk
    """
    __console = console
    # HACK: rebind the builtin print to the rich console for this module.
    global print
    print = __console.print
    print(Markdown("- **Step configuration**"))
    input_folder = os.path.join(build, 'raw', 'preprocessed_documents')
    output_folder = os.path.join(build, 'raw', 'normalized_documents')
    ngrams_config = {}
    try:
        ngrams_config = config()['steps']['normalize']['ngrams']
    except Exception as e:
        print('Cannot retrieve n-grams configuration. Details: {}'.format(e))
        exit(5)
    print(TAB + '> Step folder: {}'.format(output_folder))
    make_build_folder(console, output_folder, force, strict=False)
    # Only pick the preprocessed text files; doc ids come from the filenames.
    files = sorted([os.path.join(input_folder, f) for f in listdir(input_folder) if isfile(join(input_folder, f)) if
                    '_text_without_conclusion.txt' in f])
    raw_corpus = []
    corpus_id = []
    print(Markdown('- **Load documents**'))
    with Progress(
            TAB + "> Loading in memory... [IN PROGRESS]",
            BarColumn(30),
            TimeRemainingColumn(),
            "| Document [blue]{task.fields[doc]} [white]({task.completed}/{task.total})"
            "{task.fields[error]}",
            transient=True,
            console=console
    ) as progress:
        task = progress.add_task("Loading...", total=len(files), error="",
                                 doc=files[0].split('/')[-1].split('_text_without_conclusion.txt')[0])
        for i, p in enumerate(files):
            error = ""
            doc_id = p.split('/')[-1].split('_text_without_conclusion.txt')[0]
            try:
                raw_corpus.append(load_text_file(p))
                corpus_id.append(doc_id)
            except Exception as e:
                error = '\n| {}'.format('Could not load the document')
                log.debug(p, e)
            progress.update(task, advance=1, error=error, doc=doc_id)
    print(TAB + "> Loading in memory... [green][DONE]")
    normalized_tokens = []
    print(Markdown('- **Generate language model**'))
    try:
        with Progress(
                TAB + "> Normalize... [IN PROGRESS]\n",
                BarColumn(30),
                TimeRemainingColumn(),
                "| Document [blue]{task.fields[doc]} [white]({task.completed}/{task.total})"
                "{task.fields[error]}",
                transient=True,
                console=console
        ) as progress:
            task = progress.add_task("Compute tokens...", total=len(raw_corpus), error="", doc=corpus_id[0])
            for i, doc in enumerate(raw_corpus):
                filename = os.path.join(output_folder, '{}_normalized.txt'.format(corpus_id[i]))
                # In update mode, reuse documents already normalized on disk.
                if not update or not os.path.isfile(filename):
                    normalized_tokens.append(normalized_step(doc, force=force, lemmatization=True))
                else:
                    with open(filename, 'r') as f:
                        normalized_tokens.extend(f.read().split())
                        f.close()
                # NOTE(review): `error` here is the leftover value from the
                # loading loop above — it is never reset in this loop.
                progress.update(task, advance=1, error=error, doc=corpus_id[i])
    except Exception as e:
        print(TAB + '[bold red]:double_exclamation_mark: Could not normalized the tokens. Details: {}'.format(e))
        exit(40)
    print(TAB + "> Normalize... [green][DONE]")
    all_grams = []
    doc_grammed = []
    try:
        with Progress(
                TAB + "> Compute ngrams... [IN PROGRESS]\n",
                BarColumn(30),
                TimeRemainingColumn(),
                "| Document [blue]{task.fields[doc]} [white]({task.completed}/{task.total})"
                "{task.fields[error]}",
                transient=True,
                console=console
        ) as progress:
            task = progress.add_task("Compute tokens...", total=len(corpus_id), error="", doc=corpus_id[0])
            for i, doc in enumerate(normalized_tokens):
                error = ""
                filename = os.path.join(output_folder, '{}_normalized.txt'.format(corpus_id[i]))
                if not update or not os.path.isfile(filename):
                    # Flatten the per-n ngram lists into one list per document.
                    grams = ngram_step(doc, ngrams_config, force=force)
                    merged = []
                    for g in grams.values():
                        merged.extend(g)
                    doc_grammed.append(merged)
                    all_grams.extend(merged)
                else:
                    # Already on disk: count its tokens but skip re-writing it
                    # (doc_grammed gets None as a skip marker).
                    error = "\n| Load document as already normalized."
                    with open(filename, 'r') as f:
                        all_grams.extend(f.read().split())
                        doc_grammed.append(None)
                        f.close()
                progress.update(task, advance=1, error=error, doc=corpus_id[i])
    except Exception:
        console.print_exception()
    print(TAB + "> Compute ngrams... [green][DONE]")
    # Global token/ngram frequency dictionary over the whole corpus.
    f = Counter(all_grams)
    with open(os.path.join(output_folder, 'full_dictionary.txt'), 'w') as outfile:
        json.dump(f, outfile, indent=4, sort_keys=True)
    print(TAB + '> Save the full dictionary [green][DONE]')
    with Progress(
            TAB + "> Save normalized documents... [IN PROGRESS]\n",
            BarColumn(30),
            TimeRemainingColumn(),
            "| Document [blue]{task.fields[doc]} [white]({task.completed}/{task.total})"
            "{task.fields[error]}",
            transient=True,
            console=console
    ) as progress:
        task = progress.add_task("Compute tokens...", total=len(doc_grammed), error="", doc=corpus_id[0])
        for i, doc in enumerate(doc_grammed):
            # None entries were already normalized on disk; leave them as-is.
            if doc is not None:
                with open(os.path.join(output_folder, '{}_normalized.txt'.format(corpus_id[i])), 'a') as file:
                    file.write(' '.join(doc))
            # NOTE(review): `error` is again the leftover value from the
            # previous loop, not reset per document.
            progress.update(task, advance=1, error=error, doc=corpus_id[i])
    print(TAB + '> Save normalized documents... [green][DONE]')
def main(args):
    """Entry point: run the normalization step with the parsed CLI options."""
    rich_console = Console(record=True)
    run(rich_console, args.build, args.title, args.force, args.u)
def parse_args(parser):
    """Parse command-line arguments from the given parser.

    Placeholder for future validation of the supplied paths.
    """
    parsed = parser.parse_args()
    return parsed
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Turn a collection of documents into a BoW and TF-IDF representation.')
    parser.add_argument('--build', type=str, default="./build/echr_database/")
    parser.add_argument('--title', type=str)
    # BUG FIX: '-f' alone stores the flag as args.f, but main() reads
    # args.force — adding the long option makes argparse use dest='force'
    # while keeping '-f' working.
    parser.add_argument('-f', '--force', action='store_true')
    parser.add_argument('-u', action='store_true')  # reuse already-normalized docs
    args = parse_args(parser)
    main(args)
|
import socket
import sys
import typing
from abc import abstractmethod
from asyncio import BaseTransport, Transport, BaseProtocol
from typing import TYPE_CHECKING, Optional
from cryptography.x509 import Certificate
from bxcommon import constants
from bxcommon.network.ip_endpoint import IpEndpoint
from bxcommon.network.network_direction import NetworkDirection
from bxcommon.network.socket_connection_state import SocketConnectionState, SocketConnectionStates
from bxcommon.utils.stats import hooks
from bxutils import logging
from bxutils.logging import LogRecordType
from bxutils.ssl import ssl_certificate_factory
if TYPE_CHECKING:
# pylint: disable=ungrouped-imports,cyclic-import
from bxcommon.connections.abstract_node import AbstractNode
logger = logging.get_logger(__name__)
network_troubleshooting_logger = logging.get_logger(LogRecordType.NetworkTroubleshooting, __name__)
SO_QUICKACK = 12
class AbstractSocketConnectionProtocol(BaseProtocol):
transport: Optional[Transport]
file_no: int
endpoint: IpEndpoint
direction: NetworkDirection
can_send: bool
state: SocketConnectionState
is_ssl: bool
_node: "AbstractNode"
_should_retry: bool
_receive_buf: bytearray = bytearray(constants.RECV_BUFSIZE)
    def __init__(
        self,
        node: "AbstractNode",
        endpoint: Optional[IpEndpoint] = None,
        is_ssl: bool = True,
    ):
        """Create a protocol bound to *node*.

        :param node: owning node, notified of connection lifecycle events
        :param endpoint: remote endpoint for outbound connections; None for
            inbound connections (filled in later from the accepted socket)
        :param is_ssl: whether this connection is expected to use SSL/TLS
        """
        self._node = node
        self.transport: Optional[Transport] = None
        self.file_no = -1  # real fileno is assigned in connection_made()
        # pyre-fixme[8]: Attribute has type `IpEndpoint`; used as
        # `Optional[IpEndpoint]`.
        self.endpoint = endpoint
        # An endpoint known up-front means we dialed out; otherwise we accepted.
        if self.endpoint is None:
            self.direction = NetworkDirection.INBOUND
        else:
            self.direction = NetworkDirection.OUTBOUND
        self.can_send = False  # flipped once the transport is writable
        self.state = SocketConnectionStates.CONNECTING
        self.is_ssl = is_ssl
        # Only outbound connections are retried by default.
        self._should_retry = self.direction == NetworkDirection.OUTBOUND
        self._initial_bytes = None
def __repr__(self) -> str:
return f"{self.__class__.__name__} <{self.endpoint}, {self.direction.name}>"
def connection_made(self, transport: BaseTransport) -> None:
self.transport = typing.cast(Transport, transport)
sock = transport.get_extra_info("socket")
self.file_no = sock.fileno()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
if self._node.opts.enable_tcp_quickack:
self.enable_tcp_quickack()
if self.direction == NetworkDirection.INBOUND:
self.endpoint = IpEndpoint(
*transport.get_extra_info("peername")
)
logger.debug("[{}] - accepted connection.", self)
self._node.on_connection_added(self)
self.state = SocketConnectionStates.INITIALIZED
self.can_send = True
self.send()
logger.debug("[{}] - connection established successfully.", self)
def connection_lost(self, exc: Optional[Exception]) -> None:
mark_connection_for_close = (
SocketConnectionStates.MARK_FOR_CLOSE not in self.state
)
self.state |= SocketConnectionStates.MARK_FOR_CLOSE
if not self._should_retry:
self.state |= SocketConnectionStates.DO_NOT_RETRY
if exc is not None:
logger.info(
"[{}] - lost connection due to an error: {}, closing connection, should_retry: {}.",
self,
exc,
self._should_retry,
)
else:
logger.debug(
"[{}] - lost connection with peer, should_retry: {}.",
self,
self._should_retry,
)
self._node.on_connection_closed(self.file_no, mark_connection_for_close)
def pause_writing(self) -> None:
self.can_send = False
logger.debug("[{}] - paused writing.", self)
def resume_writing(self) -> None:
self.can_send = True
self.send()
logger.debug("[{}] - resumed writing.", self)
def send(self) -> None:
# TODO: send should buffer together multiple pending buffers and write\
# them together to the Transport.
total_bytes_sent = 0
conn = self._node.connection_pool.get_by_fileno(self.file_no)
if not conn:
return
while self.is_sendable():
data = conn.get_bytes_to_send()
if not data:
break
transport = self.transport
assert transport is not None, "Connection is broken!"
# note: transport.write() is non blocking and accepts any length of data
# even if data is crossing the buffer high limit (will cause a pause)
bytes_to_send = len(data)
# pyre-fixme[16]: `Transport` has no attribute `get_write_buffer_limits`.
buffer_limits = transport.get_write_buffer_limits()
logger.trace(
"[{}] - about to send {} bytes, current buffer used {} with limits {}",
self,
bytes_to_send,
transport.get_write_buffer_size(),
buffer_limits
)
transport.write(data)
conn.advance_sent_bytes(bytes_to_send)
conn.advance_bytes_written_to_socket(bytes_to_send)
total_bytes_sent += bytes_to_send
if total_bytes_sent:
logger.trace("[{}] - sent {} bytes", self, total_bytes_sent)
def send_bytes(self, bytes_to_send: typing.Union[memoryview, bytearray]):
conn = self._node.connection_pool.get_by_fileno(self.file_no)
if not conn:
return
transport = self.transport
assert transport is not None, "Connection is broken!"
# pyre-fixme[16]: `Transport` has no attribute `get_write_buffer_limits`.
buffer_limits = transport.get_write_buffer_limits()
logger.trace(
"[{}] - about to send {} bytes, current buffer used {} with limits {}",
self,
bytes_to_send,
transport.get_write_buffer_size(),
buffer_limits
)
transport.write(bytes_to_send)
len_bytes_to_sent = len(bytes_to_send)
hooks.add_throughput_event(
NetworkDirection.OUTBOUND,
"outgoing",
len_bytes_to_sent,
conn.peer_desc,
conn.peer_id
)
conn.advance_bytes_written_to_socket(bytes_to_send)
logger.trace("[{}] - sent {} bytes", self, len_bytes_to_sent)
def pause_reading(self) -> None:
if self.is_alive():
assert self.transport is not None, "Connection is broken!"
self.state |= SocketConnectionStates.HALT_RECEIVE
logger.debug("[{}] - paused reading.", self)
def resume_reading(self) -> None:
if self.is_alive():
assert self.transport is not None, "Connection is broken!"
# pylint bug
# pylint: disable=invalid-unary-operand-type
self.state &= ~SocketConnectionStates.HALT_RECEIVE
logger.debug("[{}] - resumed reading.", self)
def mark_for_close(self, should_retry: bool = True) -> None:
if SocketConnectionStates.MARK_FOR_CLOSE in self.state:
return
self.state |= SocketConnectionStates.MARK_FOR_CLOSE
self._should_retry = should_retry
transport = self.transport
assert transport is not None, "Connection is broken!"
# ideally this would be `transport.close()`, but buffers don't
# seem to be flushing for 1+ days
transport.abort()
logger.debug(
"[{}] - marked for close, retrying: {}.", self, should_retry
)
def is_alive(self) -> bool:
return SocketConnectionStates.MARK_FOR_CLOSE not in self.state
def is_receivable(self) -> bool:
return (
self.is_alive()
and SocketConnectionStates.HALT_RECEIVE not in self.state
)
def is_sendable(self) -> bool:
return (
self.is_alive()
and SocketConnectionStates.INITIALIZED in self.state
and self.can_send
)
def get_peer_certificate(self) -> Certificate:
assert self.transport is not None, "Connection is broken!"
try:
return ssl_certificate_factory.get_transport_cert(self.transport)
except ValueError as e:
raise TypeError("Socket is not SSL type!") from e
def get_write_buffer_size(self) -> int:
transport = self.transport
assert transport is not None, "Connection is broken!"
if transport.is_closing():
return 0
else:
return transport.get_write_buffer_size()
def enable_tcp_quickack(self):
if "linux" in sys.platform:
sock = self.transport.get_extra_info("socket")
if sock is None:
logger.debug("Socket info is None on connection")
else:
sock.setsockopt(socket.SOL_SOCKET, SO_QUICKACK, 1)
@abstractmethod
def get_last_read_duration_ms(self) -> float:
pass
@abstractmethod
def get_time_since_read_end_ms(self, end_time: float):
pass
|
import math
from math import pi
import numpy as np
import numpy.testing as nt
import unittest
from spatialmath import DualQuaternion, UnitDualQuaternion, Quaternion, SE3
from spatialmath import base
def qcompare(x, y):
    """Assert that two quaternion/pose-like values are almost equal.

    Accepts Quaternion instances (compared via ``.vec``), SE3 poses
    (compared via ``.A``), or plain array-likes.
    """
    if isinstance(x, Quaternion):
        x = x.vec
    elif isinstance(x, SE3):
        # Fix: the original checked `SMPose`, which is never imported in this
        # file and raised NameError; SE3 (imported above) is the pose type
        # actually used by these tests.
        x = x.A
    if isinstance(y, Quaternion):
        y = y.vec
    elif isinstance(y, SE3):
        y = y.A
    nt.assert_array_almost_equal(x, y)
class TestDualQuaternion(unittest.TestCase):
    """Exercise construction and arithmetic of general dual quaternions."""

    def _sample(self):
        # Canonical value used by most tests: real part 1..4, dual part 5..8.
        return DualQuaternion(Quaternion([1., 2, 3, 4]), Quaternion([5., 6, 7, 8]))

    def test_init(self):
        expected = np.r_[1, 2, 3, 4, 5, 6, 7, 8]
        # From a pair of quaternions.
        nt.assert_array_almost_equal(self._sample().vec, expected)
        # From a flat list of eight components.
        nt.assert_array_almost_equal(
            DualQuaternion([1., 2, 3, 4, 5, 6, 7, 8]).vec, expected
        )
        # From an 8-element ndarray.
        nt.assert_array_almost_equal(
            DualQuaternion(np.r_[1, 2, 3, 4, 5, 6, 7, 8]).vec, expected
        )

    def test_pure(self):
        pure = DualQuaternion.Pure([1., 2, 3])
        nt.assert_array_almost_equal(pure.vec, np.r_[1, 0, 0, 0, 0, 1, 2, 3])

    def test_strings(self):
        dq = self._sample()
        self.assertIsInstance(str(dq), str)
        self.assertIsInstance(repr(dq), str)

    def test_conj(self):
        conjugated = self._sample().conj()
        nt.assert_array_almost_equal(
            conjugated.vec, np.r_[1, -2, -3, -4, 5, -6, -7, -8]
        )

    # Disabled: DualQuaternion.norm() behavior under discussion.
    # def test_norm(self):
    #     q1 = Quaternion([1.,2,3,4])
    #     q2 = Quaternion([5.,6,7,8])
    #     dq = DualQuaternion(q1, q2)
    #     nt.assert_array_almost_equal(dq.norm(), (q1.norm(), q2.norm()))

    def test_plus(self):
        doubled = self._sample() + self._sample()
        nt.assert_array_almost_equal(doubled.vec, 2 * np.r_[1, 2, 3, 4, 5, 6, 7, 8])

    def test_minus(self):
        zeroed = self._sample() - self._sample()
        nt.assert_array_almost_equal(zeroed.vec, np.zeros((8,)))

    def test_matrix(self):
        M = self._sample().matrix()
        self.assertIsInstance(M, np.ndarray)
        self.assertEqual(M.shape, (8, 8))

    def test_multiply(self):
        # The 8x8 matrix form of dq1 applied to dq2's vector must equal dq1 * dq2.
        dq1 = self._sample()
        dq2 = DualQuaternion(Quaternion([4, 3, 2, 1]), Quaternion([5, 6, 7, 8]))
        nt.assert_array_almost_equal(dq1.matrix() @ dq2.vec, (dq1 * dq2).vec)

    def test_unit(self):
        pass
class TestUnitDualQuaternion(unittest.TestCase):
    """Exercise unit dual quaternions built from SE3 transforms."""

    def test_init(self):
        # Round trip: SE3 -> unit dual quaternion -> SE3.
        T = SE3.Rx(pi / 4)
        nt.assert_array_almost_equal(UnitDualQuaternion(T).SE3().A, T.A)

    def test_norm(self):
        # A unit dual quaternion has norm (1, 0).
        dq = UnitDualQuaternion(SE3.Rx(pi / 4))
        nt.assert_array_almost_equal(dq.norm(), (1, 0))

    def test_multiply(self):
        # Composition of dual quaternions matches composition of the poses.
        left = SE3.Rx(pi / 4)
        right = SE3.Rz(-pi / 3)
        product = UnitDualQuaternion(left) * UnitDualQuaternion(right)
        nt.assert_array_almost_equal(product.SE3().A, (left * right).A)
# ---------------------------------------------------------------------------------------#
# Allow running this test module directly with `python <file>.py`.
if __name__ == '__main__':  # pragma: no cover
    unittest.main()
|
<gh_stars>0
#!/usr/bin/env python
import sys
import unittest
import time
import rostest
import rospy
from geometry_msgs.msg import PoseStamped
from araig_msgs.msg import BoolStamped, Float64Stamped
class TestCalcPoseDelta(unittest.TestCase):
    """Integration test for the pose-delta calculator node.

    Publishes two object poses plus a boolean trigger signal, then checks
    the angular and positional displacement outputs. A new output message
    is expected once per rising edge of the trigger signal.
    """

    def setUp(self):
        """Wire up latched publishers for the inputs and subscribers for the outputs."""
        _pub_topic_1 = '/test/in_obj_1'
        _pub_topic_2 = '/test/in_obj_2'
        _pub_signal = '/test/in_signal'
        _sub_topic_1 = '/test/out_disp_angular'
        _sub_topic_2 = '/test/out_disp_position'
        rospy.init_node('test_calc_pose_delta', anonymous=True)
        self.pub_1 = rospy.Publisher(_pub_topic_1, PoseStamped, latch=True, queue_size=10)
        self.pub_2 = rospy.Publisher(_pub_topic_2, PoseStamped, latch=True, queue_size=10)
        self.pub_signal = rospy.Publisher(_pub_signal, BoolStamped, latch=True, queue_size=10)
        rospy.Subscriber(_sub_topic_1, Float64Stamped, callback=self.callback_1, queue_size=10)
        rospy.Subscriber(_sub_topic_2, Float64Stamped, callback=self.callback_2, queue_size=10)
        # Latest received outputs (None until the first message arrives).
        self.delta_angle = None
        self.delta_pose = None
        # Header sequence numbers, used to count how many outputs were published.
        self.msg_seq_angle = 0
        self.msg_seq_pose = 0

    def test_expected(self):
        """Trigger the calculator and verify outputs and publish counts."""
        # Object 1: at (1, 1, 1), identity orientation.
        pub_pose_msg = PoseStamped()
        pub_pose_msg.header.stamp = rospy.Time.now()
        pub_pose_msg.pose.position.x = 1
        pub_pose_msg.pose.position.y = 1
        pub_pose_msg.pose.position.z = 1
        pub_pose_msg.pose.orientation.w = 1
        self.pub_1.publish(pub_pose_msg)
        # Object 2: at (4, 5, 5), rotated 90 degrees about z (w=1, z=1 unnormalized).
        pub_pose_msg = PoseStamped()
        pub_pose_msg.header.stamp = rospy.Time.now()
        pub_pose_msg.pose.position.x = 4
        pub_pose_msg.pose.position.y = 5
        pub_pose_msg.pose.position.z = 5
        pub_pose_msg.pose.orientation.w = 1
        pub_pose_msg.pose.orientation.z = 1
        self.pub_2.publish(pub_pose_msg)
        # Hold the signal low, then raise it to trigger the first calculation.
        pub_signal_msg = BoolStamped()
        pub_signal_msg.header.stamp = rospy.Time.now()
        pub_signal_msg.data = False
        self.pub_signal.publish(pub_signal_msg)
        rospy.sleep(0.5)
        pub_signal_msg.data = True
        self.pub_signal.publish(pub_signal_msg)
        # Fix: wait until BOTH outputs have arrived. The original used `and`,
        # which exits as soon as EITHER callback fires and made the assertions
        # below flaky.
        while self.delta_pose is None or self.delta_angle is None:
            time.sleep(0.1)
        self.assertEqual(self.delta_pose, 5.0, 'test1: {} is not equal to 5.0'.format(self.delta_pose))
        # Fix: message previously said "not equal to 5.0" for the 90.0 comparison.
        self.assertEqual(self.delta_angle, 90.0, 'test1: {} is not equal to 90.0'.format(self.delta_angle))
        self.assertEqual(self.msg_seq_angle, 1, 'test1: msg published {} times'.format(self.msg_seq_angle))
        self.assertEqual(self.msg_seq_pose, 1, 'test1: msg published {} times'.format(self.msg_seq_pose))
        # Lowering the signal must not publish new outputs.
        rospy.sleep(0.2)
        pub_signal_msg.data = False
        self.pub_signal.publish(pub_signal_msg)
        self.assertEqual(self.msg_seq_angle, 1, 'test2: msg published {} times'.format(self.msg_seq_angle))
        self.assertEqual(self.msg_seq_pose, 1, 'test2: msg published {} times'.format(self.msg_seq_pose))
        # A second rising edge must publish exactly one more message per topic.
        rospy.sleep(0.2)
        pub_signal_msg.data = True
        self.pub_signal.publish(pub_signal_msg)
        rospy.sleep(0.2)
        self.assertEqual(self.msg_seq_angle, 2, 'test3: msg published {} times'.format(self.msg_seq_angle))
        self.assertEqual(self.msg_seq_pose, 2, 'test3: msg published {} times'.format(self.msg_seq_pose))

    def callback_1(self, msg):
        """Record the latest angular displacement and its sequence number."""
        self.delta_angle = msg.data
        self.msg_seq_angle = msg.header.seq

    def callback_2(self, msg):
        """Record the latest positional displacement and its sequence number."""
        self.delta_pose = msg.data
        self.msg_seq_pose = msg.header.seq
if __name__ == '__main__':
    # Entry point for rostest: run this unittest case inside a ROS test node.
    pkg = 'araig_calculators'
    name = 'test_diff_poses_spatial'
    rostest.rosrun(pkg, name, TestCalcPoseDelta)
#!/usr/bin/env python3
import os
import sys
import subprocess
import time
import operator
import pdb
from os.path import join
from functools import reduce
CUR_DIR = os.path.abspath(os.path.dirname(__file__))  # absolute directory containing this script
'''
# NOTE
- In order to use PerfMon.LEVEL_PERF_LOCK (i.e., perf lock record),
lockdep and lockstat should be enabled in kernel configuration.
# PERF HOWTO
- http://www.brendangregg.com/perf.html
'''
class PerfMon(object):
    """Performance-monitoring helper that wraps `perf` and /proc/stat.

    A monitoring level (PERFMON_LEVEL env var) selects which tools run:
    CPU accounting from /proc/stat always runs at LEVEL_LOW and above,
    and exactly one perf mode (record / stat / probe / lock) is added for
    the matching level. Results are written under PERFMON_LDIR with the
    PERFMON_LFILE prefix. All external commands are run via `sudo`.
    """
    # Monitoring levels (selected via PERFMON_LEVEL).
    LEVEL_LOW = 0
    LEVEL_PERF_RECORD = 1
    LEVEL_PERF_PROBE_SLEEP_LOCK_D = 2
    LEVEL_PERF_STAT = 3
    LEVEL_PERF_PROBE_SLEEP_LOCK = 998 # Well, it it not useful for fxmark.
    LEVEL_PERF_LOCK = 999 # Well, it is mostly useless.
    # Column order of /proc/stat's aggregate "cpu " line, prefixed with
    # wall-clock time ("real").
    CPU_STAT = ["real", "user", "nice", "sys", "idle",
                "iowait", "irq", "softirq", "steal", "guest"]
    # Clock ticks per second, for converting /proc/stat jiffies to seconds.
    SC_CLK_TCK = float(os.sysconf("SC_CLK_TCK"))
    # Kernel functions probed when tracing sleepable-lock behavior.
    PROBE_SLEEP_LOCK = [
        # - mutex
        "mutex_lock", "mutex_trylock", "mutex_lock_killable",
        # "mutex_unlock",
        # - rwsem
        "down_read", "down_read_trylock", "down_write",
        "down_write_trylock", "downgrade_write",
        # "up_read", "up_write",
        # - semaphore
        "down",
        # "up",
        # - scheduler
        "io_schedule_timeout", # for wait_on_page_bit()
        "schedule",
        "preempt_schedule_common"
        # "schedule_timeout",
    ]
    # Sampling frequency (Hz) passed to `perf record -F`.
    PERF_SAMPLE_RATE = 1000

    # init
    def __init__(self, \
                 level = int(os.environ.get('PERFMON_LEVEL', "0")), \
                 ldir  = os.environ.get('PERFMON_LDIR',  "."), \
                 lfile = os.environ.get('PERFMON_LFILE', "_perfmon.stat" ),\
                 duration = 30):
        """Configure level, output directory/file prefix, and stat duration.

        NOTE(review): the defaults are evaluated once at import time, so
        changing the PERFMON_* environment variables afterwards has no
        effect -- presumably intentional for this tool; confirm.
        """
        (self.LEVEL, self.DIR, self.FILE) = (level, ldir, lfile)
        self.duration = duration
        # Path of the CPU accounting output file.
        self.cpu_stat = os.path.normpath(
            os.path.join(self.DIR, self.FILE))

    # entry
    def start(self):
        """Start CPU accounting plus the perf mode matching self.LEVEL."""
        if self.LEVEL >= PerfMon.LEVEL_LOW:
            self._cpu_stat_start()
        if self.LEVEL == PerfMon.LEVEL_PERF_RECORD:
            self._perf_record_start()
        if self.LEVEL == PerfMon.LEVEL_PERF_PROBE_SLEEP_LOCK:
            self._perf_probe_sleep_lock_start("")
        if self.LEVEL == PerfMon.LEVEL_PERF_STAT:
            self._perf_stat_start()
        if self.LEVEL == PerfMon.LEVEL_PERF_PROBE_SLEEP_LOCK_D:
            self._perf_probe_sleep_lock_start("%ax")
        if self.LEVEL == PerfMon.LEVEL_PERF_LOCK:
            self._perf_lock_record_stop() if False else self._perf_lock_record_start()

    def stop(self):
        """Stop monitors and finalize output files.

        NOTE(review): the bare `finally: return` suppresses ANY exception
        raised while stopping -- presumably deliberate best-effort cleanup;
        failures here are silent.
        """
        try:
            if self.LEVEL == PerfMon.LEVEL_PERF_LOCK:
                self._perf_lock_record_stop()
            if self.LEVEL == PerfMon.LEVEL_PERF_RECORD:
                self._perf_record_stop()
            if self.LEVEL == PerfMon.LEVEL_PERF_PROBE_SLEEP_LOCK:
                self._perf_probe_sleep_lock_stop()
            if self.LEVEL == PerfMon.LEVEL_PERF_STAT:
                self._perf_stat_stop()
            if self.LEVEL == PerfMon.LEVEL_PERF_PROBE_SLEEP_LOCK_D:
                self._perf_probe_sleep_lock_stop()
            if self.LEVEL >= PerfMon.LEVEL_LOW:
                self._cpu_stat_stop()
        finally:
            return

    # cpu utilization
    def _cpu_stat_start(self):
        """Snapshot /proc/stat counters to the output file as the baseline."""
        (ncpu, cpu_stat) = self._get_cpu_stat()
        cpu_stat_str = " ".join( map(lambda x: str(x), cpu_stat))
        with open(self.cpu_stat, "w") as fd:
            print(cpu_stat_str, file=fd)
            fd.flush()

    def _cpu_stat_stop(self):
        """Compute deltas against the baseline and rewrite the output file.

        The resulting file has one header line (per-category seconds, then
        per-category utilization percentages) and one data line.
        """
        (ncpu, stat_stop) = self._get_cpu_stat()
        with open(self.cpu_stat, "r") as fd:
            stat_start = [float(p) for p in fd.readline().strip().split()]
        delta = list(map(operator.sub, stat_stop, stat_start))
        # calc. idle time
        total_cpu_time = sum(delta[1:])
        # calc cpu utlization
        delta.extend( list( map(lambda x: x/total_cpu_time * 100.0, delta[1:])))
        # column name string
        name = list( map(lambda x: "%s.sec" % x, PerfMon.CPU_STAT))
        name.extend( list( map(lambda x: "%s.util" % x, PerfMon.CPU_STAT[1:])))
        # write to file
        with open(self.cpu_stat, "w") as fd:
            print( " ".join(name), file=fd)
            print( " ".join( map(lambda x: "%g" % x, delta)), file=fd)
            fd.flush()

    def _get_cpu_stat(self):
        """Return (ncpus, [wall_time, per-category seconds...]) from /proc/stat."""
        # According to Linux Documentation,
        # /proc/stat is as follows;
        # - user: normal processes executing in user mode
        # - nice: niced processes executing in user mode
        # - system: processes executing in kernel mode
        # - idle: twiddling thumbs
        # - iowait: waiting for I/O to complete
        # - irq: servicing interrupts
        # - softirq: servicing softirqs
        # - steal: involuntary wait
        # - guest: running a normal guest
        # - guest_nice: running a niced guest
        p = self._exec_cmd("sudo cat /proc/stat", subprocess.PIPE)
        ncpus = 0
        cpu_stat = []
        for l in p.stdout.readlines():
            l = l.decode("utf-8").strip()
            if l.startswith("cpu"):
                ncpus += 1
            if l.startswith("cpu "):
                # Aggregate line: prefix with wall time, convert jiffies to seconds.
                cpu_stat = [time.time()] + \
                           [int(p)/PerfMon.SC_CLK_TCK \
                            for p in l[4:].strip().split()]
        # "cpu " aggregate line was counted too, hence ncpus - 1.
        return (ncpus - 1, cpu_stat[:len(PerfMon.CPU_STAT)])

    # perf stat
    def _perf_stat_stop(self):
        # `perf stat ... sleep <duration> &` terminates itself; nothing to stop.
        pass

    def _perf_stat_start(self):
        """Run `perf stat` in the background for self.duration seconds."""
        perf_out = os.path.normpath(
            os.path.join(self.DIR, "%s.perf.stat.data" % self.FILE))
        self._exec_cmd("sudo perf stat -a -g -o %s sleep %s &" %
                       (perf_out, self.duration))

    # perf record
    def _perf_record_stop(self):
        self._perf_stop()

    def _perf_record_start(self):
        """Start a system-wide background `perf record` with call graphs."""
        perf_out = os.path.normpath(
            os.path.join(self.DIR, "%s.perf.data" % self.FILE))
        self._exec_cmd("sudo perf record -F %s -a -g -o %s &" %
                       (PerfMon.PERF_SAMPLE_RATE, perf_out))

    # perf probe sleepable locks
    def _perf_probe_cleanup(self):
        """Remove all previously installed perf probes."""
        self._exec_cmd("sudo perf probe --del \'*\'")

    def _perf_probe_add_trace_points(self, arg0):
        """Install a probe for each function in PROBE_SLEEP_LOCK.

        :param arg0: optional probe argument expression (e.g. "%ax"), or ""
        """
        self._perf_probe_cleanup()
        for prob in PerfMon.PROBE_SLEEP_LOCK:
            self._exec_cmd("sudo perf probe --add \'%s %s\'" % (prob, arg0))

    def _perf_probe_cmdline(self, arg0):
        """Build the `perf record` command line covering all installed probes."""
        probe_opt = ""
        for prob in PerfMon.PROBE_SLEEP_LOCK:
            probe_opt += " -e probe:%s" % prob
        if len(arg0) > 0:
            # Encode the probe argument (minus its leading '%') in the file name.
            perf_out = os.path.normpath(
                os.path.join(self.DIR, "%s.perf.sleeplock.%s.data" %
                             (self.FILE, arg0[1:])))
        else:
            perf_out = os.path.normpath(
                os.path.join(self.DIR, "%s.perf.sleeplock.data" %
                             self.FILE))
        return ("sudo perf record %s -F %s -a -g -o %s &" %
                (probe_opt, PerfMon.PERF_SAMPLE_RATE, perf_out))

    def _perf_probe_sleep_lock_stop(self):
        self._perf_stop()
        self._perf_probe_cleanup()

    def _perf_probe_sleep_lock_start(self, arg0):
        """Install sleep-lock probes and start recording them."""
        self._perf_probe_add_trace_points(arg0)
        cmdline = self._perf_probe_cmdline(arg0)
        self._exec_cmd(cmdline)

    # perf lock record
    def _perf_lock_record_stop(self):
        """Disable lock_stat, stop perf, and snapshot /proc/lock_stat."""
        self._exec_cmd("sudo sh -c \"echo 0 >/proc/sys/kernel/lock_stat\"")
        self._perf_stop()
        lock_stat = os.path.normpath(
            os.path.join(self.DIR, "%s.perf.lock_stat" % self.FILE))
        self._exec_cmd("sudo cp /proc/lock_stat %s" % lock_stat)

    def _perf_lock_record_start(self):
        """Enable kernel lock_stat and start `perf lock record`."""
        self._exec_cmd("sudo sh -c \"echo 1 >/proc/sys/kernel/lock_stat\"")
        perf_out = os.path.normpath(
            os.path.join(self.DIR, "%s.perf.lock.data" % self.FILE))
        self._exec_cmd("sudo perf lock record -a -g -o %s &" % perf_out)

    def _perf_stop(self):
        """Send SIGINT to every running perf process so it flushes its data file."""
        with open("/dev/null", "a") as fd:
            self._exec_cmd("sudo kill -INT $(pgrep perf)", fd)

    def _exec_cmd(self, cmd, out=None):
        """Run a shell command, wait for it, and return the Popen object.

        :param out: destination for stdout/stderr (e.g. subprocess.PIPE)
        """
        p = subprocess.Popen(cmd, shell=True, stdout=out, stderr=out)
        p.wait()
        return p
if __name__ == "__main__":
    # XXX. option parsing for level, ldir, and lfile
    # get command
    # Fix: `is not` compares object identity, which is unreliable for int
    # literals (and a SyntaxWarning since Python 3.8); use `!=`.
    if len(sys.argv) != 2:
        exit(1)
    cmd = sys.argv[1]
    # run operation: dispatch "start"/"stop" to the corresponding PerfMon method.
    op = {"start":PerfMon.start,
          "stop":PerfMon.stop}
    def nop(x):
        # Unknown command: exit with a distinct status code.
        exit(2)
    cmd_fn = op.get(cmd, nop)
    perfmon = PerfMon()
    cmd_fn(perfmon)
|
<filename>tests/unit/integration/github/test_utils.py
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import json
import time
import uuid
import pretend
import pytest
import requests
from warehouse.integrations.github import tasks, utils
def test_token_leak_matcher_extract():
    """The base matcher class does not implement extract()."""
    matcher = utils.TokenLeakMatcher()
    with pytest.raises(NotImplementedError):
        matcher.extract("a")
def test_plain_text_token_leak_matcher_extract():
    """The plain-text matcher returns its input verbatim."""
    matcher = utils.PlainTextTokenLeakMatcher()
    assert matcher.extract("a") == "a"
def test_invalid_token_leak_request():
    """The exception message is the first argument; the reason the second."""
    exc = utils.InvalidTokenLeakRequest("a", "b")
    assert (str(exc), exc.reason) == ("a", "b")
@pytest.mark.parametrize(
    "record, error, reason",
    [
        (None, "Record is not a dict but: None", "format"),
        ({}, "Record is missing attribute(s): token, type, url", "format"),
        (
            {"type": "not_found", "token": "a", "url": "b"},
            "Matcher with code not_found not found. "
            "Available codes are: failer, pypi_api_token",
            "invalid_matcher",
        ),
        (
            {"type": "failer", "token": "a", "url": "b"},
            # NOTE(review): "recieved" presumably mirrors the exact (misspelled)
            # message raised in warehouse's github utils -- confirm there before
            # correcting the spelling here.
            "Cannot extract token from recieved match",
            "extraction",
        ),
    ],
)
def test_token_leak_disclosure_request_from_api_record_error(record, error, reason):
    """from_api_record() rejects malformed records with a typed error and reason."""
    # Matcher registered under the code "failer" whose extract() always fails,
    # to exercise the "extraction" error path.
    class MyFailingMatcher(utils.TokenLeakMatcher):
        name = "failer"
        def extract(self, text):
            raise utils.ExtractionFailed()
    with pytest.raises(utils.InvalidTokenLeakRequest) as exc:
        utils.TokenLeakDisclosureRequest.from_api_record(
            record, matchers={"failer": MyFailingMatcher(), **utils.TOKEN_LEAK_MATCHERS}
        )
    assert str(exc.value) == error
    assert exc.value.reason == reason
def test_token_leak_disclosure_request_from_api_record():
    """A valid pypi_api_token record maps onto a disclosure request."""
    record = {"type": "pypi_api_token", "token": "<PASSWORD>", "url": "http://example.com"}
    request = utils.TokenLeakDisclosureRequest.from_api_record(record)
    assert request.token == "<PASSWORD>"
    assert request.public_url == "http://example.com"
class TestCache:
    """Behavior of the time-bounded PublicKeysCache."""

    def test_set(self):
        cache = utils.PublicKeysCache(cache_time=10)
        cache.set(now=1, value="foo")
        assert (cache.cached_at, cache.cache) == (1, "foo")

    def test_get_no_cache(self):
        # Nothing has been stored yet.
        with pytest.raises(utils.CacheMiss):
            utils.PublicKeysCache(cache_time=10).get(now=1)

    def test_get_old_cache(self):
        # Entry stored at t=5 has expired by t=20 (cache_time=10).
        cache = utils.PublicKeysCache(cache_time=10)
        cache.set(now=5, value="foo")
        with pytest.raises(utils.CacheMiss):
            cache.get(now=20)

    def test_get_valid(self):
        # Entry stored at t=5 is still valid at t=10.
        cache = utils.PublicKeysCache(cache_time=10)
        cache.set(now=5, value="foo")
        assert cache.get(now=10) == "foo"
class TestGitHubTokenScanningPayloadVerifier:
def test_init(self):
metrics = pretend.stub()
session = pretend.stub()
token = "api_token"
url = "http://foo"
cache = utils.PublicKeysCache(cache_time=12)
verifier = utils.GitHubTokenScanningPayloadVerifier(
api_url=url,
session=session,
metrics=metrics,
api_token=token,
public_keys_cache=cache,
)
assert verifier._session is session
assert verifier._metrics is metrics
assert verifier._api_token == token
assert verifier._api_url == url
assert verifier._public_keys_cache is cache
def test_verify_cache_miss(self):
# Example taken from
# https://gist.github.com/ewjoachim/7dde11c31d9686ed6b4431c3ca166da2
meta_payload = {
"public_keys": [
{
"key_identifier": "90a421169f0a406205f1563a953312f0be898d3c"
"<KEY>",
"key": "-----BEGIN PUBLIC KEY-----\n"
"<KEY>"
"q\nkCmRCBnYERxZanmcpzQSXs1X/<KEY>"
"-----END PUBLIC KEY-----",
"is_current": True,
}
]
}
response = pretend.stub(
json=lambda: meta_payload, raise_for_status=lambda: None
)
session = pretend.stub(get=lambda *a, **k: response)
metrics = pretend.stub(increment=pretend.call_recorder(lambda str: None))
cache = utils.PublicKeysCache(cache_time=12)
verifier = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=session,
metrics=metrics,
api_token="<PASSWORD>",
public_keys_cache=cache,
)
key_id = "<KEY>"
signature = (
"MEQCIAfgjgz6Ou/3DXMYZBervz1TKCHFsvwMcbuJhNZse622AiAG86/"
"cku2XdcmFWNHl2WSJi2fkE8t+auvB24eURaOd2A=="
)
payload = (
b'[{"type":"github_oauth_token","token":"cb4985f91<PASSWORD>c0234202299'
b'f43808034d7f5","url":" https://github.com/github/faketestrepo/blob/'
b'b0dd59c0b500650cacd4551ca5989a6194001b10/production.env"}]'
)
assert (
verifier.verify(payload=payload, key_id=key_id, signature=signature) is True
)
assert metrics.increment.calls == [
pretend.call("warehouse.token_leak.github.auth.cache.miss"),
pretend.call("warehouse.token_leak.github.auth.success"),
]
def test_verify_cache_hit(self):
session = pretend.stub()
metrics = pretend.stub(increment=pretend.call_recorder(lambda str: None))
cache = utils.PublicKeysCache(cache_time=12)
cache.cached_at = time.time()
cache.cache = [
{
"key_id": "90a421169f0a406205f1563a953312f0be898d3c"
"<KEY>",
"key": "-----BEGIN PUBLIC KEY-----\n"
"<KEY>"
"q\nkCmRCBnYERxZanmcpzQSXs1X/AljlKkbJ8qpVIW4clayyef9gWhFbNHWAA==\n"
"-----END PUBLIC KEY-----",
}
]
verifier = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=session,
metrics=metrics,
api_token="<PASSWORD>",
public_keys_cache=cache,
)
key_id = "<KEY>"
signature = (
"MEQCIAfgjgz6Ou/3DXMYZBervz1TKCHFsvwMcbuJhNZse622AiAG86/"
"cku2XdcmFWNHl2WSJi2fkE8t+auvB24eURaOd2A=="
)
payload = (
b'[{"type":"github_oauth_token","token":"cb<PASSWORD>'
b'f43808034d7f5","url":" https://github.com/github/faketestrepo/blob/'
b'b0dd59c0b500650cacd4551ca5989a6194001b10/production.env"}]'
)
assert (
verifier.verify(payload=payload, key_id=key_id, signature=signature) is True
)
assert metrics.increment.calls == [
pretend.call("warehouse.token_leak.github.auth.cache.hit"),
pretend.call("warehouse.token_leak.github.auth.success"),
]
def test_verify_error(self):
metrics = pretend.stub(increment=pretend.call_recorder(lambda str: None))
cache = utils.PublicKeysCache(cache_time=12)
verifier = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=pretend.stub(),
metrics=metrics,
api_token="<PASSWORD>",
public_keys_cache=cache,
)
verifier._retrieve_public_key_payload = pretend.raiser(
utils.InvalidTokenLeakRequest("Bla", "bla")
)
assert verifier.verify(payload={}, key_id="a", signature="a") is False
assert metrics.increment.calls == [
pretend.call("warehouse.token_leak.github.auth.cache.miss"),
pretend.call("warehouse.token_leak.github.auth.error.bla"),
]
def test_headers_auth_no_token(self):
headers = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=pretend.stub(),
metrics=pretend.stub(),
api_token=None,
public_keys_cache=pretend.stub(),
)._headers_auth()
assert headers == {}
def test_headers_auth_token(self):
headers = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=pretend.stub(),
metrics=pretend.stub(),
api_token="<PASSWORD>-token",
public_keys_cache=pretend.stub(),
)._headers_auth()
assert headers == {"Authorization": "token api-token"}
def test_retrieve_public_key_payload(self):
meta_payload = {
"public_keys": [
{
"key_identifier": "90a421169f0a406205f1563a953312f0be898d3c"
"<KEY>",
"key": "-----BEGIN PUBLIC KEY-----\n"
"<KEY>"
"<KEY>"
"-----END PUBLIC KEY-----",
"is_current": True,
}
]
}
response = pretend.stub(
json=lambda: meta_payload, raise_for_status=lambda: None
)
session = pretend.stub(get=pretend.call_recorder(lambda *a, **k: response))
metrics = pretend.stub(increment=pretend.call_recorder(lambda str: None))
verifier = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=session,
metrics=metrics,
api_token="api-token",
public_keys_cache=pretend.stub(),
)
assert verifier._retrieve_public_key_payload() == meta_payload
assert session.get.calls == [
pretend.call(
"http://foo",
headers={"Authorization": "token api-token"},
)
]
def test_get_cached_public_key_cache_hit(self):
metrics = pretend.stub()
session = pretend.stub()
cache = utils.PublicKeysCache(cache_time=12)
cache_value = pretend.stub()
cache.set(now=time.time(), value=cache_value)
verifier = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=session,
metrics=metrics,
public_keys_cache=cache,
)
assert verifier._get_cached_public_keys() is cache_value
def test_get_cached_public_key_cache_miss_no_cache(self):
metrics = pretend.stub()
session = pretend.stub()
cache = utils.PublicKeysCache(cache_time=12)
verifier = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=session,
metrics=metrics,
public_keys_cache=cache,
)
with pytest.raises(utils.CacheMiss):
verifier._get_cached_public_keys()
def test_retrieve_public_key_payload_http_error(self):
response = pretend.stub(
status_code=418,
text="I'm a teapot",
raise_for_status=pretend.raiser(requests.HTTPError),
)
session = pretend.stub(
get=lambda *a, **k: response,
)
verifier = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=session,
metrics=pretend.stub(),
public_keys_cache=pretend.stub(),
)
with pytest.raises(utils.GitHubPublicKeyMetaAPIError) as exc:
verifier._retrieve_public_key_payload()
assert str(exc.value) == "Invalid response code 418: I'm a teapot"
assert exc.value.reason == "public_key_api.status.418"
def test_retrieve_public_key_payload_json_error(self):
response = pretend.stub(
text="Still a non-json teapot",
json=pretend.raiser(json.JSONDecodeError("", "", 3)),
raise_for_status=lambda: None,
)
session = pretend.stub(get=lambda *a, **k: response)
verifier = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=session,
metrics=pretend.stub(),
public_keys_cache=pretend.stub(),
)
with pytest.raises(utils.GitHubPublicKeyMetaAPIError) as exc:
verifier._retrieve_public_key_payload()
assert str(exc.value) == "Non-JSON response received: Still a non-json teapot"
assert exc.value.reason == "public_key_api.invalid_json"
def test_retrieve_public_key_payload_connection_error(self):
session = pretend.stub(get=pretend.raiser(requests.ConnectionError))
verifier = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=session,
metrics=pretend.stub(),
public_keys_cache=pretend.stub(),
)
with pytest.raises(utils.GitHubPublicKeyMetaAPIError) as exc:
verifier._retrieve_public_key_payload()
assert str(exc.value) == "Could not connect to GitHub"
assert exc.value.reason == "public_key_api.network_error"
def test_extract_public_keys(self):
meta_payload = {
"public_keys": [
{
"key_identifier": "90a421169f0a406205f1563a953312f0be898d3c"
"<KEY>",
"key": "-----BEGIN PUBLIC KEY-----\n"
"<KEY>"
"<KEY>"
"-----END PUBLIC KEY-----",
"is_current": True,
}
]
}
cache = utils.PublicKeysCache(cache_time=12)
verifier = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=pretend.stub(),
metrics=pretend.stub(),
public_keys_cache=cache,
)
keys = verifier._extract_public_keys(pubkey_api_data=meta_payload)
assert keys == [
{
"key": "-----<KEY>"
"<KEY>"
"<KEY>-----END PUBLIC KEY-----",
"key_id": "90a421169f0a406205f1563a953312f0be"
"<KEY>",
}
]
assert cache.cache == keys
@pytest.mark.parametrize(
"payload, expected",
[
([], "Payload is not a dict but: []"),
({}, "Payload misses 'public_keys' attribute"),
({"public_keys": None}, "Payload 'public_keys' attribute is not a list"),
({"public_keys": [None]}, "Key is not a dict but: None"),
(
{"public_keys": [{}]},
"Missing attribute in key: ['key', 'key_identifier']",
),
(
{"public_keys": [{"key": "a"}]},
"Missing attribute in key: ['key_identifier']",
),
(
{"public_keys": [{"key_identifier": "a"}]},
"Missing attribute in key: ['key']",
),
],
)
def test_extract_public_keys_error(self, payload, expected):
cache = utils.PublicKeysCache(cache_time=12)
verifier = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=pretend.stub(),
metrics=pretend.stub(),
public_keys_cache=cache,
)
with pytest.raises(utils.GitHubPublicKeyMetaAPIError) as exc:
list(verifier._extract_public_keys(pubkey_api_data=payload))
assert exc.value.reason == "public_key_api.format_error"
assert str(exc.value) == expected
assert cache.cache is None
def test_check_public_key(self):
verifier = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=pretend.stub(),
metrics=pretend.stub(),
public_keys_cache=pretend.stub(),
)
keys = [
{"key_id": "a", "key": "b"},
{"key_id": "c", "key": "d"},
]
assert verifier._check_public_key(github_public_keys=keys, key_id="c") == "d"
def test_check_public_key_error(self):
verifier = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=pretend.stub(),
metrics=pretend.stub(),
public_keys_cache=pretend.stub(),
)
with pytest.raises(utils.InvalidTokenLeakRequest) as exc:
verifier._check_public_key(github_public_keys=[], key_id="c")
assert str(exc.value) == "Key c not found in github public keys"
assert exc.value.reason == "wrong_key_id"
def test_check_signature(self):
verifier = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=pretend.stub(),
metrics=pretend.stub(),
public_keys_cache=pretend.stub(),
)
public_key = (
"-----BEGIN PUBLIC KEY-----\n"
"<KEY>"
"<KEY>"
"-----END PUBLIC KEY-----"
)
signature = (
"MEQCIAfgjgz6Ou/3DXMYZBervz1TKCHFsvwMcbuJhNZse622AiAG86/"
"cku2XdcmFWNHl2WSJi2fkE8t+auvB24eURaOd2A=="
)
payload = (
b'[{"type":"github_oauth_token","token":"<PASSWORD>'
b'f43808034d7f5","url":" https://github.com/github/faketestrepo/blob/'
b'b0dd59c0b500650cacd4551ca5989a6194001b10/production.env"}]'
)
assert (
verifier._check_signature(
payload=payload, public_key=public_key, signature=signature
)
is None
)
    def test_check_signature_invalid_signature(self):
        # A structurally valid but wrong signature must raise with reason
        # "invalid_signature".
        verifier = utils.GitHubTokenScanningPayloadVerifier(
            api_url="http://foo",
            session=pretend.stub(),
            metrics=pretend.stub(),
            public_keys_cache=pretend.stub(),
        )
        public_key = (
            "-----BEGIN PUBLIC KEY-----\n"
            "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE9MJJHnMfn2+H4xL4YaPDA4RpJqU"
            "q\nkCmRCBnYERxZanmcpzQSXs1X/AljlKkbJ8qpVIW4clayyef9gWhFbNHWAA==\n"
            "-----END PUBLIC KEY-----"
        )
        # Changed the initial N for an M
        signature = (
            "NEQCIAfgjgz6Ou/3DXMYZBervz1TKCHFsvwMcbuJhNZse622AiAG86/"
            "cku2XdcmFWNHl2WSJi2fkE8t+auvB24eURaOd2A=="
        )
        payload = (
            b'[{"type":"github_oauth_token","token":"<PASSWORD>'
            b'f43808034d7f5","url":" https://github.com/github/faketestrepo/blob/'
            b'b0dd59c0b500650cacd4551ca5989a6194001b10/production.env"}]'
        )
        with pytest.raises(utils.InvalidTokenLeakRequest) as exc:
            verifier._check_signature(
                payload=payload, public_key=public_key, signature=signature
            )
        assert str(exc.value) == "Invalid signature"
        assert exc.value.reason == "invalid_signature"
def test_check_signature_invalid_crypto(self):
verifier = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=pretend.stub(),
metrics=pretend.stub(),
public_keys_cache=pretend.stub(),
)
public_key = ""
signature = ""
payload = "yeah, nope, that won't pass"
with pytest.raises(utils.InvalidTokenLeakRequest) as exc:
verifier._check_signature(
payload=payload, public_key=public_key, signature=signature
)
assert str(exc.value) == "Invalid cryptographic values"
assert exc.value.reason == "invalid_crypto"
def test_analyze_disclosure(monkeypatch):
    # End-to-end happy path: a leaked PyPI token is resolved to a macaroon,
    # the macaroon is deleted, the owner is emailed, an account event is
    # recorded, and the success metrics are incremented.
    metrics = collections.Counter()
    def metrics_increment(key):
        metrics.update([key])
    user_id = uuid.UUID(bytes=b"0" * 16)
    user = pretend.stub(id=user_id)
    database_macaroon = pretend.stub(
        user=user, id=12, caveats={"permissions": "user"}, description="foo"
    )
    # Recorders so we can assert exactly how the services were called.
    find = pretend.call_recorder(lambda *a, **kw: database_macaroon)
    delete = pretend.call_recorder(lambda *a, **kw: None)
    record_event = pretend.call_recorder(lambda *a, **kw: None)
    svc = {
        utils.IMetricsService: pretend.stub(increment=metrics_increment),
        utils.IMacaroonService: pretend.stub(
            find_from_raw=find, delete_macaroon=delete
        ),
        utils.IUserService: pretend.stub(record_event=record_event),
    }
    request = pretend.stub(find_service=lambda iface, context: svc[iface])
    send_email = pretend.call_recorder(lambda *a, **kw: None)
    monkeypatch.setattr(utils, "send_token_compromised_email_leak", send_email)
    utils.analyze_disclosure(
        request=request,
        disclosure_record={
            "type": "pypi_api_token",
            "token": "pypi-<PASSWORD>",
            "url": "http://example.com",
        },
        origin="github",
    )
    # "recieved" [sic] -- must match the (misspelled) metric name emitted
    # by the production code; do not "fix" it here alone.
    assert metrics == {
        "warehouse.token_leak.github.recieved": 1,
        "warehouse.token_leak.github.processed": 1,
        "warehouse.token_leak.github.valid": 1,
    }
    assert send_email.calls == [
        pretend.call(request, user, public_url="http://example.com", origin="github")
    ]
    # NOTE(review): "pypi-1234" suggests the "<PASSWORD>" placeholder above
    # was "1234" before redaction -- confirm against the upstream test.
    assert find.calls == [pretend.call(raw_macaroon="pypi-1234")]
    assert delete.calls == [pretend.call(macaroon_id="12")]
    assert record_event.calls == [
        pretend.call(
            user_id,
            tag="account:api_token:removed_leak",
            ip_address="127.0.0.1",
            additional={
                "macaroon_id": "12",
                "public_url": "http://example.com",
                "permissions": "user",
                "description": "foo",
            },
        )
    ]
def test_analyze_disclosure_wrong_record():
    """A disclosure record missing required keys counts as a format error."""
    metrics = collections.Counter()

    def record_metric(key):
        metrics.update([key])

    services = {
        utils.IMetricsService: pretend.stub(increment=record_metric),
        utils.IMacaroonService: pretend.stub(),
    }
    request = pretend.stub(find_service=lambda iface, context: services[iface])
    utils.analyze_disclosure(request=request, disclosure_record={}, origin="github")
    # "recieved" [sic] matches the metric name used by the production code.
    assert metrics == {
        "warehouse.token_leak.github.recieved": 1,
        "warehouse.token_leak.github.error.format": 1,
    }
def test_analyze_disclosure_invalid_macaroon():
    """An unknown/invalid macaroon is counted as error.invalid, not raised."""
    metrics = collections.Counter()

    def record_metric(key):
        metrics.update([key])

    find = pretend.raiser(utils.InvalidMacaroon("Bla", "bla"))
    services = {
        utils.IMetricsService: pretend.stub(increment=record_metric),
        utils.IMacaroonService: pretend.stub(find_from_raw=find),
    }
    request = pretend.stub(find_service=lambda iface, context: services[iface])
    record = {
        "type": "pypi_api_token",
        "token": "<PASSWORD>",
        "url": "http://example.com",
    }
    utils.analyze_disclosure(request=request, disclosure_record=record, origin="github")
    assert metrics == {
        "warehouse.token_leak.github.recieved": 1,
        "warehouse.token_leak.github.error.invalid": 1,
    }
def test_analyze_disclosure_unknown_error(monkeypatch):
    """Unexpected exceptions bump error.unknown and are re-raised."""
    metrics = collections.Counter()

    def record_metric(key):
        metrics.update([key])

    request = pretend.stub(
        find_service=lambda *a, **k: pretend.stub(increment=record_metric)
    )
    monkeypatch.setattr(utils, "_analyze_disclosure", pretend.raiser(ValueError()))
    with pytest.raises(ValueError):
        utils.analyze_disclosure(request=request, disclosure_record={}, origin="github")
    assert metrics == {"warehouse.token_leak.github.error.unknown": 1}
def test_analyze_disclosures_wrong_type():
    """analyze_disclosures rejects payloads that are not lists."""
    metrics = collections.Counter()

    def record_metric(key):
        metrics.update([key])

    with pytest.raises(utils.InvalidTokenLeakRequest) as exc:
        utils.analyze_disclosures(
            request=pretend.stub(),
            disclosure_records={},
            origin="yay",
            metrics=pretend.stub(increment=record_metric),
        )
    assert exc.value.reason == "format"
    assert str(exc.value) == "Invalid format: payload is not a list"
def test_analyze_disclosures_raise(monkeypatch):
    """Each record in the payload is dispatched to the analysis task once."""
    metrics = collections.Counter()

    def record_metric(key):
        metrics.update([key])

    task = pretend.stub(delay=pretend.call_recorder(lambda *a, **k: None))
    monkeypatch.setattr(tasks, "analyze_disclosure_task", task)
    request = pretend.stub(task=lambda x: task)
    utils.analyze_disclosures(
        request=request,
        disclosure_records=[1, 2, 3],
        origin="yay",
        metrics=pretend.stub(increment=record_metric),
    )
    assert task.delay.calls == [
        pretend.call(disclosure_record=record, origin="yay")
        for record in (1, 2, 3)
    ]
|
from Bio import PDB
import numpy as np
import pandas as pd
from biodescriptors.calc import constraints
from biodescriptors.calc import utils
def _calc_dssp_hel(dssp, ref):
    """Compare reference helix boundaries against DSSP secondary structure.

    Parameters
    ----------
    dssp : Bio.PDB.DSSP
        DSSP mapping for the structure; only chain 'A' entries are used.
    ref : list of (int, int)
        Residue-number pairs (start, end), one per expected helix.

    Returns
    -------
    (result, n_res) : tuple
        ``result`` -- the DSSP-observed [start, end] residue numbers for
        each reference helix ([0, 0] when no helix was found inside the
        reference window); ``n_res`` -- total residue count of DSSP
        helices lying outside every matched reference window.
    """
    # TODO: Split function into smaller functions
    chainA = [key for key in dssp.keys() if key[0] == 'A']
    # One flag per chain-A position; set to 1 once covered by a matched helix.
    helix_map = np.zeros([1, len(chainA)])
    res_num = utils.getResidues(dssp)
    dssp_start = 0
    dssp_end = 0
    result = []
    #print(res_num)
    for i in range(len(ref)):
        #print(ref[i][0])
        # Map reference residue numbers to indices into the DSSP key list.
        start = utils.getNum(ref[i][0], res_num)
        end = utils.getNum(ref[i][1], res_num)
        #finding starting point
        start_longer_counter = 0
        start_shorter_counter = 0
        # TODO: wrap in single func
        if dssp[list(dssp.keys())[start]][2] == 'H':
            # check the first iteration
            # Helix already present at the ref start: walk backwards while
            # still 'H', stopping if we run into the previous helix's end.
            while dssp[list(dssp.keys())[start-1]][2] == 'H' and utils.getRes(start-1, res_num) != dssp_end:
                start_longer_counter+=1
                start-=1
            missing=False
        else:
            # No helix at the ref start: scan forward inside the reference
            # window; if no 'H' is found the helix is reported as missing.
            missing_counter = 0
            missing = True
            while missing_counter < (end-start):
                start+=1
                start_shorter_counter+=1
                if dssp[list(dssp.keys())[start]][2] == 'H':
                    missing = False
                    break
                else:
                    missing_counter +=1
        #
        #finding endpoint
        if missing == False:
            end_longer_counter = 0
            end_shorter_counter = 0
            if dssp[list(dssp.keys())[end]][2] == 'H':
                if i != (len(ref)-1):
                    # Extend until the DSSP helix ends or the next reference
                    # helix begins.
                    while dssp[list(dssp.keys())[end+1]][2] == 'H' and end+1 != utils.getNum(ref[i+1][0], res_num):
                        end_longer_counter+=1
                        end+=1
                else:
                    # Last reference helix: extend until DSSP ends or the
                    # chain runs out (IndexError probe below).
                    while dssp[list(dssp.keys())[end+1]][2] == 'H':
                        end_longer_counter+=1
                        end+=1
                        try:
                            dssp[list(dssp.keys())[end+1]][2] == 'H'
                        except IndexError:
                            break
            else:
                # Helix ended before the reference end: walk back to the
                # last 'H' residue.
                while dssp[list(dssp.keys())[end]][2] != 'H':
                    end-=1
                    end_shorter_counter+=1
            # Translate index offsets back into residue numbers.
            if start_shorter_counter > 0:
                dssp_start = ref[i][0] + start_shorter_counter
            else:
                dssp_start = ref[i][0] - start_longer_counter
            if end_shorter_counter > 0:
                dssp_end = ref[i][1] - end_shorter_counter
            else:
                dssp_end = ref[i][1] + end_longer_counter
            result.append([dssp_start, dssp_end])
            # NOTE(review): this inner loop rebinds ``i``; harmless in a
            # Python for-range loop but easy to misread.
            for i in range(start, end+1):
                helix_map[0][i] = 1
        else:
            result.append([0, 0])
    extras = []
    map_elem=0
    # TODO: wrap
    # Second pass: collect DSSP helices not covered by any matched window.
    while map_elem < helix_map.shape[1]:
        if helix_map[0][map_elem] == 0:
            if dssp[list(dssp.keys())[map_elem]][2] == 'H':
                extra_counter = map_elem
                while dssp[list(dssp.keys())[extra_counter+1]][2] == 'H':
                    extra_counter+=1
                extras.append([utils.getRes(map_elem, res_num), utils.getRes(extra_counter, res_num)])
                if map_elem == extra_counter:
                    map_elem+=1
                else:
                    map_elem=extra_counter+1
            else:
                map_elem+=1
        else:
            map_elem+=1
    # Total number of residues in "extra" (unreferenced) helices.
    n_res = 0
    for e in extras:
        n_res+=e[1]-e[0]+1
    return result, n_res
def calc_dssp_hel(pdb_file, ref):
    """
    Calculates differences with DSSP output.

    Parameters:
    ----------
    pdb_file: str
        Filename of .pdb file used for calculation.
    ref: list of lists (int, int)
        List of amino acid numbers pairs (start, end) for each helix.

    Returns:
    -------
    tuple(list of [int, int], int) -- per-helix DSSP [start, end] pairs and
    the residue count of extra (unreferenced) helices, as produced by
    ``_calc_dssp_hel``.

    Raises:
    ------
    ValueError
        If ``ref`` is None or not a list.
    """
    # Validate ``ref`` before doing the (expensive) PDB parse + DSSP run,
    # so bad input fails fast.
    if not isinstance(ref, list):
        if ref is None:
            # Plain strings: the originals were f-strings with no placeholders.
            raise ValueError("Ref list is None!")
        raise ValueError(f"Unexpected type for ref: {type(ref)}")
    _, _, model, _, _ = utils.get_model_and_structure(pdb_file)
    dssp = PDB.DSSP(model, pdb_file)
    return _calc_dssp_hel(dssp, ref)
def dssp_hel_to_pandas(pdb_file, ref, protein_name=None, **kwargs):
    """Put per-helix DSSP start/end positions into a one-row pandas DataFrame.

    Parameters:
    ----------
    pdb_file: str
        Filename of .pdb file used for calculation.
    ref: list of ints
        List of amino acid numbers pairs (start, end) for each helix.
    protein_name: str, default=None
        Protein name to be added to the resulting dataframe.

    Returns:
    -------
    pandas.DataFrame with calculated descriptor. On KeyError/ValueError the
    row contains only the protein name (remaining columns are NaN).
    """
    # Column layout assumes at most 13 helices -- TODO confirm this limit.
    cols_dssp = (['prot_name']
                 + ['DSSP start_H' + str(elem) for elem in range(1, 14)]
                 + ['DSSP end_H' + str(elem) for elem in range(1, 14)])
    dssp_hels = None
    try:
        dssp_hels = calc_dssp_hel(pdb_file, ref)
    except KeyError:
        # DSSP output may lack expected residues; report and emit a bare row.
        if protein_name:
            print(f'{protein_name}: KeyError while calculating dssp')
        else:
            print('KeyError while calculating dssp')
    except ValueError as e:
        if protein_name:
            print(f'{protein_name}: {e}')
        else:
            print(e)
    data_dssp_hels = [protein_name]
    if dssp_hels is not None:
        # dssp_hels[0] is the list of [start, end] pairs per reference helix.
        for hel in dssp_hels[0]:
            data_dssp_hels.append(hel[0])
            data_dssp_hels.append(hel[1])
    # DataFrame.append was removed in pandas 2.0; build the row explicitly
    # and concatenate (equivalent to the old append(..., ignore_index=True):
    # missing columns are filled with NaN, column order is preserved).
    row = pd.Series(data_dssp_hels, index=cols_dssp[0:len(data_dssp_hels)])
    df_dssp = pd.concat([pd.DataFrame(columns=cols_dssp), row.to_frame().T],
                        ignore_index=True)
    return df_dssp
def dssp_extra_to_pandas(pdb_file, ref, protein_name=None, **kwargs):
    """
    Putting differences with DSSP in pandas dataframe (extra).

    Parameters:
    ----------
    pdb_file: str
        Filename of .pdb file used for calculation.
    ref: list of ints
        List of amino acid numbers pairs (start, end) for each helix.
    protein_name: str, default=None
        Protein name to be added to the resulting dataframe.

    Returns:
    -------
    pandas.DataFrame with calculated descriptor. On KeyError/ValueError the
    row contains only the protein name (the count column is NaN).
    """
    cols_extra_res = ['prot_name', 'N_res extra helical']
    dssp_hels = None
    try:
        dssp_hels = calc_dssp_hel(pdb_file, ref)
    except KeyError:
        # DSSP output may lack expected residues; report and emit a bare row.
        if protein_name:
            print(f'{protein_name}: KeyError while calculating dssp')
        else:
            print('KeyError while calculating dssp')
    except ValueError as e:
        if protein_name:
            print(f'{protein_name}: {e}')
        else:
            print(e)
    data_extra_hels = [protein_name]
    if dssp_hels is not None:
        # dssp_hels[1] is the residue count of extra (unreferenced) helices.
        data_extra_hels.append(dssp_hels[1])
    # DataFrame.append was removed in pandas 2.0; use pd.concat instead
    # (behaviour matches the old append(..., ignore_index=True)).
    row = pd.Series(data_extra_hels, index=cols_extra_res[0:len(data_extra_hels)])
    df_extra = pd.concat([pd.DataFrame(columns=cols_extra_res), row.to_frame().T],
                         ignore_index=True)
    return df_extra
|
import streamlit as st
import streamlit.components.v1 as components
import shap
# Text plots return a IPython.core.display.HTML object
# Set display=False to return HTML string instead.
# NOTE(review): overriding __defaults__ depends on shap.plots.text's exact
# positional-default layout -- fragile across shap versions; verify on upgrade.
shap.plots.text.__defaults__ = (0, 0.01, '', None, None, None, False)
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
# Prevent clipping of the ticks and axis labels
plt.rcParams['figure.autolayout'] = True
import base64
from io import BytesIO
# Shap plots internally call plt.show()
# On Linux, prevent plt.show() from emitting a non-GUI backend warning.
import os
os.environ.pop("DISPLAY", None)
# Note: Colorbar changes (introduced bugs) in matplotlib>3.4.3
# cause the colorbar of certain shap plots (e.g. beeswarm) to not display properly
# See: https://github.com/matplotlib/matplotlib/issues/22625 and
# https://github.com/matplotlib/matplotlib/issues/22087
# If colorbars are not displayed properly, try downgrading matplotlib to 3.4.3
def _fig_to_img_html(fig, height, width):
    """Render a matplotlib Figure to an inline base64 <img> HTML string.

    Resolves ``height``/``width`` from the figure when None, resizes the
    figure to those pixel dimensions, and closes it afterwards so pyplot
    can reclaim the memory. Returns (html_str, height, width).
    """
    buf = BytesIO()
    if height is None:
        _, height = fig.get_size_inches() * fig.dpi
    if width is None:
        width, _ = fig.get_size_inches() * fig.dpi
    fig.set_size_inches(width / fig.dpi, height / fig.dpi, forward=True)
    fig.savefig(buf, format="png")
    # Embed the result in the HTML output
    data = base64.b64encode(buf.getbuffer()).decode("ascii")
    html_str = f"<img src='data:image/png;base64,{data}'/>"
    # Enable pyplot to properly clean up the memory
    plt.cla()
    plt.close(fig)
    return html_str, height, width
def st_shap(plot, height=None, width=None):
    """Takes a SHAP plot as input, and returns a streamlit.delta_generator.DeltaGenerator as output.

    It is recommended to set the height and width
    parameter to have the plot fit to the window.

    Parameters
    ----------
    plot : None or matplotlib.figure.Figure or SHAP plot object
        The SHAP plot object.
    height: int or None
        The height of the plot in pixels.
    width: int or None
        The width of the plot in pixels.

    Returns
    -------
    streamlit.delta_generator.DeltaGenerator
        A SHAP plot as a streamlit.delta_generator.DeltaGenerator object.
    """
    # Plots such as waterfall and bar have no return value
    # They create a new figure and call plt.show()
    if plot is None:
        # Test whether there is currently a Figure on the pyplot figure stack
        # A Figure exists if the shap plot called plt.show()
        if plt.get_fignums():
            html_str, height, width = _fig_to_img_html(plt.gcf(), height, width)
            fig = components.html(html_str, height=height, width=width)
        else:
            fig = components.html(
                "<p>[Error] No plot to display. Received object of type <class 'NoneType'>.</p>"
            )
    # SHAP plots return a matplotlib.figure.Figure object when passed show=False as an argument
    elif isinstance(plot, Figure):
        html_str, height, width = _fig_to_img_html(plot, height, width)
        fig = components.html(html_str, height=height, width=width)
    # SHAP plots containing JS/HTML have one or more of the following callable attributes
    elif hasattr(plot, "html") or hasattr(plot, "data") or hasattr(plot, "matplotlib"):
        # NOTE(review): objects matching only on "data"/"matplotlib" are
        # assumed to also provide .html() -- confirm against shap's JS plots.
        shap_js = f"{shap.getjs()}".replace('height=350', f'height={height}').replace('width=100', f'width={width}')
        shap_html = f"<head>{shap_js}</head><body>{plot.html()}</body>"
        fig = components.html(shap_html, height=height, width=width)
    # shap.plots.text plots have been overridden to return a string
    elif isinstance(plot, str):
        fig = components.html(plot, height=height, width=width)
    else:
        fig = components.html(
            "<p>[Error] No plot to display. Unable to understand input.</p>"
        )
    return fig
|
# American Magnetics, Inc. (AMI) One Axis magnet with PCS_SN14768
import time
import logging
import numpy as np
# from scipy.optimize import brent
# from math import gcd
# from qcodes import Instrument
from qcodes.utils import validators as vals
# from qcodes.instrument.parameter import ManualParameter
from pycqed.analysis import analysis_toolbox as atools
# from pycqed.utilities.general import add_suffix_to_dict_keys
from pycqed.measurement import detector_functions as det
# from pycqed.measurement import composite_detector_functions as cdet
# from pycqed.measurement import mc_parameter_wrapper as pw
from pycqed.measurement import sweep_functions as swf
# from pycqed.measurement import awg_sweep_functions as awg_swf
# from pycqed.analysis import measurement_analysis as ma
# from pycqed.measurement.calibration_toolbox import mixer_carrier_cancellation_5014
# from pycqed.measurement.calibration_toolbox import mixer_carrier_cancellation_UHFQC
# from pycqed.measurement.calibration_toolbox import mixer_skewness_calibration_5014
# from pycqed.measurement.optimization import nelder_mead
from pycqed.analysis import analysis_toolbox as a_tools
# import pycqed.measurement.pulse_sequences.single_qubit_tek_seq_elts as sq
import logging
import numpy as np
from copy import deepcopy,copy
import qcodes as qc
from qcodes.instrument.base import Instrument
from qcodes.utils import validators as vals
from qcodes.instrument.parameter import ManualParameter
from pycqed.instrument_drivers.pq_parameters import InstrumentParameter
class AMI_Magnet_PCS_SN14768(Instrument):
'''
Instrument used for translating Fields into current settings
and controlling the persistent current switch.
Initialization when the previous measurement did not have the magnet is
a bit awkward. The driver checks the last measurement for the value and if
this does not exists it fails. To do the first initialization it is necessary
to start everything up while having the 'get_field' function return 0 always.
Make a fake folder with the name
'Switch_is_changed_to_SuperConducting_state'.
Then exit and reinitialize with the
'get_field' function returning what it is supposed to.
'''
def __init__(self, name,
Current_source_magnet_name,
Current_source_heater_name,MC_inst,**kw): # IVVI.dac1
super().__init__(name, **kw)
self.protection_state=False
# Set instrumentss
self.add_parameter('i_magnet', parameter_class=InstrumentParameter)
self.i_magnet = Current_source_magnet_name
self.add_parameter('i_heater', parameter_class=InstrumentParameter)
self.i_heater = Current_source_heater_name
self.MC = MC_inst
# Specifications of One Axis AMI magnet with PCS_14768
self.max_current = 5.0 # Amperes
#### Dirty hack to get the z-axis of the new magnet running
self.field_to_current = 0.0496 # Telsa/Ampere
# (Spec sheet says 0.500 kG/A = 50 mT/A)
self.max_field = self.max_current*self.field_to_current # Tesla
#(Spec sheet says 20 kG = 2.0 T)
# self.Ramp_Rate = 0.05 # Ampere/Second
self.max_ramp_rate = 0.2 # Max ramp rate: 1.32 # Ampere/Second
self.init_ramp_rate = 0.025
self.charging_voltage = 1.0 # Volt
self.inductance = 0.7 # Henry
self.persistent_switch_heater_current = 21e-3 # Ampere (21 mA)
self.heater_mvolt_to_current = 0.020*1e-3 # A/mV (20 mA per 1000 mV)
self.persistent_switch_heater_nominal_resistance = 82 # Ohms
#(Measured at room temperature, thus normal conducing.)
self.magnet_resistanc_in_parallel_with_switch = 35 # Ohms
#(Measured at room temperature, thus normal conducting.)
self.add_parameter('source_current',
get_cmd=self.get_source_current,
set_cmd=self.set_source_current,
label='Source Current',
unit='A',
vals=vals.Numbers(min_value=0.,max_value=self.max_current),
docstring='Current supplied to the magnet')
#ramp rate should not be a parameter
self.add_parameter('ramp_rate',
label='Ramp Rate',
unit='A/s',
initial_value=self.init_ramp_rate,
get_parser=float,
vals=vals.Numbers(min_value=0.,max_value=self.max_ramp_rate),
parameter_class=ManualParameter,
docstring='Ramp Rate of the magnet current source')
self.add_parameter('field',
get_cmd=self.get_field,
set_cmd=self.set_field,
label='Persistent Field',
unit='T',
vals=vals.Numbers(min_value=0.,max_value=self.max_field),
docstring='Persistent magnetic field')
# It would be great if the variable field could only be
# set by the program, not by the used. It should only serve as a memory
# of the previous persistent field.
self.add_parameter('switch_state',
get_cmd=self.get_switch_state,
set_cmd=self.set_switch_state,
label='Switch State',
unit='',
vals=vals.Enum('SuperConducting','NormalConducting'),
docstring='Indicating whether the persistent current\
switch is superconducting or normal conducting')
self.protection_state=True
self.get_all()
'''
You need to heat the persistent current switch to turn it from
a superconductor into a normal conductor. When the persistent \
current switch is superconducting there is a persistent current,
when it is normal conducting there is no persistent current and
you can controll the current with the current source.
!! Thus it is important to heat the persistent current switch if
you want to change the field !!
!! Also important that when you want to switch off the persistent
current that you provide the same current with the current source
on the leads !! BEFORE !! you heat the persistent current switch !!
'''
def get_all(self):
self.get_source_current()
self.get_field()
self.switch_state()
return
def get_source_current(self):
return self.i_magnet.measurei()
def set_source_current(self,current):
self.i_magnet.seti(current)
return 'Current set to '+str(current)+' A'
def get_heater_current(self):
return self.heater_mvolt_to_current*self.i_heater()
def get_switch_state(self):
heater_current = self.get_heater_current()
if 1.05*self.persistent_switch_heater_current>heater_current\
>0.95*self.persistent_switch_heater_current:
return 'NormalConducting'
elif 0.05*self.persistent_switch_heater_current>heater_current\
>-0.05*self.persistent_switch_heater_current:
return 'SuperConducting'
else:
raise ValueError('Switch is not in a well defined state!')
def set_switch_state(self,desired_state):
if desired_state == 'SuperConducting' and\
self.get_switch_state() == 'SuperConducting':
print('Already SuperConducting')
return 'SuperConducting'
elif desired_state == 'NormalConducting' and\
self.get_switch_state() == 'NormalConducting':
print('Already NormalConducting')
return 'NormalConducting'
elif desired_state == 'SuperConducting' and\
self.get_switch_state() == 'NormalConducting':
print('Ramping current down...')
self.i_heater(0)
print('Wait 2 minutes to cool the switch.')
time.sleep(120) # 120
print('Switch is now SuperConducting')
self.fake_folder(folder_name='Switch_is_changed_to_SuperConducting_state')
return 'SuperConducting'
elif desired_state == 'NormalConducting' and\
self.get_switch_state() == 'SuperConducting':
if self.i_magnet.measureR() > 1.:
raise ValueError('Magnet leads not connected!')
else:
supplied_current = self.get_source_current()
if self.field()==None:
print('Sourcing current...')
self.i_heater(self.persistent_switch_heater_current\
/self.heater_mvolt_to_current)
print('Wait 30 seconds to heat up the switch.')
time.sleep(30) # 30
print('Switch is now NormalConducting')
self.fake_folder(folder_name='Switch_is_changed_to_NormalConducting_state')
return 'NormalConducting'
elif supplied_current<2e-3 and np.abs(self.field())<1e-4:
print('Sourcing current...')
self.i_heater(self.persistent_switch_heater_current\
/self.heater_mvolt_to_current)
print('Wait 30 seconds to heat up the switch.')
time.sleep(30) # 30
print('Switch is now NormalConducting')
self.fake_folder(folder_name='Switch_is_changed_to_NormalConducting_state')
return 'NormalConducting'
elif not 0.98*supplied_current<self.BtoI(self.field())\
<1.02*supplied_current:
raise ValueError('Current is not \
according to the field value! Use \
bring_source_to_field function to \
bring it to the correct value.')
else:
print('Sourcing current...')
self.i_heater(self.persistent_switch_heater_current\
/self.heater_mvolt_to_current)
print('Wait 30 seconds to heat up the switch.')
time.sleep(30) # 30
print('Switch is now NormalConducting')
self.fake_folder(folder_name='Switch_is_changed_to_NormalConducting_state')
return 'NormalConducting'
else:
return 'Input SuperConducting or NormalConducting as desired state.'
def set_field(self,field):
if not self.protection_state:
return 0.
if self.switch_state() == 'SuperConducting':
raise ValueError('Switch is SuperConducting. Can not change the field.')
elif self.switch_state() =='NormalConducting':
if self.i_magnet.measurei()==0:
self.step_magfield_to_value(field)
# self.field(field)
field_folder_name = 'Changed_field_to_' + str(field) + '_T'
self.fake_folder(folder_name=field_folder_name)
return 'field at ' +str(field)+' T'
elif self.i_magnet.measureR()>1:
raise ValueError('Magnet leads are not connected \
or manget quenched!')
else:
self.step_magfield_to_value(field)
# self.field(field)
field_folder_name = 'Changed_field_to_' + str(field) + '_T'
self.fake_folder(folder_name=field_folder_name)
return 'field at ' +str(field)+' T'
def get_field(self):
return 0.0 # Only add this line when doing the first initialization!
if self.switch_state()=='SuperConducting':
## get the persistent field from the HDF5 file
timestamp = atools.latest_data(contains='Switch_is_changed_to_SuperConducting_state',
return_timestamp=True)[0]
params_dict = {'field':'Magnet.field'}
numeric_params = ['field']
data = a_tools.get_data_from_timestamp_list([timestamp], params_dict,
numeric_params=numeric_params, filter_no_analysis=False)
return data['field'][0]
else: ## Normal conducting
meas_field = self.measure_field()
return meas_field
def step_magfield_to_value(self, field):
MagCurrRatio = self.field_to_current # Tesla/Ampere
Ramp_rate_I = self.ramp_rate()
step_time = 0.01 # in seconds
current_step = Ramp_rate_I * step_time
I_now = self.get_source_current()
current_target = self.BtoI(field)
if current_target >= I_now:
current_step *= +1
if current_target < I_now:
current_step *= -1
num_steps = int(1.*(current_target-I_now)/(current_step))
sweep_time = step_time*num_steps
print('Sweep time is '+str(np.abs(sweep_time))+' seconds')
for tt in range(num_steps):
time.sleep(step_time)
self.i_magnet.seti(I_now)
I_now += current_step
if self.i_magnet.measureR() > 1:
self.i_magnet.seti(0)
raise ValueError('Switch is not in a well defined state!')
self.i_magnet.seti(self.BtoI(field))
self.source_current()
def disconnect_source(self):
if self.switch_state() == 'SuperConducting':
self.step_magfield_to_value(0)
self.fake_folder(folder_name='Ramped_down_current_you_are_able_to_disconnect_the_source_now')
else:
raise ValueError('Switch is not superconducting!')
def bring_source_to_field(self):
if not self.switch_state() == 'SuperConducting':
raise ValueError('Switch is not superconducting!')
if self.i_magnet.measureR() > 1.:
raise ValueError('Magnet leads not connected!')
target_field = self.field()
self.step_magfield_to_value(target_field)
self.fake_folder(folder_name='Ramped_current_up_to_match_persistent_current')
def measure_field(self):
if self.i_magnet is not None:
I = self.get_source_current()
B = self.ItoB(I)
return B
else:
print('no i_magnet')
def BtoI(self, magfield):
MagCurrRatio = self.field_to_current # Tesla/Ampere
I = magfield/MagCurrRatio
return I
def ItoB(self, current):
MagCurrRatio = self.field_to_current # Tesla/Ampere
B = current*MagCurrRatio
return B
def fake_folder(self, folder_name):
    """Run a trivial dummy measurement so a data folder named *folder_name* is created.

    Useful as a marker in the data directory when the magnetic field
    changes or a new measurement cycle starts, so measurement sets can
    be told apart. Raises ValueError if *folder_name* is not a string.
    """
    if not isinstance(folder_name, str):
        raise ValueError('Please enter a string as the folder name!')
    # Minimal no-op sweep: three dummy points with a soft dummy detector.
    self.MC.set_sweep_function(swf.None_Sweep())
    self.MC.set_sweep_points(np.linspace(0, 10, 3))
    self.MC.set_detector_function(det.Dummy_Detector_Soft())
    self.MC.run(folder_name)
####################################################
### This is a working version that contains bugs ###
####################################################
# # American Magnetics, Inc. (AMI) One Axis magnet with PCS_SN14768
# import time
# import logging
# import numpy as np
# # from scipy.optimize import brent
# # from math import gcd
# # from qcodes import Instrument
# from qcodes.utils import validators as vals
# # from qcodes.instrument.parameter import ManualParameter
# # from pycqed.utilities.general import add_suffix_to_dict_keys
# # from pycqed.measurement import detector_functions as det
# # from pycqed.measurement import composite_detector_functions as cdet
# # from pycqed.measurement import mc_parameter_wrapper as pw
# # from pycqed.measurement import sweep_functions as swf
# # from pycqed.measurement import awg_sweep_functions as awg_swf
# # from pycqed.analysis import measurement_analysis as ma
# # from pycqed.measurement.calibration_toolbox import mixer_carrier_cancellation_5014
# # from pycqed.measurement.calibration_toolbox import mixer_carrier_cancellation_UHFQC
# # from pycqed.measurement.calibration_toolbox import mixer_skewness_calibration_5014
# # from pycqed.measurement.optimization import nelder_mead
# # import pycqed.measurement.pulse_sequences.single_qubit_tek_seq_elts as sq
# import logging
# import numpy as np
# from copy import deepcopy,copy
# import qcodes as qc
# from qcodes.instrument.base import Instrument
# from qcodes.utils import validators as vals
# from qcodes.instrument.parameter import ManualParameter
# from pycqed.instrument_drivers.pq_parameters import InstrumentParameter
# class AMI_Magnet_with_PCS_SN14768(Instrument):
# '''
# Instrument used for translating fields into current settings
# and controlling the persistent current switch.
# '''
# def __init__(self, name,
# Current_source_magnet_name,
# Current_source_heater_name,**kw): # IVVI.dac1
# super().__init__(name, **kw)
# self.protection_state=False
# # Set instrumentss
# self.add_parameter('I_magnet', parameter_class=InstrumentParameter)
# self.I_magnet = Current_source_magnet_name
# self.add_parameter('I_heater', parameter_class=InstrumentParameter)
# self.I_heater = Current_source_heater_name
# # Specifications of One Axis AMI magnet with PCS_14768
# self.Max_Current = 5.0 # Amperes
# self.Field_to_Current = 5e-2 # Telsa/Ampere
# # (Spec sheet says 0.500 kG/A = 50 mT/A)
# self.Max_Field = self.Max_Current*self.Field_to_Current # Tesla
# #(Spec sheet says 20 kG = 2.0 T)
# # self.Ramp_Rate = 0.05 # Ampere/Second
# self.Max_Ramp_Rate = 0.2 # Max ramp rate: 1.32 # Ampere/Second
# self.Init_Ramp_Rate = 0.025
# self.Charging_Voltage = 1.0 # Volt
# self.Inductance = 0.7 # Henry
# self.Persistent_Switch_Heater_Current = 21e-3 # Ampere (21 mA)
# self.Heater_mVolt_to_Current = 0.020/1e3 # A/mV (20 mA per 1000 mV)
# self.Persistent_Switch_Heater_Nominal_Resistance = 82 # Ohms
# #(Measured at room temperature, thus normal conducing.)
# self.Magnet_Resistanc_in_Parallel_with_Switch = 35 # Ohms
# #(Measured at room temperature, thus normal conducting.)
# self.add_parameter('Source_Current',
# get_cmd=self.get_source_current,
# set_cmd=self.set_source_current,
# label='Source Current',
# unit='A',
# vals=vals.Numbers(min_value=0.,max_value=self.Max_Current),
# docstring='Current supplied to the magnet')
# self.add_parameter('Ramp_Rate',
# label='Ramp Rate',
# unit='A/s',
# initial_value=self.Init_Ramp_Rate,
# get_parser=float,
# vals=vals.Numbers(min_value=0.,max_value=self.Max_Ramp_Rate),
# parameter_class=ManualParameter,
# docstring='Ramp Rate of the magnet current source')
# self.add_parameter('Persistent_Field',
# label='Persistent Field',
# unit='T',
# initial_value=0.,
# get_parser=float,
# vals=vals.Numbers(min_value=0.,max_value=self.Max_Field),
# parameter_class=ManualParameter,
# docstring='Ramp Rate of the magnet current source')
# # It would be great if the variable Persistent_Field could only be
# # set by the program, not by the used. It should only serve as a memory
# # of the previous persistent field.
# self.add_parameter('Switch_State',
# get_cmd=self.get_switch_state,
# set_cmd=self.set_switch_state,
# label='Switch State',
# unit='',
# vals=vals.Enum('SuperConducting','NormalConducting'),
# docstring='Indicating whether the persistent current\
# switch is superconducting or normal conducting')
# self.add_parameter('Field',
# get_cmd=self.get_field,
# set_cmd=self.set_field,
# label='Field',
# unit='T',
# initial_value=0.,
# get_parser=float,
# # initial_value=0,
# vals=vals.Numbers(min_value=0.,max_value=self.Max_Field),
# docstring='Magnetic field')
# self.protection_state=True
# '''
# You need to heat the persistent current switch to turn it from
# a superconductor into a normal conductor. When the persistent \
# current switch is superconducting there is a persistent current,
# when it is normal conducting there is no persistent current and
# you can controll the current with the current source.
# !! Thus it is important to heat the persistent current switch if
# you want to change the field !!
# !! Also important that when you want to switch off the persistent
# current that you provide the same current with the current source
# on the leads !! BEFORE !! you heat the persistent current switch !!
# '''
# def BtoI(self, magfield):
# MagCurrRatio = self.Field_to_Current # Tesla/Ampere
# I = magfield/MagCurrRatio
# return I
# def ItoB(self, current):
# MagCurrRatio = self.Field_to_Current # Tesla/Ampere
# B = current*MagCurrRatio
# return B
# def get_switch_state(self):
# heater_current = self.get_heater_current()
# if 1.05*self.Persistent_Switch_Heater_Current>heater_current\
# >0.95*self.Persistent_Switch_Heater_Current:
# return 'NormalConducting'
# elif 0.05*self.Persistent_Switch_Heater_Current>heater_current\
# >-0.05*self.Persistent_Switch_Heater_Current:
# return 'SuperConducting'
# else:
# raise ValueError('Switch is not in a well defined state!')
# def set_switch_state(self,desired_state):
# if desired_state == 'SuperConducting' and\
# self.get_switch_state() == 'SuperConducting':
# print('Already SuperConducting')
# return 'SuperConducting'
# elif desired_state == 'NormalConducting' and\
# self.get_switch_state() == 'NormalConducting':
# print('Already NormalConducting')
# return 'NormalConducting'
# elif desired_state == 'SuperConducting' and\
# self.get_switch_state() == 'NormalConducting':
# print('Ramping current down...')
# self.I_heater(0)
# print('Wait 2 minutes to cool the switch.')
# time.sleep(120) # 120
# print('Switch is now SuperConducting')
# return 'SuperConducting'
# elif desired_state == 'NormalConducting' and\
# self.get_switch_state() == 'SuperConducting':
# if self.I_magnet.measureR() > 1.:
# raise ValueError('Magnet leads not connected!')
# else:
# supplied_current = self.get_source_current()
# if self.Field()==None:
# print('Sourcing current...')
# self.I_heater(self.Persistent_Switch_Heater_Current\
# /self.Heater_mVolt_to_Current)
# print('Wait 30 seconds to heat up the switch.')
# time.sleep(30) # 30
# print('Switch is now NormalConducting')
# return 'NormalConducting'
# elif supplied_current<2e-3 and np.abs(self.Field())<1e-4:
# print('Sourcing current...')
# self.I_heater(self.Persistent_Switch_Heater_Current\
# /self.Heater_mVolt_to_Current)
# print('Wait 30 seconds to heat up the switch.')
# time.sleep(30) # 30
# print('Switch is now NormalConducting')
# return 'NormalConducting'
# elif not 0.98*supplied_current<self.BtoI(self.Field())\
# <1.02*supplied_current:
# raise ValueError('Current is not \
# according to the field value! Use \
# bring_source_to_field function to \
# bring it to the correct value.')
# else:
# print('Sourcing current...')
# self.I_heater(self.Persistent_Switch_Heater_Current\
# /self.Heater_mVolt_to_Current)
# print('Wait 30 seconds to heat up the switch.')
# time.sleep(30) # 30
# print('Switch is now NormalConducting')
# return 'NormalConducting'
# else:
# return 'Input SuperConducting or NormalConducting as desired state.'
# def get_source_current(self):
# print('Shit be workin yo')
# return self.I_magnet.measurei()
# def set_source_current(self,current):
# self.I_magnet.seti(current)
# return 'Current set to '+str(current)+' A'
# def get_heater_current(self):
# return self.Heater_mVolt_to_Current*self.I_heater()
# def measure_field(self):
# if self.I_magnet is not None:
# I = self.get_source_current()
# B = self.ItoB(I)
# return B
# else:
# print('no I_magnet')
# def set_field(self,field):
# if not self.protection_state:
# return 0.
# if self.Switch_State() == 'SuperConducting':
# raise ValueError('Switch is SuperConducting. Can not change the field.')
# elif self.Switch_State() =='NormalConducting':
# if self.I_magnet.measureR()>1:
# raise ValueError('Magnet leads are not connected \
# or manget quenched!')
# else:
# self.step_magfield_to_value(field)
# self.Persistent_Field(field)
# return 'Field at ' +str(field)+' T'
# def get_field(self):
# if self.Switch_State()=='SuperConducting':
# return self.Persistent_Field()
# else:
# meas_field = self.measure_field()
# return meas_field
# def disconnect_source(self):
# if self.Switch_State() == 'SuperConducting':
# self.step_magfield_to_value(0)
# else:
# raise ValueError('Switch is not superconducting!')
# def bring_source_to_field(self):
# if not self.Switch_State() == 'SuperConducting':
# raise ValueError('Switch is not superconducting!')
# if self.I_magnet.measureR() > 1.:
# raise ValueError('Magnet leads not connected!')
# target_field = self.Persistent_Field()
# self.step_magfield_to_value(target_field)
# def step_magfield_to_value(self, field):
# MagCurrRatio = self.Field_to_Current # Tesla/Ampere
# Ramp_rate_I = self.Ramp_Rate()
# step_time = 0.01 # in seconds
# current_step = Ramp_rate_I * step_time
# I_now = self.get_source_current()
# current_target = self.BtoI(field)
# if current_target >= I_now:
# current_step *= +1
# if current_target < I_now:
# current_step *= -1
# num_steps = int(1.*(current_target-I_now)/(current_step))
# sweep_time = step_time*num_steps
# print('Sweep time is '+str(np.abs(sweep_time))+' seconds')
# for tt in range(num_steps):
# time.sleep(step_time)
# self.I_magnet.seti(I_now)
# I_now += current_step
# if self.I_magnet.measureR() > 1:
# self.I_magnet.seti(0)
# raise ValueError('Switch is not in a well defined state!')
# self.I_magnet.seti(self.BtoI(field))
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=invalid-name
"""Operator attributes conversion"""
from ._op_translations import add, subtract, multiply, divide, absolute, negative, add_n
from ._op_translations import argmax, argmin, maximum, minimum
from ._op_translations import ceil, floor, hardsigmoid, global_lppooling
from ._op_translations import clip, reduce_log_sum, reduce_log_sum_exp
from ._op_translations import concat, hardmax, topk
from ._op_translations import dropout, local_response_norm, conv, deconv
from ._op_translations import global_avgpooling, global_maxpooling, linalg_gemm
from ._op_translations import identity, random_uniform, random_normal, sample_multinomial
from ._op_translations import leaky_relu, _elu, _prelu, _selu, softmax, fully_connected
from ._op_translations import log_softmax, softsign, lesser, greater, equal
from ._op_translations import logical_and, logical_or, logical_xor, logical_not
from ._op_translations import mean, depthtospace, spacetodepth, lpnormalization
from ._op_translations import reciprocal, squareroot, power, exponent, _log, unsqueeze
from ._op_translations import reduce_max, reduce_mean, reduce_min, reduce_sum
from ._op_translations import reduce_prod, avg_pooling, max_pooling, instance_norm
from ._op_translations import reduce_sum_square, reduce_l1, reduce_l2, max_roi_pooling
from ._op_translations import reshape, cast, split, _slice, transpose, squeeze, flatten
from ._op_translations import sigmoid, pad, relu, matrix_multiplication, batch_norm
from ._op_translations import softplus, shape, gather, lp_pooling, size
from ._op_translations import tanh, arccos, arcsin, arctan, _cos, _sin, _tan
# _convert_map maps ONNX operator names to converter functors (callables)
# defined in the op_translations module. Lookup happens by ONNX op type.
_convert_map = dict(
    # Generator Functions
    Constant=identity,
    RandomUniform=random_uniform,
    RandomNormal=random_normal,
    RandomUniformLike=random_uniform,
    RandomNormalLike=random_normal,
    Multinomial=sample_multinomial,
    # Arithmetic Operators
    Add=add,
    Sub=subtract,
    Mul=multiply,
    Div=divide,
    Abs=absolute,
    Neg=negative,
    Sum=add_n,  # elemwise sum
    # Hyperbolic functions
    Tanh=tanh,
    # Rounding
    Ceil=ceil,
    Floor=floor,
    # Joining and spliting
    Concat=concat,
    # Basic neural network functions
    Sigmoid=sigmoid,
    Relu=relu,
    Pad=pad,
    MatMul=matrix_multiplication,  # linalg_gemm2
    Conv=conv,
    ConvTranspose=deconv,
    BatchNormalization=batch_norm,
    SpatialBN=batch_norm,
    LeakyRelu=leaky_relu,
    Elu=_elu,
    PRelu=_prelu,
    Selu=_selu,
    Softmax=softmax,
    FC=fully_connected,
    GlobalAveragePool=global_avgpooling,
    GlobalMaxPool=global_maxpooling,
    GlobalLpPool=global_lppooling,
    Gemm=linalg_gemm,
    LRN=local_response_norm,
    Dropout=dropout,
    # Changing shape and type.
    Reshape=reshape,
    Cast=cast,
    Split=split,
    Slice=_slice,
    Transpose=transpose,
    Squeeze=squeeze,
    Unsqueeze=unsqueeze,
    Flatten=flatten,
    Identity=identity,
    # Powers
    Reciprocal=reciprocal,
    Sqrt=squareroot,
    Pow=power,
    Exp=exponent,
    Log=_log,
    # Reduce Functions
    ReduceMax=reduce_max,
    ReduceMean=reduce_mean,
    ReduceMin=reduce_min,
    ReduceSum=reduce_sum,
    ReduceProd=reduce_prod,
    AveragePool=avg_pooling,
    MaxPool=max_pooling,
    # Sorting and Searching
    ArgMax=argmax,
    ArgMin=argmin,
    Max=maximum,
    Min=minimum,
    Clip=clip,
    ReduceLogSum=reduce_log_sum,
    ReduceLogSumExp=reduce_log_sum_exp,
    ReduceSumSquare=reduce_sum_square,
    ReduceL1=reduce_l1,
    ReduceL2=reduce_l2,
    MaxRoiPool=max_roi_pooling,
    InstanceNormalization=instance_norm,
    LogSoftmax=log_softmax,
    Softsign=softsign,
    Less=lesser,
    Greater=greater,
    Equal=equal,
    And=logical_and,
    Xor=logical_xor,
    Not=logical_not,
    Or=logical_or,
    Mean=mean,
    Acos=arccos,
    Asin=arcsin,
    Atan=arctan,
    Cos=_cos,
    Sin=_sin,
    Softplus=softplus,
    Tan=_tan,
    Shape=shape,
    Size=size,
    Gather=gather,
    HardSigmoid=hardsigmoid,
    LpPool=lp_pooling,
    DepthToSpace=depthtospace,
    SpaceToDepth=spacetodepth,
    Hardmax=hardmax,
    LpNormalization=lpnormalization,
    TopK=topk,
)
|
<reponame>TerryS6903/DnD_Project
from TheDice import D20, D12, D10, D8, D6, D4
from CharacterStats import strength, dexterity, constitution, wisdom, intelligence, charisma
from AbilityModifiers import initiative, ability_mod
def main():
character_name = input("What is your characters name?\n")
strength_stat = strength()
dexterity_stat = dexterity()
constitution_stat = constitution()
wisdom_stat = wisdom()
intelligence_stat = intelligence()
charisma_stat = charisma()
str_mod = ability_mod(strength_stat)
dex_mod = ability_mod(dexterity_stat)
con_mod = ability_mod(constitution_stat)
wis_mod = ability_mod(wisdom_stat)
int_mod = ability_mod(intelligence_stat)
cha_mod = ability_mod(charisma_stat)
health = 10 + D20() + con_mod
AC = 10 + dex_mod
print("Your strength is {}\n".format(strength_stat))
print("Your dexterity is {}\n".format(dexterity_stat))
print("Your constitution is {}\n".format(constitution_stat))
print("Your wisdom is {}\n".format(wisdom_stat))
print("Your intelligence is {}\n".format(intelligence_stat))
print("Your charisma is {}\n".format(charisma_stat))
print("Your health is {}\n".format(health))
print("Your Armor Class is {}\n".format(AC))
print("You wake up in the woods on the outskirts of a small town with no recollection of how you got there.")
choice = input("1. Walk towards the town\n2. Explore deeper into the woods\n3. Head to the clearing on the left\n")
if choice == str(1):
print("As you are walking towards the town a group of bandits jump out of the treeline!")
choice = input("1. Roll for initiative\n 2. Run\n")
if choice == str(1):
init = initiative(dex_mod)
print("Your initiative is {}".format(init))
enemy_hp = D20() + 3
enemy_ac = 10
if init > 10:
print("You go first!")
while enemy_hp > 0:
print("What do you do?\n")
choice = input("1. Attack 2. Use a Potion")
if choice == str(1):
hit = D20() + 2
print("Rolling to hit...{}".format(hit))
if hit < enemy_ac:
print("You missed! Bandits turn!")
else:
damage = D8() + 2
print("You hit! Rolling for damage...{} damage!".format(damage))
enemy_hp -= damage
print("Bandits turn!")
else:
potion = D6()
healed = potion + health
print("You have {} health, the potion heals you...{}, You have {} health now!".format(health, potion, healed))
enemy_hit = D20() + 2
if enemy_hit > AC:
enemy_damage = D6() + 2
print("The bandit attacks...and hits! Rolling for damage...{} damage!".format(enemy_damage))
health -= enemy_damage
print(health)
print("Your turn!")
else:
print("The bandit attacks...and misses!")
print("Your turn!")
else:
print("You go second!")
while enemy_hp > 0:
enemy_hit = D20() + 2
if enemy_hit > AC:
enemy_damage = D6() + 2
print("The bandit attacks...and hits! Rolling for damage...{} damage!".format(enemy_damage))
health -= enemy_damage
print(health)
print("Your turn!")
else:
print("The bandit attacks...and misses!")
print("Your turn!")
print("What do you do?\n")
choice = input("1. Attack 2. Use a Potion 3. Run")
if choice == str(1):
hit = D20() + 2
print("Rolling to hit...{}".format(hit))
if hit < enemy_ac:
print("You missed! Bandits turn!")
else:
damage = D8() + 2
print("You hit! Rolling for damage...{} damage!".format(damage))
enemy_hp -= damage
print("Bandits turn!")
print("After beating the bandit, you continue on your way towards the town.")
print("Upon entering the town, you find that most of the residents are not present, except for a single old man towards the center of the town.")
print("The old man approaches you and says 'Everyone left when the bandits came, and nobody's been back since. But then some monster came into town and ran off the bandits. It holed up in my home, and I haven't left because a family heirloom is in there. Dear adventurer would you please retrieve it for me so that I may leave?'")
print("He points you towards the house and you make your way towards the house. The closer you get, the more clearly you can hear the growling of the monster living in the home.")
choice = input("Do you 1. Enter through the front door or 2. Try to go through the back?")
if choice == str(1):
print("Upon entering the house you are immediately greeted with the face of a werewolf. Without a second thought the werewolf jumps at you, roll for initiative!")
init = initiative(dex_mod)
print("Your initiative is {}".format(init))
enemy_hp = D20() + D20 + 15
enemy_ac = 14
if init > 15:
print("You go first!")
while enemy_hp > 0:
print("What do you do?\n")
choice = input("1. Attack 2. Use a Potion")
if choice == str(1):
hit = D20() + 2
print("Rolling to hit...{}".format(hit))
if hit < enemy_ac:
print("You missed! Werewolf's turn!")
else:
damage = D8() + 2
print("You hit! Rolling for damage...{} damage!".format(damage))
enemy_hp -= damage
print("Werewolf's turn!")
else:
potion = D6()
healed = potion + health
print("You have {} health, the potion heals you...{}, You have {} health now!".format(health, potion, healed))
enemy_hit = D20() + 2
if enemy_hit > AC:
enemy_damage = D8() + 3
print("The werewolf attacks...and hits! Rolling for damage...{} damage!".format(enemy_damage))
health -= enemy_damage
print("Your turn!")
else:
print("The werewolf attacks...and misses!")
print("Your turn!")
else:
print("You go second!")
while enemy_hp > 0:
enemy_hit = D20() + 2
if enemy_hit > AC:
enemy_damage = D8() + 3
print("The werewolf attacks...and hits! Rolling for damage...{} damage!".format(enemy_damage))
health -= enemy_damage
print("Your turn!")
else:
print("The werewolf attacks...and misses!")
print("Your turn!")
print("What do you do?\n")
choice = input("1. Attack 2. Use a Potion 3. Run")
if choice == str(1):
hit = D20() + 2
print("Rolling to hit...{}".format(hit))
if hit < enemy_ac:
print("You missed! Werewolf's turn!")
else:
damage = D8() + 2
print("You hit! Rolling for damage...{} damage!".format(damage))
enemy_hp -= damage
print("Werewolf's turn!")
else:
enemy_hp = D20() + D20 + 15
print("You walk around the back of the house and enter. Upon entering you find a werewolf sleeping and decide to make a preemptive strike, dealing {} damage".format(enemy_hp // 2))
print("The werewolf wakes up and attacks, roll for initiative!")
init = initiative(dex_mod)
print("Your initiative is {}".format(init))
enemy_ac = 14
if init > 15:
print("You go first!")
while enemy_hp > 0:
print("What do you do?\n")
choice = input("1. Attack 2. Use a Potion")
if choice == str(1):
hit = D20() + 2
print("Rolling to hit...{}".format(hit))
if hit < enemy_ac:
print("You missed! Werewolf's turn!")
else:
damage = D8() + 2
print("You hit! Rolling for damage...{} damage!".format(damage))
enemy_hp -= damage
print("Werewolf's turn!")
else:
potion = D6()
healed = potion + health
print("You have {} health, the potion heals you...{}, You have {} health now!".format(health, potion, healed))
enemy_hit = D20() + 2
if enemy_hit > AC:
enemy_damage = D8() + 3
print("The werewolf attacks...and hits! Rolling for damage...{} damage!".format(enemy_damage))
health -= enemy_damage
print("Your turn!")
else:
print("The werewolf attacks...and misses!")
print("Your turn!")
else:
print("You go second!")
while enemy_hp > 0:
enemy_hit = D20() + 2
if enemy_hit > AC:
enemy_damage = D8() + 3
print("The werewolf attacks...and hits! Rolling for damage...{} damage!".format(enemy_damage))
health -= enemy_damage
print("Your turn!")
else:
print("The werewolf attacks...and misses!")
print("Your turn!")
print("What do you do?\n")
choice = input("1. Attack 2. Use a Potion 3. Run")
if choice == str(1):
hit = D20() + 2
print("Rolling to hit...{}".format(hit))
if hit < enemy_ac:
print("You missed! Werewolf's turn!")
else:
damage = D8() + 2
print("You hit! Rolling for damage...{} damage!".format(damage))
enemy_hp -= damage
print("Werewolf's turn!")
choice = input("Do you 1. Take the Gem for yourself or 2. Return the gem to the old man?")
if choice == str(1):
print("You leave the house with the gem in hand, not looking back, hoping the old man thinks you lost.\n GAME OVER")
elif choice == str(2):
print("You return the gem to the old man, he thanks you, and leaves ith the heirloom in hand, leaing you to your adventures.\n GAME OVER")
elif choice == str(2):
print("You run past the bandits and begin to close in on the small town")
print("Upon entering the town, you find that most of the residents are not present, except for a single old man towards the center of the town.")
print("The old man approaches you and says 'Everyone left when the bandits came, and nobody's been back since. But then some monster came into town and ran off the bandits. It holed up in my home, and I haven't left because a family heirloom is in there. Dear adventurer would you please retrieve it for me so that I may leave?'")
print("He points you towards the house and you make your way towards the house. The closer you get, the more clearly you can hear the growling of the monster living in the home.")
choice = input("Do you 1. Enter through the front door or 2. Try to go through the back?")
if choice == str(1):
print("Upon entering the house you are immediately greeted with the face of a werewolf. Without a second thought the werewolf jumps at you, roll for initiative!")
init = initiative(dex_mod)
print("Your initiative is {}".format(init))
enemy_hp = D20() + D20 + 15
enemy_ac = 14
if init > 15:
print("You go first!")
while enemy_hp > 0:
print("What do you do?\n")
choice = input("1. Attack 2. Use a Potion")
if choice == str(1):
hit = D20() + 2
print("Rolling to hit...{}".format(hit))
if hit < enemy_ac:
print("You missed! Werewolf's turn!")
else:
damage = D8() + 2
print("You hit! Rolling for damage...{} damage!".format(damage))
enemy_hp -= damage
print("Werewolf's turn!")
else:
potion = D6()
healed = potion + health
print("You have {} health, the potion heals you...{}, You have {} health now!".format(health, potion, healed))
enemy_hit = D20() + 2
if enemy_hit > AC:
enemy_damage = D8() + 3
print("The werewolf attacks...and hits! Rolling for damage...{} damage!".format(enemy_damage))
health -= enemy_damage
print("Your turn!")
else:
print("The werewolf attacks...and misses!")
print("Your turn!")
else:
print("You go second!")
while enemy_hp > 0:
enemy_hit = D20() + 2
if enemy_hit > AC:
enemy_damage = D8() + 3
print("The werewolf attacks...and hits! Rolling for damage...{} damage!".format(enemy_damage))
health -= enemy_damage
print("Your turn!")
else:
print("The werewolf attacks...and misses!")
print("Your turn!")
print("What do you do?\n")
choice = input("1. Attack 2. Use a Potion 3. Run")
if choice == str(1):
hit = D20() + 2
print("Rolling to hit...{}".format(hit))
if hit < enemy_ac:
print("You missed! Werewolf's turn!")
else:
damage = D8() + 2
print("You hit! Rolling for damage...{} damage!".format(damage))
enemy_hp -= damage
print("Werewolf's turn!")
else:
enemy_hp = D20() + D20 + 15
print("You walk around the back of the house and enter. Upon entering you find a werewolf sleeping and decide to make a preemptive strike, dealing {} damage".format(enemy_hp // 2))
print("The werewolf wakes up and attacks, roll for initiative!")
init = initiative(dex_mod)
print("Your initiative is {}".format(init))
enemy_ac = 14
if init > 15:
print("You go first!")
while enemy_hp > 0:
print("What do you do?\n")
choice = input("1. Attack 2. Use a Potion")
if choice == str(1):
hit = D20() + 2
print("Rolling to hit...{}".format(hit))
if hit < enemy_ac:
print("You missed! Werewolf's turn!")
else:
damage = D8() + 2
print("You hit! Rolling for damage...{} damage!".format(damage))
enemy_hp -= damage
print("Werewolf's turn!")
else:
potion = D6()
healed = potion + health
print("You have {} health, the potion heals you...{}, You have {} health now!".format(health, potion, healed))
enemy_hit = D20() + 2
if enemy_hit > AC:
enemy_damage = D8() + 3
print("The werewolf attacks...and hits! Rolling for damage...{} damage!".format(enemy_damage))
health -= enemy_damage
print("Your turn!")
else:
print("The werewolf attacks...and misses!")
print("Your turn!")
else:
print("You go second!")
while enemy_hp > 0:
enemy_hit = D20() + 2
if enemy_hit > AC:
enemy_damage = D8() + 3
print("The werewolf attacks...and hits! Rolling for damage...{} damage!".format(enemy_damage))
health -= enemy_damage
print("Your turn!")
else:
print("The werewolf attacks...and misses!")
print("Your turn!")
print("What do you do?\n")
choice = input("1. Attack 2. Use a Potion 3. Run")
if choice == str(1):
hit = D20() + 2
print("Rolling to hit...{}".format(hit))
if hit < enemy_ac:
print("You missed! Werewolf's turn!")
else:
damage = D8() + 2
print("You hit! Rolling for damage...{} damage!".format(damage))
enemy_hp -= damage
print("Werewolf's turn!")
choice = input("Do you 1. Take the Gem for yourself or 2. Return the gem to the old man?")
if choice == str(1):
print("You leave the house with the gem in hand, not looking back, hoping the old man thinks you lost.\n GAME OVER")
elif choice == str(2):
print("You return the gem to the old man, he thanks you, and leaves ith the heirloom in hand, leaing you to your adventures.\n GAME OVER")
else:
print("Hey! That's not a choice!")
else:
print("Hey! That's not a choice!")
elif choice == str(2):
print(
"\n Walking deeper into the woods, it seems to continue to get darker. You eventually reach the end of the path, where it splits into two directions."
)
choice = input("Do you 1. Take the path that leads to the left\n or 2. Take the path that leads to the right?\n")
if choice == str(1):
print("\n As you're walking down the left path you pass by a sign that says 'Elder Fairy Grove'. You eventually come up on a big hotspring with a large fairy tied up. She looks up at you and says 'Please free me, I need help'." )
choice = input("\n Do you 1. Untie the fairy \n or 2.Turn around to go down the other path.\n")
if choice == str(1):
print("\n Upon untying the fairy she bursts into laughter and floats above you 'Oh yes finally a hero worry to be my servant.' She raises her hand and casts a spell.\n")
wisdom_save = D20() + wis_mod
if wisdom_save < 17:
print("\n You become charmed by the fairy and lose your will to act on your own motivation. As days pass by she makes you act as her champion. You start to lose memories of your past life as you replay them over and over as they are the only remnants of your past. You eventually become a mindless servant and your only desires and memories are that of fulfilling the tasks of the fairy.\n")
print("GAME OVER")
else:
print("As you watch her cast the spell you are unaffected and she stares in awe \n 'WHY AREN'T YOU MY SERVANT?! I THOUGHT YOU WERE FIT TO BE MY CHAMPION BUT YOU ARE PRIDEFUL JUST LIKE THE REST AND FOR THAT YOU SHALL SUFFER!'\n")
print("She casts another spell but this time vines and leaves wrap around you and cover you and you can no longer move. You sit there parlyized and constrained and slowly starve to death and continue standing as a living tree.\n")
print("GAME OVER")
elif choice == str(2):
print("As you turn around your legs become heavy and rough. When you look down at them to see what is causing it you notice your legs have become rooted into the ground and bark is growing up your body parts. The fairy flies in front of you and begins speaking \n 'You adventurers only care about wealth! Now you will suffer.'\n You turn into a tree and slowly your conciousness starts to fade.\n")
print("GAME OVER")
elif choice == str(2):
print("\n You begin walking down the right hand path. You are walking for a while and begin to lose your way. You start frantically going different directions before you realize you have become stuck in forest labrynth. You come up to another split in paths.")
left_counter = 0
right_counter = 0
direction_counter = 0
while left_counter < 4 and right_counter < 6 or direction_counter < 10:
choice = input("You are at a split in the path, which direction do you choose?1. left \n or 2. right\n")
if choice == str(1):
left_counter += 1
direction_counter += 1
elif choice == str(2):
right_counter += 1
direction_counter += 1
if direction_counter == 10 and left_counter < 4 and right_counter < 6:
print(" \n You become lost in in the maze for the rest of your life and lose all hope of making it out. Some days you wander around and hope you find the exit.")
if left_counter >= 4 and right_counter >= 6:
print(" \n You make it out of the dreaded forest maze and in the distance see your house. You decide that you've had enough adventuring for one day and head inside and fall asleep.")
elif choice == str(3):
print("Upon reaching the clearing, you find a large stone building with a young man standing outside of it, he is holding a sign which says 'Finish the maze and win 100 gold'.\n")
print("The man motions you over and begins to explain the maze, 'Listen listen! I have a quick way for you to make money! All you have to do is finish the maze! if you finish the maze you win 100 gold, but if you get stuck you'll have to fight whats in the maze! So, are you gonna do it?'")
choice = input("1. Agree to take on the maze\n2. Decline his offer")
if choice == str(1):
print("That's great! Well, go on ahead! I'll wait at the other end for you!")
print("You hesitantly enter the maze, and the door behind you shuts and locks.")
choice = input("You are presented with three directions:\n1. Left\n2. Right\n3. Middle")
if choice == str(1):
choice = input("You are presented with two more options:\n1. Left\n2. Middle")
if choice == str(1):
print("Uh oh! That's a dead end, a trap door opens below you and you fall in, Game Over!")
elif choice == str(2):
choice = input("You are presented with 2 more options:\n1. Left\n2. Right")
if choice == str(1):
print("You can see the end! You make your way towards the door and make it outside, where you see the man from before. 'Well! it looks like you solved it! Here's your money!' He then hands you the money and you go about your adventure. Good Job!")
elif choice == str(2):
print("You step on a pressure plate and the walls all close, leaving you stuck in one spot forever, Game over!")
elif choice == str(2):
print("It's a dead end! You start to feel sleepy, and suddenly collapse, Game Over!")
elif choice == str(3):
print("As you walk through the middle you notice that all of the paths line up so that it is a straight shot to the end. However upon reaching the end you see that the man from before isn't there. Knowing that you've been scammed, you continue about your adventure, feeling a bit defeated. Good job?")
elif choice == str(2):
print("'You won't do my maze? How dare you!' The man jumps at you with a dagger, Roll for initiative!")
init = initiative(dex_mod)
print("Your initiative is {}".format(init))
enemy_hp = D20() + 5
enemy_ac = 12
if init > 13:
print("You go first!")
while enemy_hp > 0:
print("What do you do?\n")
choice = input("1. Attack 2. Use a Potion")
if choice == str(1):
hit = D20() + 2
print("Rolling to hit...{}".format(hit))
if hit < enemy_ac:
print("You missed! Maze-Keeper's turn!")
else:
damage = D8() + 2
print("You hit! Rolling for damage...{} damage!".format(damage))
enemy_hp -= damage
print("Maze-Keeper's turn!")
else:
potion = D6()
healed = potion + health
print("You have {} health, the potion heals you...{}, You have {} health now!".format(health, potion, healed))
enemy_hit = D20() + 2
if enemy_hit > AC:
enemy_damage = D8() + 2
print("The Maze-Keeper attacks...and hits! Rolling for damage...{} damage!".format(enemy_damage))
health -= enemy_damage
print(health)
print("Your turn!")
else:
print("The Maze-Keeper attacks...and misses!")
print("Your turn!")
else:
print("You go second!")
while enemy_hp > 0:
enemy_hit = D20() + 2
if enemy_hit > AC:
enemy_damage = D8() + 2
print("The Maze-Keeper attacks...and hits! Rolling for damage...{} damage!".format(enemy_damage))
health -= enemy_damage
print(health)
print("Your turn!")
else:
print("The Maze-Keeper attacks...and misses!")
print("Your turn!")
print("What do you do?\n")
choice = input("1. Attack 2. Use a Potion 3. Run")
if choice == str(1):
hit = D20() + 2
print("Rolling to hit...{}".format(hit))
if hit < enemy_ac:
print("You missed! Maze-Keeper's turn!")
else:
damage = D8() + 2
print("You hit! Rolling for damage...{} damage!".format(damage))
enemy_hp -= damage
print("Maze-Keeper's turn!")
print("Upon beating the Maze-Keeper you find the 100 gold he promised on a pouch on his side. You see he no longer needs it, so you take it for yourself, and continue about your journey. Good job!")
else:
print("Hey! That's not a choice!")
# Script entry point: run the adventure game only when executed directly
# (not when imported as a module).
if __name__ == "__main__":
    main()
|
import json
import pulumi
import pulumi_aws as aws
from pulumi import export, Output, ResourceOptions
import pulumi_redata as redata
from autotag import register_auto_tags
# Resolve AWS account/region from the 'aws' provider config namespace.
aws_config = pulumi.Config('aws')
aws_account_id = aws.get_caller_identity().account_id
aws_region = aws_config.require('region')
config = pulumi.Config()

# --- REQUIRED CONFIG ---
# Basic config - what VPC and subnets to deploy in?
private_subnet_ids = config.require_object('aws-private-subnet-ids')
public_subnet_ids = config.require_object('aws-public-subnet-ids')
redata_vpc = aws.ec2.get_vpc(id=config.require('aws-vpc-id'))
# External domain name + cert ARN for load balancer
target_domain = config.require('target-domain')
target_domain_cert = config.require('target-domain-cert')
# Service configuration (require_secret values stay encrypted in Pulumi state)
airflow_admin_email = config.require('airflow-admin-email')
airflow_admin_password = config.require_secret('airflow-admin-password')
airflow_db_password = config.require_secret('airflow-db-password')
grafana_admin_password = config.require_secret('grafana-admin-password')
redata_db_password = config.require_secret('redata-db-password')
redata_image = config.require('redata-image')
# Mapping of source name -> DB connection URL (consumed below as
# REDATA_SOURCE_DB_URL_<name> environment variables).
redata_sources = config.require_secret_object('sources')

# --- OPTIONAL CONFIG ---
# Allowed CIDR blocks for accessing the HTTPS load balancer (by default public access)
allowed_cidr_blocks = config.get_object('allowed-cidr-blocks') or ['0.0.0.0/0']
# Private zones for DB aliases + Service Discovery
private_zone_db = config.get('private-zone-db') or "db.redata"
private_zone_sd = config.get('private-zone-sd') or "sd.redata"
# Protection flag; set config to 'true' to protect EFS and DBs from accidental deletion
protect_persistent_storage = config.get_bool('protect-persistent-storage') or False
# Redata customization
redata_airflow_schedule_interval = config.get("redata-airflow-schedule-interval") or "0 * * * *"
redata_time_col_blacklist_regex = config.get("redata-time-col-blacklist-regex") or ""
# Extra tags to apply to all taggable resources
tags = config.get_object('tags') or {}

# --- DERIVED / INTERNAL DEFINITIONS ---
airflow_base_log_folder = "/opt/airflow/logs"
base_url = f"https://{target_domain}"
grafana_db_folder = "/var/lib/grafana"

# Automatically inject tags (project/stack plus any user-supplied extras)
# into every taggable resource created below.
register_auto_tags({
    'pulumi:project': pulumi.get_project(),
    'pulumi:stack': pulumi.get_stack(),
    **tags,
})
#
# CLUSTER INFRASTRUCTURE
#

# Create a cluster (ECS; all services below run on it as Fargate tasks)
cluster = aws.ecs.Cluster('redata-cluster')

# Create a log group with 7 days retention
lg = aws.cloudwatch.LogGroup('redata-log-group',
    retention_in_days=7,
)

# Create the Task Execution IAM role for ECS / Fargate.
# This role is assumed by the ECS agent itself (image pulls, log writes).
role = aws.iam.Role('redata-task-exec-role',
    assume_role_policy=json.dumps({
        'Version': '2008-10-17',
        'Statement': [{
            'Sid': '',
            'Effect': 'Allow',
            'Principal': {
                'Service': 'ecs-tasks.amazonaws.com'
            },
            'Action': 'sts:AssumeRole',
        }]
    }),
)
# Attach the AWS-managed execution policy to the execution role.
rpa = aws.iam.RolePolicyAttachment('redata-task-exec-policy',
    role=role.name,
    policy_arn='arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy',
)

# Create an IAM role that can be used by our service tasks.
# This is the task (application) role; EFS access policies are attached to it below.
app_role = aws.iam.Role('redata-app-role',
    assume_role_policy=json.dumps({
        'Version': '2012-10-17',
        'Statement': [{
            'Sid': '',
            'Effect': 'Allow',
            'Principal': {
                'Service': 'ecs-tasks.amazonaws.com'
            },
            'Action': 'sts:AssumeRole',
        }]
    }),
)
# Create a SecurityGroup for our load balancer that permits HTTPS ingress.
alb_secgrp = aws.ec2.SecurityGroup('redata-lb-secgrp',
    vpc_id=redata_vpc.id,
    description='Enable HTTPS access',
    ingress=[aws.ec2.SecurityGroupIngressArgs(
        protocol='tcp',
        from_port=443,
        to_port=443,
        cidr_blocks=allowed_cidr_blocks,  # public (0.0.0.0/0) unless restricted in config
    )],
    egress=[aws.ec2.SecurityGroupEgressArgs(
        protocol='-1',
        from_port=0,
        to_port=0,
        cidr_blocks=['0.0.0.0/0'],
    )],
)

# Create a SecurityGroup for our services that permits load balancer ingress and unlimited egress.
# self=True additionally allows service-to-service traffic within the group.
svc_secgrp = aws.ec2.SecurityGroup('redata-svc-secgrp',
    vpc_id=redata_vpc.id,
    description='Enable HTTP access',
    ingress=[
        aws.ec2.SecurityGroupIngressArgs(
            protocol='tcp',
            from_port=0,
            to_port=65535,
            security_groups=[alb_secgrp.id],
            self=True
        ),
    ],
    egress=[aws.ec2.SecurityGroupEgressArgs(
        protocol='-1',
        from_port=0,
        to_port=0,
        cidr_blocks=['0.0.0.0/0'],
    )],
)

# Create a SecurityGroup for databases that allows access from services
# (Postgres ports 5432-5435, sourced from the service security group only).
db_secgrp = aws.ec2.SecurityGroup('redata-db-secgrp',
    vpc_id=redata_vpc.id,
    description='Enable database access',
    ingress=[aws.ec2.SecurityGroupIngressArgs(
        protocol='tcp',
        from_port=5432,
        to_port=5435,
        security_groups=[svc_secgrp.id]
    )],
)
# Elastic File System for persistent storage (Airflow logs, Grafana DB)
efs = redata.fs.FileSystem("redata-efs",
    security_groups=[svc_secgrp.id],
    subnets=private_subnet_ids,
    vpc_id=redata_vpc.id,
    opts=ResourceOptions(protect=protect_persistent_storage),
)

# Service Discovery for intra-cluster communication
sd_namespace = aws.servicediscovery.PrivateDnsNamespace("redata-sd-local-namespace",
    name=private_zone_sd,
    description="Private namespace for Redata services",
    vpc=redata_vpc.id,
)

# Create a load balancer that listens for HTTPS traffic on port 443.
# (The original comment said "HTTP on port 80"; the listener below is HTTPS/443.)
alb = aws.lb.LoadBalancer('redata-lb',
    security_groups=[alb_secgrp.id],
    subnets=public_subnet_ids,
)
listener = aws.lb.Listener('redata-listener',
    load_balancer_arn=alb.arn,
    port=443,
    protocol="HTTPS",
    ssl_policy="ELBSecurityPolicy-2016-08",
    certificate_arn=target_domain_cert,
    # Unmatched paths get a plain 404; web services attach their own
    # path-based rules to this listener (see SERVICES section).
    default_actions=[aws.lb.ListenerDefaultActionArgs(
        type="fixed-response",
        fixed_response=aws.lb.ListenerDefaultActionFixedResponseArgs(
            content_type="text/plain",
            message_body="No such page. Try /airflow or /grafana instead.",
            status_code="404",
        ),
    )],
)

# Create a Route 53 Alias A record from the target domain name to the load balancer.
subdomain, parent_domain = redata.util.get_domain_and_subdomain(target_domain)
hzid = aws.route53.get_zone(name=parent_domain).id
record = aws.route53.Record(target_domain,
    name=subdomain,
    zone_id=hzid,
    type='A',
    aliases=[
        aws.route53.RecordAliasArgs(
            name=alb.dns_name,
            zone_id=alb.zone_id,
            evaluate_target_health=True,
        ),
    ],
)
#
# DATABASES
#

# Route53 private hosted zone for database aliases
db_zone = aws.route53.Zone("redata-db-zone",
    name=private_zone_db,
    vpcs=[aws.route53.ZoneVpcArgs(
        vpc_id=redata_vpc.id,
        vpc_region=aws_region,
    )],
)

# Both RDS instances live in the private subnets.
rds_subnetgroup = aws.rds.SubnetGroup("redata-rds-subnetgroup", subnet_ids=private_subnet_ids)

# Postgres instance backing the Airflow metadata database.
airflow_db = aws.rds.Instance("airflow-postgres",
    allocated_storage=20,
    db_subnet_group_name=rds_subnetgroup.name,
    engine="postgres",
    engine_version="12.5",
    instance_class="db.t2.micro",
    name="airflow",
    # FIX: the literal "<PASSWORD>" placeholder was never replaced with the
    # secret declared in the config section above.
    password=airflow_db_password,
    port=5432,
    skip_final_snapshot=True,
    storage_type="gp2",
    username="airflow",
    vpc_security_group_ids=[db_secgrp.id],
    opts=ResourceOptions(protect=protect_persistent_storage),
)
# Stable CNAME alias for the Airflow DB inside the private zone.
airflow_db_cname = aws.route53.Record("airflow-postgres-cname",
    name="airflow-postgres", zone_id=db_zone.zone_id, type='CNAME', ttl=60, records=[airflow_db.address]
)
export("airflow-db-endpoint", airflow_db.endpoint)
export("airflow-db-alias", Output.concat("airflow-postgres.", db_zone.name, ":", airflow_db.port.apply(str)))

# Postgres instance backing the Redata metrics database.
redata_db = aws.rds.Instance("redata-postgres",
    allocated_storage=20,
    db_subnet_group_name=rds_subnetgroup.name,
    engine="postgres",
    engine_version="12.5",
    instance_class="db.t2.micro",
    name="redata",
    # FIX: same unresolved "<PASSWORD>" placeholder as above.
    password=redata_db_password,
    port=5432,
    skip_final_snapshot=True,
    storage_type="gp2",
    username="redata",
    vpc_security_group_ids=[db_secgrp.id],
    opts=ResourceOptions(protect=protect_persistent_storage),
)
# Stable CNAME alias for the Redata DB inside the private zone.
redata_db_cname = aws.route53.Record("redata-postgres-cname",
    name="redata-postgres", zone_id=db_zone.zone_id, type='CNAME', ttl=60, records=[redata_db.address]
)
export("redata-db-endpoint", redata_db.endpoint)
export("redata-db-alias", Output.concat("redata-postgres.", db_zone.name, ":", redata_db.port.apply(str)))
#
# TASKS
#

# Resolve all async/secret values at once, then build the shared container
# environment (a list of {"name", "value"} dicts) reused by every task
# definition below. The positional comments document the args[i] indices
# used inside the lambda.
environment = Output.all(
    airflow_db.address, # 0
    airflow_db.password, # 1
    sd_namespace.name, # 2
    redata_db.address, # 3
    redata_db.password, # 4
    redata_sources, # 5
    airflow_admin_password, # 6
    grafana_admin_password, # 7
).apply(
    lambda args: [
        # Airflow DB
        {"name": "AIRFLOW_CONN_METADATA_DB", "value": f"postgres+psycopg2://airflow:{args[1]}@{args[0]}:5432/airflow"},
        {"name": "AIRFLOW_VAR__METADATA_DB_SCHEMA", "value": "airflow"},
        # Airflow Config
        {"name": "AIRFLOW__CORE__LOAD_DEFAULT_CONNECTIONS", "value": "False"},
        {"name": "AIRFLOW__CORE__SQL_ALCHEMY_CONN", "value": f"postgres+psycopg2://airflow:{args[1]}@{args[0]}:5432/airflow"},
        {"name": "AIRFLOW__CORE__DAGS_FOLDER", "value": "/usr/local/redata/redata/dags"},
        {"name": "AIRFLOW__CORE__EXECUTOR", "value": "LocalExecutor"},
        {"name": "AIRFLOW__LOGGING__BASE_LOG_FOLDER", "value": airflow_base_log_folder},
        # - Front-end IPs that are allowed to set secure headers; only our ALB can talk to us, so set it to *
        # (see https://docs.gunicorn.org/en/stable/settings.html#forwarded-allow-ips)
        {"name": "FORWARDED_ALLOW_IPS", "value": "*"},
        # - Set proper base URL for redirects etc
        {"name": "AIRFLOW__WEBSERVER__BASE_URL", "value": f'{base_url}/airflow'},
        # - Admin user setup (via entrypoint script):
        {"name": "AIRFLOW_SECURITY_ADMIN_USER", "value": "admin"},
        {"name": "AIRFLOW_SECURITY_ADMIN_PASSWORD", "value": args[6]},
        {"name": "AIRFLOW_SECURITY_ADMIN_EMAIL", "value": airflow_admin_email},
        # Grafana Config
        {"name": "GF_INSTALL_PLUGINS", "value": "grafana-polystat-panel,grafana-clock-panel,grafana-simple-json-datasource"},
        {"name": "GF_SECURITY_ADMIN_USER", "value": "admin"},
        {"name": "GF_SECURITY_ADMIN_PASSWORD", "value": args[7]},
        {'name': 'GF_SERVER_ROOT_URL', 'value': f'{base_url}/grafana'},
        {'name': 'GF_SERVER_SERVE_FROM_SUB_PATH', 'value': 'true'},
        # Redata DB
        {"name": "REDATA_METRICS_DATABASE_HOST", "value": args[3]},
        {"name": "REDATA_METRICS_DATABASE_USER", "value": "redata"},
        {"name": "REDATA_METRICS_DATABASE_PASSWORD", "value": args[4]},
        {"name": "REDATA_METRICS_DATABASE_NAME", "value": "redata"},
        {"name": "REDATA_METRICS_DB_URL", "value": f"postgres://redata:{args[4]}@{args[3]}:5432/redata"},
        # Redata Config
        # Grafana is reached through Service Discovery DNS (args[2] is the SD namespace name).
        {'name': 'GRAFANA_WEB_HOST', 'value': f'grafana-web.{args[2]}'},
        {'name': 'GRAFANA_WEB_PORT', 'value': '3000'},
        {"name": "REDATA_AIRFLOW_SCHEDULE_INTERVAL", "value": redata_airflow_schedule_interval},
        {"name": "REDATA_TIME_COL_BLACKLIST_REGEX", "value": redata_time_col_blacklist_regex},
        # One REDATA_SOURCE_DB_URL_<name> variable per configured source.
    ] + [{"name": f"REDATA_SOURCE_DB_URL_{name}", "value": url} for name, url in args[5].items()]
)
#
# Airflow
#

# EFS access point so the scheduler and webserver tasks share the Airflow
# log directory (both mount it below).
airflow_logs = redata.fs.AccessPoint("redata-efs-airflow-logs",
    file_system=efs,
    path="/airflow/logs",
)
# Grant the task (app) role the IAM permissions required by the access point.
airflow_logs_policy = aws.iam.RolePolicy("airflow-logs-policy",
    role=app_role.id,
    policy=airflow_logs.policy_document
)
# ECS volume definition mounting the access point with transit encryption
# and IAM authorization enabled.
airflow_logs_volume = aws.ecs.TaskDefinitionVolumeArgs(
    name="redata-airflow-logs-ap",
    efs_volume_configuration=aws.ecs.TaskDefinitionVolumeEfsVolumeConfigurationArgs(
        authorization_config=aws.ecs.TaskDefinitionVolumeEfsVolumeConfigurationAuthorizationConfigArgs(
            access_point_id=airflow_logs.ap.id,
            iam="ENABLED",
        ),
        file_system_id=efs.efs.id,
        transit_encryption="ENABLED",
    )
)
# Fargate task definition for the Airflow scheduler (exposes port 8793;
# presumably the Airflow log-server port — confirm against the image).
airflow_scheduler_task = aws.ecs.TaskDefinition('airflow-scheduler-task',
    family='airflow-scheduler-task',
    cpu='1024',
    memory='2048',
    network_mode='awsvpc',
    requires_compatibilities=['FARGATE'],
    execution_role_arn=role.arn,
    container_definitions=Output.all(environment, lg.name).apply(
        lambda args: json.dumps([{
            'name': 'redata-airflow-scheduler',
            'image': config.require('redata-image'),
            'portMappings': [{
                'containerPort': 8793,
                'hostPort': 8793,
                'protocol': 'tcp'
            }],
            'environment': args[0],
            'entryPoint': ['/usr/local/redata/scripts/redata-start.sh'],
            'logConfiguration': {
                'logDriver': 'awslogs',
                'options': {
                    'awslogs-group': args[1],
                    'awslogs-region': aws_region,
                    'awslogs-stream-prefix': 'airflow-scheduler',
                },
            },
            # Shared EFS log directory (see airflow_logs_volume above).
            "mountPoints": [{
                "sourceVolume": "redata-airflow-logs-ap",
                "containerPath": airflow_base_log_folder,
            }],
        }])
    ),
    task_role_arn=app_role.arn,
    volumes=[airflow_logs_volume],
)
# Fargate task definition for the Airflow webserver (HTTP on 8080); same
# image as the scheduler but a different entrypoint script.
airflow_web_task = aws.ecs.TaskDefinition('airflow-web-task',
    family='airflow-web-task',
    cpu='1024',
    memory='2048',
    network_mode='awsvpc',
    requires_compatibilities=['FARGATE'],
    execution_role_arn=role.arn,
    container_definitions=Output.all(environment, lg.name).apply(
        lambda args: json.dumps([{
            'name': 'redata-airflow-web',
            'image': config.require('redata-image'),
            'portMappings': [{
                'containerPort': 8080,
                'hostPort': 8080,
                'protocol': 'tcp'
            }],
            'environment': args[0],
            'entryPoint': ['/usr/local/redata/scripts/airflow-entrypoint.sh'],
            'logConfiguration': {
                'logDriver': 'awslogs',
                'options': {
                    'awslogs-group': args[1],
                    'awslogs-region': aws_region,
                    'awslogs-stream-prefix': 'airflow-web',
                },
            },
            # Shared EFS log directory (see airflow_logs_volume above).
            "mountPoints": [{
                "sourceVolume": "redata-airflow-logs-ap",
                "containerPath": airflow_base_log_folder,
            }],
        }])
    ),
    task_role_arn=app_role.arn,
    volumes=[airflow_logs_volume],
)
#
# Grafana
#

# EFS access point backing Grafana's persistent state directory.
grafana_db = redata.fs.AccessPoint("redata-efs-grafana-db",
    file_system=efs,
    path="/grafana/db",
)
# Grant the task (app) role the IAM permissions required by the access point.
grafana_db_policy = aws.iam.RolePolicy("grafana-db-policy",
    role=app_role.id,
    policy=grafana_db.policy_document
)
# ECS volume definition mounting the access point with transit encryption
# and IAM authorization enabled.
grafana_db_volume = aws.ecs.TaskDefinitionVolumeArgs(
    name="redata-grafana-db-ap",
    efs_volume_configuration=aws.ecs.TaskDefinitionVolumeEfsVolumeConfigurationArgs(
        authorization_config=aws.ecs.TaskDefinitionVolumeEfsVolumeConfigurationAuthorizationConfigArgs(
            access_point_id=grafana_db.ap.id,
            iam="ENABLED",
        ),
        file_system_id=efs.efs.id,
        transit_encryption="ENABLED",
    )
)
# Fargate task definition for Grafana (stock upstream image, HTTP on 3000),
# with its data directory persisted on EFS.
grafana_web_task = aws.ecs.TaskDefinition('grafana-web-task',
    family='grafana-web-task',
    cpu='1024',
    memory='2048',
    network_mode='awsvpc',
    requires_compatibilities=['FARGATE'],
    execution_role_arn=role.arn,
    container_definitions=Output.all(environment, lg.name).apply(
        lambda args: json.dumps([{
            'name': 'redata-grafana-web',
            'image': 'grafana/grafana:7.3.0',
            'portMappings': [{
                'containerPort': 3000,
                'hostPort': 3000,
                'protocol': 'tcp'
            }],
            'environment': args[0],
            'logConfiguration': {
                'logDriver': 'awslogs',
                'options': {
                    'awslogs-group': args[1],
                    'awslogs-region': aws_region,
                    'awslogs-stream-prefix': 'grafana',
                },
            },
            # Persistent Grafana state (see grafana_db_volume above).
            "mountPoints": [{
                "sourceVolume": "redata-grafana-db-ap",
                "containerPath": grafana_db_folder,
            }],
        }])
    ),
    task_role_arn=app_role.arn,
    volumes=[grafana_db_volume],
)
#
# SERVICES
#

# The scheduler has no HTTP endpoint, so it runs as a backend
# (non-load-balanced) service registered only in Service Discovery.
airflow_scheduler_svc = redata.service.BackendService('airflow-scheduler',
    cluster=cluster.arn,
    subnets=private_subnet_ids,
    task_definition=airflow_scheduler_task.arn,
    namespace_id=sd_namespace.id,
    security_groups=[svc_secgrp.id]
)

# Web-facing services, each attached to the shared HTTPS listener under its
# own path prefix with its own health check.
web_services = [
    {"name": "airflow-web", "health_check_path": "/airflow/health", "service_path": "/airflow", "service_port": 8080, "task_definition": airflow_web_task.arn},
    {"name": "grafana-web", "health_check_path": "/healthz", "service_path": "/grafana", "service_port": 3000, "task_definition": grafana_web_task.arn},
]
for service in web_services:
    websvc = redata.service.WebService(service["name"],
        cluster=cluster.arn,
        health_check_path=service["health_check_path"],
        listener_arn=listener.arn,
        namespace_id=sd_namespace.id,
        security_groups=[svc_secgrp.id],
        service_path=service["service_path"],
        service_port=service["service_port"],
        subnets=private_subnet_ids,
        task_definition=service["task_definition"],
        vpc_id=redata_vpc.id,
        opts=ResourceOptions(depends_on=[listener])
    )
    # Export the public URL of each web service (e.g. https://<domain>/airflow).
    export(f"{service['name']}-url", Output.concat(base_url, service["service_path"]))
|
# Generated by Django 2.0.1 on 2018-01-29 10:10
import ckeditor.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the wiki app (auto-generated by Django 2.0.1).

    Creates the CustomField, CustomFieldType, Entry and Section models,
    then wires Entry->Section and CustomField->CustomFieldType foreign keys.
    Operations on a possibly-applied migration must not be hand-edited.
    """

    initial = True

    dependencies = [
        # Entry.favorite_by targets the (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='CustomField',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='The key/title/name of an actual field.', max_length=200)),
                ('slug', models.SlugField(editable=False, max_length=255, unique=True)),
                ('value', ckeditor.fields.RichTextField(help_text='The value/body/textual data of this field.', max_length=20000)),
                ('order_id', models.PositiveSmallIntegerField(default=0, help_text="The presentation order of this field in the 'block' that holds field of this type on a wiki page, relative to other fields of the same type.")),
            ],
            options={
                'ordering': ['type__order_id', 'order_id', 'slug'],
            },
        ),
        migrations.CreateModel(
            name='CustomFieldType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='A meaningful name.', max_length=200, unique=True)),
                ('slug', models.SlugField(editable=False, max_length=255, unique=True)),
                ('name_plural', models.CharField(help_text="The title of the 'block' in the wiki entry page that holds fields of this custom type.", max_length=255)),
                ('description', models.TextField(help_text='How should custom fields of this type look like?', max_length=2500)),
                ('order_id', models.PositiveSmallIntegerField(default=0, help_text="The presentation order of this custom field type 'block' in a wiki entry page, relative to other custom field type 'blocks'.")),
            ],
            options={
                'ordering': ['order_id', 'slug'],
            },
        ),
        migrations.CreateModel(
            name='Entry',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='The title of this wiki entry (page).', max_length=200, unique=True)),
                ('slug', models.SlugField(editable=False, max_length=255, unique=True)),
                ('publish', models.BooleanField(default=False)),
                ('value', ckeditor.fields.RichTextField(blank=True, help_text='The body of this wiki entry, without custom fields. Displayed above the custom fields in the wiki page.', max_length=20000)),
                ('order_id', models.PositiveSmallIntegerField(default=0, help_text='The presentation order in wiki section page, relative to other entries of this type.')),
                ('custom_fields', models.ManyToManyField(blank=True, help_text="Add fields to a 'block' off a custom field type. The presentation order (that is determined by the order id of the fields) will be shown on the right after saving.", to='wiki.CustomField')),
                ('custom_fields_presentation_order', models.ManyToManyField(blank=True, help_text="Add custom field types 'blocks' to this page. The presentation order (that is determined by the order id of the types) will be shown onthe right after saving.", to='wiki.CustomFieldType')),
                ('favorite_by', models.ManyToManyField(blank=True, help_text='A list of users that marked this entry as their favorite will be shown on the right. Use to see how popular an entry is.', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'entries',
                'ordering': ['section__order_id', 'order_id', 'slug'],
            },
        ),
        migrations.CreateModel(
            name='Section',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='The name of a wiki section/category.', max_length=200, unique=True)),
                ('slug', models.SlugField(editable=False, max_length=255, unique=True)),
                ('description', models.TextField(help_text='What entries should this wiki section hold?', max_length=2500)),
                ('order_id', models.PositiveSmallIntegerField(default=0, help_text='The presentation order in the wiki sections menu, relative to other wiki sections.')),
            ],
            options={
                'ordering': ['order_id', 'slug'],
            },
        ),
        # FKs are added after all models exist; SET_NULL keeps rows alive
        # when their Section / CustomFieldType is deleted.
        migrations.AddField(
            model_name='entry',
            name='section',
            field=models.ForeignKey(help_text='The wiki section to which this entry belongs.', null=True, on_delete=django.db.models.deletion.SET_NULL, to='wiki.Section'),
        ),
        migrations.AddField(
            model_name='customfield',
            name='type',
            field=models.ForeignKey(help_text='The type of this field.', null=True, on_delete=django.db.models.deletion.SET_NULL, to='wiki.CustomFieldType'),
        ),
    ]
|
# This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _ICONS
class _Vrt_Light_LightPng(_ICONS):
    # Shared base for all VRT "Light" PNG icon classes below: fixes the
    # icon set type and the directory the per-class _icon files live in.
    _type = "VRT_Light_LightPng"
    _icon_dir = "../resources/icons/VRT_Icons/VRT_LightPng/VRT_light_LightPng"
# FIX: the 2D and 3D "laser scanner 1" classes were both emitted as
# `DLaserScanner1LightPng`, so the 2D class was silently shadowed by the 3D
# one. The 3D variant is renamed `DreiDLaserScanner1LightPng`, matching the
# generator's naming of `DreiDLaserScanner2LightPng` below. This file is
# produced by autogen.sh, so the same fix should be applied to the generator.
class DLaserScanner1LightPng(_Vrt_Light_LightPng):
    _icon = "2dlaserscanner1.png"

class DLaserScanner2LightPng(_Vrt_Light_LightPng):
    _icon = "2dlaserscanner2.png"

class DreiDLaserScanner1LightPng(_Vrt_Light_LightPng):
    _icon = "3dlaserscanner1.png"

class DreiDLaserScanner2LightPng(_Vrt_Light_LightPng):
    _icon = "3dlaserscanner2.png"
# One generated class per icon; each only binds _icon to its PNG filename.
# NOTE(review): some filenames look like upstream typos (e.g.
# "DocumentDcanner3.png", "NetworkSttachedStorage1.png") but must match the
# actual resource files on disk — verify against the icon directory before
# changing them or the generator.
class AegisDatacellCellularDatalogger1LightPng(_Vrt_Light_LightPng):
    _icon = "AegisDatacellCellularDatalogger1.png"

class AllenBradleyControllogixPlcLightPng(_Vrt_Light_LightPng):
    _icon = "Allen-bradleyControllogixPlc.png"

class AllenBradleyFlexio1LightPng(_Vrt_Light_LightPng):
    _icon = "Allen-bradleyFlexio1.png"

class AllenBradleyFlexio2LightPng(_Vrt_Light_LightPng):
    _icon = "Allen-bradleyFlexio2.png"

class Appliance1LightPng(_Vrt_Light_LightPng):
    _icon = "Appliance1.png"

class AtmSwitch1LightPng(_Vrt_Light_LightPng):
    _icon = "AtmSwitch1.png"

class AtmSwitch2LightPng(_Vrt_Light_LightPng):
    _icon = "AtmSwitch2.png"

class AuthenticationServer1LightPng(_Vrt_Light_LightPng):
    _icon = "AuthenticationServer1.png"

class BranchFeederMonitorLightPng(_Vrt_Light_LightPng):
    _icon = "BranchFeederMonitor.png"

class CctvCamera1LightPng(_Vrt_Light_LightPng):
    _icon = "CctvCamera1.png"

class CctvCamera3LightPng(_Vrt_Light_LightPng):
    _icon = "CctvCamera3.png"

class CetMultiCircuitPowerMonitorLightPngLightPng(_Vrt_Light_LightPng):
    _icon = "CetMulti-circuitPowerMonitorLightPng(mcpm).png"

class CommunicationsServerLightPng(_Vrt_Light_LightPng):
    _icon = "CommunicationsServer.png"

class CompactPlc1LightPng(_Vrt_Light_LightPng):
    _icon = "CompactPlc1.png"

class CoronisWavenisLightPngLightPng(_Vrt_Light_LightPng):
    _icon = "CoronisWavenisLightPng.png"

class CurrentTransformerStripLightPng(_Vrt_Light_LightPng):
    _icon = "CurrentTransformerStrip.png"

class DataAcquisitionLightPngServer1LightPng(_Vrt_Light_LightPng):
    _icon = "DataAcquisitionLightPng(scada)server1.png"

class DataLoggerRtu1LightPng(_Vrt_Light_LightPng):
    _icon = "DataLoggerRtu1.png"

class DatabaseServer1LightPng(_Vrt_Light_LightPng):
    _icon = "DatabaseServer1.png"

class Desktop1LightPng(_Vrt_Light_LightPng):
    _icon = "Desktop1.png"

class Desktop3LightPng(_Vrt_Light_LightPng):
    _icon = "Desktop3.png"

class DialUpModem1LightPng(_Vrt_Light_LightPng):
    _icon = "Dial-upModem1.png"

class DirectoryServer1LightPng(_Vrt_Light_LightPng):
    _icon = "DirectoryServer1.png"

class DirectoryServerLightPng(_Vrt_Light_LightPng):
    _icon = "DirectoryServer.png"

class DocumentScanner1LightPng(_Vrt_Light_LightPng):
    _icon = "DocumentScanner1.png"

class DocumentScanner3LightPng(_Vrt_Light_LightPng):
    _icon = "DocumentDcanner3.png"

class DocumentScanner5LightPng(_Vrt_Light_LightPng):
    _icon = "DocumentDcanner5.png"

class DocumentScanner7LightPng(_Vrt_Light_LightPng):
    _icon = "DocumentScanner7.png"

class EndUsers1LightPng(_Vrt_Light_LightPng):
    _icon = "EndUsers1.png"

class EnergyUtilityMeterLightPng(_Vrt_Light_LightPng):
    _icon = "EnergyUtilityMeter.png"

class Facsimile1LightPng(_Vrt_Light_LightPng):
    _icon = "Facsimile1.png"

class Facsimile3LightPng(_Vrt_Light_LightPng):
    _icon = "Facsimile3.png"

class FibreOpticBreakOutTray1LightPng(_Vrt_Light_LightPng):
    _icon = "fibre optic break-out tray 1.png"

class FileServer1LightPng(_Vrt_Light_LightPng):
    _icon = "FileServer1.png"

class Firewall1LightPng(_Vrt_Light_LightPng):
    _icon = "Firewall1.png"

class Firewall2LightPng(_Vrt_Light_LightPng):
    _icon = "Firewall2.png"

class FlatPanelDisplay1LightPng(_Vrt_Light_LightPng):
    _icon = "FlatPanelDisplay1.png"

class GasMeterLightPng(_Vrt_Light_LightPng):
    _icon = "GasMeter.png"

class GenericBlackBox1LightPng(_Vrt_Light_LightPng):
    _icon = "GenericBlackBox1.png"

class GenericPlcDcsController1LightPng(_Vrt_Light_LightPng):
    _icon = "GenericPLCDCSController1.png"

class Hub1LightPng(_Vrt_Light_LightPng):
    _icon = "Hub1.png"

class Hub2LightPng(_Vrt_Light_LightPng):
    _icon = "Hub2.png"

class IndustrialBarcodeScanner1LightPng(_Vrt_Light_LightPng):
    _icon = "IndustrialBarcodeScanner1.png"

class IndustrialBarcodeScanner3LightPng(_Vrt_Light_LightPng):
    _icon = "IndustrialBarcodeScanner3.png"

class IndustrialCellularModem1LightPng(_Vrt_Light_LightPng):
    _icon = "IndustrialCellularModem1.png"

class IndustrialCellularModem3LightPng(_Vrt_Light_LightPng):
    _icon = "IndustrialCellularModem3.png"

class IndustrialEthernetToSerialConverter1LightPng(_Vrt_Light_LightPng):
    _icon = "IndustrialEthernettoSerialConverter1.png"

class IndustrialFibreToEthernetConverter1LightPng(_Vrt_Light_LightPng):
    _icon = "IndustrialFibretoEthernetConverter 1.png"

class IndustrialPc1LightPng(_Vrt_Light_LightPng):
    _icon = "IndustrialPc1.png"

class IndustrialPc3LightPng(_Vrt_Light_LightPng):
    _icon = "IndustrialPc3.png"

class IndustrialSwitch1LightPng(_Vrt_Light_LightPng):
    _icon = "IndustrialSwitch1.png"

class InkjetPrinter1LightPng(_Vrt_Light_LightPng):
    _icon = "InkjetPrinter1.png"

class InkjetPrinter3LightPng(_Vrt_Light_LightPng):
    _icon = "InkjetPrinter3.png"

class Laptop1LightPng(_Vrt_Light_LightPng):
    _icon = "Laptop1.png"

class Laptop3LightPng(_Vrt_Light_LightPng):
    _icon = "Laptop3.png"

class LargeHmiPanel1LightPng(_Vrt_Light_LightPng):
    _icon = "LargeHmiPanel1.png"

class LaserPrinter1LightPng(_Vrt_Light_LightPng):
    _icon = "LaserPrinter1.png"

class LaserPrinter3LightPng(_Vrt_Light_LightPng):
    _icon = "LaserPrinter3.png"

class LeadAcidBattery1LightPng(_Vrt_Light_LightPng):
    _icon = "Lead-acidBattery 1.png"

class MailServer1LightPng(_Vrt_Light_LightPng):
    _icon = "MailServer1.png"

class MicrowaveSatelliteBase1LightPng(_Vrt_Light_LightPng):
    _icon = "MicrowaveSatelliteBase1.png"

class MultiRoleServer1LightPng(_Vrt_Light_LightPng):
    _icon = "Multi-roleServer1.png"

class NetworkAttachedStorage1LightPng(_Vrt_Light_LightPng):
    _icon = "NetworkSttachedStorage1.png"

class Projector1LightPng(_Vrt_Light_LightPng):
    _icon = "Projector1.png"

class RackServer1LightPng(_Vrt_Light_LightPng):
    _icon = "RackServer1.png"

class RackmountSwitch1LightPng(_Vrt_Light_LightPng):
    _icon = "RackmountSwitch1.png"

class RoleEmblemAuthenticationLightPng(_Vrt_Light_LightPng):
    _icon = "RoleEmblem-Authentication.png"

class RoleEmblemDataAcquisitionLightPngLightPng(_Vrt_Light_LightPng):
    _icon = "RoleEmblem-DataAcquisitionLightPng(SCADA).png"

class RoleEmblemDatabaseLightPng(_Vrt_Light_LightPng):
    _icon = "RoleEmblem-Database.png"

class RoleEmblemDirectoryLightPng(_Vrt_Light_LightPng):
    _icon = "RoleEmblem-Directory.png"

class RoleEmblemFileLightPng(_Vrt_Light_LightPng):
    _icon = "RoleEmblem-File.png"

class RoleEmblemMailLightPng(_Vrt_Light_LightPng):
    _icon = "RoleEmblem-Mail.png"

class RoleEmblemVideoLightPng(_Vrt_Light_LightPng):
    _icon = "RoleEmblem-Video.png"

class RoleEmblemVirtualisationObjectLightPng(_Vrt_Light_LightPng):
    _icon = "RoleEmblem-VirtualisationObject.png"

class RoleEmblemVirtualisationLightPng(_Vrt_Light_LightPng):
    _icon = "RoleEmblem-Virtualisation.png"

class RoleEmblemWebLightPng(_Vrt_Light_LightPng):
    _icon = "RoleEmblem-Web.png"

class Router1LightPng(_Vrt_Light_LightPng):
    _icon = "Router1.png"

class RouterFirewallLightPng(_Vrt_Light_LightPng):
    _icon = "RouterFirewall.png"

class RouterLightPng(_Vrt_Light_LightPng):
    _icon = "Router.png"

class SinglePhaseEnergyMeterLightPngLightPng(_Vrt_Light_LightPng):
    _icon = "Single-phaseEnergyMeter(DIN).png"

class SinglePhaseEnergyMeter1LightPng(_Vrt_Light_LightPng):
    _icon = "Single-phaseEnergyMeter1.png"

class SmallHmiPanel1LightPng(_Vrt_Light_LightPng):
    _icon = "SmallHMIPanel1.png"

class SmallTouchPanel1LightPng(_Vrt_Light_LightPng):
    _icon = "SmallTouchPanel1.png"

class Smartphone1LightPng(_Vrt_Light_LightPng):
    _icon = "Smartphone1.png"

class Smartphone3LightPng(_Vrt_Light_LightPng):
    _icon = "Smartphone3.png"

class SolarPvPanel1LightPng(_Vrt_Light_LightPng):
    _icon = "SolarPVPanel1.png"

class SolarPvPanel2LightPng(_Vrt_Light_LightPng):
    _icon = "SolarPVPanel2.png"

class Switch1LightPng(_Vrt_Light_LightPng):
    _icon = "Switch1.png"

class Switch2LightPng(_Vrt_Light_LightPng):
    _icon = "Switch2.png"

class Tablet1LightPng(_Vrt_Light_LightPng):
    _icon = "Tablet1.png"

class Tablet3LightPng(_Vrt_Light_LightPng):
    _icon = "Tablet3.png"

class Telephone1LightPng(_Vrt_Light_LightPng):
    _icon = "Telephone1.png"

class Telephone3LightPng(_Vrt_Light_LightPng):
    _icon = "Telephone3.png"

class ThinClient1LightPng(_Vrt_Light_LightPng):
    _icon = "ThinClient1.png"

class ThinClient3LightPng(_Vrt_Light_LightPng):
    _icon = "ThinClient3.png"

class ThreePhaseEnergyMeterLightPngLightPng(_Vrt_Light_LightPng):
    _icon = "Three-phaseEnergyMeterLightPng(DIN).png"

class ThreePhaseEnergyMeter1LightPng(_Vrt_Light_LightPng):
    _icon = "Three-phaseEnergyMeter1.png"

class ThreePhaseMultiFunctionMeter1LightPng(_Vrt_Light_LightPng):
    _icon = "Three-phaseMulti-functionMeter1.png"

class ThreePhasePowerQualityAnalyser1LightPng(_Vrt_Light_LightPng):
    _icon = "Three-phasePowerQualityAnalyser1.png"

class TowerServer1LightPng(_Vrt_Light_LightPng):
    _icon = "TowerServer1.png"

class UnifiedCommunicationsServer1LightPng(_Vrt_Light_LightPng):
    _icon = "UnifiedCommunicationsServer1.png"

class UninterruptiblePowerSupplyLightPng1LightPng(_Vrt_Light_LightPng):
    _icon = "uninterruptiblePowerSupplyLightPng(UPS)1.png"

class VideoServer1LightPng(_Vrt_Light_LightPng):
    _icon = "VideoServer1.png"

class VirtualisationObjectServer1LightPng(_Vrt_Light_LightPng):
    _icon = "VirtualisationObjectServer1.png"

class VirtualisationServer1LightPng(_Vrt_Light_LightPng):
    _icon = "VirtualisationServer1.png"

class VpnConcentrator1LightPng(_Vrt_Light_LightPng):
    _icon = "VPNConcentrator1.png"

class VpnConcentrator2LightPng(_Vrt_Light_LightPng):
    _icon = "VPNConcentrator2.png"

class WagesHubASeries1LightPng(_Vrt_Light_LightPng):
    _icon = "WagesHubASeries1.png"

class WaterThermalMeterLightPng(_Vrt_Light_LightPng):
    _icon = "WaterThermalMeter.png"
class WebServer1LightPng(_Vrt_Light_LightPng):
_icon = "WebServer1.png"
class WiredSegmentLightPngLightPng(_Vrt_Light_LightPng):
_icon = "WiredSegmentLightPng(Internet).png"
class WiredSegmentLightPngLightPng(_Vrt_Light_LightPng):
_icon = "WiredSegmentLightPng(subnet).png"
class WirelessAccessPoint1LightPng(_Vrt_Light_LightPng):
_icon = "WirelessAccessPoint1.png"
class WirelessAccessPoint2LightPng(_Vrt_Light_LightPng):
_icon = "WirelessAccessPoint2.png"
class WirelessBase1LightPng(_Vrt_Light_LightPng):
_icon = "WirelessBase1.png"
class WirelessRouterAccessPoint1LightPng(_Vrt_Light_LightPng):
_icon = "WirelessRouterAccessPoint1.png"
class WirelessRouterAccessPoint3LightPng(_Vrt_Light_LightPng):
_icon = "WirelessRouterAccessPoint3.png"
class WirelessRouterFirewallLightPng(_Vrt_Light_LightPng):
_icon = "WirelessRouterFirewall.png"
class WirelessRouterLightPng(_Vrt_Light_LightPng):
_icon = "WirelessRouter.png"
class WorkgroupSwitch1LightPng(_Vrt_Light_LightPng):
_icon = "WorkgroupSwitch1.png"
|
#!/usr/bin/env python
# coding: utf-8
# # Import Libraries and Dataset
# In[21]:
# Installing plotly Library
# NOTE(review): notebook-export artifact — get_ipython() only exists inside
# IPython/Jupyter; running this file as a plain script raises NameError here.
get_ipython().system('pip install plotly')
# In[84]:
# Importing Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly
# In[85]:
# Importing Dataset (Winter Olympics)& Dataset (Summer Olympics)
# NOTE(review): hard-coded absolute Windows paths — these loads only work on
# the author's machine; consider a data directory relative to the script.
winter= pd.read_csv(r"C:\Users\shruti\Desktop\Decodr Session Recording\Project\Decodr Project\In-depth Analysis of Olympic Dataset\winter.csv")
summer= pd.read_csv(r"C:\Users\shruti\Desktop\Decodr Session Recording\Project\Decodr Project\In-depth Analysis of Olympic Dataset\summer.csv")
# In[86]:
# Quick sanity checks of the loaded frames (notebook display expressions).
winter.head()
# In[87]:
winter.tail()
# In[88]:
summer.head()
# In[89]:
summer.tail()
# In[90]:
# Import Dataset (Dictionary) — maps country names to their IOC codes.
dicts=pd.read_csv(r"C:\Users\shruti\Desktop\Decodr Session Recording\Project\Decodr Project\In-depth Analysis of Olympic Dataset\dictionary.csv")
# In[91]:
dicts.head()
# In[92]:
dicts.tail()
# # Analyzing "summer" Dataset
# In[93]:
# Rename the medal table's country column to "Code" so it can be joined with
# the dictionary frame on the IOC code.
summer.rename(columns={"Country": "Code"}, inplace= True)
# In[94]:
summer.head()
# In[95]:
# Outer-join so rows with unknown codes are kept rather than dropped.
summer=pd.merge(summer, dicts, on= "Code", how= "outer")
# In[96]:
summer.head()
# In[97]:
summer.describe()
# ### Plotting Choropleth Map
# In[98]:
# Medal count per country; keep only countries that won at least one medal.
summer_medals= summer.groupby(["Country", "Code"])["Medal"].count().reset_index()
summer_medals= summer_medals[summer_medals["Medal"]>0]
# In[99]:
fig= px.choropleth(summer_medals, locations= "Code", color= "Medal", hover_name= "Country",
                   color_continuous_scale= px.colors.sequential.Plasma)
fig.show()
# ### Most Successful Male & Female Athlete
# In[133]:
# Most successful Male Athlete
print("Most successful male Athlete in Summer Olympics is:", summer[summer["Gender"]=="Men"]["Athlete"].value_counts()[:1].index[0],
      "with", summer[summer["Gender"]=="Men"]["Athlete"].value_counts().values[0], "medals")
# In[134]:
# Most successful Female Athlete
print("Most successful female Athlete in Summer Olympics is:", summer[summer["Gender"]=="Women"]["Athlete"].value_counts()[:1].index[0],
      "with", summer[summer["Gender"]=="Women"]["Athlete"].value_counts().values[0], "medals")
# ### Winner of most Medals in Summer Olympics
# In[142]:
# Medal count per (athlete, medal type), most medals first.
medals= summer.groupby(["Athlete", "Medal"])["Sport"].count().reset_index().sort_values(by="Sport", ascending= False)
# In[143]:
medals
# In[144]:
# Top row per medal type, i.e. the athlete with the most of each medal.
summer_medals= medals.drop_duplicates(subset=["Medal"], keep="first")
# In[137]:
summer_medals
# In[106]:
# FIX: assign a flat list of names. The original `[["Athlete", ...]]` (a list
# of lists) turned the columns into a single-level MultiIndex.
medals.columns = ["Athlete", "Medal", "Count"]
# In[107]:
medals
# ### Visualizing the medal distribution of Top 10 countries in Summer Olympics
# In[108]:
medals_country= summer.groupby(["Country", "Medal"])["Gender"].count().reset_index().sort_values(by="Gender", ascending= False)
# In[109]:
medals_country
# In[110]:
# FIX: use keyword arguments — positional DataFrame.pivot arguments were
# deprecated in pandas 1.1 and removed in pandas 2.0.
medals_country= medals_country.pivot(index="Country", columns="Medal", values="Gender").fillna(0)
# In[111]:
medals_country
# In[112]:
top= medals_country.sort_values(by= "Gold", ascending= False)[:10]
# In[113]:
top
# In[135]:
# Horizontal bar chart of Gold/Silver/Bronze counts for the top 10 countries.
fig= top.plot.barh(width=0.8)
fig= plt.gcf()
fig.set_size_inches(10,10)
plt.title("Medal distribution in Top 10 countries in Summer Olympics")
plt.show()
# In[ ]:
# In[ ]:
# # Analyzing "winter" Dataset
# In[115]:
# Rename the medal table's country column to "Code" to join with the
# country dictionary on the IOC code.
winter.rename(columns={"Country": "Code"}, inplace= True)
# In[116]:
winter= pd.merge(winter, dicts, on="Code", how="outer")
# In[117]:
winter.head()
# In[119]:
winter.describe()
# ### Plotting Choropleth Map
# In[122]:
# Medal count per country; keep only countries with at least one medal.
winter_medals=winter.groupby(["Country", "Code"])["Medal"].count().reset_index()
winter_medals=winter_medals[winter_medals["Medal"]>0]
# In[124]:
fig=px.choropleth(winter_medals, locations="Code", color="Medal", hover_name="Country",
                  color_continuous_scale= px.colors.sequential.Plasma)
fig.show()
# ### Most Successful Male & Female Athlete
# In[131]:
# Most successful Male Athlete
print("Most successful male Athlete in Winter Olympics is:", winter[winter["Gender"]=="Men"]["Athlete"].value_counts()[:1].index[0],
      "with", winter[winter["Gender"]=="Men"]["Athlete"].value_counts().values[0], "medals")
# In[132]:
# Most successful Female Athlete
print("Most successful female Athlete in Winter Olympics is:", winter[winter["Gender"]=="Women"]["Athlete"].value_counts()[:1].index[0],
      "with", winter[winter["Gender"]=="Women"]["Athlete"].value_counts().values[0], "medals")
# ### Winner of most Medals in Winter Olympics
# In[145]:
medals= winter.groupby(["Athlete", "Medal"])["Sport"].count().reset_index().sort_values(by="Sport", ascending= False)
# In[146]:
medals
# ### Visualizing the medal distribution of Top 10 countries in Winter Olympics
# In[147]:
medals_country= winter.groupby(["Country", "Medal"])["Gender"].count().reset_index().sort_values(by="Gender", ascending= False)
# In[148]:
medals_country
# In[149]:
# FIX: use keyword arguments — positional DataFrame.pivot arguments were
# deprecated in pandas 1.1 and removed in pandas 2.0.
medals_country= medals_country.pivot(index="Country", columns="Medal", values="Gender").fillna(0)
# In[152]:
medals_country
# In[150]:
top= medals_country.sort_values(by= "Gold", ascending= False)[:10]
# In[151]:
fig= top.plot.barh(width=0.8)
fig= plt.gcf()
fig.set_size_inches(10,10)
plt.title("Medal distribution in Top 10 countries in Winter Olympics")
plt.show()
# In[ ]:
|
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
import asyncio
import logging
import time
from abc import ABC, abstractmethod
from datetime import timedelta
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import httpx
import requests
from ._http_backoff_policy import HttpBackoffPolicy
from ._http_response import HttpResponse
from ._http_retry_policy import HttpRetryPolicy
from ._proto_http_request import ProtoHttpRequest
from ._proto_http_request import is_json_mime_type
from ._utils import create_curl_request
# Module-level logger shared by all HTTP clients in this SDK.
_LOGGER = logging.getLogger("accelbyte_py_sdk.http")
# NOTE(elmer): convert into a class if needed
HttpRawResponse = Tuple[int, str, Any]  # code, content-type, content
class HttpClient(ABC):
    """Abstract base class for the SDK's HTTP transports.

    A concrete client turns a ProtoHttpRequest into a library-specific
    request object, sends it (optionally retrying per the configured
    policies), and normalizes the raw response.
    """

    # Class-level defaults; may be overridden per instance or per call.
    backoff_policy: Optional[HttpBackoffPolicy] = None
    request_log_formatter: Optional[Callable[[dict], str]] = None
    response_log_formatter: Optional[Callable[[dict], str]] = None
    retry_policy: Optional[HttpRetryPolicy] = None

    # noinspection PyMethodMayBeStatic
    def close(self) -> None:
        """Release held resources. No-op by default."""
        pass

    # noinspection PyMethodMayBeStatic
    def is_async_compatible(self) -> bool:
        """Return True when send_request_async is supported."""
        return False

    @abstractmethod
    def create_request(self, proto: ProtoHttpRequest) -> Any:
        """Build a library-specific request object from *proto*."""

    @abstractmethod
    def send_request(
        self,
        request: Any,
        retry_policy: Optional[HttpRetryPolicy] = None,
        backoff_policy: Optional[HttpBackoffPolicy] = None,
        **kwargs
    ) -> Tuple[Any, Union[None, HttpResponse]]:
        """Send *request* and return ``(raw_response, error)``."""

    async def send_request_async(
        self,
        request: Any,
        retry_policy: Optional[HttpRetryPolicy] = None,
        backoff_policy: Optional[HttpBackoffPolicy] = None,
        **kwargs
    ) -> Tuple[Any, Union[None, HttpResponse]]:
        """Async variant of send_request.

        Async-compatible subclasses must override this; for other clients
        a 400-style error response is returned instead.
        """
        if self.is_async_compatible():
            raise NotImplementedError
        return None, HttpResponse.create_error(400, "HTTP client is not async compatible.")

    @abstractmethod
    def handle_response(
        self,
        raw_response: Any,
        **kwargs
    ) -> Tuple[Union[None, HttpRawResponse], Union[None, HttpResponse]]:
        """Normalize *raw_response* into ``(raw_tuple, error)``."""

    def _log_request(self, request_dict: dict) -> None:
        # Fall back to the module-level formatter when none is configured.
        formatter = self.request_log_formatter or format_request_log
        _LOGGER.debug(formatter(request_dict))

    def _log_response(self, response_dict: dict):
        formatter = self.response_log_formatter or format_response_log
        _LOGGER.debug(formatter(response_dict))
class RequestsHttpClient(HttpClient):
    """HttpClient backed by the synchronous `requests` library."""

    def __init__(self, allow_redirects: bool = True):
        # Default redirect behavior, used when the caller does not pass one.
        self.allow_redirects = allow_redirects
        self.session = requests.Session()

    def close(self) -> None:
        """Close the underlying requests session (connection pool)."""
        if self.session is not None:
            self.session.close()

    def create_request(self, proto: ProtoHttpRequest) -> Any:
        """Convert a ProtoHttpRequest into a requests.PreparedRequest."""
        prepared_request = requests.Request(
            method=proto.method,
            url=proto.url,
            headers=proto.headers,
            files=proto.files,
            data=proto.data,
            json=proto.json_,
        ).prepare()
        return prepared_request

    def send_request(
        self,
        request: Any,
        retry_policy: Optional[HttpRetryPolicy] = None,
        backoff_policy: Optional[HttpBackoffPolicy] = None,
        **kwargs
    ) -> Tuple[Any, Union[None, HttpResponse]]:
        """Send *request*, applying the instance default for redirects."""
        if "allow_redirects" not in kwargs:
            kwargs["allow_redirects"] = self.allow_redirects
        return self._send_request_internal(request, retry_policy=retry_policy, backoff_policy=backoff_policy, **kwargs)

    def _send_request_internal(
        self,
        request: Any,
        retry_policy: Optional[HttpRetryPolicy] = None,
        backoff_policy: Optional[HttpBackoffPolicy] = None,
        **kwargs
    ) -> Tuple[Any, Optional[HttpResponse]]:
        """Retry loop: send until success, no policy, or policy says stop.

        Returns (raw_response, error); error is None on success.
        """
        # pylint: disable=not-callable
        # Per-call policies take precedence over the instance/class defaults.
        retry_policy = retry_policy if retry_policy is not None else self.retry_policy
        backoff_policy = backoff_policy if backoff_policy is not None else self.backoff_policy
        attempts = 0
        elapsed = timedelta(0)
        should_retry = True
        raw_response = None
        error = None
        while True:
            try:
                self.log_request(request)
                raw_response = self.session.send(request, **kwargs)
            except requests.exceptions.ConnectionError as e:
                # Only connection errors are converted into an error response;
                # other exceptions (e.g. timeouts) propagate to the caller.
                _LOGGER.error(str(e))
                error = HttpResponse.create_connection_error()
            # NOTE(review): after a ConnectionError, raw_response may still hold
            # the previous attempt's response — confirm this is intended.
            if raw_response is not None and raw_response.ok:
                error = None
                break
            if retry_policy is None:
                # No policy configured: give up after the first failure.
                should_retry = False
                break
            attempts += 1
            elapsed += raw_response.elapsed if raw_response is not None else timedelta(0)
            should_retry = retry_policy(request, raw_response, retries=attempts - 1, elapsed=elapsed, **kwargs)
            if not should_retry:
                break
            if backoff_policy:
                # Sleep between attempts; the sleep counts toward elapsed time.
                sleep_duration = backoff_policy(request, raw_response, retries=attempts - 1, elapsed=elapsed, **kwargs)
                time.sleep(sleep_duration)
                elapsed += timedelta(seconds=sleep_duration)
        return raw_response, error

    def handle_response(
        self,
        raw_response: requests.Response,
        **kwargs
    ) -> Tuple[Union[None, HttpRawResponse], Union[None, HttpResponse]]:
        """Normalize a requests.Response via the shared process_response()."""
        http_raw_response, http_response = process_response(
            status_code=raw_response.status_code,
            content_json=lambda: raw_response.json(),
            content_raw=lambda: raw_response.content,
            content_text=lambda: raw_response.text,
            headers=raw_response.headers,
            is_redirect=raw_response.is_redirect,
            history=raw_response.history,
        )
        self.log_response(raw_response)
        return http_raw_response, http_response

    def log_request(self, prepared_request: requests.PreparedRequest) -> None:
        """Debug-log the outgoing request (only when DEBUG is enabled)."""
        if _LOGGER.isEnabledFor(logging.DEBUG):
            request_dict = RequestsHttpClient.convert_request_to_dict(prepared_request)
            request_dict["timestamp"] = int(time.time())
            self._log_request(request_dict)

    def log_response(self, response: requests.Response) -> None:
        """Debug-log the received response (only when DEBUG is enabled)."""
        if _LOGGER.isEnabledFor(logging.DEBUG):
            response_dict = RequestsHttpClient.convert_response_to_dict(response)
            response_dict["timestamp"] = int(time.time())
            self._log_response(response_dict)

    @staticmethod
    def convert_to_curl(prepared_request: requests.PreparedRequest) -> str:
        """Render the prepared request as an equivalent curl command line."""
        return create_curl_request(
            uri=prepared_request.url,
            method=prepared_request.method,
            headers={k: v for k, v in prepared_request.headers.items()},
            data=prepared_request.body
        )

    @staticmethod
    def convert_request_to_dict(prepared_request: requests.PreparedRequest) -> dict:
        """Flatten a prepared request into a plain dict for logging."""
        return {
            "url": prepared_request.url,
            "method": prepared_request.method,
            "headers": {k: v for k, v in prepared_request.headers.items()},
            "data": prepared_request.body
        }

    @staticmethod
    def convert_response_to_dict(response: requests.Response) -> dict:
        """Flatten a response into a plain dict for logging."""
        return {
            "url": response.url,
            "status_code": response.status_code,
            "elapsed": response.elapsed.total_seconds(),
            "headers": {k: v for k, v in response.headers.items()},
            "body": response.content,
        }
class HttpxHttpClient(HttpClient):
    """HttpClient backed by `httpx`, supporting both sync and async sending."""

    def __init__(
        self,
        max_connections: int = 100,
        max_keepalive_connections: int = 20,
    ):
        limits = httpx.Limits(
            max_connections=max_connections,
            max_keepalive_connections=max_keepalive_connections,
        )
        self.transport = httpx.HTTPTransport(
            verify=True,
            cert=None,
            http1=True,
            http2=True,
            limits=limits,
            trust_env=True,
        )
        # NOTE(review): async transport has http2=False while the sync one has
        # http2=True — confirm the asymmetry is intentional.
        self.transport_async = httpx.AsyncHTTPTransport(
            verify=True,
            cert=None,
            http1=True,
            http2=False,
            limits=limits,
            trust_env=True,
        )
        self.client = httpx.Client(transport=self.transport)
        self.client_async = httpx.AsyncClient(transport=self.transport_async)

    def close(self) -> None:
        """Close sync client/transport and schedule async ones for closing.

        NOTE(review): asyncio.create_task requires a running event loop —
        calling close() outside async context will raise; confirm callers.
        """
        if self.client is not None:
            self.client.close()
        if self.transport is not None:
            self.transport.close()
        if self.client_async is not None:
            _ = asyncio.create_task(self.client_async.aclose())
        if self.transport_async is not None:
            _ = asyncio.create_task(self.transport_async.aclose())

    # noinspection PyMethodMayBeStatic
    def is_async_compatible(self) -> bool:
        return True

    def create_request(self, proto: ProtoHttpRequest) -> Any:
        """Convert a ProtoHttpRequest into an httpx.Request."""
        httpx_request = httpx.Request(
            method=proto.method,
            url=proto.url,
            headers=proto.headers,
            files=proto.files,
            data=proto.data,
            json=proto.json_,
        )
        return httpx_request

    def send_request(
        self,
        request: Any,
        retry_policy: Optional[HttpRetryPolicy] = None,
        backoff_policy: Optional[HttpBackoffPolicy] = None,
        **kwargs
    ) -> Tuple[Any, Union[None, HttpResponse]]:
        return self._send_request_internal(request, retry_policy=retry_policy, backoff_policy=backoff_policy, **kwargs)

    def _send_request_internal(
        self,
        request: Any,
        retry_policy: Optional[HttpRetryPolicy] = None,
        backoff_policy: Optional[HttpBackoffPolicy] = None,
        **kwargs
    ) -> Tuple[Any, Optional[HttpResponse]]:
        """Synchronous retry loop; returns (raw_response, error)."""
        # pylint: disable=not-callable
        retry_policy = retry_policy if retry_policy is not None else self.retry_policy
        backoff_policy = backoff_policy if backoff_policy is not None else self.backoff_policy
        attempts = 0
        elapsed = timedelta(0)
        should_retry = True
        raw_response = None
        error = None
        while True:
            self.log_request(request)
            raw_response = self.client.send(request)
            ok = self.__ok(raw_response)
            if raw_response is not None:
                # Mirror requests' Response.ok so shared code can rely on it.
                setattr(raw_response, "ok", ok)
            if ok:
                error = None
                break
            if retry_policy is None:
                should_retry = False
                break
            attempts += 1
            elapsed += raw_response.elapsed if raw_response is not None else timedelta(0)
            should_retry = retry_policy(request, raw_response, retries=attempts - 1, elapsed=elapsed, **kwargs)
            if not should_retry:
                break
            if backoff_policy:
                sleep_duration = backoff_policy(request, raw_response, retries=attempts - 1, elapsed=elapsed, **kwargs)
                time.sleep(sleep_duration)
                elapsed += timedelta(seconds=sleep_duration)
        return raw_response, error

    async def send_request_async(
        self,
        request: Any,
        retry_policy: Optional[HttpRetryPolicy] = None,
        backoff_policy: Optional[HttpBackoffPolicy] = None,
        **kwargs
    ) -> Tuple[Any, Union[None, HttpResponse]]:
        return await self._send_request_internal_async(request, retry_policy=retry_policy, backoff_policy=backoff_policy, **kwargs)

    async def _send_request_internal_async(
        self,
        request: Any,
        retry_policy: Optional[HttpRetryPolicy] = None,
        backoff_policy: Optional[HttpBackoffPolicy] = None,
        **kwargs
    ) -> Tuple[Any, Optional[HttpResponse]]:
        """Async retry loop; mirrors _send_request_internal."""
        # pylint: disable=not-callable
        retry_policy = retry_policy if retry_policy is not None else self.retry_policy
        backoff_policy = backoff_policy if backoff_policy is not None else self.backoff_policy
        attempts = 0
        elapsed = timedelta(0)
        should_retry = True
        raw_response = None
        error = None
        while True:
            self.log_request(request)
            raw_response = await self.client_async.send(request)
            ok = self.__ok(raw_response)
            if raw_response is not None:
                setattr(raw_response, "ok", ok)
            if ok:
                error = None
                break
            if retry_policy is None:
                should_retry = False
                break
            attempts += 1
            elapsed += raw_response.elapsed if raw_response is not None else timedelta(0)
            # FIX: forward **kwargs to the retry policy, matching the sync path.
            should_retry = retry_policy(request, raw_response, retries=attempts - 1, elapsed=elapsed, **kwargs)
            if not should_retry:
                break
            if backoff_policy:
                sleep_duration = backoff_policy(request, raw_response, retries=attempts - 1, elapsed=elapsed, **kwargs)
                await asyncio.sleep(sleep_duration)
                elapsed += timedelta(seconds=sleep_duration)
        return raw_response, error

    def handle_response(
        self,
        raw_response: httpx.Response,
        **kwargs
    ) -> Tuple[Union[None, HttpRawResponse], Union[None, HttpResponse]]:
        """Normalize an httpx.Response via the shared process_response()."""
        http_raw_response, http_response = process_response(
            status_code=raw_response.status_code,
            content_json=lambda: raw_response.json(),
            content_raw=lambda: raw_response.content,
            content_text=lambda: raw_response.text,
            headers=raw_response.headers,
            is_redirect=raw_response.is_redirect,
            history=raw_response.history,
        )
        self.log_response(raw_response)
        return http_raw_response, http_response

    def log_request(self, httpx_request: httpx.Request) -> None:
        """Debug-log the outgoing request (only when DEBUG is enabled)."""
        if _LOGGER.isEnabledFor(logging.DEBUG):
            request_dict = HttpxHttpClient.convert_request_to_dict(httpx_request)
            request_dict["timestamp"] = int(time.time())
            self._log_request(request_dict)

    def log_response(self, httpx_response: httpx.Response) -> None:
        """Debug-log the received response (only when DEBUG is enabled)."""
        if _LOGGER.isEnabledFor(logging.DEBUG):
            response_dict = HttpxHttpClient.convert_response_to_dict(httpx_response)
            response_dict["timestamp"] = int(time.time())
            # FIX: responses were routed through _log_request (copy-paste bug),
            # bypassing response_log_formatter; use _log_response like
            # RequestsHttpClient does.
            self._log_response(response_dict)

    @staticmethod
    def convert_to_curl(httpx_request: httpx.Request) -> str:
        """Render the request as an equivalent curl command line."""
        return create_curl_request(
            uri=httpx_request.url,
            method=httpx_request.method,
            headers={k: v for k, v in httpx_request.headers.items()},
            data=httpx_request.content
        )

    @staticmethod
    def convert_request_to_dict(httpx_request: httpx.Request) -> dict:
        """Flatten a request into a plain dict for logging."""
        return {
            "url": httpx_request.url,
            "method": httpx_request.method,
            "headers": {k: v for k, v in httpx_request.headers.items()},
            "data": httpx_request.content,
        }

    @staticmethod
    def convert_response_to_dict(httpx_response: httpx.Response) -> dict:
        """Flatten a response into a plain dict for logging."""
        return {
            "url": httpx_response.url,
            "status_code": httpx_response.status_code,
            "elapsed": httpx_response.elapsed.total_seconds(),
            "headers": {k: v for k, v in httpx_response.headers.items()},
            "body": httpx_response.content,
        }

    @staticmethod
    def __ok(response) -> bool:
        """True when raise_for_status() does not flag the response as an error."""
        try:
            response.raise_for_status()
        except (RuntimeError, httpx.HTTPStatusError):
            return False
        return True
def format_request_log(request_dict: dict) -> str:
    """Default request-log formatter: the dict rendered via str()."""
    return f"{request_dict}"
def format_response_log(response_dict: dict) -> str:
    """Default response-log formatter: the dict rendered via str()."""
    return f"{response_dict}"
def process_response(
    status_code: int,
    content_json: Callable[[], Any],
    content_raw: Callable[[], Any],
    content_text: Callable[[], Optional[str]],
    headers: Dict[str, str],
    is_redirect: Optional[bool] = None,
    history: Optional[List[Any]] = None,
) -> Tuple[Union[None, HttpRawResponse], Union[None, HttpResponse]]:
    """Normalize a response into ((status_code, content_type, content), error).

    The content accessors are callables so the body is only read/parsed for
    the branch that needs it. Branches are checked in order: redirect,
    redirect history, Content-Type header, 201-with-Location, else empty.
    """
    is_redirect = is_redirect if is_redirect is not None else False
    history = history if history is not None else []
    if 400 <= status_code <= 599:
        # Error statuses are logged here but still normalized below.
        _LOGGER.error(f"[{status_code}] {str(content_text())}")
    if is_redirect:
        # Redirects surface the Location header as the content.
        content_type = "location"
        content = headers["Location"]
    elif history:
        # Walk the redirect history and take the first hop that recorded a
        # Location header, using that hop's status code when available.
        temp_status_code: int = status_code
        temp_content: Optional[str] = None
        for h in history:
            if not hasattr(h, "headers"):
                continue
            h_headers = h.headers
            if not isinstance(h_headers, dict):
                continue
            if "Location" not in h_headers:
                continue
            temp_status_code = h.status_code if hasattr(h, "status_code") and isinstance(h.status_code, int) else status_code
            temp_content = h_headers["Location"]
            break
        if temp_content is None:
            # History present but no usable Location anywhere: unhandled.
            return None, HttpResponse.create_unhandled_error()
        status_code, content_type, content = temp_status_code, "location", temp_content
    elif "Content-Type" in headers:
        content_type = headers.get("Content-Type")
        if is_json_mime_type(content_type):
            try:
                content = content_json()
                if content is None:
                    _LOGGER.warning("Expecting 'application/json' content received null.")
                    content = ""
            except ValueError:
                # Body claimed JSON but failed to parse; degrade to empty.
                content = ""
        elif content_type.startswith("text/"):
            content = content_text()
            if content is None:
                _LOGGER.warning("Expecting 'text/*' content received null.")
                content = ""
        else:
            # Unknown media type: hand back the raw bytes.
            content = content_raw()
    elif status_code == 201:
        # Created without a Content-Type: expose the Location (if any).
        content_type = "location"
        content = headers.get("Location", "")
    else:
        content_type = None
        content = None
    return (status_code, content_type, content), None
# Registry of available HTTP client implementations.
HTTP_CLIENTS = [
    RequestsHttpClient,
    HttpxHttpClient,
]
# Client class used when none is specified explicitly.
DEFAULT_HTTP_CLIENT = RequestsHttpClient
|
<filename>model/BaseModel.py
# -*- coding: utf-8 -*-
# Created by <NAME> on 2019/11/7
import os
import pathlib
import torch
import torch.nn as nn
from module.bertology_encoder import BERTologyEncoder
class BaseModel(nn.Module):
    """Text classifier: a BERTology encoder followed by a two-layer MLP head."""

    def __init__(self, args):
        super().__init__()
        # Only transformer-style encoders handled by BERTologyEncoder are
        # supported; anything else is rejected up front.
        if args.encoder in ['bert', 'xlnet', 'xlm', 'roberta']:
            self.encoder = BERTologyEncoder(bertology_type=args.bertology_type,
                                            bertology_path=args.saved_model_path,
                                            bertology_output=args.bertology_output,
                                            )
        else:
            raise ValueError(f'Unsupported Encoder Type {args.encoder}')
        # Classification head: encoder output -> 128 -> class logits.
        self.classifier = nn.Sequential(
            nn.Linear(args.encoder_output_dim, 128),
            nn.ReLU(),
            nn.Linear(128, args.class_num),
        )
        # Only the head is re-initialized; the encoder keeps its own weights.
        self.classifier.apply(self.init_weights)

    def init_weights(self, module):
        """ Initialize the weights.

        Linear/Embedding weights ~ N(0, 0.02); Linear biases zeroed.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=0.02)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    def forward(self, inputs):
        """Encode *inputs* (a kwargs dict for the encoder) and classify."""
        assert isinstance(inputs, dict)
        encoder_output = self.encoder(**inputs)
        return self.classifier(encoder_output)

    def save_pretrained(self, save_directory, weight_file_name="pytorch_model.bin"):
        """ Save a model and its configuration file to a directory, so that it
        can be re-loaded using the `:func:`~transformers.PreTrainedModel.from_pretrained`` class method.
        """
        assert os.path.isdir(
            save_directory), "Saving path should be a directory where the model and configuration can be saved"
        # Only save the model it-self if we are using distributed training
        model_to_save = self.module if hasattr(self, 'module') else self
        # Save configuration file
        # NOTE(review): BaseModel defines no `config` attribute in this file;
        # unless a subclass sets one, this line raises AttributeError — confirm.
        model_to_save.config.save_pretrained(save_directory)
        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(save_directory, weight_file_name)
        torch.save(model_to_save.state_dict(), output_model_file)
        print("Model weights saved in {}".format(output_model_file))

    @classmethod
    def from_pretrained(cls, args, saved_model_path=None, weight_file_name="pytorch_model.bin",
                        initialize_from_bertology=False):
        """
        Load a model from a saved checkpoint.

        Note: training BERT's LM and Next-Sentence objectives is not supported here.
        :param args: namespace with encoder/classifier hyper-parameters
        :param saved_model_path: overrides args.saved_model_path when given
        :param weight_file_name: checkpoint file name inside the directory
        :param initialize_from_bertology: rename keys from a raw BERTology
            checkpoint to match this model's module layout
        :return: the loaded model, set to eval mode
        """
        import re
        from collections import OrderedDict
        if saved_model_path:
            args.saved_model_path = saved_model_path
        model = cls(args)
        resolved_archive_file = pathlib.Path(args.saved_model_path) / weight_file_name
        assert resolved_archive_file.exists()
        state_dict = torch.load(str(resolved_archive_file), map_location='cpu')
        # Convert old format to new format if needed from a PyTorch state_dict
        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if 'gamma' in key:
                new_key = key.replace('gamma', 'weight')
            if 'beta' in key:
                new_key = key.replace('beta', 'bias')
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)
        # If loading from a BERTology pretrained checkpoint, parameter names
        # must be rewritten to match this model's architecture.
        if initialize_from_bertology:
            rename_state_dict = OrderedDict()
            for key in state_dict.keys():
                # '^bert' matches parameters whose names start with "bert";
                # only BERT-style models are supported for now (XLNet etc.
                # still need to be added).
                rename_key = re.sub('^bert.', 'encoder.bertology.', key)
                rename_state_dict[rename_key] = state_dict[key]
            state_dict = rename_state_dict
        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, '_metadata', None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata

        def load(module, prefix=''):
            # Recursively load parameters, collecting mismatch diagnostics.
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
            module._load_from_state_dict(
                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + '.')
        load(model)
        if len(missing_keys) > 0:
            print("Weights of {} not initialized from pretrained model: {}".format(
                model.__class__.__name__, missing_keys))
        if len(unexpected_keys) > 0:
            print("Weights from pretrained model not used in {}: {}".format(
                model.__class__.__name__, unexpected_keys))
        if len(error_msgs) > 0:
            raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
                model.__class__.__name__, "\n\t".join(error_msgs)))
        model.eval()
        return model
# Entry-point placeholder: this module is meant to be imported, not run.
if __name__ == '__main__':
    pass
|
__author__ = '<NAME>'
import types
import ast
from ast_tool_box.views.editor_widget import EditorPane
from ast_tool_box.views.search_widget import SearchLineEdit
from ast_tool_box.models.transform_models.transform_file import AstTransformItem, CodeGeneratorItem
from PySide import QtGui, QtCore
DEBUGGING = False
class TransformTreeWidgetItem(QtGui.QTreeWidgetItem):
    """Tree row that remembers the transform object it displays.

    Connects a GUI tree item with the corresponding node in the actual
    AST tree.
    """

    def __init__(self, parent, name=None, source=None):
        super(TransformTreeWidgetItem, self).__init__(parent)
        # Remember the display name and the backing model object.
        self.source = source
        self.name = name

    def picked(self):
        # Debug hook: report which item was selected.
        print("got selected %s" % self.name)
class TransformTreeWidget(QtGui.QTreeWidget):
"""
displays an ast as a tree widget
"""
COL_NODE = 0
COL_FIELD = 1
COL_CLASS = 2
COL_VALUE = 3
COL_POS = 4
COL_HIGHLIGHT = 5
expand_all_at_create = True
def __init__(self, transform_presenter=None, transform_pane=None):
super(TransformTreeWidget, self).__init__()
self.transform_presenter = transform_presenter
self.transform_pane = transform_pane
self.setColumnCount(2)
self.setHeaderLabels(["Transforms"])
self.header().resizeSection(TransformTreeWidget.COL_NODE, 800)
self.header().setStretchLastSection(True)
self.transform_signal = QtCore.Signal(int)
self.expand_descendants_action = QtGui.QAction(
"&Expand all children",
self,
statusTip="Expand all descendant nodes",
triggered=self.expand_descendants
)
self.itemClicked.connect(self.clicked)
self.itemDoubleClicked.connect(self.double_clicked)
@QtCore.Slot(TransformTreeWidgetItem)
def clicked(self, item):
print("click %s" % item)
self.transform_pane.load_editor_from(item)
@QtCore.Slot(TransformTreeWidgetItem)
def double_clicked(self, info):
print("doubleclick on %s" % info)
print("doubleclick on %s" % self.currentItem())
print("comparing to %s" % AstTransformItem)
print("comparing to %s" % AstTransformItem)
if isinstance(self.currentItem().source, AstTransformItem) or\
isinstance(self.currentItem().source, CodeGeneratorItem):
self.transform_presenter.apply_current_transform()
else:
self.transform_pane.show_error("Only works for Ast Transforms and Code Generators")
def contextMenuEvent(self, event):
menu = QtGui.QMenu(self)
menu.addAction(self.expand_descendants_action)
sub_menu = QtGui.QMenu(self)
sub_menu.setTitle("Available transformers")
for transform_item in self.transform_presenter.transform_items():
sub_menu_action = TransformerAction(transform_item=transform_item, ast_tree_widget=self)
sub_menu.addAction(sub_menu_action)
menu.addMenu(sub_menu)
menu.exec_(event.globalPos())
def transform_current_ast(self, name):
transformer = self.ast_transformers.get_instance_by_name(name)
self.main_window.add_tree_tab(transformer=transformer)
def expand_descendants(self, item=None):
"""Expand all descendants of the current item"""
if item is None:
print("item is none")
item = self.currentItem()
print("item is %s" % item)
item.setExpanded(True)
for child_index in range(item.childCount()):
self.expand_descendants(item.child(child_index))
def collapse_descendants(self, item=None):
"""Expand all descendants of the current item"""
if item is None:
item = self.currentItem()
item.setExpanded(False)
for child_index in range(item.childCount()):
self.collapse_descendants(item.child(child_index))
def rebuild(self, transform_file):
file_node = None
for index in range(self.topLevelItemCount()):
wi = self.topLevelItem(index)
if wi.source is transform_file:
file_node = wi
break
if not file_node:
print("Could not find %s" % transform_file)
def remove_children(node):
for child_index in xrange(node.childCount()-1, -1, -1):
print("removing child %d from node %s" % (child_index, node))
remove_children(node.child(child_index))
node.takeChild(child_index)
remove_children(file_node)
self.build_children(transform_file, file_node)
self.expandToDepth(100)
def build_children(self, transform_file, file_node):
first_node = None
if len(transform_file.node_transforms) > 0:
transforms_node = TransformTreeWidgetItem(file_node)
transforms_node.setText(
TransformTreeWidget.COL_NODE,
"ast.NodeTransformer : (%d)" % len(transform_file.node_transforms)
)
for transform in transform_file.node_transforms:
transform_node = TransformTreeWidgetItem(transforms_node, name=transform.name, source=transform)
if not first_node:
first_node = transform_node
transform_node.setText(TransformTreeWidget.COL_NODE, transform.name())
# print("loaded transform to tree %s" % transform.name)
transform_node.setToolTip(TransformTreeWidget.COL_NODE, transform.doc)
else:
if transform_file.load_error_info:
first_node = file_node
if len(transform_file.code_generators) > 0:
code_generators_node = TransformTreeWidgetItem(file_node)
code_generators_node.setText(
TransformTreeWidget.COL_NODE,
"ctree.CodeGenVisitor : (%d)" % len(transform_file.code_generators)
)
print("%d code_generators" % len(transform_file.code_generators))
for code_generator in transform_file.code_generators:
code_generator_node = TransformTreeWidgetItem(
code_generators_node,
name=code_generator.name,
source=code_generator
)
if not first_node:
first_node = code_generator_node
code_generator_node.setText(TransformTreeWidget.COL_NODE, code_generator.name())
code_generator_node.setToolTip(TransformTreeWidget.COL_NODE, code_generator.doc)
return first_node
def build(self, transform_files):
    """Rebuild the entire tree from *transform_files*.

    Creates one top-level item per file, fills in its children via
    build_children(), expands everything, and selects the first leaf so
    the editor pane shows something immediately.
    """
    self.clear()
    first_selectable = None
    for tf in transform_files:
        top_item = TransformTreeWidgetItem(self, name=tf.base_name, source=tf)
        label = "%s (%s)" % (tf.base_name, tf.package_name)
        top_item.setText(TransformTreeWidget.COL_NODE, label)
        top_item.setToolTip(TransformTreeWidget.COL_NODE, tf.path)
        child = self.build_children(tf, top_item)
        if not first_selectable:
            first_selectable = child
    self.expandToDepth(100)
    if first_selectable:
        self.setCurrentItem(first_selectable)
        self.transform_pane.load_editor_from(self.currentItem())
class TransformerAction(QtGui.QAction):
    """Menu action that applies one transform to the currently selected AST node."""

    def __init__(self, transform_item, ast_tree_widget, **kwargs):
        super(TransformerAction, self).__init__(transform_item.name(), ast_tree_widget, **kwargs)
        # Keep references so do_transform() can reach both the transform
        # and the tree widget it operates on.
        self.transform_item = transform_item
        self.ast_tree_widget = ast_tree_widget
        self.text = transform_item.name()
        self.triggered.connect(self.do_transform)

    def do_transform(self):
        """Apply the wrapped transform to the tree widget's current item."""
        print("Triggered with string %s" % self.text)
        widget = self.ast_tree_widget
        widget.transform_presenter.apply_transform(
            code_item=widget.currentItem().ast_node,
            transform_item=self.transform_item
        )
|
<gh_stars>1-10
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
''' Variational Autoencoder based tumor subpopulation detection
author: <NAME>
'''
import sys
import numpy as np
import matplotlib.pyplot as plt
import scipy
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from sklearn import mixture
from keras.layers import Input, Dense, Lambda, Layer
from keras.models import Model
from keras import backend as K
from keras import metrics, optimizers
from mpl_toolkits.mplot3d import Axes3D
import operator
def my_except_hook(exctype, value, traceback):
    """Top-level exception hook: announce the failure, then delegate.

    The original hook printed only a generic message and discarded the
    exception type, value and traceback entirely, making failures
    impossible to debug. Delegating to ``sys.__excepthook__`` preserves
    the standard traceback output on stderr.
    """
    print('There has been an error in the system')
    sys.__excepthook__(exctype, value, traceback)
sys.excepthook = my_except_hook
def main(input_datafile='oligo_malignant.txt',latent_dim=3,
         N_starts=5,batch_size=100,learning_rate=.0001, epochs = 5,
         clip_norm=2,output_datafile='output',to_cluster= 1,n_genes=5000,
         gene_selection=0,selection_criteria='average',to_plot=1,verbose=0,
         relative_expression=0):
    """Train a VAE on an expression matrix and detect tumor subpopulations.

    Loads ``input_datafile`` (whitespace-delimited text; rows are cells,
    columns are genes), optionally keeps the top ``n_genes`` informative
    genes, trains ``N_starts`` variational autoencoders and keeps the
    embedding whose KMeans clustering has the best silhouette score, then
    (optionally) clusters that embedding with a BIC-selected Gaussian
    mixture. Figures and text results are written with the
    ``output_datafile`` prefix.

    :param input_datafile: path to the text expression matrix
    :param latent_dim: VAE latent dimensionality (int, 1..256)
    :param N_starts: number of independent training restarts
    :param batch_size: training batch size (must not exceed cell count)
    :param learning_rate: RMSprop learning rate
    :param epochs: training epochs per restart
    :param clip_norm: gradient clipping norm for RMSprop
    :param output_datafile: prefix for all output files/figures
    :param to_cluster: if truthy, fit a GMM on the embedding and save labels
    :param n_genes: genes kept when gene_selection is enabled
    :param gene_selection: if truthy, select genes by selection_criteria
    :param selection_criteria: 'average', 'cv' or 'entropy'
    :param to_plot: if truthy, save projection/cluster figures
    :param verbose: Keras fit verbosity
    :param relative_expression: if truthy, center each cell by its mean
    :raises TypeError/ValueError/Warning: on out-of-range parameters
    """
    # read datafile
    x_t=np.loadtxt(input_datafile)
    # dict=scipy.io.loadmat(input_datafile)
    # x_t=dict['syn_expr'] # input expression matrix # of cells * # of genes
    x_t[np.isnan(x_t)]=0  # treat missing entries as zero expression
    orig_size=x_t.shape  # (n_cells, n_genes) before any gene selection
    if orig_size[1]<1000:
        print('Number of genes too small')
    ## check input parameters
    # Latent dim
    if not (type(latent_dim) is int):
        raise TypeError('Latent dimensions must be an integer')
    if latent_dim<1:
        raise ValueError('Latent dimensions should be atleast 1')
    elif latent_dim>256:
        raise ValueError('Latent dimensions should be less than 256')
    # N_starts
    if not (type(N_starts) is int):
        raise TypeError('Number of warm starts must be an integer')
    elif N_starts<1:
        raise ValueError('Number of warm starts must be more than 1')
    elif N_starts>50:
        raise Warning('Number of warm starts more than 50. Should take a long time to run.')
    # batch_size
    if not (type(batch_size) is int):
        raise TypeError('Batch size must be an integer')
    elif batch_size==0:
        raise ValueError('Batch size should not be zero')
    elif batch_size>orig_size[0]:
        raise ValueError('Batch size should not be larger than the total number of cells')
    # n_genes
    if not (type(n_genes) is int):
        raise TypeError('Number of genes must be an integer')
    elif n_genes<1000:
        print('Number of genes too small, Encoding might not be optimal')
    elif n_genes>orig_size[1]:
        n_genes=orig_size[1]
        print('Using all the genes in the dataset')
    #epochs
    if not (type(epochs) is int):
        raise TypeError('Number of epochs must be an integer')
    elif epochs<1:
        raise ValueError('Number of epochs should be atleast 0')
    elif epochs>100:
        print('Very large number of epochs, training should take a lot of time')
    # output_datafile
    if not (type(output_datafile) is str):
        raise TypeError('Output datafile name should be a string')
    if learning_rate>.1:
        print('Learning rate too high')
    if clip_norm>10:
        print('Clip norm too high')
    # gene selection
    if gene_selection:
        # Score every gene by three criteria; keep the top n_genes by the
        # requested one.
        a=np.zeros((orig_size[1])) #[0 for i in range(size[1])]
        cv=np.zeros((orig_size[1]))
        en=np.zeros((orig_size[1]))
        for i in range(0,orig_size[1]):
            cv[i]=np.std(x_t[:,i])/np.mean(x_t[:,i]) # CV criteria
            a[i]=sum(x_t[:,i]) # average value
            hist, bin_edges=np.histogram(x_t[:,i],bins=100)
            pk=hist/sum(hist)
            en[i]=scipy.stats.entropy(pk) # entropy
        if selection_criteria=='average':
            sorted_indices=sorted(range(len(a)), key=lambda k: a[k],reverse=True)
        elif selection_criteria == 'cv':
            sorted_indices=sorted(range(len(cv)), key=lambda k: cv[k],reverse=True)
        elif selection_criteria == 'entropy':
            sorted_indices=sorted(range(len(en)), key=lambda k: en[k],reverse=True)
        else:
            raise ValueError('Not a valid selection criteria, Refer to the readme file for valid selection criteria')
        x_t=x_t[:,sorted_indices[0:min(n_genes,orig_size[1])]]
    if relative_expression:
        # Center each cell by its mean expression across the kept genes.
        y=np.mean(x_t,axis=1)
        if gene_selection:
            x_t=x_t-np.tile(y,(n_genes,1)).transpose()
        else:
            x_t=x_t-np.tile(y,(orig_size[1],1)).transpose()
    x_train=x_t
    # pad end cells for being compatible with batch size
    reminder=orig_size[0]%batch_size
    x_train=np.concatenate((x_train,x_train[(orig_size[0]-batch_size+reminder):orig_size[0],:]),axis=0)
    size=x_train.shape  # padded shape; padding rows are dropped again below
    # internal parameters
    original_dim = size[1]
    epsilon_std = 1.0
    n_clusters=6  # fixed KMeans k used only for silhouette-based model selection
    intermediate_deep_dim=1024
    intermediate_deep_dim2=512
    intermediate_dim = 256
    color_iter = ['navy', 'turquoise', 'cornflowerblue','darkorange','mistyrose','seagreen','hotpink','purple','thistle','darkslategray']
    # required initializations
    silhouette_avg=np.zeros((N_starts))#[0 for i in range(N_starts)]
    all_x_encoded = np.zeros((N_starts,size[0],latent_dim))#np.asarray([[[0 for k in range(latent_dim)] for j in range(size[0])] for i in range(N_starts)])
    all_x_encoded = all_x_encoded.astype(float)
    def sampling(args):
        # Reparameterization trick: z = mean + sigma * eps, eps ~ N(0, epsilon_std).
        z_mean, z_log_var = args
        epsilon = K.random_normal(shape=(batch_size, latent_dim), mean=0.,
                                  stddev=epsilon_std)
        return z_mean + K.exp(z_log_var / 2) * epsilon
    # Custom loss layer
    class CustomVariationalLayer(Layer):
        # Identity layer whose only job is to attach the VAE loss
        # (reconstruction cross-entropy + KL divergence). vae_loss closes
        # over the z_mean/z_log_var locals assigned in the training loop
        # below, so it must only be called after a model has been built.
        def __init__(self, **kwargs):
            self.is_placeholder = True
            super(CustomVariationalLayer, self).__init__(**kwargs)
        def vae_loss(self, x, x_decoded_mean):
            xent_loss = original_dim * metrics.binary_crossentropy(x, x_decoded_mean)
            kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
            return K.mean(xent_loss + kl_loss)
        def call(self, inputs):
            x = inputs[0]
            x_decoded_mean = inputs[1]
            loss = self.vae_loss(x, x_decoded_mean)
            self.add_loss(loss, inputs=inputs)
            return x
    for i in range(0,N_starts):
        # Build and train a fresh VAE for each warm start.
        x = Input(batch_shape=(batch_size, original_dim))
        e = Dense(intermediate_deep_dim, activation = 'relu')(x)
        d= Dense(intermediate_deep_dim2, activation ='relu')(e)
        h = Dense(intermediate_dim, activation='relu')(d)
        z_mean = Dense(latent_dim)(h)
        z_log_var = Dense(latent_dim)(h)
        # note that "output_shape" isn't necessary with the TensorFlow backend
        z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])
        # we instantiate these layers separately so as to reuse them later
        decoder_h = Dense(intermediate_dim, activation='relu')
        decoder_d = Dense(intermediate_deep_dim2, activation ='relu')
        decoder_e = Dense(intermediate_deep_dim, activation = 'relu')
        decoder_mean = Dense(original_dim, activation='sigmoid')
        h_decoded = decoder_h(z)
        d_decoded = decoder_d(h_decoded)
        e_decoded = decoder_e(d_decoded)
        x_decoded_mean = decoder_mean(e_decoded)
        y = CustomVariationalLayer()([x, x_decoded_mean])
        vae = Model(x, y)
        rmsprop = optimizers.rmsprop(lr=learning_rate,clipnorm=clip_norm)
        vae.compile(optimizer=rmsprop, loss=None)
        vae.fit(x_train,
                shuffle=True,
                epochs=epochs,
                batch_size=batch_size,
                verbose=verbose)
        # build a model to project inputs on the latent space
        encoder = Model(x, z_mean)
        x_encoded = encoder.predict(x_train, batch_size=batch_size)
        if np.isnan(x_encoded).any():
            # x_encoded=np.asarray([[0 for j in range(latent_dim)] for i in range(size[0])])
            silhouette_avg[i]=0  # diverged run: never selected as the best start
        else:
            clusterer = KMeans(n_clusters=n_clusters, random_state=10)
            cluster_labels = clusterer.fit_predict(x_encoded)
            silhouette_avg[i] = silhouette_score(x_encoded, cluster_labels)
        # NOTE(review): the embedding is stored even for diverged (NaN)
        # runs, which is why the NaN re-check on the selected embedding
        # below is needed — confirm this ordering is intended.
        all_x_encoded[i][:][:]=x_encoded
    # Keep the warm start with the best silhouette score.
    index, value = max(enumerate(silhouette_avg), key=operator.itemgetter(1))
    x_encoded_final=all_x_encoded[index][:][:]
    x_encoded_final=x_encoded_final[0:orig_size[0],:]  # drop the batch-padding cells
    if np.isnan(x_encoded_final).any():
        print(np.isnan(x_encoded_final).any())
        raise Warning('NaNs, check input, learning rate, clip_norm parameters')
    if to_plot:
        # Projection figure; plot style depends on the latent dimensionality.
        if latent_dim>=3:
            fig=plt.figure(figsize=(6, 6))
            ax3D = fig.add_subplot(111, projection='3d')
            ax3D.scatter(x_encoded_final[:, 0], x_encoded_final[:, 1], x_encoded_final[:, 2])
            ax3D.set_xlabel('Latent dim 1')
            ax3D.set_ylabel('Latent dim 2')
            ax3D.set_zlabel('Latent dim 3')
            plt.savefig(output_datafile+'fig_projection.png')
        elif latent_dim==2:
            fig=plt.figure(figsize=(6, 6))
            plt.scatter(x_encoded_final[:, 0], x_encoded_final[:, 1])
            plt.xlabel('Latent dim 1')
            plt.ylabel('Latent dim 2')
            plt.savefig(output_datafile+'fig_projection.png')
        elif latent_dim==1:
            n_range = range(0, orig_size[0])
            fig=plt.figure(figsize=(6, 6))
            plt.plot(n_range,x_encoded_final)
            plt.xlabel('Cells')
            plt.ylabel('Latent dim 1')
            plt.savefig(output_datafile+'fig_projection.png')
    if to_cluster:
        # Choose the GMM component count by BIC (with an extra penalty term).
        n_components_range = range(1, 10)
        bic = []
        for n_components in n_components_range:
            gmm = mixture.GaussianMixture(n_components=n_components, covariance_type='tied',n_init=10)
            gmm.fit(x_encoded_final)
            bic.append(gmm.bic(x_encoded_final))
        bic = np.array(bic)+np.log(size[0])*n_components_range*latent_dim
        ind,val=min(enumerate(bic), key=operator.itemgetter(1))
        if to_plot:
            fig=plt.figure(figsize=(6, 6))
            plt.plot(n_components_range,bic)
            plt.xlabel('Number of clusters')
            plt.ylabel('BIC')
            plt.savefig(output_datafile+'fig_bic.png')
        # Refit with the BIC-optimal component count and label every cell.
        gmm = mixture.GaussianMixture(n_components=ind+1, covariance_type='tied')
        gmm.fit(x_encoded_final)
        labels=gmm.predict(x_encoded_final)
        if to_plot:
            if latent_dim>=3:
                fig=plt.figure()
                ax3D = fig.add_subplot(111, axisbg="1.0",projection='3d')
                for i in range(0,labels.max()+1):
                    ax3D.scatter(x_encoded_final[labels==i, 0], x_encoded_final[labels==i, 1], x_encoded_final[labels==i, 2],alpha=1, color=color_iter[i])
                ax3D.set_xlabel('Latent dim 1')
                ax3D.set_ylabel('Latent dim 2')
                ax3D.set_zlabel('Latent dim 3')
                plt.savefig(output_datafile+'fig_cluster.png')
            elif latent_dim==2:
                fig=plt.figure()
                for i in range(0,labels.max()+1):
                    plt.scatter(x_encoded_final[labels==i, 0], x_encoded_final[labels==i, 1],alpha=1, color=color_iter[i])
                plt.xlabel('Latent dim 1')
                plt.ylabel('Latent dim 2')
                plt.savefig(output_datafile+'fig_cluster.png')
        #scipy.io.savemat(output_datafile+'.mat', {'vect':x_encoded_final,'labels':labels,'bic':bic})
        np.savetxt(output_datafile+'labels.txt',labels)
        np.savetxt(output_datafile+'bic.txt',bic)
    #else:
    #scipy.io.savemat(output_datafile+'.mat', {'vect':x_encoded_final})
    np.savetxt(output_datafile+'.txt',x_encoded_final)
|
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from pycocotools import mask as maskUtils
import mmcv
import argparse
from mmdet.datasets import build_dataloader, build_dataset
import os
from mmcv.runner import load_checkpoint
from mmdet.models import build_detector
from mmcv.parallel import MMDataParallel
from tools.test import single_gpu_test
import time
import copy
import numpy as np
import datetime
import shutil
import itertools
from terminaltables import AsciiTable
import matplotlib.pyplot as plt
def computeIoU(self, imgId, catId):
    """Return the IoU matrix between detections and ground truth for one
    (image, category) pair, as computed by ``maskUtils.iou``.

    Detections are sorted highest-score-first before the IoU call, and an
    empty list is returned when the cell has neither gt nor detections.
    """
    p = self.params
    if p.useCats:
        gts = self._gts[imgId, catId]
        dts = self._dts[imgId, catId]
    else:
        gts = [g for cId in p.catIds for g in self._gts[imgId, cId]]
        dts = [d for cId in p.catIds for d in self._dts[imgId, cId]]
    if not gts and not dts:
        return []
    # Stable sort (mergesort) keeps pycocotools-compatible ordering.
    order = np.argsort([-d['score'] for d in dts], kind='mergesort')
    dts = [dts[i] for i in order]
    if p.iouType == 'segm':
        gt_regions = [g['segmentation'] for g in gts]
        dt_regions = [d['segmentation'] for d in dts]
    elif p.iouType == 'bbox':
        gt_regions = [g['bbox'] for g in gts]
        dt_regions = [d['bbox'] for d in dts]
    else:
        raise Exception('unknown iouType for iou computation')
    # compute iou between each dt and gt region
    iscrowd = [int(g['iscrowd']) for g in gts]
    return maskUtils.iou(dt_regions, gt_regions, iscrowd)
def evaluateImg(self, imgId, catId, aRng, score, prog_bar):
    # Match detections to ground truth for one (image, category, areaRng)
    # cell. Adapted from pycocotools COCOeval.evaluateImg with two local
    # modifications:
    #   * gt boxes are ignored based on their area AFTER rescaling to the
    #     network input size (args.mode: 'global' -> 800, 'local' -> 3000)
    #     instead of the raw annotation area;
    #   * detections with score <= `score` are discarded before matching.
    # Returns None when the cell is empty, otherwise a result dict in the
    # pycocotools evalImgs format.
    # NOTE(review): reads a module-level `args` (argparse namespace) —
    # confirm it is initialized before evaluation runs.
    p = self.params
    gts = self.cocoGt
    images = gts.imgs
    img_width = images[imgId]['width']
    img_height = images[imgId]['height']
    prog_bar.update()
    if p.useCats:
        gt = self._gts[imgId, catId]
        dt = self._dts[imgId, catId]
    else:
        gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]]
        dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]
    if len(gt) == 0 and len(dt) == 0:
        return None
    ################################### eval at resized image ###################################
    for g in gt:
        bbox_width = g['bbox'][2]
        bbox_heigth = g['bbox'][3]
        if args.mode == 'global':
            resized_width = bbox_width * (800 / img_width)
            resized_heigth = bbox_heigth * (800 / img_height)
        elif args.mode == 'local':
            resized_width = bbox_width * (3000 / img_width)
            resized_heigth = bbox_heigth * (3000 / img_height)
        else:
            raise ValueError('Wrong mode')
        resized_area = resized_width * resized_heigth
        # Ignore gt outside the (resized) area range or explicitly flagged.
        if g['ignore'] or (resized_area < aRng[0] or resized_area > aRng[1]):
            g['_ignore'] = 1
        else:
            g['_ignore'] = 0
    ################################### eval at original image ###################################
    # for g in gt:
    #     if g['ignore'] or (g['area'] < aRng[0] or g['area'] > aRng[1]):
    #         g['_ignore'] = 1
    #     else:
    #         g['_ignore'] = 0
    # ############################################################################################
    # sort dt highest score first, sort gt ignore last
    gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')
    gt = [gt[i] for i in gtind]
    #################################################################################
    dt = [d for d in dt if d['score'] > score]  # score-threshold filter (local change)
    #################################################################################
    dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')
    dt = [dt[i] for i in dtind[0:p.maxDets[-1]]]
    iscrowd = [int(o['iscrowd']) for o in gt]
    # load computed ious
    ious = self.ious[imgId, catId][:, gtind] if len(self.ious[imgId, catId]) > 0 else self.ious[imgId, catId]
    T = len(p.iouThrs)
    G = len(gt)
    D = len(dt)
    gtm = np.zeros((T, G))
    dtm = np.zeros((T, D))
    gtIg = np.array([g['_ignore'] for g in gt])
    dtIg = np.zeros((T, D))
    if not len(ious) == 0:
        # Greedy matching: for each IoU threshold, assign each detection
        # (best score first) to its best still-unmatched gt.
        for tind, t in enumerate(p.iouThrs):
            for dind, d in enumerate(dt):
                # information about best match so far (m=-1 -> unmatched)
                iou = min([t, 1 - 1e-10])
                m = -1
                for gind, g in enumerate(gt):
                    # if this gt already matched, and not a crowd, continue
                    if gtm[tind, gind] > 0 and not iscrowd[gind]:
                        continue
                    # if dt matched to reg gt, and on ignore gt, stop
                    if m > -1 and gtIg[m] == 0 and gtIg[gind] == 1:
                        break
                    # continue to next gt unless better match made
                    if ious[dind, gind] < iou:
                        continue
                    # if match successful and best so far, store appropriately
                    iou = ious[dind, gind]
                    m = gind
                # if match made store id of match for both dt and gt
                if m == -1:
                    continue
                dtIg[tind, dind] = gtIg[m]
                dtm[tind, dind] = gt[m]['id']
                gtm[tind, m] = d['id']
    # set unmatched detections outside of area range to ignore
    a = np.array([d['area'] < aRng[0] or d['area'] > aRng[1] for d in dt]).reshape((1, len(dt)))
    dtIg = np.logical_or(dtIg, np.logical_and(dtm == 0, np.repeat(a, T, 0)))
    # store results for given image and category
    return {
        'image_id': imgId,
        'category_id': catId,
        'aRng': aRng,
        'score': score,
        'maxDet': p.maxDets[-1], # 100000
        'dtIds': [d['id'] for d in dt],
        'gtIds': [g['id'] for g in gt],
        'dtMatches': dtm,
        'gtMatches': gtm,
        'dtScores': [d['score'] for d in dt],
        'gtIgnore': gtIg,
        'dtIgnore': dtIg,
    }
def coco_evaluate(self):
    """Run per-image evaluation over all (image, category, areaRng) cells.

    Mirrors pycocotools ``COCOeval.evaluate`` but calls the local
    computeIoU/evaluateImg variants and drives an mmcv progress bar.
    Populates ``self.ious``, ``self.evalImgs`` and ``self._paramsEval``.
    """
    tic = time.time()
    print('Running per image evaluation...')
    p = self.params
    # add backward compatibility if useSegm is specified in params
    print('Evaluate annotation type *{}*'.format(p.iouType))
    p.imgIds = list(np.unique(p.imgIds))
    if p.useCats:
        p.catIds = list(np.unique(p.catIds))
    self.params = p
    self._prepare()
    # loop through images, area range, max detection number
    cat_ids = p.catIds if p.useCats else [-1]
    self.ious = {}
    for img_id in p.imgIds:
        for cat_id in cat_ids:
            self.ious[img_id, cat_id] = computeIoU(self, img_id, cat_id)
    ######################################################################################
    total_cells = len(self.params.imgIds) * len(self.params.areaRng) * len(self.params.catIds)
    prog_bar = mmcv.ProgressBar(total_cells)
    ######################################################################################
    self.evalImgs = []
    for cat_id in cat_ids:
        for area_rng in p.areaRng:
            for img_id in p.imgIds:
                self.evalImgs.append(
                    evaluateImg(self, img_id, cat_id, area_rng, p.scoceThrs, prog_bar))
    self._paramsEval = copy.deepcopy(self.params)
    toc = time.time()
    print('DONE (t={:0.2f}s).'.format(toc - tic))
def coco_accumulate(self, p=None):
    """Accumulate per-image results into precision/recall/score tables.

    Adapted from pycocotools ``COCOeval.accumulate``; additionally records
    the raw (un-interpolated) mean precision of every processed
    (iouThr, category, areaRng, maxDet) cell in ``self.eval['Pr']``.

    :param p: optional custom params; defaults to ``self.params``
    """
    print('Accumulating evaluation results...')
    tic = time.time()
    if not self.evalImgs:
        print('Please run evaluate() first')
    # allows input customized parameters
    if p is None:
        p = self.params
    p.catIds = p.catIds if p.useCats == 1 else [-1]
    T = len(p.iouThrs)
    R = len(p.recThrs)
    K = len(p.catIds) if p.useCats else 1
    A = len(p.areaRng)
    M = len(p.maxDets)
    precision = -np.ones((T, R, K, A, M)) # -1 for the precision of absent categories
    recall = -np.ones((T, K, A, M))
    scores = -np.ones((T, R, K, A, M))
    Pre = []  # raw mean precision per cell (local extension over pycocotools)
    # create dictionary for future indexing
    _pe = self._paramsEval
    catIds = _pe.catIds if _pe.useCats else [-1]
    setK = set(catIds)
    setA = set(map(tuple, _pe.areaRng))
    setM = set(_pe.maxDets)
    setI = set(_pe.imgIds)
    # get inds to evaluate
    k_list = [n for n, k in enumerate(p.catIds) if k in setK]
    m_list = [m for n, m in enumerate(p.maxDets) if m in setM]
    a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]
    i_list = [n for n, i in enumerate(p.imgIds) if i in setI]
    I0 = len(_pe.imgIds)
    A0 = len(_pe.areaRng)
    # retrieve E at each category, area range, and max number of detections
    for k, k0 in enumerate(k_list):
        Nk = k0 * A0 * I0  # offset of this category's slab in evalImgs
        for a, a0 in enumerate(a_list):
            Na = a0 * I0  # offset of this area range within the slab
            for m, maxDet in enumerate(m_list):
                E = [self.evalImgs[Nk + Na + i] for i in i_list]
                E = [e for e in E if not e is None]
                if len(E) == 0:
                    continue
                dtScores = np.concatenate([e['dtScores'][0:maxDet] for e in E])
                # different sorting method generates slightly different results.
                # mergesort is used to be consistent as Matlab implementation.
                inds = np.argsort(-dtScores, kind='mergesort')
                dtScoresSorted = dtScores[inds]
                dtm = np.concatenate([e['dtMatches'][:, 0:maxDet] for e in E], axis=1)[:, inds]
                dtIg = np.concatenate([e['dtIgnore'][:, 0:maxDet] for e in E], axis=1)[:, inds]
                gtIg = np.concatenate([e['gtIgnore'] for e in E])
                npig = np.count_nonzero(gtIg == 0)  # number of non-ignored gt
                if npig == 0:
                    continue
                tps = np.logical_and(dtm, np.logical_not(dtIg))
                fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg))
                tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float)
                fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float)
                for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):
                    tp = np.array(tp)
                    fp = np.array(fp)
                    nd = len(tp)
                    rc = tp / npig
                    pr = tp / (fp + tp + np.spacing(1))
                    Pre.append(np.mean(pr))  # recorded before interpolation
                    q = np.zeros((R,))
                    ss = np.zeros((R,))
                    if nd:
                        recall[t, k, a, m] = rc[-1]
                    else:
                        recall[t, k, a, m] = 0
                    # numpy is slow without cython optimization for accessing elements
                    # use python array gets significant speed improvement
                    pr = pr.tolist()
                    q = q.tolist()
                    # Make precision monotonically decreasing (right-to-left max).
                    for i in range(nd - 1, 0, -1):
                        if pr[i] > pr[i - 1]:
                            pr[i - 1] = pr[i]
                    inds = np.searchsorted(rc, p.recThrs, side='left')
                    try:
                        for ri, pi in enumerate(inds):
                            q[ri] = pr[pi]
                            ss[ri] = dtScoresSorted[pi]
                    except:
                        # pi may run past the end of pr for recall levels
                        # never reached; those entries keep their zeros.
                        pass
                    precision[t, :, k, a, m] = np.array(q)
                    scores[t, :, k, a, m] = np.array(ss)
    self.eval = {
        'params': p,
        'counts': [T, R, K, A, M],
        'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
        'precision': precision,
        'Pr': Pre,
        'recall': recall,
        'scores': scores,
    }
    toc = time.time()
    print('DONE (t={:0.2f}s).'.format(toc - tic))
def coco_summarize(self, args):
    """
    Compute and display summary metrics for evaluation results.
    Note this function can *only* be applied on the default parameter setting.
    Picks one of four report layouts based on how many iouThrs/areaRng
    values the params carry, writes the report to
    ``<args.work_dir>/result.txt`` and copies it into ``result/``.
    """
    def _summarize(ap=1, iouThr=None, areaRng='all', maxDets=100000, write_handle=None):
        # Print one summary line (AP=1 / AR=0 / PR=2) and optionally append
        # it to the open result file; returns the mean over the selected
        # slice of self.eval (-1 when the slice is empty).
        # NOTE(review): `p.scoceThrs` (sic) is the score threshold attribute
        # set by set_param elsewhere — the spelling must stay consistent.
        p = self.params
        iStr = ' {:<18} {} @[ IoU={:<9} | Score={:>4} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'
        if ap == 1:
            titleStr = 'Average Precision'
        elif ap == 0:
            titleStr = 'Average Recall'
        elif ap == 2:
            titleStr = 'Precision'
        else:
            raise NotImplementedError('ap should be 0,1,2')
        if ap == 1:
            typeStr = '(AP)'
        elif ap == 0:
            typeStr = '(AR)'
        elif ap == 2:
            typeStr = '(PR)'
        else:
            raise NotImplementedError('ap should be 0,1,2')
        iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \
            if iouThr is None else '{:0.2f}'.format(iouThr)
        aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
        mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
        if ap == 1:
            # dimension of precision: [TxRxKxAxM]
            s = self.eval['precision']
            # IoU
            if iouThr is not None:
                t = np.where(iouThr == p.iouThrs)[0]
                s = s[t]
            s = s[:, :, :, aind, mind]
        elif ap == 0:
            # dimension of recall: [TxKxAxM]
            s = self.eval['recall']
            if iouThr is not None:
                t = np.where(iouThr == p.iouThrs)[0]
                s = s[t]
            s = s[:, :, aind, mind]
        elif ap == 2:
            # 'Pr' is a flat list filled in (category, area, maxDet, iouThr)
            # order by coco_accumulate.
            s = self.eval['Pr']
            if iouThr is not None:
                t = np.where(iouThr == p.iouThrs)[0]
                ##################################################
                idx = aind[0] * len(p.iouThrs) + t[0]
                ##################################################
                s = s[idx]
                # NOTE(review): s is now a numpy scalar; the generic
                # `s[s > -1]` branch below does not support scalar
                # indexing — confirm this path works with the installed
                # numpy version.
        else:
            pass
        if isinstance(s, list): # for ap==2 ,to get average precision
            mean_s = np.mean(s)
        else:
            if len(s[s > -1]) == 0:
                mean_s = -1
            else:
                mean_s = np.mean(s[s > -1])
        print(iStr.format(titleStr, typeStr, iouStr, p.scoceThrs, areaRng, maxDets, mean_s))
        if write_handle is not None:
            write_handle.write(iStr.format(titleStr, typeStr, iouStr, p.scoceThrs, areaRng, maxDets, mean_s) + '\n')
        return mean_s
    def _summarizeDets(args):
        # Layout for 1 IoU threshold x 1 area range: AP/AR/PR at IoU=0.5.
        stats = np.zeros((3,))
        f = open(os.path.join(args.work_dir, 'result.txt'), 'a+')
        bare_name = os.path.basename(args.work_dir)
        f.write(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n')
        f.write('Detect with {}\n'.format(args.checkpoint))
        stats[0] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[-1], write_handle=f) # AP
        stats[1] = _summarize(0, iouThr=.5, maxDets=self.params.maxDets[-1], write_handle=f) # AR
        stats[2] = _summarize(2, iouThr=.5, maxDets=self.params.maxDets[-1], write_handle=f) # PR
        f.write('\n')
        f.close()
        print('Successfully Write Result File...')
        # for batter stored result file,create a dir to stored result,and it can be upload to github
        assert os.path.isfile(os.path.join(args.work_dir, 'result.txt'))
        out_dir = 'result'
        os.makedirs(out_dir, exist_ok=True)
        out_file_name = os.path.join(out_dir, '{}.txt'.format(bare_name))
        shutil.copy(os.path.join(args.work_dir, 'result.txt'), out_file_name)
        return stats
    def _summarizeDets2(args):
        # Layout for 10 IoU thresholds x 1 area range: IoU 0.5, 0.75 and
        # the full 0.5:0.95 average.
        stats = np.zeros((9,))
        f = open(os.path.join(args.work_dir, 'result.txt'), 'a+')
        bare_name = os.path.basename(args.work_dir)
        f.write(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n')
        f.write('Detect with {}\n'.format(args.checkpoint))
        stats[0] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[-1], write_handle=f)
        stats[1] = _summarize(0, iouThr=.5, maxDets=self.params.maxDets[-1], write_handle=f)
        stats[2] = _summarize(2, iouThr=.5, maxDets=self.params.maxDets[-1], write_handle=f)
        f.write(' ' + '-' * 94 + '\n')
        stats[3] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[-1], write_handle=f)
        stats[4] = _summarize(0, iouThr=.75, maxDets=self.params.maxDets[-1], write_handle=f)
        stats[5] = _summarize(2, iouThr=.75, maxDets=self.params.maxDets[-1], write_handle=f)
        f.write(' ' + '-' * 94 + '\n')
        stats[6] = _summarize(1, maxDets=self.params.maxDets[-1], write_handle=f)
        stats[7] = _summarize(0, maxDets=self.params.maxDets[-1], write_handle=f)
        stats[8] = _summarize(2, maxDets=self.params.maxDets[-1], write_handle=f)
        f.write('\n')
        f.close()
        print('Successfully Write Result File...')
        # for batter stored result file,create a dir to stored result,and it can be upload to github
        assert os.path.isfile(os.path.join(args.work_dir, 'result.txt'))
        out_dir = 'result'
        os.makedirs(out_dir, exist_ok=True)
        out_file_name = os.path.join(out_dir, '{}.txt'.format(bare_name))
        shutil.copy(os.path.join(args.work_dir, 'result.txt'), out_file_name)
        return stats
    def _summarizeDets3(args):
        # Layout for 1 IoU threshold x 4 area ranges: IoU=0.5 overall plus
        # small/medium/large breakdowns.
        stats = np.zeros((12,))
        f = open(os.path.join(args.work_dir, 'result.txt'), 'a+')
        bare_name = os.path.basename(args.work_dir)
        f.write(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n')
        f.write('Detect with {}\n'.format(args.checkpoint))
        stats[0] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[-1], write_handle=f) # AP
        stats[1] = _summarize(0, iouThr=.5, maxDets=self.params.maxDets[-1], write_handle=f) # AR
        stats[2] = _summarize(2, iouThr=.5, maxDets=self.params.maxDets[-1], write_handle=f) # PR
        f.write(' ' + '-' * 94 + '\n')
        stats[3] = _summarize(1, iouThr=.5, areaRng='small', maxDets=self.params.maxDets[-1], write_handle=f)
        stats[4] = _summarize(0, iouThr=.5, areaRng='small', maxDets=self.params.maxDets[-1], write_handle=f)
        stats[5] = _summarize(2, iouThr=.5, areaRng='small', maxDets=self.params.maxDets[-1], write_handle=f)
        f.write(' ' + '-' * 94 + '\n')
        stats[6] = _summarize(1, iouThr=.5, areaRng='medium', maxDets=self.params.maxDets[-1], write_handle=f)
        stats[7] = _summarize(0, iouThr=.5, areaRng='medium', maxDets=self.params.maxDets[-1], write_handle=f)
        stats[8] = _summarize(2, iouThr=.5, areaRng='medium', maxDets=self.params.maxDets[-1], write_handle=f)
        f.write(' ' + '-' * 94 + '\n')
        stats[9] = _summarize(1, iouThr=.5, areaRng='large', maxDets=self.params.maxDets[-1], write_handle=f)
        stats[10] = _summarize(0, iouThr=.5, areaRng='large', maxDets=self.params.maxDets[-1], write_handle=f)
        stats[11] = _summarize(2, iouThr=.5, areaRng='large', maxDets=self.params.maxDets[-1], write_handle=f)
        f.write('\n')
        f.close()
        print('Successfully Write Result File...')
        # for batter stored result file,create a dir to stored result,and it can be upload to github
        assert os.path.isfile(os.path.join(args.work_dir, 'result.txt'))
        out_dir = 'result'
        os.makedirs(out_dir, exist_ok=True)
        out_file_name = os.path.join(out_dir, '{}.txt'.format(bare_name))
        shutil.copy(os.path.join(args.work_dir, 'result.txt'), out_file_name)
        return stats
    def _summarizeDets4(args):
        # Layout for 10 IoU thresholds x 4 area ranges: full matrix of
        # IoU (0.5 / 0.75 / 0.5:0.95) x area (all/small/medium/large).
        stats = np.zeros((36,))
        f = open(os.path.join(args.work_dir, 'result.txt'), 'a+')
        bare_name = os.path.basename(args.work_dir)
        f.write(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n')
        f.write('Detect with {}\n'.format(args.checkpoint))
        stats[0] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[-1], write_handle=f)
        stats[1] = _summarize(0, iouThr=.5, maxDets=self.params.maxDets[-1], write_handle=f)
        stats[2] = _summarize(2, iouThr=.5, maxDets=self.params.maxDets[-1], write_handle=f)
        f.write(' ' + '-' * 94 + '\n')
        stats[3] = _summarize(1, iouThr=.5, areaRng='small', maxDets=self.params.maxDets[-1], write_handle=f)
        stats[4] = _summarize(0, iouThr=.5, areaRng='small', maxDets=self.params.maxDets[-1], write_handle=f)
        stats[5] = _summarize(2, iouThr=.5, areaRng='small', maxDets=self.params.maxDets[-1], write_handle=f)
        f.write(' ' + '-' * 94 + '\n')
        stats[6] = _summarize(1, iouThr=.5, areaRng='medium', maxDets=self.params.maxDets[-1], write_handle=f)
        stats[7] = _summarize(0, iouThr=.5, areaRng='medium', maxDets=self.params.maxDets[-1], write_handle=f)
        stats[8] = _summarize(2, iouThr=.5, areaRng='medium', maxDets=self.params.maxDets[-1], write_handle=f)
        f.write(' ' + '-' * 94 + '\n')
        stats[9] = _summarize(1, iouThr=.5, areaRng='large', maxDets=self.params.maxDets[-1], write_handle=f)
        stats[10] = _summarize(0, iouThr=.5, areaRng='large', maxDets=self.params.maxDets[-1], write_handle=f)
        stats[11] = _summarize(2, iouThr=.5, areaRng='large', maxDets=self.params.maxDets[-1], write_handle=f)
        f.write(' ' + '-' * 94 + '\n')
        stats[12] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[-1], write_handle=f)
        stats[13] = _summarize(0, iouThr=.75, maxDets=self.params.maxDets[-1], write_handle=f)
        stats[14] = _summarize(2, iouThr=.75, maxDets=self.params.maxDets[-1], write_handle=f)
        f.write(' ' + '-' * 94 + '\n')
        stats[15] = _summarize(1, iouThr=.75, areaRng='small', maxDets=self.params.maxDets[-1], write_handle=f)
        stats[16] = _summarize(0, iouThr=.75, areaRng='small', maxDets=self.params.maxDets[-1], write_handle=f)
        stats[17] = _summarize(2, iouThr=.75, areaRng='small', maxDets=self.params.maxDets[-1], write_handle=f)
        f.write(' ' + '-' * 94 + '\n')
        stats[18] = _summarize(1, iouThr=.75, areaRng='medium', maxDets=self.params.maxDets[-1], write_handle=f)
        stats[19] = _summarize(0, iouThr=.75, areaRng='medium', maxDets=self.params.maxDets[-1], write_handle=f)
        stats[20] = _summarize(2, iouThr=.75, areaRng='medium', maxDets=self.params.maxDets[-1], write_handle=f)
        f.write(' ' + '-' * 94 + '\n')
        stats[21] = _summarize(1, iouThr=.75, areaRng='large', maxDets=self.params.maxDets[-1], write_handle=f)
        stats[22] = _summarize(0, iouThr=.75, areaRng='large', maxDets=self.params.maxDets[-1], write_handle=f)
        stats[23] = _summarize(2, iouThr=.75, areaRng='large', maxDets=self.params.maxDets[-1], write_handle=f)
        f.write(' ' + '-' * 94 + '\n')
        stats[24] = _summarize(1, maxDets=self.params.maxDets[-1], write_handle=f)
        stats[25] = _summarize(0, maxDets=self.params.maxDets[-1], write_handle=f)
        stats[26] = _summarize(2, maxDets=self.params.maxDets[-1], write_handle=f)
        f.write(' ' + '-' * 94 + '\n')
        stats[27] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[-1], write_handle=f)
        stats[28] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[-1], write_handle=f)
        stats[29] = _summarize(2, areaRng='small', maxDets=self.params.maxDets[-1], write_handle=f)
        f.write(' ' + '-' * 94 + '\n')
        stats[30] = _summarize(1, areaRng='medium', maxDets=self.params.maxDets[-1], write_handle=f)
        stats[31] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[-1], write_handle=f)
        stats[32] = _summarize(2, areaRng='medium', maxDets=self.params.maxDets[-1], write_handle=f)
        f.write(' ' + '-' * 94 + '\n')
        stats[33] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[-1], write_handle=f)
        stats[34] = _summarize(0, areaRng='large', maxDets=self.params.maxDets[-1], write_handle=f)
        stats[35] = _summarize(2, areaRng='large', maxDets=self.params.maxDets[-1], write_handle=f)
        f.write('\n')
        f.close()
        print('Successfully Write Result File...')
        # for batter stored result file,create a dir to stored result,and it can be upload to github
        assert os.path.isfile(os.path.join(args.work_dir, 'result.txt'))
        out_dir = 'result'
        os.makedirs(out_dir, exist_ok=True)
        out_file_name = os.path.join(out_dir, '{}.txt'.format(bare_name))
        shutil.copy(os.path.join(args.work_dir, 'result.txt'), out_file_name)
        return stats
    # Dispatch on the evaluation grid shape configured by set_param.
    if len(self.params.iouThrs) == 1 and len(self.params.areaRng) == 1:
        summarize = _summarizeDets
    elif len(self.params.iouThrs) == 10 and len(self.params.areaRng) == 1:
        summarize = _summarizeDets2
    elif len(self.params.iouThrs) == 1 and len(self.params.areaRng) == 4:
        summarize = _summarizeDets3
    elif len(self.params.iouThrs) == 10 and len(self.params.areaRng) == 4:
        summarize = _summarizeDets4
    else:
        raise ValueError('wrong param, please check set_param function')
    self.stats = summarize(args)
def evaluate(args, anns):
    """Evaluate detection results against the COCO-format ground truth.

    Args:
        args: parsed CLI namespace (val_path, eval, pr_curve, iou_mode,
            classwise, work_dir, ...).
        anns: list of COCO-format detection dicts, or a path to a dumped
            result file (COCO.loadRes accepts both).
    """
    cocoGt = COCO(args.val_path)
    cat_ids = cocoGt.get_cat_ids()
    cocoDt = cocoGt.loadRes(anns)
    # NOTE(review): with nargs='+' args.eval can arrive as a list, but
    # COCOeval expects a single iouType string — confirm callers pass one.
    cocoEval = COCOeval(cocoGt, cocoDt, iouType=args.eval)
    set_param(cocoEval)
    coco_evaluate(cocoEval)
    coco_accumulate(cocoEval)
    coco_summarize(cocoEval, args)
    if args.pr_curve:
        # precision shape: (iou, recall, cls, area range, max dets)
        precisions = cocoEval.eval['precision']
        x = np.arange(0.0, 1.01, 0.01)
        if args.iou_mode == 'single':
            pr_array = precisions[0, :, 0, 0, 0]
            print(pr_array)
            plt.plot(x, pr_array, label='iou=0.5')
        else:
            # one curve per IoU threshold 0.50, 0.55, ..., 0.95
            for t in range(10):
                iou = round(0.5 + 0.05 * t, 2)
                plt.plot(x, precisions[t, :, 0, 0, 0], label='iou={:g}'.format(iou))
        plt.xlabel("recall")
        plt.ylabel("precision")
        plt.xlim(0, 1.0)
        plt.ylim(0, 1.01)
        plt.grid(True)
        plt.legend(loc='lower left')
        # BUG FIX: save before show. show() hands the figure to the GUI and
        # on interactive backends the figure is empty afterwards, so the
        # original savefig-after-show order wrote a blank pr_curve.png.
        plt.savefig(os.path.join(args.work_dir, 'pr_curve.png'))
        plt.show()
    if args.classwise:
        # Compute per-category AP
        # from https://github.com/facebookresearch/detectron2/
        precisions = cocoEval.eval['precision']
        # precision: (iou, recall, cls, area range, max dets)
        assert len(cat_ids) == precisions.shape[2]
        results_per_category = []
        for idx, catId in enumerate(cat_ids):
            # area range index 0: all area ranges
            # max dets index -1: typically 100 per image
            nm = cocoGt.loadCats(catId)[0]
            precision = precisions[:, :, idx, 0, -1]
            precision = precision[precision > -1]
            # NaN when no valid precision entry exists for this class
            ap = np.mean(precision) if precision.size else float('nan')
            results_per_category.append(
                (f'{nm["name"]}', f'{float(ap):0.3f}'))
        num_columns = min(6, len(results_per_category) * 2)
        results_flatten = list(
            itertools.chain(*results_per_category))
        headers = ['category', 'AP'] * (num_columns // 2)
        results_2d = itertools.zip_longest(*[
            results_flatten[i::num_columns]
            for i in range(num_columns)
        ])
        table_data = [headers]
        table_data += [result for result in results_2d]
        table = AsciiTable(table_data)
        print(table.table)
def det2json(dataset, results, mode):
    """Convert per-image detector outputs into COCO-style result dicts.

    mode 'local': rescales boxes from the resized crop back to the original
    image size (reads the module-level ``args`` for resize_width/height,
    mutating the result arrays in place) and forces category_id 1.
    mode 'global': keeps boxes untouched and maps each label index through
    ``dataset.cat_ids``.
    """
    json_results = []
    for idx in range(len(dataset)):
        image_id = dataset.img_ids[idx]
        detections = results[idx]
        if mode == 'local':
            info = dataset.data_infos[idx]
            # undo the test-time resize (in place)
            w_ratio = info['width'] / args.resize_width
            h_ratio = info['height'] / args.resize_height
            detections[:, 0] = detections[:, 0] * w_ratio
            detections[:, 1] = detections[:, 1] * h_ratio
            detections[:, 2] = detections[:, 2] * w_ratio
            detections[:, 3] = detections[:, 3] * h_ratio
            for row in detections:
                json_results.append({
                    'image_id': image_id,
                    'bbox': dataset.xyxy2xywh(row[:-1]),
                    'score': float(row[4]),
                    'category_id': 1,
                })
        elif mode == 'global':
            for label, bboxes in enumerate(detections):
                category = dataset.cat_ids[label]
                for box_idx in range(bboxes.shape[0]):
                    json_results.append({
                        'image_id': image_id,
                        'bbox': dataset.xyxy2xywh(bboxes[box_idx]),
                        'score': float(bboxes[box_idx][4]),
                        'category_id': category,
                    })
    return json_results
def det(args):
    """Run single-GPU inference with an mmdet model and return the
    detections converted to COCO-format annotation dicts.

    Optionally dumps feature maps, switches the test pipeline for
    proposal-quality assessment, and writes ResFile.json.
    """
    cfg = mmcv.Config.fromfile(args.config)
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    # ---------------------------------------------------------------------------------------------------------
    if args.show_feature:
        # dump intermediate feature maps under <work_dir>/feature
        feature_dir = os.path.join(args.work_dir, 'feature')
        os.makedirs(feature_dir, exist_ok=True)
        cfg.model.test_cfg.show_feature = True
        cfg.model.test_cfg.feature_dir = feature_dir
    if args.assess_proposal_quality:
        # proposal assessment needs GT annotations in the test pipeline
        if args.mode == 'local':
            img_scale = (3000, 3000)
        elif args.mode == 'global':
            img_scale = (800, 800)
        else:
            raise ValueError
        norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
        cfg.data.test.pipeline = [
            dict(type='LoadImageFromFile'),
            dict(type='LoadAnnotations', with_bbox=True),
            dict(
                type='MultiScaleFlipAug',
                img_scale=img_scale,
                flip=False,
                transforms=[
                    dict(type='Resize', keep_ratio=False),
                    dict(type='RandomFlip'),
                    dict(type='Normalize', **norm_cfg),
                    dict(type='Pad', size_divisor=32),
                    dict(type='ImageToTensor', keys=['img']),
                    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
                ])
        ]
        cfg.data.test.assess_proposal_quality = True
        cfg.model.test_cfg.assess_proposal_quality = True
    # ---------------------------------------------------------------------------------------------------------
    dataset = build_dataset(cfg.data.test)
    loader = build_dataloader(dataset, samples_per_gpu=1,
                              workers_per_gpu=cfg.data.workers_per_gpu,
                              dist=False, shuffle=False)
    model = build_detector(cfg.model)
    ckpt = load_checkpoint(model, args.checkpoint, map_location='cpu')
    model.CLASSES = ckpt['meta']['CLASSES']
    model = MMDataParallel(model, device_ids=[0])
    outputs = single_gpu_test(model, loader, show=args.show, out_dir=args.outdir)
    anns = det2json(dataset, outputs, args.mode)
    if args.dump_resfile:
        mmcv.dump(anns, args.ResFile)
    return anns
def set_param(self):
    """Configure the COCOeval parameters from the module-level ``args``.

    ``self`` is the COCOeval instance; its ``params`` are rewritten in place.
    Raises ValueError for an unknown iou_mode or area_mode.
    """
    p = self.params
    # -------------------------------- parameters consumed by cocoeval --------------------------------------
    p.maxDets = [100000]
    iou_tables = {
        'single': np.array([0.5]),
        'multiple': np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True),
    }
    if args.iou_mode not in iou_tables:
        raise ValueError('wrong iou_mode')
    p.iouThrs = iou_tables[args.iou_mode]
    p.recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)
    # NOTE(review): 'scoceThrs' looks like a typo for 'scoreThrs', but the
    # attribute name is kept because downstream code may read it as-is.
    p.scoceThrs = args.score
    area_tables = {
        'single': [[0 ** 2, 1e5 ** 2]],
        'multiple': [[0 ** 2, 1e5 ** 2], [0 ** 2, 32 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]],
    }
    if args.area_mode not in area_tables:
        raise ValueError('wrong area_mode')
    p.areaRng = area_tables[args.area_mode]
    # ---------------------------------------------------------------------------------------------------------
    self.params = p
def parse_args():
    """Parse CLI options and derive evaluation paths and modes.

    After parsing, this also: picks the dataset annotation path, infers the
    run mode ('global'/'local') from the work_dir basename, and resolves the
    config / checkpoint / result-file / output paths.
    """
    parser = argparse.ArgumentParser(description='MMDet test detector')
    # ---------------------------------------------------------------------------------------------------------
    parser.add_argument('work_dir', type=str)
    # point out work_dir in this place
    parser.add_argument('--score', default=0.3, type=float)
    # drop a result if its score is smaller than args.score
    parser.add_argument('--weight_file', type=str, default='epoch_30.pth')
    # choose weight file to eval
    parser.add_argument('--dataset', type=str, choices=['xview'], default='xview')
    # only support xview now (may be overridden to 'dota' below from work_dir)
    # NOTE(review): type=bool is an argparse pitfall — only the empty string is
    # falsy, so '--show False' still enables the flag. action='store_true'
    # would be correct but changes the CLI; applies to every bool flag below.
    parser.add_argument('--show', default=False, type=bool)
    # whether to draw pred box to img
    parser.add_argument('--dump_resfile', default=False, type=bool)
    # whether to save ResFile.json
    parser.add_argument('--classwise', type=bool, default=False)
    # whether to eval classwise ap
    parser.add_argument('--assess_proposal_quality', type=bool, default=False)
    # calculate proposal number of feature maps
    parser.add_argument('--show_feature', type=bool, default=False)
    # draw feature maps
    parser.add_argument('--pr_curve', type=bool, default=False)
    # draw pr_curve
    parser.add_argument('--iou_mode', choices=['single', 'multiple'], type=str, default='single')
    # if iou_mode is single, only eval iouThr=0.5
    # else if iou_mode is multiple, eval iouThr from 0.5 to 0.95
    parser.add_argument('--area_mode', choices=['single', 'multiple'], type=str, default='multiple')
    # if area_mode is single, only eval areaRng='all'
    # else if area_mode is multiple, eval areaRng='all', 'small', 'medium', 'large'
    # it takes very long time, more than 20 minutes, use carefully
    # ---------------------------------------------------------------------------------------------------------
    parser.add_argument('--resize_width', default=3000, type=float, help='the width of image after resize')
    parser.add_argument('--resize_height', default=3000, type=float, help='the height of image after resize')
    # NOTE(review): with nargs='+' an explicit --eval yields a list while the
    # default stays the string 'bbox'; COCOeval(iouType=...) expects a string.
    parser.add_argument('--eval', type=str, default='bbox', nargs='+',
                        choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'],
                        help='eval types')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none',
                        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    # derive dataset and annotation path from the work_dir name
    if 'dota' in args.work_dir:
        args.dataset = 'dota'
    if args.dataset == 'xview':
        args.val_path = 'data/xview/annotations/instances_val2017.json'
    elif args.dataset == 'dota':
        args.val_path = 'data/dota/val/DOTA_val.json'
    else:
        raise NotImplementedError('Wrong dataset name, please check arg.dataset')
    # infer inference mode (global / local crops) from the work_dir basename
    bare_name = os.path.basename(args.work_dir)
    if 'global' in bare_name or 'mode1' in bare_name or 'dota' in bare_name or 'xview' in bare_name:
        args.mode = 'global'
    elif 'local' in bare_name or 'mode2' in bare_name:
        args.mode = 'local'
    elif 'mode3' in bare_name:
        args.mode = 'global'
    elif 'ABFN' in bare_name:
        args.mode = 'global'
    else:
        raise ValueError('Wrong work_dir name')
    print('Start Evaluateing {} model'.format(bare_name))
    # the work_dir must hold exactly one mmdet config (.py) file
    config_file = [i for i in os.listdir(args.work_dir) if i.endswith('.py')]
    assert len(config_file) == 1, 'please ensure work_dir only have one config file'
    config_file = config_file[0]
    args.config = os.path.join(args.work_dir, config_file)
    args.ResFile = os.path.join(args.work_dir, 'ResFile.json')
    args.checkpoint = os.path.join(args.work_dir, args.weight_file)
    if args.show is True:
        # visualized predictions go to <work_dir>/out
        args.outdir = os.path.join(args.work_dir, 'out')
        os.makedirs(args.outdir, exist_ok=True)
    else:
        args.outdir = None
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
if __name__ == '__main__':
    args = parse_args()
    # Re-run detection when visualizing or when no cached ResFile exists;
    # otherwise reuse the dumped ResFile from a previous run.
    if args.show or not os.path.isfile(args.ResFile):
        tik = time.time()
        anns = det(args)
        tok = time.time()
        print()
        print('eval time:{}'.format(tok - tik))
        # NOTE(review): 245 is presumably the number of validation images —
        # confirm; the FPS figure is wrong for other datasets/splits.
        print('FPS:{}'.format(245 / (tok - tik)))
        evaluate(args, anns)
    else:
        print('Load ResFile From Local...')
        # COCO.loadRes accepts a result-file path as well as a list of dicts
        evaluate(args, args.ResFile)
|
<reponame>choco0908/PTWQIProject<gh_stars>0
# fig: Matplotlib Figure object that acts as the canvas
# axes: Matplotlib Axes objects used to draw the charts
# Figure title: epoch and exploration rate
# Axes 1: daily candlestick chart of the stock
# Axes 2: shares held and agent action chart
# Axes 3: policy network output and exploration chart
# Axes 4: portfolio value chart
import threading
import numpy as np
import matplotlib.pyplot as plt
plt.switch_backend('agg')
from mplfinance.original_flavor import candlestick_ohlc
from agent import Agent
from enum import Enum
# Chart row indices into the Visualizer's axes array (top to bottom).
ChartIndex = Enum(
    'ChartIndex',
    [('Daily', 0), ('Agent', 1), ('Value', 2), ('Policy', 3), ('PortfolioValue', 4)],
)
# serializes all Matplotlib calls when plotting from multiple threads
lock = threading.Lock()
class Visualizer:
    """Renders training progress as a 5-row Matplotlib figure.

    Rows, top to bottom: daily candlestick chart, agent state
    (actions / shares held), value-network output, policy-network
    output / exploration, and portfolio value.
    """
    # one plot color per agent action
    COLORS = ['r', 'b', 'g']
    def __init__(self, vnet=False):
        self.canvas = None
        # Matplotlib Figure object acting as the canvas
        self.fig = None
        # Matplotlib Axes objects the charts are drawn on
        self.axes = None
        self.title = ''  # figure title
    def prepare(self, chart_data, title):
        """Reset the canvas and draw the static daily (OHLC + volume) chart."""
        self.title = title
        with lock:
            # initialize the canvas and prepare the five chart rows
            self.fig, self.axes = plt.subplots(nrows=5, ncols=1, facecolor='w', sharex=True)
            for ax in self.axes:
                # disable hard-to-read scientific notation
                ax.get_xaxis().get_major_formatter().set_scientific(False)
                ax.get_yaxis().get_major_formatter().set_scientific(False)
                # move the y axis ticks to the right side
                ax.yaxis.tick_right()
            # chart 1: daily candlestick chart
            self.axes[ChartIndex.Daily.value].set_ylabel('Env.')  # y-axis label
            x = np.arange(len(chart_data))
            # 2D array ordered as open, high, low, close
            ohlc = np.hstack((x.reshape(-1, 1), np.array(chart_data)[:, 1:-1]))
            # bullish candles in red, bearish candles in blue
            candlestick_ohlc(self.axes[ChartIndex.Daily.value], ohlc, colorup='r', colordown='b')
            # draw trading volume on a twin axis
            ax = self.axes[ChartIndex.Daily.value].twinx()
            volume = np.array(chart_data)[:, -1].tolist()
            ax.bar(x, volume, color='b', alpha=0.3)
    def plot(self, epoch_str=None, num_epoches=None, epsilon=None, action_list=None, actions=None, num_stocks=None, outvals_value=[], outvals_policy=[], exps=None, learning_idxes=None, initial_balance=None, pvs=None):
        """Draw one epoch's results onto rows 2-5 of the prepared figure.

        NOTE(review): outvals_value/outvals_policy use mutable default
        arguments; they are only read here, but None-sentinels would be safer.
        """
        with lock:
            x = np.arange(len(actions))  # x-axis data shared by all charts
            actions = np.array(actions)  # agent action array
            # value-network output array
            outvals_value = np.array(outvals_value)
            # policy-network output array
            outvals_policy = np.array(outvals_policy)
            # initial-capital baseline array
            pvs_base = np.zeros(len(actions)) + initial_balance
            # chart 2: agent state (actions, shares held)
            for action, color in zip(action_list, self.COLORS):
                for i in x[actions == action]:
                    # mark the action with a background color
                    self.axes[ChartIndex.Agent.value].axvline(i, color=color, alpha=0.1)
            self.axes[ChartIndex.Agent.value].plot(x, num_stocks, '-k')  # shares held
            # chart 3: value network
            if len(outvals_value) > 0:
                max_actions = np.argmax(outvals_value, axis=1)
                for action, color in zip(action_list, self.COLORS):
                    # draw the background
                    for idx in x:
                        if max_actions[idx] == action:
                            self.axes[ChartIndex.Value.value].axvline(idx, color=color, alpha=0.1)
                    # draw the value-network outputs
                    self.axes[ChartIndex.Value.value].plot(x, outvals_value[:, action], color=color, linestyle='-')
            # chart 4: policy network
            # shade exploration steps with a yellow background
            for exp_idx in exps:
                self.axes[ChartIndex.Policy.value].axvline(exp_idx, color='y')
            # shade the chosen action as the background color
            _outvals = outvals_policy if len(outvals_policy) > 0 else outvals_value
            for idx, outval in zip(x, _outvals):
                color = 'white'
                if np.isnan(outval.max()):
                    continue
                if outval.argmax() == Agent.ACTION_BUY:
                    color = 'r'  # buy: red
                elif outval.argmax() == Agent.ACTION_SELL:
                    color = 'b'  # sell: blue
                self.axes[ChartIndex.Policy.value].axvline(idx, color=color, alpha=0.1)
            # draw the policy-network outputs
            if len(outvals_policy) > 0:
                for action, color in zip(action_list, self.COLORS):
                    self.axes[ChartIndex.Policy.value].plot(x, outvals_policy[:, action], color=color, linestyle='-')
            # chart 5: portfolio value
            self.axes[ChartIndex.PortfolioValue.value].axhline(initial_balance, linestyle='-', color='gray')
            self.axes[ChartIndex.PortfolioValue.value].fill_between(x, pvs, pvs_base, where=pvs > pvs_base, facecolor='r', alpha=0.1)
            self.axes[ChartIndex.PortfolioValue.value].fill_between(x, pvs, pvs_base, where=pvs < pvs_base, facecolor='b', alpha=0.1)
            self.axes[ChartIndex.PortfolioValue.value].plot(x, pvs, '-k')
            # mark the learning positions
            for learning_idx in learning_idxes:
                self.axes[ChartIndex.PortfolioValue.value].axvline(learning_idx, color='y')
            # epoch and exploration rate in the figure title
            self.fig.suptitle('{} \nEpoch:{}/{} e={:.2f}'.format(self.title, epoch_str, num_epoches, epsilon))
            # adjust the canvas layout
            self.fig.tight_layout()
            self.fig.subplots_adjust(top=0.85)
    def clear(self, xlim):
        """Clear rows 2-5 (keeping the candlestick chart) and reset the axes."""
        with lock:
            _axes = self.axes.tolist()
            for ax in _axes[1:]:
                ax.cla()  # clear the drawn chart
                ax.relim()  # reset the data limits
                ax.autoscale()  # reset the scale
            # restore the y-axis labels
            self.axes[ChartIndex.Agent.value].set_ylabel('Agent')
            self.axes[ChartIndex.Value.value].set_ylabel('V')
            self.axes[ChartIndex.Policy.value].set_ylabel('P')
            self.axes[ChartIndex.PortfolioValue.value].set_ylabel('PV')
            for ax in _axes:
                ax.set_xlim(xlim)  # reset the x-axis limits
                ax.get_xaxis().get_major_formatter().set_scientific(False)  # disable scientific notation
                ax.get_yaxis().get_major_formatter().set_scientific(False)  # disable scientific notation
                # keep the x-axis tick spacing uniform
                ax.ticklabel_format(useOffset=False)
    def save(self, path):
        """Save the current figure to `path`."""
        with lock:
            self.fig.savefig(path)
<reponame>ShibataLab/cloth_assist_framework
#!/usr/bin/env python
# plotFuncs.py: plot functions for data inspection
# Author: <NAME>
# Date: 2016/02/01
import sys
import GPy
import numpy as np
from matplotlib import cm
from matplotlib import pyplot as plt
################################################################################
# Functions to visualize trajectory data
################################################################################
def plotTraj(Dataset, plotType=0, jointIndex=None, colors=None):
    """Plot left/right arm joint (or pose) tracks for each dataset.

    Parameters
    ----------
    Dataset : dict
        Maps a label (e.g. 'Train'/'Test') to a 2D array whose first column
        is time and whose remaining columns hold left/right arm values.
    plotType : int
        0 -> joint-angle labels/titles, otherwise pose (position/angle).
    jointIndex : array-like, optional
        Indices of the joints to plot (default: first 7).
    colors : dict, optional
        Label -> matplotlib color (default: Train blue, Test red).
    """
    # None-sentinels replace the original mutable default arguments,
    # which are shared across calls.
    if jointIndex is None:
        jointIndex = np.arange(7)
    if colors is None:
        colors = {'Train': 'b', 'Test': 'r'}
    timeData = {}
    leftData = {}
    rightData = {}
    LEFT_ANGLE_OFFSET = 1
    RIGHT_ANGLE_OFFSET = 8
    # dict.items() works on both Python 2 and 3 (original used Py2-only iteritems)
    for key, data in Dataset.items():
        timeData[key] = data[:, 0]
        leftData[key] = data[:, LEFT_ANGLE_OFFSET + jointIndex]
        rightData[key] = data[:, RIGHT_ANGLE_OFFSET + jointIndex]
    jointData = [leftData, rightData]
    # number of joints to plot
    xlabel = 'Time(sec)'
    arms = ['Left', 'Right']
    nJoints = jointIndex.size
    if plotType == 0:
        ylabels = 7 * ['Joint Angle (rad)']
    else:
        ylabels = 3 * ['Position (m)'] + 4 * ['Angle (rad)']
    # one figure per arm, one subplot per joint
    for ind in range(2):
        fig = plt.figure(figsize=(10, 2 * nJoints))
        for i, jI in enumerate(jointIndex):
            plt.subplot(nJoints, 1, i + 1)
            # plot every dataset's track for this joint
            for key in Dataset.keys():
                plt.plot(timeData[key], jointData[ind][key][:, i], label=key,
                         color=colors[key], linewidth=2)
            plt.xlabel(xlabel, fontsize=12, fontweight='bold')
            plt.ylabel(ylabels[i], fontsize=12, fontweight='bold')
            if plotType == 0:
                plt.title('%s Joint %d' % (arms[ind], jI + 1), fontsize=15,
                          fontweight='bold')
            else:
                plt.title('%s Pose %d' % (arms[ind], jI + 1), fontsize=15,
                          fontweight='bold')
            # plot legend only for the 1st subplot
            if i == 0:
                plt.legend()
        # adjust subplots for the legend
        fig.subplots_adjust(top=0.96, right=0.8)
        plt.tight_layout()
    # show all the plots
    plt.show()
def plotLatentTraj(Dataset, points=None, colors=None):
    """Plot latent-space trajectories, one subplot per latent dimension.

    Parameters
    ----------
    Dataset : dict
        Maps a label to a 2D array: column 0 is time, the rest are latent dims.
    points : sequence, optional
        Per-dimension (time, value) via-point arrays to overlay.
    colors : dict, optional
        Label -> matplotlib color (default: Train blue, Test red).
    """
    # None-sentinel replaces the original shared mutable default argument
    if colors is None:
        colors = {'Train': 'b', 'Test': 'r'}
    timeData = {}
    latentData = {}
    # dict.items() works on both Python 2 and 3 (original used Py2-only iteritems)
    for key, data in Dataset.items():
        timeData[key] = data[:, 0]
        latentData[key] = data[:, 1:]
    # number of latent dims to plot (taken from the last dataset seen)
    xlabel = 'Time(sec)'
    nDim = latentData[key].shape[1]
    ylabels = nDim * ['Latent Position']
    # plot all the latent data
    plt.figure(figsize=(10, 2 * nDim))
    for i in range(nDim):
        plt.subplot(nDim, 1, i + 1)
        # plot every dataset's track for this dimension
        for key in Dataset.keys():
            plt.plot(timeData[key], latentData[key][:, i], label=key,
                     color=colors[key], linewidth=2)
        if points:
            plt.plot(points[i][:, 0], points[i][:, 1], 'ob', markersize=15,
                     label='viapoints')
        plt.xlabel(xlabel, fontsize=12, fontweight='bold')
        plt.ylabel(ylabels[i], fontsize=12, fontweight='bold')
        plt.title('Dim %d' % (i + 1), fontsize=15, fontweight='bold')
    plt.tight_layout()
    # show all the plots
    plt.show()
################################################################################
# Functions to visualize model parameters and latent spaces
################################################################################
def plotScales(model, yThresh=0.05):
    """Bar-plot the normalized ARD weights of a GPy model's kernel.

    Draws one bar per latent dimension plus a dashed threshold line at
    yThresh; dimensions below it are effectively switched off. Returns
    the Matplotlib axis.
    """
    # get ARD weight parameters, normalized so the largest weight is 1
    scales = model.kern.input_sensitivity(summarize=False)
    scales = scales / scales.max()
    fig = plt.figure()
    ax = fig.add_subplot(111)
    x = np.arange(1, scales.shape[0] + 1)
    ax.bar(x, height=scales, width=0.8, align='center',
           color='b', edgecolor='k', linewidth=1.3)
    # dashed relevance-threshold line spanning all bars
    ax.plot([0.4, scales.shape[0] + 0.6], [yThresh, yThresh],
            '--', linewidth=3, color='r')
    # setting the bar plot parameters
    ax.set_xlim(.4, scales.shape[0] + .6)
    ax.set_title(model.name, fontsize=25)
    ax.set_ylabel('ARD Weight', fontsize=20)
    ax.tick_params(axis='both', labelsize=20)
    # range() works on both Python 2 and 3 (original used Py2-only xrange)
    ax.set_xticks(range(1, scales.shape[0] + 1))
    ax.set_xlabel('Latent Dimensions', fontsize=20)
    return ax
def plotLatent(model, trainInput, testInput, mode=2, plotIndices = [0, 1]):
    """Scatter train/test inputs in a 2D slice of the model's latent space.

    mode 0: `model` is a dict; model['model'] is a transformer with
            .transform() (e.g. PCA).
    mode 1: GPy model; infer_newX returns latent points directly.
    mode 2 (default): Bayesian GPy model; posterior means are used and the
            two most ARD-sensitive dimensions are auto-selected; the -log
            predictive variance is shaded as a background image.
    Returns the Matplotlib axis with the scatter plot.
    """
    sTest = 200        # test marker size
    sTrain = 150       # train marker size
    resolution = 50    # grid resolution for the variance background
    testMarker = 's'
    trainMarker = 'o'
    nTest = testInput.shape[0]
    nTrain = trainInput.shape[0]
    testLabels = [(1,0,0)]*nTest      # red for test points
    trainLabels = [(0,0,1)]*nTrain    # blue for train points
    # get latent space plot parameters
    if mode == 0:
        model = model['model']
        testData = model.transform(testInput)
        trainData = model.transform(trainInput)
    else:
        qDim = model.X.mean.shape[1]
        testData = np.zeros((testInput.shape[0], qDim))
        trainData = np.zeros((trainInput.shape[0], qDim))
        for n in range(trainInput.shape[0]):
            # infer latent position (no optimization for seen train points)
            xTrain, _ = model.infer_newX(np.atleast_2d(trainInput[n,:]),
                                         optimize=False)
            # update parameter
            if mode == 1:
                trainData[n,:] = xTrain
            else:
                trainData[n,:] = xTrain.mean
            sys.stdout.write('.')
        sys.stdout.write('\n')
        for n in range(testInput.shape[0]):
            # infer latent position (optimize for unseen test points)
            xTest, _ = model.infer_newX(np.atleast_2d(testInput[n,:]),
                                        optimize=True)
            # update parameter
            if mode == 1:
                testData[n,:] = xTest
            else:
                testData[n,:] = xTest.mean
            sys.stdout.write('.')
        sys.stdout.write('\n')
    # variables for plotting
    fig = plt.figure()
    ax = fig.add_subplot(111)
    qDim = trainData.shape[1]
    if mode == 2:
        # pick the two dimensions with the largest ARD weights
        scales = model.kern.input_sensitivity(summarize=False)
        plotIndices = np.argsort(scales)[-2:]
    input1, input2 = plotIndices
    # compute plot limits with a 10% margin around the train data
    xmin, ymin = trainData[:, [input1, input2]].min(0)
    xmax, ymax = trainData[:, [input1, input2]].max(0)
    x_r, y_r = xmax-xmin, ymax-ymin
    xmin -= .1*x_r
    xmax += .1*x_r
    ymin -= .1*y_r
    ymax += .1*y_r
    if mode > 0:
        # plot the variance for the model
        def plotFunction(x):
            # embed the 2D grid back into the full latent space (zeros elsewhere)
            Xtest_full = np.zeros((x.shape[0], qDim))
            Xtest_full[:, [input1, input2]] = x
            _, var = model.predict(np.atleast_2d(Xtest_full))
            var = var[:, :1]
            return -np.log(var)
        x, y = np.mgrid[xmin:xmax:1j*resolution, ymin:ymax:1j*resolution]
        gridData = np.hstack((x.flatten()[:, None], y.flatten()[:, None]))
        gridVariance = (plotFunction(gridData)).reshape((resolution, resolution))
        varianceHandle = plt.imshow(gridVariance.T, interpolation='bilinear',
                                    origin='lower', cmap=cm.gray,
                                    extent=(xmin, xmax, ymin, ymax))
    testHandle = ax.scatter(testData[:, input1], testData[:, input2],
                            marker=testMarker, s=sTest, c=testLabels,
                            linewidth=.2, edgecolor='k', alpha=1.)
    trainHandle = ax.scatter(trainData[:, input1], trainData[:, input2],
                             marker=trainMarker, s=sTrain, c=trainLabels,
                             linewidth=.2, edgecolor='k', alpha=1.)
    ax.grid(b=False)
    ax.set_aspect('auto')
    ax.tick_params(axis='both', labelsize=20)
    ax.set_xlabel('Latent Dimension %i' % (input1+1), fontsize=25,
                  fontweight='bold')
    ax.set_ylabel('Latent Dimension %i' % (input2+1), fontsize=25,
                  fontweight='bold')
    ax.set_xlim((xmin, xmax))
    ax.set_ylim((ymin, ymax))
    properties = {'weight':'bold','size':25}
    plt.legend([trainHandle, testHandle], ['Train', 'Test'], prop=properties)
    fig.canvas.draw()
    fig.tight_layout()
    fig.canvas.draw()
    plt.show()
    return ax
################################################################################
# Functions to visualize results
################################################################################
# function to plot error bars
def plotErrorBars(mE, vE, xLabels, legend, colors, ylabel='NRMSE',
                  legendLoc=3, title='Comparison', ylimit=[0.,1.],
                  xlimit=[-0.1,2.1]):
    """Grouped bar chart with error bars.

    mE/vE are (groups x series) arrays of means and error magnitudes;
    xLabels name the groups, legend/colors the series. Returns the axis.
    """
    font_size = 25
    n_series = mE.shape[1]
    group_width = 0.8
    bar_width = group_width / n_series
    # symmetric padding so each group of bars is centered in its slot
    pad = (1.0 - group_width) / 2.0
    positions = np.arange(mE.shape[0])
    fig, ax = plt.subplots()
    for series in range(n_series):
        ax.bar(pad + positions + series * bar_width, mE[:, series],
               yerr=vE[:, series], width=bar_width,
               color=colors[series], ecolor='k')
    ax.set_ylim(ylimit)
    ax.set_xlim(xlimit)
    ax.set_xticks(positions + 0.5)
    ax.set_ylabel(ylabel, fontsize=font_size, fontweight='bold')
    ax.set_xticklabels(xLabels, fontsize=font_size - 5, fontweight='bold')
    ax.legend(legend, loc=legendLoc, fontsize=font_size,
              prop={'size': font_size - 5, 'weight': 'bold'})
    # NOTE(review): tick.label was removed in newer matplotlib (tick.label1)
    for tick in ax.yaxis.get_major_ticks():
        tick.label.set_fontsize(font_size - 5)
    plt.tight_layout()
    plt.show()
    return ax
|
<reponame>lalithr95/competitive-programming
import urllib2
import os
import math
import json
# curl command with the API token baked in; executed via os.popen below
endpoint = "curl --header 'token: <KEY>' https://www.find.foo/api/challenge"
# data = os.system(endpoint)
# print data
import subprocess
result = os.popen(endpoint).read()
data = json.loads(result)
challenge = data['challenge']
# challenge 1: evaluate '<lhs> <op> <rhs>' where the operands are binary
# ('0b...') or hex ('0x...') literals; answer in the same base
if challenge[1] == 'b' :
    result = challenge.split()
    if result[1] == '+' :
        data = int(result[0], 2) + int(result[2], 2)
        data = bin(data)
    if result[1] == '-' :
        data = int(result[0], 2) - int(result[2], 2)
        data = bin(data)
    if result[1] == '*' :
        data = int(result[0],2) * int(result[2], 2)
        data = bin(data)
    if result[1] == '/' :
        data = int(result[0], 2) / int(result[2], 2)
        data = bin(data)
elif challenge[1] == 'x' :
    result = challenge.split()
    if result[1] == '+' :
        data = int(result[0],16) + int(result[2], 16)
        data = hex(data)
    if result[1] == '-' :
        data = int(result[0], 16) - int(result[2], 16)
        data = hex(data)
    if result[1] == '*' :
        data = int(result[0], 16) * int(result[2], 16)
        data = hex(data)
    if result[1] == '/' :
        data = int(result[0], 16) / int(result[2], 16)
        data = hex(data)
# post the answer back to the API
new_endpoint = "curl --header 'token: <KEY>' --data 'answer="+data+"' https://www.find.foo/api/challenge"
# os.system(new_endpoint)
result = os.popen(new_endpoint).read()
data = json.loads(result)
#### challenge 2 ####
def is_prime(num):
    """Return True if num is prime, using trial division up to sqrt(num).

    BUG FIX: the original returned True for 0, 1 and negative numbers,
    which are not prime.
    """
    if num < 2:
        return False
    for j in range(2,int(math.sqrt(num)+1)):
        if (num % j) == 0:
            return False
    return True
# precompute the number-category tables consumed by check() below
odd_num = list()
even_num = list()
prime_num = [2,3]
fib_num = list()
# odd/even numbers from 0 to 100
for i in range(101) :
    if i % 2 == 0 :
        even_num.append(i)
    else :
        odd_num.append(i)
# Fibonacci numbers up to 100
fib_num = [0 , 1]
i = 2
while 1 :
    sum_num = fib_num[i-1] + fib_num[i-2]
    if sum_num > 100 :
        break
    fib_num.append(sum_num)
    i +=1
# primes below 10001 (2 and 3 were seeded above)
i = 4
while i < 10001 :
    if is_prime(i) :
        prime_num.append(i)
    i +=1
def check(data, ch):
    """Return the values from `data` that belong to the category `ch`.

    ch: 'O' odd, 'E' even, 'P' prime, 'F' Fibonacci (tables built above).
    Unknown categories yield an empty list, matching the original behavior.
    The four duplicated branches are collapsed into one dispatch table, and
    membership uses a set for O(1) lookups instead of O(n) list scans.
    """
    pools = {
        'O': odd_num,
        'E': even_num,
        'P': prime_num,
        'F': fib_num,
    }
    if ch not in pools:
        return []
    members = set(pools[ch])
    return [int(i) for i in data if int(i) in members]
# challenge 2: filter the bracketed number list by the category letter
# at position 0 of the challenge string
challenge = data['challenge']
challenge2_list = ''
result = list()
index = challenge.index('[')
challenge2_list = challenge[index+1:-1]
challenge2_list = challenge2_list.replace(",", "")
challenge2_list = challenge2_list.split(" ")
if challenge[0] == 'O' :
    result = check(challenge2_list, challenge[0])
elif challenge[0] == 'E' :
    result = check(challenge2_list, challenge[0])
elif challenge[0] == 'F' :
    result = check(challenge2_list, challenge[0])
elif challenge[0] == 'P' :
    result = check(challenge2_list, challenge[0])
new_endpoint = "curl --header 'token: <PASSWORD>F<PASSWORD>' --data 'answer="+str(result)+"' https://www.find.foo/api/challenge"
# os.system(new_endpoint)
result = os.popen(new_endpoint).read()
data = json.loads(result)
### challenge 3###
# challenge 3: sum the primes in the inclusive range [L, R]; the bounds
# are read from fixed character offsets of the challenge string
L = data['challenge'][29:33]
R = int(data['challenge'][38:42])
result = 0
i = int(L)
while i <= R :
    if is_prime(int(i)) :
        result += int(i)
    i +=1
new_endpoint = "curl --header 'token: oSgD<PASSWORD>LL<PASSWORD>KZwFoiK<PASSWORD>' --data 'answer="+str(result)+"' https://www.find.foo/api/challenge"
# os.system(new_endpoint)
result = os.popen(new_endpoint).read()
data = json.loads(result)
|
<gh_stars>1-10
from urllib.parse import urlparse
import requests
from bs4 import BeautifulSoup
from Scraper.framework.base_component import BaseComponent
from Scraper.framework.i_components import IComponents
# TODO: Fix a problem where config {tags} used in master dir string
# will be presented in the form of a list instead of expected string
class ComponentGelbooru(BaseComponent, IComponents):
    """Scraper component for gelbooru.com post-list pages.

    Builds the tag/rating search query, generates paginated list URLs,
    and extracts per-image metadata (including a guessed high-res URL)
    from each listing page.
    """
    BASE_URL = "https://gelbooru.com/index.php?page=post&s=list&tags={tag}&pid={page}"
    # Gelbooru shows 42 thumbnails per list page; pid advances in steps of 42.
    IMAGES_PER_PAGE = 42

    def __init__(self, config_path: str = None, config_dict: dict = None, load_config_from_abs_path=False,
                 init_verbose=False):
        # BUG FIX: the original tested `config_path or config_path`, so a
        # caller passing only config_dict silently fell back to gelbooru.ini.
        if config_path or config_dict:
            super(ComponentGelbooru, self).__init__(config_path, config_dict, load_config_from_abs_path, init_verbose)
        else:
            super().__init__("gelbooru.ini", init_verbose)
        self.config.cvt_str_list(["tags", "tags_exclude"])

    def _construct_query(self) -> str:
        """Build the '+'-joined tag/rating query string from the config."""
        tags_include = ("+".join(self.config["tags"]))
        tags_exclude = ("+".join("-" + i for i in self.config["tags_exclude"]))
        rating = ""
        if (self.config["rating"] and self.config["rating_exclude"]):
            self.logger.warning(
                "Both \"rating\" and \"rating_exclude\" are specified. but only one can exist. Using \"rating_exclude\"")
            rating = f"-rating:{self.config['rating_exclude']}"
        else:
            if (self.config["rating"]):
                rating = f"rating:{self.config['rating']}"
            elif (self.config["rating_exclude"]):
                # BUG FIX: an exclusion term needs the '-' prefix; the original
                # built 'rating:...' here, which *included* the rating instead.
                rating = f"-rating:{self.config['rating_exclude']}"
        query_terms = "+".join(i for i in [tags_exclude, tags_include, rating] if i)
        self.logger.debug(f"Constructed query: {query_terms}")
        return query_terms

    def generate_urls(self):
        """Return (url, page-number-string) tuples for the configured page range."""
        # Urls page number increment by 42 because there are 42 images on a page
        tags = self._construct_query()
        return [(self.BASE_URL.format(tag=tags, page=i * self.IMAGES_PER_PAGE), str(i)) for i in
                range(self.config["start_page"], self.config["end_page"])]

    def _predict_highres_url(self, org_url: str, tags: str) -> str:
        """Guess the full-resolution URL for a thumbnail.

        Returns (url, extension), or ("", "") when no candidate extension
        answers a HEAD request successfully.
        """
        video_kw = ["webm", "animated"]
        is_video = False
        for kw in video_kw:
            if kw in tags:
                is_video = True
                break  # one match is enough
        # build the base url w/o the image extension
        url = urlparse(org_url)
        url_path = url.path
        # URL path look like this: '/thumbnails/[first 2 char hash]/[last 2 char hash]/thumbnail_[full_hash].jpg
        image_hash = url_path.split("/")[-1].split("_")[-1].split(".")[0]
        # scheme://subdomain.domain.tld//images/[first 2 char of hash]/[2-4 char of hash]/[full hash].[file ext]
        image_org_path = f"{url.scheme}://{url.netloc}//images/{image_hash[0:2]}/{image_hash[2:4]}/{image_hash}"
        # Guess the file extension
        ext = [".mp4"] if is_video else [".jpg", ".png"]
        for extension in ext:
            full_url = image_org_path + extension
            # Checks if URL is available by sending a HEAD request (if not, try another extension)
            # so it doesn't cost that much resource for the server to respond
            r = requests.head(full_url, headers=self.request_header)
            # BUG FIX: was `<= 400`, which treated '400 Bad Request' as success.
            if r.status_code < 400:
                return (full_url, extension)
            self.logger.debug("Original image did not have file extension type: {}".format(extension))
        self.logger.warning(
            "Failed to find an applicable file extension for image with original url: {}. Ignoring".format(org_url))
        return ("", "")

    def _extract_img_data(self, web_data: bytes, encoding="utf-8") -> list:
        """Parse a listing page and return a list of image-metadata dicts."""
        bs = BeautifulSoup(web_data, "lxml", from_encoding=encoding)
        parent_base_url = "https://gelbooru.com/index.php?page=post&s=view&id={id}"
        image_data = []
        img = bs.find_all("img", attrs={"class": "thumbnail-preview"})
        for elem in img:
            img_id = elem["alt"].split(": ")[1]
            # Extract some information from the title attribute, which looks
            # like: "[tags (space separated)] score:[score] rating:[rating]"
            image_title = elem["title"].split(" ")
            image_title = [i for i in image_title if i]  # remove empty elements
            image_rating = image_title[-1].split(":")[-1]
            image_score = int(image_title[-2].split(":")[-1])
            image_tags = " ".join(image_title[0:len(image_title) - 2])
            # Get the image's highres url; skip images with no usable URL
            image_url, image_extension = self._predict_highres_url(elem["src"], image_tags)
            if not image_url:
                continue
            img_data = {
                "image_id": img_id,
                "image_links": [image_url],
                "image_parent_link": parent_base_url.format(id=img_id),
                "image_score": image_score,
                "image_tags": image_tags,
                "image_rating": image_rating,
                "image_extension": image_extension
            }
            image_data.append(img_data)
        return image_data

    def process_page(self, url: str):
        """Fetch one listing page and return the extracted image metadata."""
        r = requests.get(url, headers=self.request_header)
        if (r.status_code >= 400):
            # NOTE(review): this logs and still parses the error body, matching
            # the original; returning [] here may be safer — confirm callers.
            self.logger.error(f"Failed to fetch content from remote. Server returned status: {r.status_code}")
        return self._extract_img_data(r.content, r.encoding)

    def are_requirements_satisfied(self, data: dict):
        """Return False when the image's score is below the configured minimum."""
        if data["image_score"] < self.config["min_score"]:
            return False
        return True
|
#!/usr/bin/env python
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# MINIHACK_RELEASE_BUILD
# If set, builds wheel (s)dist such as to prepare it for upload to PyPI.
#
import os
import setuptools
import subprocess
# Python packages shipped in the distribution.
packages = [
    "minihack",
    "minihack.envs",
    "minihack.scripts",
    "minihack.tiles",
    "minihack.tests",
    "minihack.agent",
    "minihack.agent.polybeast",
    "minihack.agent.polybeast.models",
    "minihack.agent.polybeast.core",
    "minihack.agent.rllib",
    "minihack.agent.common",
    "minihack.agent.common.envs",
    "minihack.agent.common.models",
    "minihack.agent.common.util",
]

# Console commands installed alongside the package.
entry_points = {
    "console_scripts": [
        "mh-play = minihack.scripts.play:main",
        "mh-guiplay = minihack.scripts.play_gui:main",
        "mh-envs = minihack.scripts.env_list:main",
    ]
}

# Core runtime dependencies.
install_requires = ["numpy>=1.16", "gym<0.20"]
# nle needs a native build, so it is skipped on Read the Docs builders.
if not os.getenv("READTHEDOCS"):
    install_requires.append("nle>=0.7.3")

# Optional dependency groups, installable via e.g. `pip install minihack[dev]`.
extras_deps = {
    "dev": [
        "pre-commit>=2.0.1",
        "black>=19.10b0",
        "flake8>=3.7",
        "flake8-bugbear>=20.1",
        "pytest>=5.3",
        "pytest-benchmark>=3.1.0",
        "sphinx==4.0.2",
        "sphinx-rtd-theme==1.0.0",
        "myst-parser==0.15.1",
        "nbsphinx==0.8.6",
    ],
    "polybeast": [
        "torch>=1.3.1",
        "hydra-core>=1.0.0",
        "hydra-colorlog>=1.0.0",
        "hydra-submitit-launcher>=1.1.1",
        "wandb>=0.10.31",
        "pyyaml",
    ],
    "rllib": [
        "torch>=1.3.1",
        "ray[rllib]==1.3.0",
        "ray[default]==1.3.0",
        "hydra-core>=1.0.0",
        "hydra-colorlog>=1.0.0",
        "hydra-submitit-launcher>=1.1.1",
        "wandb>=0.10.31",
    ],
    "wiki": [
        "inflect",
        "stanza",
    ],
}
# "all" aggregates every optional dependency group above.
extras_deps["all"] = [item for group in extras_deps.values() for item in group]
if __name__ == "__main__":
    # Long description for PyPI comes straight from the README. Specify the
    # encoding explicitly so the build does not depend on the platform locale.
    with open("README.md", encoding="utf-8") as f:
        long_description = f.read()

    cwd = os.path.dirname(os.path.abspath(__file__))

    # Base version from version.txt. Bug fix: use a context manager so the
    # file handle is closed (the original `open(...).read()` leaked it).
    with open("version.txt", "r", encoding="utf-8") as f:
        version = f.read().strip()

    # Try to resolve the current git SHA; leave "Unknown" outside a git repo.
    sha = "Unknown"
    try:
        sha = (
            subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=cwd)
            .decode("ascii")
            .strip()
        )
    except subprocess.CalledProcessError:
        pass

    # Dev builds carry a "+<short-sha>" local version suffix; release builds
    # (MINIHACK_RELEASE_BUILD set) keep the plain PyPI-uploadable version.
    if sha != "Unknown" and not os.getenv("MINIHACK_RELEASE_BUILD"):
        version += "+" + sha[:7]

    print("Building wheel {}-{}".format("minihack", version))

    # Write version info into the package so it is queryable at runtime.
    version_path = os.path.join(cwd, "minihack", "version.py")
    with open(version_path, "w", encoding="utf-8") as f:
        f.write("__version__ = '{}'\n".format(version))
        f.write("git_version = {}\n".format(repr(sha)))

    setuptools.setup(
        name="minihack",
        version=version,
        description="MiniHack The Planet: "
        + "A Sandbox for Open-Ended Reinforcement Learning Research",
        long_description=long_description,
        long_description_content_type="text/markdown",
        author="The MiniHack Team",
        url="https://github.com/facebookresearch/minihack",
        license="Apache License, Version 2.0",
        entry_points=entry_points,
        packages=packages,
        install_requires=install_requires,
        extras_require=extras_deps,
        python_requires=">=3.7",
        classifiers=[
            "License :: OSI Approved :: Apache Software License",
            "Development Status :: 4 - Beta",
            "Operating System :: POSIX :: Linux",
            "Operating System :: MacOS",
            "Programming Language :: Python :: 3",
            "Programming Language :: Python :: 3.7",
            "Programming Language :: Python :: 3.8",
            "Programming Language :: Python :: 3.9",
            "Topic :: Scientific/Engineering :: Artificial Intelligence",
            "Topic :: Games/Entertainment",
        ],
        zip_safe=False,
        include_package_data=True,
    )
|
# This is a python script to take 2D (in space) passive tracer
# data and calculate the time mean effective diffusivity. The
# effective diffusivity is described in more detail in
# Nakamura (1996), Shuckburgh and Haynes (2003), and Abernathey
# and Marshall (2013).

import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np  # bug fix: np was used below but never imported explicitly
import scipy.io as sio
# scipy.ndimage.interpolation is deprecated; rotate() lives in scipy.ndimage
from scipy import ndimage

from effDiffFunctions import *

# import passive tracer data from .mat file
# (renamed from `dict`, which shadowed the builtin)
mat_contents = sio.loadmat( 'p_gulf.mat' )
pGulf = mat_contents['p_gulf']

# reverse directions of x and y axes
pGulf = pGulf[:,::-1,:]
pGulf = pGulf[:,:,::-1]

# make a passive tracer snapshot from week 100
snapshot = pGulf[100,:,:]
snapshot[ snapshot == 0 ] = 0.2
snapshotRot = ndimage.rotate( snapshot, -9 )

# pre-allocate for rotated tracer values
dim = snapshotRot.shape
pGulfRot = np.zeros( ( 1137, dim[0], dim[1] ) )

# rotate passive tracer data week-by-week
for week in range(1137) :
    weeklyTracer = pGulf[week, :, :]
    pGulfRot[week,:,:] = ndimage.rotate( weeklyTracer, -9 )

# define region and extract values from jet extension
X0 = [28,250]
Y0 = [50,137]
pGulfRot = pGulfRot[ :, Y0[0]:Y0[1]+1, X0[0]:X0[1]+1 ]

# calculate the effective diffusivity for each week
effDiff = calcEffDiff( pGulfRot )
meanEffDiff = np.mean( effDiff, 0 )

# calculate sub-annual variations
effDiffComposite = calcAnnualComposite( effDiff )
effDiffComposite = np.transpose( effDiffComposite )

# calculate annual means and standard deviations
effDiffAnnual, effDiffStdDev = calcAnnualMeans( effDiff )

###### PLOTTING #####
dy = 111 / 10
y = np.linspace( 0, meanEffDiff.shape[0], meanEffDiff.shape[0] )*dy

fig, axArray = plt.subplots( 2, 2, figsize=(13,8) )
ax0 = axArray[0,0]
ax1 = axArray[1,0]
ax2 = axArray[0,1]
ax3 = axArray[1,1]

# plot tracer snapshot and illustrate region
ax0.pcolormesh( snapshotRot )
ax0.add_patch( patches.Rectangle( (X0[0],Y0[0] ), X0[1]-X0[0], Y0[1]-Y0[0], fill=False, edgecolor='white' ) )
ax0.invert_xaxis()
ax0.axis('off')
ax0.set_title('Illustration of Region Sampled')

# plot time-mean effective diffusivity, with spread
ax1.fill_between( y, ( meanEffDiff - effDiffStdDev )/1000, ( meanEffDiff + effDiffStdDev )/1000 )
ax1.plot( y, meanEffDiff/1000, color='black' )
ax1.set_xlabel('Distance, Perpendicular to Jet (km)')
ax1.set_ylabel('Effective Diffusivity (1000 m2s-1)')
ax1.set_ylim( [0,8.5] )
ax1.set_xlim( [y[0],y[-1]] )
ax1.set_title('Time-Mean Effective Diffusivity 1993-2014')

# plot week-by-week effective diffusivity as 2D colourmap
im2 = ax2.pcolormesh( np.linspace(1,52,52), y, effDiffComposite )
fig.colorbar( im2, ax=ax2)
ax2.set_ylabel('Distance (km)')
ax2.set_xlabel('Week', labelpad=0 )
ax2.set_title('Effective Diffusivity Weekly-Composite')
ax2.invert_yaxis()

# plot inter-annual variability
ax3.plot( y, np.transpose( effDiffAnnual/1000 ) )
ax3.set_xlabel('Distance, Perpendicular to Jet (km)')
ax3.set_ylabel('Effective Diffusivity (1000 m2s-1)')
ax3.set_xlim( [y[0],y[-1]] )
ax3.set_ylim( [0,8.5] )
ax3.set_title('Annual Means of Effective Diffusivity')

plt.show()
fig.savefig('gulfStreamPassiveTracer.png', bbox_inches='tight', dpi=600)
|
<filename>backend/position/views.py
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.decorators import api_view
from utils.generic_json_creator import create_response
from .models import JobPosition, PositionDetail
from .serializers import JobPositionSerializer, PositionDetailSerializer
from utils.models import Country, State
from company.utils import get_or_create_company
from company.models import Company
from JH_RestAPI import pagination
@csrf_exempt
@api_view(["GET"])
def positions(request):
    """List job positions, optionally filtered by title substring.

    Query params:
        q: optional case-insensitive substring matched against job_title.
        count: optional integer cap on the number of results returned.
    """
    # @api_view(["GET"]) already rejects other methods with 405, so no
    # explicit method check is needed here.
    q = request.GET.get('q')
    if q is None:
        queryset = JobPosition.objects.all()
    else:
        queryset = JobPosition.objects.filter(job_title__icontains=q)
    # Local renamed from `positions`, which shadowed this view function.
    count = request.GET.get('count')
    if count is not None:
        queryset = queryset[:int(count)]
    serialized_positions = JobPositionSerializer(
        instance=queryset, many=True).data
    return JsonResponse(create_response(data=serialized_positions, paginator=None), safe=False)
@csrf_exempt
@api_view(["GET", "POST", "PATCH", "DELETE"])
def company_positions(request):
    """CRUD endpoint for company job postings (PositionDetail).

    GET    list postings, filtered by company id (`id`), title substring
           (`q`), `department` and job `type`; paginated.
    POST   create a new posting for a company.
    PATCH  partially update an existing posting (only supplied fields).
    DELETE soft-delete a posting (sets is_deleted, keeps the row).
    """
    body = request.data
    if request.method == "GET":
        company_id = request.GET.get('id')
        q = request.GET.get('q')
        department = request.GET.get('department')
        job_type = request.GET.get('type')
        # Four branches cover every combination of q/company_id presence;
        # all exclude soft-deleted rows and sort newest-first.
        if q is not None and company_id is not None:
            # jobs = JobPosition.objects.filter(job_title__icontains=q)
            positions = PositionDetail.objects.filter(
                company_id=company_id, is_deleted=False, job__icontains=q).order_by("-updated_date")
        elif q is None and company_id is None:
            positions = PositionDetail.objects.filter(is_deleted=False).order_by("-updated_date")
        elif q is None and company_id is not None:
            positions = PositionDetail.objects.filter(
                company_id=company_id, is_deleted=False).order_by("-updated_date")
        elif q is not None and company_id is None:
            positions = PositionDetail.objects.filter(is_deleted=False, job__icontains=q).order_by("-updated_date")
        # Optional extra filters applied on top of the base queryset.
        if department is not None:
            positions = positions.filter(department=department)
        if job_type is not None:
            positions = positions.filter(job_type=job_type)
        paginator = pagination.CustomPagination()
        positions = paginator.paginate_queryset(positions, request)
        serialized_positions = PositionDetailSerializer(
            instance=positions, many=True).data
        return JsonResponse(create_response(data=serialized_positions, paginator=paginator), safe=False)
    elif request.method == "POST":
        job_title = body["job_title"]
        responsibilities = body["responsibilities"]
        requirements = body["requirements"]
        department = body["department"]
        job_type = body["job_type"]
        city = body["city"]
        # NOTE(review): parsed to int here but the Company lookup below uses
        # the raw body value again — confirm whether this local is needed.
        company_id = int(body["company_id"])
        #job = JobPosition.objects.get(job_title=job_title)
        # The job field stores the plain title string, not a JobPosition FK.
        job=job_title
        state = State.objects.get(pk=body["state_id"])
        country = Country.objects.get(pk=body["country_id"])
        company = Company.objects.get(pk=body["company_id"])
        new_position = PositionDetail(job=job, responsibilities=responsibilities, requirements=requirements,
                                      department=department, job_type=job_type, city=city, country=country, state=state, company=company)
        new_position.save()
        return JsonResponse(
            create_response(data=PositionDetailSerializer(instance=new_position, many=False, context={'user': request.user}).data),safe=False)
    elif request.method == "DELETE":
        # Soft delete: mark the row instead of removing it.
        position_id = body["position_id"]
        position = PositionDetail.objects.get(pk=position_id)
        position.is_deleted = True
        position.save()
        return JsonResponse(create_response(data=None), safe=False)
    elif request.method == "PATCH":
        position_id = body["position_id"]
        position = PositionDetail.objects.get(pk=position_id)
        # Each field is optional; only non-None values overwrite the model.
        job= body.get('job_title')
        responsibilities = body.get('responsibilities')
        requirements = body.get('requirements')
        department = body.get('department')
        job_type = body.get('job_type')
        city = body.get('city')
        state_id = body.get('state_id')
        country_id = body.get('country_id')
        if responsibilities is not None:
            position.responsibilities = responsibilities
        if job is not None:
            position.job = job
        if requirements is not None:
            position.requirements = requirements
        if department is not None:
            position.department = department
        if job_type is not None:
            position.job_type = job_type
        if city is not None:
            position.city = city
        if state_id is not None:
            position.state_id = state_id
        if country_id is not None:
            position.country_id = country_id
        position.save()
        return JsonResponse(create_response(data=PositionDetailSerializer(instance=position, many=False, context={'user': request.user}).data), safe=False)
|
#!/usr/bin/env python3
"""Finds latest versions of the CSVs for each subsystem, then plots the time
domain and X-Y data.
If provided, the first argument to this script is a filename regex that
restricts which CSVs are plotted to those that match the regex.
"""
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
import re
class UnitGroup:
    """A set of data series that all share one measurement unit."""

    def __init__(self):
        # Parallel lists: labels[i] names series[i].
        self.labels = []  # dataset names
        self.series = []  # DataSeries objects (time/data column pairs)
class NameGroup:
    """A single named dataset together with the CSV file it came from."""

    def __init__(self, filename, series):
        self.series = series
        self.filename = filename
class DataSeries:
    """One time column paired with one data column."""

    def __init__(self, time, data):
        self.data = data
        self.time = time
def num_lines(filename):
    """Return the number of lines in *filename*.

    Bug fix: an empty file now reports 0 lines; the previous
    enumerate-based version reported 1 because the counter started at 0
    and had 1 added unconditionally.
    """
    with open(filename) as f:
        return sum(1 for _ in f)
def get_file_list(regex):
    """Return the newest CSV for each subsystem under the current directory.

    Only files matching the optional *regex* (if given) and the
    "<name>-<date>.csv" naming pattern are considered; files with at most
    a header line (or one possibly-truncated data row) are skipped.
    """
    # Collect every CSV below the working directory.
    csv_paths = [
        os.path.join(directory, fname)
        for directory, _, fnames in os.walk(".")
        for fname in fnames
        if fname.endswith(".csv")
    ]

    # Optionally restrict to paths matching the user-supplied pattern.
    if regex:
        csv_paths = [p for p in csv_paths if re.search(regex, p)]

    # Track the newest timestamp seen for each name stub (a stub is a set of
    # files with states/inputs/outputs suffixes and the same base name, like
    # "Flywheel"). Dates compare correctly as plain strings because their
    # format is fixed-width lexicographic.
    name_date_rgx = re.compile(
        r"(?P<name>^\./.*?[A-Za-z ]+)-(?P<date>\d{4}-\d{2}-\d{2}-\d{2}_\d{2}_\d{2})\.csv$"
    )
    newest = {}
    for path in csv_paths:
        m = name_date_rgx.search(path)
        if not m:
            continue
        # Empty/header-only files have no data; a single data row might be
        # truncated, so those are skipped too.
        if num_lines(path) <= 2:
            continue
        stub = m.group("name")
        stamp = m.group("date")
        if stub not in newest or newest[stub] < stamp:
            newest[stub] = stamp

    # Reassemble the winning file names.
    return [stub + "-" + stamp + ".csv" for stub, stamp in newest.items()]
def make_groups(files):
    """Group CSV paths by subsystem category.

    Paths whose names share a stub like "Flywheel" (followed by a
    states/inputs/outputs suffix) land in the same category; anything
    unrecognized gets a category derived from its filename stub.
    """
    category_rgx = re.compile(
        r"^\./.*?(?P<category>[A-Za-z ]+) (states|inputs|outputs)-")

    print("Loading CSVs..." if files else "No data to plot.")

    groups = {}
    # Sorted order loads files as ["inputs", "outputs", "states"], which
    # yields series in the order ["inputs", "outputs", "references",
    # "states"] (references are logged before states) and so produces the
    # desired dataset layering on the plots.
    for path in sorted(files):
        print(f"  {os.path.split(path)[1]}")
        m = category_rgx.search(path)
        if not m:
            # No recognizable category: key this file on its name stub
            # (everything before the first dash).
            base = os.path.split(path)[1]
            groups[base[:base.find("-")]] = [path]
        else:
            groups.setdefault(m.group("category"), []).append(path)
    return groups
def main():
    """Find the newest CSVs, then plot time-domain data grouped by unit and
    an X-Y trajectory plot when pose reference/estimate data is present."""
    plt.rcParams.update({'figure.max_open_warning': 0})

    parser = argparse.ArgumentParser()
    parser.add_argument("-ymin",
                        dest="ymin",
                        type=float,
                        help="Y minimum for plots")
    parser.add_argument("-ymax",
                        dest="ymax",
                        type=float,
                        help="Y maximum for plots")
    parser.add_argument("regex", nargs="?")
    args = parser.parse_args()

    file_groups = make_groups(get_file_list(args.regex))
    if file_groups:
        print("Plotting...")

    # Within each group, make groups of datasets keyed on their unit, then
    # plot each group on their own figure
    unit_rgx = re.compile(r"^(?P<name>[\w\- ]+) \((?P<unit>.*?)\)$")
    for category, file_group in file_groups.items():
        unit_groups = {}
        name_groups = {}
        for filename in file_group:
            # Get labels from first row of file
            with open(filename) as f:
                labels = [
                    x.strip('"') for x in f.readline().rstrip().split(",")
                ]

            # Retrieve data from remaining rows of file. "skip_footer=1" skips
            # the last line because it may be incompletely written.
            data = np.genfromtxt(filename,
                                 delimiter=",",
                                 skip_header=1,
                                 skip_footer=1)
            times = data[:, 0:1]

            # Skips label in first column because that's always "Time (s)".
            # NOTE(review): assumes every label is "<name> (<unit>)"; a label
            # without a unit would make `match` None and raise here — confirm.
            for i, label in enumerate(labels[1:]):
                match = unit_rgx.search(label)
                name = match.group("name")
                unit = match.group("unit")
                if unit not in unit_groups.keys():
                    unit_groups[unit] = UnitGroup()
                # "i + 1" skips the time data column
                unit_groups[unit].series.append(
                    DataSeries(times, data[:, i + 1:i + 2]))
                unit_groups[unit].labels.append(name)

                # "i + 1" skips the time data column
                name_groups[name] = NameGroup(filename, data[:, i + 1:i + 2])

        # Plot time domain datasets
        print(f' [vs time] {category} ({", ".join(unit_groups.keys())})')
        for unit, unit_group in unit_groups.items():
            fig, ax = plt.subplots(1, 1)
            ax.set_title(f"{category} ({unit})")
            for i in range(len(unit_group.series)):
                ax.plot(unit_group.series[i].time, unit_group.series[i].data)
            # Bug fix: compare against None so an explicit limit of 0.0 is
            # honored (plain truthiness treated "-ymin 0" as not given).
            if args.ymin is not None:
                ax.set_ylim(bottom=args.ymin)
            if args.ymax is not None:
                ax.set_ylim(top=args.ymax)
            # First label is x axis label (time). The remainder are dataset
            # names.
            ax.set_xlabel("Time (s)")
            ax.set_ylabel(f"Data ({unit})")
            ax.legend(unit_group.labels)

        # Plot X-Y datasets. If the file doesn't have all the required keys,
        # skip it.
        if not (set(["X reference", "Y reference", "X estimate", "Y estimate"])
                - set(name_groups.keys())):
            print(f' [y vs x] {category}')
            fig, ax = plt.subplots(1, 1)
            ax.set_title(f"{category} trajectory")
            ax.plot(name_groups["X reference"].series,
                    name_groups["Y reference"].series)
            ax.plot(name_groups["X estimate"].series,
                    name_groups["Y estimate"].series)
            ax.set_xlabel("X (m)")
            ax.set_ylabel("Y (m)")
            ax.legend(["Reference", "Estimate"])

            # This equalizes the X and Y axes so the trajectories aren't warped
            ax.axis("equal")
            # Pick the background field image matching the logged challenge.
            if any([
                    "AutoNav" in group.filename or
                    "Galactic Search" in group.filename
                    for group in name_groups.values()
            ]):
                img = plt.imread("tools/images/2021-challenge-space.png")
                ax.imshow(img, extent=[0, 9.144, 0, 4.572])
            else:
                img = plt.imread("tools/images/2021-field.png")
                ax.imshow(img, extent=[0, 15.98295, 0, 8.21055])
    plt.show()
|
<filename>aup2rpp.py
import struct
import xml.etree.ElementTree as ET
import uuid
import math
import pprint
import os
import html
import argparse
"""
shermnotes
.AU : A container format, used by Audacity for storage of lossless, uncompressed,
PCM audio data. Not be confused with Sun/NeXT AU files, which are usually U-Law
encoded PCM files but may be headerless.
https://forum.audacityteam.org/viewtopic.php?t=73428
if converting to 16 bit then one should use dithering.
"""
#DONE: force 16-bit. currently just enable the flag and set noclip = False
#TODO: 24 bit support
#TODO: support weird sample rates if they aren't already?
#TODO: auto-save files to 32 bit only when the file has peaks above 0 dB.

# Audacity .au sample-format codes (see Audacity's SimpleBlockFile.cpp).
AU_SAMPLE_FORMAT_16 = 3
AU_SAMPLE_FORMAT_24 = 4
AU_SAMPLE_FORMAT_FLOAT = 6
# WAV fmt-chunk format-tag values.
IEEE_FLOAT = 3
PCM = 1
noclip = False #This feature isn't implemented properly yet.
convert_to_16 = False #DEBUG #This feature isn't implemented properly yet.
convert_to_32 = False #DEBUG #This feature isn't implemented properly yet.
# Maps .au sample-format code -> bits per sample for the output WAV.
conversion_dict = {AU_SAMPLE_FORMAT_16:16, AU_SAMPLE_FORMAT_24:24, AU_SAMPLE_FORMAT_FLOAT:32}
def load_au_file(au_fpath):
    """Load an Audacity .au block file.

    Returns a dict with the header fields ('magic', 'data_offset',
    'data_size', 'encoding', 'sample_rate', 'channels') plus
    'sample_data', a list of decoded sample values. Returns None for
    unsupported encodings or byte orders.
    """
    with open(au_fpath, 'rb') as f:
        # See https://github.com/audacity/audacity/blob/master/src/blockfile/SimpleBlockFile.cpp
        # wxUint32 magic;      // magic number
        # wxUint32 dataOffset; // byte offset to start of audio data
        # wxUint32 dataSize;   // data length, in bytes (optional)
        # wxUint32 encoding;   // data encoding enumeration
        # wxUint32 sampleRate; // samples per second
        # wxUint32 channels;   // number of interleaved channels
        hcount = 6
        hdata = struct.unpack('I' * hcount, f.read(hcount * 4))
        result = {
            'magic': hdata[0],
            'data_offset': hdata[1],
            'data_size': hdata[2],
            'encoding': hdata[3],
            'sample_rate': hdata[4],
            'channels': hdata[5]
        }
        if result['magic'] == 0x2e736e64:
            encoding = result['encoding']
        else:
            print("ERROR: Endianess needs to be swapped but I dunno what to do")
            return
        f.seek(result['data_offset'])
        # Pick a struct format char and per-sample byte size for the encoding.
        if encoding == AU_SAMPLE_FORMAT_16:
            sfc = 'h'
            ss = 2
        elif encoding == AU_SAMPLE_FORMAT_24:
            print("ERROR: 24-bit samples? Dunno how to read them")
            return
        elif encoding == AU_SAMPLE_FORMAT_FLOAT:
            sfc = 'f'
            ss = 4
        else:
            print("ERROR: I dunno this format ", encoding)
            return
        # Perf fix: bulk-decode the payload in one struct.unpack call instead
        # of one call per sample. Like the original loop (which compared a
        # sample counter against the *byte* count data_size and therefore
        # always hit EOF first in practice), this reads to end of file.
        raw = f.read()
        nsamples = len(raw) // ss
        sample_data = list(struct.unpack(str(nsamples) + sfc, raw[:nsamples * ss]))
        print(' ', result)
        result['sample_data'] = sample_data
        return result
class WavWriter:
    """Incrementally writes a PCM or IEEE-float WAV file to an open stream.

    A placeholder header is reserved up front; call finalize() after all
    samples are appended to seek back and write the real RIFF header.
    """

    def __init__(self, f, sample_rate, channels, bits_per_sample):
        """Reserve header space in *f* and record the output format.

        Args:
            f: Writable, seekable binary stream.
            sample_rate: Samples per second.
            channels: Number of interleaved channels.
            bits_per_sample: 16 (PCM) or 32 (IEEE float).
        """
        #WavWriter(f, au['sample_rate'], nchannels, 16 OR 32)
        self.f = f
        self.sample_rate = sample_rate
        self.channels = channels
        self.bits_per_sample = bits_per_sample
        self.finalized = False
        self.samples_count = 0
        # Sizes of the fmt chunk fields: format tag, channels, sample rate,
        # byte rate, block align, bits per sample.
        self.fmt_chunk_size = 2 + 2 + 4 + 4 + 2 + 2
        self.initial_fpos = f.tell()
        if self.bits_per_sample == 32:
            self.type_of_format = IEEE_FLOAT
        else:
            self.type_of_format = PCM
        # Leave blank header size, we'll write it once all audio has been written.
        # Go straight to the offset where we will write samples
        riff_header_size = 8
        riff_chunk_size_without_data = 4 + (8 + self.fmt_chunk_size) + 8 + 0
        f.write(bytearray(riff_header_size + riff_chunk_size_without_data))
        self.data_fpos = f.tell()

    def append_multichannel_samples(self, sample_data_per_channel):
        """Interleave one list of samples per channel and append them.

        Channels of unequal length are padded with zeros (or truncated)
        to the longest channel, with a warning. Mutates the input lists.
        """
        assert not self.finalized
        assert self.channels == len(sample_data_per_channel)
        nchannels = self.channels
        if nchannels == 1:
            # We can take a shortcut
            interleaved_sample_data = sample_data_per_channel[0]
            max_sample_count = len(interleaved_sample_data)
        else:
            # Get max channel length
            max_sample_count = 0
            for sample_data in sample_data_per_channel:
                if len(sample_data) > max_sample_count:
                    if max_sample_count != 0:
                        # Ew, we had to adjust maximum twice
                        print("WARNING: appending multichannel sample data with different amount of samples!")
                    max_sample_count = len(sample_data)
            # Make sure all channels have the same size
            for sample_data in sample_data_per_channel:
                if len(sample_data) > max_sample_count:
                    # Damn, where is resize(n)?
                    del sample_data[-(len(sample_data) - max_sample_count):]
                else:
                    while len(sample_data) < max_sample_count:
                        sample_data.append(0)
            # Interleave
            interleaved_sample_data = [0] * (max_sample_count * nchannels)
            for channel, sample_data in enumerate(sample_data_per_channel):
                i = channel
                for v in sample_data:
                    interleaved_sample_data[i] = v
                    i += nchannels
        self.append_interleaved_samples(interleaved_sample_data)

    def append_interleaved_samples(self, sample_data):
        """Append already-interleaved samples to the data chunk.

        Samples are packed as 16-bit ints ('h') or 32-bit floats ('f')
        depending on bits_per_sample.
        """
        assert not self.finalized
        nsamples = len(sample_data) // self.channels
        assert nsamples * self.channels == len(sample_data)
        sfc = 'h'
        if self.bits_per_sample == 32:
            sfc = 'f'
        f = self.f
        #print(self.bits_per_sample) #DEBUG
        for v in sample_data:
            f.write(struct.pack(sfc, v))
        self.samples_count += nsamples

    def finalize(self):
        """Seek back and write the real RIFF/fmt/data headers.

        Must be called exactly once, after all samples are appended;
        this is where the header is actually written.
        """
        assert not self.finalized
        f = self.f
        end = f.tell()  # (unused; data_chunk_size is computed directly below)
        data_chunk_size = f.tell() - self.data_fpos
        f.seek(self.initial_fpos)
        assert data_chunk_size == (self.samples_count * self.channels * self.bits_per_sample // 8)
        # "WAVE" letters + two FourCC+size headers and their chunk size.
        # Does not include the size of the top-level header "RIFF"+size.
        riff_chunk_size = 4 + (8 + self.fmt_chunk_size) + (8 + data_chunk_size)
        f.write(b'RIFF')
        f.write(struct.pack('I', riff_chunk_size))
        f.write(b'WAVE')
        #wave_chunk_size = ???
        #f.write(struct.pack('I', wave_chunk_size))
        # ----------
        f.write(b'fmt ')
        f.write(struct.pack('I', self.fmt_chunk_size))
        # Format
        # PCM = 1 (i.e. Linear quantization) Values other than 1 indicate some form of compression.
        # IEEE float = 3
        f.write(struct.pack('H', self.type_of_format))
        f.write(struct.pack('H', self.channels))
        f.write(struct.pack('I', self.sample_rate))
        # SampleRate * NumChannels * BitsPerSample/8
        byte_rate = self.sample_rate * self.channels * self.bits_per_sample // 8
        f.write(struct.pack('I', byte_rate))
        # NumChannels * BitsPerSample/8
        block_align = self.channels * self.bits_per_sample // 8
        f.write(struct.pack('H', block_align))
        # 8 bits = 8, 16 bits = 16, etc.
        f.write(struct.pack('H', self.bits_per_sample))
        f.write(b'data')
        f.write(struct.pack('I', data_chunk_size))
        # And what follows is what we wrote before
        self.finalized = True
# Legacy shortcut
# def write_wav_file(fpath, sample_rate, channels, bits_per_sample, sample_data):
# with open(fpath, 'wb') as f:
# w = WavWriter(f, sample_rate, channels, bits_per_sample)
# w.append_samples(sample_data)
# w.finalize()
def convert_au_files_to_wav(src_paths_by_channel, dst_path):
    """Concatenate per-channel lists of .au block files into one WAV file.

    Args:
        src_paths_by_channel: One list of .au file paths per channel;
            block i of every channel is merged into the same stretch of
            the output.
        dst_path: Path of the WAV file to write.

    Returns:
        Total samples written per channel, 0 on error/no data, or None
        for some unsupported inputs.
    """
    if len(src_paths_by_channel) == 0:
        return
    # Eliminate channels with no blocks
    temp = []
    for c in src_paths_by_channel:
        if len(c) != 0:
            temp.append(c)
    src_paths_by_channel = temp
    print("Converting blocks ", src_paths_by_channel)
    # Concatenate a bunch of .au block files into a single WAV file
    with open(dst_path, 'wb') as f:
        w = None
        nchannels = len(src_paths_by_channel)
        # For each block
        for block_index in range(len(src_paths_by_channel[0])):
            samples_by_channel = []
            # Process each corresponding channel for that block
            for channel in range(nchannels):
                # this for loop gets an au block for each channel.
                src_paths = src_paths_by_channel[channel]
                if block_index >= len(src_paths):
                    # That block doesn't have data on each channel...
                    samples_by_channel.append([])
                    continue
                au = load_au_file(src_paths[block_index])
                samples = au['sample_data'] #load samples
                if au['channels'] != 1:
                    # TODO Deal with this eventually...
                    # As far as I've seen, Audacity actually saves stereo blocks as separate mono .au files. WHY??
                    print("ERROR: Unexpected AU file in stereo. Are you sure this is an audacity project?")
                    return 0
                # Make sure it ends up in the encoding we want
                print(au['encoding']) #DEBUG
                if au['encoding'] == AU_SAMPLE_FORMAT_FLOAT:
                    if convert_to_32:
                        pass #it's already 32
                    if convert_to_16:#converts 32-bit float to 16-bit PCM.
                        #If samples clip don't convert. Unless noclip is off.
                        if abs(max(samples, key=abs)) > 32767 or not noclip:
                            for i, v in enumerate(samples):
                                # convert to 16 bit PCM and clip any thing above 1 dB
                                samples[i] = int(v * 32767.0)
                                #clipping.
                                samples[i] = min(samples[i], 32767) #too high
                                samples[i] = max(samples[i], -32767) #too low
                elif au['encoding'] == AU_SAMPLE_FORMAT_24:
                    #TODO: 24 bit support incl conversion
                    print("ERROR: 24 bits not supported")
                    return #TODO: this should be a proper error catch not a return IMO
                elif au['encoding'] == AU_SAMPLE_FORMAT_16:
                    #if convert_to_32: #TODO
                    pass # Already OK
                else:
                    print("ERROR: Unknown .au encoding: ", au['encoding'])
                    return 0 #this return 0 breaks it on purpose #TODO: proper error return
                #WavWriter calls
                if w is None: #if it's the first au written to the wav.
                    if convert_to_16:
                        w = WavWriter(f, au['sample_rate'], nchannels, 16) #no error here but still.
                    else:
                        w = WavWriter(f, au['sample_rate'], nchannels, conversion_dict[au['encoding']])
                        #Encode to the existing encoding for the AU sample. This should probably have some weird error if the
                        #AU samples are concatenated to one wav and differ in encoding. But I don't think that should ever happen?
                #FIXME: feature: 16bit noclip. for this feature to work I need to check for 16bit vs 32 and make every au in that wav the same bitrate.
                if w.sample_rate != au['sample_rate']: #TODO understand how it works when it concatenates multiple AU files.
                    print("ERROR: sample rate differs in one of the .au files I wanted to concatenate into one .wav")
                    # TODO Return multiple files and split the clip...
                    break
                samples_by_channel.append(samples) #add to list of list of samples.
            w.append_multichannel_samples(samples_by_channel) #Error here due to not writing to 32bit instead
        # NOTE(review): if the first channel has zero blocks, w stays None and
        # this finalize() raises before the guarded return below — confirm.
        w.finalize()
    return 0 if w is None else w.samples_count
def load_audacity_project(fpath):
    """Parse an Audacity .aup project XML file into plain dicts.

    Args:
        fpath: Path to the .aup file; a sibling "<name>_data" directory is
            recorded as 'data_dir' when it exists.

    Returns:
        A dict with 'rate', 'name', 'data_dir' and 'tracks'; each track
        carries its waveclips, which carry their sample sequences (block
        lists) and volume envelopes.
    """
    root = ET.parse(fpath).getroot()
    rate = int(float(root.attrib["rate"]))
    name = root.attrib['projname']
    # XML namespace used by all Audacity project elements.
    ns = { 'ns': 'http://audacity.sourceforge.net/xml/' }
    data_dir = os.path.splitext(fpath)[0] + '_data'
    if not os.path.isdir(data_dir):
        data_dir = ""
    def unescape(s):
        # Attribute values are HTML-escaped in the project file.
        return html.unescape(s)
    output = {
        'rate': rate,
        'name': unescape(name),
        'data_dir': data_dir,
        'tracks': []
    }
    for project_item in root:
        # Strip the "{namespace}" prefix from the element tag.
        tag = project_item.tag.split('}')[1]
        if tag == 'wavetrack':
            o_track = {
                'name': unescape(project_item.attrib['name']),
                'channel': int(project_item.attrib['channel']),
                'linked': True if project_item.attrib['linked'] == '1' else False,
                'mute': True if project_item.attrib['mute'] == '1' else False,
                'solo': True if project_item.attrib['solo'] == '1' else False,
                'rate': int(project_item.attrib['rate']),
                'gain': float(project_item.attrib['gain']),
                'pan': float(project_item.attrib['pan']),
                'color_index': int(project_item.attrib['colorindex']),
                'clips': []
            }
            output['tracks'].append(o_track)
            waveclips = project_item.findall('ns:waveclip', ns)
            for waveclip in waveclips:
                o_clip = {
                    'offset': float(waveclip.attrib['offset']),
                    'color_index': int(waveclip.attrib['colorindex']),
                }
                o_track['clips'].append(o_clip)
                # Each waveclip holds exactly one sample sequence.
                sequence = waveclip.findall('ns:sequence', ns)[0]
                o_sequence = {
                    'max_samples': int(sequence.attrib['maxsamples']),
                    'sample_format': int(sequence.attrib['sampleformat']),
                    'numsamples': int(sequence.attrib['numsamples']),
                    'blocks': []
                }
                o_clip['sequence'] = o_sequence
                for waveblock in sequence.findall('ns:waveblock', ns):
                    waveblock_start = int(waveblock.attrib['start'])
                    for block in waveblock:
                        btag = block.tag.split('}')[1]
                        # Regular audio data stored in a .au block file.
                        if btag == 'simpleblockfile':
                            o_sequence['blocks'].append({
                                'type': btag,
                                'start': waveblock_start,
                                'len': int(block.attrib['len']),
                                'filename': unescape(block.attrib['filename']),
                                'min': float(block.attrib['min']),
                                'max': float(block.attrib['max']),
                                'rms': float(block.attrib['rms']),
                            })
                        # Audio referenced from an external (non-copied) file.
                        elif btag == 'pcmaliasblockfile':
                            o_sequence['blocks'].append({
                                'type': btag,
                                'start': waveblock_start,
                                'len': int(block.attrib['aliaslen']),
                                'file_start': int(block.attrib['aliasstart']),
                                'filename': unescape(block.attrib['aliasfile']),
                                'summary_file': block.attrib['summaryfile'],
                                'channel': int(block.attrib['aliaschannel']),
                                'min': float(block.attrib['min']),
                                'max': float(block.attrib['max']),
                                'rms': float(block.attrib['rms'])
                            })
                        # A run of silence with no backing file.
                        elif btag == 'silentblockfile':
                            o_sequence['blocks'].append({
                                'type': btag,
                                'len': int(block.attrib['len'])
                            })
                        else:
                            print("WARNING: Unknown block type: '{0}'".format(btag))
                # Volume envelope: control points of (time, value) pairs.
                envelope = waveclip.findall('ns:envelope', ns)[0]
                points = []
                for point in envelope.findall('ns:controlpoint', ns):
                    points.append({
                        't': float(point.attrib['t']),
                        'val': float(point.attrib['val'])
                    })
                o_clip['envelope'] = {
                    'points': points
                }
    return output
def convert_au_files_from_audacity_project(project, target_dir):
# This is where most of the conversion happens.
indexed_files = {}
if project['data_dir'] != "":
# Audacity saves its media files under a nested hierarchy,
# I don't quite understand why since files seem to have unique names
for root, dirs, files in os.walk(project['data_dir']):
for name in files:
indexed_files[name] = os.path.join(root, name)
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
tracks = project['tracks']
wavblock_history = {} # of form au_fpaths:(dst_fpath, converted_numsamples)
#This stores hashes for sets of au files to prevent data duplication
#TODO Eventually just make an entirely new project dictionary rather than modifying the input one
converted_tracks = []
project['converted_tracks'] = converted_tracks
for track_index, track in enumerate(tracks):
previous_track = None if track_index == 0 else tracks[track_index - 1]
next_track = None if track_index + 1 == len(tracks) else tracks[track_index + 1]
is_stereo_track = False
if track['channel'] == 1:
if previous_track is not None and previous_track['linked']:
# Ignore second channel of a linked stereo track,
# should be handled both in the previous iteration.
# This means a converted project may have less tracks.
continue
elif track['channel'] == 0 and track['linked']:
is_stereo_track = True
converted_track = {
'name': track['name'],
'mute': track['mute'],
'solo': track['solo'],
'rate': track['rate'],
'gain': track['gain'],
'pan': track['pan'],
'color_index': track['color_index'],
}
converted_tracks.append(converted_track)
converted_clips = []
converted_track['converted_clips'] = converted_clips
for clip_index, clip in enumerate(track['clips']):
sequence = clip['sequence']
au_fpaths = [[], []]
converted_numsamples = 0
converted_clip_start = clip['offset'] # In seconds
blocks = sequence['blocks']
clip2 = None
if is_stereo_track:
clip2 = next_track['clips'][clip_index]
if clip2['offset'] != clip['offset']:
print("WARNING: Stereo track has non-aligned clips??")
# Okayyy
clip2 = None
# Convert clip-wise envelopes into a track-wise one
if len(clip['envelope']['points']) > 0:
if 'envelope' not in converted_track:
converted_envelope = { 'points': [] }
converted_track['envelope'] = converted_envelope
else:
converted_envelope = converted_track['envelope']
# Note: points will be sorted once we have gone through all clips
points = clip['envelope']['points']
for p in points:
converted_envelope['points'].append({
't': p['t'],
'val': p['val']
})
# A clip can be made of many different blocks.
# The goal is to process them in order to get one file per clip,
# and then possibly splitting the clip or ignoring blocks.
# Another fun part is joining stereo tracks,
# because they are saved separately
for block_index, block in enumerate(blocks):
btype = block['type']
is_last = block_index + 1 == len(blocks)
is_next_different = not is_last and btype != blocks[block_index + 1]['type']
if btype == 'simpleblockfile' or btype == 'pcmaliasblockfile':
if converted_numsamples == 0:
converted_clip_start = clip['offset'] + block['start'] / project['rate']
converted_numsamples += block['len']
if btype == 'simpleblockfile':# The files should probably end in au
assert block['filename'].endswith('.au')
block2 = None
if is_stereo_track and clip2 is not None:
for b in clip2['sequence']['blocks']:
if b['start'] == block['start'] and b['len'] == block['len']:
block2 = b
break
if block2 is not None:
src_fpath = indexed_files[block['filename']]
au_fpaths[0].append(src_fpath)
src_fpath2 = indexed_files[block2['filename']]
au_fpaths[1].append(src_fpath2)
else:
src_fpath = indexed_files[block['filename']]
au_fpaths[0].append(src_fpath)
if is_last or is_next_different: #stop grabbing new au files, we have ourselves a singel wav to make
dst_fname = "track{0}_clip{1}.wav".format(track_index, len(converted_clips))
dst_fpath = os.path.join(target_dir, dst_fname)
#convert au_fpaths to a hashable object. of type tuple of tuple
if type(au_fpaths) is list:
au_fpaths = tuple(tuple(i) for i in au_fpaths)
if au_fpaths not in wavblock_history.keys():
if os.path.isfile(dst_fpath):
print("Overwriting ", dst_fpath)
samples_in_file = convert_au_files_to_wav(au_fpaths, dst_fpath)
# Check this because there is redundancy, I'm curious if that can fail
if samples_in_file != converted_numsamples:
print("WARNING: Sample count mismatch between what I found in the .aup and the actual files")
print(" .aup: {0}, file: {1}".format(total_samples, converted_numsamples))
else:
print('Repeat found! Memory saved :)', wavblock_history[au_fpaths][0]) #duplicate sounds like an error. say Repeat. Not an Error
if samples_in_file != converted_numsamples:
print("WARNING: Sample count mismatch between source and repeat") #If this ever happens then my trick doesnt work.
print(" saved wav: {0}, repeat clip start: {1}".format(wavblock_history[au_fpaths][0], converted_clip_start))
#Try to not duplicate files when the .au was re-used.
# We could do this by hashing au_fpaths, and if it's the same then use existing result
wavblock_history.setdefault(au_fpaths, (dst_fpath, converted_numsamples))
converted_clips.append({
'offset': converted_clip_start,
'numsamples': wavblock_history[au_fpaths][1], #numsamples should be the same for duplicates... ! think!
'filename': wavblock_history[au_fpaths][0]
})
au_fpaths = [[], []]
converted_numsamples = 0
elif btype == 'pcmaliasblockfile':
# We don't do anything special regarding stereo, the source file should be fine already
if not is_last:
next_block = blocks[block_index + 1]
if next_block['type'] == 'pcmaliasblockfile':
if next_block['filename'] != block['filename']:
is_next_different = True
if is_last or is_next_different:
converted_clips.append({
'offset': converted_clip_start,
'numsamples': converted_numsamples,
'filename': block['filename'],
'file_start': block['file_start']
})
converted_numsamples = 0
elif btype == 'silentblockfile':
pass # Ignore
else:
print("WARNING: Unsupported block type: '{0}'".format(btype))
# Reorder envelope points by time
if 'envelope' in converted_track:
envelope = converted_track['envelope']
envelope['points'] = sorted(envelope['points'], key=lambda x: x['t'])
def write_rpp_file_from_audacity_project(fpath, project):
    """Write a Reaper project (.rpp) file describing *project*.

    Args:
        fpath: Destination path for the .rpp file (overwritten if present).
        project: Converted Audacity project dict; must provide 'rate' and
            'converted_tracks' (each track with name/gain/pan/mute/solo/
            color_index, its converted clips, and an optional volume envelope).
    """
    # Map Audacity color indices onto Reaper PEAKCOL values.
    audacity_color_to_peakcol = [
        0,           # 0: Default color in Audacity (blue)
        0x013333ff,  # 1: Red
        0x0133ff33,  # 2: Green
        0x01222222   # 3: Black
    ]
    def get_file_tag(fname):
        # Pick the Reaper <SOURCE ...> tag matching the file's extension.
        ext = os.path.splitext(fname)[1]
        if ext == '.wav':
            return 'WAVE'
        elif ext == '.ogg':  # BUGFIX: splitext keeps the leading dot, so 'ogg' never matched
            return 'VORBIS'
        return ext[1:].upper()
    # Audacity saves gain as a linear value, and it turns out Reaper also does
    # def linear2db(p_linear)
    #     return math.log(p_linear) * 8.6858896380650365530225783783321
    class RppWriter:
        """Tiny helper emitting Reaper's indented `<TAG ... >` block syntax."""
        def __init__(self, f):
            self.indent_unit = "  "
            self.indent = ""
            self.f = f
        def open_block(self, tag, *args):
            # Opens `<TAG args...` and increases indentation for its children.
            self.f.write('{0}<{1}'.format(self.indent, tag))
            self._args(args)
            self.indent += self.indent_unit
        def close_block(self):
            self.indent = self.indent[:-len(self.indent_unit)]
            self.f.write('{0}>\n'.format(self.indent))
        def line(self, tag, *args):
            self.f.write('{0}{1}'.format(self.indent, tag))
            self._args(args)
        def _args(self, args):
            # Serialize each argument using Reaper's conventions:
            # strings are quoted, bools become 0/1, UUIDs are braced+uppercased.
            for v in args:
                if type(v) == str:
                    s = ' "{0}"'# if v.contains(' ') else ' {0}'
                    self.f.write(s.format(v))
                elif type(v) == bool:
                    self.f.write(' {0}'.format(1 if v else 0))
                elif type(v) == uuid.UUID:
                    self.f.write(' {' + str(v).upper() + '}')
                else:
                    self.f.write(' ' + str(v))
            self.f.write('\n')
    # One nice thing about Reaper projects is that you can omit things in it,
    # it will not complain and just load what it finds, apparently
    with open(fpath, 'w', encoding="utf-8") as f:
        w = RppWriter(f)
        # Arbitrary version, which happens to be mine at time of writing.
        # 3 years old, but we'll keep it to not break things...
        # TODO I don't know what the number at the end is
        w.open_block('REAPER_PROJECT', 0.1, '5.92/x64', 1534982487)
        project_samplerate = int(project['rate'])
        w.line('SAMPLERATE', project_samplerate, 0, 0)
        for track in project['converted_tracks']:
            track_uid = uuid.uuid4()
            w.open_block('TRACK', track_uid)
            w.line('NAME', track['name'])
            w.line('TRACKID', track_uid)
            w.line('VOLPAN', track['gain'], track['pan'], -1, -1, 1)
            w.line('NCHAN', 2)
            w.line('MUTESOLO', track['mute'], track['solo'])
            w.line('PEAKCOL', audacity_color_to_peakcol[track['color_index']])
            # Track-level volume envelope (assembled earlier from clip envelopes).
            if 'envelope' in track:
                w.open_block('VOLENV2')
                for point in track['envelope']['points']:
                    w.line('PT', point['t'], point['val'])
                w.close_block()
            for clip in track['converted_clips']:
                w.open_block('ITEM')
                w.line('POSITION', clip['offset'])
                # TODO I don't know what these UIDs are
                w.line('IGUID', uuid.uuid4())
                w.line('GUID', uuid.uuid4())
                w.line('NAME', os.path.basename(clip['filename']))
                nsamples = clip['numsamples']
                item_len_seconds = nsamples / project_samplerate
                w.line('LENGTH', item_len_seconds)
                # SOFFS = start offset into the source file (alias clips only).
                if 'file_start' in clip:
                    w.line('SOFFS', clip['file_start'] / project_samplerate)
                w.open_block('SOURCE ' + get_file_tag(clip['filename']))
                w.line('FILE', clip['filename'])
                w.close_block()
                # Note: sources like this can exist:
                # <SOURCE SECTION
                #   LENGTH 3.55565072008221
                #   STARTPOS 7.40378238649376
                #   OVERLAP 0.01
                #   <SOURCE FLAC
                #     FILE "D:\PROJETS\AUDIO\coproductions\1287\Episodes\Episode 7\foule_armee.flac"
                #   >
                # >
                # NOTE: would it be possible to create a reaper file which just links to AU files? much faster..
                w.close_block()
            w.close_block()
        w.close_block()
def convert(aup_path):
    """Convert the Audacity project at *aup_path* into a Reaper project.

    Extracted audio goes into '<name>_wav_data' and the generated Reaper
    project into '<name>.rpp', both next to the .aup file.
    """
    project = load_audacity_project(aup_path)
    base_path = os.path.splitext(aup_path)[0]
    convert_au_files_from_audacity_project(project, base_path + '_wav_data')
    write_rpp_file_from_audacity_project(base_path + '.rpp', project)
    print("Done")
if __name__ == '__main__':
    # Command-line entry point: convert a single .aup project in place.
    parser = argparse.ArgumentParser(description='Converts Audacity projects into Reaper projects.')
    parser.add_argument('audacity_project', metavar='audacity_project', type=str,
                        help='Path to the Audacity project to convert (.aup file)')
    # TODO: parser output filename
    # parser.add_argument('output_project', type=str,)
    # parser: --bitrate [auto 16 float]
    # NOTE(review): --force-encoding and --dont-clip are parsed but not yet
    # passed to convert(); wire them through once transcoding is implemented.
    parser.add_argument('--force-encoding', type=str, help='force all audio to be encoded as {16,32} bit wav files. If not specified, encoding is flexible.')
    # BUGFIX: help text previously read "don\' transcode" (missing the "t").
    parser.add_argument('--dont-clip', type=str, help='if transcoding lower causes clipping, don\'t transcode it')
    args = parser.parse_args()
    convert(args.audacity_project)
|
<gh_stars>0
import base64
import hashlib
from http import HTTPStatus
from typing import Optional
from embit import bech32
from embit import compact
import base64
from io import BytesIO
import hmac
from fastapi import Request
from fastapi.param_functions import Query
from starlette.exceptions import HTTPException
from lnbits.core.services import create_invoice
from lnbits.utils.exchange_rates import fiat_amount_as_satoshis
from . import lnurlpos_ext
from .crud import (
create_lnurlpospayment,
get_lnurlpos,
get_lnurlpospayment,
update_lnurlpospayment,
)
def bech32_decode(bech):
    """tweaked version of bech32_decode that ignores length limitations"""
    # Reject strings with characters outside the printable ASCII range,
    # or with mixed upper/lower case (both are invalid bech32).
    has_bad_char = any(ord(ch) < 33 or ord(ch) > 126 for ch in bech)
    is_mixed_case = bech.lower() != bech and bech.upper() != bech
    if has_bad_char or is_mixed_case:
        return
    bech = bech.lower()
    # The last '1' separates the human-readable part from the data part.
    separator = bech.rfind("1")
    if separator < 1 or separator + 7 > len(bech):
        return
    data_part = bech[separator + 1 :]
    if not all(ch in bech32.CHARSET for ch in data_part):
        return
    hrp = bech[:separator]
    data = [bech32.CHARSET.find(ch) for ch in data_part]
    # Verify the 6-symbol checksum, then drop it and repack 5-bit groups
    # into bytes.
    if bech32.bech32_verify_checksum(hrp, data) is None:
        return
    return bytes(bech32.convertbits(data[:-6], 5, 8, False))
def xor_decrypt(key, blob):
    """Decrypt an lnurlpos XOR-encrypted device payload.

    Blob layout (variant 1): 1-byte variant, 1-byte nonce length + nonce
    (>= 8 bytes), 1-byte payload length + payload (<= 32 bytes), then a
    truncated HMAC-SHA256 (>= 8 bytes) over everything before it.

    Returns:
        (pin, amount_in_cent) decoded from the decrypted payload.

    Raises:
        RuntimeError: on any malformed, unsupported or unauthenticated blob.
    """
    s = BytesIO(blob)
    variant = s.read(1)[0]
    if variant != 1:
        raise RuntimeError("Not implemented")
    # reading nonce
    l = s.read(1)[0]
    nonce = s.read(l)
    if len(nonce) != l:
        raise RuntimeError("Missing nonce bytes")
    if l < 8:
        raise RuntimeError("Nonce is too short")
    # reading payload
    l = s.read(1)[0]
    payload = s.read(l)
    if len(payload) > 32:
        raise RuntimeError("Payload is too long for this encryption method")
    if len(payload) != l:
        raise RuntimeError("Missing payload bytes")
    # Everything left is the (possibly truncated) MAC over the prefix.
    hmacval = s.read()
    expected = hmac.new(
        key, b"Data:" + blob[: -len(hmacval)], digestmod="sha256"
    ).digest()
    if len(hmacval) < 8:
        raise RuntimeError("HMAC is too short")
    # SECURITY FIX: constant-time comparison; the original `!=` compare
    # leaks the expected MAC through timing differences.
    if not hmac.compare_digest(hmacval, expected[: len(hmacval)]):
        raise RuntimeError("HMAC is invalid")
    # Derive the keystream from the nonce and XOR it over the payload.
    secret = hmac.new(key, b"Round secret:" + nonce, digestmod="sha256").digest()
    payload = bytearray(payload)
    for i in range(len(payload)):
        payload[i] = payload[i] ^ secret[i]
    s = BytesIO(payload)
    pin = compact.read_from(s)
    amount_in_cent = compact.read_from(s)
    return pin, amount_in_cent
@lnurlpos_ext.get(
    "/api/v1/lnurl/{pos_id}",
    status_code=HTTPStatus.OK,
    name="lnurlpos.lnurl_v1_params",
)
async def lnurl_v1_params(
    request: Request,
    pos_id: str = Query(None),
    p: str = Query(None),
):
    """LNURL-pay step 1: decode the device payload `p` and return the
    payRequest parameters (fixed min/max sendable, callback URL, metadata)
    for the POS identified by `pos_id`.
    """
    pos = await get_lnurlpos(pos_id)
    if not pos:
        return {
            "status": "ERROR",
            "reason": f"lnurlpos {pos_id} not found on this server",
        }
    # Restore the base64url padding the device strips before decoding.
    # NOTE(review): a missing `p` query parameter raises TypeError here
    # instead of returning an LNURL error response — confirm intended.
    if len(p) % 4 > 0:
        p += "=" * (4 - (len(p) % 4))
    data = base64.urlsafe_b64decode(p)
    pin = 0
    amount_in_cent = 0
    try:
        # Payload is XOR-encrypted with the per-device key; yields (pin, amount).
        result = xor_decrypt(pos.key.encode(), data)
        pin = result[0]
        amount_in_cent = result[1]
    except Exception as exc:
        return {"status": "ERROR", "reason": str(exc)}
    # Convert the fiat amount (cents) to millisatoshis; a POS configured in
    # "sats" sends a satoshi amount directly.
    price_msat = (
        await fiat_amount_as_satoshis(float(amount_in_cent) / 100, pos.currency)
        if pos.currency.lower() != "sats"
        else amount_in_cent
    ) * 1000
    # NOTE(review): `sats` receives price_msat (millisats); lnurl_callback
    # divides by 1000 before invoicing — the field name is misleading.
    # payhash is a placeholder until the callback creates the invoice.
    lnurlpospayment = await create_lnurlpospayment(
        posid=pos.id,
        payload=p,
        sats=price_msat,
        pin=pin,
        payhash="payment_hash",
    )
    if not lnurlpospayment:
        return {"status": "ERROR", "reason": "Could not create payment."}
    return {
        "tag": "payRequest",
        "callback": request.url_for(
            "lnurlpos.lnurl_callback", paymentid=lnurlpospayment.id
        ),
        "minSendable": price_msat,
        "maxSendable": price_msat,
        "metadata": await pos.lnurlpay_metadata(),
    }
@lnurlpos_ext.get(
    "/api/v1/lnurl/cb/{paymentid}",
    status_code=HTTPStatus.OK,
    name="lnurlpos.lnurl_callback",
)
async def lnurl_callback(request: Request, paymentid: str = Query(None)):
    """LNURL-pay step 2: create an invoice for the pending POS payment and
    return it together with a success-action URL that reveals the PIN.
    """
    lnurlpospayment = await get_lnurlpospayment(paymentid)
    # BUGFIX: guard against an unknown payment id instead of raising
    # AttributeError on `lnurlpospayment.posid` below.
    if not lnurlpospayment:
        raise HTTPException(
            status_code=HTTPStatus.NOT_FOUND, detail="lnurlpos payment not found."
        )
    pos = await get_lnurlpos(lnurlpospayment.posid)
    if not pos:
        raise HTTPException(
            status_code=HTTPStatus.FORBIDDEN, detail="lnurlpos not found."
        )
    # `sats` actually stores millisatoshis (set in lnurl_v1_params), hence /1000.
    payment_hash, payment_request = await create_invoice(
        wallet_id=pos.wallet,
        amount=int(lnurlpospayment.sats / 1000),
        memo=pos.title,
        description_hash=hashlib.sha256(
            (await pos.lnurlpay_metadata()).encode("utf-8")
        ).digest(),
        extra={"tag": "lnurlpos"},
    )
    # Persist the invoice hash so the payment can be matched later.
    lnurlpospayment = await update_lnurlpospayment(
        lnurlpospayment_id=paymentid, payhash=payment_hash
    )
    return {
        "pr": payment_request,
        "successAction": {
            "tag": "url",
            "description": "Check the attached link",
            "url": request.url_for("lnurlpos.displaypin", paymentid=paymentid),
        },
        "routes": [],
    }
    # BUGFIX: removed unreachable `return resp.dict()` that followed the
    # return above (`resp` was never defined).
|
<gh_stars>100-1000
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import numbers
import warnings
from typing import Optional, Union
import networkx as nx
import numpy as np
from beartype import beartype
from graspologic.embed import LaplacianSpectralEmbed
from graspologic.embed.base import SvdAlgorithmType
from graspologic.preconditions import check_argument, is_real_weighted
from graspologic.utils import is_fully_connected, pass_to_ranks, remove_loops
from ...utils import LaplacianFormType
from . import __SVD_SOLVER_TYPES # from the module init
from ._elbow import _index_of_elbow
from .embeddings import Embeddings
# Laplacian normalization forms accepted by ``form`` (see utils.to_laplacian).
__FORMS = ["DAD", "I-DAD", "R-DAD"]
@beartype
def laplacian_spectral_embedding(
    graph: Union[nx.Graph, nx.OrderedGraph, nx.DiGraph, nx.OrderedDiGraph],
    form: LaplacianFormType = "R-DAD",
    dimensions: int = 100,
    elbow_cut: Optional[int] = None,
    svd_solver_algorithm: SvdAlgorithmType = "randomized",
    svd_solver_iterations: int = 5,
    svd_seed: Optional[int] = None,
    weight_attribute: str = "weight",
    regularizer: Optional[numbers.Real] = None,
) -> Embeddings:
    """
    Given a directed or undirected networkx graph (*not* multigraph), generate an
    Embeddings object.

    The laplacian spectral embedding process is similar to the adjacency spectral
    embedding process, with the key differentiator being that the LSE process looks
    further into the latent space when it captures changes, whereas the ASE process
    is egocentric and focused on immediate differentiators in a node's periphery.

    All weights will be rescaled based on their relative rank in the graph,
    which is beneficial in minimizing anomalous results if some edge weights are
    extremely atypical of the rest of the graph.

    Parameters
    ----------
    graph : Union[nx.Graph, nx.OrderedGraph, nx.DiGraph, nx.OrderedDiGraph]
        An undirected or directed graph. The graph **must**:

        - be fully numerically weighted (every edge must have a real, numeric weight
          or else it will be treated as an unweighted graph)
        - be a basic graph (meaning it should not be a multigraph; if you have a
          multigraph you must first decide how you want to handle the weights of the
          edges between two nodes, whether summed, averaged, last-wins,
          maximum-weight-only, etc)
    form : str (default="R-DAD")
        Specifies the type of Laplacian normalization to use. Allowed values are:
        { "DAD", "I-DAD", "R-DAD" }. See
        :func:`~graspologic.utils.to_laplacian` for more details regarding form.
    dimensions : int (default=100)
        Dimensions to use for the svd solver.
        For undirected graphs, if ``elbow_cut==None``, you will receive an embedding
        that has ``nodes`` rows and ``dimensions`` columns.
        For directed graphs, if ``elbow_cut==None``, you will receive an embedding that
        has ``nodes`` rows and ``2*dimensions`` columns.
        If ``elbow_cut`` is specified to be not ``None``, we will cut the embedding at
        ``elbow_cut`` elbow, but the provided ``dimensions`` will be used in the
        creation of the SVD.
    elbow_cut : Optional[int] (default=None)
        Using a process described by Zhu & Ghodsi in their paper "Automatic
        dimensionality selection from the scree plot via the use of profile likelihood",
        truncate the dimensionality of the return on the ``elbow_cut``-th elbow.
        By default this value is ``None`` but can be used to reduce the dimensionality
        of the returned tensors.
    svd_solver_algorithm : str (default="randomized")
        allowed values: {'randomized', 'full', 'truncated'}

        SVD solver to use:

        - 'randomized'
            Computes randomized svd using
            :func:`sklearn.utils.extmath.randomized_svd`
        - 'full'
            Computes full svd using :func:`scipy.linalg.svd`
            Does not support ``graph`` input of type scipy.sparse.csr_matrix
        - 'truncated'
            Computes truncated svd using :func:`scipy.sparse.linalg.svds`
    svd_solver_iterations : int (default=5)
        Number of iterations for randomized SVD solver. Not used by 'full' or
        'truncated'. The default is larger than the default in randomized_svd
        to handle sparse matrices that may have large slowly decaying spectrum.
    svd_seed : Optional[int] (default=None)
        Used to seed the PRNG used in the ``randomized`` svd solver algorithm.
    weight_attribute : str (default="weight")
        The edge dictionary key that contains the weight of the edge.
    regularizer : Optional[numbers.Real] (default=None)
        Only used when form="R-DAD". Must be None or nonnegative.
        Constant to be added to the diagonal of degree matrix. If None, average
        node degree is added. If int or float, must be >= 0.

    Returns
    -------
    Embeddings

    Raises
    ------
    beartype.roar.BeartypeCallHintParamViolation if parameters do not match type hints
    ValueError if values are not within appropriate ranges or allowed values

    See Also
    --------
    graspologic.pipeline.embed.Embeddings
    graspologic.embed.LaplacianSpectralEmbed
    graspologic.embed.select_svd
    graspologic.utils.to_laplacian

    Notes
    -----
    The singular value decomposition:

    .. math:: A = U \Sigma V^T

    is used to find an orthonormal basis for a matrix, which in our case is the
    Laplacian matrix of the graph. These basis vectors (in the matrices U or V) are
    ordered according to the amount of variance they explain in the original matrix.
    By selecting a subset of these basis vectors (through our choice of dimensionality
    reduction) we can find a lower dimensional space in which to represent the graph.

    References
    ----------
    .. [1] <NAME>., <NAME>., <NAME>., <NAME>. "A
       Consistent Adjacency Spectral Embedding for Stochastic Blockmodel Graphs,"
       Journal of the American Statistical Association, Vol. 107(499), 2012.
    .. [2] <NAME>, Ulrike. "A tutorial on spectral clustering," Statistics
       and computing, Vol. 17(4), pp. 395-416, 2007.
    .. [3] <NAME>, <NAME>, and <NAME>. "Spectral clustering and
       the high-dimensional stochastic blockmodel," The Annals of Statistics,
       Vol. 39(4), pp. 1878-1915, 2011.
    .. [4] <NAME>. and <NAME>. (2006). Automatic dimensionality selection from the
       scree plot via the use of profile likelihood. Computational Statistics & Data
       Analysis, 51(2), pp.918-930.
    """
    # --- argument validation: fail fast before any matrix work begins ---
    check_argument(
        form in __FORMS, f"form must be one of the values in {','.join(__FORMS)}"
    )
    check_argument(dimensions >= 1, "dimensions must be positive")
    check_argument(elbow_cut is None or elbow_cut >= 1, "elbow_cut must be positive")
    check_argument(
        svd_solver_algorithm in __SVD_SOLVER_TYPES,
        f"svd_solver_algorithm must be one of the values in {','.join(__SVD_SOLVER_TYPES)}",
    )
    check_argument(svd_solver_iterations >= 1, "svd_solver_iterations must be positive")
    check_argument(
        svd_seed is None or 0 <= svd_seed <= 2**32 - 1,
        "svd_seed must be a nonnegative, 32-bit integer",
    )
    check_argument(
        regularizer is None or float(regularizer) >= 0,
        "regularizer must be nonnegative",
    )
    check_argument(
        not graph.is_multigraph(),
        "Multigraphs are not supported; you must determine how to represent at most "
        "one edge between any two nodes, and handle the corresponding weights "
        "accordingly",
    )
    used_weight_attribute: Optional[str] = weight_attribute
    if not is_real_weighted(graph, weight_attribute=weight_attribute):
        warnings.warn(
            f"Graphs with edges that do not have a real numeric weight set for every "
            f"{weight_attribute} attribute on every edge are treated as an unweighted "
            f"graph - which presumes all weights are `1.0`. If this is incorrect, "
            f"please add a '{weight_attribute}' attribute to every edge with a real, "
            f"numeric value (e.g. an integer or a float) and call this function again."
        )
        used_weight_attribute = None  # this supersedes what the user said, because
        # not all of the weights are real numbers, if they exist at all
        # this weight=1.0 treatment actually happens in nx.to_scipy_sparse_matrix()
    node_labels = np.array(list(graph.nodes()))
    graph_as_csr = nx.to_scipy_sparse_matrix(
        graph, weight=used_weight_attribute, nodelist=node_labels
    )
    if not is_fully_connected(graph):
        warnings.warn("More than one connected component detected")
    # Self-loops are dropped, then weights are replaced by their ranks to
    # dampen the influence of anomalously large edge weights.
    graph_sans_loops = remove_loops(graph_as_csr)
    ranked_graph = pass_to_ranks(graph_sans_loops)
    embedder = LaplacianSpectralEmbed(
        form=form,
        n_components=dimensions,
        n_elbows=None,  # in the short term, we do our own elbow finding
        # BUGFIX: `regularizer` was validated above but never forwarded, so the
        # documented R-DAD regularization parameter was silently ignored.
        regularizer=regularizer,
        algorithm=svd_solver_algorithm,
        n_iter=svd_solver_iterations,
        svd_seed=svd_seed,
        concat=False,
    )
    results = embedder.fit_transform(ranked_graph)
    results_arr: np.ndarray
    if elbow_cut is None:
        # Directed graphs return (left, right) latent positions; concatenate
        # them column-wise into a single embedding matrix.
        if isinstance(results, tuple) or graph.is_directed():
            results_arr = np.concatenate(results, axis=1)
        else:
            results_arr = results
    else:
        # Truncate at the requested elbow of the scree plot.
        column_index = _index_of_elbow(embedder.singular_values_, elbow_cut)
        if isinstance(results, tuple):
            left, right = results
            left = left[:, :column_index]
            right = right[:, :column_index]
            results_arr = np.concatenate((left, right), axis=1)
        else:
            results_arr = results[:, :column_index]
    embeddings = Embeddings(node_labels, results_arr)
    return embeddings
|
"""Search Engine"""
import sqlite3
from typing import Iterator, Union
from numpy import array
from pandas import DataFrame, read_sql_query
class SearchEngine:
    """Search engine over a PubMed article SQLite database.

    Parameters
    ----------
    database: str, optional, default='data/pubmed.db'
        SQL database
    articles_table: str, optional, default='articles'
        Name of table in :code:`database`
    """
    def __init__(
        self,
        /,
        database: str = 'data/pubmed.db',
        articles_table: str = 'articles',
    ):
        # save passed
        self._articles_table = articles_table
        # connect to database
        self._database = database
        self._con = sqlite3.connect(database)
    def search(
        self,
        /,
        keywords: Union[str, list[str]] = None,
        max_date: str = None,
        min_date: str = None,
        pmids: list[Union[str, int]] = None,
        scores_table: str = None,
        *,
        join: str = 'AND',
        limit: int = 30,
        min_score: float = None,
        require_abstract: bool = True,
    ) -> DataFrame:
        """Find articles matching keyword / date / PMID filters.

        Parameters
        ----------
        keywords: Union[str, list[str]], optional, default=None
            keyword or keywords to use in search
        max_date: str, optional, default=None
            maximum date of articles to return
        min_date: str, optional, default=None
            minimum date of articles to return
        pmids: list[Union[str, int]], optional, default=None
            pubmed ids of articles to return
        scores_table: str, optional, default=None
            search for articles from within this table
        join: str, optional, default='AND'
            if :code:`'AND'`, require that all keywords be present in found
            articles. If :code:`'OR'`, require that any keyword be present in
            found articles.
        limit: int, optional, default=30
            max number of results
        min_score: float, optional, default=None
            minimum score to include in results. Ignored if
            :code:`scores_table` is not provided
        require_abstract: bool, optional, default=True
            only pull articles with abstracts

        Returns
        -------
        DataFrame
            Matching articles

        Raises
        ------
        ValueError
            if :code:`join` is neither 'AND' nor 'OR'
        """
        # SECURITY FIX: every user-supplied *value* is passed to SQLite as a
        # bound parameter ('?') instead of being interpolated into the SQL
        # text, closing the SQL-injection hole the f-string query had.
        # Identifiers (table names) cannot be bound; they come from the
        # trusted constructor arguments.
        if join not in ('AND', 'OR'):
            raise ValueError("join must be 'AND' or 'OR'")
        searchable_fields = ['Title', 'Abstract', 'Keywords']
        select_cols = ', '.join(
            f'{self._articles_table}.{field}'
            for field in ['PMID', 'Date', 'Title', 'Abstract', 'Keywords']
        )
        query = f'SELECT {select_cols} FROM {self._articles_table}'
        params = []
        # join scores table (provides the Score column used below)
        if scores_table:
            query += (
                f' INNER JOIN {scores_table}'
                f' ON {scores_table}.PMID = {self._articles_table}.PMID'
            )
        conditions = []
        # condition: keyword(s) may appear in any searchable field
        if keywords:
            if type(keywords) is str:
                keywords = [keywords]
            keyword_groups = []
            for keyword in keywords:
                keyword_groups.append(
                    '(' + ' OR '.join(
                        f'{field} LIKE ?' for field in searchable_fields
                    ) + ')'
                )
                params.extend(f'%{keyword}%' for _ in searchable_fields)
            conditions.append('(' + f' {join} '.join(keyword_groups) + ')')
        # condition: min_date
        if min_date:
            conditions.append('(Date >= ?)')
            params.append(min_date)
        # condition: max_date
        if max_date:
            conditions.append('(Date <= ?)')
            params.append(max_date)
        # condition: pmid allow-list
        if pmids:
            placeholders = ', '.join('?' for _ in pmids)
            conditions.append(f'(PMID IN ({placeholders}))')
            params.extend(str(pmid) for pmid in pmids)
        # condition: min_score
        if min_score is not None:
            conditions.append('(Score >= ?)')
            params.append(min_score)
        # condition: has abstract
        if require_abstract:
            conditions.append("(Abstract != '')")
        if conditions:
            query += ' WHERE ' + ' AND '.join(conditions)
        # order results: by score when available, otherwise randomly
        query += ' ORDER BY Score DESC' if scores_table else ' ORDER BY RANDOM()'
        # limit results
        if limit:
            query += ' LIMIT ?'
            params.append(limit)
        # return matching articles
        return read_sql_query(query, con=self._con, params=params)
    def get_rand(self, /, count: int) -> DataFrame:
        """Find random articles

        Parameters
        ----------
        count: int
            number of articles to pull

        Returns
        -------
        DataFrame
            Articles
        """
        return read_sql_query(
            f'SELECT * FROM {self._articles_table} ORDER BY RANDOM() LIMIT ?',
            con=self._con,
            params=[count],
        )
    def get_all(self, /, chunksize: int = 10000) -> Iterator[DataFrame]:
        """Get all articles

        Parameters
        ----------
        chunksize: int, optional, default=10E3
            number of articles in each iteration pt of output

        Returns
        -------
        Iterator[DataFrame]
            Iterator to all articles in database
        """
        return read_sql_query(
            f'SELECT * FROM {self._articles_table}',
            chunksize=chunksize,
            con=self._con,
        )
    def get_count(self) -> int:
        """Get number of articles in database

        Returns
        -------
        int
            total number of articles
        """
        query = f'SELECT COUNT(PMID) FROM {self._articles_table}'
        return self._con.execute(query).fetchone()[0]
|
#import base64
#import binascii
from datetime import datetime
import json
import traceback
from decimal import Decimal
from bson.decimal128 import Decimal128
from pymongo import MongoClient
import pymongo
from google.protobuf.json_format import MessageToJson, Parse, MessageToDict
from utils.getData import *
# Connection settings for the local MongoDB instance holding QRL chain data.
DB_CON = {
    "host": "localhost",
    "port": 27017,
    "database":"qrldata"
}
# Client handle to the MongoDB server (MongoClient connects lazily on first use).
SERVER_CONNECTION = MongoClient(host=DB_CON["host"], port=DB_CON["port"])
# Database handle used by all collection operations in MongoDB below.
DB_CONNECTION = SERVER_CONNECTION.qrldata
class MongoDB(object):
    """Static helpers that mirror QRL chain data from a node into MongoDB.

    All methods operate on the module-level DB_CONNECTION handle; failures
    are recorded in the 'errors_get_data' collection before being re-raised.
    """
    @staticmethod
    def insertData(coll, data):
        """Insert one document into the collection named *coll*.

        Natural keys (address / block_number) are copied into _id so repeated
        runs do not duplicate documents; duplicate inserts are ignored.
        """
        try:
            if data is not None:  # why is data sometimes none ??
                if (coll == 'addresses' and 'address' in data):
                    data["_id"] = data["address"]
                    # del data["address"]
                if (coll == 'blocks' and 'block_number' in data):
                    data["_id"] = data["block_number"]
                if 'extra_nonce' in data:
                    # extra_nonce can exceed the max int size MongoDB can
                    # handle, therefore it is saved as a string
                    data["extra_nonce"] = str(data["extra_nonce"])
                coll = DB_CONNECTION[coll]
                coll.insert_one(data)
        except pymongo.errors.DuplicateKeyError:
            # Document already mirrored on a previous run; nothing to do.
            print('already got that one')
        except Exception as e:
            print(e)
            print(traceback.format_exc())
            # Record the failure in the errors collection, then re-raise.
            eData = {}
            try:
                dataKeys = [k for k in data.keys()]
                dataKeyType = [type(k) for k in data.keys()]
            except Exception:
                dataKeys = ''
                dataKeyType = ''
            eData["date"], eData["location"], eData["traceback"], eData["data"] = datetime.now(), 'insertData', str(traceback.format_exc()), str(data)
            eData["data_keys"], eData["data_key_type"], eData["error"], eData["blocknumber"] = str(dataKeys), str(dataKeyType), str(e), ""
            MongoDB.insertData("errors_get_data", eData)
            print('Error while inserting data into Database')
            raise
    @staticmethod
    def updateData(coll, data, idkey, idval):
        """Apply a $set update to the document in *coll* where idkey == idval."""
        try:
            query = {idkey: idval}
            newVal = {"$set": data}
            DB_CONNECTION[coll].update_one(query, newVal)
        except Exception as e:
            print(e)
            print('Exception while updating data from Database')
            raise
    @staticmethod
    def dropDB():
        """Drop the whole configured database if it exists."""
        try:
            if DB_CON["database"] in SERVER_CONNECTION.list_database_names():
                SERVER_CONNECTION.drop_database(DB_CON["database"])
                print('Database Dropped')
            else:
                # message typo fixed ("Doenst Exists")
                print("Database Doesn't Exist")
        except Exception as e:
            print(e)
            print('Error while dropping Database')
            raise
    @staticmethod
    def dropCollections():
        """Drop every collection in the database (schema and data)."""
        try:
            collections = DB_CONNECTION.list_collection_names()
            for collection in collections:
                print(" ".join(["Dropping collection:", collection]))
                DB_CONNECTION.drop_collection(collection)
        except Exception as e:
            print(e)
            print('Error while dropping Collections')
    @staticmethod
    def truncateCollections():
        """Delete every document from every collection, keeping the collections."""
        try:
            collections = DB_CONNECTION.list_collection_names()
            for collection in collections:
                print(" ".join(["Truncate collection:", collection]))
                # BUGFIX: the original used DB_CONNECTION.collection.remove({}),
                # which targets the literal collection named "collection" (not
                # the loop variable) via the long-removed remove() API. Index
                # by the variable and use delete_many instead.
                DB_CONNECTION[collection].delete_many({})
        except Exception as e:
            print(e)
            print('Error while Truncate Collections')
            raise
    @staticmethod
    def getBlockData(source):
        """Mirror blocks (and their transactions) from the node into MongoDB,
        resuming from the highest block number already stored."""
        try:
            blockheightBC = getData.getBlockHeight(source)
            blockHeightInDB = DB_CONNECTION.blocks.find_one(sort=[("block_number", -1)])
            if blockHeightInDB != None:
                blockHeightInDB = blockHeightInDB["block_number"]
            else:
                blockHeightInDB = 0
            if blockheightBC < blockHeightInDB:
                # NOTE(review): this only warns; the range below is then empty
                # so nothing is parsed — confirm intended.
                print('current blockheight in database is heigher than node')
            for i in range(blockHeightInDB, blockheightBC + 1):
                print(" ".join(["Parsing block", str(i), "/", str(blockheightBC)]))
                blockData = getData.getBlockData(i, source)
                # Transactions are stored separately, so strip them from the
                # block document before inserting it.
                transactions = blockData["transactions"]
                del blockData["transactions"]
                MongoDB.insertData('blocks', blockData)
                for t in transactions:
                    MongoDB.getTransactionData(t, source, blockData["block_number"], blockData["timestamp"])
        except Exception as e:
            print(e)
            print('Error while getting block data')
            eData = {}
            eData["date"], eData["location"], eData["traceback"], eData["data"], eData["error"], eData["blocknumber"] = datetime.now(), 'getBlockData', str(traceback.format_exc()), "", str(e), ""
            MongoDB.insertData("errors_get_data", eData)
            raise
    @staticmethod
    def getTransactionData(t, source, block_number, timestamp):
        """Dispatch a raw transaction *t* into its type-specific collection,
        updating any addresses it touches along the way."""
        try:
            transactionProcessed = False
            if "masterAddr" in t:
                MongoDB.getAddressData(source, t["masterAddr"], timestamp, block_number)
            if "coinbase" in t:
                data = getData.getTransactionDataCoinbase(t, block_number, timestamp)
                MongoDB.getAddressData(source, t["coinbase"]["addrTo"], timestamp, block_number)
                MongoDB.insertData('transactions_coinbase', data)
                transactionProcessed = True
            if "transfer" in t:
                if "addrsTo" in t["transfer"]:
                    addrs_to = t["transfer"]["addrsTo"]
                    amounts = t["transfer"]["amounts"]
                    # One document per recipient.
                    transfers = [{"addr_to": addrs_to[i], "amount": amounts[i]} for i in range(len(addrs_to))]
                    for transfer in transfers:
                        data = getData.getTransactionDataTransfer(t, block_number, timestamp, transfer)
                        MongoDB.getAddressData(source, transfer["addr_to"], timestamp, block_number)
                        MongoDB.insertData('transactions_transfer', data)
                        transactionProcessed = True
            if "token" in t:
                data = getData.getTransactionDataToken(t, block_number, timestamp)
                MongoDB.getAddressData(source, t["token"]["owner"], timestamp, block_number)
                MongoDB.insertData('transactions_token', data)
                transactionProcessed = True
            if "message" in t:
                data = getData.getTransactionDataMessage(t, block_number, timestamp)
                MongoDB.insertData('transactions_message', data)
                transactionProcessed = True
            if "latticePk" in t:
                data = getData.getTransactionDataLatticePk(t, block_number, timestamp)
                MongoDB.insertData("transactions_latticePk", data)
                transactionProcessed = True
            if "slave" in t:
                if "slavePks" in t["slave"]:
                    slave_pks = t["slave"]["slavePks"]
                    access_types = t["slave"]["accessTypes"]
                    # One document per registered slave key.
                    transfers = [{"slave_pk": slave_pks[i], "access_type": access_types[i]} for i in range(len(slave_pks))]
                    for transfer in transfers:
                        data = getData.getTransactionDataSlave(t, block_number, timestamp, transfer)
                        MongoDB.insertData("transactions_slave", data)
                        transactionProcessed = True
            if "transferToken" in t:
                if "addrsTo" in t["transferToken"]:
                    addrs_to = t["transferToken"]["addrsTo"]
                    amounts = t["transferToken"]["amounts"]
                    token_txhash = t["transferToken"]["tokenTxhash"]
                    transfers = [{"addr_to": addrs_to[i], "amount": amounts[i], "token_txhash": token_txhash} for i in range(len(addrs_to))]
                    for transfer in transfers:
                        data = getData.getTransactionDataTransferToken(t, block_number, timestamp, transfer)
                        MongoDB.getAddressData(source, transfer["addr_to"], timestamp, block_number)
                        MongoDB.insertData("transactions_transfertoken", data)
                        transactionProcessed = True
            if not transactionProcessed:
                # Unknown transaction types are kept for later inspection.
                data = getData.getTransactionDataOthers(t, block_number, timestamp)
                MongoDB.insertData("transactions_others", data)
        except Exception as e:
            print(e)
            print('Error while getting transaction data')
            eData = {}
            eData["date"], eData["location"], eData["traceback"], eData["data"], eData["error"], eData["blocknumber"] = datetime.now(), 'getTransactionData', str(traceback.format_exc()), str(t), str(e), block_number
            MongoDB.insertData("errors_get_data", eData)
            raise
    @staticmethod
    def getAddressData(source, b64Addr, timeStamp, block_number):
        """Fetch the current state of an address from the node and insert it,
        or update the existing document (preserving first_seen)."""
        addressData = getData.getAddressData(source, b64Addr, timeStamp)
        try:
            dup_check = DB_CONNECTION.addresses.find_one({"address": addressData["address"]})
            if dup_check == None:
                MongoDB.insertData('addresses', addressData)
            else:
                idval = addressData["address"]
                # Never overwrite the identifying key or the first_seen marker.
                del addressData["address"]
                del addressData["first_seen"]
                MongoDB.updateData('addresses', addressData, 'address', idval)
        except Exception as e:
            print(e)
            print('Error while getting address data')
            eData = {}
            eData["date"], eData["location"], eData["traceback"], eData["data"], eData["error"], eData["blocknumber"] = datetime.now(), 'getAddressData', str(traceback.format_exc()), b64Addr, str(e), block_number
            MongoDB.insertData("errors_get_data", eData)
            raise
|
<gh_stars>0
'''
---------------------------------------------
LinkedList - My version of the built-in list class
Author: <NAME>
---------------------------------------------
Description:
    This is my version of the Python list. It is implemented as a doubly
    linked list, so you can traverse the list forward or backward.
'''
from typing import Union, Iterable, Any
# Node of a doubly linkedlist
class Node:
    """A single element of a doubly linked list.

    Holds a ``data`` payload plus ``next``/``prev`` references to the
    neighbouring nodes (``None`` when there is no neighbour on that side).
    """

    def __init__(self, data=None):
        # A freshly built node is detached: no neighbour on either side.
        self.data = data
        self.next = None
        self.prev = None

    def setData(self, data):
        """Replace the stored payload."""
        self.data = data

    def getData(self):
        """Return the stored payload."""
        return self.data

    def setNext(self, nextOne):
        """Point this node's ``next`` reference at *nextOne*."""
        self.next = nextOne

    def getNext(self):
        """Return the following node (``None`` at the tail)."""
        return self.next

    def hasNext(self):
        """Return True when a following node exists."""
        return self.next is not None

    def setPrev(self, prevOne):
        """Point this node's ``prev`` reference at *prevOne*."""
        self.prev = prevOne

    def getPrev(self):
        """Return the preceding node (``None`` at the head)."""
        return self.prev

    def hasPrev(self):
        """Return True when a preceding node exists."""
        return self.prev is not None

    def copy(self, include_pointers=False):
        """Return a new node carrying the same data.

        With ``include_pointers=True`` the copy also shares this node's
        ``next``/``prev`` references; otherwise the copy is detached.
        """
        duplicate = Node(self.data)
        if include_pointers:
            duplicate.next = self.next
            duplicate.prev = self.prev
        return duplicate
class LinkedList:
    """A doubly linked list mimicking a subset of the ``list`` API.

    Supports indexing, slicing, item assignment, iteration, ``len()`` and
    ``str()``.  Elements are stored in :class:`Node` objects; most mutating
    methods accept either a ``Node`` or raw data.
    """

    def __init__(self, iterable: Iterable = None):
        self.__head = None
        # Scratch cursor reused by traversal methods (kept for backward
        # compatibility with the original implementation).
        self.current = None
        self.tail = None
        if iterable is not None:
            for item in iterable:
                self.insertAtEnd(item)

    def DisplayAllNodes(self, getObj=False):
        """Yield every element front to back.

        :param getObj: when True yield the ``Node`` objects themselves,
                       otherwise yield each node's data.
        """
        self.current = self.head
        while self.current is not None:
            if getObj:
                yield self.current
            else:
                yield self.current.getData()
            self.current = self.current.getNext()

    def GetLength(self):
        """Return the number of elements (0 for an empty list)."""
        # Fixed: the previous implementation returned 1 for an empty list.
        self.current = self.head
        if self.current is None:
            return 0
        currentNum = 1
        while self.current.getNext() is not None:
            currentNum += 1
            self.current = self.current.getNext()
        return currentNum

    # ------------------------------------------------------------------
    # Insertion
    # ------------------------------------------------------------------
    def insertAtBeginning(self, data):
        """Insert ``data`` (a ``Node`` or raw value) before the head."""
        toInsert = self.__toNode(data)
        if self.head is not None:
            toInsert.next = self.head
            self.head.prev = toInsert
        # The property setter also initialises the tail on an empty list.
        self.head = toInsert

    def insertAtEnd(self, data):
        """Append ``data`` (a ``Node`` or raw value) after the tail."""
        toInsert = self.__toNode(data)
        if self.tail is not None:
            toInsert.prev = self.tail
            self.tail.next = toInsert
            self.tail = toInsert
        else:
            # Empty list: the head setter also initialises the tail.
            self.head = toInsert

    def insertAtPos(self, pos: int, data):
        """Insert ``data`` so that it ends up at index ``pos``.

        :raises IndexError: when ``pos`` is negative or past the end.
        """
        length = self.GetLength()
        if pos > length or pos < 0:
            raise IndexError("linkedlist assignment index out of range")
        elif pos == 0:
            self.insertAtBeginning(data)
        elif pos == length:
            self.insertAtEnd(data)
        else:
            toInsert = self.__toNode(data)
            # Walk to the node just before the insertion point.
            self.current = self.head
            currentNum = 0
            while currentNum < pos - 1:
                currentNum += 1
                self.current = self.current.getNext()
            tmp = self.current.getNext()
            toInsert.next = tmp
            toInsert.prev = self.current
            self.current.next = toInsert
            tmp.prev = toInsert

    # ------------------------------------------------------------------
    # Deletion
    # ------------------------------------------------------------------
    def deleteAtBeginning(self):
        """Remove the head element.

        :raises AttributeError: when the list is empty (historical
            behaviour, preserved).
        """
        tmp = self.head.getNext()
        if tmp is None:
            # Removing the only element empties the list entirely.
            # Fixed: previously left a spurious Node(None) as the head.
            self.tail = None
            self.head = None
        else:
            self.head = tmp
            self.head.prev = None

    def deleteAtEnd(self):
        """Remove the tail element."""
        if self.tail is not None and self.tail.prev is None:
            # Single element: fixed -- the old code dereferenced a None
            # tail here and raised AttributeError.
            self.head = None
            self.tail = None
        else:
            self.tail = self.tail.prev
            self.tail.next = None

    def deleteAtPos(self, pos: int):
        """Remove the element at index ``pos``.

        :raises IndexError: for an empty list or an out-of-range ``pos``.
        """
        length = self.GetLength()
        if pos > length or pos < 0 or length == 0:
            raise IndexError("delete index out of range")
        elif pos == 0:
            self.deleteAtBeginning()
        elif pos == length:
            self.deleteAtEnd()
        else:
            # Walk to the node just before the one being removed.
            self.current = self.head
            currentNum = 0
            while currentNum < pos - 1:
                currentNum += 1
                self.current = self.current.getNext()
            tmp = self.current.getNext().getNext()
            self.current.next = tmp
            if tmp is not None:
                tmp.prev = self.current
            else:
                # Fixed: deleting the last element must also move the tail,
                # otherwise insertAtEnd would append after a dangling node.
                self.tail = self.current

    # ------------------------------------------------------------------
    # Access
    # ------------------------------------------------------------------
    def getAtBeginning(self):
        """Return the head ``Node`` (``None`` when empty)."""
        return self.__head

    def getAtEnd(self):
        """Return the tail ``Node`` (``None`` when empty)."""
        return self.tail

    def getAtPos(self, pos: int):
        """Return the ``Node`` at index ``pos``; negative indices count
        from the back, as for ``list``.

        :raises IndexError: when ``pos`` is past the end or the list is
            empty.
        """
        length = self.GetLength()
        if pos > length or length == 0:
            raise IndexError("index out of range")
        elif pos == 0:
            return self.__head
        elif pos == length:
            # Historical quirk kept for compatibility: index ``len`` is
            # accepted and maps to the tail.
            return self.tail
        else:
            if pos < 0:
                # Fixed off-by-one: -1 now maps to the last element
                # (the old mapping walked off the end and returned None).
                pos = length + pos
            self.current = self.head
            currentNum = 0
            while currentNum < pos:
                currentNum += 1
                self.current = self.current.getNext()
            return self.current

    # ------------------------------------------------------------------
    # Update
    # ------------------------------------------------------------------
    def updateAtBeginning(self, data):
        """Overwrite the head element's data."""
        self.head.data = data

    def updateAtEnd(self, data):
        """Overwrite the tail element's data."""
        self.tail.data = data

    def updateAtPos(self, pos: int, data):
        """Overwrite the data at index ``pos`` (unwraps ``Node`` input).

        :raises IndexError: for an empty list or an out-of-range ``pos``.
        """
        data = self.__toData(data)
        length = self.GetLength()
        if pos > length or pos < 0 or length == 0:
            raise IndexError("update index out of range")
        elif pos == 0:
            self.head.data = data
        elif pos == length:
            self.tail.data = data
        else:
            self.current = self.head
            currentNum = 0
            while currentNum < pos:
                currentNum += 1
                self.current = self.current.getNext()
            self.current.data = data

    def index(self, element_to_find):
        """Return the index of the first element equal to
        ``element_to_find``, or ``None`` when absent (unlike
        ``list.index``, which raises -- kept for backward compatibility).
        """
        for currentNum, element in enumerate(self.DisplayAllNodes()):
            if element == element_to_find:
                return currentNum
        return None

    def copy(self) -> "LinkedList":
        """Return a shallow copy (new nodes, same data objects)."""
        # Fixed: no longer crashes on an empty list, and runs in O(n)
        # instead of the previous O(n^2) getAtPos-per-element scan.
        duplicate = LinkedList()
        for data in self.DisplayAllNodes():
            duplicate.insertAtEnd(Node(data))
        return duplicate

    def load_from_iterable(self, lst: Iterable):
        """Append every element of *lst* to the end of the list."""
        for element in lst:
            self.insertAtEnd(Node(element))

    # ------------------------------------------------------------------
    # Protocol support
    # ------------------------------------------------------------------
    def __len__(self):
        """Return the number of elements."""
        return self.GetLength()

    def __toNode(self, data):
        """Wrap raw data in a ``Node``; pass ``Node`` input through."""
        if not isinstance(data, Node):
            return Node(data)
        return data

    def __toData(self, data):
        """Unwrap a ``Node`` to its data; pass raw input through."""
        if isinstance(data, Node):
            return data.data
        return data

    def __setitem__(self, key: int, value):
        """Item assignment (``ll[i] = x``) -- delegates to updateAtPos."""
        self.updateAtPos(key, value)

    def __getitem__(self, index: Union[int, slice]) -> Union["LinkedList", Node]:
        """Index access and slicing.

        An ``int`` index returns the ``Node`` at that position; a ``slice``
        returns a new ``LinkedList``.  NOTE(review): negative steps do not
        follow full ``list`` semantics -- a step of -1 keeps forward order
        and any other negative step returns the window reversed, ignoring
        the step magnitude.  Preserved as-is apart from crash fixes.
        """
        if not isinstance(index, slice):
            return self.getAtPos(index)
        length = self.GetLength()
        start = 0 if index.start is None else index.start
        stop = length if index.stop is None else index.stop
        step = 1 if index.step is None else index.step
        if start < 0:
            # Fixed off-by-one in the negative-bound mapping (-1 now means
            # the last index, matching list semantics).
            start = length + start
        if stop < 0:
            stop = length + stop
        sliced = LinkedList()
        currentIndex = 0
        if step == 1:
            for element in self.DisplayAllNodes(getObj=True):
                if start <= currentIndex < stop:
                    sliced.insertAtEnd(Node(element.data))
                currentIndex += 1
            return sliced
        if step == 0:
            raise ValueError("slice step cannot be zero")
        if step < 0 and step != -1:
            # Reverse traversal from the tail (the step magnitude is
            # ignored; historical behaviour).
            self.current = self.tail
            currentNum = length - 1
            while currentNum >= 0 and self.current is not None:
                if start <= currentIndex < stop:
                    sliced.insertAtEnd(Node(self.current.data))
                currentIndex += 1
                # Fixed: the counter was never decremented, so this loop
                # previously never terminated and crashed on a None node.
                currentNum -= 1
                self.current = self.current.getPrev()
            return sliced
        # Positive step > 1, or step == -1 (which historically behaves like
        # a forward stride of 1 because ``i % -1 == 0`` for every i).
        step_startIndex = None
        for element in self.DisplayAllNodes(getObj=True):
            if start <= currentIndex < stop:
                if step_startIndex is None:
                    step_startIndex = 0
                if step_startIndex == 0 or step_startIndex % step == 0:
                    sliced.insertAtEnd(Node(element.data))
                step_startIndex += 1
            currentIndex += 1
        return sliced

    def __iter__(self) -> Iterable:
        """Iterate front to back, yielding each node's data."""
        self.current = self.head
        while self.current is not None:
            yield self.current.getData()
            self.current = self.current.getNext()

    def __str__(self) -> str:
        """Render like a Python list, e.g. ``[1, 2, 3]``."""
        return f"[{', '.join(str(item) for item in self)}]"

    @property
    def head(self):
        """The first ``Node`` of the list (``None`` when empty)."""
        return self.__head

    @head.setter
    def head(self, value: Any):
        # Accept a Node, raw data (wrapped in a Node), or None.  Fixed:
        # assigning None now really empties the head instead of storing a
        # spurious Node(None).
        if value is None:
            self.__head = None
            return
        self.__head = value if isinstance(value, Node) else Node(value)
        if self.tail is None:
            self.tail = self.__head
|
#!/usr/bin/env python3
import boxx
from boxx import *
from boxx import np
import os
import sys
sys.path.append(".")
import bpy
import bpycv
import random
from bpycv.dataset_utils.dataset_generator import MetaDatasetGenerator, uniform_by_mean
from cfg_utils import get_arguments, get_default_cfg
class LogGenerator(MetaDatasetGenerator):
    """Synthesises rendered scenes of wooden logs (textured cylinders)
    dropped onto a hidden ground plane, for instance-segmentation data."""

    def __init__(self, cfg):
        super().__init__(cfg)
        # Nature-category HDRI environments used for world lighting.
        self.hdri_manager = bpycv.HdriManager(
            hdri_dir=os.path.join(cfg.SOURCE_ASSET, "shared/hdri"), category="nature",
        )
        # Candidate wood textures applied to the log cylinders.
        self.texture_paths = boxx.glob(
            os.path.join(cfg.SOURCE_ASSET, "log/wood_texture/*")
        )

    def generate_one(self, dirr, index, meta_seed=0):
        """Build, simulate and render one scene; save it under *dirr* when
        it passes the quality checks, otherwise retry with a bumped seed."""
        cfg = self.cfg
        # Deterministic per-sample seed so failed renders can be reproduced.
        random.seed(f"{cfg.DIR},{meta_seed},{index}")
        bpy.context.scene.frame_set(0)
        bpycv.remove_useless_data()
        bpycv.clear_all()
        # Random HDRI world + random camera orbit pose.
        hdri_path = self.hdri_manager.sample()
        bpycv.load_hdri_world(hdri_path)
        cam_radius = random.choice([5, 8, 10, 15, 20])
        cam_deg = random.uniform(0, 90)
        bpycv.set_cam_pose(cam_radius=cam_radius, cam_deg=cam_deg)
        # Passive, render-hidden ground plane for the rigid-body simulation.
        if "Plane" not in bpy.data.objects:
            bpy.ops.mesh.primitive_plane_add(size=100)
        obj = bpy.data.objects["Plane"]
        with bpycv.activate_obj(obj):
            bpy.ops.rigidbody.object_add()
            bpy.context.object.rigid_body.type = "PASSIVE"
        obj.hide_render = True
        obj_num = random.choice(cfg.OBJ_NUM_DIST)
        # NOTE(review): enumerate starts inst_id at cfg.MAX_INST rather
        # than 0 -- confirm this offset is intended for the id encoding.
        for inst_id, idx in enumerate(range(obj_num), cfg.MAX_INST):
            inst_id
            # ^ no-op expression (looks like leftover boxx debugging).
            bpy.ops.mesh.primitive_cylinder_add(
                radius=uniform_by_mean(mean=0.25, rate=0.4),
                depth=uniform_by_mean(12, 0.01),
            )
            obj = bpy.context.active_object
            obj["is_artifact"] = True
            bpy.ops.object.shade_smooth()
            obj["inst_id"] = inst_id
            # Scatter logs above the plane, lying on their side.
            area_scale = 4
            location = (
                random.uniform(-1, 1) * area_scale,
                0,
                random.uniform(0, 1) * area_scale,
            )
            obj.location = location
            obj.rotation_euler.x = boxx.pi / 2
            with bpycv.activate_obj(obj):
                bpy.ops.rigidbody.object_add()
                obj.rigid_body.type = "ACTIVE"
            # Random wood material.
            texture_path = random.choice(self.texture_paths)
            material = bpycv.build_tex(texture_path)
            obj.data.materials.clear()
            obj.data.materials.append(material)
        # Advance the physics simulation so the logs settle.
        for i in range(120):
            bpy.context.scene.frame_set(bpy.context.scene.frame_current + 1)
        result = bpycv.render_data()

        def qualify_result(result):
            # Reject scenes with too few visible instances, too much
            # near-camera clutter, or a mostly-black image.
            if len(np.unique(result["inst"])) < 5:
                return False
            if ((0 < result["depth"]) & (result["depth"] < 0.3)).mean() > 0.2:
                return False
            if np.mean(result["image"].mean(-1) < 2.55) > 0.5:
                return False
            return True

        if qualify_result(result):
            result.save(dirr, index, save_blend=False)
        else:
            # Recurse with a new meta seed until a scene qualifies.
            self.generate_one(dirr, index, meta_seed=meta_seed + 1)
def get_cfg():
    """Build the log-dataset config: the defaults with a custom
    object-count distribution, returned as an independent clone."""
    config = get_default_cfg()
    config.OBJ_NUM_DIST = [70, 80, 90]
    return config.clone()
if __name__ == "__main__":
    # CLI entry point: parse options, overlay them on the default
    # log-generation config, then render the whole dataset.
    args = get_arguments()
    cfg = get_cfg()
    cfg.merge_from_list_or_str(args.opts)
    log_gen = LogGenerator(cfg)
    log_gen.generate_all()
|
<filename>prose/core.py
from tqdm import tqdm
from astropy.io import fits
from .console_utils import TQDM_BAR_FORMAT
from astropy.wcs import WCS
from . import viz
from . import Telescope
from collections import OrderedDict
from tabulate import tabulate
import numpy as np
from time import time
from pathlib import Path
from astropy.time import Time
class Image:
    """In-memory FITS image: pixel data plus header, with telescope-aware
    convenience accessors for common header keywords."""

    def __init__(self, fitspath=None, data=None, header=None, **kwargs):
        """Build from a FITS file path, or directly from data/header.

        Any extra keyword arguments are copied onto the instance as
        attributes (this is what makes :py:meth:`copy` work).
        """
        if fitspath is not None:
            self.path = fitspath
            self.get_data_header()
        else:
            self.data = data
            self.header = header if header is not None else {}
            self.path = None
        self.telescope = None
        self.discard = False
        self.__dict__.update(kwargs)
        self.check_telescope()

    def get_data_header(self):
        """Load the pixel array (as float) and primary header from
        ``self.path``."""
        self.data = fits.getdata(self.path).astype(float)
        self.header = fits.getheader(self.path)

    def copy(self, data=True):
        """Return a copy of this image; with ``data=False`` the pixel
        array is dropped from the copy."""
        # Rebuilds through __init__ by passing the whole __dict__ as
        # kwargs (re-runs check_telescope on the copy).
        new_self = self.__class__(**self.__dict__)
        if not data:
            # NOTE(review): raises KeyError if the copy has no "data"
            # attribute -- confirm callers never copy(data=False) twice.
            del new_self.__dict__["data"]
        return new_self

    def check_telescope(self):
        """Resolve the Telescope object from the header when present."""
        # NOTE(review): direct indexing raises KeyError if a non-empty
        # header lacks "TELESCOP" -- confirm all inputs carry it.
        if self.header:
            self.telescope = Telescope.from_name(self.header["TELESCOP"])

    def get(self, keyword, default=None):
        """dict-style lookup in the FITS header."""
        return self.header.get(keyword, default)

    @property
    def wcs(self):
        """World coordinate system parsed from the header."""
        return WCS(self.header)

    @property
    def exposure(self):
        """Exposure time, read via the telescope-specific keyword."""
        return self.get(self.telescope.keyword_exposure_time, None)

    @property
    def jd_utc(self):
        """Julian date converted to UTC.

        Uses the telescope's JD keyword when present, otherwise derives
        the JD from the date; both are shifted by the telescope's mjd
        offset before conversion.
        """
        # if jd keyword not in header compute jd from date
        if self.telescope.keyword_jd in self.header:
            jd = self.get(self.telescope.keyword_jd, None) + self.telescope.mjd
        else:
            jd = Time(self.date, scale="utc").to_value('jd') + self.telescope.mjd
        return Time(
            jd,
            format="jd",
            scale=self.telescope.jd_scale,
            location=self.telescope.earth_location).utc.value

    @property
    def date(self):
        """Observation date as parsed by the telescope helper."""
        return self.telescope.date(self.header)

    @property
    def bjd_tdb(self):
        """Barycentric Julian date converted to TDB, or ``None`` when the
        header has no BJD keyword."""
        jd_bjd = self.get(self.telescope.keyword_bjd, None)
        if jd_bjd is not None:
            jd_bjd += self.telescope.mjd
            if self.telescope.keyword_jd in self.header:
                # NOTE(review): astropy Time documents no "bjd" format --
                # this branch likely raises; confirm against the astropy
                # version in use.
                time_format = "bjd"
            else:
                time_format = "jd"
            return Time(jd_bjd,
                        format=time_format,
                        scale=self.telescope.jd_scale,
                        location=self.telescope.earth_location).tdb.value
        else:
            return None

    @property
    def seeing(self):
        """Seeing value from the telescope-specific keyword."""
        return self.get(self.telescope.keyword_seeing, None)

    @property
    def ra(self):
        """Right ascension from the telescope-specific keyword."""
        return self.get(self.telescope.keyword_ra, None)

    @property
    def dec(self):
        """Declination from the telescope-specific keyword."""
        return self.get(self.telescope.keyword_dec, None)

    @property
    def flip(self):
        """Pier-flip flag from the telescope-specific keyword."""
        return self.get(self.telescope.keyword_flip, None)

    @property
    def airmass(self):
        """Airmass from the telescope-specific keyword."""
        return self.get(self.telescope.keyword_airmass, None)

    @property
    def shape(self):
        """Pixel-array shape as a numpy array."""
        return np.array(self.data.shape)
class Block:
    """A single processing unit acting on an ``Image`` object, reading,
    processing and writing its attributes.

    Within a sequence a block goes through three phases:

    1. :py:meth:`~prose.Block.initialize` is called once before the sequence runs
    2. images pass one by one through its :py:meth:`~prose.run` method
    3. :py:meth:`~prose.Block.terminate` is called once after the sequence ends

    Parameters
    ----------
    name : [type], optional
        [description], by default None
    """

    def __init__(self, name=None):
        """Create the block and reset its timing statistics.

        Parameters
        ----------
        name : [type], optional
            [description], by default None
        """
        self.name = name
        self.unit_data = None
        # Bookkeeping consumed by Sequence.__str__ for per-block timing.
        self.processing_time = 0
        self.runs = 0

    def initialize(self, *args):
        """Hook called once before the sequence starts (no-op by default)."""
        pass

    def set_unit_data(self, unit_data):
        """Attach the sequence-wide shared data dict to this block."""
        self.unit_data = unit_data

    def _run(self, *args, **kwargs):
        """Timed wrapper around :py:meth:`run`; updates the statistics."""
        started = time()
        self.run(*args, **kwargs)
        self.processing_time += time() - started
        self.runs += 1

    def run(self, image, **kwargs):
        """Process one image; must be overridden by subclasses."""
        raise NotImplementedError()

    def terminate(self):
        """Hook called once after the sequence ends (no-op by default)."""
        pass

    def stack_method(self, image):
        """Hook for stack-image processing (no-op by default)."""
        pass

    def show_image(self, image):
        """Display the image with detected stars overlaid."""
        viz.show_stars(image)

    @staticmethod
    def citations():
        """References to cite when this block is used (None by default)."""
        return None

    @staticmethod
    def doc():
        """Extra documentation string for this block."""
        return ""

    def concat(self, block):
        """Merge another block's results into this one (identity by default)."""
        return self
class Sequence:
    """Runs a list of :class:`Block` objects over a set of image files or
    ``Image`` objects, with shared unit data and per-block timing."""
    # TODO: add index self.i in image within unit loop
    def __init__(self, blocks, files, name="default", loader=Image, **kwargs):
        self.name = name
        # A single path is wrapped in a list so iteration is uniform.
        self.files_or_images = files if not isinstance(files, (str, Path)) else [files]
        # Goes through the ``blocks`` property setter below.
        self.blocks = blocks
        self.loader = loader
        # Shared dict handed to every block via set_unit_data.
        self.data = {}
        self.n_processed_images = None

    def __getattr__(self, item):
        # Expose blocks as attributes by name (sequence.photometry etc.).
        # NOTE(review): a missing name raises KeyError, not AttributeError
        # -- confirm no caller relies on hasattr()-style probing.
        return self.blocks_dict[item]

    @property
    def blocks(self):
        # Blocks in insertion order.
        return list(self.blocks_dict.values())

    @blocks.setter
    def blocks(self, blocks):
        # Unnamed blocks get positional keys "block0", "block1", ...
        self.blocks_dict = OrderedDict({
            block.name if block.name is not None else "block{}".format(i): block
            for i, block in enumerate(blocks)
        })

    def run(self, show_progress=True):
        """Run every block over every image, in order.

        :param show_progress: wrap the image iteration in a tqdm bar
        :raises ValueError: when there are no images to process
        """
        if show_progress:
            progress = lambda x: tqdm(
                x,
                desc=self.name,
                unit="images",
                ncols=80,
                bar_format=TQDM_BAR_FORMAT,
            )
        else:
            progress = lambda x: x

        if isinstance(self.files_or_images, list):
            if len(self.files_or_images) == 0:
                raise ValueError("No images to process")
        elif self.files_or_images is None:
            raise ValueError("No images to process")

        # initialization
        for block in self.blocks:
            block.set_unit_data(self.data)
            block.initialize()

        self.n_processed_images = 0

        # run
        for i, file_or_image in enumerate(progress(self.files_or_images)):
            # Paths are loaded lazily; Image objects pass straight through.
            if isinstance(file_or_image, (str, Path)):
                image = self.loader(file_or_image)
            else:
                image = file_or_image
            image.i = i
            self._last_image = image
            discard_message = False
            last_block = None

            for b, block in enumerate(self.blocks):
                # This allows to discard image in any Block
                if not image.discard:
                    block._run(image)
                # except:
                #     # TODO
                #     if not last_block is None:
                #         print(f"{type(last_block).__name__} failed")
                elif not discard_message:
                    # Report (once per image) which block discarded it.
                    last_block = self.blocks[b-1]
                    discard_message = True
                    print(f"Warning: image {i} discarded in {type(last_block).__name__}")

            del image
            self.n_processed_images += 1

        # terminate
        for block in self.blocks:
            block.terminate()

    def __str__(self):
        # Timing table; NOTE(review): divides by total processing_time,
        # which is zero before any run -- confirm __str__ is only used
        # after run().
        rows = [[
            block.name, block.__class__.__name__, f"{block.processing_time:.3f} s ({(block.processing_time/self.processing_time)*100:.0f}%)"]
            for block in self.blocks
        ]
        headers = ["name", "type", "processing"]
        return tabulate(rows, headers, tablefmt="fancy_grid")

    def citations(self):
        """Collect the non-None citations of all blocks (or None)."""
        citations = [block.citations() for block in self.blocks if block.citations() is not None]
        return citations if len(citations) > 0 else None

    def insert_before(self, before, block):
        # Not implemented yet.
        pass

    @property
    def processing_time(self):
        """Total processing time summed over all blocks."""
        return np.sum([block.processing_time for block in self.blocks])
|
# %%
import numpy as np
import pandas as pd
import gurobipy as gp
from gurobipy import GRB
import matplotlib.pyplot as plt
# Global variables (system sizes and component parameters):
PV_ARRAY_SIZE_KW = 660 # kWAC rating of the PV array
DIESEL_GEN_SIZE_KW = 1000 # kWAC rating of the diesel generator
# Diesel fuel consumption coefficients from https://ieeexplore.ieee.org/document/8494571
DIESEL_FUEL_CONS_A = 0.246 # Liters per kWh
DIESEL_FUEL_CONS_B = 0.08415 # Liters per kW (rating)
STORAGE_DURATION = 8 # Hours of storage duration at maximum power
ESS_EFF_DISCHG = 0.95 # Efficiency of discharging ESS
ESS_EFF_CHG = 0.95 # Efficiency of charging ESS
#%% Obtain aggregate load
# NOTE: Must run resi_data.py, building_data.py to obtain the following CSV files!
residf = pd.read_csv('resi_load.csv', index_col=0)
bldgdf = pd.read_csv('bldg_load.csv', index_col=0)
residf = residf * 10 # 18 * 10 = 180 homes!
residf_total = residf.sum(axis=1)
loaddf = pd.concat([residf, bldgdf], axis=1)
#Downscale hospital, school, market by 0.25
# NOTE(review): supermarket is multiplied by 0 (removed entirely), not by
# 0.25 as the comment above suggests -- confirm this is intended.
loaddf.hospital = loaddf.hospital * 0.25
loaddf.school = loaddf.school * 0.25
loaddf.supermarket = loaddf.supermarket * 0
agg_load = loaddf.sum(axis=1)
## Agg load stats:
# count 35040.000000
# mean 249.675353
# std 163.964776
# min 39.648000
# 25% 98.862156
# 50% 211.096125
# 75% 398.983500
# max 747.593000
#%% Obtain aggregate PV
# NOTE: Must run pv_data.py to obtain the following CSV file!
pvdf = pd.read_csv('pv_gen.csv', index_col=0)
# Upscale to PV array kWAC rating
pvdf = pvdf * PV_ARRAY_SIZE_KW/pvdf.gen.max()
# Agg pv stats:
# count 35040.000000
# mean 80.878783
# std 119.749443
# min 0.000000
# 25% 0.000000
# 50% 0.514689
# 75% 146.658497
# max 420.000000
#%%
''' ~.~.~.~ optimization time ~.~.~.~ '''
# randomly (or not so randomly) select 7-day intervals to optimize the dispatch
# first do for a set ESS size (500 kW, 950 kWh as in BLR Microgrid)
# then make the ESS size a part of the function!
# Constrain storage size to [min, max] and similarly power
# Then find the optimum, and then find the closest "round" value and present those power flows
# then add in degradation penalty
load = agg_load.to_numpy()
pv = pvdf.to_numpy()
# 15-minute intervals: 4 per hour * 24 h * 7 days.
week_len = 4*24*7
# week_start = 0
week_start = 30 * week_len
week_end = week_start + week_len
# Try a different week in the year??
ld_wk1 = load[week_start:week_end]
pv_wk1 = pv[week_start:week_end]
# Fraction of the hour
h = 15/60
#%%
plt.plot(ld_wk1)
plt.plot(pv_wk1)
#%%
# Create a new model
m = gp.Model('microgrid')
# Create variables for:
# ESS nominal energy and power
# Assume a four-hour system
# E_nom = 1500 # kWh
# P_nom = 500 # kW
P_nom = m.addMVar(1, lb=200, ub=2000, vtype=GRB.CONTINUOUS, name='P_nom')
E_nom = m.addMVar(1, vtype=GRB.CONTINUOUS, name='E_nom')
# each power flow
# format: to_from
pv_ess = m.addMVar(week_len, lb=0, vtype=GRB.CONTINUOUS, name='pv_ess')
pv_load = m.addMVar(week_len, lb=0, vtype=GRB.CONTINUOUS, name='pv_load')
pv_curtail = m.addMVar(week_len, lb=0, vtype=GRB.CONTINUOUS, name='pv_curtail')
ess_load = m.addMVar(week_len, lb=0, vtype=GRB.CONTINUOUS, name='ess_load')
dg_ess = m.addMVar(week_len, lb=0, vtype=GRB.CONTINUOUS, name='dg_ess')
dg_load = m.addMVar(week_len, lb=0, vtype=GRB.CONTINUOUS, name='dg_load')
load_curtail = m.addMVar(week_len, lb=0, vtype=GRB.CONTINUOUS, name='load_curtail')
ess_c = m.addMVar(week_len, lb=0, vtype=GRB.CONTINUOUS, name='ess_c')
ess_d = m.addMVar(week_len, lb=0, vtype=GRB.CONTINUOUS, name='ess_d')
dg = m.addMVar(week_len, lb=0, vtype=GRB.CONTINUOUS, name='dg')
E = m.addMVar(week_len, lb=0, vtype=GRB.CONTINUOUS, name='E')
# # Decision variable to discharge (1) or charge (0)
# dischg = m.addVars(week_len, vtype=GRB.BINARY, name='dischg')
# Initial state of charge is 50%; energy capacity tied to power rating.
m.addConstr(E[0] == 0.5 * E_nom)
m.addConstr(E_nom == STORAGE_DURATION*P_nom)
for t in range(week_len):
    # Power flow constraints
    m.addConstr(pv_wk1[t] == pv_ess[t] + pv_load[t] + pv_curtail[t])
    m.addConstr(ld_wk1[t] == ess_load[t] + pv_load[t] + load_curtail[t] + dg_load[t])
    m.addConstr(dg[t] == dg_load[t])
    m.addConstr(ess_c[t] == pv_ess[t])
    # m.addConstr(ess_c[t] == pv_ess[t] + dg_ess[t]) # uncomment to allow ESS to charge off of DG
    m.addConstr(ess_d[t] == ess_load[t])
    # ESS power constraints
    m.addConstr(ess_c[t] <= P_nom)
    m.addConstr(ess_d[t] <= P_nom)
    m.addConstr(E[t] <= E_nom)
    # Time evolution of stored energy
    # NOTE(review): conventional SoC models divide the discharge power by
    # the discharge efficiency; here it is multiplied -- confirm intended.
    if t > 0:
        m.addConstr(E[t] == h*(ESS_EFF_CHG*ess_c[t-1] - ESS_EFF_DISCHG*ess_d[t-1]) + E[t-1])
# Cost of fuel
#Ensure non-simultaneous charge and discharge aka why I downloaded Gurobi
# (bilinear complementarity constraint -- makes the model nonconvex QCP).
m.addConstrs(0 == ess_d[i] @ ess_c[i] for i in range(week_len))
# TODO: Turn this into an explicit multi-objective problem via setObjectiveN
m.setObjective(h*DIESEL_FUEL_CONS_A*dg.sum() + load_curtail.sum() + P_nom, GRB.MINIMIZE)
# m.setObjective(h*DIESEL_FUEL_CONS_A*dg.sum() + load_curtail.sum() + P_nom + 0.00005*(ess_c@ess_c) + 0.00005*(ess_d@ess_d), GRB.MINIMIZE)
# m.setObjective(load_curtail.sum(), GRB.MINIMIZE)
# Maybe cannot use setObjectiveN because it only takes in linear objectives!
# m.setObjectiveN(load_curtail.sum(), 0, 3)
# m.setObjectiveN(h*DIESEL_FUEL_CONS_A*dg.sum(), 1, 2)
# m.setObjectiveN(P_nom, 2, 1)
# m.setObjectiveN((ess_c@ess_c), 3, 0)
#%% Solve the optimization
m.optimize()
# #%% Get objective final value
# m.getObjective().getValue()
#%% Plot data!
# x axis in days across the optimized week.
xvals = np.linspace(0,7,week_len)
# NOTE: each cell plots first and then calls plt.figure(...), which opens a
# fresh blank figure for the NEXT cell -- a quirk of this notebook workflow.
#%% ESS power flow
plt.plot(xvals, -ess_c.getAttr('x'))
plt.plot(xvals, ess_d.getAttr('x'))
plt.legend(['Charge', 'Discharge'], bbox_to_anchor=(1.3, 0.6))
plt.xlabel('Time')
plt.ylabel('Power (kW)')
plt.grid()
# plt.title('ESS Power (Discharge positive)')
plt.figure(figsize=(10,10))
#%% DG power flow
plt.plot(xvals, dg_ess.getAttr('x'))
plt.plot(xvals, dg_load.getAttr('x'))
plt.legend(['ESS', 'Load'], bbox_to_anchor=(1.35, 0.6))
plt.xlabel('Time')
plt.ylabel('Power (kW)')
plt.grid()
plt.title('Diesel Power by End User')
plt.figure(figsize=(10,10))
#%% PV power flow
plt.plot(xvals, pv_load.getAttr('x'))
plt.plot(xvals, pv_ess.getAttr('x'))
plt.plot(xvals, pv_curtail.getAttr('x'))
plt.legend(['Load', 'ESS', 'Curtailed'], bbox_to_anchor=(1.3, 0.6))
plt.xlabel('Days')
plt.ylabel('Power (kW)')
plt.grid()
# plt.title('PV Power by End User')
plt.figure(figsize=(10,10))
#%% Load power flow
plt.plot(xvals, dg_load.getAttr('x'))
plt.plot(xvals, pv_load.getAttr('x'))
plt.plot(xvals, ess_load.getAttr('x'))
plt.legend(['Diesel', 'PV', 'ESS'], bbox_to_anchor=(1.35, 0.6))
plt.xlabel('Time')
plt.ylabel('Power (kW)')
plt.grid()
plt.title('Load Power by Source')
plt.figure(figsize=(10,10))
# %% plot all relevant quantities on one plot!
plt.plot(xvals, ld_wk1)
plt.plot(xvals, pv_wk1)
plt.plot(xvals, dg.getAttr('x'))
plt.plot(xvals, ess_d.getAttr('x') - ess_c.getAttr('x'))
plt.legend(['Load', 'PV', 'DG', 'ESS'], bbox_to_anchor=(1.25, 0.6))
plt.xlabel('Days')
plt.ylabel('Power (kW)')
plt.grid()
# plt.title('Power Flow Summary')
plt.figure(figsize=(10,10))
# %% plot all relevant quantities on one plot!
plt.plot(xvals, pv_wk1)
plt.plot(xvals, dg.getAttr('x'))
plt.plot(xvals, ess_d.getAttr('x') - ess_c.getAttr('x'))
plt.legend(['PV', 'DG', 'ESS'], bbox_to_anchor=(1.35, 0.6))
plt.xlabel('Time')
plt.ylabel('Power (kW)')
plt.grid()
# plt.title('Power Generation Summary')
plt.figure(figsize=(10,10))
# %%
# Stored-energy trajectory of the ESS over the week.
plt.plot(xvals , E.getAttr('x'))
plt.xlabel('Days')
plt.ylabel('Energy (kWh)')
plt.grid()
# plt.title('ESS Stored Energy Summary')
plt.figure(figsize=(10,10))
# %%
# Unserved load over the week.
plt.plot(xvals, load_curtail.getAttr('x'))
# %%
|
<gh_stars>0
from flask import Flask, request, Response, abort
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
import os
import json
import pytz
import iso8601
import requests
import logging
app = Flask(__name__)
logger = None
base_url = "https://consumption.azure.com/"
def datetime_format(dt):
    """Format *dt* as an ISO-8601 UTC string with an explicitly
    zero-padded 4-digit year (the year is formatted separately because
    strftime's year padding is platform-dependent for years < 1000)."""
    return f"{dt.year:04d}" + dt.strftime("-%m-%dT%H:%M:%SZ")
def to_transit_datetime(dt_int):
    """Encode a datetime as a transit-encoded string: the "~t" prefix
    followed by an ISO-8601 UTC timestamp with a zero-padded year."""
    iso_stamp = "%04d" % dt_int.year + dt_int.strftime("-%m-%dT%H:%M:%SZ")
    return "~t" + iso_stamp
class DataAccess:
    """Pulls billing entities from the Azure EA consumption API, walking
    billing periods newest-to-oldest and following paged results."""

    def __init__(self):
        # Known datatypes; also acts as the whitelist checked in get_entities.
        self._entities = {"balancesummary": [], "usagedetails": [], "marketplacecharges": [], "billingperiods": [], "reservationcharges": [], "reservationdetails": []}

    def get_entities(self, since, datatype, jwt_token, enrollment_number):
        """Validate *datatype* against the whitelist (404 otherwise) and
        delegate to get_entitiesdata."""
        if not datatype in self._entities:
            abort(404)
        return self.get_entitiesdata(datatype, since, jwt_token, enrollment_number)

    def get_entitiesdata(self, datatype, since, jwt_token, enrollment_number):
        """Fetch all entities of *datatype* for the enrollment, starting at
        the ISO date *since* (or from the oldest billing period when None).

        Billing periods are iterated oldest-first (period_nr counts down);
        each matching period's data is fetched, following nextLink paging,
        and every entity gets Sesam-style _id/_updated bookkeeping fields.
        """
        # if datatype in self._entities:
        #     if len(self._entities[datatype]) > 0 and self._entities[datatype][0]["_updated"] > "%sZ" % (datetime.now() - timedelta(hours=12)).isoformat():
        #         return self._entities[datatype]
        entities = []
        end = datetime.now(pytz.UTC).date()
        url = "%sv2/enrollments/%s/billingperiods" % (base_url, enrollment_number)
        logger.info("Getting %s entities by %s" % (datatype, url))
        response = requests.get(url, headers={'Authorization': "Bearer %s" % jwt_token})
        logger.debug("Got result: %s" % (response.json()))
        periods = response.json()
        if "error" in periods:
            logger.error("Error from billing service: %s" % (periods["error"]["message"]))
            abort(int(periods["error"]["code"]), periods["error"]["message"])
        period_nr = len(periods) -1
        while period_nr >= 0:
            logger.info("Processing period %s: %s" % (period_nr, periods[period_nr]))
            try:
                period_start = iso8601.parse_date(periods[period_nr]["billingStart"]).date()
            except:
                # NOTE(review): bare except -- on a parse failure execution
                # continues with period_start stale (or unbound on the very
                # first iteration, causing a NameError below); confirm.
                logger.error("Cant parse date %s" % (periods[period_nr]["billingStart"]))
            if since is None:
                start = period_start
            else:
                start = iso8601.parse_date(since).date()
            if len(entities) == 0:
                # First batch: cap the window at two months past this period.
                end = period_start + relativedelta(months=+2)
            if start <= period_start:
                more = True
                url = ""
                # Pick the datatype-specific endpoint to page through.
                if datatype in ["usagedetails"]:
                    # NOTE(review): "> ''" is a lexicographic non-empty test.
                    if periods[period_nr]["usageDetails"] > "":
                        url = "%s%s" % (base_url,periods[period_nr]["usageDetails"])
                        logger.info("Getting %s entities - from %s to %s" % (datatype, start, end))
                elif datatype in ["reservationcharges"]:
                    url = "%sv3/enrollments/%s/%sbycustomdate?startTime=%s&endTime=%s" % (base_url,enrollment_number,datatype,start,end)
                    logger.info("Getting %s entities - from %s to %s" % (datatype, start, end))
                else:
                    url = "%sv2/enrollments/%s/%s" % (base_url, enrollment_number, datatype)
                    logger.info("Getting %s entities by %s" % (datatype, url))
                while more and url != "":
                    response = requests.get(url, headers={'Authorization': "Bearer %s" % jwt_token})
                    logger.info("Got result code %s" % (response))
                    # NOTE(review): unbounded retry on status > 400 (and 400
                    # itself is NOT retried) -- a persistent 401/500 loops
                    # forever; confirm intended.
                    while response.status_code > 400:
                        logger.info("Retry url: %s for better result..." % (url))
                        response = requests.get(url, headers={'Authorization': "Bearer %s" % jwt_token})
                        logger.info("Got result code %s" % (response))
                    result = response.json()
                    # Follow server-side paging until nextLink runs out.
                    if "nextLink" in result and result["nextLink"] is not None:
                        url = result["nextLink"]
                    else:
                        more = False
                    if datatype in ["usagedetails", "reservationcharges", "reservationdetails"]:
                        if "data" in result:
                            if datatype == "usagedetails":
                                for e in result["data"]:
                                    # Synthetic unique id from meter, date and instance.
                                    e.update({"_id": e["meterId"] + "-" + e["date"] + e["instanceId"].replace('/','-')})
                                    e.update({"billingPeriodId": "%s" % periods[period_nr]["billingPeriodId"]})
                                    e.update({"_updated": "%s" % period_start})
                                    if "date" in e:
                                        e.update({"date": "%s" % to_transit_datetime(iso8601.parse_date(e["date"]))})
                                    entities.append(e)
                                if period_start >= end:
                                    break
                            if datatype == "reservationcharges":
                                for e in result["data"]:
                                    e.update({"_id": e["reservationOrderId"] + "-" + e["eventDate"] + e["eventDate"].replace('/', '-')})
                                    if "eventDate" in e:
                                        e.update({"_updated": "%s" % e["eventDate"]})
                                        e.update({"eventDate": "%s" % to_transit_datetime(iso8601.parse_date(e["eventDate"]))})
                                    entities.append(e)
                            if datatype == "reservationdetails":
                                for e in result["data"]:
                                    e.update({"_id": e["reservationId"] + "-" + e["usageDate"] + e["instanceId"].replace('/', '-')})
                                    if "eventDate" in e:
                                        e.update({"_updated": "%s" % e["eventDate"]})
                                        e.update({"eventDate": "%s" % to_transit_datetime(iso8601.parse_date(e["eventDate"]))})
                                    entities.append(e)
                        logger.info("Gotten %s entities of type %s" % (len(entities), datatype))
                    if datatype == "billingperiods":
                        for e in result:
                            # Compose id from enrollment + period; rewrite link
                            # fields into absolute URLs and transit-encode dates.
                            e.update({"_id": "%s-%s" % (enrollment_number, e["billingPeriodId"])})
                            e.update({"_updated": "%s" % e["billingEnd"]})
                            e.update({"billingEnd": "%s" % to_transit_datetime(iso8601.parse_date(e["billingEnd"]))})
                            e.update({"billingStart": "%s" % to_transit_datetime(iso8601.parse_date(e["billingStart"]))})
                            e.update({"balanceSummary": "%s%s" % (base_url,e["balanceSummary"])})
                            e.update({"usageDetails": "%s%s" % (base_url, e["usageDetails"])})
                            e.update({"marketplaceCharges": "%s%s" % (base_url, e["marketplaceCharges"])})
                            e.update({"priceSheet": "%s%s" % (base_url, e["priceSheet"])})
                            #response = requests.get("https://consumption.azure.com/v2/enrollments/68450484/billingperiods/201803/usagedetails", headers={'Authorization': "Bearer %s" % jwt_token})
                            #result = response.json()
                            #e.update({"price": result})
                            entities.append(e)
                        logger.info("Gotten %s entities of type %s" % (len(entities), datatype))
                        if period_start >= end:
                            break
            period_nr -= 1
            # Stop once periods pass the end of the collection window.
            if period_start >= end:
                break
        return entities
# Module-level singleton used by the route handler below.
data_access_layer = DataAccess()
def get_var(var, default = None):
    """Resolve configuration value *var*: environment variable (upper-cased
    name) first, then the request query string, else *default*.

    NOTE(review): evaluating ``request`` outside a request context raises
    RuntimeError -- confirm this is only called from route handlers.
    """
    envvar = default
    if var.upper() in os.environ:
        envvar = os.environ[var.upper()]
    elif request:
        envvar = request.args.get(var)
    # Fixed: ``logger`` is only configured under __main__; when this module
    # is imported by a WSGI server it is still None and .debug() would
    # raise AttributeError on every lookup.
    if logger is not None:
        logger.debug("Setting %s = %s" % (var, envvar))
    return envvar
@app.route('/<datatype>', methods=['GET'])
def get_entities(datatype):
    """GET /<datatype>: stream all entities of the given type as JSON.

    Credentials and window come from env vars or query parameters.
    """
    since = get_var('since')
    jwt_token = get_var('jwt_token')
    enrollment_number = get_var('enrollment_number')
    ent = data_access_layer.get_entities(since, datatype, jwt_token, enrollment_number)
    #entities = sorted(ent, key=lambda k: k["_updated"])
    return Response(json.dumps(ent), mimetype='application/json')
if __name__ == '__main__':
    # Set up logging
    # NOTE(review): the module-global ``logger`` is only assigned here, so
    # it stays None when the app is served by a WSGI server -- confirm the
    # deployment always runs this entry point.
    format_string = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logger = logging.getLogger('azure-billing-microservice')
    # Log to stdout
    stdout_handler = logging.StreamHandler()
    stdout_handler.setFormatter(logging.Formatter(format_string))
    logger.addHandler(stdout_handler)
    logger.setLevel(logging.DEBUG)
    app.run(debug=False, host='0.0.0.0', port=5000)
|
<gh_stars>0
#!/usr/bin/env python
try:
import sys
import abc
from pygame_cards import game_object, card, card_sprite, card_holder, animation
except ImportError as err:
print("Fail loading a module in file:", __file__, "\n", err)
sys.exit(2)
class Controller(object, metaclass=abc.ABCMeta):
    """ Abstract interface class that controls game logic and handles user events.
    Should be inherited by concrete game controller classes.

    Following methods are mandatory for all classes that derive from Controller:
        - start_game()
        - process_mouse_event()

    Also these methods are not mandatory, but it can be helpful to define them:
        - execute_game()
        - restart_game()
        - cleanup()

    These methods are called from the high level GameApp class. See details about
    each method below. Other auxiliary methods can be added if needed and called
    from the mandatory methods.
    """

    def __init__(self, objects_list=None, gui_interface=None, settings_json=None):
        """
        Initializes Controller object.
        :param objects_list: list of game objects that should be rendered
        :param gui_interface: gui interface object
        :param settings_json: parsed settings.json dict (used e.g. for the
            default card move speed in animate_cards())
        """
        self.rendered_objects = []
        self.animations = []
        # Only a real list is accepted; any other type is silently ignored
        # and the controller starts with an empty render list.
        if objects_list is not None and isinstance(objects_list, list):
            self.rendered_objects = objects_list
        self.gui_interface = gui_interface
        self.settings_json = settings_json
        self.started = False
        # Make this a color tuple to override game app's background_color.
        self.background_color = None

    @abc.abstractmethod
    def start_game(self):
        """ Put game initialization code here.
        For example: dealing of cards, initialization of game timer etc.
        This method is triggered by GameApp.execute().
        """
        pass

    @abc.abstractmethod
    def process_mouse_event(self, pos, down, double_click):
        """ Put code that handles mouse events here. For example: grab card from a deck on mouse
        down event, drop card to a pile on mouse up event etc.
        This method is called every time mouse event is detected.
        :param pos: tuple with mouse coordinates (x, y)
        :param down: boolean, True for mouse down event, False for mouse up event
        :param double_click: boolean, True if it's a double click event
        """
        pass

    def execute_game(self):
        """ This method is called in an endless loop started by GameApp.execute().
        IMPORTANT: do not put any "heavy" computations in this method! It is executed frequently in
        an endless loop during the app runtime, so any "heavy" code will slow down the performance.
        If you don't need to check something at every moment of the game, do not define this method.
        Possible things to do in this method:
            - Check game state conditions (game over, win etc.)
            - Run bot (virtual player) actions
            - Check timers etc.
        """
        pass

    def restart_game(self):
        """ Put code that cleans up any current game progress and starts the game from scratch.
        start_game() method can be called here to avoid code duplication.
        For example this method can be used after game over or as a handler of "Restart" button.
        """
        pass

    def cleanup(self):
        """ Called when user closes the app.
        Add destruction of all objects, storing of game progress to a file etc. to this method.
        """
        pass

    def render_objects(self, screen):
        """ Renders game objects and advances active animations.
        :param screen: Screen to render objects on.
        """
        if self.rendered_objects is not None:
            for obj in self.rendered_objects:
                if isinstance(obj, game_object.GameObject):
                    obj.render_all(screen)
        # Advance all animations.
        # NOTE(review): the loop variable shadows the imported `animation`
        # module inside this method; harmless here since the module is not
        # referenced below, but easy to trip over when editing.
        for animation in self.animations:
            animation.update()
        # Clear completed animations.
        self.animations = [a for a in self.animations if not a.is_completed]

    def add_rendered_object(self, obj):
        """ Adds object(s) to the list of objects to be rendered by the Controller.
        :param obj: an instance of GameObject (or derived class), or a tuple of
            such instances. Note: a plain list is NOT unpacked by this method.
        """
        if self.rendered_objects is None:
            self.rendered_objects = []
        if isinstance(obj, tuple):
            self.rendered_objects.extend(obj)
        elif isinstance(obj, game_object.GameObject):
            self.rendered_objects.append(obj)

    def remove_rendered_object(self, obj):
        """ Removes an object from the list of rendered_objects.
        Raises ValueError if the object is not in the list.
        :param obj: Rendered object to remove.
        """
        self.rendered_objects.remove(obj)

    def add_animation(self, animation):
        """Adds an animation to the list of active animations in the controller.
        Animations are advanced (and pruned once complete) by render_objects().
        :param animation: The animation to add.
        """
        self.animations.append(animation)

    def animate_cards(self, cards, end_pos, speed=None, plotter_fn=None,
                      on_complete=None):
        """Run an animation for a list of Cards or CardsHolder that sends
        them to the given end position in the given amount of time.

        :param cards: Either a list of Cards or a CardsHolder to be animated.
        :param end_pos: Position (x,y) tuple representing where the cards
            should end up.
        :param speed: Speed in pixels / second. If not given, uses
            card.move_speed from settings.json.
        :param plotter_fn: (Optional) lambda(start_pos, end_pos, duration_ms)
            that returns a Plotter object. The plotter's responsibility is to
            determine the position of the cards at any point during the
            animation. If not given, a simple LinearPlotter will be used
            (straight line travel, constant speed.)
        :param on_complete: (Optional) lambda(CardsHolder) called when
            animation is over; receives the original holder passed in, or, if
            a list of cards was passed, the temporary holder containing them.
        """
        self_ = self
        holder = cards
        # A bare list of cards has no position of its own, so wrap it in a
        # temporary holder for the duration of the animation.
        temp_holder_required = not isinstance(cards, card_holder.CardsHolder)
        if temp_holder_required:
            if len(cards) == 0: return
            holder = card_holder.StaticOffsetCardsHolder(pos=cards[0].get_pos())
            for card_ in cards:
                holder.add_card(card_)
            self.add_rendered_object(holder)
        def callback():
            # Runs when the animation completes (or immediately for a
            # zero-duration move); also cleans up the temporary holder.
            if on_complete: on_complete(holder)
            if temp_holder_required: self_.remove_rendered_object(holder)
        if speed is None:
            speed = self.settings_json["card"]["move_speed"]
        start_pos = holder.pos
        duration_ms = animation.expected_duration_ms(start_pos, end_pos, speed)
        if duration_ms > 0:
            # Derive plotter.
            plotter = None
            if plotter_fn is None:
                plotter = animation.LinearPlotter(start_pos, end_pos, duration_ms)
            else:
                plotter = plotter_fn(start_pos, end_pos, duration_ms)
            animation_ = animation.CardsHolderAnimation(holder, plotter, callback)
            self.add_animation(animation_)
        else:
            # Short-circuit; don't create animation for 0 ms; just invoke
            # the callback immediately.
            callback()
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Dict, List, Optional, Any
import torch, pdb, math
from torch import nn
from torch.nn import functional as F
import torch.utils.checkpoint as checkpoint
from detectron2.config import CfgNode
from detectron2.layers import Conv2d
from densepose.layers import sparse_conv_with_kaiming_uniform
import spconv
from ..utils import initialize_module_params
from .registry import ROI_DENSEPOSE_HEAD_REGISTRY
# class SparseGN(nn.Module):
# def __init__(self, num_groups, out_channels):
# super(SparseGN, self).__init__()
# self.gn = nn.GroupNorm(num_groups, out_channels)
# def forward(self, x: torch.tensor, batch_size: int, H: int, W: int,
# indices: torch.tensor)->spconv.SparseConvTensor:
# dim = x.shape[1]
# batch_indices = indices[:,:1].expand_as(x)
# out_batch = []
# for i in range(self.batch_size):
# pdb.set_trace()
# out = self.gn(x[batch_indices==i].reshape([1,dim,-1]))
# out_batch.append(out.reshape([-1,dim]))
# return spconv.SparseConvTensor(torch.cat(out_batch, dim=0), indices, (H,W), batch_size)
"TODO"
import spconv
class SubMConv2dInsDilate(spconv.conv.SubMConv2d):
    """Submanifold 2-D sparse convolution with per-instance dilation.

    The parent conv is constructed with dilation=1; in forward() each
    instance id in `ins_ids` gets its own effective dilation, scaled by the
    size of that instance's bounding box relative to the full spatial extent
    and capped by `dilation_max`. Index pairs are cached per
    (instance, dilation_max) key in the input tensor's indice_dict so they
    are computed only once per instance.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation_max=1,
                 groups=1,
                 bias=True,
                 algo=spconv.ops.ConvAlgo.Native):
        """
        :param dilation_max: upper bound for the per-instance dilation rate;
            the actual rate per instance is derived in forward().
        Other parameters are forwarded to spconv.conv.SubMConv2d.
        """
        # Base class is configured with dilation=1; the instance-dependent
        # dilation is applied when building index pairs in forward().
        super(SubMConv2dInsDilate, self).__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=1,
            groups=groups,
            bias=bias,
            indice_key=None,
            use_hash=False,
            algo=algo)
        self.dilation_max = dilation_max

    def forward(self, input, ins_indices_batch, ins_ids):
        """Convolve each instance's active sites with its own dilation.

        :param input: spconv.SparseConvTensor holding all instances.
        :param ins_indices_batch: tensor mapping active sites of `input` to
            instance ids.  # assumes same length/order as input.indices -- TODO confirm
        :param ins_ids: iterable of instance ids to process.
        :return: new SparseConvTensor whose features/indices are the
            per-instance results concatenated in `ins_ids` order; the index
            cache and grid of `input` are propagated to it.
        """
        assert isinstance(input, spconv.SparseConvTensor)
        features = input.features
        device = features.device
        indices = input.indices
        spatial_shape = input.spatial_shape
        batch_size = input.batch_size
        # Submanifold convolution preserves the spatial shape.
        out_spatial_shape = spatial_shape
        features_list = []
        outids_list =[]
        for i in ins_ids:
            # One cached set of index pairs per instance/dilation budget.
            indice_key = "ins{}_dilatemax{}".format(i, self.dilation_max)
            if indice_key not in input.indice_dict:
                "Dilation depends on instance size"
                ins_indices = input.indices[(ins_indices_batch==i).nonzero()].squeeze(1)
                # Fraction of the spatial extent covered by this instance's
                # bounding box, per dimension (indices columns: batch, y, x).
                h_ratio = (ins_indices[:,1].max() - ins_indices[:,1].min()).float()/input.spatial_shape[0]
                w_ratio = (ins_indices[:,2].max() - ins_indices[:,2].min()).float()/input.spatial_shape[1]
                # Larger instances get larger dilation, capped by dilation_max.
                d = max(1, math.ceil(max(h_ratio, w_ratio)*self.dilation_max))
                outids, indice_pairs, indice_pair_num = spconv.ops.get_indice_pairs(
                    ins_indices,
                    input.batch_size,
                    input.spatial_shape,
                    ksize=self.kernel_size,
                    stride=self.stride,
                    padding=self.padding,
                    dilation=d,
                    out_padding=self.output_padding,
                    subm=self.subm,
                    transpose=self.transposed,
                    grid=input.grid,
                    use_hash=self.use_hash
                )
                input.indice_dict[indice_key] = (outids, input.indices,
                                                 indice_pairs,
                                                 indice_pair_num,
                                                 input.spatial_shape)
            else:
                # Reuse cached index pairs for this instance.
                datas = input.find_indice_pair(indice_key)
                outids, _, indice_pairs, indice_pair_num, _ = datas
            # Gather this instance's features and convolve them alone.
            feat = features[(ins_indices_batch==i).nonzero()].squeeze(1)
            feat = spconv.functional.indice_subm_conv(feat, self.weight,
                                                      indice_pairs.to(device),
                                                      indice_pair_num,
                                                      outids.shape[0], self.algo)
            features_list.append(feat)
            outids_list.append(outids)
        out_features = torch.cat(features_list, dim=0)
        outids = torch.cat(outids_list, dim=0)
        if self.bias is not None:
            out_features += self.bias
        out_tensor = spconv.SparseConvTensor(out_features, outids,
                                             out_spatial_shape, batch_size)
        # Propagate the (possibly extended) index cache and grid downstream.
        out_tensor.indice_dict = input.indice_dict
        out_tensor.grid = input.grid
        return out_tensor
class SparseInsGNBNIN(nn.Module):
    """Instance-aware normalization over the features of a sparse tensor.

    Normalizes each *instance*'s active sites independently (rather than each
    batch element), using GroupNorm, BatchNorm1d or InstanceNorm1d depending
    on `norm`. Features are modified in place.
    """

    def __init__(self, num_groups, out_channels, norm):
        """
        :param num_groups: GroupNorm group count (only used for "InsGN")
        :param out_channels: channel count C of the sparse features
        :param norm: one of "InsGN", "InsBN", "InsIN". NOTE(review): any
            other value leaves self.norm unset; forward() then silently
            skips every instance via the except clause below.
        """
        super(SparseInsGNBNIN, self).__init__()
        if norm=="InsGN":
            self.norm = nn.GroupNorm(num_groups, out_channels)
        elif norm=="InsBN":
            self.norm = nn.BatchNorm1d(out_channels)
        elif norm=="InsIN":
            self.norm = nn.InstanceNorm1d(out_channels)

    # def forward(self, x: spconv.SparseConvTensor, ins_indices_batch: torch.Tensor, ins_ids: Any, ins_indices_len):
    def forward(self, x: spconv.SparseConvTensor, ins_indices_batch: torch.Tensor, ins_ids: Any):
        """Normalize the features of every instance in `ins_ids`, in place."""
        N, C = x.features.shape
        out_batch = []
        for i in ins_ids:
            try:
                # Gather this instance's features as (1, C, n_sites), apply the
                # 1-d norm, then scatter the result back into x.features.
                out = self.norm(x.features[(ins_indices_batch==i).nonzero(),].reshape([-1,1,C]).permute([1,2,0])) ## HWxBxC -> BxCxHW
                x.features[(ins_indices_batch==i).nonzero(),] = out.permute([2,0,1]) #.reshape(-1)
            except Exception as e:
                # NOTE(review): deliberately best-effort -- instances that fail
                # to normalize (e.g. empty selections, or BN over too few
                # sites) are left untouched; errors are swallowed silently.
                pass
        return x
class SparseGNBN(nn.Module):
    """Group/Batch normalization over the features of a sparse tensor.

    Normalizes each batch element's active sites independently by reshaping
    its features to a dense (1, C, n_sites) tensor, applying the wrapped norm
    layer, and writing the result back in place. Returns the (mutated) input.
    """

    def __init__(self, num_groups, out_channels, norm):
        """
        :param num_groups: number of groups for GroupNorm (ignored for "BN")
        :param out_channels: channel count C of the sparse features
        :param norm: "GN" or "BN", selecting the normalization layer
        """
        super(SparseGNBN, self).__init__()
        if norm == "GN":
            self.norm = nn.GroupNorm(num_groups, out_channels)
        elif norm == "BN":
            self.norm = nn.BatchNorm1d(out_channels)

    def forward(self, x: "spconv.SparseConvTensor"):
        N, C = x.features.shape
        # Column 0 of indices is the batch index; broadcast it across channels
        # so it can serve as a boolean mask over the flat feature storage.
        batch_indices = x.indices[:, :1].expand_as(x.features)
        for i in range(x.batch_size):
            # (n_i*C,) -> (n_i, 1, C) -> (1, C, n_i) for the 1-d norm layers.
            out = self.norm(x.features[batch_indices == i].reshape([-1, 1, C]).permute([1, 2, 0]))
            # BUG FIX: the original indexed with the undefined name
            # `ins_indices_batch` (NameError on every call); the correct mask
            # is `batch_indices`, computed above.
            x.features[batch_indices == i] = out.permute([2, 0, 1]).reshape(-1)
        return x
class SparseReLU(nn.Module):
    """Applies a ReLU to the feature matrix of a sparse tensor, leaving its
    indices and spatial metadata untouched."""

    def __init__(self, inplace=True):
        """:param inplace: forwarded to F.relu; mutates the features buffer."""
        super(SparseReLU, self).__init__()
        self.inplace = inplace

    def forward(self, x: spconv.SparseConvTensor):
        activated = F.relu(x.features, inplace=self.inplace)
        x.features = activated
        return x
# class ASPP(nn.Module):
# def __init__(self, in_channels, atrous_rates, out_channels):
# super(ASPP, self).__init__()
# modules = []
# modules.append(
# nn.Sequential(
# nn.Conv2d(in_channels, out_channels, 1, bias=False),
# nn.GroupNorm(32, out_channels),
# nn.ReLU(),
# )
# )
# rate1, rate2, rate3 = tuple(atrous_rates)
# modules.append(ASPPConv(in_channels, out_channels, rate1))
# modules.append(ASPPConv(in_channels, out_channels, rate2))
# modules.append(ASPPConv(in_channels, out_channels, rate3))
# modules.append(ASPPPooling(in_channels, out_channels))
# self.convs = nn.ModuleList(modules)
# self.project = nn.Sequential(
# nn.Conv2d(len(self.convs) * out_channels, out_channels, 1, bias=False),
# # nn.BatchNorm2d(out_channels),
# nn.ReLU()
# # nn.Dropout(0.5)
# )
# def forward(self, x):
# res = []
# for conv in self.convs:
# res.append(conv(x))
# res = torch.cat(res, dim=1)
# return self.project(res)
class SubMConv2dASPP(nn.Module):
    """Sparse ASPP (Atrous Spatial Pyramid Pooling) block.

    Three parallel submanifold conv branches with the dilations in
    `atrous_rates`, each followed by instance-aware normalization + ReLU, are
    concatenated with the input along channels and fused by a final
    dilation-1 submanifold conv.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 norm="InsIN",
                 stride=1,
                 padding=0,
                 atrous_rates=[1,3,5],
                 groups=1,
                 bias=True,
                 algo=spconv.ops.ConvAlgo.Native):
        """
        :param norm: norm flavor forwarded to SparseInsGNBNIN
            ("InsGN"/"InsBN"/"InsIN")
        :param atrous_rates: dilation rate for each of the three branches
            NOTE(review): mutable default argument, shared across calls;
            harmless only because it is never mutated.
        Other parameters are forwarded to the spconv SubMConv2d layers.
        """
        super(SubMConv2dASPP, self).__init__()
        # Fusion conv consumes the channel-concat of input + 3 branches.
        self.fuse_conv = spconv.conv.SubMConv2d(
            in_channels+out_channels*3,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=1,
            bias=bias,
            algo=algo,
            indice_key="subm0",)
        # Branch 1: smallest dilation.
        self.aspp_conv1 = spconv.conv.SubMConv2d(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=atrous_rates[0],
            groups=groups,
            bias=bias,
            algo=algo,
            indice_key="subm0_aspp1",)
        self.aspp_norm1 = SparseInsGNBNIN(32, out_channels, norm)
        self.aspp_relu1 = SparseReLU()
        # Branch 2: middle dilation.
        self.aspp_conv2 = spconv.conv.SubMConv2d(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=atrous_rates[1],
            groups=groups,
            bias=bias,
            algo=algo,
            indice_key="subm0_aspp2")
        self.aspp_norm2 = SparseInsGNBNIN(32, out_channels, norm)
        self.aspp_relu2 = SparseReLU()
        # Branch 3: largest dilation.
        self.aspp_conv3 = spconv.conv.SubMConv2d(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=atrous_rates[2],
            groups=groups,
            bias=bias,
            algo=algo,
            indice_key="subm0_aspp3")
        self.aspp_norm3 = SparseInsGNBNIN(32, out_channels, norm)
        self.aspp_relu3 = SparseReLU()
        # Concatenates sparse tensors along the feature (channel) dimension.
        self.join_sparse = spconv.tables.JoinTable()

    def forward(self, input, ins_indices_batch, ins_ids):
        """Apply the three dilated branches and fuse with the input.

        :param input: spconv.SparseConvTensor
        :param ins_indices_batch: per-site instance ids (used by the
            instance-aware norms only; the convs here are ordinary SubMConv2d)
        :param ins_ids: iterable of instance ids to normalize
        :return: fused SparseConvTensor with out_channels features
        """
        assert isinstance(input, spconv.SparseConvTensor)
        aspp1 = self.aspp_relu1(self.aspp_norm1(self.aspp_conv1(input), ins_indices_batch, ins_ids))
        aspp2 = self.aspp_relu2(self.aspp_norm2(self.aspp_conv2(input), ins_indices_batch, ins_ids))
        aspp3 = self.aspp_relu3(self.aspp_norm3(self.aspp_conv3(input), ins_indices_batch, ins_ids))
        return self.fuse_conv(self.join_sparse([input,aspp1,aspp2,aspp3]))
class SubMConv2dInsDilateASPP(nn.Module):
    """Sparse ASPP block whose branches use *instance-adaptive* dilation.

    Same layout as SubMConv2dASPP, but each branch is a SubMConv2dInsDilate
    whose dilation budget (`dilation_max`) is taken from `atrous_rates`, so
    the effective rate additionally scales with instance size.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 norm="InsIN",
                 stride=1,
                 padding=0,
                 atrous_rates=[1,3,5],
                 groups=1,
                 bias=True,
                 algo=spconv.ops.ConvAlgo.Native):
        """
        :param norm: norm flavor forwarded to SparseInsGNBNIN
        :param atrous_rates: dilation_max for each of the three branches
            NOTE(review): mutable default argument, shared across calls;
            harmless only because it is never mutated.
        """
        super(SubMConv2dInsDilateASPP, self).__init__()
        # Fusion conv consumes the channel-concat of input + 3 branches.
        self.fuse_conv = spconv.conv.SubMConv2d(
            in_channels+out_channels*3,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=1,
            bias=bias,
            algo=algo,
            indice_key="subm0",)
        # Branch 1: smallest dilation budget.
        self.aspp_conv1 = SubMConv2dInsDilate(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation_max=atrous_rates[0],
            groups=groups,
            bias=bias,
            algo=algo)
        self.aspp_norm1 = SparseInsGNBNIN(32, out_channels, norm)
        self.aspp_relu1 = SparseReLU()
        # Branch 2: middle dilation budget.
        self.aspp_conv2 = SubMConv2dInsDilate(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation_max=atrous_rates[1],
            groups=groups,
            bias=bias,
            algo=algo)
        self.aspp_norm2 = SparseInsGNBNIN(32, out_channels, norm)
        self.aspp_relu2 = SparseReLU()
        # Branch 3: largest dilation budget.
        self.aspp_conv3 = SubMConv2dInsDilate(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation_max=atrous_rates[2],
            groups=groups,
            bias=bias,
            algo=algo)
        self.aspp_norm3 = SparseInsGNBNIN(32, out_channels, norm)
        self.aspp_relu3 = SparseReLU()
        # Concatenates sparse tensors along the feature (channel) dimension.
        self.join_sparse = spconv.tables.JoinTable()

    def forward(self, input, ins_indices_batch, ins_ids):
        """Apply the three instance-dilated branches and fuse with the input.

        Both the convs and the norms here are instance-aware, so the instance
        mapping is forwarded to each of them.
        """
        assert isinstance(input, spconv.SparseConvTensor)
        aspp1 = self.aspp_relu1(self.aspp_norm1(self.aspp_conv1(input, ins_indices_batch, ins_ids), ins_indices_batch, ins_ids))
        aspp2 = self.aspp_relu2(self.aspp_norm2(self.aspp_conv2(input, ins_indices_batch, ins_ids), ins_indices_batch, ins_ids))
        aspp3 = self.aspp_relu3(self.aspp_norm3(self.aspp_conv3(input, ins_indices_batch, ins_ids), ins_indices_batch, ins_ids))
        return self.fuse_conv(self.join_sparse([input,aspp1,aspp2,aspp3]))
class Conv1dWS(nn.Conv1d):
    """Conv1d with Weight Standardization.

    Before every forward pass the kernel weights are re-centered to zero mean
    and divided by their per-output-channel standard deviation (+1e-5), which
    stabilizes training when combined with GroupNorm (Qiao et al.,
    "Weight Standardization").
    """

    def forward(self, input):
        # Conv1d weight shape: (out_channels, in_channels // groups, kernel_size).
        weight = self.weight
        # Per-output-channel mean over the remaining two dimensions.
        weight_mean = weight.mean(dim=1, keepdim=True).mean(dim=2,
                                                            keepdim=True)
        weight = weight - weight_mean
        # BUG FIX: the original reshaped the std to (-1, 1, 1, 1) -- the
        # Conv2d shape -- which makes expand_as fail on the 3-D Conv1d weight.
        std = weight.view(weight.size(0), -1).std(dim=1).view(-1, 1, 1) + 1e-5
        weight = weight / std.expand_as(weight)
        if self.padding_mode != 'zeros':
            # BUG FIX: `_single` was never imported in this module (NameError
            # in this branch); the equivalent literal 1-tuple means "padding
            # already applied by F.pad above".
            return F.conv1d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
                            weight, self.bias, self.stride,
                            (0,), self.dilation, self.groups)
        return F.conv1d(input, weight, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)
class SparseECA(nn.Module):
    """Constructs an ECA (Efficient Channel Attention) module for sparse tensors.

    For each batch element: average its active sites into a channel
    descriptor, pass the descriptor through a tiny 1-D conv + sigmoid, and
    rescale that element's features by the resulting attention vector,
    in place.

    Args:
        channel: Number of channels of the input feature map (currently
            unused; kept for interface compatibility)
        k_size: Adaptive selection of kernel size for the 1-D conv
        use_weight_std: use the weight-standardized Conv1d variant
    """
    def __init__(self, channel, k_size=3, use_weight_std=False):
        super(SparseECA, self).__init__()
        if use_weight_std:
            self.conv = Conv1dWS(1, 1, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False)
        else:
            self.conv = nn.Conv1d(1, 1, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False)
        "TODO: change sigmoid to hard-swish?"
        self.activation = nn.Sigmoid()

    def forward(self, x: spconv.SparseConvTensor):
        """Rescale each batch element's features by its channel attention."""
        N, C = x.features.shape
        # Column 0 of indices is the batch index, broadcast across channels
        # to mask the flat feature storage.
        batch_indices = x.indices[:,:1].expand_as(x.features)
        out_batch = []
        for i in range(x.batch_size):
            # Global average pool over this element's active sites -> (1, 1, C).
            out = x.features[batch_indices==i].reshape([-1,1,C]).mean(dim=0).unsqueeze(dim=1) ## HWxBxC -> Bx1xC
            out_batch.append(out)
        # Channel attention weights, shape (B, C) after the squeeze.
        out_batch = self.activation(self.conv(torch.cat(out_batch, dim=0))).squeeze(dim=1) ## Bx1xC -> BxC
        for i in range(x.batch_size):
            # Rescale each element's features by its attention row, in place.
            x.features[batch_indices==i] = (x.features[batch_indices==i].reshape([-1,C]) * out_batch[i:i+1]).reshape(-1)
        return x
"TODO"
class SparseInsECA(nn.Module):
    """Instance-aware ECA (Efficient Channel Attention) for sparse tensors.

    Like SparseECA, but channel attention is computed and applied per
    *instance* (as given by ins_indices_batch / ins_ids) rather than per
    batch element.

    Args:
        channel: Number of channels of the input feature map (currently
            unused; kept for interface compatibility)
        k_size: Adaptive selection of kernel size for the 1-D conv
        use_weight_std: use the weight-standardized Conv1d variant
    """
    def __init__(self, channel, k_size=3, use_weight_std=False):
        super(SparseInsECA, self).__init__()
        if use_weight_std:
            self.conv = Conv1dWS(1, 1, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False)
        else:
            self.conv = nn.Conv1d(1, 1, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False)
        "TODO: change sigmoid to hard-swish?"
        self.activation = nn.Sigmoid()

    def forward(self, x: spconv.SparseConvTensor, ins_indices_batch: torch.Tensor, ins_ids: Any):
        """Rescale each instance's features by its channel attention, in place."""
        N, C = x.features.shape
        batch_indices = x.indices[:,:1].expand_as(x.features)
        out_batch = []
        for i in ins_ids:
            # Average this instance's active sites into a (1, 1, C) descriptor.
            out = x.features[(ins_indices_batch==i).nonzero(),].reshape([-1,1,C]).mean(dim=0).unsqueeze(dim=1) ## HWxBxC -> Bx1xC
            out_batch.append(out)
        # Per-instance channel attention weights, shape (n_ins, 1, C).
        out_batch = self.activation(self.conv(torch.cat(out_batch, dim=0)))#.squeeze(dim=1) ## Bx1xC -> BxC
        for i in ins_ids:
            try:
                # NOTE(review): indexes attention rows with the instance id `i`
                # itself, not the instance's position in ins_ids; correct only
                # when ins_ids == range(len(ins_ids)) -- confirm with callers.
                x.features[(ins_indices_batch==i).nonzero(),] = x.features[(ins_indices_batch==i).nonzero(),].reshape([-1,1,C]) * out_batch[i:i+1]
            except Exception as e:
                # Best-effort: instances whose rescale fails are left untouched.
                pass
        return x
@ROI_DENSEPOSE_HEAD_REGISTRY.register()
class DensePoseV1ConvXGNSparseGNHead(nn.Module):
"""
Fully convolutional DensePose head.
"""
def __init__(self, cfg: CfgNode, input_channels: int):
"""
Initialize DensePose fully convolutional head
Args:
cfg (CfgNode): configuration options
input_channels (int): number of input channels
"""
super(DensePoseV1ConvXGNSparseGNHead, self).__init__()
# fmt: off
hidden_dim = cfg.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_DIM
kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_KERNEL
norm = cfg.MODEL.ROI_DENSEPOSE_HEAD.DEEPLAB.NORM
self.n_stacked_convs = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_STACKED_CONVS
self.use_ins_gn = cfg.MODEL.CONDINST.IUVHead.INSTANCE_AWARE_GN
self.use_ins_conv = cfg.MODEL.CONDINST.IUVHead.INSTANCE_AWARE_CONV
self.use_weight_std = cfg.MODEL.CONDINST.IUVHead.WEIGHT_STANDARDIZATION
self.use_submconv = cfg.MODEL.CONDINST.IUVHead.SUBM_CONV
# self.use_eca = cfg.MODEL.CONDINST.IUVHead.Efficient_Channel_Attention
self.use_eca = False
self.use_ins_eca = cfg.MODEL.CONDINST.IUVHead.INSTANCE_EFFICIENT_CHANNEL_ATTENTION
self.use_res_input = cfg.MODEL.CONDINST.IUVHead.RESIDUAL_INPUT
self.use_res_after_relu = cfg.MODEL.CONDINST.IUVHead.RESIDUAL_SKIP_AFTER_RELU
self.use_res_later = cfg.MODEL.CONDINST.IUVHead.RESIDUAL_SKIP_LATER
self.use_res_skip_conv = cfg.MODEL.CONDINST.IUVHead.RESIDUAL_SKIP_CONV
self.dilated_conv_type = cfg.MODEL.CONDINST.IUVHead.DILATION_CONV
self.dilated_conv_r_max = cfg.MODEL.CONDINST.IUVHead.DILATION_CONV_R_MAX
self.add_sparse = spconv.tables.AddTable()
self.checkpoint_grad_num = cfg.MODEL.CONDINST.CHECKPOINT_GRAD_NUM
# self.replace_minus_one = cfg.MODEL.CONDINST.IUVHead.REPLACE_MINUS_ONE
# assert self.use_ins_gn
# fmt: on
# pad_size = kernel_size // 2
# pad_size = 0
n_channels = input_channels
from torch.cuda.amp import autocast
with autocast():
conv = sparse_conv_with_kaiming_uniform(norm=None, activation=None, use_sep=False,
use_submconv=self.use_submconv, use_deconv=False, use_weight_std=self.use_weight_std)
cnt = 0
self.layers = []
self.pad = 0
if self.use_res_skip_conv:
self.res_skip_convs = []
for ii in range(3):
layer_list = []
layer = conv(
n_channels,
hidden_dim,
kernel_size,
stride=1,
padding=self.pad,
dilation=1,
indice_key="subm0",
)
layer_list.append(layer)
self.add_module("res_skip_conv{}".format(ii), layer)
if norm in ["GN","BN"]:
layer = SparseGNBN(32, hidden_dim, norm)
elif norm in ["InsGN","InsBN","InsIN"]:
layer = SparseInsGNBNIN(32, hidden_dim, norm)
layer_list.append(layer)
self.add_module("res_skip_norm{}".format(ii), layer)
layer = SparseReLU(inplace=True)
layer_list.append(layer)
self.add_module("res_skip_relu{}".format(ii), layer)
self.res_skip_convs.append(layer_list)
for i in range(self.n_stacked_convs):
# pdb.set_trace()
if self.dilated_conv_type=="none":
layer = conv(
n_channels,
hidden_dim,
kernel_size,
stride=1,
padding=self.pad,
dilation=1,
indice_key="subm0",
)
# if self.dilated_conv_type=="one_layer_ori":
# if cnt<12:
# layer = conv(
# n_channels,
# hidden_dim,
# kernel_size,
# stride=1,
# padding=self.pad,
# dilation=1,
# indice_key="subm0",
# )
# elif cnt in [12]:
# layer = conv(
# n_channels,
# hidden_dim,
# kernel_size,
# stride=1,
# padding=self.pad,
# dilation=self.dilated_conv_r_max,
# indice_key="subm0_insconv_adapt",
# )
# else:
# layer = conv(
# n_channels,
# hidden_dim,
# kernel_size,
# stride=1,
# padding=self.pad,
# dilation=1,
# indice_key="subm0_insconv",
# )
# elif self.dilated_conv_type=="progressive_ori":
# if cnt<12:
# layer = conv(
# n_channels,
# hidden_dim,
# kernel_size,
# stride=1,
# padding=self.pad,
# dilation=1,
# indice_key="subm0",
# )
# elif cnt==12:
# layer = conv(
# n_channels,
# hidden_dim,
# kernel_size,
# stride=1,
# padding=self.pad,
# dilation=1,
# indice_key="subm0_insconv_adapt",
# )
# elif cnt==15:
# layer = conv(
# n_channels,
# hidden_dim,
# kernel_size,
# stride=1,
# padding=self.pad,
# dilation=1,
# indice_key="subm1_insconv_adapt",
# )
# elif cnt==18:
# layer = conv(
# n_channels,
# hidden_dim,
# kernel_size,
# stride=1,
# padding=self.pad,
# dilation=1,
# indice_key="subm2_insconv_adapt",
# )
# else:
# layer = conv(
# n_channels,
# hidden_dim,
# kernel_size,
# stride=1,
# padding=self.pad,
# dilation=1,
# indice_key="subm0_insconv",
# )
# elif self.dilated_conv_type=="one_layer":
# if cnt<12:
# layer = conv(
# n_channels,
# hidden_dim,
# kernel_size,
# stride=1,
# padding=self.pad,
# dilation=1,
# indice_key="subm0",
# )
# elif cnt in [12]:
# layer = SubMConv2dInsDilate(
# n_channels,
# hidden_dim,
# kernel_size,
# stride=1,
# padding=self.pad,
# dilation_max=self.dilated_conv_r_max
# )
# # layer = conv(
# # n_channels,
# # hidden_dim,
# # kernel_size,
# # stride=1,
# # padding=self.pad,
# # dilation=self.dilated_conv_r_max,
# # indice_key="subm0_insconv_adapt",
# # )
# elif self.use_ins_conv:
# layer = SubMConv2dInsDilate(
# n_channels,
# hidden_dim,
# kernel_size,
# stride=1,
# padding=self.pad,
# dilation_max=1
# )
# else:
# layer = conv(
# n_channels,
# hidden_dim,
# kernel_size,
# stride=1,
# padding=self.pad,
# dilation=1,
# indice_key="subm0",
# )
# # layer = conv(
# # n_channels,
# # hidden_dim,
# # kernel_size,
# # stride=1,
# # padding=self.pad,
# # dilation=1,
# # indice_key="subm0_insconv",
# # )
# elif self.dilated_conv_type=="aspp":
# if cnt<6:
# layer = conv(
# n_channels,
# hidden_dim,
# kernel_size,
# stride=1,
# padding=self.pad,
# dilation=1,
# indice_key="subm0",
# )
# elif cnt in [6]:
# d = self.dilated_conv_r_max
# layer = SubMConv2dASPP(
# n_channels,
# hidden_dim,
# kernel_size,
# norm=norm,
# stride=1,
# padding=self.pad,
# atrous_rates=[d,d+2,d+4]
# )
# else:
# layer = conv(
# n_channels,
# hidden_dim,
# kernel_size,
# stride=1,
# padding=self.pad,
# dilation=1,
# indice_key="subm0",
# )
# elif self.dilated_conv_type=="aspp_large":
# if cnt<6:
# layer = conv(
# n_channels,
# hidden_dim,
# kernel_size,
# stride=1,
# padding=self.pad,
# dilation=1,
# indice_key="subm0",
# )
# elif cnt in [6]:
# d = self.dilated_conv_r_max
# layer = SubMConv2dASPP(
# n_channels,
# hidden_dim,
# kernel_size,
# norm=norm,
# stride=1,
# padding=self.pad,
# atrous_rates=[d,d*2,d*4]
# )
# else:
# layer = conv(
# n_channels,
# hidden_dim,
# kernel_size,
# stride=1,
# padding=self.pad,
# dilation=1,
# indice_key="subm0",
# )
# elif self.dilated_conv_type=="aspp_large":
# if cnt<6:
# layer = conv(
# n_channels,
# hidden_dim,
# kernel_size,
# stride=1,
# padding=self.pad,
# dilation=1,
# indice_key="subm0",
# )
# elif cnt in [6]:
# d = self.dilated_conv_r_max
# layer = SubMConv2dASPP(
# n_channels,
# hidden_dim,
# kernel_size,
# norm=norm,
# stride=1,
# padding=self.pad,
# atrous_rates=[d,d*2,d*4]
# )
# else:
# layer = conv(
# n_channels,
# hidden_dim,
# kernel_size,
# stride=1,
# padding=self.pad,
# dilation=1,
# indice_key="subm0",
# )
# elif self.dilated_conv_type=="aspp_larger":
# if cnt<6:
# layer = conv(
# n_channels,
# hidden_dim,
# kernel_size,
# stride=1,
# padding=self.pad,
# dilation=1,
# indice_key="subm0",
# )
# elif cnt in [6]:
# d = self.dilated_conv_r_max
# layer = SubMConv2dASPP(
# n_channels,
# hidden_dim,
# kernel_size,
# norm=norm,
# stride=1,
# padding=self.pad,
# atrous_rates=[d,d*2,d*8]
# )
# else:
# layer = conv(
# n_channels,
# hidden_dim,
# kernel_size,
# stride=1,
# padding=self.pad,
# dilation=1,
# indice_key="subm0",
# )
# elif self.dilated_conv_type=="ins_dilate_aspp":
# if cnt<6:
# layer = conv(
# n_channels,
# hidden_dim,
# kernel_size,
# stride=1,
# padding=self.pad,
# dilation=1,
# indice_key="subm0",
# )
# elif cnt in [6]:
# d = self.dilated_conv_r_max
# layer = SubMConv2dInsDilateASPP(
# n_channels,
# hidden_dim,
# kernel_size,
# norm=norm,
# stride=1,
# padding=self.pad,
# atrous_rates=[d,d+2,d+4]
# )
# else:
# layer = conv(
# n_channels,
# hidden_dim,
# kernel_size,
# stride=1,
# padding=self.pad,
# dilation=1,
# indice_key="subm0",
# )
# elif self.dilated_conv_type=="progressive":
# if cnt==0:
# layer = SubMConv2dInsDilate(
# n_channels,
# hidden_dim,
# kernel_size,
# stride=1,
# padding=self.pad,
# dilation_max=self.dilated_conv_r_max
# )
# # layer = conv(
# # n_channels,
# # hidden_dim,
# # kernel_size,
# # stride=1,
# # padding=self.pad,
# # dilation=1,
# # indice_key="subm0_insconv_adapt",
# # )
# elif cnt==9:
# layer = SubMConv2dInsDilate(
# n_channels,
# hidden_dim,
# kernel_size,
# stride=1,
# padding=self.pad,
# dilation_max=self.dilated_conv_r_max*2
# )
# # layer = conv(
# # n_channels,
# # hidden_dim,
# # kernel_size,
# # stride=1,
# # padding=self.pad,
# # dilation=1,
# # indice_key="subm1_insconv_adapt",
# # )
# elif cnt==18:
# layer = SubMConv2dInsDilate(
# n_channels,
# hidden_dim,
# kernel_size,
# stride=1,
# padding=self.pad,
# dilation_max=self.dilated_conv_r_max*4
# )
# # layer = conv(
# # n_channels,
# # hidden_dim,
# # kernel_size,
# # stride=1,
# # padding=self.pad,
# # dilation=1,
# # indice_key="subm2_insconv_adapt",
# # )
# elif self.use_ins_conv:
# layer = SubMConv2dInsDilate(
# n_channels,
# hidden_dim,
# kernel_size,
# stride=1,
# padding=self.pad,
# dilation_max=1
# )
# else:
# layer = conv(
# n_channels,
# hidden_dim,
# kernel_size,
# stride=1,
# padding=self.pad,
# dilation=1,
# indice_key="subm0",
# )
# elif self.dilated_conv_type=="two_layers":
# if cnt==6:
# layer = SubMConv2dInsDilate(
# n_channels,
# hidden_dim,
# kernel_size,
# stride=1,
# padding=self.pad,
# dilation_max=self.dilated_conv_r_max
# )
# elif cnt==15:
# layer = SubMConv2dInsDilate(
# n_channels,
# hidden_dim,
# kernel_size,
# stride=1,
# padding=self.pad,
# dilation_max=self.dilated_conv_r_max
# )
# elif self.use_ins_conv:
# layer = SubMConv2dInsDilate(
# n_channels,
# hidden_dim,
# kernel_size,
# stride=1,
# padding=self.pad,
# dilation_max=1
# )
# else:
# layer = conv(
# n_channels,
# hidden_dim,
# kernel_size,
# stride=1,
# padding=self.pad,
# dilation=1,
# indice_key="subm0",
# )
# layer_name = self._get_layer_name(cnt)
self.add_module("layer{}".format(cnt), layer)
self.layers.append(layer)
# pdb.set_trace()
# [p[1].data.dtype for p in layer.half().named_parameters()]
cnt += 1
if self.use_eca:
layer = SparseECA(channel=hidden_dim)
self.add_module("layer{}".format(cnt), layer)
self.layers.append(layer)
cnt += 1
if self.use_ins_eca=="AfterConv":
layer = SparseInsECA(channel=hidden_dim)
self.add_module("layer{}".format(cnt), layer)
self.layers.append(layer)
cnt += 1
# if self.use_ins_gn:
# layer = SparseInsGNBNIN(32, n_channels)
if norm in ["GN","BN"]:
layer = SparseGNBN(32, hidden_dim, norm)
elif norm in ["InsGN","InsBN","InsIN"]:
layer = SparseInsGNBNIN(32, hidden_dim, norm)
# layer_name = self._get_layer_name(cnt)
self.add_module("layer{}".format(cnt), layer)
self.layers.append(layer)
cnt += 1
if self.use_ins_eca=="AfterNorm":
layer = SparseInsECA(channel=hidden_dim)
self.add_module("layer{}".format(cnt), layer)
self.layers.append(layer)
cnt += 1
if self.use_ins_eca=="AfterRelu":
layer = SparseReLU(inplace=False)
# layer_name = self._get_layer_name(cnt)
self.add_module("layer{}".format(cnt), layer)
self.layers.append(layer)
cnt += 1
layer = SparseInsECA(channel=hidden_dim)
self.add_module("layer{}".format(cnt), layer)
self.layers.append(layer)
cnt += 1
else:
layer = SparseReLU(inplace=True)
# layer_name = self._get_layer_name(cnt)
self.add_module("layer{}".format(cnt), layer)
self.layers.append(layer)
cnt += 1
n_channels = hidden_dim
self.n_out_channels = n_channels
# initialize_module_params(self)
# if self.amp_enable:
# self = self.half()
# [p[1].data.dtype for p in self.named_parameters()]
# for layer in self.layers:
# for p in self.named_parameters():
# if p[1].data.dtype!=torch.float16:
# print(p[1].data.dtype)
# pdb.set_trace()
# ## Ref: https://github.com/prigoyal/pytorch_memonger/blob/master/tutorial/Checkpointing_for_PyTorch_models.ipynb
# def custom(self, module):
# def custom_forward(*inputs):
# inputs = module(inputs[0])
# return inputs
# return custom_forward
def rearrange_inputs(self, x, ins_indices_batch, ins_ids):
x_features_list =[]
x_indices_list =[]
ins_indices_list = []
for i in ins_ids:
x_features_list.append(x.features[(ins_indices_batch==i).nonzero()].squeeze(1))
x_indices_list.append(x.indices[(ins_indices_batch==i).nonzero()].squeeze(1))
ins_indices_list.append(ins_indices_batch[ins_indices_batch==i])
x.features = torch.cat(x_features_list, dim=0)
x.indices = torch.cat(x_indices_list, dim=0)
ins_indices_batch = torch.cat(ins_indices_list,dim=0)
return x, ins_indices_batch
    def create_dilated_indices(self, x, ins_indices_batch, ins_ids, dilation_max, ksize=3):
        """Build per-instance submanifold-conv index pairs with adaptive dilation.

        For each instance id, a dilation factor is derived from the instance's
        spatial extent (larger instances get a larger dilation, capped by
        ``dilation_max``), index pairs are computed over that instance's voxels
        only, and all instances' pairs are concatenated with row offsets so
        they address the instance-grouped feature tensor.

        Args:
            x: spconv.SparseConvTensor; rows are assumed to already be grouped
                by instance via ``rearrange_inputs`` — TODO confirm at call site.
            ins_indices_batch: (N,) per-voxel instance ids aligned with ``x``.
            ins_ids: unique instance ids, in grouping order.
            dilation_max: scale factor bounding the per-instance dilation.
            ksize: convolution kernel size (default 3).

        Returns:
            An spconv indice tuple ``(outids, indices, indice_pairs,
            indice_pair_num, spatial_shape)`` for insertion into ``x.indice_dict``.
        """
        features_list =[]
        ins_indices_list = []
        outids_list =[]
        indice_pairs_list = []
        indice_pair_num_list = []
        cnt = 0  # running row offset: number of voxels of instances already processed
        for i in ins_ids:
            "TODO: dilation depends on instance size"
            # Coordinates of the voxels belonging to instance i.
            ins_indices = x.indices[(ins_indices_batch==i).nonzero()].squeeze(1)
            # Fraction of the spatial extent covered by the instance, per axis.
            h_ratio = (ins_indices[:,1].max() - ins_indices[:,1].min()).float()/x.spatial_shape[0]
            w_ratio = (ins_indices[:,2].max() - ins_indices[:,2].min()).float()/x.spatial_shape[1]
            # Dilation grows with instance size; never below 1.
            d = max(1, math.ceil(max(h_ratio, w_ratio)*dilation_max))
            outids, indice_pairs, indice_pair_num = spconv.ops.get_indice_pairs(
                ins_indices,
                x.batch_size,
                x.spatial_shape,
                ksize=ksize,
                stride=1,
                padding=self.pad,
                dilation=d,
                out_padding=0,
                subm=True,
                transpose=False,
                grid=None,
                use_hash=False,
            )
            # Shift pair entries from instance-local to global row numbering;
            # -1 sentinels (invalid taps) are left untouched here.
            indice_pairs[indice_pairs!=-1] += cnt
            """
            Replace -1 with center pixel value to avoid CUDA error: an illegal memory access was encountered.
            Because, for each instance, there are -1 in the end of indice_pairs. When combining more than one
            instance, there will be -1 in the middle which causes CUDA memory error.
            """
            # Tap with the most valid pairs — presumably the kernel center; confirm.
            center = torch.argmax(indice_pair_num)
            for t in range(indice_pairs.shape[1]):
                if t!=center:
                    replace_idx = indice_pairs[0,t,:]==-1
                    indice_pairs[:,t,replace_idx] = indice_pairs[:,center,replace_idx]
            # Keep only columns where the center tap itself is valid.
            valid_idx = indice_pairs[0,center,:]!=-1
            outids = outids[valid_idx]
            indice_pairs = indice_pairs[:,:,valid_idx]
            indice_pair_num = indice_pair_num*0 + valid_idx.int().sum()
            outids_list.append(outids)
            indice_pairs_list.append(indice_pairs)
            indice_pair_num_list.append(indice_pair_num)
            cnt += (ins_indices_batch==i).int().sum()
        # Merge all per-instance tables into one global indice tuple.
        outids = torch.cat(outids_list, dim=0)
        indice_pairs = torch.cat(indice_pairs_list, dim=-1)
        indice_pair_num = torch.stack(indice_pair_num_list).sum(dim=0).int()
        indice_tuple = (outids, x.indices, indice_pairs, indice_pair_num, x.spatial_shape)
        return indice_tuple
def forward(self, features: spconv.SparseConvTensor, ins_indices_batch: List[torch.Tensor]=None, ins_indices_len=None):
"""
Apply DensePose fully convolutional head to the input features
Args:
features (tensor): input features
Result:
A tensor of DensePose head outputs
"""
# pdb.set_trace()
# features.batch_size
# x = spconv.SparseConvTensor(sparse_feat_batch, sparse_coord_batch, (H,W), N)
# batch_indices = features.indices[:,0:1].expand_as(features.features)
x = features
# pdb.set_trace()
# output = x
"TODO: change ins_indices_batch to start-end slice to save GPU Memory"
ins_ids = torch.unique(ins_indices_batch)
if self.dilated_conv_type!="none":
print("rearrange_inputs")
x, ins_indices_batch = self.rearrange_inputs(x, ins_indices_batch, ins_ids)
# ins_indices_batch = ins_indices_batch[...,None].expand_as(x.features)
# pdb.set_trace()
# if self.amp_enable:
# x.features = x.features.half()
# pdb.set_trace()
if "ori" in self.dilated_conv_type:
if self.use_ins_conv:
x.indice_dict["subm0_insconv"] = self.create_dilated_indices(x, ins_indices_batch, ins_ids, 1)
# if self.dilated_conv_type!="none":
# assert self.use_ins_conv
x.indice_dict["subm0_insconv_adapt"] = self.create_dilated_indices(x, ins_indices_batch, ins_ids, self.dilated_conv_r_max)
if "progressive" in self.dilated_conv_type:
x.indice_dict["subm1_insconv_adapt"] = self.create_dilated_indices(x, ins_indices_batch, ins_ids, self.dilated_conv_r_max*2)
x.indice_dict["subm2_insconv_adapt"] = self.create_dilated_indices(x, ins_indices_batch, ins_ids, self.dilated_conv_r_max*4)
res = None
for idx, layer in enumerate(self.layers):
# if self.use_res_later:
# if idx==3:
# res = x
# elif idx==8:
# x = self.add_sparse([x,res])
# if idx==12:
# res = x
# elif idx==17:
# x = self.add_sparse([x,res])
# if self.use_res_after_relu:
# if idx==3:
# res = x
# elif idx==9:
# x = self.add_sparse([x,res])
# if idx==12:
# res = x
# elif idx==18:
# x = self.add_sparse([x,res])
if self.use_res_input:
if self.use_ins_eca!="none":
# pdb.set_trace()
if self.use_ins_eca=="AfterRelu":
if idx in [0,9+3-1,18+6-1,27+9-1,36+12-1]:
res = x
else:
if idx in [0,9+3,18+6,27+9,36+12]:
res = x
else:
if idx in [0,9,18,27,36]:
res = x
# print(type(layer))
# if isinstance(layer, spconv.SubMConv2d):
# if self.checkpoint_grad_num>0:
# x = checkpoint.checkpoint(self.custom(layer), x)
# else:
# x = layer(x)
# if isinstance(layer, SubMConv2dInsDilate):
# pdb.set_trace()
if isinstance(layer, SparseInsGNBNIN) \
or isinstance(layer, SparseInsECA) \
or isinstance(layer, SubMConv2dInsDilate)\
or isinstance(layer, SubMConv2dInsDilateASPP)\
or isinstance(layer, SubMConv2dASPP):
# x = layer(x, ins_indices_batch, ins_ids, ins_indices_len)
x = layer(x, ins_indices_batch, ins_ids)
else:
# print(idx, x.indice_dict.keys(), x.indices.shape)
try:
x = layer(x)
except Exception as e:
print(e)
pdb.set_trace()
# print(idx, x.indice_dict.keys())
# pdb.set_trace()
if self.use_res_input:
if self.use_ins_eca!="none":
if self.use_ins_eca=="AfterRelu":
if idx in [5+3-1,14+6-1,23+9-1,32+12-1,41+15-1]:
x = self.add_sparse([x,res])
else:
if idx in [5+3,14+6,23+9,32+12,41+15]:
x = self.add_sparse([x,res])
else:
if idx in [5,14,23,32,41]:
if self.use_res_skip_conv:
if idx==5:
layer_list = self.res_skip_convs[0]
if idx==14:
layer_list = self.res_skip_convs[1]
if idx==23:
layer_list = self.res_skip_convs[2]
for l in layer_list:
if isinstance(l, SparseInsGNBNIN):
res = l(res, ins_indices_batch, ins_ids)
else:
res = l(res)
x = self.add_sparse([x,res])
x.features = x.features #.float()
output = x
return output
# def _get_layer_name(self, i: int):
# layer_name = "body_conv_fcn{}".format(i + 1)
# layer_name = "layer".format(i)
# return layer_name
|
<filename>ukb/models/mri.py
import torch
import logging
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from .frame import LeNetFrameEncoder, FNNFrameEncoder, DenseNet121, vgg16_bn, densenet121, densenet_40_12_bc
from .sequence import RNN, MetaRNN, SeqSumPoolingEncoder
logger = logging.getLogger(__name__)
################################################################################
# Image Container Models (each image is independantly classified)
################################################################################
class MRINet(nn.Module):
    """
    Simple container class for MRI net. This module consists of:
        1) A frame encoder, e.g., a ConvNet/CNN
        2) Linear output layer

    Each frame is classified independently; sequence-level scores are obtained
    by averaging (or thresholded voting over) the per-frame predictions.
    """
    def __init__(self, frame_encoder, n_classes, output_size, layers=(64, 32),
                 dropout=0.2, vote_opt='mean', use_cuda=False):
        """
        :param frame_encoder: module mapping one frame to a feature vector
        :param n_classes: number of output classes
        :param output_size: width of the frame encoder's output
        :param layers: classifier hidden-layer sizes. Defaults added so
            subclasses that omit them (DenseNet121Net, LeNet) no longer raise
            TypeError on the super() call.
        :param dropout: dropout probability between hidden layers
        :param vote_opt: 'mean' or 'vote' — frame-score merging strategy
        :param use_cuda: move inputs to the GPU when True
        """
        super(MRINet, self).__init__()
        self.n_classes = n_classes
        self.fenc = frame_encoder
        self.classifier = self._make_classifier(output_size, n_classes, layers, dropout)
        self.vote_opt = vote_opt
        self.use_cuda = use_cuda

    def _make_classifier(self, output_size, n_classes, layers=(64, 32), dropout=0.2):
        """Build the MLP head: Linear(+ReLU+Dropout) stacks ending in a bare Linear.

        BUG FIX: the original compared each layer's *input* width to the class
        count (`size != layers[-1]`), which appended ReLU/Dropout after the
        final output layer whenever the last hidden size differed from
        ``n_classes``. The check is now positional.
        """
        dims = [output_size] + list(layers) + [n_classes]
        modules = []
        for i in range(len(dims) - 1):
            modules.append(nn.Linear(dims[i], dims[i + 1]))
            if i < len(dims) - 2:  # no activation/dropout after the output layer
                modules.append(nn.ReLU(True))
                modules.append(nn.Dropout(p=dropout))
        return nn.Sequential(*modules)

    def init_hidden(self, batch_size):
        # Frame-independent model: no recurrent state.
        return None

    def embedding(self, x, hidden=None):
        """Get learned representation of MRI sequence."""
        if self.use_cuda and not x.is_cuda:
            x = x.cuda()
        batch_size, num_frames, num_channels, width, height = x.size()
        self.num_frames = num_frames
        x = x.view(-1, num_channels, width, height)
        x = self.fenc(x)
        x = self.classifier(x)
        if self.use_cuda:
            return x.cpu()
        else:
            return x

    def forward(self, x, hidden=None):
        if self.use_cuda and not x.is_cuda:
            x = x.cuda()
        # collapse all frames into new batch = batch_size * num_frames
        batch_size, num_frames, num_channels, width, height = x.size()
        self.num_frames = num_frames
        x = x.view(-1, num_channels, width, height)
        # encode frames, then apply the feed-forward classifier
        x = self.fenc(x)
        x = self.classifier(x)
        return x

    def vote(self, y_pred, threshold=None):
        """Merge per-frame scores into per-sequence scores.

        With a threshold, scores are binarized first (voting); otherwise the
        raw scores are averaged over the frame axis.
        """
        if threshold is not None:
            y_pred = (y_pred > threshold).astype(float)
        num_frames = self.num_frames
        num_samples = int(y_pred.shape[0] / num_frames)
        ex_shape = y_pred.shape[1:]
        y_pred = np.reshape(y_pred, (num_samples, num_frames,) + ex_shape)
        y_pred = np.mean(y_pred, axis=1)
        return y_pred

    def predict_proba(self, data_loader, binary=True, pos_label=1, threshold=0.5):
        """ Forward inference """
        y_pred = []
        for i, data in enumerate(data_loader):
            x, y = data
            x = Variable(x) if not self.use_cuda else Variable(x).cuda()
            y = Variable(y) if not self.use_cuda else Variable(y).cuda()
            h0 = self.init_hidden(x.size(0))
            outputs = self(x, h0)
            y_hat = F.softmax(outputs, dim=1)
            y_hat = y_hat.data.numpy() if not self.use_cuda else y_hat.cpu().data.numpy()
            y_pred.append(y_hat)
            # empty cuda cache
            if self.use_cuda:
                torch.cuda.empty_cache()
        y_pred = np.concatenate(y_pred)
        # merge frame-level scores into sequence-level scores
        if self.vote_opt == 'mean':
            y_pred = self.vote(y_pred)
        elif self.vote_opt == 'vote':
            y_pred = self.vote(y_pred, threshold)
        return y_pred[:, pos_label] if binary else y_pred

    def predict(self, data_loader, binary=True, pos_label=1, threshold=0.5, return_proba=False):
        """
        If binary classification, use threshold on positive class
        If multinomial, just select the max probability as the predicted class
        :param data_loader:
        :param binary:
        :param pos_label:
        :param threshold:
        :return:
        """
        proba = self.predict_proba(data_loader, binary, pos_label, threshold)
        if binary:
            pred = np.array([1 if p > threshold else 0 for p in proba])
        else:
            pred = np.argmax(proba, 1)
        if return_proba:
            return (proba, pred)
        else:
            return pred
class DenseNet121Net(MRINet):
    """MRINet whose frame encoder is a DenseNet-121."""
    def __init__(self, n_classes, output_size, use_cuda, **kwargs):
        # BUG FIX: the parent requires `layers` and `dropout`, which the
        # original super() call omitted (TypeError on construction). Forward
        # them explicitly, honoring optional kwargs.
        super(DenseNet121Net, self).__init__(frame_encoder=None, n_classes=n_classes,
                                             output_size=output_size,
                                             layers=kwargs.get("layers", [64, 32]),
                                             dropout=kwargs.get("dropout", 0.2),
                                             vote_opt=kwargs.get("vote_opt", "mean"),
                                             use_cuda=use_cuda)
        self.name = "DenseNet121Net"
        self.fenc = DenseNet121()
class VGG16Net(MRINet):
    """MRINet whose frame encoder is an (optionally pretrained) VGG16-BN."""
    def __init__(self, n_classes, use_cuda, **kwargs):
        frame_shape = kwargs.get("input_shape", (3, 32, 32))
        super(VGG16Net, self).__init__(
            frame_encoder=None,
            n_classes=n_classes,
            output_size=self.get_frm_output_size(frame_shape),
            layers=kwargs.get("layers", [64, 32]),
            dropout=kwargs.get("dropout", 0.2),
            vote_opt=kwargs.get("vote_opt", "mean"),
            use_cuda=use_cuda,
        )
        self.name = "VGG16Net"
        self.fenc = vgg16_bn(pretrained=kwargs.get("pretrained", True),
                             requires_grad=kwargs.get("requires_grad", False))

    def get_frm_output_size(self, input_shape):
        # VGG16 downsamples by 32; a frame smaller than 32px still yields one cell.
        cells = max(1, int(min(input_shape[-1], input_shape[-2]) / 32))
        return cells * cells * 512
class LeNet(MRINet):
    """MRINet whose frame encoder is a LeNet."""
    def __init__(self, n_classes, n_channels, output_size, use_cuda, **kwargs):
        # BUG FIX: the parent requires `layers` and `dropout`, which the
        # original super() call omitted (TypeError on construction).
        super(LeNet, self).__init__(frame_encoder=None, n_classes=n_classes,
                                    output_size=output_size,
                                    layers=kwargs.get("layers", [64, 32]),
                                    dropout=kwargs.get("dropout", 0.2),
                                    vote_opt=kwargs.get("vote_opt", "mean"),
                                    use_cuda=use_cuda)
        self.name = "LeNet"
        self.fenc = LeNetFrameEncoder(n_channels=n_channels, output_size=output_size)
################################################################################
# Sequence Container Models
################################################################################
class MRISequenceNet(nn.Module):
    """
    Container network for MRI sequence classification, combining:
        1) a frame encoder (e.g., a ConvNet/CNN) applied to every frame, and
        2) a sequence encoder (e.g., an RNN) merging the frame representations.
    """
    def __init__(self, frame_encoder, seq_encoder, use_cuda=False):
        super(MRISequenceNet, self).__init__()
        self.fenc = frame_encoder
        self.senc = seq_encoder
        self.use_cuda = use_cuda

    def init_hidden(self, batch_size):
        # Recurrent-state creation is delegated to the sequence encoder.
        return self.senc.init_hidden(batch_size)

    def _encode_frames(self, x):
        """Run the frame encoder over all frames and restore the batch axis."""
        batch_size, num_frames, num_channels, width, height = x.size()
        flat = x.view(-1, num_channels, width, height)
        return self.fenc(flat).view(batch_size, num_frames, -1)

    def embedding(self, x, hidden):
        """Get the learned representation of an MRI sequence."""
        if self.use_cuda and not x.is_cuda:
            x = x.cuda()
        emb = self.senc.embedding(self._encode_frames(x), hidden)
        return emb.cpu() if self.use_cuda else emb

    def forward(self, x, hidden=None):
        if self.use_cuda and not x.is_cuda:
            x = x.cuda()
        return self.senc(self._encode_frames(x), hidden)

    def predict_proba(self, data_loader, binary=True, pos_label=1):
        """Forward inference; returns stacked softmax scores over the loader."""
        batches = []
        for i, data in enumerate(data_loader):
            x, y = data
            x = Variable(x) if not self.use_cuda else Variable(x).cuda()
            y = Variable(y) if not self.use_cuda else Variable(y).cuda()
            h0 = self.init_hidden(x.size(0))
            scores = F.softmax(self(x, h0), dim=1)
            batches.append(scores.cpu().data.numpy() if self.use_cuda else scores.data.numpy())
            # release cached GPU memory between batches
            if self.use_cuda:
                torch.cuda.empty_cache()
        y_pred = np.concatenate(batches)
        return y_pred[:, pos_label] if binary else y_pred

    def predict(self, data_loader, binary=True, pos_label=1, threshold=0.5, return_proba=False, topSelection=None):
        """
        If binary classification, use threshold on positive class
        If multinomial, just select the max probability as the predicted class
        :param data_loader:
        :param binary:
        :param pos_label:
        :param threshold:
        :return:
        """
        proba = self.predict_proba(data_loader, binary, pos_label)
        if topSelection is not None and topSelection < proba.shape[0]:
            # Calibrate the cutoff so that only the top-k scores exceed it.
            threshold = proba[np.argsort(proba)[-topSelection - 1]]
        if binary:
            pred = np.array([1 if p > threshold else 0 for p in proba])
        else:
            pred = np.argmax(proba, 1)
        return (proba, pred) if return_proba else pred
################################################################################
# FNN Models
################################################################################
class FNNFrameSum(MRISequenceNet):
    """Feed-forward frame encoder + sum-pooling sequence encoder."""
    def __init__(self, n_classes, use_cuda, **kwargs):
        super(FNNFrameSum, self).__init__(frame_encoder=None, seq_encoder=None, use_cuda=use_cuda)
        self.name = "FNNFrameSum"
        self.n_classes = n_classes
        hidden = kwargs.get("frm_layers", [64, 32])
        shape = kwargs.get("input_shape", (1, 32, 32))
        flat_size = shape[0] * shape[1] * shape[2]
        self.fenc = FNNFrameEncoder(input_size=flat_size, layers=list(hidden))
        self.senc = SeqSumPoolingEncoder(n_classes=n_classes, input_size=hidden[-1])
class FNNFrameRNN(MRISequenceNet):
    """Feed-forward frame encoder + RNN sequence encoder."""
    def __init__(self, n_classes, use_cuda, **kwargs):
        super(FNNFrameRNN, self).__init__(frame_encoder=None, seq_encoder=None, use_cuda=use_cuda)
        self.name = "FNNFrameRNN"
        self.n_classes = n_classes
        frm_layers = kwargs.get("frm_layers", [64, 32])
        input_shape = kwargs.get("input_shape", (1, 32, 32))
        frm_input_size = input_shape[0] * input_shape[1] * input_shape[2]
        frm_output_size = frm_layers[-1]
        seq_output_size = kwargs.get("seq_output_size", 128)
        seq_dropout = kwargs.get("seq_dropout", 0.1)
        seq_attention = kwargs.get("seq_attention", True)
        seq_bidirectional = kwargs.get("seq_bidirectional", True)
        seq_max_seq_len = kwargs.get("seq_max_seq_len", 30)
        # Consistency: accept "seq_rnn_type" like VGG16FrameRNN, keeping the
        # legacy "rnn_type" key as a fallback.
        seq_rnn_type = kwargs.get("seq_rnn_type", kwargs.get("rnn_type", "LSTM"))
        self.fenc = FNNFrameEncoder(input_size=frm_input_size, layers=frm_layers)
        # BUG FIX: the RNN head was hard-coded to 2 classes; honor `n_classes`.
        self.senc = RNN(n_classes=n_classes, input_size=frm_output_size, hidden_size=seq_output_size,
                        dropout=seq_dropout, max_seq_len=seq_max_seq_len, attention=seq_attention,
                        rnn_type=seq_rnn_type, bidirectional=seq_bidirectional, use_cuda=self.use_cuda)
################################################################################
# LeNet Models
################################################################################
class LeNetFrameSum(MRISequenceNet):
    """LeNet frame encoder + sum-pooling sequence encoder."""
    def __init__(self, n_classes, use_cuda, **kwargs):
        super(LeNetFrameSum, self).__init__(frame_encoder=None, seq_encoder=None, use_cuda=use_cuda)
        self.name = "LeNetFrameSum"
        self.n_classes = n_classes
        feat_dim = kwargs.get("frm_output_size", 84)
        frame_shape = kwargs.get("input_shape", (1, 32, 32))
        self.fenc = LeNetFrameEncoder(input_shape=frame_shape, output_size=feat_dim)
        self.senc = SeqSumPoolingEncoder(n_classes=n_classes, input_size=feat_dim)
class LeNetFrameRNN(MRISequenceNet):
    """LeNet frame encoder + RNN sequence encoder."""
    def __init__(self, n_classes, use_cuda, **kwargs):
        super(LeNetFrameRNN, self).__init__(frame_encoder=None, seq_encoder=None, use_cuda=use_cuda)
        self.name = "LeNetFrameRNN"
        self.n_classes = n_classes
        frm_output_size = kwargs.get("frm_output_size", 84)
        input_shape = kwargs.get("input_shape", (1, 32, 32))
        seq_output_size = kwargs.get("seq_output_size", 128)
        seq_dropout = kwargs.get("seq_dropout", 0.1)
        seq_attention = kwargs.get("seq_attention", True)
        seq_bidirectional = kwargs.get("seq_bidirectional", True)
        seq_max_seq_len = kwargs.get("seq_max_seq_len", 15)
        # Consistency: accept "seq_rnn_type", falling back to the legacy "rnn_type".
        seq_rnn_type = kwargs.get("seq_rnn_type", kwargs.get("rnn_type", "LSTM"))
        self.fenc = LeNetFrameEncoder(input_shape=input_shape, output_size=frm_output_size)
        # BUG FIX: the RNN head was hard-coded to 2 classes; honor `n_classes`.
        self.senc = RNN(n_classes=n_classes, input_size=frm_output_size, hidden_size=seq_output_size,
                        dropout=seq_dropout, max_seq_len=seq_max_seq_len, attention=seq_attention,
                        rnn_type=seq_rnn_type, bidirectional=seq_bidirectional, use_cuda=self.use_cuda)
################################################################################
# DenseNet 3-channel Models
################################################################################
class DenseNet121FrameSum(MRISequenceNet):
    """DenseNet-121 frame encoder + sum-pooling sequence encoder."""
    def __init__(self, n_classes, use_cuda, **kwargs):
        super(DenseNet121FrameSum, self).__init__(frame_encoder=None, seq_encoder=None, use_cuda=use_cuda)
        self.name = "DenseNet121FrameSum"
        self.n_classes = n_classes
        frame_shape = kwargs.get("input_shape", (3, 32, 32))
        # DenseNet-121 downsamples by 32 and ends with 1024 feature maps.
        feat_dim = pow(int(frame_shape[-1] / 32), 2) * 1024
        self.fenc = densenet121(pretrained=kwargs.get("pretrained", True),
                                requires_grad=kwargs.get("requires_grad", False))
        self.senc = SeqSumPoolingEncoder(n_classes=n_classes, input_size=feat_dim)
class DenseNet121FrameRNN(MRISequenceNet):
    """DenseNet-121 frame encoder + RNN sequence encoder."""
    def __init__(self, n_classes, use_cuda, **kwargs):
        super(DenseNet121FrameRNN, self).__init__(frame_encoder=None, seq_encoder=None, use_cuda=use_cuda)
        self.name = "DenseNet121FrameRNN"
        self.n_classes = n_classes
        input_shape = kwargs.get("input_shape", (3, 32, 32))
        # DenseNet-121 downsamples by 32 and ends with 1024 feature maps.
        frm_output_size = pow(int(input_shape[-1] / 32), 2) * 1024
        seq_output_size = kwargs.get("seq_output_size", 128)
        seq_dropout = kwargs.get("seq_dropout", 0.1)
        seq_attention = kwargs.get("seq_attention", True)
        seq_bidirectional = kwargs.get("seq_bidirectional", True)
        seq_max_seq_len = kwargs.get("seq_max_seq_len", 15)
        # Consistency: accept "seq_rnn_type", falling back to the legacy "rnn_type".
        seq_rnn_type = kwargs.get("seq_rnn_type", kwargs.get("rnn_type", "LSTM"))
        pretrained = kwargs.get("pretrained", True)
        requires_grad = kwargs.get("requires_grad", False)
        self.fenc = densenet121(pretrained=pretrained, requires_grad=requires_grad)
        # BUG FIX: the RNN head was hard-coded to 2 classes; honor `n_classes`.
        self.senc = RNN(n_classes=n_classes, input_size=frm_output_size, hidden_size=seq_output_size,
                        dropout=seq_dropout, max_seq_len=seq_max_seq_len, attention=seq_attention,
                        rnn_type=seq_rnn_type, bidirectional=seq_bidirectional, use_cuda=self.use_cuda)
################################################################################
# VGG 3-channel Models
################################################################################
class VGG16FrameSum(MRISequenceNet):
    """VGG16-BN frame encoder + sum-pooling sequence encoder."""
    def __init__(self, n_classes, use_cuda, **kwargs):
        super(VGG16FrameSum, self).__init__(frame_encoder=None, seq_encoder=None, use_cuda=use_cuda)
        self.name = "VGG16FrameSum"
        self.n_classes = n_classes
        frame_shape = kwargs.get("input_shape", (3, 32, 32))
        self.fenc = vgg16_bn(pretrained=kwargs.get("pretrained", True),
                             requires_grad=kwargs.get("requires_grad", False))
        self.senc = SeqSumPoolingEncoder(n_classes=n_classes,
                                         input_size=self.get_frm_output_size(frame_shape))

    def get_frm_output_size(self, input_shape):
        # VGG16 downsamples by 32; clamp to one spatial cell for tiny inputs.
        cells = max(1, int(min(input_shape[-1], input_shape[-2]) / 32))
        return cells * cells * 512
class VGG16FrameRNN(MRISequenceNet):
    """VGG16-BN frame encoder + RNN sequence encoder."""
    def __init__(self, n_classes, use_cuda, **kwargs):
        super(VGG16FrameRNN, self).__init__(frame_encoder=None, seq_encoder=None, use_cuda=use_cuda)
        self.name = "VGG16FrameRNN"
        self.n_classes = n_classes
        frame_shape = kwargs.get("input_shape", (3, 32, 32))
        self.fenc = vgg16_bn(pretrained=kwargs.get("pretrained", True),
                             requires_grad=kwargs.get("requires_grad", False))
        feat_dim = self.get_frm_output_size(frame_shape)
        self.senc = RNN(n_classes=n_classes,
                        input_size=feat_dim,
                        hidden_size=kwargs.get("seq_output_size", 128),
                        dropout=kwargs.get("seq_dropout", 0.1),
                        max_seq_len=kwargs.get("seq_max_seq_len", 15),
                        attention=kwargs.get("seq_attention", True),
                        rnn_type=kwargs.get("seq_rnn_type", "LSTM"),
                        bidirectional=kwargs.get("seq_bidirectional", True),
                        use_cuda=self.use_cuda)

    def get_frm_output_size(self, input_shape):
        """Probe the frame encoder with one zero frame to measure its output width."""
        dummy = torch.autograd.Variable(torch.zeros((1,) + tuple(input_shape)))
        return self.fenc.forward(dummy).size()[1]
class Dense4012FrameRNN(MRISequenceNet):
    """DenseNet-40-12-BC frame encoder + RNN sequence encoder."""
    def __init__(self, n_classes, use_cuda, **kwargs):
        super(Dense4012FrameRNN, self).__init__(frame_encoder=None, seq_encoder=None, use_cuda=use_cuda)
        self.name = "Dense4012FrameRNN"
        self.n_classes = n_classes  # consistency with sibling container classes
        input_shape = kwargs.get("input_shape", (3, 32, 32))
        seq_output_size = kwargs.get("seq_output_size", 128)
        seq_dropout = kwargs.get("seq_dropout", 0.1)
        seq_attention = kwargs.get("seq_attention", True)
        seq_bidirectional = kwargs.get("seq_bidirectional", True)
        seq_max_seq_len = kwargs.get("seq_max_seq_len", 15)
        # Consistency: accept "seq_rnn_type", falling back to the legacy "rnn_type".
        seq_rnn_type = kwargs.get("seq_rnn_type", kwargs.get("rnn_type", "LSTM"))
        pretrained = kwargs.get("pretrained", True)
        requires_grad = kwargs.get("requires_grad", False)
        logger.info("============================")
        logger.info("Dense4012FrameRNN parameters")
        logger.info("============================")
        logger.info("seq_output_size: {}".format(seq_output_size))
        logger.info("seq_dropout: {}".format(seq_dropout))
        logger.info("seq_attention: {}".format(seq_attention))
        logger.info("seq_bidirectional: {}".format(seq_bidirectional))
        logger.info("seq_max_seq_len: {}".format(seq_max_seq_len))
        logger.info("seq_rnn_type: {}".format(seq_rnn_type))
        logger.info("pretrained: {}".format(pretrained))
        logger.info("requires_grad: {}\n".format(requires_grad))
        self.fenc = densenet_40_12_bc(pretrained=pretrained, requires_grad=requires_grad)
        frm_output_size = self.get_frm_output_size(input_shape)
        # BUG FIX: the RNN head was hard-coded to 2 classes; honor `n_classes`.
        self.senc = RNN(n_classes=n_classes, input_size=frm_output_size, hidden_size=seq_output_size,
                        dropout=seq_dropout, max_seq_len=seq_max_seq_len, attention=seq_attention,
                        rnn_type=seq_rnn_type, bidirectional=seq_bidirectional, use_cuda=self.use_cuda)

    def get_frm_output_size(self, input_shape):
        """Probe the frame encoder with one zero frame to measure its output width."""
        dummy = torch.autograd.Variable(torch.zeros((1,) + tuple(input_shape)))
        return self.fenc.forward(dummy).size()[1]
################################################################################
# Sequence Container Meta Models
################################################################################
class MRIMetaSequenceRNN(MRISequenceNet):
    """Sequence model that concatenates meta-data onto the RNN summary.

    The MetaRNN produces a sequence embedding; the final classifier is a
    single Linear over [sequence embedding ; flattened meta features].
    Inputs to forward/predict are (sequence_tensor, meta_tensor) pairs.
    """
    def __init__(self, frame_encoder, n_classes, use_cuda, **kwargs):
        # Parent is initialized without encoders; both are attached below.
        super(MRIMetaSequenceRNN, self).__init__(frame_encoder=None, seq_encoder=None, use_cuda=use_cuda)
        self.n_classes = n_classes
        input_shape = kwargs.get("input_shape", (3, 32, 32))
        self.fenc = frame_encoder
        # NOTE: must run after self.fenc is set — it probes the encoder.
        frm_output_size = self.get_frm_output_size(input_shape)
        seq_output_size = kwargs.get("seq_output_size", 128)
        seq_dropout = kwargs.get("seq_dropout", 0.1)
        seq_attention = kwargs.get("seq_attention", True)
        seq_bidirectional = kwargs.get("seq_bidirectional", True)
        seq_max_seq_len = kwargs.get("seq_max_seq_len", 15)
        seq_rnn_type = kwargs.get("seq_rnn_type", "LSTM")
        self.senc = MetaRNN(n_classes=n_classes, input_size=frm_output_size, hidden_size=seq_output_size,
                            dropout=seq_dropout, max_seq_len=seq_max_seq_len, attention=seq_attention,
                            rnn_type=seq_rnn_type, bidirectional=seq_bidirectional, use_cuda=self.use_cuda)
        # meta_input_shape: size (or shape) of the meta-feature vector.
        meta_input_shape = kwargs.get("meta_input_shape", 3)
        self.classifier = self.get_classifier(seq_output_size, n_classes, seq_bidirectional, meta_input_shape)
    def get_frm_output_size(self, input_shape):
        # Probe the frame encoder with one zero frame to measure its output width.
        input_shape = list(input_shape)
        input_shape.insert(0,1)
        dummy_batch_size = tuple(input_shape)
        x = torch.autograd.Variable(torch.zeros(dummy_batch_size))
        frm_output_size = self.fenc.forward(x).size()[1]
        return frm_output_size
    def get_classifier(self, seq_output_size, n_classes, seq_bidirectional,
                       meta_input_shape):
        # Bidirectional RNNs double the embedding width.
        b = 2 if seq_bidirectional else 1
        meta_input_shape = np.prod([meta_input_shape])
        classifier = nn.Linear(int(b * seq_output_size + meta_input_shape), int(n_classes))
        return classifier
    def embedding(self, x, hidden):
        """Get learned representation of MRI sequence (meta features are ignored here)."""
        x, meta = x
        return super(MRIMetaSequenceRNN, self).embedding(x, hidden)
    def forward(self, x, hidden=None):
        # `x` is a (sequence, meta) pair; move each to the GPU when enabled.
        x, meta = x
        if self.use_cuda and not meta.is_cuda:
            meta = meta.cuda()
        if self.use_cuda and not x.is_cuda:
            x = x.cuda()
        x = super(MRIMetaSequenceRNN, self).forward(x, hidden)
        # Concatenate the sequence embedding with the flattened meta features.
        concats = torch.cat((x.view(x.size(0), -1).float(),
                             meta.view(meta.size(0), -1).float()), 1)
        outputs = self.classifier(concats)
        return outputs
    def predict_proba(self, data_loader, binary=True, pos_label=1):
        """ Forward inference """
        y_pred = []
        for i, data in enumerate(data_loader):
            x, y = data
            # `x` is a list/tuple of tensors (sequence, meta) — wrap each one.
            x = [Variable(x_) if not self.use_cuda else Variable(x_).cuda() for x_ in x]
            y = Variable(y) if not self.use_cuda else Variable(y).cuda()
            h0 = self.init_hidden(x[0].size(0))
            outputs = self(x, h0)
            y_hat = F.softmax(outputs, dim=1)
            y_hat = y_hat.data.numpy() if not self.use_cuda else y_hat.cpu().data.numpy()
            y_pred.append(y_hat)
            # empty cuda cache
            if self.use_cuda:
                torch.cuda.empty_cache()
        y_pred = np.concatenate(y_pred)
        return y_pred[:, pos_label] if binary else y_pred
class MetaVGG16FrameRNN(MRIMetaSequenceRNN):
    """Meta-data-aware sequence model with a VGG16-BN frame encoder."""
    def __init__(self, n_classes, use_cuda, **kwargs):
        self.name = "MetaVGG16FrameRNN"
        encoder = vgg16_bn(pretrained=kwargs.get("pretrained", True),
                           requires_grad=kwargs.get("requires_grad", False))
        super(MetaVGG16FrameRNN, self).__init__(frame_encoder=encoder,
                                                n_classes=n_classes,
                                                use_cuda=use_cuda,
                                                **kwargs)
class MetaDense4012FrameRNN(MRIMetaSequenceRNN):
    """Meta-data-aware sequence model with a DenseNet-40-12-BC frame encoder."""
    def __init__(self, n_classes, use_cuda, **kwargs):
        self.name = "MetaDense4012FrameRNN"
        encoder = densenet_40_12_bc(pretrained=kwargs.get("pretrained", True),
                                    requires_grad=kwargs.get("requires_grad", False))
        super(MetaDense4012FrameRNN, self).__init__(frame_encoder=encoder,
                                                    n_classes=n_classes,
                                                    use_cuda=use_cuda,
                                                    **kwargs)
|
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
from utils.wxlogger import WxLogger
class ElasticService:
"""Elastic Service Class."""
logger = None
    def __init__(self, client: Elasticsearch):
        """Wrap an Elasticsearch client; lazily creates one shared class-level logger."""
        if self.__class__.logger is None:
            # First instantiation wires the logger for all instances.
            ElasticService.logger = WxLogger(__name__).getLogger()
        self.client = client
        ElasticService.logger.info("client initiated")
def create_index(self, name, delete=True):
if self.client.indices.exists(name):
if delete:
self.client.indices.delete(name)
ElasticService.logger.info("Index Deleted")
self.client.indices.create(index=name)
ElasticService.logger.info("Index Created")
else:
ElasticService.logger.info("Index Already Exist")
else:
self.client.indices.create(index=name)
ElasticService.logger.info("Index Created")
def create_mapping(self, index, mapping, delete=True):
if self.client.indices.exists(index):
if delete:
self.client.indices.delete(index)
ElasticService.logger.info("Index Deleted")
self.client.indices.create(index=index, body=mapping)
ElasticService.logger.info("Mapping Created")
else:
ElasticService.logger.info("Index Already Exists")
else:
self.client.indices.create(index=index, body=mapping)
ElasticService.logger.info("Mapping Created")
def search_data(self, index, query):
res = None
data = []
try:
res = self.client.search(index=index, body=query)
for doc in res["hits"]["hits"]:
source = doc["_source"]
source["_id"] = doc["_id"]
data.append(source)
return data
except Exception as ex:
ElasticService.logger.error(f"{query} failed - Exception: {ex}")
def search_by_id(self, index, id):
try:
res = self.client.get(index=index, id=id)
source = res["_source"]
source["_id"] = res["_id"]
return source
except Exception as ex:
ElasticService.logger.error(f"search_by_id failed - Exception: {ex}")
def create_doc(self, index, doc, refresh=False):
"""Create / Update a document in the index.
:param index: index name
:param doc: document
:return: True/False based on sucess/failure
"""
try:
self.client.index(index=index, body=doc, refresh=refresh)
ElasticService.logger.info("---doc_created---")
return True
except Exception as ex:
ElasticService.logger.error(ex)
return False
def create_doc_by_id(self, index, id, doc, refresh=False):
"""Create / Update a document in the index.
:param index: index name
:param doc: document
:return: True/False based on sucess/failure
"""
try:
resp = self.client.index(index=index, body=doc, id=id, refresh=refresh)
ElasticService.logger.info("---doc_created---")
return resp
except Exception as ex:
ElasticService.logger.error(ex)
return False
def update_doc(self, index, id, body, seq_no, primary_term, refresh=False):
"""Update the existing document.
:param index: index name
:type index: string
:param id: document id
:type id: string
:param script: body object for update query
:type script: object
:return: True/False
:rtype: bool
"""
try:
resp = self.client.update(
index=index,
id=id,
body=body,
refresh=refresh,
if_primary_term=primary_term,
if_seq_no=seq_no,
)
return resp
except Exception as ex:
ElasticService.logger.error(ex)
return False
def update_by_query(self, index, body):
    """Kick off an asynchronous update-by-query on *index*.

    Conflicting documents are skipped (``conflicts="proceed"``) and the
    call does not wait for the task to finish.  Returns True if the
    request was submitted, False (after logging) otherwise.
    """
    try:
        self.client.update_by_query(
            index=index,
            body=body,
            conflicts="proceed",
            wait_for_completion=False,
        )
    except Exception as ex:
        ElasticService.logger.error(ex)
        return False
    return True
def bulk_update(self, client, actions):
    """Execute a list of bulk actions against *client*.

    :param client: Elasticsearch client to run the bulk request on
    :param actions: action dicts, e.g. from build_bulk_create_body /
        build_bulk_update_body
    """
    # NOTE(review): `bulk` is not defined in this block -- presumably
    # elasticsearch.helpers.bulk imported at module level; confirm.
    bulk(client=client, actions=actions)
def build_bulk_create_body(self, index, data):
    """Build the action list for a bulk "index" request.

    Each item in *data* must carry an ``_id`` key; it becomes the bulk
    action's document id and is excluded from the indexed ``_source``.

    BUG FIX: the previous version mutated the caller's dicts in place
    (``del item["_id"]``); each item is now shallow-copied first.

    :param index: target index name
    :param data: iterable of document dicts, each containing ``_id``
    :return: list of action dicts suitable for elasticsearch.helpers.bulk
    """
    elastic_bulk_query = []
    for item in data:
        source = dict(item)  # shallow copy: leave the caller's dict intact
        id_ = str(source.pop("_id"))
        elastic_bulk_query.append(
            {
                "_id": id_,
                "_op_type": "index",
                "_index": index,
                "_source": source,
            },
        )
    return elastic_bulk_query
def build_bulk_update_body(self, index, data):
    """Build the action list for a bulk "update" request.

    Each item in *data* must carry an ``_id`` key; it becomes the bulk
    action's document id and is excluded from the partial ``doc``.

    BUG FIX: the previous version mutated the caller's dicts in place
    (``del item["_id"]``); each item is now shallow-copied first.

    :param index: target index name
    :param data: iterable of partial-document dicts, each containing ``_id``
    :return: list of action dicts suitable for elasticsearch.helpers.bulk
    """
    elastic_bulk_query = []
    for item in data:
        doc = dict(item)  # shallow copy: leave the caller's dict intact
        id_ = str(doc.pop("_id"))
        elastic_bulk_query.append(
            {
                "_id": id_,
                "_op_type": "update",
                "_index": index,
                "doc": doc,
            },
        )
    return elastic_bulk_query
def get_doc_version(self, index, doc_id):
    """Return the optimistic-concurrency tokens for a document.

    :param index: index name
    :param doc_id: document id
    :return: ``{"seq_no": ..., "primary_term": ...}`` on success (for use
        with update_doc), False on any failure.
    """
    try:
        meta = self.client.get(index=index, id=doc_id)
        return {
            "seq_no": meta["_seq_no"],
            "primary_term": meta["_primary_term"],
        }
    except Exception as ex:
        ElasticService.logger.error(f" query failed - Exception: {ex}")
        return False
def fetch_scroll(self, index, doc_type, query, scroll, id=False, others=None):
    """
    Fetch a large amount of data in a cursor manner where the result exceeds
    the 10000-document limit, using the Elasticsearch scroll API.
    :param index: index_name
    :param doc_type: index_type (None to omit doc_type from the search)
    :param query: query body
    :param scroll: scroll keep-alive time (e.g. "2m")
    :param id: whether to copy each hit's doc_id into the returned source
    :param others: optional post-processing spec: {"key": <hit field>,
        "type": "array"|"object", "columns": [...], "tree": [...]}
    :return: array of documents from query result
    """
    # NOTE(review): this method uses self.es and ElasticIndividualClient.logger
    # while its siblings use self.client and ElasticService.logger -- it looks
    # copied from another class; confirm both attributes exist here.
    data = []
    if doc_type is None:
        res = self.es.search(index=index, body=query, scroll=scroll)
    else:
        res = self.es.search(index=index, doc_type=doc_type, body=query, scroll=scroll)
    scroll_id = res["_scroll_id"]
    count = 0
    scrolling = True
    while scrolling:
        if res["hits"]["hits"]:
            for doc in res["hits"]["hits"]:
                source = doc["_source"]
                if id:
                    source["_id"] = doc["_id"]
                if others is not None:
                    docs = doc[others["key"]]
                    i = 0
                    # for attaching the array elements to their respective keys
                    if others["type"] == "array":
                        while i < len(docs):
                            source[others["columns"][i]] = docs[i]
                            i += 1
                    # for attaching the object to the respective key
                    # (indentation reconstructed from a flattened source --
                    # confirm source[key] assignment belongs at this level)
                    if others["type"] == "object":
                        if others["tree"] is not None:
                            # walk down the nested path to the wanted leaf
                            for leaf in others["tree"]:
                                docs = docs[leaf]
                        source[others["key"]] = docs
                data.append(source)
            # pull the next page; the scroll id stays valid for `scroll` time
            res = self.es.scroll(scroll_id=scroll_id, scroll=scroll)
            count += 1
            ElasticIndividualClient.logger.info("scroll request : {}".format(count))
        else:
            scrolling = False
    return data
|
<gh_stars>1-10
import os, binascii
from hashlib import sha256, sha1
try:
import json
json.__version__ # this is really here to hush pyflakes, which gets
# confused by this import-and-alternate pattern
except ImportError:
import simplejson as json
json.__version__
class JPAKEError(Exception):
    """Base class for all J-PAKE protocol errors."""
    pass

class DuplicateSignerID(JPAKEError):
    """Their signer ID must be different than ours, to keep them from merely
    echoing back our own signature."""

class BadZeroKnowledgeProof(JPAKEError):
    """They failed to prove knowledge of their own secret."""

class GX4MustNotBeOne(JPAKEError):
    # g^x4 == 1 would let a peer neutralize its key contribution, so it is
    # rejected outright in two().
    pass

class SignerIDMustBeASCII(JPAKEError):
    """All signer IDs must be ASCII strings."""
def ASCII(s):
    """Return *s* encoded as an ASCII bytestring.

    s might be a unicode object (output of json.loads) that really contains
    ASCII, or it might be a bytestring (most other places). Since this will
    be JSONified later, require that it be in the ASCII subset of
    bytestrings.

    :raises SignerIDMustBeASCII: if *s* contains non-ASCII characters
    """
    try:
        return s.encode("ascii")
    except UnicodeError:
        # BUG FIX: encoding a unicode object with non-ASCII characters raises
        # UnicodeEncodeError, not UnicodeDecodeError, so the old handler let
        # that case escape.  UnicodeError is the common base of both.
        raise SignerIDMustBeASCII
def orderlen(order):
    """Number of bytes needed to hold *order*'s big-endian encoding.

    BUG FIX: uses floor division so the result is an int on both Python 2
    and Python 3 (plain ``/ 2`` yields a float under Python 3, which would
    break every downstream use as a byte count).
    """
    return (1 + len("%x" % order)) // 2  # bytes
def number_to_string(num, orderlen):
    """Encode the non-negative integer *num* as a big-endian bytestring.

    With orderlen=None the result is the minimal even-length hex encoding;
    otherwise it is zero-padded to exactly *orderlen* bytes (asserted).
    """
    if orderlen is None:
        hexed = "%x" % num
        if len(hexed) % 2:
            hexed = "0" + hexed  # unhexlify needs an even digit count
        return binascii.unhexlify(hexed)
    encoded = binascii.unhexlify(("%0" + str(2*orderlen) + "x") % num)
    assert len(encoded) == orderlen, (len(encoded), orderlen)
    return encoded
def string_to_number(string):
    """Decode a big-endian bytestring into an integer (inverse of
    number_to_string, up to leading zero bytes)."""
    hexed = binascii.hexlify(string)
    return int(hexed, 16)
class Params:
    """Schnorr-group parameters for J-PAKE.

    p: prime modulus; q: prime order of the subgroup generated by g;
    g: generator.  ``orderlen`` caches the byte length needed to
    serialize mod-p group elements.
    """
    def __init__(self, p, q, g):
        self.p = p
        self.q = q
        self.g = g
        # byte length of p (group elements), used by pack_*/unpack_*
        self.orderlen = orderlen(self.p)
# params_80 is roughly as secure as an 80-bit symmetric key, and uses a
# 1024-bit modulus. params_112 uses a 2048-bit modulus, and params_128 uses a
# 3072-bit modulus.
params_80 = Params(p=0xfd7f53811d75122952df4a9c2eece4e7f611b7523cef4400c31e3f80b6512669455d402251fb593d8d58fabfc5f5ba30f6cb9b556cd7813b801d346ff26660b76b9950a5a49f9fe8047b1022c24fbba9d7feb7c61bf83b57e7c6a8a6150f04fb83f6d3c51ec3023554135a169132f675f3ae2b61d72aeff22203199dd14801c7,
                   q=0x9760508f15230bccb292b982a2eb840bf0581cf5,
                   g=0xf7e1a085d69b3ddecbbcab5c36b857b97994afbbfa3aea82f9574c0b3d0782675159578ebad4594fe67107108180b449167123e84c281613b7cf09328cc8a6e13c167a8b547c8d28e0a3ae1e2bb3a675916ea37f0bfa213562f1fb627a01243bcca4f1bea8519089a883dfe15ae59f06928b665e807b552564014c3bfecf492a)
# 112, 128 from NIST
# (standard DSA-style domain parameters; q is 224- resp. 256-bit)
params_112 = Params(p=0xC196BA05AC29E1F9C3C72D56DFFC6154A033F1477AC88EC37F09BE6C5BB95F51C296DD20D1A28A067CCC4D4316A4BD1DCA55ED1066D438C35AEBAABF57E7DAE428782A95ECA1C143DB701FD48533A3C18F0FE23557EA7AE619ECACC7E0B51652A8776D02A425567DED36EABD90CA33A1E8D988F0BBB92D02D1D20290113BB562CE1FC856EEB7CDD92D33EEA6F410859B179E7E789A8F75F645FAE2E136D252BFFAFF89528945C1ABE705A38DBC2D364AADE99BE0D0AAD82E5320121496DC65B3930E38047294FF877831A16D5228418DE8AB275D7D75651CEFED65F78AFC3EA7FE4D79B35F62A0402A1117599ADAC7B269A59F353CF450E6982D3B1702D9CA83,
                    q=0x90EAF4D1AF0708B1B612FF35E0A2997EB9E9D263C9CE659528945C0D,
                    g=0xA59A749A11242C58C894E9E5A91804E8FA0AC64B56288F8D47D51B1EDC4D65444FECA0111D78F35FC9FDD4CB1F1B79A3BA9CBEE83A3F811012503C8117F98E5048B089E387AF6949BF8784EBD9EF45876F2E6A5A495BE64B6E770409494B7FEE1DBB1E4B2BC2A53D4F893D418B7159592E4FFFDF6969E91D770DAEBD0B5CB14C00AD68EC7DC1E5745EA55C706C4A1C5C88964E34D09DEB753AD418C1AD0F4FDFD049A955E5D78491C0B7A2F1575A008CCD727AB376DB6E695515B05BD412F5B8C2F4C77EE10DA48ABD53F5DD498927EE7B692BBBCDA2FB23A516C5B4533D73980B2A3B60E384ED200AE21B40D273651AD6060C13D97FD69AA13C5611A51B9085)
params_128 = Params(p=0x90066455B5CFC38F9CAA4A48B4281F292C260FEEF01FD61037E56258A7795A1C7AD46076982CE6BB956936C6AB4DCFE05E6784586940CA544B9B2140E1EB523F009D20A7E7880E4E5BFA690F1B9004A27811CD9904AF70420EEFD6EA11EF7DA129F58835FF56B89FAA637BC9AC2EFAAB903402229F491D8D3485261CD068699B6BA58A1DDBBEF6DB51E8FE34E8A78E542D7BA351C21EA8D8F1D29F5D5D15939487E27F4416B0CA632C59EFD1B1EB66511A5A0FBF615B766C5862D0BD8A3FE7A0E0DA0FB2FE1FCB19E8F9996A8EA0FCCDE538175238FC8B0EE6F29AF7F642773EBE8CD5402415A01451A840476B2FCEB0E388D30D4B376C37FE401C2A2C2F941DAD179C540C1C8CE030D460C4D983BE9AB0B20F69144C1AE13F9383EA1C08504FB0BF321503EFE43488310DD8DC77EC5B8349B8BFE97C2C560EA878DE87C11E3D597F1FEA742D73EEC7F37BE43949EF1A0D15C3F3E3FC0A8335617055AC91328EC22B50FC15B941D3D1624CD88BC25F3E941FDDC6200689581BFEC416B4B2CB73,
                    q=0xCFA0478A54717B08CE64805B76E5B14249A77A4838469DF7F7DC987EFCCFB11D,
                    g=0x5E5CBA992E0A680D885EB903AEA78E4A45A469103D448EDE3B7ACCC54D521E37F84A4BDD5B06B0970CC2D2BBB715F7B82846F9A0C393914C792E6A923E2117AB805276A975AADB5261D91673EA9AAFFEECBFA6183DFCB5D3B7332AA19275AFA1F8EC0B60FB6F66CC23AE4870791D5982AAD1AA9485FD8F4A60126FEB2CF05DB8A7F0F09B3397F3937F2E90B9E5B9C9B6EFEF642BC48351C46FB171B9BFA9EF17A961CE96C7E7A7CC3D3D03DFAD1078BA21DA425198F07D2481622BCE45969D9C4D6063D72AB7A0F08B2F49A7CC6AF335E08C4720E31476B67299E231F8BD90B39AC3AE3BE0C6B6CACEF8289A2E2873D58E51E029CAFBD55E6841489AB66B5B4B9BA6E2F784660896AFF387D92844CCB8B69475496DE19DA2E58259B090489AC8E62363CDF82CFD8EF2A427ABCD65750B506F56DDE3B988567A88126B914D7828E2B63A6D7ED0747EC59E0E0A23CE7D8A74C1D2C2A7AFB6A29799620F00E11C33787F7DED3B30E1A22D09F1FBDA1ABBBFBF25CAE05A13F812E34563F99410E73B)
def randrange(order, entropy):
    """Return a random integer k such that 0 <= k < order, uniformly
    distributed across that range. For simplicity, this only behaves well if
    'order' is fairly close (but below) a power of 256. The try-try-again
    algorithm we use takes longer and longer time (on average) to complete as
    'order' falls, rising to a maximum of avg=512 loops for the worst-case
    (256**k)+1 . All of the standard curves behave well. There is a cutoff at
    10k loops (which raises RuntimeError) to prevent an infinite loop when
    something is really broken like the entropy function not working.

    Note that this function is not declared to be forwards-compatible: we may
    change the behavior in future releases. The entropy= argument (which
    should get a callable that behaves like os.urandom) can be used to
    achieve stability within a given release (for repeatable unit tests), but
    should not be used as a long-term-compatible key generation algorithm.
    """
    # we could handle arbitrary orders (even 256**k+1) better if we created
    # candidates bit-wise instead of byte-wise, which would reduce the
    # worst-case behavior to avg=2 loops, but that would be more complex. The
    # change would be to round the order up to a power of 256, subtract one
    # (to get 0xffff..), use that to get a byte-long mask for the top byte,
    # generate the len-1 entropy bytes, generate one extra byte and mask off
    # the top bits, then combine it with the rest. Requires jumping back and
    # forth between strings and integers a lot.
    assert order > 1
    bytes = orderlen(order)  # NOTE: shadows the builtin `bytes`
    dont_try_forever = 10000 # gives about 2**-60 failures for worst case
    while dont_try_forever > 0:
        dont_try_forever -= 1
        candidate = string_to_number(entropy(bytes))
        # rejection sampling: discard candidates >= order to stay uniform
        if candidate < order:
            return candidate
        continue  # redundant -- the loop continues anyway
    raise RuntimeError("randrange() tried hard but gave up, either something"
                       " is very wrong or you got realllly unlucky. Order was"
                       " %x" % order)
class JPAKE:
    """This class manages one half of a J-PAKE key negotiation.

    Create an instance with JPAKE(password), where 'password' is either a
    number (0 < number < params.q-1) or a bytestring. You can also pass an
    optional params= value (one of [params_80, params_112, params_128], for
    increasing levels of security and CPU usage), and a signerid= value
    (which must be an ASCII string). Any two JPAKE communicating instances
    must use different signerid= values (to prevent simply reflecting a
    message back to its sender): the default achieves this by using a random
    string, but you could use 'client' and 'server' if you only ever use this
    class in that way.

    Once constructed, you will need to call one(), two(), and three() in
    order, passing the output of one over the wire, where it forms the input
    to the next:

        my_msg1 = j.one()
        send(my_msg1)
        their_msg1 = receive()
        my_msg2 = j.two(their_msg1)
        send(my_msg2)
        their_msg2 = receive()
        key = j.three(their_msg2)

    The secret 'key' that comes out will be a bytestring (the output of a
    hash function). If both sides used the same password, both sides will
    wind up with the same key, otherwise they will have different keys. You
    will probably want to confirm this equivalence before relying upon it
    (but don't reveal the key to the other side in doing so, in case you
    aren't talking to the right party and your keys are really different).
    Note that this introduces an asymmetry to the protocol. For example:

        A: hhkey = sha256(sha256(Akey).digest()).digest()
        A: send(hhkey)
        B: hhkey = receive()
        B: assert sha256(sha256(Bkey).digest()).digest() == hhkey
        B: hkey = sha256(Bkey).digest()
        B: send(hkey)
        A: hkey = receive()
        A: assert sha256(Akey).digest() == hkey

    If you can't keep the JPAKE instance alive for the whole negotiation, you
    can persist the important data from an instance with data=j.to_json(),
    and then reconstruct the instance with j=JPAKE.from_json(data). The
    instance data is sensitive: protect it better than you would the original
    password. An attacker who learns the instance state from both sides will
    be able to reconstruct the shared key. These functions return a
    dictionary: you are responsible for invoking e.g. json.dumps() to
    serialize it into a string that can be written to disk. For params_80,
    the serialized JSON is typically about 750 bytes after construction, 1300
    bytes after one(), and 1800 bytes after two().

        j = JPAKE(password)
        send(j.one())
        open('save.json','w').write(json.dumps(j.to_json()))
        ...
        j = JPAKE.from_json(json.loads(open('save.json').read()))
        send(j.two(receive()))
        open('save.json','w').write(json.dumps(j.to_json()))
        ...
        j = JPAKE.from_json(json.loads(open('save.json').read()))
        key = j.three(receive())

    The messages returned by one() and two() are small dictionaries, safe to
    serialize as JSON objects, and will survive being deserialized in a
    javascript environment (i.e. the large numbers are encoded as hex
    strings, since JS does not have bigints). If you wish for smaller
    messages, the JPAKE instance has pack_msg1(), unpack_msg1(), pack_msg2(),
    unpack_msg2() methods to encode/decode these strings into smaller
    bytestrings. The encoding scheme is slightly different for each params=
    value. For params_80, a JSON encoding of one()/two() is 1218/606 bytes,
    while the output of pack_one()/pack_two() is 773/389 byes.

        send(j.pack_one(j.one()))
        msg2 = j.two(j.unpack_one(receive()))
        send(j.pack_two(msg2))
        key = j.three(j.unpack_two(receive()))
    """

    # NOTE(review): this class is Python 2 code -- it relies on `long`,
    # generator .next(), and bytes/unicode str conflation.  It will not run
    # unmodified on Python 3.

    def __init__(self, password, params=params_80, signerid=None, entropy=None):
        if entropy is None:
            entropy = os.urandom
        self.entropy = entropy
        if signerid is None:
            # default: a random 128-bit hex ID, so two peers almost surely differ
            signerid = binascii.hexlify(self.entropy(16))
        self.signerid = ASCII(signerid)
        self.params = params
        q = params.q
        if isinstance(password, (int, long)):  # `long`: Python 2 only
            assert password > 0
            assert password < q-1
            self.s = password
        else:
            assert isinstance(password, str)
            # we must convert the password (a variable-length string) into a
            # number from 1 to q-1 (inclusive).
            self.s = 1 + (string_to_number(sha256(password).digest()) % (q-1))

    def createZKP(self, generator, exponent, gx):
        # This returns a proof that I know a secret value 'exponent' that
        # satisfies the equation A^exponent=B mod P, where A,B,P are known to
        # the recipient of this proof (A=generator, P=self.params.p). It
        # happens that everywhere createZKP() is called, we already have
        # A^exponent, so we pass it in to save some computation time.
        p = self.params.p; q = self.params.q
        r = randrange(q, self.entropy) # [0,q)
        gr = pow(generator, r, p)
        #gx = pow(generator, exponent, p) # the verifier knows this already
        # Ben's C implementation hashes the pieces this way:
        def hashbn(bn):
            # 2-byte big-endian length prefix, then the bignum's bytes
            bns = number_to_string(bn, None)
            assert len(bns) <= 0xffff
            return number_to_string(len(bns), 2) + bns
        assert len(self.signerid) <= 0xffff
        # we match the way OpenSSL does the hash:
        # http://git.infradead.org/openssl.git/blob/HEAD:/crypto/jpake/jpake.c#l342
        s = "".join([hashbn(generator), hashbn(gr), hashbn(gx),
                     number_to_string(len(self.signerid), 2),
                     self.signerid])
        h = string_to_number(sha1(s).digest())
        # Schnorr signature response: b = r - x*h (mod q)
        b = (r - exponent*h) % q
        return {"gr": "%x"%gr, # gr and b are the important values
                "b": "%x"%b,
                "id": self.signerid,
                }

    def checkZKP(self, generator, gx, zkp):
        # confirm the sender's proof (contained in 'zkp') that they know 'x'
        # such that generator^x==gx
        p = self.params.p
        gr = int(zkp["gr"], 16)
        b = int(zkp["b"], 16)
        # reject proofs echoed back under our own signer ID
        if zkp["id"] == self.signerid:
            raise DuplicateSignerID
        # Ben's C implementation hashes the pieces this way:
        def hashbn(bn):
            bns = number_to_string(bn, None)
            assert len(bns) <= 0xffff
            return number_to_string(len(bns), 2) + bns
        assert len(zkp["id"]) <= 0xffff
        s = "".join([hashbn(generator), hashbn(gr), hashbn(gx),
                     number_to_string(len(zkp["id"]), 2),
                     str(zkp["id"])])
        h = string_to_number(sha1(s).digest())
        # valid iff g^b * (g^x)^h == g^r
        gb = pow(generator, b, p)
        y = pow(gx, h, p)
        if gr != (gb*y)%p:
            raise BadZeroKnowledgeProof

    def one(self):
        """Round 1: pick secrets x1,x2, publish g^x1, g^x2 plus ZK proofs."""
        g = self.params.g; p = self.params.p; q = self.params.q
        self.x1 = randrange(q, self.entropy) # [0,q)
        self.x2 = 1+randrange(q-1, self.entropy) # [1,q)
        gx1 = self.gx1 = pow(g, self.x1, p)
        gx2 = self.gx2 = pow(g, self.x2, p)
        zkp_x1 = self.createZKP(g, self.x1, gx1)
        zkp_x2 = self.createZKP(g, self.x2, gx2)
        # now serialize all four. Use simple jsonable dict for now
        return {"gx1": "%x"%gx1,
                "gx2": "%x"%gx2,
                "zkp_x1": zkp_x1,
                "zkp_x2": zkp_x2,
                }

    def pack_one(self, data):
        """Compact fixed-width binary encoding of a one() message."""
        orderlen = self.params.orderlen
        def n2s(hexint):
            return number_to_string(int(hexint,16), orderlen)
        assert data["zkp_x1"]["id"] == data["zkp_x2"]["id"]
        packed = "".join([n2s(data["gx1"]),
                          n2s(data["gx2"]),
                          n2s(data["zkp_x1"]["gr"]),
                          n2s(data["zkp_x1"]["b"]),
                          n2s(data["zkp_x2"]["gr"]),
                          n2s(data["zkp_x2"]["b"]),
                          # the rest of the string is signerid
                          data["zkp_x1"]["id"],
                          ])
        return packed

    def unpack_one(self, packed):
        """Inverse of pack_one(): six fixed-width numbers, then signerid."""
        orderlen = self.params.orderlen
        def generate_substrings(packed):
            for i in range(6):
                yield binascii.hexlify(packed[i*orderlen:(i+1)*orderlen])
            yield packed[6*orderlen:] # signerid
        g = generate_substrings(packed)
        # NOTE: g.next() is Python 2 only (Python 3 uses next(g))
        data = { "gx1": g.next(),
                 "gx2": g.next(),
                 "zkp_x1": {"gr": g.next(), "b": g.next() },
                 "zkp_x2": {"gr": g.next(), "b": g.next() },
                 }
        signerid = ASCII(g.next())
        assert isinstance(signerid, str)
        data["zkp_x1"]["id"] = signerid
        data["zkp_x2"]["id"] = signerid
        return data

    def two(self, m1):
        """Round 2: verify their round-1 proofs, publish A = (g^(x1+x3+x4))^(x2*s)."""
        g = self.params.g; p = self.params.p
        gx3 = self.gx3 = int(m1["gx1"], 16) % p
        gx4 = self.gx4 = int(m1["gx2"], 16) % p
        if gx4 == 1:
            raise GX4MustNotBeOne
        self.checkZKP(g, gx3, m1["zkp_x1"])
        self.checkZKP(g, gx4, m1["zkp_x2"])
        # now compute A = g^((x1+x3+x4)*x2*s), i.e. (gx1*gx3*gx4)^(x2*s)
        t1 = (((self.gx1*gx3) % p) * gx4) % p # (gx1*gx3*gx4)%p
        # NOTE(review): reduced mod p rather than mod q here.  Harmless for
        # these params (x2*s < q**2 < p so the reduction is a no-op), but
        # mod q would be the conventional reduction for an exponent -- verify.
        t2 = (self.x2*self.s) % p
        A = pow(t1, t2, p)
        # also create a ZKP for x2*s
        zkp_A = self.createZKP(t1, t2, A)
        return {"A": "%x"%A,
                "zkp_A": zkp_A,
                }

    def pack_two(self, data):
        """Compact fixed-width binary encoding of a two() message."""
        orderlen = self.params.orderlen
        def n2s(hexint):
            return number_to_string(int(hexint,16), orderlen)
        packed = "".join([n2s(data["A"]),
                          n2s(data["zkp_A"]["gr"]),
                          n2s(data["zkp_A"]["b"]),
                          # the rest of the string is signerid
                          data["zkp_A"]["id"],
                          ])
        return packed

    def unpack_two(self, packed):
        """Inverse of pack_two(): three fixed-width numbers, then signerid."""
        orderlen = self.params.orderlen
        def generate_substrings(packed):
            for i in range(3):
                yield binascii.hexlify(packed[i*orderlen:(i+1)*orderlen])
            yield packed[3*orderlen:] # signerid
        g = generate_substrings(packed)
        data = { "A": g.next(),
                 "zkp_A": {"gr": g.next(), "b": g.next() },
                 }
        signerid = ASCII(g.next())
        assert isinstance(signerid, str)
        data["zkp_A"]["id"] = signerid
        return data

    def three(self, m2):
        """Round 3: verify their A, derive and return the shared session key."""
        p = self.params.p; q = self.params.q
        B = int(m2["A"], 16)
        generator = (((self.gx1*self.gx2)%p)*self.gx3) % p
        self.checkZKP(generator, B, m2["zkp_A"])
        # we want (B/(g^(x2*x4*s)))^x2, using the g^x4 that we got from them
        # (stored in gx4). We start with gx4^x2, then (gx4^x2)^-s, then
        # (B*(gx4^x2)^-s), then finally apply the ^x2.
        t3 = pow(self.gx4, self.x2, p)
        t3 = pow(t3, q-self.s, p)
        t4 = (B * t3) % p
        K = pow(t4, self.x2, p)
        # the paper suggests this can be reduced to two pow() calls, but I'm
        # not seeing it.
        self.K = K # stash it, so that folks trying to be compatible with
                   # some OpenSSL-based implementation (which returns the raw
                   # K from JPAKE_get_shared_key()) can use alternative
                   # hashing schemes to get from K to the final key. It's
                   # important to hash K before using it, to not expose the
                   # actual number to anybody.
        key = sha256(number_to_string(K, self.params.orderlen)).digest()
        return key

    def getattr_hex(self, name):
        # serialization helper: hex-encode the attribute if it exists yet
        if hasattr(self, name):
            return "%x" % getattr(self, name)
        return None

    def to_json(self):
        """Snapshot the (sensitive!) instance state as a JSON-able dict."""
        return {"signerid": self.signerid,
                "params.p": "%x" % self.params.p,
                "params.g": "%x" % self.params.g,
                "params.q": "%x" % self.params.q,
                "s": self.s,
                "x1": self.getattr_hex("x1"),
                "x2": self.getattr_hex("x2"),
                "gx1": self.getattr_hex("gx1"),
                "gx2": self.getattr_hex("gx2"),
                "gx3": self.getattr_hex("gx3"),
                "gx4": self.getattr_hex("gx4"),
                }

    @classmethod
    def from_json(klass, data, entropy=None):
        """Rebuild an instance from a to_json() snapshot."""
        p = Params(int(data["params.p"], 16),
                   int(data["params.q"], 16),
                   int(data["params.g"], 16))
        self = klass(data["s"], params=p, signerid=data["signerid"],
                     entropy=entropy)
        for name in ["x1", "x2", "gx1", "gx2", "gx3", "gx4"]:
            if data[name]:
                setattr(self, name, int(data[name], 16))
        return self
|
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2020, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
from collections import defaultdict
from datetime import datetime
import logging
from volttron.platform.agent.known_identities import CONTROL_CONNECTION, PROCESS_IDENTITIES
from volttron.platform.agent.utils import format_timestamp
from volttron.platform.vip.agent import Agent, Core, RPC
_log = logging.getLogger(__name__)
class HealthService(Agent):
    """Tracks connection and heartbeat status for every agent on the platform."""

    def __init__(self, **kwargs):
        super(HealthService, self).__init__(**kwargs)
        # Store the health stats for given peers in a dictionary with
        # keys being the identity of the connected agent.
        self._health_dict = defaultdict(dict)

    def peer_added(self, peer):
        """
        The `peer_added` method should be called whenever an agent is connected to the
        platform.

        :param peer: The identity of the agent connected to the platform
        """
        health = self._health_dict[peer]
        health['peer'] = peer
        # service agents are the platform's own processes
        health['service_agent'] = peer in PROCESS_IDENTITIES
        health['connected'] = format_timestamp(datetime.now())

    def peer_dropped(self, peer):
        # TODO: Should there be an option for a db/log file for agents coming and going from the platform?
        # NOTE(review): the 'disconnected' timestamp is written into the inner
        # dict and then the entry is immediately deleted, so it is only
        # observable through a previously-held reference -- confirm intended.
        self._health_dict[peer]['disconnected'] = format_timestamp(datetime.now())
        del self._health_dict[peer]

    @RPC.export
    def get_platform_health(self):
        """
        The `get_platform_health` retrieves all of the connected agent's health structures,
        except for the `CONTROL_CONNECTION` (vctl's known identity). Vctl's identity is used for short
        term connections and is not relevant to the core health system.

        This function returns a dictionary in the form identity: values such as the following:

        .. code-block :: json

            {
                "listeneragent-3.3_35":
                {
                    "peer": "listeneragent-3.3_35",
                    "service_agent": False,
                    "connected": "2020-10-28T12:46:58.701119",
                    "last_heartbeat": "2020-10-28T12:47:03.709605",
                    "message": "GOOD"
                }
            }

        :return:
        """
        # Ignore the connection from control in the health as it will only be around for a short while.
        agents = {k: v for k, v in self._health_dict.items()
                  if not v.get('peer') == CONTROL_CONNECTION}
        return agents

    def _heartbeat_updates(self, peer, sender, bus, topic, headers, message):
        """
        This method is called whenever a publish goes on the message bus from the
        heartbeat* topic.

        :param peer:
        :param sender:
        :param bus:
        :param topic:
        :param headers:
        :param message:
        :return:
        """
        health = self._health_dict[sender]
        time_now = format_timestamp(datetime.now())
        # first heartbeat from a peer we never saw connect: backfill its
        # identity info (defaultdict created an empty record just above)
        if not health:
            health['connected'] = time_now
            health['peer'] = sender
            health['service_agent'] = sender in PROCESS_IDENTITIES
        health['last_heartbeat'] = time_now
        health['message'] = message

    @Core.receiver('onstart')
    def onstart(self, sender, **kwargs):
        # Start subscribing to heartbeat topic to get updates from the health subsystem.
        self.vip.pubsub.subscribe('pubsub', 'heartbeat', callback=self._heartbeat_updates)
|
# -*- coding: utf-8 -*-
# Create your views here.
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.context_processors import request
from django.core.urlresolvers import reverse
from django.db.models import ProtectedError
from django.utils.decorators import method_decorator
from django.views.generic import ListView, DetailView, CreateView, DeleteView, UpdateView
from django.http import HttpResponse, HttpResponseForbidden, HttpResponseRedirect
from django.views.generic.edit import ModelFormMixin
from django.shortcuts import render_to_response, redirect, get_object_or_404
from django.template import RequestContext, Context
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import gettext_lazy as _
from pyjasperclient import JasperClient
from appcc.forms import *
from appcc.models import *
from reportes.models import Informes
from siva.utils import *
from imagen.forms import ImagenReportForms
# Translatable verbs used in the success flash messages below
# ("creado" = created, "actualizado" = updated).
CREADO =_("creado")
ACTUA =_("actualizado")
class ViewBase(object):
    """Shared mixin for the CRUD class-based views of the appcc app.

    Configures action URLs, table headers and the model/form pair from the
    constructor arguments, and centralizes login protection and deletion.
    """
    # NOTE(review): these are *class* attributes and __init__ rewrites them
    # via `ViewBase.<attr> = ...`, so state is shared by every subclass --
    # the most recently constructed view wins.  Verify this is safe if views
    # are ever instantiated concurrently.
    extra_context = {}
    model = None
    form_class = None

    def __init__(self, modulo, etiqueta, tabla, form):
        self.modulo = modulo
        self.acciones = {"crear": "/appcc/%s/crear" % modulo, "eliminar": "/appcc/%s/eliminar" % modulo, 'ira': ""}
        self.auxiliar = {"etiqueta": etiqueta}
        self.cabezera = [etiqueta]
        ViewBase.extra_context = {"acciones": self.acciones, "auxiliar": self.auxiliar, "cabezera": self.cabezera}
        # SECURITY: eval() on the model/form names -- safe only while callers
        # pass hard-coded strings; never feed user input into these arguments.
        ViewBase.model = eval(tabla)
        ViewBase.form_class = eval(form)

    def get_context_data(self, **kwargs):
        # merge the per-view action/header config into the template context
        context = super(ViewBase, self).get_context_data(**kwargs)
        context.update(self.extra_context)
        return context

    def form_valid(self, form):
        # save with the requesting user attached (custom model save signature)
        self.object = form.save(commit=False, user=self.request.user)
        self.object.save(user=self.request.user)
        return super(ModelFormMixin, self).form_valid(form)

    def form_invalid(self, form):
        return self.render_to_response(self.get_context_data(form=form))

    def get_success_url(self):
        return reverse('%s_list' % self.modulo)

    def get_object(self, *args, **kwargs):
        obj = super(ViewBase, self).get_object(*args, **kwargs)
        return obj

    @method_decorator(login_required(login_url="/"))
    def dispatch(self, *args, **kwargs):
        return super(ViewBase, self).dispatch(*args, **kwargs)

    def delete(self, request, *args, **kwargs):
        """Delete the object; flash an error instead of failing on FK protection."""
        self.object = self.get_object()
        try:
            self.object.delete()
        except ProtectedError, error:  # Python 2 only syntax; `error` unused
            messages.add_message(request, messages.ERROR, _("No se elimina, se encuentra referenciado"))
            return HttpResponseRedirect(self.get_success_url())
        return HttpResponseRedirect(self.get_success_url())
# ----------------------------------------------Definicion Panel Principal ------------------------------------------------------------------
@login_required(login_url='/')
def appcc(request):
    """Render the APPCC main panel (dashboard of section links)."""
    #objeto = ContentType.objects.filter(app_label='maestros', user=request.user);
    # Build the panel dictionary consumed by base/panel.html
    request.breadcrumbs(generarBreadCrumb(request.path_info))
    #request.breadcrumbs(_("APPCC"),request.path_info)
    lista_objeto = {'titulobox': _("Gestion del APPCC"), "contenido": [
        {"apartado": "APPCC",
         "botones": [
             #{ "href":"/appcc/appcc/lista", "titulo" : "APPCC", "icon1" : "" , "icon2" : "" } ,
             {"href": "/appcc/appcc", "titulo": "APPCC", "icon1": "", "icon2": ""},
             # { "href":"/appcc/manualautocontrol/lista", "titulo" : "Manual de Auto Control", "icon1" : "icofont-chevron-up" , "icon2" : "icon-adjust" },
             # { "href":"/appcc/planautocontrol/lista", "titulo" : "Plan de Auto Control", "icon1" : "icofont-chevron-up" , "icon2" : "icon-adjust" },
         ]
         },
    ], }
    return render_to_response("base/panel.html", {"lista_objeto": lista_objeto}, context_instance=RequestContext(request))
#--------------------------------------------------- APPCC -----------------------------------------------------------------------------------#
lista_template = {'template_name' : "appcc/list_appcc.html",}
class APPCCMixin(ViewBase, BreadcrumbTemplateMixin):
    """View configuration for the APPCC list: action URLs, table headers,
    and a queryset restricted to the logged-in user's companies."""

    def __init__(self):
        super(APPCCMixin, self).__init__("appcc", _("APPCC"), "APPCC", "APPCCForms")
        #self.acciones['ira']='/appcc/manualautocontrol/lista' # Overridden to pass the APPCC id, to filter or create a new AutoControl Manual
        self.acciones['ira'] = '/appcc/appcc/manualautocontrol'  # override to pass the APPCC id for filtering / creating a Manual
        self.acciones['iradoc'] = 'appcc/documentos/lista/appcc_id/'
        #self.acciones['ira1']='/appcc/auditorias/lista'
        self.acciones['ira1'] = '/appcc/appcc/auditorias'
        # extra list-table column headers appended after the base label
        self.cabezera.append(_(u'Auditorias'))
        self.cabezera.append(_(u'Manual AutoControl'))
        self.cabezera.append(_(u'Cuadro Gestion'))
        self.cabezera.append(_(u'Incidencias'))
        self.cabezera.append(_(u'Ver'))
        self.cabezera.append(_(u'Impresion'))
        self.cabezera.append(_(u"Acciones"))

    def get_queryset(self):
        # only APPCC records belonging to companies of the current user
        return APPCC.objects.filter(empresa__in=Empresas.objects.filter(usuario__username=self.request.user))
baselista = "APPCCMixin"
# Dynamically build the list/detail views from the mixin.
# NOTE(review): `genericaview`, `detalle_template` are not defined in this
# file -- presumably they come from the wildcard imports above; confirm.
AppccListaView = type(genericaview, (eval(baselista), ListView,), lista_template)
AppccDetalleView = type(genericaview, (eval(baselista), DetailView,), detalle_template)  # Where is this used?
@login_required(login_url='/')
@user_passes_test(is_allowed_edit)
def AppccCrearView(request):
    """Create an APPCC record together with its revision-history formset."""
    #request.breadcrumbs(_("Nuevo"),request.path_info)
    request.breadcrumbs(generarBreadCrumb(request.path_info))
    template_name = "base/formset.html"
    auxiliar = {"etiqueta": "Crear APPCC", }
    form = APPCCForms(request.POST or None)
    form_detail = HistorialRevisionesFormset(request.POST or None, prefix="dappcc")
    if form.is_valid():
        # defer saving the parent until the child formset validates too
        padre = form.save(commit=False, request=request)
        form_detail = HistorialRevisionesFormset(request.POST or None, instance=padre, prefix="dappcc")
        if form_detail.is_valid():
            padre.save(user=request.user)
            form_detail.save()
            messages.add_message(request, messages.SUCCESS, _(" %s %s con exito") % ("APPCC", CREADO))
            return redirect(reverse('appcc_list'))
        else:
            messages.add_message(request, messages.ERROR, form_detail.errors)
    else:
        messages.add_message(request, messages.ERROR, form.errors)
    # re-render the form with validation errors
    return render_to_response(template_name, {'form': form, 'form_detail': form_detail, 'auxiliar': auxiliar}, context_instance=RequestContext(request))
AppccEliminarView = type(genericaview,(eval(baselista),DeleteView,), eliminar_template )
@login_required(login_url='/')
def AppccActualizarView(request, pk):
    """Edit an existing APPCC record and its revision-history formset.

    NOTE(review): unlike AppccCrearView this is not guarded by
    @user_passes_test(is_allowed_edit) -- confirm whether that is intended.
    """
    request.breadcrumbs(generarBreadCrumb(request.path_info))
    template_name = "base/formset.html"
    auxiliar = {"etiqueta": "Editar APPCC"}
    # 404 unless the record belongs to one of the current user's companies
    cabecera = get_object_or_404(APPCC, pk=pk, empresa__in=Empresas.objects.filter(usuario__username=request.user))
    form = APPCCForms(request.POST or None, instance=cabecera)
    if form.is_valid():
        padre = form.save(commit=False, request=request)
        form_detail = HistorialRevisionesFormset(request.POST, instance=padre, prefix="dappcc")
        if form_detail.is_valid():
            padre.save(user=request.user)
            form_detail.save()
            messages.add_message(request, messages.SUCCESS, _(" %s %s con exito") % ("APPCC", ACTUA))
            return redirect(reverse('appcc_list'))
        else:
            messages.add_message(request, messages.ERROR, form_detail.errors)
    else:
        messages.add_message(request, messages.ERROR, form.errors)
        # GET (or invalid parent form): show the stored revisions
        form_detail = HistorialRevisionesFormset(instance=cabecera, prefix="dappcc")
    return render_to_response(template_name, {'form': form, 'form_detail': form_detail, 'auxiliar': auxiliar}, context_instance=RequestContext(request))
#---------------------------------------Manual Auto Control -------------------------------------------------------------------------------------#
class ManualAutoControlMixin(ViewBase, BreadcrumbTemplateMixin):
    """View configuration for AutoControl-Manual views (labels and headers)."""

    def __init__(self):
        super(ManualAutoControlMixin, self).__init__("manualautocontrol", _("Manual de Auto Control"), "ManualAutoControl", "ManualAutoControlForms")
        #self.acciones['ira']='/appcc/planautocontrol/lista' # Overridden to pass the APPCC id, to filter or create a new AutoControl Manual
        #print self.modulo
        #self.acciones['crear']='/appcc/appcc/manualautocontrol/80/crear' # needs review
        #self.acciones['ira1']='/appcc/cabregistros/lista'
        # extra list-table column headers
        self.cabezera.append('Ir a Plan')
        self.cabezera.append("Ir a Registro")
        self.cabezera.append("Ver")
        self.cabezera.append('Impresion')
        self.cabezera.append("Acciones")
# Mixin class name resolved via eval() when building the generic detail view below.
baselista ="ManualAutoControlMixin"
class ManualautocontrolListaView(ManualAutoControlMixin,ListView):
    """List the ManualAutoControl rows hanging from one APPCC."""
    template_name = "appcc/list_manualautocontrol.html"
    def get_queryset(self): # filter by the parent APPCC id: show only its children
        id = self.kwargs['pappccid']
        self.acciones["appccid"] = id
        # Restrict to manuals of that APPCC owned by one of the user's companies.
        return ManualAutoControl.objects.filter(appcc = id, empresa__in=Empresas.objects.filter(usuario=self.request.user))
ManualautocontrolDetalleView = type(genericaview,(eval(baselista),DetailView,), detalle_template )
@login_required(login_url='/')
@user_passes_test(is_allowed_edit)
def ManualautocontrolCrearView(request,pappccid):
    """Create a ManualAutoControl attached to the APPCC given by ``pappccid``."""
    request.breadcrumbs(generarBreadCrumb(request.path_info))
    contexto = {'auxiliar': {"etiqueta": "Crear Manual Auto Control"}}
    nuevo = ManualAutoControlForms(request.POST or None)
    # Expose the parent id to the form during validation.
    nuevo.appcc_id = pappccid
    if not nuevo.is_valid():
        messages.add_message(request, messages.ERROR, nuevo.errors)
    else:
        manual = nuevo.save(commit=False, request=request)
        manual.appcc_id = pappccid  # attach to the parent APPCC
        manual.save(user=request.user)
        messages.add_message(request, messages.SUCCESS, _(" %s %s con exito") % (_("Manual Autocontrol"), CREADO))
        return redirect(reverse('manualautocontrol_list', args=(pappccid,)))
    contexto['form'] = nuevo
    return render_to_response("base/form.html", contexto, context_instance=RequestContext(request))
class ManualAutoElimina(ManualAutoControlMixin):
    """Delete mixin: after deletion, return to the parent APPCC's manual list."""
    def get_success_url(self):
        url_name = '%s_list' % self.modulo
        parent_pk = self.object.appcc_id
        return reverse(url_name, args=(parent_pk,))
ManualautocontrolEliminarView = type(genericaview, (ManualAutoElimina, DeleteView,), eliminar_template)
@login_required(login_url='/')
def ManualautocontrolActualizarView(request,pk):
    """Edit an existing ManualAutoControl (header form only, no inlines).

    FIX: the page label previously read "Editar Plan Auto Control" (copy/paste
    from the plan views) and the success message misspelled "Acutocontrol".
    """
    request.breadcrumbs(generarBreadCrumb(request.path_info))
    template_name = "base/form.html"
    auxiliar = {"etiqueta" : "Editar Manual Auto Control"}
    # 404 unless the manual belongs to one of the user's companies.
    cabecera = get_object_or_404(ManualAutoControl, pk=pk,empresa__in=Empresas.objects.filter(usuario__username=request.user))
    form = ManualAutoControlForms(request.POST or None, instance=cabecera)
    if form.is_valid():
        padre = form.save(commit=False,request=request)
        padre.save(user=request.user)
        messages.add_message(request, messages.SUCCESS, _(" %s %s con exito") %(_("Manual Autocontrol"),ACTUA) )
        return redirect(reverse('manualautocontrol_list', args=(padre.appcc_id,) ))
    else:
        messages.add_message(request, messages.ERROR, form.errors)
    return render_to_response(template_name, {'form' : form, 'auxiliar': auxiliar },context_instance=RequestContext(request))
#-------------------------------------Plan Auto Control ---------------------------------------------------------------------------------------------
class PlanAutoControlMixin(ViewBase,BreadcrumbTemplateMixin):
    """Shared configuration for the "Plan de Auto Control" views.

    FIX: removed a leftover debug ``print 'model0'`` statement.
    """
    def __init__(self):
        super(PlanAutoControlMixin,self).__init__("planautocontrol",_("Plan de auto Control"),"PlanAutoControl","PlanAutoControlForms")
        # Extra column headers appended after the ones ViewBase installs.
        self.cabezera.append("Ver")
        self.cabezera.append('Impresion')
        self.cabezera.append("Acciones")
# Mixin class name resolved via eval() when building the generic detail view below.
baselista ="PlanAutoControlMixin"
class PlanautocontrolListaView(PlanAutoControlMixin,ListView):
    """List the PlanAutoControl rows hanging from one ManualAutoControl.

    FIX: removed a leftover ``print 'model1'`` executed at class-definition time.
    """
    template_name = "appcc/list_planautocontrol.html"
    def get_queryset(self): # filter by the parent manual id: show only its children
        id = self.kwargs['pmanuctrid']
        manual = ManualAutoControl.objects.filter(pk=id)
        # NOTE(review): manual.first() is None when the pk does not exist, which
        # would raise AttributeError here -- consider get_object_or_404 instead.
        self.auxiliar['padre']= manual.first().tpplancontrol.denominacion
        self.acciones['manautctrlid']=id # kept so the "create" button can build its URL
        self.acciones["appccid"] = self.kwargs['pappccid']
        return PlanAutoControl.objects.filter( manautctrl = id,empresa__in=Empresas.objects.filter(usuario=self.request.user))
PlanautocontrolDetalleView = type(genericaview,(eval(baselista),DetailView,), detalle_template )
@login_required(login_url='/')
@user_passes_test(is_allowed_edit)
def PlanautocontrolCrearView(request,pappccid,pmanuctrid):
    """Create a PlanAutoControl under a ManualAutoControl, together with its
    "consumibles/dosis" and "valores analiticas" inline formsets."""
    request.breadcrumbs(generarBreadCrumb(request.path_info))
    template_name = "appcc/planautocontrol.html"
    auxiliar = {"etiqueta" : "Crear Plan Auto Control"}
    # 404 unless the parent manual belongs to one of the user's companies.
    objmanu = get_object_or_404(ManualAutoControl,pk=pmanuctrid,empresa__in=Empresas.objects.filter(usuario__username=request.user))
    # Template labels stored on the manual as a Python-literal string.
    etiq = ( "" if objmanu.etiquetasTemplate() is None else objmanu.etiquetasTemplate())
    if len(etiq) !=0:
        # SECURITY(review): eval() of a DB-stored string executes arbitrary code
        # if that field is ever user-controlled -- consider ast.literal_eval.
        etiquetas = eval(etiq)
    else:
        etiquetas= None
    form = PlanAutoControlForms(request.POST or None)
    form_detail_cd = ConsumiblesDosisFormset(request.POST or None, prefix="dcd")
    form_detail_va = ValoresAnaliticasFormset(request.POST or None, prefix="dva")
    # Expose the parent id to the form during validation.
    form.manautctrl_id = pmanuctrid
    if form.is_valid():
        padre = form.save(commit=False,request=request)
        padre.manautctrl_id = pmanuctrid
        # Rebind both formsets to the unsaved parent so inline rows attach to it.
        form_detail_cd = ConsumiblesDosisFormset( request.POST or None, instance = padre, prefix="dcd")
        form_detail_va = ValoresAnaliticasFormset( request.POST or None, instance = padre, prefix="dva")
        if form_detail_cd.is_valid() and form_detail_va.is_valid():
            padre.save(user=request.user)
            form_detail_cd.save()
            form_detail_va.save()
            messages.add_message(request, messages.SUCCESS, _(" %s %s con exito") %(_("Plan Acutocontrol"),CREADO) )
            #return redirect(reverse('planautocontrol_list', args=(pmanuctrid,) ))
            return redirect(reverse('planautocontrol_list', args=(pappccid,pmanuctrid,) ))
        else:
            messages.add_message(request, messages.ERROR, form_detail_cd.errors)
            messages.add_message(request, messages.ERROR, form_detail_va.errors)
    else:
        messages.add_message(request, messages.ERROR, form.errors)
    return render_to_response(template_name, {'form' : form, 'form_detail_cd' : form_detail_cd, 'form_detail_va' : form_detail_va, 'auxiliar': auxiliar, 'etiquetas' : etiquetas },context_instance=RequestContext(request))
class PlanAutoElimina(PlanAutoControlMixin):
    """Delete mixin: return to the plan list of the parent manual (and its APPCC)."""
    def get_success_url(self):
        manual = self.object.manautctrl
        return reverse('%s_list' % self.modulo, args=(manual.appcc.id, manual.id,))
PlanautocontrolEliminarView = type(genericaview, (PlanAutoElimina, DeleteView,), eliminar_template)
@login_required(login_url='/')
def PlanautocontrolActualizarView(request,pappccid,pmanuctrid,pk):
    """Edit a PlanAutoControl plus its two inline formsets.

    FIX: on a valid POST the inline formsets used to be re-instantiated with
    ``instance=<ManualAutoControl>`` (the fetched parent manual) instead of the
    PlanAutoControl being edited, so the inline rows were bound to the wrong
    model. They are now rebound to ``padre``, matching the create view.
    """
    request.breadcrumbs(generarBreadCrumb(request.path_info))
    template_name = "appcc/planautocontrol.html"
    auxiliar = {"etiqueta" : "Editar Plan Auto Control"}
    cabecera = get_object_or_404(PlanAutoControl, pk=pk,empresa__in=Empresas.objects.filter(usuario__username=request.user))
    # Template labels stored on the parent manual as a Python-literal string.
    etiq = ( "" if cabecera.manautctrl.etiquetasTemplate() is None else cabecera.manautctrl.etiquetasTemplate())
    if len(etiq) !=0:
        # SECURITY(review): eval() of a DB-stored string -- consider ast.literal_eval.
        etiquetas = eval(etiq)
    else:
        etiquetas= None
    form = PlanAutoControlForms(request.POST or None,instance= cabecera)
    form_detail_cd = ConsumiblesDosisFormset(request.POST or None, instance=cabecera, prefix="dcd")
    form_detail_va = ValoresAnaliticasFormset(request.POST or None,instance=cabecera, prefix="dva")
    if form.is_valid():
        padre = form.save(commit=False,request=request)
        # Validate the URL's manual id (404 on a bogus id) and keep it for the redirect.
        manual = get_object_or_404(ManualAutoControl,pk=pmanuctrid)
        form_detail_cd = ConsumiblesDosisFormset( request.POST or None, instance = padre, prefix="dcd")
        form_detail_va = ValoresAnaliticasFormset( request.POST or None, instance = padre, prefix="dva")
        if form_detail_cd.is_valid() and form_detail_va.is_valid():
            padre.save(user=request.user)
            form_detail_cd.save()
            form_detail_va.save()
            messages.add_message(request, messages.SUCCESS, _(" %s %s con exito") %(_("Plan Acutocontrol"),ACTUA) )
            return redirect(reverse('planautocontrol_list', args=(pappccid,manual.id,) ))
        else:
            messages.add_message(request, messages.ERROR, form_detail_cd.errors)
            messages.add_message(request, messages.ERROR, form_detail_va.errors)
    else:
        messages.add_message(request, messages.ERROR, form.errors)
    return render_to_response(template_name, {'form' : form, 'form_detail_cd' : form_detail_cd, 'form_detail_va' : form_detail_va, 'auxiliar': auxiliar, 'etiquetas' : etiquetas },context_instance=RequestContext(request))
# --------------------------------------------------- Cabecera de Inicio REGISTROS -----------------------------------------------------------------
class CabRegistrosMixin(ViewBase,BreadcrumbTemplateMixin):
    """Shared configuration for the "Configuracion Registros" (register header) views."""
    def __init__(self):
        super(CabRegistrosMixin,self).__init__("cabregistros",_("Configuracion Registros"),"CabRegistros","CabRegistrosForms")
        #self.acciones['ira']='/appcc/detallesregistros/lista' # Overridden to pass the id used to filter or to create a new Manual de AutoControl
        # Extra column headers appended after the ones ViewBase installs.
        self.cabezera.append('Fecha')
        self.cabezera.append('Analiticas')
        self.cabezera.append('Registros')
        self.cabezera.append("Ver")
        self.cabezera.append('Impresion')
        self.cabezera.append("Acciones")
# Mixin class name resolved via eval() when building the generic detail view below.
baselista ="CabRegistrosMixin"
class CabRegistrosListaView(CabRegistrosMixin,ListView):
    """List the CabRegistros rows belonging to one ManualAutoControl."""
    template_name = "appcc/list_cabregistros.html"
    def get_queryset(self):
        manual_pk = self.kwargs['pmanautctrid']
        padre = ManualAutoControl.objects.filter(pk=manual_pk).first()
        self.auxiliar['padre'] = padre.tpplancontrol.denominacion
        # Stash the ids the template's action buttons need to build URLs.
        self.acciones['manautctrlid'] = manual_pk
        self.acciones["appccid"] = self.kwargs['pappccid']
        propias = Empresas.objects.filter(usuario=self.request.user)
        return CabRegistros.objects.filter(manautctrl=manual_pk, empresa__in=propias)
CabRegistrosDetalleView = type(genericaview, (eval(baselista), DetailView,), detalle_template)
@login_required(login_url='/')
@user_passes_test(is_allowed_edit)
def CabRegistrosCrearView(request,pappccid,pmanautctrid):
    """Create a CabRegistros (register configuration) under a manual.

    FIX: on form errors this view used to ``print`` the unbound method
    ``form.errors.as_text`` (never called) to stdout; it now reports the
    errors to the user with ``messages`` like every sibling view.
    """
    request.breadcrumbs(generarBreadCrumb(request.path_info))
    template_name = "base/form.html"
    auxiliar = {"etiqueta" : "Crear Configuración Registros"}
    form = CabRegistrosForms(request.POST or None)
    # NOTE(review): the attribute is named appcc_id but receives the manual id;
    # looks like a copy/paste leftover -- confirm what the form actually reads.
    form.appcc_id = pmanautctrid
    if form.is_valid():
        padre = form.save(commit=False,request=request)
        padre.manautctrl_id = pmanautctrid
        padre.save(user=request.user)
        messages.add_message(request, messages.SUCCESS, _(" %s %s con exito") %(_("Definición de Registro"),CREADO) )
        return redirect(reverse('cabregistros_list', args=(pappccid,pmanautctrid,) ))
    else:
        messages.add_message(request, messages.ERROR, form.errors)
    return render_to_response(template_name, {'form' : form, 'auxiliar': auxiliar },context_instance=RequestContext(request))
class CabregistroElimina(CabRegistrosMixin):
    """Delete mixin: return to the register list of the parent manual/APPCC."""
    def get_success_url(self):
        manual = self.object.manautctrl
        return reverse('%s_list' % self.modulo, args=(manual.appcc.id, manual.id,))
CabRegistrosEliminarView = type(genericaview, (CabregistroElimina, DeleteView,), eliminar_template)
@login_required(login_url='/')
def CabRegistrosActualizarView(request,pappccid,pmanautctrid,pk):
    """Edit a CabRegistros (register configuration) header."""
    request.breadcrumbs(generarBreadCrumb(request.path_info))
    template_name = "base/form.html"
    auxiliar = {"etiqueta" : "Editar Configuración Registros"}
    # 404 unless the row belongs to one of the user's companies.
    cabecera = get_object_or_404(CabRegistros, pk=pk,empresa__in=Empresas.objects.filter(usuario__username=request.user))
    form = CabRegistrosForms(request.POST or None, instance=cabecera)
    if form.is_valid():
        padre = form.save(commit=False,request=request)
        padre.save(user=request.user)
        # NOTE(review): "Definción" looks like a typo for "Definición".
        messages.add_message(request, messages.SUCCESS, _(" %s %s con exito") %(_("Definción Registro"),ACTUA) )
        #return redirect(reverse('cabregistros_list', args=(padre.manautctrl_id,) ))
        return redirect(reverse('cabregistros_list', args=(pappccid,pmanautctrid,) ))
    else:
        messages.add_message(request, messages.ERROR, form.errors)
    return render_to_response(template_name, {'form' : form, 'auxiliar': auxiliar },context_instance=RequestContext(request))
#--------------------------------------Detalles Registros -----------------------------------------------------
#class DetallesRegistrosMixin(ViewBase):
class DetallesRegistrosMixin(ViewBase,BreadcrumbTemplateMixin):
    """Shared configuration for the "Detalle de registros" views."""
    def __init__(self):
        super(DetallesRegistrosMixin, self).__init__(
            "detallesregistros", _("Detalle de registros"),
            "DetallesRegistros", "DetallesRegistrosForms")
        # Extra column headers appended after the ones ViewBase installs.
        for columna in ("Ver", 'Impresion', "Acciones"):
            self.cabezera.append(columna)
# Mixin class name resolved via eval() when building the generic detail view below.
baselista ="DetallesRegistrosMixin"
class DetallesRegistrosListaView(DetallesRegistrosMixin,ListView):
    """List the DetallesRegistros rows belonging to one CabRegistros."""
    template_name = "appcc/list_detallesregistro.html"
    def get_queryset(self):
        cabreg_pk = self.kwargs['pcabregid']
        # Stash the ids the template's action buttons need to build URLs.
        self.acciones['cabregid'] = cabreg_pk
        self.acciones['manautctrlid'] = self.kwargs['pmanautctrid']
        self.acciones["appccid"] = self.kwargs['pappccid']
        propias = Empresas.objects.filter(usuario=self.request.user)
        return DetallesRegistros.objects.filter(cabreg_id=cabreg_pk, empresa__in=propias)
DetallesRegistrosDetalleView = type(genericaview, (eval(baselista), DetailView,), detalle_template)
@login_required(login_url='/')
@user_passes_test(is_allowed_edit)
def DetallesRegistrosCrearView(request,pappccid,pmanautctrid,pcabregid):
    """Create a DetallesRegistros row (plus its Registros inline formset)
    under the CabRegistros given by ``pcabregid``.

    For registers whose frequency is 24 units or less, the crispy-forms layout
    is replaced with a compact variant.
    """
    request.breadcrumbs(generarBreadCrumb(request.path_info))
    template_name = "appcc/detallesregistro.html"
    auxiliar = {"etiqueta" : "Crear Registros"}
    # 404 unless the parent CabRegistros belongs to one of the user's companies.
    cabecera = get_object_or_404(CabRegistros, pk=pcabregid,empresa__in=Empresas.objects.filter(usuario__username=request.user))
    form = DetallesRegistrosForms(request.POST or None, idmanctrl = cabecera.manautctrl_id, iduser=request.user)
    form_detail_reg = RegistrosFormset(request.POST or None, prefix="registro")
    # Compact layout for high-frequency registers (<= 24 units).
    if cabecera.frecuencia.nounidades <= 24:
        form.helper.layout = Layout( Fieldset(
        Div('id',css_class="control-group hidden"),
        Div('cabreg_id',css_class="control-group hidden"),
        Div( Div(Field('actividades'), css_class=s6 ),Div(Field('fechaalta',template=fecha,css_class=small),css_class=s6) ,css_class=s12 ),
        Div( Div('tplimitcrit', css_class=s4),Div('valanali',css_class=s4 ),Div(Field('ordagenda',css_class=mini),css_class=s4 ),css_class=s12 ),
        Div(Div(Field('equipos',css_class=xlarge),css_class=s3) , Div(Field('zonas',css_class=xlarge),css_class=s3) ,Div(Field('diaejecuta'),css_class=s3 ),Div(Field('tpturnos',css_class=small),css_class=s3),css_class=s12),))
    # Expose the parent id to the form during validation.
    form.cabreg_id = pcabregid
    if form.is_valid():
        padre = form.save(commit=False,request=request)
        padre.cabreg_id = pcabregid
        # Rebind the formset to the unsaved parent so inline rows attach to it.
        form_detail_reg = RegistrosFormset( request.POST or None, instance = padre, prefix="registro")
        if form_detail_reg.is_valid():
            nunidades = cabecera.frecuencia.nounidades
            # NOTE(review): the update view tests ``is None`` here while this one
            # uses ``is not None`` -- one of the two is almost certainly inverted.
            if nunidades<168 and padre.diaejecuta is not None:
                messages.add_message(request, messages.ERROR, _("Error dia ejecución, frecuencia incorrecta"))
            elif padre.actividades.agenda == True and (padre.zonas_id is None or padre.equipos is None ):
                # Activities placed on the agenda require both a zone and an equipment.
                if padre.zonas_id is None:
                    messages.add_message(request, messages.ERROR, _("Zona requerida, actividad en agenda"))
                if padre.equipos is None:
                    messages.add_message(request, messages.ERROR, _("Equipo requerido, actividad en agenda"))
            else:
                padre.save(user=request.user)
                form_detail_reg.save()
                messages.add_message(request, messages.SUCCESS, _(" %s %s con exito") %(_("Detalle de Registro"),CREADO) )
                #return redirect(reverse('detallesregistros_list', args=(pcabregid,) ))
                return redirect(reverse('detallesregistros_list', args=(pappccid,pmanautctrid,pcabregid,) ))
        else:
            messages.add_message(request, messages.ERROR, form_detail_reg.errors)
    else:
        messages.add_message(request, messages.ERROR, form.errors)
    return render_to_response(template_name, {'form' : form, 'form_detail' : form_detail_reg, 'auxiliar': auxiliar },context_instance=RequestContext(request))
class DRegistroElimina(DetallesRegistrosMixin):
    """Delete mixin: return to the register-detail list of the parent CabRegistros."""
    def get_success_url(self):
        cab = self.object.cabreg
        return reverse('%s_list' % self.modulo,
                       args=(cab.manautctrl.appcc.id, cab.manautctrl.id, self.object.cabreg_id,))
DetallesRegistrosEliminarView = type(genericaview, (DRegistroElimina, DeleteView,), eliminar_template)
@login_required(login_url='/')
def DetallesRegistrosActualizarView(request,pappccid,pmanautctrid,pcabregid,pk):
    """Edit a DetallesRegistros row plus its Registros inline formset.

    Mirrors DetallesRegistrosCrearView: compact crispy-forms layout for
    high-frequency registers and the same business validation before saving.
    """
    request.breadcrumbs(generarBreadCrumb(request.path_info))
    template_name = "appcc/detallesregistro.html"
    auxiliar = {"etiqueta" : "Editar Registros"}
    # 404 unless the row belongs to one of the user's companies.
    cabecera = get_object_or_404(DetallesRegistros, pk=pk,empresa__in=Empresas.objects.filter(usuario__username=request.user))
    form = DetallesRegistrosForms( request.POST or None,instance= cabecera, idmanctrl = cabecera.cabreg.manautctrl_id, iduser=request.user)
    form_detail_reg = RegistrosFormset(request.POST or None, instance=cabecera, prefix="registro")
    # Compact layout (with tracking field) for high-frequency registers (<= 24 units).
    if cabecera.cabreg.frecuencia.nounidades <= 24:
        form.helper.layout = Layout( Fieldset(
        Div('id',css_class="control-group hidden"),
        Div('cabreg_id',css_class="control-group hidden"),
        Div( Div(Field('actividades'), css_class=s4 ),Div(Field('fechaalta',template=fecha,css_class=small),css_class=s4),Div(Field('tracksondas'),css_class=s4) ,css_class=s12 ),
        Div( Div('tplimitcrit', css_class=s4),Div('valanali',css_class=s4 ),Div(Field('ordagenda',css_class=mini),css_class=s4 ),css_class=s12 ),
        Div(Div(Field('equipos',css_class=xlarge),css_class=s3) , Div(Field('zonas',css_class=xlarge),css_class=s3) ,Div(Field('diaejecuta'),css_class=s3),Div(Field('tpturnos'),css_class=s3), css_class=s12),))
    if form.is_valid():
        padre = form.save(commit=False,request=request)
        pcabregid = padre.cabreg_id
        # Rebind the formset to the edited parent so inline rows attach to it.
        form_detail_reg = RegistrosFormset( request.POST or None, instance = padre, prefix="registro")
        if form_detail_reg.is_valid():
            nunidades = cabecera.cabreg.frecuencia.nounidades
            # NOTE(review): the create view tests ``is not None`` here; one of the
            # two conditions is almost certainly inverted -- confirm which.
            if nunidades<168 and padre.diaejecuta is None:
                messages.add_message(request, messages.ERROR, _("Error dia ejecución, frecuencia incorrecta"))
            elif padre.actividades.agenda ==True and (padre.zonas_id is None or padre.equipos is None ):
                # Activities placed on the agenda require both a zone and an equipment.
                if padre.zonas_id is None:
                    messages.add_message(request, messages.ERROR, _("Zona requerida, actividad en agenda"))
                if padre.equipos is None:
                    # NOTE(review): "requeridd" looks like a typo for "requerido".
                    messages.add_message(request, messages.ERROR, _("Equipo requeridd, actividad en agenda"))
            else:
                padre.save(user=request.user)
                form_detail_reg.save()
                messages.add_message(request, messages.SUCCESS, _(" %s %s con exito") %(_("Detalle de Registro"),ACTUA) )
                #return redirect(reverse('detallesregistros_list', args=(pcabregid,)))
                return redirect(reverse('detallesregistros_list', args=(pappccid,pmanautctrid,pcabregid,)))
        else:
            messages.add_message(request, messages.ERROR, form_detail_reg.errors)
    else:
        messages.add_message(request, messages.ERROR, form.errors)
    return render_to_response(template_name, {'form' : form, 'form_detail' : form_detail_reg, 'auxiliar': auxiliar },context_instance=RequestContext(request))
#---------------------------------------------------- INFORMES TECNICOS -------------------------------------------------------------------------------------------------#
#class CabInfTecnicosMixin(ViewBase):
class CabInfTecnicosMixin(ViewBase,BreadcrumbTemplateMixin):
    """Shared configuration for the technical-report ("auditorias") views."""
    def __init__(self):
        super(CabInfTecnicosMixin, self).__init__(
            "auditorias", _("Cabecera de Informes"),
            "CabInformesTecnicos", "CabInfTecnicosForms")
        # Extra column headers appended after the ones ViewBase installs.
        for columna in ("Ver", 'Impresion', "Acciones"):
            self.cabezera.append(columna)
# Mixin class name resolved via eval() when building the generic detail view below.
baselista ="CabInfTecnicosMixin"
class CabInfTecnicosListaView(CabInfTecnicosMixin,ListView):
    """List the CabInformesTecnicos rows hanging from one APPCC."""
    template_name = "appcc/list_cabinftecnicol.html"
    def get_queryset(self):
        appcc_pk = self.kwargs['pappccid']
        self.acciones["appccid"] = appcc_pk
        propias = Empresas.objects.filter(usuario=self.request.user)
        return CabInformesTecnicos.objects.filter(appcc=appcc_pk, empresa__in=propias)
CabInfTecnicosDetalleView = type(genericaview, (eval(baselista), DetailView,), detalle_template)
@login_required(login_url='/')
@user_passes_test(is_allowed_edit)
def CabInfTecnicosCrearView(request,pappccid):
    """Create a technical-report header with its detail formset, attaching one
    uploaded file (as an Imagen) to each detail row, paired by position.

    FIX: removed two leftover debug ``print`` statements (request.FILES, form).
    """
    request.breadcrumbs(generarBreadCrumb(request.path_info))
    template_name = "appcc/detinformestecnicos.html"
    auxiliar = {"etiqueta" : "Crear Informe"}
    # NOTE(review): unlike sibling views this form's save() takes no request=
    # kwarg -- presumably CabInfTecnicosForms uses the plain ModelForm.save; confirm.
    form = CabInfTecnicosForms(request.POST or None)
    form_detail_inftec = DetInfTecnicosFormset(request.POST or None, prefix="detinftec")
    #form_imagen = ImagenReportForms(request.FILES or None)
    if form.is_valid():
        padre = form.save(commit=False)
        padre.appcc_id = pappccid
        # Rebind the formset to the unsaved parent so inline rows attach to it.
        form_detail_inftec = DetInfTecnicosFormset( request.POST or None, instance = padre, prefix="detinftec")
        if form_detail_inftec.is_valid():
            padre.save(user=request.user)
            details = form_detail_inftec.save(commit=False)
            # Pair the i-th uploaded file with the i-th detail row.
            iter1 = 0
            for detail in details:
                iter2 = 0
                detail.save()
                for fichero in request.FILES:
                    if iter1 == iter2:
                        ct = get_object_or_404(ContentType,model=detail.__class__.__name__.lower())
                        detail.imagen.create(denominacion=request.FILES[fichero].name,file=request.FILES[fichero].read(),content_type_file=request.FILES[fichero].content_type,object_id=detail.pk,content_type=ct.pk)
                    iter2 = iter2 + 1
                iter1 = iter1 + 1
            messages.add_message(request, messages.SUCCESS, _(" %s %s con exito") %(_("Informe Tecnico"),CREADO) )
            return redirect(reverse('cabinftecnicos_list', args=(pappccid,) ))
        else:
            messages.add_message(request, messages.ERROR, form_detail_inftec.errors)
    else:
        messages.add_message(request, messages.ERROR, form.errors)
    return render_to_response(template_name, {'form' : form, 'form_detail' : form_detail_inftec, 'auxiliar': auxiliar },context_instance=RequestContext(request))
class CabInfTecnicosElimina(CabInfTecnicosMixin):
    """Delete mixin: return to the report list of the parent APPCC."""
    def get_success_url(self):
        appcc_pk = self.object.appcc_id
        return reverse('cabinftecnicos_list', args=(appcc_pk,))
CabInfTecnicosEliminarView = type(genericaview, (CabInfTecnicosElimina, DeleteView,), eliminar_template)
@login_required(login_url='/')
#def CabInfTecnicosActualizarView(request,pk):
def CabInfTecnicosActualizarView(request,pappccid,pk):
    """Edit a technical-report header and its details, updating (or creating)
    the Imagen attached to each detail from the uploaded files, paired by
    position (i-th file -> i-th detail row)."""
    request.breadcrumbs(generarBreadCrumb(request.path_info))
    template_name = "appcc/detinformestecnicos.html"
    auxiliar = {"etiqueta" : "Editar Informe"}
    # 404 unless the report belongs to one of the user's companies.
    cabecera = get_object_or_404(CabInformesTecnicos, pk=pk,empresa__in=Empresas.objects.filter(usuario__username=request.user))
    form = CabInfTecnicosForms( request.POST or None,instance= cabecera)
    form_detail_inftec = DetInfTecnicosFormset(request.POST or None, instance=cabecera,prefix="detinftec")
    if form.is_valid():
        padre = form.save(commit=False)
        pappcc_id = padre.appcc_id
        # Rebind the formset to the edited parent so inline rows attach to it.
        form_detail_inftec = DetInfTecnicosFormset( request.POST or None, instance = padre, prefix="detinftec")
        if form_detail_inftec.is_valid():
            # padre.save(user=request.user)
            # form_detail_inftec.save()
            padre.save(user=request.user)
            # NOTE(review): save() here already commits; detail.save() in the loop
            # below saves each row a second time -- probably meant commit=False.
            details = form_detail_inftec.save()
            iter1 = 0
            for detail in details:
                iter2 = 0
                detail.save()
                for fichero in request.FILES:
                    if iter1 == iter2:
                        #ct = ContentType.objects.get(model=detail.__class__.__name__.lower())
                        ct = get_object_or_404(ContentType,model=detail.__class__.__name__.lower())
                        try:
                            # An Imagen already exists for this detail: replace its file.
                            imagen = Imagen.objects.get(content_type=ct,object_id=detail.pk)
                            #detail.imagen.update_or_create(pk=imagen.pk,denominacion=request.FILES[fichero].name,file=request.FILES[fichero].read(),content_type_file=request.FILES[fichero].content_type,object_id=detail.pk,content_type=ct.pk)
                            imagen.file = request.FILES[fichero].read()
                            imagen.content_type_file=request.FILES[fichero].content_type
                            imagen.save()
                        except Imagen.DoesNotExist:
                            # No Imagen yet for this detail: create one from the upload.
                            detail.imagen.create(denominacion=request.FILES[fichero].name,file=request.FILES[fichero].read(),content_type_file=request.FILES[fichero].content_type,object_id=detail.pk,content_type=ct.pk)
                    iter2 = iter2 + 1
                iter1 = iter1 + 1
            messages.add_message(request, messages.SUCCESS, _(" %s %s con exito") %(_("Informe Tecnico"),ACTUA) )
            return redirect(reverse('cabinftecnicos_list', args=(pappcc_id,)))
        else:
            messages.add_message(request, messages.ERROR, form_detail_inftec.errors)
    else:
        messages.add_message(request, messages.ERROR, form.errors)
    return render_to_response(template_name, {'form' : form, 'form_detail' : form_detail_inftec, 'auxiliar': auxiliar },context_instance=RequestContext(request))
#------------------------------------------ANALITICAS ----------------------------------------------#
#class CabAnaliticasMixin(ViewBase):
class CabAnaliticasMixin(ViewBase,BreadcrumbTemplateMixin):
    """Shared configuration for the "Analiticas" views."""
    def __init__(self):
        super(CabAnaliticasMixin, self).__init__(
            "cabanaliticas", _("Analiticas"),
            "CabAnaliticas", "CabAnaliticasForms")
        # Extra column headers appended after the ones ViewBase installs.
        for columna in ("Ver", 'Impresion', "Acciones"):
            self.cabezera.append(columna)
# Mixin class name resolved via eval() when building the generic detail view below.
baselista ="CabAnaliticasMixin"
class CabAnaliticasListaView(CabAnaliticasMixin,ListView):
    """List the CabAnaliticas rows of one CabRegistros, newest first."""
    template_name = "appcc/list_cabanaliticas.html"
    def get_queryset(self):
        cabreg_pk = self.kwargs['pcabregid']
        # Stash the ids the template's action buttons need to build URLs.
        self.acciones['cabregid'] = cabreg_pk
        self.acciones['manautctrlid'] = self.kwargs['pmanautctrid']
        self.acciones["appccid"] = self.kwargs['pappccid']
        propias = Empresas.objects.filter(usuario=self.request.user)
        return CabAnaliticas.objects.filter(cabreg_id=cabreg_pk, empresa__in=propias).order_by('-fecha')
CabAnaliticasDetalleView = type(genericaview, (eval(baselista), DetailView,), detalle_template)
@login_required(login_url='/')
@user_passes_test(is_allowed_edit)
def CabAnaliticasCrearView(request,pappccid,pmanautctrid,pcabregid):
    """Create a CabAnaliticas header plus its DetAnaliticas inline formset
    under the CabRegistros given by ``pcabregid``."""
    request.breadcrumbs(generarBreadCrumb(request.path_info))
    template_name = "appcc/detallesanaliticas.html"
    auxiliar = {"etiqueta" : _("Crear Analitica")}
    form = CabAnaliticasForms(request.POST or None)
    form_detail_reg = DetAnaliticasFormset(request.POST or None, prefix="detanaliticas")
    # Expose the parent id to the form during validation.
    form.cabreg_id = pcabregid
    if form.is_valid():
        padre = form.save(commit=False,request=request)
        padre.cabreg_id = pcabregid
        # Rebind the formset to the unsaved parent so inline rows attach to it.
        form_detail_reg = DetAnaliticasFormset( request.POST or None, instance = padre, prefix="detanaliticas")
        if form_detail_reg.is_valid():
            padre.save(user=request.user)
            form_detail_reg.save()
            messages.add_message(request, messages.SUCCESS, _(" %s %s con exito") %(_("Analitica"),CREADO) )
            #return redirect(reverse('cabanaliticas_list', args=(pcabregid,) ))
            return redirect(reverse('cabanaliticas_list', args=(pappccid,pmanautctrid,pcabregid,) ))
        else:
            messages.add_message(request, messages.ERROR, form_detail_reg.errors)
    else:
        messages.add_message(request, messages.ERROR, form.errors)
    return render_to_response(template_name, {'form' : form, 'form_detail' : form_detail_reg, 'auxiliar': auxiliar },context_instance=RequestContext(request))
class CabAnaliticasElimina(CabAnaliticasMixin):
    """Delete mixin for CabAnaliticas.

    FIX: this class previously inherited DetallesRegistrosMixin (copy/paste
    from DRegistroElimina), so the delete view carried the
    "detallesregistros" module slug/labels and redirected to the
    register-detail list instead of the analytics list. It now derives from
    CabAnaliticasMixin; get_success_url keeps the same three URL arguments.
    """
    def get_success_url(self):
        cab = self.object.cabreg
        return reverse('%s_list' % self.modulo, args=(cab.manautctrl.appcc.id, cab.manautctrl.id, self.object.cabreg_id,))
CabAnaliticasEliminarView = type(genericaview,(CabAnaliticasElimina,DeleteView,), eliminar_template )
@login_required(login_url='/')
def CabAnaliticasActualizarView(request,pappccid,pmanautctrid,pcabregid,pk):
    """Edit a CabAnaliticas header plus its DetAnaliticas inline formset."""
    request.breadcrumbs(generarBreadCrumb(request.path_info))
    template_name = "appcc/detallesanaliticas.html"
    auxiliar = {"etiqueta" : _("Editar Analiticas")}
    # 404 unless the row belongs to one of the user's companies.
    cabecera = get_object_or_404(CabAnaliticas, pk=pk,empresa__in=Empresas.objects.filter(usuario__username=request.user))
    form = CabAnaliticasForms( request.POST or None,instance= cabecera)
    form_detail_reg = DetAnaliticasFormset(request.POST or None, instance=cabecera, prefix="detanaliticas")
    if form.is_valid():
        padre = form.save(commit=False,request=request)
        pcabregid = padre.cabreg_id
        # Rebind the formset to the edited parent so inline rows attach to it.
        form_detail_reg = DetAnaliticasFormset( request.POST or None, instance = padre, prefix="detanaliticas")
        if form_detail_reg.is_valid():
            padre.save(user=request.user)
            form_detail_reg.save()
            messages.add_message(request, messages.SUCCESS, _(" %s %s con exito") %(_("Analiticas"),ACTUA) )
            #return redirect(reverse('cabanaliticas_list', args=(pcabregid,)))
            return redirect(reverse('cabanaliticas_list', args=(pappccid,pmanautctrid,pcabregid,) ))
        else:
            messages.add_message(request, messages.ERROR, form_detail_reg.errors)
    else:
        messages.add_message(request, messages.ERROR, form.errors)
    return render_to_response(template_name, {'form' : form, 'form_detail' : form_detail_reg, 'auxiliar': auxiliar },context_instance=RequestContext(request))
#------------------------------------------Documentos ----------------------------------------------#
def idModeloappcc(modelo=None):
    """Resolve the parent object a Documentos-style row hangs from.

    Inspects which FK on ``modelo`` is set and returns a dict with keys
    'modelo' (FK attribute name), 'id', 'cabecera' (the fetched parent
    instance) and, for most parents, 'url' (the path prefix used to build
    links). Returns None implicitly when no known FK is set.

    FIX: six branches returned the parent under the key ``' cabecera'``
    (note the leading space), so ``result['cabecera']`` lookups failed for
    those parents; the key is now 'cabecera' in every branch.

    NOTE(review): the default ``modelo=None`` is immediately dereferenced,
    so calling with no argument raises AttributeError -- kept for interface
    compatibility.
    """
    if modelo.appcc_id is not None:
        return {'modelo' : 'appcc_id', 'id': modelo.appcc_id, 'cabecera': get_object_or_404(APPCC,pk=modelo.appcc_id), 'url': 'appcc/'}
    if modelo.manautctrl_id is not None:
        return {'modelo' : 'manautctrl_id' , 'id': modelo.manautctrl_id, 'cabecera' : get_object_or_404(ManualAutoControl,pk=modelo.manautctrl_id),
                'url': 'appcc/manualautocontrol/%s' % modelo.manautctrl.appcc.id}
    if modelo.planautoctrl_id is not None:
        return { 'modelo' : 'planautoctrl_id' , 'id': modelo.planautoctrl_id, 'cabecera' : get_object_or_404(PlanAutoControl,pk=modelo.planautoctrl_id),
                'url': 'appcc/manualautocontrol/%s/planautocontrol/%s' % (modelo.planautoctrl.manautctrl.appcc.id,modelo.planautoctrl.manautctrl.id)}
    if modelo.cabreg_id is not None:
        return {'modelo' : 'cabreg_id' , 'id': modelo.cabreg_id ,'cabecera' : get_object_or_404(CabRegistros,pk=modelo.cabreg_id),
                'url': 'appcc/manualautocontrol/%s/cabregistros/%s' % (modelo.cabreg.manautctrl.appcc.id,modelo.cabreg.manautctrl.id)}
    if modelo.detreg_id is not None:
        return {'modelo': 'detreg_id', 'id' :modelo.detreg_id, 'cabecera' : get_object_or_404(DetallesRegistros,pk= modelo.detreg_id),
                'url': 'appcc/manualautocontrol/%s/cabregistros/%s/detallesregistros/%s' % (modelo.detreg.cabreg.manautctrl.appcc.id,modelo.detreg.cabreg.manautctrl.id,modelo.detreg.cabreg.id)}
    if modelo.cabanali_id is not None:
        return {'modelo': 'cabanali_id', 'id':modelo.cabanali_id, 'cabecera' : get_object_or_404(CabAnaliticas,pk=modelo.cabanali_id),
                'url': 'appcc/manualautocontrol/%s/cabregistros/%s/detallesregistros/%s' % (modelo.cabanali.cabreg.manautctrl.appcc.id,modelo.cabanali.cabreg.manautctrl.id,modelo.cabanali.cabreg.id)}
    # TODO(review): this branch returns no 'url' key; the original comment here
    # read "NO ENTIENDO" ("I don't understand") -- confirm the intended URL.
    if modelo.registros_id is not None:
        return {'modelo': 'registros_id', 'id':modelo.registros_id, 'cabecera' : get_object_or_404(Registros,pk=modelo.registros_id)}
    if modelo.cuadgest_id is not None:
        return {'modelo': 'cuadgest_id', 'id':modelo.cuadgest_id, 'cabecera' : get_object_or_404(CuadrosGestion,pk=modelo.cuadgest_id),
                'url': 'appcc/cuadrosgestion/%s' % modelo.cuadgest.appcc.id}
    if modelo.relentes_id is not None:
        # Personnel-training relations use a different URL segment.
        if modelo.relentes.manautctrl.tpplancontrol.campoprimario == "RELACION_FORMACION":
            rel = "relacionespersonal"
        else:
            rel = "relacionesterceros"
        return {'modelo': 'relentes_id', 'id':modelo.relentes_id, 'cabecera' : get_object_or_404(RelacionesEntes,pk=modelo.relentes_id),
                'url': 'appcc/manualautocontrol/%s/%s/%s' % (modelo.relentes.manautctrl.appcc.id,rel,modelo.relentes.manautctrl.id)}
    if modelo.gestincid_id is not None:
        return {'modelo': 'gestincid_id', 'id':modelo.gestincid_id, 'cabecera' : get_object_or_404(GestorIncidencias,pk=modelo.gestincid_id),
                'url': 'appcc/gestorincidencias/%s' % modelo.gestincid.appcc.id}
    if modelo.cabinftec_id is not None:
        return {'modelo': 'cabinftec_id', 'id':modelo.cabinftec_id, 'cabecera' : get_object_or_404(CabInformesTecnicos,pk=modelo.cabinftec_id),
                'url': 'appcc/auditorias/%s' % modelo.cabinftec.appcc.id}
class DocumentosMixin(ViewBase, BreadcrumbTemplateMixin):
    """Shared configuration for the Documentos (attachments) views."""

    def __init__(self):
        super(DocumentosMixin, self).__init__(
            "documentos", _("Documentos"), "Documentos", "DocumentosForms")
        # Extra list-view columns shown after the model fields.
        self.cabezera.extend(["Abrir", "Acciones"])


baselista = "DocumentosMixin"
class DocumentosListaView(DocumentosMixin, ListView):
    """List the Documentos rows attached to one parent record.

    URL kwargs: pmodelo (FK column name on Documentos, e.g. 'appcc_id'),
    pid (parent pk) and purl (path fragment used to link back to the
    parent screen).
    """
    #template_name = "base/listmodal_documentos.html"
    template_name = "base/list_documentos.html"

    def get_queryset(self):
        id = self.kwargs['pid']
        modelo = self.kwargs['pmodelo']
        # Stash the routing data so the template can build action buttons.
        self.acciones['modelo'] = modelo
        self.acciones['id'] = id
        # Filter Documentos on the FK column named by `modelo`.
        q = {"%s" % modelo: id}
        self.acciones['urlAux'] = self.kwargs['purl']
        print self.acciones['urlAux']  # NOTE(review): leftover debug output
        return Documentos.objects.filter(**q)
DocumentosDetalleView = type(genericaview,(eval(baselista),DetailView,), detalle_template )
# Maps the FK column name received in the URL to the parent model and the
# inline formset class used to attach Documentos rows to it.
_DOCUMENTOS_FORMSETS = {
    'appcc_id': (APPCC, AppccDocFormset),
    'manautctrl_id': (ManualAutoControl, ManualDocFormset),
    'planautoctrl_id': (PlanAutoControl, PlanDocFormset),
    'cabreg_id': (CabRegistros, CabRegDocFormset),
    'detreg_id': (DetallesRegistros, DetRegDocFormset),
    'registros_id': (Registros, RegistrosDocFormset),
    'cuadgest_id': (CuadrosGestion, CuadGestFormset),
    'relentes_id': (RelacionesEntes, RelEntesFormset),
    'gestincid_id': (GestorIncidencias, GestorIncidenciasFormset),
    'cabanali_id': (CabAnaliticas, CabAnaliticasFormset),
    'cabinftec_id': (CabInformesTecnicos, CabInfTecnicosFormset),
}


def seleccionFormDocumentos(pmodelo, pid, post, files):
    """Return the Documentos inline formset bound to a parent record.

    pmodelo -- FK column name identifying the parent model ('appcc_id', ...)
    pid     -- primary key of the parent record (404 when it does not exist)
    post / files -- request.POST / request.FILES, or None for an unbound form

    Returns None for an unknown pmodelo (same as the original eleven-branch
    if-chain, which fell through without returning).
    """
    try:
        model, formset_class = _DOCUMENTOS_FORMSETS[pmodelo]
    except KeyError:
        return None
    cabecera = get_object_or_404(model, pk=pid)
    return formset_class(post or None, files or None, instance=cabecera,
                         prefix="documentos")
@login_required(login_url='/')
@user_passes_test(is_allowed_edit)
def DocumentosCrearView(request, purl, pmodelo, pid):
    """Upload a file and attach it to a parent record as a Documentos row.

    purl    -- URL fragment used to rebuild the 'documentos_list' redirect
    pmodelo -- FK column on Documentos naming the parent ('appcc_id', ...)
    pid     -- primary key of the parent record

    GET renders an empty UploadForm.  POST with 'terminar' returns to the
    document list; any other valid POST stores the uploaded file as a
    Documentos row dated today and re-renders the attach form.
    """
    template_name = "base/modal_formset_attach.html"
    auxiliar = {"etiqueta": "Adjuntar Documento"}
    if request.method == 'POST':
        if 'terminar' in request.POST:
            return redirect(reverse('documentos_list', args=(purl, pmodelo, pid,)))
        form_detail = UploadForm(request.POST, request.FILES)
        if form_detail.is_valid():
            # One code path instead of eleven copy-pasted branches: the new
            # document is linked to its parent through the FK column named
            # by `pmodelo`.  (Leftover debug prints removed.)
            uploaded = request.FILES['file']
            campos = {
                pmodelo: pid,
                'denominacion': uploaded.name,
                'archivos': uploaded,
                'fecha': datetime.date.today(),
            }
            if pmodelo == 'appcc_id':
                # Only the APPCC branch initialised `contenido`.
                campos['contenido'] = ""
            setattr(form_detail, pmodelo, pid)
            newdoc = Documentos(**campos)
            newdoc.save(form_detail)
            messages.add_message(request, messages.SUCCESS,
                                 _(" %s %s con exito") % (_("Documento"), CREADO))
        else:
            messages.add_message(request, messages.ERROR, form_detail.errors)
    else:
        form_detail = UploadForm()
    return render_to_response(template_name,
                              {'form_detail': form_detail, 'auxiliar': auxiliar},
                              context_instance=RequestContext(request))
class DocumentosElimina(DocumentosMixin):
    """Resolves where to go after a Documentos row is deleted."""

    def get_success_url(self):
        # Recover the parent record's routing info from the deleted object.
        ctx = idModeloappcc(self.object)
        return reverse('%s_list' % self.modulo,
                       args=(ctx['url'], ctx['modelo'], ctx['id'],))


DocumentosEliminarView = type(genericaview, (DocumentosElimina, DeleteView,), eliminar_template)
@login_required(login_url='/')
def DocumentosActualizarView(request, pk):
    """Edit an existing Documentos row (name, date, optional new file).

    pk -- primary key of the Documentos row; 404 when missing.
    On a valid POST the row is renamed, re-dated to today and, if a new
    file was sent, its attachment replaced; then redirects back to the
    parent's document list.
    """
    print pk  # NOTE(review): leftover debug output
    template_name = "base/edit_formDocumentos.html"
    auxiliar = {"etiqueta": "Editar Documentos"}
    # Routing info (parent model / pk / url) for the post-save redirect.
    parametros = idModeloappcc(get_object_or_404(Documentos, pk=pk))
    if request.method == 'POST':
        form_detail = UploadForm(request.POST, request.FILES)
        if form_detail.is_valid():
            doc = get_object_or_404(Documentos, id=pk)
            doc.denominacion = request.POST['denominacion']
            # The edit always stamps today's date rather than the posted one.
            doc.fecha = datetime.date.today()
            if 'archivos' in request.FILES:
                doc.archivos = request.FILES['archivos']
            doc.save()
            messages.add_message(request, messages.SUCCESS, _(" %s %s con exito") % (_("Documento"), ACTUA))
            return redirect(reverse('documentos_list', args=(parametros['url'], parametros['modelo'], parametros['id'],)))
        else:
            messages.add_message(request, messages.ERROR, form_detail.errors)
    else:
        documento = get_object_or_404(Documentos, id=pk)
        form_detail = UploadForm(initial={'id': documento.id, 'denominacion': documento.denominacion, 'fecha': documento.fecha, 'archivos': documento.archivos})
    return render_to_response(template_name, {'form_detail': form_detail, 'auxiliar': auxiliar}, context_instance=RequestContext(request))
#-------------------------------------------------------Relaciones Personal y Terceros --------------------------------------------
class RelacionesEntesMixin(ViewBase, BreadcrumbTemplateMixin):
    """Shared configuration for the staff/third-party relations views."""

    def __init__(self):
        super(RelacionesEntesMixin, self).__init__(
            "relaciones", _("Relaciones"), "RelacionesEntes", "RelacionPersonalForms")
        # Extra list-view columns shown after the model fields.
        for columna in (_("Documentos"), _("Impresión"), _("Acciones")):
            self.cabezera.append(columna)


baselista = "RelacionesEntesMixin"
class RelacionesEntesListaView(RelacionesEntesMixin, ListView):
    """Base list of RelacionesEntes rows for one manual, scoped to the
    companies the requesting user belongs to."""

    def get_queryset(self):
        id = self.kwargs['pmanautctrid']
        # Keep the routing ids around so the template can build the
        # "create" button and breadcrumb links.
        self.acciones['manautctrlid'] = id
        self.acciones["appccid"] = self.kwargs['pappccid']
        self.acciones["relacion"] = self.kwargs['prelacion']
        return RelacionesEntes.objects.filter(manautctrl=id, empresa__in=Empresas.objects.filter(usuario=self.request.user))


class RelacionPersonalListaView(RelacionesEntesListaView):
    # Same queryset, staff-training template.
    template_name = "appcc/list_relacionespersonal.html"


class RelacionTercerosListaView(RelacionesEntesListaView):
    # Same queryset, third-party/suppliers template.
    template_name = "appcc/list_relacionesterceros.html"


# Detail view assembled dynamically from the mixin named by `baselista`.
RelacionesEntesDetallelView = type(genericaview, (eval(baselista), DetailView,), detalle_template)
@login_required(login_url='/')
@user_passes_test(is_allowed_edit)
def RelacionesEntesCrearView(request, pappccid, prelacion, pmanautctrid):
    """Create a RelacionesEntes row under a manual.

    The manual's plan type picks the variant: 'RELACION_FORMACION'
    manuals get the staff-training form, everything else the supplier
    form.  404 when the manual is not owned by one of the user's
    companies.
    """
    request.breadcrumbs(generarBreadCrumb(request.path_info))
    template_name = "base/form.html"
    manual = get_object_or_404(ManualAutoControl, pk=pmanautctrid, empresa__in=Empresas.objects.filter(usuario__username=request.user))
    if manual.tpplancontrol.campoprimario == "RELACION_FORMACION":
        titulo = "Relación Formación"
        form = RelacionPersonalForms(request.POST or None)
    else:
        titulo = "Relación Control Proveedores"
        form = RelacionTercerosForms(request.POST or None)
    auxiliar = {"etiqueta": titulo}
    form.manautctrl_id = pmanautctrid
    if form.is_valid():
        padre = form.save(commit=False, request=request)
        padre.manautctrl_id = pmanautctrid
        padre.save(user=request.user)
        # Redirect to the list matching the variant that was created.
        if titulo == "Relación Formación":
            messages.add_message(request, messages.SUCCESS, _(" %s %s con exito") % (_("Formación"), CREADO))
            return redirect(reverse('relacionpersonal_list', args=(pappccid, pmanautctrid,)))
        else:
            messages.add_message(request, messages.SUCCESS, _(" %s %s con exito") % (_("Proveedores"), CREADO))
            return redirect(reverse('relacionterceros_list', args=(pappccid, pmanautctrid,)))
    else:
        messages.add_message(request, messages.ERROR, form.errors)
    return render_to_response(template_name, {'form': form, 'auxiliar': auxiliar}, context_instance=RequestContext(request))
class RelacionesEntesElimina(RelacionesEntesMixin):
    """Picks the proper list (staff vs. suppliers) to return to after a delete."""

    def get_success_url(self):
        manual = self.object.manautctrl
        if manual.tpplancontrol.campoprimario == "RELACION_FORMACION":
            destino = "relacionpersonal"
        else:
            destino = "relacionterceros"
        return reverse('%s_list' % destino,
                       args=(manual.appcc.id, self.object.manautctrl_id,))


RelacionesEntesEliminarView = type(genericaview, (RelacionesEntesElimina, DeleteView,), eliminar_template)
@login_required(login_url='/')
def RelacionesEntesActualizarView(request, pappccid, prelacion, pmanautctrid, pk):
    """Edit an existing RelacionesEntes row.

    The stored row picks the form variant: rows with `personal` set use
    the staff-training form, the rest the supplier form.  404 when the
    row is not owned by one of the user's companies.
    """
    request.breadcrumbs(generarBreadCrumb(request.path_info))
    template_name = "base/form.html"
    relentes = get_object_or_404(RelacionesEntes, pk=pk, empresa__in=Empresas.objects.filter(usuario__username=request.user))
    if relentes.personal is not None:
        form = RelacionPersonalForms(request.POST or None, instance=relentes)
        titulo = "Relación Formación"
    else:
        form = RelacionTercerosForms(request.POST or None, instance=relentes)
        titulo = "Relación Control Proveedores"
    auxiliar = {"etiqueta": titulo}
    if form.is_valid():
        padre = form.save(commit=False, request=request)
        padre.save(user=request.user)
        # Redirect to the list matching the edited variant.
        if titulo == "Relación Formación":
            messages.add_message(request, messages.SUCCESS, _(" %s %s con exito") % (_("Formación"), ACTUA))
            return redirect(reverse('relacionpersonal_list', args=(pappccid, pmanautctrid,)))
        else:
            messages.add_message(request, messages.SUCCESS, _(" %s %s con exito") % (_("Proveedores"), ACTUA))
            return redirect(reverse('relacionterceros_list', args=(pappccid, pmanautctrid,)))
    else:
        messages.add_message(request, messages.ERROR, form.errors)
    return render_to_response(template_name, {'form': form, 'auxiliar': auxiliar}, context_instance=RequestContext(request))
#------------------------------------------Cuadros Gestion -------------------------------------------------------------------------------------
#class CuadrosGestionMixin(ViewBase):
class CuadrosGestionMixin(ViewBase, BreadcrumbTemplateMixin):
    """Shared configuration for the management-chart (Cuadros de Gestión) views."""

    def __init__(self):
        super(CuadrosGestionMixin, self).__init__(
            "cuadrosgestion", _("Cuadro de Gestión"), "CuadrosGestion", "CuadrosGestionForms")
        # Extra list-view columns shown after the model fields.
        self.cabezera.extend(["Ver", "Impresion", "Acciones"])


baselista = "CuadrosGestionMixin"
class CuadrosgestionListaView(CuadrosGestionMixin, ListView):
    """List the CuadrosGestion rows of one APPCC plan in manual order."""
    template_name = "appcc/list_cuadgestion.html"

    def get_queryset(self):
        # Only the rows of the requested plan, restricted to the user's
        # companies, in their manual 'orden' ordering.
        id = self.kwargs['pappccid']
        self.acciones["appccid"] = id  # used by the template's action links
        # The original evaluated this queryset twice just to print which
        # rows were child nodes; that debug pass is removed.
        return CuadrosGestion.objects.filter(appcc=id, empresa__in=Empresas.objects.filter(usuario=self.request.user)).order_by('orden')


# Detail view assembled dynamically from the mixin named by `baselista`.
CuadrosgestionDetalleView = type(genericaview, (eval(baselista), DetailView,), detalle_template)
@login_required(login_url='/')
@user_passes_test(is_allowed_edit)
def CuadrosgestionCrearView(request, pappccid):
    """Create a top-level CuadrosGestion row for the APPCC plan `pappccid`.

    After saving, the whole chart is renumbered.  If the POST contains
    'subetapa' the user jumps straight to the "create child" form for
    the new row; otherwise back to the chart list.
    """
    request.breadcrumbs(generarBreadCrumb(request.path_info))
    template_name = "base/form.html"
    auxiliar = {"etiqueta": "Crear Cuadros de Gestion"}
    form = CuadrosGestionForms(request.POST or None)
    if form.is_valid():
        padre = form.save(commit=False, request=request)
        padre.appcc_id = pappccid
        padre.save(user=request.user)
        # Keep the 'orden' sequence contiguous after the insert.
        ReordenarCuadrosGestion(usuario=request.user).reordenar()
        # 'subetapa' means the user wants to add a child right away.
        if 'subetapa' in request.POST:
            messages.add_message(request, messages.SUCCESS, _(" %s %s con exito") % (_("Item padre cuadro gestión"), CREADO))
            return redirect(reverse('cuadrosgestion_crearhijo', args=(padre.appcc_id, padre.id,)))
        else:
            messages.add_message(request, messages.SUCCESS, _(" %s %s con exito") % (_("Item cuadro gestión"), CREADO))
            return redirect(reverse('cuadrosgestion_list', args=(pappccid,)))
    else:
        messages.add_message(request, messages.ERROR, form.errors)
    return render_to_response(template_name, {'form': form, 'auxiliar': auxiliar}, context_instance=RequestContext(request))
@login_required(login_url='/')
@user_passes_test(is_allowed_edit)
def CuadrogestionHijoCrear(request, pappcid, padreid):
    """Create a child CuadrosGestion row under the node `padreid`.

    The parent must belong to one of the user's companies (404
    otherwise).  'subetapa' in the POST chains into creating a child of
    the newly created row.
    """
    request.breadcrumbs(generarBreadCrumb(request.path_info))
    template_name = "base/form.html"
    cabecera = get_object_or_404(CuadrosGestion, pk=padreid, empresa__in=Empresas.objects.filter(usuario__username=request.user))
    auxiliar = {"etiqueta": " viene de Etapa -> %s (%s) " % (cabecera.etapa, cabecera.peligro)}
    form = CuadrosGestionForms(request.POST or None)
    if form.is_valid():
        padre = form.save(commit=False, request=request)
        padre.appcc_id = pappcid
        # Link the new row into the tree under the requested parent node.
        padre.parent = get_object_or_404(CuadrosGestion, id=padreid)
        padre.save(user=request.user)
        # Keep the 'orden' sequence contiguous after the insert.
        ReordenarCuadrosGestion(usuario=request.user).reordenar()
        if 'subetapa' in request.POST:
            messages.add_message(request, messages.SUCCESS, _(" %s %s con exito") % (_("Item hijo cuadro gestión"), CREADO))
            return redirect(reverse('cuadrosgestion_crearhijo', args=(pappcid, padre.id,)))
        else:
            messages.add_message(request, messages.SUCCESS, _(" %s %s con exito") % (_("Item hijo cuadro gestión"), CREADO))
            return redirect(reverse('cuadrosgestion_list', args=(pappcid,)))
    else:
        messages.add_message(request, messages.ERROR, form.errors)
    return render_to_response(template_name, {'form': form, 'auxiliar': auxiliar}, context_instance=RequestContext(request))
class CuadrosgestionElimina(CuadrosGestionMixin):
    """Renumber the chart and return to the plan's list after a delete."""

    def get_success_url(self):
        # Deleting a node leaves gaps in 'orden'; renumber before listing.
        ReordenarCuadrosGestion(usuario=self.request.user).reordenar()
        return reverse('%s_list' % self.modulo, args=(self.object.appcc_id,))


CuadrosgestionEliminarView = type(genericaview, (CuadrosgestionElimina, DeleteView,), eliminar_template)
@login_required(login_url='/')
def CuadrosgestionActualizarView(request, pappccid, pk):
    """Edit a CuadrosGestion row (404 unless owned by the user's companies).

    'subetapa' in the POST jumps to the "create child" form for the row;
    otherwise redirects back to the chart list.
    """
    request.breadcrumbs(generarBreadCrumb(request.path_info))
    template_name = "base/form.html"
    auxiliar = {"etiqueta": "Editar Cuadros Gestión"}
    cabecera = get_object_or_404(CuadrosGestion, pk=pk, empresa__in=Empresas.objects.filter(usuario__username=request.user))
    form = CuadrosGestionForms(request.POST or None, instance=cabecera)
    if form.is_valid():
        padre = form.save(commit=False, request=request)
        padre.save(user=request.user)
        if 'subetapa' in request.POST:
            messages.add_message(request, messages.SUCCESS, _(" %s %s con exito") % (_("Item padre cuadro gestión"), ACTUA))
            return redirect(reverse('cuadrosgestion_crearhijo', args=(padre.appcc_id, padre.id,)))
        else:
            messages.add_message(request, messages.SUCCESS, _(" %s %s con exito") % (_("Item cuadro gestión"), ACTUA))
            return redirect(reverse('cuadrosgestion_list', args=(padre.appcc_id,)))
    else:
        messages.add_message(request, messages.ERROR, form.errors)
    return render_to_response(template_name, {'form': form, 'auxiliar': auxiliar}, context_instance=RequestContext(request))
########################################### GESTOR DE INCIDENCIAS ####################################################################
class GestorIncidenciasMixin(ViewBase, BreadcrumbTemplateMixin):
    """Shared configuration for the incident-manager views."""

    def __init__(self):
        super(GestorIncidenciasMixin, self).__init__(
            "gestorincidencias", _("Gestor de Incidencias"), "GestorIncidencias", "GestorIncidenciasForms")
        # Extra list-view columns shown after the model fields.
        for columna in (_(u"Ver"), _(u"Impresion"), _(u"Acciones")):
            self.cabezera.append(columna)


baselista = "GestorIncidenciasMixin"
class GestorincidenciasListaView(GestorIncidenciasMixin, ListView):
    """Incidents of one APPCC plan, newest first, scoped to the user's companies."""
    template_name = "appcc/list_gestorincidencias.html"

    def get_queryset(self):
        id = self.kwargs['pappccid']
        self.acciones["appccid"] = id  # used by the template's action links
        return GestorIncidencias.objects.filter(appcc=id, empresa__in=Empresas.objects.filter(usuario=self.request.user)).order_by('-fincidencia')


# Detail view assembled dynamically from the mixin named by `baselista`.
GestorincidenciasDetalleView = type(genericaview, (eval(baselista), DetailView,), detalle_template)
@login_required(login_url='/')
@user_passes_test(is_allowed_edit)
def GestorincidenciasCrearView(request, pappccid):
    """Create an incident under the APPCC plan `pappccid`."""
    request.breadcrumbs(generarBreadCrumb(request.path_info))
    formulario = GestorIncidenciasForms(request.POST or None)
    if not formulario.is_valid():
        # Invalid (or first GET): surface the errors and re-render the form.
        messages.add_message(request, messages.ERROR, formulario.errors)
        return render_to_response(
            "base/form.html",
            {'form': formulario, 'auxiliar': {"etiqueta": _(u"Crear Incidencia")}},
            context_instance=RequestContext(request))
    incidencia = formulario.save(commit=False, request=request)
    incidencia.appcc_id = pappccid
    incidencia.save(user=request.user)
    messages.add_message(request, messages.SUCCESS, _(" %s %s con exito") % (_("Incidencia"), CREADO))
    return redirect(reverse('gestorincidencias_list', args=(pappccid,)))
class GestorincidenciasElimina(GestorIncidenciasMixin):
    """After deleting an incident, go back to its plan's incident list."""

    def get_success_url(self):
        return reverse('%s_list' % self.modulo, args=(self.object.appcc_id,))


GestorincidenciasEliminarView = type(genericaview, (GestorincidenciasElimina, DeleteView,), eliminar_template)
@login_required(login_url='/')
def GestorincidenciasActualizarView(request, pappccid, pk):
    """Edit an incident (404 unless owned by one of the user's companies)."""
    request.breadcrumbs(generarBreadCrumb(request.path_info))
    template_name = "base/form.html"
    auxiliar = {"etiqueta": _(u"Editar Incidencias")}
    cabecera = get_object_or_404(GestorIncidencias, pk=pk, empresa__in=Empresas.objects.filter(usuario__username=request.user))
    form = GestorIncidenciasForms(request.POST or None, instance=cabecera)
    if form.is_valid():
        padre = form.save(commit=False, request=request)
        padre.save(user=request.user)
        messages.add_message(request, messages.SUCCESS, _(" %s %s con exito") % (_("Incidencia"), ACTUA))
        return redirect(reverse('gestorincidencias_list', args=(padre.appcc_id,)))
    else:
        messages.add_message(request, messages.ERROR, form.errors)
    return render_to_response(template_name, {'form': form, 'auxiliar': auxiliar}, context_instance=RequestContext(request))
@login_required(login_url='/')
def ImprimirListaTareas(request, empresaid):
    """Render today's task list for a company's APPCC plan as a PDF.

    Looks up the company's APPCC plan, then asks the Jasper report
    server to run report #25 with the plan id and today's date
    formatted as YYYYMMDD.
    """
    # strftime replaces the manual str(year) + zfill(month/day) concatenation.
    fecha = datetime.datetime.now().date().strftime('%Y%m%d')
    appccid = get_object_or_404(APPCC, empresa_id=int(empresaid)).id
    informe = get_object_or_404(Informes, pk=25)
    j = JasperClient(informe.url, settings.USUARIO_JASPER, settings.PASSWD_JASPER)
    ret = j.runReport(informe.nombrereport, "PDF", params={"pid": str(appccid), "pfinicio": fecha})
    return HttpResponse(ret['data'], content_type='application/pdf')
@login_required(login_url='/')
def ImprimirRegistrosEmpresas(request, empresaid):
    """Render the company-wide records report (Jasper report #76) as a PDF."""
    informe = get_object_or_404(Informes, pk=76)
    cliente = JasperClient(informe.url, settings.USUARIO_JASPER, settings.PASSWD_JASPER)
    respuesta = cliente.runReport(informe.nombrereport, "PDF", params={"pempresaid": str(empresaid)})
    return HttpResponse(respuesta['data'], content_type='application/pdf')
@login_required(login_url='/')
def CuadrosGestionArbol(request, empresaid):
    """Serve the company's management-chart tree as JSON."""
    generador = JsonCuadrosGestion(empresaid)
    datos = simplejson.dumps(generador.generar(), cls=JSONEncoder)
    return HttpResponse(datos, content_type="application/json")
#############################################Registros Rapidos ###################################################################
#@login_required(login_url='/')
#def RegistrosRapidosCrearView(request,pk,year,month,day):
# template_name = "base/modal_registros.html"
# fechadesde = datetime.datetime.strptime( "%s-%s-%s" %(day,month,year), '%d-%m-%Y')
# cabecera = get_object_or_404(DetallesRegistros, pk=pk)
# auxiliar = {"etiqueta" : cabecera.actividades, 'zona': cabecera.zonas, 'equipos' : cabecera.equipos }
# form = RegistrosRapidosForms(request.POST or None, initial={'fechadesde': fechadesde})
# #Ocultamos los campos a completar dependiendo del tipo de actividad
# if cabecera.actividades.tipo == 'C':
# form.helper['valor'].wrap(Div,css_class="control-group hidden")
# else:
# form.helper['estado'].wrap(Div,css_class="control-group hidden")
# if form.is_valid():
# padre = form.save(commit=False)
# padre.detreg = cabecera
# padre.fechahasta = padre.fechadesde
# padre.save()
# return redirect('/panelprincipal/')
# else:
# messages.add_message(request, messages.ERROR, form.errors)
#
#
# return render_to_response(template_name, {'form' : form, 'auxiliar': auxiliar },context_instance=RequestContext(request))
#
#
#@login_required(login_url='/')
#def RegistrosRapidosActualizarView(request,pk,detregid):
# template_name = "base/modal_registros.html"
# cabecera = get_object_or_404(DetallesRegistros, pk=detregid)
# detalle = get_object_or_404(Registros, pk=pk)
# auxiliar = {"etiqueta" : cabecera.actividades, 'zona': cabecera.zonas, 'equipos' : cabecera.equipos }
# form = RegistrosRapidosForms(request.POST or None, instance=detalle)
# #Ocultamos los campos a completar dependiendo del tipo de actividad
# if cabecera.actividades.tipo == 'C':
# form.helper['valor'].wrap(Div,css_class="control-group hidden")
# else:
# form.helper['estado'].wrap(Div,css_class="control-group hidden")
# if form.is_valid():
# padre = form.save(commit=False)
# padre.detreg = cabecera
# padre.fechahasta = padre.fechadesde
# padre.save()
# return redirect('/panelprincipal/')
# else:
# messages.add_message(request, messages.ERROR, form.errors)
#
#
# return render_to_response(template_name, {'form' : form, 'auxiliar': auxiliar },context_instance=RequestContext(request)) |
# repo: schwettmann/pretorched-x (gh_stars: 1-10)
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
# The is a pytorch model translated from a Caffe model.
# Note that I left out any dropout layers
# http://memorability.csail.mit.edu/
# Source for the original model:
# Understanding and Predicting Image Memorability at a Large Scale
# <NAME>, <NAME>, <NAME> and <NAME>
# International Conference on Computer Vision (ICCV), 2015
# DOI 10.1109/ICCV.2015.275
__all__ = ['MemNet', 'memnet']
# Pretrained-weight registry for memnet(): checkpoint URL plus the input
# preprocessing (size/range/mean/std) and the output calibration
# (mean/bias/scale/range) that MemNet.forward applies to the raw fc8 score.
pretrained_settings = {
    'memnet': {
        'lamem': {
            'url': 'http://pretorched-x.csail.mit.edu/models/memnet_lamem-a92fdac2.pth',
            'input_space': 'RGB',
            'input_size': [3, 227, 227],
            'input_range': [0, 1],
            'output_range': [0, 1],
            'output_mean': 0.7626,
            'output_bias': 0.65,
            'output_scale': 2,
            'mean': [0.485, 0.456, 0.406],
            'std': [0.229, 0.224, 0.225],
        },
    }
}
class MemNet(nn.Module):
    """AlexNet-style memorability regressor (translated from the LaMem Caffe model).

    The raw fc8 score is recalibrated in forward():
    (fc8 - output_mean) * output_scale + output_bias, clamped to
    output_range.  The dropout layers of the original Caffe model are
    omitted.
    """

    def __init__(self, output_mean=0.7626, output_bias=0.65, output_scale=2, output_range=[0, 1]):
        super().__init__()
        # Attribute names must stay as-is: they are the state_dict keys of
        # the published checkpoint.
        self.conv1 = nn.Conv2d(3, 96, 11, 4)
        # One pooling / LRN module reused after conv1, conv2 and conv5.
        self.pool = nn.MaxPool2d(3, 2)
        self.norm = nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75)
        self.conv2 = nn.Conv2d(96, 256, 5, padding=2, groups=2)
        self.conv3 = nn.Conv2d(256, 384, 3, padding=1)
        self.conv4 = nn.Conv2d(384, 384, 3, padding=1, groups=2)
        self.conv5 = nn.Conv2d(384, 256, 3, padding=1, groups=2)
        self.fc6 = nn.Linear(256 * 6 * 6, 4096)
        self.fc7 = nn.Linear(4096, 4096)
        self.fc8 = nn.Linear(4096, 1)
        # Output calibration; overwritten from pretrained_settings by memnet().
        self.output_mean = output_mean
        self.output_bias = output_bias
        self.output_scale = output_scale
        self.output_range = output_range

    def forward(self, x):
        """Return the calibrated, clamped memorability score."""
        score = self.forward_all(x)['fc8']
        score = (score - self.output_mean) * self.output_scale + self.output_bias
        return score.clamp(*self.output_range)

    def forward_all(self, x):
        """Run the full network, returning every intermediate activation by name."""
        acts = {}
        acts['conv1'] = F.relu(self.conv1(x))
        acts['pool1'] = self.pool(acts['conv1'])
        acts['norm1'] = self.norm(acts['pool1'])
        acts['conv2'] = F.relu(self.conv2(acts['norm1']))
        acts['pool2'] = self.pool(acts['conv2'])
        acts['norm2'] = self.norm(acts['pool2'])
        acts['conv3'] = F.relu(self.conv3(acts['norm2']))
        acts['conv4'] = F.relu(self.conv4(acts['conv3']))
        acts['conv5'] = F.relu(self.conv5(acts['conv4']))
        acts['pool5'] = self.pool(acts['conv5'])
        acts['flattened'] = acts['pool5'].view(-1, self.num_flat_features(acts['pool5']))
        acts['fc6'] = F.relu(self.fc6(acts['flattened']))
        acts['fc7'] = F.relu(self.fc7(acts['fc6']))
        acts['fc8'] = self.fc8(acts['fc7'])
        return acts

    def num_flat_features(self, x):
        """Number of features per sample: the product of all non-batch dims."""
        count = 1
        for dim in x.size()[1:]:
            count *= dim
        return count
def memnet(pretrained='lamem'):
    """Build MemNet, optionally loading pretrained weights.

    pretrained -- key into pretrained_settings['memnet'] ('lamem'), or
    None for a randomly initialised network.
    """
    model = MemNet()
    if pretrained is None:
        return model
    settings = pretrained_settings['memnet'][pretrained]
    model.load_state_dict(model_zoo.load_url(settings['url']))
    # Copy the calibration and preprocessing metadata onto the instance.
    for attr in ('output_mean', 'output_bias', 'output_scale', 'output_range',
                 'input_space', 'input_size', 'input_range', 'mean', 'std'):
        setattr(model, attr, settings[attr])
    return model
|
import sys
import time
import pprint
import time
import urllib.error
import urllib.request
import urllib.parse
import io
import random
import string
import json
from flask import Flask, request, send_file, jsonify, make_response, Response
from werkzeug.wsgi import FileWrapper
import numpy as np
import pandas as pd
from PIL import Image
import chainer
import chainer.functions as F
from chainer import Variable
from chainer.backends import cuda
from chainercv.datasets import voc_bbox_label_names
from chainercv.links import YOLOv3
from chainercv.utils import read_image
from chainercv.visualizations import vis_bbox
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine, MetaData, Table, Column, Integer,ForeignKey, String, Float
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import relationship
from sqlalchemy.sql import func
from sqlalchemy import desc
# Single SQLite file next to the app; echo=True logs every SQL statement.
engine = create_engine('sqlite:///db.sqlite3', echo=True)
Base = declarative_base()


class LineName(Base):
    """Map from a device key to the queue's display name.

    aveleavetime is used by the /image handler as the per-person factor
    of the waiting-time estimate and is recalibrated there —
    presumably seconds per person; confirm units against the clients.
    """
    __tablename__ = "linename"
    device = Column(String, primary_key=True)  # device/camera key
    name = Column(String)                      # display name of the queue
    aveleavetime = Column(Float)               # avg leave time per person
    lineque = relationship("LineQue", back_populates="linename")
class LineQue(Base):
    """One queue observation posted by a device via /image."""
    # Table name
    __tablename__ = 'lineque'
    # Columns
    id = Column(String, primary_key=True)   # device key + posting timestamp
    device = Column(String, ForeignKey('linename.device'))
    ob_time = Column(Float)                 # observation time (unix seconds)
    count = Column(Integer)                 # people detected in the frame
    que_time = Column(Float)                # estimated waiting time
    diff = Column(Integer)                  # count delta vs. previous observation
    linename = relationship("LineName", back_populates="lineque")
meta = Base.metadata
meta.create_all(engine)  # create the tables on startup if missing

# YOLOv3 detector pretrained on VOC07+12; used to count people in frames.
model = YOLOv3(n_fg_class=None, pretrained_model='voc0712')
print(voc_bbox_label_names[14])  # sanity check: index of the 'person' label
idx_person = int(voc_bbox_label_names.index("person"))
print(idx_person)

# flask app
app = Flask(__name__)
# limit upload file size : 2MB
app.config['MAX_CONTENT_LENGTH'] = 1 * 1024 * 1024 * 2
@app.route('/')
def hello_world():
    """Liveness check."""
    return 'Hello World!'
@app.route("/count", methods=["get"])
def count():
name_query = urllib.parse.unquote(request.args.get('name'))
Session = sessionmaker(bind=engine)
session = Session()
res_dev = session.query(LineName).filter(LineName.name == name_query).first()
print("res dev", res_dev.__dict__)
res_sort = session.query(LineQue)\
.filter(LineQue.device == res_dev.device)\
.first()
session.close()
result = {
"data":{
"time":str(res_sort.ob_time),
"count":str(res_sort.count),
"que_time":str(res_sort.que_time)
}
}
return jsonify(result)
@app.route('/image', methods=["get", "post"])
def image():
    """Accept an uploaded image, count people with YOLOv3, and record it.

    Query params:
        key: device key identifying the line (LineName.device).
    Uploaded files:
        file: the image to analyse.

    Returns:
        JSON {"data": {"time", "count"}} for the observation just stored,
        or a JSON error with HTTP 404 for an unknown device key.
    """
    img = request.files["file"].read()
    key = request.args.get('key')
    print("key", key)
    time_posted = time.time()
    # Decode the upload for chainercv (expects a file-like object).
    img = read_image(io.BytesIO(img))
    bboxes, labels, scores = model.predict([img])
    for label, score in zip(labels[0], scores[0]):
        print(label, score)
    # Number of detections of the VOC "person" class.
    num_person = np.sum(labels[0] == idx_person)
    # Primary key for the new observation row.
    idd = key + str(time_posted)
    Session = sessionmaker(bind=engine)
    session = Session()
    try:
        # Most recent previous observation for this device.
        res_pre = session.query(LineQue)\
            .filter(LineQue.device == key)\
            .order_by(desc(LineQue.ob_time))\
            .first()
        # Delta against the previous observation. The original left
        # diff_time undefined when res_pre is None, which could raise
        # NameError further down; initialise it explicitly.
        if res_pre is None:
            diff = 0
            diff_time = 0.0
        else:
            diff = num_person - res_pre.count
            diff_time = time_posted - res_pre.ob_time
        # Estimated waiting time = people in queue * average leave time.
        res_ln = session.query(LineName)\
            .filter(LineName.device == key)\
            .first()
        if res_ln is None:
            # Unknown key previously raised AttributeError (HTTP 500).
            return jsonify({"error": "unknown device key"}), 404
        que_time = num_person * float(res_ln.aveleavetime)
        print("\nque_time", que_time, "\n")
        # record the new observation
        record = LineQue(id=idd, device=key, ob_time=float(time_posted),
                         count=int(num_person), diff=diff, que_time=que_time
                         )
        print(record)
        session.add(record)
        # Periodically re-estimate the average leave time from recorded
        # negative diffs (people who left the queue).
        row_count = session.query(LineQue).filter(LineQue.device == key).count()
        if row_count % 10000000 == 0:
            res_new_ave = session.query(LineQue)\
                .filter(LineQue.diff < 0).all()
            su = 0
            for re in res_new_ave:
                su -= re.diff
            print("sum", su)
            # NOTE(review): kept as in the original, but the precedence looks
            # suspicious -- this divides by (su + eps / row_count); most
            # likely (su + eps) followed by / row_count was intended. Confirm
            # before changing.
            res_ln.aveleavetime = diff_time / float(su + 0.000000001 / row_count) + 0.000000001
            session.add(res_ln)
        session.commit()
    finally:
        # Always release the connection; the original leaked it on errors.
        session.close()
    result = {
        "data": {
            "time": str(time_posted),
            "count": str(num_person)
        }
    }
    return jsonify(result)
if __name__ == '__main__':
    # Serve on all interfaces on the standard HTTP port, debug disabled.
    app.run(debug=False, host='0.0.0.0', port=80)
|
<reponame>andrewsmike/jasmine<gh_stars>1-10
# Generated from SQLParser.g4 by ANTLR 4.9.3
from antlr4 import *
if __name__ is not None and "." in __name__:
from .SQLParser import SQLParser
else:
from SQLParser import SQLParser
"""
Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2.0,
as published by the Free Software Foundation.
This program is also distributed with certain software (including
but not limited to OpenSSL) that is licensed under separate terms, as
designated in a particular file or component or in included license
documentation. The authors of MySQL hereby grant you an additional
permission to link the program and your derivative works with the
separately licensed software that they have included with MySQL.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
the GNU General Public License, version 2.0, for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
# mypy: ignore-errors
from jasmine.sql.parser.sql_base import *
# This class defines a complete generic visitor for a parse tree produced by SQLParser.
class SQLParserVisitor(ParseTreeVisitor):
# Visit a parse tree produced by SQLParser#sqlProgram.
def visitSqlProgram(self, ctx:SQLParser.SqlProgramContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#statement.
def visitStatement(self, ctx:SQLParser.StatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#simpleStatement.
def visitSimpleStatement(self, ctx:SQLParser.SimpleStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#alterStatement.
def visitAlterStatement(self, ctx:SQLParser.AlterStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#alterDatabase.
def visitAlterDatabase(self, ctx:SQLParser.AlterDatabaseContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#alterEvent.
def visitAlterEvent(self, ctx:SQLParser.AlterEventContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#alterLogfileGroup.
def visitAlterLogfileGroup(self, ctx:SQLParser.AlterLogfileGroupContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#alterLogfileGroupOptions.
def visitAlterLogfileGroupOptions(self, ctx:SQLParser.AlterLogfileGroupOptionsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#alterLogfileGroupOption.
def visitAlterLogfileGroupOption(self, ctx:SQLParser.AlterLogfileGroupOptionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#alterServer.
def visitAlterServer(self, ctx:SQLParser.AlterServerContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#alterTable.
def visitAlterTable(self, ctx:SQLParser.AlterTableContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#alterTableActions.
def visitAlterTableActions(self, ctx:SQLParser.AlterTableActionsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#alterCommandList.
def visitAlterCommandList(self, ctx:SQLParser.AlterCommandListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#alterCommandsModifierList.
def visitAlterCommandsModifierList(self, ctx:SQLParser.AlterCommandsModifierListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#standaloneAlterCommands.
def visitStandaloneAlterCommands(self, ctx:SQLParser.StandaloneAlterCommandsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#alterPartition.
def visitAlterPartition(self, ctx:SQLParser.AlterPartitionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#alterList.
def visitAlterList(self, ctx:SQLParser.AlterListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#alterCommandsModifier.
def visitAlterCommandsModifier(self, ctx:SQLParser.AlterCommandsModifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#alterListItem.
def visitAlterListItem(self, ctx:SQLParser.AlterListItemContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#place.
def visitPlace(self, ctx:SQLParser.PlaceContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#restrict.
def visitRestrict(self, ctx:SQLParser.RestrictContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#alterOrderList.
def visitAlterOrderList(self, ctx:SQLParser.AlterOrderListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#alterAlgorithmOption.
def visitAlterAlgorithmOption(self, ctx:SQLParser.AlterAlgorithmOptionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#alterLockOption.
def visitAlterLockOption(self, ctx:SQLParser.AlterLockOptionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#indexLockAndAlgorithm.
def visitIndexLockAndAlgorithm(self, ctx:SQLParser.IndexLockAndAlgorithmContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#withValidation.
def visitWithValidation(self, ctx:SQLParser.WithValidationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#removePartitioning.
def visitRemovePartitioning(self, ctx:SQLParser.RemovePartitioningContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#allOrPartitionNameList.
def visitAllOrPartitionNameList(self, ctx:SQLParser.AllOrPartitionNameListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#alterTablespace.
def visitAlterTablespace(self, ctx:SQLParser.AlterTablespaceContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#alterUndoTablespace.
def visitAlterUndoTablespace(self, ctx:SQLParser.AlterUndoTablespaceContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#undoTableSpaceOptions.
def visitUndoTableSpaceOptions(self, ctx:SQLParser.UndoTableSpaceOptionsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#undoTableSpaceOption.
def visitUndoTableSpaceOption(self, ctx:SQLParser.UndoTableSpaceOptionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#alterTablespaceOptions.
def visitAlterTablespaceOptions(self, ctx:SQLParser.AlterTablespaceOptionsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#alterTablespaceOption.
def visitAlterTablespaceOption(self, ctx:SQLParser.AlterTablespaceOptionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#changeTablespaceOption.
def visitChangeTablespaceOption(self, ctx:SQLParser.ChangeTablespaceOptionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#alterView.
def visitAlterView(self, ctx:SQLParser.AlterViewContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#viewTail.
def visitViewTail(self, ctx:SQLParser.ViewTailContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#viewSelect.
def visitViewSelect(self, ctx:SQLParser.ViewSelectContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#viewCheckOption.
def visitViewCheckOption(self, ctx:SQLParser.ViewCheckOptionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#createStatement.
def visitCreateStatement(self, ctx:SQLParser.CreateStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#createDatabase.
def visitCreateDatabase(self, ctx:SQLParser.CreateDatabaseContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#createDatabaseOption.
def visitCreateDatabaseOption(self, ctx:SQLParser.CreateDatabaseOptionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#createTable.
def visitCreateTable(self, ctx:SQLParser.CreateTableContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#tableElementList.
def visitTableElementList(self, ctx:SQLParser.TableElementListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#tableElement.
def visitTableElement(self, ctx:SQLParser.TableElementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#duplicateAsQueryExpression.
def visitDuplicateAsQueryExpression(self, ctx:SQLParser.DuplicateAsQueryExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#queryExpressionOrParens.
def visitQueryExpressionOrParens(self, ctx:SQLParser.QueryExpressionOrParensContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#createRoutine.
def visitCreateRoutine(self, ctx:SQLParser.CreateRoutineContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#createProcedure.
def visitCreateProcedure(self, ctx:SQLParser.CreateProcedureContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#createFunction.
def visitCreateFunction(self, ctx:SQLParser.CreateFunctionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#createUdf.
def visitCreateUdf(self, ctx:SQLParser.CreateUdfContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#routineCreateOption.
def visitRoutineCreateOption(self, ctx:SQLParser.RoutineCreateOptionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#routineAlterOptions.
def visitRoutineAlterOptions(self, ctx:SQLParser.RoutineAlterOptionsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#routineOption.
def visitRoutineOption(self, ctx:SQLParser.RoutineOptionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#createIndex.
def visitCreateIndex(self, ctx:SQLParser.CreateIndexContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#indexNameAndType.
def visitIndexNameAndType(self, ctx:SQLParser.IndexNameAndTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#createIndexTarget.
def visitCreateIndexTarget(self, ctx:SQLParser.CreateIndexTargetContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#createLogfileGroup.
def visitCreateLogfileGroup(self, ctx:SQLParser.CreateLogfileGroupContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#logfileGroupOptions.
def visitLogfileGroupOptions(self, ctx:SQLParser.LogfileGroupOptionsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#logfileGroupOption.
def visitLogfileGroupOption(self, ctx:SQLParser.LogfileGroupOptionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#createServer.
def visitCreateServer(self, ctx:SQLParser.CreateServerContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#serverOptions.
def visitServerOptions(self, ctx:SQLParser.ServerOptionsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#serverOption.
def visitServerOption(self, ctx:SQLParser.ServerOptionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#createTablespace.
def visitCreateTablespace(self, ctx:SQLParser.CreateTablespaceContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#createUndoTablespace.
def visitCreateUndoTablespace(self, ctx:SQLParser.CreateUndoTablespaceContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#tsDataFileName.
def visitTsDataFileName(self, ctx:SQLParser.TsDataFileNameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#tsDataFile.
def visitTsDataFile(self, ctx:SQLParser.TsDataFileContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#tablespaceOptions.
def visitTablespaceOptions(self, ctx:SQLParser.TablespaceOptionsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#tablespaceOption.
def visitTablespaceOption(self, ctx:SQLParser.TablespaceOptionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#tsOptionInitialSize.
def visitTsOptionInitialSize(self, ctx:SQLParser.TsOptionInitialSizeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#tsOptionUndoRedoBufferSize.
def visitTsOptionUndoRedoBufferSize(self, ctx:SQLParser.TsOptionUndoRedoBufferSizeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#tsOptionAutoextendSize.
def visitTsOptionAutoextendSize(self, ctx:SQLParser.TsOptionAutoextendSizeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#tsOptionMaxSize.
def visitTsOptionMaxSize(self, ctx:SQLParser.TsOptionMaxSizeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#tsOptionExtentSize.
def visitTsOptionExtentSize(self, ctx:SQLParser.TsOptionExtentSizeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#tsOptionNodegroup.
def visitTsOptionNodegroup(self, ctx:SQLParser.TsOptionNodegroupContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#tsOptionEngine.
def visitTsOptionEngine(self, ctx:SQLParser.TsOptionEngineContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#tsOptionWait.
def visitTsOptionWait(self, ctx:SQLParser.TsOptionWaitContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#tsOptionComment.
def visitTsOptionComment(self, ctx:SQLParser.TsOptionCommentContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#tsOptionFileblockSize.
def visitTsOptionFileblockSize(self, ctx:SQLParser.TsOptionFileblockSizeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#tsOptionEncryption.
def visitTsOptionEncryption(self, ctx:SQLParser.TsOptionEncryptionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#createView.
def visitCreateView(self, ctx:SQLParser.CreateViewContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#viewReplaceOrAlgorithm.
def visitViewReplaceOrAlgorithm(self, ctx:SQLParser.ViewReplaceOrAlgorithmContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#viewAlgorithm.
def visitViewAlgorithm(self, ctx:SQLParser.ViewAlgorithmContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#viewSuid.
def visitViewSuid(self, ctx:SQLParser.ViewSuidContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#createTrigger.
def visitCreateTrigger(self, ctx:SQLParser.CreateTriggerContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#triggerFollowsPrecedesClause.
def visitTriggerFollowsPrecedesClause(self, ctx:SQLParser.TriggerFollowsPrecedesClauseContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#createEvent.
def visitCreateEvent(self, ctx:SQLParser.CreateEventContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#createRole.
def visitCreateRole(self, ctx:SQLParser.CreateRoleContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#createSpatialReference.
def visitCreateSpatialReference(self, ctx:SQLParser.CreateSpatialReferenceContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#srsAttribute.
def visitSrsAttribute(self, ctx:SQLParser.SrsAttributeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#dropStatement.
def visitDropStatement(self, ctx:SQLParser.DropStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#dropDatabase.
def visitDropDatabase(self, ctx:SQLParser.DropDatabaseContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#dropEvent.
def visitDropEvent(self, ctx:SQLParser.DropEventContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#dropFunction.
def visitDropFunction(self, ctx:SQLParser.DropFunctionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#dropProcedure.
def visitDropProcedure(self, ctx:SQLParser.DropProcedureContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#dropIndex.
def visitDropIndex(self, ctx:SQLParser.DropIndexContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#dropLogfileGroup.
def visitDropLogfileGroup(self, ctx:SQLParser.DropLogfileGroupContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#dropLogfileGroupOption.
def visitDropLogfileGroupOption(self, ctx:SQLParser.DropLogfileGroupOptionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#dropServer.
def visitDropServer(self, ctx:SQLParser.DropServerContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#dropTable.
def visitDropTable(self, ctx:SQLParser.DropTableContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#dropTableSpace.
def visitDropTableSpace(self, ctx:SQLParser.DropTableSpaceContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#dropTrigger.
def visitDropTrigger(self, ctx:SQLParser.DropTriggerContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#dropView.
def visitDropView(self, ctx:SQLParser.DropViewContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#dropRole.
def visitDropRole(self, ctx:SQLParser.DropRoleContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#dropSpatialReference.
def visitDropSpatialReference(self, ctx:SQLParser.DropSpatialReferenceContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#dropUndoTablespace.
def visitDropUndoTablespace(self, ctx:SQLParser.DropUndoTablespaceContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#renameTableStatement.
def visitRenameTableStatement(self, ctx:SQLParser.RenameTableStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#renamePair.
def visitRenamePair(self, ctx:SQLParser.RenamePairContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#truncateTableStatement.
def visitTruncateTableStatement(self, ctx:SQLParser.TruncateTableStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#importStatement.
def visitImportStatement(self, ctx:SQLParser.ImportStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#callStatement.
def visitCallStatement(self, ctx:SQLParser.CallStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#deleteStatement.
def visitDeleteStatement(self, ctx:SQLParser.DeleteStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#partitionDelete.
def visitPartitionDelete(self, ctx:SQLParser.PartitionDeleteContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#deleteStatementOption.
def visitDeleteStatementOption(self, ctx:SQLParser.DeleteStatementOptionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#doStatement.
def visitDoStatement(self, ctx:SQLParser.DoStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#handlerStatement.
def visitHandlerStatement(self, ctx:SQLParser.HandlerStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#handlerReadOrScan.
def visitHandlerReadOrScan(self, ctx:SQLParser.HandlerReadOrScanContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#insertStatement.
def visitInsertStatement(self, ctx:SQLParser.InsertStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#insertLockOption.
def visitInsertLockOption(self, ctx:SQLParser.InsertLockOptionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#insertFromConstructor.
def visitInsertFromConstructor(self, ctx:SQLParser.InsertFromConstructorContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#fields.
def visitFields(self, ctx:SQLParser.FieldsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#insertValues.
def visitInsertValues(self, ctx:SQLParser.InsertValuesContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#insertQueryExpression.
def visitInsertQueryExpression(self, ctx:SQLParser.InsertQueryExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#valueList.
def visitValueList(self, ctx:SQLParser.ValueListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#values.
def visitValues(self, ctx:SQLParser.ValuesContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#valuesReference.
def visitValuesReference(self, ctx:SQLParser.ValuesReferenceContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#insertUpdateList.
def visitInsertUpdateList(self, ctx:SQLParser.InsertUpdateListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#loadStatement.
def visitLoadStatement(self, ctx:SQLParser.LoadStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#dataOrXml.
def visitDataOrXml(self, ctx:SQLParser.DataOrXmlContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#xmlRowsIdentifiedBy.
def visitXmlRowsIdentifiedBy(self, ctx:SQLParser.XmlRowsIdentifiedByContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#loadDataFileTail.
def visitLoadDataFileTail(self, ctx:SQLParser.LoadDataFileTailContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#loadDataFileTargetList.
def visitLoadDataFileTargetList(self, ctx:SQLParser.LoadDataFileTargetListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#fieldOrVariableList.
def visitFieldOrVariableList(self, ctx:SQLParser.FieldOrVariableListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#replaceStatement.
def visitReplaceStatement(self, ctx:SQLParser.ReplaceStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#selectStatement.
def visitSelectStatement(self, ctx:SQLParser.SelectStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#selectStatementWithInto.
def visitSelectStatementWithInto(self, ctx:SQLParser.SelectStatementWithIntoContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#queryExpression.
def visitQueryExpression(self, ctx:SQLParser.QueryExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#queryExpressionBody.
def visitQueryExpressionBody(self, ctx:SQLParser.QueryExpressionBodyContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#queryExpressionParens.
def visitQueryExpressionParens(self, ctx:SQLParser.QueryExpressionParensContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#queryPrimary.
def visitQueryPrimary(self, ctx:SQLParser.QueryPrimaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#querySpecification.
def visitQuerySpecification(self, ctx:SQLParser.QuerySpecificationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#subquery.
def visitSubquery(self, ctx:SQLParser.SubqueryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#querySpecOption.
def visitQuerySpecOption(self, ctx:SQLParser.QuerySpecOptionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#limitClause.
def visitLimitClause(self, ctx:SQLParser.LimitClauseContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#simpleLimitClause.
def visitSimpleLimitClause(self, ctx:SQLParser.SimpleLimitClauseContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#limitOptions.
def visitLimitOptions(self, ctx:SQLParser.LimitOptionsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#limitOption.
def visitLimitOption(self, ctx:SQLParser.LimitOptionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#intoClause.
def visitIntoClause(self, ctx:SQLParser.IntoClauseContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#procedureAnalyseClause.
def visitProcedureAnalyseClause(self, ctx:SQLParser.ProcedureAnalyseClauseContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#havingClause.
def visitHavingClause(self, ctx:SQLParser.HavingClauseContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#windowClause.
def visitWindowClause(self, ctx:SQLParser.WindowClauseContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#windowDefinition.
def visitWindowDefinition(self, ctx:SQLParser.WindowDefinitionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#windowSpec.
def visitWindowSpec(self, ctx:SQLParser.WindowSpecContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#windowSpecDetails.
def visitWindowSpecDetails(self, ctx:SQLParser.WindowSpecDetailsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#windowFrameClause.
def visitWindowFrameClause(self, ctx:SQLParser.WindowFrameClauseContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#windowFrameUnits.
def visitWindowFrameUnits(self, ctx:SQLParser.WindowFrameUnitsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#windowFrameExtent.
def visitWindowFrameExtent(self, ctx:SQLParser.WindowFrameExtentContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#windowFrameStart.
def visitWindowFrameStart(self, ctx:SQLParser.WindowFrameStartContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#windowFrameBetween.
def visitWindowFrameBetween(self, ctx:SQLParser.WindowFrameBetweenContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#windowFrameBound.
def visitWindowFrameBound(self, ctx:SQLParser.WindowFrameBoundContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#windowFrameExclusion.
def visitWindowFrameExclusion(self, ctx:SQLParser.WindowFrameExclusionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#withClause.
def visitWithClause(self, ctx:SQLParser.WithClauseContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#commonTableExpression.
def visitCommonTableExpression(self, ctx:SQLParser.CommonTableExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#groupByClause.
def visitGroupByClause(self, ctx:SQLParser.GroupByClauseContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#olapOption.
def visitOlapOption(self, ctx:SQLParser.OlapOptionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#orderClause.
    # NOTE(review): auto-generated default visitor methods (ANTLR-style, per the
    # "Visit a parse tree produced by" headers) — each simply delegates to
    # visitChildren(ctx). Override individual methods to add behavior; do not
    # hand-edit the defaults, as regeneration would clobber the changes.
    def visitOrderClause(self, ctx:SQLParser.OrderClauseContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#direction.
    def visitDirection(self, ctx:SQLParser.DirectionContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#fromClause.
    def visitFromClause(self, ctx:SQLParser.FromClauseContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#tableReferenceList.
    def visitTableReferenceList(self, ctx:SQLParser.TableReferenceListContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#tableValueConstructor.
    def visitTableValueConstructor(self, ctx:SQLParser.TableValueConstructorContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#explicitTable.
    def visitExplicitTable(self, ctx:SQLParser.ExplicitTableContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#rowValueExplicit.
    def visitRowValueExplicit(self, ctx:SQLParser.RowValueExplicitContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#selectOption.
    def visitSelectOption(self, ctx:SQLParser.SelectOptionContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#lockingClauseList.
    def visitLockingClauseList(self, ctx:SQLParser.LockingClauseListContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#lockingClause.
    def visitLockingClause(self, ctx:SQLParser.LockingClauseContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#lockStrengh.
    # (sic: "Strengh" matches the grammar rule name — keep in sync with SQLParser.)
    def visitLockStrengh(self, ctx:SQLParser.LockStrenghContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#lockedRowAction.
    def visitLockedRowAction(self, ctx:SQLParser.LockedRowActionContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#selectItemList.
    def visitSelectItemList(self, ctx:SQLParser.SelectItemListContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#selectItem.
    def visitSelectItem(self, ctx:SQLParser.SelectItemContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#selectAlias.
    def visitSelectAlias(self, ctx:SQLParser.SelectAliasContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#whereClause.
    def visitWhereClause(self, ctx:SQLParser.WhereClauseContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#tableReference.
    def visitTableReference(self, ctx:SQLParser.TableReferenceContext):
        return self.visitChildren(ctx)
    # NOTE(review): auto-generated default visitors for join/table-factor and
    # index-hint rules — each delegates to visitChildren(ctx); override per rule.
    # Visit a parse tree produced by SQLParser#escapedTableReference.
    def visitEscapedTableReference(self, ctx:SQLParser.EscapedTableReferenceContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#joinedTable.
    def visitJoinedTable(self, ctx:SQLParser.JoinedTableContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#naturalJoinType.
    def visitNaturalJoinType(self, ctx:SQLParser.NaturalJoinTypeContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#innerJoinType.
    def visitInnerJoinType(self, ctx:SQLParser.InnerJoinTypeContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#outerJoinType.
    def visitOuterJoinType(self, ctx:SQLParser.OuterJoinTypeContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#tableFactor.
    def visitTableFactor(self, ctx:SQLParser.TableFactorContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#singleTable.
    def visitSingleTable(self, ctx:SQLParser.SingleTableContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#singleTableParens.
    def visitSingleTableParens(self, ctx:SQLParser.SingleTableParensContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#derivedTable.
    def visitDerivedTable(self, ctx:SQLParser.DerivedTableContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#tableReferenceListParens.
    def visitTableReferenceListParens(self, ctx:SQLParser.TableReferenceListParensContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#tableFunction.
    def visitTableFunction(self, ctx:SQLParser.TableFunctionContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#columnsClause.
    def visitColumnsClause(self, ctx:SQLParser.ColumnsClauseContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#jtColumn.
    def visitJtColumn(self, ctx:SQLParser.JtColumnContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#onEmptyOrError.
    def visitOnEmptyOrError(self, ctx:SQLParser.OnEmptyOrErrorContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#onEmpty.
    def visitOnEmpty(self, ctx:SQLParser.OnEmptyContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#onError.
    def visitOnError(self, ctx:SQLParser.OnErrorContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#jtOnResponse.
    def visitJtOnResponse(self, ctx:SQLParser.JtOnResponseContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#unionOption.
    def visitUnionOption(self, ctx:SQLParser.UnionOptionContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#tableAlias.
    def visitTableAlias(self, ctx:SQLParser.TableAliasContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#indexHintList.
    def visitIndexHintList(self, ctx:SQLParser.IndexHintListContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#indexHint.
    def visitIndexHint(self, ctx:SQLParser.IndexHintContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#indexHintType.
    def visitIndexHintType(self, ctx:SQLParser.IndexHintTypeContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#keyOrIndex.
    def visitKeyOrIndex(self, ctx:SQLParser.KeyOrIndexContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#constraintKeyType.
    def visitConstraintKeyType(self, ctx:SQLParser.ConstraintKeyTypeContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#indexHintClause.
    def visitIndexHintClause(self, ctx:SQLParser.IndexHintClauseContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#indexList.
    def visitIndexList(self, ctx:SQLParser.IndexListContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#indexListElement.
    def visitIndexListElement(self, ctx:SQLParser.IndexListElementContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#updateStatement.
    def visitUpdateStatement(self, ctx:SQLParser.UpdateStatementContext):
        return self.visitChildren(ctx)
    # NOTE(review): auto-generated default visitors for transaction, locking,
    # XA and replication rules — each delegates to visitChildren(ctx).
    # Visit a parse tree produced by SQLParser#transactionOrLockingStatement.
    def visitTransactionOrLockingStatement(self, ctx:SQLParser.TransactionOrLockingStatementContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#transactionStatement.
    def visitTransactionStatement(self, ctx:SQLParser.TransactionStatementContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#beginWork.
    def visitBeginWork(self, ctx:SQLParser.BeginWorkContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#transactionCharacteristic.
    def visitTransactionCharacteristic(self, ctx:SQLParser.TransactionCharacteristicContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#savepointStatement.
    def visitSavepointStatement(self, ctx:SQLParser.SavepointStatementContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#lockStatement.
    def visitLockStatement(self, ctx:SQLParser.LockStatementContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#lockItem.
    def visitLockItem(self, ctx:SQLParser.LockItemContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#lockOption.
    def visitLockOption(self, ctx:SQLParser.LockOptionContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#xaStatement.
    def visitXaStatement(self, ctx:SQLParser.XaStatementContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#xaConvert.
    def visitXaConvert(self, ctx:SQLParser.XaConvertContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#xid.
    def visitXid(self, ctx:SQLParser.XidContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#replicationStatement.
    def visitReplicationStatement(self, ctx:SQLParser.ReplicationStatementContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#resetOption.
    def visitResetOption(self, ctx:SQLParser.ResetOptionContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#masterResetOptions.
    def visitMasterResetOptions(self, ctx:SQLParser.MasterResetOptionsContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#replicationLoad.
    def visitReplicationLoad(self, ctx:SQLParser.ReplicationLoadContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#changeMaster.
    def visitChangeMaster(self, ctx:SQLParser.ChangeMasterContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#changeMasterOptions.
    def visitChangeMasterOptions(self, ctx:SQLParser.ChangeMasterOptionsContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#masterOption.
    def visitMasterOption(self, ctx:SQLParser.MasterOptionContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#privilegeCheckDef.
    def visitPrivilegeCheckDef(self, ctx:SQLParser.PrivilegeCheckDefContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#tablePrimaryKeyCheckDef.
    def visitTablePrimaryKeyCheckDef(self, ctx:SQLParser.TablePrimaryKeyCheckDefContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#masterTlsCiphersuitesDef.
    def visitMasterTlsCiphersuitesDef(self, ctx:SQLParser.MasterTlsCiphersuitesDefContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#masterFileDef.
    def visitMasterFileDef(self, ctx:SQLParser.MasterFileDefContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#serverIdList.
    def visitServerIdList(self, ctx:SQLParser.ServerIdListContext):
        return self.visitChildren(ctx)
    # NOTE(review): auto-generated default visitors for replication filters,
    # slave control, prepared statements and clone rules — each delegates to
    # visitChildren(ctx).
    # Visit a parse tree produced by SQLParser#changeReplication.
    def visitChangeReplication(self, ctx:SQLParser.ChangeReplicationContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#filterDefinition.
    def visitFilterDefinition(self, ctx:SQLParser.FilterDefinitionContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#filterDbList.
    def visitFilterDbList(self, ctx:SQLParser.FilterDbListContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#filterTableList.
    def visitFilterTableList(self, ctx:SQLParser.FilterTableListContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#filterStringList.
    def visitFilterStringList(self, ctx:SQLParser.FilterStringListContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#filterWildDbTableString.
    def visitFilterWildDbTableString(self, ctx:SQLParser.FilterWildDbTableStringContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#filterDbPairList.
    def visitFilterDbPairList(self, ctx:SQLParser.FilterDbPairListContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#slave.
    def visitSlave(self, ctx:SQLParser.SlaveContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#slaveUntilOptions.
    def visitSlaveUntilOptions(self, ctx:SQLParser.SlaveUntilOptionsContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#slaveConnectionOptions.
    def visitSlaveConnectionOptions(self, ctx:SQLParser.SlaveConnectionOptionsContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#slaveThreadOptions.
    def visitSlaveThreadOptions(self, ctx:SQLParser.SlaveThreadOptionsContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#slaveThreadOption.
    def visitSlaveThreadOption(self, ctx:SQLParser.SlaveThreadOptionContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#groupReplication.
    def visitGroupReplication(self, ctx:SQLParser.GroupReplicationContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#preparedStatement.
    def visitPreparedStatement(self, ctx:SQLParser.PreparedStatementContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#executeStatement.
    def visitExecuteStatement(self, ctx:SQLParser.ExecuteStatementContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#executeVarList.
    def visitExecuteVarList(self, ctx:SQLParser.ExecuteVarListContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#cloneStatement.
    def visitCloneStatement(self, ctx:SQLParser.CloneStatementContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#dataDirSSL.
    def visitDataDirSSL(self, ctx:SQLParser.DataDirSSLContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#ssl.
    def visitSsl(self, ctx:SQLParser.SslContext):
        return self.visitChildren(ctx)
    # NOTE(review): auto-generated default visitors for account-management and
    # GRANT/REVOKE rules — each delegates to visitChildren(ctx).
    # Visit a parse tree produced by SQLParser#accountManagementStatement.
    def visitAccountManagementStatement(self, ctx:SQLParser.AccountManagementStatementContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#alterUser.
    def visitAlterUser(self, ctx:SQLParser.AlterUserContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#alterUserTail.
    def visitAlterUserTail(self, ctx:SQLParser.AlterUserTailContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#userFunction.
    def visitUserFunction(self, ctx:SQLParser.UserFunctionContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#createUser.
    def visitCreateUser(self, ctx:SQLParser.CreateUserContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#createUserTail.
    def visitCreateUserTail(self, ctx:SQLParser.CreateUserTailContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#defaultRoleClause.
    def visitDefaultRoleClause(self, ctx:SQLParser.DefaultRoleClauseContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#requireClause.
    def visitRequireClause(self, ctx:SQLParser.RequireClauseContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#connectOptions.
    def visitConnectOptions(self, ctx:SQLParser.ConnectOptionsContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#accountLockPasswordExpireOptions.
    def visitAccountLockPasswordExpireOptions(self, ctx:SQLParser.AccountLockPasswordExpireOptionsContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#dropUser.
    def visitDropUser(self, ctx:SQLParser.DropUserContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#grant.
    def visitGrant(self, ctx:SQLParser.GrantContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#grantTargetList.
    def visitGrantTargetList(self, ctx:SQLParser.GrantTargetListContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#grantOptions.
    def visitGrantOptions(self, ctx:SQLParser.GrantOptionsContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#exceptRoleList.
    def visitExceptRoleList(self, ctx:SQLParser.ExceptRoleListContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#withRoles.
    def visitWithRoles(self, ctx:SQLParser.WithRolesContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#grantAs.
    def visitGrantAs(self, ctx:SQLParser.GrantAsContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#versionedRequireClause.
    def visitVersionedRequireClause(self, ctx:SQLParser.VersionedRequireClauseContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#renameUser.
    def visitRenameUser(self, ctx:SQLParser.RenameUserContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#revoke.
    def visitRevoke(self, ctx:SQLParser.RevokeContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#onTypeTo.
    def visitOnTypeTo(self, ctx:SQLParser.OnTypeToContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#aclType.
    def visitAclType(self, ctx:SQLParser.AclTypeContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#roleOrPrivilegesList.
    def visitRoleOrPrivilegesList(self, ctx:SQLParser.RoleOrPrivilegesListContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#roleOrPrivilege.
    def visitRoleOrPrivilege(self, ctx:SQLParser.RoleOrPrivilegeContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#grantIdentifier.
    def visitGrantIdentifier(self, ctx:SQLParser.GrantIdentifierContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#requireList.
    def visitRequireList(self, ctx:SQLParser.RequireListContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#requireListElement.
    def visitRequireListElement(self, ctx:SQLParser.RequireListElementContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#grantOption.
    def visitGrantOption(self, ctx:SQLParser.GrantOptionContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#setRole.
    def visitSetRole(self, ctx:SQLParser.SetRoleContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#roleList.
    def visitRoleList(self, ctx:SQLParser.RoleListContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#role.
    def visitRole(self, ctx:SQLParser.RoleContext):
        return self.visitChildren(ctx)
    # NOTE(review): auto-generated default visitors for table administration
    # and SET-statement rules — each delegates to visitChildren(ctx).
    # Visit a parse tree produced by SQLParser#tableAdministrationStatement.
    def visitTableAdministrationStatement(self, ctx:SQLParser.TableAdministrationStatementContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#histogram.
    def visitHistogram(self, ctx:SQLParser.HistogramContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#checkOption.
    def visitCheckOption(self, ctx:SQLParser.CheckOptionContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#repairType.
    def visitRepairType(self, ctx:SQLParser.RepairTypeContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#installUninstallStatment.
    # (sic: "Statment" matches the grammar rule name — keep in sync with SQLParser.)
    def visitInstallUninstallStatment(self, ctx:SQLParser.InstallUninstallStatmentContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#setStatement.
    def visitSetStatement(self, ctx:SQLParser.SetStatementContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#startOptionValueList.
    def visitStartOptionValueList(self, ctx:SQLParser.StartOptionValueListContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#transactionCharacteristics.
    def visitTransactionCharacteristics(self, ctx:SQLParser.TransactionCharacteristicsContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#transactionAccessMode.
    def visitTransactionAccessMode(self, ctx:SQLParser.TransactionAccessModeContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#isolationLevel.
    def visitIsolationLevel(self, ctx:SQLParser.IsolationLevelContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#optionValueListContinued.
    def visitOptionValueListContinued(self, ctx:SQLParser.OptionValueListContinuedContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#optionValueNoOptionType.
    def visitOptionValueNoOptionType(self, ctx:SQLParser.OptionValueNoOptionTypeContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#optionValue.
    def visitOptionValue(self, ctx:SQLParser.OptionValueContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#setSystemVariable.
    def visitSetSystemVariable(self, ctx:SQLParser.SetSystemVariableContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#startOptionValueListFollowingOptionType.
    def visitStartOptionValueListFollowingOptionType(self, ctx:SQLParser.StartOptionValueListFollowingOptionTypeContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#optionValueFollowingOptionType.
    def visitOptionValueFollowingOptionType(self, ctx:SQLParser.OptionValueFollowingOptionTypeContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#setExprOrDefault.
    def visitSetExprOrDefault(self, ctx:SQLParser.SetExprOrDefaultContext):
        return self.visitChildren(ctx)
    # NOTE(review): auto-generated default visitors for SHOW, administrative,
    # cache/flush/preload and resource-group rules — each delegates to
    # visitChildren(ctx).
    # Visit a parse tree produced by SQLParser#showStatement.
    def visitShowStatement(self, ctx:SQLParser.ShowStatementContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#showCommandType.
    def visitShowCommandType(self, ctx:SQLParser.ShowCommandTypeContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#nonBlocking.
    def visitNonBlocking(self, ctx:SQLParser.NonBlockingContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#fromOrIn.
    def visitFromOrIn(self, ctx:SQLParser.FromOrInContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#inDb.
    def visitInDb(self, ctx:SQLParser.InDbContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#profileType.
    def visitProfileType(self, ctx:SQLParser.ProfileTypeContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#otherAdministrativeStatement.
    def visitOtherAdministrativeStatement(self, ctx:SQLParser.OtherAdministrativeStatementContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#keyCacheListOrParts.
    def visitKeyCacheListOrParts(self, ctx:SQLParser.KeyCacheListOrPartsContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#keyCacheList.
    def visitKeyCacheList(self, ctx:SQLParser.KeyCacheListContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#assignToKeycache.
    def visitAssignToKeycache(self, ctx:SQLParser.AssignToKeycacheContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#assignToKeycachePartition.
    def visitAssignToKeycachePartition(self, ctx:SQLParser.AssignToKeycachePartitionContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#cacheKeyList.
    def visitCacheKeyList(self, ctx:SQLParser.CacheKeyListContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#keyUsageElement.
    def visitKeyUsageElement(self, ctx:SQLParser.KeyUsageElementContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#keyUsageList.
    def visitKeyUsageList(self, ctx:SQLParser.KeyUsageListContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#flushOption.
    def visitFlushOption(self, ctx:SQLParser.FlushOptionContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#logType.
    def visitLogType(self, ctx:SQLParser.LogTypeContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#flushTables.
    def visitFlushTables(self, ctx:SQLParser.FlushTablesContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#flushTablesOptions.
    def visitFlushTablesOptions(self, ctx:SQLParser.FlushTablesOptionsContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#preloadTail.
    def visitPreloadTail(self, ctx:SQLParser.PreloadTailContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#preloadList.
    def visitPreloadList(self, ctx:SQLParser.PreloadListContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#preloadKeys.
    def visitPreloadKeys(self, ctx:SQLParser.PreloadKeysContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#adminPartition.
    def visitAdminPartition(self, ctx:SQLParser.AdminPartitionContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#resourceGroupManagement.
    def visitResourceGroupManagement(self, ctx:SQLParser.ResourceGroupManagementContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#createResourceGroup.
    def visitCreateResourceGroup(self, ctx:SQLParser.CreateResourceGroupContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#resourceGroupVcpuList.
    def visitResourceGroupVcpuList(self, ctx:SQLParser.ResourceGroupVcpuListContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#vcpuNumOrRange.
    def visitVcpuNumOrRange(self, ctx:SQLParser.VcpuNumOrRangeContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#resourceGroupPriority.
    def visitResourceGroupPriority(self, ctx:SQLParser.ResourceGroupPriorityContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#resourceGroupEnableDisable.
    def visitResourceGroupEnableDisable(self, ctx:SQLParser.ResourceGroupEnableDisableContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#alterResourceGroup.
    def visitAlterResourceGroup(self, ctx:SQLParser.AlterResourceGroupContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#setResourceGroup.
    def visitSetResourceGroup(self, ctx:SQLParser.SetResourceGroupContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#threadIdList.
    def visitThreadIdList(self, ctx:SQLParser.ThreadIdListContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#dropResourceGroup.
    def visitDropResourceGroup(self, ctx:SQLParser.DropResourceGroupContext):
        return self.visitChildren(ctx)
    # NOTE(review): auto-generated default visitors for utility statements and
    # the boolean/comparison/predicate expression rules — each delegates to
    # visitChildren(ctx).
    # Visit a parse tree produced by SQLParser#utilityStatement.
    def visitUtilityStatement(self, ctx:SQLParser.UtilityStatementContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#describeStatement.
    def visitDescribeStatement(self, ctx:SQLParser.DescribeStatementContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#explainStatement.
    def visitExplainStatement(self, ctx:SQLParser.ExplainStatementContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#explainableStatement.
    def visitExplainableStatement(self, ctx:SQLParser.ExplainableStatementContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#helpCommand.
    def visitHelpCommand(self, ctx:SQLParser.HelpCommandContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#useCommand.
    def visitUseCommand(self, ctx:SQLParser.UseCommandContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#restartServer.
    def visitRestartServer(self, ctx:SQLParser.RestartServerContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#exprOr.
    def visitExprOr(self, ctx:SQLParser.ExprOrContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#exprNot.
    def visitExprNot(self, ctx:SQLParser.ExprNotContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#exprIs.
    def visitExprIs(self, ctx:SQLParser.ExprIsContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#exprAnd.
    def visitExprAnd(self, ctx:SQLParser.ExprAndContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#exprXor.
    def visitExprXor(self, ctx:SQLParser.ExprXorContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#primaryExprPredicate.
    def visitPrimaryExprPredicate(self, ctx:SQLParser.PrimaryExprPredicateContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#primaryExprCompare.
    def visitPrimaryExprCompare(self, ctx:SQLParser.PrimaryExprCompareContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#primaryExprAllAny.
    def visitPrimaryExprAllAny(self, ctx:SQLParser.PrimaryExprAllAnyContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#primaryExprIsNull.
    def visitPrimaryExprIsNull(self, ctx:SQLParser.PrimaryExprIsNullContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#compOp.
    def visitCompOp(self, ctx:SQLParser.CompOpContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#predicate.
    def visitPredicate(self, ctx:SQLParser.PredicateContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#predicateExprIn.
    def visitPredicateExprIn(self, ctx:SQLParser.PredicateExprInContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#predicateExprBetween.
    def visitPredicateExprBetween(self, ctx:SQLParser.PredicateExprBetweenContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#predicateExprLike.
    def visitPredicateExprLike(self, ctx:SQLParser.PredicateExprLikeContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#predicateExprRegex.
    def visitPredicateExprRegex(self, ctx:SQLParser.PredicateExprRegexContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#bitExpr.
    def visitBitExpr(self, ctx:SQLParser.BitExprContext):
        return self.visitChildren(ctx)
    # NOTE(review): auto-generated default visitors for the simpleExpr* labeled
    # alternatives and aggregate/window/JSON function rules — each delegates to
    # visitChildren(ctx).
    # Visit a parse tree produced by SQLParser#simpleExprConvert.
    def visitSimpleExprConvert(self, ctx:SQLParser.SimpleExprConvertContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#simpleExprVariable.
    def visitSimpleExprVariable(self, ctx:SQLParser.SimpleExprVariableContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#simpleExprCast.
    def visitSimpleExprCast(self, ctx:SQLParser.SimpleExprCastContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#simpleExprUnary.
    def visitSimpleExprUnary(self, ctx:SQLParser.SimpleExprUnaryContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#simpleExprOdbc.
    def visitSimpleExprOdbc(self, ctx:SQLParser.SimpleExprOdbcContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#simpleExprRuntimeFunction.
    def visitSimpleExprRuntimeFunction(self, ctx:SQLParser.SimpleExprRuntimeFunctionContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#simpleExprFunction.
    def visitSimpleExprFunction(self, ctx:SQLParser.SimpleExprFunctionContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#simpleExprCollate.
    def visitSimpleExprCollate(self, ctx:SQLParser.SimpleExprCollateContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#simpleExprMatch.
    def visitSimpleExprMatch(self, ctx:SQLParser.SimpleExprMatchContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#simpleExprWindowingFunction.
    def visitSimpleExprWindowingFunction(self, ctx:SQLParser.SimpleExprWindowingFunctionContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#simpleExprBinary.
    def visitSimpleExprBinary(self, ctx:SQLParser.SimpleExprBinaryContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#simpleExprColumnRef.
    def visitSimpleExprColumnRef(self, ctx:SQLParser.SimpleExprColumnRefContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#simpleExprParamMarker.
    def visitSimpleExprParamMarker(self, ctx:SQLParser.SimpleExprParamMarkerContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#simpleExprSum.
    def visitSimpleExprSum(self, ctx:SQLParser.SimpleExprSumContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#simpleExprConvertUsing.
    def visitSimpleExprConvertUsing(self, ctx:SQLParser.SimpleExprConvertUsingContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#simpleExprSubQuery.
    def visitSimpleExprSubQuery(self, ctx:SQLParser.SimpleExprSubQueryContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#simpleExprGroupingOperation.
    def visitSimpleExprGroupingOperation(self, ctx:SQLParser.SimpleExprGroupingOperationContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#simpleExprNot.
    def visitSimpleExprNot(self, ctx:SQLParser.SimpleExprNotContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#simpleExprValues.
    def visitSimpleExprValues(self, ctx:SQLParser.SimpleExprValuesContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#simpleExprDefault.
    def visitSimpleExprDefault(self, ctx:SQLParser.SimpleExprDefaultContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#simpleExprList.
    def visitSimpleExprList(self, ctx:SQLParser.SimpleExprListContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#simpleExprInterval.
    def visitSimpleExprInterval(self, ctx:SQLParser.SimpleExprIntervalContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#simpleExprCase.
    def visitSimpleExprCase(self, ctx:SQLParser.SimpleExprCaseContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#simpleExprConcat.
    def visitSimpleExprConcat(self, ctx:SQLParser.SimpleExprConcatContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#simpleExprLiteral.
    def visitSimpleExprLiteral(self, ctx:SQLParser.SimpleExprLiteralContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#arrayCast.
    def visitArrayCast(self, ctx:SQLParser.ArrayCastContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#jsonOperator.
    def visitJsonOperator(self, ctx:SQLParser.JsonOperatorContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#sumExpr.
    def visitSumExpr(self, ctx:SQLParser.SumExprContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#groupingOperation.
    def visitGroupingOperation(self, ctx:SQLParser.GroupingOperationContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#windowFunctionCall.
    def visitWindowFunctionCall(self, ctx:SQLParser.WindowFunctionCallContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#windowingClause.
    def visitWindowingClause(self, ctx:SQLParser.WindowingClauseContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#leadLagInfo.
    def visitLeadLagInfo(self, ctx:SQLParser.LeadLagInfoContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#nullTreatment.
    def visitNullTreatment(self, ctx:SQLParser.NullTreatmentContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#jsonFunction.
    def visitJsonFunction(self, ctx:SQLParser.JsonFunctionContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#inSumExpr.
    def visitInSumExpr(self, ctx:SQLParser.InSumExprContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by SQLParser#identListArg.
    def visitIdentListArg(self, ctx:SQLParser.IdentListArgContext):
        return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#identList.
def visitIdentList(self, ctx:SQLParser.IdentListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#fulltextOptions.
def visitFulltextOptions(self, ctx:SQLParser.FulltextOptionsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#runtimeFunctionCall.
def visitRuntimeFunctionCall(self, ctx:SQLParser.RuntimeFunctionCallContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#geometryFunction.
def visitGeometryFunction(self, ctx:SQLParser.GeometryFunctionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#timeFunctionParameters.
def visitTimeFunctionParameters(self, ctx:SQLParser.TimeFunctionParametersContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#fractionalPrecision.
def visitFractionalPrecision(self, ctx:SQLParser.FractionalPrecisionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#weightStringLevels.
def visitWeightStringLevels(self, ctx:SQLParser.WeightStringLevelsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#weightStringLevelListItem.
def visitWeightStringLevelListItem(self, ctx:SQLParser.WeightStringLevelListItemContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#dateTimeTtype.
def visitDateTimeTtype(self, ctx:SQLParser.DateTimeTtypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#trimFunction.
def visitTrimFunction(self, ctx:SQLParser.TrimFunctionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#substringFunction.
def visitSubstringFunction(self, ctx:SQLParser.SubstringFunctionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#functionCall.
def visitFunctionCall(self, ctx:SQLParser.FunctionCallContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#udfExprList.
def visitUdfExprList(self, ctx:SQLParser.UdfExprListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#udfExpr.
def visitUdfExpr(self, ctx:SQLParser.UdfExprContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#variable.
def visitVariable(self, ctx:SQLParser.VariableContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#userVariable.
def visitUserVariable(self, ctx:SQLParser.UserVariableContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#systemVariable.
def visitSystemVariable(self, ctx:SQLParser.SystemVariableContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#internalVariableName.
def visitInternalVariableName(self, ctx:SQLParser.InternalVariableNameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#whenExpression.
def visitWhenExpression(self, ctx:SQLParser.WhenExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#thenExpression.
def visitThenExpression(self, ctx:SQLParser.ThenExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#elseExpression.
def visitElseExpression(self, ctx:SQLParser.ElseExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#castType.
def visitCastType(self, ctx:SQLParser.CastTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#exprList.
def visitExprList(self, ctx:SQLParser.ExprListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#charset.
def visitCharset(self, ctx:SQLParser.CharsetContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#notRule.
def visitNotRule(self, ctx:SQLParser.NotRuleContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#not2Rule.
def visitNot2Rule(self, ctx:SQLParser.Not2RuleContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#interval.
def visitInterval(self, ctx:SQLParser.IntervalContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#intervalTimeStamp.
def visitIntervalTimeStamp(self, ctx:SQLParser.IntervalTimeStampContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#exprListWithParentheses.
def visitExprListWithParentheses(self, ctx:SQLParser.ExprListWithParenthesesContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#exprWithParentheses.
def visitExprWithParentheses(self, ctx:SQLParser.ExprWithParenthesesContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#simpleExprWithParentheses.
def visitSimpleExprWithParentheses(self, ctx:SQLParser.SimpleExprWithParenthesesContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#orderList.
def visitOrderList(self, ctx:SQLParser.OrderListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#orderExpression.
def visitOrderExpression(self, ctx:SQLParser.OrderExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#groupList.
def visitGroupList(self, ctx:SQLParser.GroupListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#groupingExpression.
def visitGroupingExpression(self, ctx:SQLParser.GroupingExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#channel.
def visitChannel(self, ctx:SQLParser.ChannelContext):
return self.visitChildren(ctx)
# NOTE(review): Auto-generated ANTLR4 visitor stubs (presumably from the SQLParser
# grammar's -visitor output — confirm). Each method delegates to visitChildren(ctx);
# subclass and override instead of editing these in place.
# Visit a parse tree produced by SQLParser#compoundStatement.
def visitCompoundStatement(self, ctx:SQLParser.CompoundStatementContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#returnStatement.
def visitReturnStatement(self, ctx:SQLParser.ReturnStatementContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#ifStatement.
def visitIfStatement(self, ctx:SQLParser.IfStatementContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#ifBody.
def visitIfBody(self, ctx:SQLParser.IfBodyContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#thenStatement.
def visitThenStatement(self, ctx:SQLParser.ThenStatementContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#compoundStatementList.
def visitCompoundStatementList(self, ctx:SQLParser.CompoundStatementListContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#caseStatement.
def visitCaseStatement(self, ctx:SQLParser.CaseStatementContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#elseStatement.
def visitElseStatement(self, ctx:SQLParser.ElseStatementContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#labeledBlock.
def visitLabeledBlock(self, ctx:SQLParser.LabeledBlockContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#unlabeledBlock.
def visitUnlabeledBlock(self, ctx:SQLParser.UnlabeledBlockContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#label.
def visitLabel(self, ctx:SQLParser.LabelContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#beginEndBlock.
def visitBeginEndBlock(self, ctx:SQLParser.BeginEndBlockContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#labeledControl.
def visitLabeledControl(self, ctx:SQLParser.LabeledControlContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#unlabeledControl.
def visitUnlabeledControl(self, ctx:SQLParser.UnlabeledControlContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#loopBlock.
def visitLoopBlock(self, ctx:SQLParser.LoopBlockContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#whileDoBlock.
def visitWhileDoBlock(self, ctx:SQLParser.WhileDoBlockContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#repeatUntilBlock.
def visitRepeatUntilBlock(self, ctx:SQLParser.RepeatUntilBlockContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#spDeclarations.
def visitSpDeclarations(self, ctx:SQLParser.SpDeclarationsContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#spDeclaration.
def visitSpDeclaration(self, ctx:SQLParser.SpDeclarationContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#variableDeclaration.
def visitVariableDeclaration(self, ctx:SQLParser.VariableDeclarationContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#conditionDeclaration.
def visitConditionDeclaration(self, ctx:SQLParser.ConditionDeclarationContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#spCondition.
def visitSpCondition(self, ctx:SQLParser.SpConditionContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#sqlstate.
def visitSqlstate(self, ctx:SQLParser.SqlstateContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#handlerDeclaration.
def visitHandlerDeclaration(self, ctx:SQLParser.HandlerDeclarationContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#handlerCondition.
def visitHandlerCondition(self, ctx:SQLParser.HandlerConditionContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#cursorDeclaration.
def visitCursorDeclaration(self, ctx:SQLParser.CursorDeclarationContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#iterateStatement.
def visitIterateStatement(self, ctx:SQLParser.IterateStatementContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#leaveStatement.
def visitLeaveStatement(self, ctx:SQLParser.LeaveStatementContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#getDiagnostics.
def visitGetDiagnostics(self, ctx:SQLParser.GetDiagnosticsContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#signalAllowedExpr.
def visitSignalAllowedExpr(self, ctx:SQLParser.SignalAllowedExprContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#statementInformationItem.
def visitStatementInformationItem(self, ctx:SQLParser.StatementInformationItemContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#conditionInformationItem.
def visitConditionInformationItem(self, ctx:SQLParser.ConditionInformationItemContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#signalInformationItemName.
def visitSignalInformationItemName(self, ctx:SQLParser.SignalInformationItemNameContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#signalStatement.
def visitSignalStatement(self, ctx:SQLParser.SignalStatementContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#resignalStatement.
def visitResignalStatement(self, ctx:SQLParser.ResignalStatementContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#signalInformationItem.
def visitSignalInformationItem(self, ctx:SQLParser.SignalInformationItemContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#cursorOpen.
def visitCursorOpen(self, ctx:SQLParser.CursorOpenContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#cursorClose.
def visitCursorClose(self, ctx:SQLParser.CursorCloseContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#cursorFetch.
def visitCursorFetch(self, ctx:SQLParser.CursorFetchContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#schedule.
def visitSchedule(self, ctx:SQLParser.ScheduleContext):
    return self.visitChildren(ctx)
# NOTE(review): Auto-generated ANTLR4 visitor stubs (presumably from the SQLParser
# grammar's -visitor output — confirm). Each method delegates to visitChildren(ctx);
# subclass and override instead of editing these in place.
# Visit a parse tree produced by SQLParser#columnDefinition.
def visitColumnDefinition(self, ctx:SQLParser.ColumnDefinitionContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#checkOrReferences.
def visitCheckOrReferences(self, ctx:SQLParser.CheckOrReferencesContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#checkConstraint.
def visitCheckConstraint(self, ctx:SQLParser.CheckConstraintContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#constraintEnforcement.
def visitConstraintEnforcement(self, ctx:SQLParser.ConstraintEnforcementContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#tableConstraintDef.
def visitTableConstraintDef(self, ctx:SQLParser.TableConstraintDefContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#constraintName.
def visitConstraintName(self, ctx:SQLParser.ConstraintNameContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#fieldDefinition.
def visitFieldDefinition(self, ctx:SQLParser.FieldDefinitionContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#columnAttribute.
def visitColumnAttribute(self, ctx:SQLParser.ColumnAttributeContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#columnFormat.
def visitColumnFormat(self, ctx:SQLParser.ColumnFormatContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#storageMedia.
def visitStorageMedia(self, ctx:SQLParser.StorageMediaContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#gcolAttribute.
def visitGcolAttribute(self, ctx:SQLParser.GcolAttributeContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#references.
def visitReferences(self, ctx:SQLParser.ReferencesContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#deleteOption.
def visitDeleteOption(self, ctx:SQLParser.DeleteOptionContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#keyList.
def visitKeyList(self, ctx:SQLParser.KeyListContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#keyPart.
def visitKeyPart(self, ctx:SQLParser.KeyPartContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#keyListWithExpression.
def visitKeyListWithExpression(self, ctx:SQLParser.KeyListWithExpressionContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#keyPartOrExpression.
def visitKeyPartOrExpression(self, ctx:SQLParser.KeyPartOrExpressionContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#keyListVariants.
def visitKeyListVariants(self, ctx:SQLParser.KeyListVariantsContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#indexType.
def visitIndexType(self, ctx:SQLParser.IndexTypeContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#indexOption.
def visitIndexOption(self, ctx:SQLParser.IndexOptionContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#commonIndexOption.
def visitCommonIndexOption(self, ctx:SQLParser.CommonIndexOptionContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#visibility.
def visitVisibility(self, ctx:SQLParser.VisibilityContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#indexTypeClause.
def visitIndexTypeClause(self, ctx:SQLParser.IndexTypeClauseContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#fulltextIndexOption.
def visitFulltextIndexOption(self, ctx:SQLParser.FulltextIndexOptionContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#spatialIndexOption.
def visitSpatialIndexOption(self, ctx:SQLParser.SpatialIndexOptionContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#dataTypeDefinition.
def visitDataTypeDefinition(self, ctx:SQLParser.DataTypeDefinitionContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#dataType.
def visitDataType(self, ctx:SQLParser.DataTypeContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#nchar.
def visitNchar(self, ctx:SQLParser.NcharContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#realType.
def visitRealType(self, ctx:SQLParser.RealTypeContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#fieldLength.
def visitFieldLength(self, ctx:SQLParser.FieldLengthContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#fieldOptions.
def visitFieldOptions(self, ctx:SQLParser.FieldOptionsContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#charsetWithOptBinary.
def visitCharsetWithOptBinary(self, ctx:SQLParser.CharsetWithOptBinaryContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#ascii.
def visitAscii(self, ctx:SQLParser.AsciiContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#unicode.
def visitUnicode(self, ctx:SQLParser.UnicodeContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#wsNumCodepoints.
def visitWsNumCodepoints(self, ctx:SQLParser.WsNumCodepointsContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#typeDatetimePrecision.
def visitTypeDatetimePrecision(self, ctx:SQLParser.TypeDatetimePrecisionContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#charsetName.
def visitCharsetName(self, ctx:SQLParser.CharsetNameContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#collationName.
def visitCollationName(self, ctx:SQLParser.CollationNameContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#createTableOptions.
def visitCreateTableOptions(self, ctx:SQLParser.CreateTableOptionsContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#createTableOptionsSpaceSeparated.
def visitCreateTableOptionsSpaceSeparated(self, ctx:SQLParser.CreateTableOptionsSpaceSeparatedContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#createTableOption.
def visitCreateTableOption(self, ctx:SQLParser.CreateTableOptionContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#ternaryOption.
def visitTernaryOption(self, ctx:SQLParser.TernaryOptionContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#defaultCollation.
def visitDefaultCollation(self, ctx:SQLParser.DefaultCollationContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#defaultEncryption.
def visitDefaultEncryption(self, ctx:SQLParser.DefaultEncryptionContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#defaultCharset.
def visitDefaultCharset(self, ctx:SQLParser.DefaultCharsetContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#partitionClause.
def visitPartitionClause(self, ctx:SQLParser.PartitionClauseContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#partitionDefKey.
def visitPartitionDefKey(self, ctx:SQLParser.PartitionDefKeyContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#partitionDefHash.
def visitPartitionDefHash(self, ctx:SQLParser.PartitionDefHashContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#partitionDefRangeList.
def visitPartitionDefRangeList(self, ctx:SQLParser.PartitionDefRangeListContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#subPartitions.
def visitSubPartitions(self, ctx:SQLParser.SubPartitionsContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#partitionKeyAlgorithm.
def visitPartitionKeyAlgorithm(self, ctx:SQLParser.PartitionKeyAlgorithmContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#partitionDefinitions.
def visitPartitionDefinitions(self, ctx:SQLParser.PartitionDefinitionsContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#partitionDefinition.
def visitPartitionDefinition(self, ctx:SQLParser.PartitionDefinitionContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#partitionValuesIn.
def visitPartitionValuesIn(self, ctx:SQLParser.PartitionValuesInContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#partitionOption.
def visitPartitionOption(self, ctx:SQLParser.PartitionOptionContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#subpartitionDefinition.
def visitSubpartitionDefinition(self, ctx:SQLParser.SubpartitionDefinitionContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#partitionValueItemListParen.
def visitPartitionValueItemListParen(self, ctx:SQLParser.PartitionValueItemListParenContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#partitionValueItem.
def visitPartitionValueItem(self, ctx:SQLParser.PartitionValueItemContext):
    return self.visitChildren(ctx)
# NOTE(review): Auto-generated ANTLR4 visitor stubs (presumably from the SQLParser
# grammar's -visitor output — confirm). Each method delegates to visitChildren(ctx);
# subclass and override instead of editing these in place.
# Visit a parse tree produced by SQLParser#definerClause.
def visitDefinerClause(self, ctx:SQLParser.DefinerClauseContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#ifExists.
def visitIfExists(self, ctx:SQLParser.IfExistsContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#ifNotExists.
def visitIfNotExists(self, ctx:SQLParser.IfNotExistsContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#procedureParameter.
def visitProcedureParameter(self, ctx:SQLParser.ProcedureParameterContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#functionParameter.
def visitFunctionParameter(self, ctx:SQLParser.FunctionParameterContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#collate.
def visitCollate(self, ctx:SQLParser.CollateContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#typeWithOptCollate.
def visitTypeWithOptCollate(self, ctx:SQLParser.TypeWithOptCollateContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#schemaIdentifierPair.
def visitSchemaIdentifierPair(self, ctx:SQLParser.SchemaIdentifierPairContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#viewRefList.
def visitViewRefList(self, ctx:SQLParser.ViewRefListContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#updateList.
def visitUpdateList(self, ctx:SQLParser.UpdateListContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#updateElement.
def visitUpdateElement(self, ctx:SQLParser.UpdateElementContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#charsetClause.
def visitCharsetClause(self, ctx:SQLParser.CharsetClauseContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#fieldsClause.
def visitFieldsClause(self, ctx:SQLParser.FieldsClauseContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#fieldTerm.
def visitFieldTerm(self, ctx:SQLParser.FieldTermContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#linesClause.
def visitLinesClause(self, ctx:SQLParser.LinesClauseContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#lineTerm.
def visitLineTerm(self, ctx:SQLParser.LineTermContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#userList.
def visitUserList(self, ctx:SQLParser.UserListContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#createUserList.
def visitCreateUserList(self, ctx:SQLParser.CreateUserListContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#alterUserList.
def visitAlterUserList(self, ctx:SQLParser.AlterUserListContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#createUserEntry.
def visitCreateUserEntry(self, ctx:SQLParser.CreateUserEntryContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#alterUserEntry.
def visitAlterUserEntry(self, ctx:SQLParser.AlterUserEntryContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#retainCurrentPassword.
def visitRetainCurrentPassword(self, ctx:SQLParser.RetainCurrentPasswordContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#discardOldPassword.
def visitDiscardOldPassword(self, ctx:SQLParser.DiscardOldPasswordContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#replacePassword.
def visitReplacePassword(self, ctx:SQLParser.ReplacePasswordContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#userIdentifierOrText.
def visitUserIdentifierOrText(self, ctx:SQLParser.UserIdentifierOrTextContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#user.
def visitUser(self, ctx:SQLParser.UserContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#likeClause.
def visitLikeClause(self, ctx:SQLParser.LikeClauseContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#likeOrWhere.
def visitLikeOrWhere(self, ctx:SQLParser.LikeOrWhereContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#onlineOption.
def visitOnlineOption(self, ctx:SQLParser.OnlineOptionContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#noWriteToBinLog.
def visitNoWriteToBinLog(self, ctx:SQLParser.NoWriteToBinLogContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#usePartition.
def visitUsePartition(self, ctx:SQLParser.UsePartitionContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#fieldIdentifier.
def visitFieldIdentifier(self, ctx:SQLParser.FieldIdentifierContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#columnName.
def visitColumnName(self, ctx:SQLParser.ColumnNameContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#columnInternalRef.
def visitColumnInternalRef(self, ctx:SQLParser.ColumnInternalRefContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#columnInternalRefList.
def visitColumnInternalRefList(self, ctx:SQLParser.ColumnInternalRefListContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#columnRef.
def visitColumnRef(self, ctx:SQLParser.ColumnRefContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#insertIdentifier.
def visitInsertIdentifier(self, ctx:SQLParser.InsertIdentifierContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#indexName.
def visitIndexName(self, ctx:SQLParser.IndexNameContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#indexRef.
def visitIndexRef(self, ctx:SQLParser.IndexRefContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#tableWild.
def visitTableWild(self, ctx:SQLParser.TableWildContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#schemaName.
def visitSchemaName(self, ctx:SQLParser.SchemaNameContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#schemaRef.
def visitSchemaRef(self, ctx:SQLParser.SchemaRefContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#procedureName.
def visitProcedureName(self, ctx:SQLParser.ProcedureNameContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#procedureRef.
def visitProcedureRef(self, ctx:SQLParser.ProcedureRefContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#functionName.
def visitFunctionName(self, ctx:SQLParser.FunctionNameContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#functionRef.
def visitFunctionRef(self, ctx:SQLParser.FunctionRefContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#triggerName.
def visitTriggerName(self, ctx:SQLParser.TriggerNameContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#triggerRef.
def visitTriggerRef(self, ctx:SQLParser.TriggerRefContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#viewName.
def visitViewName(self, ctx:SQLParser.ViewNameContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#viewRef.
def visitViewRef(self, ctx:SQLParser.ViewRefContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#tablespaceName.
def visitTablespaceName(self, ctx:SQLParser.TablespaceNameContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#tablespaceRef.
def visitTablespaceRef(self, ctx:SQLParser.TablespaceRefContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#logfileGroupName.
def visitLogfileGroupName(self, ctx:SQLParser.LogfileGroupNameContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#logfileGroupRef.
def visitLogfileGroupRef(self, ctx:SQLParser.LogfileGroupRefContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#eventName.
def visitEventName(self, ctx:SQLParser.EventNameContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#eventRef.
def visitEventRef(self, ctx:SQLParser.EventRefContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#udfName.
def visitUdfName(self, ctx:SQLParser.UdfNameContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#serverName.
def visitServerName(self, ctx:SQLParser.ServerNameContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#serverRef.
def visitServerRef(self, ctx:SQLParser.ServerRefContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#engineRef.
def visitEngineRef(self, ctx:SQLParser.EngineRefContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#tableName.
def visitTableName(self, ctx:SQLParser.TableNameContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#filterTableRef.
def visitFilterTableRef(self, ctx:SQLParser.FilterTableRefContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#tableRefWithWildcard.
def visitTableRefWithWildcard(self, ctx:SQLParser.TableRefWithWildcardContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#tableRef.
def visitTableRef(self, ctx:SQLParser.TableRefContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#tableRefList.
def visitTableRefList(self, ctx:SQLParser.TableRefListContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#tableAliasRefList.
def visitTableAliasRefList(self, ctx:SQLParser.TableAliasRefListContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#parameterName.
def visitParameterName(self, ctx:SQLParser.ParameterNameContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#labelIdentifier.
def visitLabelIdentifier(self, ctx:SQLParser.LabelIdentifierContext):
    return self.visitChildren(ctx)

# Visit a parse tree produced by SQLParser#labelRef.
def visitLabelRef(self, ctx:SQLParser.LabelRefContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#roleIdentifier.
def visitRoleIdentifier(self, ctx:SQLParser.RoleIdentifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#roleRef.
def visitRoleRef(self, ctx:SQLParser.RoleRefContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#pluginRef.
def visitPluginRef(self, ctx:SQLParser.PluginRefContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#componentRef.
def visitComponentRef(self, ctx:SQLParser.ComponentRefContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#resourceGroupRef.
def visitResourceGroupRef(self, ctx:SQLParser.ResourceGroupRefContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#windowName.
def visitWindowName(self, ctx:SQLParser.WindowNameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#pureIdentifier.
def visitPureIdentifier(self, ctx:SQLParser.PureIdentifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#identifier.
def visitIdentifier(self, ctx:SQLParser.IdentifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#identifierList.
def visitIdentifierList(self, ctx:SQLParser.IdentifierListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#identifierListWithParentheses.
def visitIdentifierListWithParentheses(self, ctx:SQLParser.IdentifierListWithParenthesesContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#qualifiedIdentifier.
def visitQualifiedIdentifier(self, ctx:SQLParser.QualifiedIdentifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#simpleIdentifier.
def visitSimpleIdentifier(self, ctx:SQLParser.SimpleIdentifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#dotIdentifier.
def visitDotIdentifier(self, ctx:SQLParser.DotIdentifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#ulong_number.
def visitUlong_number(self, ctx:SQLParser.Ulong_numberContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#real_ulong_number.
def visitReal_ulong_number(self, ctx:SQLParser.Real_ulong_numberContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#ulonglong_number.
def visitUlonglong_number(self, ctx:SQLParser.Ulonglong_numberContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#real_ulonglong_number.
def visitReal_ulonglong_number(self, ctx:SQLParser.Real_ulonglong_numberContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#literal.
def visitLiteral(self, ctx:SQLParser.LiteralContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#signedLiteral.
def visitSignedLiteral(self, ctx:SQLParser.SignedLiteralContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#stringList.
def visitStringList(self, ctx:SQLParser.StringListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#textStringLiteral.
def visitTextStringLiteral(self, ctx:SQLParser.TextStringLiteralContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#textString.
def visitTextString(self, ctx:SQLParser.TextStringContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#textStringHash.
def visitTextStringHash(self, ctx:SQLParser.TextStringHashContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#textLiteral.
def visitTextLiteral(self, ctx:SQLParser.TextLiteralContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#textStringNoLinebreak.
def visitTextStringNoLinebreak(self, ctx:SQLParser.TextStringNoLinebreakContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#textStringLiteralList.
def visitTextStringLiteralList(self, ctx:SQLParser.TextStringLiteralListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#numLiteral.
def visitNumLiteral(self, ctx:SQLParser.NumLiteralContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#boolLiteral.
def visitBoolLiteral(self, ctx:SQLParser.BoolLiteralContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#nullLiteral.
def visitNullLiteral(self, ctx:SQLParser.NullLiteralContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#temporalLiteral.
def visitTemporalLiteral(self, ctx:SQLParser.TemporalLiteralContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#floatOptions.
def visitFloatOptions(self, ctx:SQLParser.FloatOptionsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#standardFloatOptions.
def visitStandardFloatOptions(self, ctx:SQLParser.StandardFloatOptionsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#precision.
def visitPrecision(self, ctx:SQLParser.PrecisionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#textOrIdentifier.
def visitTextOrIdentifier(self, ctx:SQLParser.TextOrIdentifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#lValueIdentifier.
def visitLValueIdentifier(self, ctx:SQLParser.LValueIdentifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#roleIdentifierOrText.
def visitRoleIdentifierOrText(self, ctx:SQLParser.RoleIdentifierOrTextContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#sizeNumber.
def visitSizeNumber(self, ctx:SQLParser.SizeNumberContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#parentheses.
def visitParentheses(self, ctx:SQLParser.ParenthesesContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#equal.
def visitEqual(self, ctx:SQLParser.EqualContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#optionType.
def visitOptionType(self, ctx:SQLParser.OptionTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#varIdentType.
def visitVarIdentType(self, ctx:SQLParser.VarIdentTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#setVarIdentType.
def visitSetVarIdentType(self, ctx:SQLParser.SetVarIdentTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#identifierKeyword.
def visitIdentifierKeyword(self, ctx:SQLParser.IdentifierKeywordContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#identifierKeywordsAmbiguous1RolesAndLabels.
def visitIdentifierKeywordsAmbiguous1RolesAndLabels(self, ctx:SQLParser.IdentifierKeywordsAmbiguous1RolesAndLabelsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#identifierKeywordsAmbiguous2Labels.
def visitIdentifierKeywordsAmbiguous2Labels(self, ctx:SQLParser.IdentifierKeywordsAmbiguous2LabelsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#labelKeyword.
def visitLabelKeyword(self, ctx:SQLParser.LabelKeywordContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#identifierKeywordsAmbiguous3Roles.
def visitIdentifierKeywordsAmbiguous3Roles(self, ctx:SQLParser.IdentifierKeywordsAmbiguous3RolesContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#identifierKeywordsUnambiguous.
def visitIdentifierKeywordsUnambiguous(self, ctx:SQLParser.IdentifierKeywordsUnambiguousContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#roleKeyword.
def visitRoleKeyword(self, ctx:SQLParser.RoleKeywordContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#lValueKeyword.
def visitLValueKeyword(self, ctx:SQLParser.LValueKeywordContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#identifierKeywordsAmbiguous4SystemVariables.
def visitIdentifierKeywordsAmbiguous4SystemVariables(self, ctx:SQLParser.IdentifierKeywordsAmbiguous4SystemVariablesContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#roleOrIdentifierKeyword.
def visitRoleOrIdentifierKeyword(self, ctx:SQLParser.RoleOrIdentifierKeywordContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SQLParser#roleOrLabelKeyword.
def visitRoleOrLabelKeyword(self, ctx:SQLParser.RoleOrLabelKeywordContext):
return self.visitChildren(ctx)
del SQLParser |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import logging
import pickle
import sys
import traceback
from typing import Callable, Dict, List, Tuple, Set
from languages import SExpressionLanguage, WebQSPSExpressionLanguage
from sparql_executor import exec_verify_sparql_xsl_only_fix_literal
from utils import ComputableEntityClass, Universe
from utils import Relation, Entity, Class
from worlds.grail_world import KBWorld
from functools import reduce
from copy import copy
logger = logging.getLogger(__name__)  # pylint: disable=invalid-name
# Sentinel returned by _validate_math to signal that decoding should stop on this branch.
EARLY_STOPPING = "EARLY STOPPING HERE"
# Wildcard entity/class marker that matches anything during slot/requirement checking.
ANY_STRING = "ANY"
# Upper bound on the size of an inferred slot set; larger sets collapse to the ANY wildcard.
MAXIMUM_PARAMETER_LEN = 1000
class TreeNode(object):
    """A node of the (partially built) s-expression tree.

    Nodes live in a shared ``node_table`` (id -> TreeNode) and reference their
    children by id, so a node can be replaced/filled in without rewiring
    parents.  ``node_val is None`` marks a placeholder still waiting to be
    filled by ``add_child``.
    """

    def __init__(self, node_slot, node_id, node_val, child=None):
        """
        Build a tree node.
        :param node_slot: value is either an operator name (AND_OP, ...) or the
            inferred slot (entity classes / class tuples) for a leaf
        :param node_id: unique index of this node inside the shared node table
        :param node_val: the production token this node represents; None marks
            a placeholder
        :param child: list of child node ids; for a leaf node it is empty.
        """
        self.node_id = node_id
        self.node_val = node_val
        # the slot is used to judge if the requirement is satisfied;
        # slot values may be altered frequently and slot values of
        # intermediate nodes only depend on the leaf nodes
        self.node_slot = node_slot
        self.requirement = None
        # BUG FIX: the original used a mutable default argument (child=[]),
        # which is shared by every TreeNode constructed without an explicit
        # child list; use a None sentinel and allocate a fresh list instead.
        self.child = child if child is not None else []

    def able_to_reduce(self, node_table) -> bool:
        """
        Test whether this subtree is complete (no placeholder left anywhere
        below), i.e. whether it can be reduced/collapsed.
        :return: True when every descendant has a concrete node_val.
        """
        # if any direct child is still a placeholder (non-terminal hole)
        if None in [node_table[ind].node_val for ind in self.child]:
            return False
        # all direct children filled; recurse into each subtree
        return all([node_table[ind].able_to_reduce(node_table) for ind in self.child])

    def add_child(self, action_node, node_table):
        """
        Fill the first placeholder child with *action_node*, inheriting the
        requirement that was attached to the placeholder.
        """
        ind = [node_table[ind].node_val for ind in self.child].index(None)
        # keep the original requirement
        action_node.requirement = node_table[self.child[ind]].requirement
        # keep the original reference
        self.child[ind] = action_node.node_id

    def update_slot(self, slot_val):
        """Overwrite this node's slot with a freshly inferred value."""
        self.node_slot = slot_val

    def export_sexpression(self, node_table):
        """
        Export this subtree as a standard s-expression string, e.g.
        ``(JOIN_ENT relation entity)``.
        """
        if len(self.child) == 0:
            # TODO: we do not use slot since it will be changed frequently
            return self.node_val
        else:
            lchild_slot = node_table[self.child[0]].export_sexpression(node_table)
            if len(self.child) == 1:
                return "({} {})".format(self.node_val, lchild_slot)
            else:
                rchild_slot = node_table[self.child[1]].export_sexpression(node_table)
                return "({} {} {})".format(self.node_val, lchild_slot, rchild_slot)
def satisfy_require(requirement: Set, value: Set):
    """
    Whether *value* is compatible with *requirement*: at least one element of
    value must also occur in requirement, with the ANY wildcard matching
    anything on either side (for tuples, per position).
    :param requirement: None, a set of str, or a set of 2-tuples
    :param value: a set with the same element type as requirement
    :return: True when compatible, otherwise False.
    """
    # an absent or empty requirement is always satisfied
    if requirement is None or len(requirement) == 0:
        return True
    assert isinstance(requirement, set)
    assert isinstance(value, set)
    # nothing on offer cannot satisfy a non-empty requirement
    if len(value) == 0:
        return False
    require_sample = next(iter(requirement))
    value_sample = next(iter(value))
    # heterogeneous element types never match
    if type(value_sample) != type(require_sample):
        return False
    # a (truthy) literal overlap is an immediate success
    if any(requirement & value):
        return True
    # past this point only the ANY wildcard can still produce a match
    if isinstance(value_sample, str):
        return ANY_STRING in requirement or ANY_STRING in value

    def _position_compatible(_val: Set, _require: Set):
        # ANY on either side matches everything at this tuple position
        if ANY_STRING in _require:
            return True
        if ANY_STRING in _val:
            return True
        return any(_val & _require)

    # tuple case: check the two positions independently
    require_zero, require_one = zip(*requirement)
    value_zero, value_one = zip(*value)
    return _position_compatible(set(value_zero), set(require_zero)) and \
        _position_compatible(set(value_one), set(require_one))
def _infer_reverse(entity_tuples: Set[Tuple]) -> Set[Tuple]:
# given the relation as parameter, return its reverse
return {(class_tuple[1], class_tuple[0]) for class_tuple in entity_tuples}
def _validate_join_ent(entity_tuples: Set[Tuple]) -> Set[str]:
return {example[1] for example in entity_tuples}
def _infer_join_ent(entity_tuples: Set[Tuple], entities: Set[str]) -> Set[str]:
    """
    Inner join for JOIN_ENT/JOIN_CLASS: keep the subject of every pair whose
    object is one of *entities* (or the ANY wildcard).  Oversized results
    degrade to the ANY wildcard to bound later computation.
    """
    joined_subjects = set()
    for pair in entity_tuples:
        if pair[1] in entities or pair[1] == ANY_STRING:
            joined_subjects.add(pair[0])
    if len(joined_subjects) > MAXIMUM_PARAMETER_LEN:
        return {ANY_STRING}
    return joined_subjects
def _validate_join_rel(entity_tuples: Set[Tuple]) -> Set[Tuple]:
return {(class_tuple[1], ANY_STRING) for class_tuple in entity_tuples}
def _infer_join_rel(entity_tuples1: Set[Tuple], entity_tuples2: Set[Tuple]) -> Set[Tuple]:
    """
    Slot of (JOIN_REL b1 b2): relational composition of the two relations'
    class-tuple slots.  A pair (a, c) is produced whenever (a, x) is in b1 and
    (x, c) is in b2, with the ANY wildcard matching any join key.  The result
    collapses to {(ANY, ANY)} when it contains the full wildcard or grows past
    MAXIMUM_PARAMETER_LEN.
    """
    # entity_tuples1: b1; entity_tuples2: b2
    # Inner join on: second element of items in b1 == first element of items in b2
    entity_tuples1_entities = {example[1] for example in entity_tuples1}
    entity_tuples2_entities = {example[0] for example in entity_tuples2}
    join_entities = entity_tuples1_entities & entity_tuples2_entities
    # A -> ANY composed with ANY -> B joins through the wildcard: A x B
    if ANY_STRING in entity_tuples1_entities or ANY_STRING in entity_tuples2_entities:
        join_entities.add(ANY_STRING)
    infer_entity_tuples = set()
    for out_example in entity_tuples1:
        # if ANY appears among b2's join keys, no b1 pair may be skipped
        if out_example[1] not in join_entities and ANY_STRING not in entity_tuples2_entities:
            continue
        for in_example in entity_tuples2:
            # a b1 pair ending in ANY joins with every b2 pair
            if in_example[0] not in join_entities and out_example[1] != ANY_STRING:
                continue
            infer_entity_tuples.add((out_example[0], in_example[1]))
    # cap the result size; full wildcard subsumes everything anyway
    if (ANY_STRING, ANY_STRING) in infer_entity_tuples or len(infer_entity_tuples) > MAXIMUM_PARAMETER_LEN:
        infer_entity_tuples = {(ANY_STRING, ANY_STRING)}
    return infer_entity_tuples
def _validate_math(entity_tuples: Set[Tuple]) -> Set[str]:
    """
    Requirement for the numeric argument of lt/le/gt/ge: the relation's object
    position must contain at least one (truthy) computable class, or the ANY
    wildcard.  If so, any computable class is acceptable; otherwise signal
    early stopping for this branch.
    """
    for example in entity_tuples:
        object_class = example[1]
        if object_class in ComputableEntityClass.values() or object_class == ANY_STRING:
            # the original also required truthiness (via any(...) over a list)
            if object_class:
                return ComputableEntityClass.values()
    return {EARLY_STOPPING}
def _infer_math(entity_tuples: Set[Tuple], numbers: Set[str]):
# entity_tuples: (x, v)
# numbers: n
# Return all x such that v inside (x,v) < / ≤ / > / ≥ n
return {example[0] for example in entity_tuples}
def _validate_and_op(entities: Set[str]) -> Set[str]:
    """
    Requirement for the second AND_OP argument: it must share at least one
    entity class with the first argument, so the first argument's classes are
    themselves the (minimum) requirement.
    """
    # at least the second argument should contain current entity class
    return entities
def _infer_and_op(entities1: Set[str], entities2: Set[str]) -> Set[str]:
    """
    Slot of an AND_OP node: the intersection of its children's entity classes,
    with the ANY wildcard on either side absorbing everything.
    """
    wildcard_present = ANY_STRING in entities1 or ANY_STRING in entities2
    if wildcard_present:
        return {ANY_STRING}
    return entities1 & entities2
def _infer_count(entities: Set[str]) -> Set[str]:
    """Slot of a COUNT node: always an integer class, whatever was counted."""
    # TODO: the return value of COUNT should be compatible with NumberEntity
    # no requirement on input entity class
    return {ComputableEntityClass.type_int}
def _validate_arg(entities: Set[str]) -> Set[Tuple]:
"""
Constraint on its neighbor relation: return List[Tuple]
"""
# you should generate a requirement for relation - entity class tuples
return {(example, ANY_STRING) for example in entities}
def _infer_arg(entities: Set[str], entity_tuples: Set[Tuple]) -> Set[str]:
    """
    Slot of ARGMAX/ARGMIN: the superlative keeps the entity classes of its
    first argument; the relation argument does not change the slot.
    """
    # you should generate a requirement for relation - entity class tuples
    return entities
def _validate_tc_now(entities: Set[str]) -> Set[Tuple]:
    """
    Requirement for the relation argument of TC_NOW/TC: unconstrained — any
    relation is acceptable for a temporal constraint.
    """
    return {(ANY_STRING, ANY_STRING)}
def _infer_tc_now(entities: Set[str], entity_tuples: Set[Tuple]) -> Set[str]:
    """Slot of TC_NOW: unconstrained (temporal filtering is not tracked at the class level)."""
    return {ANY_STRING}
def _infer_tc(entities: Set[str], entity_tuples: Set[Tuple], number_entity: int) -> Set[str]:
    """Slot of TC: unconstrained (temporal filtering is not tracked at the class level)."""
    return {ANY_STRING}
class SExpressionState:
action_validate_dict = {
SExpressionLanguage.AND_OP.__name__: _validate_and_op,
SExpressionLanguage.JOIN_ENT.__name__: _validate_join_ent,
SExpressionLanguage.JOIN_REL.__name__: _validate_join_rel,
SExpressionLanguage.JOIN_CLASS.__name__: _validate_join_ent,
SExpressionLanguage.ARGMAX.__name__: _validate_arg,
SExpressionLanguage.ARGMIN.__name__: _validate_arg,
SExpressionLanguage.lt.__name__: _validate_math,
SExpressionLanguage.le.__name__: _validate_math,
SExpressionLanguage.gt.__name__: _validate_math,
SExpressionLanguage.ge.__name__: _validate_math,
# extra action name
WebQSPSExpressionLanguage.TC_NOW.__name__: _validate_tc_now,
WebQSPSExpressionLanguage.TC.__name__: _validate_tc_now
}
action_infer_dict = {
SExpressionLanguage.AND_OP.__name__: _infer_and_op,
SExpressionLanguage.R.__name__: _infer_reverse,
SExpressionLanguage.COUNT.__name__: _infer_count,
SExpressionLanguage.JOIN_ENT.__name__: _infer_join_ent,
SExpressionLanguage.JOIN_REL.__name__: _infer_join_rel,
SExpressionLanguage.JOIN_CLASS.__name__: _infer_join_ent,
SExpressionLanguage.ARGMAX.__name__: _infer_arg,
SExpressionLanguage.ARGMIN.__name__: _infer_arg,
SExpressionLanguage.lt.__name__: _infer_math,
SExpressionLanguage.le.__name__: _infer_math,
SExpressionLanguage.gt.__name__: _infer_math,
SExpressionLanguage.ge.__name__: _infer_math,
# extra action name
WebQSPSExpressionLanguage.TC_NOW.__name__: _infer_tc_now,
WebQSPSExpressionLanguage.TC.__name__: _infer_tc
}
def __init__(self,
possible_actions,
world: KBWorld,
is_nonterminal: Callable[[str], bool],
relation_to_argument: Dict[str, List] = None,
anchor_to_argument: Dict[str, Dict] = None,
literal_relation: Set[str] = None,
entity_to_argument: Dict[str, str] = None,
model_in_training: bool = False,
enabled_type: bool = False,
enabled_virtual: bool = False,
enabled_anchor: bool = False,
enabled_runtime_prune: bool = False):
self.possible_actions = possible_actions
# argument stack stores the requirement (e.g. .+, and specific ones)
self.world = world
self.language = world.language
self.is_nonterminal = is_nonterminal
self.enabled_virtual = enabled_virtual
self.enabled_anchor = enabled_anchor
self.enabled_runtime_prune = enabled_runtime_prune
self.enabled_type = enabled_type
# if in training, no checking is executed; otherwise, depends on each option.
self.enabled = (not model_in_training) & (enabled_virtual | enabled_anchor |
enabled_runtime_prune | enabled_type)
# FORMAT SPEC: relation to argument is as
# "education.educational_institution.school_type": [
# {"education.educational_institution":
# "education.school_category"}
# ]
# ]
if entity_to_argument is None:
self.entity_to_argument = {}
for entity in self.world.kb_context.entity_list:
# all arguments as list to be compatible
if len(entity.all_entity_classes) > 0:
self.entity_to_argument[entity.entity_id] = set(entity.all_entity_classes)
if "type.type" in self.entity_to_argument[entity.entity_id]:
self.entity_to_argument[entity.entity_id].remove("type.type")
else:
self.entity_to_argument = entity_to_argument
self.relation_to_argument = relation_to_argument
self.anchor_to_argument = anchor_to_argument
self.literal_relation = literal_relation
if self.anchor_to_argument.keys():
self.anchor_in_relations = reduce(lambda x, y: set(x) | set(y),
[self.anchor_to_argument[key]['in_relation']
for key in self.anchor_to_argument.keys()])
self.anchor_out_relations = reduce(lambda x, y: set(x) | set(y),
[self.anchor_to_argument[key]['out_relation']
for key in self.anchor_to_argument.keys()])
else:
self.anchor_in_relations = {}
self.anchor_out_relations = {}
self.node_accessed = False
# if this is set as True, remove all linked ones to construct a dead-loop one
self.early_stop = False
self.action_history = []
# manually maintain the connection between different node
self.node_table: Dict[int, TreeNode] = {}
self.node_queue: List[int] = []
def copy_self(self):
# shallow copy
new_sexpression_state = copy(self)
# deep copy
new_sexpression_state.action_history = self.action_history[:]
new_sexpression_state.node_queue = self.node_queue[:]
# only the node table record the necessary node information
new_sexpression_state.node_table = pickle.loads(pickle.dumps(self.node_table))
return new_sexpression_state
    def collapse_finished_nodes_and_validate(self, node_queue, node_table, validate=True) -> bool:
        """
        Repeatedly reduce the top of *node_queue* while its node has all
        children filled in: run the operator's infer function on the children's
        slots, optionally validate the inferred slot against the node's stored
        requirement, write the slot back and pop the node.
        :param node_queue: stack of node ids awaiting reduction (mutated in place)
        :param node_table: id -> TreeNode mapping
        :param validate: when True, reject slots violating the requirement
        :return: False as soon as a reduced node violates its requirement,
            True otherwise (including when nothing could be reduced).
        """
        while len(node_queue) > 0 and node_table[node_queue[-1]].able_to_reduce(node_table):
            cur_node = node_table[node_queue[-1]]
            # the operator name was stored in node_slot when the node was built
            func_name = cur_node.node_slot
            # the children's slots are the operator's parameters
            parameters = [node_table[node_id].node_slot for node_id in cur_node.child]
            # inference on the entity class level
            infer_slot = self.action_infer_dict[func_name](*parameters)
            if validate:
                match_requirement = satisfy_require(cur_node.requirement, infer_slot)
                if not match_requirement:
                    # TODO: return None to avoid decoding on this sequence
                    return False
            # update slot
            cur_node.update_slot(infer_slot)
            # continue with its parent
            node_queue.pop(-1)
        return True
    def runtime_prune(self, node_queue, node_table) -> Tuple[bool, List[str]]:
        """
        Given the current stack of nodes and the node table, find the first
        reducible JOIN node, convert it to a SPARQL query and execute it to
        fetch the valid slot values from the KB.
        Note this function does not affect the node status; it only converts
        the subtree into an s-expression for execution.
        :return: (True, values) when execution succeeded; (False, []) when no
            reducible JOIN node exists or the query failed.
        """
        if not (len(node_queue) > 0 and node_table[node_queue[-1]].able_to_reduce(node_table)):
            return False, []
        # reduce from top to down
        try:
            # NOTE(review): collected but never read afterwards — dead bookkeeping?
            history_nontermnial = set()
            # NOTE(review): "JOIN_REL" appears twice in this set literal; the
            # second occurrence is a no-op — possibly "JOIN_CLASS" was intended.
            # TODO confirm against the language definition.
            existing_predicates = {"JOIN_REL", "JOIN_ENT", "JOIN_REL"}
            for i in range(len(node_queue)):
                # Only JOIN utilizes the KB information
                cur_node: TreeNode = node_table[node_queue[i]]
                history_nontermnial.add(cur_node.node_val)
                if cur_node.able_to_reduce(node_table):
                    if cur_node.node_val in existing_predicates:
                        s_expression = cur_node.export_sexpression(node_table)
                        # this s_expression should be post-processed to handle JOIN_REL etc.
                        s_expression = self.world.postprocess(s_expression)
                        query = self.world.sparql_converter(s_expression)
                        # obtain slot values as List[List[str]]
                        slot_values, status_code = exec_verify_sparql_xsl_only_fix_literal(query)
                        flatten_slot_values = [slot[0] for slot in slot_values]
                        if status_code == 200:
                            return True, flatten_slot_values
                        else:
                            return False, []
        except Exception as e:
            print("Error on runtime_prune:\n")
            exec_info = sys.exc_info()
            traceback.print_exception(*exec_info)
            # no way to execute
            # NOTE(review): returning True with an empty value list on failure
            # looks intentional (treat as "prune succeeded, no constraint") but
            # is worth confirming.
            return True, []
        return False, []
def detect_early_stop(self, history: List[str]):
# to avoid invalid rules to rank the first
lhs_seq = [rule.split(' -> ')[0] for rule in history]
rhs_first_seq = [rule.split(' -> ')[1].strip().split(', ')[0] for rule in history]
rhs_seq = [rule.split(' -> ')[1].strip().split(', ') for rule in history]
# @start@ -> Class, Class -> instance
if len(lhs_seq) == 2 and lhs_seq[1] == Class.__name__ and len(rhs_seq[1]) == 1:
return True
# <Class,Class:Class> -> AND_OP, Class -> ins1, Class -> ins2
if SExpressionLanguage.AND_OP.__name__ in rhs_first_seq:
for i in range(len(rhs_first_seq) - 2):
if rhs_first_seq[i] == SExpressionLanguage.AND_OP.__name__ and \
len(rhs_seq[i + 1]) == 1 and len(rhs_seq[i + 2]) == 1:
return True
# <Relation:Relation> -> R, Relation -> [<Relation:Relation>, Relation], <Relation:Relation> -> R
if SExpressionLanguage.R.__name__ in rhs_first_seq:
for i in range(len(rhs_first_seq) - 2):
if rhs_first_seq[i] == SExpressionLanguage.R.__name__ and \
rhs_first_seq[i + 2] == SExpressionLanguage.R.__name__:
return True
return False
    def take_action(self, production_rule: str) -> 'SExpressionState':
        """
        Consume one production rule and return the successor state.
        A terminal rule creates a TreeNode: a global rule (AND_OP, JOIN_REL,
        ...) allocates placeholder children whose count is derived from the
        arity of the *previous* action; any other leaf rule stores its slot
        (entity classes, or class tuples for relations).  After bookkeeping,
        degenerate sequences set early_stop and finished subtrees are
        collapsed and (optionally) validated.
        :param production_rule: rule of the form "LHS -> RHS"
        :return: a fresh SExpressionState (or self when checking is disabled).
        """
        if not self.enabled or self.early_stop:
            return self
        new_sql_state = self.copy_self()
        lhs, rhs = production_rule.split(' -> ')
        rhs_tokens = rhs.strip('[]').split(', ')
        # TODO: if you write the language by inheriting DomainLanguage,
        # multiple tokens must not be a terminal production rule
        if len(rhs_tokens) == 1:
            is_terminal = not self.is_nonterminal(rhs)
            # use the right side to build a tree node
            if is_terminal:
                # global grammar rules: AND_OP, JOIN_REL, etc.
                if self.world.language.is_global_rule(rhs):
                    # WARNING: this only applies for a language which follows DomainLanguage;
                    # the previous action's RHS arity tells us how many children to allocate
                    last_action = new_sql_state.action_history[-1]
                    lhs, rhs_param = last_action.split(' -> ')
                    num_parameters = len(rhs_param.strip('[]').split(', ')) - 1
                    # allocate placeholder (non-terminal) children first, then the
                    # operator node itself, so node ids stay table-ordered
                    child_nodes = []
                    for _ in range(num_parameters):
                        node_id = len(new_sql_state.node_table)
                        child_node = TreeNode(node_slot=None, node_val=None, node_id=node_id)
                        new_sql_state.node_table[node_id] = child_node
                        child_nodes.append(node_id)
                    node_id = len(new_sql_state.node_table)
                    tree_node = TreeNode(node_slot=rhs,
                                         node_val=rhs,
                                         node_id=node_id,
                                         child=child_nodes)
                    new_sql_state.node_table[node_id] = tree_node
                # leaf rule: if a relation, the slot is entity tuples; otherwise entity classes
                else:
                    if rhs in new_sql_state.entity_to_argument:
                        infer_slot = new_sql_state.entity_to_argument[rhs]
                    elif rhs in new_sql_state.relation_to_argument:
                        infer_slot = new_sql_state.relation_to_argument[rhs]
                    else:
                        # TODO: hard code for relations which cannot be found in freebase —
                        # fall back to the ANY wildcard of the appropriate shape
                        if self.language.obtain_leaf_type(rhs) == Universe.entity_repr:
                            infer_slot = {ANY_STRING}
                        else:
                            infer_slot = {(ANY_STRING, ANY_STRING)}
                    # add the new node to the node table
                    node_id = len(new_sql_state.node_table)
                    tree_node = TreeNode(node_slot=infer_slot,
                                         node_val=rhs,
                                         node_id=node_id)
                    new_sql_state.node_table[node_id] = tree_node
                # attach the new node: the very first node is the root (nothing to
                # attach to); later nodes fill the next placeholder of the pending parent
                if new_sql_state.node_accessed is False:
                    # set as True
                    new_sql_state.node_accessed = True
                else:
                    # fill a placeholder child of the nearest pending operator
                    new_sql_state.node_table[new_sql_state.node_queue[-1]]. \
                        add_child(tree_node, new_sql_state.node_table)
                if self.world.language.is_global_rule(rhs):
                    new_sql_state.node_queue.append(tree_node.node_id)
                # We use the nearest grammar rule (JOIN_ENT, AND and so on) and its
                # first argument to identify if the next parameter could satisfy its
                # type constraint. E.g. ( JOIN_REL ( R ( relation_1 ) entity_1 ) )
                # the first parameter of JOIN_REL is reverse of relation_1.
                # When each grammar rule ends, it will be used to infer the type of its
                # next parameter (with the help of action history)
        new_sql_state.action_history.append(production_rule)
        early_stop = self.detect_early_stop(new_sql_state.action_history)
        if early_stop:
            new_sql_state.early_stop = True
        else:
            # must run after every action so finished subtrees collapse eagerly
            result = self.collapse_finished_nodes_and_validate(new_sql_state.node_queue,
                                                               new_sql_state.node_table,
                                                               validate=self.enabled_virtual)
            if result is False:
                new_sql_state.early_stop = True
        return new_sql_state
def get_valid_actions(self, valid_actions: dict):
    """
    Prune the grammar's candidate ('linked') actions down to those consistent
    with the partially built program.

    Up to four pruning stages run in order, each gated by a config flag:
    instance-level anchor checking (``self.enabled_anchor``), type-level
    checking (``self.enabled_type``), virtual execution (``self.enabled_virtual``)
    and runtime subprogram pruning (``self.enabled_runtime_prune``).

    :param valid_actions: dict with optional 'global' and 'linked' entries;
        'linked' is a tuple whose index 2 holds the candidate rule ids.
    :return: a dict of the same shape with pruned 'linked' actions. If every
        linked action would be pruned and no 'global' actions exist,
        ``self.early_stop`` is set and the original 'linked' entry is kept.
    """
    # Nothing to do when filtering is disabled, decoding already stopped, or
    # there are no linked (instance-specific) actions to prune.
    if not self.enabled or self.early_stop or 'linked' not in valid_actions:
        return valid_actions
    valid_actions_ids = [rule_id for rule_id in valid_actions['linked'][2]]
    valid_actions_rules = [self.possible_actions[rule_id] for rule_id in valid_actions_ids]
    # use lhs to determine whether to trigger
    lhs_nonterminal = valid_actions_rules[0].split(' -> ')[0]
    last_requirement = None
    # execute operator on the argument if the minimum count is satisfied
    if len(self.node_queue) > 0:
        # take the nearest node in node queue
        peek_node = self.node_table[self.node_queue[-1]]
        if len(peek_node.child) > 1 and \
                self.node_table[peek_node.child[0]].node_slot and \
                self.node_table[peek_node.child[1]].requirement is None:
            # pop an operator and an argument, and execute it
            operator_name = peek_node.node_val
            first_argument = self.node_table[peek_node.child[0]].node_slot
            # TODO: note that all _validate functions return the minimum requirement
            # not the strict requirement
            next_argument_require = self.action_validate_dict[operator_name](first_argument)
            # must be set
            next_argument_require = set(next_argument_require)
            # reduce the space: collapse an over-large requirement set into the
            # wildcard form (pairs for relations, singletons for entities)
            if len(next_argument_require) >= MAXIMUM_PARAMETER_LEN:
                # NOTE(review): isinstance against typing.Tuple relies on
                # behavior deprecated in newer Pythons — confirm target version.
                if isinstance(next(iter(next_argument_require)), Tuple):
                    next_argument_require = {(ANY_STRING, ANY_STRING)}
                else:
                    next_argument_require = {ANY_STRING}
            # append into argument stack for checking
            # WARNING: somehow may be hard code
            self.node_table[peek_node.child[1]].requirement = next_argument_require
            last_requirement = next_argument_require
    # ids accumulated here are dropped from the linked actions at the end
    actions_to_remove = set()
    # instance-level checking
    if self.enabled_anchor:
        # 1. <Relation,Entity:Class> -> JOIN_ENT, (Optional) Relation -> [<Relation:Relation>, Relation],
        # (Optional) <Relation:Relation> -> R, Relation -> ?
        # proactive predicting on the valid relations based on CANDIDATE entities
        if lhs_nonterminal == Relation.__name__ and self.literal_relation:
            normal_setting = self.action_history[-1].split(' -> ')[1] == SExpressionLanguage.JOIN_ENT.__name__
            reverse_setting = self.action_history[-1].split(' -> ')[1] == SExpressionLanguage.R.__name__ and \
                self.action_history[-3].split(' -> ')[1] == SExpressionLanguage.JOIN_ENT.__name__
            if normal_setting or reverse_setting:
                for rule_id, rule in zip(valid_actions_ids, valid_actions_rules):
                    temp_rel_rhs = rule.split(' -> ')[1]
                    # literal relations cannot be judged here — keep them
                    if temp_rel_rhs in self.literal_relation:
                        continue
                    if normal_setting and temp_rel_rhs not in self.anchor_in_relations:
                        actions_to_remove.add(rule_id)
                    elif reverse_setting and temp_rel_rhs not in self.anchor_out_relations:
                        actions_to_remove.add(rule_id)
        # 2. <Relation,Entity:Class> -> JOIN_ENT, (Optional) Relation -> [<Relation:Relation>, Relation],
        # (Optional) <Relation:Relation> -> R, Relation -> rel, Entity -> ?
        # post-checking to prune invalid relations
        if lhs_nonterminal == Entity.__name__:
            action_rules = self.action_history[-1].split(' -> ')
            reverse_flag = self.action_history[-2].split(' -> ')[1] == SExpressionLanguage.R.__name__
            should_remove = action_rules[0] == Relation.__name__
            if should_remove:
                for rule_id, rule in zip(valid_actions_ids, valid_actions_rules):
                    temp_ent_rhs = rule.split(' -> ')[1]
                    if temp_ent_rhs in self.anchor_to_argument:
                        if reverse_flag:
                            check_relations = self.anchor_to_argument[temp_ent_rhs]['out_relation']
                        else:
                            check_relations = self.anchor_to_argument[temp_ent_rhs]['in_relation']
                        if action_rules[1] not in check_relations:
                            actions_to_remove.add(rule_id)
    # update ids and rules (no-op when actions_to_remove is still empty)
    valid_actions_ids = [action_id for action_id in valid_actions_ids if action_id not in actions_to_remove]
    valid_actions_rules = [self.possible_actions[rule_id] for rule_id in valid_actions_ids]
    # type-level checking
    if self.enabled_type and last_requirement is not None:
        # preprocess to identify valid actions
        for rule_id, rule in zip(valid_actions_ids, valid_actions_rules):
            _, rhs = rule.split(' -> ')
            if rhs in self.entity_to_argument:
                # decision on an entity
                # last_requirement: List[str]
                # condition: List[str]
                condition = self.entity_to_argument[rhs]
                match_requirement = satisfy_require(last_requirement, condition)
            elif rhs in self.relation_to_argument:
                # decision on a relation
                # last_requirement: List[Tuple[str, str]]
                # condition: List[Tuple[str, str]]
                condition = self.relation_to_argument[rhs]
                match_requirement = satisfy_require(last_requirement, condition)
            else:
                # unknown rhs: keep it rather than over-prune
                match_requirement = True
            if not match_requirement:
                actions_to_remove.add(rule_id)
    # update ids and rules
    valid_actions_ids = [action_id for action_id in valid_actions_ids if action_id not in actions_to_remove]
    valid_actions_rules = [self.possible_actions[rule_id] for rule_id in valid_actions_ids]
    # execution semantic level: virtual execution
    # try to apply each rule and decide if the filled one satisfy the requirement
    if self.enabled_virtual and len(self.node_queue) > 0:
        # virtually forward to try
        for rule_id, rule in zip(valid_actions_ids, valid_actions_rules):
            _, rhs = rule.split(' -> ')
            if rhs in self.entity_to_argument:
                # decision on an entity
                # last_requirement: List[str]
                # condition: List[str]
                argument = self.entity_to_argument[rhs]
            elif rhs in self.relation_to_argument:
                # decision on a relation
                # last_requirement: List[Tuple[str, str]]
                # condition: List[Tuple[str, str]]
                argument = self.relation_to_argument[rhs]
            else:
                # ignore these
                continue
            # alloc a totally new node because we will change its child.
            # but for other nodes, we will only changed its slot
            # (which can be overwritten for many times)
            local_node_table = pickle.loads(pickle.dumps(self.node_table))
            local_node_queue = self.node_queue[:]
            # these nodes are temporal nodes, we should delete them after judgement
            temp_node_id = len(local_node_table)
            tree_node = TreeNode(node_slot=argument, node_id=temp_node_id, node_val=rhs)
            local_node_table[temp_node_id] = tree_node
            local_node_table[local_node_queue[-1]].add_child(tree_node, local_node_table)
            # the reference is changed after copying!
            result = self.collapse_finished_nodes_and_validate(local_node_queue,
                                                               local_node_table)
            if result is False:
                actions_to_remove.add(rule_id)
    # update ids and rules
    valid_actions_ids = [action_id for action_id in valid_actions_ids if action_id not in actions_to_remove]
    valid_actions_rules = [self.possible_actions[rule_id] for rule_id in valid_actions_ids]
    # execution semantic level: subprogram induction
    if self.enabled_runtime_prune and len(self.node_queue) > 0 and \
            len(valid_actions_ids) > 0 and \
            lhs_nonterminal in [Entity.__name__,
                                Class.__name__,
                                Relation.__name__]:
        # here we will fake a node and append it into current node table
        local_node_queue = self.node_queue[:]
        local_node_table = pickle.loads(pickle.dumps(self.node_table))
        # allocate node id and record it
        node_id = len(local_node_table)
        fake_tree_node = TreeNode(node_slot="SLOT", node_id=node_id, node_val="SLOT")
        local_node_table[node_id] = fake_tree_node
        local_node_table[local_node_queue[-1]].add_child(fake_tree_node, local_node_table)
        is_pruning, accept_slots = self.runtime_prune(node_queue=local_node_queue, node_table=local_node_table)
        if is_pruning:
            for rule_id, rule in zip(valid_actions_ids, valid_actions_rules):
                _, rhs = rule.split(' -> ')
                # number entities cannot be judged by the induced subprogram
                if self.language.is_number_entity(rhs):
                    continue
                if rhs not in accept_slots:
                    actions_to_remove.add(rule_id)
    # here we want to filter those ones which cannot be covered
    new_valid_actions = {}
    if 'global' in valid_actions:
        new_valid_actions['global'] = valid_actions['global']
    new_linked_actions = self._remove_actions(valid_actions, 'linked',
                                              actions_to_remove) if 'linked' in valid_actions else None
    if new_linked_actions is not None:
        new_valid_actions['linked'] = new_linked_actions
    elif 'global' not in valid_actions:
        # every linked action was pruned and there is no global fallback:
        # stop decoding early but keep the original actions so the caller
        # still receives a non-empty distribution.
        self.early_stop = True
        new_valid_actions['linked'] = valid_actions['linked']
    return new_valid_actions
@staticmethod
def _remove_actions(valid_actions, key, ids_to_remove):
if len(ids_to_remove) == 0:
return valid_actions[key]
if len(ids_to_remove) == len(valid_actions[key][2]):
return None
current_ids = valid_actions[key][2]
keep_ids = []
keep_ids_loc = []
for loc, rule_id in enumerate(current_ids):
if rule_id not in ids_to_remove:
keep_ids.append(rule_id)
keep_ids_loc.append(loc)
items = list(valid_actions[key])
items[0] = items[0][keep_ids_loc]
items[1] = items[1][keep_ids_loc]
items[2] = keep_ids
if len(items) >= 4:
items[3] = items[3][keep_ids_loc]
return tuple(items)
|
<reponame>ed-ortizm/L-G-opt
#!/usr/bin/env python3
import numpy as np
from annealing import GR
import matplotlib
import matplotlib.pyplot as plt
# Stats for convergence ratios:
# data = np.loadtxt('20_chains_100_runs/conv_rates.txt')
# cr_mean = np.zeros((data.shape[0],2))
# i = 0
# for n in data:
# print('For n = ', int(n[0]), ', the convergence ratio has')
# cr_mean[i][0] = int(n[0])
# cr_mean[i][1] = np.mean(n[1:])
# print('A mean value of: ','{:.2f}'.format(cr_mean[i][1]))
# print('A median of : ','{:.2f}'.format(np.median(n[1:])))
# print('A standard deviation of: ','{:.2f}'.format(np.std(n[1:])),'\n')
# i = i+1
# print(cr_mean[:,0])
# plt.figure()
# plt.title('Convergence Ratio')
# plt.ylabel('Convergence Ratio')
# plt.xlabel('n')
# plt.axis([0,22,0,1.0])
# plt.plot(cr_mean[:,0],cr_mean[:,1],'bo--')
# plt.xticks(np.array([0,1,3,5,7,9,11,13,15,17,19,21]))
# plt.savefig('conv_ratio.png')
# plt.close()
# GR statistics
# Gelman-Rubin (PSRF) statistics over all runs for each chain parameter n.
nn = [2 * k + 1 for k in range(11)]  # odd values 1, 3, ..., 21
runs = 100
# column 0 holds n, columns 1..runs hold the PSRF of each run
x_PSRFs = np.zeros((len(nn), runs + 1))
y_PSRFs = np.zeros((len(nn), runs + 1))
for run in range(runs):
    for row, n in enumerate(nn):
        x_data = np.loadtxt(f'20_chains_100_runs/chains_x_n_{n}_run_{run}.txt')
        y_data = np.loadtxt(f'20_chains_100_runs/chains_y_n_{n}_run_{run}.txt')
        # GelmanRubin() returns (PSRF, ...); only the PSRF itself is kept
        x_PSRF = GR(x_data).GelmanRubin()[0]
        y_PSRF = GR(y_data).GelmanRubin()[0]
        x_PSRFs[row][0] = n
        x_PSRFs[row][run + 1] = x_PSRF
        y_PSRFs[row][0] = n
        y_PSRFs[row][run + 1] = y_PSRF
np.savetxt('20_chains_100_runs/x_PSRFs.txt', x_PSRFs, delimiter='\t', fmt='%1.4f')
np.savetxt('20_chains_100_runs/y_PSRFs.txt', y_PSRFs, delimiter='\t', fmt='%1.4f')
# average PSRF per n across all runs (skip the n column)
mx = np.mean(x_PSRFs[:, 1:], axis=1)
my = np.mean(y_PSRFs[:, 1:], axis=1)
for row in range(len(nn)):
    print('For n=', 2 * row + 1, ', the mean values of the PSRFs (x,y) are: ',
          '{:.4f}'.format(mx[row]), '{:.4f}'.format(my[row]))
plt.figure()
plt.title('Average PSRFs')
plt.ylabel('PSRFs')
plt.xlabel('n')
plt.axis([0, 22, 1., 5.])
plt.plot(np.arange(1, 23, 2), mx, 'bo--', label='x')
plt.plot(np.arange(1, 23, 2), my, 'ro--', label='y')
plt.legend()
plt.xticks(np.arange(1, 23, 2))
plt.savefig('PSRFs.png')
plt.close()
|
# Standard Library
import asyncio
import gc
import json
import logging
import os
import time
import urllib.request
import zipfile
from collections import defaultdict

# Third Party
import boto3
import numpy as np
import pandas as pd
from botocore.config import Config
from botocore.exceptions import ClientError
from elasticsearch import AsyncElasticsearch
from elasticsearch.helpers import async_bulk
# NOTE(review): "HyperParamaters" looks misspelled but presumably matches the
# actual module filename — confirm before renaming.
from HyperParamaters import HyperParameters
from nats.aio.errors import ErrTimeout
from NulogServer import NulogServer
from NulogTrain import consume_signal, train_model
from opni_nats import NatsWrapper

LOGGING_LEVEL = os.getenv("LOGGING_LEVEL", "INFO")
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s")
logger = logging.getLogger(__file__)
logger.setLevel(LOGGING_LEVEL)


def _env_flag(name, default=False):
    """Parse a boolean environment variable; "1"/"true"/"yes" (any case) is True."""
    raw = os.environ.get(name)
    if raw is None:
        return default
    return raw.strip().lower() in ("1", "true", "yes")


params = HyperParameters()

# Anomaly threshold on the model confidence; env var overrides the default.
THRESHOLD = params.MODEL_THRESHOLD
if "MODEL_THRESHOLD" in os.environ:
    THRESHOLD = float(os.environ["MODEL_THRESHOLD"])

# Minimum number of tokens a log must have to be scored.
MIN_LOG_TOKENS = params.MIN_LOG_TOKENS
if "MIN_LOG_TOKENS" in os.environ:
    MIN_LOG_TOKENS = int(os.environ["MIN_LOG_TOKENS"])

# BUG FIX: the original guarded on "IS_CONTROL_PLANE" being set but then read
# os.environ["IS_CONTROL_PLANE_SERVICE"] (KeyError when only the guard var is
# set) and applied bool() to the raw string, which is True for ANY non-empty
# value, including "false". Parse the guarded variable as a real boolean.
IS_CONTROL_PLANE_SERVICE = _env_flag("IS_CONTROL_PLANE", params.IS_CONTROL_PLANE)

ES_ENDPOINT = os.environ["ES_ENDPOINT"]  # required; fail fast if unset
ES_USERNAME = os.getenv("ES_USERNAME", "admin")
ES_PASSWORD = os.getenv("ES_PASSWORD", "<PASSWORD>")

S3_ENDPOINT = os.environ["S3_ENDPOINT"]  # required; fail fast if unset
S3_ACCESS_KEY = os.environ["S3_ACCESS_KEY"]
S3_SECRET_KEY = os.environ["S3_SECRET_KEY"]
S3_BUCKET = os.getenv("S3_BUCKET", "opni-nulog-models")

# BUG FIX: bool(os.getenv("IS_GPU_SERVICE", False)) was True for any non-empty
# string such as "false" or "0"; parse as a real boolean instead.
IS_GPU_SERVICE = _env_flag("IS_GPU_SERVICE", False)

# Local cache file for predictions, one per service flavor.
CACHED_PREDS_SAVEFILE = (
    "control-plane-preds.txt"
    if IS_CONTROL_PLANE_SERVICE
    else "gpu-preds.txt"
    if IS_GPU_SERVICE
    else "cpu-preds.txt"
)
SAVE_FREQ = 25  # upload the cache file to S3 every SAVE_FREQ cached entries

logger.debug(f"model threshold is {THRESHOLD}")
logger.debug(f"min log tokens is {MIN_LOG_TOKENS}")
logger.info(f"is controlplane is {IS_CONTROL_PLANE_SERVICE}")

nw = NatsWrapper()
es = AsyncElasticsearch(
    [ES_ENDPOINT],
    port=9200,
    http_compress=True,
    http_auth=(ES_USERNAME, ES_PASSWORD),
    verify_certs=False,
    use_ssl=True,
)
s3_client = boto3.resource(
    "s3",
    endpoint_url=S3_ENDPOINT,
    aws_access_key_id=S3_ACCESS_KEY,
    aws_secret_access_key=S3_SECRET_KEY,
    config=Config(signature_version="s3v4"),
)

# Painless scripts used by the ES bulk updates: the control plane maps any
# predicted anomaly to "Anomaly", other services grade by predicted count.
if IS_CONTROL_PLANE_SERVICE:
    script_source = 'ctx._source.anomaly_level = ctx._source.anomaly_predicted_count != 0 ? "Anomaly" : "Normal";'
else:
    script_source = 'ctx._source.anomaly_level = ctx._source.anomaly_predicted_count == 0 ? "Normal" : ctx._source.anomaly_predicted_count == 1 ? "Suspicious" : "Anomaly";'
script_source += "ctx._source.nulog_confidence = params['nulog_score'];"
script_for_anomaly = (
    "ctx._source.anomaly_predicted_count += 1; ctx._source.nulog_anomaly = true;"
)
async def consume_logs(logs_queue):
    """
    Coroutine that subscribes to the NATS subjects appropriate for this
    service flavor and funnels every received payload into ``logs_queue``.
    """
    # Build the subscription table for this service flavor, then subscribe
    # in order. "workers" queue groups load-balance across replicas.
    if IS_CONTROL_PLANE_SERVICE:
        subscriptions = [
            dict(
                nats_subject="preprocessed_logs_control_plane",
                payload_queue=logs_queue,
                nats_queue="workers",
            ),
        ]
    elif IS_GPU_SERVICE:
        subscriptions = [
            dict(nats_subject="model_ready", payload_queue=logs_queue),
            dict(
                nats_subject="gpu_service_inference_internal",
                payload_queue=logs_queue,
            ),
        ]
    else:
        subscriptions = [
            dict(nats_subject="model_ready", payload_queue=logs_queue),
            dict(
                nats_subject="preprocessed_logs",
                payload_queue=logs_queue,
                nats_queue="workers",
            ),
            dict(
                nats_subject="gpu_service_predictions",
                payload_queue=logs_queue,
            ),
        ]
    for subscription in subscriptions:
        await nw.subscribe(**subscription)
async def update_preds_to_es(df):
    """
    Push per-log anomaly predictions into Elasticsearch via a bulk update.

    Mutates ``df`` in place: adds ``predictions``, ``_op_type``, ``_index``
    and ``script`` columns and renames ``log_id`` to ``_id``. Bulk failures
    are logged and swallowed so the caller's inference loop keeps running.
    """
    async def doc_generator(frame):
        # Yield one plain-dict update action per row for the ES bulk helper.
        for _, row in frame.iterrows():
            yield row.to_dict()

    scores = df["nulog_confidence"]
    # a score below THRESHOLD marks the log as anomalous
    df["predictions"] = [int(score < THRESHOLD) for score in scores]
    df["_op_type"] = "update"
    df["_index"] = "logs"
    df.rename(columns={"log_id": "_id"}, inplace=True)

    def make_script(nulog_score):
        # Anomalous logs additionally bump the anomaly counter in ES.
        source = script_source
        if nulog_score < THRESHOLD:
            source = script_for_anomaly + script_source
        return {
            "source": source,
            "lang": "painless",
            "params": {"nulog_score": nulog_score},
        }

    df["script"] = [make_script(score) for score in scores]
    try:
        await async_bulk(es, doc_generator(df[["_id", "_op_type", "_index", "script"]]))
        logger.info(
            "Updated {} anomalies from {} logs to ES".format(
                len(df[df["predictions"] > 0]),
                len(df["predictions"]),
            )
        )
    except Exception as e:
        logger.error(e)
def s3_setup(s3_client):
    """
    Ensure the S3 bucket used for models and cached predictions exists.

    Creates the bucket when a HEAD request reports 404. Other client errors
    (e.g. 403 permission denied) used to be silently swallowed; they are now
    logged so misconfiguration is visible.

    :param s3_client: a ``boto3`` S3 service resource.
    :return: True (kept for backward compatibility with existing callers).
    """
    try:
        s3_client.meta.client.head_bucket(Bucket=S3_BUCKET)
        logger.debug(f"{S3_BUCKET} bucket exists")
    except ClientError as e:
        # If a client error is thrown, then check that it was a 404 error.
        # If it was a 404 error, then the bucket does not exist.
        error_code = e.response["Error"]["Code"]
        if error_code == "404":
            logger.warning(f"{S3_BUCKET} bucket does not exist so creating it now")
            s3_client.create_bucket(Bucket=S3_BUCKET)
        else:
            # BUG FIX: non-404 errors were previously ignored silently.
            logger.error(f"unexpected error while checking {S3_BUCKET} bucket: {e}")
    return True
def load_cached_preds(saved_preds: dict):
    """
    Populate ``saved_preds`` with cached predictions downloaded from S3.

    The cache file is tab-separated with the score as the FINAL field
    (``<masked_log>\\t<score>``, as written by ``save_cached_preds``).
    A missing cache file (fresh deployment) is logged and ignored.

    :param saved_preds: dict mapping masked log -> anomaly score; updated in place.
    :return: the same ``saved_preds`` dict.
    """
    bucket_name = S3_BUCKET
    try:
        s3_client.meta.client.download_file(
            bucket_name, CACHED_PREDS_SAVEFILE, CACHED_PREDS_SAVEFILE
        )
        with open(CACHED_PREDS_SAVEFILE) as fin:
            for line in fin:
                # BUG FIX: split from the right so masked logs that themselves
                # contain tab characters do not break the "key\tscore" parse.
                ml, score = line.rsplit("\t", 1)
                saved_preds[ml] = float(score)
    except Exception as e:
        logger.error("cached preds files do not exist.")
    logger.debug(f"loaded from cached preds: {len(saved_preds)}")
    return saved_preds
def save_cached_preds(new_preds: dict, saved_preds: dict):
    """
    Append newly computed predictions to the local cache file and merge them
    into ``saved_preds``; every SAVE_FREQ cached entries the cache file is
    uploaded to S3 (upload failures are logged, not raised).
    """
    bucket_name = S3_BUCKET
    should_upload = False
    with open(CACHED_PREDS_SAVEFILE, "a") as fout:
        for ml, score in new_preds.items():
            logger.debug("ml :" + str(ml))
            saved_preds[ml] = score
            fout.write(ml + "\t" + str(score) + "\n")
            # sync to S3 whenever the cache size crosses a SAVE_FREQ boundary
            if len(saved_preds) % SAVE_FREQ == 0:
                should_upload = True
    logger.debug(f"saved cached preds, current num of cache: {len(saved_preds)}")
    if not should_upload:
        return
    try:
        s3_client.meta.client.upload_file(
            CACHED_PREDS_SAVEFILE, bucket_name, CACHED_PREDS_SAVEFILE
        )
    except Exception:
        logger.error("Failed to update predictions to s3.")
def reset_cached_preds(saved_preds: dict):
    """
    Drop all cached predictions: clear the in-memory dict and delete both the
    local cache file and its S3 copy.

    BUG FIX: the original wrapped both deletions in a single try block, so a
    missing local file (FileNotFoundError from os.remove) prevented the S3
    object from ever being removed, resurrecting stale predictions on the
    next ``load_cached_preds``. The two deletions now fail independently.
    """
    bucket_name = S3_BUCKET
    saved_preds.clear()
    try:
        os.remove(CACHED_PREDS_SAVEFILE)
    except OSError:
        logger.error("cached preds files failed to delete.")
    try:
        s3_client.meta.client.delete_object(
            Bucket=bucket_name, Key=CACHED_PREDS_SAVEFILE
        )
    except Exception:
        logger.error("cached preds files failed to delete.")
async def infer_logs(logs_queue):
    """
    coroutine to get payload from logs_queue, call inference rest API and put predictions to elasticsearch.

    Main inference loop: pulls JSON payloads off ``logs_queue``, serves
    cached predictions where possible, scores new logs with the Nulog model
    (optionally delegating to a GPU service), and writes results to ES.
    A payload carrying {"bucket": S3_BUCKET} is treated as a model-reload
    signal rather than log data.
    """
    s3_setup(s3_client)
    # masked_log -> anomaly score; defaultdict(float) so misses read as 0.0
    saved_preds = defaultdict(float)
    load_cached_preds(saved_preds)
    nulog_predictor = NulogServer(MIN_LOG_TOKENS)
    if IS_CONTROL_PLANE_SERVICE:
        # control plane uses the locally extracted pretrained model
        nulog_predictor.load(save_path="control-plane-output/")
    else:
        nulog_predictor.download_from_s3()
        nulog_predictor.load()
    # smaller batches on the control plane (CPU-only path)
    max_payload_size = 128 if IS_CONTROL_PLANE_SERVICE else 512
    while True:
        payload = await logs_queue.get()
        if payload is None:
            continue
        start_time = time.time()
        decoded_payload = json.loads(payload)
        if "bucket" in decoded_payload and decoded_payload["bucket"] == S3_BUCKET:
            # signal to reload model
            if IS_CONTROL_PLANE_SERVICE:
                nulog_predictor.load(save_path="control-plane-output/")
            else:
                nulog_predictor.download_from_s3(decoded_payload)
                nulog_predictor.load()
            # a new model invalidates every cached score
            reset_cached_preds(saved_preds)
            continue
        df_payload = pd.read_json(payload, dtype={"_id": object})
        if (
            "gpu_service_result" in df_payload.columns
        ):  ## memorize predictions from GPU services.
            logger.info("saved predictions from GPU service.")
            save_cached_preds(
                dict(zip(df_payload["masked_log"], df_payload["nulog_confidence"])),
                saved_preds,
            )
        else:
            # process the payload in chunks of at most max_payload_size rows
            for i in range(0, len(df_payload), max_payload_size):
                df = df_payload[i : min(i + max_payload_size, len(df_payload))]
                is_log_cached = np.array([ml in saved_preds for ml in df["masked_log"]])
                # NOTE(review): these are boolean-mask slices of df; the
                # column assignments below may trigger pandas'
                # SettingWithCopyWarning — confirm intended.
                df_cached_logs, df_new_logs = df[is_log_cached], df[~is_log_cached]
                if len(df_cached_logs) > 0:
                    df_cached_logs["nulog_confidence"] = [
                        saved_preds[ml] for ml in df_cached_logs["masked_log"]
                    ]
                    await update_preds_to_es(df_cached_logs)
                    if IS_GPU_SERVICE:
                        logger.info("send cached results back.")
                        df_cached_logs["gpu_service_result"] = True
                        await nw.publish(
                            nats_subject="gpu_service_predictions",
                            payload_df=df_cached_logs.to_json().encode(),
                        )
                if len(df_new_logs) > 0:
                    # CPU service first offers the work to the GPU service
                    if not (IS_GPU_SERVICE or IS_CONTROL_PLANE_SERVICE):
                        try:  # try to post request to GPU service. response would be b"YES" if accepted, b"NO" for declined/timeout
                            response = await nw.request(
                                "gpu_service_inference",
                                df_new_logs.to_json().encode(),
                                timeout=1,
                            )
                            response = response.data.decode()
                        except ErrTimeout:
                            logger.warning("request to GPU service timeout.")
                            response = "NO"
                        logger.info(f"{response} for GPU service")
                    # NOTE: `response` is only bound on the CPU path; the
                    # or-chain short-circuits before reading it elsewhere.
                    if IS_GPU_SERVICE or IS_CONTROL_PLANE_SERVICE or response == "NO":
                        unique_masked_logs = list(df_new_logs["masked_log"].unique())
                        logger.info(
                            f" {len(unique_masked_logs)} unique logs to inference."
                        )
                        pred_scores_dict = nulog_predictor.predict(unique_masked_logs)
                        if pred_scores_dict is None:
                            logger.warning("fail to make predictions.")
                        else:
                            df_new_logs["nulog_confidence"] = [
                                pred_scores_dict[ml] for ml in df_new_logs["masked_log"]
                            ]
                            save_cached_preds(pred_scores_dict, saved_preds)
                            await update_preds_to_es(df_new_logs)
                            if IS_GPU_SERVICE:
                                logger.info("send new results back.")
                                df_new_logs["gpu_service_result"] = True
                                await nw.publish(
                                    nats_subject="gpu_service_predictions",
                                    payload_df=df_new_logs.to_json().encode(),
                                )
        logger.info(
            f"payload size :{len(df_payload)}. processed in {(time.time() - start_time)} second"
        )
        # drop references to the (potentially large) payload before collecting
        del decoded_payload
        del df_payload
        gc.collect()
async def init_nats():
    """Connect the module-level NatsWrapper ``nw`` to the NATS server."""
    logger.info("Attempting to connect to NATS")
    await nw.connect()
async def get_pretrain_model():
    """
    Download and extract the latest pretrained control-plane model when the
    published version differs from the local one.

    :return: True when a new model was downloaded and extracted, False when
        the model is already up to date or the version info cannot be fetched.
    """
    url = "https://opni-public.s3.us-east-2.amazonaws.com/pretrain-models/version.txt"
    try:
        # BUG FIX: close the HTTP response deterministically; the original
        # urlopen(url).read() left the connection to the garbage collector.
        with urllib.request.urlopen(url) as response:
            latest_version = response.read().decode("utf-8")
    except Exception as e:
        logger.error(e)
        logger.error("can't locate the version info from opni-public bucket")
        return False
    try:
        with open("version.txt") as fin:
            local_version = fin.read()
    except Exception as e:
        # no local version yet (first run) — force a download below
        logger.warning(e)
        local_version = "None"
    logger.info(
        f"latest model version: {latest_version}; local model version: {local_version}"
    )
    if latest_version == local_version:
        logger.info("model already up to date")
        return False
    # record the new version, then fetch and unpack the matching model zip
    urllib.request.urlretrieve(url, "version.txt")
    model_zip_file = f"control-plane-model-{latest_version}.zip"
    urllib.request.urlretrieve(
        f"https://opni-public.s3.us-east-2.amazonaws.com/pretrain-models/{model_zip_file}",
        model_zip_file,
    )
    with zipfile.ZipFile(model_zip_file, "r") as zip_ref:
        zip_ref.extractall("./")
    logger.info("update to latest model")
    return True
async def schedule_update_pretrain_model(logs_queue):
    """
    Background task: once a day, refresh the pretrained model and, when a new
    model was installed, enqueue a reload signal for the inference loop.
    """
    while True:
        await asyncio.sleep(86400)  # try to update after 24 hours
        update_status = await get_pretrain_model()
        if update_status:
            # BUG FIX: asyncio.Queue.put is a coroutine; the original called
            # it without awaiting, so the reload signal was never enqueued.
            await logs_queue.put(
                json.dumps({"bucket": S3_BUCKET})
            )  # send a signal to reload model
if __name__ == "__main__":
    # NOTE(review): the loop= parameter of asyncio.Queue is deprecated since
    # Python 3.8 and removed in 3.10 — this script targets an older runtime;
    # confirm before upgrading the interpreter.
    loop = asyncio.get_event_loop()
    logs_queue = asyncio.Queue(loop=loop)
    consumer_coroutine = consume_logs(logs_queue)
    inference_coroutine = infer_logs(logs_queue)

    # connect to NATS before starting any consumer/producer coroutine
    task = loop.create_task(init_nats())
    loop.run_until_complete(task)

    # control plane fetches its pretrained model up front
    if IS_CONTROL_PLANE_SERVICE:
        init_model_task = loop.create_task(get_pretrain_model())
        loop.run_until_complete(init_model_task)

    if IS_CONTROL_PLANE_SERVICE:
        # control plane: inference + consumption + daily model refresh
        loop.run_until_complete(
            asyncio.gather(
                inference_coroutine,
                consumer_coroutine,
                schedule_update_pretrain_model(logs_queue),
            )
        )
    elif IS_GPU_SERVICE:
        # GPU service additionally handles training signals and training runs
        job_queue = asyncio.Queue(loop=loop)
        signal_coroutine = consume_signal(job_queue, nw)
        training_coroutine = train_model(job_queue, nw)
        loop.run_until_complete(
            asyncio.gather(
                inference_coroutine,
                consumer_coroutine,
                signal_coroutine,
                training_coroutine,
            )
        )
    else:  # CPU SERVICE
        loop.run_until_complete(asyncio.gather(inference_coroutine, consumer_coroutine))
    try:
        loop.run_forever()
    finally:
        loop.close()
|
<gh_stars>0
import numpy as np
import tensorflow as tf
import Nn
from .base import Base
class MADDPG(Base):
    """
    Multi-Agent Deep Deterministic Policy Gradient agent (continuous actions
    only). Each agent owns its actor; the critic consumes the concatenated
    states and actions of all ``n`` agents.

    NOTE(review): several attributes used below (``a_counts``, ``episode``,
    ``device``, ``global_step``, ``update_target_net_weights``,
    ``generate_recorder``, ``recorder``) are presumably provided by ``Base``
    — confirm against the base class.
    """

    # NOTE(review): hidden_units uses a mutable dict as a default argument;
    # safe only as long as callers never mutate it — confirm.
    def __init__(self,
                 s_dim,
                 a_dim_or_list,
                 action_type,
                 base_dir=None,
                 gamma=0.99,
                 ployak=0.995,
                 actor_lr=5.0e-4,
                 critic_lr=1.0e-3,
                 max_episode=50000,
                 n=1,
                 i=0,
                 hidden_units={
                     'actor': [32, 32],
                     'q': [32, 32]
                 },
                 logger2file=False,
                 out_graph=False):
        """
        :param s_dim: per-agent state dimension.
        :param a_dim_or_list: action dimension(s), forwarded to Base.
        :param action_type: must be 'continuous'.
        :param gamma: discount factor.
        :param ployak: soft-update (polyak) coefficient for target networks.
        :param actor_lr/critic_lr: initial learning rates (polynomially decayed).
        :param n: number of agents; :param i: this agent's index.
        :param hidden_units: layer sizes for the actor and Q networks.
        """
        assert action_type == 'continuous', 'maddpg only support continuous action space'
        super().__init__(a_dim_or_list=a_dim_or_list, action_type=action_type, base_dir=base_dir)
        self.n = n
        self.i = i
        self.s_dim = s_dim
        self.a_dim_or_list = a_dim_or_list
        self.gamma = gamma
        self.max_episode = max_episode
        self.ployak = ployak
        # self.action_noise = Nn.NormalActionNoise(mu=np.zeros(self.a_counts), sigma=1 * np.ones(self.a_counts))
        # exploration noise; sigma decays with self.episode — assumes Base
        # initializes self.episode before this point (TODO confirm)
        self.action_noise = Nn.OrnsteinUhlenbeckActionNoise(mu=np.zeros(self.a_counts), sigma=0.2 * np.exp(-self.episode / 10) * np.ones(self.a_counts))
        # per-agent actor and its target copy
        self.actor_net = Nn.actor_dpg(self.s_dim, 0, self.a_counts, 'actor_net', hidden_units['actor'])
        self.actor_target_net = Nn.actor_dpg(self.s_dim, 0, self.a_counts, 'actor_target_net', hidden_units['actor'])
        # centralized critics over all n agents' states and actions
        self.q_net = Nn.critic_q_one((self.s_dim) * self.n, 0, (self.a_counts) * self.n, 'q_net', hidden_units['q'])
        self.q_target_net = Nn.critic_q_one((self.s_dim) * self.n, 0, (self.a_counts) * self.n, 'q_target_net', hidden_units['q'])
        # hard-copy online weights into the target networks at start
        self.update_target_net_weights(
            self.actor_target_net.weights + self.q_target_net.weights,
            self.actor_net.weights + self.q_net.weights
        )
        self.actor_lr = tf.keras.optimizers.schedules.PolynomialDecay(actor_lr, self.max_episode, 1e-10, power=1.0)
        self.critic_lr = tf.keras.optimizers.schedules.PolynomialDecay(critic_lr, self.max_episode, 1e-10, power=1.0)
        self.optimizer_critic = tf.keras.optimizers.Adam(learning_rate=self.critic_lr(self.episode))
        self.optimizer_actor = tf.keras.optimizers.Adam(learning_rate=self.actor_lr(self.episode))
        self.generate_recorder(
            logger2file=logger2file,
            model=self
        )
        self.recorder.logger.info('''
   xxxx    xxx        xx            xxxxxxx        xxxxxxx        xxxxxxxx         xxxxxx
    xxx    xx         xxx            xxx  xx        xxx  xx        xx  xxx        xxx  xx
    xxx   xxx        xxxx             xx  x          xx  x         xx   xxx       xx    x
    xxx  xxx         x xx             x xx x         x xx x        xx   xxx       xx
    xxxx x x         x  xx    xx      x xxx x        x xxx x       xxxxxx x       xxxxx
    x xxxx x        xxxxxx    x       x  xx x        x  xx x       xx  xx          xxx
    x  xx  x        x   xx            x   x x        x   x x       xx  xx     x      x
    x      x       xx   xxx   x       x   xxx        x   xxx       xx   xx    xx x   xx
   xxxx  xxxxxx  xxx   xxxxx        xxxxx x        xxxxxx x       xxxxx xxxxx xxxxxx
                                                                       xx
        ''')
        self.recorder.logger.info(self.action_noise)

    def choose_action(self, s):
        """Return a noisy (exploration) action for state batch ``s``."""
        return self._get_action(s)[-1].numpy()

    def choose_inference_action(self, s):
        """Return the deterministic (noise-free) action for state batch ``s``."""
        return self._get_action(s)[0].numpy()

    def get_target_action(self, s):
        """Return a noisy action from the TARGET actor for state batch ``s``."""
        return self._get_target_action(s)[-1].numpy()

    @tf.function
    def _get_action(self, vector_input):
        """Compute (mu, clipped noisy action) from the online actor."""
        with tf.device(self.device):
            mu = self.actor_net(vector_input, None)
            return mu, tf.clip_by_value(mu + self.action_noise(), -1, 1)

    @tf.function
    def _get_target_action(self, vector_input):
        """Compute (target_mu, clipped noisy action) from the target actor."""
        with tf.device(self.device):
            target_mu = self.actor_target_net(vector_input, None)
            return target_mu, tf.clip_by_value(target_mu + self.action_noise(), -1, 1)

    def learn(self, episode, ap, al, ss, ss_, aa, aa_, s, r):
        """
        One training step followed by a polyak soft-update of the target nets
        and tensorboard logging of losses and learning rates.

        :param episode: current episode (drives the lr schedules).
        :param ap/al: other agents' actions placed before/after this agent's.
        :param ss/ss_: joint states (current/next); aa/aa_: joint actions.
        :param s: this agent's state batch; r: rewards.
        """
        self.episode = episode
        actor_loss, q_loss = self.train(ap, al, ss, ss_, aa, aa_, s, r)
        self.update_target_net_weights(
            self.actor_target_net.weights + self.q_target_net.weights,
            self.actor_net.weights + self.q_net.weights,
            self.ployak)
        tf.summary.experimental.set_step(self.global_step)
        tf.summary.scalar('LOSS/actor_loss', actor_loss)
        tf.summary.scalar('LOSS/critic_loss', q_loss)
        tf.summary.scalar('LEARNING_RATE/actor_lr', self.actor_lr(self.episode))
        tf.summary.scalar('LEARNING_RATE/critic_lr', self.critic_lr(self.episode))
        self.recorder.writer.flush()

    def get_max_episode(self):
        """
        get the max episode of this training model.
        """
        return self.max_episode

    @tf.function(experimental_relax_shapes=True)
    def train(self, q_actor_a_previous, q_actor_a_later, ss, ss_, aa, aa_, s, r):
        """
        One gradient step for critic then actor (separate tapes).

        Critic: TD(0) target r + gamma * Q_target(ss_, aa_).
        Actor: maximize Q(ss, [a_prev, mu(s), a_later]).
        :return: (actor_loss, q_loss).
        """
        with tf.device(self.device):
            with tf.GradientTape() as tape:
                q = self.q_net(ss, None, aa)
                q_target = self.q_target_net(ss_, None, aa_)
                dc_r = tf.stop_gradient(r + self.gamma * q_target)
                td_error = q - dc_r
                q_loss = 0.5 * tf.reduce_mean(tf.square(td_error))
            q_grads = tape.gradient(q_loss, self.q_net.trainable_variables)
            self.optimizer_critic.apply_gradients(
                zip(q_grads, self.q_net.trainable_variables)
            )
            with tf.GradientTape() as tape:
                mu = self.actor_net(s, None)
                # splice this agent's fresh action between the others' actions
                mumu = tf.concat((q_actor_a_previous, mu, q_actor_a_later), axis=-1)
                q_actor = self.q_net(ss, None, mumu)
                actor_loss = -tf.reduce_mean(q_actor)
            actor_grads = tape.gradient(actor_loss, self.actor_net.trainable_variables)
            self.optimizer_actor.apply_gradients(
                zip(actor_grads, self.actor_net.trainable_variables)
            )
            self.global_step.assign_add(1)
            return actor_loss, q_loss

    @tf.function(experimental_relax_shapes=True)
    def train_persistent(self, q_actor_a_previous, q_actor_a_later, ss, ss_, aa, aa_, s, r):
        """
        Same update as :meth:`train` but computes both losses under one
        persistent tape before applying either gradient.
        """
        with tf.device(self.device):
            with tf.GradientTape(persistent=True) as tape:
                q = self.q_net(ss, None, aa)
                q_target = self.q_target_net(ss_, None, aa_)
                dc_r = tf.stop_gradient(r + self.gamma * q_target)
                td_error = q - dc_r
                q_loss = 0.5 * tf.reduce_mean(tf.square(td_error))
                mu = self.actor_net(s, None)
                mumu = tf.concat((q_actor_a_previous, mu, q_actor_a_later), axis=-1)
                q_actor = self.q_net(ss, None, mumu)
                actor_loss = -tf.reduce_mean(q_actor)
            q_grads = tape.gradient(q_loss, self.q_net.trainable_variables)
            self.optimizer_critic.apply_gradients(
                zip(q_grads, self.q_net.trainable_variables)
            )
            actor_grads = tape.gradient(actor_loss, self.actor_net.trainable_variables)
            self.optimizer_actor.apply_gradients(
                zip(actor_grads, self.actor_net.trainable_variables)
            )
            self.global_step.assign_add(1)
            return actor_loss, q_loss
|
<filename>pusion/core/dempster_shafer_combiner.py
from pusion.core.decision_templates_combiner import *
class DempsterShaferCombiner(TrainableCombiner):
    """
    The :class:`DempsterShaferCombiner` (DS) fuses decision outputs by means of the Dempster Shafer evidence theory
    referenced by Polikar :footcite:`polikar2006ensemble` and Ghosh et al. :footcite:`ghosh2011evaluation`.
    DS involves computing the `proximity` and `belief` values per classifier and class, depending on a sample.
    Then, the total class support is calculated using the Dempster's rule as the product of belief values across all
    classifiers to each class, respectively. The class with the highest product is considered as a fused decision.
    DS shares the same training procedure with the :class:`DecisionTemplatesCombiner`.

    .. footbibliography::
    """

    _SUPPORTED_PAC = [
        (Problem.MULTI_CLASS, AssignmentType.CRISP, CoverageType.REDUNDANT),
        (Problem.MULTI_CLASS, AssignmentType.CONTINUOUS, CoverageType.REDUNDANT),
        (Problem.MULTI_LABEL, AssignmentType.CRISP, CoverageType.REDUNDANT),
        (Problem.MULTI_LABEL, AssignmentType.CONTINUOUS, CoverageType.REDUNDANT),
    ]

    SHORT_NAME = 'DS'

    def __init__(self):
        TrainableCombiner.__init__(self)
        # mean decision profiles per distinct label, set by train()
        self.decision_templates = None
        # distinct class assignments aligned with decision_templates
        self.distinct_labels = None

    def train(self, decision_tensor, true_assignments):
        """
        Train the Dempster Shafer Combiner model by precalculating decision templates from given decision outputs and
        true class assignments. Both continuous and crisp classification outputs are supported. This procedure involves
        calculations mean decision profiles (decision templates) for each true class assignment.

        :param decision_tensor: `numpy.array` of shape `(n_classifiers, n_samples, n_classes)`.
                Tensor of either crisp or continuous decision outputs by different classifiers per sample.
        :param true_assignments: `numpy.array` of shape `(n_samples, n_classes)`.
                Matrix of either crisp or continuous class assignments which are considered true for each sample during
                the training procedure.
        """
        # Training is delegated: DS uses exactly the decision templates
        # computed by the DecisionTemplatesCombiner.
        dt_combiner = DecisionTemplatesCombiner()
        dt_combiner.train(decision_tensor, true_assignments)
        self.decision_templates = dt_combiner.get_decision_templates()
        self.distinct_labels = dt_combiner.get_distinct_labels()

    def combine(self, decision_tensor):
        """
        Combine decision outputs by using the Dempster Shafer method.
        Both continuous and crisp classification outputs are supported. Combining requires a trained
        :class:`DempsterShaferCombiner`.
        This procedure involves computing the proximity, the belief values, and the total class support using the
        Dempster's rule.

        :param decision_tensor: `numpy.array` of shape `(n_classifiers, n_samples, n_classes)`.
                Tensor of either crisp or continuous decision outputs by different classifiers per sample.
        :return: A matrix (`numpy.array`) of either crisp or continuous class assignments which represents fused
                decisions obtained by the maximum class support. Axis 0 represents samples and axis 1 the class
                assignments which are aligned with axis 2 in ``decision_tensor`` input tensor.
        """
        decision_profiles = decision_tensor_to_decision_profiles(decision_tensor)
        fused_decisions = np.zeros_like(decision_tensor[0])
        for i in range(len(decision_profiles)):
            dp = decision_profiles[i]
            n_label = len(self.decision_templates)
            n_classifiers = len(decision_tensor)
            # Compute proximity: inverse distance to template j, normalized
            # over all templates (per classifier k).
            prox = np.empty((n_label, n_classifiers))  # Phi_{j,k}
            for j in range(n_label):
                dt = self.decision_templates[j]
                for k in range(n_classifiers):
                    d = 0.0
                    for j_ in range(n_label):
                        d = d + (1 + np.linalg.norm(self.decision_templates[j_][k] - dp[k]))**(-1)
                    prox[j, k] = (1 + np.linalg.norm(dt[k] - dp[k]))**(-1) / d
            # Compute belief
            bel = np.empty((n_label, n_classifiers))  # bel_{j,k}
            for j in range(n_label):
                for k in range(n_classifiers):
                    prod = 1.0
                    for j_ in range(n_label):
                        if j_ != j:
                            prod = prod * (1 - prox[j_, k])
                    # NOTE(review): denominator can reach 0 when prox==1 and
                    # prod==1 (degenerate single-template case) — confirm
                    # inputs exclude this.
                    bel[j, k] = prox[j, k] * prod / (1 - prox[j, k] * (1 - prod))
            # Compute support for each label (Dempster's rule):
            # product of beliefs across classifiers.
            mu = np.zeros(n_label)
            for j in range(n_label):
                prod = 1.0
                for k in range(n_classifiers):
                    prod = prod * bel[j, k]
                mu[j] = prod
            # normalization
            mu = mu / np.sum(mu)
            # fused decision = label assignment with maximum support
            fused_decisions[i] = self.distinct_labels[np.argmax(mu)]
        return fused_decisions
class CRDempsterShaferCombiner(DempsterShaferCombiner):
    """
    A :class:`DempsterShaferCombiner` variant that additionally supports
    complementary-redundant decision outputs.  Classifier-specific outputs
    are padded into one uniform tensor, with every missing class assignment
    represented by the constant -1.  A coverage must be set via the inherited
    :meth:`set_coverage` method before :meth:`train` or :meth:`combine` can
    be used.
    """
    _SUPPORTED_PAC = [
        (Problem.MULTI_CLASS, AssignmentType.CRISP, CoverageType.COMPLEMENTARY_REDUNDANT),
        (Problem.MULTI_CLASS, AssignmentType.CONTINUOUS, CoverageType.COMPLEMENTARY_REDUNDANT),
    ]

    def __init__(self):
        super().__init__()

    def train(self, decision_outputs, true_assignments):
        """
        Train the Dempster Shafer Combiner model by precalculating decision
        templates (mean decision profiles per true class) from the given
        decision outputs and true assignments.  Both continuous and crisp
        classification outputs are supported.

        :param decision_outputs: `list` of `numpy.array` matrices, each of shape
            `(n_samples, n_classes')`, where `n_classes'` is classifier-specific
            and described by the coverage.  Each matrix corresponds to one of
            `n_classifiers` classifiers and contains either crisp or continuous
            decision outputs per sample.
        :param true_assignments: `numpy.array` of shape `(n_samples, n_classes)`.
            Matrix of either crisp or continuous class assignments considered
            true for each sample during training.
        """
        uniform_tensor = self.__transform_to_uniform_decision_tensor(decision_outputs, self.coverage)
        super().train(uniform_tensor, true_assignments)

    def combine(self, decision_outputs):
        """
        Combine decision outputs by using the Dempster Shafer method on the
        padded (uniform) tensor.  Both continuous and crisp classification
        outputs are supported; combining requires a trained
        :class:`DempsterShaferCombiner`.

        :param decision_outputs: `list` of `numpy.array` matrices, each of shape
            `(n_samples, n_classes')`, where `n_classes'` is classifier-specific
            and described by the coverage.
        :return: A matrix (`numpy.array`) of crisp or continuous class
            assignments representing fused decisions.  Axis 0 represents
            samples and axis 1 the class labels.
        """
        uniform_tensor = self.__transform_to_uniform_decision_tensor(decision_outputs, self.coverage)
        return super().combine(uniform_tensor)

    @staticmethod
    def __transform_to_uniform_decision_tensor(decision_outputs, coverage):
        # Pad classifier-specific outputs into one dense tensor; classes a
        # classifier does not cover keep the fill value -1.
        n_classifiers = len(decision_outputs)
        n_samples = len(decision_outputs[0])
        n_classes = len(np.unique(np.concatenate(coverage)))
        padded = np.full((n_classifiers, n_samples, n_classes), -1.0)
        for clf in range(n_classifiers):
            padded[clf, :, coverage[clf]] = decision_outputs[clf].T
        return padded
|
<gh_stars>1000+
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file convert.py is referred and derived from project NetworkX,
#
# https://github.com/networkx/networkx/blob/master/networkx/convert.py
#
# which has the following license:
#
# Copyright (C) 2004-2020, NetworkX Developers
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# All rights reserved.
#
# This file is part of NetworkX.
#
# NetworkX is distributed under a BSD license; see LICENSE.txt for more
# information.
#
import warnings
import networkx.convert
from networkx.convert import from_dict_of_dicts
from networkx.convert import from_dict_of_lists
from networkx.convert import from_edgelist
from graphscope import nx
from graphscope.framework.dag_utils import arrow_to_dynamic
from graphscope.nx.utils.compat import import_as_graphscope_nx
import_as_graphscope_nx(networkx.convert)
def to_nx_graph(data, create_using=None, multigraph_input=False):  # noqa: C901
    """Make a graph from a known data structure.
    The preferred way to call this is automatically
    from the class constructor
    >>> d = {0: {1: {'weight':1}}} # dict-of-dicts single edge (0,1)
    >>> G = nx.Graph(d)
    instead of the equivalent
    >>> G = nx.from_dict_of_dicts(d)
    Parameters
    ----------
    data : object to be converted
    Current known types are:
    any NetworkX graph
    dict-of-dicts
    dict-of-lists
    container (ie set, list, tuple, iterator) of edges
    Pandas DataFrame (row per edge)
    numpy matrix
    numpy ndarray
    scipy sparse matrix
    create_using : nx graph constructor, optional (default=nx.Graph)
    Graph type to create. If graph instance, then cleared before populated.
    multigraph_input : bool (default False)
    If True and data is a dict_of_dicts,
    try to create a multigraph assuming dict_of_dict_of_lists.
    If data and create_using are both multigraphs then create
    a multigraph from a multigraph.
    """
    # The candidate input types are probed in order; the first recognized
    # representation wins.
    # networkx graph or graphscope.nx graph
    if hasattr(data, "adj"):
        try:
            result = from_dict_of_dicts(
                data.adj,
                create_using=create_using,
                multigraph_input=data.is_multigraph(),
            )
            if hasattr(data, "graph"):  # data.graph should be dict-like
                result.graph.update(data.graph)
            if hasattr(data, "nodes"):  # data.nodes should be dict-like
                # Copy node attributes as well as the adjacency structure.
                result.add_nodes_from(data.nodes.items())
            return result
        except Exception as e:
            raise nx.NetworkXError("Input is not a correct NetworkX-like graph.") from e
    # dict of dicts/lists
    if isinstance(data, dict):
        try:
            return from_dict_of_dicts(
                data, create_using=create_using, multigraph_input=multigraph_input
            )
        except Exception:
            # Fall back to the dict-of-lists interpretation before giving up.
            try:
                return from_dict_of_lists(data, create_using=create_using)
            except Exception as e:
                raise TypeError("Input is not known type.") from e
    # list or generator of edges
    if isinstance(data, (list, tuple)) or any(
        hasattr(data, attr) for attr in ["_adjdict", "next", "__next__"]
    ):
        try:
            return from_edgelist(data, create_using=create_using)
        except Exception as e:
            raise nx.NetworkXError("Input is not a valid edge list") from e
    # Pandas DataFrame
    try:
        import pandas as pd
        if isinstance(data, pd.DataFrame):
            # A square frame is treated as an adjacency matrix, anything
            # else as an edge list.
            if data.shape[0] == data.shape[1]:
                try:
                    return nx.from_pandas_adjacency(data, create_using=create_using)
                except Exception as e:
                    msg = "Input is not a correct Pandas DataFrame adjacency matrix."
                    raise nx.NetworkXError(msg) from e
            else:
                try:
                    return nx.from_pandas_edgelist(
                        data, edge_attr=True, create_using=create_using
                    )
                except Exception as e:
                    msg = "Input is not a correct Pandas DataFrame edge-list."
                    raise nx.NetworkXError(msg) from e
    except ImportError:
        # pandas is optional; continue with the remaining candidate types.
        msg = "pandas not found, skipping conversion test."
        warnings.warn(msg, ImportWarning)
    # numpy matrix or ndarray
    try:
        import numpy
        if isinstance(data, (numpy.matrix, numpy.ndarray)):
            try:
                return nx.from_numpy_matrix(data, create_using=create_using)
            except Exception as e:
                raise nx.NetworkXError(
                    "Input is not a correct numpy matrix or array."
                ) from e
    except ImportError:
        warnings.warn("numpy not found, skipping conversion test.", ImportWarning)
    # scipy sparse matrix - any format
    try:
        import scipy
        # All scipy sparse matrix classes expose a `format` attribute.
        if hasattr(data, "format"):
            try:
                return nx.from_scipy_sparse_matrix(data, create_using=create_using)
            except Exception as e:
                raise nx.NetworkXError(
                    "Input is not a correct scipy sparse matrix type."
                ) from e
    except ImportError:
        warnings.warn("scipy not found, skipping conversion test.", ImportWarning)
    # Nothing matched.
    raise nx.NetworkXError("Input is not a known data type for conversion.")
def to_networkx_graph(nx_graph):
    """Convert a graphscope.nx graph into the matching plain NetworkX graph.

    Directedness and multigraph-ness of the input select the NetworkX class;
    nodes, edges (with data, and keys for multigraphs), and graph attributes
    are all copied over.
    """
    import networkx

    directed = nx_graph.is_directed()
    multi = nx_graph.is_multigraph()
    # Pick the NetworkX class matching the input graph's flavor.
    graph_cls = {
        (False, False): networkx.Graph,
        (True, False): networkx.DiGraph,
        (False, True): networkx.MultiGraph,
        (True, True): networkx.MultiDiGraph,
    }[(directed, multi)]
    g = graph_cls()
    # Multigraphs need the edge keys to round-trip parallel edges.
    edges = nx_graph.edges.data(keys=True) if multi else nx_graph.edges.data()
    g.update(edges, nx_graph.nodes.data())
    g.graph.update(nx_graph.graph)
    return g
|
<filename>examples/examine_local_projects.py<gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2014 <NAME> ( http://krause-software.com/ ).
# You are free to use this code under the MIT license:
# http://opensource.org/licenses/MIT
"""Test local project.pbxproj files.
This script basically runs a lint over many Xcode project files and
reports every file when the unparse of the parse look different
then the original project file contents.
To use it first run
$ test_local_projects.py --find
which creates local-projects.txt in the tests directory containing
filenames that look like valid project files.
Then run
$ test_local_projects.py --test
which reports the findings about the filenames listed in local-projects.txt.
"""
from __future__ import print_function
import sys
import argparse
import time
import codecs
import types
import os
from os.path import abspath, dirname
from io import StringIO
import multiprocessing
import traceback
import errno
from collections import namedtuple
import utils
# Set up the Python path so we find the xcodeprojer module in the parent directory
# relative to this file.
sys.path.insert(1, dirname(dirname(abspath(__file__))))
import xcodeprojer
from xcodeprojer import bytestr, unistr
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
LISTFILENAME = 'local-projects.txt'
IGNOREDFILENAME = 'ignored-local-projects.txt'
PBXPROJNAME = 'project.pbxproj'
ExcInfo = namedtuple('ExcInfo', 'exc_info')
LintResult = namedtuple('LintResult', ['filename', 'success', 'text', 'parsetime', 'unparsetime', 'numbytes'])
if PY2:
    # Python 2 needs the three-argument raise statement, which is a syntax
    # error under Python 3, so it is hidden inside an exec() string.
    exec ('def reraise(tp, value, tb):\n raise tp, value, tb')
else:
    def reraise(tp, value, tb):
        # Re-raise `value` with an explicit traceback (Python 3 form).
        raise value.with_traceback(tb)
def rel(filename):
    """Return *filename* resolved relative to this script's directory."""
    base_dir = dirname(abspath(__file__))
    return os.path.join(base_dir, filename)
def write(s='', end='\n'):
    """Write *s* followed by *end* to stdout, UTF-8 safe on both Pythons.

    BUG FIX: the old code always wrote UTF-8-encoded bytes.  Under Python 3
    sys.stdout is a text stream (main() wraps it with
    ``codecs.getwriter('utf8')``), and a codecs StreamWriter rejects bytes
    with a TypeError.  Only Python 2 file objects want encoded byte strings.
    """
    text = unistr(s) + unistr(end)
    if PY2:
        # Python 2 file objects expect encoded byte strings.
        text = text.encode('utf-8')
    sys.stdout.write(text)
def handle_file(pbxfilename, parsertype='normal'):
    """Parse one project.pbxproj file and unparse it again.

    Returns a LintResult whose ``text`` is either the re-serialized project
    (on success) or the parser's status report (on failure).  The parse and
    unparse phases are timed separately for the --reportstats summary.
    """
    try:
        with open(bytestr(pbxfilename), 'rb') as f:
            xcodeproj = f.read()
        t0 = time.time()
        root, parseinfo = xcodeprojer.parse(xcodeproj, dictionarytype=dict, parsertype=parsertype)
        # Collect the parser's human-readable status into a buffer so it can
        # be returned to the caller instead of printed here.
        buf = StringIO()
        xcodeprojer.report_parse_status(root, parseinfo, filename=pbxfilename, fp=buf)
        if root is None:
            # Parse failed: return the status text with zero timings.
            return LintResult(pbxfilename, False, buf.getvalue(), 0, 0, len(xcodeproj))
        t1 = time.time()
        projname = xcodeprojer.projectname_for_path(pbxfilename)
        text = xcodeprojer.unparse(root, format='xcode', projectname=projname, parseinfo=parseinfo)
        t2 = time.time()
        return LintResult(pbxfilename, True, text, t1-t0, t2-t1, len(xcodeproj))
    except Exception as e:
        # Attach the formatted traceback so the parent process can display it
        # when this runs inside a multiprocessing worker (see run_lint).
        e.traceback = traceback.format_exc()
        raise
def filenames_to_examine():
    """Return the project filenames from the list file, minus ignored ones.

    The ignore file is optional; if it is missing or unreadable nothing is
    ignored.  BUG FIX: the old code called ``codecs.open(...).read()`` and
    never closed the handle — use a ``with`` block so it is released
    deterministically.
    """
    xcodeprojects = projects_from_list()
    ignored_projects = set()
    try:
        with codecs.open(rel(IGNOREDFILENAME), 'r', encoding='utf-8') as f:
            ignored_projects = set(f.read().strip().splitlines())
    except IOError:
        # No ignore file simply means: ignore nothing.
        pass
    return [x for x in xcodeprojects if x not in ignored_projects]
def run_lint(args, filtered_idx_filenames):
    """Yield a LintResult (or ExcInfo on failure) for every filename.

    Runs sequentially when --disable-parallel is given, otherwise fans the
    work out to a multiprocessing pool.  Exceptions are wrapped in ExcInfo
    tuples so the consumer can re-raise or report them.
    """
    use_pool = not args.disable_parallel
    if not use_pool:
        for pbxfilename in filtered_idx_filenames:
            try:
                yield handle_file(pbxfilename, parsertype=args.parser)
            except Exception:
                # Wrap so the caller can re-raise with the original traceback.
                yield ExcInfo(sys.exc_info())
    if use_pool:
        pool = multiprocessing.Pool(initializer=utils.per_process_init)
        try:
            # Submit everything up front, then drain results in order.
            async_results = [pool.apply_async(handle_file, [x], {'parsertype': args.parser}) for x in filtered_idx_filenames]
            pool.close()
            while async_results:
                try:
                    asyncres = async_results.pop(0)
                    yield asyncres.get()
                except (KeyboardInterrupt, GeneratorExit):
                    raise
                except Exception as e:
                    t, v, tb = sys.exc_info()
                    try:
                        # Report the textual traceback of the subprocess rather than
                        # this local exception that was triggered by the other side.
                        tb = e.traceback
                    except AttributeError:
                        pass
                    yield ExcInfo((t, v, tb))
        except (KeyboardInterrupt, GeneratorExit):
            # Stop the workers immediately when the consumer aborts.
            pool.terminate()
        finally:
            pool.join()
def examine_projects(args):
    """Lint the configured slice of project files and report the outcome.

    Honors --start-index and --max-files, prints a running index per file,
    and accumulates parse/unparse statistics for --reportstats.
    """
    start_index = args.start_index
    max_files = args.max_files
    filenames = filenames_to_examine()
    # Restrict to the requested window of the full list.
    filenames = filenames[start_index:]
    if max_files is not None:
        filenames = filenames[:max_files]
    total_numbytes = 0
    total_parsetime = 0
    total_unparsetime = 0
    num_files = 0
    num_successes = 0
    t0 = time.time()
    for idx, result in enumerate(run_lint(args, filenames)):
        num_files += 1
        # The index shown to the user is relative to the complete list file.
        globalidx = start_index + idx
        try:
            tbtext = None
            if isinstance(result, ExcInfo):
                t, v, tb = result.exc_info
                if not isinstance(tb, types.TracebackType):
                    # A pool worker sent a formatted traceback *string*,
                    # which cannot be re-raised directly; keep it for printing.
                    tbtext = tb
                    tb = None
                reraise(t, v, tb)
            sys.stdout.write("%d " % globalidx)
            sys.stdout.flush()
            handle_result(args, result.success, result.text, result.filename)
            if result.success:
                num_successes += 1
                if args.reportstats:
                    total_numbytes += result.numbytes
                    total_parsetime += result.parsetime
                    total_unparsetime += result.unparsetime
        except IOError as e:
            write('\n%d "%s" failed: %s' % (globalidx, unistr(filenames[idx]), repr(e)))
        except Exception as e:
            write('\n%d "%s" failed:' % (globalidx, unistr(filenames[idx])))
            if tbtext is not None:
                print(tbtext)
            else:
                traceback.print_exc()
    if args.reportstats and num_successes > 0:
        tdelta = time.time() - t0
        print("\nparse rate:%9d Bps unparse rate:%9d Bps (per core)" % (total_numbytes / total_parsetime, total_numbytes / total_unparsetime))
        print("Processed %d Bps, avg. time per project: %f" % (total_numbytes / tdelta, tdelta / num_successes))
    if args.reportstats:
        print("Processed %d project files of which %d were unsuccessful" % (num_files, num_files - num_successes))
def handle_result(args, success, text, filename):
    """Compare the re-serialized project with the original and show a diff.

    On parse failure the status text is printed.  Files that are not in
    plist format, or that disappeared since the list was built, are skipped.
    """
    if not success:
        print()
        print(text)
        return
    try:
        with open(bytestr(filename), 'rb') as f:
            origtext = f.read()
        if origtext[:1] not in [b'/', b'{']:
            # Only handle files in plist format.
            return
    except IOError as e:
        if e.errno not in (errno.ENOTDIR, errno.ENOENT):
            raise
        # The file vanished since the list was built; ignore it.
        return
    # NOTE(review): origtext is bytes while text comes from
    # xcodeprojer.unparse; this comparison assumes unparse returns bytes
    # here — confirm against xcodeprojer.
    if text == origtext:
        return
    xcodeprojer.print_diff(origtext, text, difftype=args.diff, filename=filename)
def find_projects(args, parser):
    """Search under args.find for project.pbxproj files and save the list.

    Every hit prints a progress dot; the resulting paths are written one per
    line into the list file used by the --test action.
    """
    root = args.find
    filenames = []
    for name in xcodeprojer.find_projectfiles(root):
        filenames.append(name)
        # This might take a while, report progress
        sys.stdout.write('.')
        sys.stdout.flush()
    print()
    if not filenames:
        print('No project.pbxproj files found in "%s"' % root)
        return
    fn = rel(LISTFILENAME)
    with open(fn, 'wb') as f:
        f.write(bytestr('\n'.join(filenames) + '\n'))
    print('\nWrote %d filename to "%s"' % (len(filenames), fn))
def projects_from_list():
    """Read the list file and return one project filename per line."""
    with codecs.open(rel(LISTFILENAME), 'r', encoding='utf-8') as f:
        content = f.read()
    return content.splitlines()
def examine_filelist(args, parser):
    """Entry point for --test: examine every file named in the list file.

    If the list file is missing or empty, print instructions pointing the
    user at --find instead of failing.
    """
    filename = rel(LISTFILENAME)
    filelist = []
    try:
        filelist = projects_from_list()
        errmsg = 'does not contain any filenames.'
    except IOError:
        errmsg = 'does not exist or is not readable.'
    if len(filelist) < 1:
        # Nothing to examine — tell the user how to build the list first.
        print('"%s" %s\n'
              'If you could run something like:\n'
              ' %s --find /some/path/with/project/files/beneath\n'
              'before running the test, so we know about some project files to examine,'
              ' that would be great.' % (filename, errmsg, sys.argv[0]))
        return
    t0 = time.time()
    examine_projects(args)
    t1 = time.time() - t0
    print()
    if args.reportstats:
        print("Elapsed time: %f seconds" % t1)
def main():
    """Parse command-line arguments and run exactly one chosen action."""
    parser = argparse.ArgumentParser(description='Find and test local project files.')
    parser.add_argument('--parser', choices=['normal', 'fast', 'classic'], default='normal')
    parser.add_argument('-f', '--find', metavar='PATH', help='find local project files')
    parser.add_argument('-t', '--test', action='store_true', help='run all tests')
    parser.add_argument('-s', '--start-index', action='store', type=int, dest='start_index', default=0)
    parser.add_argument('-n', '--max-files', action='store', type=int, dest='max_files', help='maximum number of files to process')
    parser.add_argument('-d', '--disable-parallel', action='store_true', help='do not run tests in parallel')
    parser.add_argument('--diff', choices=['unified', 'html', 'opendiff'], default='opendiff',
                        help='how to display the diffs')
    parser.add_argument('--reportstats', action='store_true', help='print performance statistics')
    parser.add_argument('--profile', action='store_true', help='run everything through the profiler')
    args = parser.parse_args()
    actions = 'find test'.split()
    # Exactly one of the mutually exclusive actions must be selected.
    num_actions = sum(1 for act in actions if getattr(args, act))
    if num_actions != 1:
        parser.error('Please specify exactly one of the options %s.' % ', '.join('--' + x for x in actions))
    if args.profile:
        print('Profiling...')
        utils.profile('call_command(args, parser)', locals(), globals())
    else:
        call_command(args, parser)
def call_command(args, parser):
    """Dispatch to the action selected on the command line."""
    if args.find:
        find_projects(args, parser)
        return
    if args.test:
        examine_filelist(args, parser)
        return
    # main() should have rejected this combination already.
    parser.error('Something is wrong with the options or the handling of them.')
if __name__ == '__main__':
    if PY3:
        # Wrap the standard streams so UTF-8 output works even when the
        # environment selects a different default encoding.
        sys.stdout = codecs.getwriter('utf8')(sys.stdout.buffer)
        sys.stderr = codecs.getwriter('utf8')(sys.stderr.buffer)
    main()
|
from unittest.mock import ANY
from django.urls import reverse
import pytest
from rest_framework import status
from apps.cars.models import Car
from apps.cars.tests.factories import CarFactory, RateFactory
pytestmark = pytest.mark.django_db
class TestCarListView:
    """Integration tests for the car list/create endpoint ("cars")."""

    def setup(self):
        # Five cars total: four random ones plus a known Passat used by the
        # duplicate-creation test below.
        CarFactory.create_batch(4)
        CarFactory.create(make="Volkswagen", model="Passat")
        self.endpoint = reverse("cars")

    def test_list(self, api_client):
        # GET returns every seeded car.
        response = api_client.get(self.endpoint)
        assert response.status_code == status.HTTP_200_OK
        assert len(response.data) == 5

    def test_create_ok(self, api_client):
        # A valid make/model pair creates a new car with no rating yet.
        response = api_client.post(
            self.endpoint,
            data={"make": "Volkswagen", "model": "Golf"},
            format="json",
        )
        assert response.status_code == status.HTTP_201_CREATED
        assert response.json() == {
            "id": ANY,
            "make": "Volkswagen",
            "model": "Golf",
            "avg_rating": None,
        }
        assert Car.objects.all().count() == 6

    def test_create_exists(self, api_client):
        # Posting an already-seeded make/model pair is rejected.
        response = api_client.post(
            self.endpoint,
            data={"make": "Volkswagen", "model": "Passat"},
            format="json",
        )
        assert response.status_code == status.HTTP_400_BAD_REQUEST
        assert Car.objects.all().count() == 5

    def test_wrong_make(self, api_client):
        response = api_client.post(
            self.endpoint,
            data={"make": "A", "model": "Golf"},
            format="json",
        )
        assert response.status_code == status.HTTP_400_BAD_REQUEST
        # NOTE(review): "does't" mirrors the API's actual error message (sic).
        assert response.json() == {"make": ["Make 'A' does't exist."]}

    def test_wrong_model(self, api_client):
        response = api_client.post(
            self.endpoint,
            data={"make": "Volkswagen", "model": "B"},
            format="json",
        )
        assert response.status_code == status.HTTP_400_BAD_REQUEST
        assert response.json() == {"model": ["Model 'B' does't exist."]}

    def test_wrong_make_and_model(self, api_client):
        # When both are invalid, only the make error is reported.
        response = api_client.post(
            self.endpoint,
            data={"make": "A", "model": "B"},
            format="json",
        )
        assert response.status_code == status.HTTP_400_BAD_REQUEST
        assert response.json() == {
            "make": ["Make 'A' does't exist."],
        }
class TestCarDestroyView:
    """Tests for the DELETE handler of the car detail endpoint."""

    def setup(self):
        # Seed four cars and target the first one via the detail route.
        CarFactory.create_batch(4)
        first_car = Car.objects.first()
        self.endpoint = reverse("cars_detail", kwargs={"pk": first_car.id})

    def test_delete(self, api_client):
        # Deleting one car leaves the other three.
        response = api_client.delete(self.endpoint)
        assert response.status_code == status.HTTP_204_NO_CONTENT
        assert Car.objects.count() == 3
class TestPopularCarsListView:
    """Tests for the popularity ranking endpoint ("popular")."""

    def setup(self):
        self.endpoint = reverse("popular")
        # Three cars with 3, 5 and 0 rates respectively, so the expected
        # ordering (most rated first) is Mercedes, BMW, Volkswagen.
        car_1 = CarFactory.create(make="BMW")
        car_2 = CarFactory.create(make="Mercedes")
        CarFactory.create(make="Volkswagen")
        RateFactory(car=car_1)
        RateFactory(car=car_1)
        RateFactory(car=car_1)
        RateFactory(car=car_2)
        RateFactory(car=car_2)
        RateFactory(car=car_2)
        RateFactory(car=car_2)
        RateFactory(car=car_2)

    def test_list(self, api_client):
        # Cars come back sorted by their rate count, descending.
        response = api_client.get(self.endpoint)
        assert response.status_code == status.HTTP_200_OK
        assert response.json() == [
            {
                "id": ANY,
                "make": "Mercedes",
                "model": ANY,
                "rates_number": 5,
            },
            {
                "id": ANY,
                "make": "BMW",
                "model": ANY,
                "rates_number": 3,
            },
            {"id": ANY, "make": "Volkswagen", "model": ANY, "rates_number": 0},
        ]
class TestRateCarView:
    """Tests for the rating endpoint ("rate")."""

    def setup(self):
        self.endpoint = reverse("rate")

    def test_create_ok(self, api_client):
        # Rating an existing car echoes the submitted payload back.
        rated_car = CarFactory.create(id=2)
        rated_car.refresh_from_db()
        payload = {"car_id": 2, "rating": 3}
        response = api_client.post(self.endpoint, data=payload, format="json")
        assert response.status_code == status.HTTP_201_CREATED
        assert response.json() == {"car_id": 2, "rating": 3}

    def test_create_no_car(self, api_client):
        # Rating a car id that does not exist is rejected.
        payload = {"car_id": 1, "rating": 3}
        response = api_client.post(self.endpoint, data=payload, format="json")
        assert response.status_code == status.HTTP_400_BAD_REQUEST
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 23 21:32:16 2016
@author: HZJ
"""
import uuid
import numpy as np
from . import FrameCrossSection
from .orm import Material,FrameSection
import logger
class Rectangle(FrameCrossSection):
    """Solid rectangular frame cross-section."""

    def __init__(self, mat, h, b, name=None):
        """
        h - height\n
        b - width\n
        """
        self.h = h
        self.b = b
        A = h * b
        # BUG FIX: the previous J = h*b**3/3 (marked WRONG in the original)
        # is only the thin-strip limit and overestimates J for stocky
        # sections.  Use the standard St. Venant torsion-constant
        # approximation for a solid rectangle (Roark's Formulas), with
        # `long_side` >= `short_side`:
        #   J = a*c**3*(1/3 - 0.21*(c/a)*(1 - c**4/(12*a**4)))
        long_side, short_side = (h, b) if h >= b else (b, h)
        J = long_side * short_side**3 * (
            1.0 / 3.0
            - 0.21 * (float(short_side) / long_side)
            * (1 - short_side**4 / (12.0 * long_side**4)))
        I33 = b * h**3 / 12
        I22 = h * b**3 / 12
        # Elastic section moduli about each axis (extreme fiber at h/2, b/2).
        W33 = I33 / h * 2
        W22 = I22 / b * 2
        super(Rectangle, self).__init__(mat, A, J, I33, I22, W33, W22, name)
class Circle(FrameCrossSection):
    """Solid circular frame cross-section."""

    def __init__(self, mat, d, name=None):
        """
        d - diameter
        """
        self.d = d
        # Area and torsion constant of a solid circle.
        area = np.pi * d**2 / 4
        torsion = np.pi * d**4 / 32
        # Second moments of area are equal about both principal axes.
        i_major = np.pi * d**4 / 64
        i_minor = i_major
        # Elastic section moduli (extreme fiber at d/2).
        w_major = i_major / d * 2
        w_minor = w_major
        super(Circle, self).__init__(mat, area, torsion, i_major, i_minor, w_major, w_minor, name)
class Pipe(FrameCrossSection):
    """Circular hollow (pipe) frame cross-section."""

    def __init__(self,mat,d,t,name=None):
        """
        d - diameter\n
        t - thickness of wall\n
        """
        self._d=d
        self._t=t
        # Ring area: outer circle minus inner circle.
        A=np.pi*d**2/4-np.pi*(d-2*t)**2/4
        # NOTE(review): this does not match the thin-wall closed-section
        # formula J = pi*(d-t)**3*t/4 (Bredt); confirm the intended
        # torsion-constant expression.
        J=np.pi*(d-t)/t*2*A
        I33=np.pi*d**4/64*(1-((d-2*t)/d)**4)
        I22=I33
        # Elastic section moduli (extreme fiber at d/2), equal for both axes.
        W33=I33/d*2
        W22=W33
        super(Pipe,self).__init__(mat,A,J,I33,I22,W33,W22,name)
        # self.gamma33=1.15
        # self.gamma22=1.15
        # if fab=='r':
        #     self.cls33='b'
        #     self.cls22='b'
        # elif fab=='w':
        #     self.cls33='c'
        #     self.cls22='c'
        # else:
        #     raise ValueError('wrong fabrication!')
class HollowBox(FrameCrossSection):
    """Rectangular hollow (box) frame cross-section."""

    def __init__(self,mat,h,b,tw,tf,name=None):
        """
        h - height\n
        b - width\n
        tw - thickness of web\n
        tf - thickness of flange\n
        """
        self.h=h
        self.b=b
        self.tw=tw
        self.tf=tf
        # Area: outer rectangle minus the inner cavity.
        A=h*b-(h-2*tf)*(b-2*tw)
        # NOTE(review): this does not match Bredt's thin-wall formula
        # J = 4*Am**2 / (integral of ds/t) for a closed box; confirm the
        # intended torsion-constant expression.
        J=(2*tw*(h-tf)/tw+2*tf*(b-tw)/tf)*2*A
        # Second moments: outer rectangle minus cavity about each axis.
        I33=b*h**3/12-(b-2*tw)*(h-2*tf)**3/12
        I22=h*b**3/12-(h-2*tf)*(b-2*tw)**3/12
        # Elastic section moduli (extreme fibers at h/2 and b/2).
        W33=I33/h*2
        W22=I22/b*2
        super(HollowBox,self).__init__(mat,A,J,I33,I22,W33,W22,name)
        # self.gamma33=1.05
        # self.gamma22=1.05
        # self.cls33='c'
        # self.cls22='c'
class ISection(FrameCrossSection):
    """Doubly-symmetric I-shaped frame cross-section."""

    def __init__(self,mat,h,b,tw,tf,name=None):
        """
        h - height\n
        b - width\n
        tw - thickness of web\n
        tf - thickness of flange\n
        """
        self.h=h
        self.b=b
        self.tw=tw
        self.tf=tf
        # Two flanges plus the clear web.
        A=b*tf*2+tw*(h-2*tf)
        # Open thin-walled torsion approximation sum(b_i*t_i**3)/3.
        J=(b*tf**3*2+(h-tf)*tw**3)/3 #should be confirm!!!!!!!!!!
        # Strong axis: full rectangle minus the material missing beside the web.
        I33=b*h**3/12-(b-tw)*(h-2*tf)**3/12
        # Weak axis: flanges dominate, plus the web strip.
        I22=2*tf*b**3/12+(h-2*tf)*tw**3/12
        # Elastic section moduli (extreme fibers at h/2 and b/2).
        W33=I33/h*2
        W22=I22/b*2
        super(ISection,self).__init__(mat,A,J,I33,I22,W33,W22,name)
        # self.gamma33=1.05
        # self.gamma22=1.2
        # self.cls33='c'
        # self.cls22='c'
class ISection2(FrameCrossSection):
    """Mono-symmetric I-section with differing top and bottom flanges."""

    def __init__(self,mat,h,b1,tf1,tw,b2,tf2,name=None):
        """
        h - height\n
        b1,b2 - width\n
        tw - thickness of web\n
        tf1,tf2 - thickness of flange\n
        """
        self.h=h
        self.b1=b1
        self.b2=b2
        self.tw=tw
        self.tf1=tf1
        self.tf2=tf2
        # Clear web height between the two flanges.
        hw=h-tf1-tf2
        A=b1*tf1+b2*tf2+tw*hw
        # Centroid height measured from the bottom fiber (area-weighted).
        self.y0=y0=(b1*tf1*(h-tf1/2)+b2*tf2*tf2/2+hw*tw*(hw/2+tf2))/A
        # Open thin-walled torsion approximation sum(b_i*t_i**3)/3.
        J=(b1*tf1**3+b2*tf2**3+(h-tf1/2-tf2/2)*tw**3)/3 #should be confirm!!!!!!!!!!
        # Strong-axis inertia about mid-height, then shifted to the centroid
        # with the parallel-axis theorem.
        I33=tw*hw**3/12
        I33+=b1*tf1**3/12+b1*tf1*(hw/2+tf1/2)**2
        I33+=b2*tf2**3/12+b2*tf2*(hw/2+tf2/2)**2
        I33-=A*(y0-h/2)**2
        I22=b1**3*tf1/12+b2**3*tf2/12+tw**3*hw/12
        # Section moduli use the farther extreme fiber on each axis.
        W33=I33/max([y0,h-y0])
        W22=I22/max([b1/2,b2/2])
        super(ISection2,self).__init__(mat,A,J,I33,I22,W33,W22,name)
        # self.gamma33=1.05
        # self.gamma22=1.2
        # self.cls33='c'
        # self.cls22='c'
class TSection(FrameCrossSection):
    """T-shaped frame cross-section (not yet implemented)."""
    def __init__(self,mat,h,b,tw,tf,name=None):
        # TODO: compute A, J, I33, I22, W33, W22 and call the base constructor.
        pass
class CSection(FrameCrossSection):
    # TODO: implement C-profile section properties.
    pass
class LSection(FrameCrossSection):
    # TODO: implement angle (L-profile) section properties.
    pass
class ZSection(FrameCrossSection):
    # TODO: implement Z-profile section properties.
    pass
def add_frame_section(self, name, material, type, size):
    """
    Add frame section to model, if the name already exists, an exception will be raised.
    param:
        name: str. name of the section.
        material: str, name of material.
        type:
            'O':pipe,
            'o':circle
            'I':I-profile
            'B':hollow-box
            'L':angle
            'T':T-profile
            'C':C-profile
            'Z':Z-profile
    size:
        if type is 'O', the following parameters are available:
            d: float, diameter in current unit
            t: float, wall thickness in current unit
        if type is 'o', the following parameters are available:
            d: float, diameter in current unit
        if type is 'I', 'B', 'L', 'T', 'C' or 'Z', the following
        parameters are available:
            h,b,tw,tf: float in current unit
    return:
        boolean, status of success.
    """
    try:
        assert(type in 'OoIBLTCZ' and len(type) == 1)
        scale = self.scale()
        if self.session.query(FrameSection).filter_by(name=name).first() != None:
            raise Exception('Name already exists!')
        frmsec = FrameSection()
        frmsec.name = name
        frmsec.uuid = str(uuid.uuid1())
        if self.session.query(Material).filter_by(name=material).first() is None:
            raise Exception('Material not exist!')
        frmsec.material_name = material
        frmsec.type = type
        sec = None
        if type == 'o':
            assert len(size) == 1
            frmsec.size_0 = size[0] * scale['L']
            sec = Circle(None, frmsec.size_0)
        elif type == 'O':
            assert len(size) == 2
            frmsec.size_0 = size[0] * scale['L']
            frmsec.size_1 = size[1] * scale['L']
            sec = Pipe(None, frmsec.size_0, frmsec.size_1)
        elif type == 'I':
            assert len(size) == 4
            frmsec.size_0 = size[0] * scale['L']
            frmsec.size_1 = size[1] * scale['L']
            # BUG FIX: web/flange thicknesses come from size[2] and size[3];
            # the old code reused size[0] and size[1].
            frmsec.size_2 = size[2] * scale['L']
            frmsec.size_3 = size[3] * scale['L']
            sec = ISection(None, frmsec.size_0, frmsec.size_1, frmsec.size_2, frmsec.size_3)
        elif type == 'L':  ####should be refined!!
            assert len(size) == 4
            frmsec.size_0 = size[0] * scale['L']
            frmsec.size_1 = size[1] * scale['L']
            # BUG FIX: same size[2]/size[3] fix as for the 'I' branch.
            frmsec.size_2 = size[2] * scale['L']
            frmsec.size_3 = size[3] * scale['L']
            sec = HollowBox(None, frmsec.size_0, frmsec.size_1, frmsec.size_2, frmsec.size_3)
        else:
            # 'B', 'T', 'C' and 'Z' pass the assertion above but have no
            # implementation yet; fail with a clear message instead of an
            # AttributeError on `sec` below.
            raise Exception('Section type %r is not implemented yet!' % type)
        frmsec.A = sec.A
        frmsec.J = sec.J
        frmsec.I2 = sec.I22
        frmsec.I3 = sec.I33
        self.session.add(frmsec)
        return True
    except Exception as e:
        logger.info(str(e))
        self.session.rollback()
        return False
def add_frame_section_SD(self):
    # TODO: add a section-designer (SD) frame section.
    pass
def add_frame_section_variate(self):
    # TODO: add a non-prismatic (variable) frame section.
    pass
def get_frame_section_names(self):
    """
    Get all the name of frame sections in the database
    returns:
        list of frame section names, or False on a database error.
    """
    try:
        sections=self.session.query(FrameSection)
        names=[i.name for i in sections.all()]
        return names
    except Exception as e:
        # Log and roll back so the session stays usable.
        logger.info(str(e))
        self.session.rollback()
        return False
def delete_frame_section(self, name):
    """
    Delete a frame section by name.
    param:
        name: str, name of the section to delete.
    return:
        boolean, status of success.
    """
    try:
        # BUG FIX: filter_by() returns a Query object, never None, so the
        # old existence check could not fire and session.delete() received
        # a Query instead of a row.  Fetch the row itself with first().
        sec = self.session.query(FrameSection).filter_by(name=name).first()
        if sec is None:
            raise Exception("Frame section doesn't exist!")
        self.session.delete(sec)
        # Return True on success, consistent with add_frame_section().
        return True
    except Exception as e:
        logger.info(str(e))
        self.session.rollback()
        return False
from apps.oob.models import project
from django.core.exceptions import ValidationError
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.utils import timezone
from unittest import mock
from datetime import timedelta
from ..models import Project, Task
class ProjectModelTest(TestCase):
    """Unit tests for the Project model and its task/sub-project relations."""

    def setUp(self):
        # One user owning a main project with one sub-project, each with a task.
        User = get_user_model()
        self.user = User.objects.create(email='<EMAIL>', password='<PASSWORD>')
        self.main_project = Project.objects.create(title='Test Project', user=self.user)
        self.sub_project = Project.objects.create(title='Sub Project', user=self.user, parent=self.main_project)
        self.main_task = self.main_project.tasks.create(title='Main Project Task', user=self.user)
        self.sub_task = self.sub_project.tasks.create(title='Sub Project Task', user=self.user)

    def test_project_string_representation(self):
        self.assertEqual(str(self.main_project), self.main_project.title)

    def test_project_verbose_name_plural(self):
        self.assertEqual(Project._meta.verbose_name_plural, 'projects')

    def test_project_with_no_user(self):
        with self.assertRaises(ValidationError):
            Project.objects.create(title='No User Project')

    def test_project_with_no_title(self):
        with self.assertRaises(ValidationError):
            Project.objects.create(user=self.user)
        with self.assertRaises(ValidationError):
            Project.objects.create(title='', user=self.user)

    def test_project_create_invalid_task(self):
        # A task needs a title.
        with self.assertRaises(ValidationError):
            self.main_project.tasks.create(user=self.user)

    def test_project_cannot_be_its_own_parent(self):
        with self.assertRaises(ValidationError):
            self.main_project.parent = self.main_project
            self.main_project.save()

    def test_project_task_addition_methods(self):
        # Create a new task via Project.tasks
        task_1 = self.main_project.tasks.create(title='One', user=self.user)
        # Create a new task and add it to Project.tasks
        task_2 = Task.objects.create(title='Two', user=self.user)
        self.main_project.tasks.add(task_2)
        # Create a new task and set its project
        task_3 = Task.objects.create(title='Three', user=self.user, project=self.main_project)
        # BUG FIX: assertEqual(a, b, c) treats the third argument as the
        # failure *message*, so task_3 was never checked.  Compare pairwise.
        self.assertEqual(task_1.project, task_2.project)
        self.assertEqual(task_2.project, task_3.project)

    def test_project_sub_project_addition_methods(self):
        # Create a sub-project through the parent project
        sub_1 = self.main_project.children.create(title='One', user=self.user)
        # Create a sub-project and add it to the parent project's children
        sub_2 = Project.objects.create(title='Two', user=self.user)
        self.main_project.children.add(sub_2)
        # Create a sub-project and set its parent project
        sub_3 = Project.objects.create(title='Three', user=self.user, parent=self.main_project)
        # All children have the same parent.
        # BUG FIX: same assertEqual msg-argument issue as above.
        self.assertEqual(sub_1.parent, sub_2.parent)
        self.assertEqual(sub_2.parent, sub_3.parent)
        # Parent contains each child
        self.assertEqual(self.main_project.children.filter(title='One').get(),
                         sub_1)
        self.assertEqual(self.main_project.children.filter(title='Two').get(),
                         sub_2)
        self.assertEqual(self.main_project.children.filter(title='Three').get(),
                         sub_3)

    def test_project_sub_project_cannot_be_child_of_sub_project(self):
        with self.assertRaises(ValidationError):
            self.sub_project.children.create(title='Sub-Sub-Project', user=self.user)

    def test_project_sub_project_cannot_have_parent_and_children(self):
        # BUG FIX: the old test created the new parent with no user *inside*
        # assertRaises, so the expected ValidationError came from the missing
        # user and children.add() was never exercised.  Create a valid
        # project first, then assert that the add() itself raises.
        new_main = Project.objects.create(title='New Main Project', user=self.user)
        with self.assertRaises(ValidationError):
            new_main.children.add(self.main_project)

    def test_project_modified_date_updated(self):
        # Freeze "now" to yesterday for the create, then save today and
        # verify modified_on moved forward.
        yesterday = timezone.now() - timedelta(days=1)
        with mock.patch('django.utils.timezone.now', mock.Mock(return_value=yesterday)):
            old_project = Project.objects.create(title='Yesterday', user=self.user)
        initial_date = old_project.modified_on
        old_project.title = "Today"
        old_project.save()
        modified_date = old_project.modified_on
        self.assertNotEqual(initial_date, modified_date)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.