input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<reponame>CoeGSS-Project/SN4SP
# Copyright (C) 2018 by
# <NAME> <<EMAIL>> HLRS
# <NAME> <<EMAIL>> IMT
# All rights reserved.
#
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
"""Base class for similarity networks.
"""
from __future__ import division, absolute_import, print_function
from sn4sp import parallel
# TODO: switch from logging to warnings in library core
import warnings
import logging
from mpi4py import MPI
import numpy
# NOTE: `math.log` has better performance than `numpy.log` and `numpy.log2`.
# In our codes we operate with fractions of the logarithms,
# so log and log2 are interchangeable.
from math import log as log2
from math import sqrt
from math import atan2 as arctan2
from itertools import islice
from itertools import islice
import sys
if sys.version_info[0] >= 3:
izip=zip
else:
from itertools import izip
class SimilarityGraph:
"""
Class for probabilistic (undirected) graph model based on Lin similarity
with geo-spatial damping.
Parameters
----------
attr_table : numpy.array or pandas.table
Table (or numpy array) with attribute values
attr_types : list
Attribute types
attr_names : list
Names of the attributes
comm : mpi4py.MPI.Intracomm object
MPI communicator
hss : float
Half-similarity scale
damping : float
Damping coefficient (if 0 use exponential damping)
sample_fraction : float
Percentage of the population
"""
R_EARTH=6.3781*10**6 # Earth radius in meters
def __init__(self, attr_table, attr_types, attr_names=None, comm=MPI.COMM_WORLD, hss=5000, damping=0, sample_fraction=1e-1):
    """
    Initialize a probabilistic (undirected) graph model based on Lin similarity
    with geo-spatial damping.

    Parameters
    ----------
    attr_table : numpy.array or pandas.table
        Table (or numpy array) with attribute values
    attr_types : list
        Attribute types ('g' geographic, 'o' ordinal, 'c' categorical)
    attr_names : list
        Names of the attributes (defaults to ``attr_table.dtype.names``)
    comm : mpi4py.MPI_Comm
        MPI communicator
    hss : float
        Half-similarity scale
    damping : float
        Damping coefficient (if 0 use exponential damping)
    sample_fraction : float
        Percentage of the population

    Raises
    ------
    ValueError
        If the number of geographic attributes is not even
        (they must come in (longitude, latitude) pairs).
    """
    self.comm=comm
    comm_rank=self.comm.Get_rank()
    num_vertices=len(attr_table)
    self.damping=damping
    self.hss=hss
    self.similarity_threshold=1e-6
    # Normalization constant (half-similarity scale)
    hss_0=float(hss)/(numpy.power(2, self.damping)-1) \
        if 0 < self.damping and self.damping < 1 else hss
    self.geo_scaling=self.R_EARTH/hss_0
    # Group attribute names by types
    attr_names=numpy.array(attr_names or attr_table.dtype.names)
    attr_types=numpy.array(attr_types)
    attr_name_groups={ attr_type : attr_names[numpy.where(attr_types == attr_type)[0]] \
                       for attr_type in ('g', 'o', 'c') }
    logging.debug( 'attribute names: {0}'.format(attr_name_groups) )
    # Store categorical and ordinal attributes as-is
    nongeo_attr_names=numpy.hstack((attr_name_groups['c'], attr_name_groups['o']))
    self.num_categorical=len(attr_name_groups['c'])
    self.num_ordinal=len(attr_name_groups['o'])
    self.nongeo_attrs=attr_table[nongeo_attr_names]
    # Convert attributes from degrees to radians.
    # BUGFIX: the check requires an EVEN count (lon,lat pairs), but the
    # original message said "odd" -- message corrected to match the check.
    if len(attr_name_groups['g']) % 2 != 0:
        raise ValueError( "number of geo-attributes must be even to hold (longitude,latitude) pairs,"\
                          " whereas we have {0} geo-attributes".format(len(attr_name_groups['g'])) )
    self.geo_attrs=numpy.array( [numpy.radians(attr_table[attr_name]) \
                                 for attr_name in attr_name_groups['g']] ).T
    # Prepare representative sample of the original synthetic population
    # in order to reduce time to compute Lin similarity.
    # Take the sample to be a `sample_fraction` fraction of the original dataset.
    sample_size=max(100, int(num_vertices*sample_fraction))
    if sample_size < num_vertices:
        if comm_rank==0:
            # BUGFIX: cast to C `int` ('i') so the root's send buffer matches
            # the MPI.INT datatype and the dtype='i' receive buffers below
            # (numpy.random.choice returns int64 by default).
            sampled_indices=numpy.random.choice(num_vertices, sample_size, replace=False).astype('i')
        else:
            sampled_indices=numpy.empty(sample_size, dtype='i')
        # Broadcast the list of the sampled agents between all processes.
        self.comm.Bcast([sampled_indices, sample_size, MPI.INT], root=0)
        # Select sampled attributes
        self.sampled_nongeo_attrs=self.nongeo_attrs[sampled_indices]
        self.sampled_geo_attrs=self.geo_attrs[sampled_indices]
        self.sample_mask=numpy.zeros(num_vertices, bool)
        self.sample_mask[sampled_indices]=True
    else: # Since population size is small, use the whole population as sample
        # NOTE: no need to make deep copy of vertex_attrs with numpy.copy
        # since sampled_vertex_attrs is not modified further in algorithm
        self.sampled_nongeo_attrs=self.nongeo_attrs
        self.sampled_geo_attrs=self.geo_attrs
        self.sample_mask=numpy.ones(num_vertices, bool)
    self.sampled_nongeo_attrs=[self.sampled_nongeo_attrs[attr_name] \
                               for attr_name in nongeo_attr_names]
    # TODO: remove when not needed
    self.sampled_vertex_attrs=attr_table[self.sample_mask]
    self.vertex_attrs=attr_table
@property
def sample_size(self):
    """Number of vertices in the representative sample."""
    sampled_locations = self.sampled_geo_attrs
    return len(sampled_locations)
def edge_probability(self, u, v):
""" Probability of edge in the similarity graph based on geo-damped Lin similarity.
Edge probability consists of two independent contributions:
- probability induced by graphical distance between agents
- probability induced by similarity between agents
Parameters
----------
u, v : int
Indices of vertices
"""
# Compute contribution of geo-attributes to the edge probability.
# Get vectors with (lon,lat)-pairs of all locations for 2 nodes.
geo_attrs_u=self.geo_attrs[u]
geo_attrs_v=self.geo_attrs[v]
# Compute minimum geo-distance between locations of a and b
# NOTE: For the moment, it selects the closest distance for
# matching types of locations (e.g., between 2 households,
# but not between household of `a` and workplace of `b`)
min_dist=numpy.PINF
for i in xrange(0, len(geo_attrs_u), 2):
dlon=geo_attrs_u[i] - geo_attrs_v[i]
# TODO: precompute sines and cosines before computing individual edge probabilities
lat1, lat2=geo_attrs_u[i+1], geo_attrs_v[i+1]
cos_lat1, cos_lat2, cos_dlon=numpy.cos(lat1), numpy.cos(lat2), numpy.cos(dlon)
sin_lat1, sin_lat2, sin_dlon=numpy.sin(lat1), numpy.sin(lat2), numpy.sin(dlon)
y=sqrt((cos_lat2*sin_dlon)**2 + (cos_lat1*sin_lat2 - sin_lat1*cos_lat2*cos_dlon)**2)
x=sin_lat1*sin_lat2 + cos_lat1*cos_lat2*cos_dlon
# TODO: clarify about negative distances
min_dist=min(min_dist, arctan2(y, x))
if self.damping > 0.:
# Scale distance by half-similarity scale to make it adimensional and damp by a factor 2.
prob_geo=(1. + self.geo_scaling*min_dist)**(-self.damping)
else:
# Compute geo-similarity with exponential damping.
prob_geo=2**(-self.geo_scaling*min_dist)
# logging.debug( "minimum distance rho({0},{1})={2}; geo-induced probability Pr({0},{1})={3}".\
# format(a, b, min_dist, prob_geo) )
# If probability induced by the geo-attributes is smaller than a certain lower bound threshold,
# the Lin similarity contribution is disregarded.
if prob_geo <= self.similarity_threshold:
return 0.
# Compute contribution of non-geographic attributes to the edge probability.
# This contribution is based on Lin similarity metric.
# Lin similarity handles categorical ('c') and ordinal ('o') attributes
# (while geographic ('g') attributes are subject of geo-damping).
# In order to reduce calculation time, code below evaluates
# the similarity between node `a` and node `b` on the sampled attributes.
# First, find the frequency of agents sharing all attributes shared by the two analysed nodes.
# The idea is that the lower the number of agents sharing the same attribute,
# the more information this shared attribute contains about the two agents.
# Since attributes are dependent, one must estimate the probability (get frequency)
# of observing all different attributes at the same time. If they were independent,
# one might handle attributes independently by computing their contributions separately
# and summing up.
attrs_u, attrs_v = self.nongeo_attrs[u], self.nongeo_attrs[v]
# Select nodes as similar if categorical attributes are the same to `a` and `b`
# If vertices share categorical attribute, the number of agents sharing this attribute is considered.
# TODO: ensure that we do not have to cut of by categorical_attrs
similar_nodes=True # start with all sampled items as similar
for sample, attr_u, attr_v in islice(izip(self.sampled_nongeo_attrs, attrs_u, attrs_v), self.num_categorical):
if attr_u == attr_v:
# filter out indices of samples with the same attribute
similar_nodes &= sample == attr_u
# logging.debug( "similar categorical attributes in the sample: {0} out of {1}".\
# format(numpy.sum(similar_nodes) if not isinstance(similar_nodes, bool) else "all",
# self.sample_size) )
# Select nodes as similar if ordinal attributes are between values for `a` and `b`.
# If vertices do not share an attribute and the attribute is ordinal,
# the number of agents sharing attributes between the two values is considered.
for sample, attr_u, attr_v in islice(izip(self.sampled_nongeo_attrs, attrs_u, attrs_v), self.num_categorical, self.num_categorical+self.num_ordinal):
attr_min, attr_max=min(attr_u, attr_v), max(attr_u, attr_v)
# filter out indices of samples with the attribute in the range of values between `a` and `b`
similar_nodes &= (attr_min <= sample) & (sample <= attr_max)
num_similar=numpy.sum(similar_nodes) if not isinstance(similar_nodes, bool) else self.sample_size
# logging.debug( "similar attributes in the sample after ordinal attributes filtering: {0} out of {1}".\
# format(num_similar, self.sample_size) )
# If the superposition is zero on the sample, then there is no agents with the same characteristics.
# The probability of finding something with the same feature of both `a` and `b` is really small.
if num_similar == 0:
return prob_geo # TODO: consult why not zero
# Second, find the frequency of agents sharing all attributes with each analysed node separately.
# TODO: clarify whether we need to take geo-filtering into account as in original script
# NOTE: `numpy.sum` performs better than `sum` on numpy-arrays
num_equal_u=numpy.sum(self.sampled_vertex_attrs==self.vertex_attrs[u])
num_equal_v=numpy.sum(self.sampled_vertex_attrs==self.vertex_attrs[v])
# logging.debug( "similar attributes (in the sample) to the 1st vertex: {0}, to the 2nd vertex: {1}".\
# format(num_equal_u, num_equal_v) )
# Compute Lin similarity (use inverses of frequencies estimated above)
num_sample=float(self.sample_size)
num_total=len(self)
prob_lin=log2(num_sample/num_similar)
if num_equal_u == 0: # there is no agents as `a`
# We make assumption that `a` is the only one with this characteristic in the whole dataset.
if num_equal_v == 0: # the same thing for `b`
prob_lin /= log2(num_total)
else: # `a` is unique, but `b` has similar vertices (agents) in the dataset (population)
prob_lin /= 0.5*log2(num_sample*num_total/num_equal_v)
elif num_equal_v == 0: | |
from __future__ import print_function
from __future__ import with_statement
from os.path import exists
from twisted.python import log, failure
from twisted.trial import unittest
from twisted.test import proto_helpers
from twisted.internet import defer, error
from txtorcon import TorControlProtocol, TorProtocolFactory, TorState
from txtorcon import ITorControlProtocol
from txtorcon.torcontrolprotocol import parse_keywords, DEFAULT_VALUE
from txtorcon.util import hmac_sha256
import functools
import tempfile
import base64
from binascii import b2a_hex, a2b_hex
class CallbackChecker:
    """Callable test helper: asserts its first positional argument equals
    the expected value, records that it was invoked, and passes the value
    through unchanged (so it can sit in a Deferred callback chain).
    """

    def __init__(self, expected):
        self.expected_value = expected
        self.called_back = False

    def __call__(self, *args, **kwargs):
        v = args[0]
        if v != self.expected_value:
            # Raising is enough to errback the chain; the stray debug
            # print("WRONG") that polluted test output has been removed.
            raise RuntimeError(
                'Expected "%s" but got "%s"' % (self.expected_value, v)
            )
        self.called_back = True
        return v
class InterfaceTests(unittest.TestCase):
    """Zope-interface declarations of TorControlProtocol."""

    def test_implements(self):
        """The class declares that it implements ITorControlProtocol."""
        declared = ITorControlProtocol.implementedBy(TorControlProtocol)
        self.assertTrue(declared)

    def test_object_implements(self):
        """An instance actually provides ITorControlProtocol."""
        provided = ITorControlProtocol.providedBy(TorControlProtocol())
        self.assertTrue(provided)
class LogicTests(unittest.TestCase):
    """Argument-validation behaviour of TorControlProtocol."""

    def setUp(self):
        self.protocol = TorControlProtocol()
        self.protocol.connectionMade = lambda: None
        self.transport = proto_helpers.StringTransport()
        self.protocol.makeConnection(self.transport)

    def test_set_conf_wrong_args(self):
        """set_conf with an odd number of arguments fails synchronously."""
        ctl = TorControlProtocol()
        d = ctl.set_conf('a')
        self.assertTrue(d.called)
        self.assertTrue(d.result)
        message = d.result.getErrorMessage()
        self.assertIn('even number', message)
        # swallow the failure so trial doesn't report an unhandled error
        d.addErrback(lambda _: True)
        return d
class FactoryTests(unittest.TestCase):
    """Smoke test for TorProtocolFactory."""

    def test_create(self):
        """buildProtocol returns without raising."""
        factory = TorProtocolFactory()
        factory.buildProtocol(None)
class AuthenticationTests(unittest.TestCase):
    """Exercise the PROTOCOLINFO-driven authentication handshake.

    BUGFIX: the scraped source carried anonymization placeholders
    ('<PASSWORD>') for the password functions, which contradicted the
    tests' own assertions of ``b2a_hex(b'foo')``; restored to 'foo'.
    """

    def setUp(self):
        self.protocol = TorControlProtocol()
        self.transport = proto_helpers.StringTransport()

    def send(self, line):
        """Feed one CRLF-terminated control-port line into the protocol."""
        assert type(line) == bytes
        self.protocol.dataReceived(line.strip() + b"\r\n")

    def test_authenticate_cookie(self):
        """COOKIE method: the cookie file contents are sent hex-encoded."""
        self.protocol.makeConnection(self.transport)
        self.assertEqual(self.transport.value(), b'PROTOCOLINFO 1\r\n')
        self.transport.clear()
        cookie_data = b'cookiedata!cookiedata!cookiedata'
        # NOTE(review): writes 'authcookie' into the CWD and never deletes
        # it; consider tempfile + addCleanup.  Left as-is because the
        # COOKIEFILE="authcookie" wire string below references this name.
        with open('authcookie', 'wb') as f:
            f.write(cookie_data)
        self.send(b'250-PROTOCOLINFO 1')
        self.send(b'250-AUTH METHODS=COOKIE,HASHEDPASSWORD COOKIEFILE="authcookie"')
        self.send(b'250-VERSION Tor="0.2.2.34"')
        self.send(b'250 OK')
        self.assertEqual(
            self.transport.value(),
            b'AUTHENTICATE ' + b2a_hex(cookie_data) + b'\r\n',
        )

    def test_authenticate_password(self):
        """HASHEDPASSWORD method: the password is sent hex-encoded."""
        self.protocol.password_function = lambda: 'foo'
        self.protocol.makeConnection(self.transport)
        self.assertEqual(self.transport.value(), b'PROTOCOLINFO 1\r\n')
        self.transport.clear()
        self.send(b'250-PROTOCOLINFO 1')
        self.send(b'250-AUTH METHODS=HASHEDPASSWORD')
        self.send(b'250-VERSION Tor="0.2.2.34"')
        self.send(b'250 OK')
        self.assertEqual(
            self.transport.value(),
            b'AUTHENTICATE ' + b2a_hex(b'foo') + b'\r\n'
        )

    def test_authenticate_password_not_bytes(self):
        """A text (unicode) password is encoded before being hex-dumped."""
        self.protocol.password_function = lambda: u'foo'
        self.protocol.makeConnection(self.transport)
        self.assertEqual(self.transport.value(), b'PROTOCOLINFO 1\r\n')
        self.transport.clear()
        self.send(b'250-PROTOCOLINFO 1')
        self.send(b'250-AUTH METHODS=HASHEDPASSWORD')
        self.send(b'250-VERSION Tor="0.2.2.34"')
        self.send(b'250 OK')
        self.assertEqual(
            self.transport.value(),
            b'AUTHENTICATE ' + b2a_hex(b'foo') + b'\r\n'
        )

    def test_authenticate_null(self):
        """NULL method: a bare AUTHENTICATE is sent."""
        self.protocol.makeConnection(self.transport)
        self.assertEqual(self.transport.value(), b'PROTOCOLINFO 1\r\n')
        self.transport.clear()
        self.send(b'250-PROTOCOLINFO 1')
        self.send(b'250-AUTH METHODS=NULL')
        self.send(b'250-VERSION Tor="0.2.2.34"')
        self.send(b'250 OK')
        self.assertEqual(self.transport.value(), b'AUTHENTICATE\r\n')

    def test_authenticate_password_deferred(self):
        """password_function may return a Deferred; auth waits for it."""
        d = defer.Deferred()
        self.protocol.password_function = lambda: d
        self.protocol.makeConnection(self.transport)
        self.assertEqual(self.transport.value(), b'PROTOCOLINFO 1\r\n')
        self.transport.clear()
        self.send(b'250-PROTOCOLINFO 1')
        self.send(b'250-AUTH METHODS=HASHEDPASSWORD')
        self.send(b'250-VERSION Tor="0.2.2.34"')
        self.send(b'250 OK')
        # make sure we haven't tried to authenticate before getting
        # the password callback
        self.assertEqual(self.transport.value(), b'')
        d.callback('foo')
        # now make sure we DID try to authenticate
        self.assertEqual(
            self.transport.value(),
            b'AUTHENTICATE ' + b2a_hex(b"foo") + b'\r\n'
        )

    def test_authenticate_password_deferred_but_no_password(self):
        """A password Deferred firing with None errbacks post_bootstrap."""
        d = defer.Deferred()
        self.protocol.password_function = lambda: d
        self.protocol.makeConnection(self.transport)
        self.assertEqual(self.transport.value(), b'PROTOCOLINFO 1\r\n')
        self.transport.clear()
        self.send(b'250-PROTOCOLINFO 1')
        self.send(b'250-AUTH METHODS=HASHEDPASSWORD')
        self.send(b'250-VERSION Tor="0.2.2.34"')
        self.send(b'250 OK')
        d.callback(None)
        return self.assertFailure(self.protocol.post_bootstrap, RuntimeError)

    def confirmAuthFailed(self, *args):
        """Errback helper: record that authentication failed."""
        self.auth_failed = True

    def test_authenticate_no_password(self):
        """No password_function set: HASHEDPASSWORD auth must fail."""
        self.protocol.post_bootstrap.addErrback(self.confirmAuthFailed)
        self.auth_failed = False
        self.protocol.makeConnection(self.transport)
        self.assertEqual(self.transport.value(), b'PROTOCOLINFO 1\r\n')
        self.send(b'250-PROTOCOLINFO 1')
        self.send(b'250-AUTH METHODS=HASHEDPASSWORD')
        self.send(b'250-VERSION Tor="0.2.2.34"')
        self.send(b'250 OK')
        self.assertTrue(self.auth_failed)
class DisconnectionTests(unittest.TestCase):
    """Behaviour of on_disconnect / when_disconnected when the transport dies."""

    def setUp(self):
        # Protocol wired to an in-memory transport that supports loseConnection.
        self.protocol = TorControlProtocol()
        self.protocol.connectionMade = lambda: None
        self.transport = proto_helpers.StringTransportWithDisconnection()
        self.protocol.makeConnection(self.transport)
        # why doesn't makeConnection do this?
        self.transport.protocol = self.protocol

    def tearDown(self):
        self.protocol = None

    def test_disconnect_callback(self):
        """
        see that we get our callback on_disconnect if the transport
        goes away
        """
        def it_was_called(*args):
            it_was_called.yes = True
            return None
        it_was_called.yes = False
        # both callback and errback funnel into the same flag, so either
        # path counts as "we were notified"
        self.protocol.on_disconnect.addCallback(it_was_called)
        self.protocol.on_disconnect.addErrback(it_was_called)
        # ConnectionDone is the "clean close" failure
        f = failure.Failure(error.ConnectionDone("It's all over"))
        self.protocol.connectionLost(f)
        self.assertTrue(it_was_called.yes)

    def test_when_disconnect(self):
        """
        see that we get our callback for when_disconnected if the
        transport goes away
        """
        def it_was_called(arg):
            it_was_called.yes = True
            return None
        it_was_called.yes = False
        d = self.protocol.when_disconnected()
        d.addCallback(it_was_called)
        f = failure.Failure(error.ConnectionDone("It's all over"))
        self.protocol.connectionLost(f)
        self.assertTrue(it_was_called.yes)

    def test_when_disconnect_error(self):
        """
        see that we get our errback for when_disconnected if the
        transport goes away
        """
        def it_was_called(arg):
            it_was_called.yes = True
            return None
        it_was_called.yes = False
        d = self.protocol.when_disconnected()
        d.addErrback(it_was_called)
        # a non-ConnectionDone failure should take the errback path
        f = failure.Failure(RuntimeError("sadness"))
        self.protocol.connectionLost(f)
        self.assertTrue(it_was_called.yes)

    def test_disconnect_errback(self):
        """
        see that we get our callback on_disconnect if the transport
        goes away
        """
        def it_was_called(*args):
            it_was_called.yes = True
            return None
        it_was_called.yes = False
        self.protocol.on_disconnect.addCallback(it_was_called)
        self.protocol.on_disconnect.addErrback(it_was_called)
        f = failure.Failure(RuntimeError("The thing didn't do the stuff."))
        self.protocol.connectionLost(f)
        self.assertTrue(it_was_called.yes)

    def test_disconnect_outstanding_commands(self):
        """
        outstanding commands should errback on disconnect
        """
        def it_was_called(f):
            str(f)
            it_was_called.count += 1
            return None
        it_was_called.count = 0
        # we want to make sure outstanding commands get errbacks
        d0 = self.protocol.queue_command("some command0")
        d1 = self.protocol.queue_command("some command1")
        d0.addErrback(it_was_called)
        d1.addErrback(it_was_called)
        self.protocol.on_disconnect.addErrback(lambda _: None)
        f = failure.Failure(RuntimeError("The thing didn't do the stuff."))
        self.protocol.connectionLost(f)
        self.assertEqual(it_was_called.count, 2)
class ProtocolTests(unittest.TestCase):
def setUp(self):
    """Create a TorControlProtocol wired to an in-memory transport."""
    self.protocol = TorControlProtocol()
    self.protocol.connectionMade = lambda: None
    self.transport = proto_helpers.StringTransport()
    self.protocol.makeConnection(self.transport)
def tearDown(self):
    """Drop the protocol reference between tests."""
    self.protocol = None
def send(self, line):
    """Feed one CRLF-terminated control-port line into the protocol."""
    assert type(line) == bytes
    self.protocol.dataReceived(line.strip() + b"\r\n")
def test_statemachine_broadcast_no_code(self):
    """_broadcast_response refuses to run before any status code is set."""
    try:
        self.protocol._broadcast_response("foo")
        self.fail()
    except RuntimeError as e:
        self.assertTrue('No code set yet' in str(e))
def test_statemachine_broadcast_unknown_code(self):
    """_broadcast_response rejects a status code outside the known ranges."""
    try:
        self.protocol.code = 999
        self.protocol._broadcast_response("foo")
        self.fail()
    except RuntimeError as e:
        self.assertTrue('Unknown code' in str(e))
def test_statemachine_is_finish(self):
    """Classification of end-of-response lines ('.', 'NNN ') vs others."""
    self.assertTrue(not self.protocol._is_finish_line(''))
    self.assertTrue(self.protocol._is_finish_line('.'))
    self.assertTrue(self.protocol._is_finish_line('300 '))
    self.assertTrue(not self.protocol._is_finish_line('250-'))
def test_statemachine_singleline(self):
    """A line with no status-code prefix is not a single-line response."""
    self.assertTrue(not self.protocol._is_single_line_response('foo'))
def test_statemachine_continuation(self):
    """A continuation line whose code differs from the current one raises."""
    try:
        self.protocol.code = 250
        self.protocol._is_continuation_line("123 ")
        self.fail()
    except RuntimeError as e:
        self.assertTrue('Unexpected code' in str(e))
def test_statemachine_multiline(self):
    """A multi-line part whose code differs from the current one raises."""
    try:
        self.protocol.code = 250
        self.protocol._is_multi_line("123 ")
        self.fail()
    except RuntimeError as e:
        self.assertTrue('Unexpected code' in str(e))
def test_response_with_no_request(self):
    """A reply arriving with no outstanding command is an error."""
    with self.assertRaises(RuntimeError) as ctx:
        self.protocol.code = 200
        self.protocol._broadcast_response('200 OK')
    self.assertTrue(
        "didn't issue a command" in str(ctx.exception)
    )
def auth_failed(self, msg):
    """Helper installed as _auth_failed: record the expected 551 failure."""
    self.assertEqual(str(msg.value), '551 go away')
    self.got_auth_failed = True
def test_authenticate_fail(self):
    """A 551 reply to AUTHENTICATE is routed into _auth_failed."""
    self.got_auth_failed = False
    self.protocol._auth_failed = self.auth_failed
    # Restored concrete password; the scraped source carried an
    # anonymization placeholder ('<PASSWORD>') here.
    self.protocol.password_function = lambda: 'foo'
    self.protocol._do_authenticate('''PROTOCOLINFO 1
AUTH METHODS=HASHEDPASSWORD
VERSION Tor="0.2.2.35"
OK''')
    self.send(b'551 go away\r\n')
    self.assertTrue(self.got_auth_failed)
def test_authenticate_no_auth_line(self):
    """PROTOCOLINFO output without an AUTH line raises RuntimeError."""
    try:
        self.protocol._do_authenticate('''PROTOCOLINFO 1
FOOAUTH METHODS=COOKIE,SAFECOOKIE COOKIEFILE="/dev/null"
VERSION Tor="0.2.2.35"
OK''')
        self.assertTrue(False)
    except RuntimeError as e:
        self.assertTrue('find AUTH line' in str(e))
def test_authenticate_not_enough_cookie_data(self):
    """A cookie file that is not exactly 32 bytes is rejected."""
    with tempfile.NamedTemporaryFile() as cookietmp:
        cookietmp.write(b'x' * 35)  # too much data
        cookietmp.flush()
        try:
            self.protocol._do_authenticate('''PROTOCOLINFO 1
AUTH METHODS=COOKIE COOKIEFILE="%s"
VERSION Tor="0.2.2.35"
OK''' % cookietmp.name)
            self.assertTrue(False)
        except RuntimeError as e:
            self.assertTrue('cookie to be 32' in str(e))
def test_authenticate_not_enough_safecookie_data(self):
    """A SAFECOOKIE file that is not exactly 32 bytes is rejected."""
    with tempfile.NamedTemporaryFile() as cookietmp:
        cookietmp.write(b'x' * 35)  # too much data
        cookietmp.flush()
        try:
            self.protocol._do_authenticate('''PROTOCOLINFO 1
AUTH METHODS=SAFECOOKIE COOKIEFILE="%s"
VERSION Tor="0.2.2.35"
OK''' % cookietmp.name)
            self.assertTrue(False)
        except RuntimeError as e:
            self.assertTrue('cookie to be 32' in str(e))
def test_authenticate_safecookie(self):
    """Full SAFECOOKIE handshake: AUTHCHALLENGE then AUTHENTICATE."""
    with tempfile.NamedTemporaryFile() as cookietmp:
        cookiedata = bytes(bytearray([0] * 32))
        cookietmp.write(cookiedata)
        cookietmp.flush()
        self.protocol._do_authenticate('''PROTOCOLINFO 1
AUTH METHODS=SAFECOOKIE COOKIEFILE="{}"
VERSION Tor="0.2.2.35"
OK'''.format(cookietmp.name))
        self.assertTrue(
            b'AUTHCHALLENGE SAFECOOKIE ' in self.transport.value()
        )
        # last token of the AUTHCHALLENGE line is our hex client nonce
        x = self.transport.value().split()[-1]
        client_nonce = a2b_hex(x)
        self.transport.clear()
        server_nonce = bytes(bytearray([0] * 32))
        # server-to-controller hash per the SAFECOOKIE spec
        server_hash = hmac_sha256(
            b"Tor safe cookie authentication server-to-controller hash",
            cookiedata + client_nonce + server_nonce,
        )
        self.send(
            b'250 AUTHCHALLENGE SERVERHASH=' +
            base64.b16encode(server_hash) + b' SERVERNONCE=' +
            base64.b16encode(server_nonce) + b'\r\n'
        )
        self.assertTrue(b'AUTHENTICATE ' in self.transport.value())
def test_authenticate_cookie_without_reading(self):
    """AUTHCHALLENGE reply before the cookie file was read must raise."""
    server_nonce = bytes(bytearray([0] * 32))
    server_hash = bytes(bytearray([0] * 32))
    try:
        self.protocol._safecookie_authchallenge(
            '250 AUTHCHALLENGE SERVERHASH=%s SERVERNONCE=%s' %
            (base64.b16encode(server_hash), base64.b16encode(server_nonce))
        )
        self.assertTrue(False)
    except RuntimeError as e:
        self.assertTrue('not read' in str(e))
def test_authenticate_unexisting_cookie_file(self):
    """COOKIE auth with a missing cookie file raises RuntimeError."""
    unexisting_file = __file__ + "-unexisting"
    try:
        self.protocol._do_authenticate('''PROTOCOLINFO 1
AUTH METHODS=COOKIE COOKIEFILE="%s"
VERSION Tor="0.2.2.35"
OK''' % unexisting_file)
        self.assertTrue(False)
    except RuntimeError:
        pass
def test_authenticate_unexisting_safecookie_file(self):
    """SAFECOOKIE auth with a missing cookie file raises RuntimeError."""
    unexisting_file = __file__ + "-unexisting"
    try:
        self.protocol._do_authenticate('''PROTOCOLINFO 1
AUTH METHODS=SAFECOOKIE COOKIEFILE="{}"
VERSION Tor="0.2.2.35"
OK'''.format(unexisting_file))
        self.assertTrue(False)
    except RuntimeError:
        pass
def test_authenticate_dont_send_cookiefile(self):
    """SAFECOOKIE advertised without a COOKIEFILE raises RuntimeError."""
    try:
        self.protocol._do_authenticate('''PROTOCOLINFO 1
AUTH METHODS=SAFECOOKIE
VERSION Tor="0.2.2.35"
OK''')
        self.assertTrue(False)
    except RuntimeError:
        pass
def test_authenticate_password_when_cookie_unavailable(self):
    """Falls back to HASHEDPASSWORD when the cookie file is unreadable."""
    unexisting_file = __file__ + "-unexisting"
    # The assertion below expects b2a_hex(b'foo'); the scraped source
    # carried an anonymization placeholder ('<PASSWORD>') which broke it.
    self.protocol.password_function = lambda: 'foo'
    self.protocol._do_authenticate('''PROTOCOLINFO 1
AUTH METHODS=COOKIE,HASHEDPASSWORD COOKIEFILE="{}"
VERSION Tor="0.2.2.35"
OK'''.format(unexisting_file))
    self.assertEqual(
        self.transport.value(),
        b'AUTHENTICATE ' + b2a_hex(b'foo') + b'\r\n',
    )
def test_authenticate_password_when_safecookie_unavailable(self):
    """Falls back to HASHEDPASSWORD when the SAFECOOKIE file is unreadable."""
    unexisting_file = __file__ + "-unexisting"
    # The assertion below expects b2a_hex(b'foo'); the scraped source
    # carried an anonymization placeholder ('<PASSWORD>') which broke it.
    self.protocol.password_function = lambda: 'foo'
    self.protocol._do_authenticate('''PROTOCOLINFO 1
AUTH METHODS=SAFECOOKIE,HASHEDPASSWORD COOKIEFILE="{}"
VERSION Tor="0.2.2.35"
OK'''.format(unexisting_file))
    self.assertEqual(
        self.transport.value(),
        b'AUTHENTICATE ' + b2a_hex(b'foo') + b'\r\n',
    )
def test_authenticate_safecookie_wrong_hash(self):
    """A server hash that fails verification raises RuntimeError."""
    cookiedata = bytes(bytearray([0] * 32))
    server_nonce = bytes(bytearray([0] * 32))
    server_hash = bytes(bytearray([0] * 32))
    # pretend we already did PROTOCOLINFO and read the cookie
    # file
    self.protocol._cookie_data = cookiedata
    self.protocol.client_nonce = server_nonce  # all 0's anyway
    try:
        self.protocol._safecookie_authchallenge(
            '250 AUTHCHALLENGE SERVERHASH={} SERVERNONCE={}'.format(
                b2a_hex(server_hash).decode('ascii'),
                b2a_hex(server_nonce).decode('ascii'),
            )
        )
        self.assertTrue(False)
    except RuntimeError as e:
        self.assertTrue('hash not expected' in str(e))
def confirm_version_events(self, arg):
    """Callback helper: check the bootstrap parsed version and event list."""
    self.assertEqual(self.protocol.version, 'foo')
    events = 'GUARD STREAM CIRC NS NEWCONSENSUS ORCONN NEWDESC ADDRMAP STATUS_GENERAL'.split()
    self.assertEqual(len(self.protocol.valid_events), len(events))
    self.assertTrue(all(x in self.protocol.valid_events for x in events))
def test_bootstrap_callback(self):
    """post_bootstrap fires with the protocol once bootstrap completes."""
    d = self.protocol.post_bootstrap
    d.addCallback(CallbackChecker(self.protocol))
    d.addCallback(self.confirm_version_events)
    events = b'GUARD STREAM CIRC NS NEWCONSENSUS ORCONN NEWDESC ADDRMAP STATUS_GENERAL'
    self.protocol._bootstrap()
    # answer all the requests generated by bootstrapping etc.
    self.send(b"250-signal/names=")
    self.send(b"250 OK")
    self.send(b"250-version=foo")
    self.send(b"250 OK")
    self.send(b"250-events/names=" + events)
    self.send(b"250 OK")
    self.send(b"250 OK")  # for USEFEATURE
    return d
def test_bootstrap_tor_does_not_support_signal_names(self):
    """A 552 for signal/names falls back to the hard-coded signal list."""
    self.protocol._bootstrap()
    self.send(b'552 Unrecognized key "signal/names"')
    valid_signals = ["RELOAD", "DUMP", "DEBUG", "NEWNYM", "CLEARDNSCACHE"]
    self.assertEqual(self.protocol.valid_signals, valid_signals)
def test_async(self):
    """
    test the example from control-spec.txt to see that we
    handle interleaved async notifications properly.
    """
    self.protocol._set_valid_events('CIRC')
    self.protocol.add_event_listener('CIRC', lambda _: None)
    self.send(b"250 OK")
    d = self.protocol.get_conf("SOCKSPORT ORPORT")
    # 650 async event arrives in the middle of the 250 GETCONF reply
    self.send(b"650 CIRC 1000 EXTENDED moria1,moria2")
    self.send(b"250-SOCKSPORT=9050")
    self.send(b"250 ORPORT=0")
    return d
def test_async_multiline(self):
# same as above, but i think the 650's can be multline,
# too. Like:
# 650-CIRC 1000 EXTENDED moria1,moria2 0xBEEF
# 650-EXTRAMAGIC=99
# 650 ANONYMITY=high
self.protocol._set_valid_events('CIRC')
self.protocol.add_event_listener(
'CIRC',
CallbackChecker(
"1000 EXTENDED moria1,moria2\nEXTRAMAGIC=99\nANONYMITY=high"
)
)
self.send(b"250 OK")
d = self.protocol.get_conf("SOCKSPORT ORPORT")
d.addCallback(CallbackChecker({"ORPORT": "0", "SOCKSPORT": "9050"}))
self.send(b"650-CIRC 1000 EXTENDED moria1,moria2")
self.send(b"650-EXTRAMAGIC=99")
self.send(b"650 ANONYMITY=high")
self.send(b"250-SOCKSPORT=9050")
self.send(b"250 | |
= np.zeros((DNB_NUM_Y - 3) * (DNB_NUM_X - 3))
i = 0
for y in range(2, DNB_NUM_Y - 1):
left_x = float(DNB_NUM_Y - y) / DNB_NUM_Y * vertices_x[0] \
+ float(y) / DNB_NUM_Y * vertices_x[3]
left_y = float(DNB_NUM_Y - y) / DNB_NUM_Y * vertices_y[0] \
+ float(y) / DNB_NUM_Y * vertices_y[3]
right_x = float(DNB_NUM_Y - y) / DNB_NUM_Y * vertices_x[1] \
+ float(y) / DNB_NUM_Y * vertices_x[2]
right_y = float(DNB_NUM_Y - y) / DNB_NUM_Y * vertices_y[1] \
+ float(y) / DNB_NUM_Y * vertices_y[2]
for x in range(2, DNB_NUM_X - 1):
dnb_x = float(DNB_NUM_X - x) / DNB_NUM_X * \
left_x + float(x) / DNB_NUM_X * right_x
dnb_y = float(DNB_NUM_X - x) / DNB_NUM_X * \
left_y + float(x) / DNB_NUM_X * right_y
THE_X = floor(dnb_x)
THE_Y = floor(dnb_y)
if THE_X >= 0 and THE_X + 1 < img.shape[1] \
and THE_Y >= 0 and THE_Y + 1 < img.shape[0]: # 使用双线性插值
u = dnb_x - float(THE_X)
v = dnb_y - float(THE_Y)
blk_ints_1D[i] = (1 - u) * (1 - v) * img[THE_Y][THE_X] \
+ (1 - u) * v * img[THE_Y + 1][THE_X] \
+ u * (1 - v) * img[THE_Y][THE_X + 1] \
+ u * v * img[THE_Y + 1][THE_X + 1]
else:
print('[Warning]: DNB out of boundary.')
blk_ints_1D[i] = 0 # 用0填充图像外面的部分
i += 1
return blk_ints_1D
# calculate pixel intensity of block
def calcPixelIntsOfBlock(img, TCs, X, Y):
    """Placeholder for per-block pixel-intensity extraction (not implemented)."""
    pass
# calculate block's DNB background
# using different strategies according to block ID
def calcBlockDNBBkgSpe(img, TCs, blk_id, R, DNB_NUM_X, DNB_NUM_Y):
    """Estimate a background value for every DNB spot of one block.

    The four corner backgrounds are measured with ``computeTrackAreaBkg``
    and then spread over the block's interior DNB grid; the interpolation
    strategy depends on where the block sits in the 10x10 block layout
    (interior, border, or corner).

    Parameters
    ----------
    img : 2-D array-like -- source image (indexed ``img[y][x]``).
    TCs : sequence of 121 (x, y) pairs -- track-cross lattice (11x11),
        row-major.  # assumes pixel coordinates -- TODO confirm
    blk_id : int -- block index in [0, 99], row-major in the 10x10 grid.
    R : passed through to ``computeTrackAreaBkg`` -- semantics defined
        there (not visible here).
    DNB_NUM_X, DNB_NUM_Y : int -- DNB grid dimensions of a block.

    Returns
    -------
    numpy.ndarray of shape (DNB_NUM_Y - 3, DNB_NUM_X - 3), or None when
    the parameters fail validation.
    """
    if blk_id < 0 or blk_id > 99 or len(TCs) != 121:
        print('[Error]: parameters wrong.')
        return None
    # Block position in the 10x10 layout, row-major by blk_id.
    X = int(blk_id % 10)
    Y = int(blk_id / 10)
    # prepare 4 vertices and width, height for given block
    vertices_x = np.zeros(4)
    vertices_y = np.zeros(4)
    # Corners are four neighbouring points of the 11x11 track-cross lattice:
    # 0 = top-left, 1 = top-right, 2 = bottom-right, 3 = bottom-left
    # (ordering inferred from the id arithmetic below -- TODO confirm).
    id_0 = Y * 11 + X
    id_1 = id_0 + 1
    id_2 = id_0 + 12
    id_3 = id_0 + 11
    vertices_x[0] = TCs[id_0][0]
    vertices_x[1] = TCs[id_1][0]
    vertices_x[2] = TCs[id_2][0]
    vertices_x[3] = TCs[id_3][0]
    vertices_y[0] = TCs[id_0][1]
    vertices_y[1] = TCs[id_1][1]
    vertices_y[2] = TCs[id_2][1]
    vertices_y[3] = TCs[id_3][1]
    width = (vertices_x[1] - vertices_x[0] +
             vertices_x[2] - vertices_x[3]) * 0.5  # block width
    height = (vertices_y[3] - vertices_y[0] +
              vertices_y[2] - vertices_y[1]) * 0.5  # block height
    print('width,height[%.2f, %.2f]' % (width, height))
    # prepare 4 vertices's background
    # parameters are in consistent with the v0.1 doc
    vertex_bkgs = np.zeros(4)
    vertex_bkgs[0] = computeTrackAreaBkg(img, TCs[id_0], 0.5, R)
    vertex_bkgs[1] = computeTrackAreaBkg(img, TCs[id_1], 0.5, R)
    vertex_bkgs[2] = computeTrackAreaBkg(img, TCs[id_2], 0.5, R)
    vertex_bkgs[3] = computeTrackAreaBkg(img, TCs[id_3], 0.5, R)
    # test DNB spots' coordinates and background
    blk_bkgs = np.zeros((DNB_NUM_Y - 3, DNB_NUM_X - 3))  # numpy 2d array
    i = 0  # running DNB counter (only used by the commented-out debug print)
    for y in range(2, DNB_NUM_Y - 1):
        # Endpoints of row y: linear interpolation along the block's
        # left edge (vertices 0->3) and right edge (vertices 1->2).
        left_x = float(DNB_NUM_Y - y) / DNB_NUM_Y * vertices_x[0] \
            + float(y) / DNB_NUM_Y * vertices_x[3]
        left_y = float(DNB_NUM_Y - y) / DNB_NUM_Y * vertices_y[0] \
            + float(y) / DNB_NUM_Y * vertices_y[3]
        right_x = float(DNB_NUM_Y - y) / DNB_NUM_Y * vertices_x[1] \
            + float(y) / DNB_NUM_Y * vertices_x[2]
        right_y = float(DNB_NUM_Y - y) / DNB_NUM_Y * vertices_y[1] \
            + float(y) / DNB_NUM_Y * vertices_y[2]
        for x in range(2, DNB_NUM_X - 1):
            # DNB spot position: interpolate between the row endpoints.
            dnb_x = float(DNB_NUM_X - x) / DNB_NUM_X * \
                left_x + float(x) / DNB_NUM_X * right_x
            dnb_y = float(DNB_NUM_X - x) / DNB_NUM_X * \
                left_y + float(x) / DNB_NUM_X * right_y
            if X > 0 and X < 9 and Y > 0 and Y < 9:  # inside
                # Bilinear blend of the four corner backgrounds.
                # NOTE(review): the "* 0.5" factors in u/v look suspicious
                # (weights may not reach 1 at the far edge) -- verify
                # against the v0.1 doc referenced above.
                u = (dnb_x - (vertices_x[0] +
                              vertices_x[3]) * 0.5) / width * 0.5
                v = (dnb_y - (vertices_y[0] +
                              vertices_y[1]) * 0.5) / height * 0.5
                bkg = (1 - u) * (1 - v) * vertex_bkgs[0] + (1 - u) * v * vertex_bkgs[3] \
                    + u * (1 - v) * vertex_bkgs[1] + u * v * vertex_bkgs[2]
            elif Y == 0 and X > 0 and X < 9:  # up border
                # Distance-weighted blend between the two bottom corners.
                dist_3 = sqrt((dnb_x - vertices_x[3]) * (dnb_x - vertices_x[3])
                              + (dnb_y - vertices_y[3]) * (dnb_y - vertices_y[3]))
                dist_2 = sqrt((dnb_x - vertices_x[2]) * (dnb_x - vertices_x[2])
                              + (dnb_y - vertices_y[2]) * (dnb_y - vertices_y[2]))
                ratio = dist_3 / (dist_3 + dist_2)
                bkg = (1 - ratio) * vertex_bkgs[3] + ratio * vertex_bkgs[2]
            elif Y == 9 and X > 0 and X < 9:  # down border
                dist_0 = sqrt((dnb_x - vertices_x[0]) * (dnb_x - vertices_x[0])
                              + (dnb_y - vertices_y[0]) * (dnb_y - vertices_y[0]))
                dist_1 = sqrt((dnb_x - vertices_x[1]) * (dnb_x - vertices_x[1])
                              + (dnb_y - vertices_y[1]) * (dnb_y - vertices_y[1]))
                ratio = dist_0 / (dist_0 + dist_1)
                bkg = (1 - ratio) * vertex_bkgs[0] + ratio * vertex_bkgs[1]
            elif X == 0 and Y > 0 and Y < 9:  # left border
                dist_1 = sqrt((dnb_x - vertices_x[1]) * (dnb_x - vertices_x[1])
                              + (dnb_y - vertices_y[1]) * (dnb_y - vertices_y[1]))
                dist_2 = sqrt((dnb_x - vertices_x[2]) * (dnb_x - vertices_x[2])
                              + (dnb_y - vertices_y[2]) * (dnb_y - vertices_y[2]))
                ratio = dist_1 / (dist_1 + dist_2)
                bkg = (1 - ratio) * vertex_bkgs[1] + ratio * vertex_bkgs[2]
            elif X == 9 and Y > 0 and Y < 9:  # right border
                dist_0 = sqrt((dnb_x - vertices_x[0]) * (dnb_x - vertices_x[0])
                              + (dnb_y - vertices_y[0]) * (dnb_y - vertices_y[0]))
                dist_3 = sqrt((dnb_x - vertices_x[3]) * (dnb_x - vertices_x[3])
                              + (dnb_y - vertices_y[3]) * (dnb_y - vertices_y[3]))
                ratio = dist_0 / (dist_0 + dist_3)
                bkg = (1 - ratio) * vertex_bkgs[0] + ratio * vertex_bkgs[3]
            # Corner blocks: use the single corner background pointing
            # towards the chip interior.
            elif blk_id == 0:  # corner 0
                bkg = vertex_bkgs[2]
            elif blk_id == 9:  # corner 1
                bkg = vertex_bkgs[3]
            elif blk_id == 99:  # corner 2
                bkg = vertex_bkgs[0]
            elif blk_id == 90:  # corner 3
                bkg = vertex_bkgs[1]
            blk_bkgs[y - 2][x - 2] = bkg
            # print('DNB %d Bkg: %.2f' %(i, bkg))
            i += 1
    # storePts(dnbs, './dnbs.txt')
    return blk_bkgs
# extract block DNB spot's mean intensity
def extractBlkIntsMean(img, TCs, blk_id, DNB_NUM_X, DNB_NUM_Y):
if blk_id < 0 or blk_id > 99 or len(TCs) != 121:
print('[Error]: parameters wrong.')
return None
X = int(blk_id % 10)
Y = int(blk_id / 10)
# prepare 4 vertices and width, height for given block
vertices_x = np.zeros(4)
vertices_y = np.zeros(4)
id_0 = Y * 11 + X
id_1 = id_0 + 1
id_2 = id_0 + 12
id_3 = id_0 + 11
vertices_x[0] = TCs[id_0][0]
vertices_x[1] = TCs[id_1][0]
vertices_x[2] = TCs[id_2][0]
vertices_x[3] = TCs[id_3][0]
vertices_y[0] = TCs[id_0][1]
vertices_y[1] = TCs[id_1][1]
vertices_y[2] = TCs[id_2][1]
vertices_y[3] = TCs[id_3][1]
width = (vertices_x[1] - vertices_x[0] +
vertices_x[2] - vertices_x[3]) * 0.5 # block width
height = (vertices_y[3] - vertices_y[0] +
vertices_y[2] - vertices_y[1]) * 0.5 # block height
print('width,height[%.2f, %.2f]' % (width, height))
# extract each DNB spot's intensity
blk_ints = np.zeros((DNB_NUM_Y - 3, DNB_NUM_X - 3)) # numpy 2d array
i = 0
for y in range(2, DNB_NUM_Y - 1):
left_x = float(DNB_NUM_Y - y) / DNB_NUM_Y * vertices_x[0] \
+ float(y) / DNB_NUM_Y * vertices_x[3]
left_y = float(DNB_NUM_Y - y) / DNB_NUM_Y * vertices_y[0] \
+ float(y) / DNB_NUM_Y * vertices_y[3]
right_x = float(DNB_NUM_Y - y) / DNB_NUM_Y * vertices_x[1] \
+ float(y) / DNB_NUM_Y * vertices_x[2]
right_y = float(DNB_NUM_Y - y) / DNB_NUM_Y * vertices_y[1] \
+ float(y) / DNB_NUM_Y * vertices_y[2]
for x in range(2, DNB_NUM_X - 1):
dnb_x = float(DNB_NUM_X - x) / DNB_NUM_X * \
left_x + float(x) / DNB_NUM_X * right_x
dnb_y = float(DNB_NUM_X - x) / DNB_NUM_X * \
left_y + float(x) / DNB_NUM_X * right_y
# if dnb_x >= 0 and dnb_x < img.shape[1] \
                # and dnb_y >= 0 and dnb_y < img.shape[0]: # use nearest-neighbour method
# the_x = int(dnb_x)
# the_y = int(dnb_y)
# blk_ints[y-2][x-2] = img[the_y][the_x]
THE_X = floor(dnb_x)
THE_Y = floor(dnb_y)
if THE_X >= 0 and THE_X + 1 < img.shape[1] \
and THE_Y >= 0 and THE_Y + 1 < img.shape[0]: # 使用双线性插值
u = dnb_x - | |
<reponame>saroudant/Percolate
import torch, os
import numpy as np
from copy import deepcopy
from sklearn.model_selection import GridSearchCV, KFold
from sklearn.neighbors import KNeighborsRegressor
from sklearn.pipeline import Pipeline
from sklearn.kernel_ridge import KernelRidge
from sklearn.preprocessing import StandardScaler
from scipy.stats import ortho_group
from joblib import Parallel, delayed
from pickle import load, dump
from .GLMPCA import GLMPCA
from .difference_GLMPCA import difference_GLMPCA
from .residualGLMPCA import ResidualGLMPCA
from .generalized_SVD import generalized_SVD
class GLMJIVE:
def __init__(
self,
n_factors,
n_joint,
families,
maxiter=1000,
max_param=None,
learning_rates=None,
batch_size=None,
n_glmpca_init=None,
step_size=None,
gamma=None,
n_jobs=1
):
"""
Method can be 'svd' or 'likelihood'.
"""
self.n_factors = n_factors
self.n_joint = n_joint
self.families = families
if type(maxiter) is int:
self.maxiter = {k: maxiter for k in n_factors}
else:
self.maxiter = maxiter
if type(max_param) is int:
self.max_param = {k: max_param for k in n_factors}
else:
self.max_param = max_param if max_param is not None else {k:None for k in n_factors}
self.learning_rates = learning_rates if learning_rates is not None else {k:0.01 for k in n_factors}
self.batch_size = batch_size if batch_size is not None else {k:128 for k in n_factors}
self.n_glmpca_init = n_glmpca_init if n_glmpca_init is not None else {k:1 for k in n_factors}
self.step_size = step_size if step_size is not None else {k:20 for k in n_factors}
self.gamma = gamma if gamma is not None else {k:0.5 for k in n_factors}
# self.with_intercept = with_intercept
self.factor_models = {}
self.joint_models = {}
# For parallelization
self.n_jobs = n_jobs
def fit(self, X, no_alignment=False, exp_family_params=None):
"""
X must be a dictionary of data with same keys than n_factors and families.
- no_alignment: bool, default to False
Whether joint and individual components must be computed. If set to yes, process stops to
the computation of the matrix M.
"""
# Train GLM-PCA instances.
self._train_glmpca_instances(X, exp_family_params=exp_family_params)
# Compute the matrix M and decompose it by SVD.
self._aggregate_scores()
# Stop if required
if no_alignment:
return True
# Initialize models
self._initialize_models(X)
self._computation_joint_individual_factor_model(X)
return True
def set_out_of_sample_extension(self, known_data_type, cv=10, n_jobs=1):
"""
Set up the out-of-sample computation by training kNN regression models from the
known data type to the other unknown type.
known_data_type: str
Data-type to regress on.
"""
# Set known and unknown data-type
self.known_data_type = known_data_type
self.unknown_data_type = [e for e in self.data_types if e != self.known_data_type]
assert len(self.unknown_data_type) == 1
self.unknown_data_type = self.unknown_data_type[0]
# Train regression model
self.trans_type_regressors_ = {
joint_factor_idx: self._train_trans_type_regression_model(joint_factor_idx, cv=cv, n_jobs=n_jobs)
for joint_factor_idx in range(self.n_joint)
}
return True
def _train_glmpca_instances(self, X, exp_family_params=None):
"""
Train the GLM-PCA instances needed for the GLM-JIVE.
"""
# Train factor models
self.factor_models = {}
self.orthogonal_scores = []
self.data_types = list(X.keys())
exp_family_params = exp_family_params if exp_family_params is not None else {data_type:None for data_type in X}
# self.factor_models = dict(Parallel(n_jobs=2, verbose=1)(
# delayed(self._train_one_glmpca_instance)(data_type, X, exp_family_params)
# for data_type in self.data_types
# ))
self.factor_models = dict([
self._train_one_glmpca_instance(data_type, X, exp_family_params)
for data_type in self.data_types
])
self.orthogonal_scores = [
self.factor_models[data_type].compute_saturated_orthogonal_scores(X[data_type], correct_loadings=False)
for data_type in self.factor_models
]
return True
def _train_one_glmpca_instance(self, data_type, X, exp_family_params):
glmpca_clf = GLMPCA(
self.n_factors[data_type],
family=self.families[data_type],
maxiter=self.maxiter[data_type],
max_param=self.max_param[data_type],
learning_rate=self.learning_rates[data_type],
step_size=self.step_size[data_type],
gamma=self.gamma[data_type],
n_jobs=self.n_jobs
)
glmpca_clf.compute_saturated_loadings(
X[data_type],
batch_size=self.batch_size[data_type],
n_init=self.n_glmpca_init[data_type],
exp_family_params=exp_family_params[data_type]
)
return (data_type, glmpca_clf)
def _aggregate_scores(self):
"""
Compute the matrix M alongside its SVD decomposition.
"""
self.M_ = torch.cat(self.orthogonal_scores, axis=1)
self.M_svd_ = list(torch.linalg.svd(self.M_, full_matrices=False))
return True
def _initialize_models(self, X):
self.joint_models = {k:self.factor_models[k].clone_empty_GLMPCA() for k in X}
self.individual_models = {k:difference_GLMPCA.clone_from_GLMPCA(self.factor_models[k]) for k in X}
self.noise_models = {k:ResidualGLMPCA.clone_from_GLMPCA(self.factor_models[k]) for k in X}
def _computation_joint_individual_factor_model(self, X=None, not_aligned_types=[]):
# Set up GPU device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Compute joint scores like in AJIVE
self.joint_scores_ = self.M_svd_[0][:,:self.n_joint]
# Compute individual scores by taking the rest of the glmpca signal
joint_proj = self.joint_scores_.matmul(self.joint_scores_.T)
individual_proj = torch.eye(self.joint_scores_.shape[0]).to(device) - joint_proj.to(device)
self.individual_scores_ = {}
for score, dt in zip(self.orthogonal_scores, self.data_types):
individual_svd = torch.linalg.svd(individual_proj.matmul(score))
self.individual_scores_[dt] = individual_svd[0][:,:-self.n_joint]
_, S_M, V_M = self.M_svd_
self.V_M_ = V_M.T
self.V_M_decomposition_ = {
self.data_types[0]: self.V_M_.T[:,:self.n_factors[self.data_types[0]]].T,
self.data_types[1]: self.V_M_.T[:,self.n_factors[self.data_types[0]]:].T
}
# Compute rotation matrices per data-type
print('START JOINT MODEL', flush=True)
self.joint_scores_contribution_ = {}
for d in self.data_types:
if d in not_aligned_types:
continue
self.joint_models[d].n_pc = self.n_joint
rotation = torch.linalg.svd(self.factor_models[d].saturated_scores_.T.matmul(self.joint_scores_), full_matrices=False)
self.joint_models[d].saturated_loadings_ = self.factor_models[d].saturated_loadings_.matmul(
self.factor_models[d].saturated_scores_.T.matmul(self.joint_scores_)
)
# Compute the joint models as indicated in Methods
self.joint_models[d].saturated_loadings_ = self.factor_models[d].saturated_loadings_.matmul(
self.factor_models[d].saturated_loadings_.T
).matmul(
self.factor_models[d].projected_orthogonal_scores_svd_[2]
).matmul(
torch.diag(1/self.factor_models[d].projected_orthogonal_scores_svd_[1])
).matmul(self.V_M_decomposition_[d]).matmul(torch.diag(1/S_M)[:,:self.n_joint])
# Compute the contribution to the joint scores
self.joint_scores_contribution_[d] = self.factor_models[d].saturated_param_.matmul(
self.joint_models[d].saturated_loadings_
)
# Set up individual by computing the difference
print('START INDIVIDUAL MODEL', flush=True)
for d in self.data_types:
if d in not_aligned_types:
continue
indiv_matrix = torch.Tensor(np.identity(self.joint_scores_.shape[0])).to(device)
indiv_matrix = indiv_matrix - self.joint_scores_.matmul(self.joint_scores_.T)
indiv_matrix, _, _ = torch.linalg.svd(indiv_matrix)
indiv_matrix = indiv_matrix[:,:self.factor_models[d].n_pc - self.n_joint]
self.individual_models[d].saturated_loadings_ = self.factor_models[d].saturated_loadings_.matmul(
self.factor_models[d].saturated_scores_.T.matmul(indiv_matrix)
)
self.individual_models[d].fill_GLMPCA_instances(
self.factor_models[d],
self.joint_models[d]
)
del indiv_matrix
# Set up individual
print('START NOISE MODEL', flush=True)
for d in self.data_types:
if d in not_aligned_types:
continue
noise_matrix = torch.Tensor(np.identity(self.factor_models[d].saturated_loadings_.shape[0])).to(device)
noise_matrix = noise_matrix - self.factor_models[d].saturated_loadings_.matmul(self.factor_models[d].saturated_loadings_.T)
try:
noise_matrix, _, _ = torch.linalg.svd(noise_matrix)
noise_matrix = noise_matrix[:,self.factor_models[d].n_pc:]
except:
print('NOISE MODEL CANNOT BE COMPUTED: SVD DID NOT CONVERGE')
#raise ValueError('NOISE MODEL CANNOT BE COMPUTED: SVD DID NOT CONVERGE')
self.noise_models[d].saturated_loadings_ = noise_matrix
self.noise_models[d].fill_GLMPCA_instances(
self.factor_models[d]
)
del noise_matrix
return True
def compute_joint_signal_from_saturated_params(self, saturated_params, return_decomposition=False):
# Project data
if type(saturated_params) is dict:
U_known = self.joint_models[self.known_data_type].project_low_rank_from_saturated_parameters(
saturated_params[self.known_data_type]
)
else:
U_known = self.joint_models[self.known_data_type].project_low_rank_from_saturated_parameters(saturated_params)
# Predict unknown_data
U_unknown = torch.Tensor([
self.trans_type_regressors_[joint_factor_idx].predict(U_known.detach().cpu().numpy())
for joint_factor_idx in range(self.n_joint)
]).T
if return_decomposition:
return U_known , U_unknown
return U_known + U_unknown
def compute_joint_signal(self, X, return_decomposition=False):
"""
Given a sample of self.known_data:
- Project X on the joint factor.
- Predict the unknown_data type.
- Sum two contributions.
"""
# Project data
if type(X) is dict:
U_known = self.joint_models[self.known_data_type].project_low_rank(X[self.known_data_type])
else:
U_known = self.joint_models[self.known_data_type].project_low_rank(X)
# Predict unknown_data
U_unknown = torch.Tensor([
self.trans_type_regressors_[joint_factor_idx].predict(U_known.detach().cpu().numpy())
for joint_factor_idx in range(self.n_joint)
]).T
if return_decomposition:
return U_known , U_unknown
return U_known + U_unknown
def _train_trans_type_regression_model(self, unknown_factor_idx, cv=10, n_jobs=1, method='knn'):
"""
Train a kNN regression model from the known data-type to the unknown data-type.
"""
X_known = self.joint_scores_contribution_[self.known_data_type].detach().cpu().numpy()
# If the unknown data-type has not been aligned, then look at the difference.
if self.unknown_data_type in self.joint_scores_contribution_:
X_unknown = self.joint_scores_contribution_[self.unknown_data_type][:,unknown_factor_idx].detach().cpu().numpy()
else:
X_unknown = (self.joint_scores_ - self.joint_scores_contribution_[self.known_data_type]).detach().cpu().numpy()[:,unknown_factor_idx]
if method == 'knn':
param_grid = {
'regression__n_neighbors': np.linspace(2,20,19).astype(int),
'regression__weights': ['uniform', 'distance']
}
# GridSearch by cross-validation
imputation_model_ = GridSearchCV(
Pipeline([
('regression', KNeighborsRegressor())
]),
cv=cv,
n_jobs=n_jobs,
pre_dispatch='1.2*n_jobs',
param_grid=param_grid,
verbose=1,
scoring='neg_mean_squared_error'
)
elif method == 'krr':
alpha_values = np.logspace(-5,0,20)
gamma_values = np.logspace(-2, 0, 10)
param_grid ={'regression__alpha': alpha_values, 'regression__gamma': gamma_values}
pipeline = Pipeline([
('normalization', StandardScaler(with_mean=True, with_std=True)),
('regression', KernelRidge(kernel='rbf'))
])
imputation_model_ = GridSearchCV(
pipeline,
cv=cv, n_jobs=n_jobs, param_grid=param_grid, verbose=1, scoring='neg_mean_squared_error', pre_dispatch='1.2*n_jobs',
)
return imputation_model_.fit(X_known, X_unknown)
def project_low_rank(self, X, data_source, data_type):
"""
- data_source: name in training data to align (e.g. mutations).
- data_type: individual or joint.
"""
if data_type == 'individual':
return self.individual_models[data_source].project_low_rank(X)
elif data_type == 'joint':
return self.joint_models[data_source].project_low_rank(X)
elif data_type == 'noise':
return self.noise_models[data_source].project_low_rank(X)
def project_cell_view(self, X, data_source, data_type):
"""
- data_source: name in training data to align (e.g. mutations).
- data_type: individual or joint.
"""
if data_type == 'individual':
return self.individual_models[data_source].project_cell_view(X)
elif data_type == 'joint':
return self.joint_models[data_source].project_cell_view(X)
elif data_type == 'noise':
return self.noise_models[data_source].project_cell_view(X)
    def estimate_number_joint_components_random_matrix(self, n_iter=20, quantile_top_component=0.95, n_jobs=1):
        """
        Estimate the number of joint components against a random-matrix null.

        Samples random orthogonal matrices shaped like the per-data-type
        orthogonal scores (via scipy's ortho_group), concatenates them the
        same way M is built, and counts how many singular values of M exceed
        the chosen quantile of the null top singular values.

        :param n_iter: number of random draws per data-type.
        :param quantile_top_component: quantile of the null distribution of
            top singular values used as the significance threshold.
        :param n_jobs: parallel jobs for sampling (capped at 2, one per data-type).
        :return: estimated number of joint components (int).
        """
        # Generate random orthogonal matrices (one joblib task per data-type;
        # each task draws n_iter square orthogonal matrices).
        random_state = np.random.randint(1,10**6,size=2)
        random_orth_mat = np.array(Parallel(n_jobs=min(n_jobs,2), verbose=1)(
            delayed(ortho_group.rvs)(np.max(score.shape), n_iter, random_state=seed)
            for score, seed in zip(self.orthogonal_scores, random_state)
        )).transpose(1,0,2,3)
        # Restrict to the shape of orthogonal scores (previous matrices are squared)
        random_orth_mat = [
            [
                torch.Tensor(m[:score.shape[0],:score.shape[1]])
                for m, score in zip(mat, self.orthogonal_scores)
            ]
            for mat in random_orth_mat
        ]
        # Verifies that resulting matrices are orthogonal
        for mat in random_orth_mat:
            for m in mat:
                torch.testing.assert_allclose(m.T.matmul(m), torch.eye(m.shape[1]))
        # Compute resulting top singular values of each random concatenation
        # (same construction as M itself).
        random_svd_value = np.array([
            torch.linalg.svd(torch.cat(mat, axis=1))[1].detach().cpu().numpy()
            for mat in random_orth_mat
        ])
        # Number of joint components = count of singular values of M above the
        # requested quantile of the null top singular values.
        number_joint = torch.sum(torch.linalg.svd(self.M_)[1] > np.quantile(random_svd_value[:,0],quantile_top_component))
        number_joint = number_joint.detach().cpu().numpy()
        return int(number_joint)
def estimate_number_joint_components_permutation(self, n_perm=20, quantile_top_component=0.95):
"""
max_joint: int or float
If float, proportion of the minimum number of components.
"""
self.permuted_M_ = []
self.permuted_M_svd_ = []
for perm_idx in range(n_perm):
# Permute data
source_idx = np.arange(self.M_.shape[0])
target_idx = np.arange(self.M_.shape[0])
np.random.shuffle(source_idx)
np.random.shuffle(target_idx)
# Train instance
try:
self.permuted_M_svd_.append(torch.cat([
self.orthogonal_scores[0][source_idx],
self.orthogonal_scores[1][target_idx]
], axis=1))
self.permuted_M_svd_[-1] = torch.linalg.svd(self.permuted_M_svd_[-1])[1][0]
except:
print('PERMUTED SVD DOES NOT RUN')
self.permuted_M_svd_ = torch.Tensor(self.permuted_M_svd_)
number_joint = torch.sum(torch.linalg.svd(self.M_)[1] > np.quantile(self.permuted_M_svd_, quantile_top_component))
number_joint | |
def sqrt(n):
    """Return the square root of n, computed as n ** 0.5."""
    return n ** 0.5
def factorial(n):
    """Return n! for non-negative n (returns 1 when n <= 1)."""
    product = 1
    # Multiplying from 2 upward skips the redundant factor 1.
    for factor in range(2, n + 1):
        product *= factor
    return product
def degrees(x):
    """Convert an angle x from radians to degrees."""
    pi = 3.14159265359
    return x / pi * 180.0
def radians(x):
    """Convert an angle x from degrees to radians."""
    pi = 3.14159265359
    return x / 180.0 * pi
def sin(x):
    """
    Taylor-series sine of x (radians), using terms up to x**9 / 9!.

    The argument is first reduced into [-pi, pi] so the truncated series
    stays accurate.
    """
    pi = 3.14159265359
    # Range-reduce x into [-pi, pi].
    while x > pi:
        x = x - 2 * pi
    while x < -pi:
        x = x + 2 * pi
    # Alternating series: x - x^3/3! + x^5/5! - x^7/7! + x^9/9!.
    result = x
    sign = -1.0
    for power in (3, 5, 7, 9):
        result = result + sign * (x ** power / factorial(power))
        sign = -sign
    return result
def cos(x):
    """
    Taylor-series cosine of x (radians), using terms up to x**8 / 8!.

    The argument is first reduced into [-pi, pi] so the truncated series
    stays accurate.
    """
    pi = 3.14159265359
    # Range-reduce x into [-pi, pi].
    while x > pi:
        x = x - 2 * pi
    while x < -pi:
        x = x + 2 * pi
    # Alternating series: 1 - x^2/2! + x^4/4! - x^6/6! + x^8/8!.
    result = 1
    sign = -1.0
    for power in (2, 4, 6, 8):
        result = result + sign * (x ** power / factorial(power))
        sign = -sign
    return result
def tan(x):
    """
    Tangent of x (radians), via the Taylor-series sin and cos helpers.

    Bug fix: the previous implementation returned sin(x)/sqrt(1 - sin(x)**2),
    which equals sin(x)/abs(cos(x)) and therefore has the WRONG SIGN whenever
    cos(x) < 0 (second and third quadrants; e.g. tan(2.0) must be negative).
    Dividing by cos(x) directly preserves the sign in all quadrants.
    """
    return sin(x) / cos(x)
def arctan_taylor(x, terms=9):
    """
    Compute arctan for small x via a truncated Taylor polynomial.

    Evaluated with Horner's method. The default of 9 terms gives good
    results for abs(x) < 0.1; accuracy degrades as abs(x) grows and the
    series becomes unusable as abs(x) approaches 1.0 (its radius of
    convergence).
    """
    acc = 0.0
    # Horner evaluation over the odd denominators 2*terms-1, ..., 3, 1.
    for denom in range(2 * terms - 1, 0, -2):
        acc = 1.0 / denom - x * x * acc
    return x * acc
def arctan_taylor_with_reduction(x, terms=9, threshold=0.1):
    """
    Compute arctan via half-angle argument reduction plus the Taylor series.

    Repeatedly applies arctan(x) = 2 * arctan(x / (1 + sqrt(1 + x*x)))
    until abs(x) drops below ``threshold``, evaluates the series, then
    doubles the result once per reduction step.
    """
    halvings = 0
    while abs(x) > threshold:
        x = x / (1 + sqrt(1 + x * x))
        halvings += 1
    return arctan_taylor(x, terms=terms) * 2 ** halvings
def atan2(y, x):
    """
    Quadrant-aware arctangent of y/x in radians, mirroring math.atan2's
    sign conventions, with atan2(0, 0) defined as 0.
    """
    pi = 3.14159265359
    if x > 0:
        return arctan_taylor_with_reduction(y / x)
    if x < 0:
        # Shift into the correct quadrant: +pi above the x-axis, -pi below.
        base = arctan_taylor_with_reduction(y / x)
        return base + pi if y >= 0 else base - pi
    # x == 0: straight up, straight down, or the origin.
    if y > 0:
        return pi / 2
    if y < 0:
        return -pi / 2
    return 0
class Reward:
def __init__(self, verbose=False):
self.first_racingpoint_index = None
self.verbose = verbose
self._x = None
self._y = None
self._speed = None
def reward_function(self, params):
################## HELPER FUNCTIONS ###################
def dist_2_points(x1, x2, y1, y2):
return abs(abs(x1-x2)**2 + abs(y1-y2)**2)**0.5
def closest_2_racing_points_index(racing_coords, car_coords):
# Calculate all distances to racing points
distances = []
for i in range(len(racing_coords)):
distance = dist_2_points(x1=racing_coords[i][0], x2=car_coords[0],
y1=racing_coords[i][1], y2=car_coords[1])
distances.append(distance)
# Get index of the closest racing point
closest_index = distances.index(min(distances))
# Get index of the second closest racing point
distances_no_closest = distances.copy()
distances_no_closest[closest_index] = 999
second_closest_index = distances_no_closest.index(
min(distances_no_closest))
return [closest_index, second_closest_index]
def dist_to_racing_line(closest_coords, second_closest_coords, car_coords):
# Calculate the distances between 2 closest racing points
a = abs(dist_2_points(x1=closest_coords[0],
x2=second_closest_coords[0],
y1=closest_coords[1],
y2=second_closest_coords[1]))
# Distances between car and closest and second closest racing point
b = abs(dist_2_points(x1=car_coords[0],
x2=closest_coords[0],
y1=car_coords[1],
y2=closest_coords[1]))
c = abs(dist_2_points(x1=car_coords[0],
x2=second_closest_coords[0],
y1=car_coords[1],
y2=second_closest_coords[1]))
# Calculate distance between car and racing line (goes through 2 closest racing points)
# try-except in case a=0 (rare bug in DeepRacer)
try:
distance = abs(-(a**4) + 2*(a**2)*(b**2) + 2*(a**2)*(c**2) -
(b**4) + 2*(b**2)*(c**2) - (c**4))**0.5 / (2*a)
except:
distance = b
return distance
# Calculate which one of the closest racing points is the next one and which one the previous one
def next_prev_racing_point(closest_coords, second_closest_coords, car_coords, heading):
# Virtually set the car more into the heading direction
heading_vector = [cos(radians(
heading)), sin(radians(heading))]
new_car_coords = [car_coords[0]+heading_vector[0],
car_coords[1]+heading_vector[1]]
# Calculate distance from new car coords to 2 closest racing points
distance_closest_coords_new = dist_2_points(x1=new_car_coords[0],
x2=closest_coords[0],
y1=new_car_coords[1],
y2=closest_coords[1])
distance_second_closest_coords_new = dist_2_points(x1=new_car_coords[0],
x2=second_closest_coords[0],
y1=new_car_coords[1],
y2=second_closest_coords[1])
if distance_closest_coords_new <= distance_second_closest_coords_new:
next_point_coords = closest_coords
prev_point_coords = second_closest_coords
else:
next_point_coords = second_closest_coords
prev_point_coords = closest_coords
return [next_point_coords, prev_point_coords]
def racing_direction_diff(closest_coords, second_closest_coords, car_coords, heading):
# Calculate the direction of the center line based on the closest waypoints
next_point, prev_point = next_prev_racing_point(closest_coords,
second_closest_coords,
car_coords,
heading)
# Calculate the direction in radius, arctan2(dy, dx), the result is (-pi, pi) in radians
track_direction = atan2(
next_point[1] - prev_point[1], next_point[0] - prev_point[0])
# Convert to degree
track_direction = degrees(track_direction)
# Calculate the difference between the track direction and the heading direction of the car
direction_diff = abs(track_direction - heading)
if direction_diff > 180:
direction_diff = 360 - direction_diff
return direction_diff
# Gives back indexes that lie between start and end index of a cyclical list
# (start index is included, end index is not)
def indexes_cyclical(start, end, array_len):
if end < start:
end += array_len
return [index % array_len for index in range(start, end)]
# Calculate how long car would take for entire lap, if it continued like it did until now
def projected_time(first_index, closest_index, step_count, times_list):
# Calculate how much time has passed since start
current_actual_time = (step_count-1) / 15
# Calculate which indexes were already passed
indexes_traveled = indexes_cyclical(first_index, closest_index, len(times_list))
# Calculate how much time should have passed if car would have followed optimals
current_expected_time = sum([times_list[i] for i in indexes_traveled])
# Calculate how long one entire lap takes if car follows optimals
total_expected_time = sum(times_list)
# Calculate how long car would take for entire lap, if it continued like it did until now
try:
projected_time_ans = (current_actual_time/current_expected_time) * total_expected_time
except:
projected_time_ans = 9999
return projected_time_ans
#################### RACING LINE ######################
# Optimal racing line for the Spain track
# Each row: [x,y,speed,timeFromPreviousPoint]
racing_track = [[0.31104, 2.83077, 1.94357],
[0.32414, 2.68038, 2.12813],
[0.34543, 2.53082, 2.36228],
[0.3733, 2.38233, 2.00229],
[0.4071, 2.23508, 1.72364],
[0.44978, 2.09051, 1.55854],
[0.50383, 1.95098, 1.45322],
[0.57068, 1.81924, 1.3823],
[0.65072, 1.69787, 1.35427],
[0.74357, 1.58903, 1.34103],
[0.84808, 1.49401, 1.34103],
[0.96297, 1.4137, 1.35405],
[1.0868, 1.34841, 1.38287],
[1.21814, 1.29817, 1.43986],
[1.35553, 1.2625, 1.50596],
[1.49767, 1.24086, 1.58553],
[1.64334, 1.23254, 1.69214],
[1.79132, 1.23651, 1.8091],
[1.94049, 1.25165, 1.9574],
[2.08953, 1.27651, 1.76552],
[2.23853, 1.29226, 1.61025],
[2.38633, 1.29631, 1.51251],
[2.53169, 1.28683, 1.47229],
[2.67336, 1.26274, 1.47229],
[2.81028, 1.22393, 1.47402],
[2.94174, 1.17098, 1.52205],
[3.06743, 1.10507, 1.63422],
[3.18761, 1.02804, 1.84269],
[3.3031, 0.94225, 2.23531],
[3.4152, 0.85046, 3.1885],
[3.52557, 0.7557, 2.94368],
[3.63872, 0.65878, 2.18937],
[3.75415, 0.56537, 1.88155],
[3.87338, 0.47846, 1.69813],
[3.99682, 0.40069, 1.59584],
[4.12332, 0.33476, 1.53928],
[4.24963, 0.2827, 1.50562],
[4.3721, 0.24486, 1.49287],
[4.49039, 0.21965, 1.49287],
[4.60842, 0.20518, 1.51407],
[4.73178, 0.20077, 1.54031],
[4.86435, 0.20774, 1.57986],
[5.00455, 0.22781, 1.62794],
[5.14724, 0.26138, 1.68547],
[5.28852, 0.30756, 1.75118],
[5.42652, 0.36515, 1.83711],
[5.56049, 0.43283, 1.93476],
[5.69013, 0.50938, 2.04539],
[5.8153, 0.5936, 2.1894],
[5.93611, 0.68423, 2.35569],
[6.05281, 0.78017, 2.56213],
[6.16585, 0.88045, 2.81983],
[6.27572, 0.98419, 3.06035],
[6.38282, 1.09078, 3.12985],
[6.4872, 1.20004, 3.19794],
[6.58892, 1.31179, 3.26933],
[6.68802, 1.42585, 3.34194],
[6.78459, 1.54207, 3.41706],
[6.87867, 1.6603, 3.49638],
[6.97035, 1.78041, 3.57949],
[7.05971, 1.90227, 3.65611],
[7.1468, 2.02575, 3.73555],
[7.23169, 2.15076, 3.80011],
[7.31445, 2.27719, 3.78058],
[7.39504, 2.40502, 3.75671],
[7.47341, 2.53422, 3.73008],
[7.5495, 2.66476, 3.69918],
[7.62327, 2.79664, 3.66741],
[7.69465, 2.92982, 3.62625],
[7.76358, 3.06429, 3.58438],
[7.82997, 3.20002, 3.53921],
[7.89374, 3.33701, 3.48919],
[7.9548, 3.47523, 3.42826],
[8.01305, 3.61466, 3.2132],
[8.06836, 3.75528, 3.02286],
[8.12029, 3.89719, 2.86173],
[8.16837, 4.04044, 2.71998],
[8.21212, 4.18507, 2.59684],
[8.25102, 4.33108, 2.23712],
[8.28455, 4.47841, 1.89036],
[8.31076, 4.62712, 1.68362],
[8.32662, 4.7767, 1.54915],
[8.3295, 4.9258, 1.45486],
[8.3175, 5.07263, 1.395],
[8.28941, 5.21522, 1.3615],
[8.24483, 5.35169, 1.34423],
[8.18399, 5.48034, 1.34423],
[8.10747, 5.59967, 1.34561],
[8.01624, 5.70839, 1.36535],
[7.91155, 5.8055, 1.40174],
[7.79495, 5.89026, 1.45342],
[7.66815, 5.96227, 1.52127],
[7.53302, 6.02147, 1.60606],
[7.39153, 6.06821, 1.71379],
[7.24558, 6.10329, 1.84598],
[7.09685, 6.12787, 2.01119],
[6.94661, 6.14338, 2.22889],
[6.79572, 6.15142, 2.52062],
[6.64464, 6.15359, 2.49557],
[6.49359, 6.15005, 2.43894],
[6.3428, 6.14053, 2.38561],
[6.19261, 6.12478, 2.31923],
[6.04329, 6.10254, 2.07534],
[5.89516, 6.07349, 1.90975],
[5.74895, 6.03602, 1.78784],
[5.60594, 5.98888, 1.70369],
[5.46766, 5.93121, 1.66046],
[5.33555, 5.86257, 1.63623],
[5.2107, 5.78305, 1.63623],
[5.09402, 5.69302, 1.65581],
[4.98592, 5.59334, 1.69526],
[4.8865, 5.48496, 1.78349],
[4.79536, 5.36915, 1.92002],
[4.71168, 5.24718, 2.118],
[4.63438, 5.12036, 2.42769],
[4.5621, 4.98995, 2.93145],
[4.49333, 4.85714, 2.41862],
[4.42748, 4.72486, 1.96875],
[4.3568, 4.59639, 1.72707],
[4.27913, 4.47373, 1.58386],
[4.19286, 4.35871, 1.49626],
[4.09694, 4.25299, 1.44541],
[3.99088, 4.15808, 1.41799],
[3.8747, 4.07539, 1.41799],
[3.74891, 4.00629, 1.4216],
[3.61476, 3.9518, 1.43635],
[3.47408, 3.91276, 1.47334],
[3.32913, 3.8893, 1.52574],
[3.18214, 3.88086, 1.59439],
[3.03484, 3.88636, 1.6901],
[2.88834, 3.90427, 1.82251],
[2.74318, 3.93284, 1.98323],
[2.59958, 3.9704, 2.20254],
[2.4575, 4.01527, 2.50779],
[2.31676, 4.0658, 2.55991],
[2.1769, 4.12041, 2.13511],
[2.03545, 4.16963, 1.86435],
[1.89195, 4.2111, 1.69176],
[1.74617, 4.24236, 1.56844],
[1.59842, 4.2611, 1.47598],
[1.44967, 4.26516, 1.41622],
[1.30167, 4.25268, 1.37185],
[1.15686, 4.22259, 1.34187],
[1.01787, 4.17453, 1.33159],
[0.88724, 4.10883, 1.33],
[0.76709, 4.02656, 1.33],
[0.65923, 3.92908, 1.34182],
[0.56503, 3.81813, 1.36901],
[0.48536, 3.69568, 1.40283],
[0.42077, 3.56373, 1.45199],
[0.37132, 3.42437, 1.51676],
[0.33657, 3.27966, 1.59183],
[0.31576, 3.13147, 1.68543],
[0.30776, 2.98142, 1.81042]]
################## INPUT PARAMETERS ###################
# episode,steps,X,Y,yaw,steer,throttle,action,reward,done,all_wheels_on_track,progress,closest_waypoint,track_len,tstamp,episode_status
# Read all input parameters
all_wheels_on_track = params['all_wheels_on_track']
x = params['x']
y = params['y']
distance_from_center = params['distance_from_center']
is_left_of_center = params['is_left_of_center']
heading = params['heading']
progress = params['progress']
steps = params['steps']
speed = params['speed']
steering_angle = params['steering_angle']
track_width = params['track_width']
waypoints = params['waypoints']
closest_waypoints = params['closest_waypoints']
is_offtrack = params['is_offtrack']
############### OPTIMAL X,Y,SPEED,TIME ################
# Get closest indexes for racing line (and distances to all points on racing line)
closest_index, second_closest_index = closest_2_racing_points_index(
racing_track, [x, y])
# Get optimal [x, y, speed, time] for closest and second closest index
optimals = racing_track[closest_index]
optimals_second = | |
out_map_obj = MapParser('')
out_map_obj.copy_header(in_map)
out_map_obj.data = final
if mask_path is not None:
mask_ob = MapParser('')
mask_ob.copy_header(in_map)
mask_ob.data = mask
mask_ob.write_map(mask_path)
if out_map is not None:
out_map_obj.write_map(out_map)
return out_map_obj
@staticmethod
def find_near_atoms(atom, model, distance=6):
"""
Finds all atoms which are closer than "distance" from the target atom
:param atom: target atom (center of search)
:param model: biopython structure object
:param distance: search radius
:return: list of biopython atom objects
"""
close = []
# print("Atom_parent", atom.parent.id[1], type(atom.parent.id[1]))
for atom1 in model.get_atoms():
if atom1.parent.id != atom.parent.id or atom1.parent.parent.id != atom.parent.parent.id:
d = atom1 - atom
if d < distance:
close.append(atom1)
close = list(set(close))
# Filter out flanking residues BB atoms
# close = [a for a in close if a.parent.id[1] not in [atom.parent.id[1]-1, atom.parent.id[1]+1]]
filtered = []
for a in close:
if a.get_name() not in ['N', 'C', 'O', 'CA']:
filtered.append(a)
elif a.parent.id[1] not in [atom.parent.id[1] - 1, atom.parent.id[1] + 1]:
filtered.append(a)
return filtered
@staticmethod
def print_mem():
mem = psutil.Process(p.pid).memory_info()
return mem.rss / 1000000
    def chop_soft_radius_watershed_slow(self, model, in_map, whole_model, shifts=None, out_map=None,
                                        radius=2, soft_radius=1, mask_path=None):
        """
        TODO: requires more testing
        Chop map using a soft mask with a given radius (hard_radius + soft_radius)
        around an amino acid residue. A cosine function is used to create the soft
        mask. Similar to chop_soft_radius but avoids cutting neighboring residues'
        side-chain density: for each atom of the guide model it finds the closest
        atoms that do NOT belong to the guide model and places the mask edge
        between them (watershed-style). Can be used to chop maps around bigger
        models but may take long for big objects.

        :param model: biopython model object (guide model to chop around)
        :param in_map: MapParser object or path to the input map
        :param whole_model: biopython model object; the complete model of which
            the guide model is a part
        :param shifts: coordinate shift between model and map (default: zeros)
        :param out_map: output path for the chopped map (optional)
        :param radius: hard radius (fully masked)
        :param soft_radius: soft radius (cosine falloff)
        :param mask_path: output path for the mask itself (optional)
        :return out_map_obj: map object holding the chopped density
        """
        # Get atom coordinates
        if shifts is None:
            shifts = np.array([0, 0, 0])
        # Accept either an already-parsed map or a file path.
        if isinstance(in_map, MapParser):
            pass
        elif os.path.exists(in_map):
            in_map = MapParser(in_map)
        else:
            raise Exception('in_map should be MapParser object or a map file path not {}'.format(type(in_map)))
        voxel_size = in_map.voxel_size
        # Average voxel edge (assumes near-cubic voxels) used for all
        # index-space -> Angstrom distance conversions below.
        aver_voxel_size = sum(voxel_size) / 3
        # Create a numpy array for mask
        shape = in_map.data.shape
        mask = np.zeros(shape, dtype='float32')
        # Search window half-width in voxels around each atom.
        r = int(round((radius + soft_radius) / aver_voxel_size))
        for atom in model.get_atoms():
            xyz = in_map.coord_to_index(atom.coord - shifts)
            xyz_int = in_map.coord_to_index_int(atom.coord - shifts)
            near_atoms = []
            # Backbone C/CA are shared along the chain; skip the neighbour
            # search for them so the backbone is not cut.
            if atom.get_name() not in ['C', 'CA']:
                near_atoms = self.find_near_atoms(atom, whole_model, distance=(radius + soft_radius) * 2)
            for x in range(xyz_int[0] - r, xyz_int[0] + r):
                for y in range(xyz_int[1] - r, xyz_int[1] + r):
                    for z in range(xyz_int[2] - r, xyz_int[2] + r):
                        # 100 acts as an "infinitely far" sentinel so min()
                        # is well defined when there are no near atoms.
                        near_ds = [100]
                        for n_atom in near_atoms:
                            n_xyz = in_map.coord_to_index(n_atom.coord - shifts)
                            dn = aver_voxel_size * math.sqrt((x - n_xyz[0]) ** 2 + (y - n_xyz[1]) ** 2
                                                             + (z - n_xyz[2]) ** 2)
                            near_ds.append(dn)
                        # dn: distance from this voxel to the closest atom
                        # that is NOT part of the guide model.
                        dn = min(near_ds)
                        # Calculate the distance between the current atom and the current voxel
                        d = aver_voxel_size * math.sqrt((x - xyz[0]) ** 2 + (y - xyz[1]) ** 2 + (z - xyz[2]) ** 2)
                        if d > dn * 1.3:
                            # Voxel clearly belongs to the neighbouring atom:
                            # leave it unmasked.
                            continue
                        elif dn < radius + soft_radius:
                            # Watershed: place the soft edge roughly midway
                            # between this atom and its nearest neighbour,
                            # never beyond the nominal mask radius.
                            delta2 = min((d + dn) * 0.65, radius + soft_radius)
                            delta1 = delta2 - soft_radius
                        else:
                            # No close neighbour: use the nominal hard/soft radii.
                            delta2 = radius + soft_radius
                            delta1 = radius
                        # Assign mask values based to the distance to the atoms
                        if d < delta1:
                            try:
                                mask[x, y, z] = 1
                            except IndexError:
                                # Atom window extends past the map edge.
                                # NOTE(review): negative indices wrap around in
                                # numpy and are NOT caught here — verify atoms
                                # near the map origin behave as intended.
                                pass
                        elif delta1 < d < delta2:
                            try:
                                # Cosine falloff from 1 to 0 across the soft shell.
                                mask[x, y, z] += (math.cos((math.pi / soft_radius) * (d - delta1)) + 1) / 2
                            except IndexError:
                                pass
        # Overlapping soft shells can sum above 1; clamp to a valid mask.
        mask[mask > 1] = 1
        final = (mask * in_map.data)
        out_map_obj = MapParser('')
        out_map_obj.copy_header(in_map)
        out_map_obj.data = final
        if mask_path is not None:
            mask_ob = MapParser('')
            mask_ob.copy_header(in_map)
            mask_ob.data = mask
            mask_ob.write_map(mask_path)
        if out_map is not None:
            out_map_obj.write_map(out_map)
        return out_map_obj
def chop_soft_radius_watershed(self, model, in_map, whole_model, shifts=None, out_map=None,
radius=2, soft_radius=1, asymmetric_delta=0.5, mask_path=None):
"""
        Chop map using a soft mask with a given radius (hard_radius + soft_radius) around an amino acid residue.
A cosine function is used to create the soft mask. Similar to chop_soft_radius but avoids
cutting neighboring residues side chains map. To do so, it creates two masks: a soft edge mask (var: mask)
around the guide model and another soft edge mask (var: outer_mask) around the atoms which are near the guide
model atoms (d < hard_radius + soft_radius). The final mask is given by: mask = (mask - outer_mask * mask).
It can be used to chop map around bigger models but may take long for big objects.
:param whole_model: biopython model object. The complete model of which the guide model is a part of
:param shifts: between model and map
:param mask_path: mask output path
:param model: biopython atomic residue object
:param in_map: in_dir to the input map
:param out_map: out_map: in_dir for the chopped map
:param radius: hard radius
:param soft_radius: soft radius
:param asymmetric_delta:
"""
# r1 - hard radius for near atoms
r1 = radius - asymmetric_delta
# r2 - soft radius for near atoms
r2 = r1 + soft_radius
# Get atom coordinates
if shifts is None:
shifts = np.array([0, 0, 0])
if isinstance(in_map, MapParser):
pass
elif os.path.exists(in_map):
in_map = MapParser(in_map)
else:
raise Exception('in_map should be MapParser object or a map file path not {}'.format(type(in_map)))
voxel_size = in_map.voxel_size
aver_voxel_size = sum(voxel_size) / 3
delta1 = radius
delta2 = radius + soft_radius
# Create a numpy array for mask
shape = in_map.data.shape
mask = np.zeros(shape, dtype='float32')
outer_mask = np.zeros(shape, dtype='float32')
r = int(round((radius + soft_radius) / aver_voxel_size))
near_atoms = []
import time
near_time = 0
for atom in model.get_atoms():
xyz = in_map.coord_to_index(atom.coord - shifts)
xyz_int = in_map.coord_to_index_int(atom.coord - shifts)
t = time.time()
if atom.get_name() not in ['C', 'CA', 'N', 'O']:
# near_atoms += self.find_near_atoms(atom, whole_model, distance=(radius + soft_radius) * 2)
near_atoms += self.find_near_atoms(atom, whole_model, distance=4)
near_time += time.time() - t
for x in range(xyz_int[0] - r, xyz_int[0] + r):
for y in range(xyz_int[1] - r, xyz_int[1] + r):
for z in range(xyz_int[2] - r, xyz_int[2] + r):
# Calculate the distance between the current atom and the current voxel
d = aver_voxel_size * math.sqrt((x - xyz[0]) ** 2 + (y - xyz[1]) ** 2 + (z - xyz[2]) ** 2)
if d <= delta1:
try:
mask[x, y, z] = 1
except IndexError:
pass
elif delta1 < d < delta2:
try:
mask[x, y, z] += (math.cos((math.pi / soft_radius) * (d - delta1)) + 1) / 2
# if intensity value became > 1 it is set to 1
if mask[x, y, z] > 1:
mask[x, y, z] = 1
except IndexError:
pass
mask[mask > 1] = 1
near_atoms = list(set(near_atoms))
# print('NEAR', len(near_atoms), near_atoms)
# print("NEAR time ", near_time)
for atom in near_atoms:
xyz = in_map.coord_to_index(atom.coord - shifts)
xyz_int = in_map.coord_to_index_int(atom.coord - shifts)
for x in range(xyz_int[0] - r, xyz_int[0] + r):
for y in range(xyz_int[1] - r, xyz_int[1] + r):
for z in range(xyz_int[2] - r, xyz_int[2] + r):
# Calculate the distance between the current atom and the current voxel
d = aver_voxel_size * math.sqrt((x - xyz[0]) ** 2 + (y - xyz[1]) ** 2 + (z - xyz[2]) ** 2)
if d <= r1:
try:
outer_mask[x, y, z] = 1
except IndexError:
pass
elif r1 < d < r2:
try:
outer_mask[x, y, z] += (math.cos((math.pi / soft_radius) * (d - r1)) + 1) / 2
# if intensity value became > 1 it is set to 1
if outer_mask[x, y, z] > 1:
outer_mask[x, y, z] = 1
except IndexError:
pass
outer_mask[outer_mask > 1] = 1
outer_mask[mask == 0] = 0
mask = (mask - outer_mask * mask)
final = (mask * in_map.data)
out_map_obj = MapParser('')
out_map_obj.copy_header(in_map)
out_map_obj.data = final
if | |
import pandas as pd
import os
import numpy as np
import argparse
import json
import sys
import shutil
import argparse
import numpy as np
import glob
from lxml import etree
def write_voc_file(fname, labels, coords, img_width, img_height):
    """
    Writes label into VOC (XML) format.

    Args:
        fname - full file path to label file
        labels - list of object class names in file
        coords - list of positions of objects in file; each entry is
            indexable as (xmin, ymin, xmax, ymax)
        img_width - width of image
        img_height - height of image
    Returns:
        annotation - XML tree (root <annotation> element) for image file
    Updates:
        N/A
    Writes to file:
        N/A
    Credit:
        eweill/convert-datasets
    """
    annotation = etree.Element('annotation')
    path_parts = fname.split("/")
    # file name and containing folder are stored separately in VOC
    etree.SubElement(annotation, 'filename').text = path_parts[-1]
    etree.SubElement(annotation, 'folder').text = "/".join(path_parts[:-1])
    # one <object> element per bounding box
    for i, box in enumerate(coords):
        # `obj` rather than `object` -- avoid shadowing the builtin
        obj = etree.SubElement(annotation, 'object')
        etree.SubElement(obj, 'name').text = labels[i]
        bndbox = etree.SubElement(obj, 'bndbox')
        # box is (xmin, ymin, xmax, ymax); tag creation order kept from
        # the original implementation (xmax before xmin, etc.)
        etree.SubElement(bndbox, 'xmax').text = str(box[2])
        etree.SubElement(bndbox, 'xmin').text = str(box[0])
        etree.SubElement(bndbox, 'ymax').text = str(box[3])
        etree.SubElement(bndbox, 'ymin').text = str(box[1])
        # fixed VOC bookkeeping fields
        etree.SubElement(obj, 'difficult').text = '0'
        etree.SubElement(obj, 'occluded').text = '0'
        etree.SubElement(obj, 'pose').text = 'Unspecified'
        etree.SubElement(obj, 'truncated').text = '1'
    # image dimensions; depth is hard-coded to 3 (RGB)
    img_size = etree.SubElement(annotation, 'size')
    etree.SubElement(img_size, 'depth').text = '3'
    etree.SubElement(img_size, 'height').text = str(img_height)
    etree.SubElement(img_size, 'width').text = str(img_width)
    return annotation
def get_all_bounding(process_path, imag_w, imag_h):
    '''
    Gets all of the bounding box informations for the annotations
    file built from clean_dstl

    Args:
        process_path (str): Path to processed data (parent of chopped images
            and annotations file), relative to the current working directory.
            This should be the processed data path in config.
            NOTE(review): assumed to start and end with '/' -- confirm
            against callers.
        imag_w (int): Chopped image width
        imag_h (int): Chopped image height
    Returns:
        df (pd.Dataframe): Dataframe with the bounding box info
            for all of the chopped images (from clean_dstl)
        unlabelfiles (list): List all the unlabeled chopped images
    Updates:
        N/A
    Writes to file:
        Creates <process_path>/labels and <process_path>/images if missing
    '''
    # locate annotations file written by clean_dstl
    ann_file = os.getcwd() + process_path + "annotations.csv"
    label_path = os.getcwd() + process_path + "/labels"
    image_path = os.getcwd() + process_path + "/images"
    # make output dirs if they do not exist yet
    for a_dir in (label_path, image_path):
        if not os.path.exists(a_dir):
            os.makedirs(a_dir)
    # BUGFIX: the annotations file was previously read (and its columns
    # assigned) twice; one read is sufficient
    df = pd.read_csv(ann_file, header=None)
    df.columns = ['file', 'x_min', 'y_min', 'x_max', 'y_max', 'label_str']
    # rows with any missing field carry no usable label
    unlabelfiles = list(df.file.loc[df.isnull().any(axis=1)])
    df.dropna(axis=0, inplace=True)
    # map string labels to integer class ids
    label_dir = {'trees': 0, 'canopy': 1}
    df['label'] = df.label_str.apply(lambda x: label_dir[x]).astype('int')
    # box width and height in pixels
    w = df['x_max'] - df['x_min']
    h = df['y_max'] - df['y_min']
    # convert to (x_center, y_center, w, h), scaled to [0, 1]
    df['w'] = w / imag_w
    df['h'] = h / imag_h
    df['x'] = (df['x_min'] + w / 2) / imag_w
    df['y'] = (df['y_min'] + h / 2) / imag_h
    return df, unlabelfiles
def build_labels(df, unlabelfiles, imag_w, imag_h, lab_format='voc'):
    '''
    Given a df, builds image and label directories for the images
    with labels and moves aside the unused ones.

    Args:
        df (pd.Dataframe): Dataframe of the annotations from clean_dstl.
            Contains path info and labels for each images in dstl.
        unlabelfiles (list of strs): List of the full path of unlabeled
            data that will be moved to an 'unlabeled' directory
        imag_w (int): Chopped image width
        imag_h (int): Chopped image height
        lab_format (str): Label format. Accepts 'voc'/'yolo'
    Returns:
        N/A
    Raises:
        RuntimeError: if lab_format is neither 'voc' nor 'yolo'
    Writes to file:
        Moves unlabeled images and moves labeled images from
        /base/path/data/processed/chopped_images to
        /base/path/data/processed/(images, labels)
    '''
    num_unlabeled = len(unlabelfiles)
    num_labeled = len(df)
    print('{} unlabeled files...{} labeled files'.format(
        num_unlabeled, num_labeled))
    # BUGFIX: chopped_dir was referenced after the two branches below even
    # when neither branch ran (no labeled and no unlabeled files), which
    # raised NameError; initialize it and guard the final cleanup.
    chopped_dir = None
    # move all unlabeled data out of the way
    if num_unlabeled > 0:
        fullpath = unlabelfiles[0]
        base_dir = '/'.join(fullpath.split('/')[:-2])
        move_dir = base_dir + '/unlabeled/'
        chopped_dir = '/'.join(fullpath.split('/')[:-1])
        if not os.path.exists(move_dir):
            os.makedirs(move_dir)
        for f_name in unlabelfiles:
            if os.path.exists(f_name):
                shutil.move(f_name, move_dir)
    else:
        print('No unlabeled data')
    # move labeled data and emit one label file per image
    if num_labeled > 0:
        # paths are derived from the first annotated file
        fullpath = df.iloc[0, 0]
        base_dir = '/'.join(fullpath.split('/')[:-2])
        chopped_dir = '/'.join(fullpath.split('/')[:-1])
        for a_dir in [base_dir + '/images/', base_dir + '/labels/']:
            if not os.path.exists(a_dir):
                os.makedirs(a_dir)
        files2use = pd.unique(df['file'])
        print('Found {} labeled files'.format(len(np.unique(files2use))))
        for f_name in files2use:
            # all annotation rows belonging to this image
            data_temp = df.loc[df['file'] == f_name]
            f_name_list = f_name.split('/')
            f_img = base_dir + '/images/' + f_name_list[-1]
            # move the image into the images directory
            os.rename(f_name, f_img)
            if lab_format == 'yolo':
                f_name_local_lab = f_name_list[-1][:-4] + '.txt'
                f_lab = base_dir + '/labels/' + f_name_local_lab
                with open(f_lab, 'w+') as f:
                    for index, row in data_temp.iterrows():
                        # NOTE(review): this writes a Python-list repr
                        # (with brackets/commas), not space-separated
                        # YOLO fields -- kept for backward compatibility
                        str2dump = str([row['label'], row['x'],
                                        row['y'], row['w'], row['h']])
                        f.write(str2dump + '\n')
            elif lab_format == 'voc':
                f_name_local_lab = f_name_list[-1][:-4] + '.xml'
                f_lab = base_dir + '/labels/' + f_name_local_lab
                labels = list(data_temp.label_str)
                coords = np.array(
                    data_temp.loc[:, ['x_min', 'y_min', 'x_max', 'y_max']])
                # convert to xml and write (the redundant open() around
                # et.write in the original was dropped)
                annot2dump = write_voc_file(f_img, labels, coords,
                                            imag_w, imag_h)
                et = etree.ElementTree(annot2dump)
                et.write(f_lab, pretty_print=True)
            else:
                error_str = ('Do not recognize label conversion format. ' +
                             'Must be voc or yolo')
                raise RuntimeError(error_str)
    else:
        print('No labeled data')
    # remove chopped_files directory if it is now empty
    if chopped_dir is None:
        print('No chopped directory to clean up')
    elif not os.listdir(chopped_dir):
        print('Chopped empty, deleting')
        os.rmdir(chopped_dir)
    else:
        print('Chopped not empty, not deleting')
def verify_image_label_match(image_path, label_path):
    '''
    At some point, there was a mismatch between files in images
    and labels (a bug that no longer exists).
    This function moves mislabeled files to a mismatch directory.

    Args:
        image_path (str): path to dstl images (*.png)
        label_path (str): path to dstl labels (*.xml)
    Returns:
        num_excess1 (int): excess files in images (no corresponding label)
        num_excess2 (int): excess files in labels (no corresponding image)
    Raises:
        RuntimeError: if the two paths do not share a base directory
    Writes to file:
        Moves mismatched images and labels to
        /base/path/data/processed/mistmatch/(images, labels)
    '''
    # both paths must live under the same base directory
    base_path1 = '/'.join(image_path.split('/')[:-2])
    base_path2 = '/'.join(label_path.split('/')[:-2])
    if base_path1 != base_path2:
        raise RuntimeError("Error! Images and labels don't match")
    print(base_path1)
    print(base_path2)
    # NOTE: 'mistmatch' (sic) kept -- renaming would break existing layouts
    move_dir1 = base_path1 + '/mistmatch/images/'
    move_dir2 = base_path1 + '/mistmatch/labels/'
    for a_dir in (move_dir1, move_dir2):
        if not os.path.exists(a_dir):
            os.makedirs(a_dir)
    # get all files in both paths and strip the 4-char extensions
    list1 = glob.glob(image_path + '/*')
    list2 = glob.glob(label_path + '/*')
    strip1 = [a_str.split('/')[-1][:-4] for a_str in list1]
    strip2 = [a_str.split('/')[-1][:-4] for a_str in list2]
    # files present on one side only
    remove1 = set(strip1) - set(strip2)
    remove2 = set(strip2) - set(strip1)
    num_excess1 = len(remove1)
    num_excess2 = len(remove2)
    for f in remove1:
        shutil.move(image_path + '/' + f + '.png', move_dir1)
    for f in remove2:
        # BUGFIX: orphaned labels were moved into the *images* mismatch
        # directory; they belong in the labels mismatch directory
        shutil.move(label_path + '/' + f + '.xml', move_dir2)
    return num_excess1, num_excess2
def build_val_train(path2data, val_size=0.3):
    '''
    Splits the images/labels from an input path into a train and validation
    set.

    Args:
        path2data (str): base path (with trailing '/') to the images and
            labels (e.g., /path2data/images, /path2data/labels)
        val_size (float, optional): validation set fraction
    Returns:
        N/A
    Updates:
        N/A
    Writes to file:
        Removes /path2data/(images, labels) and writes
        /path2data/(train, valid)/(images, labels)
    '''
    # set up paths
    image_path = path2data + 'images/'
    label_path = path2data + 'labels/'
    if os.path.exists(image_path) and os.path.exists(label_path):
        image_path_train = path2data + 'train/images/'
        label_path_train = path2data + 'train/labels/'
        image_path_val = path2data + 'valid/images/'
        label_path_val = path2data + 'valid/labels/'
        # make output dirs
        all_dirs = [image_path_train, label_path_train,
                    image_path_val, label_path_val]
        for a_dir in all_dirs:
            if not os.path.exists(a_dir):
                os.makedirs(a_dir)
        # BUGFIX: sort both listings so image i pairs with label i by
        # name; os.listdir order is arbitrary, so the unsorted lists
        # could previously pair an image with the wrong label
        all_images = sorted(f for f in os.listdir(image_path)
                            if os.path.isfile(os.path.join(image_path, f)))
        all_labels = sorted(f for f in os.listdir(label_path)
                            if os.path.isfile(os.path.join(label_path, f)))
        # verify the lengths are the same
        if len(all_images) != len(all_labels):
            raise RuntimeError('Images/label mismatch!')
        num_files = len(all_images)
        # BUGFIX: honor the val_size parameter (0.3 was hard-coded)
        num_valid = int(num_files * val_size)
        num_train = num_files - num_valid
        # BUGFIX: shuffle indices and split by *position* in the shuffled
        # order; the old code tested the raw index value (`ind <=
        # num_train`), making the split non-random and off by one
        all_ids = np.arange(num_files)
        np.random.shuffle(all_ids)
        train_ids = set(all_ids[:num_train])
        for ind in range(num_files):
            if ind in train_ids:
                os.rename(image_path + all_images[ind],
                          image_path_train + all_images[ind])
                os.rename(label_path + all_labels[ind],
                          label_path_train + all_labels[ind])
            else:
                os.rename(image_path + all_images[ind],
                          image_path_val + all_images[ind])
                os.rename(label_path + all_labels[ind],
                          label_path_val + all_labels[ind])
        # remove the now-empty source directories
        for a_dir in [image_path, label_path]:
            if not os.listdir(a_dir):
                print(str(a_dir) + ' empty, deleting')
                os.rmdir(a_dir)
            else:
                print(str(a_dir) + ' not empty, not deleting')
| |
# <gh_stars>0
########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dsl_parser import constants
from dsl_parser.parser import parse_from_path
from dsl_parser.parser import parse as dsl_parse
from dsl_parser import exceptions
from dsl_parser.exceptions import (ERROR_MISSING_PROPERTY,
DSLParsingLogicException,
ERROR_UNDEFINED_PROPERTY)
from dsl_parser import version
from dsl_parser.tests.abstract_test_parser import AbstractTestParser
from dsl_parser.import_resolver.default_import_resolver import \
DefaultImportResolver
class TestParserLogicExceptions(AbstractTestParser):
def test_parse_dsl_from_file_bad_path(self):
self.assertRaises(
EnvironmentError, parse_from_path, 'fake-file.yaml',
DefaultImportResolver())
def test_no_type_definition(self):
self._assert_dsl_parsing_exception_error_code(
self.BASIC_NODE_TEMPLATES_SECTION, 7, DSLParsingLogicException)
def test_explicit_interface_with_missing_plugin(self):
yaml = self.BASIC_NODE_TEMPLATES_SECTION + self.BASIC_PLUGIN + """
node_types:
test_type:
interfaces:
test_interface1:
install:
implementation: missing_plugin.install
inputs: {}
terminate:
implementation: missing_plugin.terminate
inputs: {}
properties:
install_agent:
default: 'false'
key: {}
"""
self._assert_dsl_parsing_exception_error_code(
yaml, 10, DSLParsingLogicException)
def test_type_derive_non_from_none_existing(self):
yaml = self.BASIC_NODE_TEMPLATES_SECTION + """
node_types:
test_type:
derived_from: "non_existing_type_parent"
"""
self._assert_dsl_parsing_exception_error_code(
yaml, exceptions.ERROR_UNKNOWN_TYPE, DSLParsingLogicException)
def test_import_bad_path(self):
yaml = """
imports:
- fake-file.yaml
"""
self._assert_dsl_parsing_exception_error_code(
yaml, 13, DSLParsingLogicException)
def test_cyclic_dependency(self):
yaml = self.BASIC_NODE_TEMPLATES_SECTION + """
node_types:
test_type:
derived_from: "test_type_parent"
test_type_parent:
derived_from: "test_type_grandparent"
test_type_grandparent:
derived_from: "test_type"
"""
ex = self._assert_dsl_parsing_exception_error_code(
yaml, 100, DSLParsingLogicException)
circular = ex.circular_dependency
self.assertEqual(len(circular), 4)
self.assertEqual(circular[0], circular[-1])
def test_plugin_with_wrongful_executor_field(self):
yaml = self.BASIC_NODE_TEMPLATES_SECTION + """
plugins:
test_plugin:
executor: "bad value"
source: dummy
node_types:
test_type:
properties:
key: {}
interfaces:
test_interface1:
install:
implementation: test_plugin.install
inputs: {}
"""
self._assert_dsl_parsing_exception_error_code(
yaml, 18, DSLParsingLogicException)
def test_operation_with_wrongful_executor_field(self):
yaml = self.BASIC_NODE_TEMPLATES_SECTION + """
plugins:
test_plugin:
executor: central_deployment_agent
source: dummy
node_types:
test_type:
properties:
key: {}
interfaces:
test_interface1:
install:
executor: wrong_executor
implementation: test_plugin.install
inputs: {}
"""
self._assert_dsl_parsing_exception_error_code(
yaml, 28, DSLParsingLogicException)
def test_top_level_relationships_relationship_with_undefined_plugin(self):
yaml = self.MINIMAL_BLUEPRINT + """
relationships:
test_relationship:
source_interfaces:
some_interface:
op:
implementation: no_plugin.op
inputs: {}
"""
self._assert_dsl_parsing_exception_error_code(
yaml, 19, DSLParsingLogicException)
def test_workflow_mapping_no_plugin(self):
yaml = self.BLUEPRINT_WITH_INTERFACES_AND_PLUGINS + """
workflows:
workflow1: test_plugin2.workflow1
"""
self._assert_dsl_parsing_exception_error_code(
yaml, 21, DSLParsingLogicException)
def test_top_level_relationships_import_same_name_relationship(self):
imported_yaml = self.MINIMAL_BLUEPRINT + """
relationships:
test_relationship: {}
"""
yaml = self.create_yaml_with_imports([imported_yaml]) + """
relationships:
test_relationship: {}
"""
self._assert_dsl_parsing_exception_error_code(
yaml, 4, DSLParsingLogicException)
def test_top_level_relationships_circular_inheritance(self):
yaml = self.MINIMAL_BLUEPRINT + """
relationships:
test_relationship1:
derived_from: test_relationship2
test_relationship2:
derived_from: test_relationship3
test_relationship3:
derived_from: test_relationship1
"""
self._assert_dsl_parsing_exception_error_code(
yaml, 100, DSLParsingLogicException)
def test_instance_relationships_bad_target_value(self):
# target value is a non-existent node
yaml = self.MINIMAL_BLUEPRINT + """
test_node2:
type: test_type
relationships:
- type: test_relationship
target: fake_node
relationships:
test_relationship: {}
"""
self._assert_dsl_parsing_exception_error_code(
yaml, 25, DSLParsingLogicException)
def test_instance_relationships_bad_type_value(self):
# type value is a non-existent relationship
yaml = self.MINIMAL_BLUEPRINT + """
test_node2:
type: test_type
relationships:
- type: fake_relationship
target: test_node
relationships:
test_relationship: {}
"""
self._assert_dsl_parsing_exception_error_code(
yaml, 26, DSLParsingLogicException)
def test_instance_relationships_same_source_and_target(self):
# A relationship from a node to itself is not valid
yaml = self.MINIMAL_BLUEPRINT + """
test_node2:
type: test_type
relationships:
- type: test_relationship
target: test_node2
relationships:
test_relationship: {}
"""
self._assert_dsl_parsing_exception_error_code(
yaml, 23, DSLParsingLogicException)
def test_instance_relationship_with_undefined_plugin(self):
yaml = self.MINIMAL_BLUEPRINT + """
test_node2:
type: test_type
relationships:
- type: "test_relationship"
target: "test_node"
source_interfaces:
an_interface:
op: no_plugin.op
relationships:
test_relationship: {}
"""
self._assert_dsl_parsing_exception_error_code(
yaml, 19, DSLParsingLogicException)
def test_validate_agent_plugin_on_non_host_node(self):
yaml = """
node_templates:
test_node1:
type: test_type
node_types:
test_type:
interfaces:
test_interface:
start:
implementation: test_plugin.start
inputs: {}
plugins:
test_plugin:
executor: host_agent
source: dummy
"""
self._assert_dsl_parsing_exception_error_code(
yaml, 24, DSLParsingLogicException)
def test_ambiguous_plugin_operation_mapping(self):
yaml = """
node_types:
test_type: {}
node_templates:
test_node:
type: test_type
interfaces:
test_interface:
op: one.two.three.four
plugins:
one.two:
executor: host_agent
source: dummy
one:
executor: host_agent
source: dummy
"""
self._assert_dsl_parsing_exception_error_code(
yaml, 91, DSLParsingLogicException)
def test_node_set_non_existing_property(self):
yaml = self.BASIC_NODE_TEMPLATES_SECTION + self.BASIC_PLUGIN + """
node_types:
test_type: {}
"""
ex = self._assert_dsl_parsing_exception_error_code(
yaml,
ERROR_UNDEFINED_PROPERTY,
DSLParsingLogicException)
self.assertEquals('key', ex.property)
def test_node_doesnt_implement_schema_mandatory_property(self):
yaml = self.BASIC_NODE_TEMPLATES_SECTION + self.BASIC_PLUGIN + """
node_types:
test_type:
properties:
key: {}
mandatory: {}
"""
ex = self._assert_dsl_parsing_exception_error_code(
yaml, ERROR_MISSING_PROPERTY, DSLParsingLogicException)
self.assertEquals('mandatory', ex.property)
def test_relationship_instance_set_non_existing_property(self):
yaml = self.MINIMAL_BLUEPRINT + """
test_node2:
type: test_type
properties:
key: "val"
relationships:
- type: test_relationship
target: test_node
properties:
do_not_exist: some_value
relationships:
test_relationship: {}
"""
ex = self._assert_dsl_parsing_exception_error_code(
yaml, ERROR_UNDEFINED_PROPERTY, DSLParsingLogicException)
self.assertEquals('do_not_exist', ex.property)
def test_relationship_instance_doesnt_implement_schema_mandatory_property(self): # NOQA
yaml = self.MINIMAL_BLUEPRINT + """
test_node2:
type: test_type
properties:
key: "val"
relationships:
- type: test_relationship
target: test_node
relationships:
test_relationship:
properties:
should_implement: {}
"""
ex = self._assert_dsl_parsing_exception_error_code(
yaml, ERROR_MISSING_PROPERTY, DSLParsingLogicException)
self.assertEquals('should_implement', ex.property)
def test_instance_relationship_more_than_one_contained_in(self):
yaml = self.MINIMAL_BLUEPRINT + """
test_node2:
type: test_type
relationships:
- type: cloudify.relationships.contained_in
target: test_node
- type: derived_from_contained_in
target: test_node
relationships:
cloudify.relationships.contained_in: {}
derived_from_contained_in:
derived_from: cloudify.relationships.contained_in
"""
ex = self._assert_dsl_parsing_exception_error_code(
yaml, 112, DSLParsingLogicException)
self.assertEqual(set(['cloudify.relationships.contained_in',
'derived_from_contained_in']),
set(ex.relationship_types))
def test_group_missing_member(self):
yaml = self.MINIMAL_BLUEPRINT + """
policy_types:
policy_type:
properties:
metric:
default: 100
source: source
groups:
group:
members: [vm]
policies:
policy:
type: policy_type
properties: {}
"""
self._assert_dsl_parsing_exception_error_code(
yaml, 40, DSLParsingLogicException)
def test_group_missing_policy_type(self):
yaml = self.MINIMAL_BLUEPRINT + """
policy_types:
policy_type:
properties:
metric:
default: 100
source: source
groups:
group:
members: [test_node]
policies:
policy:
type: non_existent_policy_type
properties: {}
"""
self._assert_dsl_parsing_exception_error_code(
yaml, 41, DSLParsingLogicException)
def test_group_missing_trigger_type(self):
yaml = self.MINIMAL_BLUEPRINT + """
policy_types:
policy_type:
source: source
groups:
group:
members: [test_node]
policies:
policy:
type: policy_type
triggers:
trigger1:
type: non_existent_trigger
"""
self._assert_dsl_parsing_exception_error_code(
yaml, 42, DSLParsingLogicException)
def test_group_policy_type_undefined_property(self):
yaml = self.MINIMAL_BLUEPRINT + """
policy_types:
policy_type:
properties: {}
source: source
groups:
group:
members: [test_node]
policies:
policy:
type: policy_type
properties:
key: value
"""
self._assert_dsl_parsing_exception_error_code(
yaml, ERROR_UNDEFINED_PROPERTY, DSLParsingLogicException)
def test_group_policy_type_missing_property(self):
yaml = self.MINIMAL_BLUEPRINT + """
policy_types:
policy_type:
properties:
key:
description: a key
source: source
groups:
group:
members: [test_node]
policies:
policy:
type: policy_type
properties: {}
"""
self._assert_dsl_parsing_exception_error_code(
yaml, ERROR_MISSING_PROPERTY, DSLParsingLogicException)
def test_group_policy_trigger_undefined_parameter(self):
yaml = self.MINIMAL_BLUEPRINT + """
policy_triggers:
trigger:
source: source
policy_types:
policy_type:
source: source
groups:
group:
members: [test_node]
policies:
policy:
type: policy_type
triggers:
trigger1:
type: trigger
parameters:
some: undefined
"""
self._assert_dsl_parsing_exception_error_code(
yaml, ERROR_UNDEFINED_PROPERTY, DSLParsingLogicException)
def test_group_policy_trigger_missing_parameter(self):
yaml = self.MINIMAL_BLUEPRINT + """
policy_triggers:
trigger:
source: source
parameters:
param1:
description: the description
policy_types:
policy_type:
source: source
groups:
group:
members: [test_node]
policies:
policy:
type: policy_type
triggers:
trigger1:
type: trigger
"""
self._assert_dsl_parsing_exception_error_code(
yaml, ERROR_MISSING_PROPERTY, DSLParsingLogicException)
def test_properties_schema_invalid_values_for_types(self):
def test_type_with_value(prop_type, prop_val):
yaml = """
node_templates:
test_node:
type: test_type
properties:
string1: {0}
node_types:
test_type:
properties:
string1:
type: {1}
""".format(prop_val, prop_type)
self._assert_dsl_parsing_exception_error_code(
yaml,
exceptions.ERROR_VALUE_DOES_NOT_MATCH_TYPE,
DSLParsingLogicException)
test_type_with_value('boolean', 'not-a-boolean')
test_type_with_value('boolean', '"True"')
test_type_with_value('boolean', '5')
test_type_with_value('boolean', '5.0')
test_type_with_value('boolean', '1')
test_type_with_value('boolean', 1)
test_type_with_value('integer', 'not-an-integer')
test_type_with_value('integer', 'True')
test_type_with_value('integer', '"True"')
test_type_with_value('integer', '5.0')
test_type_with_value('integer', '"5"')
test_type_with_value('integer', 'NaN')
test_type_with_value('integer', '0.2')
test_type_with_value('float', 'not-a-float')
test_type_with_value('float', 'True')
test_type_with_value('float', '"True"')
test_type_with_value('float', '"5.0"')
test_type_with_value('float', 'NaN')
test_type_with_value('float', 'inf')
def test_no_version_field(self):
yaml = self.MINIMAL_BLUEPRINT
self._assert_dsl_parsing_exception_error_code(
yaml, 27, DSLParsingLogicException, dsl_parse)
def test_no_version_field_in_main_blueprint_file(self):
imported_yaml = self.BASIC_VERSION_SECTION_DSL_1_0
imported_yaml_filename = self.make_yaml_file(imported_yaml)
yaml = """
imports:
- {0}""".format(imported_yaml_filename) + self.MINIMAL_BLUEPRINT
self._assert_dsl_parsing_exception_error_code(
yaml, 27, DSLParsingLogicException, dsl_parse)
def test_mismatching_version_in_import(self):
imported_yaml = """
tosca_definitions_version: cloudify_1_1
"""
imported_yaml_filename = self.make_yaml_file(imported_yaml)
yaml = """
imports:
- {0}""".format(imported_yaml_filename) + \
self.BASIC_VERSION_SECTION_DSL_1_0 +\
self.MINIMAL_BLUEPRINT
self._assert_dsl_parsing_exception_error_code(
yaml, 28, DSLParsingLogicException, dsl_parse)
def test_unsupported_version(self):
yaml = """
tosca_definitions_version: unsupported_version
""" + self.MINIMAL_BLUEPRINT
self._assert_dsl_parsing_exception_error_code(
yaml, 29, DSLParsingLogicException, dsl_parse)
def test_script_mapping_illegal_script_path_override(self):
yaml = self.BASIC_VERSION_SECTION_DSL_1_0 + """
plugins:
{0}:
executor: central_deployment_agent
install: false
node_types:
type:
interfaces:
test:
op:
implementation: stub.py
inputs:
script_path:
default: invalid
type: string
node_templates:
node:
type: type
""".format(constants.SCRIPT_PLUGIN_NAME)
self.make_file_with_name(content='content',
filename='stub.py')
yaml_path = self.make_file_with_name(content=yaml,
filename='blueprint.yaml')
self._assert_dsl_parsing_exception_error_code(
yaml_path, 60, DSLParsingLogicException,
parsing_method=self.parse_from_path)
def test_script_mapping_missing_script_plugin(self):
yaml = self.BASIC_VERSION_SECTION_DSL_1_0 + """
node_types:
type:
interfaces:
test:
op:
implementation: stub.py
inputs: {}
node_templates:
node:
type: type
"""
self.make_file_with_name(content='content',
filename='stub.py')
yaml_path = self.make_file_with_name(content=yaml,
filename='blueprint.yaml')
self._assert_dsl_parsing_exception_error_code(
yaml_path, 61, DSLParsingLogicException,
parsing_method=self.parse_from_path)
def test_plugin_with_install_args_wrong_dsl_version(self):
yaml = self.PLUGIN_WITH_INTERFACES_AND_PLUGINS_WITH_INSTALL_ARGS
self._assert_dsl_parsing_exception_error_code(
yaml, exceptions.ERROR_CODE_DSL_DEFINITIONS_VERSION_MISMATCH,
parsing_method=self.parse_1_0)
def test_parse_empty_or_none_dsl_version(self):
expected_err_msg = 'tosca_definitions_version is missing or empty'
self.assertRaisesRegex(DSLParsingLogicException,
expected_err_msg,
version.parse_dsl_version, '')
self.assertRaisesRegex(DSLParsingLogicException,
expected_err_msg,
version.parse_dsl_version, None)
def test_parse_not_string_dsl_version(self):
expected_err_msg = r'Invalid tosca_definitions_version: \[1\] is not' \
r' a string'
self.assertRaisesRegex(DSLParsingLogicException,
expected_err_msg,
version.parse_dsl_version, [1])
def test_parse_wrong_dsl_version_format(self):
expected_err_msg = "Invalid tosca_definitions_version: '{0}', " \
"expected a value following this format: '{1}'"\
.format('1_0', version.DSL_VERSION_1_0)
self.assertRaisesRegex(DSLParsingLogicException,
expected_err_msg,
version.parse_dsl_version, '1_0')
expected_err_msg = "Invalid tosca_definitions_version: '{0}', " \
"expected a value following this format: '{1}'" \
.format('cloudify_dsl_1.0', version.DSL_VERSION_1_0)
self.assertRaisesRegex(DSLParsingLogicException,
expected_err_msg,
version.parse_dsl_version,
'cloudify_dsl_1.0')
expected_err_msg = "Invalid tosca_definitions_version: '{0}', " \
"major version is 'a' while expected to be a" \
" number".format('cloudify_dsl_a_0')
self.assertRaisesRegex(DSLParsingLogicException,
expected_err_msg,
version.parse_dsl_version,
'cloudify_dsl_a_0')
expected_err_msg = "Invalid tosca_definitions_version: '{0}', " \
"minor version is 'a' while expected to be a" \
" number".format('cloudify_dsl_1_a')
self.assertRaisesRegex(DSLParsingLogicException,
expected_err_msg,
version.parse_dsl_version,
'cloudify_dsl_1_a')
expected_err_msg = "Invalid tosca_definitions_version: '{0}', " \
"micro version is 'a' while expected to be a" \
" number".format('cloudify_dsl_1_1_a')
self.assertRaisesRegex(DSLParsingLogicException,
expected_err_msg,
version.parse_dsl_version,
'cloudify_dsl_1_1_a')
def test_max_retries_version_validation(self):
yaml_template = '{0}' + self.MINIMAL_BLUEPRINT + """
interfaces:
my_interface:
my_operation:
max_retries: 1
"""
self.parse(yaml_template.format(self.BASIC_VERSION_SECTION_DSL_1_1))
self._assert_dsl_parsing_exception_error_code(
yaml_template.format(self.BASIC_VERSION_SECTION_DSL_1_0),
exceptions.ERROR_CODE_DSL_DEFINITIONS_VERSION_MISMATCH,
DSLParsingLogicException)
def test_retry_interval_version_validation(self):
    # `retry_interval` on an operation requires DSL 1.1+: parsing must
    # succeed under the 1.1 version section and fail with a
    # version-mismatch error code under 1.0.
    # NOTE(review): the YAML indentation below was reconstructed — the
    # original whitespace was lost; confirm nesting matches
    # MINIMAL_BLUEPRINT's node structure.
    yaml_template = '{0}' + self.MINIMAL_BLUEPRINT + """
        interfaces:
            my_interface:
                my_operation:
                    retry_interval: 1
"""
    self.parse(yaml_template.format(self.BASIC_VERSION_SECTION_DSL_1_1))
    self._assert_dsl_parsing_exception_error_code(
        yaml_template.format(self.BASIC_VERSION_SECTION_DSL_1_0),
        exceptions.ERROR_CODE_DSL_DEFINITIONS_VERSION_MISMATCH,
        DSLParsingLogicException)
def test_dsl_definitions_version_validation(self):
yaml_template = """{0}
dsl_definitions:
def: &def
key: value
node_types:
type:
properties:
prop:
default: 1
node_templates:
node:
| |
logical_name = _attributes.AttributeViString(1050305)
'''Type: str
Contains the logical name you specified when opening the current IVI session.
You can pass a logical name to the __init__ method. The IVI Configuration utility must contain an entry for the logical name. The logical name entry refers to a method section in the IVI Configuration file. The method section specifies a physical device and initial user options.
'''
measure_buffer_size = _attributes.AttributeViInt32(1150077)
'''Type: int
Specifies the number of samples that the active channel measurement buffer can hold.
The default value is the maximum number of samples that a device is capable of recording in one second.
for information about supported devices.
Valid Values: 1000 to 2147483647
Default Value: Varies by device. Refer to Supported Properties by Device topic in the NI DC Power Supplies and SMUs Help for more information about default values.
Note: This property is not supported by all devices. Refer to Supported Properties by Device topic
Tip:
This property can be set/get on specific channels within your :py:class:`nidcpower.Session` instance.
Use Python index notation on the repeated capabilities container channels to specify a subset.
Example: :py:attr:`my_session.channels[ ... ].measure_buffer_size`
To set/get on all channels, you can call the property directly on the :py:class:`nidcpower.Session`.
Example: :py:attr:`my_session.measure_buffer_size`
'''
measure_complete_event_delay = _attributes.AttributeViReal64TimeDeltaSeconds(1150046)
'''Type: hightime.timedelta, datetime.timedelta, or float in seconds
Specifies the amount of time to delay the generation of the Measure Complete event, in seconds.
for information about supported devices.
Valid Values: 0 to 167 seconds
Default Value: The NI PXI-4132 and NI PXIe-4140/4141/4142/4143/4144/4145/4154 supports values from 0 seconds to 167 seconds.
Note: This property is not supported by all devices. Refer to Supported Properties by Device topic
Tip:
This property can be set/get on specific channels within your :py:class:`nidcpower.Session` instance.
Use Python index notation on the repeated capabilities container channels to specify a subset.
Example: :py:attr:`my_session.channels[ ... ].measure_complete_event_delay`
To set/get on all channels, you can call the property directly on the :py:class:`nidcpower.Session`.
Example: :py:attr:`my_session.measure_complete_event_delay`
'''
measure_complete_event_output_terminal = _attributes.AttributeViString(1150047)
'''Type: str
Specifies the output terminal for exporting the Measure Complete event.
for information about supported devices.
Output terminals can be specified in one of two ways. If the device is named Dev1 and your terminal is PXI_Trig0, you can specify the terminal with the fully qualified terminal name, /Dev1/PXI_Trig0, or with the shortened terminal name, PXI_Trig0.
Note: This property is not supported by all devices. Refer to Supported Properties by Device topic
Tip:
This property can be set/get on specific channels within your :py:class:`nidcpower.Session` instance.
Use Python index notation on the repeated capabilities container channels to specify a subset.
Example: :py:attr:`my_session.channels[ ... ].measure_complete_event_output_terminal`
To set/get on all channels, you can call the property directly on the :py:class:`nidcpower.Session`.
Example: :py:attr:`my_session.measure_complete_event_output_terminal`
'''
measure_complete_event_pulse_polarity = _attributes.AttributeEnum(_attributes.AttributeViInt32, enums.Polarity, 1150044)
'''Type: enums.Polarity
Specifies the behavior of the Measure Complete event.
for information about supported devices.
Default Value: Polarity.HIGH
Note: This property is not supported by all devices. Refer to Supported Properties by Device topic
Tip:
This property can be set/get on specific channels within your :py:class:`nidcpower.Session` instance.
Use Python index notation on the repeated capabilities container channels to specify a subset.
Example: :py:attr:`my_session.channels[ ... ].measure_complete_event_pulse_polarity`
To set/get on all channels, you can call the property directly on the :py:class:`nidcpower.Session`.
Example: :py:attr:`my_session.measure_complete_event_pulse_polarity`
'''
measure_complete_event_pulse_width = _attributes.AttributeViReal64(1150045)
'''Type: float
Specifies the width of the Measure Complete event, in seconds.
The minimum event pulse width value for PXI devices is 150 ns, and the minimum event pulse width value for PXI Express devices is 250 ns.
The maximum event pulse width value for all devices is 1.6 microseconds.
for information about supported devices.
Valid Values: 1.5e-7 to 1.6e-6
Default Value: The default value for PXI devices is 150 ns. The default value for PXI Express devices is 250 ns.
Note: This property is not supported by all devices. Refer to Supported Properties by Device topic
Tip:
This property can be set/get on specific channels within your :py:class:`nidcpower.Session` instance.
Use Python index notation on the repeated capabilities container channels to specify a subset.
Example: :py:attr:`my_session.channels[ ... ].measure_complete_event_pulse_width`
To set/get on all channels, you can call the property directly on the :py:class:`nidcpower.Session`.
Example: :py:attr:`my_session.measure_complete_event_pulse_width`
'''
measure_record_delta_time = _attributes.AttributeViReal64TimeDeltaSeconds(1150065)
'''Type: hightime.timedelta, datetime.timedelta, or float in seconds
Queries the amount of time, in seconds, between between the start of two consecutive measurements in a measure record. Only query this property after the desired measurement settings are committed.
for information about supported devices.
two measurements and the rest would differ.
Note: This property is not available when Auto Zero is configured to Once because the amount of time between the first
Tip:
This property can be set/get on specific channels within your :py:class:`nidcpower.Session` instance.
Use Python index notation on the repeated capabilities container channels to specify a subset.
Example: :py:attr:`my_session.channels[ ... ].measure_record_delta_time`
To set/get on all channels, you can call the property directly on the :py:class:`nidcpower.Session`.
Example: :py:attr:`my_session.measure_record_delta_time`
'''
measure_record_length = _attributes.AttributeViInt32(1150063)
'''Type: int
Specifies how many measurements compose a measure record. When this property is set to a value greater than 1, the measure_when property must be set to MeasureWhen.AUTOMATICALLY_AFTER_SOURCE_COMPLETE or MeasureWhen.ON_MEASURE_TRIGGER.
for information about supported devices.
Valid Values: 1 to 16,777,216
Default Value: 1
Note:
This property is not available in a session involving multiple channels.
Tip:
This property can be set/get on specific channels within your :py:class:`nidcpower.Session` instance.
Use Python index notation on the repeated capabilities container channels to specify a subset.
Example: :py:attr:`my_session.channels[ ... ].measure_record_length`
To set/get on all channels, you can call the property directly on the :py:class:`nidcpower.Session`.
Example: :py:attr:`my_session.measure_record_length`
'''
measure_record_length_is_finite = _attributes.AttributeViBoolean(1150064)
'''Type: bool
Specifies whether to take continuous measurements. Call the abort method to stop continuous measurements. When this property is set to False and the source_mode property is set to SourceMode.SINGLE_POINT, the measure_when property must be set to MeasureWhen.AUTOMATICALLY_AFTER_SOURCE_COMPLETE or MeasureWhen.ON_MEASURE_TRIGGER. When this property is set to False and the source_mode property is set to SourceMode.SEQUENCE, the measure_when property must be set to MeasureWhen.ON_MEASURE_TRIGGER.
for information about supported devices.
Default Value: True
Note:
This property is not available in a session involving multiple channels.
Tip:
This property can be set/get on specific channels within your :py:class:`nidcpower.Session` instance.
Use Python index notation on the repeated capabilities container channels to specify a subset.
Example: :py:attr:`my_session.channels[ ... ].measure_record_length_is_finite`
To set/get on all channels, you can call the property directly on the :py:class:`nidcpower.Session`.
Example: :py:attr:`my_session.measure_record_length_is_finite`
'''
measure_trigger_type = _attributes.AttributeEnum(_attributes.AttributeViInt32, enums.TriggerType, 1150034)
'''Type: enums.TriggerType
Specifies the behavior of the Measure trigger.
for information about supported devices.
Default Value: TriggerType.DIGITAL_EDGE
Note: This property is not supported by all devices. Refer to Supported Properties by Device topic
Tip:
This property can be set/get on specific channels within your :py:class:`nidcpower.Session` instance.
Use Python index notation on the repeated capabilities container channels to specify a subset.
Example: :py:attr:`my_session.channels[ ... ].measure_trigger_type`
To set/get on all channels, you can call the property directly on the :py:class:`nidcpower.Session`.
Example: :py:attr:`my_session.measure_trigger_type`
'''
measure_when = _attributes.AttributeEnum(_attributes.AttributeViInt32, enums.MeasureWhen, 1150057)
'''Type: enums.MeasureWhen
Specifies when the measure unit should acquire measurements. Unless this property is configured to MeasureWhen.ON_MEASURE_TRIGGER, the measure_trigger_type property is ignored.
Refer to the Acquiring Measurements topic in the NI DC Power Supplies and SMUs Help for more information about how to configure your measurements.
Default Value: If the source_mode property is set to SourceMode.SINGLE_POINT, the default value is MeasureWhen.ON_DEMAND. This value supports only the measure method and measure_multiple method. If the source_mode property is set to SourceMode.SEQUENCE, the default value is MeasureWhen.AUTOMATICALLY_AFTER_SOURCE_COMPLETE. This value supports only the fetch_multiple method.
Tip:
This property can be set/get on specific channels within your :py:class:`nidcpower.Session` instance.
Use Python index notation on the repeated capabilities container channels to specify a subset.
Example: :py:attr:`my_session.channels[ ... ].measure_when`
To set/get on all channels, you can call the property directly on the :py:class:`nidcpower.Session`.
Example: :py:attr:`my_session.measure_when`
'''
merged_channels = _attributes.AttributeViStringRepeatedCapability(1150249)
'''Type: str
Specifies the channel(s) | |
VBA object.
"""
# Get all the functions called in the VBA object.
call_visitor = function_call_visitor()
item.accept(call_visitor)
func_names = call_visitor.called_funcs
# Get all of the 0 argument functions called in the VBA object.
tmp_context = Context(context=context, _locals=context.locals, copy_globals=True)
_, zero_arg_funcs = _get_var_vals(item, tmp_context)
func_names.update(zero_arg_funcs)
# Get the definitions for all local functions called.
local_funcs = []
for func_name in func_names:
if (context.contains(func_name)):
curr_func = context.get(func_name)
if (isinstance(curr_func, VBA_Object)):
local_funcs.append(curr_func)
# Done. Return the definitions of all the local functions
# that were called.
return local_funcs
def _called_funcs_to_python(loop, context, indent):
    """Emit Python definitions for every local VBA function reachable
    from the loop (directly or transitively), prefixed with a header
    comment and indented by `indent` spaces."""

    def _digest(func):
        # MD5 of the function's string form is used as its identity key.
        return hashlib.md5(str(func).encode()).hexdigest()

    # Functions called directly inside the loop.
    local_funcs = _get_all_called_funcs(loop, context)
    known_hashes = set(_digest(f) for f in local_funcs)

    # Walk callees of callees, collecting every not-yet-seen definition.
    visited = set()
    pending = list(local_funcs)
    while pending:
        func = pending.pop()
        h = _digest(func)
        if h in visited:
            continue
        visited.add(h)
        for callee in _get_all_called_funcs(func, context):
            callee_hash = _digest(callee)
            if callee_hash not in known_hashes:
                known_hashes.add(callee_hash)
                local_funcs.append(callee)
                pending.append(callee)

    # Translate every collected definition to Python.
    body = ""
    for func in local_funcs:
        body += to_python(func, context, indent=indent) + "\n"
    return (" " * indent) + "# VBA Local Function Definitions\n" + body
# Cache JIT loop results to avoid emulating the exact same loop
# multiple times.
# Maps generated Python source (str) -> the resulting var_updates dict,
# or the sentinel string "ERROR" when a prior JIT run of that exact
# code failed (so we can skip retrying it).
jit_cache = {}
def _eval_python(loop, context, params=None, add_boilerplate=False, namespace=None):
    """
    Convert the loop to Python and emulate the loop directly in Python.

    Returns True when the loop was handled by JIT emulation (including
    the detected-infinite-loop case) and False when the caller must fall
    back to the regular interpreter.

    NOTE(review): `params` is accepted but never read in this function
    body — presumably kept for interface compatibility with callers.
    """
    # Are we actually doing this?
    if (not context.do_jit):
        return False

    # Emulating full VB programs in Python is difficult, so for now skip loops
    # that Execute() dynamic VB.
    code_vba = str(loop).replace("\n", "\\n")[:20]
    log.info("Starting JIT emulation of '" + code_vba + "...' ...")
    if (("Execute(" in str(loop)) or
        ("ExecuteGlobal(" in str(loop)) or
        ("Eval(" in str(loop))):
        log.warning("Loop Execute()s dynamic code. Not JIT emulating.")
        return False

    # Generate the Python code for the VB code and execute the generated Python code.
    # TODO: Remove dangerous functions from what can be exec'ed.
    code_python = ""
    try:

        # For JIT handling we modify the values of certain variables to
        # handle recursive python code generation, so make a copy of the
        # original context.
        tmp_context = Context(context=context, _locals=context.locals, copy_globals=True)

        # Get the Python code for the loop.
        log.info("Generating Python JIT code...")
        code_python = to_python(loop, tmp_context)
        if add_boilerplate:
            # Wrap the loop body with imports/helpers, variable
            # initialization, IOC checks and the final var_updates dump.
            var_inits, _ = _loop_vars_to_python(loop, tmp_context, 0)
            func_defns = _called_funcs_to_python(loop, tmp_context, 0)
            code_python = _boilerplate_to_python(0) + "\n" + \
                          func_defns + "\n" + \
                          var_inits + "\n" + \
                          code_python + "\n" + \
                          _check_for_iocs(loop, tmp_context, 0) + "\n" + \
                          _updated_vars_to_python(loop, tmp_context, 0)
        if (log.getEffectiveLevel() == logging.DEBUG):
            safe_print("JIT CODE!!")
            safe_print(code_python)
        log.info("Done generating Python JIT code.")

        # Extended ASCII strings are handled differently in VBScript and VBA.
        # Punt if we are emulating VBA and we have what appears to be extended ASCII
        # strings. For performance we are not handling the MS VBA extended ASCII in the python
        # JIT code.
        if (not context.is_vbscript):

            # Look for non-ASCII strings (both raw bytes and \x escapes).
            non_ascii_pat = r'"[^"]*[\x7f-\xff][^"]*"'
            non_ascii_pat1 = r'"[^"]*(?:\\x7f|\\x[89a-f][0-9a-f])[^"]*"'
            if ((re.search(non_ascii_pat1, code_python) is not None) or
                (re.search(non_ascii_pat, code_python) is not None)):
                log.warning("VBA code contains Microsoft specific extended ASCII strings. Not JIT emulating.")
                return False

        # Check for dynamic code execution in called functions.
        if (('"Execute", ' in code_python) or
            ('"ExecuteGlobal", ' in code_python) or
            ('"Eval", ' in code_python)):
            log.warning("Functions called by loop Execute() dynamic code. Not JIT emulating.")
            return False

        # Run the Python code.

        # Have we already run this exact loop?
        if (code_python in jit_cache):
            var_updates = jit_cache[code_python]
            log.info("Using cached JIT loop results.")
            if (var_updates == "ERROR"):
                log.error("Previous run of Python JIT loop emulation failed. Using fallback emulation for loop.")
                return False

        # No cached results. Run the loop.
        elif (namespace is None):

            # Magic. For some reason exec'ing in locals() makes the dynamically generated
            # code recognize functions defined in the dynamic code. I don't know why.
            # NOTE(review): this is the Python 2 exec-statement form; the
            # var_updates name is expected to appear in locals() as a side
            # effect, and its absence is caught by the NameError handler below.
            log.info("Evaluating Python JIT code...")
            exec code_python in locals()
        else:
            # Caller supplied an explicit namespace; read results from it.
            exec(code_python, namespace)
            var_updates = namespace["var_updates"]
        log.info("Done JIT emulation of '" + code_vba + "...' .")

        # Cache the loop results.
        jit_cache[code_python] = var_updates

        # Update the context with the variable values from the JIT code execution.
        try:
            for updated_var in var_updates.keys():
                if (updated_var == "__shell_code__"):
                    continue
                context.set(updated_var, var_updates[updated_var])
        except (NameError, UnboundLocalError):
            log.warning("No variables set by Python JIT code.")

        # Update shellcode bytes from the JIT emulation.
        import vba_context
        vba_context.shellcode = var_updates["__shell_code__"]

    except NotImplementedError as e:
        # Some VBA construct has no Python translation; fall back.
        log.error("Python JIT emulation of loop failed. " + str(e) + ". Using fallback emulation method for loop...")
        #safe_print("REMOVE THIS!!")
        #raise e
        return False

    except Exception as e:

        # Cache the error.
        jit_cache[code_python] = "ERROR"

        # If we bombed out due to a potential infinite loop we
        # are done.
        if ("Infinite Loop" in str(e)):
            log.warning("Detected infinite loop. Terminating loop.")
            return True

        # We had some other error. Emulating the loop in Python failed.
        log.error("Python JIT emulation of loop failed. " + str(e) + ". Using fallback emulation method for loop...")
        if (log.getEffectiveLevel() == logging.DEBUG):
            traceback.print_exc(file=sys.stdout)
            safe_print("-*-*-*-*-\n" + code_python + "\n-*-*-*-*-")
        return False

    # Done.
    return True
def eval_arg(arg, context, treat_as_var_name=False):
"""
evaluate a single argument if it is a VBA_Object, otherwise return its value
"""
# pypy seg faults sometimes if the recursion depth is exceeded. Try to
# avoid that. Also check to see if emulation has taken too long.
limits_exceeded(throw_error=True)
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("try eval arg: %s (%s, %s, %s)" % (arg, type(arg), isinstance(arg, VBA_Object), treat_as_var_name))
# Is this a constant math expression?
got_constant_math = is_constant_math(arg)
# Do we have the cached value of this expression?
cached_val = get_cached_value(arg)
if (cached_val is not None):
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_arg: Got cached value %r = %r" % (arg, cached_val))
return cached_val
# Try handling reading value from an Excel spreadsheet cell.
excel_val = _read_from_excel(arg, context)
if (excel_val is not None):
if got_constant_math: set_cached_value(arg, excel_val)
return excel_val
# Short circuit the checks and see if we are accessing some object text first.
obj_text_val = _read_from_object_text(arg, context)
if (obj_text_val is not None):
if got_constant_math: set_cached_value(arg, obj_text_val)
return obj_text_val
# Not reading from an Excel cell. Try as a VBA object.
if ((isinstance(arg, VBA_Object)) or (isinstance(arg, VbaLibraryFunc))):
# Handle cases where wscriptshell.run() is being called and there is a local run() function.
if ((".run(" in str(arg).lower()) and (context.contains("run"))):
# Resolve the run() call.
if ("MemberAccessExpression" in str(type(arg))):
arg_evaled = arg.eval(context)
if got_constant_math: set_cached_value(arg, arg_evaled)
return arg_evaled
# Handle as a regular VBA object.
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_arg: eval as VBA_Object %s" % arg)
r = arg.eval(context=context)
# Is this a Shapes() access that still needs to be handled?
poss_shape_txt = ""
try:
poss_shape_txt = str(r)
except:
pass
if ((poss_shape_txt.startswith("Shapes(")) or (poss_shape_txt.startswith("InlineShapes("))):
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_arg: Handling intermediate Shapes() access for " + str(r))
r = eval_arg(r, context)
if got_constant_math: set_cached_value(arg, r)
return r
# Regular VBA object.
if got_constant_math: set_cached_value(arg, r)
return r
# Not a VBA object.
else:
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_arg: not a VBA_Object: %r" % arg)
# Might this be a special type of variable lookup?
if (isinstance(arg, str)):
# Simple case first. Is this a variable?
try:
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_arg: Try as variable name: %r" % arg)
r = context.get(arg)
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_arg: Got %r = %r" % (arg, r))
if got_constant_math: set_cached_value(arg, r)
return r
except:
# No it is not. Try more complicated cases.
if (log.getEffectiveLevel() | |
sys_updatestat('SOTAWATCH',E_NONE)
conn2 = sqlite3.connect(alert_db)
cur2 = conn2.cursor()
r.reverse()
for item in r:
if item['comments'] is None:
item['comments'] = ""
ts = item['timeStamp']
ts = ts[:ts.find('.')]
spot_time = int(datetime.strptime(ts,'%Y-%m-%dT%H:%M:%S').strftime("%s"))
spot_end= spot_time + 3600 * KEYS['WINDOW_TO']
activator = item['activatorCallsign'].upper().replace(" ","")
m = re.match('(\w+)/(\w+)/(\w+)',activator)
if m:
op = m.group(2)
else:
m = re.match('(\w+)/(\w+)',activator)
if m:
op = m.group(1)
else:
op = activator.strip()
summit = item['associationCode']+"/"+item['summitCode']
summit = summit.upper()
(lat,lng) = parse_summit(summit)
q ='insert or replace into spots (time,end,operator,callsign,summit,summit_info,lat,lng,spot_freq,spot_mode,spot_comment,spot_color,poster) values (?,?,?,?,?,?,?,?,?,?,?,?,?)'
cur2.execute(q,(spot_time,spot_end,op,activator,summit,item['summitDetails'],lat,lng,item['frequency'],item['mode'],item['comments'],item['highlightColor'],item['callsign']))
last_tweetat = read_params('last_tweetat')
if spot_time >= last_tweetat:
st = datetime.fromtimestamp(int(spot_time)).strftime("%H:%M")
mesg = st +' ' + activator + ' on ' + summit + ' (' + item['summitDetails'] +') '+ item['frequency'] + ' ' + item['mode'] +' '+item['comments'] + '[' + item['callsign'] + ']'
mesg = mesg + ' ' + sotalive_url + '/#' + urllib.quote(op.encode('utf8') + '+' + summit.encode('utf8') , '')
if re.search(KEYS['JASummits'],summit):
tweet(tweet_api,mesg)
comment = item['comments'].upper()
m = re.search('JA-\d\d\d\d', comment)
if m:
mesg = st + ' ' + activator + ' on ' + m.group(0) + ' (' + summit + ') '+ item['frequency'] + ' ' + item['mode'] +' '+item['comments'] + '[' + item['callsign'] + ']'
tweet(pota_tweet_api, mesg)
#tweet(tweet_api_debug,mesg)
update_params('last_tweetat',int(datetime.utcnow().strftime("%s")))
conn2.commit()
conn2.close()
update_json_data()
def update_alerts():
    """Refresh the alerts/beacons/spots tables from SOTAwatch alerts and
    rebuild the APRS callsign filter from currently-active operators.

    Side effects: prunes old rows from the aprslog and alert databases,
    recreates the `current` scratch table, upserts alerts/beacons, and
    updates the global `aprs_filter` (pushing it to `aprs_beacon` if the
    APRS connection exists).
    """
    global aprs_filter
    # Open the alerts database; bail out (leaving state unchanged) on failure.
    try:
        conn = sqlite3.connect(alert_db)
    except Exception as err:
        print >> sys.stderr, alert_db
        print >> sys.stderr, '%s' % err
        return
    # Open the APRS log database, used only for pruning below.
    try:
        aprs = sqlite3.connect(aprslog_db)
    except Exception as err:
        print >> sys.stderr, aprslog_db
        print >> sys.stderr, '%s' % err
        return
    cur = conn.cursor()
    cur2 = conn.cursor()
    aprs_cur = aprs.cursor()
    # Retention horizons (epoch seconds), derived from config in KEYS.
    now = int(datetime.utcnow().strftime("%s"))
    keep_in_db = now - 3600 * KEYS['KEEP_IN_DB']
    keepin_aprs = now - 2 * 3600 * KEYS['KEEP_IN_DB']
    keep_in_db_hist = now - 3600 * KEYS['WINDOW_TO'] + 3600 * KEYS['WINDOW_FROM']
    # Prune the APRS packet log and close that database.
    aprs_cur.execute("delete from aprslog where time < %s" % str(keepin_aprs))
    aprs.commit()
    aprs.close()
    # `current` is a scratch table listing (operator, summit) pairs seen
    # in this refresh; it is rebuilt from scratch every call.
    q = 'drop table if exists current'
    cur.execute(q)
    q = 'create table current(operator text,summit text)'
    cur.execute(q);
    # Ensure schema exists, then prune expired rows from each table.
    q = 'create table if not exists alerts (time int,start int,end int,operator text,callsign text,summit text,summit_info text,lat_dest text,lng_dest text,alert_freq text,alert_comment text,poster text,primary key(callsign,summit))'
    cur.execute(q)
    q = 'delete from alerts where end < ?'
    cur.execute(q,(keep_in_db,))
    conn.commit()
    q ='create table if not exists beacons (start int,end int,operator text,lastseen int,lat text,lng text,lat_dest text,lng_dest text,dist int,az int,state int,summit text,message text,message2 text,tlon int,lasttweet text,type text,primary key(operator,summit))'
    cur2.execute(q)
    q = 'delete from beacons where end < ?'
    cur2.execute(q,(keep_in_db,))
    q ='create table if not exists spots (time int,end int,operator text,callsign text,summit text,summit_info text,lat text,lng text,spot_freq text,spot_mode text,spot_comment text,spot_color text,poster text,primary key(operator))'
    cur2.execute(q)
    q = 'delete from spots where end < ?'
    cur2.execute(q,(keep_in_db,))
    q = 'create view if not exists oprts as select distinct operator,callsign, summit from alerts union select operator,callsign,summit from spots;'
    cur2.execute(q)
    q = 'create table if not exists message_history(time int,operator text,ssid text, summit text,state int,distance int,primary key(operator,ssid,summit))'
    cur2.execute(q)
    q = 'delete from message_history where time < ?'
    cur2.execute(q,(keep_in_db_hist,))
    conn.commit()
    # Fetch upcoming alerts from SOTAwatch, then append synthetic test
    # alerts for each configured TEST_USER.
    res = parse_json_alerts(sotawatch_json_url,now+3600 * KEYS['ALERT_TO'])
    operators = []
    for user in KEYS['TEST_USER']:
        d = {'time':now,'start':now-100,'end':now+10800,
             'operator':user,'callsign':user,'summit':'JA/KN-006',
             'summit_info':'Test','freq':'433-fm',
             'comment':'Alert Test','poster':'(Posted By JL1NIE)'}
        res.append(d)
    for d in res:
        (lat_dest,lng_dest) = parse_summit(d['summit'])
        # Extract the bare operator callsign from prefixed/suffixed forms:
        # "PFX/CALL/SFX" -> CALL, "CALL/SFX" -> CALL, else as-is.
        m = re.match('(\w+)/(\w+)/(\w+)',d['callsign'])
        if m:
            op = m.group(2)
        else:
            m = re.match('(\w+)/(\w+)',d['callsign'])
            if m:
                op = m.group(1)
            else:
                op = d['callsign']
        q = 'insert into current(operator,summit) values (?,?)'
        cur.execute(q,(op,d['summit']))
        q = 'insert or replace into alerts(time,start,end,operator,callsign,summit,summit_info,lat_dest,lng_dest,alert_freq,alert_comment,poster) values (?,?,?,?,?,?,?,?,?,?,?,?)'
        cur.execute(q,(d['time'],d['start'],d['end'],
                       op,d['callsign'],
                       d['summit'],d['summit_info'],
                       str(lat_dest),str(lng_dest),
                       d['freq'],
                       d['comment'],d['poster']))
        # Only alerts active right now get a beacon row.
        if now >= d['start'] and now <= d['end']:
            if not op in operators:
                operators.append(op)
            q = 'select * from beacons where operator = ? and summit = ?'
            cur2.execute(q,(op,d['summit']))
            r = cur2.fetchall()
            if len(r)>0:
                q = 'update beacons set start = ? ,end = ? where operator = ? and summit = ?'
                cur2.execute(q,(d['start'],d['end'],op,d['summit']))
            else:
                q = 'insert into beacons (start,end,operator,lastseen,lat,lng,lat_dest,lng_dest,dist,az,state,summit,message,message2,tlon,lasttweet,type) values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)'
                cur2.execute(q,(d['start'],d['end'],op,
                                0, #lastseen
                                '', # lat
                                '', # lng
                                str(lat_dest), # lat_dest
                                str(lng_dest), # lng_dest
                                -1,0,
                                NOTRCVD,
                                d['summit'],
                                d['summit_info'],
                                d['summit_info'],
                                0,'',
                                'SW2'))
    conn.commit()
    # Drop future alerts/beacons whose (operator, summit) pair no longer
    # appears in the freshly-built `current` table.
    q = 'delete from alerts where (operator,summit) not in (select * from current) and alerts.time > ?'
    cur.execute(q,(now,))
    q = 'delete from beacons where (operator,summit) not in (select * from current) and beacons.start > ?'
    cur2.execute(q,(now,))
    # Rebuild the operator list from the beacons table (the in-loop list
    # built above is intentionally discarded here) and derive the APRS
    # "buddy" filter string from it.
    q = 'select distinct operator from beacons where start < ? and end > ?'
    cur.execute(q,(now,now))
    operators = []
    for ((r,)) in cur.fetchall():
        operators.append(r.strip())
    aprs_filter = "b/"+ "-*/".join(operators) +"-*"
    if aprs_beacon:
        aprs_beacon.set_filter(aprs_filter)
    #print >>sys.stderr, 'APRS Filter:' + aprs_filter
    conn.commit()
    conn.close()
def tweet_alerts():
    """Tweet a daily summary of today's JA SOTAwatch alerts, then one
    tweet per alert (skipping the JA/TT-TEST summit).

    Reads the alerts table from alert_db; tweet volume depends on the
    number of JA alerts in the [ALERT_FROM, TWEET_ALERT_TO] window.
    """
    today = datetime.now(localtz).strftime("%d %B %Y")
    conn = sqlite3.connect(alert_db)
    cur = conn.cursor()
    # Time window (epoch seconds) for alerts worth announcing.
    start = int(datetime.utcnow().strftime("%s")) + 3600 * KEYS['ALERT_FROM']
    end = int(datetime.utcnow().strftime("%s")) + 3600 * KEYS['TWEET_ALERT_TO']
    q = 'select * from alerts where time >= ? and time <= ? and summit like ?'
    cur.execute(q,(start,end,'JA%',))
    rows = cur.fetchall()
    # Exclude the synthetic TEST_USER alerts from the announced count.
    num = len(rows) - len(KEYS['TEST_USER'])
    mesg = "SOTAwatch alerts:\n"
    if num == 0:
        mesg = mesg + "No activations are currently scheduled on "
    elif num == 1:
        mesg = mesg + "An activation is currently scheduled on "
    else:
        # BUG FIX: the original assigned `mesg = str(num)+...` here,
        # silently dropping the "SOTAwatch alerts:" header that the
        # other two branches keep.
        mesg = mesg + str(num) + " activations are currently scheduled on "
    mesg = mesg + today + "."
    tweet(tweet_api,mesg)
    # One tweet per alert, with a deep link to the live map.
    for (tm,_,_,op,call,summit,info,lat,lng,freq,comment,poster) in rows:
        tm = datetime.fromtimestamp(int(tm)).strftime("%H:%M")
        mesg = tm + " " + call + " on\n" + summit + " " + freq + "\n" + info + "\n" + comment + " " + poster
        mesg = mesg + ' ' + sotalive_url + '/#' + urllib.quote(op.encode('utf8') + '+' + summit.encode('utf8'), '')
        if summit != 'JA/TT-TEST':
            tweet(tweet_api,mesg)
    conn.close()
def get_new_msgno():
    """Allocate the next outgoing APRS message number (wraps 1..999),
    register it in the sender pool, and return it. Thread-safe."""
    global _count
    _thlock.acquire()
    _count += 1
    # Wrap around so message numbers stay below four digits.
    if _count == 1000:
        _count = 1
    _senderpool.add(_count)
    _thlock.release()
    return _count
def ack_received(mlist):
    """Return True (and consume the ack) if any message number in
    `mlist` has been acknowledged; otherwise return False."""
    if debug:
        print(_senderpool)
        print(_ackpool)
    for num in mlist:
        if num in _ackpool:
            _thlock.acquire()
            _ackpool.discard(num)
            _thlock.release()
            return True
    return False
def push_msgno(msgno):
    """Record an incoming ack: if `msgno` is one we sent (it is in the
    sender pool), add it to the ack pool and return True."""
    if msgno not in _senderpool:
        return False
    _thlock.acquire()
    _ackpool.add(msgno)
    _thlock.release()
    return True
def discard_ack(mlist):
    """Forget every message number in `mlist` from both the ack pool and
    the sender pool. Thread-safe."""
    _thlock.acquire()
    for num in mlist:
        _ackpool.discard(num)
        _senderpool.discard(num)
    _thlock.release()
def aprs_worker():
    """Initialize the message-number/ack bookkeeping globals, connect to
    APRS-IS, and run the packet consumer loop (this call blocks)."""
    global aprs_beacon
    global _thlock
    global _ackpool
    global _senderpool
    global _count
    _thlock = Lock()
    # Pools are seeded with a -1 sentinel so they are never empty sets.
    _ackpool = {-1}
    _senderpool = {-1}
    _count = 0
    aprs_beacon = aprslib.IS(aprs_user, host=aprs_host,
                             passwd=aprs_password, port=aprs_port)
    aprs_beacon.connect(blocking=True)
    # immortal=True keeps the consumer reconnecting on errors.
    aprs_beacon.consumer(callback, immortal=True, raw=True)
def send_ack_worker(aprs, msgno):
    """Send an ack frame three times at 30-second intervals (APRS acks
    are unreliable, so repeats improve delivery odds).
    `msgno` is the fully formatted ack frame built by send_ack()."""
    sleep(2)
    for i in range(3):
        # In debug mode the frame is printed instead of transmitted.
        if debug:
            print "SendingAck("+ str(i) + "):" + msgno
        else:
            aprs.sendall(msgno)
        sleep(30)
def send_ack(aprs, callfrom, msgno):
    """Acknowledge an incoming APRS message asynchronously: build the
    ack frame and hand it to a background send_ack_worker thread."""
    frame = "%s>APRS,TCPIP*::%s:ack%s" % (aprs_user, callfrom, str(msgno))
    worker = Thread(name="AckWoker", target=send_ack_worker, args=(aprs, frame))
    worker.start()
def send_message(aprs, callfrom, message):
    """Send one fire-and-forget APRS text message to `callfrom`,
    truncated to the 67-character APRS message limit."""
    header = "%s>APRS,TCPIP*::%s:" % (aprs_user, callfrom)
    message = message[:67]
    if debug:
        print("Sending: " + header + message)
    else:
        aprs.sendall(header + message)
def send_message_worker(aprs, callfrom, message):
    """Transmit `message` (already prefixed with its header) up to twice,
    each time with a fresh message number, waiting for an ack between
    attempts. Logs a failure when both attempts were used.

    NOTE(review): the failure test is `len(mlist) == 2`, i.e. "both
    numbers were allocated" — it also fires when the second attempt WAS
    ack'ed; confirm whether that is intended before changing it."""
    mlist = []
    for attempt in range(2):
        msgno = get_new_msgno()
        mlist.append(msgno)
        frame = message + '{' + str(msgno)
        if debug:
            print("Sending(" + str(attempt) + "):" + frame)
        else:
            aprs.sendall(frame)
        sleep(60 + int(attempt / 2) * 30)
        if ack_received(mlist):
            break
    discard_ack(mlist)
    if len(mlist) == 2:
        sys.stderr.write("APRS: Can't send message:" + callfrom + ' ' + message + '\n' + '\n')
def send_message_with_ack(aprs, callfrom, message):
    """Queue one acknowledged APRS message (67-char truncated) for
    delivery on a background send_message_worker thread."""
    header = "%s>APRS,TCPIP*::%s:" % (aprs_user, callfrom)
    message = message[:67]
    if debug:
        print("Sending:" + header + message)
    else:
        worker = Thread(name="MessageWorker",
                        target=send_message_worker,
                        args=(aprs, callfrom, header + message))
        worker.start()
def send_long_message_with_ack2(aprs, callfrom, message):
    """Split a multi-line message and send each line as its own
    acknowledged APRS message."""
    for line in message.splitlines():
        send_message_with_ack(aprs, callfrom, line)
def send_message_worker2(aprs, callfrom, header, messages,retry):
    """Send each line of `messages` as an acknowledged APRS message with
    exponential backoff: up to retry+1 attempts per line, doubling the
    wait (starting at 7 s) after each unacknowledged attempt.

    NOTE(review): the failure test is `len(mlist) == retry`, i.e. "all
    attempts used" — it also fires when the final attempt WAS ack'ed;
    confirm whether that is intended."""
    # retry+1 total attempts per line.
    retry += 1
    for message in messages.splitlines():
        mlist = []
        wait_timer = 7
        for i in range(retry):
            msgno = get_new_msgno()
            mlist.append(msgno)
            # Truncate to the 67-character APRS message limit.
            if len(message)>67:
                message = message[0:67]
            m = header + message + '{' + str(msgno)
            print >>sys.stderr, 'APRS raw message(' + str(wait_timer) + ',' + str(i) + '):' + m
            aprs.sendall(m.encode('utf-8'))
            sleep(wait_timer)
            if ack_received(mlist):
                print >>sys.stderr, 'APRS recv_ack(' +str(wait_timer) +','+ str(msgno)+ ')'
                break
            else:
                # No ack yet: double the wait before the next attempt.
                wait_timer *= 2
        discard_ack(mlist)
        if len(mlist) == retry:
            print >>sys.stderr, "APRS: Can't send message:" + callfrom + ' ' + message + '\n'
def send_long_message_with_ack(aprs, callfrom, messages, retry=3):
    """Send a (possibly multi-line) APRS message on a background thread,
    retrying each line with backoff until acknowledged."""
    header = "%s>APRS,TCPIP*::%s:" % (aprs_user, callfrom)
    worker = Thread(name="MessageWorker",
                    target=send_message_worker2,
                    args=(aprs, callfrom, header, messages, retry))
    worker.start()
def send_summit_message(callfrom, lat ,lng):
foreign,continent,state,tlon,mesg = lookup_summit(callfrom,lat,lng)
if state == ONSUMMIT: # On Summit
mesg = mesg + "\n" + readlast3(continent)
print >>sys.stderr, 'APRS: Message ' | |
S_StartDrunk__Fv()")
del_items(0x80068BF0)
SetType(0x80068BF0, "void StartStore__Fc(char s)")
del_items(0x80068EE0)
SetType(0x80068EE0, "void DrawSText__Fv()")
del_items(0x80068F20)
SetType(0x80068F20, "void DrawSTextTSK__FP4TASK(struct TASK *T)")
del_items(0x80068FE8)
SetType(0x80068FE8, "void DoThatDrawSText__Fv()")
del_items(0x80069194)
SetType(0x80069194, "void STextESC__Fv()")
del_items(0x80069310)
SetType(0x80069310, "void STextUp__Fv()")
del_items(0x80069498)
SetType(0x80069498, "void STextDown__Fv()")
del_items(0x80069630)
SetType(0x80069630, "void S_SmithEnter__Fv()")
del_items(0x80069704)
SetType(0x80069704, "void SetGoldCurs__Fii(int pnum, int i)")
del_items(0x80069780)
SetType(0x80069780, "void SetSpdbarGoldCurs__Fii(int pnum, int i)")
del_items(0x800697FC)
SetType(0x800697FC, "void TakePlrsMoney__Fl(long cost)")
del_items(0x80069C48)
SetType(0x80069C48, "void SmithBuyItem__Fv()")
del_items(0x80069E3C)
SetType(0x80069E3C, "void S_SBuyEnter__Fv()")
del_items(0x8006A060)
SetType(0x8006A060, "void SmithBuyPItem__Fv()")
del_items(0x8006A1E8)
SetType(0x8006A1E8, "void S_SPBuyEnter__Fv()")
del_items(0x8006A418)
SetType(0x8006A418, "unsigned char StoreGoldFit__Fi(int idx)")
del_items(0x8006A6D0)
SetType(0x8006A6D0, "void PlaceStoreGold__Fl(long v)")
del_items(0x8006A934)
SetType(0x8006A934, "void StoreSellItem__Fv()")
del_items(0x8006AC28)
SetType(0x8006AC28, "void S_SSellEnter__Fv()")
del_items(0x8006AD2C)
SetType(0x8006AD2C, "void SmithRepairItem__Fv()")
del_items(0x8006AF9C)
SetType(0x8006AF9C, "void S_SRepairEnter__Fv()")
del_items(0x8006B0F8)
SetType(0x8006B0F8, "void S_WitchEnter__Fv()")
del_items(0x8006B1A8)
SetType(0x8006B1A8, "void WitchBuyItem__Fv()")
del_items(0x8006B3A8)
SetType(0x8006B3A8, "void S_WBuyEnter__Fv()")
del_items(0x8006B594)
SetType(0x8006B594, "void S_WSellEnter__Fv()")
del_items(0x8006B698)
SetType(0x8006B698, "void WitchRechargeItem__Fv()")
del_items(0x8006B810)
SetType(0x8006B810, "void S_WRechargeEnter__Fv()")
del_items(0x8006B96C)
SetType(0x8006B96C, "void S_BoyEnter__Fv()")
del_items(0x8006BAA4)
SetType(0x8006BAA4, "void BoyBuyItem__Fv()")
del_items(0x8006BB28)
SetType(0x8006BB28, "void HealerBuyItem__Fv()")
del_items(0x8006BDCC)
SetType(0x8006BDCC, "void S_BBuyEnter__Fv()")
del_items(0x8006BFA4)
SetType(0x8006BFA4, "void StoryIdItem__Fv()")
del_items(0x8006C2F0)
SetType(0x8006C2F0, "void S_ConfirmEnter__Fv()")
del_items(0x8006C40C)
SetType(0x8006C40C, "void S_HealerEnter__Fv()")
del_items(0x8006C4A4)
SetType(0x8006C4A4, "void S_HBuyEnter__Fv()")
del_items(0x8006C6B0)
SetType(0x8006C6B0, "void S_StoryEnter__Fv()")
del_items(0x8006C748)
SetType(0x8006C748, "void S_SIDEnter__Fv()")
del_items(0x8006C8C4)
SetType(0x8006C8C4, "void S_TalkEnter__Fv()")
del_items(0x8006CABC)
SetType(0x8006CABC, "void S_TavernEnter__Fv()")
del_items(0x8006CB2C)
SetType(0x8006CB2C, "void S_BarmaidEnter__Fv()")
del_items(0x8006CB9C)
SetType(0x8006CB9C, "void S_DrunkEnter__Fv()")
del_items(0x8006CC0C)
SetType(0x8006CC0C, "void STextEnter__Fv()")
del_items(0x8006CE0C)
SetType(0x8006CE0C, "void CheckStoreBtn__Fv()")
del_items(0x8006CF28)
SetType(0x8006CF28, "void ReleaseStoreBtn__Fv()")
del_items(0x8006CF3C)
SetType(0x8006CF3C, "void _GLOBAL__D_pSTextBoxCels()")
del_items(0x8006CF64)
SetType(0x8006CF64, "void _GLOBAL__I_pSTextBoxCels()")
del_items(0x8006CF8C)
SetType(0x8006CF8C, "unsigned short GetDown__C4CPad_addr_8006CF8C(struct CPad *this)")
del_items(0x8006CFB4)
SetType(0x8006CFB4, "void SetRGB__6DialogUcUcUc_addr_8006CFB4(struct Dialog *this, unsigned char R, unsigned char G, unsigned char B)")
del_items(0x8006CFD4)
SetType(0x8006CFD4, "void SetBorder__6Dialogi_addr_8006CFD4(struct Dialog *this, int Type)")
del_items(0x8006CFDC)
SetType(0x8006CFDC, "void ___6Dialog_addr_8006CFDC(struct Dialog *this, int __in_chrg)")
del_items(0x8006D004)
SetType(0x8006D004, "struct Dialog *__6Dialog_addr_8006D004(struct Dialog *this)")
del_items(0x8006D060)
SetType(0x8006D060, "void T_DrawView__Fii(int StartX, int StartY)")
del_items(0x8006D1D4)
SetType(0x8006D1D4, "void T_FillSector__FPUcT0iiiib(unsigned char *P3Tiles, unsigned char *pSector, int xi, int yi, int w, int h, bool AddSec)")
del_items(0x8006D3CC)
SetType(0x8006D3CC, "void T_FillTile__FPUciii(unsigned char *P3Tiles, int xx, int yy, int t)")
del_items(0x8006D4BC)
SetType(0x8006D4BC, "void T_Pass3__Fv()")
del_items(0x8006D87C)
SetType(0x8006D87C, "void CreateTown__Fi(int entry)")
del_items(0x8006D9E4)
SetType(0x8006D9E4, "unsigned char *GRL_LoadFileInMemSig__FPCcPUl(char *Name, unsigned long *Len)")
del_items(0x8006DAC8)
SetType(0x8006DAC8, "void GRL_StripDir__FPcPCc(char *Dest, char *Src)")
del_items(0x8006DB60)
SetType(0x8006DB60, "unsigned char ForceTownTrig__Fv()")
del_items(0x8006DE78)
SetType(0x8006DE78, "unsigned char ForceL1Trig__Fv()")
del_items(0x8006E128)
SetType(0x8006E128, "unsigned char ForceL2Trig__Fv()")
del_items(0x8006E588)
SetType(0x8006E588, "unsigned char ForceL3Trig__Fv()")
del_items(0x8006EA04)
SetType(0x8006EA04, "unsigned char ForceL4Trig__Fv()")
del_items(0x8006EF10)
SetType(0x8006EF10, "void Freeupstairs__Fv()")
del_items(0x8006EFD0)
SetType(0x8006EFD0, "unsigned char ForceSKingTrig__Fv()")
del_items(0x8006F0C4)
SetType(0x8006F0C4, "unsigned char ForceSChambTrig__Fv()")
del_items(0x8006F1B8)
SetType(0x8006F1B8, "unsigned char ForcePWaterTrig__Fv()")
del_items(0x8006F2AC)
SetType(0x8006F2AC, "void CheckTrigForce__Fv()")
del_items(0x8006F5C8)
SetType(0x8006F5C8, "void FadeGameOut__Fv()")
del_items(0x8006F664)
SetType(0x8006F664, "bool IsTrigger__Fii(int x, int y)")
del_items(0x8006F6C8)
SetType(0x8006F6C8, "void CheckTriggers__Fi(int pnum)")
del_items(0x8006FBD8)
SetType(0x8006FBD8, "int GetManaAmount__Fii(int id, int sn)")
del_items(0x8006FEA0)
SetType(0x8006FEA0, "void UseMana__Fii(int id, int sn)")
del_items(0x8006FFE4)
SetType(0x8006FFE4, "unsigned char CheckSpell__FiicUc(int id, int sn, char st, unsigned char manaonly)")
del_items(0x80070084)
SetType(0x80070084, "void CastSpell__Fiiiiiiii(int id, int spl, int sx, int sy, int dx, int dy, int caster, int spllvl)")
del_items(0x80070330)
SetType(0x80070330, "void DoResurrect__Fii(int pnum, int rid)")
del_items(0x800705E4)
SetType(0x800705E4, "void DoHealOther__Fii(int pnum, int rid)")
del_items(0x80070848)
SetType(0x80070848, "void snd_update__FUc(unsigned char bStopAll)")
del_items(0x80070850)
SetType(0x80070850, "void snd_get_volume__FPCcPl(char *pszKey, long *plVolume)")
del_items(0x800708B8)
SetType(0x800708B8, "void snd_stop_snd__FP4TSnd(struct TSnd *pSnd)")
del_items(0x800708D8)
SetType(0x800708D8, "void snd_play_snd__FP4TSFXll(struct TSFX *pSnd, long lVolume, long lPan)")
del_items(0x80070948)
SetType(0x80070948, "void snd_play_msnd__FUsll(unsigned short pszName, long lVolume, long lPan)")
del_items(0x800709E4)
SetType(0x800709E4, "void snd_init__FUl(unsigned long hWnd)")
del_items(0x80070A40)
SetType(0x80070A40, "void music_stop__Fv()")
del_items(0x80070A84)
SetType(0x80070A84, "void music_fade__Fv()")
del_items(0x80070AC4)
SetType(0x80070AC4, "void music_start__Fi(int nTrack)")
del_items(0x80070B50)
SetType(0x80070B50, "void music_hold__Fv()")
del_items(0x80070BB0)
SetType(0x80070BB0, "void music_release__Fv()")
del_items(0x80070C00)
SetType(0x80070C00, "void flyabout__7GamePad(struct GamePad *this)")
del_items(0x800710BC)
SetType(0x800710BC, "void CloseInvChr__Fv()")
del_items(0x80071104)
SetType(0x80071104, "void WorldToOffset__Fiii(int pnum, int WorldX, int WorldY)")
del_items(0x800711B0)
SetType(0x800711B0, "char pad_UpIsUp__Fi(int pval)")
del_items(0x80071220)
SetType(0x80071220, "char pad_UpIsUpRight__Fi(int pval)")
del_items(0x80071290)
SetType(0x80071290, "struct GamePad *__7GamePadi(struct GamePad *this, int player_num)")
del_items(0x800713C0)
SetType(0x800713C0, "void SetMoveStyle__7GamePadc(struct GamePad *this, char style_num)")
del_items(0x80071400)
SetType(0x80071400, "void SetDownButton__7GamePadiPFi_v(struct GamePad *this, int pad_val, void (*func)())")
del_items(0x80071444)
SetType(0x80071444, "void SetComboDownButton__7GamePadiPFi_v(struct GamePad *this, int pad_val, void (*func)())")
del_items(0x80071488)
SetType(0x80071488, "void SetAllButtons__7GamePadP11KEY_ASSIGNS(struct GamePad *this, struct KEY_ASSIGNS *actions)")
del_items(0x800716F8)
SetType(0x800716F8, "void GetAllButtons__7GamePadP11KEY_ASSIGNS(struct GamePad *this, struct KEY_ASSIGNS *actions)")
del_items(0x800718B8)
SetType(0x800718B8, "int GetActionButton__7GamePadPFi_v(struct GamePad *this, void (*func)())")
del_items(0x80071914)
SetType(0x80071914, "void SetUpAction__7GamePadPFi_vT1(struct GamePad *this, void (*func)(), void (*upfunc)())")
del_items(0x80071950)
SetType(0x80071950, "void RunFunc__7GamePadi(struct GamePad *this, int pad)")
del_items(0x800719F0)
SetType(0x800719F0, "void ButtonDown__7GamePadi(struct GamePad *this, int button)")
del_items(0x80071DC4)
SetType(0x80071DC4, "void TestButtons__7GamePad(struct GamePad *this)")
del_items(0x80071E98)
SetType(0x80071E98, "int CheckDirs__7GamePadi(struct GamePad *this, int dir)")
del_items(0x80071FB0)
SetType(0x80071FB0, "int CheckSide__7GamePadi(struct GamePad *this, int dir)")
del_items(0x80071FF4)
SetType(0x80071FF4, "int CheckBodge__7GamePadi(struct GamePad *this, int dir)")
del_items(0x800722F8)
SetType(0x800722F8, "void walk__7GamePadc(struct GamePad *this, char cmd)")
del_items(0x80072604)
SetType(0x80072604, "void check_around_player__7GamePad(struct GamePad *this)")
del_items(0x8007295C)
SetType(0x8007295C, "void show_combos__7GamePad(struct GamePad *this)")
del_items(0x80072B10)
SetType(0x80072B10, "void Handle__7GamePad(struct GamePad *this)")
del_items(0x80073174)
SetType(0x80073174, "void GamePadTask__FP4TASK(struct TASK *T)")
del_items(0x80073378)
SetType(0x80073378, "void PostGamePad__Fiiii(int val, int var1, int var2, int var3)")
del_items(0x80073428)
SetType(0x80073428, "void Init_GamePad__Fv()")
del_items(0x80073458)
SetType(0x80073458, "void InitGamePadVars__Fv()")
del_items(0x800734E8)
SetType(0x800734E8, "int SetWalkStyle__Fii(int pnum, int style)")
del_items(0x80073558)
SetType(0x80073558, "void MoveToScrollTarget__7CBlocks_addr_80073558(struct CBlocks *this)")
del_items(0x8007356C)
SetType(0x8007356C, "unsigned short GetDown__C4CPad_addr_8007356C(struct CPad *this)")
del_items(0x80073594)
SetType(0x80073594, "unsigned short GetUp__C4CPad_addr_80073594(struct CPad *this)")
del_items(0x800735BC)
SetType(0x800735BC, "unsigned short GetCur__C4CPad_addr_800735BC(struct CPad *this)")
del_items(0x800735E4)
SetType(0x800735E4, "void DoGameTestStuff__Fv()")
del_items(0x80073610)
SetType(0x80073610, "void DoInitGameStuff__Fv()")
del_items(0x80073644)
SetType(0x80073644, "void *SMemAlloc(unsigned long bytes, char *filename, int linenumber, unsigned long flags)")
del_items(0x80073664)
SetType(0x80073664, "unsigned char SMemFree(void *ptr, char *filename, int linenumber, unsigned long flags)")
del_items(0x80073684)
SetType(0x80073684, "void GRL_InitGwin__Fv()")
del_items(0x80073690)
SetType(0x80073690, "unsigned long (*GRL_SetWindowProc__FPFUlUilUl_Ul(unsigned long (*NewProc)()))()")
del_items(0x800736A0)
SetType(0x800736A0, "void GRL_CallWindowProc__FUlUilUl(unsigned long hw, unsigned int msg, long wp, unsigned long lp)")
del_items(0x800736C8)
SetType(0x800736C8, "unsigned char GRL_PostMessage__FUlUilUl(unsigned long hWnd, unsigned int Msg, long wParam, unsigned long lParam)")
del_items(0x80073774)
SetType(0x80073774, "char *Msg2Txt__Fi(int Msg)")
del_items(0x800737BC)
SetType(0x800737BC, "enum LANG_TYPE LANG_GetLang__Fv()")
del_items(0x800737C8)
SetType(0x800737C8, "void LANG_SetDb__F10LANG_DB_NO(enum LANG_DB_NO NewLangDbNo)")
del_items(0x80073948)
SetType(0x80073948, "char *GetStr__Fi(int StrId)")
del_items(0x800739B0)
SetType(0x800739B0, "void LANG_SetLang__F9LANG_TYPE(enum LANG_TYPE NewLanguageType)")
del_items(0x80073B28)
SetType(0x80073B28, "void DumpCurrentText__Fv()")
del_items(0x80073B80)
SetType(0x80073B80, "int CalcNumOfStrings__FPPc(char **TPtr)")
del_items(0x80073B8C)
SetType(0x80073B8C, "void GetLangFileName__F9LANG_TYPEPc(enum LANG_TYPE NewLanguageType, char *Dest)")
del_items(0x80073CAC)
SetType(0x80073CAC, "char *GetLangFileNameExt__F9LANG_TYPE(enum LANG_TYPE NewLanguageType)")
del_items(0x80073D2C)
SetType(0x80073D2C, "void TempPrintMissile__FiiiiiiiiccUcUcUcc(int ScrX, int ScrY, int OtPos, int spell, int aframe, int direction, int anim, int sfx, int xflip, int yflip, int red, int grn, int blu, int semi)")
del_items(0x80074270)
SetType(0x80074270, "void FuncTOWN__FP13MissileStructiii(struct MissileStruct *Ms, int ScrX, int ScrY, int OtPos)")
del_items(0x800743F0)
SetType(0x800743F0, "void FuncRPORTAL__FP13MissileStructiii(struct MissileStruct *Ms, int ScrX, int ScrY, int OtPos)")
del_items(0x80074550)
SetType(0x80074550, "void FuncFIREBOLT__FP13MissileStructiii(struct MissileStruct *Ms, int ScrX, int ScrY, int OtPos)")
del_items(0x800745E8)
SetType(0x800745E8, "void FuncHBOLT__FP13MissileStructiii(struct MissileStruct *Ms, int ScrX, int ScrY, int OtPos)")
del_items(0x80074698)
SetType(0x80074698, "void FuncLIGHTNING__FP13MissileStructiii(struct MissileStruct *Ms, int ScrX, int ScrY, int OtPos)")
del_items(0x800746FC)
SetType(0x800746FC, "void FuncGUARDIAN__FP13MissileStructiii(struct MissileStruct *Ms, int ScrX, int ScrY, int OtPos)")
del_items(0x80074814)
SetType(0x80074814, "void FuncFIREWALL__FP13MissileStructiii(struct MissileStruct *Ms, int ScrX, int ScrY, int OtPos)")
del_items(0x800748AC)
SetType(0x800748AC, "void FuncFIREMOVE__FP13MissileStructiii(struct MissileStruct *Ms, int ScrX, int ScrY, int OtPos)")
del_items(0x80074944)
SetType(0x80074944, "void FuncFLAME__FP13MissileStructiii(struct MissileStruct *Ms, int ScrX, int ScrY, int OtPos)")
del_items(0x800749AC)
SetType(0x800749AC, "void FuncARROW__FP13MissileStructiii(struct MissileStruct *Ms, int ScrX, int ScrY, int OtPos)")
del_items(0x80074A40)
SetType(0x80074A40, "void FuncFARROW__FP13MissileStructiii(struct MissileStruct *Ms, int ScrX, int ScrY, int OtPos)")
del_items(0x80074B20)
SetType(0x80074B20, "void FuncLARROW__FP13MissileStructiii(struct MissileStruct *Ms, int ScrX, int ScrY, int OtPos)")
del_items(0x80074BF8)
SetType(0x80074BF8, "void FuncMAGMABALL__FP13MissileStructiii(struct MissileStruct *Ms, int ScrX, int ScrY, int OtPos)")
del_items(0x80074C88)
SetType(0x80074C88, "void FuncBONESPIRIT__FP13MissileStructiii(struct MissileStruct *Ms, int ScrX, int ScrY, int OtPos)")
del_items(0x80074DA4)
SetType(0x80074DA4, "void FuncACID__FP13MissileStructiii(struct MissileStruct *Ms, int ScrX, int ScrY, int OtPos)")
del_items(0x80074E40)
SetType(0x80074E40, "void FuncACIDSPLAT__FP13MissileStructiii(struct MissileStruct *Ms, int ScrX, int ScrY, int OtPos)")
del_items(0x80074EA8)
SetType(0x80074EA8, "void FuncACIDPUD__FP13MissileStructiii(struct MissileStruct *Ms, int ScrX, int ScrY, int OtPos)")
del_items(0x80074F10)
SetType(0x80074F10, "void FuncFLARE__FP13MissileStructiii(struct MissileStruct *Ms, int ScrX, int ScrY, int OtPos)")
del_items(0x80075044)
SetType(0x80075044, "void FuncFLAREXP__FP13MissileStructiii(struct MissileStruct *Ms, int ScrX, int ScrY, int OtPos)")
del_items(0x80075188)
SetType(0x80075188, "void FuncCBOLT__FP13MissileStructiii(struct MissileStruct *Ms, int ScrX, int ScrY, int OtPos)")
del_items(0x800751F0)
SetType(0x800751F0, "void FuncBOOM__FP13MissileStructiii(struct MissileStruct *Ms, int ScrX, int ScrY, int OtPos)")
del_items(0x80075248)
SetType(0x80075248, "void FuncELEMENT__FP13MissileStructiii(struct MissileStruct *Ms, int ScrX, int ScrY, int OtPos)")
del_items(0x80075314)
SetType(0x80075314, "void FuncMISEXP__FP13MissileStructiii(struct MissileStruct *Ms, int ScrX, int ScrY, int OtPos)")
del_items(0x80075378)
SetType(0x80075378, "void FuncRHINO__FP13MissileStructiii(struct MissileStruct *Ms, int ScrX, int ScrY, int OtPos)")
del_items(0x80075380)
SetType(0x80075380, "void FuncFLASH__FP13MissileStructiii(struct MissileStruct *Ms, int x, int y, int OtPos)")
del_items(0x800758A8)
SetType(0x800758A8, "void FuncMANASHIELD__FP13MissileStructiii(struct MissileStruct *Ms, int ScrX, int ScrY, int OtPos)")
del_items(0x80075950)
SetType(0x80075950, "void FuncFLASH2__FP13MissileStructiii(struct MissileStruct *Ms, int ScrX, int ScrY, int OtPos)")
del_items(0x80075958)
SetType(0x80075958, "void FuncRESURRECTBEAM__FP13MissileStructiii(struct MissileStruct *Ms, int ScrX, int ScrY, int OtPos)")
del_items(0x8007598C)
SetType(0x8007598C, "void PRIM_GetPrim__FPP8POLY_FT4_addr_8007598C(struct POLY_FT4 **Prim)")
del_items(0x80075A08)
SetType(0x80075A08, "struct CPlayer *GetPlayer__7CPlayeri_addr_80075A08(int PNum)")
del_items(0x80075A58)
SetType(0x80075A58, "int GetLastOtPos__C7CPlayer_addr_80075A58(struct CPlayer *this)")
del_items(0x80075A64)
SetType(0x80075A64, "int GetLastScrY__C7CPlayer_addr_80075A64(struct CPlayer *this)")
del_items(0x80075A70)
SetType(0x80075A70, "int GetLastScrX__C7CPlayer_addr_80075A70(struct CPlayer *this)")
del_items(0x80075A7C)
SetType(0x80075A7C, "int GetNumOfFrames__7TextDat_addr_80075A7C(struct TextDat *this)")
del_items(0x80075A90)
SetType(0x80075A90, "struct FRAME_HDR *GetFr__7TextDati_addr_80075A90(struct TextDat *this, int FrNum)")
del_items(0x80075AAC)
SetType(0x80075AAC, "void ML_Init__Fv()")
del_items(0x80075AE4)
SetType(0x80075AE4, "int ML_GetList__Fi(int Level)")
del_items(0x80075B64)
SetType(0x80075B64, "int ML_SetRandomList__Fi(int Level)")
del_items(0x80075BFC)
SetType(0x80075BFC, "int ML_SetList__Fii(int Level, int List)")
del_items(0x80075CAC)
SetType(0x80075CAC, "int ML_GetPresetMonsters__FiPiUl(int currlevel, int *typelist, unsigned long QuestsNeededMask)")
del_items(0x80075E68)
SetType(0x80075E68, "struct POLY_FT4 *DefaultObjPrint__FP12ObjectStructiiP7TextDatiii(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos, int XOffSet, int YOffSet)")
del_items(0x80075FFC)
SetType(0x80075FFC, "struct POLY_FT4 *LightObjPrint__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x800760B4)
SetType(0x800760B4, "struct POLY_FT4 *DoorObjPrint__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076348)
SetType(0x80076348, "void DrawLightSpark__Fiii(int xo, int yo, int ot)")
del_items(0x80076420)
SetType(0x80076420, "struct POLY_FT4 *PrintOBJ_L1LIGHT__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x800764A8)
SetType(0x800764A8, "struct POLY_FT4 *PrintOBJ_SKFIRE__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x800764D4)
SetType(0x800764D4, "struct POLY_FT4 *PrintOBJ_LEVER__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076500)
SetType(0x80076500, "struct POLY_FT4 *PrintOBJ_CHEST1__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x8007652C)
SetType(0x8007652C, "struct POLY_FT4 *PrintOBJ_CHEST2__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076558)
SetType(0x80076558, "struct POLY_FT4 *PrintOBJ_CHEST3__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076584)
SetType(0x80076584, "struct POLY_FT4 *PrintOBJ_CANDLE1__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x800765A8)
SetType(0x800765A8, "struct POLY_FT4 *PrintOBJ_CANDLE2__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x800765CC)
SetType(0x800765CC, "struct POLY_FT4 *PrintOBJ_CANDLEO__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x800765F8)
SetType(0x800765F8, "struct POLY_FT4 *PrintOBJ_BANNERL__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076624)
SetType(0x80076624, "struct POLY_FT4 *PrintOBJ_BANNERM__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076650)
SetType(0x80076650, "struct POLY_FT4 *PrintOBJ_BANNERR__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x8007667C)
SetType(0x8007667C, "struct POLY_FT4 *PrintOBJ_SKPILE__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x800766A8)
SetType(0x800766A8, "struct POLY_FT4 *PrintOBJ_SKSTICK1__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x800766D4)
SetType(0x800766D4, "struct POLY_FT4 *PrintOBJ_SKSTICK2__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076700)
SetType(0x80076700, "struct POLY_FT4 *PrintOBJ_SKSTICK3__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x8007672C)
SetType(0x8007672C, "struct POLY_FT4 *PrintOBJ_SKSTICK4__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076758)
SetType(0x80076758, "struct POLY_FT4 *PrintOBJ_SKSTICK5__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x80076784)
SetType(0x80076784, "struct POLY_FT4 *PrintOBJ_CRUX1__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x800767B0)
SetType(0x800767B0, "struct POLY_FT4 *PrintOBJ_CRUX2__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, int ScrY, struct TextDat *ObjDat, int OtPos)")
del_items(0x800767DC)
SetType(0x800767DC, "struct POLY_FT4 *PrintOBJ_CRUX3__FP12ObjectStructiiP7TextDati(struct ObjectStruct *OStr, int ScrX, | |
# Copyright (C) 2015-2020 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from hamcrest import ( assert_that,
contains_exactly,
contains_inanyorder,
empty,
equal_to,
has_entries,
has_entry,
matches_regexp )
from unittest.mock import patch
from pprint import pformat
import os
import pytest
import requests
from ycmd import handlers
from ycmd.tests.rust import PathToTestFile, SharedYcmd
from ycmd.tests.test_utils import ( BuildRequest,
ChunkMatcher,
ErrorMatcher,
ExpectedFailure,
LocationMatcher,
WithRetry )
from ycmd.utils import ReadFile
RESPONSE_TIMEOUT = 5
def RunTest( app, test, contents = None ):
  """Send the subcommand request described by |test| and verify the response.

  |test| is a dict with:
    'request': the filepath, command, optional arguments/line/column data
    'expect':  the expected HTTP status code and a hamcrest matcher for the
               JSON body
  |contents| overrides reading the buffer contents from disk when supplied.
  """
  if not contents:
    contents = ReadFile( test[ 'request' ][ 'filepath' ] )

  def CombineRequest( request, data ):
    # Work on a copy: updating |request| in place would leak keys (e.g.
    # 'event_name' from the first call below) into the caller's
    # test[ 'request' ] dict and therefore into the next request built
    # from it.
    kw = dict( request )
    kw.update( data )
    return BuildRequest( **kw )

  # Because we aren't testing this command, we *always* ignore errors. This
  # is mainly because we (may) want to test scenarios where the completer
  # throws an exception while handling the parse notification.
  app.post_json( '/event_notification',
                 CombineRequest( test[ 'request' ], {
                   'event_name': 'FileReadyToParse',
                   'contents': contents,
                   'filetype': 'rust',
                 } ),
                 expect_errors = True )

  # We also ignore errors here, but then we check the response code
  # ourself. This is to allow testing of requests returning errors.
  response = app.post_json(
    '/run_completer_command',
    CombineRequest( test[ 'request' ], {
      'completer_target': 'filetype_default',
      'contents': contents,
      'filetype': 'rust',
      'command_arguments': ( [ test[ 'request' ][ 'command' ] ]
                             + test[ 'request' ].get( 'arguments', [] ) )
    } ),
    expect_errors = True
  )

  print( f'completer response: { pformat( response.json ) }' )

  assert_that( response.status_code,
               equal_to( test[ 'expect' ][ 'response' ] ) )
  assert_that( response.json, test[ 'expect' ][ 'data' ] )
@SharedYcmd
def Subcommands_DefinedSubcommands_test( app ):
  # The full set of subcommands the rust completer should advertise.
  expected_subcommands = [ 'FixIt',
                           'Format',
                           'GetDoc',
                           'GetType',
                           'GoTo',
                           'GoToDeclaration',
                           'GoToDefinition',
                           'GoToImplementation',
                           'GoToReferences',
                           'GoToSymbol',
                           'GoToType',
                           'RefactorRename',
                           'RestartServer' ]
  subcommands_data = BuildRequest( completer_target = 'rust' )
  response = app.post_json( '/defined_subcommands', subcommands_data ).json
  assert_that( response, contains_inanyorder( *expected_subcommands ) )
@SharedYcmd
def Subcommands_ServerNotInitialized_test( app ):
  # Every location-based subcommand must fail cleanly with the same error
  # while the language server is still initializing.
  filepath = PathToTestFile( 'common', 'src', 'main.rs' )
  completer = handlers._server_state.GetFiletypeCompleter( [ 'rust' ] )
  # Patch the readiness check so the completer always reports "initializing".
  @patch.object( completer, '_ServerIsInitialized', return_value = False )
  def Test( app, cmd, arguments, *args ):
    RunTest( app, {
      'description': 'Subcommand ' + cmd + ' handles server not ready',
      'request': {
        'command': cmd,
        'line_num': 1,
        'column_num': 1,
        'filepath': filepath,
        'arguments': arguments,
      },
      'expect': {
        'response': requests.codes.internal_server_error,
        'data': ErrorMatcher( RuntimeError,
                              'Server is initializing. Please wait.' ),
      }
    } )
  # Exercise each subcommand through the patched completer.
  Test( app, 'Format', [] )
  Test( app, 'FixIt', [] )
  Test( app, 'GetType', [] )
  Test( app, 'GetDoc', [] )
  Test( app, 'GoTo', [] )
  Test( app, 'GoToDeclaration', [] )
  Test( app, 'GoToDefinition', [] )
  Test( app, 'GoToImplementation', [] )
  Test( app, 'GoToReferences', [] )
  Test( app, 'RefactorRename', [ 'test' ] )
@SharedYcmd
def Subcommands_Format_WholeFile_test( app ):
  # The formatter responds with a single chunk that rewrites the whole
  # buffer, from line 1 column 1 to the end of the file.
  filepath = PathToTestFile( 'common', 'src', 'main.rs' )
  RunTest( app, {
    'description': 'Formatting is applied on the whole file',
    'request': {
      'command': 'Format',
      'filepath': filepath,
      'options': {
        'tab_size': 2,
        'insert_spaces': True
      }
    },
    'expect': {
      'response': requests.codes.ok,
      'data': has_entries( {
        'fixits': contains_exactly( has_entries( {
          'chunks': contains_exactly(
            # Let's just rewrite the whole file...
            ChunkMatcher( "mod test;\n\nuse test::*;\n\nstruct Earth {}"
                          "\nstruct Mars {}\ntrait Atmosphere {}\nimpl "
                          "Atmosphere for Earth {}\nimpl Atmosphere for "
                          "Mars {}\n\nfn main() {\n create_universe();"
                          "\n let builder = Builder {};\n builder."
                          "build_\n}\n\nfn format_test() {\n let a: "
                          "i32 = 5;\n}\n",
                          LocationMatcher( filepath, 1, 1 ),
                          LocationMatcher( filepath, 23, 1 ) ),
          )
        } ) )
      } )
    }
  } )
# NOTE: marked as an expected failure until the server implements
# textDocument/rangeFormatting; the request currently returns a 500.
@ExpectedFailure( 'rangeFormat is not yet implemented',
                  matches_regexp( '\nExpected: <200>\n but: was <500>\n' ) )
@SharedYcmd
def Subcommands_Format_Range_test( app ):
  filepath = PathToTestFile( 'common', 'src', 'main.rs' )
  RunTest( app, {
    'description': 'Formatting is applied on some part of the file',
    'request': {
      'command': 'Format',
      'filepath': filepath,
      # Only lines 17-22 of the buffer are submitted for formatting.
      'range': {
        'start': {
          'line_num': 17,
          'column_num': 1,
        },
        'end': {
          'line_num': 22,
          'column_num': 2
        }
      },
      # Tabs requested here, unlike the whole-file test above.
      'options': {
        'tab_size': 4,
        'insert_spaces': False
      }
    },
    'expect': {
      'response': requests.codes.ok,
      'data': has_entries( {
        'fixits': contains_exactly( has_entries( {
          'chunks': contains_exactly(
            ChunkMatcher( 'fn format_test() {\n'
                          '\tlet a: i32 = 5;\n',
                          LocationMatcher( filepath, 17, 1 ),
                          LocationMatcher( filepath, 22, 1 ) ),
          )
        } ) )
      } )
    }
  } )
@SharedYcmd
def Subcommands_GetDoc_NoDocumentation_test( app ):
  # Requesting docs for an undocumented symbol must surface a RuntimeError.
  request = {
    'command': 'GetDoc',
    'line_num': 4,
    'column_num': 11,
    'filepath': PathToTestFile( 'common', 'src', 'test.rs' ),
  }
  expect = {
    'response': requests.codes.internal_server_error,
    'data': ErrorMatcher( RuntimeError,
                          'No documentation available.' )
  }
  RunTest( app, {
    'description': 'GetDoc on a function with no documentation '
                   'raises an error',
    'request': request,
    'expect': expect
  } )
@WithRetry
@SharedYcmd
def Subcommands_GetDoc_Function_test( app ):
  # detailed_info combines the symbol's module path, its signature and the
  # doc comment, separated by '---'.
  RunTest( app, {
    'description': 'GetDoc on a function returns its documentation',
    'request': {
      'command': 'GetDoc',
      'line_num': 2,
      'column_num': 8,
      'filepath': PathToTestFile( 'common', 'src', 'test.rs' ),
    },
    'expect': {
      'response': requests.codes.ok,
      'data': has_entry( 'detailed_info',
                         'common::test\n'
                         'pub fn create_universe()\n'
                         '---\n'
                         'Be careful when using that function' ),
    }
  } )
@SharedYcmd
def Subcommands_GetType_UnknownType_test( app ):
  # A position with no typed symbol under the cursor yields a RuntimeError.
  RunTest( app, {
    'description': 'GetType on a unknown type raises an error',
    'request': {
      'command': 'GetType',
      'line_num': 2,
      'column_num': 4,
      'filepath': PathToTestFile( 'common', 'src', 'test.rs' ),
    },
    'expect': {
      'response': requests.codes.internal_server_error,
      'data': ErrorMatcher( RuntimeError, 'Unknown type.' )
    }
  } )
@WithRetry
@SharedYcmd
def Subcommands_GetType_Function_test( app ):
  # GetType on a function name reports the full function signature.
  RunTest( app, {
    'description': 'GetType on a function returns its type',
    'request': {
      'command': 'GetType',
      'line_num': 2,
      'column_num': 22,
      'filepath': PathToTestFile( 'common', 'src', 'test.rs' ),
    },
    'expect': {
      'response': requests.codes.ok,
      'data': has_entry( 'message', 'pub fn create_universe()' ),
    }
  } )
def RunGoToTest( app, command, test ):
  """Drive a GoTo-style |command| described by |test|.

  test[ 'req' ] is ( filename, line, column ) for the request.
  test[ 'res' ] is either one ( filename, line, column ) location, a list of
  such locations, or an error-message string (with an optional exception
  type in test[ 'exc' ], defaulting to RuntimeError).
  """
  folder = PathToTestFile( 'common', 'src' )
  filepath = os.path.join( folder, test[ 'req' ][ 0 ] )
  request = {
    'command': command,
    'line_num': test[ 'req' ][ 1 ],
    'column_num': test[ 'req' ][ 2 ],
    'filepath': filepath,
  }

  response = test[ 'res' ]
  if isinstance( response, list ):
    # Multiple expected locations, order-insensitive.
    matchers = [ LocationMatcher( os.path.join( folder, loc[ 0 ] ),
                                  loc[ 1 ],
                                  loc[ 2 ] ) for loc in response ]
    expect = {
      'response': requests.codes.ok,
      'data': contains_inanyorder( *matchers )
    }
  elif isinstance( response, tuple ):
    # A single expected location.
    expect = {
      'response': requests.codes.ok,
      'data': LocationMatcher( os.path.join( folder, response[ 0 ] ),
                               response[ 1 ],
                               response[ 2 ] )
    }
  else:
    # An expected error message.
    expect = {
      'response': requests.codes.internal_server_error,
      'data': ErrorMatcher( test.get( 'exc', RuntimeError ), response )
    }

  RunTest( app, { 'request': request, 'expect': expect } )
@pytest.mark.parametrize( 'test', [
  # Variable
  { 'req': ( 'main.rs', 14, 5 ), 'res': ( 'test.rs', 4, 12 ) },
  # Type
  { 'req': ( 'main.rs', 13, 19 ), 'res': ( 'test.rs', 4, 12 ) },
  # Function
  { 'req': ( 'main.rs', 12, 14 ), 'res': 'Cannot jump to location' },
  # Keyword
  { 'req': ( 'main.rs', 3, 2 ), 'res': 'Cannot jump to location' },
] )
@SharedYcmd
def Subcommands_GoToType_Basic_test( app, test ):
  # GoToType resolves an expression to the definition of its type.
  RunGoToTest( app, 'GoToType', test )
@pytest.mark.parametrize( 'test', [
  # Structure
  { 'req': ( 'main.rs', 8, 24 ), 'res': ( 'main.rs', 5, 8 ) },
  # Function
  { 'req': ( 'main.rs', 12, 14 ), 'res': ( 'test.rs', 2, 8 ) },
  # Implementation
  { 'req': ( 'main.rs', 9, 12 ), 'res': ( 'main.rs', 7, 7 ) },
  # Keyword
  { 'req': ( 'main.rs', 3, 2 ), 'res': 'Cannot jump to location' },
] )
@pytest.mark.parametrize( 'command', [ 'GoToDeclaration',
                                       'GoToDefinition',
                                       'GoTo' ] )
@WithRetry
@SharedYcmd
def Subcommands_GoTo_test( app, command, test ):
  # All three GoTo flavours are exercised against the same expectations.
  RunGoToTest( app, command, test )
@pytest.mark.parametrize( 'test', [
  # Structure
  { 'req': ( 'main.rs', 5, 9 ), 'res': ( 'main.rs', 8, 21 ) },
  # Trait
  { 'req': ( 'main.rs', 7, 7 ), 'res': [ ( 'main.rs', 8, 21 ),
                                         ( 'main.rs', 9, 21 ) ] },
] )
@WithRetry
@SharedYcmd
def Subcommands_GoToImplementation_test( app, test ):
  # A struct jumps to its single impl; a trait lists every impl of it.
  RunGoToTest( app, 'GoToImplementation', test )
@WithRetry
@SharedYcmd
def Subcommands_GoToImplementation_Failure_test( app ):
  # A position with nothing to implement must raise a RuntimeError.
  test = { 'req': ( 'main.rs', 11, 2 ),
           'res': 'Cannot jump to location',
           'exc': RuntimeError }
  RunGoToTest( app, 'GoToImplementation', test )
@pytest.mark.parametrize( 'test', [
# Struct
{ 'req': ( 'main.rs', | |
one of this program's text fields." ),
8: ( 'A quick and easy way to view file structures relating to a given texture is to use '
'the "Show in Structural Analysis" feature, found by right-clicking on a texture.' ),
9: ( "You don't have to close this program in order to run your disc in Dolphin "
'(though you do need to stop emulation if you want to save changes to the disc).' ),
10: ( "DODONGO DISLIKES SMOKE." ),
11: ( "Have you ever noticed those dotted lines at the top of the 'Open Recent' "
"and 'Texture Operations' menus? Try clicking on one sometime! It will turn the menu into a window for fast-access." ),
12: ( "If you click on one of the 'Disc Shortcuts' before loading a disc, DTW will load the "
"last disc that you've used, and then jump to the appropriate section. They're two shortcuts in one!" ),
13: ( "When DTW builds a disc from a root folder of files, it can build a ISO that's a good amount smaller than the "
"standard disc size of ~1.35 GB (1,459,978,240 bytes). Useful if you want to add more or larger files." ),
14: ( 'You can actually modify the amount of empty space, or "padding", present between files in your ISO. A small '
'amount of padding allows for more files or total data in the same size ISO. While more padding allows you to '
'replace/import larger files without having to rebuild the disc.' ),
15: ( "Did you notice the cheese in the toilet? It's in every level." ),
16: ( "This program has a lot of lesser-known but very useful features, some of which aren't easily found "
"by browsing the GUI. Check out the Program Usage.txt to find them all." ),
#17: ( '' ),
#18: ( '' ),
#19: ( '' ),
#20: ( "IT'S A SECRET TO EVERYBODY." ),
}
def showReadMeFile( event=None ): # May take a click event from the help window click binding
    """ Opens the 'Program Usage.txt' usage guide with the system's default text
        editor. Shows an error message to the user if the file can't be opened. """
    try:
        # os.path.join is equivalent to the old manual '\\' concatenation on
        # Windows (os.startfile is Windows-only anyway), but portable.
        os.startfile( os.path.join(scriptHomeFolder, 'Program Usage.txt') )
    except Exception: # Was a bare except; narrowed so Ctrl-C/SystemExit still propagate
        msg( "Couldn't find the 'Program Usage.txt' file!" )
def showSupportWindow():
    """ Shows a small, always-on-top window with clickable donation links
        (PayPal / Patreon) drawn over a background image. """
    # Build the window
    window = Tk.Toplevel( Gui.root )
    window.title( 'Support DTW' )
    window.attributes( '-toolwindow', 1 ) # Small, toolbox-style window framing
    window.resizable( width=False, height=False )
    window.wm_attributes( '-topmost', 1 ) # Keep above the main window (which remains usable)
    # Spawn the window slightly offset from the main program window
    xOffset, yOffset = getWindowGeometry( Gui.root )[2:]
    window.geometry( '+' + str(xOffset + 120) + '+' + str(yOffset + 100) )
    window.focus()
    canvas = Tk.Canvas( window, bg='#101010', width=640, height=394, borderwidth=0, highlightthickness=0 )
    # Background image, with invisible rectangles placed over it to act as buttons
    canvas.create_image( 0, 0, image=Gui.imageBank('supportDTW'), anchor='nw' )
    canvas.create_rectangle( 288, 224, 357, 245, outline="", tags=('paypalLink', 'link') )
    canvas.create_rectangle( 350, 292, 432, 310, outline="", tags=('patreonLink', 'link') )
    # Open the appropriate page when a button is clicked
    def gotoPaypal( event ): webbrowser.open( r'https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=K95AJCMZDR7CG&lc=US&item_name=Melee%20Modding&item_number=DTW&currency_code=USD&bn=PP%2dDonationsBF%3abtn_donate_SM%2egif%3aNonHosted' )
    def gotoPatreon( event ): webbrowser.open( r'https://www.patreon.com/drgn' )
    canvas.tag_bind( 'paypalLink', '<1>', gotoPaypal )
    canvas.tag_bind( 'patreonLink', '<1>', gotoPatreon )
    # Swap the cursor to a pointing hand while hovering over either button
    canvas.tag_bind( 'link', '<Enter>', lambda event: window.config( cursor='hand2' ) )
    canvas.tag_bind( 'link', '<Leave>', lambda event: window.config( cursor='' ) )
    canvas.pack( pady=0, padx=0 )
def showAboutWindow(): # todo: should be a class based off of basicWindow
    """ Shows the program's 'About' window, with version info, a link to the
        author's profile, and a continuously scrolling banner animation. Only one
        instance is allowed; a second call just raises the existing window.
        NOTE: this function does not return until the window is closed (the
        animation loop below drives the canvas updates). """
    if Gui.root.aboutWindow is not None: Gui.root.aboutWindow.deiconify() # Already open; just un-minimize/raise it
    else:
        # Define the window
        aboutWindow = Tk.Toplevel( Gui.root )
        aboutWindow.title( 'DAT Texture Wizard' )
        aboutWindow.attributes( '-toolwindow', 1 ) # Makes window framing small, like a toolbox/widget.
        aboutWindow.resizable( width=False, height=False )
        aboutWindow.wm_attributes( '-topmost', 1 )
        Gui.root.aboutWindow = aboutWindow
        # lulz
        Gui.root.aboutWindow.originalProgramStatus = Gui.programStatus.get()
        updateProgramStatus( 'Too good!' )
        # Calculate the spawning position of the new window
        rootDistanceFromScreenLeft, rootDistanceFromScreenTop = getWindowGeometry( Gui.root )[2:]
        aboutWindow.geometry( '+' + str(rootDistanceFromScreenLeft + 240) + '+' + str(rootDistanceFromScreenTop + 170) )
        aboutWindow.focus()
        # Button functions
        def close():
            # Restore the status bar text, and clear the singleton reference
            # (which also terminates the animation loop below).
            updateProgramStatus( Gui.root.aboutWindow.originalProgramStatus )
            Gui.root.aboutWindow.destroy()
            Gui.root.aboutWindow = None
        aboutWindow.protocol( 'WM_DELETE_WINDOW', close ) # Overrides the 'X' close button.
        # Create the canvas
        aboutCanvas = Tk.Canvas( aboutWindow, bg='#101010', width=350, height=247 )
        aboutCanvas.pack()
        # Define a few images (kept as canvas attributes so they aren't garbage collected)
        aboutCanvas.bannerImage = Gui.imageBank( 'pannerBanner' ) # 604x126
        aboutCanvas.hoverOverlayImage = Gui.imageBank('hoverOverlay')
        aboutCanvas.blankBoxImage = ImageTk.PhotoImage( Image.new('RGBA', (182,60)) ) # Sits behind the main background (same size/position as bgbg).
        # Attach the images to the canvas
        aboutCanvas.create_image( 88, 98, image=Gui.imageBank('bgbg'), anchor='nw' ) # Sits behind the main background (182x60).
        aboutCanvas.create_image( 10, 123, image=aboutCanvas.bannerImage, anchor='w', tags='r2lBanners' )
        aboutCanvas.create_image( 340, 123, image=aboutCanvas.bannerImage, anchor='e', tags='l2rBanners' )
        foregroundObject = aboutCanvas.create_image( 2, 2, image=Gui.imageBank('bg'), anchor='nw' ) # The main background, the mask (350x247).
        # Define and attach the text to the canvas
        windowFont = tkFont.Font(family='MS Serif', size=11, weight='normal')
        aboutCanvas.create_text( 207, 77, text='C r e a t e d  b y', fill='#d4d4ef', font=windowFont )
        aboutCanvas.create_text( 207, 174, text='Version ' + programVersion, fill='#d4d4ef', font=windowFont )
        aboutCanvas.create_text( 207, 204, text='Written in Python v' + sys.version.split()[0] + '\nand tKinter v' + str( Tk.TkVersion ),
            justify='center', fill='#d4d4ef', font=windowFont )
        # Create a "button", and bind events for the mouse pointer, and for going to my profile page on click.
        aboutCanvas.create_image( 82, 98, image=aboutCanvas.blankBoxImage, activeimage=aboutCanvas.hoverOverlayImage, anchor='nw', tags='profileLink' ) # 88 in v4.3
        def gotoProfile( event ): webbrowser.open( 'http://smashboards.com/members/drgn.21936/' )
        def changeCursorToHand( event ): aboutWindow.config( cursor='hand2' )
        def changeCursorToArrow( event ): aboutWindow.config( cursor='' )
        aboutCanvas.tag_bind( 'profileLink', '<1>', gotoProfile )
        aboutCanvas.tag_bind( 'profileLink', '<Enter>', changeCursorToHand )
        aboutCanvas.tag_bind( 'profileLink', '<Leave>', changeCursorToArrow )
        # v Creates an infinite "revolving" image between the two background elements.
        i = 0
        while Gui.root.aboutWindow is not None: # Loop ends when close() clears the reference
            if i == 0:
                aboutCanvas.create_image( 614, 123, image=aboutCanvas.bannerImage, anchor='w', tags='r2lBanners' )
                aboutCanvas.create_image( 340 - 604, 123, image=aboutCanvas.bannerImage, anchor='e', tags='l2rBanners' )
                aboutCanvas.tag_lower( 'r2lBanners', foregroundObject ) # Update the layer order to keep the foreground on top.
                aboutCanvas.tag_lower( 'l2rBanners', foregroundObject ) # Update the layer order to keep the foreground on top.
            i += 1
            aboutCanvas.move( 'r2lBanners', -1, 0 )
            aboutCanvas.move( 'l2rBanners', 1, 0 )
            time.sleep( .13 ) # Value in seconds
            aboutCanvas.update()
            if i == 604: # Delete the first banner, so the canvas isn't infinitely long
                aboutCanvas.delete( aboutCanvas.find_withtag('r2lBanners')[0] )
                aboutCanvas.delete( aboutCanvas.find_withtag('l2rBanners')[0] )
                i = 0
def treeview_sort_column( treeview, col, reverse ):
    """ Sorts the rows of the given treeview by the given column, then re-binds the
        column heading so the next click sorts in the opposite direction. The 'file'
        column sorts the disc file tree by ISO offset (flattening folders); other
        columns sort the texture list in place. """
    # Create a list of the items, as tuples of (statOfInterest, iid), and sort them.
    if col == 'file':
        if os.path.exists( globalDiscDetails['isoFilePath'] ): # Means that a disc has been loaded.
            # Make sure the disc doesn't have any changes that need saving first
            if unsavedDiscChanges and not globalDiscDetails['rebuildRequired']:
                okToSave = tkMessageBox.askyesno( 'OK to save disc changes?',
                    'Changes to the disc must be saved before sorting its files.\n\nWould you like to save changes to the disc now?' )
                # Attempt to save, and exit this function if there was a problem.
                if not okToSave or not saveChanges(): return
            if not reverse: # The default upon starting the program.
                rootIid = Gui.isoFileTree.get_children()[0]
                rowsList = []
                foldersToDelete = []
                def sortChildren( parent ):
                    # Recursively collect files as (offset, iid) pairs, flattening folders as we go.
                    for iid in treeview.get_children( parent ):
                        description, entity, isoOffset, fileSize, isoPath, source, data = treeview.item( iid, 'values' )
                        if entity == 'folder':
                            # Organize the contents of the folder first (so that the first file's offset, to use for this folder, will be the first of the set).
                            sortChildren( iid )
                            foldersToDelete.append( iid )
                        else:
                            # Add this file to the sorting list.
                            rowsList.append( (int(isoOffset, 16), iid) )
                sortChildren( rootIid )
                # Sort the items in the treeview.
                rowsList.sort( reverse=reverse )
                for index, ( columnValue, iid ) in enumerate( rowsList ): treeview.move( iid, rootIid, index )
                # Remove the folders from the treeview.
                for folder in foldersToDelete: treeview.delete( folder )
                # Update the treeview's header text and its function call for the next (reversed) sort.
                treeview.heading( '#0', text='File (Sorted by Offset)' )
                treeview.heading( '#0', command=lambda: treeview_sort_column(treeview, col, True) )
            else:
                # "Reverse" sort simply restores the original file order by re-scanning the source.
                if isRootFolder( globalDiscDetails['isoFilePath'], showError=False )[0]: scanRoot()
                else: scanDisc()
    else:
        # Texture-list columns: build (sortKey, iid) pairs appropriate to the column type.
        if col == 'texture': rowsList = [( int(treeview.set(iid, col).split()[0],16), iid ) for iid in treeview.get_children('')]
        elif col == 'dimensions': rowsList = [( int(treeview.set(iid, col).split(' x ')[0]) * int(treeview.set(iid, col).split(' x ')[1]), iid ) for iid in treeview.get_children('')]
        elif col == 'type': rowsList = [( treeview.set(iid, col).replace('_', ''), iid ) for iid in treeview.get_children('')]
        # Sort the rows and rearrange the treeview based on the newly sorted list.
        rowsList.sort(reverse=reverse)
        for index, ( columnValue, iid ) in enumerate( rowsList ): treeview.move( iid, '', index )
        # Set the function call for the next (reversed) sort.
        treeview.heading(col, command=lambda: treeview_sort_column( treeview, col, not reverse ))
def scanDiscItemForStats( iidSelectionsTuple, folder ):
""" This is simply a helper function to recursively get the file size of all files in a given folder,
as well as total file count. """
totalFileSize = 0 # Out of scope of the original declaration; need to recreate it.
fileCount = 0
for iid in folder:
if iid not in iidSelectionsTuple: # Check | |
# Copyright (c) 2015-2021 <NAME> and contributors.
# mc3 is open-source software under the MIT license (see LICENSE).
__all__ = [
'sample',
]
import os
import sys
import importlib
import multiprocessing as mpr
from datetime import date
import numpy as np
import matplotlib as mpl
if os.environ.get('DISPLAY', '') == '':
mpl.use('Agg')
import matplotlib.pyplot as plt
from .fit_driver import fit
from .mcmc_driver import mcmc
from .ns_driver import nested_sampling
from . import utils as mu
from . import stats as ms
from . import plots as mp
from .VERSION import __version__
@mu.ignore_system_exit
def sample(
data=None, uncert=None, func=None, params=None, indparams=[],
pmin=None, pmax=None, pstep=None,
prior=None, priorlow=None, priorup=None,
sampler=None, ncpu=None, leastsq=None, chisqscale=False,
nchains=7, nsamples=None, burnin=0, thinning=1,
grtest=True, grbreak=0.0, grnmin=0.5, wlike=False,
fgamma=1.0, fepsilon=0.0, hsize=10, kickoff='normal',
plots=False, ioff=False, showbp=True, savefile=None, resume=False,
rms=False, log=None, pnames=None, texnames=None,
**kwargs):
"""
This beautiful piece of code executes an MCMC or NS posterior sampling.
Parameters
----------
data: 1D float ndarray or string
Data to be fit by func. If string, path to file containing data.
uncert: 1D float ndarray
Uncertainties of data.
func: Callable or string-iterable
The callable function that models data as:
model = func(params, *indparams)
Or an iterable of 3 strings (funcname, modulename, path)
that specifies the function name, function module, and module path.
If the module is already in the python-path scope, path can be omitted.
params: 1D float ndarray or string
Set of initial fitting parameters for func.
If string, path to file containing data.
indparams: tuple or string
Additional arguments required by func. If string, path to file
containing indparams.
pmin: 1D ndarray
Lower boundaries for the posterior exploration.
pmax: 1D ndarray
Upper boundaries for the posterior exploration.
pstep: 1D ndarray
Parameter stepping behavior.
- Free parameters have pstep>0.
- Fixed parameters have pstep=0.
- Negative values indicate a shared parameter, with pstep set to
the negative index of the sharing parameter (starting the count
from 1), e.g.: to share second parameter and first one, do:
pstep[1] = -1.
For MCMC, the pstep value of free parameters set the scale of the
initial jump proposal.
prior: 1D ndarray
Parameter priors. The type of prior is determined by priorlow
and priorup:
if both priorlow>0 and priorup>0 Gaussian
else Uniform between [pmin,pmax]
priorlow: 1D ndarray
Lower prior uncertainty values.
priorup: 1D ndarray
Upper prior uncertainty values.
sampler: String
Sampling algorithm:
- 'mrw': Metropolis random walk.
- 'demc': Differential Evolution Markov chain.
- 'snooker': DEMC-z with snooker update.
- 'dynesty': DynamicNestedSampler() sampler from dynesty.
ncpu: Integer
Number of processors for the MCMC chains (mc3 defaults to
one CPU for each chain plus a CPU for the central hub).
leastsq: String
If not None, perform a least-square optimization before the MCMC run.
Select from:
'lm': Levenberg-Marquardt (most efficient, but doesn't obey bounds)
'trf': Trust Region Reflective
chisqscale: Boolean
Scale the data uncertainties such that the reduced chi-square = 1.
nchains: Scalar
Number of simultaneous chains to run.
nsamples: Scalar
Total number of samples.
burnin: Integer
Number of burned-in (discarded) number of iterations at the beginning
of the chains.
thinning: Integer
Thinning factor of the chains (use every thinning-th iteration) used
in the GR test and plots.
wlike: Bool
If True, calculate the likelihood in a wavelet-base. This requires
three additional parameters (TBD: this needs documentation).
grtest: Boolean
If True, run Gelman & Rubin test.
grbreak: Float
Gelman-Rubin convergence threshold to stop the MCMC (I'd suggest
grbreak ~ 1.001--1.005). Do not break if grbreak=0.0 (default).
grnmin: Integer or float
Minimum number of samples required for grbreak to stop the MCMC.
If grnmin > 1: grnmin sets the minimum required number of samples.
If 0 < grnmin < 1: grnmin sets the minimum required nsamples fraction.
fgamma: Float
Proposals jump scale factor for DEMC's gamma.
The code computes: gamma = fgamma * 2.38 / sqrt(2*Nfree)
fepsilon: Float
Jump scale factor for DEMC's support distribution.
The code computes: e = fepsilon * Normal(0, pstep)
hsize: Integer
Number of initial samples per chain.
kickoff: String
Flag to indicate how to start the chains:
'normal' for normal distribution around initial guess, or
'uniform' for uniform distribution withing the given boundaries.
plots: Bool
If True plot parameter traces, pairwise-posteriors, and posterior
histograms.
ioff: Bool
If True, set plt.ioff(), i.e., do not display figures on screen.
showbp: Bool
If True, show best-fitting values in histogram and pairwise plots.
savefile: String
If not None, filename to store allparams and other MCMC results.
resume: Boolean
If True resume a previous run (identified by the .npz file name).
rms: Boolean
If True, calculate the RMS of the residuals: data - best_model.
log: String or mc3.utils.Log instance
Filename (as string) or log handler (as Log instance) handle logging.
pnames: 1D string iterable
List of parameter names (including fixed and shared parameters)
to display on output screen and figures. See also texnames.
Screen output trims up to the 11th character.
If not defined, default to texnames.
texnames: 1D string iterable
Parameter names for figures, which may use latex syntax.
If not defined, default to pnames.
kwargs: Dict
Additional keyword arguments passed to the sampler.
Returns
-------
mc3_output: Dict
A Dictionary containing the MCMC posterior distribution and related
stats, including:
- posterior: thinned posterior distribution of shape [nsamples, nfree],
including the burn-in phase.
- zchain: chain indices for the posterior samples.
- zmask: posterior mask to remove the burn-in.
- chisq: chi^2 values for the posterior samples.
- log_post: log(posterior) for the posterior samples (see Notes).
- burnin: number of burned-in samples per chain.
- ifree: Indices of the free parameters.
- pnames: Parameter names.
- texnames: Parameter names in Latex format.
- meanp: mean of the marginal posteriors.
- stdp: standard deviation of the marginal posteriors.
- CRlo: lower boundary of the marginal 68%-highest posterior
density (the credible region).
- CRhi: upper boundary of the marginal 68%-HPD.
- bestp: model parameters for the optimal log(posterior) in the sample.
- best_log_post: optimal log(posterior) in the sample (see Notes).
- best_model: model evaluated at bestp.
- best_chisq: chi^2 for the optimal log(posterior) in the sample.
- red_chisq: reduced chi-square: chi^2/(ndata-nfree) for the
best-fitting sample.
- BIC: Bayesian Information Criterion: chi^2 - nfree*log(ndata)
for the best-fitting sample.
- chisq_factor: Uncertainties scale factor to enforce chi^2_red = 1.
- stddev_residuals: standard deviation of the residuals.
- acceptance_rate: sample's acceptance rate.
Notes
-----
The log_post variable is defined here as:
log_post = log(posterior)
= log(likelihood) + log(prior)
= -0.5*chi-square + log_prior
= sum_i -0.5*((data[i] - model[i])/uncert[i])**2 + log_prior
with log_prior defined as:
log_prior = sum_j -0.5*((params[j] - prior[j])/prior_uncert[j])**2
For each parameter with a Gaussian prior.
Note that constant terms have been neglected.
Examples
--------
>>> import numpy as np
>>> import mc3
>>> def quad(p, x):
>>> return p[0] + p[1]*x + p[2]*x**2.0
>>> # Preamble, create a noisy synthetic dataset:
>>> np.random.seed(3)
>>> x = np.linspace(0, 10, 100)
>>> p_true = [3, -2.4, 0.5]
>>> y = quad(p_true, x)
>>> uncert = np.sqrt(np.abs(y))
>>> data = y + np.random.normal(0, uncert)
>>> # Initial guess for fitting parameters:
>>> params = np.array([ 3.0, -2.0, 0.1])
>>> pstep = np.array([ 1.0, 1.0, 1.0])
>>> pmin = np.array([ 0.0, -5.0, -1.0])
>>> pmax = np.array([10.0, 5.0, 1.0])
>>> # Gaussian prior on first parameter, uniform on second and third:
>>> prior = np.array([3.5, 0.0, 0.0])
>>> priorlow = np.array([0.1, 0.0, 0.0])
>>> priorup = np.array([0.1, 0.0, 0.0])
>>> indparams = [x]
>>> func = quad
>>> ncpu = 7
>>> # MCMC sampling:
>>> mcmc_output = mc3.sample(
>>> data, uncert, func, params, indparams=indparams,
>>> sampler='snooker', pstep=pstep, ncpu=ncpu, pmin=pmin, pmax=pmax,
>>> prior=prior, priorlow=priorlow, priorup=priorup,
>>> leastsq='lm', nsamples=1e5, burnin=1000, plots=True)
>>> # Nested sampling:
>>> ns_output = mc3.sample(
>>> data, uncert, func, params, indparams=indparams,
>>> sampler='dynesty', pstep=pstep, ncpu=ncpu, pmin=pmin, pmax=pmax,
>>> prior=prior, priorlow=priorlow, priorup=priorup,
>>> leastsq='lm', plots=True)
>>> # See more examples and details at:
>>> # https://mc3.readthedocs.io/en/latest/mcmc_tutorial.html
>>> # https://mc3.readthedocs.io/en/latest/ns_tutorial.html
"""
# Logging object:
if isinstance(log, str):
log = mu.Log(log, append=resume)
| |
result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param V1Organization body: Organization body (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Organization
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.organization_plan_with_http_info(owner, body, **kwargs) # noqa: E501
def organization_plan_with_http_info(self, owner, body, **kwargs): # noqa: E501
"""Organization plan # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.organization_plan_with_http_info(owner, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param V1Organization body: Organization body (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Organization, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method organization_plan" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `organization_plan`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `organization_plan`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/orgs/{owner}/plan', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Organization', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_organization(self, owner, body, **kwargs): # noqa: E501
"""Patch organization # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_organization(owner, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param V1Organization body: Organization body (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Organization
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_organization_with_http_info(owner, body, **kwargs) # noqa: E501
def patch_organization_with_http_info(self, owner, body, **kwargs): # noqa: E501
"""Patch organization # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_organization_with_http_info(owner, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param V1Organization body: Organization body (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Organization, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_organization" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `patch_organization`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_organization`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/orgs/{owner}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Organization', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_organization_invitation(self, owner, body, **kwargs): # noqa: E501
"""Patch organization invitation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_organization_invitation(owner, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param V1OrganizationMember body: Organization body (required)
:param str email: Optional email.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1OrganizationMember
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_organization_invitation_with_http_info(owner, body, **kwargs) # noqa: E501
def patch_organization_invitation_with_http_info(self, owner, body, **kwargs): # noqa: E501
"""Patch organization invitation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_organization_invitation_with_http_info(owner, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param V1OrganizationMember body: Organization body (required)
:param str email: Optional email.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1OrganizationMember, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'body',
'email'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_organization_invitation" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `patch_organization_invitation`") # noqa: E501
# verify the required parameter 'body' is | |
# -*- coding: utf-8 -*-
"""
Automatic Colour Conversion Graph
=================================
Defines the automatic colour conversion graph objects:
- :func:`colour.describe_conversion_path`
- :func:`colour.convert`
"""
import inspect
import numpy as np
import textwrap
from collections import namedtuple
from copy import copy
from functools import partial
from pprint import pformat
from colour.colorimetry import (CCS_ILLUMINANTS, SDS_ILLUMINANTS,
TVS_ILLUMINANTS_HUNTERLAB)
from colour.colorimetry import (colorimetric_purity, complementary_wavelength,
dominant_wavelength, excitation_purity,
lightness, luminance, luminous_efficacy,
luminous_efficiency, luminous_flux, sd_to_XYZ,
whiteness, yellowness, wavelength_to_XYZ)
from colour.recovery import XYZ_to_sd
from colour.models import RGB_COLOURSPACE_sRGB
from colour.models import (
CAM02LCD_to_JMh_CIECAM02, CAM02SCD_to_JMh_CIECAM02,
CAM02UCS_to_JMh_CIECAM02, CAM16LCD_to_JMh_CAM16, CAM16SCD_to_JMh_CAM16,
CAM16UCS_to_JMh_CAM16, CMYK_to_CMY, CMY_to_CMYK, CMY_to_RGB, DIN99_to_Lab,
HSL_to_RGB, HSV_to_RGB, Hunter_Lab_to_XYZ, Hunter_Rdab_to_XYZ,
ICtCp_to_XYZ, IgPgTg_to_XYZ, IPT_to_XYZ, JMh_CAM16_to_CAM16LCD,
JMh_CAM16_to_CAM16SCD, JMh_CAM16_to_CAM16UCS, JMh_CIECAM02_to_CAM02LCD,
JMh_CIECAM02_to_CAM02SCD, JMh_CIECAM02_to_CAM02UCS, JzAzBz_to_XYZ,
LCHab_to_Lab, LCHuv_to_Luv, Lab_to_DIN99, Lab_to_LCHab, Lab_to_XYZ,
Luv_to_LCHuv, Luv_to_XYZ, Luv_to_uv, Luv_uv_to_xy, OSA_UCS_to_XYZ,
Oklab_to_XYZ, Prismatic_to_RGB, RGB_luminance, RGB_to_CMY, RGB_to_HSL,
RGB_to_HSV, RGB_to_Prismatic, RGB_to_RGB, RGB_to_XYZ, RGB_to_YCbCr,
RGB_to_YCoCg, RGB_to_YcCbcCrc, UCS_to_XYZ, UCS_to_uv, UCS_uv_to_xy,
UVW_to_XYZ, XYZ_to_Hunter_Lab, XYZ_to_Hunter_Rdab, XYZ_to_ICtCp,
XYZ_to_IgPgTg, XYZ_to_IPT, XYZ_to_JzAzBz, XYZ_to_Lab, XYZ_to_Luv,
XYZ_to_OSA_UCS, XYZ_to_Oklab, XYZ_to_RGB, XYZ_to_UCS, XYZ_to_UVW,
XYZ_to_hdr_CIELab, XYZ_to_hdr_IPT, XYZ_to_sRGB, XYZ_to_xy, XYZ_to_xyY,
YCbCr_to_RGB, YCoCg_to_RGB, YcCbcCrc_to_RGB, cctf_decoding, cctf_encoding,
hdr_CIELab_to_XYZ, hdr_IPT_to_XYZ, sRGB_to_XYZ, uv_to_Luv, uv_to_UCS,
xyY_to_XYZ, xyY_to_xy, xy_to_Luv_uv, xy_to_UCS_uv, xy_to_XYZ, xy_to_xyY)
from colour.notation import (HEX_to_RGB, RGB_to_HEX, munsell_value,
munsell_colour_to_xyY, xyY_to_munsell_colour)
from colour.quality import colour_quality_scale, colour_rendering_index
from colour.appearance import (
CAM_Specification_CAM16, CAM16_to_XYZ, CAM_Specification_CIECAM02,
CIECAM02_to_XYZ, XYZ_to_ATD95, XYZ_to_CAM16, XYZ_to_CIECAM02, XYZ_to_Hunt,
XYZ_to_LLAB, XYZ_to_Nayatani95, XYZ_to_RLAB)
from colour.temperature import CCT_to_uv, uv_to_CCT
from colour.utilities import (domain_range_scale, filter_kwargs, message_box,
required, tsplit, tstack, usage_warning)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'Conversion_Specification', 'CIECAM02_to_JMh_CIECAM02',
'JMh_CIECAM02_to_CIECAM02', 'CAM16_to_JMh_CAM16', 'JMh_CAM16_to_CAM16',
'XYZ_to_luminance', 'RGB_luminance_to_RGB',
'CONVERSION_SPECIFICATIONS_DATA', 'CONVERSION_GRAPH_NODE_LABELS',
'CONVERSION_SPECIFICATIONS', 'CONVERSION_GRAPH',
'describe_conversion_path', 'convert'
]
class Conversion_Specification(
        namedtuple('Conversion_Specification',
                   ('source', 'target', 'conversion_function'))):
    """
    Conversion specification for *Colour* graph for automatic colour
    conversion describing two nodes and the edge in the graph.

    Parameters
    ----------
    source : unicode
        Source node in the graph.
    target : unicode
        Target node in the graph.
    conversion_function : callable
        Callable converting from the ``source`` node to the ``target`` node.
    """

    def __new__(cls, source=None, target=None, conversion_function=None):
        # Node names are normalised to lowercase so that graph lookups are
        # case-insensitive.  Guard against the ``None`` defaults, which
        # previously raised ``AttributeError`` when the class was
        # instantiated without arguments.
        source = source.lower() if source is not None else None
        target = target.lower() if target is not None else None
        return super(Conversion_Specification, cls).__new__(
            cls, source, target, conversion_function)
def CIECAM02_to_JMh_CIECAM02(CAM_Specification_CIECAM02):
    """
    Converts from *CIECAM02* specification to *CIECAM02* :math:`JMh`
    correlates.
    Parameters
    ----------
    CAM_Specification_CIECAM02 : CAM_Specification_CIECAM02
        *CIECAM02* colour appearance model specification.
    Returns
    -------
    ndarray
        *CIECAM02* :math:`JMh` correlates.
    Examples
    --------
    >>> specification = CAM_Specification_CIECAM02(J=41.731091132513917,
    ...                                            M=0.108842175669226,
    ...                                            h=219.048432658311780)
    >>> CIECAM02_to_JMh_CIECAM02(specification)  # doctest: +ELLIPSIS
    array([ 4.1731091...e+01, 1.0884217...e-01, 2.1904843...e+02])
    """
    specification = CAM_Specification_CIECAM02
    # Stack lightness J, colourfulness M and hue angle h along the last
    # axis to produce the JMh array.
    return tstack([specification.J, specification.M, specification.h])
def JMh_CIECAM02_to_CIECAM02(JMh):
    """
    Converts from *CIECAM02* :math:`JMh` correlates to *CIECAM02*
    specification.
    Parameters
    ----------
    JMh : array_like
        *CIECAM02* :math:`JMh` correlates.
    Returns
    -------
    CAM_Specification_CIECAM02
        *CIECAM02* colour appearance model specification.
    Examples
    --------
    >>> JMh = np.array([4.17310911e+01, 1.08842176e-01, 2.19048433e+02])
    >>> JMh_CIECAM02_to_CIECAM02(JMh)  # doctest: +ELLIPSIS
    CAM_Specification_CIECAM02(J=41.7310911..., C=None, h=219.0484329..., \
s=None, Q=None, M=0.1088421..., H=None, HC=None)
    """
    # Split the last axis into the three correlates and rebuild a partial
    # specification (all other correlates stay at their None defaults).
    lightness, colourfulness, hue = tsplit(JMh)
    return CAM_Specification_CIECAM02(J=lightness, M=colourfulness, h=hue)
def CAM16_to_JMh_CAM16(CAM_Specification_CAM16):
    """
    Converts from *CAM16* specification to *CAM16* :math:`JMh` correlates.
    Parameters
    ----------
    CAM_Specification_CAM16 : CAM_Specification_CAM16
        *CAM16* colour appearance model specification.
    Returns
    -------
    ndarray
        *CAM16* :math:`JMh` correlates.
    Examples
    --------
    >>> specification = CAM_Specification_CAM16(J=41.731207905126638,
    ...                                         M=0.107436772335905,
    ...                                         h=217.067959767393010)
    >>> CAM16_to_JMh_CAM16(specification)  # doctest: +ELLIPSIS
    array([ 4.1731207...e+01, 1.0743677...e-01, 2.1706796...e+02])
    """
    specification = CAM_Specification_CAM16
    # Stack lightness J, colourfulness M and hue angle h along the last
    # axis to produce the JMh array.
    return tstack([specification.J, specification.M, specification.h])
def JMh_CAM16_to_CAM16(JMh):
    """
    Converts from *CAM16* :math:`JMh` correlates to *CAM16* specification.
    Parameters
    ----------
    JMh : array_like
        *CAM16* :math:`JMh` correlates.
    Returns
    -------
    CAM_Specification_CAM16
        *CAM16* colour appearance model specification.
    Examples
    --------
    >>> JMh = np.array([4.17312079e+01, 1.07436772e-01, 2.17067960e+02])
    >>> JMh_CAM16_to_CAM16(JMh) # doctest: +ELLIPSIS
    CAM_Specification_CAM16(J=41.7312079..., C=None, h=217.06796..., s=None, \
Q=None, M=0.1074367..., H=None, HC=None)
    """
    # Split the last axis into J, M, h; the remaining correlates of the
    # specification keep their None defaults.
    J, M, h = tsplit(JMh)
    return CAM_Specification_CAM16(J=J, M=M, h=h)
def XYZ_to_luminance(XYZ):
    """
    Converts from *CIE XYZ* tristimulus values to *luminance* :math:`Y`.
    Parameters
    ----------
    XYZ : array_like
        *CIE XYZ* tristimulus values.
    Returns
    -------
    array_like
        *Luminance* :math:`Y`.
    Examples
    --------
    >>> XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
    >>> XYZ_to_luminance(XYZ)  # doctest: +ELLIPSIS
    0.1219722...
    """
    # The luminance is simply the middle (Y) component of the tristimulus
    # values split along the last axis.
    components = tsplit(XYZ)
    return components[1]
def RGB_luminance_to_RGB(Y):
    """
    Converts from *luminance* :math:`Y` to *RGB*.
    Parameters
    ----------
    Y : array_like
        *Luminance* :math:`Y`.
    Returns
    -------
    array_like
        *RGB*.
    Examples
    --------
    >>> RGB_luminance_to_RGB(0.123014562384318)  # doctest: +ELLIPSIS
    array([ 0.1230145...,  0.1230145...,  0.1230145...])
    """
    # Replicate the luminance onto all three channels, i.e. a grey RGB
    # triplet with the given luminance.
    return tstack([Y] * 3)
# Module-level defaults baked into the partial() conversion functions of
# CONVERSION_SPECIFICATIONS_DATA below: the same D65 illuminant expressed as
# a name, a spectral distribution, CIE xy chromaticity coordinates and
# CIE XYZ tristimulus values, plus the default (sRGB) RGB colourspace.
_DEFAULT_ILLUMINANT = 'D65'
"""
Default automatic colour conversion graph illuminant name.
_DEFAULT_ILLUMINANT : unicode
"""
_SD_DEFAULT_ILLUMINANT = SDS_ILLUMINANTS[_DEFAULT_ILLUMINANT]
"""
Default automatic colour conversion graph illuminant spectral distribution.
_SD_DEFAULT_ILLUMINANT : SpectralDistribution
"""
_CCS_DEFAULT_ILLUMINANT = CCS_ILLUMINANTS[
    'CIE 1931 2 Degree Standard Observer'][_DEFAULT_ILLUMINANT]
"""
Default automatic colour conversion graph illuminant *CIE xy* chromaticity
coordinates.
_CCS_DEFAULT_ILLUMINANT : ndarray
"""
_TVS_DEFAULT_ILLUMINANT = xy_to_XYZ(_CCS_DEFAULT_ILLUMINANT)
"""
Default automatic colour conversion graph illuminant *CIE XYZ* tristimulus
values.
_TVS_DEFAULT_ILLUMINANT : ndarray
"""
_RGB_COLOURSPACE_DEFAULT = RGB_COLOURSPACE_sRGB
"""
Default automatic colour conversion graph *RGB* colourspace.
_RGB_COLOURSPACE_DEFAULT : RGB_COLOURSPACE_RGB
"""
CONVERSION_SPECIFICATIONS_DATA = [
# Colorimetry
('Spectral Distribution', 'CIE XYZ',
partial(sd_to_XYZ, illuminant=_SD_DEFAULT_ILLUMINANT)),
('CIE XYZ', 'Spectral Distribution', XYZ_to_sd),
('Spectral Distribution', 'Luminous Flux', luminous_flux),
('Spectral Distribution', 'Luminous Efficiency', luminous_efficiency),
('Spectral Distribution', 'Luminous Efficacy', luminous_efficacy),
('CIE XYZ', 'Luminance', XYZ_to_luminance),
('Luminance', 'Lightness', lightness),
('Lightness', 'Luminance', luminance),
('CIE XYZ', 'Whiteness', partial(whiteness,
XYZ_0=_TVS_DEFAULT_ILLUMINANT)),
('CIE XYZ', 'Yellowness', yellowness),
('CIE xy', 'Colorimetric Purity',
partial(colorimetric_purity, xy_n=_CCS_DEFAULT_ILLUMINANT)),
('CIE xy', 'Complementary Wavelength',
partial(complementary_wavelength, xy_n=_CCS_DEFAULT_ILLUMINANT)),
('CIE xy', 'Dominant Wavelength',
partial(dominant_wavelength, xy_n=_CCS_DEFAULT_ILLUMINANT)),
('CIE xy', 'Excitation Purity',
partial(excitation_purity, xy_n=_CCS_DEFAULT_ILLUMINANT)),
('Wavelength', 'CIE XYZ', wavelength_to_XYZ),
# Colour Models
('CIE XYZ', 'CIE xyY', XYZ_to_xyY),
('CIE xyY', 'CIE XYZ', xyY_to_XYZ),
('CIE xyY', 'CIE xy', xyY_to_xy),
('CIE xy', 'CIE xyY', xy_to_xyY),
('CIE XYZ', 'CIE xy', XYZ_to_xy),
('CIE xy', 'CIE XYZ', xy_to_XYZ),
('CIE XYZ', 'CIE Lab', XYZ_to_Lab),
('CIE Lab', 'CIE XYZ', Lab_to_XYZ),
('CIE Lab', 'CIE LCHab', Lab_to_LCHab),
('CIE LCHab', 'CIE Lab', LCHab_to_Lab),
('CIE XYZ', 'CIE Luv', XYZ_to_Luv),
('CIE Luv', 'CIE XYZ', Luv_to_XYZ),
('CIE Luv', 'CIE Luv uv', Luv_to_uv),
('CIE Luv uv', 'CIE Luv', uv_to_Luv),
('CIE Luv uv', 'CIE xy', Luv_uv_to_xy),
('CIE xy', 'CIE Luv uv', xy_to_Luv_uv),
('CIE Luv', 'CIE LCHuv', Luv_to_LCHuv),
('CIE LCHuv', 'CIE Luv', LCHuv_to_Luv),
('CIE XYZ', 'CIE UCS', XYZ_to_UCS),
('CIE UCS', 'CIE XYZ', UCS_to_XYZ),
('CIE UCS', 'CIE UCS uv', UCS_to_uv),
('CIE UCS uv', 'CIE UCS', uv_to_UCS),
('CIE UCS uv', 'CIE xy', UCS_uv_to_xy),
('CIE xy', 'CIE UCS uv', xy_to_UCS_uv),
('CIE XYZ', 'CIE UVW', XYZ_to_UVW),
('CIE UVW', 'CIE XYZ', UVW_to_XYZ),
('CIE Lab', 'DIN99', Lab_to_DIN99),
('DIN99', 'CIE Lab', DIN99_to_Lab),
('CIE XYZ', 'hdr CIELab', XYZ_to_hdr_CIELab),
('hdr CIELab', 'CIE XYZ', hdr_CIELab_to_XYZ),
('CIE XYZ', 'Hunter Lab',
partial(
XYZ_to_Hunter_Lab,
XYZ_n=TVS_ILLUMINANTS_HUNTERLAB['CIE 1931 2 Degree Standard Observer']
['D65'].XYZ_n / 100)),
('Hunter Lab', 'CIE XYZ',
partial(
Hunter_Lab_to_XYZ,
XYZ_n=TVS_ILLUMINANTS_HUNTERLAB['CIE 1931 2 Degree Standard Observer']
['D65'].XYZ_n / 100)),
('CIE XYZ', '<NAME>',
partial(
XYZ_to_Hunter_Rdab,
XYZ_n=TVS_ILLUMINANTS_HUNTERLAB['CIE 1931 2 Degree Standard Observer']
['D65'].XYZ_n / 100)),
('<NAME>', 'CIE XYZ',
partial(
Hunter_Rdab_to_XYZ,
XYZ_n=TVS_ILLUMINANTS_HUNTERLAB['CIE 1931 2 Degree Standard Observer']
['D65'].XYZ_n / 100)),
('CIE XYZ', 'ICtCp', XYZ_to_ICtCp),
('ICtCp', 'CIE XYZ', ICtCp_to_XYZ),
('CIE XYZ', 'IgPgTg', XYZ_to_IgPgTg),
('IgPgTg', 'CIE XYZ', IgPgTg_to_XYZ),
('CIE XYZ', 'IPT', XYZ_to_IPT),
('IPT', 'CIE XYZ', IPT_to_XYZ),
('CIE XYZ', 'JzAzBz', XYZ_to_JzAzBz),
('JzAzBz', 'CIE XYZ', JzAzBz_to_XYZ),
('CIE XYZ', 'hdr IPT', XYZ_to_hdr_IPT),
('hdr IPT', 'CIE XYZ', hdr_IPT_to_XYZ),
('CIE XYZ', 'OSA UCS', XYZ_to_OSA_UCS),
('OSA UCS', 'CIE XYZ', OSA_UCS_to_XYZ),
('CIE XYZ', 'Oklab', XYZ_to_Oklab),
('Oklab', 'CIE XYZ', Oklab_to_XYZ),
# RGB Colour Models
('CIE XYZ', 'RGB',
partial(
XYZ_to_RGB,
illuminant_XYZ=_RGB_COLOURSPACE_DEFAULT.whitepoint,
illuminant_RGB=_RGB_COLOURSPACE_DEFAULT.whitepoint,
matrix_XYZ_to_RGB=_RGB_COLOURSPACE_DEFAULT.matrix_XYZ_to_RGB)),
('RGB', 'CIE XYZ',
partial(
RGB_to_XYZ,
illuminant_RGB=_RGB_COLOURSPACE_DEFAULT.whitepoint,
illuminant_XYZ=_RGB_COLOURSPACE_DEFAULT.whitepoint,
matrix_RGB_to_XYZ=_RGB_COLOURSPACE_DEFAULT.matrix_RGB_to_XYZ)),
('RGB', 'Scene-Referred RGB',
partial(
RGB_to_RGB,
input_colourspace=_RGB_COLOURSPACE_DEFAULT,
output_colourspace=_RGB_COLOURSPACE_DEFAULT)),
('Scene-Referred RGB', 'RGB',
partial(
RGB_to_RGB,
input_colourspace=_RGB_COLOURSPACE_DEFAULT,
output_colourspace=_RGB_COLOURSPACE_DEFAULT)),
('RGB', 'HSV', RGB_to_HSV),
('HSV', 'RGB', HSV_to_RGB),
('RGB', 'HSL', RGB_to_HSL),
('HSL', 'RGB', HSL_to_RGB),
('CMY', 'RGB', CMY_to_RGB),
('RGB', 'CMY', RGB_to_CMY),
('CMY', 'CMYK', CMY_to_CMYK),
('CMYK', 'CMY', CMYK_to_CMY),
('RGB', 'RGB Luminance',
partial(
RGB_luminance,
primaries=_RGB_COLOURSPACE_DEFAULT.primaries,
whitepoint=_RGB_COLOURSPACE_DEFAULT.whitepoint)),
('RGB Luminance', 'RGB', RGB_luminance_to_RGB),
('RGB', 'Prismatic', RGB_to_Prismatic),
('Prismatic', 'RGB', Prismatic_to_RGB),
('Output-Referred RGB', 'YCbCr', RGB_to_YCbCr),
('YCbCr', 'Output-Referred RGB', YCbCr_to_RGB),
('RGB', 'YcCbcCrc', RGB_to_YcCbcCrc),
('YcCbcCrc', 'RGB', YcCbcCrc_to_RGB),
('Output-Referred RGB', 'YCoCg', RGB_to_YCoCg),
('YCoCg', 'Output-Referred RGB', YCoCg_to_RGB),
('RGB', 'Output-Referred RGB', cctf_encoding),
('Output-Referred RGB', 'RGB', cctf_decoding),
('Scene-Referred RGB', 'Output-Referred RGB', cctf_encoding),
('Output-Referred RGB', 'Scene-Referred RGB', cctf_decoding),
('CIE XYZ', 'sRGB', XYZ_to_sRGB),
('sRGB', 'CIE XYZ', sRGB_to_XYZ),
# Colour Notation Systems
('Output-Referred RGB', 'Hexadecimal', RGB_to_HEX),
('Hexadecimal', 'Output-Referred RGB', HEX_to_RGB),
('CIE xyY', 'Munsell Colour', xyY_to_munsell_colour),
('Munsell Colour', 'CIE xyY', munsell_colour_to_xyY),
('Luminance', 'Munsell Value', munsell_value),
('Munsell Value', 'Luminance', partial(luminance, method='ASTM D1535')),
# Colour Quality
('Spectral Distribution', 'CRI', colour_rendering_index),
('Spectral Distribution', 'CQS', colour_quality_scale),
# Colour Temperature
('CCT', 'CIE UCS uv', CCT_to_uv),
('CIE UCS uv', 'CCT', uv_to_CCT),
# Advanced Colorimetry
('CIE XYZ', 'Hunt',
partial(
XYZ_to_Hunt,
XYZ_w=_TVS_DEFAULT_ILLUMINANT,
XYZ_b=_TVS_DEFAULT_ILLUMINANT,
L_A=80 * 0.2,
CCT_w=6504)),
('CIE XYZ', 'ATD95',
partial(
XYZ_to_ATD95,
XYZ_0=_TVS_DEFAULT_ILLUMINANT,
Y_0=80 * 0.2,
k_1=0,
k_2=(15 + 50) / 2)),
('CIE XYZ', 'CIECAM02',
partial(
XYZ_to_CIECAM02,
XYZ_w=_TVS_DEFAULT_ILLUMINANT,
L_A=64 / np.pi * 0.2,
Y_b=20)),
('CIECAM02', 'CIE XYZ',
partial(
CIECAM02_to_XYZ,
XYZ_w=_TVS_DEFAULT_ILLUMINANT,
L_A=64 / np.pi * 0.2,
Y_b=20)),
('CIECAM02', 'CIECAM02 JMh', CIECAM02_to_JMh_CIECAM02),
('CIECAM02 JMh', 'CIECAM02', JMh_CIECAM02_to_CIECAM02),
('CIE XYZ', 'CAM16',
partial(
XYZ_to_CAM16,
XYZ_w=_TVS_DEFAULT_ILLUMINANT,
L_A=64 / np.pi * 0.2,
Y_b=20)),
('CAM16', 'CIE XYZ',
partial(
CAM16_to_XYZ,
XYZ_w=_TVS_DEFAULT_ILLUMINANT,
L_A=64 / np.pi * 0.2,
Y_b=20)),
('CAM16', 'CAM16 JMh', CAM16_to_JMh_CAM16),
('CAM16 JMh', 'CAM16', JMh_CAM16_to_CAM16),
('CIE XYZ', 'LLAB',
| |
== 1:
ydata, xdata = np.meshgrid(ydata, xdata)
if xdata:
surface = axes.plot_surface(xdata, ydata, image, **kwargs)
else:
surface = axes.plot_surface(image, **kwargs)
axes.axis(axlim)
return surface
def create_html_page(body_lines=(), header_lines=()):
    """
    Create a complete html page string.

    :param body_lines: iterable of str, lines placed inside <body>
    :param header_lines: iterable of str, lines placed inside <head>
        (the built-in stylesheet is always prepended)
    :return: str, full html document
    """
    # FIX: '.ScanImage' originally declared "float: left:" (colon instead of
    # semicolon), which invalidated the declaration.
    css = """<style>
    .ScanBox {
        border: 1px solid black;
        padding-top: 10px;
        padding-bottom: 10px;
        padding-left: 10px;
        width: 90pc;
        height: 400px;
        resize: vertical;
    }
    .ScanDetails {
        float: left;
        width: 40pc;
    }
    .ScanImage{
        float: left;
        padding-left:2px;
        width: 22pc;
    }
    </style>"""
    header_lines = css.splitlines() + list(header_lines)
    # FIX: the original emitted a duplicate, never-closed "<html>" tag right
    # after the '<html lang="en">' opening tag.
    html = "<!doctype html>\n<html lang=\"en\">\n\n"
    html += "<head>\n %s\n</head>\n\n" % '\n '.join(header_lines)
    html += "<body>\n %s\n</body>" % '\n '.join(body_lines)
    html += "\n\n</html>\n"
    return html
def create_figure_div(title, details, fig1_file, fig2_file=None):
    """
    Create html code to generate scan details div

    :param title: str title (single line)
    :param details: str details of scan (multi-line)
    :param fig1_file: str location of the first image file
    :param fig2_file: str location of the second image file, or None to
        omit the second image
    :return: list of str html lines
    """
    detail_div = [" <div class=\"ScanDetails\">"]
    detail_div += ['  %s' % details.replace('\n', '<br>')]
    detail_div += [" </div>"]
    image1 = " <img src=\"%s\" alt=\"%s\" class=\"ScanImage\">" % (
        fig1_file, title)
    if fig2_file is None:
        # Keep an empty placeholder so the list layout is the same whether
        # or not a second image is supplied.
        image2 = ""
    else:
        image2 = " <img src=\"%s\" alt=\"%s\" class=\"ScanImage\">" % (
            fig2_file, title)
    html = [
        "<div class=\"ScanBox\">",
        " <h3>%s</h3>" % title.replace('\n', '<br>'),
    ]
    html = html + detail_div
    # NOTE: the original had a stray "" literal after "</div>" with no comma;
    # adjacent-string concatenation made it a no-op, so it has been removed.
    html += [
        image1,
        image2,
        "</div>"
    ]
    return html
def create_plotly_blob(data_list, xlabel, ylabel, title):
    """
    Create plotly line plot object, useful for jupyter plots or generation
    of interactive html plots.
    E.G.
      import plotly.graph_objects as go
      blob = create_plotly_blob([(xdata1, ydata1, label1, True)], 'x', 'y', 'title')
      fig = go.Figure(blob)
      fig.show()
    Each element of data_list is a 4-tuple (xdata, ydata, label, visible):
      xdata: 1d array of x-axis data
      ydata: 1d array of y-axis data
      label: str label description
      visible: bool, if False the trace only appears in the legend
    :param data_list: list of 4 element tuples (xdata, ydata, label, visible)
    :param xlabel: str x-axis label
    :param ylabel: str y-axis label
    :param title: str plot title
    :return: dict
    """
    # Traces hidden by default are marked 'legendonly' so plotly still
    # lists them in the legend.
    traces = [
        {
            'mode': 'markers+lines',
            'name': label,
            'type': 'scatter',
            'visible': True if visible else 'legendonly',
            'x': list(xdata),
            'y': list(ydata),
        }
        for xdata, ydata, label, visible in data_list
    ]
    return {
        'data': traces,
        'layout': {
            'font': {'family': 'Courier New, monospace', 'size': 18},
            'legend': {'title': {'text': 'Scannables'}},
            # plotly titles use html line breaks.
            'title': {'text': title.replace('\n', '<br>')},
            'xaxis': {'title': {'text': xlabel}},
            'yaxis': {'title': {'text': ylabel}},
        },
    }
"----------------------------------------------------------------------------------------------------------------------"
"----------------------------------------------- ScanPlotManager ------------------------------------------------------"
"----------------------------------------------------------------------------------------------------------------------"
class ScanPlotManager:
    """
    ScanPlotManager
    scan.plot = ScanPlotManager(scan)
    scan.plot() # plot default axes
    scan.plot.plot(xaxis, yaxis) # creates figure
    scan.plot.plotline(xaxis, yaxis) # plots line on current figure
    scan.plot.plot_image() # create figure and display detector image
    Options called from babelscan.Scan:
    'plot_show': True >> automatically call "plt.show" after plot command
    :param scan: babelscan.Scan
    """
    def __init__(self, scan):
        # The Scan instance supplying data (get_plot_data), titles and options.
        self.scan = scan
    def __call__(self, *args, **kwargs):
        """Calls ScanPlotManager.plot(...)"""
        return self.plot(*args, **kwargs)
    def plotline(self, xaxis='axes', yaxis='signal', *args, **kwargs):
        """
        Plot scanned datasets on matplotlib axes subplot
        :param xaxis: str name or address of array to plot on x axis
        :param yaxis: str name or address of array to plot on y axis
        :param args: given directly to plt.plot(..., *args, **kwars)
        :param axes: matplotlib.axes subplot, or None to use plt.gca()
        :param kwargs: given directly to plt.plot(..., *args, **kwars)
        :return: list lines object, output of plot
        """
        xdata, ydata, yerror, xname, yname = self.scan.get_plot_data(xaxis, yaxis, None, None)
        # Default the line label to the scan's own label unless given.
        if 'label' not in kwargs:
            kwargs['label'] = self.scan.label()
        # 'axes' is documented as a keyword but must not leak into plt.plot kwargs.
        axes = kwargs.pop('axes') if 'axes' in kwargs else None
        lines = plot_line(axes, xdata, ydata, None, *args, **kwargs)
        return lines
    def plot(self, xaxis='axes', yaxis='signal', *args, **kwargs):
        """
        Create matplotlib figure with plot of the scan
        :param axes: matplotlib.axes subplot
        :param xaxis: str name or address of array to plot on x axis
        :param yaxis: str name or address of array to plot on y axis, also accepts list of names for multiplt plots
        :param args: given directly to plt.plot(..., *args, **kwars)
        :param axes: matplotlib.axes subplot, or None to create a figure
        :param kwargs: given directly to plt.plot(..., *args, **kwars)
        :return: axes object
        """
        # Check for multiple inputs on yaxis
        ylist = fn.liststr(yaxis)
        # Create figure
        if 'axes' in kwargs:
            axes = kwargs.pop('axes')
        else:
            axes = create_axes(subplot=111)
        # Fallback labels in case ylist is empty and the loop never runs.
        xname, yname = xaxis, yaxis
        for yaxis in ylist:
            xdata, ydata, yerror, xname, yname = self.scan.get_plot_data(xaxis, yaxis, None, None)
            plot_line(axes, xdata, ydata, None, *args, label=yname, **kwargs)
        # Add labels
        ttl = self.scan.title()
        labels(ttl, xname, yname, legend=True, axes=axes)
        if self.scan.options('plot_show'):
            plt.show()
        return axes
    def plot_image(self, index=None, xaxis='axes', axes=None, clim=None, cmap=None, colorbar=False, **kwargs):
        """
        Plot image in matplotlib figure (if available)
        :param index: int, detector image index, 0-length of scan, if None, use centre index
        :param xaxis: name or address of xaxis dataset
        :param axes: matplotlib axes to plot on (None to create figure)
        :param clim: [min, max] colormap cut-offs (None for auto)
        :param cmap: str colormap name (None for auto)
        :param colorbar: False/ True add colorbar to plot
        :param kwargs: additinoal arguments for plot_detector_image
        :return: axes object
        """
        # x axis data
        xname, xdata = self.scan._name_eval(xaxis)
        # image data
        im = self.scan.image(index)
        # For the default (None) or summed image, annotate with the centre
        # x-axis value of the scan.
        if index is None or index == 'sum':
            xvalue = xdata[np.size(xdata) // 2]
        else:
            xvalue = xdata[index]
        # Create figure
        if axes is None:
            axes = create_axes(subplot=111)
        plot_detector_image(axes, im, **kwargs)
        # labels
        ttl = '%s\n%s [%s] = %s' % (self.scan.title(), xname, index, xvalue)
        labels(ttl, colorbar=colorbar, colorbar_label='Detector', axes=axes)
        colormap(clim, cmap, axes)
        if self.scan.options('plot_show'):
            plt.show()
        return axes
    def detail_plot(self, xaxis='axes', yaxis='signal', index=None, cmap=None, **kwargs):
        """
        Create matplotlib figure with plot of the scan
        :param axes: matplotlib.axes subplot
        :param xaxis: str name or address of array to plot on x axis
        :param yaxis: str name or address of array to plot on y axis, also accepts list of names for multiplt plots
        :param args: given directly to plt.plot(..., *args, **kwars)
        :param axes: matplotlib.axes subplot, or None to create a figure
        :param kwargs: given directly to plt.plot(..., *args, **kwars)
        :return: axes object
        """
        # Create figure
        fig, ((lt, rt), (lb, rb)) = plt.subplots(2, 2, figsize=[FIG_SIZE[0] * 1.2, FIG_SIZE[1] * 1.2], dpi=FIG_DPI)
        fig.subplots_adjust(hspace=0.35, left=0.1, right=0.95)
        # Top left - line plot
        self.plot(xaxis, yaxis, axes=lt, **kwargs)
        # Top right - image plot
        try:
            self.plot_image(index, xaxis, cmap=cmap, axes=rt)
        except (FileNotFoundError, KeyError, TypeError):
            # Scan has no detector image data; leave a placeholder panel.
            rt.text(0.5, 0.5, 'No Image')
            rt.set_axis_off()
        # Bottom-Left - details
        details = str(self.scan)
        lb.text(-0.1, 0.8, details, multialignment="left", fontsize=12, wrap=True)
        lb.set_axis_off()
        rb.set_axis_off()
        if self.scan.options('plot_show'):
            plt.show()
        return fig
    def plotly_blob(self, xaxis='axes', yaxis='signal'):
        """
        Create plotly line plot object, useful for jupyter plots or generation of interactive html plots
        E.G.
        import plotly.graph_objects as go
        blob = scan.plot.plotly_blob('axes', ['signal', 'signal/2'])
        fig = go.Figure(blob)
        fig.show()
        :param xaxis: str name or address of array to plot on x axis
        :param yaxis: str name or address of array to plot on y axis, also accepts list of names for multiplt plots
        :return: dict
        """
        # Check for multiple inputs on yaxis
        ylist = fn.liststr(yaxis)
        # Fallback labels in case ylist is empty and the loop never runs.
        xname, yname = xaxis, yaxis
        data_list = []
        for yaxis in ylist:
            xdata, ydata, yerror, xname, yname = self.scan.get_plot_data(xaxis, yaxis, None, None)
            data_list += [(xdata, ydata, yname, True)]
        ttl = self.scan.title()
        return create_plotly_blob(data_list, xname, yname, ttl)
"----------------------------------------------------------------------------------------------------------------------"
"-------------------------------------------- MultiScanPlotManager ----------------------------------------------------"
"----------------------------------------------------------------------------------------------------------------------"
class MultiScanPlotManager:
"""
    MultiScanPlotManager
    :param multiscan: babelscan.MultiScan
"""
    def __init__(self, multiscan):
        # The MultiScan container whose combined data will be plotted.
        self.multiscan = multiscan
    def __call__(self, *args, **kwargs):
        """Calls MultiScanPlotManager.plot(...)"""
        return self.plot(*args, **kwargs)
    def plot_simple(self, xname, yname, *args, **kwargs):
        """
        Simple plot method, retrieves x,y data and plots using plt.plot
        :param xname: str name or address of dataset to plot on the x axis
        :param yname: str name or address of dataset to plot on the y axis
        :param args, kwargs: same as plt.plot(x,y, ...); the keyword 'axes'
            may be given as a matplotlib axes to plot on (None creates one)
        :return: axis
        """
        # Get data
        xdata, ydata, xlabel, ylabel = self.multiscan.get_plot_data(xname, yname)
        # Create figure
        if 'axes' in kwargs:
            axes = kwargs.pop('axes')
        else:
            axes = create_axes(subplot=111)
        axes.plot(xdata, ydata, *args, **kwargs)
        axes.set_xlabel(xlabel)
        axes.set_ylabel(ylabel)
        axes.set_title(self.multiscan.title())
        # Add legend if multiple arrays added
        if np.ndim(xdata[0]) > 0:
            # xdata is a list of arrays
            scan_labels = self.multiscan.labels()
            axes.legend(scan_labels)
        return axes
def plot(self, xaxis='axes', yaxis='signal', *args, **kwargs):
"""
Create matplotlib figure with plot of the scan
:param axes: matplotlib.axes | |
# Standard libraray imports
from __future__ import print_function
from collections import deque
import json
import multiprocessing
import os
import shlex
import shutil
import subprocess
import sys
import time
# External module imports.
import psutil
def _make_command_list(command):
if not isinstance(command, (list, tuple)):
return shlex.split(command)
else:
return command
def free_cores():
    """Return the (rounded) number of physical cores currently idle."""
    # cpu_percent samples utilisation over a 0.5 second window; the idle
    # fraction of the physical core count approximates spare capacity.
    idle_fraction = 1.0 - psutil.cpu_percent(interval=0.5) / 100.
    return round(idle_fraction * psutil.cpu_count(logical=False), 0)
def total_cores():
    # Number of physical (non-hyper-threaded) cores on this machine.
    return psutil.cpu_count(logical=False)
def cores_required(n_core):
    """Return the number of cores a job requires.

    Non-negative values are taken as-is; a negative value means a fraction
    of the machine, ``total_cores()/(-n_core)``.
    """
    if n_core >= 0:
        return n_core
    return int(total_cores() / (-n_core))
def threads_required(n_thread, n_core):
    """Return the OMP thread count for a job.

    Non-negative values are taken as-is; a negative value means
    ``cores_required(n_core) * (-n_thread)`` threads.
    """
    if n_thread >= 0:
        return n_thread
    return int(cores_required(n_core) * (-n_thread))
class Job(object):
    """A single external command whose output is captured in ``output_dir``.

    ``stdout.txt``/``stderr.txt`` capture the process streams, and
    ``job_info.json`` records the lifecycle (start/end time, status,
    exit code, pid).  The job itself is executed in a child
    ``multiprocessing.Process`` which supervises the subprocess.
    """
    def __init__(self, command, output_dir, n_core=1, n_thread=1, env=None):
        """Constructor
        Note that `n_core` is used to schedule a task on a machine which has
        that many free cores. This is not used to run the job but only used by
        the scheduler. The number can be any integer. When the number is
        negative, it will use the value of `total_cores()/(-n_core)`. This
        value may be used to set the number of threads as discussed below.
        `n_thread` is used to set the `OMP_NUM_THREADS`. Note that if
        `n_thread` is set to `None`, the environment variable is not set. If a
        positive integer is given that specific number is used. If the number
        is negative, then the number of threads is set to `n_core*(-n_thread)`,
        i.e. the product of the number of cores and the negative of the number
        given.
        """
        self.command = _make_command_list(command)
        # Keep the user-supplied env separately so to_dict() can round-trip it.
        self._given_env = env
        # Run with the current environment overlaid with any user variables.
        self.env = dict(os.environ)
        if env is not None:
            self.env.update(env)
        if n_thread is not None:
            nt = threads_required(n_thread, n_core)
            self.env['OMP_NUM_THREADS'] = str(nt)
        self.n_core = n_core
        self.n_thread = n_thread
        self.output_dir = output_dir
        # Remember whether the directory pre-existed so clean() never deletes
        # a directory this job did not create.
        self.output_already_exists = os.path.exists(self.output_dir)
        self.stderr = os.path.join(self.output_dir, 'stderr.txt')
        self.stdout = os.path.join(self.output_dir, 'stdout.txt')
        self._info_file = os.path.join(self.output_dir, 'job_info.json')
        self.proc = None
    def substitute_in_command(self, basename, substitute):
        """Replace occurrence of given basename with the substitute.
        This is useful where the user asks to run ['python', 'script.py'] and
        we wish to change the 'python' to a specific Python. Normally this is
        not needed as the PATH is set to pick up the right Python. However, in
        the rare cases where this rewriting is needed, this method is
        available.
        """
        self.command = [
            substitute if os.path.basename(arg) == basename else arg
            for arg in self.command
        ]
    def to_dict(self):
        """Return a picklable dict from which this job can be re-created."""
        state = {
            key: getattr(self, key)
            for key in ('command', 'output_dir', 'n_core', 'n_thread')
        }
        state['env'] = self._given_env
        return state
    def pretty_command(self):
        """Return the command as a single printable string."""
        return ' '.join(self.command)
    def get_stderr(self):
        """Return the captured standard error of the job as a string."""
        with open(self.stderr) as fp:
            return fp.read()
    def get_stdout(self):
        """Return the captured standard output of the job as a string."""
        with open(self.stdout) as fp:
            return fp.read()
    def get_info(self):
        """Return the job lifecycle info dict (status, times, exitcode, pid)."""
        return self._read_info()
    def _write_info(self, info):
        # Persist the lifecycle info so other processes can inspect the job.
        with open(self._info_file, 'w') as fp:
            json.dump(info, fp)
    def _read_info(self):
        if not os.path.exists(self._info_file):
            return {'status': 'not started'}
        with open(self._info_file, 'r') as fp:
            try:
                return json.load(fp)
            except ValueError:
                # The supervising process may be mid-write; treat a partial
                # file as a still-running job.
                return {'status': 'running'}
    def _run(self):  # pragma: no cover
        # This is run in a multiprocessing.Process instance so does not
        # get covered.  Context managers guarantee the stream files are
        # closed even if Popen or wait raises.
        with open(self.stdout, 'wb') as stdout, \
                open(self.stderr, 'wb') as stderr:
            proc = subprocess.Popen(
                self.command, stdout=stdout, stderr=stderr, env=self.env
            )
            info = dict(
                start=time.ctime(), end='', status='running',
                exitcode=None, pid=proc.pid
            )
            self._write_info(info)
            proc.wait()
            status = 'error' if proc.returncode != 0 else 'done'
            info.update(end=time.ctime(), status=status,
                        exitcode=proc.returncode)
            self._write_info(info)
    def run(self):
        """Start the job asynchronously in a supervising child process."""
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)
        # Mark as running before spawning so a crash during spawn is visible.
        self._write_info(dict(status='running', pid=None))
        self.proc = multiprocessing.Process(
            target=self._run
        )
        self.proc.start()
    def join(self):
        """Block until the supervising process finishes."""
        self.proc.join()
    def status(self):
        """Return 'not started', 'running', 'done' or 'error'."""
        info = self._read_info()
        if self.proc is None and info.get('status') == 'running':
            # Either the process creating the job or the job itself
            # was killed.
            pid = info.get('pid')
            if pid is not None:
                try:
                    if not psutil.Process(pid).is_running():
                        return 'error'
                except psutil.NoSuchProcess:
                    # FIX: a dead pid made psutil.Process raise instead of
                    # this method reporting the job as failed.
                    return 'error'
        elif self.proc is not None and info.get('status') != 'running':
            if not self.proc.is_alive():
                self.join()
                self.proc = None
        return info.get('status')
    def clean(self, force=False):
        """Remove job output; only files we created unless ``force``."""
        if self.output_already_exists and not force:
            # FIX: each removal is now guarded independently; previously a
            # missing stderr file raised, and a missing stdout file skipped
            # the stderr removal entirely.
            for fname in (self.stdout, self.stderr):
                if os.path.exists(fname):
                    os.remove(fname)
        elif os.path.exists(self.output_dir):
            shutil.rmtree(self.output_dir)
############################################
# This class is meant to be used by execnet alone.
class _RemoteManager(object): # pragma: no cover
# This is run via execnet so coverage does not catch these.
# This is used by the RemoteWorker and that is tested, so we should
# be safe not explicitly covering this.
def __init__(self):
self.jobs = dict()
self.job_count = 0
self._setup_path()
def _setup_path(self):
py_dir = os.path.dirname(sys.executable)
env_path = os.environ.get('PATH').split(os.pathsep)
if py_dir not in env_path:
env_path.insert(0, py_dir)
os.environ['PATH'] = os.pathsep.join(env_path)
def run(self, job_data):
job = Job(**job_data)
job.run()
ret_val = self.job_count
self.jobs[ret_val] = job
self.job_count += 1
return ret_val
def status(self, job_id):
if job_id in self.jobs:
return self.jobs[job_id].status()
else:
return 'invalid job id %d' % job_id
def clean(self, job_id, force=False):
if job_id in self.jobs:
return self.jobs[job_id].clean(force)
else:
return 'invalid job id %d' % job_id
def get_stdout(self, job_id):
return self.jobs[job_id].get_stdout()
def get_stderr(self, job_id):
return self.jobs[job_id].get_stderr()
def get_info(self, job_id):
return self.jobs[job_id].get_info()
def serve(channel): # pragma: no cover
    """Serve the remote manager via execnet.

    Runs forever: receives (msg, data) tuples on the execnet channel and
    replies with the result of the corresponding call.
    """
    manager = _RemoteManager()
    while True:
        msg, data = channel.receive()
        # Core-count queries are module-level functions, not manager methods.
        if msg == 'free_cores':
            channel.send(free_cores())
        elif msg == 'total_cores':
            channel.send(total_cores())
        else:
            # All other messages dispatch to a manager method by name.
            channel.send(getattr(manager, msg)(*data))
############################################
class Worker(object):
    """Base class for job-running workers (local or remote).

    Tracks the jobs it has started and decides, from core availability,
    whether another job can be scheduled.  Concrete subclasses implement
    the job-management methods below.
    """
    def __init__(self):
        self.jobs = dict()
        self.running_jobs = set()
        # Cached lazily by total_cores().
        self._total_cores = None
    def _check_running_jobs(self):
        # status() prunes finished jobs from running_jobs as a side effect;
        # iterate over a snapshot since the set may shrink.
        for job_id in list(self.running_jobs):
            self.status(job_id)
    def free_cores(self):
        return free_cores()
    def cores_required(self, n_core):
        # Negative n_core means a fraction of the whole machine.
        if n_core >= 0:
            return n_core
        return int(self.total_cores() / (-n_core))
    def total_cores(self):
        if self._total_cores is None:
            self._total_cores = total_cores()
        return self._total_cores
    def can_run(self, req_core):
        """Returns True if the worker can run a job with the required cores.
        """
        needed = self.cores_required(req_core)
        if needed == 0:
            return True
        if self.free_cores() < needed:
            return False
        # Enough cores look idle; also check against our own bookkeeping of
        # cores already claimed by jobs we started.
        self._check_running_jobs()
        claimed = sum(
            self.cores_required(self.jobs[job_id].n_core)
            for job_id in self.running_jobs
        )
        return (self.total_cores() - claimed) >= needed
    def run(self, job):
        """Runs the job and returns a JobProxy for the job."""
        raise NotImplementedError()
    def status(self, job_id):
        """Returns status of the job."""
        raise NotImplementedError()
    def copy_output(self, job_id, dest):
        raise NotImplementedError()
    def clean(self, job_id, force=False):
        raise NotImplementedError()
    def get_stdout(self, job_id):
        raise NotImplementedError()
    def get_stderr(self, job_id):
        raise NotImplementedError()
    def get_info(self, job_id):
        raise NotImplementedError()
class JobProxy(object):
    """Local handle to a job managed by a worker.

    Every call is forwarded to the owning worker with the job id; the
    proxy itself cannot (re)start the job.
    """

    def __init__(self, worker, job_id, job):
        self.worker = worker
        self.job_id = job_id
        self.job = job

    def free_cores(self):
        return self.worker.free_cores()

    def total_cores(self):
        return self.worker.total_cores()

    def run(self):
        # Jobs are started by the worker, never through the proxy.
        print("JobProxy cannot be run")

    def status(self):
        return self.worker.status(self.job_id)

    def copy_output(self, dest):
        return self.worker.copy_output(self.job_id, dest)

    def clean(self, force=False):
        return self.worker.clean(self.job_id, force)

    def get_stdout(self):
        return self.worker.get_stdout(self.job_id)

    def get_stderr(self):
        return self.worker.get_stderr(self.job_id)

    def get_info(self):
        return self.worker.get_info(self.job_id)
class LocalWorker(Worker):
    """Worker that runs jobs directly on the local machine."""

    def __init__(self):
        super(LocalWorker, self).__init__()
        self.host = 'localhost'
        self.job_count = 0

    def get_config(self):
        # Serializable description of this worker.
        return dict(host='localhost')

    def run(self, job):
        """Start the job locally and return a proxy for it."""
        job_id = self.job_count
        print("Running %s" % job.pretty_command())
        self.jobs[job_id] = job
        self.running_jobs.add(job_id)
        job.run()
        self.job_count += 1
        return JobProxy(self, job_id, job)

    def status(self, job_id):
        state = self.jobs[job_id].status()
        # A job that is no longer running leaves the running set.
        if state != 'running':
            self.running_jobs.discard(job_id)
        return state

    def copy_output(self, job_id, dest):
        # Output is already on the local filesystem; nothing to copy.
        return

    def clean(self, job_id, force=False):
        if force:
            self.jobs[job_id].clean(force)

    def get_stdout(self, job_id):
        return self.jobs[job_id].get_stdout()

    def get_stderr(self, job_id):
        return self.jobs[job_id].get_stderr()

    def get_info(self, job_id):
        return self.jobs[job_id].get_info()
class RemoteWorker(Worker):
    """Worker that runs jobs on a remote host via an execnet gateway.

    All job-management calls are forwarded over an execnet channel to a
    ``_RemoteManager`` served by ``serve`` on the remote interpreter.
    """
    def __init__(self, host, python, chdir=None, testing=False,
                 nfs=False):
        # host: hostname to ssh into; python: remote python executable;
        # chdir: remote working directory; testing: spawn a local
        # subprocess instead of ssh; nfs: output directory is on a shared
        # filesystem, so no copying is needed.
        super(RemoteWorker, self).__init__()
        self.host = host
        self.python = python
        self.chdir = chdir
        self.testing = testing
        self.nfs = nfs
        if testing:
            spec = 'popen//python={python}'.format(python=python)
        else:
            spec = 'ssh={host}//python={python}'.format(
                host=host, python=python
            )
        if chdir is not None:
            spec += '//chdir={chdir}'.format(chdir=chdir)
        import execnet
        self.gw = execnet.makegateway(spec)
        # The remote side enters jobs.serve() and answers requests forever.
        self.channel = self.gw.remote_exec(
            "from automan import jobs; jobs.serve(channel)"
        )
    def get_config(self):
        # Serializable description of this worker (enough to recreate it).
        return dict(host=self.host, python=self.python, chdir=self.chdir)
    def _call_remote(self, method, *data):
        # Send (method, args) over the channel and block for the reply.
        ch = self.channel
        ch.send((method, data))
        return ch.receive()
    def free_cores(self):
        return self._call_remote('free_cores', None)
    def total_cores(self):
        # Cached: the remote core count does not change.
        if self._total_cores is None:
            self._total_cores = self._call_remote('total_cores', None)
        return self._total_cores
    def run(self, job):
        """Start ``job`` on the remote host and return a proxy for it."""
        print("Running %s" % job.pretty_command())
        job_id = self._call_remote('run', job.to_dict())
        self.jobs[job_id] = job
        self.running_jobs.add(job_id)
        return JobProxy(self, job_id, job)
    def status(self, job_id):
        # Drop finished jobs from the running set as a side effect.
        s = self._call_remote('status', job_id)
        rj = self.running_jobs
        if s != 'running':
            rj.discard(job_id)
        return s
    def copy_output(self, job_id, dest):
        """Copy the job's output directory into ``dest``.

        Returns the Popen handle performing the copy, or None when nothing
        needs copying (nfs mode).
        NOTE(review): relies on os, sys and subprocess being imported at
        module level -- not visible in this chunk; confirm.
        """
        job = self.jobs[job_id]
        if self.testing:
            # Local "remote": copy the tree with a python one-liner.
            src = os.path.join(self.chdir, job.output_dir)
            real_dest = os.path.join(dest, job.output_dir)
            args = [
                sys.executable, '-c',
                'import sys,shutil; shutil.copytree(sys.argv[1], sys.argv[2])',
                src, real_dest
            ]
        elif not self.nfs:
            # Pull results back over scp.
            src = '{host}:{path}'.format(
                host=self.host, path=os.path.join(self.chdir, job.output_dir)
            )
            real_dest = os.path.join(dest, os.path.dirname(job.output_dir))
            args = ['scp', '-qr', src, real_dest]
        else:
            # Shared filesystem: output is already visible locally.
            args = []
        if args:
            print("\n" + " ".join(args))
            proc = subprocess.Popen(args)
            return proc
        else:
            return
    def clean(self, job_id, force=False):
        return self._call_remote('clean', job_id, force)
    def get_stdout(self, job_id):
        return self._call_remote('get_stdout', job_id)
    def get_stderr(self, job_id):
        return self._call_remote('get_stderr', job_id)
    def get_info(self, job_id):
        return self._call_remote('get_info', job_id)
class Scheduler(object):
def __init__(self, root='.', worker_config=(), wait=5):
self.workers = deque()
self.worker_config = list(worker_config)
self.root = | |
0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1],
[1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1],
[1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1],
[1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0],
[0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1],
[1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1],
[1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1],
[0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1],
[0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1],
[0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1],
[0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, | |
# ref: https://github.com/bitcoin-core/HWI/blob/master/hwilib/serializations.py
from collections import OrderedDict
from .transaction import Transaction, TransactionOutput, SIGHASH
from . import compact
from . import bip32
from . import ec
from . import hashes
from .script import Script, Witness
from . import script
from .base import EmbitBase, EmbitError
from binascii import b2a_base64, a2b_base64, hexlify
class PSBTError(EmbitError):
    """Raised on any PSBT parsing or serialization failure."""
    pass
def ser_string(stream, s: bytes) -> int:
    """Write ``s`` prefixed by its compact-size length; return bytes written."""
    written = stream.write(compact.to_bytes(len(s)))
    written += stream.write(s)
    return written
def read_string(stream) -> bytes:
    """Read a compact-size prefixed byte string from ``stream``.

    Raises PSBTError when the stream ends before the announced length.
    """
    length = compact.read_from(stream)
    data = stream.read(length)
    if len(data) != length:
        raise PSBTError("Failed to read %d bytes" % length)
    return data
class PSBT(EmbitBase):
    """Partially Signed Bitcoin Transaction (BIP-174) container.

    Holds the unsigned transaction plus per-input/per-output metadata
    scopes, global xpubs and any unparsed key-value pairs.
    """

    MAGIC = b"psbt\xff"

    def __init__(self, tx=None):
        """Create a PSBT, pre-allocating one scope per input/output of ``tx``."""
        if tx is not None:
            self.tx = tx
            self.inputs = [InputScope() for i in range(len(tx.vin))]
            self.outputs = [OutputScope() for i in range(len(tx.vout))]
        else:
            self.tx = Transaction()
            self.inputs = []
            self.outputs = []
        self.unknown = {}
        self.xpubs = OrderedDict()

    def verify(self):
        """Check that every provided non-witness utxo matches its input's txid."""
        for i, inp in enumerate(self.inputs):
            if inp.non_witness_utxo:
                if inp.non_witness_utxo.txid() != self.tx.vin[i].txid:
                    raise PSBTError("Invalid hash of the non witness utxo for input %d" % i)

    def utxo(self, i):
        """Return the output being spent by input ``i`` (witness utxo preferred)."""
        if not (self.inputs[i].witness_utxo or self.inputs[i].non_witness_utxo):
            raise PSBTError("Missing previous utxo on input %d" % i)
        return self.inputs[i].witness_utxo or self.inputs[i].non_witness_utxo.vout[self.tx.vin[i].vout]

    def write_to(self, stream) -> int:
        """Serialize the PSBT to ``stream``; returns the number of bytes written."""
        # magic bytes
        r = stream.write(self.MAGIC)
        # unsigned tx flag
        r += stream.write(b"\x01\x00")
        # write serialized tx
        tx = self.tx.serialize()
        r += ser_string(stream, tx)
        # xpubs (key type 0x01)
        for xpub in self.xpubs:
            r += ser_string(stream, b"\x01" + xpub.serialize())
            r += ser_string(stream, self.xpubs[xpub].serialize())
        # unknown
        for key in self.unknown:
            r += ser_string(stream, key)
            r += ser_string(stream, self.unknown[key])
        # separator
        r += stream.write(b"\x00")
        # inputs
        for inp in self.inputs:
            r += inp.write_to(stream)
        # outputs
        for out in self.outputs:
            r += out.write_to(stream)
        return r

    @classmethod
    def from_base64(cls, b64):
        """Parse a PSBT from its base64 text representation."""
        raw = a2b_base64(b64)
        return cls.parse(raw)

    def to_base64(self):
        """Serialize to base64 text (no trailing newline)."""
        return b2a_base64(self.serialize()).strip().decode()

    def to_string(self, encoding="base64"):
        """Serialize to text: "base64" (default) or hex."""
        if encoding == "base64":
            return self.to_base64()
        else:
            return hexlify(self.serialize()).decode()

    @classmethod
    def from_string(cls, s):
        """Parse from text: hex (detected by the psbt magic prefix) or base64."""
        if s.startswith("70736274ff"):
            # BUG FIX: this branch previously called `unhexlify`, which is
            # never imported in this module (NameError at runtime);
            # bytes.fromhex is the built-in equivalent.
            return cls.parse(bytes.fromhex(s))
        else:
            return cls.from_base64(s)

    @classmethod
    def read_from(cls, stream):
        """Deserialize a PSBT from a binary stream and verify it."""
        tx = None
        unknown = {}
        xpubs = OrderedDict()
        # check magic
        if stream.read(len(cls.MAGIC)) != cls.MAGIC:
            raise PSBTError("Invalid PSBT magic")
        while True:
            key = read_string(stream)
            # separator ends the global scope
            if len(key) == 0:
                break
            value = read_string(stream)
            # tx
            if key == b"\x00":
                if tx is None:
                    tx = Transaction.parse(value)
                else:
                    raise PSBTError(
                        "Failed to parse PSBT - duplicated transaction field"
                    )
            else:
                if key in unknown:
                    raise PSBTError("Duplicated key")
                unknown[key] = value
        psbt = cls(tx)
        # now we can go through all the key-values and parse them
        for k in list(unknown):
            # xpub field
            if k[0] == 0x01:
                xpub = bip32.HDKey.parse(k[1:])
                xpubs[xpub] = DerivationPath.parse(unknown.pop(k))
        psbt.unknown = unknown
        psbt.xpubs = xpubs
        # input scopes
        for i in range(len(tx.vin)):
            psbt.inputs[i] = InputScope.read_from(stream)
        # output scopes
        for i in range(len(tx.vout)):
            psbt.outputs[i] = OutputScope.read_from(stream)
        psbt.verify()
        return psbt

    def sign_with(self, root, sighash=SIGHASH.ALL) -> int:
        """
        Signs psbt with root key (HDKey or similar).
        Returns number of signatures added to PSBT.
        Sighash kwarg is set to SIGHASH.ALL by default,
        so if PSBT is asking to sign with a different sighash this function won't sign.
        If you want to sign with sighashes provided in the PSBT - set sighash=None.
        """
        # if WIF - fingerprint is None
        # NOTE(review): assumes HDKey semantics where child(0).fingerprint is
        # the fingerprint of `root` itself -- confirm against bip32.HDKey.
        fingerprint = None if not hasattr(root, "child") else root.child(0).fingerprint
        if not fingerprint:
            pub = root.get_public_key()
            sec = pub.sec()
            pkh = hashes.hash160(sec)
        counter = 0
        for i, inp in enumerate(self.inputs):
            # check which sighash to use
            inp_sighash = inp.sighash_type or sighash or SIGHASH.ALL
            # if input sighash is set and is different from kwarg - skip input
            if sighash is not None and inp_sighash != sighash:
                continue
            utxo = self.utxo(i)
            value = utxo.value
            sc = inp.witness_script or inp.redeem_script or utxo.script_pubkey
            # detect if it is a segwit input
            is_segwit = (inp.witness_script
                         or inp.witness_utxo
                         or utxo.script_pubkey.script_type() in {"p2wpkh", "p2wsh"}
                         or (
                             inp.redeem_script
                             and inp.redeem_script.script_type() in {"p2wpkh", "p2wsh"}
                         )
                        )
            # convert to p2pkh according to bip143
            if sc.script_type() == "p2wpkh":
                sc = script.p2pkh_from_p2wpkh(sc)
            sig = None
            # if we have individual private key
            if not fingerprint:
                sc = inp.witness_script or inp.redeem_script or self.utxo(i).script_pubkey
                # check if we are included in the script
                if sec in sc.data or pkh in sc.data:
                    if is_segwit:
                        h = self.tx.sighash_segwit(i, sc, value, sighash=inp_sighash)
                    else:
                        h = self.tx.sighash_legacy(i, sc, sighash=inp_sighash)
                    sig = root.sign(h)
                    if sig is not None:
                        # sig plus sighash_all
                        inp.partial_sigs[pub] = sig.serialize() + bytes([inp_sighash])
                        counter += 1
                continue
            # if we use HDKey
            for pub in inp.bip32_derivations:
                # check if it is root key
                if inp.bip32_derivations[pub].fingerprint == fingerprint:
                    hdkey = root.derive(inp.bip32_derivations[pub].derivation)
                    mypub = hdkey.key.get_public_key()
                    if mypub != pub:
                        raise PSBTError("Derivation path doesn't look right")
                    sig = None
                    if is_segwit:
                        h = self.tx.sighash_segwit(i, sc, value, sighash=inp_sighash)
                    else:
                        h = self.tx.sighash_legacy(i, sc, sighash=inp_sighash)
                    sig = hdkey.key.sign(h)
                    if sig is not None:
                        # sig plus sighash flag
                        inp.partial_sigs[mypub] = sig.serialize() + bytes([inp_sighash])
                        counter += 1
        return counter
class DerivationPath(EmbitBase):
    """A BIP32 key origin: master fingerprint plus a list of child indexes."""

    def __init__(self, fingerprint: bytes, derivation: list):
        self.fingerprint = fingerprint
        self.derivation = derivation

    def write_to(self, stream) -> int:
        """Serialize as fingerprint followed by 4-byte little-endian indexes."""
        total = stream.write(self.fingerprint)
        for index in self.derivation:
            total += stream.write(index.to_bytes(4, "little"))
        return total

    @classmethod
    def read_from(cls, stream):
        """Read a 4-byte fingerprint, then LE32 indexes until the stream ends."""
        fingerprint = stream.read(4)
        derivation = []
        while True:
            chunk = stream.read(4)
            if not chunk:
                break
            if len(chunk) < 4:
                raise PSBTError("Invalid length")
            derivation.append(int.from_bytes(chunk, "little"))
        return cls(fingerprint, derivation)
class PSBTScope(EmbitBase):
    """Common key-value scope logic shared by PSBT input and output scopes."""

    def __init__(self, unknown: dict = None):
        # BUG FIX: previously defaulted to a shared mutable dict ({}), so
        # every scope constructed without arguments aliased the same mapping.
        self.unknown = unknown if unknown is not None else {}

    def write_to(self, stream) -> int:
        """Serialize all key-value pairs followed by the 0x00 separator.

        Returns the number of bytes written.
        """
        # unknown
        r = 0
        for key in self.unknown:
            r += ser_string(stream, key)
            r += ser_string(stream, self.unknown[key])
        # separator
        r += stream.write(b"\x00")
        return r

    @classmethod
    def read_from(cls, stream):
        """Read key-value pairs until the 0x00 separator; duplicates raise."""
        unknown = {}
        while True:
            key = read_string(stream)
            # separator
            if len(key) == 0:
                break
            value = read_string(stream)
            if key in unknown:
                raise PSBTError("Duplicated key")
            unknown[key] = value
        # now we can go through all the key-values and parse them
        return cls(unknown)
class InputScope(PSBTScope):
def __init__(self, unknown: dict = {}):
self.unknown = unknown
self.non_witness_utxo = None
self.witness_utxo = None
self.partial_sigs = OrderedDict()
self.sighash_type = None
self.redeem_script = None
self.witness_script = None
self.bip32_derivations = OrderedDict()
self.final_scriptsig = None
self.final_scriptwitness = None
self.parse_unknowns()
    def parse_unknowns(self):
        """Move every recognized BIP-174 input key out of ``self.unknown``
        into its typed attribute; unrecognized keys remain in the dict.

        Raises PSBTError on malformed keys or duplicated fields.
        """
        # go through all the unknowns and parse them
        for k in list(self.unknown):
            # legacy utxo (key type 0x00)
            if k[0] == 0x00:
                if len(k) != 1:
                    raise PSBTError("Invalid non-witness utxo key")
                elif self.non_witness_utxo is not None:
                    raise PSBTError("Duplicated utxo value")
                else:
                    self.non_witness_utxo = Transaction.parse(self.unknown.pop(k))
            # witness utxo
            elif k[0] == 0x01:
                if len(k) != 1:
                    raise PSBTError("Invalid witness utxo key")
                elif self.witness_utxo is not None:
                    raise PSBTError("Duplicated utxo value")
                else:
                    self.witness_utxo = TransactionOutput.parse(self.unknown.pop(k))
            # partial signature (key carries the pubkey)
            elif k[0] == 0x02:
                pub = ec.PublicKey.parse(k[1:])
                if pub in self.partial_sigs:
                    raise PSBTError("Duplicated partial sig")
                else:
                    self.partial_sigs[pub] = self.unknown.pop(k)
            # hash type
            elif k[0] == 0x03:
                if len(k) != 1:
                    raise PSBTError("Invalid sighash type key")
                elif self.sighash_type is None:
                    if len(self.unknown[k]) != 4:
                        raise PSBTError("Sighash type should be 4 bytes long")
                    self.sighash_type = int.from_bytes(self.unknown.pop(k), "little")
                else:
                    raise PSBTError("Duplicated sighash type")
            # redeem script
            elif k[0] == 0x04:
                if len(k) != 1:
                    raise PSBTError("Invalid redeem script key")
                elif self.redeem_script is None:
                    self.redeem_script = Script(self.unknown.pop(k))
                else:
                    raise PSBTError("Duplicated redeem script")
            # witness script
            elif k[0] == 0x05:
                if len(k) != 1:
                    raise PSBTError("Invalid witness script key")
                elif self.witness_script is None:
                    self.witness_script = Script(self.unknown.pop(k))
                else:
                    raise PSBTError("Duplicated witness script")
            # bip32 derivation (key carries the pubkey)
            elif k[0] == 0x06:
                pub = ec.PublicKey.parse(k[1:])
                if pub in self.bip32_derivations:
                    raise PSBTError("Duplicated derivation path")
                else:
                    self.bip32_derivations[pub] = DerivationPath.parse(
                        self.unknown.pop(k)
                    )
            # final scriptsig
            elif k[0] == 0x07:
                if len(k) != 1:
                    raise PSBTError("Invalid final scriptsig key")
                elif self.final_scriptsig is None:
                    self.final_scriptsig = Script(self.unknown.pop(k))
                else:
                    raise PSBTError("Duplicated final scriptsig")
            # final script witness
            elif k[0] == 0x08:
                if len(k) != 1:
                    raise PSBTError("Invalid final scriptwitness key")
                elif self.final_scriptwitness is None:
                    self.final_scriptwitness = Witness.parse(self.unknown.pop(k))
                else:
                    raise PSBTError("Duplicated final scriptwitness")
def write_to(self, stream) -> int:
r = 0
if self.non_witness_utxo is not None:
r += stream.write(b"\x01\x00")
r += ser_string(stream, self.non_witness_utxo.serialize())
if self.witness_utxo is not None:
r += stream.write(b"\x01\x01")
r += ser_string(stream, self.witness_utxo.serialize())
for pub in self.partial_sigs:
r += ser_string(stream, b"\x02" + pub.serialize())
r += ser_string(stream, self.partial_sigs[pub])
if self.sighash_type is not None:
r += stream.write(b"\x01\x03")
r += ser_string(stream, self.sighash_type.to_bytes(4, "little"))
if self.redeem_script is not None:
| |
<reponame>HiroakiMikami/DeepCoder-Utils
#!/usr/bin/env python2
"""
Usage:
generate_io_samples.py [options] PROGRAM_TEXT
Example:
$ ./generate_io_samples.py "a <- [int] | b <- int | c <- TAKE b a | d <- COUNT isEVEN c | e <- TAKE d a"
Options:
-h --help Show this screen.
-N --number NUM Number of I/O examples to generate. [default: 5]
-L --length NUM Length of generated lists. [default: 10]
-V --value-range NUM Range of values. [default: 512]
"""
import os
import sys
from collections import namedtuple, defaultdict
from math import ceil, sqrt
import numpy as np
from docopt import docopt
# A DSL primitive: source token, type signature, python implementation, and a
# function computing valid input bounds from requested output bounds.
Function = namedtuple('Function', ['src', 'sig', 'fun', 'bounds'])
# A compiled program: source text, input types, output type, executor
# callable, and per-input value bounds.
Program = namedtuple('Program', ['src', 'ins', 'out', 'fun', 'bounds'])
# HELPER FUNCTIONS
def type_to_string(t):
    """Render a DSL type (int, [int], bool, [bool]) as its source token."""
    for candidate, token in ((int, 'int'), ([int], '[int]'),
                             (bool, 'bool'), ([bool], '[bool]')):
        if t == candidate:
            return token
    raise ValueError('Type %s cannot be converted to string.' % t)
def scanl1(f, xs):
    """Lazily yield the running fold of ``f.fun`` over ``xs`` (Haskell scanl1).

    The first yielded value is ``xs[0]``; an empty list yields nothing.
    """
    if len(xs) == 0:
        return
    acc = xs[0]
    yield acc
    for x in xs[1:]:
        acc = f.fun(acc, x)
        yield acc
##### Bound helpers:
def SQR_bounds(A, B):
    """Inclusive input bounds for SQR so that squares land within [A, B)."""
    lower = max(0, A)  # inclusive lower bound of the output range
    upper = B - 1      # inclusive upper bound of the output range
    if lower > upper:
        return [(0, 0)]
    # now 0 <= lower <= upper
    # Assume that if anything is valid then 0 is valid.
    return [(-int(sqrt(upper)), ceil(sqrt(upper + 1)))]
def MUL_bounds(A, B):
    """Input bounds for binary '*' so that products stay within [A, B)."""
    tightest = min(-(A + 1), B)
    return SQR_bounds(0, tightest)
def scanl1_bounds(l, A, B, L):
    """Input bounds for SCANL1 with lambda ``l`` over lists of length ``L``.

    NOTE(review): written for Python 2 -- ``A/L`` relies on integer (floor)
    division for int arguments; under Python 3 this would yield floats.
    Confirm the intended interpreter before porting.
    """
    if l.src == '+' or l.src == '-':
        return [(A/L+1, B/L)]
    elif l.src == '*':
        # L-th root keeps the product of L factors inside the range.
        return [(int((max(0, A)+1) ** (1.0 / L)), int((max(0, B)) ** (1.0 / L)))]
    elif l.src == 'MIN' or l.src == 'MAX':
        return [(A, B)]
    else:
        raise Exception('Unsupported SCANL1 lambda, cannot compute valid input bounds.')
##### LINQ language:
def get_language(V):
    """Build the DSL and return the pair (LINQ, lambdas).

    ``V`` is the value range: integers live in [-V, V] and ``Null`` (used
    for empty/out-of-range results) is encoded as V itself. Each Function
    carries (src, sig, fun, bounds) where ``bounds`` maps requested output
    bounds to valid input bounds.

    NOTE(review): Python 2 only -- the ``lambda (A, B):`` tuple-parameter
    syntax below is a SyntaxError in Python 3.
    """
    Null = V
    # First-order lambdas passed to the higher-order LINQ combinators.
    lambdas = [
        Function('IDT', (int, int), lambda i: i, lambda (A, B): [(A, B)]),
        Function('INC', (int, int), lambda i: i+1, lambda (A, B): [(A, B-1)]),
        Function('DEC', (int, int), lambda i: i-1, lambda (A, B): [(A+1, B)]),
        Function('SHL', (int, int), lambda i: i*2, lambda (A, B): [((A+1)/2, B/2)]),
        Function('SHR', (int, int), lambda i: int(float(i)/2), lambda (A, B): [(2*A, 2*B)]),
        Function('doNEG', (int, int), lambda i: -i, lambda (A, B): [(-B+1, -A+1)]),
        Function('MUL3', (int, int), lambda i: i*3, lambda (A, B): [((A+2)/3, B/3)]),
        Function('DIV3', (int, int), lambda i: int(float(i)/3), lambda (A, B): [(A, B)]),
        Function('MUL4', (int, int), lambda i: i*4, lambda (A, B): [((A+3)/4, B/4)]),
        Function('DIV4', (int, int), lambda i: int(float(i)/4), lambda (A, B): [(A, B)]),
        Function('SQR', (int, int), lambda i: i*i, lambda (A, B): SQR_bounds(A, B)),
        #Function('SQRT', (int, int), lambda i: int(sqrt(i)), lambda (A, B): [(max(0, A*A), B*B)]),
        Function('isPOS', (int, bool), lambda i: i > 0, lambda (A, B): [(A, B)]),
        Function('isNEG', (int, bool), lambda i: i < 0, lambda (A, B): [(A, B)]),
        Function('isODD', (int, bool), lambda i: i % 2 == 1, lambda (A, B): [(A, B)]),
        Function('isEVEN', (int, bool), lambda i: i % 2 == 0, lambda (A, B): [(A, B)]),
        Function('+', (int, int, int), lambda i, j: i+j, lambda (A, B): [(A/2+1, B/2)]),
        Function('-', (int, int, int), lambda i, j: i-j, lambda (A, B): [(A/2+1, B/2)]),
        Function('*', (int, int, int), lambda i, j: i*j, lambda (A, B): MUL_bounds(A, B)),
        Function('MIN', (int, int, int), lambda i, j: min(i, j), lambda (A, B): [(A, B)]),
        Function('MAX', (int, int, int), lambda i, j: max(i, j), lambda (A, B): [(A, B)]),
    ]
    # First-order list combinators plus higher-order ones instantiated with
    # every compatible lambda (MAP/FILTER/COUNT/ZIPWITH/SCANL1).
    LINQ = [
        Function('REVERSE', ([int], [int]), lambda xs: list(reversed(xs)), lambda (A, B, L): [(A, B)]),
        Function('SORT', ([int], [int]), lambda xs: sorted(xs), lambda (A, B, L): [(A, B)]),
        Function('TAKE', (int, [int], [int]), lambda n, xs: xs[:n], lambda (A, B, L): [(0,L), (A, B)]),
        Function('DROP', (int, [int], [int]), lambda n, xs: xs[n:], lambda (A, B, L): [(0,L), (A, B)]),
        Function('ACCESS', (int, [int], int), lambda n, xs: xs[n] if n>=0 and len(xs)>n else Null, lambda (A, B, L): [(0,L), (A, B)]),
        Function('HEAD', ([int], int), lambda xs: xs[0] if len(xs)>0 else Null, lambda (A, B, L): [(A, B)]),
        Function('LAST', ([int], int), lambda xs: xs[-1] if len(xs)>0 else Null, lambda (A, B, L): [(A, B)]),
        Function('MINIMUM', ([int], int), lambda xs: min(xs) if len(xs)>0 else Null, lambda (A, B, L): [(A, B)]),
        Function('MAXIMUM', ([int], int), lambda xs: max(xs) if len(xs)>0 else Null, lambda (A, B, L): [(A, B)]),
        Function('SUM', ([int], int), lambda xs: sum(xs), lambda (A, B, L): [(A/L+1, B/L)]),
    ] + \
    [Function(
        'MAP ' + l.src,
        ([int], [int]),
        lambda xs, l=l: map(l.fun, xs),
        lambda (A, B, L), l=l: l.bounds((A, B))
    ) for l in lambdas if l.sig==(int, int)] + \
    [Function(
        'FILTER ' + l.src,
        ([int], [int]),
        lambda xs, l=l: filter(l.fun, xs),
        lambda (A, B, L), l=l: [(A, B)],
    ) for l in lambdas if l.sig==(int, bool)] + \
    [Function(
        'COUNT ' + l.src,
        ([int], int),
        lambda xs, l=l: len(filter(l.fun, xs)),
        lambda (A, B, L), l=l: [(-V, V)],
    ) for l in lambdas if l.sig==(int, bool)] + \
    [Function(
        'ZIPWITH ' + l.src,
        ([int], [int], [int]),
        lambda xs, ys, l=l: [l.fun(x, y) for (x, y) in zip(xs, ys)],
        lambda (A, B, L), l=l: l.bounds((A, B)) + l.bounds((A, B)),
    ) for l in lambdas if l.sig==(int, int, int)] + \
    [Function(
        'SCANL1 ' + l.src,
        ([int], [int]),
        lambda xs, l=l: list(scanl1(l, xs)),
        lambda (A, B, L), l=l: scanl1_bounds(l, A, B, L),
    ) for l in lambdas if l.sig==(int, int, int)]
    return LINQ, lambdas
def compile(source_code, V, L, min_input_range_length=0):
    """
    Taken in a program source code, the integer range V and the tape lengths L,
    and produces a Program.
    If L is None then input constraints are not computed.
    """
    # NOTE(review): shadows the builtin ``compile``; the name is kept for
    # API compatibility with callers of this module.
    # Source code parsing into intermediate representation
    LINQ, _ = get_language(V)
    LINQ_names = [l.src for l in LINQ]
    input_types = []
    types = []
    functions = []
    pointers = []
    for line in source_code.split('\n'):
        # Strip the leading "x <- " (register name plus arrow).
        instruction = line[5:]
        if instruction in ['int', '[int]']:
            input_types.append(eval(instruction))
            types.append(eval(instruction))
            functions.append(None)
            pointers.append(None)
        else:
            split = instruction.split(' ')
            command = split[0]
            args = split[1:]
            # Handle lambda: anything that is not a single register name a-z
            # is part of the command (e.g. "COUNT isEVEN").
            if len(split[1]) > 1 or split[1] < 'a' or split[1] > 'z':
                command += ' ' + split[1]
                args = split[2:]
            f = LINQ[LINQ_names.index(command)]
            assert len(f.sig) - 1 == len(args), "Wrong number of arguments for %s" % command
            # Register names map to indexes: 'a' -> 0, 'b' -> 1, ...
            ps = [ord(arg) - ord('a') for arg in args]
            types.append(f.sig[-1])
            functions.append(f)
            pointers.append(ps)
            assert [types[p] == t for p, t in zip(ps, f.sig)]
    input_length = len(input_types)
    program_length = len(types)
    # Validate program by propagating input constraints and check all registers are useful
    limits = [(-V, V)]*program_length
    if L is not None:
        # Walk backwards, tightening each argument's bounds from its consumers.
        for t in range(program_length-1, -1, -1):
            if t >= input_length:
                lim_l, lim_u = limits[t]
                new_lims = functions[t].bounds((lim_l, lim_u, L))
                num_args = len(functions[t].sig) - 1
                for a in range(num_args):
                    p = pointers[t][a]
                    limits[pointers[t][a]] = (max(limits[p][0], new_lims[a][0]),
                                              min(limits[p][1], new_lims[a][1]))
                    #print('t=%d: New limit for %d is %s' % (t, p, limits[pointers[t][a]]))
            elif min_input_range_length >= limits[t][1] - limits[t][0]:
                print('Program with no valid inputs: %s' % source_code)
                return None
    # for t in range(input_length, program_length):
    #     print('%s (%s)' % (functions[t].src, ' '.join([chr(ord('a') + p) for p in pointers[t]])))
    # Construct executor
    my_input_types = list(input_types)
    my_types = list(types)
    my_functions = list(functions)
    my_pointers = list(pointers)
    my_program_length = program_length
    def program_executor(args):
        # Evaluate each non-input register in order; the last one is the result.
        # print '--->'
        # for t in range(input_length, my_program_length):
        #     print('%s <- %s (%s)' % (chr(ord('a') + t), my_functions[t].src, ' '.join([chr(ord('a') + p) for p in my_pointers[t]])))
        assert len(args) == len(my_input_types)
        registers = [None]*my_program_length
        for t in range(len(args)):
            registers[t] = args[t]
        for t in range(len(args), my_program_length):
            registers[t] = my_functions[t].fun(*[registers[p] for p in my_pointers[t]])
        return registers[-1]
    return Program(
        source_code,
        input_types,
        types[-1],
        program_executor,
        limits[:input_length]
    )
def generate_IO_examples(program, N, L, V):
""" Given a programs, randomly generates N IO examples.
using the specified length L for the input arrays. """
input_types = program.ins
input_nargs = len(input_types)
# Generate N input-output pairs
IO = []
for _ in range(N):
input_value = [None]*input_nargs
for a in range(input_nargs):
minv, maxv = program.bounds[a]
if input_types[a] == int:
input_value[a] = np.random.randint(minv, maxv)
elif input_types[a] == [int]:
input_value[a] = list(np.random.randint(minv, maxv, size=L))
else:
raise Exception("Unsupported input type " + input_types[a] + " for random input | |
<filename>models/official/detection/modeling/architecture/spinenet_mbconv.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of SpineNet-MBConv model.
SpineNet with MBConv blocks.
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
SpineNet: Learning Scale-Permuted Backbone for Recognition and Localization
https://arxiv.org/abs/1912.05027
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl import logging
import tensorflow.compat.v1 as tf
from modeling.architecture import nn_blocks
from modeling.architecture import nn_ops
from ops import spatial_transform_ops
# Default expansion ratio for MBConv blocks.
DEFAULT_EXPAND_RATIO = 6

# Base feature dimension for each block level (indexed by level).
FILTER_SIZE_MAP = {
    0: 8,
    1: 16,
    2: 24,
    3: 40,
    4: 80,
    5: 112,
    6: 112,
    7: 112,
}

# The static SpineNet architecture discovered by NAS.
# Each element represents a specification of a building block:
#   (block_level, block_fn, (input_offset0, input_offset1), is_output).
SPINENET_BLOCK_SPECS = [
    (2, 'mbconv', (0, 1), False),
    (2, 'mbconv', (1, 2), False),
    (4, 'mbconv', (1, 2), False),
    (3, 'mbconv', (3, 4), False),
    (4, 'mbconv', (3, 5), False),
    (6, 'mbconv', (4, 6), False),
    (4, 'mbconv', (4, 6), False),
    (5, 'mbconv', (7, 8), False),
    (7, 'mbconv', (7, 9), False),
    (5, 'mbconv', (9, 10), False),
    (5, 'mbconv', (9, 11), False),
    (4, 'mbconv', (6, 11), True),
    (3, 'mbconv', (5, 11), True),
    (5, 'mbconv', (8, 13), True),
    (7, 'mbconv', (6, 15), True),
    (6, 'mbconv', (13, 15), True),
]

# Model-size scaling coefficients keyed by the SpineNet variant name.
SCALING_MAP = {
    '49': {
        'endpoints_num_filters': 48,
        'filter_size_scale': 1.0,
        'block_repeats': 1,
    },
}
class BlockSpec(object):
  """A container class that specifies the block configuration for SpineNet."""

  def __init__(self, level, block_fn, input_offsets, is_output):
    # Feature level, block type, indexes of the two parent blocks, and
    # whether this block feeds the output endpoints.
    self.level = level
    self.block_fn = block_fn
    self.input_offsets = input_offsets
    self.is_output = is_output
def build_block_specs(block_specs=None):
  """Builds the list of BlockSpec objects for SpineNet.

  Falls back to the NAS-discovered SPINENET_BLOCK_SPECS when no specs are
  provided.
  """
  specs = block_specs or SPINENET_BLOCK_SPECS
  logging.info('Building SpineNet block specs: %s', specs)
  return [BlockSpec(*spec) for spec in specs]
def block_group(inputs,
                in_filters,
                out_filters,
                strides,
                expand_ratio,
                block_repeats,
                se_ratio=0.2,
                batch_norm_activation=nn_ops.BatchNormActivation(),
                dropblock=nn_ops.Dropblock(),
                drop_connect_rate=None,
                data_format='channels_last',
                name=None,
                is_training=False):
  """Creates one group of blocks for Mobile SpineNet.

  Only the first block applies `strides`; the remaining `block_repeats - 1`
  blocks use stride 1 and take `out_filters` as their input depth.
  """

  def _mbconv(net, input_filters, block_strides):
    # One MBConv block; all blocks in the group share the remaining config.
    return nn_blocks.mbconv_block(
        net,
        input_filters,
        out_filters,
        expand_ratio,
        block_strides,
        se_ratio=se_ratio,
        batch_norm_activation=batch_norm_activation,
        dropblock=dropblock,
        drop_connect_rate=drop_connect_rate,
        data_format=data_format,
        is_training=is_training)

  net = _mbconv(inputs, in_filters, strides)
  for _ in range(block_repeats - 1):
    net = _mbconv(net, out_filters, 1)
  return tf.identity(net, name)
def resample_with_sepconv(feat,
                          target_width,
                          target_num_filters,
                          use_native_resize_op=False,
                          batch_norm_activation=nn_ops.BatchNormActivation(),
                          data_format='channels_last',
                          name=None,
                          is_training=False):
  """Match resolution and feature dimension to the target block.

  Down-samples with repeated stride-2 depthwise convs, up-samples with
  nearest-neighbor interpolation, then projects to `target_num_filters`
  with a 1x1 conv. Widths must be power-of-two multiples of each other.
  """
  _, height, width, num_filters = feat.get_shape().as_list()
  if width is None or num_filters is None:
    raise ValueError('Shape of feat is None (shape:{}).'.format(feat.shape))
  with tf.variable_scope('resample_with_sepconv_{}'.format(name)):
    # Down-sample.
    if width > target_width:
      if width % target_width != 0:
        raise ValueError('width ({}) is not divisible by '
                         'target_width ({}).'.format(width, target_width))
      # Each stride-2 depthwise conv halves the width until it matches.
      while width > target_width:
        feat = nn_ops.depthwise_conv2d_fixed_padding(
            inputs=feat, kernel_size=3, strides=2, data_format=data_format)
        feat = batch_norm_activation(feat, is_training=is_training)
        width /= 2
    # Up-sample with NN interpolation.
    elif width < target_width:
      if target_width % width != 0:
        # BUG FIX: error message previously misspelled 'target_width' as
        # 'target_wdith'.
        raise ValueError('target_width ({}) is not divisible by '
                         'width ({}).'.format(target_width, width))
      scale = target_width // width
      if use_native_resize_op:
        feat = tf.image.resize_nearest_neighbor(feat,
                                                [height * scale, width * scale])
      else:
        feat = spatial_transform_ops.nearest_upsampling(feat, scale=scale)
    # Match feature dimension to the target block.
    feat = nn_ops.conv2d_fixed_padding(
        inputs=feat,
        filters=target_num_filters,
        kernel_size=1,
        strides=1,
        data_format=data_format)
    feat = batch_norm_activation(feat, relu=False, is_training=is_training)
  return feat
def get_drop_connect_rate(init_rate, i, n):
    """Return the linearly scaled drop-connect rate for block ``i`` of ``n``.

    The rate grows linearly with block depth: ``init_rate * (i + 1) / n``.
    Returns ``None`` (drop-connect disabled) when ``init_rate`` is ``None``
    or outside the open interval (0, 1).
    """
    if init_rate is None or not 0 < init_rate < 1:
        return None
    rate = init_rate * float(i + 1) / n
    logging.info('Drop connect rate %f for block_%d.', rate, i)
    return rate
class SpineNetMBConv(object):
    """Class to build SpineNet family models with MBConv blocks."""

    def __init__(self,
                 min_level=3,
                 max_level=7,
                 block_specs=build_block_specs(),
                 endpoints_num_filters=48,
                 use_native_resize_op=False,
                 se_ratio=0.2,
                 block_repeats=1,
                 filter_size_scale=1.0,
                 activation='swish',
                 batch_norm_activation=nn_ops.BatchNormActivation(
                     activation='swish'),
                 init_drop_connect_rate=None,
                 data_format='channels_last'):
        """SpineNetMBConv initialization function.

        Args:
          min_level: `int` minimum level in SpineNet endpoints.
          max_level: `int` maximum level in SpineNet endpoints.
          block_specs: a list of BlockSpec objects that specifies the SpineNet
            network topology. By default, the previously discovered architecture
            is used. NOTE(review): this default is evaluated once at class
            definition time and shared by all instances.
          endpoints_num_filters: `int` feature dimension applied to endpoints
            before sharing conv layers in head.
          use_native_resize_op: Whether to use native
            tf.image.nearest_neighbor_resize or the broadcast implementation to
            do upsampling.
          se_ratio: squeeze and excitation ratio for MBConv blocks.
          block_repeats: `int` number of repeats per block.
          filter_size_scale: `float` a scaling factor to uniformly scale feature
            dimension in SpineNet.
          activation: the activation function after cross-scale feature fusion.
            Support 'relu' and 'swish'.
          batch_norm_activation: An operation that includes a batch normalization
            layer followed by an optional activation layer.
          init_drop_connect_rate: `float` initial drop connect rate.
          data_format: An optional string from: "channels_last", "channels_first".
            Defaults to "channels_last".

        Raises:
          ValueError: if `activation` is neither 'relu' nor 'swish'.
        """
        self._min_level = min_level
        self._max_level = max_level
        self._block_specs = block_specs
        self._endpoints_num_filters = endpoints_num_filters
        self._use_native_resize_op = use_native_resize_op
        self._se_ratio = se_ratio
        self._block_repeats = block_repeats
        self._filter_size_scale = filter_size_scale
        if activation == 'relu':
            self._activation = tf.nn.relu
        elif activation == 'swish':
            self._activation = tf.nn.swish
        else:
            raise ValueError('Activation {} not implemented.'.format(activation))
        self._batch_norm_activation = batch_norm_activation
        self._init_dc_rate = init_drop_connect_rate
        self._data_format = data_format
        self._dropblock = nn_ops.Dropblock()

    def _build_stem_network(self, inputs, is_training):
        """Build the stem network.

        Returns a list [base0, base1]: the initial L1 and L2 feature maps that
        seed the scale-permuted network.
        """
        # Build the first conv layer (stride-2: halves spatial resolution).
        inputs = nn_ops.conv2d_fixed_padding(
            inputs=inputs,
            filters=nn_ops.round_filters(FILTER_SIZE_MAP[0],
                                         self._filter_size_scale),
            kernel_size=3,
            strides=2,
            data_format=self._data_format)
        inputs = tf.identity(inputs, 'initial_conv')
        inputs = self._batch_norm_activation(inputs, is_training=is_training)
        # Build the initial L1 block and L2 block.
        base0 = block_group(
            inputs=inputs,
            in_filters=nn_ops.round_filters(FILTER_SIZE_MAP[0],
                                            self._filter_size_scale),
            out_filters=nn_ops.round_filters(FILTER_SIZE_MAP[1],
                                             self._filter_size_scale),
            expand_ratio=DEFAULT_EXPAND_RATIO,
            block_repeats=self._block_repeats,
            strides=1,
            se_ratio=self._se_ratio,
            batch_norm_activation=self._batch_norm_activation,
            dropblock=self._dropblock,
            data_format=self._data_format,
            name='stem_block_0',
            is_training=is_training)
        base1 = block_group(
            inputs=base0,
            in_filters=nn_ops.round_filters(FILTER_SIZE_MAP[1],
                                            self._filter_size_scale),
            out_filters=nn_ops.round_filters(FILTER_SIZE_MAP[2],
                                             self._filter_size_scale),
            expand_ratio=DEFAULT_EXPAND_RATIO,
            block_repeats=self._block_repeats,
            strides=2,
            se_ratio=self._se_ratio,
            batch_norm_activation=self._batch_norm_activation,
            dropblock=self._dropblock,
            data_format=self._data_format,
            name='stem_block_1',
            is_training=is_training)
        return [base0, base1]

    def _build_endpoints(self, features, is_training):
        """Match filter size for endpoints before sharing conv layers."""
        # 1x1 conv projects every output level to a common channel count so a
        # shared head can consume all levels.
        endpoints = {}
        for level in range(self._min_level, self._max_level + 1):
            feature = nn_ops.conv2d_fixed_padding(
                inputs=features[level],
                filters=self._endpoints_num_filters,
                kernel_size=1,
                strides=1,
                data_format=self._data_format)
            feature = self._batch_norm_activation(feature, is_training=is_training)
            endpoints[level] = feature
        return endpoints

    def _build_scale_permuted_network(self, feats, input_width, is_training):
        """Builds the scale permuted network from a given config.

        Each BlockSpec merges two earlier feature maps (resampled to the target
        resolution/width), applies a block group, and appends the result to
        `feats`. Returns a dict mapping output level -> feature tensor.
        """
        # Number of output connections from each feat (outdegree tracking).
        num_outgoing_connections = [0] * len(feats)
        output_feats = {}
        for i, block_spec in enumerate(self._block_specs):
            with tf.variable_scope('sub_policy{}'.format(i)):
                # Find feature map size, filter size, and block fn for the
                # target block. Width at level L is input_width / 2**L.
                target_width = int(math.ceil(input_width / 2 ** block_spec.level))
                target_num_filters = nn_ops.round_filters(
                    FILTER_SIZE_MAP[block_spec.level], self._filter_size_scale)

                def _input_ind(input_offset):
                    # Validate that the parent block already exists.
                    if input_offset < len(feats):
                        return input_offset
                    else:
                        raise ValueError(
                            'input_offset ({}) is out of existing blocks({})'.format(
                                input_offset, len(feats)))

                # Resample and merge two parent blocks.
                input0 = _input_ind(block_spec.input_offsets[0])
                input1 = _input_ind(block_spec.input_offsets[1])
                parent0_feat = feats[input0]
                parent0_feat = resample_with_sepconv(
                    parent0_feat,
                    target_width,
                    target_num_filters,
                    use_native_resize_op=self._use_native_resize_op,
                    batch_norm_activation=self._batch_norm_activation,
                    data_format=self._data_format,
                    name='resample_{}_0'.format(i),
                    is_training=is_training)
                num_outgoing_connections[input0] += 1
                parent1_feat = feats[input1]
                parent1_feat = resample_with_sepconv(
                    parent1_feat,
                    target_width,
                    target_num_filters,
                    use_native_resize_op=self._use_native_resize_op,
                    batch_norm_activation=self._batch_norm_activation,
                    data_format=self._data_format,
                    name='resample_{}_1'.format(i),
                    is_training=is_training)
                num_outgoing_connections[input1] += 1
                # Sum parent0 and parent1 to create the target feat.
                target_feat = parent0_feat + parent1_feat
                # Connect intermediate blocks with outdegree 0 to the output
                # block, so no computed feature is left dangling.
                if block_spec.is_output:
                    for j, (j_feat, j_connections) in enumerate(
                            zip(feats, num_outgoing_connections)):
                        # shape[2]/shape[3] are width/channels — assumes
                        # channels_last layout; TODO confirm for channels_first.
                        if j_connections == 0 and (
                                j_feat.shape[2] == target_width and
                                j_feat.shape[3] == target_feat.shape[3]):
                            target_feat += j_feat
                            num_outgoing_connections[j] += 1
                with tf.variable_scope('scale_permuted_block_{}'.format(len(feats))):
                    # Activation after fusion, before the block group.
                    target_feat = self._activation(target_feat)
                    # Build the target block.
                    target_feat = block_group(
                        inputs=target_feat,
                        in_filters=target_num_filters,
                        out_filters=target_num_filters,
                        expand_ratio=DEFAULT_EXPAND_RATIO,
                        block_repeats=self._block_repeats,
                        strides=1,
                        se_ratio=self._se_ratio,
                        batch_norm_activation=self._batch_norm_activation,
                        drop_connect_rate=get_drop_connect_rate(self._init_dc_rate, i,
                                                                len(self._block_specs)),
                        dropblock=self._dropblock,
                        data_format=self._data_format,
                        name='scale_permuted_block_{}'.format(i),
                        is_training=is_training)
                feats.append(target_feat)
                num_outgoing_connections.append(0)
                # Save output feats.
                if block_spec.is_output:
                    if block_spec.level in output_feats:
                        raise ValueError(
                            'Duplicate feats found for output level {}.'.format(
                                block_spec.level))
                    if (block_spec.level < self._min_level or
                            block_spec.level > self._max_level):
                        raise ValueError('Output level is out of range [{}, {}]'.format(
                            self._min_level, self._max_level))
                    output_feats[block_spec.level] = target_feat
        return output_feats

    def __call__(self, images, is_training=False):
        """Generate a multiscale feature pyramid.

        Args:
          images: The input image tensor.
          is_training: `bool` if True, the model is in training mode.

        Returns:
          a `dict` containing `int` keys for continuous feature levels
          [min_level, min_level + 1, ..., max_level]. The values are
          corresponding features with shape [batch_size, height_l, width_l,
          endpoints_num_filters].
        """
        # NHWC unpack — assumes channels_last input; TODO confirm.
        _, _, in_width, _ = images.get_shape().as_list()
        with tf.variable_scope('spinenet_mbconv'):
            feats = self._build_stem_network(images, is_training)
            feats = self._build_scale_permuted_network(feats, in_width, is_training)
            endpoints = self._build_endpoints(feats, is_training)
        return endpoints
def spinenet_mbconv_builder(model_id,
min_level=3,
max_level=7,
block_specs=build_block_specs(),
use_native_resize_op=False,
se_ratio=0.2,
activation='swish',
batch_norm_activation=nn_ops.BatchNormActivation(
activation='swish'),
init_drop_connect_rate=None,
data_format='channels_last'):
"""Builds the SpineNet-MBConv network."""
if model_id not in SCALING_MAP:
raise ValueError('SpineNetMBConv | |
<reponame>MrFelixU/ks5-edexcel<filename>oink/simpletal/simpleTAL.py
""" simpleTAL Interpreter
Copyright (c) 2009 <NAME> (http://www.owlfish.com/)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
If you make any bug fixes or feature enhancements please let me know!
The classes in this module implement the TAL language, expanding
both XML and HTML templates.
Module Dependencies: logging, simpleTALES, simpleTALTemplates
"""
import logging
import xml.sax, xml.sax.saxutils,html, io, re, types, codecs
import simpletal, copy, sys
from . import FixedHTMLParser, sgmlentitynames
__version__ = simpletal.__version__
try:
    # Is PyXML's LexicalHandler available?
    from xml.sax.saxlib import LexicalHandler
    use_lexical_handler = 1
except ImportError:
    use_lexical_handler = 0

    # Stub so later classes can unconditionally inherit from LexicalHandler
    # even when PyXML is not installed.
    class LexicalHandler:
        pass

try:
    # Is PyXML's DOM2SAX available?
    import xml.dom.ext.Dom2Sax
    use_dom2sax = 1
except ImportError:
    use_dom2sax = 0

# Check Python version. The condition below is true for 3.4 or later (the
# original comment said 3.3, but `minor > 3` means >= 3.4): from then on all
# SGML entities are expanded by the HTML Parser.
if sys.version_info[0] > 3 or (sys.version_info[0] == 3 and sys.version_info[1] > 3):
    HTML_ENTITIES_PRE_EXPANDED = True
else:
    HTML_ENTITIES_PRE_EXPANDED = False
from . import simpleTALES
# Name-space URIs
METAL_NAME_URI = "http://xml.zope.org/namespaces/metal"
TAL_NAME_URI = "http://xml.zope.org/namespaces/tal"

# All commands are of the form (opcode, args, commandList)
# The numbers are the opcodes, and also the order of priority.

# Argument: [(isLocalFlag (Y/n), variableName, variablePath),...]
TAL_DEFINE = 1
# Argument: expression, endTagSymbol
TAL_CONDITION = 2
# Argument: (varname, expression, endTagSymbol)
TAL_REPEAT = 3
# Argument: (replaceFlag, type, expression)
TAL_CONTENT = 4
# Not used in byte code, only ordering.
TAL_REPLACE = 5
# Argument: [(attributeName, expression)]
TAL_ATTRIBUTES = 6
# Argument: expression
TAL_OMITTAG = 7
# Argument: (originalAttributeList, currentAttributeList)
TAL_START_SCOPE = 8
# Argument: String to output
TAL_OUTPUT = 9
# Argument: None
TAL_STARTTAG = 10
# Argument: Tag, omitTagFlag
TAL_ENDTAG_ENDSCOPE = 11
# Argument: None
TAL_NOOP = 13

# METAL starts here.
# Argument: expression, slotParams, endTagSymbol
METAL_USE_MACRO = 14
# Argument: macroName, endTagSymbol
METAL_DEFINE_SLOT = 15
# Only used for parsing.
METAL_FILL_SLOT = 16
METAL_DEFINE_MACRO = 17

# All regex patterns are raw strings. The previous plain literals relied on
# '\s' / '\-' passing through as unrecognised escapes, which raises
# SyntaxWarning (DeprecationWarning pre-3.12) on modern Python. The compiled
# patterns are byte-for-byte identical.
METAL_NAME_REGEX = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*")
SINGLETON_XML_REGEX = re.compile(r'^<[^\s/>]+(?:\s*[^=>]+="[^">]+")*\s*/>')
SINGLETON_BYTES_XML_REGEX = re.compile(rb'^<[^\s/>]+(?:\s*[^=>]+="[^">]+")*\s*/>')
ENTITY_REF_REGEX = re.compile(r'(?:&[a-zA-Z][\-\.a-zA-Z0-9]*[^\-\.a-zA-Z0-9])|(?:&#[xX]?[a-eA-E0-9]*[^0-9a-eA-E])')

# The list of elements in HTML that can not have end tags - done as a
# dictionary for fast lookup.
HTML_FORBIDDEN_ENDTAG = {'AREA': 1, 'BASE': 1, 'BASEFONT': 1, 'BR': 1, 'COL': 1,
                         'FRAME': 1, 'HR': 1, 'IMG': 1, 'INPUT': 1, 'ISINDEX': 1,
                         'LINK': 1, 'META': 1, 'PARAM': 1}

# List of element:attribute pairs that can use minimized form in HTML.
HTML_BOOLEAN_ATTS = {'AREA:NOHREF': 1, 'IMG:ISMAP': 1, 'OBJECT:DECLARE': 1,
                     'INPUT:CHECKED': 1, 'INPUT:DISABLED': 1, 'INPUT:READONLY': 1, 'INPUT:ISMAP': 1,
                     'SELECT:MULTIPLE': 1, 'SELECT:DISABLED': 1,
                     'OPTGROUP:DISABLED': 1,
                     'OPTION:SELECTED': 1, 'OPTION:DISABLED': 1,
                     'TEXTAREA:DISABLED': 1, 'TEXTAREA:READONLY': 1,
                     'BUTTON:DISABLED': 1, 'SCRIPT:DEFER': 1}
class TemplateInterpreter:
def __init__(self):
    """Create an interpreter with an empty program stack and the opcode
    dispatch table mapping each TAL/METAL opcode to its handler method."""
    self.programStack = []
    self.commandList = None
    self.symbolTable = None
    self.slotParameters = {}
    self.commandHandler = {
        TAL_DEFINE: self.cmdDefine,
        TAL_CONDITION: self.cmdCondition,
        TAL_REPEAT: self.cmdRepeat,
        TAL_CONTENT: self.cmdContent,
        TAL_ATTRIBUTES: self.cmdAttributes,
        TAL_OMITTAG: self.cmdOmitTag,
        TAL_START_SCOPE: self.cmdStartScope,
        TAL_OUTPUT: self.cmdOutput,
        TAL_STARTTAG: self.cmdOutputStartTag,
        TAL_ENDTAG_ENDSCOPE: self.cmdEndTagEndScope,
        METAL_USE_MACRO: self.cmdUseMacro,
        METAL_DEFINE_SLOT: self.cmdDefineSlot,
        TAL_NOOP: self.cmdNoOp,
    }
def tagAsText(self, tagObj, singletonFlag=0):
    """Serialise a ``(tagName, attributeList)`` pair back into markup text.

    Attribute values are HTML-escaped (including quotes). When
    ``singletonFlag`` is true the tag is closed as ``<tag ... />``.
    """
    tagName, attributes = tagObj
    pieces = ["<", tagName]
    for attrName, attrValue in attributes:
        pieces.append(' %s="%s"' % (attrName, html.escape(attrValue, quote=1)))
    pieces.append(" />" if singletonFlag else ">")
    return "".join(pieces)
def initialise (self, context, outputFile):
self.context = context
self.file = outputFile
def cleanState (self):
self.scopeStack = []
self.programCounter = 0
self.movePCForward = None
self.movePCBack = None
self.outputTag = 1
self.originalAttributes = {}
self.currentAttributes = []
# Used in repeat only.
self.repeatAttributesCopy = []
self.currentSlots = {}
self.repeatVariable = None
self.tagContent = None
# tagState flag as to whether there are any local variables to pop
self.localVarsDefined = 0
# Pass in the parameters
self.currentSlots = self.slotParameters
def popProgram(self):
    """Restore the interpreter state saved by the matching pushProgram().

    NOTE: the 13-tuple unpack below must stay in exactly the same order as
    the tuple built in pushProgram(); the two methods are mirror images.
    NOTE(review): the local name `vars` shadows the builtin.
    """
    vars, self.commandList, self.symbolTable = self.programStack.pop()
    self.programCounter, self.scopeStack, self.slotParameters, self.currentSlots, self.movePCForward, self.movePCBack, self.outputTag, self.originalAttributes, self.currentAttributes, self.repeatVariable, self.repeatAttributesCopy, self.tagContent, self.localVarsDefined = vars
def pushProgram(self):
    """Save the complete interpreter state onto the program stack.

    Used when entering a macro so the caller's state can be restored by
    popProgram(). The 13-tuple below must stay in exactly the same order
    that popProgram() unpacks it.
    """
    vars = (self.programCounter,
            self.scopeStack,
            self.slotParameters,
            self.currentSlots,
            self.movePCForward,
            self.movePCBack,
            self.outputTag,
            self.originalAttributes,
            self.currentAttributes,
            self.repeatVariable,
            self.repeatAttributesCopy,
            self.tagContent,
            self.localVarsDefined)
    self.programStack.append((vars, self.commandList, self.symbolTable))
def execute(self, template):
    """Interpret every command of *template* until the program counter runs
    past the end of the command list. Handlers advance the counter."""
    self.cleanState()
    (self.commandList, self.programCounter,
     programLength, self.symbolTable) = template.getProgram()
    commands = self.commandList
    while self.programCounter < programLength:
        command = commands[self.programCounter]
        # Dispatch on the opcode (command[0]); args are command[1].
        handler = self.commandHandler[command[0]]
        handler(command[0], command[1])
def cmdDefine(self, command, args):
    """args: [(isLocalFlag (Y/n), variableName, variablePath), ...]

    Define variables in either the local or global context. Locals share a
    single pushLocals() frame, created lazily on the first local definition.
    """
    pushedLocals = 0
    for isLocal, varName, varPath in args:
        value = self.context.evaluate(varPath, self.originalAttributes)
        if not isLocal:
            self.context.addGlobal(varName, value)
            continue
        if not pushedLocals:
            pushedLocals = 1
            self.context.pushLocals()
        self.context.setLocal(varName, value)
    self.localVarsDefined = pushedLocals
    self.programCounter += 1
def cmdCondition(self, command, args):
    """args: (expression, endTagSymbol)

    Conditionally continues with execution of all content contained by it.
    The condition is false when the expression evaluates to None, to a falsy
    value, or to a sized object of length 0; in that case the tag, its
    content, and everything up to the matching end-tag symbol are skipped.
    """
    result = self.context.evaluate(args[0], self.originalAttributes)
    conditionFalse = 0
    if result is None:
        conditionFalse = 1
    else:
        if not result:
            conditionFalse = 1
        try:
            if len(result) == 0:
                conditionFalse = 1
        except TypeError:
            # Result has no length (e.g. an int or a custom object);
            # truthiness was already decided above. The previous bare
            # `except:` also swallowed KeyboardInterrupt/SystemExit.
            pass
    if conditionFalse:
        # Nothing to output - evaluated to false.
        self.outputTag = 0
        self.tagContent = None
        self.programCounter = self.symbolTable[args[1]]
        return
    self.programCounter += 1
def cmdRepeat(self, command, args):
    """args: (varName, expression, endTagSymbol)

    Repeats anything in the command list. This handler is re-entered once per
    iteration: `self.repeatVariable` being set marks that a repeat is already
    in progress, in which case we advance the iterator; otherwise this is the
    first visit and the repeat is set up from the evaluated expression.
    """
    if (self.repeatVariable is not None):
        # We are already part way through a repeat.
        # Restore any attributes that might have been changed.
        if (self.currentAttributes != self.repeatAttributesCopy):
            self.currentAttributes = copy.copy(self.repeatAttributesCopy)
        self.outputTag = 1
        self.tagContent = None
        self.movePCForward = None
        try:
            self.repeatVariable.increment()
            self.context.setLocal(args[0], self.repeatVariable.getCurrentValue())
            self.programCounter += 1
            return
        except IndexError as e:
            # IndexError from increment() signals the repeat is finished.
            self.repeatVariable = None
            self.context.removeRepeat(args[0])
            # The locals were pushed in context.addRepeat.
            self.context.popLocals()
            self.movePCBack = None
            # Suppress the final close tag and content.
            self.tagContent = None
            self.outputTag = 0
            self.programCounter = self.symbolTable[args[2]]
            # Restore the state of repeatAttributesCopy in case we are nested.
            self.repeatAttributesCopy = self.scopeStack.pop()
            return
    # The first time through this command.
    result = self.context.evaluate(args[1], self.originalAttributes)
    if (result is not None and result == simpleTALES.DEFAULTVALUE):
        # Leave everything un-touched.
        self.programCounter += 1
        return
    try:
        # We have three options: the result is a natural sequence, an
        # iterator, or something that can produce an iterator.
        isSequence = len(result)
        if (isSequence):
            # Only set up if we have a sequence with length.
            self.repeatVariable = simpleTALES.RepeatVariable(result)
        else:
            # Empty sequence: delete the tags and their contents.
            self.outputTag = 0
            self.programCounter = self.symbolTable[args[2]]
            return
    except:
        # NOTE(review): bare except kept intentionally — len() may fail for
        # many reasons here and any failure means "not a natural sequence".
        # Can the result produce an iterator instead?
        if (hasattr(result, "__iter__") and hasattr(result.__iter__, "__call__")):
            # We can get an iterator!
            self.repeatVariable = simpleTALES.IteratorRepeatVariable(result.__iter__())
        elif (hasattr(result, "__next__") and hasattr(result.__next__, "__call__")):
            # Treat as an iterator.
            self.repeatVariable = simpleTALES.IteratorRepeatVariable(result)
        else:
            # Just a plain object, let's not loop.
            # Delete the tags and their contents.
            self.outputTag = 0
            self.programCounter = self.symbolTable[args[2]]
            return
    try:
        curValue = self.repeatVariable.getCurrentValue()
    except IndexError as e:
        # The iterator ran out of values before we started - treat as an
        # empty list.
        self.outputTag = 0
        self.repeatVariable = None
        self.programCounter = self.symbolTable[args[2]]
        return
    # We really do want to repeat - so let's do it.
    self.movePCBack = self.programCounter
    self.context.addRepeat(args[0], self.repeatVariable, curValue)
    # We keep the old state of the repeatAttributesCopy for nested loops.
    self.scopeStack.append(self.repeatAttributesCopy)
    # Keep a copy of the current attributes for this tag.
    self.repeatAttributesCopy = copy.copy(self.currentAttributes)
    self.programCounter += 1
def cmdContent(self, command, args):
    """args: (replaceFlag, structureFlag, expression, endTagSymbol)

    Expands content. Three outcomes depending on the evaluated expression:
    None -> output no content at all; DEFAULTVALUE -> keep the template's
    natural content; anything else -> output the result instead (as
    (structureFlag, value) in self.tagContent). When replaceFlag (args[0])
    is set the surrounding tag itself is suppressed too.
    """
    result = self.context.evaluate(args[2], self.originalAttributes)
    if (result is None):
        if (args[0]):
            # Replace: suppress the tag as well as the content.
            self.outputTag = 0
        # Output none of our content or the existing content, but
        # potentially the tags.
        self.movePCForward = self.symbolTable[args[3]]
        self.programCounter += 1
        return
    elif (not result == simpleTALES.DEFAULTVALUE):
        # We have content, so suppress the natural content and output this!
        if (args[0]):
            self.outputTag = 0
        self.tagContent = (args[1], result)
        self.movePCForward = self.symbolTable[args[3]]
        self.programCounter += 1
        return
    else:
        # Default value: just run through as normal.
        self.programCounter += 1
        return
def cmdAttributes (self, command, args):
""" args: [(attributeName, expression)]
Add, leave, or remove attributes from the start tag
"""
attsToRemove = {}
newAtts = []
for attName, attExpr in args:
resultVal = self.context.evaluate (attExpr, self.originalAttributes)
if (resultVal is None):
# Remove this attribute from the current attributes
attsToRemove [attName]=1
elif (not resultVal == simpleTALES.DEFAULTVALUE):
# We have a value - let's use it!
attsToRemove [attName]=1
if (isinstance (resultVal, str)):
escapedAttVal = resultVal
elif (isinstance (resultVal, bytes)):
# THIS IS NOT A BUG!
# Use Unicode in the Context object if you are not using Ascii
escapedAttVal = str (resultVal, 'ascii')
else:
# THIS IS NOT A BUG!
# Use Unicode in the Context object if you are not using Ascii
escapedAttVal = str (resultVal)
newAtts.append | |
import numpy as np
import pandas as pd
import os
import logging
import random
import pywt
import time
from multiprocessing import Pool
import sys
import tensorflow as tf
import datetime
import argparse
from sklearn.metrics import mean_squared_error, mean_absolute_error
sys.path.append("../")
from core import WANN
# configure the logger
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def mean_absolute_percentage_error(y_true, y_pred):
    """Return the mean absolute percentage error (MAPE) between two arrays.

    MAPE = mean(|(y_true - y_pred) / y_true|) * 100.
    NOTE(review): undefined (division by zero) when ``y_true`` contains zeros.
    """
    relative_errors = np.abs((y_true - y_pred) / y_true)
    return np.mean(relative_errors) * 100
class PowerData(object):
    """Loader/batcher for the household power-consumption voltage series.

    (The original docstring said "stock price data"; `_load_data` actually
    reads the `Voltage` column of household_power_consumption.txt.)

    Produces min-max normalised sliding windows, optionally transformed into
    time-frequency maps with a continuous wavelet transform (CWT), for
    training/validation/test splits. Wavelet maps can be precomputed once and
    cached on disk.
    """

    def __init__(self, ahead_step, train_percentage, val_percentage, time_window,
                 num_classes, num_frequencies, batch_size, model_structure=1, num_slave=20, wavelet_function="cmor",
                 training_slice=0, num_wavelet_channels=2, use_amplitude=0, standardize_method=""):
        # ahead_step: how many steps ahead the target value lies.
        # train/val_percentage: fraction of the series for train / validation.
        # time_window: length of each input window.
        # num_classes: output dimension (1 for regression targets here).
        # num_frequencies: CWT scales per window.
        # num_slave: worker processes for the multiprocessing pool.
        # wavelet_function: pywt wavelet name (complex 'cmor' by default).
        # num_wavelet_channels: 2 => real+imag (or amplitude+phase) channels.
        # use_amplitude: 1 => store (amplitude, phase) instead of (real, imag).
        self.ahead_step = ahead_step
        self.train_percentage = train_percentage
        self.val_percentage = val_percentage
        self.time_window = time_window
        self.num_classes = num_classes
        self.num_frequencies = num_frequencies
        self.batch_size = batch_size
        self.model_structure = model_structure
        self.num_slave = num_slave
        self.wavelet_function = wavelet_function
        self.training_slice = training_slice
        self.num_wavelet_channels = num_wavelet_channels
        self.use_amplitude = use_amplitude
        self.standardize_method = standardize_method
        self.data = self._load_data()
        self.normalized_data = None
        self.train_index = None
        self.val_index = None
        self.test_index = None
        self.min_data = None
        self.max_data = None
        self.train_size = 0
        self.val_size = 0
        self.test_size = 0
        self._split_data()
        # Check to see whether the wavelet transform has been computed
        # already. Structures 1 and 5 are the non-wavelet (e.g. LSTM-only)
        # models — presumably; TODO confirm against the model definitions.
        self.wavelet_x = None
        if self.model_structure not in [1, 5]:
            self.save_load_wavelet()

    @staticmethod
    def _load_data():
        """Read the 2010 voltage series and return it as a 1-D float array."""
        data = pd.read_csv("../data/power/household_power_consumption.txt", sep=";")
        data = data[["Date", "Time", "Voltage"]]
        # '?' marks missing readings in this dataset; drop them.
        data = data[(data["Voltage"] != "?")]
        # data.reset_index(drop=True, inplace=True)
        data["Voltage"] = data["Voltage"].astype("float")
        # data["voltage_shifted"] = data["Voltage"].shift(1)
        # data = data[~(data["voltage_shifted"].isnull())]
        # data["voltage_change"] = data["voltage_shifted"] / data["Voltage"] - 1.0
        # data = data.loc[:100000]  # sample first 100,000 records for testing
        # Keep only records whose Date mentions 2010.
        data = data[(data["Date"].str.contains("2010"))]
        data.reset_index(drop=True, inplace=True)
        return np.array(data["Voltage"])

    def _generate_tuple_list(self, index):
        """Return the cartesian product [(j, item)] of data positions x index.

        NOTE(review): appears unused by the methods visible here, and is
        O(len(data) * len(index)).
        """
        j_item_tuple_list = []
        for j in range(len(self.data)):
            for item in index:
                j_item_tuple_list.append((j, item))
        return j_item_tuple_list

    def _split_data(self):
        """Compute chronological train/val/test index ranges and min-max
        normalise the series.

        Each index i refers to the window data[i : i+time_window] with target
        at i + time_window + ahead_step - 1, hence the offsets below.
        NOTE(review): min/max are taken over the WHOLE series, so test-set
        statistics leak into normalisation.
        """
        train_pos = int(self.train_percentage * self.data.shape[0])
        val_pos = int(self.val_percentage * self.data.shape[0]) + train_pos
        self.train_index = list(range(0, train_pos - self.time_window - self.ahead_step + 1))
        self.val_index = list(range(train_pos - self.time_window - self.ahead_step + 1, val_pos - self.time_window - self.ahead_step + 1))
        self.test_index = list(range(val_pos - self.time_window - self.ahead_step + 1, self.data.shape[0] - self.time_window - self.ahead_step))
        # Data normalization (global min-max to [0, 1]).
        self.max_data = np.max(self.data)
        self.min_data = np.min(self.data)
        self.normalized_data = (self.data - self.min_data) / (self.max_data - self.min_data)
        self.train_size = len(self.train_index)
        self.val_size = len(self.val_index)
        self.test_size = len(self.test_index)
        logger.info(
            "\nSplit data finished!\n"
            "train: {}, val: {}, test: {}".format(
                self.train_size, self.val_size, self.test_size
            )
        )

    def _generate_time_frequency(self, time_series):
        """CWT of one window over scales 1..num_frequencies; returns the
        coefficient matrix (complex for 'cmor'-style wavelets)."""
        wavelet_transformed, freqs = pywt.cwt(time_series, range(1, self.num_frequencies + 1), self.wavelet_function)
        return wavelet_transformed
if __name__ == "__main__":
today = datetime.date.today().strftime("%Y%m%d")
logger.info("Running model on {}.".format(today))
# time.sleep(random.uniform(1, 2))
# for looking checkpoints and log
model_timestamp = int(time.time() * 1000 + int(os.getpid()))
logger.info("Model timestamp: {}".format(str(model_timestamp)))
parser = argparse.ArgumentParser()
parser.add_argument("--ahead_step", help="time step to predict", required=False, default=1, type=int)
parser.add_argument("--time_window", help="time window to look back", required=False, default=5, type=int)
parser.add_argument("--num_frequencies", help="number of Fourier frequencies to decompose the time series",
required=False, default=5, type=int)
parser.add_argument("--batch_size", help="size of batch to train", required=False, default=64, type=int)
parser.add_argument("--learning_rate", help="learning rate", required=False, default=0.001, type=float)
parser.add_argument("--keep_prob", help="dropout rate", required=False, default=0.9, type=float)
parser.add_argument("--ensemble_lstm", help="ensemble lstm weight", required=False, default=0.5, type=float)
parser.add_argument("--max_training_iters", help="max training iterations", required=False, default=50,
type=int)
parser.add_argument("--model_structure", help="choose which model structure to train", required=False, default=1,
type=int)
parser.add_argument("--lstm_units", help="number of hidden states in lstm", required=False, default=10,
type=int)
parser.add_argument("--lstm_num_layers", help="number of lstm layers", required=False, default=1, type=int)
parser.add_argument("--wavelet_function", help="wavelet function", required=False, default="cmor", type=str)
parser.add_argument("--use_amplitude", help="whether to use amplitude or not", required=False, default=0,
type=int)
parser.add_argument("--gpu_fraction", help="how much fraction of gpu to use", required=False, default=0.5,
type=float)
parser.add_argument("--decay_dense_net",
help="decay learning rate only at 50% and 75% percentage of max_training_iters",
required=False, default=False, type=lambda x: (str(x).lower() == "true"))
parser.add_argument("--restore_to_test", help="whether test from restored model", required=False, default=True,
type=lambda x: (str(x).lower() == "true"))
parser.add_argument("--two_dense_layers", help="whether to use one more layer, default false", required=False,
default=False, type=lambda x: (str(x).lower() == "true"))
parser.add_argument("--dense_units", help="number of units in first fully connected dense layer",
required=False, default=1024, type=int)
parser.add_argument("--vgg_num_layers", help="number of vgg layers", required=False, default=19, type=int)
parser.add_argument("--vgg_kernel_size", help="size of vgg kernel", required=False, default=3, type=int)
parser.add_argument("--vgg_initial_filters", help="number of initial filters in VGG", required=False,
default=64, type=int)
parser.add_argument("--training_slice", help="desert training data before training_slice", required=False,
default=0, type=int)
parser.add_argument("--num_wavelet_channels", help="number of wavelet | |
<reponame>nexB/extractcode
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/extractcode for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import logging
import os
from collections import namedtuple
from commoncode import fileutils
from commoncode import filetype
from commoncode import functional
from commoncode.ignore import is_ignored
from typecode import contenttype
from extractcode import all_kinds
from extractcode import regular
from extractcode import package
from extractcode import docs
from extractcode import regular_nested
from extractcode import file_system
from extractcode import patches
from extractcode import special_package
from extractcode import libarchive2
from extractcode import patch
from extractcode import sevenzip
from extractcode import vmimage
from extractcode.uncompress import uncompress_gzip
from extractcode.uncompress import uncompress_bzip2
logger = logging.getLogger(__name__)
TRACE = False
TRACE_DEEP = False
if TRACE:
import sys
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
"""
Archive formats handling. The purpose of this module is to select an extractor
suitable for the accurate extraction of a given kind of archive. An extractor is
a function that can read an archive and extract it to a directory. Multiple
extractors functions can be called in sequence to handle nested archives such
as tar.gz.
A handler contains selection criteria and a list of extractors.
We select an extraction handler based on these criteria:
- file type,
- mime type,
- file extension,
- kind of archive.
Several handlers may be suitable candidates for extraction of a given archive.
Candidates are scored and the best one picked which is typically the most
specific and the one capable of doing the deepest extraction of a given archive.
At the lowest level, archives are processed by standard library code (sometimes
patched) or native code (libarchive, 7zip).
For background on archive and compressed file formats see:
- http://en.wikipedia.org/wiki/List_of_archive_formats
- http://en.wikipedia.org/wiki/List_of_file_formats#Archive_and_compressed
"""
# if strict, all handlers criteria must be matched for a handler to be selected
# Describes one archive-format handler: how to recognize the format and the
# extractor function(s) used, in order, to unpack it.
Handler = namedtuple(
    'Handler',
    [
        'name',  # human-readable handler name (used in logs)
        'filetypes',  # substrings matched against the lowercased filetype
        'mimetypes',  # substrings matched against the mimetype
        'extensions',  # file-name suffix(es) matched case-insensitively
        'kind',  # archive kind constant (higher values are more specific)
        'extractors',  # one or two extractor callables (two => nested archive)
        'strict',  # if True, type, mime AND extension must all match
    ]
)
def can_extract(location):
    """
    Return True if this location can be extracted by some handler.
    """
    # Short-circuit on the first matching handler instead of materializing
    # the whole handler list just to test it for emptiness.
    for _handler in get_handlers(location):
        return True
    return False
def should_extract(location, kinds, ignore_pattern=()):
    """
    Return True if this ``location`` should be extracted based on the provided
    ``kinds`` tuple and an ``ignore_pattern`` list of glob patterns.
    """
    location = os.path.abspath(os.path.expanduser(location))
    ignore_pattern = {
        extension: 'User ignore: Supplied by --ignore'
        for extension in ignore_pattern
    }
    should_ignore = is_ignored(location, ignore_pattern)
    extractor = get_extractor(location, kinds=kinds)
    if TRACE_DEEP:
        logger.debug(
            f' should_extract: extractor: {extractor}, '
            f'should_ignore: {should_ignore}'
        )
    # Return an explicit boolean: the original fell through to an implicit
    # None in the negative case, which is falsy but inconsistent with
    # can_extract() returning True/False.
    return bool(extractor) and not should_ignore
def get_extractor(location, kinds=all_kinds):
    """
    Return an extraction callable for the file at ``location``, or None
    when no extractor applies.

    The search is limited to the ``kinds`` list of archive kinds (see
    extractcode.all_kinds). An extraction callable accepts the location
    of the file to extract and a target_dir to extract into; it returns
    a list of warning messages (possibly empty) and raises on errors.
    Two chained extractors (e.g. for a .tar.gz) are composed into a
    single callable via ``extract_twice``.
    """
    assert location
    location = os.path.abspath(os.path.expanduser(location))
    extractors = get_extractors(location, kinds=kinds)
    if not extractors:
        if TRACE_DEEP:
            logger.debug(f' get_extractor: not extractors: {extractors}')
        return None
    if len(extractors) == 2:
        outer, inner = extractors
        # Compose the two-step extraction into one callable.
        return functional.partial(
            extract_twice,
            extractor1=outer,
            extractor2=inner,
        )
    if len(extractors) == 1:
        return extractors[0]
    return None
def get_extractors(location, kinds=all_kinds):
    """
    Return a list of extractors that can extract the file at
    ``location`` or an empty list.
    """
    handler = get_best_handler(location, kinds)
    if TRACE_DEEP:
        logger.debug(f' get_extractors: handler: {handler}')
    # Explicit conditional instead of the legacy ``x and y or z`` idiom,
    # which is error-prone whenever ``y`` can be falsy.
    if handler and handler.extractors:
        return handler.extractors
    return []
def get_best_handler(location, kinds=all_kinds):
    """
    Return the best handler for the file at `location` or None when the
    location is not a regular file, no handler matches, or the winner's
    kind is not in ``kinds``.
    """
    location = os.path.abspath(os.path.expanduser(location))
    # Only regular files can be archives; directories/specials never match.
    if not filetype.is_file(location):
        return
    handlers = list(get_handlers(location))
    if TRACE_DEEP:
        logger.debug(f' get_best_handler: handlers: {handlers}')
    if not handlers:
        return
    # Score every matching handler, then pick the winner (ties broken in
    # pick_best_handler).
    candidates = list(score_handlers(handlers))
    if TRACE_DEEP:
        logger.debug(f' get_best_handler: candidates: {candidates}')
    if not candidates:
        if TRACE_DEEP:
            logger.debug(f' get_best_handler: candidates: {candidates}')
        return
    picked = pick_best_handler(candidates, kinds=kinds)
    if TRACE_DEEP:
        logger.debug(f' get_best_handler: picked: {picked}')
    return picked
def get_handlers(location):
    """
    Yield tuples of ``(handler, type_matched, mime_matched,
    extension_matched)`` for each archive handler whose criteria match
    the file at ``location``. The three flags are booleans.
    """
    if not filetype.is_file(location):
        return
    T = contenttype.get_type(location)
    ftype = T.filetype_file.lower()
    mtype = T.mimetype_file
    if TRACE_DEEP:
        logger.debug(
            'get_handlers: processing %(location)s: '
            'ftype: %(ftype)s, mtype: %(mtype)s ' % locals())
    for handler in archive_handlers:
        if not handler.extractors:
            continue
        if len(handler.extractors) > 2:
            raise Exception('Maximum level of archive nesting is two.')
        # BUG FIX: compute all three flags on every iteration. Previously
        # extension_matched was assigned only when the handler declared
        # extensions, so it was unbound for the very first extension-less
        # handler and otherwise leaked its value from a prior handler,
        # skewing strict checks and match decisions.
        type_matched = bool(
            handler.filetypes and any(t in ftype for t in handler.filetypes))
        if TRACE_DEEP:
            logger.debug(f' get_handlers: handler.filetypes={handler.filetypes}')
        mime_matched = bool(
            handler.mimetypes and any(m in mtype for m in handler.mimetypes))
        exts = handler.extensions
        extension_matched = bool(exts and location.lower().endswith(exts))
        if TRACE_DEEP:
            # The f-string is already fully formatted: the stray
            # ``% locals()`` of the original added nothing and could crash
            # on values containing a percent sign.
            print(
                f' get_handlers: matched type: {type_matched}, '
                f'mime: {mime_matched}, ext: {extension_matched}'
            )
        if (
            handler.strict
            and not (
                type_matched
                and mime_matched
                and extension_matched
            )
        ):
            if TRACE_DEEP:
                print(f' get_handlers: skip strict: {handler.name}')
            continue
        if type_matched or mime_matched or extension_matched:
            if TRACE_DEEP:
                handler_name = handler.name
                logger.debug(' get_handlers: yielding handler: %(handler_name)r' % locals())
            yield handler, type_matched, mime_matched, extension_matched
def score_handlers(handlers):
    """
    Score candidate handlers. Higher score is better.

    ``handlers`` is an iterable of (handler, type_matched, mime_matched,
    extension_matched) tuples as yielded by get_handlers(). Yields
    (score, handler, extension_matched) for each handler with a
    positive score.

    NOTE: the ``%(name)s % locals()`` logging below depends on the exact
    local variable names (handler_name, score, extension_matched); do
    not rename them without updating the format strings.
    """
    for handler, type_matched, mime_matched, extension_matched in handlers:
        if TRACE_DEEP:
            logger.debug(
                f' score_handlers: handler={handler}, '
                f'type_matched={type_matched}, '
                f'mime_matched={mime_matched}, '
                f'extension_matched={extension_matched}'
            )
        score = 0
        # increment kind value: higher kinds numerical values are more
        # specific by design
        score += handler.kind
        if TRACE_DEEP: logger.debug(f' score_handlers: score += handler.kind {score}')
        # increment score based on matched criteria
        if type_matched and mime_matched and extension_matched:
            # bump for matching all criteria
            score += 10
        if type_matched:
            # type is more specific than mime
            score += 8
        if mime_matched:
            score += 6
        if extension_matched:
            # extensions have little power
            score += 2
        if extension_matched and not (type_matched or mime_matched):
            # extension matched alone should not be extracted:
            # the -100 penalty guarantees the score stays non-positive so
            # the handler is filtered out by the ``score > 0`` guard below.
            score -= 100
        # increment using the number of extractors: higher score means that we
        # have some kind of nested archive that we can extract in one
        # operation, therefore more this is a more specific extraction that we
        # should prefer. For instance we prefer uncompressing and extracting a
        # tgz at once, rather than uncompressing in a first operation then
        # later extracting the plain tar in a second operation
        score += len(handler.extractors)
        if TRACE_DEEP:
            handler_name = handler.name
            logger.debug(
                ' score_handlers: yielding handler: %(handler_name)r, '
                'score: %(score)d, extension_matched: %(extension_matched)r' % locals())
        if score > 0:
            yield score, handler, extension_matched
def pick_best_handler(candidates, kinds):
    """
    Return the best handler with the highest score, or None.

    ``candidates`` is a list of (score, handler, extension_matched)
    tuples. In case of a score tie between the top two candidates keep:
    - the handler with the most extractors (deeper nested extraction),
    - OR the handler whose extensions matched,
    - OR finally the first of the two.
    The winner is returned only when its kind is in ``kinds``.
    """
    def _allowed(handler):
        # Final kind filter: a winner whose kind was not requested is None.
        return handler if handler.kind in kinds else None

    # Sort by decreasing score.
    scored = sorted(candidates, reverse=True)
    if TRACE_DEEP:
        logger.debug(f' pick_best_handler: scored: {scored}')
    if not scored:
        return None
    top_score, top, top_ext = scored[0]
    # Single candidate: nothing to break ties against.
    if len(scored) == 1:
        return _allowed(top)
    runner_up_score, runner_up, runner_up_ext = scored[1]
    # Strictly higher score wins outright.
    if top_score > runner_up_score:
        return _allowed(top)
    # Scores tied: prefer the handler with more extractors.
    if len(top.extractors) != len(runner_up.extractors):
        deeper = top if len(top.extractors) > len(runner_up.extractors) else runner_up
        return _allowed(deeper)
    # Still tied: prefer the candidate whose extensions matched.
    if top_ext and not runner_up_ext:
        return _allowed(top)
    if runner_up_ext and not top_ext:
        return _allowed(runner_up)
    # Could not break the tie: keep the top candidate.
    return _allowed(top)
def extract_twice(location, target_dir, extractor1, extractor2):
"""
Extract a nested compressed archive at `location` to `target_dir` using
the `extractor1` function to a temporary directory then the | |
<reponame>torfsen/sbl2py
#!/usr/bin/env python
# vim:fileencoding=utf8
# Copyright (c) 2014 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Snowball grammar and parser for sbl2py.
"""
import functools
import inspect
import re
import sys
import threading

from pyparsing import *

from sbl2py.ast import *
__all__ = ['parse_string']
# Grammar elements are in all-caps.
# Packrat memoization speeds up this heavily-alternated grammar.
ParserElement.enablePackrat()
#
# PARSER STATE
#
# Thread-local so concurrent parses do not share declaration tables.
# NOTE(review): the attributes assigned here exist only on the importing
# thread; other threads see a bare ``threading.local()`` — confirm parses
# always run on the thread that imported this module.
state = threading.local()
state.strings = [] # Declared string names
state.integers = [] # Declared integer names
state.externals = [] # Declared externals names
state.booleans = [] # Declared boolean names
state.routines = [] # Declared routine names
state.groupings = [] # Declared grouping names
state.stringescapes = [] # Left and right string escape chars
state.stringdefs = {} # String replacement definitions
def reset():
    """
    Reset internal parser state.

    Clears all declaration tables in place (the grammar holds live
    references to these exact list/dict objects, so they must be
    emptied, never rebound).
    """
    for table in (state.strings, state.integers, state.externals,
                  state.booleans, state.routines, state.groupings,
                  state.stringescapes):
        del table[:]
    state.stringdefs.clear()
#
# UTILITY FUNCTIONS
#
def parse_action(f):
    """
    Decorator for pyparsing parse actions to ease debugging.

    pyparsing uses trial & error to deduce the number of arguments a
    parse action accepts; any ``TypeError`` raised *inside* an action
    confuses that mechanism. This wrapper derives the arity via
    reflection instead. If the wrapped function itself raises a
    ``TypeError``, the exception info is remembered and the genuine
    error is re-raised when pyparsing retries the wrapper with fewer
    arguments, so the real failure surfaces instead of pyparsing's
    probing ``TypeError``.
    """
    # getargspec was removed in Python 3.11; prefer getfullargspec when
    # available while staying compatible with older interpreters.
    getspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
    num_args = len(getspec(f).args)
    if num_args > 3:
        raise ValueError('Input function must take at most 3 parameters.')

    @functools.wraps(f)
    def action(*args):
        if len(args) < num_args:
            if action.exc_info:
                # Re-raise the TypeError previously raised by ``f`` itself
                # (raising the stored exception instance is portable,
                # unlike the original Python-2 tuple re-raise).
                raise action.exc_info[1]
        action.exc_info = None
        try:
            # pyparsing passes (string, loc, tokens); hand ``f`` the last
            # ``num_args`` of them, in reversed order.
            return f(*args[:-(num_args + 1):-1])
        except TypeError:
            action.exc_info = sys.exc_info()
            raise

    # BUG FIX: the attribute read above is ``exc_info`` but the original
    # initialized ``action.exc``, causing an AttributeError on the first
    # short-arity call. Initialize the attribute actually used.
    action.exc_info = None
    return action
def make_node_action(cls, ungroup=False, init_args=0):
    """Build a parse action that wraps the tokens in a ``cls`` AST node.

    The first ``init_args`` tokens become constructor arguments; the
    remaining tokens are appended as children. With ``ungroup``, the
    single-element token group is unwrapped first.
    """
    @parse_action
    def action(tokens):
        if ungroup:
            tokens = tokens[0]
        head, tail = tokens[:init_args], tokens[init_args:]
        node = cls(*head)
        node.extend(tail)
        return node
    return action
def add_node_action(pattern, *args, **kwargs):
    """Attach a node-building parse action to ``pattern`` and return it."""
    action = make_node_action(*args, **kwargs)
    pattern.addParseAction(action)
    return pattern
def make_binary_op_list_action(operators, classes, ungroup=False):
    """
    Make parse action for lists of binary operators and operands.

    ``operators`` is a list of the operators (as strings) and
    ``classes`` is a list of the corresponding ``Node`` subclasses.
    The action left-folds ``a op b op c`` into nested binary nodes.
    """
    @parse_action
    def action(tokens):
        if ungroup:
            tokens = tokens[0]
        tokens = list(reversed(tokens))
        left = tokens.pop()
        while tokens:
            token = tokens.pop()
            for op, cls in zip(operators, classes):
                if token == op:
                    node = cls()
                    break
            node.append(left)
            node.append(tokens.pop())
            left = node
        # BUG FIX: return the accumulated tree. The original returned
        # ``node``, which is unbound when the action receives a single
        # operand with no operator; ``left`` is the same object as
        # ``node`` whenever the loop ran at least once.
        return left
    return action
# Parentheses delimit but carry no content, so they are suppressed.
LPAREN = Suppress('(')
RPAREN = Suppress(')')
#
# KEYWORDS
#
# Populated by make_keyword(): one pyparsing Keyword per Snowball
# reserved word.
keywords = []
def make_keyword(s):
    """Create a ``Keyword`` for ``s``, expose it as an upper-case module
    global (e.g. ``maxint`` -> ``MAXINT``) and register it in ``keywords``."""
    kw = Keyword(s)
    globals()[s.upper()] = kw
    keywords.append(kw)
# NOTE(review): this relies on Python 2's eager ``map``; on Python 3 the
# returned map object is never consumed and no keyword would be defined.
map(make_keyword, """maxint minint cursor limit size sizeof or and strings
integers booleans routines externals groupings define as not test try do fail
goto gopast repeat loop atleast insert attach delete hop next setmark tomark
atmark tolimit atlimit setlimit for backwards reverse substring among set unset
non true false backwardmode stringescapes stringdef hex decimal""".split())
# Matches any single reserved word.
KEYWORD = MatchFirst(keywords)
#
# NAMES
#
# An identifier: a letter followed by letters/digits/underscores, but
# never a reserved word. The action unwraps the single matched token.
NAME = ~KEYWORD + Word(alphas, alphanums + '_')
NAME.setParseAction(parse_action(lambda t: t[0]))
#
# DECLARATIONS
#
def make_decl(kw, targets, cls):
    """Build a parser for one ``<kw> ( name ... )`` declaration.

    Parsing records every declared name in each of the ``targets``
    registries (parser state) and emits one ``cls`` AST node per name.
    """
    pattern = Suppress(kw) + LPAREN + ZeroOrMore(NAME) + RPAREN
    @parse_action
    def record(tokens):
        names = list(tokens)
        for registry in targets:
            registry.extend(names)
        return [cls(name) for name in names]
    pattern.setParseAction(record)
    return pattern
# One alternative per declaration keyword. Note that externals are also
# registered as routines: an external is a callable routine.
DECLARATION = MatchFirst([
    make_decl(STRINGS, [state.strings], StringDeclarationNode),
    make_decl(INTEGERS, [state.integers], IntegerDeclarationNode),
    make_decl(BOOLEANS, [state.booleans], BooleanDeclarationNode),
    make_decl(ROUTINES, [state.routines], RoutineDeclarationNode),
    make_decl(EXTERNALS, [state.externals, state.routines],
              ExternalDeclarationNode),
    make_decl(GROUPINGS, [state.groupings], GroupingDeclarationNode),
])
#
# REFERENCES
#
# Characters that may appear in a name; used to require that a reference
# match ends at a word boundary.
reference_chars = set(alphanums + '_')
class Reference(Token):
    """
    A reference to a previously declared variable.

    Behaves like pyparsing's ``Or`` over ``Keyword`` instances, except
    that the candidate list may keep growing after construction.
    """

    def __init__(self, declarations):
        """
        ``declarations`` is a (mutable) list of previously declared
        names. Any of them matches when it occurs as a separate word
        (cf. ``Keyword``); longer candidates are tried first (cf.
        ``Or``). Updates made to ``declarations`` after construction
        are honoured.
        """
        super(Reference, self).__init__()
        self.declarations = declarations

    def __str__(self):
        return 'Reference(%s)' % self.declarations

    def parseImpl(self, instring, loc, doActions=True):
        # Longest candidate first, so "maximum" beats "max".
        candidates = sorted(self.declarations, key=len, reverse=True)
        for candidate in candidates:
            if not instring.startswith(candidate, loc):
                continue
            end = loc + len(candidate)
            # Whole-word match only: the next character (if any) must
            # not be a name character.
            if end == len(instring) or instring[end] not in reference_chars:
                return end, candidate
        raise ParseException("Expected one of " + ", ".join(candidates))
def make_reference(declarations, cls):
    """Build a Reference over ``declarations`` that emits ``cls`` nodes."""
    ref = Reference(declarations)
    ref.setParseAction(parse_action(lambda toks: cls(toks[0])))
    return ref
# Typed references to declared names. STR_REF and CHARS_REF both draw
# from the declared strings but produce different AST node types.
STR_REF = make_reference(state.strings, StringReferenceNode)
CHARS_REF = make_reference(state.strings, CharsReferenceNode)
GROUPING_REF = make_reference(state.groupings, GroupingReferenceNode)
INT_REF = make_reference(state.integers, IntegerReferenceNode)
BOOLEAN_REF = make_reference(state.booleans, BooleanReferenceNode)
ROUTINE_REF = make_reference(state.routines, RoutineReferenceNode)
#
# STRINGS
#
class StringLiteral(Token):
    """
    String literal that supports dynamically changing escape characters.
    """
    def __init__(self, escape_chars, replacements):
        """
        Constructor.

        ``escape_chars`` is a list containing either two or no
        characters. These characters are the left and right escape
        marker, respectively. You may change the content of the list
        afterwards, the parsing code always uses the latest values.

        ``replacements`` is a dict that maps escape sequences to their
        replacements. Later modifications are taken into account.
        """
        super(StringLiteral, self).__init__()
        self.escape_chars = escape_chars
        self.replacements = replacements
    def __str__(self):
        if self.escape_chars:
            return 'StringLiteral("%s%s")' % tuple(self.escape_chars)
        else:
            return 'StringLiteral("")'
    def parseImpl(self, instring, loc, doActions=True):
        # A literal must open with a single quote at the current position.
        if instring[loc] != "'":
            raise ParseException('Expected "\'".')
        # Find the next "'" that actually terminates the literal: a quote
        # does not terminate while an escape sequence opened before it is
        # still unclosed (last left marker after last right marker).
        pos = loc + 1
        while True:
            try:
                candidate = instring.index("'", pos)
            except ValueError:
                raise ParseException('Runaway string literal.')
            if not self.escape_chars:
                break
            left = instring.rfind(self.escape_chars[0], loc, candidate)
            right = instring.rfind(self.escape_chars[1], loc, candidate)
            # rfind returns -1 when absent, so "no escape markers at all"
            # also satisfies right >= left and accepts this quote.
            if right >= left:
                break
            pos = candidate + 1
        s = instring[loc + 1 : candidate]
        if self.escape_chars:
            # Replace escape sequences
            # ``re`` must be imported at module level (see the imports at
            # the top of this file).
            left = re.escape(self.escape_chars[0])
            right = re.escape(self.escape_chars[1])
            # NOTE(review): ``iteritems`` and ``unicode`` below make this
            # class Python-2-only.
            for k, v in self.replacements.iteritems():
                s = re.sub(left + re.escape(k) + right, v, s)
        return candidate + 1, unicode(s)
@parse_action
def stringescapes_cmd_action(tokens):
    """Record the left/right escape markers of a ``stringescapes``
    command and pre-register the built-in escapes for ``'`` and ``[``.
    Produces no tokens."""
    left, right = tokens[0], tokens[1]
    state.stringescapes[:] = left + right
    state.stringdefs["'"] = "'"
    state.stringdefs['['] = '['
    return []
@parse_action
def stringdef_cmd_action(tokens):
    """Handle a ``stringdef`` command: register a replacement string for
    an escape sequence, decoding hex/decimal character codes on request.
    Produces no tokens."""
    key = tokens[0]
    mode = tokens[1]  # None, 'hex' or 'decimal'
    value = tokens[2].string
    if mode == 'hex':
        # NOTE(review): ``unichr`` is Python-2-only.
        value = u''.join(unichr(int(x, 16)) for x in value.split())
    elif mode == 'decimal':
        value = u''.join(unichr(int(x)) for x in value.split())
    state.stringdefs[key] = value
    return []
# The shared literal parser; its escape configuration lives in the parser
# state and is mutated by the stringescapes/stringdef commands below.
STR_LITERAL = StringLiteral(state.stringescapes, state.stringdefs)
STR_LITERAL.setParseAction(parse_action(lambda t: StringLiteralNode(t[0])))
CHAR = Word(printables, exact=1)
STRINGESCAPES_CMD = Suppress(STRINGESCAPES) + CHAR + CHAR
STRINGESCAPES_CMD.setParseAction(stringescapes_cmd_action)
STRINGDEF_CMD = (Suppress(STRINGDEF) + Word(printables) +
                 Optional(HEX | DECIMAL, default=None) + STR_LITERAL)
STRINGDEF_CMD.setParseAction(stringdef_cmd_action)
# A sequence of characters
CHARS = STR_LITERAL | CHARS_REF
#
# INTEGERS
#
@parse_action
def int_literal_action(tokens):
    """Wrap a matched digit-string token in an ``IntegerLiteralNode``."""
    return IntegerLiteralNode(int(tokens[0]))
INT_LITERAL = Word(nums)
INT_LITERAL.setParseAction(int_literal_action)
# Any integer-valued atom: a declared integer name or a literal.
INT = INT_REF | INT_LITERAL
#
# ARITHMETIC EXPRESSIONS
#
# Nullary operands usable inside arithmetic: engine state values plus
# plain integers. SIZEOF must precede SIZE so the longer keyword wins.
EXPRESSION_OPERAND = MatchFirst([
    add_node_action(Suppress(MAXINT), MaxIntNode),
    add_node_action(Suppress(MININT), MinIntNode),
    add_node_action(Suppress(CURSOR), CursorNode),
    add_node_action(Suppress(LIMIT), LimitNode),
    add_node_action(Suppress(SIZEOF) + STR_REF, SizeOfNode),
    add_node_action(Suppress(SIZE), SizeNode),
    INT,
])
negation_action = make_node_action(NegationNode, ungroup=True)
multiplicative_action = make_binary_op_list_action(
    ['*', '/'], [MultiplicationNode, DivisionNode])
additive_action = make_binary_op_list_action(
    ['+', '-'], [AdditionNode, SubtractionNode])
# Standard precedence: unary minus binds tightest, then * and /, then
# + and -, both left-associative.
EXPRESSION = operatorPrecedence(
    EXPRESSION_OPERAND,
    [
        (Suppress('-'), 1, opAssoc.RIGHT,negation_action),
        (oneOf('* /'), 2, opAssoc.LEFT, multiplicative_action),
        (oneOf('+ -'), 2, opAssoc.LEFT, additive_action),
    ]
)
#
# INTEGER COMMANDS
#
def make_int_cmd(op, cls):
    """Build a parser for one ``$name <op> expression`` integer command,
    emitting a ``cls`` node."""
    # ``Combine`` would stringify the result, so instead glue "$" to the
    # name by forbidding intervening whitespace.
    target = Suppress('$') + INT_REF
    target.leaveWhitespace()
    pattern = target + Suppress(op) + EXPRESSION
    return add_node_action(pattern, cls)
# Order matters: multi-character operators ("+=", "==", ">=", ...) must be
# tried before their single-character prefixes ("+", "=", ">", ...).
INT_CMD = MatchFirst([
    make_int_cmd('+=', IntegerIncrementByNode),
    make_int_cmd('*=', IntegerMultiplyByNode),
    make_int_cmd('-=', IntegerDecrementByNode),
    make_int_cmd('/=', IntegerDivideByNode),
    make_int_cmd('==', IntegerEqualNode),
    make_int_cmd('!=', IntegerUnequalNode),
    make_int_cmd('>=', IntegerGreaterOrEqualNode),
    make_int_cmd('<=', IntegerLessOrEqualNode),
    make_int_cmd('=', IntegerAssignNode),
    make_int_cmd('>', IntegerGreaterNode),
    make_int_cmd('<', IntegerLessNode),
])
#
# STRING COMMANDS
#
# Forward-declared because string commands nest recursively.
STR_CMD = Forward()
UNARY_OPERATOR = (NOT | TEST | TRY | DO | FAIL | GOTO | GOPAST | REPEAT |
                  BACKWARDS | (LOOP + EXPRESSION) | (ATLEAST + EXPRESSION))
# One node-building action per unary string operator.
not_action = make_node_action(NotNode)
test_action = make_node_action(TestNode)
try_action = make_node_action(TryNode)
do_action = make_node_action(DoNode)
fail_action = make_node_action(FailNode)
goto_action = make_node_action(GoToNode)
gopast_action = make_node_action(GoPastNode)
repeat_action = make_node_action(RepeatNode)
loop_action = make_node_action(LoopNode)
atleast_action = make_node_action(AtLeastNode)
backwards_action = make_node_action(BackwardsNode)
unary_actions = {
'not':not_action,
'test':test_action,
| |
Washington",37185),
("Parkwood CDP, Washington",6700),
("Pasco city, Washington",71858),
("Pateros city, Washington",601),
("Peaceful Valley CDP, Washington",4032),
("Pe Ell town, Washington",531),
("Picnic Point CDP, Washington",8976),
("Pine Grove CDP, Washington",122),
("Point Roberts CDP, Washington",1191),
("Pomeroy city, Washington",1289),
("Port Angeles city, Washington",19615),
("Port Angeles East CDP, Washington",3328),
("Porter CDP, Washington",229),
("Port Gamble Tribal Community CDP, Washington",931),
("Port Hadlock-Irondale CDP, Washington",3574),
("Port Ludlow CDP, Washington",2771),
("Port Orchard city, Washington",13788),
("Port Townsend city, Washington",9428),
("Poulsbo city, Washington",10301),
("Prairie Heights CDP, Washington",4256),
("Prairie Ridge CDP, Washington",12550),
("Prescott city, Washington",308),
("Prosser city, Washington",6076),
("Puget Island CDP, Washington",836),
("Pullman city, Washington",33050),
("Purdy CDP, Washington",1682),
("Puyallup city, Washington",40305),
("Queets CDP, Washington",238),
("Quilcene CDP, Washington",480),
("Qui-nai-elt Village CDP, Washington",59),
("Quincy city, Washington",7531),
("Raft Island CDP, Washington",430),
("Rainier city, Washington",2346),
("Ravensdale CDP, Washington",1873),
("Raymond city, Washington",2882),
("Reardan town, Washington",584),
("Redmond city, Washington",63197),
("Renton city, Washington",101054),
("Republic city, Washington",1340),
("Richland city, Washington",55043),
("Ridgefield city, Washington",7178),
("Ritzville city, Washington",1628),
("Riverbend CDP, Washington",2302),
("River Road CDP, Washington",894),
("Riverside town, Washington",309),
("Rochester CDP, Washington",3232),
("Rockford town, Washington",437),
("Rock Island city, Washington",1195),
("Rockport CDP, Washington",49),
("Rocky Point CDP, Washington",1579),
("Ronald CDP, Washington",164),
("Roosevelt CDP, Washington",161),
("Rosalia town, Washington",568),
("Rosburg CDP, Washington",323),
("Rosedale CDP, Washington",4512),
("Roslyn city, Washington",644),
("Roy city, Washington",692),
("Royal City city, Washington",1517),
("Ruston town, Washington",1233),
("Ryderwood CDP, Washington",454),
("St. John town, Washington",567),
("Salmon Creek CDP, Washington",21299),
("Sammamish city, Washington",64049),
("Santiago CDP, Washington",30),
("Satsop CDP, Washington",624),
("Seabeck CDP, Washington",1060),
("SeaTac city, Washington",28925),
("Seattle city, Washington",708823),
("Sedro-Woolley city, Washington",11579),
("Sekiu CDP, Washington",62),
("Selah city, Washington",7766),
("Sequim city, Washington",7100),
("Shadow Lake CDP, Washington",2286),
("Shelton city, Washington",9996),
("Shoreline city, Washington",56020),
("Silvana CDP, Washington",13),
("Silverdale CDP, Washington",21071),
("Silver Firs CDP, Washington",22052),
("Sisco Heights CDP, Washington",2518),
("Skamokawa Valley CDP, Washington",304),
("Skokomish CDP, Washington",655),
("Skykomish town, Washington",114),
("Snohomish city, Washington",9875),
("Snoqualmie city, Washington",13317),
("Snoqualmie Pass CDP, Washington",316),
("Soap Lake city, Washington",1445),
("South Bend city, Washington",1592),
("South Cle Elum town, Washington",582),
("South Creek CDP, Washington",2716),
("South Hill CDP, Washington",58164),
("South Prairie town, Washington",280),
("South Wenatchee CDP, Washington",2011),
("Southworth CDP, Washington",2073),
("Spanaway CDP, Washington",32150),
("Spangle city, Washington",189),
("Spokane city, Washington",214804),
("Spokane Valley city, Washington",96070),
("Sprague city, Washington",452),
("Springdale town, Washington",395),
("Stansberry Lake CDP, Washington",2768),
("Stanwood city, Washington",6973),
("Starbuck town, Washington",111),
("Startup CDP, Washington",663),
("Steilacoom town, Washington",6270),
("Steptoe CDP, Washington",177),
("Stevenson city, Washington",1503),
("Sudden Valley CDP, Washington",7999),
("Sultan city, Washington",5040),
("Sumas city, Washington",1757),
("Summit CDP, Washington",8435),
("Summit View CDP, Washington",7621),
("Summitview CDP, Washington",1047),
("Sumner city, Washington",9898),
("Sunday Lake CDP, Washington",655),
("Sunnyside city, Washington",16442),
("Sunnyslope CDP, Washington",3815),
("Suquamish CDP, Washington",4310),
("Swede Heaven CDP, Washington",952),
("Tacoma city, Washington",210103),
("Taholah CDP, Washington",665),
("Tampico CDP, Washington",250),
("Tanglewilde CDP, Washington",6581),
("Tanner CDP, Washington",885),
("Tekoa city, Washington",742),
("Tenino city, Washington",1695),
("Terrace Heights CDP, Washington",8273),
("Thorp CDP, Washington",246),
("Three Lakes CDP, Washington",3560),
("Tieton city, Washington",1541),
("Tokeland CDP, Washington",291),
("Toledo city, Washington",602),
("Tonasket city, Washington",1057),
("Toppenish city, Washington",8906),
("Torboy CDP, Washington",48),
("Touchet CDP, Washington",509),
("Town and Country CDP, Washington",4728),
("Tracyton CDP, Washington",5933),
("Trout Lake CDP, Washington",632),
("Tukwila city, Washington",20198),
("Tumwater city, Washington",22500),
("Twin Lakes CDP, Washington",71),
("Twisp town, Washington",1153),
("Union CDP, Washington",412),
("Union Gap city, Washington",6142),
("Union Hill-Novelty Hill CDP, Washington",22034),
("Uniontown town, Washington",382),
("University Place city, Washington",32907),
("Upper Elochoman CDP, Washington",100),
("Vader city, Washington",923),
("Valley CDP, Washington",164),
("Vancouver city, Washington",178413),
("Vantage CDP, Washington",23),
("Vashon CDP, Washington",10036),
("Vaughn CDP, Washington",552),
("Venersborg CDP, Washington",4349),
("Verlot CDP, Washington",475),
("Waitsburg city, Washington",1188),
("Walla Walla city, Washington",32731),
("Walla Walla East CDP, Washington",1929),
("Waller CDP, Washington",7894),
("Wallula CDP, Washington",361),
("Wapato city, Washington",5041),
("Warden city, Washington",2744),
("Warm Beach CDP, Washington",2699),
("Washougal city, Washington",15484),
("Washtucna town, Washington",241),
("Waterville town, Washington",1415),
("Wauna CDP, Washington",4323),
("Waverly town, Washington",94),
("Wenatchee city, Washington",34094),
("West Clarkston-Highland CDP, Washington",6081),
("West Pasco CDP, Washington",1478),
("Westport city, Washington",2091),
("West Richland city, Washington",14187),
("West Side Highway CDP, Washington",5877),
("Whidbey Island Station CDP, Washington",1930),
("White Center CDP, Washington",15852),
("White Salmon city, Washington",2485),
("White Swan CDP, Washington",996),
("Wilbur town, Washington",807),
("Wilderness Rim CDP, Washington",1457),
("Wilkeson town, Washington",440),
("Willapa CDP, Washington",182),
("Wilson Creek town, Washington",215),
("Winlock city, Washington",1725),
("Winthrop town, Washington",407),
("Wishram CDP, Washington",499),
("Wollochet CDP, Washington",6390),
("Woodinville city, Washington",12026),
("Woodland city, Washington",5912),
("Woods Creek CDP, Washington",5907),
("Woodway city, Washington",1189),
("Yacolt town, Washington",1575),
("Yakima city, Washington",93416),
("Yarrow Point town, Washington",1168),
("Yelm city, Washington",8772),
("Zillah city, Washington",3118),
("Accoville CDP, West Virginia",938),
("Addison (Webster Springs) town, West Virginia",869),
("Albright town, West Virginia",311),
("Alderson town, West Virginia",1178),
("Alum Creek CDP, West Virginia",1744),
("Amherstdale CDP, West Virginia",503),
("Anawalt town, West Virginia",100),
("Anmoore town, West Virginia",645),
("Ansted town, West Virginia",1437),
("Apple Grove CDP, West Virginia",51),
("Arbovale CDP, West Virginia",131),
("Athens town, West Virginia",1043),
("Auburn town, West Virginia",117),
("Aurora CDP, West Virginia",185),
("Bancroft town, West Virginia",482),
("Barboursville village, West Virginia",4185),
("Barrackville town, West Virginia",1330),
("Bartley CDP, West Virginia",54),
("Bartow CDP, West Virginia",78),
("Bath (Berkeley Springs) town, West Virginia",674),
("Bayard town, West Virginia",217),
("Beards Fork CDP, West Virginia",137),
("Beaver CDP, West Virginia",1384),
("Beckley city, West Virginia",16697),
("Beech Bottom village, West Virginia",645),
("Belington town, West Virginia",1925),
("Belle town, West Virginia",1230),
("Belmont city, West Virginia",1040),
("Belva CDP, West Virginia",66),
("Benwood city, West Virginia",1440),
("Bergoo CDP, West Virginia",57),
("Berwind CDP, West Virginia",422),
("Bethany town, West Virginia",1039),
("Bethlehem village, West Virginia",2522),
("Beverly town, West Virginia",687),
("Big Chimney CDP, West Virginia",454),
("Big Creek CDP, West Virginia",116),
("Big Sandy CDP, West Virginia",148),
("Birch River CDP, West Virginia",118),
("Blacksville town, West Virginia",118),
("Blennerhassett CDP, West Virginia",3319),
("Bluefield city, West Virginia",10022),
("Bluewell CDP, West Virginia",2277),
("Boaz CDP, West Virginia",1585),
("Bolivar town, West Virginia",1270),
("Bolt CDP, West Virginia",893),
("Boomer CDP, West Virginia",644),
("Bowden CDP, West Virginia",0),
("Bradley CDP, West Virginia",1888),
("Bradshaw town, West Virginia",230),
("Bramwell town, West Virginia",320),
("Brandonville town, West Virginia",130),
("Brandywine CDP, West Virginia",123),
("Brenton CDP, West Virginia",207),
("Bridgeport city, West Virginia",8466),
("Brookhaven CDP, West Virginia",5900),
("Bruceton Mills town, West Virginia",48),
("Bruno CDP, West Virginia",1126),
("Brush Fork CDP, West Virginia",1499),
("Buckhannon city, West Virginia",5566),
("Bud CDP, West Virginia",417),
("Buffalo town, West Virginia",1261),
("Burlington CDP, West Virginia",115),
("Burnsville town, West Virginia",618),
("Cairo town, West Virginia",295),
("Camden-on-Gauley town, West Virginia",190),
("Cameron city, West Virginia",609),
("Capon Bridge town, West Virginia",453),
("Carolina CDP, West Virginia",569),
("Carpendale town, West Virginia",723),
("Cass CDP, West Virginia",38),
("Cassville CDP, West Virginia",1378),
("Cedar Grove town, West Virginia",625),
("Century CDP, West Virginia",97),
("Ceredo city, West Virginia",1245),
("Chapmanville town, West Virginia",1171),
("Charleston city, West Virginia",48734),
("Charles Town city, West Virginia",5885),
("Charlton Heights CDP, West Virginia",309),
("Chattaroy CDP, West Virginia",627),
("Chauncey CDP, West Virginia",501),
("Cheat Lake CDP, West Virginia",9337),
("Chelyan CDP, West Virginia",1001),
("Chesapeake town, West Virginia",1899),
("Chester city, West Virginia",2314),
("Clarksburg city, West Virginia",15830),
("Clay town, West Virginia",556),
("Clearview village, West Virginia",595),
("Clendenin town, West Virginia",1064),
("Coal City CDP, West Virginia",1810),
("Coal Fork CDP, West Virginia",1335),
("Comfort CDP, West Virginia",227),
("Corinne CDP, West Virginia",289),
("Covel CDP, West Virginia",25),
("Cowen town, West Virginia",636),
("Crab Orchard CDP, West Virginia",1904),
("Craigsville CDP, West Virginia",2213),
("Cross Lanes CDP, West Virginia",9619),
("Crum CDP, West Virginia",102),
("Crumpler CDP, West Virginia",162),
("Cucumber CDP, West Virginia",65),
("Culloden CDP, West Virginia",3457),
("Dailey CDP, West Virginia",28),
("Daniels CDP, West Virginia",1929),
("Danville town, West Virginia",771),
("Davis town, West Virginia",631),
("Davy town, West Virginia",248),
("Deep Water CDP, West Virginia",306),
("Delbarton town, West Virginia",520),
("Despard CDP, West Virginia",966),
("Dixie CDP, West Virginia",224),
("Dunbar city, West Virginia",7462),
("Durbin town, West Virginia",210),
("East Bank town, West Virginia",845),
("East Dailey CDP, West Virginia",789),
("Eccles CDP, West Virginia",68),
("Eleanor town, West Virginia",1521),
("Elizabeth town, West Virginia",809),
("Elk Garden town, West Virginia",214),
("Elkins city, West Virginia",7115),
("Elkview CDP, West Virginia",1609),
("Ellenboro town, West Virginia",351),
("Enterprise CDP, West Virginia",953),
("Fairlea CDP, West Virginia",1664),
("Fairmont city, West Virginia",18507),
("Fairview town, West Virginia",409),
("Falling Spring town, West Virginia",184),
("Falling Waters CDP, West Virginia",1485),
("Falls View CDP, West Virginia",178),
("Farmington town, West Virginia",433),
("Fayetteville town, West Virginia",2838),
("Fenwick CDP, West Virginia",162),
("Flatwoods town, West Virginia",257),
("Flemington town, West Virginia",397),
("Follansbee city, West Virginia",2806),
("Fort Ashby CDP, West Virginia",1461),
("Fort Gay town, West Virginia",924),
("Frank CDP, West Virginia",67),
("Franklin town, West Virginia",673),
("Friendly town, West Virginia",125),
("Gallipolis Ferry CDP, West Virginia",852),
("Galloway CDP, West Virginia",81),
("Gary city, West Virginia",733),
("Gassaway town, West Virginia",1019),
("Gauley Bridge town, West Virginia",669),
("Ghent CDP, West Virginia",525),
("Gilbert town, West Virginia",408),
("Gilbert Creek CDP, West Virginia",1030),
("Glasgow town, West Virginia",755),
("Glen Dale city, West Virginia",1505),
("Glen Ferris CDP, West Virginia",195),
("Glen Fork CDP, West Virginia",406),
("Glen Jean CDP, West Virginia",160),
("Glenville town, West Virginia",1692),
("Glen White CDP, West Virginia",302),
("Grafton city, West Virginia",5091),
("Grantsville town, West Virginia",538),
("Grant Town town, West Virginia",582),
("Granville town, West Virginia",2567),
("Great Cacapon CDP, West Virginia",307),
("Green Bank CDP, West Virginia",257),
("Green Spring CDP, West Virginia",163),
("Greenview CDP, West Virginia",461),
("Gypsy CDP, West Virginia",222),
("Hambleton town, West Virginia",207),
("Hamlin town, West Virginia",1533),
("Handley town, West Virginia",194),
("Harman town, West Virginia",88),
("Harpers Ferry town, West Virginia",283),
("Harrisville town, West Virginia",2256),
("Hartford City town, West Virginia",722),
("Harts CDP, West Virginia",551),
("Hedgesville town, West Virginia",299),
("Helen CDP, West Virginia",13),
("Helvetia CDP, West Virginia",80),
("Henderson town, West Virginia",342),
("Hendricks town, West Virginia",286),
("Henlawson CDP, West Virginia",90),
("Hepzibah CDP, West Virginia",572),
("Hico CDP, West Virginia",253),
("Hillsboro town, West Virginia",183),
("Hilltop CDP, West Virginia",426),
("Hinton city, West Virginia",2768),
("Holden CDP, West Virginia",649),
("Hometown CDP, West Virginia",576),
("Hooverson Heights CDP, West Virginia",2561),
("Hundred town, West Virginia",302),
("Huntersville CDP, West Virginia",117),
("Huntington city, West Virginia",47420),
("Hurricane city, West Virginia",6477),
("Huttonsville town, West Virginia",181),
("Iaeger town, West Virginia",229),
("Idamay CDP, West Virginia",544),
("Inwood CDP, West Virginia",3364),
("Itmann CDP, West Virginia",286),
("Jacksonburg CDP, West Virginia",41),
("Jane Lew town, West Virginia",391),
("Jefferson CDP, West Virginia",820),
("Junior town, West Virginia",388),
("Justice CDP, West Virginia",589),
("Kenova city, West Virginia",3095),
("Kermit town, West Virginia",257),
("Keyser city, West Virginia",5119),
("Keystone city, West Virginia",115),
("Kimball town, West Virginia",130),
("Kimberly CDP, West Virginia",148),
("Kincaid CDP, West Virginia",68),
("Kingwood city, West Virginia",2943),
("Kistler CDP, West Virginia",244),
("Kopperston CDP, West Virginia",486),
("Lashmeet CDP, West Virginia",408),
("Lavalette CDP, West Virginia",1347),
("Leon town, West Virginia",221),
("Lesage CDP, West Virginia",1022),
("Lester town, West Virginia",541),
("Lewisburg city, West Virginia",3904),
("Littleton CDP, West Virginia",131),
("Logan city, West Virginia",1790),
("Lost Creek town, West Virginia",551),
("Lubeck CDP, West Virginia",1258),
("Lumberport town, West Virginia",801),
("Mabscott town, West Virginia",1466),
("MacArthur CDP, West Virginia",1590),
("McConnell CDP, West Virginia",585),
("McMechen city, West Virginia",1915),
("Madison city, West Virginia",2860),
("Mallory CDP, West Virginia",1049),
("Man town, West Virginia",1009),
("Mannington city, West Virginia",1623),
("Marlinton town, West Virginia",1152),
("Marmet city, West Virginia",1440),
("Martinsburg city, West Virginia",17497),
("Mason town, West Virginia",920),
("Masontown town, West Virginia",618),
("Matewan town, West Virginia",467),
("Matheny CDP, West Virginia",355),
("Matoaka town, West Virginia",188),
("Maybeury CDP, West Virginia",93),
("Meadow Bridge town, West Virginia",347),
("Middlebourne town, West Virginia",684),
("Middleway CDP, West Virginia",431),
("Mill Creek town, West Virginia",737),
("Milton town, West Virginia",2612),
("Mineralwells CDP, West Virginia",1866),
("Mitchell Heights town, West Virginia",458),
("Monaville CDP, West Virginia",251),
("Monongah town, West Virginia",1089),
("Montcalm CDP, West Virginia",475),
("Montgomery city, West Virginia",1738),
("Montrose town, West Virginia",170),
("Moorefield town, West Virginia",3201),
("Morgantown city, West Virginia",30539),
("Moundsville city, West Virginia",8651),
("Mount Carbon CDP, | |
<gh_stars>0
import os
import os.path
import sys
import h5py
import numpy as np
from numpy.lib.function_base import hamming
import ASCAD_test_models
import tensorflow as tf
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Flatten, Dense, Dropout, Input, Conv1D, MaxPooling1D, GlobalAveragePooling1D, GlobalMaxPooling1D, AveragePooling1D, BatchNormalization, Activation, Add, add, LSTM
from tensorflow.keras import backend as K
from tensorflow.keras.applications.imagenet_utils import decode_predictions
from tensorflow.keras.applications.imagenet_utils import preprocess_input
from tensorflow.keras.optimizers import RMSprop, Adam
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import load_model
def check_file_exists(file_path):
    """Terminate the program if *file_path* does not exist.

    Arguments:
        file_path -- path to check; normalized with os.path.normpath first.

    Returns:
        None on success; calls sys.exit(-1) when the path is missing.
    """
    file_path = os.path.normpath(file_path)
    # `not os.path.exists(...)` instead of the un-Pythonic `== False`.
    if not os.path.exists(file_path):
        print("Error: provided file path '%s' does not exist!" % file_path)
        sys.exit(-1)
    return
# MLP Best model (6 layers of 200 units)
def mlp_best(node=200, layer_nb=6, input_dim=1400, classes=256):
    """Build and compile the "MLP best" architecture.

    Arguments:
        node -- width of every hidden Dense layer.
        layer_nb -- total layer count; layer_nb - 2 extra hidden layers are added.
        input_dim -- number of samples per input trace.
        classes -- output softmax width.

    Returns:
        A compiled tensorflow.keras Sequential model.
    """
    layers = [Dense(node, input_dim=input_dim, activation='relu')]
    layers.extend(Dense(node, activation='relu') for _ in range(layer_nb - 2))
    layers.append(Dense(classes, activation='softmax'))
    model = Sequential(layers)
    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSprop(lr=0.00001), metrics=['accuracy'])
    return model
# CNN Best model
def cnn_best(classes=256, input_dim=700):
    """VGG16-inspired 1D CNN ("CNN best") for side-channel traces.

    Arguments:
        classes -- output softmax width (default 256).
        input_dim -- number of samples per input trace (default 700).

    Returns:
        A compiled tensorflow.keras Model named 'cnn_best'.
    """
    trace_input = Input(shape=(input_dim, 1))
    x = trace_input
    # Five conv/pool stages with VGG-like doubling filter counts;
    # layer names match the original block1..block5 scheme.
    for stage, n_filters in enumerate((64, 128, 256, 512, 512), start=1):
        x = Conv1D(n_filters, 11, activation='relu', padding='same',
                   name='block%d_conv1' % stage)(x)
        x = AveragePooling1D(2, strides=2, name='block%d_pool' % stage)(x)
    # Classification head.
    x = Flatten(name='flatten')(x)
    x = Dense(4096, activation='relu', name='fc1')(x)
    x = Dense(4096, activation='relu', name='fc2')(x)
    x = Dense(classes, activation='softmax', name='predictions')(x)
    model = Model(trace_input, x, name='cnn_best')
    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSprop(lr=0.00001), metrics=['accuracy'])
    return model
# CNN Best model
def cnn_best2(classes=256, input_dim=1400):
    """Compact selu/dropout 1D CNN ("CNN best 2"), VGG16-inspired.

    Arguments:
        classes -- output softmax width (default 256).
        input_dim -- number of samples per input trace (default 1400).

    Returns:
        A compiled tensorflow.keras Model named 'cnn_best2' (also prints a summary).
    """
    trace_input = Input(shape=(input_dim, 1))
    x = trace_input
    for stage, n_filters in enumerate((16, 32, 64, 128), start=1):
        if stage == 1:
            # Only the first stage downsamples with a strided convolution
            # and pools with average pooling; later stages use max pooling.
            x = Conv1D(n_filters, 6, strides=2, activation='selu',
                       padding='same', name='block%d_conv1' % stage)(x)
        else:
            x = Conv1D(n_filters, 6, activation='selu',
                       padding='same', name='block%d_conv1' % stage)(x)
        x = Dropout(0.05)(x)
        pool_cls = AveragePooling1D if stage == 1 else MaxPooling1D
        x = pool_cls(2, strides=3, name='block%d_pool' % stage)(x)
    # Classification head with L2 activity regularization on the logits.
    x = Flatten(name='flatten')(x)
    x = Dense(classes, activation='softmax', name='predictions',
              activity_regularizer=tf.keras.regularizers.l2(l2=0.01))(x)
    model = Model(trace_input, x, name='cnn_best2')
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=0.001), metrics=['accuracy'])
    model.summary()
    return model
def cnn_best2_tmp(classes=256, input_dim=1400):
    """VGG16-inspired 1D CNN — temporary/experimental variant of cnn_best.

    Arguments:
        classes -- output softmax width (default 256).
        input_dim -- number of samples per input trace (default 1400).

    Returns:
        A compiled tensorflow.keras Model named 'cnn_best2_tmp'.
    """
    trace_input = Input(shape=(input_dim, 1))
    x = trace_input
    # Five conv/pool stages; only the first convolution is strided.
    for stage, n_filters in enumerate((64, 128, 256, 512, 512), start=1):
        if stage == 1:
            x = Conv1D(n_filters, 11, strides=2, activation='relu',
                       padding='same', name='block%d_conv1' % stage)(x)
        else:
            x = Conv1D(n_filters, 11, activation='relu',
                       padding='same', name='block%d_conv1' % stage)(x)
        x = AveragePooling1D(2, strides=2, name='block%d_pool' % stage)(x)
    # Classification head.
    x = Flatten(name='flatten')(x)
    x = Dense(4096, activation='relu', name='fc1')(x)
    x = Dense(4096, activation='relu', name='fc2')(x)
    x = Dense(classes, activation='softmax', name='predictions')(x)
    # Fix: the model was named 'cnn_best2' (copy-paste), colliding with the
    # model produced by cnn_best2(); use this function's own name instead.
    model = Model(trace_input, x, name='cnn_best2_tmp')
    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSprop(lr=0.00001), metrics=['accuracy'])
    return model
def cnn_dense(classes=256, input_dim=1400):
    """VGG16-inspired 1D CNN with an extra fully-connected layer (fc3).

    Arguments:
        classes -- output softmax width (default 256).
        input_dim -- number of samples per input trace (default 1400).

    Returns:
        A compiled tensorflow.keras Model named 'cnn_dense'.
    """
    trace_input = Input(shape=(input_dim, 1))
    x = trace_input
    # Five conv/pool stages; only the first convolution is strided.
    for stage, n_filters in enumerate((64, 128, 256, 512, 512), start=1):
        if stage == 1:
            x = Conv1D(n_filters, 11, strides=2, activation='relu',
                       padding='same', name='block%d_conv1' % stage)(x)
        else:
            x = Conv1D(n_filters, 11, activation='relu',
                       padding='same', name='block%d_conv1' % stage)(x)
        x = AveragePooling1D(2, strides=2, name='block%d_pool' % stage)(x)
    # Classification head: three 4096-wide layers instead of the usual two.
    x = Flatten(name='flatten')(x)
    x = Dense(4096, activation='relu', name='fc1')(x)
    x = Dense(4096, activation='relu', name='fc2')(x)
    x = Dense(4096, activation='relu', name='fc3')(x)
    x = Dense(classes, activation='softmax', name='predictions')(x)
    model = Model(trace_input, x, name='cnn_dense')
    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSprop(lr=0.00001), metrics=['accuracy'])
    return model
def cnn_thin(classes=256, input_dim=1400):
    """VGG16-inspired 1D CNN with a thinner head (a single fc layer).

    Arguments:
        classes -- output softmax width (default 256).
        input_dim -- number of samples per input trace (default 1400).

    Returns:
        A compiled tensorflow.keras Model named 'cnn_thin' (also prints a summary).
    """
    trace_input = Input(shape=(input_dim, 1))
    x = trace_input
    # Five conv/pool stages; only the first convolution is strided.
    for stage, n_filters in enumerate((64, 128, 256, 512, 512), start=1):
        if stage == 1:
            x = Conv1D(n_filters, 11, strides=2, activation='relu',
                       padding='same', name='block%d_conv1' % stage)(x)
        else:
            x = Conv1D(n_filters, 11, activation='relu',
                       padding='same', name='block%d_conv1' % stage)(x)
        x = AveragePooling1D(2, strides=2, name='block%d_pool' % stage)(x)
    # Thin classification head.
    x = Flatten(name='flatten')(x)
    x = Dense(4096, activation='relu', name='fc1')(x)
    x = Dense(classes, activation='softmax', name='predictions')(x)
    model = Model(trace_input, x, name='cnn_thin')
    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSprop(lr=0.00001), metrics=['accuracy'])
    model.summary()
    return model
def cnn_lstm(classes=256, input_dim=1400):
    """VGG-style 1D CNN feature extractor followed by an LSTM head.

    Arguments:
        classes -- output softmax width (default 256).
        input_dim -- number of samples per input trace (default 1400).

    Returns:
        A compiled tensorflow.keras Model named 'cnn_lstm'.
    """
    trace_input = Input(shape=(input_dim, 1))
    x = trace_input
    # Five conv/pool stages; only the first convolution is strided.
    for stage, n_filters in enumerate((64, 128, 256, 512, 512), start=1):
        if stage == 1:
            x = Conv1D(n_filters, 11, strides=2, activation='relu',
                       padding='same', name='block%d_conv1' % stage)(x)
        else:
            x = Conv1D(n_filters, 11, activation='relu',
                       padding='same', name='block%d_conv1' % stage)(x)
        x = AveragePooling1D(2, strides=2, name='block%d_pool' % stage)(x)
    # LSTM (no return_sequences) emits only the final hidden state, so the
    # following Flatten is effectively a pass-through; kept so the layer
    # naming matches the sibling architectures.
    x = LSTM(5120)(x)
    x = Flatten(name='flatten')(x)
    x = Dense(4096, activation='relu', name='fc1')(x)
    x = Dense(classes, activation='softmax', name='predictions')(x)
    # Fix: the model was mis-named 'cnn_thin' (copy-paste from cnn_thin()).
    model = Model(trace_input, x, name='cnn_lstm')
    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSprop(lr=0.00001), metrics=['accuracy'])
    return model
# Resnet layer sub-function of ResNetSCA
def resnet_layer(inputs,
                 num_filters=16,
                 kernel_size=11,
                 strides=1,
                 activation='relu',
                 batch_normalization=True,
                 conv_first=True):
    """Conv1D / BatchNorm / Activation building block for the ResNet.

    Arguments:
        inputs -- input tensor.
        num_filters -- number of Conv1D filters.
        kernel_size -- Conv1D kernel length.
        strides -- Conv1D stride.
        activation -- activation name, or None to skip the Activation layer.
        batch_normalization -- whether to insert BatchNormalization.
        conv_first -- if True apply conv -> bn -> act, else bn -> act -> conv.

    Returns:
        The output tensor of the block.
    """
    conv = Conv1D(num_filters,
                  kernel_size=kernel_size,
                  strides=strides,
                  padding='same',
                  kernel_initializer='he_normal')

    def bn_act(tensor):
        # Optional BatchNorm followed by optional activation.
        if batch_normalization:
            tensor = BatchNormalization()(tensor)
        if activation is not None:
            tensor = Activation(activation)(tensor)
        return tensor

    if conv_first:
        return bn_act(conv(inputs))
    return conv(bn_act(inputs))
# Branch of ResNetSCA that predicts the multiplicative mask alpha
def alpha_branch(x):
    """256-way softmax head predicting the multiplicative mask alpha."""
    h = Dense(1024, activation='relu', name='fc1_alpha')(x)
    h = BatchNormalization()(h)
    return Dense(256, activation="softmax", name='alpha_output')(h)
# Branch of ResNetSCA that predicts the additive mask beta
def beta_branch(x):
    """256-way softmax head predicting the additive mask beta."""
    h = Dense(1024, activation='relu', name='fc1_beta')(x)
    h = BatchNormalization()(h)
    return Dense(256, activation="softmax", name='beta_output')(h)
# Branch of ResNetSCA that predicts the masked sbox output for byte i
def sbox_branch(x, i):
    """256-way softmax head predicting the masked sbox output of byte *i*."""
    h = Dense(1024, activation='relu', name='fc1_sbox_' + str(i))(x)
    h = BatchNormalization()(h)
    return Dense(256, activation="softmax", name='sbox_' + str(i) + '_output')(h)
# Branch of ResNetSCA that predicts the permutation index for byte i
def permind_branch(x, i):
    """16-way softmax head predicting the permutation index of byte *i*.

    NOTE(review): the layer name keeps the original 'fc1_pemind_' spelling
    (sic) so that previously saved weights still load by name.
    """
    h = Dense(1024, activation='relu', name='fc1_pemind_' + str(i))(x)
    h = BatchNormalization()(h)
    return Dense(16, activation="softmax", name='permind_' + str(i) + '_output')(h)
# Generic function that produce the ResNetSCA architecture.
# If without_permind option is set to 1, the ResNetSCA model is built without permindices branch
def resnet_v1(input_shape, depth, num_classes=256, without_permind=0):
    """Build and compile the multi-head ResNetSCA model.

    The trunk is a 1D ResNet of 9 stacks of residual units; the trunk output
    feeds the alpha/beta mask heads plus 16 sbox heads and (optionally)
    16 permutation-index heads.

    Arguments:
        input_shape -- shape of one input trace, e.g. (length, 1).
        depth -- total depth; must satisfy depth == 18n + 1.
        num_classes -- NOTE(review): currently unused by the body; the heads
            hard-code their own output widths (256 / 16) — confirm intent.
        without_permind -- if 1, omit the permutation-index branches.

    Returns:
        A compiled tensorflow.keras Model.
    """
    if (depth - 1) % 18 != 0:
        raise ValueError('depth should be 18n+1 (eg 19, 37, 55 ...)')
    # Start model definition.
    num_filters = 16
    num_res_blocks = int((depth - 1) / 18)
    inputs = Input(shape=input_shape)
    # Stem: first conv/BN/ReLU before any residual unit.
    x = resnet_layer(inputs=inputs)
    # Instantiate the stack of residual units
    for stack in range(9):
        for res_block in range(num_res_blocks):
            strides = 1
            if stack > 0 and res_block == 0:
                # First unit of each later stack downsamples by 2.
                strides = 2
            y = resnet_layer(inputs=x,
                             num_filters=num_filters,
                             strides=strides)
            y = resnet_layer(inputs=y,
                             num_filters=num_filters,
                             activation=None)
            if stack > 0 and res_block == 0:
                # 1x1 conv projection so the shortcut matches y's shape
                # (changed filter count and stride).
                x = resnet_layer(inputs=x,
                                 num_filters=num_filters,
                                 kernel_size=1,
                                 strides=strides,
                                 activation=None,
                                 batch_normalization=False)
            # Residual addition followed by the unit's output activation.
            x = add([x, y])
            x = Activation('relu')(x)
        # Double the width each stack, capped at 256 filters.
        if (num_filters < 256):
            num_filters *= 2
    x = AveragePooling1D(pool_size=4)(x)
    x = Flatten()(x)
    # Shared trunk features feed every prediction head.
    x_alpha = alpha_branch(x)
    x_beta = beta_branch(x)
    x_sbox_l = []
    x_permind_l = []
    for i in range(16):
        x_sbox_l.append(sbox_branch(x, i))
        x_permind_l.append(permind_branch(x, i))
    if without_permind != 1:
        model = Model(inputs, [x_alpha, x_beta] +
                      x_sbox_l + x_permind_l, name='extract_resnet')
    else:
        model = Model(inputs, [x_alpha, x_beta] + x_sbox_l,
                      name='extract_resnet_without_permind')
    optimizer = Adam()
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer, metrics=['accuracy'])
    return model
# CNN multilabel test function. This model is only used for debugging.
def multi_test(input_dim=1400):
input_shape = (input_dim, 1)
inputs = Input(shape=input_shape)
# Block 1
x = Conv1D(3, 11, strides=100, activation='relu',
padding='same', name='block1_conv1')(inputs)
x = Flatten()(x)
x_alpha = alpha_branch(x)
x_beta = beta_branch(x)
x_sbox_l = []
x_permind_l = []
for i in range(16):
| |
of SubRipItems.
target_file_path {string} -- The path to the exported subtitle file.
frame_rate {float} -- The frame rate for frame-based subtitle formats {default: 25.0}.
encoding {str} -- The encoding of the exported subtitle file {default: None}.
"""
encoding = Utils.detect_encoding(source_file_path) if encoding is None else encoding
_, file_extension = os.path.splitext(source_file_path.lower())
Subtitle.__save_subtitle_by_extension(file_extension, subs, source_file_path, target_file_path, encoding, frame_rate, is_exporting=True)
@staticmethod
def remove_sound_effects_by_case(subs: List[SubRipItem], se_uppercase: bool = True) -> List[SubRipItem]:
"""Remove subtitles of sound effects based on case
Arguments:
subs {list} -- A list of SubRipItems.
se_uppercase {bool} -- True when the sound effect is in uppercase or False when in lowercase (default: {True}).
Returns:
{list} -- A list of SubRipItems.
"""
new_subs = deepcopy(subs)
for sub in subs:
if se_uppercase is not None:
if se_uppercase and sub.text.isupper():
new_subs.remove(sub)
elif not se_uppercase and sub.text.islower():
new_subs.remove(sub)
return new_subs
@staticmethod
def remove_sound_effects_by_affixes(subs: List[SubRipItem], se_prefix: str, se_suffix: Optional[str] = None) -> List[SubRipItem]:
"""Remove subtitles of sound effects based on prefix or prefix and suffix
Arguments:
subs {list} -- A list of SubRipItems.
se_prefix {string} -- A prefix indicating the start of the sound effect.
se_suffix {string} -- A suffix indicating the end of the sound effect (default: {None}).
Returns:
{list} -- A list of SubRipItems.
"""
new_subs = deepcopy(subs)
for sub in subs:
if se_suffix is not None:
match = re.search(
"^{0}[^{0}{1}]+{1}$".format(
re.escape(se_prefix), re.escape(se_suffix)
),
sub.text,
)
else:
match = re.search("^{0}[^{0}]+$".format(re.escape(se_prefix)), sub.text)
if match:
new_subs.remove(sub)
return new_subs
@staticmethod
def extract_text(subtitle_file_path: str, delimiter: str = " ") -> str:
"""Extract plain texts from a subtitle file.
Arguments:
subtitle_file_path {string} -- The path to the subtitle file.
Returns:
{string} -- The plain text of subtitle.
"""
subs = Subtitle.load(subtitle_file_path).subs
texts = [sub.text.replace("\r\n", " ").replace("\n", " ").replace("\r", " ") for sub in subs]
return delimiter.join(texts)
@staticmethod
def subtitle_extensions() -> set:
"""Get the file extensions of the supported subtitles.
Returns:
{set} -- The subtitle extensions.
"""
return set(Subtitle.SUBRIP_EXTENTIONS + Subtitle.TTML_EXTENSIONS + Subtitle.WEBVTT_EXTENSIONS
+ Subtitle.SSA_EXTENTIONS + Subtitle.ADVANCED_SSA_EXTENTIONS + Subtitle.MICRODVD_EXTENSIONS
+ Subtitle.MPL2_EXTENSIONS + Subtitle.TMP_EXTENSIONS + Subtitle.SAMI_EXTENSIONS
+ Subtitle.STL_EXTENSIONS + Subtitle.SCC_EXTENSIONS + Subtitle.SBV_EXTENSIONS
+ Subtitle.YT_TRANSCRIPT_EXTENSIONS)
    @property
    def subtitle_file_path(self) -> str:
        """Path of the subtitle file this object was loaded from."""
        return self.__subtitle_file_path
    @property
    def subs(self) -> SubRipFile:
        """Parsed subtitle cues (a pysrt SubRipFile)."""
        return self.__subs
    @staticmethod
    def __load_subrip(subrip_file_path: str) -> SubRipFile:
        """Load a subtitle file in the SubRip format

        Arguments:
            subrip_file_path {string} -- The path to the SubRip subtitle file.

        Returns:
            {SubRipFile} -- The parsed subtitle cues (iterable of SubRipItems).
        """
        return Subtitle.__get_srt_subs(subrip_file_path)
@staticmethod
def __convert_ttml_to_subs(ttml_file_path: str) -> SubRipFile:
"""Convert a subtitle file from the TTML format to the SubRip format
Arguments:
ttml_file_path {string} -- The path to the TTML subtitle file.
Returns:
{list} -- A list of SubRipItems.
"""
_, path = tempfile.mkstemp()
Utils.ttml2srt(ttml_file_path, path)
return Subtitle.__get_srt_subs(path, housekeep=True)
@staticmethod
def __convert_vtt_to_subs(vtt_file_path: str) -> SubRipFile:
"""Convert a subtitle file from the WebVTT format to the SubRip format
Arguments:
vtt_file_path {string} -- The path to the WebVTT subtitle file.
Returns:
{list} -- A list of SubRipItems.
"""
_, path = tempfile.mkstemp()
Utils.vtt2srt(vtt_file_path, path)
return Subtitle.__get_srt_subs(path, housekeep=True)
@staticmethod
def __convert_ssa_to_subs(ssa_file_path: str) -> SubRipFile:
"""Convert a subtitle file from the SubStation Alpha v4.0 format to the SubRip format
Arguments:
ass_file_path {string} -- The path to the SubStation Alpha v4.0 subtitle file.
Returns:
{list} -- A list of SubRipItems.
"""
_, path = tempfile.mkstemp()
path = "%s.srt" % path
Utils.ssa2srt(ssa_file_path, path)
return Subtitle.__get_srt_subs(path, housekeep=True)
@staticmethod
def __convert_ass_to_subs(ass_file_path: str) -> SubRipFile:
"""Convert a subtitle file from the Advanced SubStation Alpha v4.0+ format to the SubRip format
Arguments:
ass_file_path {string} -- The path to the Advanced SubStation Alpha v4.0+ subtitle file.
Returns:
{list} -- A list of SubRipItems.
"""
_, path = tempfile.mkstemp()
path = "%s.srt" % path
Utils.ass2srt(ass_file_path, path)
return Subtitle.__get_srt_subs(path, housekeep=True)
@staticmethod
def __convert_microdvd_to_subs(microdvd_file_path: str) -> SubRipFile:
"""Convert a subtitle file from the MicroDVD format to the SubRip format
Arguments:
microdvd_file_path {string} -- The path to the MicroDVD subtitle file.
Returns:
{list} -- A list of SubRipItems.
"""
_, path = tempfile.mkstemp()
path = "%s.srt" % path
Utils.microdvd2srt(microdvd_file_path, path)
return Subtitle.__get_srt_subs(path, housekeep=True)
@staticmethod
def __convert_mpl2_to_subs(mpl2_file_path: str) -> SubRipFile:
"""Convert a subtitle file from the MPL2 format to the SubRip format
Arguments:
mpl2_file_path {string} -- The path to the MPL2 subtitle file.
Returns:
{list} -- A list of SubRipItems.
"""
_, path = tempfile.mkstemp()
path = "%s.srt" % path
Utils.mpl22srt(mpl2_file_path, path)
return Subtitle.__get_srt_subs(path, housekeep=True)
@staticmethod
def __convert_tmp_to_subs(tmp_file_path: str) -> SubRipFile:
"""Convert a subtitle file from the TMP format to the SubRip format
Arguments:
tmp_file_path {string} -- The path to the TMP subtitle file.
Returns:
{list} -- A list of SubRipItems.
"""
_, path = tempfile.mkstemp()
path = "%s.srt" % path
Utils.tmp2srt(tmp_file_path, path)
return Subtitle.__get_srt_subs(path, housekeep=True)
@staticmethod
def __convert_sami_to_subs(sami_file_path: str) -> SubRipFile:
"""Convert a subtitle file from the SAMI format to the SubRip format
Arguments:
sami_file_path {string} -- The path to the SAMI subtitle file.
Returns:
{list} -- A list of SubRipItems.
"""
_, path = tempfile.mkstemp()
Utils.sami2srt(sami_file_path, path)
return Subtitle.__get_srt_subs(path, housekeep=True)
@staticmethod
def __convert_stl_to_subs(stl_file_path: str) -> SubRipFile:
"""Convert a subtitle file from the EBU STL format to the SubRip format
Arguments:
stl_file_path {string} -- The path to the STL subtitle file.
Returns:
{list} -- A list of SubRipItems.
"""
_, path = tempfile.mkstemp()
Utils.stl2srt(stl_file_path, path)
return Subtitle.__get_srt_subs(path, housekeep=True)
@staticmethod
def __convert_scc_to_subs(scc_file_path: str) -> SubRipFile:
"""Convert a subtitle file from the SCC format to the SubRip format
Arguments:
scc_file_path {string} -- The path to the SCC subtitle file.
Returns:
{list} -- A list of SubRipItems.
"""
_, path = tempfile.mkstemp()
Utils.scc2srt(scc_file_path, path)
return Subtitle.__get_srt_subs(path, housekeep=True)
@staticmethod
def __convert_sbv_to_subs(sbv_file_path: str) -> SubRipFile:
"""Convert a subtitle file from the SubViewer format to the SubRip format
Arguments:
sbv_file_path {string} -- The path to the SubViewer subtitle file.
Returns:
{list} -- A list of SubRipItems.
"""
_, path = tempfile.mkstemp()
Utils.sbv2srt(sbv_file_path, path)
return Subtitle.__get_srt_subs(path, housekeep=True)
@staticmethod
def __convert_ytt_to_subs(ytt_file_path: str) -> SubRipFile:
    """Convert a subtitle file from the YouTube transcript format to the SubRip format
    Arguments:
        ytt_file_path {string} -- The path to the YouTube transcript subtitle file.
    Returns:
        {list} -- A list of SubRipItems.
    """
    # Close the descriptor returned by mkstemp; the previous code discarded
    # it, leaking one file descriptor per conversion.
    fd, path = tempfile.mkstemp()
    os.close(fd)
    Utils.ytt2srt(ytt_file_path, path)
    # housekeep=True removes the temp file after it has been parsed
    return Subtitle.__get_srt_subs(path, housekeep=True)
@staticmethod
def __export_with_format(subs: List[SubRipItem], source_file_path: str, target_file_path: Optional[str], file_extension: str, suffix: str) -> None:
    """Export subtitles, deriving the target path from the source path when none is given.

    Arguments:
        subs {list} -- SubRipItems to export.
        source_file_path {string} -- Path of the original subtitle file.
        target_file_path {Optional[string]} -- Explicit output path, or None to derive one.
        file_extension {string} -- Extension of the source file (e.g. ".srt").
        suffix {string} -- Suffix inserted before the extension for the derived path.
    """
    if target_file_path is None:
        # str.replace would substitute *every* occurrence of the extension
        # anywhere in the path (e.g. "clip.srt.bak/clip.srt"); only rewrite
        # the trailing extension instead.
        if source_file_path.endswith(file_extension):
            target_file_path = source_file_path[:-len(file_extension)] + "{}{}".format(suffix, file_extension)
        else:
            target_file_path = source_file_path + "{}{}".format(suffix, file_extension)
    Subtitle.export_subtitle(source_file_path, subs, target_file_path)
@staticmethod
def __get_srt_subs(subrip_file_path: str, housekeep: bool = False) -> SubRipFile:
    """Load a SubRip file from disk and return its cues.

    Arguments:
        subrip_file_path {string} -- Path to the SubRip file.
        housekeep {bool} -- When True, delete the file after loading it.

    Raises:
        UnsupportedFormatException -- When the file cannot be parsed by pysrt.
    """
    detected_encoding = Utils.detect_encoding(subrip_file_path)
    try:
        try:
            return pysrt.open(subrip_file_path, encoding=detected_encoding)
        except Exception as exc:
            raise UnsupportedFormatException("Error occurred when loading subtitle from %s" % subrip_file_path) from exc
    finally:
        # the file is removed whether or not parsing succeeded
        if housekeep:
            os.remove(subrip_file_path)
@staticmethod
def __save_subtitle_by_extension(file_extension: str,
subs: List[SubRipItem],
source_file_path: str,
target_file_path: str,
encoding: str,
frame_rate: Optional[float],
is_exporting: bool = False):
if file_extension in Subtitle.SUBRIP_EXTENTIONS:
SubRipFile(subs).save(target_file_path, encoding=encoding)
Utils.remove_trailing_newlines(target_file_path, encoding)
elif file_extension in Subtitle.TTML_EXTENSIONS:
if is_exporting:
tree = ElementTree.parse(source_file_path)
tt = tree.getroot()
cues = (tt.find("tt:body", Subtitle.TT_NS).find("tt:div", Subtitle.TT_NS).findall("tt:p", Subtitle.TT_NS)) # type: ignore
for index, cue in enumerate(cues):
cue.attrib["begin"] = str(subs[index].start).replace(",", ".")
cue.attrib["end"] = str(subs[index].end).replace(",", ".")
# Change single quotes in the XML header to double quotes
with open(target_file_path, "w", encoding=encoding) as target:
if "xml_declaration" in inspect.getfullargspec(ElementTree.tostring).kwonlyargs: # for >= python 3.8
encoded = ElementTree.tostring(tt, encoding=encoding, method="xml", xml_declaration=True)
else:
encoded = ElementTree.tostring(tt, encoding=encoding, method="xml")
normalised = encoded.decode(encoding) \
.replace("<?xml version='1.0' encoding='", '<?xml version="1.0" encoding="',) \
.replace("'?>", '"?>')
target.write(normalised)
else:
try:
_, path = tempfile.mkstemp()
SubRipFile(subs).save(path, encoding=encoding)
Utils.srt2ttml(path, target_file_path)
finally:
os.remove(path)
elif file_extension in Subtitle.WEBVTT_EXTENSIONS:
try:
_, path = tempfile.mkstemp()
SubRipFile(subs).save(path, encoding=encoding)
Utils.srt2vtt(path, target_file_path)
finally:
os.remove(path)
elif file_extension in Subtitle.SSA_EXTENTIONS:
try:
_, path = tempfile.mkstemp()
SubRipFile(subs).save(path, encoding=encoding)
Utils.srt2ssa(path, target_file_path)
finally:
os.remove(path)
elif file_extension in Subtitle.ADVANCED_SSA_EXTENTIONS:
try:
_, path = tempfile.mkstemp()
SubRipFile(subs).save(path, encoding=encoding)
Utils.srt2ass(path, target_file_path)
finally:
os.remove(path)
elif file_extension in Subtitle.MICRODVD_EXTENSIONS:
try:
_, path = tempfile.mkstemp()
SubRipFile(subs).save(path, encoding=encoding)
Utils.srt2microdvd(path, target_file_path, frame_rate=frame_rate)
finally:
os.remove(path)
elif file_extension in Subtitle.MPL2_EXTENSIONS:
try:
_, path = tempfile.mkstemp()
SubRipFile(subs).save(path, encoding=encoding)
Utils.srt2mpl2(path, target_file_path)
finally:
os.remove(path)
elif file_extension in Subtitle.TMP_EXTENSIONS:
try:
_, path = tempfile.mkstemp()
SubRipFile(subs).save(path, encoding=encoding)
Utils.srt2tmp(path, target_file_path)
finally:
os.remove(path)
elif file_extension in Subtitle.SAMI_EXTENSIONS:
try:
_, path = tempfile.mkstemp()
SubRipFile(subs).save(path, encoding=encoding)
Utils.srt2sami(path, target_file_path)
finally:
os.remove(path)
elif file_extension in Subtitle.STL_EXTENSIONS:
try:
_, path = tempfile.mkstemp()
SubRipFile(subs).save(target_file_path, encoding=encoding)
| |
<gh_stars>100-1000
#!/usr/bin/env python
"""
Generates an AXI crossbar wrapper with the specified number of ports
"""
import argparse
from jinja2 import Template
def main():
    """Parse command-line arguments and generate the AXI crossbar wrapper."""
    parser = argparse.ArgumentParser(description=__doc__.strip())
    parser.add_argument('-p', '--ports', type=int, default=[4], nargs='+', help="number of ports")
    parser.add_argument('-n', '--name', type=str, help="module name")
    parser.add_argument('-o', '--output', type=str, help="output file name")
    args = parser.parse_args()
    try:
        # vars() is the idiomatic way to turn a Namespace into kwargs
        generate(**vars(args))
    except IOError as ex:
        print(ex)
        # raise SystemExit rather than calling the site.py builtin exit(),
        # which is intended for interactive sessions only
        raise SystemExit(1)
def generate(ports=4, name=None, output=None):
if type(ports) is int:
m = n = ports
elif len(ports) == 1:
m = n = ports[0]
else:
m, n = ports
if name is None:
name = "axi_crossbar_wrap_{0}x{1}".format(m, n)
if output is None:
output = name + ".v"
print("Generating {0}x{1} port AXI crossbar wrapper {2}...".format(m, n, name))
cm = (m-1).bit_length()
cn = (n-1).bit_length()
t = Template(u"""/*
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
// Language: Verilog 2001
`timescale 1ns / 1ps
/*
* AXI4 {{m}}x{{n}} crossbar (wrapper)
*/
module {{name}} #
(
// Width of data bus in bits
parameter DATA_WIDTH = 32,
// Width of address bus in bits
parameter ADDR_WIDTH = 32,
// Width of wstrb (width of data bus in words)
parameter STRB_WIDTH = (DATA_WIDTH/8),
// Input ID field width (from AXI masters)
parameter S_ID_WIDTH = 8,
// Output ID field width (towards AXI slaves)
// Additional bits required for response routing
parameter M_ID_WIDTH = S_ID_WIDTH+$clog2(S_COUNT),
// Propagate awuser signal
parameter AWUSER_ENABLE = 0,
// Width of awuser signal
parameter AWUSER_WIDTH = 1,
// Propagate wuser signal
parameter WUSER_ENABLE = 0,
// Width of wuser signal
parameter WUSER_WIDTH = 1,
// Propagate buser signal
parameter BUSER_ENABLE = 0,
// Width of buser signal
parameter BUSER_WIDTH = 1,
// Propagate aruser signal
parameter ARUSER_ENABLE = 0,
// Width of aruser signal
parameter ARUSER_WIDTH = 1,
// Propagate ruser signal
parameter RUSER_ENABLE = 0,
// Width of ruser signal
parameter RUSER_WIDTH = 1,
{%- for p in range(m) %}
// Number of concurrent unique IDs
parameter S{{'%02d'%p}}_THREADS = 2,
// Number of concurrent operations
parameter S{{'%02d'%p}}_ACCEPT = 16,
{%- endfor %}
// Number of regions per master interface
parameter M_REGIONS = 1,
{%- for p in range(n) %}
// Master interface base addresses
// M_REGIONS concatenated fields of ADDR_WIDTH bits
parameter M{{'%02d'%p}}_BASE_ADDR = 0,
// Master interface address widths
// M_REGIONS concatenated fields of 32 bits
parameter M{{'%02d'%p}}_ADDR_WIDTH = {M_REGIONS{32'd24}},
// Read connections between interfaces
// S_COUNT bits
parameter M{{'%02d'%p}}_CONNECT_READ = {{m}}'b{% for p in range(m) %}1{% endfor %},
// Write connections between interfaces
// S_COUNT bits
parameter M{{'%02d'%p}}_CONNECT_WRITE = {{m}}'b{% for p in range(m) %}1{% endfor %},
// Number of concurrent operations for each master interface
parameter M{{'%02d'%p}}_ISSUE = 4,
// Secure master (fail operations based on awprot/arprot)
parameter M{{'%02d'%p}}_SECURE = 0,
{%- endfor %}
{%- for p in range(m) %}
// Slave interface AW channel register type (input)
// 0 to bypass, 1 for simple buffer, 2 for skid buffer
parameter S{{'%02d'%p}}_AW_REG_TYPE = 0,
// Slave interface W channel register type (input)
// 0 to bypass, 1 for simple buffer, 2 for skid buffer
parameter S{{'%02d'%p}}_W_REG_TYPE = 0,
// Slave interface B channel register type (output)
// 0 to bypass, 1 for simple buffer, 2 for skid buffer
parameter S{{'%02d'%p}}_B_REG_TYPE = 1,
// Slave interface AR channel register type (input)
// 0 to bypass, 1 for simple buffer, 2 for skid buffer
parameter S{{'%02d'%p}}_AR_REG_TYPE = 0,
// Slave interface R channel register type (output)
// 0 to bypass, 1 for simple buffer, 2 for skid buffer
parameter S{{'%02d'%p}}_R_REG_TYPE = 2,
{%- endfor %}
{%- for p in range(n) %}
// Master interface AW channel register type (output)
// 0 to bypass, 1 for simple buffer, 2 for skid buffer
parameter M{{'%02d'%p}}_AW_REG_TYPE = 1,
// Master interface W channel register type (output)
// 0 to bypass, 1 for simple buffer, 2 for skid buffer
parameter M{{'%02d'%p}}_W_REG_TYPE = 2,
// Master interface B channel register type (input)
// 0 to bypass, 1 for simple buffer, 2 for skid buffer
parameter M{{'%02d'%p}}_B_REG_TYPE = 0,
// Master interface AR channel register type (output)
// 0 to bypass, 1 for simple buffer, 2 for skid buffer
parameter M{{'%02d'%p}}_AR_REG_TYPE = 1,
// Master interface R channel register type (input)
// 0 to bypass, 1 for simple buffer, 2 for skid buffer
parameter M{{'%02d'%p}}_R_REG_TYPE = 0{% if not loop.last %},{% endif %}
{%- endfor %}
)
(
input wire clk,
input wire rst,
/*
* AXI slave interface
*/
{%- for p in range(m) %}
input wire [S_ID_WIDTH-1:0] s{{'%02d'%p}}_axi_awid,
input wire [ADDR_WIDTH-1:0] s{{'%02d'%p}}_axi_awaddr,
input wire [7:0] s{{'%02d'%p}}_axi_awlen,
input wire [2:0] s{{'%02d'%p}}_axi_awsize,
input wire [1:0] s{{'%02d'%p}}_axi_awburst,
input wire s{{'%02d'%p}}_axi_awlock,
input wire [3:0] s{{'%02d'%p}}_axi_awcache,
input wire [2:0] s{{'%02d'%p}}_axi_awprot,
input wire [3:0] s{{'%02d'%p}}_axi_awqos,
input wire [AWUSER_WIDTH-1:0] s{{'%02d'%p}}_axi_awuser,
input wire s{{'%02d'%p}}_axi_awvalid,
output wire s{{'%02d'%p}}_axi_awready,
input wire [DATA_WIDTH-1:0] s{{'%02d'%p}}_axi_wdata,
input wire [STRB_WIDTH-1:0] s{{'%02d'%p}}_axi_wstrb,
input wire s{{'%02d'%p}}_axi_wlast,
input wire [WUSER_WIDTH-1:0] s{{'%02d'%p}}_axi_wuser,
input wire s{{'%02d'%p}}_axi_wvalid,
output wire s{{'%02d'%p}}_axi_wready,
output wire [S_ID_WIDTH-1:0] s{{'%02d'%p}}_axi_bid,
output wire [1:0] s{{'%02d'%p}}_axi_bresp,
output wire [BUSER_WIDTH-1:0] s{{'%02d'%p}}_axi_buser,
output wire s{{'%02d'%p}}_axi_bvalid,
input wire s{{'%02d'%p}}_axi_bready,
input wire [S_ID_WIDTH-1:0] s{{'%02d'%p}}_axi_arid,
input wire [ADDR_WIDTH-1:0] s{{'%02d'%p}}_axi_araddr,
input wire [7:0] s{{'%02d'%p}}_axi_arlen,
input wire [2:0] s{{'%02d'%p}}_axi_arsize,
input wire [1:0] s{{'%02d'%p}}_axi_arburst,
input wire s{{'%02d'%p}}_axi_arlock,
input wire [3:0] s{{'%02d'%p}}_axi_arcache,
input wire [2:0] s{{'%02d'%p}}_axi_arprot,
input wire [3:0] s{{'%02d'%p}}_axi_arqos,
input wire [ARUSER_WIDTH-1:0] s{{'%02d'%p}}_axi_aruser,
input wire s{{'%02d'%p}}_axi_arvalid,
output wire s{{'%02d'%p}}_axi_arready,
output wire [S_ID_WIDTH-1:0] s{{'%02d'%p}}_axi_rid,
output wire [DATA_WIDTH-1:0] s{{'%02d'%p}}_axi_rdata,
output wire [1:0] s{{'%02d'%p}}_axi_rresp,
output wire s{{'%02d'%p}}_axi_rlast,
output wire [RUSER_WIDTH-1:0] s{{'%02d'%p}}_axi_ruser,
output wire s{{'%02d'%p}}_axi_rvalid,
input wire s{{'%02d'%p}}_axi_rready,
{% endfor %}
/*
* AXI master interface
*/
{%- for p in range(n) %}
output wire [M_ID_WIDTH-1:0] m{{'%02d'%p}}_axi_awid,
output wire [ADDR_WIDTH-1:0] m{{'%02d'%p}}_axi_awaddr,
output wire [7:0] m{{'%02d'%p}}_axi_awlen,
output wire [2:0] m{{'%02d'%p}}_axi_awsize,
output wire [1:0] m{{'%02d'%p}}_axi_awburst,
output wire m{{'%02d'%p}}_axi_awlock,
output wire [3:0] m{{'%02d'%p}}_axi_awcache,
output wire [2:0] m{{'%02d'%p}}_axi_awprot,
output wire [3:0] m{{'%02d'%p}}_axi_awqos,
output wire [3:0] m{{'%02d'%p}}_axi_awregion,
output wire [AWUSER_WIDTH-1:0] m{{'%02d'%p}}_axi_awuser,
output wire m{{'%02d'%p}}_axi_awvalid,
input wire m{{'%02d'%p}}_axi_awready,
output wire [DATA_WIDTH-1:0] m{{'%02d'%p}}_axi_wdata,
output wire [STRB_WIDTH-1:0] m{{'%02d'%p}}_axi_wstrb,
output wire m{{'%02d'%p}}_axi_wlast,
output wire [WUSER_WIDTH-1:0] m{{'%02d'%p}}_axi_wuser,
output wire m{{'%02d'%p}}_axi_wvalid,
input wire m{{'%02d'%p}}_axi_wready,
input wire [M_ID_WIDTH-1:0] m{{'%02d'%p}}_axi_bid,
input wire [1:0] m{{'%02d'%p}}_axi_bresp,
input wire [BUSER_WIDTH-1:0] m{{'%02d'%p}}_axi_buser,
input wire m{{'%02d'%p}}_axi_bvalid,
output wire m{{'%02d'%p}}_axi_bready,
output wire [M_ID_WIDTH-1:0] m{{'%02d'%p}}_axi_arid,
output wire [ADDR_WIDTH-1:0] m{{'%02d'%p}}_axi_araddr,
output wire [7:0] m{{'%02d'%p}}_axi_arlen,
output wire [2:0] m{{'%02d'%p}}_axi_arsize,
output wire [1:0] m{{'%02d'%p}}_axi_arburst,
output wire m{{'%02d'%p}}_axi_arlock,
output wire [3:0] m{{'%02d'%p}}_axi_arcache,
output wire [2:0] m{{'%02d'%p}}_axi_arprot,
output wire [3:0] m{{'%02d'%p}}_axi_arqos,
output wire [3:0] m{{'%02d'%p}}_axi_arregion,
output wire [ARUSER_WIDTH-1:0] m{{'%02d'%p}}_axi_aruser,
output wire m{{'%02d'%p}}_axi_arvalid,
input wire m{{'%02d'%p}}_axi_arready,
input wire [M_ID_WIDTH-1:0] m{{'%02d'%p}}_axi_rid,
input wire [DATA_WIDTH-1:0] m{{'%02d'%p}}_axi_rdata,
input wire [1:0] m{{'%02d'%p}}_axi_rresp,
input wire m{{'%02d'%p}}_axi_rlast,
input wire [RUSER_WIDTH-1:0] m{{'%02d'%p}}_axi_ruser,
input wire m{{'%02d'%p}}_axi_rvalid,
output wire m{{'%02d'%p}}_axi_rready{% if not loop.last %},{% endif %}
{% endfor -%}
);
localparam S_COUNT = {{m}};
localparam M_COUNT = {{n}};
// parameter sizing helpers
function [ADDR_WIDTH*M_REGIONS-1:0] w_a_r(input [ADDR_WIDTH*M_REGIONS-1:0] val);
w_a_r = val;
endfunction
function [32*M_REGIONS-1:0] w_32_r(input [32*M_REGIONS-1:0] val);
w_32_r = val;
endfunction
function [S_COUNT-1:0] w_s(input [S_COUNT-1:0] val);
w_s = val;
endfunction
function [31:0] w_32(input [31:0] val);
w_32 = val;
endfunction
function [1:0] w_2(input [1:0] val);
w_2 = val;
endfunction
function w_1(input val);
w_1 = val;
endfunction
axi_crossbar #(
.S_COUNT(S_COUNT),
.M_COUNT(M_COUNT),
.DATA_WIDTH(DATA_WIDTH),
.ADDR_WIDTH(ADDR_WIDTH),
.STRB_WIDTH(STRB_WIDTH),
.S_ID_WIDTH(S_ID_WIDTH),
.M_ID_WIDTH(M_ID_WIDTH),
.AWUSER_ENABLE(AWUSER_ENABLE),
.AWUSER_WIDTH(AWUSER_WIDTH),
.WUSER_ENABLE(WUSER_ENABLE),
.WUSER_WIDTH(WUSER_WIDTH),
.BUSER_ENABLE(BUSER_ENABLE),
.BUSER_WIDTH(BUSER_WIDTH),
.ARUSER_ENABLE(ARUSER_ENABLE),
.ARUSER_WIDTH(ARUSER_WIDTH),
.RUSER_ENABLE(RUSER_ENABLE),
.RUSER_WIDTH(RUSER_WIDTH),
.S_THREADS({ {% for p in range(m-1,-1,-1) %}w_32(S{{'%02d'%p}}_THREADS){% if not loop.last %}, {% endif %}{% endfor %} }),
.S_ACCEPT({ {% for p in range(m-1,-1,-1) %}w_32(S{{'%02d'%p}}_ACCEPT){% if not loop.last %}, {% endif %}{% endfor %} | |
moving list
move_files.append((oldroot, newroot))
dirlist.remove('_root.sds')
if skip:
flatten_fail.append(dirpath)
continue
# strip .sds for renaming
prefix = dirpath
if sds_endswith(prefix):
prefix = dirpath[:-4]
prefix = prefix + '!'
# move all .sds files to base directory
for fname in dirlist:
old = dirpath + os.sep + fname
new = prefix + fname
if sds_exists(new):
warnings.warn('{new} was already found in base directory. Could not flatten {dirpath}.')
skip = True
break
else:
move_files.append((old,new))
if skip:
flatten_fail.append(dirpath)
continue
# move all .sds files to base directory
for old, new in move_files:
os.rename(old, new)
shutil.rmtree(dirpath)
# rename temp _root if necessary
if oldroot is not None:
# strip ! from end of prefix
finalroot = prefix[:-1]+SDS_EXTENSION
os.rename(newroot, finalroot)
if len(flatten_fail)>0:
print('Failed to flatten subdirectories:')
for dname in flatten_fail:
print(dname)
#-----------------------------------------------------------------------------------------
def _sds_path_multi(path, share=None, overwrite=True):
    '''
    Checks for existence of directory for saving multiple .sds files.
    If directory exists, asks user if it should be used (potentially overwriting existing .sds files inside)
    Returns True if okay to proceed with save.

    Parameters
    ----------
    path : str
        Target directory for the multi-file save.
    share : str, optional
        Shared-memory name; when set, no on-disk directory is checked or created.
    overwrite : bool
        When True, skip the interactive confirmation prompt.

    Returns
    -------
    bool
        True if it is okay to proceed with the save, False if the user declined.
    '''
    # path will never get checked/created if saving to shared memory
    if share is None:
        # prompt user for overwrite
        if sds_exists(path):
            if overwrite is False:
                # interactively confirm reuse of an existing directory;
                # loop until the user answers y or n
                prompt = f"{path} already exists. Possibly overwrite .sds files in directory? (subdirectories will remain intact) (y/n) "
                overwrite = False
                while(True):
                    choice = input(prompt)
                    if choice in ['Y', 'y']:
                        overwrite = True
                        break
                    elif choice in ['N', 'n']:
                        break
                if overwrite is False:
                    print(f"No file was saved.")
                    return False
            else:
                pass
                # don't remove the entire tree by default
                #shutil.rmtree(path)
        else:
            # directory does not exist yet: create it (recursively when
            # SDSMakeDirs is enabled)
            # possible TODO: call chmod after this so permissions are correct
            # or maybe use os.umask before creating the directory?
            if SDSVerbose: VerbosePrint(f'calling makedirs')
            if SDSMakeDirs:
                os.makedirs(path)
            else:
                os.mkdir(path)
            #raise ValueError(f'Directory {path!r} does not exist. SDSMakeDirs global variable must be set to auto create sub directories.')
    return True
#-----------------------------------------------------------------------------------------
def _sds_path_single(path, share=None, overwrite=True, name=None, append=None):
    '''
    Checks for existence of a single .sds file and possibly prompts user to overwrite.
    If the directory does not exist, it will be created for the final save.
    Returns full path for final save and status (True if okay to proceed with save)
    NOTE: TJD overwrite changed to True on Aug, 2019

    Parameters
    ----------
    path : str or bytes
        Destination path; the ".sds" extension is appended when missing.
    share : str, optional
        Shared-memory name; when set, no on-disk checks are performed.
    overwrite : bool
        When True, skip the interactive confirmation prompt.
    name : str, optional
        Explicit file name; when None it is derived from `path`.
    append : str, optional
        Append-section name; when set, overwrite is implied.

    Returns
    -------
    (path, name, status) : tuple
        Final save path, the name without extension, and True if okay to save.
    '''
    # TODO: add this routine to Dataset.save()
    if isinstance(path, bytes):
        path = path.decode()
    # possibly add extension
    if name is None:
        name = os.path.basename(os.path.normpath(path))
    else:
        name = _parse_nested_name(name)
        path = path + os.sep + name
    if sds_endswith(name):
        # strip ".sds" from the display name
        name = name[:-4]
    else:
        path += SDS_EXTENSION
    # if the user is appending to a file, overwrite is expected
    if append is not None:
        overwrite = True
    # TJD look at this path since it does os check on filepath
    if share is None:
        # if exists, let user know if file or directory
        exists_str = None
        if sds_isfile(path):
            exists_str = f'file'
        elif sds_isdir(path):
            # for now, don't allow overwrite if name.sds is a directory
            exists_str = f'directory'
            raise TypeError(f"{path} already existed and was a {exists_str}.")
        # prompt user for overwrite
        if exists_str is not None:
            prompt = f"{path} already exists. Overwrite? (y/n) "
            if overwrite is False:
                while True:
                    choice = input(prompt)
                    # BUG FIX: membership must be tested against a sequence of
                    # answers; `choice in 'Yy'` is a *substring* test, so an
                    # empty answer ('' in 'Yy' is True) silently enabled
                    # overwrite. Tuples also match _sds_path_multi's style.
                    if choice in ('Y', 'y'):
                        overwrite = True
                        break
                    elif choice in ('N', 'n'):
                        break
            if overwrite is False:
                print(f"No file was saved.")
                return path, name, False
            else:
                # overwriting files is allowed, overwriting directories is not
                if sds_isdir(path):
                    shutil.rmtree(path)
                #TJD disabled this (consider flag to re-enable)
                ##print(f"Overwriting {exists_str} with {path}")
        # if the file/directory does not exist, possibly create the nested containing directory
        else:
            dir_end = len(os.path.basename(os.path.normpath(path)))
            if not sds_isdir(path[:-dir_end]):
                # don't make directory if empty string
                if len(path[:-dir_end]) > 0:
                    newpath = path[:-dir_end]
                    if SDSMakeDirs:
                        os.makedirs(newpath)
                    else:
                        os.mkdir(newpath)
                    #raise ValueError(f'Directory {newpath!r} does not exist. SDSMakeDirs global variable must be set to auto create sub directories.')
    return path, name, True
#-----------------------------------------------------------------------------------------
def _sds_save_single(item, path, share=None, overwrite=True, compress=True, name=None, onefile=False, bandsize=None, append=None, complevel=None):
    '''
    Fast track for saving a single item in an .sds file. This will be called if someone saves
    a single array or FastArray subclass with the main save_sds() wrapper.

    Resolves the final path/name (possibly prompting the user), wraps the item
    in a single-key Struct, and writes it with the Array file type.
    '''
    _, resolved_name, ok_to_save = _sds_path_single(path, share=share, overwrite=overwrite, name=name, append=append)
    if not ok_to_save:
        return
    # wrap in struct, struct build meta will call item build meta if necessary
    wrapped = TypeRegister.Struct({resolved_name: item})
    _write_to_sds(wrapped, path, name=None, compress=compress, sharename=share,
                  fileType=SDSFileType.Array, onefile=onefile, bandsize=bandsize,
                  append=append, complevel=complevel)
#-----------------------------------------------------------------------------------------
def _sds_load_single(meta, arrays, meta_tups, info=False):
    '''
    If an .sds file has a filetype SDSFileType.Array, it will be sent to this routine.
    Extracts the underlying array, and rebuilds any FastArray subclasses.
    '''
    loaded = TypeRegister.Struct._load_from_sds_meta_data(meta, arrays, meta_tups)
    # the struct wraps exactly one item; unwrap it before returning
    return list(loaded.values())[0]
#-----------------------------------------------------------------------------------------
def save_sds_uncompressed(
    filepath: AnyPath,
    item: Union[np.ndarray, 'Dataset', 'Struct'],
    overwrite: bool = True,
    name: Optional[str] = None
) -> None:
    """
    Save an item to an ``.sds`` file with compression disabled.

    Thin wrapper around :func:`save_sds` that forces ``compress=False``;
    equivalent to ``save_sds(filepath, item, compress=False)``.

    Parameters
    ----------
    filepath : str or bytes
        Path to directory for ``Struct``, path to ``.sds`` file for ``Dataset`` or array
        (the SDS extension is added if necessary).
    item : Struct, Dataset, ndarray, or ndarray subclass
        The object to store.
    overwrite : bool
        If ``True``, do not prompt the user when overwriting an existing ``.sds``
        file (mainly useful for ``Struct.save()``, which may call
        ``Dataset.save()`` multiple times).
    name : str, optional
        Name of the sds file.

    Raises
    ------
    TypeError
        If `item` type cannot be saved.

    See Also
    --------
    save_sds : save datasets to the filename.
    """
    save_sds(filepath, item, compress=False, overwrite=overwrite, name=name)
#-----------------------------------------------------------------------------------------
def save_sds(
filepath: AnyPath,
item: Union[np.ndarray, 'Dataset', 'Struct'],
share: Optional[str] = None,
compress: bool = True,
overwrite: bool = True,
name: Optional[str] = None,
onefile: bool = False,
bandsize: Optional[int] = None,
append: Optional[str] = None,
complevel: Optional[int] = None
) -> None:
"""
Datasets and arrays will be saved into a single .sds file.
Structs will create a directory of ``.sds`` files for potential nested structures.
Parameters
----------
filepath: str or bytes or os.PathLike
Path to directory for Struct, path to ``.sds`` file for Dataset/array (extension will be added if necessary).
item : Struct, dataset, array, or array subclass
share
If the shared memory name is set, `item` will be saved to shared memory and NOT to disk. When shared memory
is specified, a filename must be included in path. Only this will be used, the rest of the path will be discarded.
For Windows make sure SE_CREATE_GLOBAL_NAME flag is set.
compress : bool, default True
Use compression when saving the file (shared memory is always saved uncompressed)
overwrite : bool, default False
If ``True``, do not prompt the user when overwriting an existing ``.sds`` file (mainly useful for ``Struct.save()``,
which may call ``Dataset.save()`` multiple times)
name : str, optional
Name of the sds file.
onefile : bool, default False
If True will flatten() a nested struct before saving to make it one file.
bandsize : int, optional
If set to an integer greater than 10000 it will compress column datas every `bandsize` rows.
append : str, optional
If set to a string it will append to the file with the section name
complevel : int, optional
Compression level from 0 to 9. 2 (default) is average. 1 is faster, less compressed, 3 is slower, more compressed.
Raises
------
TypeError
If `item` type cannot be saved
Notes
-----
``save()`` can also be called from a ``Struct`` or ``Dataset`` object.
Examples
--------
Saving a Struct:
>>> st = Struct({ \
'a': Struct({ \
'arr' : arange(10), \
'a2' : Dataset({ 'col1': arange(5) }) \
}), \
'b': Struct({ \
'ds1' : Dataset({ 'ds1col': arange(6) }), \
'ds2' : Dataset({ 'ds2col' : arange(7) }) \
}), \
})
>>> st.tree()
Struct
├──── a (Struct)
│ ├──── arr int32 (10,) 4
│ └──── a2 (Dataset)
│ └──── col1 int32 (5,) 4
└──── b (Struct)
├──── ds1 (Dataset)
│ └──── ds1col int32 (6,) 4
└──── ds2 (Dataset)
└──── ds2col int32 (7,) 4
>>> save_sds(r'D:\\junk\\nested', st)
>>> os.listdir(r'D:\\junk\\nested')
_root.sds
a!a2.sds
a.sds
b!ds1.sds
| |
plantri
sage: [u for u in list(gen)] # optional plantri
[Graph on 4 vertices,
Multi-graph on 3 vertices,
Multi-graph on 2 vertices,
Looped multi-graph on 2 vertices,
Looped multi-graph on 1 vertex,
Looped multi-graph on 1 vertex]
The cycle of length 4 is the only 2-connected bipartite planar graph
on 4 vertices::
sage: l = list(graphs.planar_graphs(4, minimum_connectivity=2, only_bipartite=True)) # optional plantri
sage: l[0].get_embedding() # optional plantri
{1: [2, 3],
2: [1, 4],
3: [1, 4],
4: [2, 3]}
There is one planar graph with one vertex. This graph obviously has
minimum degree equal to 0::
sage: list(graphs.planar_graphs(1)) # optional plantri
[Graph on 1 vertex]
sage: list(graphs.planar_graphs(1, minimum_degree=1)) # optional plantri
[]
TESTS:
The number of edges in a planar graph is equal to the number of edges in
its dual::
sage: planar = list(graphs.planar_graphs(5,dual=True)) # optional -- plantri
sage: dual_planar = list(graphs.planar_graphs(5,dual=False)) # optional -- plantri
sage: planar_sizes = [g.size() for g in planar] # optional -- plantri
sage: dual_planar_sizes = [g.size() for g in dual_planar] # optional -- plantri
sage: planar_sizes == dual_planar_sizes # optional -- plantri
True
"""
if order < 0:
raise ValueError("number of vertices should be non-negative")
# plantri can only output general planar graphs on up to 64 vertices
if order > 64:
raise ValueError("number of vertices should be at most 64")
if exact_connectivity and minimum_connectivity is None:
raise ValueError("Minimum connectivity must be specified to use the exact_connectivity option.")
if minimum_connectivity is not None and not (1 <= minimum_connectivity <= 3):
raise ValueError("Minimum connectivity should be a number between 1 and 3.")
# minimum degree should be None or a number between 1 and 5
if minimum_degree == 0:
if order != 1:
raise ValueError("Minimum degree equal to 0 is only possible if the graphs have 1 vertex.")
elif minimum_degree is not None and not (1 <= minimum_degree <= 5):
raise ValueError("Minimum degree should be a number between 1 and 5 if the order is greater than 1.")
elif minimum_degree is None and order == 1:
minimum_degree = 0
# check combination of values of minimum degree and minimum connectivity
if minimum_connectivity is None:
if minimum_degree is not None:
minimum_connectivity = min(3, minimum_degree)
elif minimum_degree is None:
minimum_degree, minimum_connectivity = 1, 1
else:
if minimum_degree is None:
minimum_degree = minimum_connectivity
elif (minimum_degree < minimum_connectivity and
minimum_degree > 0):
raise ValueError("Minimum connectivity can be at most the minimum degree.")
#exact connectivity is not implemented for minimum connectivity 3
if exact_connectivity and minimum_connectivity==3:
raise NotImplementedError("Generation of planar graphs with connectivity exactly 3 is not implemented.")
if only_bipartite and minimum_degree > 3:
raise NotImplementedError("Generation of bipartite planar graphs with minimum degree 4 or 5 is not implemented.")
if order == 0:
return
minimum_order = {0:1, 1:2, 2:3, 3:4, 4:6, 5:12}[minimum_degree]
if order < minimum_order:
return
if order == 1:
if minimum_degree == 0:
G = graph.Graph(1)
G.set_embedding({0: []})
yield(G)
return
from sage.features.graph_generators import Plantri
Plantri().require()
cmd = 'plantri -p{}m{}c{}{}{} {}'
command = cmd.format('b' if only_bipartite else '',
minimum_degree,
minimum_connectivity,
'x' if exact_connectivity else '',
'd' if dual else '',
order)
sp = subprocess.Popen(command, shell=True,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=True,
encoding='latin-1')
sp.stdout.reconfigure(newline='')
for G in graphs._read_planar_code(sp.stdout):
yield(G)
def triangulations(self, order, minimum_degree=None, minimum_connectivity=None,
exact_connectivity=False, only_eulerian=False, dual=False):
r"""
An iterator over connected planar triangulations using the plantri generator.
This uses the plantri generator (see [BM2007]_) which is available
through the optional package plantri.
INPUT:
- ``order`` - a positive integer smaller than or equal to 64.
This specifies the number of vertices in the generated triangulations.
- ``minimum_degree`` - default: ``None`` - a value `\geq 3` and `\leq 5`,
or ``None``. This specifies the minimum degree of the generated
triangulations. If this is ``None`` and the minimum connectivity
is specified, then this is set to the same value as the minimum
connectivity. If the minimum connectivity is also equal to ``None``,
then this is set to 3.
- ``minimum_connectivity`` - default: ``None`` - a value `\geq 3` and
`\leq 5`, or ``None``. This specifies the minimum connectivity of the
generated triangulations. If this is ``None`` and the minimum degree
is specified, then this is set to the minimum of the minimum degree
and 3. If the minimum degree is also equal to ``None``, then this is
set to 3.
- ``exact_connectivity`` - default: ``False`` - if ``True`` only
triangulations with exactly the specified connectivity will be generated.
This option cannot be used with ``minimum_connectivity=3``, or if
the minimum connectivity is not explicitly set.
- ``only_eulerian`` - default: ``False`` - if ``True`` only Eulerian
triangulations will be generated. This option cannot be used if the
minimum degree is explicitly set to anything else than 4.
- ``dual`` - default: ``False`` - if ``True`` return instead the
planar duals of the generated graphs.
OUTPUT:
An iterator which will produce all planar triangulations with the given
number of vertices as Sage graphs with an embedding set. These will be
simple graphs (no loops, no multiple edges, no directed edges).
.. SEEALSO::
- :meth:`~sage.graphs.generic_graph.GenericGraph.set_embedding`,
:meth:`~sage.graphs.generic_graph.GenericGraph.get_embedding` --
get/set methods for embeddings.
- :meth:`~sage.graphs.graph_generators.GraphGenerators.RandomTriangulation`
-- build a random triangulation.
EXAMPLES:
The unique planar embedding of the `K_4` is the only planar triangulations
on 4 vertices::
sage: gen = graphs.triangulations(4) # optional plantri
sage: [g.get_embedding() for g in gen] # optional plantri
[{1: [2, 3, 4], 2: [1, 4, 3], 3: [1, 2, 4], 4: [1, 3, 2]}]
but, of course, this graph is not Eulerian::
sage: gen = graphs.triangulations(4, only_eulerian=True) # optional plantri
sage: len(list(gen)) # optional plantri
0
The unique Eulerian triangulation on 6 vertices is isomorphic to the octahedral
graph. ::
sage: gen = graphs.triangulations(6, only_eulerian=True) # optional plantri
sage: g = next(gen) # optional plantri
sage: g.is_isomorphic(graphs.OctahedralGraph()) # optional plantri
True
An overview of the number of 5-connected triangulations on up to 22 vertices. This
agrees with :oeis:`A081621`::
sage: for i in range(12, 23): # optional plantri
....: L = len(list(graphs.triangulations(i, minimum_connectivity=5))) # optional plantri
....: print("{} {:3d}".format(i,L)) # optional plantri
12 1
13 0
14 1
15 1
16 3
17 4
18 12
19 23
20 71
21 187
22 627
The minimum connectivity can be at most the minimum degree::
sage: gen = next(graphs.triangulations(10, minimum_degree=3, minimum_connectivity=5)) # optional plantri
Traceback (most recent call last):
...
ValueError: Minimum connectivity can be at most the minimum degree.
There are 5 triangulations with 9 vertices and minimum degree equal to 4
that are 3-connected, but only one of them is not 4-connected::
sage: len([g for g in graphs.triangulations(9, minimum_degree=4, minimum_connectivity=3)]) # optional plantri
5
sage: len([g for g in graphs.triangulations(9, minimum_degree=4, minimum_connectivity=3, exact_connectivity=True)]) # optional plantri
1
Setting ``dual=True`` gives the planar dual graphs::
sage: [len(g) for g in graphs.triangulations(9, minimum_degree=4, minimum_connectivity=3, dual=True)] # optional plantri
[14, 14, 14, 14, 14]
TESTS::
sage: [g.size() for g in graphs.triangulations(6, minimum_connectivity=3)] # optional plantri
[12, 12]
"""
if order < 0:
raise ValueError("number of vertices should be non-negative")
# plantri can only output planar triangulations on up to 64 vertices
if order > 64:
raise ValueError("number of vertices should be at most 64")
if exact_connectivity and minimum_connectivity is None:
raise ValueError("Minimum connectivity must be specified to use the exact_connectivity option.")
if minimum_connectivity is not None and not (3 <= minimum_connectivity <= 5):
raise ValueError("Minimum connectivity should be None or a number between 3 and 5.")
if minimum_degree is not None and not (3 <= minimum_degree <= 5):
raise ValueError("Minimum degree should be None or a number between 3 and 5.")
# for Eulerian triangulations the minimum degree is set to 4 (unless it was already specifically set)
if only_eulerian and minimum_degree is None:
minimum_degree = 4
# check combination of values of minimum degree and minimum connectivity
if minimum_connectivity is None:
if minimum_degree is not None:
minimum_connectivity = min(3, minimum_degree)
else:
minimum_degree, minimum_connectivity = 3, 3
else:
| |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines test inputs and invocations for JAX primitives.
Used to test various implementations of JAX primitives, e.g., against
NumPy (lax_reference) or TensorFlow.
"""
import operator
from typing import Any, Callable, Dict, Iterable, Optional, NamedTuple, Sequence, Tuple, Union
from functools import partial
from absl import testing
import jax
from jax import config
from jax import dtypes
from jax import test_util as jtu
from jax import lax
from jax import numpy as jnp
from jax._src.lax import control_flow as lax_control_flow
from jaxlib import xla_client
import numpy as np
FLAGS = config.FLAGS
Rng = Any # A random number generator
class RandArg(NamedTuple):
  """Descriptor for a randomly generated argument.

  See description of `Harness`.
  """
  # Shape of the tensor to generate.
  shape: Tuple[int, ...]
  # Element type of the tensor to generate.
  dtype: np.dtype
class StaticArg(NamedTuple):
  """Descriptor for a static argument.

  See description of `Harness`.
  """
  # The literal value to pass to the callable; not exposed as a dynamic arg.
  value: Any
class Harness:
  """Specifies inputs and callable for a primitive.

  A harness is conceptually a callable together with a list of argument
  descriptors that jointly exercise one use case. Extra keyword parameters
  may be attached for use by the tests.

  An argument descriptor is one of:

  * a numeric value or ndarray, or
  * an instance of ``RandArg(shape, dtype)`` to be used with a PRNG to generate
    random tensor of the given shape and type, or
  * an instance of ``StaticArg(value)``. These are values that specialize the
    callable, but are not exposed as external arguments.

  For example, a harness for ``lax.take(arr, indices, axis=None)`` may want
  to expose as external (dynamic) argument the array and the indices, and
  keep the axis as a static argument (technically specializing the `take` to
  a axis):

    Harness(f"take_axis={axis}",
            lax.take,
            [RandArg((2, 4), np.float32), np.array([-1, 0, 1]), StaticArg(axis)],
            axis=axis)
  """
  # Descriptive name of the harness, used as a testcase_name. Unique in a group.
  name: str
  # The function taking all arguments (static and dynamic).
  fun: Callable
  # Descriptors for all arguments, in call order.
  arg_descriptors: Sequence[Union[RandArg, StaticArg, Any]]
  # Factory producing the random-fill function used for RandArg descriptors.
  rng_factory: Callable
  # Extra keyword parameters stored for the tests.
  params: Dict[str, Any]

  def __init__(self, name, fun, arg_descriptors, *,
               rng_factory=jtu.rand_default, **params):
    self.name = name
    self.fun = fun
    self.arg_descriptors = arg_descriptors
    self.rng_factory = rng_factory
    self.params = params

  def __str__(self):
    return self.name

  def _arg_maker(self, arg_descriptor, rng: Rng):
    # Static values are passed through verbatim; RandArg placeholders are
    # filled with random data; anything else is already a concrete argument.
    if isinstance(arg_descriptor, StaticArg):
      return arg_descriptor.value
    if isinstance(arg_descriptor, RandArg):
      fill = self.rng_factory(rng)
      return fill(arg_descriptor.shape, arg_descriptor.dtype)
    return arg_descriptor

  def args_maker(self, rng: Rng) -> Sequence:
    """All-argument maker, including the static ones."""
    return [self._arg_maker(descriptor, rng)
            for descriptor in self.arg_descriptors]

  def dyn_args_maker(self, rng: Rng) -> Sequence:
    """A dynamic-argument maker, for use with `dyn_fun`."""
    return [self._arg_maker(descriptor, rng)
            for descriptor in self.arg_descriptors
            if not isinstance(descriptor, StaticArg)]

  def dyn_fun(self, *dyn_args):
    """Invokes `fun` given just the dynamic arguments."""
    return self.fun(*self._args_from_dynargs(dyn_args))

  def _args_from_dynargs(self, dyn_args: Sequence) -> Sequence:
    """All arguments, including the static ones."""
    # Consume the dynamic arguments in order, interleaving the static values.
    dyn_iter = iter(dyn_args)
    return [descriptor.value if isinstance(descriptor, StaticArg)
            else next(dyn_iter)
            for descriptor in self.arg_descriptors]
def parameterized(harness_group: Iterable[Harness],
                  one_containing: Optional[str] = None):
  """Decorator for tests.

  The tests receive a `harness` argument.

  The `one_containing` parameter is useful for debugging. If given, then
  picks only one harness whose name contains the string. The whole set of
  parameterized tests is reduced to one test, whose name is not decorated
  to make it easier to pick for running.
  """
  # Materialize once: `harness_group` may be a generator and we iterate it
  # a second time below when building the error message.
  harness_group = tuple(harness_group)
  cases = tuple(
      dict(testcase_name=harness.name if one_containing is None else "",
           harness=harness)
      for harness in harness_group
      if one_containing is None or one_containing in harness.name)
  if one_containing is not None:
    if not cases:
      # BUG FIX: the original used implicit string-literal concatenation, so
      # `"\n".join(...)` was applied to the *whole message*, interleaving the
      # message between every harness name instead of appending the list.
      names = "\n".join(harness.name for harness in harness_group)
      raise ValueError(
          f"Cannot find test case with name containing {one_containing}. "
          f"Names are:\n{names}")
    cases = cases[0:1]
  return testing.parameterized.named_parameters(*cases)
### Harness definitions ###
###
# Unary elementwise lax primitives under test. Each is applied to a fixed
# float vector covering negative, zero, and positive values (incl. non-integers
# around the rounding boundaries).
_LAX_UNARY_ELEMENTWISE = (
    lax.abs, lax.acosh, lax.asinh, lax.atanh, lax.bessel_i0e, lax.bessel_i1e,
    lax.ceil, lax.cos, lax.cosh, lax.digamma, lax.erf, lax.erf_inv, lax.erfc,
    lax.exp, lax.expm1, lax.floor, lax.is_finite, lax.lgamma, lax.log,
    lax.log1p, lax.neg, lax.round, lax.rsqrt, lax.sign, lax.sin, lax.sinh,
    lax.sqrt, lax.tan, lax.tanh)

# One harness per (unary op, floating dtype) combination.
lax_unary_elementwise = tuple(
    Harness(f"{f_lax.__name__}_{jtu.dtype_str(dtype)}",
            f_lax,
            [arg],
            lax_name=f_lax.__name__,
            dtype=dtype)
    for f_lax in _LAX_UNARY_ELEMENTWISE
    for dtype in jtu.dtypes.all_floating
    for arg in [
        np.array([-1.6, -1.4, -1.0, 0.0, 0.1, 0.2, 1., 1.4, 1.6], dtype=dtype)
    ]
)

# bitwise_not over all integer/unsigned dtypes, plus a separate bool case.
lax_bitwise_not = tuple(
    [Harness(f"{jtu.dtype_str(dtype)}",
             lax.bitwise_not,
             [arg],
             dtype=dtype)
     for dtype in jtu.dtypes.all_integer + jtu.dtypes.all_unsigned
     for arg in [
         np.array([-1, -3, -2, 0, 0, 2, 1, 3], dtype=dtype),
     ]] +
    [Harness("bool",
             f_lax,
             [arg],
             lax_name=f_lax.__name__,
             dtype=np.bool_)
     for f_lax in [lax.bitwise_not]
     for arg in [
         np.array([True, False])
     ]]
)

# population_count over all integer/unsigned dtypes (negative inputs exercise
# the two's-complement representation).
lax_population_count = tuple(
    Harness(f"{jtu.dtype_str(dtype)}",
            lax.population_count,
            [arg],
            dtype=dtype)
    for dtype in jtu.dtypes.all_integer + jtu.dtypes.all_unsigned
    for arg in [
        np.array([-1, -2, 0, 1], dtype=dtype)
    ]
)
def _get_max_identity(dtype):
if dtypes.issubdtype(dtype, np.inexact):
return np.array(-np.inf, dtype)
elif dtypes.issubdtype(dtype, np.integer):
return np.array(dtypes.iinfo(dtype).min, dtype)
elif dtypes.issubdtype(dtype, np.bool_):
return np.array(False, np.bool_)
def _get_min_identity(dtype):
if dtypes.issubdtype(dtype, np.inexact):
return np.array(np.inf, dtype)
elif dtypes.issubdtype(dtype, np.integer):
return np.array(dtypes.iinfo(dtype).max, dtype)
elif dtypes.issubdtype(dtype, np.bool_):
return np.array(True, np.bool_)
# add/mul over all non-bool dtypes: a plain case plus a "bounds" case that
# mixes each dtype's max/min identity values to exercise overflow behavior.
lax_add_mul = tuple(
    Harness(f"fun={f_jax.__name__}_{jtu.dtype_str(dtype)}",
            f_jax,
            [lhs, rhs],
            f_jax=f_jax,
            dtype=dtype)
    for f_jax in [lax.add, lax.mul]
    for dtype in filter(lambda t: t != np.bool_, jtu.dtypes.all)
    for lhs, rhs in [
        (np.array([1, 2], dtype=dtype), np.array([3, 4], dtype=dtype))
    ]
) + tuple(
    Harness(f"fun={f_jax.__name__}_bounds_{jtu.dtype_str(dtype)}",
            f_jax,
            [lhs, rhs],
            f_jax=f_jax,
            dtype=dtype)
    for f_jax in [lax.add, lax.mul]
    for dtype in filter(lambda t: t != np.bool_, jtu.dtypes.all)
    for lhs, rhs in [
        (np.array([3, 3], dtype=dtype),
         np.array([_get_max_identity(dtype), _get_min_identity(dtype)], dtype=dtype))
    ]
)

# min/max over all dtypes: a plain case plus inf/nan combinations for
# floating and complex dtypes (NaN-propagation semantics).
lax_min_max = tuple(
    Harness(f"fun={f_jax.__name__}_{jtu.dtype_str(dtype)}",
            f_jax,
            [lhs, rhs],
            f_jax=f_jax,
            dtype=dtype)
    for f_jax in [lax.min, lax.max]
    for dtype in jtu.dtypes.all
    for lhs, rhs in [
        (np.array([1, 2], dtype=dtype), np.array([3, 4], dtype=dtype))
    ]
) + tuple(
    Harness(f"fun={f_jax.__name__}_inf_nan_{jtu.dtype_str(dtype)}_{lhs[0]}_{rhs[0]}",
            f_jax,
            [lhs, rhs],
            f_jax=f_jax,
            dtype=dtype)
    for f_jax in [lax.min, lax.max]
    for dtype in jtu.dtypes.all_floating + jtu.dtypes.complex
    for lhs, rhs in [
        (np.array([np.inf, np.inf], dtype=dtype),
         np.array([np.nan, np.nan], dtype=dtype)),
        (np.array([-np.inf, -np.inf], dtype=dtype),
         np.array([np.nan, np.nan], dtype=dtype))
    ]
)
# Binary elementwise lax primitives applied to fixed float vectors with mixed
# signs and a zero (exercises div/rem sign handling and domain edges).
_LAX_BINARY_ELEMENTWISE = (
    lax.add, lax.atan2, lax.div, lax.igamma, lax.igammac, lax.max, lax.min,
    lax.nextafter, lax.rem, lax.sub)

lax_binary_elementwise = tuple(
    Harness(f"{f_lax.__name__}_{jtu.dtype_str(dtype)}",
            f_lax,
            [arg1, arg2],
            lax_name=f_lax.__name__,
            dtype=dtype
            )
    for f_lax in _LAX_BINARY_ELEMENTWISE
    for dtype in jtu.dtypes.all_floating
    for arg1, arg2 in [
        (np.array([-1.6, -1.4, -1.0, 0.0, 0.1, 0.2, 1., 1.4, 1.6], dtype=dtype),
         np.array([-1.6, 1.4, 1.0, 0.0, 0.1, 0.2, 1., 1.4, -1.6], dtype=dtype))
    ]
)

# Bitwise/logical binary primitives. Note shift_left only appears in the
# integer cases; the bool cases cover the three pure-logical ops.
_LAX_BINARY_ELEMENTWISE_LOGICAL = (
    lax.bitwise_and, lax.bitwise_or, lax.bitwise_xor, lax.shift_left,
)

lax_binary_elementwise_logical = tuple(
    [Harness(f"{f_lax.__name__}_{jtu.dtype_str(dtype)}",
             f_lax,
             [arg1, arg2],
             lax_name=f_lax.__name__,
             dtype=dtype)
     for f_lax in _LAX_BINARY_ELEMENTWISE_LOGICAL
     for dtype in jtu.dtypes.all_integer + jtu.dtypes.all_unsigned
     for arg1, arg2 in [
         (np.array([1, 3, 2, 0, 0, 2, 1, 3], dtype=dtype),
          np.array([1, 2, 3, 0, 1, 0, 2, 3], dtype=dtype))
     ]
     ] +
    [Harness(f"{f_lax.__name__}_bool",
             f_lax,
             [arg1, arg2],
             lax_name=f_lax.__name__,
             dtype=np.bool_)
     for f_lax in [lax.bitwise_and, lax.bitwise_or, lax.bitwise_xor]
     for arg1, arg2 in [
         (np.array([True, True, False, False]),
          np.array([True, False, True, False])),
     ]
     ]
)
def _make_broadcast_in_dim_harness(name, *, dtype=np.float32,
                                   shape=(2,), outshape=(2,),
                                   broadcast_dimensions=(0,)):
  """Builds a Harness binding `lax.broadcast_in_dim_p` directly.

  `shape`/`dtype` describe the random operand; `outshape` and
  `broadcast_dimensions` are the primitive's static parameters and are also
  stored on the harness for the tests to inspect.
  """
  return Harness(f"{name}_shape={jtu.format_shape_dtype_string(shape, dtype)}_outshape={outshape}_broadcastdimensions={broadcast_dimensions}",
                 lambda operand: lax.broadcast_in_dim_p.bind(
                     operand, shape=outshape,
                     broadcast_dimensions=broadcast_dimensions),
                 [RandArg(shape, dtype)],
                 shape=shape,
                 dtype=dtype,
                 outshape=outshape,
                 broadcast_dimensions=broadcast_dimensions)
lax_broadcast_in_dim = tuple(  # Validate dtypes
    _make_broadcast_in_dim_harness("dtypes", dtype=dtype)
    for dtype in jtu.dtypes.all
) + tuple(  # Validate parameter combinations
    _make_broadcast_in_dim_harness("parameter_combinations", shape=shape,
                                   outshape=outshape,
                                   broadcast_dimensions=broadcast_dimensions)
    for shape, outshape, broadcast_dimensions in [
        [(2,), (3, 2), (1,)],  # add major dimension
        [(2,), (2, 3), (0,)],  # add inner dimension
        [(), (2, 3), ()],  # use scalar shape
        [(1, 2), (4, 3, 2), (0, 2)],  # map size 1 dim to different output dim value
    ]
)

# betainc(a, b, x) over floating dtypes; inputs deliberately include values
# outside the valid domain (negative a/b, x outside [0, 1]).
lax_betainc = tuple(
    Harness(f"_{jtu.dtype_str(dtype)}",
            lax.betainc,
            [arg1, arg2, arg3],
            dtype=dtype)
    for dtype in jtu.dtypes.all_floating
    for arg1, arg2, arg3 in [
        (np.array([-1.6, -1.4, -1.0, 0.0, 0.1, 0.3, 1, 1.4, 1.6], dtype=dtype),
         np.array([-1.6, 1.4, 1.0, 0.0, 0.2, 0.1, 1, 1.4, -1.6], dtype=dtype),
         np.array([1.0, -1.0, 2.0, 1.0, 0.3, 0.3, -1.0, 2.4, 1.6], dtype=dtype))
    ]
)

# Shared operand for the gather harnesses defined below.
_gather_input = np.arange(1000, dtype=np.float32).reshape((10, 10, 10))
lax_gather = tuple(
# Construct gather harnesses using take
[Harness(f"from_take_indices_shape={indices.shape}_axis={axis}",
lambda a, i, axis: jnp.take(a, i, axis=axis),
[_gather_input,
indices,
StaticArg(axis)])
for indices in [
# Ensure each set of indices has a distinct shape
np.array(2, dtype=np.int32),
np.array([2], dtype=np.int32),
np.array([2, 4], dtype=np.int32),
np.array([[2, 4], [5, 6]], dtype=np.int32),
np.array([0, 1, 10], dtype=np.int32), # Index out of bounds
np.array([0, 1, 2, -1], dtype=np.int32), # Index out of bounds
]
for axis in [0, 1, 2]] +
# Directly from lax.gather in lax_test.py.
[Harness(
f"_shape={shape}_idxs_shape={idxs.shape}_dnums={dnums}_slice_sizes={slice_sizes}",
lambda op, | |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
import remove_hairs
from remove_hairs import remove_hairs as remove_hairs_fn
from remove_hairs import remove_hairs_decorator
from .serializer import MARCXMLSerializer
from structures import Person
from structures import Corporation
from structures import PublicationType
# Variables ===================================================================
remove_hairs.HAIRS = r" :;<>(){}[]\/"
# Functions & classes =========================================================
def _undefined_pattern(value, fn, undefined):
"""
If ``fn(value) == True``, return `undefined`, else `value`.
"""
if fn(value):
return undefined
return value
class MARCXMLQuery(MARCXMLSerializer):
"""
This class defines highlevel getters over MARC XML / OAI records.
"""
    def __init__(self, xml=None, resort=True):
        """
        Constructor; arguments are passed straight to
        :class:`MARCXMLSerializer`.

        Args:
            xml (optional): XML/OAI record to parse — semantics defined by
                the parent serializer (see its docs).
            resort (bool, default True): forwarded to the parent serializer.
        """
        super(MARCXMLQuery, self).__init__(xml, resort)
def _parse_corporations(self, datafield, subfield, roles=["any"]):
"""
Parse informations about corporations from given field identified
by `datafield` parameter.
Args:
datafield (str): MARC field ID ("``110``", "``610``", etc..)
subfield (str): MARC subfield ID with name, which is typically
stored in "``a``" subfield.
roles (str): specify which roles you need. Set to ``["any"]`` for
any role, ``["dst"]`` for distributors, etc.. For
details, see
http://www.loc.gov/marc/relators/relaterm.html
Returns:
list: :class:`Corporation` objects.
"""
if len(datafield) != 3:
raise ValueError(
"datafield parameter have to be exactly 3 chars long!"
)
if len(subfield) != 1:
raise ValueError(
"Bad subfield specification - subield have to be 3 chars long!"
)
parsed_corporations = []
for corporation in self.get_subfields(datafield, subfield):
other_subfields = corporation.other_subfields
# check if corporation have at least one of the roles specified in
# 'roles' parameter of function
if "4" in other_subfields and roles != ["any"]:
corp_roles = other_subfields["4"] # list of role parameters
relevant = any(map(lambda role: role in roles, corp_roles))
# skip non-relevant corporations
if not relevant:
continue
name = ""
place = ""
date = ""
name = corporation
if "c" in other_subfields:
place = ",".join(other_subfields["c"])
if "d" in other_subfields:
date = ",".join(other_subfields["d"])
parsed_corporations.append(Corporation(name, place, date))
return parsed_corporations
def _parse_persons(self, datafield, subfield, roles=["aut"]):
"""
Parse persons from given datafield.
Args:
datafield (str): code of datafield ("010", "730", etc..)
subfield (char): code of subfield ("a", "z", "4", etc..)
role (list of str): set to ["any"] for any role, ["aut"] for
authors, etc.. For details see
http://www.loc.gov/marc/relators/relaterm.html
Main records for persons are: "100", "600" and "700", subrecords "c".
Returns:
list: Person objects.
"""
# parse authors
parsed_persons = []
raw_persons = self.get_subfields(datafield, subfield)
for person in raw_persons:
# check if person have at least one of the roles specified in
# 'roles' parameter of function
other_subfields = person.other_subfields
if "4" in other_subfields and roles != ["any"]:
person_roles = other_subfields["4"] # list of role parameters
relevant = any(map(lambda role: role in roles, person_roles))
# skip non-relevant persons
if not relevant:
continue
# result of .strip() is string, so ind1/2 in MARCSubrecord are lost
ind1 = person.i1
ind2 = person.i2
person = person.strip()
name = ""
second_name = ""
surname = ""
title = ""
# here it gets nasty - there is lot of options in ind1/ind2
# parameters
if ind1 == "1" and ind2 == " ":
if "," in person:
surname, name = person.split(",", 1)
elif " " in person:
surname, name = person.split(" ", 1)
else:
surname = person
if "c" in other_subfields:
title = ",".join(other_subfields["c"])
elif ind1 == "0" and ind2 == " ":
name = person.strip()
if "b" in other_subfields:
second_name = ",".join(other_subfields["b"])
if "c" in other_subfields:
surname = ",".join(other_subfields["c"])
elif ind1 == "1" and ind2 == "0" or ind1 == "0" and ind2 == "0":
name = person.strip()
if "c" in other_subfields:
title = ",".join(other_subfields["c"])
parsed_persons.append(
Person(
name.strip(),
second_name.strip(),
surname.strip(),
title.strip()
)
)
return parsed_persons
@remove_hairs_decorator
def get_name(self):
"""
Returns:
str: Name of the book.
Raises:
KeyError: When name is not specified.
"""
return "".join(self.get_subfields("245", "a"))
@remove_hairs_decorator
def get_subname(self, undefined=""):
"""
Args:
undefined (optional): Argument, which will be returned if the
`subname` record is not found.
Returns:
str: Subname of the book or `undefined` if `subname` is not \
found.
"""
return _undefined_pattern(
"".join(self.get_subfields("245", "b")),
lambda x: x.strip() == "",
undefined
)
@remove_hairs_decorator
def get_price(self, undefined=""):
"""
Args:
undefined (optional): Argument, which will be returned if the
`price` record is not found.
Returns:
str: Price of the book (with currency) or `undefined` if `price` \
is not found.
"""
return _undefined_pattern(
"".join(self.get_subfields("020", "c")),
lambda x: x.strip() == "",
undefined
)
@remove_hairs_decorator
def get_part(self, undefined=""):
"""
Args:
undefined (optional): Argument, which will be returned if the
`part` record is not found.
Returns:
str: Which part of the book series is this record or `undefined` \
if `part` is not found.
"""
return _undefined_pattern(
"".join(self.get_subfields("245", "p")),
lambda x: x.strip() == "",
undefined
)
@remove_hairs_decorator
def get_part_name(self, undefined=""):
"""
Args:
undefined (optional): Argument, which will be returned if the
`part_name` record is not found.
Returns:
str: Name of the part of the series. or `undefined` if `part_name`\
is not found.
"""
return _undefined_pattern(
"".join(self.get_subfields("245", "n")),
lambda x: x.strip() == "",
undefined
)
@remove_hairs_decorator
def get_publisher(self, undefined=""):
"""
Args:
undefined (optional): Argument, which will be returned if the
`publisher` record is not found.
Returns:
str: Name of the publisher ("``Grada``" for example) or \
`undefined` if `publisher` is not found.
"""
publishers = set([
remove_hairs_fn(publisher)
for publisher in self["260b "] + self["264b"]
])
return _undefined_pattern(
", ".join(publishers),
lambda x: x.strip() == "",
undefined
)
def get_pub_date(self, undefined=""):
"""
Args:
undefined (optional): Argument, which will be returned if the
`pub_date` record is not found.
Returns:
str: Date of publication (month and year usually) or `undefined` \
if `pub_date` is not found.
"""
dates = self["260c "] + self["264c"]
def clean_date(date):
"""
Clean the `date` strings from special characters, but leave
sequences of numbers followed by -.
So:
[2015]- -> 2015
2015- -> 2015-
"""
out = ""
was_digit = False
for c in date:
if c.isdigit() or (c == "-" and was_digit) or c == " ":
out += c
was_digit = c.isdigit()
return out
# clean all the date strings
dates = set([
clean_date(date)
for date in self["260c "] + self["264c"]
])
return _undefined_pattern(
", ".join(dates),
lambda x: x.strip() == "",
undefined
)
@remove_hairs_decorator
def get_pub_order(self, undefined=""):
"""
Args:
undefined (optional): Argument, which will be returned if the
`pub_order` record is not found.
Returns:
str: Information about order in which was the book published or \
`undefined` if `pub_order` is not found.
"""
return _undefined_pattern(
"".join(self.get_subfields("901", "f")),
lambda x: x.strip() == "",
undefined
)
@remove_hairs_decorator
def get_pub_place(self, undefined=""):
"""
Args:
undefined (optional): Argument, which will be returned if the
`pub_place` record is not found.
Returns:
str: Name of city/country where the book was published or \
`undefined` if `pub_place` is not found.
"""
places = set([
remove_hairs_fn(place)
for place in self["260a "] + self["264a"]
])
return _undefined_pattern(
", ".join(places),
lambda x: x.strip() == "",
undefined
)
@remove_hairs_decorator
def get_format(self, undefined=""):
"""
Args:
undefined (optional): Argument, which will be returned if the
`format` record is not found.
Returns:
str: Dimensions of the book ('``23 cm``' for example) or
`undefined` if `format` is not found.
"""
return _undefined_pattern(
"".join(self.get_subfields("300", "c")),
lambda x: x.strip() == "",
undefined
)
def get_authors(self):
"""
Returns:
list: Authors represented as :class:`.Person` objects.
"""
authors = self._parse_persons("100", "a")
authors += self._parse_persons("600", "a")
authors += self._parse_persons("700", "a")
authors += self._parse_persons("800", "a")
return authors
def get_corporations(self, roles=["dst"]):
"""
Args:
roles (list, optional): Specify which types of corporations you
need. Set to ``["any"]`` for any role, ``["dst"]`` for
distributors, etc..
Note:
See http://www.loc.gov/marc/relators/relaterm.html for details.
Returns:
list: :class:`.Corporation` objects specified by roles parameter.
"""
corporations = self._parse_corporations("110", "a", roles)
corporations += self._parse_corporations("610", "a", roles)
corporations += self._parse_corporations("710", "a", roles)
corporations += self._parse_corporations("810", "a", roles)
return corporations
def get_distributors(self):
"""
Returns:
list: Distributors represented as :class:`.Corporation` object.
"""
return self.get_corporations(roles=["dst"])
def _clean_isbn(self, isbn):
"""
Clean ISBN from other information (binding).
"""
return isbn.strip().split(" ", 1)[0]
def get_invalid_ISBNs(self):
"""
Get list of invalid ISBN (``020z``).
Returns:
list: List with INVALID ISBN strings.
"""
return [
self._clean_isbn(isbn)
for isbn in self["020z"]
]
def get_ISBNs(self):
"""
Get list of VALID ISBN.
Returns:
list: List with *valid* ISBN strings.
| |
the caller's system
"""
# Ensure that StatePoint.read_results() was called first
if self.mean is None or self.std_dev is None:
msg = 'The Tally ID="{0}" has no data to return. Call the ' \
'StatePoint.read_results() method before using ' \
'Tally.get_pandas_dataframe(...)'.format(self.id)
raise KeyError(msg)
# If using Summary, ensure StatePoint.link_with_summary(...) was called
if summary and not self.with_summary:
msg = 'The Tally ID="{0}" has not been linked with the Summary. ' \
'Call the StatePoint.link_with_summary(...) method ' \
'before using Tally.get_pandas_dataframe(...) with ' \
'Summary info'.format(self.id)
raise KeyError(msg)
# Attempt to import Pandas
try:
import pandas as pd
except ImportError:
msg = 'The Pandas Python package must be installed on your system'
raise ImportError(msg)
# Initialize a pandas dataframe for the tally data
df = pd.DataFrame()
# Find the total length of the tally data array
data_size = self.mean.size
# Build DataFrame columns for filters if user requested them
if filters:
# Append each Filter's DataFrame to the overall DataFrame
for filter in self.filters:
filter_df = filter.get_pandas_dataframe(data_size, summary)
df = pd.concat([df, filter_df], axis=1)
# Include DataFrame column for nuclides if user requested it
if nuclides:
nuclides = []
for nuclide in self.nuclides:
# Write Nuclide name if Summary info was linked with StatePoint
if isinstance(nuclide, Nuclide):
nuclides.append(nuclide.name)
else:
nuclides.append(nuclide)
# Tile the nuclide bins into a DataFrame column
nuclides = np.repeat(nuclides, len(self.scores))
tile_factor = data_size / len(nuclides)
df['nuclide'] = np.tile(nuclides, tile_factor)
# Include column for scores if user requested it
if scores:
tile_factor = data_size / len(self.scores)
df['score'] = np.tile(self.scores, tile_factor)
# Append columns with mean, std. dev. for each tally bin
df['mean'] = self.mean.ravel()
df['std. dev.'] = self.std_dev.ravel()
df = df.dropna(axis=1)
# Expand the columns into Pandas MultiIndices for readability
if pd.__version__ >= '0.16':
columns = copy.deepcopy(df.columns.values)
# Convert all elements in columns list to tuples
for i, column in enumerate(columns):
if not isinstance(column, tuple):
columns[i] = (column,)
# Make each tuple the same length
max_len_column = len(max(columns, key=len))
for i, column in enumerate(columns):
delta_len = max_len_column - len(column)
if delta_len > 0:
new_column = list(column)
new_column.extend(['']*delta_len)
columns[i] = tuple(new_column)
# Create and set a MultiIndex for the DataFrame's columns
df.columns = pd.MultiIndex.from_tuples(columns)
return df
def get_reshaped_data(self, value='mean'):
"""Returns an array of tally data with one dimension per filter.
The tally data in OpenMC is stored as a 3D array with the dimensions
corresponding to filters, nuclides and scores. As a result, tally data
can be opaque for a user to directly index (i.e., without use of the
Tally.get_values(...) method) since one must know how to properly use
the number of bins and strides for each filter to index into the first
(filter) dimension.
This builds and returns a reshaped version of the tally data array with
unique dimensions corresponding to each tally filter. For example,
suppose this tally has arrays of data with shape (8,5,5) corresponding
to two filters (2 and 4 bins, respectively), five nuclides and five
scores. This method will return a version of the data array with the
with a new shape of (2,4,5,5) such that the first two dimensions
correspond directly to the two filters with two and four bins.
Parameters
----------
value : str
A string for the type of value to return - 'mean' (default),
'std_dev', 'rel_err', 'sum', or 'sum_sq' are accepted
Returns
-------
ndarray
The tally data array indexed by filters, nuclides and scores.
"""
# Get the 3D array of data in filters, nuclides and scores
data = self.get_values(value=value)
# Build a new array shape with one dimension per filter
new_shape = ()
for filter in self.filters:
new_shape += (filter.num_bins, )
new_shape += (self.num_nuclides,)
new_shape += (self.num_score_bins,)
# Reshape the data with one dimension for each filter
data = np.reshape(data, new_shape)
return data
def export_results(self, filename='tally-results', directory='.',
format='hdf5', append=True):
"""Exports tallly results to an HDF5 or Python pickle binary file.
Parameters
----------
filename : str
The name of the file for the results (default is 'tally-results')
directory : str
The name of the directory for the results (default is '.')
format : str
The format for the exported file - HDF5 ('hdf5', default) and
Python pickle ('pkl') files are supported
append : bool
Whether or not to append the results to the file (default is True)
Raises
------
KeyError
When this method is called before the Tally is populated with data
by the StatePoint.read_results() method.
"""
# Ensure that StatePoint.read_results() was called first
if self._sum is None or self._sum_sq is None and not self.derived:
msg = 'The Tally ID="{0}" has no data to export. Call the ' \
'StatePoint.read_results() method before using ' \
'Tally.export_results(...)'.format(self.id)
raise KeyError(msg)
if not isinstance(filename, basestring):
msg = 'Unable to export the results for Tally ID="{0}" to ' \
'filename="{1}" since it is not a ' \
'string'.format(self.id, filename)
raise ValueError(msg)
elif not isinstance(directory, basestring):
msg = 'Unable to export the results for Tally ID="{0}" to ' \
'directory="{1}" since it is not a ' \
'string'.format(self.id, directory)
raise ValueError(msg)
elif format not in ['hdf5', 'pkl', 'csv']:
msg = 'Unable to export the results for Tally ID="{0}" to format ' \
'"{1}" since it is not supported'.format(self.id, format)
raise ValueError(msg)
elif not isinstance(append, bool):
msg = 'Unable to export the results for Tally ID="{0}" since the ' \
'append parameter is not True/False'.format(self.id, append)
raise ValueError(msg)
# Make directory if it does not exist
if not os.path.exists(directory):
os.makedirs(directory)
# HDF5 binary file
if format == 'hdf5':
import h5py
filename = directory + '/' + filename + '.h5'
if append:
tally_results = h5py.File(filename, 'a')
else:
tally_results = h5py.File(filename, 'w')
# Create an HDF5 group within the file for this particular Tally
tally_group = tally_results.create_group('Tally-{0}'.format(self.id))
# Add basic Tally data to the HDF5 group
tally_group.create_dataset('id', data=self.id)
tally_group.create_dataset('name', data=self.name)
tally_group.create_dataset('estimator', data=self.estimator)
tally_group.create_dataset('scores', data=np.array(self.scores))
# Add a string array of the nuclides to the HDF5 group
nuclides = []
for nuclide in self.nuclides:
nuclides.append(nuclide.name)
tally_group.create_dataset('nuclides', data=np.array(nuclides))
# Create an HDF5 sub-group for the Filters
filter_group = tally_group.create_group('filters')
for filter in self.filters:
filter_group.create_dataset(filter.type, data=filter.bins)
# Add all results to the main HDF5 group for the Tally
tally_group.create_dataset('sum', data=self.sum)
tally_group.create_dataset('sum_sq', data=self.sum_sq)
tally_group.create_dataset('mean', data=self.mean)
tally_group.create_dataset('std_dev', data=self.std_dev)
# Close the Tally results HDF5 file
tally_results.close()
# Python pickle binary file
elif format == 'pkl':
# Load the dictionary from the Pickle file
filename = directory + '/' + filename + '.pkl'
if os.path.exists(filename) and append:
tally_results = pickle.load(file(filename, 'rb'))
else:
tally_results = {}
# Create a nested dictionary within the file for this particular Tally
tally_results['Tally-{0}'.format(self.id)] = {}
tally_group = tally_results['Tally-{0}'.format(self.id)]
# Add basic Tally data to the nested dictionary
tally_group['id'] = self.id
tally_group['name'] = self.name
tally_group['estimator'] = self.estimator
tally_group['scores'] = np.array(self.scores)
# Add a string array of the nuclides to the HDF5 group
nuclides = []
for nuclide in self.nuclides:
nuclides.append(nuclide.name)
tally_group['nuclides'] = np.array(nuclides)
# Create a nested dictionary for the Filters
tally_group['filters'] = {}
filter_group = tally_group['filters']
for filter in self.filters:
filter_group[filter.type] = filter.bins
# Add all results to the main sub-dictionary for the Tally
tally_group['sum'] = self.sum
tally_group['sum_sq'] = self.sum_sq
tally_group['mean'] = self.mean
tally_group['std_dev'] = self.std_dev
# Pickle the Tally results to a file
pickle.dump(tally_results, open(filename, 'wb'))
def _outer_product(self, other, binary_op):
"""Combines filters, scores and nuclides with another tally.
This is a helper method for the tally arithmetic methods. The filters,
scores and nuclides from both tallies are enumerated into all possible
combinations and expressed as CrossFilter, CrossScore and
CrossNuclide objects in the new derived tally.
Parameters
----------
other : Tally
The tally on the right hand side of the outer product
binary_op : {'+', '-', '*', '/', '^'}
The binary operation in the outer product
Returns
-------
Tally
A new Tally that is the outer product with this one.
Raises
------
ValueError
When this method is called before the other tally is populated
with data by the StatePoint.read_results() method.
"""
# Check | |
#
# Author : <NAME>
#
from pyqtside.QtGui import QWidget, QLabel, QGroupBox, QComboBox, QSizePolicy, QButtonGroup
from pyqtside.QtCore import QObject, Qt, SIGNAL, Signal, Slot
import views
class JigWidget(QObject):
    """Base class of the widget hierarchy.

    Each widget has a key, a title, a description, a path (list of keys from
    the root down to this widget) and a `_view` (a views.JigView or derived
    object) responsible for the actual on-screen rendering.
    """
    def __init__(self, key, title, desc):
        QObject.__init__(self)
        self._key = key
        self._title = title
        self._desc = desc
        self._path = [key]
        self._view = None  # a views.JigView object (or derived)

    def getKey(self):
        """Return the widget's key."""
        return self._key

    def getTitle(self):
        """Return the widget's title."""
        return self._title

    def setTitle(self, title):
        """Set the widget's title."""
        self._title = title

    def setDescription(self, desc):
        """Set the description, both on the model and on the view."""
        self._desc = desc
        self._view.setDescription(desc)

    def getPath(self):
        """Return the widget's path (list of keys from the root)."""
        return self._path

    def getJSon(self):
        raise NotImplementedError

    def setHighlightPath(self, path, highlight):
        """ Highlight (or un-highlight) an object causing an error """
        pass

    def getChild(self, key_or_num):
        raise NotImplementedError

    def findChildren(self, key):
        raise NotImplementedError

    def resolvePath(self, path):
        """Return the widget designated by `path`, or None if no match."""
        if not len(path) or path[0] != self._key:
            return None
        current = self
        for child_key in path[1:]:
            current = current.getChild(child_key)
            if current is None:
                return None
        return current

    def _completePath(self, otherW):
        """Prefix this widget's path with `otherW`'s path."""
        self._path = otherW._path + self._path

    def clone(self, newKey=None):
        """Return a copy of this widget, optionally under a new key."""
        copy = self.__class__(newKey or self._key, self._title, self._desc)
        # Do not invoke setVisibility() here: we don't want to actually show
        # the widget as soon as it is cloned (would lead to many blinking
        # stuff on screen everytime an array is extended). So just copy the
        # attribute:
        copy._view._visible = self._view._visible
        copy.setActive(self.isActive())
        return copy

    def setVisibility(self, visible):
        """Delegate visibility to the view."""
        return self._view.setVisibility(visible)

    def setActive(self, enable):
        """Delegate activation to the view."""
        return self._view.setActive(enable)

    def isActive(self):
        """Whether the underlying view is active."""
        return self._view.isActive()

    def getLayout(self):
        """Return the view's Qt layout."""
        return self._view.getLayout()

    def hideFromGUI(self):
        """Hide and deactivate (deactivation avoids validation errors)."""
        self.setVisibility(False)
        self.setActive(False)  # to avoid validation errors on this widget!

    def isConstrained(self):
        """ A 'constrained' widget is a combo box or a set of radio buttons, for which there can
        not be any validation error, since the user can only select in a set of pre-defined valid choices. """
        return False
class JigComposed(JigWidget):
    """ Anything that contains more than one child """
    def __init__(self, key, title, desc):
        JigWidget.__init__(self, key, title, desc)
        self._leaves = []    # ordered list of child JigWidget objects
    def _completePath(self, otherW):
        # Prefix our own path, then recursively prefix all children's paths.
        JigWidget._completePath(self, otherW)
        for l in self._leaves:
            l._completePath(otherW)
    def clone(self, newKey=None):
        """ Clone this widget and, recursively, all its children. """
        ret = JigWidget.clone(self, newKey)
        for l in self._leaves:
            c = l.clone()
            c._completePath(ret)
            ret._leaves.append(c)
        return ret
    def getChild(self, key_or_num):
        """ Return a child according to its key (str) or its ordinal position (int) """
        if type(key_or_num) is int:
            if key_or_num < len(self._leaves):
                return self._leaves[key_or_num]
            else:
                return None
        else:
            for l in self._leaves:
                if l.getKey() == key_or_num:
                    return l
            return None
    def findChildren(self, key):
        """ Return all descendants (depth-first) whose key equals 'key'. """
        ret = []
        for l in self._leaves:
            if l.getKey() == key:
                ret.append(l)
            l2 = l.findChildren(key)
            ret.extend(l2)
        return ret
    def setHighlightPath(self, path, highlight):
        """ Propagate (un-)highlighting down to the widget designated by 'path'. """
        if self.resolvePath(path) is None:
            return
        if not highlight:
            # un-highlight ourselves before recursing into the children
            self._view.setHighlight(highlight)
        if len(path) >= 2:
            w = self.resolvePath(path[:2]) # direct child
            w.setHighlightPath(path[1:], highlight)
            # Find child index in the _leaves array
            keys = [l._key for l in self._leaves]
            idx = keys.index(w._key)
            if highlight:
                self._view.aboutToHighlightChild(idx)
        else:
            self._view.setHighlight(highlight)
class JigComposedHeterogen(JigComposed):
    """ Composed widget whose children may be of different types. """
    def __init__(self, key, title, desc):
        JigComposed.__init__(self, key, title, desc)
    def addJigWidget(self, w, update_qt=True):
        """ Append a child widget and prefix its path with ours. """
        child = w
        self._leaves.append(child)
        child._completePath(self)
class JigObject(JigComposedHeterogen):
    """ JSON-like 'object': children are looked up by string keys and the
    widget serializes to a dict. """
    def __init__(self, key, title, desc):
        JigComposedHeterogen.__init__(self, key, title, desc)
    def getChild(self, key):
        """ For an object, child are necessarily strings! """
        if isinstance(key, basestring):
            return JigComposedHeterogen.getChild(self, key)
        return None
    def getJSon(self):
        """ Serialize as a dict mapping each child's key to its JSON value. """
        result = {}
        for leaf in self._leaves:
            if not isinstance(leaf, JigWidget):
                raise ValueError("JigObject has non JigWidget as children!")
            result[leaf.getKey()] = leaf.getJSon()
        return result
class JigObjectFlat(JigObject):
    """ Flat representation of an object (all subwidgets inserted in a group box) """
    def __init__(self, key, title, desc):
        JigObject.__init__(self, key, title, desc)
        # View: a single group box containing all the sub-widgets.
        self._view = views.JigViewBlockWidget(self, title, desc)
class JigObjectVoid(JigObject):
    """ Object with no visual representation of its own (void view). """
    def __init__(self, key, title, desc):
        JigObject.__init__(self, key, title, desc)
        self._view = views.JigViewVoid(self)
class JigArray(JigComposed):
    """ An array of items, all of the same types. New elements are added by internally cloning
    a reference item set with "setItem()". Tricky point: this item is ultimately a QWidget and hence
    must be parented in the displayed window (hidden orphan widgets don't behave so well).
    """
    def __init__(self, key, title, desc):
        JigComposed.__init__(self, key, title, desc)
        self._item = None    # reference item, cloned for each new element (see setItem())
    def clone(self, newKey=None):
        """ Clone the array, including its (hidden) reference item. """
        ret = JigComposed.clone(self, newKey=newKey)
        ret.setItem(self._item.clone())
        ret._view.placeLeaves()
        return ret
    def getChild(self, num):
        """ For an array, child are necessarily integers! """
        if not isinstance(num, int):
            return None
        return JigComposed.getChild(self, num)
    def getItem(self):
        """ Return the reference item set with setItem(). """
        return self._item
    def setItem(self, w):
        """ Set the reference item; it is kept invisible but parented in the view. """
        self._item = w
        self._item.setVisibility(False)
        self._view.addInvisibleWidget(w)
    def setNumberOfItems(self, n, update_view=True):
        """ Grow or shrink the array to exactly n elements, cloning the
        reference item for each new element. Raises ValueError if no
        reference item has been set. """
        if self._item is None:
            raise ValueError("Call setItem() first!")
        if n == len(self._leaves):
            return
        if n < len(self._leaves):
            # Shrinking: hide the trailing elements and drop them.
            for i, w in enumerate(self._leaves[n:]):   # NOTE: 'i' is unused
                w._view.setVisibility(False)
                if update_view:
                    self._view.popFromView()
            self._leaves = self._leaves[:n]
        elif n > len(self._leaves):
            for i in range(len(self._leaves), n):
                w = self._item.clone(newKey=i)
                # Do not show in Qt yet (to avoid widgets appearing all over the place):
                # the final 'pushToView' will do it properly after placement.
                w._view.setVisibility(True,update_qt=False)
                w._completePath(self)
                w.setTitle("%s (compo %d)" % (self._title, i))
                self._leaves.append(w)
                if update_view:
                    self._view.pushToView(w)
    def setElementTitle(self, idx, title):
        """ Rename element number idx. """
        self._leaves[idx].setTitle(title)
    def getJSon(self):
        """ Serialize as the list of the children's JSON values. """
        ret = []
        for w in self._leaves:
            if not isinstance(w, JigWidget):
                raise ValueError("JigObject has non JigWidget as children!")
            ret.append(w.getJSon())
        return ret
class JigArrayFlat(JigArray):
    """ Array displayed flat, inside a single group box. """
    def __init__(self, key, title, desc):
        JigArray.__init__(self, key, title, desc)
        self._view = views.JigViewBlockWidget(self, title, desc)
    def setHighlightPath(self, path, highlight):
        """ Override to highlight the whole array when no sub-items could be provided """
        if len(path) == 1:
            self._view.setHighlightArray(highlight)
        return JigArray.setHighlightPath(self, path, highlight)
class JigObjectTab(JigObject):
    """ Tabulated representation of an object: one tab per property """
    def __init__(self, key, title, desc):
        JigObject.__init__(self, key, title, desc)
        self._view = views.JigViewTab(self, title, desc)
    def clone(self, newKey=None):
        # Some work to do here:
        raise NotImplementedError
class JigArrayTab(JigArray):
    """ Array displayed as a tab widget: one tab per element. """
    def __init__(self, key, title, desc):
        JigArray.__init__(self, key, title, desc)
        self._view = views.JigViewTab(self, title, desc)
    def setElementTitle(self, idx, title):
        """ Rename element idx and the corresponding tab. """
        JigArray.setElementTitle(self, idx, title)
        self._view._tabW.setTabText(idx, title)
    def clone(self, newKey=None):
        # Some work to do here:
        raise NotImplementedError
    def clearArrayLayout(self):
        # NOTE(review): 'self._tabW' is not set on this class -- the tab widget
        # lives on the view (setElementTitle above uses self._view._tabW), so
        # calling this would raise AttributeError. Looks dead/broken -- confirm.
        self._tabW.clear()
    def completeArrayLayout(self, widget):
        # NOTE(review): 'completeTab' is not defined on this class or the bases
        # shown here -- presumably meant for the view; confirm before use.
        self.completeTab(widget)
class JigOneOf(JigComposedHeterogen):
    """ Exclusive choice between several alternative children. """
    def __init__(self, key, title, desc):
        JigComposedHeterogen.__init__(self, key, title, desc)
    def getChild(self, num):
        """ For a oneOf, child are necessarily integers! """
        if isinstance(num, int):
            return JigComposed.getChild(self, num)
        return None
    def getJSon(self):
        """ Serialize the currently selected alternative, or None when no
        alternative is selected. """
        selected = self._currentLeaf()
        if selected is None:
            return None
        return selected.getJSon()
class JigOneOfRadio(JigOneOf):
    """ 'oneOf' rendered as a set of exclusive radio buttons. """
    def __init__(self, key, title, desc):
        JigOneOf.__init__(self, key, title, desc)
        self._radios = [] # List of QRadioButton, one per alternative (see __addRadio)
        self._but_group = QButtonGroup()
        self._view = views.JigViewRadio(self, title, desc)
    def clone(self, newKey=None):
        """ Also activate the same radio in the clone """
        ret = JigOneOf.clone(self, newKey=newKey)
        for _ in ret._leaves:
            ret.__addRadio()
        return ret
    def __addRadio(self):
        # Create one radio button parented to the view's main widget and wire it.
        from pyqtside.QtGui import QRadioButton
        r = QRadioButton(self._view._mainW) # ugly, TODO think about this
        self._radios.append(r)
        self._but_group.addButton(r)
        self.connect(r, SIGNAL("toggled(bool)"), self.onRadio)
        # if len(self._radios) == 1:
        #     self._radios[0].setChecked(True)
        self.onRadio()
    def addJigWidget(self, w, update_qt=True):
        """ Add an alternative together with its radio button. """
        JigOneOf.addJigWidget(self, w, update_qt=update_qt)
        self.__addRadio()
    def onRadio(self, togg=True):
        """ Slot: activate the leaf of the checked radio, deactivate the others. """
        if not togg:
            return
        for i, r in enumerate(self._radios):
            if r.isChecked():
                self._leaves[i].setActive(True)
            else:
                self._leaves[i].setActive(False)
    def _currentLeaf(self):
        # Leaf of the checked radio button, or None when nothing is checked.
        l = [r.isChecked() for r in self._radios]
        try:
            idx = l.index(True)
        except ValueError:
            return None
        return self._leaves[idx]
class JigOneOfCombo(JigOneOf):
    """ 'oneOf' rendered as a combo box selecting the visible alternative. """
    def __init__(self, key, title, desc):
        JigOneOf.__init__(self, key, title, desc)
        self._combo = QComboBox()
        self.connect(self._combo, SIGNAL("currentIndexChanged(int)"), self.onIndexChanged)
        self._currLeaf = None      # currently displayed/active alternative
        self._view = views.JigViewDynamicCombo(self, title, desc)
        self._blockSig = False     # guard: ignore combo signals while populating it
    def _currentLeaf(self):
        return self._currLeaf
    def clone(self, newKey=None):
        """ Clone and re-populate the combo; the first alternative becomes current. """
        ret = JigOneOf.clone(self, newKey=newKey)
        # Warning: add item will trigger "onIndexChange"
        ret._blockSig = True
        for w in ret._leaves:
            ret._combo.addItem(w.getTitle())
        ret._blockSig = False
        if len(ret._leaves):
            ret._currLeaf = ret._leaves[0]
            ret._currLeaf._view.setVisibility(True, update_qt=False)
            ret._currLeaf.setActive(True)
            for l in ret._leaves[1:]:
                l.setActive(False)
        return ret
    def onIndexChanged(self, newIdx, update_qt=True):
        """ Slot: show/activate the newly selected leaf and hide/deactivate the previous one. """
        if self._blockSig: return
        if not self._currLeaf is None:
            self._currLeaf._view.setVisibility(False)
            self._currLeaf.setActive(False)
        self._currLeaf = self._leaves[newIdx]
        self._currLeaf._view.setVisibility(True, update_qt=update_qt)
        self._currLeaf.setActive(True)
    def addJigWidget(self, w, update_qt=True):
        """ Add an alternative; the first one added becomes the current leaf. """
        JigOneOf.addJigWidget(self, w, update_qt=update_qt)
        if not update_qt:
            self._blockSig = True
        self._combo.addItem(w.getTitle())
        if not update_qt:
            self._blockSig = False
        if len(self._leaves) == 1:
            self.onIndexChanged(0, update_qt=update_qt)
        else:
            w.setActive(False)
class JigLeafWidget(JigWidget):
    """ Terminal widget of the tree: holds a QLabel with its title and never
    has any children. """
    def __init__(self, key, title, desc):
        JigWidget.__init__(self, key, title, desc)
        self._lab = QLabel("%s" % title)
        # self._lab.setToolTip(desc)
        # self._lab.setSizePolicy(QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred))
        self._widgets = [self._lab]
        self._view = views.JigViewMW(self)
    def getChild(self, key_or_num):
        # A leaf never has children.
        return None
    def findChildren(self, key):
        return []
    def setTitle(self, title):
        """ Change the title and refresh the label text. """
        JigWidget.setTitle(self, title)
        self._lab.setText(title)
    def setHighlightPath(self, path, highlight):
        """ (Un-)highlight this leaf when 'path' designates it. """
        if self.resolvePath(path) is not None:
            self._view.setHighlight(highlight)
class JigDisplay(JigLeafWidget):
def __init__(self, | |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# sources: all_in_one.proto
# plugin: python-betterproto
from dataclasses import dataclass
from typing import List
import betterproto
@dataclass
class Data(betterproto.Message):
    """Generic message wrapper."""
    proto: int = betterproto.int32_field(1)
    bin: bytes = betterproto.bytes_field(2)
@dataclass
class NetErr(betterproto.Message):
    """Error information."""
    err: int = betterproto.sint32_field(1)
    why: bytes = betterproto.bytes_field(2)
    vars: bytes = betterproto.bytes_field(3)
    stacktrace: str = betterproto.string_field(4)
@dataclass
class C2s(betterproto.Message):
    """Packet envelope: sequence number, message type id and raw payload."""
    seq: int = betterproto.int32_field(1)
    type: int = betterproto.int32_field(2)
    data: bytes = betterproto.bytes_field(3)
@dataclass
class S2c(betterproto.Message):
    """Packet envelope with an optional error, mirroring C2s."""
    seq: int = betterproto.int32_field(1)
    type: int = betterproto.int32_field(2)
    err: "NetErr" = betterproto.message_field(3)
    data: bytes = betterproto.bytes_field(4)
@dataclass
class LoginDev(betterproto.Message):
    """Dev-login wrapper: a Login plus uid base and random seed."""
    login: "Login" = betterproto.message_field(1)
    uid_base: int = betterproto.int64_field(2)
    random_seed: int = betterproto.int64_field(3)
@dataclass
class Login(betterproto.Message):
    """Login request."""
    channel_id: str = betterproto.string_field(1)
    channel_uid: str = betterproto.string_field(2)
    role_id: int = betterproto.int64_field(3)
    verify_code: str = betterproto.string_field(4)
    server_id: int = betterproto.int32_field(5)
    os_type: int = betterproto.uint32_field(6)
    app_ver: str = betterproto.string_field(7)
    res_ver: str = betterproto.string_field(8)
    device: "Device" = betterproto.message_field(9)
    channel_extra: str = betterproto.string_field(10)
@dataclass
class Device(betterproto.Message):
    """Device information."""
    device_label: str = betterproto.string_field(1)
    device_model: str = betterproto.string_field(2)
    device_os_info: str = betterproto.string_field(3)
    device_cpu: str = betterproto.string_field(4)
    device_cpu_count: str = betterproto.string_field(5)
    device_memory: str = betterproto.string_field(6)
    device_graphic: str = betterproto.string_field(7)
    device_graphic_type: str = betterproto.string_field(8)
    device_graphic_memory: str = betterproto.string_field(9)
    device_shader_level: str = betterproto.string_field(10)
    device_recommend_level: str = betterproto.string_field(11)
    device_mac: str = betterproto.string_field(12)
    device_ip: str = betterproto.string_field(13)
@dataclass
class RoleDevice(betterproto.Message):
    """Player device information."""
    label: str = betterproto.string_field(1)
    model: str = betterproto.string_field(2)
@dataclass
class RdBase(betterproto.Message):
    """
    Base data for runtime statistics.
    NOTE: when changing any of the base runtime-stat structures (rd_base,
    rd_device, rd_render, rd_ui, rd_net, rd_extend), the fn.CutData logic in
    RuntimeDataNetMgr.lua and the corresponding C# code must be updated as
    well -- modify with care.
    """
    game_time: str = betterproto.string_field(1)
    time: str = betterproto.string_field(2)
    client_version: str = betterproto.string_field(3)
@dataclass
class RdDevice(betterproto.Message):
    """Runtime device status data."""
    opt_type: int = betterproto.int32_field(1)
    battery_level: int = betterproto.int32_field(2)
    battery_status: int = betterproto.int32_field(3)
    free_state: bool = betterproto.bool_field(4)
    persist_time: int = betterproto.int32_field(5)
    device_name: str = betterproto.string_field(6)
@dataclass
class RdRender(betterproto.Message):
    """Runtime rendering data."""
    scene_name: str = betterproto.string_field(1)
    fps: int = betterproto.int32_field(2)
@dataclass
class RdUi(betterproto.Message):
    """Runtime UI interaction statistics."""
    name: str = betterproto.string_field(1)
    opt_type: int = betterproto.int32_field(2)
    remain_time: int = betterproto.int32_field(3)
@dataclass
class RdNet(betterproto.Message):
    """Runtime network communication statistics."""
    opt_type: int = betterproto.int32_field(1)
    port_name: str = betterproto.string_field(2)
    error_num: int = betterproto.int32_field(3)
    size: int = betterproto.int32_field(4)
    state: int = betterproto.int32_field(5)
    delay: int = betterproto.int32_field(6)
@dataclass
class RdExtend(betterproto.Message):
    """Runtime extended statistics."""
    type: int = betterproto.int32_field(1)
    content: str = betterproto.string_field(2)
@dataclass
class RdDeviceData(betterproto.Message):
    """Device status record (base info + device data)."""
    base: "RdBase" = betterproto.message_field(1)
    device_data: "RdDevice" = betterproto.message_field(2)
@dataclass
class RdNetData(betterproto.Message):
    """Network status record."""
    base: "RdBase" = betterproto.message_field(1)
    net_data: "RdNet" = betterproto.message_field(2)
@dataclass
class RdRenderData(betterproto.Message):
    """Rendering record."""
    base: "RdBase" = betterproto.message_field(1)
    render_data: "RdRender" = betterproto.message_field(2)
@dataclass
class RdUiData(betterproto.Message):
    """UI interaction record."""
    base: "RdBase" = betterproto.message_field(1)
    ui_data: "RdUi" = betterproto.message_field(2)
@dataclass
class RdExtendData(betterproto.Message):
    """Extended-data record."""
    base: "RdBase" = betterproto.message_field(1)
    extend_data: "RdExtend" = betterproto.message_field(2)
@dataclass
class RdDataList(betterproto.Message):
    """Collected runtime statistics."""
    rd_device_list: List["RdDeviceData"] = betterproto.message_field(1)
    rd_ui_list: List["RdUiData"] = betterproto.message_field(2)
    rd_net_list: List["RdNetData"] = betterproto.message_field(3)
    rd_render_list: List["RdRenderData"] = betterproto.message_field(4)
    rd_extend_list: List["RdExtendData"] = betterproto.message_field(5)
@dataclass
class AchievementReceiveData(betterproto.Message):
    """Response for claiming server-wide achievement rewards."""
    receive_achievements: List[int] = betterproto.uint32_field(1)
    res: "ResChange" = betterproto.message_field(2)
@dataclass
class AchievementGetRankInfo(betterproto.Message):
    """Response with achievement leaderboard info."""
    rank_infos: List["CommonSimplePlayInfo"] = betterproto.message_field(1)
    achievement_tids: List[int] = betterproto.uint32_field(2)
@dataclass
class ArenaFightInfo(betterproto.Message):
    """Arena battle record."""
    role_id: int = betterproto.int64_field(1)
    server_id: int = betterproto.uint32_field(2)
    power: int = betterproto.uint32_field(3)
    level: int = betterproto.uint32_field(4)
    fight_id: int = betterproto.uint64_field(5)
    score_change: int = betterproto.sint32_field(6)
    time: int = betterproto.uint32_field(7)
    robot_name: bytes = betterproto.bytes_field(8)
    old_score: int = betterproto.uint32_field(9)
@dataclass
class ArenaLoginInfo(betterproto.Message):
    """Arena login info sent to the client."""
    season_start_day: int = betterproto.uint32_field(1)
    last_day: int = betterproto.uint32_field(2)
    ref_free_num: int = betterproto.uint32_field(3)
    highest_rank: int = betterproto.uint32_field(4)
    self_score: int = betterproto.uint32_field(5)
@dataclass
class ArenaMatchPlayerInfo(betterproto.Message):
    """Matched-opponent info sent to the client."""
    arena_player_infos: List["CommonPlayInfo"] = betterproto.message_field(1)
@dataclass
class ArenaFightBackInfo(betterproto.Message):
    """Battle result info sent to the client."""
    rank: int = betterproto.uint32_field(1)
    self_score: int = betterproto.uint32_field(2)
    target_score: int = betterproto.uint32_field(3)
    highest_rank: int = betterproto.uint32_field(4)
    res: "ResChange" = betterproto.message_field(5)
    daily: "RoleDailyState" = betterproto.message_field(6)
    record_infos: List["ArenaRecordInfo"] = betterproto.message_field(7)
@dataclass
class RevArenaRecordInfos(betterproto.Message):
    """Battle records pushed to the client."""
    record_infos: List["ArenaRecordInfo"] = betterproto.message_field(1)
@dataclass
class ArenaRecordInfo(betterproto.Message):
    """Arena battle record entry with display info."""
    info: "ArenaFightInfo" = betterproto.message_field(1)
    name: bytes = betterproto.bytes_field(2)
    alliance_info: bytes = betterproto.bytes_field(3)
    icon_tid: int = betterproto.int32_field(5)
@dataclass
class ProduceAddition(betterproto.Message):
    """Idle-production bonus info."""
    tid: int = betterproto.int64_field(1)
    start_time: int = betterproto.int64_field(2)
@dataclass
class ProduceExtra(betterproto.Message):
    """Idle-production extra info."""
    global_produce_tid: int = betterproto.int64_field(1)
    produce_equip_rate: int = betterproto.int64_field(2)
    last_reap_time: int = betterproto.int64_field(3)
@dataclass
class AutoProduceToClient(betterproto.Message):
    """Idle-production data returned to the client."""
    awards: List["Item"] = betterproto.message_field(1)
    produce_additions: List["ProduceAddition"] = betterproto.message_field(2)
    produce_extra: "ProduceExtra" = betterproto.message_field(3)
    res: "ResChange" = betterproto.message_field(4)
    last_gather_time: int = betterproto.uint32_field(5)
    daily: "RoleDailyState" = betterproto.message_field(6)
@dataclass
class ChatLinkParam(betterproto.Message):
    """Chat link parameter; client-side only, never sent over the network."""
    id: str = betterproto.string_field(1)
    type: int = betterproto.int32_field(2)
    data: bytes = betterproto.bytes_field(3)
@dataclass
class SimpleChatLinkParam(betterproto.Message):
    """Chat link parameter that is sent over the network."""
    type: int = betterproto.int32_field(1)
    data: bytes = betterproto.bytes_field(3)
@dataclass
class SingleMsgData(betterproto.Message):
    """A single chat message."""
    sender_role_id: int = betterproto.int64_field(1)
    time: int = betterproto.int32_field(2)
    index: int = betterproto.int32_field(3)
    content: "ChatMsgContent" = betterproto.message_field(4)
@dataclass
class ChannelMsgList(betterproto.Message):
    """Message list of a channel (pushed on login)."""
    channel_id: int = betterproto.int64_field(1)
    start_idx: int = betterproto.int32_field(2)
    end_idx: int = betterproto.int32_field(3)
    msg_list: List["SingleMsgData"] = betterproto.message_field(4)
@dataclass
class PrivateChannelMsgList(betterproto.Message):
    """Private-chat channel message list."""
    role_id: int = betterproto.int64_field(1)
    data: "ChannelMsgList" = betterproto.message_field(2)
@dataclass
class ChatMsgContent(betterproto.Message):
    """Chat message content."""
    text_content: str = betterproto.string_field(1)
    voice_content: bytes = betterproto.bytes_field(2)
    at_role_id: int = betterproto.int64_field(3)
    link_params: List["SimpleChatLinkParam"] = betterproto.message_field(4)
@dataclass
class ChatRoleBaseInfo(betterproto.Message):
    """Basic display info of a chat participant."""
    role_id: int = betterproto.int64_field(1)
    head: int = betterproto.int32_field(2)
    level: int = betterproto.int32_field(3)
    vip: int = betterproto.int32_field(4)
    title: int = betterproto.int32_field(5)
    name: str = betterproto.string_field(6)
@dataclass
class ResChatLoginData(betterproto.Message):
    """Chat login response."""
    world_channel_id: int = betterproto.int64_field(1)
    guild_channel_id: int = betterproto.int64_field(2)
    is_banned: bool = betterproto.bool_field(3)
    release_time: int = betterproto.int32_field(4)
    black_list: List[int] = betterproto.int32_field(5)
@dataclass
class ResChatInitData(betterproto.Message):
    """Chat initialization data."""
    msg_data: List["ChannelMsgList"] = betterproto.message_field(6)
    private_msg_data: List["PrivateChannelMsgList"] = betterproto.message_field(7)
    role_info_list: List["ChatRoleBaseInfo"] = betterproto.message_field(8)
@dataclass
class ResReceiveChatMsg(betterproto.Message):
    """New-message push."""
    index: int = betterproto.int32_field(1)
    channel_id: int = betterproto.int64_field(2)
    msg_data: "SingleMsgData" = betterproto.message_field(3)
    with_role_info: bool = betterproto.bool_field(4)
    sender_role_info: bytes = betterproto.bytes_field(5)
@dataclass
class EmptyMsg(betterproto.Message):
    """Empty (placeholder) message."""
    sender_role_id: int = betterproto.int64_field(1)
    channel_id: int = betterproto.int64_field(2)
    at_role_id: int = betterproto.int64_field(3)
@dataclass
class ResReceiveEmptyMsg(betterproto.Message):
    """Empty-message push."""
    msg_list: List["EmptyMsg"] = betterproto.message_field(1)
@dataclass
class ResChatMsgList(betterproto.Message):
    """Response to a message-list request."""
    msg_list: "ChannelMsgList" = betterproto.message_field(1)
    role_info_list: List["ChatRoleBaseInfo"] = betterproto.message_field(2)
@dataclass
class ReqSendChatMsgToRole(betterproto.Message):
    """Request to send a private chat message."""
    target_role_id: int = betterproto.int64_field(1)
    content: "ChatMsgContent" = betterproto.message_field(2)
@dataclass
class Currency(betterproto.Message):
    """Player currency."""
    tid: int = betterproto.int64_field(1)
    state: bool = betterproto.bool_field(2)
    count: int = betterproto.int64_field(3)
    recharge_count: int = betterproto.int64_field(4)
@dataclass
class DailyDungeonInfo(betterproto.Message):
    """Daily dungeon info."""
    progress_list: List[int] = betterproto.uint32_field(1)
    flush_time: int = betterproto.uint32_field(2)
    attack_count_list: List[int] = betterproto.uint32_field(3)
@dataclass
class DailyDungeonAttackBack(betterproto.Message):
    """Daily dungeon attack response."""
    res: "ResChange" = betterproto.message_field(1)
    new_flush_time: int = betterproto.uint32_field(2)
    new_progress: int = betterproto.uint32_field(3)
    new_count: int = betterproto.uint32_field(4)
    is_double: bool = betterproto.bool_field(5)
@dataclass
class DailyDungeonMopUpBack(betterproto.Message):
    """Daily dungeon mop-up (auto-clear) response."""
    res: "ResChange" = betterproto.message_field(1)
    new_flush_time: int = betterproto.uint32_field(2)
    new_count: int = betterproto.uint32_field(3)
    is_double: bool = betterproto.bool_field(4)
@dataclass
class DailyDungeonBuyCountBack(betterproto.Message):
    """Daily dungeon buy-attempts response."""
    res: "ResChange" = betterproto.message_field(1)
    new_flush_time: int = betterproto.uint32_field(2)
    new_buy_count: int = betterproto.uint32_field(3)
@dataclass
class EquipInfo(betterproto.Message):
    """Equipment info."""
    role_equips: List["HeroEquipInfo"] = betterproto.message_field(1)
@dataclass
class HeroEquipInfo(betterproto.Message):
    """Hero equipment info."""
    hero_uid: int = betterproto.int64_field(1)
    equip_list: List["Equipment"] = betterproto.message_field(2)
@dataclass
class EquipParam(betterproto.Message):
    """Equipment parameters (serve as the unique identifier of a piece of equipment)."""
    tid: int = betterproto.int64_field(1)
    star: int = betterproto.int32_field(2)
    exp: int = betterproto.int32_field(3)
@dataclass
class EquipHolderInfo(betterproto.Message):
    """Equipment ownership info."""
    holder_uid: int = betterproto.int64_field(1)
    equip_param: "EquipParam" = betterproto.message_field(2)
@dataclass
class WearEquipReq(betterproto.Message):
    """Request to equip / replace / unequip."""
    hero_uid: int = betterproto.int64_field(1)
    equip_list: List["EquipHolderInfo"] = betterproto.message_field(2)
@dataclass
class WearEquipBack(betterproto.Message):
    """Equip/unequip response sent to the client."""
    res: "ResChange" = betterproto.message_field(1)
@dataclass
class EnhanceEquip(betterproto.Message):
    """Equipment enhancement request."""
    hero_uid: int = betterproto.int64_field(1)
    equip_param: "EquipParam" = betterproto.message_field(2)
    mat_equip_list: List["Equipment"] = betterproto.message_field(3)
@dataclass
class EnhanceEquipBack(betterproto.Message):
    """Equipment enhancement response."""
    res: "ResChange" = betterproto.message_field(1)
    new_exp: int = betterproto.int32_field(2)
    new_star: int = betterproto.int64_field(3)
@dataclass
class FighterBaseInfo(betterproto.Message):
    """Basic fighter data."""
    id: int = betterproto.int64_field(1)
    tid: int = betterproto.int64_field(2)
    type: int = betterproto.int32_field(3)
    uid: int = betterproto.int64_field(4)
    role: int = betterproto.int64_field(5)
    team: int = betterproto.int32_field(6)
    loc: int = betterproto.int32_field(7)
    star: int = betterproto.int32_field(8)
    level: int = betterproto.int32_field(9)
    hp: int = betterproto.int64_field(10)
    max_hp: int = betterproto.int64_field(11)
    anger: int = betterproto.int64_field(12)
    max_anger: int = betterproto.int64_field(13)
    enable: bool = betterproto.bool_field(14)
@dataclass
class FighterInfo(betterproto.Message):
    """Initial fighter data (full stat block)."""
    tid: int = betterproto.int64_field(1)
    type: int = betterproto.int32_field(2)
    uid: int = betterproto.int64_field(3)
    role: int = betterproto.int32_field(4)
    style: int = betterproto.int32_field(5)
    name: int = betterproto.int32_field(6)
    camp: int = betterproto.int32_field(7)
    profession: int = betterproto.int32_field(8)
    team: int = betterproto.int32_field(9)
    loc: int = betterproto.int32_field(10)
    star: int = betterproto.int32_field(11)
    level: int = betterproto.int32_field(12)
    attack: int = betterproto.int64_field(13)
    defense: int = betterproto.int64_field(14)
    hp: int = betterproto.int64_field(15)
    speed: int = betterproto.int32_field(16)
    atk_pp: float = betterproto.float_field(17)
    defense_pp: float = betterproto.float_field(18)
    hp_pp: float = betterproto.float_field(19)
    ab_deepen: float = betterproto.float_field(20)
    dodge: float = betterproto.float_field(21)
    hit: float = betterproto.float_field(22)
    wreck: float = betterproto.float_field(23)
    parry: float = betterproto.float_field(24)
    parry_reduce: float = betterproto.float_field(25)
    crit: float = betterproto.float_field(26)
    crit_resist: float = betterproto.float_field(27)
    crit_ratio: float = betterproto.float_field(28)
    crit_harm: float = betterproto.float_field(29)
    breaking: float = betterproto.float_field(30)
    ctrl_resist: float = betterproto.float_field(31)
    reduce: float = betterproto.float_field(32)
    rel_harm: float = betterproto.float_field(33)
    normal_ab: int = betterproto.int32_field(34)
    anger_ab: int = betterproto.int32_field(35)
    attr_ab: int = betterproto.int32_field(36)
    passive_ab: int = betterproto.int32_field(37)
    genius_ab: int = betterproto.int32_field(38)
@dataclass
class PoolFighterKey(betterproto.Message):
    """Key of a fighter in the data pool."""
    role_id: int = betterproto.int64_field(1)
    uid: int = betterproto.int64_field(2)
    tid: int = betterproto.int64_field(3)
    team: int = betterproto.int32_field(4)
    loc: int = betterproto.int32_field(5)
    fight_tid: int = betterproto.int64_field(6)
@dataclass
class PoolFighter(betterproto.Message):
    """Fighter state in the data pool."""
    key: "PoolFighterKey" = betterproto.message_field(1)
    hp: int = betterproto.int32_field(3)
    anger: int = betterproto.int32_field(5)
@dataclass
class FormationFighter(betterproto.Message):
    """Fighter in a formation."""
    uid: int = betterproto.int64_field(1)
    tid: int = betterproto.int64_field(2)
    type: int = betterproto.int32_field(3)
    loc: int = betterproto.int32_field(4)
@dataclass
class Formation(betterproto.Message):
    """Formation data."""
    fight_type: int = betterproto.int32_field(1)
    power: int = betterproto.int32_field(2)
    max_power: int = betterproto.int32_field(3)
    team: List["FormationFighter"] = betterproto.message_field(4)
    warcraft_id: int = betterproto.int64_field(5)
@dataclass
class FormationPool(betterproto.Message):
    """Formation data pool."""
    fight_type: int = betterproto.int32_field(1)
    pool: List["PoolFighter"] = betterproto.message_field(2)
    passive_ab: List[int] = betterproto.int64_field(3)
    passive_def: List[int] = betterproto.int64_field(4)
@dataclass
class FormationData(betterproto.Message):
    """Stored formations (attack, defense, and the fighter pool)."""
    atk_formations: List["Formation"] = betterproto.message_field(1)
    def_formations: List["Formation"] = betterproto.message_field(2)
    pool: List["FormationPool"] = betterproto.message_field(3)
@dataclass
class FightMirror(betterproto.Message):
"""战斗数据镜像"""
role_id: int = betterproto.int64_field(1)
power: int | |
"""Represent interactions between context and item layers."""
import numpy as np
from scipy import stats
import h5py
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from cymr import operations
def save_patterns(h5_file, items, **kwargs):
    """
    Write patterns and similarity matrices to hdf5.

    Parameters
    ----------
    h5_file : str
        Path to hdf5 file to save patterns in.
    items : list of str
        Item strings corresponding to the patterns.

    Additional keyword arguments set named feature vectors. Feature
    vector arrays must have shape [items x units].
    """
    str_dtype = h5py.special_dtype(vlen=str)
    items = np.asarray(items)
    with h5py.File(h5_file, 'w') as f:
        # item labels, stored one by one as variable-length strings
        item_dset = f.create_dataset('items', items.shape, dtype=str_dtype)
        for i, item in enumerate(items):
            item_dset[i] = item

        # names of the stored feature vectors
        names = list(kwargs.keys())
        name_dset = f.create_dataset('features', (len(names),), dtype=str_dtype)
        for i, name in enumerate(names):
            name_dset[i] = name

        # each feature's vectors plus a dot-product similarity matrix
        for name, vectors in kwargs.items():
            f.create_dataset('vector/' + name, data=vectors)
            f.create_dataset('similarity/' + name, data=np.dot(vectors, vectors.T))
def load_patterns(h5_file, features=None):
    """
    Load weights from an hdf5 file.

    Parameters
    ----------
    h5_file : str
        Path to file saved with `save_patterns`.
    features : list of str, optional
        Names of features to load. Default is to load all features.

    Returns
    -------
    patterns : dict of (str: dict of (str: numpy.array))
        Loaded patterns. The "vector" field contains vector patterns.
        The "similarity" field contains pairwise similarity matrices.
        Each type of pattern contains a field for each loaded feature.
    """
    vector = {}
    similarity = {}
    with h5py.File(h5_file, 'r') as f:
        items = np.array([item for item in f['items'].asstr()])
        if features is None:
            # default: every feature recorded in the file
            features = f['features'].asstr()
        for name in features:
            vector[name] = f['vector/' + name][()]
            similarity[name] = f['similarity/' + name][()]
    return {'items': items, 'vector': vector, 'similarity': similarity}
def expand_param(param, size):
    """
    Expand a scalar or array parameter to a target array shape.

    Parameters
    ----------
    param : float or numpy.ndarray
        Parameter to expand.
    size : iterable or numpy.ndarray
        Shape of the expanded parameter.

    Returns
    -------
    param : numpy.ndarray
        Parameter tiled/broadcast to shape `size`.

    Raises
    ------
    ValueError
        If a non-singleton axis of `param` does not match `size`.
    """
    size = np.asarray(size)
    if not isinstance(param, np.ndarray):
        # expand scalar to full array
        param = np.tile(param, size).astype(float)
    elif size.shape and param.ndim < len(size):
        # add trailing singleton dimensions so param has len(size) dims.
        # Fix: axis must be the *indices* of the new dimensions; the old
        # code passed the trailing sizes themselves, which raised an
        # AxisError for typical sizes.
        axis = tuple(range(param.ndim, len(size)))
        param = np.expand_dims(param, axis)
    if param.shape != tuple(size):
        # expand singleton dimensions as needed.
        # Fix: repetitions must be integers; np.ones defaults to float and
        # np.tile/reshape reject float repetition counts.
        rep = np.ones(size.shape, dtype=int)
        for i, n in enumerate(param.shape):
            if n == 1:
                rep[i] = size[i]
            elif n != size[i]:
                # Fix: also raise when the axis is too large; previously a
                # wrongly-shaped array was returned silently in that case.
                raise ValueError('Cannot expand parameter.')
        param = np.tile(param, rep).astype(float)
    return param
def prepare_study_param(n_item, n_sub, B, Lfc, Lcf, distract_B=None):
    """Prepare parameters for simulating a study phase.

    B, Lfc and Lcf are expanded to [n_item x n_sub] arrays; the optional
    distraction rate distract_B is expanded to [n_item + 1 x n_sub].
    """
    shape = (n_item, n_sub)
    param = {
        'B': expand_param(B, shape),
        'Lfc': expand_param(Lfc, shape),
        'Lcf': expand_param(Lcf, shape),
        'distract_B': None,
    }
    if distract_B is not None:
        param['distract_B'] = expand_param(distract_B, (n_item + 1, n_sub))
    return param
def prepare_recall_param(n_item, n_sub, B, T, amin):
    """Prepare parameters for simulating a recall phase.

    B is expanded to [n_item x n_sub]; T is clamped below at amin.
    """
    clamped_T = amin if T < amin else T
    return {'B': expand_param(B, (n_item, n_sub)), 'T': clamped_T}
def sample_response_lba(A, b, v, s, tau):
    """Sample a response and response time from an LBA-style race.

    A is the start-point range, b the threshold, v the per-accumulator
    mean drift rates, s the drift standard deviation and tau the
    non-decision time. Drifts are redrawn until at least one accumulator
    has a positive rate; accumulators with non-positive drift never
    finish. Returns the index of the first accumulator to finish and its
    finishing time.
    """
    # redraw until at least one accumulator moves toward the threshold
    while True:
        start = stats.uniform.rvs(0, scale=A)
        drift = stats.norm.rvs(loc=v, scale=s)
        finish = tau + (b - start) / drift
        if np.any(drift > 0):
            break
    # accumulators with non-positive drift never reach the threshold
    finish[drift <= 0] = np.nan
    winner = np.nanargmin(finish)
    return int(winner), np.nanmin(finish)
def init_plot(**kwargs):
    """Create a figure laid out for plotting network state.

    Returns the figure and a dict of axes keyed by component: context
    (c, c_in), item (f, f_in) and the four weight matrices.
    """
    fig = plt.figure(constrained_layout=True, **kwargs)
    grid = GridSpec(10, 5, figure=fig)
    specs = {
        'c': grid[0, 1:4],
        'c_in': grid[1, 1:4],
        'f_in': grid[8, 1:4],
        'f': grid[9, 1:4],
        'w_fc_pre': grid[2:8, 0],
        'w_fc_exp': grid[2:8, 1],
        'w_ff_pre': grid[2:8, 2],
        'w_cf_exp': grid[2:8, 3],
        'w_cf_pre': grid[2:8, 4],
    }
    ax = {name: fig.add_subplot(spec) for name, spec in specs.items()}
    return fig, ax
class LayerIndex(object):
    """
    Representation of one layer of a network.

    Parameters
    ----------
    layer_segments : dict of (str: dict of (str: int))
        Size of each named segment for each sublayer.

    Attributes
    ----------
    size : int
        Number of units in the layer.
    size_sublayer : dict of (str: int)
        Number of units in each sublayer.
    size_segment : dict of (str: dict of (str: int))
        Size of each segment in each sublayer.
    sublayer : dict of (str: numpy.ndarray)
        Start and stop units of each sublayer.
    segment : dict of (str: dict of (str: numpy.ndarray))
        Start and stop units of each segment.
    """

    def __init__(self, layer_segments):
        self.size = 0
        self.size_sublayer = {}
        self.size_segment = layer_segments.copy()
        self.sublayer = {}
        self.segment = {}
        for sub, segs in layer_segments.items():
            self.segment[sub] = {}
            self.size_sublayer[sub] = 0
            start = self.size
            for seg, s in segs.items():
                # [start, stop) unit range of this segment within the layer
                self.segment[sub][seg] = np.array(
                    [self.size, self.size + s], dtype=np.dtype('i')
                )
                self.size += s
                self.size_sublayer[sub] += s
            self.sublayer[sub] = np.array(
                [start, start + self.size_sublayer[sub]], dtype=np.dtype('i')
            )

    def __repr__(self):
        s = ''
        for sublayer, segments in self.size_segment.items():
            size_sublayer = self.size_sublayer[sublayer]
            s += f'{sublayer}: {size_sublayer} units\n'
            for segment, size_segment in segments.items():
                s += f' {segment}: {size_segment} units\n'
        return s

    def copy(self):
        """Return a new LayerIndex with the same segment layout.

        Bug fix: the previous implementation returned
        ``self.__init__(...)``, which is always None; it now constructs
        and returns a fresh instance.
        """
        return type(self)(self.size_segment)

    def get_sublayer(self, sublayer):
        """Get the [start, stop) unit range of a sublayer."""
        return self.sublayer[sublayer]

    def get_segment(self, sublayer, segment):
        """Get the [start, stop) unit range of a segment."""
        return self.segment[sublayer][segment]

    def get_slice(self, sublayer, segment):
        """Get a slice object covering the units of a segment."""
        ind = self.get_segment(sublayer, segment)
        return slice(ind[0], ind[1])

    def get_unit(self, sublayer, segment, index):
        """Get the absolute unit number of an index within a segment."""
        return self.segment[sublayer][segment][0] + index
class Network(object):
"""
Representation of interacting item and context layers.
Parameters
----------
f_segment : dict of str: (dict of str: int)
For each item sublayer, the number of units in each segment.
c_segment : dict of str: (dict of str: int)
For each context sublayer, the number of units in each segment.
Attributes
----------
f_segment : dict of str: (dict of str: int)
Number of item units for each segment.
c_segment : dict of str: (dict of str: int)
Number of context units for each segment.
f_ind : cymr.network.LayerIndex
Index of units in the item layer.
c_ind : cymr.network.LayerIndex
Index of units in the context layer.
n_f : int
Total number of item units.
n_c : int
Total number of context units.
f : numpy.array
Item layer vector.
c : numpy.array
Context layer vector.
c_in : numpy.array
Current input to context.
w_fc_pre : numpy.array
Pre-experimental weights connecting f to c.
w_fc_exp : numpy.array
Weights learned during the experiment connecting f to c.
w_cf_pre : numpy.array
Pre-experimental weights connecting c to f.
w_cf_exp : numpy.array
Weights learned during the experiment connecting c to f.
w_ff_pre : numpy.array
Pre-experimental weights connecting f to f.
w_ff_exp : numpy.array
Weights learned during the experiment connecting f to f.
"""
def __init__(self, f_segment, c_segment):
self.f_segment = f_segment.copy()
self.c_segment = c_segment.copy()
self.f_sublayers = list(self.f_segment.keys())
self.c_sublayers = list(self.c_segment.keys())
self.f_ind = LayerIndex(self.f_segment)
self.c_ind = LayerIndex(self.c_segment)
n_f = self.f_ind.size
n_c = self.c_ind.size
self.n_f = n_f
self.n_c = n_c
self.f = np.zeros(n_f)
self.f_in = np.zeros(n_f)
self.match = np.zeros(n_f)
self.c = np.zeros(n_c)
self.c_in = np.zeros(n_c)
self.w_fc_pre = np.zeros((n_f, n_c))
self.w_fc_exp = np.zeros((n_f, n_c))
self.w_cf_pre = np.zeros((n_f, n_c))
self.w_cf_exp = np.zeros((n_f, n_c))
self.w_ff_pre = np.zeros((n_f, n_f))
self.w_ff_exp = np.zeros((n_f, n_f))
def __repr__(self):
s = f'f:\n{self.f_ind}\nc:\n{self.c_ind}'
return s
def reset(self):
"""Reset network weights and activations to zero."""
self.f[:] = 0
self.f_in[:] = 0
self.match[:] = 0
self.c[:] = 0
self.c_in[:] = 0
self.w_fc_exp[:] = 0
self.w_fc_pre[:] = 0
self.w_cf_exp[:] = 0
self.w_cf_pre[:] = 0
self.w_ff_exp[:] = 0
self.w_ff_pre[:] = 0
def copy(self, include=None, exclude=None):
"""
Copy the network to a new network object.
Parameters
----------
include : list of str, optional
List of fields to include. Default is a standard list of
fields.
exclude : list of str, optional
List of fields to exclude. Overrides values for include.
Returns
-------
net : cymr.Network
Network with the same segments, weights, and activations.
"""
# all attributes that contain an array
fields = [
'f',
'f_in',
'match',
'c',
'c_in',
'w_fc_exp',
'w_fc_pre',
'w_cf_exp',
'w_cf_pre',
'w_ff_exp',
'w_ff_pre',
]
# set the fields to include in the copy (default is all)
if include is None:
include = fields
if exclude is not None:
include = [i for i in include if i not in exclude]
# copy included attributes
net = Network(self.f_segment, self.c_segment)
for field in fields:
if field in include:
setattr(net, field, getattr(self, field).copy())
else:
delattr(net, field)
return net
def get_sublayer(self, layer, sublayer):
"""
Get indices for a sublayer.
Parameters
----------
layer : str
Layer to access.
| |
## evolve_soft_2d/result/analyse.py
## Functions used for obtaining and inspecting results
# Imports
import csv
import fileinput
import linecache
import numpy
import pandas
from scipy.optimize import curve_fit
from scipy.spatial.distance import directed_hausdorff
from evolve_soft_2d import plotting, utility
from evolve_soft_2d.evolve import gen_alg, lsystems
from evolve_soft_2d.file_paths import create_fp_file
from evolve_soft_2d.result import obtain
from evolve_soft_2d.unit import create, modify
################################################################################
def monte_carlo(
    template,
    meth: str,
    ) -> None:
    """Perform a Monte Carlo analysis on a population of units

    Parameters
    ----------
    template : template
        The unit template parameters
    meth : str
        The unit generation method
        l : L-Systems
        c : CPPNs
        r : Random generation
    """
    # Select the parameter bounds matching the generation method
    if meth == "l":
        bounds = [gen_alg.ls_all_max, gen_alg.ls_all_min]
    elif meth == "c":
        bounds = [gen_alg.cppn_all_max, gen_alg.cppn_all_min]
    else:
        bounds = [[gen_alg.n_u + 1, len(template.e_internal) + 1], [1, 0]]
    # Generate the initial population of unit parameters
    l_u = create.gen_init_units(template, gen_alg.n_u, meth, bounds)[0]
    # Run the population of units
    fp_lu, empty_id, full_id = create.run_units(template, l_u, meth)
    # Rank the population of units
    rank_u(template, meth, empty_id, full_id, fp_lu)
    return
################################################################################
def rank_u(
    template,
    g_meth: str,
    empty_id: str,
    full_id: str,
    fp_lu: list,
    ) -> pandas.DataFrame:
    """Rank units according to their performance

    Parameters
    ----------
    template : template
        The unit template parameters
    g_meth : str
        The unit generation method
        l : L-Systems
        c : CPPNs
        r : Random generation
    empty_id : str
        The placeholder ID for empty units
    full_id : str
        The placeholder ID for full units
    fp_lu : list
        The file paths of the log files of units created during the last
        simulation

    Returns
    -------
    pandas.DataFrame
        The unit results; sorted by fitness when the template case is 1
    """
    # Initialisations
    v = []
    data = pandas.DataFrame()
    label = [
        "Constraint Energy X",
        "Constraint Energy Y",
        "Constraint Energy",
        "Internal Energy X",
        "Internal Energy Y",
        "Internal Energy",
    ]
    # Read the list of units created during the last simulation
    lu = ranking(fp_lu, empty_id, full_id)
    # Read the energy results for every label
    for i in label:
        v.append(obtain.read_all(template, lu, i))
    # Add the Hausdorff distance to the list of labels
    label.append("Hausdorff Distance")
    # Read the Hausdorff distance variables
    v.append(obtain.read_all_hd(template, lu))
    label.append("Number of Elements Removed")
    # Create a list of the number of elements removed from every unit
    v.append([utility.find_int_in_str(i) for i in lu])
    # Check if the template case is 1
    if template.case == 1:
        # Add the necessary labels for comparison
        label.append("Height to Width Ratio")
        label.append("Absolute Change In Width")
        # Read the fitness data and append it to the list of values
        v.extend(obtain.read_all_fit(template, lu))
    if g_meth == "l":
        # Record the L-System parameters of every unit
        label.append("Axiom ID")
        label.append("Number of Rules")
        label.append("Rule Length")
        label.append("Number of Iterations")
        l_axiom = []
        l_rule_n = []
        l_rule_l = []
        l_i = []
        for i in lu:
            curr_mod = utility.open_v(template, i)
            l_axiom.append(lsystems.a_all.index(curr_mod.ls.axiom))
            l_rule_n.append(len(curr_mod.ls.gramm))
            l_rule_l.append(ls_avg_rule_l(curr_mod.ls))
            l_i.append(curr_mod.ls.n)
        v.extend([l_axiom, l_rule_n, l_rule_l, l_i])
    elif g_meth == "c":
        # Record the CPPN parameters of every unit
        label.append("Model ID")
        label.append("Scale")
        label.append("Number of Hidden Layers")
        label.append("Size of the Initial Hidden Layer")
        label.append("Element Removal Threshold")
        c_m_id = []
        c_scale = []
        c_hl_n = []
        c_hl_s = []
        c_thresh = []
        for i in lu:
            curr_mod = utility.open_v(template, i)
            c_m_id.append(curr_mod.cp.mod_id)
            c_scale.append(curr_mod.cp.cppn.scale)
            c_hl_n.append(curr_mod.cp.cppn.hl_n)
            c_hl_s.append(curr_mod.cp.cppn.hl_s)
            c_thresh.append(curr_mod.cp.cppn.thresh)
        v.extend([c_m_id, c_scale, c_hl_n, c_hl_s, c_thresh])
    # Store the list of units
    data["Unit ID"] = lu
    # Add the values to the dataframe under their labels
    for i in range(0, len(v)):
        data[label[i]] = v[i]
    # Check if the template case is 1
    if template.case == 1:
        # Studentize the fitness values
        data["Height to Width Ratio"] = (data["Height to Width Ratio"] - data["Height to Width Ratio"].mean())/data["Height to Width Ratio"].std()
        data["Absolute Change In Width"] = (data["Absolute Change In Width"] - data["Absolute Change In Width"].mean())/data["Absolute Change In Width"].std()
        # Calculate a single fitness value
        data["Fitness"] = data["Height to Width Ratio"] + data["Absolute Change In Width"]
    # Read the timestamp of the simulation (retained for the commented-out
    # plotting calls below)
    tm = utility.read_str(fp_lu[0], -25, -4)
    # plotting.plot_all(template, v, n_e, label, tm)
    data = data.replace(0, numpy.nan)
    # plotting.hist_all(template, tm, data)
    # plotting.scat_all(template, tm, data)
    # Bug fix: the "Fitness" column only exists when template.case == 1;
    # guard the sort so other cases do not raise a KeyError
    if "Fitness" in data:
        # Sort the dataframe according to the fitness values
        data.sort_values(by = ["Fitness"], ascending = False, inplace = True, ignore_index = True)
    # Save the list of best performing units
    # data["Unit ID"].to_csv(fp_lu[5], header = False, index = False)
    return data
################################################################################
def ranking(
    fp_lu: list,
    empty_id: str,
    full_id: str,
    ) -> list:
    """Read the ranked list of units and expand the placeholder IDs

    Parameters
    ----------
    fp_lu : list
        The file paths of the unit log files; index 5 is the ranked list,
        index 3 the empty-unit log and index 4 the full-unit log
    empty_id : str
        The placeholder ID used for empty units in the ranked list
    full_id : str
        The placeholder ID used for full units in the ranked list

    Returns
    -------
    list
        The ranked unit IDs with placeholders expanded and blank entries
        removed
    """
    # Read the ranked list of all units created
    with open(fp_lu[5], 'r') as f:
        lu_rank = f.read()
    try:
        # Read the list of empty units created
        with open(fp_lu[3], 'r') as f:
            lu_empty = f.read()
        # Replace the placeholder empty unit ID in the ranked list with all
        # generated empty units
        lu_rank = lu_rank.replace(empty_id, lu_empty)
    # Bug fix: narrowed from a bare except, which would also have hidden
    # unrelated errors such as KeyboardInterrupt
    except OSError:
        # No empty-unit log exists; remove the placeholder instead
        lu_rank = lu_rank.replace(empty_id, "")
    try:
        # Read the list of full units created
        with open(fp_lu[4], 'r') as f:
            lu_full = f.read()
        # Replace the placeholder full unit ID in the ranked list with all
        # generated full units
        lu_rank = lu_rank.replace(full_id, lu_full)
    except OSError:
        # No full-unit log exists; remove the placeholder instead
        lu_rank = lu_rank.replace(full_id, "")
    # Format the list of ranked units, dropping blank entries
    return [u for u in lu_rank.split("\n") if u != ""]
################################################################################
def rank_pop(
    fp_lu: list,
    empty_id: str,
    full_id: str,
    par: list,
    ) -> list:
    """Sort a population's generation parameters according to the unit ranking

    Parameters
    ----------
    fp_lu : list
        The file paths of the unit log files; index 0 is the full unit list,
        index 2 the failed-unit log, index 3 the empty-unit log, index 4 the
        full-unit log and index 5 the ranked list
    empty_id : str
        The placeholder ID used for empty units in the ranked list
    full_id : str
        The placeholder ID used for full units in the ranked list
    par : list
        The generation parameters of every unit, in creation order

    Returns
    -------
    list
        The parameters reordered to match the unit ranking
    """
    # Initialisations
    data = pandas.DataFrame()
    # Read the list of all units created
    lu = obtain.read_lu(fp_lu[0])
    # Read the ranked list of all units created
    with open(fp_lu[5], 'r') as f:
        lu_rank = f.read()
    try:
        # Read the list of empty units created
        with open(fp_lu[3], 'r') as f:
            lu_empty = f.read()
        # Replace the placeholder empty unit ID in the ranked list with all
        # generated empty units
        lu_rank = lu_rank.replace(empty_id, lu_empty)
    # Bug fix: narrowed from a bare except to file-access errors only
    except OSError:
        # No empty-unit log exists; remove the placeholder instead
        lu_rank = lu_rank.replace(empty_id, "")
    try:
        # Read the list of full units created
        with open(fp_lu[4], 'r') as f:
            lu_full = f.read()
        # Replace the placeholder full unit ID in the ranked list with all
        # generated full units
        lu_rank = lu_rank.replace(full_id, lu_full)
    except OSError:
        # No full-unit log exists; remove the placeholder instead
        lu_rank = lu_rank.replace(full_id, "")
    try:
        # Read the list of failed units created
        with open(fp_lu[2], 'r') as f:
            lu_fail = f.read()
        # Append the list of failed units to the ranked list
        lu_rank += lu_fail
    except OSError:
        # No failed units were created
        pass
    # Format the list of ranked units, dropping blank entries
    lu_rank = [u for u in lu_rank.split("\n") if u != ""]
    # Add the relevant data to the dataframe
    data["Unit ID"] = lu
    data["Parameters"] = par
    # Create the sorting index according to the ranked unit list
    lu_rank_index = dict(zip(lu_rank, range(0, len(lu_rank))))
    # Add the sorting index to the dataframe
    data["Rank"] = data["Unit ID"].map(lu_rank_index)
    # Sort the dataframe according to the sorting index
    data.sort_values(["Rank"], ascending = [True], inplace = True)
    # Remove the sorting index from the dataframe
    # Bug fix: the positional axis argument to drop() was deprecated and
    # removed in pandas 2.0; use the columns= keyword instead
    data.drop(columns = "Rank", inplace = True)
    par_sort = data["Parameters"].tolist()
    return par_sort
################################################################################
def ls_avg_rule_l(ls) -> float:
    """Calculate the average rule length of an L-System's grammar."""
    rules = [rule[1] for rule in ls.gramm]
    return utility.avg_str_l(rules)
################################################################################
def constraint_energy(
template,
l: str,
) -> None:
"""Calculate the constraint energy for a unit
Parameters
----------
template : template
The unit template parameters
l : str
The label for the results file
Either a template or unit identifier
"""
# Initialisations
label = []
label.append("Displacement X")
label.append("Displacement Y")
label.append("Displacement")
label.append("Reaction Force X")
label.append("Reaction Force Y")
label.append("Reaction Force")
label_c_e = []
label_c_e.append("Constraint Energy X")
label_c_e.append("Constraint Energy Y")
label_c_e.append("Constraint Energy")
v = numpy.zeros((len(label), template.n_steps + 1, template.n_n))
# Loop through all the variable labels
for i in range(0, len(label)):
# Create the file | |
# <gh_stars>1-10
"""
Title: Masked image modeling with Autoencoders
Author: [<NAME>](https://twitter.com/arig23498), [<NAME>](https://twitter.com/RisingSayak)
Date created: 2021/12/20
Last modified: 2021/12/21
Description: Implementing Masked Autoencoders for self-supervised pretraining.
"""
"""
## Introduction
In deep learning, models with growing **capacity** and **capability** can easily overfit
on large datasets (ImageNet-1K). In the field of natural language processing, the
appetite for data has been **successfully addressed** by self-supervised pretraining.
In the academic paper
[Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377)
by He et al., the authors propose a simple yet effective method to pretrain large
vision models (here [ViT Huge](https://arxiv.org/abs/2010.11929)). Inspired from
the pretraining algorithm of BERT ([Devlin et al.](https://arxiv.org/abs/1810.04805)),
they mask patches of an image and, through an autoencoder predict the masked patches.
In the spirit of "masked language modeling", this pretraining task could be referred
to as "masked image modeling".
In this example, we implement
[Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377)
with the [CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset. After
pretraining a scaled down version of ViT, we also implement the linear evaluation
pipeline on CIFAR-10.
This implementation covers (MAE refers to Masked Autoencoder):
- The masking algorithm
- MAE encoder
- MAE decoder
- Evaluation with linear probing
As a reference, we reuse some of the code presented in
[this example](https://keras.io/examples/vision/image_classification_with_vision_transformer/).
"""
"""
## Imports
This example requires TensorFlow Addons, which can be installed using the following
command:
```shell
pip install -U tensorflow-addons
```
"""
from tensorflow.keras import layers
import tensorflow_addons as tfa
from tensorflow import keras
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import random
# Setting seeds (via keras.utils.set_random_seed) for reproducibility.
SEED = 42
keras.utils.set_random_seed(SEED)
"""
## Hyperparameters for pretraining
Please feel free to change the hyperparameters and check your results. The best way to
get an intuition about the architecture is to experiment with it. Our hyperparameters are
heavily inspired by the design guidelines laid out by the authors in
[the original paper](https://arxiv.org/abs/2111.06377).
"""
# DATA
BUFFER_SIZE = 1024  # shuffle buffer size for the training tf.data pipeline
BATCH_SIZE = 256
AUTO = tf.data.AUTOTUNE  # let tf.data tune prefetching automatically
INPUT_SHAPE = (32, 32, 3)  # native CIFAR-10 image shape
NUM_CLASSES = 10
# OPTIMIZER
LEARNING_RATE = 5e-3
WEIGHT_DECAY = 1e-4
# PRETRAINING
EPOCHS = 100
# AUGMENTATION
IMAGE_SIZE = 48 # We will resize input images to this size.
PATCH_SIZE = 6 # Size of the patches to be extracted from the input images.
NUM_PATCHES = (IMAGE_SIZE // PATCH_SIZE) ** 2  # (48 // 6) ** 2 = 64 patches
MASK_PROPORTION = 0.75 # We have found 75% masking to give us the best results.
# ENCODER and DECODER
LAYER_NORM_EPS = 1e-6
ENC_PROJECTION_DIM = 128
DEC_PROJECTION_DIM = 64  # decoder is narrower than the encoder
ENC_NUM_HEADS = 4
ENC_LAYERS = 6
DEC_NUM_HEADS = 4
DEC_LAYERS = (
    2 # The decoder is lightweight but should be reasonably deep for reconstruction.
)
ENC_TRANSFORMER_UNITS = [
    ENC_PROJECTION_DIM * 2,
    ENC_PROJECTION_DIM,
] # Size of the transformer layers.
DEC_TRANSFORMER_UNITS = [
    DEC_PROJECTION_DIM * 2,
    DEC_PROJECTION_DIM,
]  # Size of the transformer layers in the decoder.
"""
## Load and prepare the CIFAR-10 dataset
"""
# CIFAR-10: hold out the last 10k training images for validation.
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
(x_train, y_train), (x_val, y_val) = (
    (x_train[:40000], y_train[:40000]),
    (x_train[40000:], y_train[40000:]),
)
print(f"Training samples: {len(x_train)}")
print(f"Validation samples: {len(x_val)}")
print(f"Testing samples: {len(x_test)}")
# Pretraining is self-supervised, so the datasets carry images only
# (labels are dropped); only the training set is shuffled.
train_ds = tf.data.Dataset.from_tensor_slices(x_train)
train_ds = train_ds.shuffle(BUFFER_SIZE).batch(BATCH_SIZE).prefetch(AUTO)
val_ds = tf.data.Dataset.from_tensor_slices(x_val)
val_ds = val_ds.batch(BATCH_SIZE).prefetch(AUTO)
test_ds = tf.data.Dataset.from_tensor_slices(x_test)
test_ds = test_ds.batch(BATCH_SIZE).prefetch(AUTO)
"""
## Data augmentation
In previous self-supervised pretraining methodologies
([SimCLR](https://arxiv.org/abs/2002.05709) alike), we have noticed that the data
augmentation pipeline plays an important role. On the other hand the authors of this
paper point out that Masked Autoencoders **do not** rely on augmentations. They propose a
simple augmentation pipeline of:
- Resizing
- Random cropping (fixed-sized or random sized)
- Random horizontal flipping
"""
def get_train_augmentation_model():
    """Build the augmentation pipeline used during pretraining.

    Rescales pixels to [0, 1], enlarges the image, then applies a random
    crop and a random horizontal flip.
    """
    model = keras.Sequential(
        name="train_data_augmentation",
        layers=[
            layers.Rescaling(1 / 255.0),
            layers.Resizing(INPUT_SHAPE[0] + 20, INPUT_SHAPE[0] + 20),
            layers.RandomCrop(IMAGE_SIZE, IMAGE_SIZE),
            layers.RandomFlip("horizontal"),
        ],
    )
    return model
def get_test_augmentation_model():
    """Build the deterministic pipeline used at evaluation time.

    Only rescales pixels to [0, 1] and resizes to IMAGE_SIZE; no random
    transformations are applied.
    """
    model = keras.Sequential(name="test_data_augmentation")
    model.add(layers.Rescaling(1 / 255.0))
    model.add(layers.Resizing(IMAGE_SIZE, IMAGE_SIZE))
    return model
"""
## A layer for extracting patches from images
This layer takes images as input and divides them into patches. The layer also includes
two utility method:
- `show_patched_image` -- Takes a batch of images and its corresponding patches to plot a
random pair of image and patches.
- `reconstruct_from_patch` -- Takes a single instance of patches and stitches them
together into the original image.
"""
class Patches(layers.Layer):
    """Split batches of images into flattened, non-overlapping patches."""

    def __init__(self, patch_size=PATCH_SIZE, **kwargs):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        # Each patch covers patch_size x patch_size pixels over 3 channels,
        # flattened into one vector per patch.
        self.resize = layers.Reshape((-1, patch_size * patch_size * 3))

    def call(self, images):
        # Slide a non-overlapping window over each image...
        extracted = tf.image.extract_patches(
            images=images,
            sizes=[1, self.patch_size, self.patch_size, 1],
            strides=[1, self.patch_size, self.patch_size, 1],
            rates=[1, 1, 1, 1],
            padding="VALID",
        )
        # ...and flatten to (batch, num_patches, patch_area).
        return self.resize(extracted)

    def show_patched_image(self, images, patches):
        """Plot one randomly chosen image next to its grid of patches."""
        idx = np.random.choice(patches.shape[0])
        print(f"Index selected: {idx}.")
        plt.figure(figsize=(4, 4))
        plt.imshow(keras.utils.array_to_img(images[idx]))
        plt.axis("off")
        plt.show()
        side = int(np.sqrt(patches.shape[1]))
        plt.figure(figsize=(4, 4))
        for i, patch in enumerate(patches[idx]):
            plt.subplot(side, side, i + 1)
            patch_img = tf.reshape(patch, (self.patch_size, self.patch_size, 3))
            plt.imshow(keras.utils.img_to_array(patch_img))
            plt.axis("off")
        plt.show()
        # Return the chosen index so callers can validate it.
        return idx

    # adapted from https://stackoverflow.com/a/58082878/10319735
    def reconstruct_from_patch(self, patch):
        """Stitch the patches of a *single* image back into that image.

        Useful for the training monitor callback.
        """
        num_patches = patch.shape[0]
        side = int(np.sqrt(num_patches))
        grid = tf.reshape(patch, (num_patches, self.patch_size, self.patch_size, 3))
        rows = [tf.concat(tf.unstack(r), axis=1) for r in tf.split(grid, side, axis=0)]
        return tf.concat(rows, axis=0)
"""
Let's visualize the image patches.
"""
# Get a batch of images.
image_batch = next(iter(train_ds))
# Augment the images.
augmentation_model = get_train_augmentation_model()
augmented_images = augmentation_model(image_batch)
# Define the patch layer.
patch_layer = Patches()
# Get the patches from the batched images.
patches = patch_layer(images=augmented_images)
# Now pass the images and the corresponding patches
# to the `show_patched_image` method.
random_index = patch_layer.show_patched_image(images=augmented_images, patches=patches)
# Choose the same image and try reconstructing its patches
# back into the original image.
image = patch_layer.reconstruct_from_patch(patches[random_index])
plt.imshow(image)
plt.axis("off")
plt.show()
"""
## Patch encoding with masking
Quoting the paper
> Following ViT, we divide an image into regular non-overlapping patches. Then we sample
a subset of patches and mask (i.e., remove) the remaining ones. Our sampling strategy is
straightforward: we sample random patches without replacement, following a uniform
distribution. We simply refer to this as “random sampling”.
This layer includes masking and encoding the patches.
The utility methods of the layer are:
- `get_random_indices` -- Provides the mask and unmask indices.
- `generate_masked_image` -- Takes patches and unmask indices, results in a random masked
image. This is an essential utility method for our training monitor callback (defined
later).
"""
class PatchEncoder(layers.Layer):
    def __init__(
        self,
        patch_size=PATCH_SIZE,
        projection_dim=ENC_PROJECTION_DIM,
        mask_proportion=MASK_PROPORTION,
        downstream=False,
        **kwargs,
    ):
        """Store the masking configuration and create the learnable mask token.

        When ``downstream`` is True, ``call`` skips masking and returns
        only the projected patch embeddings.
        """
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.projection_dim = projection_dim
        self.mask_proportion = mask_proportion
        self.downstream = downstream
        # This is a trainable mask token initialized randomly from a normal
        # distribution; it stands in for the pixels of every masked patch.
        self.mask_token = tf.Variable(
            tf.random.normal([1, patch_size * patch_size * 3]), trainable=True
        )
    def build(self, input_shape):
        """Create projection/embedding layers once the patch shape is known."""
        (_, self.num_patches, self.patch_area) = input_shape
        # Create the projection layer for the patches.
        self.projection = layers.Dense(units=self.projection_dim)
        # Create the positional embedding layer.
        self.position_embedding = layers.Embedding(
            input_dim=self.num_patches, output_dim=self.projection_dim
        )
        # Number of patches that will be masked.
        self.num_mask = int(self.mask_proportion * self.num_patches)
    def call(self, patches):
        """Project patches and, unless in downstream mode, split them into
        masked and unmasked sets.

        Returns the projected patch embeddings when ``self.downstream`` is
        True; otherwise a 5-tuple of encoder/decoder inputs plus the
        mask/unmask index tensors.
        """
        # Get the positional embeddings.
        batch_size = tf.shape(patches)[0]
        positions = tf.range(start=0, limit=self.num_patches, delta=1)
        pos_embeddings = self.position_embedding(positions[tf.newaxis, ...])
        pos_embeddings = tf.tile(
            pos_embeddings, [batch_size, 1, 1]
        )  # (B, num_patches, projection_dim)
        # Embed the patches.
        patch_embeddings = (
            self.projection(patches) + pos_embeddings
        )  # (B, num_patches, projection_dim)
        if self.downstream:
            return patch_embeddings
        else:
            mask_indices, unmask_indices = self.get_random_indices(batch_size)
            # The encoder input is the unmasked patch embeddings. Here we gather
            # all the patches that should be unmasked.
            unmasked_embeddings = tf.gather(
                patch_embeddings, unmask_indices, axis=1, batch_dims=1
            )  # (B, unmask_numbers, projection_dim)
            # Get the unmasked and masked position embeddings. We will need them
            # for the decoder.
            unmasked_positions = tf.gather(
                pos_embeddings, unmask_indices, axis=1, batch_dims=1
            )  # (B, unmask_numbers, projection_dim)
            masked_positions = tf.gather(
                pos_embeddings, mask_indices, axis=1, batch_dims=1
            )  # (B, mask_numbers, projection_dim)
            # Repeat the mask token number of mask times.
            # Mask tokens replace the masks of the image.
            mask_tokens = tf.repeat(self.mask_token, repeats=self.num_mask, axis=0)
            mask_tokens = tf.repeat(
                mask_tokens[tf.newaxis, ...], repeats=batch_size, axis=0
            )
            # Get the masked embeddings for the tokens.
            masked_embeddings = self.projection(mask_tokens) + masked_positions
            return (
                unmasked_embeddings,  # Input to the encoder.
                masked_embeddings,  # First part of input to the decoder.
                unmasked_positions,  # Added to the encoder outputs.
                mask_indices,  # The indices that were masked.
                unmask_indices,  # The indices that were unmasked.
            )
def get_random_indices(self, batch_size):
# Create random indices from a uniform distribution and then split
# it into mask and unmask indices.
rand_indices = tf.argsort(
tf.random.uniform(shape=(batch_size, self.num_patches)), axis=-1
)
mask_indices = rand_indices[:, : self.num_mask]
unmask_indices = rand_indices[:, self.num_mask :]
return mask_indices, unmask_indices
| |
u1 = User(id=1, name="u1")
a1 = Address(id=1, user_id=1, email_address="a2")
sess.add_all([u1, a1])
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"INSERT INTO users (id, name) VALUES (:id, :name)",
{"id": 1, "name": "u1"},
),
CompiledSQL(
"INSERT INTO addresses (id, user_id, email_address) "
"VALUES (:id, :user_id, :email_address)",
{"email_address": "a2", "user_id": 1, "id": 1},
),
)
sess.delete(u1)
sess.delete(a1)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"DELETE FROM addresses WHERE addresses.id = :id", [{"id": 1}]
),
CompiledSQL("DELETE FROM users WHERE users.id = :id", [{"id": 1}]),
)
    def test_natural_selfref(self):
        """test that unconnected items take relationship()
        into account regardless."""
        Node, nodes = self.classes.Node, self.tables.nodes
        mapper(Node, nodes, properties={"children": relationship(Node)})
        sess = create_session()
        # n2/n3 reference their parents via raw foreign-key values rather
        # than through the "children" collection, so no objects are linked
        # in memory
        n1 = Node(id=1)
        n2 = Node(id=2, parent_id=1)
        n3 = Node(id=3, parent_id=2)
        # insert order is determined from add order since they
        # are the same class
        sess.add_all([n1, n2, n3])
        # a single batched INSERT in add order satisfies the FK dependencies
        self.assert_sql_execution(
            testing.db,
            sess.flush,
            CompiledSQL(
                "INSERT INTO nodes (id, parent_id, data) VALUES "
                "(:id, :parent_id, :data)",
                [
                    {"parent_id": None, "data": None, "id": 1},
                    {"parent_id": 1, "data": None, "id": 2},
                    {"parent_id": 2, "data": None, "id": 3},
                ],
            ),
        )
    def test_many_to_many(self):
        """Flush inserts the two parent rows (in either order) before the
        association row; a later UPDATE with the collection expired touches
        only the items table."""
        keywords, items, item_keywords, Keyword, Item = (
            self.tables.keywords,
            self.tables.items,
            self.tables.item_keywords,
            self.classes.Keyword,
            self.classes.Item,
        )
        mapper(
            Item,
            items,
            properties={
                "keywords": relationship(Keyword, secondary=item_keywords)
            },
        )
        mapper(Keyword, keywords)
        sess = create_session()
        k1 = Keyword(name="k1")
        i1 = Item(description="i1", keywords=[k1])
        sess.add(i1)
        # the keyword/item INSERTs may run in any order (AllOf), but the
        # secondary-table INSERT must come last since it needs both ids
        self.assert_sql_execution(
            testing.db,
            sess.flush,
            AllOf(
                CompiledSQL(
                    "INSERT INTO keywords (name) VALUES (:name)",
                    {"name": "k1"},
                ),
                CompiledSQL(
                    "INSERT INTO items (description) VALUES (:description)",
                    {"description": "i1"},
                ),
            ),
            CompiledSQL(
                "INSERT INTO item_keywords (item_id, keyword_id) "
                "VALUES (:item_id, :keyword_id)",
                lambda ctx: {"item_id": i1.id, "keyword_id": k1.id},
            ),
        )
        # test that keywords collection isn't loaded
        sess.expire(i1, ["keywords"])
        i1.description = "i2"
        self.assert_sql_execution(
            testing.db,
            sess.flush,
            CompiledSQL(
                "UPDATE items SET description=:description "
                "WHERE items.id = :items_id",
                lambda ctx: {"description": "i2", "items_id": i1.id},
            ),
        )
def test_m2o_flush_size(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(User, users)
mapper(
Address,
addresses,
properties={"user": relationship(User, passive_updates=True)},
)
sess = create_session()
u1 = User(name="ed")
sess.add(u1)
self._assert_uow_size(sess, 2)
    def test_o2m_flush_size(self):
        """Unit-of-work size is 2 for lone object changes and grows to 6
        once the addresses collection participates in the flush."""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )
        mapper(User, users, properties={"addresses": relationship(Address)})
        mapper(Address, addresses)
        sess = create_session()
        u1 = User(name="ed")
        sess.add(u1)
        self._assert_uow_size(sess, 2)
        sess.flush()
        # a scalar attribute change flushes just the one object
        u1.name = "jack"
        self._assert_uow_size(sess, 2)
        sess.flush()
        a1 = Address(email_address="foo")
        sess.add(a1)
        sess.flush()
        # linking the address pulls the relationship into the flush plan
        u1.addresses.append(a1)
        self._assert_uow_size(sess, 6)
        sess.flush()
        sess = create_session()
        u1 = sess.query(User).first()
        u1.name = "ed"
        self._assert_uow_size(sess, 2)
        # accessing (loading) the collection makes it part of the next flush
        u1.addresses
        self._assert_uow_size(sess, 6)
class SingleCycleTest(UOWTest):
    def teardown(self):
        """Roll back open connections and clear the nodes table between tests."""
        engines.testing_reaper.rollback_all()
        # mysql can't handle delete from nodes
        # since it doesn't deal with the FKs correctly,
        # so wipe out the parent_id first
        testing.db.execute(self.tables.nodes.update().values(parent_id=None))
        super(SingleCycleTest, self).teardown()
    def test_one_to_many_save(self):
        """The parent node is inserted before its children so their
        parent_id values can be populated."""
        Node, nodes = self.classes.Node, self.tables.nodes
        mapper(Node, nodes, properties={"children": relationship(Node)})
        sess = create_session()
        n2, n3 = Node(data="n2"), Node(data="n3")
        n1 = Node(data="n1", children=[n2, n3])
        sess.add(n1)
        self.assert_sql_execution(
            testing.db,
            sess.flush,
            CompiledSQL(
                "INSERT INTO nodes (parent_id, data) VALUES "
                "(:parent_id, :data)",
                {"parent_id": None, "data": "n1"},
            ),
            # the two child INSERTs may occur in either order (AllOf)
            AllOf(
                CompiledSQL(
                    "INSERT INTO nodes (parent_id, data) VALUES "
                    "(:parent_id, :data)",
                    lambda ctx: {"parent_id": n1.id, "data": "n2"},
                ),
                CompiledSQL(
                    "INSERT INTO nodes (parent_id, data) VALUES "
                    "(:parent_id, :data)",
                    lambda ctx: {"parent_id": n1.id, "data": "n3"},
                ),
            ),
        )
    def test_one_to_many_delete_all(self):
        """When parent and children are all deleted, the children are
        deleted first (as a batch) and the parent last."""
        Node, nodes = self.classes.Node, self.tables.nodes
        mapper(Node, nodes, properties={"children": relationship(Node)})
        sess = create_session()
        n2, n3 = Node(data="n2", children=[]), Node(data="n3", children=[])
        n1 = Node(data="n1", children=[n2, n3])
        sess.add(n1)
        sess.flush()
        sess.delete(n1)
        sess.delete(n2)
        sess.delete(n3)
        self.assert_sql_execution(
            testing.db,
            sess.flush,
            CompiledSQL(
                "DELETE FROM nodes WHERE nodes.id = :id",
                lambda ctx: [{"id": n2.id}, {"id": n3.id}],
            ),
            CompiledSQL(
                "DELETE FROM nodes WHERE nodes.id = :id",
                lambda ctx: {"id": n1.id},
            ),
        )
def test_one_to_many_delete_parent(self):
    """Deleting only the parent first nulls the children's FK via
    UPDATE (no delete cascade configured), then deletes the parent."""
    Node, nodes = self.classes.Node, self.tables.nodes
    mapper(Node, nodes, properties={"children": relationship(Node)})
    sess = create_session()
    n2, n3 = Node(data="n2", children=[]), Node(data="n3", children=[])
    n1 = Node(data="n1", children=[n2, n3])
    sess.add(n1)
    sess.flush()
    sess.delete(n1)
    self.assert_sql_execution(
        testing.db,
        sess.flush,
        AllOf(
            CompiledSQL(
                "UPDATE nodes SET parent_id=:parent_id "
                "WHERE nodes.id = :nodes_id",
                lambda ctx: [
                    {"nodes_id": n3.id, "parent_id": None},
                    {"nodes_id": n2.id, "parent_id": None},
                ],
            )
        ),
        CompiledSQL(
            "DELETE FROM nodes WHERE nodes.id = :id",
            lambda ctx: {"id": n1.id},
        ),
    )
def test_many_to_one_save(self):
    """Saving from the many-to-one side still inserts the referenced
    parent row first, then the two referring rows."""
    Node, nodes = self.classes.Node, self.tables.nodes
    mapper(
        Node,
        nodes,
        properties={"parent": relationship(Node, remote_side=nodes.c.id)},
    )
    sess = create_session()
    n1 = Node(data="n1")
    n2, n3 = Node(data="n2", parent=n1), Node(data="n3", parent=n1)
    sess.add_all([n2, n3])
    self.assert_sql_execution(
        testing.db,
        sess.flush,
        CompiledSQL(
            "INSERT INTO nodes (parent_id, data) VALUES "
            "(:parent_id, :data)",
            {"parent_id": None, "data": "n1"},
        ),
        # AllOf: the two child inserts may occur in either order
        AllOf(
            CompiledSQL(
                "INSERT INTO nodes (parent_id, data) VALUES "
                "(:parent_id, :data)",
                lambda ctx: {"parent_id": n1.id, "data": "n2"},
            ),
            CompiledSQL(
                "INSERT INTO nodes (parent_id, data) VALUES "
                "(:parent_id, :data)",
                lambda ctx: {"parent_id": n1.id, "data": "n3"},
            ),
        ),
    )
def test_many_to_one_delete_all(self):
    """With a many-to-one mapping, deleting referrers and referenced in
    one flush deletes the referring rows first, then the target."""
    Node, nodes = self.classes.Node, self.tables.nodes
    mapper(
        Node,
        nodes,
        properties={"parent": relationship(Node, remote_side=nodes.c.id)},
    )
    sess = create_session()
    n1 = Node(data="n1")
    n2, n3 = Node(data="n2", parent=n1), Node(data="n3", parent=n1)
    sess.add_all([n2, n3])
    sess.flush()
    sess.delete(n1)
    sess.delete(n2)
    sess.delete(n3)
    self.assert_sql_execution(
        testing.db,
        sess.flush,
        CompiledSQL(
            "DELETE FROM nodes WHERE nodes.id = :id",
            lambda ctx: [{"id": n2.id}, {"id": n3.id}],
        ),
        CompiledSQL(
            "DELETE FROM nodes WHERE nodes.id = :id",
            lambda ctx: {"id": n1.id},
        ),
    )
def test_many_to_one_set_null_unloaded(self):
    """Assigning None to a not-yet-loaded many-to-one emits an UPDATE
    that nulls out the FK column."""
    Node, nodes = self.classes.Node, self.tables.nodes
    mapper(
        Node,
        nodes,
        properties={"parent": relationship(Node, remote_side=nodes.c.id)},
    )
    sess = create_session()
    n1 = Node(data="n1")
    n2 = Node(data="n2", parent=n1)
    sess.add_all([n1, n2])
    sess.flush()
    sess.close()
    # querying the closed session re-begins it; n2.parent is unloaded here
    n2 = sess.query(Node).filter_by(data="n2").one()
    n2.parent = None
    self.assert_sql_execution(
        testing.db,
        sess.flush,
        CompiledSQL(
            "UPDATE nodes SET parent_id=:parent_id WHERE "
            "nodes.id = :nodes_id",
            lambda ctx: {"parent_id": None, "nodes_id": n2.id},
        ),
    )
def test_cycle_rowswitch(self):
    """Row switch within a self-referential cycle: the pending n3 takes
    over deleted n2's primary key in the same flush without error."""
    Node, nodes = self.classes.Node, self.tables.nodes
    mapper(Node, nodes, properties={"children": relationship(Node)})
    sess = create_session()
    n2, n3 = Node(data="n2", children=[]), Node(data="n3", children=[])
    n1 = Node(data="n1", children=[n2])
    sess.add(n1)
    sess.flush()
    sess.delete(n2)
    # reuse the deleted row's PK for the pending n3 ("row switch")
    n3.id = n2.id
    n1.children.append(n3)
    sess.flush()
def test_bidirectional_mutations_one(self):
    """Mixed delete/append mutations on a bidirectional self-referential
    relationship flush cleanly across several flush cycles."""
    Node, nodes = self.classes.Node, self.tables.nodes
    mapper(
        Node,
        nodes,
        properties={
            "children": relationship(
                Node, backref=backref("parent", remote_side=nodes.c.id)
            )
        },
    )
    sess = create_session()
    n2, n3 = Node(data="n2", children=[]), Node(data="n3", children=[])
    n1 = Node(data="n1", children=[n2])
    sess.add(n1)
    sess.flush()
    sess.delete(n2)
    n1.children.append(n3)
    sess.flush()
    sess.delete(n1)
    sess.delete(n3)
    sess.flush()
def test_bidirectional_multilevel_save(self):
    """A two-level self-referential tree is inserted top-down: root,
    then its children, then the grandchildren under n12."""
    Node, nodes = self.classes.Node, self.tables.nodes
    mapper(
        Node,
        nodes,
        properties={
            "children": relationship(
                Node, backref=backref("parent", remote_side=nodes.c.id)
            )
        },
    )
    sess = create_session()
    n1 = Node(data="n1")
    n1.children.append(Node(data="n11"))
    n12 = Node(data="n12")
    n1.children.append(n12)
    n1.children.append(Node(data="n13"))
    n1.children[1].children.append(Node(data="n121"))
    n1.children[1].children.append(Node(data="n122"))
    n1.children[1].children.append(Node(data="n123"))
    sess.add(n1)
    self.assert_sql_execution(
        testing.db,
        sess.flush,
        CompiledSQL(
            "INSERT INTO nodes (parent_id, data) VALUES "
            "(:parent_id, :data)",
            lambda ctx: {"parent_id": None, "data": "n1"},
        ),
        CompiledSQL(
            "INSERT INTO nodes (parent_id, data) VALUES "
            "(:parent_id, :data)",
            lambda ctx: {"parent_id": n1.id, "data": "n11"},
        ),
        CompiledSQL(
            "INSERT INTO nodes (parent_id, data) VALUES "
            "(:parent_id, :data)",
            lambda ctx: {"parent_id": n1.id, "data": "n12"},
        ),
        CompiledSQL(
            "INSERT INTO nodes (parent_id, data) VALUES "
            "(:parent_id, :data)",
            lambda ctx: {"parent_id": n1.id, "data": "n13"},
        ),
        CompiledSQL(
            "INSERT INTO nodes (parent_id, data) VALUES "
            "(:parent_id, :data)",
            lambda ctx: {"parent_id": n12.id, "data": "n121"},
        ),
        CompiledSQL(
            "INSERT INTO nodes (parent_id, data) VALUES "
            "(:parent_id, :data)",
            lambda ctx: {"parent_id": n12.id, "data": "n122"},
        ),
        CompiledSQL(
            "INSERT INTO nodes (parent_id, data) VALUES "
            "(:parent_id, :data)",
            lambda ctx: {"parent_id": n12.id, "data": "n123"},
        ),
    )
def test_singlecycle_flush_size(self):
    """The self-referential unit of work stays small: two steps for a
    plain save/update, three once a child edge must be processed, and
    merely loading the collection adds no flush work."""
    Node, nodes = self.classes.Node, self.tables.nodes
    mapper(Node, nodes, properties={"children": relationship(Node)})
    sess = create_session()
    n1 = Node(data="ed")
    sess.add(n1)
    self._assert_uow_size(sess, 2)
    sess.flush()
    n1.data = "jack"
    self._assert_uow_size(sess, 2)
    sess.flush()
    n2 = Node(data="foo")
    sess.add(n2)
    sess.flush()
    n1.children.append(n2)
    self._assert_uow_size(sess, 3)
    sess.flush()
    sess = create_session()
    n1 = sess.query(Node).first()
    n1.data = "ed"
    self._assert_uow_size(sess, 2)
    # touching the collection loads it but adds no flush work
    n1.children
    self._assert_uow_size(sess, 2)
def test_delete_unloaded_m2o(self):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(
Node,
nodes,
properties={"parent": relationship(Node, remote_side=nodes.c.id)},
)
parent = Node()
c1, c2 = Node(parent=parent), Node(parent=parent)
session = Session()
session.add_all([c1, c2])
session.add(parent)
session.flush()
pid = parent.id
c1id = c1.id
c2id = c2.id
session.expire(parent)
session.expire(c1)
session.expire(c2)
session.delete(c1)
session.delete(c2)
session.delete(parent)
# testing that relationships
# are loaded even if all ids/references are
# expired
self.assert_sql_execution(
testing.db,
session.flush,
AllOf(
# ensure all three m2os are loaded.
# the selects here are in fact unexpiring
# each row - the m2o comes from the identity map.
CompiledSQL(
"SELECT nodes.id AS nodes_id, nodes.parent_id AS "
"nodes_parent_id, "
"nodes.data AS nodes_data FROM nodes "
"WHERE nodes.id = :param_1",
lambda ctx: {"param_1": pid},
),
CompiledSQL(
"SELECT nodes.id AS nodes_id, nodes.parent_id AS "
"nodes_parent_id, "
"nodes.data AS nodes_data FROM nodes "
"WHERE nodes.id = :param_1",
lambda ctx: {"param_1": c1id},
),
CompiledSQL(
"SELECT nodes.id AS nodes_id, nodes.parent_id AS "
"nodes_parent_id, "
"nodes.data AS nodes_data FROM nodes "
"WHERE nodes.id = :param_1",
lambda ctx: {"param_1": c2id},
),
AllOf(
CompiledSQL(
"DELETE FROM nodes WHERE nodes.id = :id",
lambda ctx: [{"id": c1id}, {"id": c2id}],
),
CompiledSQL(
"DELETE FROM nodes WHERE nodes.id | |
tag for an XML element.
Args:
has_contents: Boolean indicating if the element has
sub-elements or text.
"""
if has_contents:
self.write_to_xmlfile(">")
self.indent_level += 1
else:
self.write_to_xmlfile(" />")
def display_cpu_time(self, start):
    """
    Displays the elapsed CPU time since the start time.

    Args:
        start: Floating-point value representing start time in seconds.
    """
    # NOTE(review): time.clock() was removed in Python 3.8; this works under
    # IDA's bundled interpreter but should be confirmed before upgrading.
    idc.msg('CPU time: %6.4f' % (time.clock() - start))
def end_element(self, tag, newline=True):
    """
    Writes the element end tag to the XML file.

    Args:
        tag: String containing the element name.
        newline: Boolean indicating if end tag should go on new line.
    """
    # closing a tag pops one indentation level before the tag is written
    self.indent_level -= 1
    if newline:
        start = '\n' + (" " * self.indent_level)
    else:
        start = ''
    self.write_to_xmlfile(start + "</" + tag + ">")
# BIT_MASK not currently supported for ENUM
def export_bitmask(self, eid, mask):
    """
    Exports an enum bitmask member as BIT_MASK element.

    Args:
        eid: Integer representing the IDA enum id
        mask: Integer representing the IDA enum mask value
    """
    name = idc.get_bmask_name(eid, mask)
    if name == None:
        return
    self.start_element(BIT_MASK)
    self.write_attribute(NAME, name)
    self.write_numeric_attribute(VALUE, mask)
    regcmt = idc.get_bmask_cmt(eid, mask, False)
    rptcmt = idc.get_bmask_cmt(eid, mask, True)
    # the element is only left open (and later closed) when comments exist;
    # otherwise close_tag(False) emits a self-closing tag
    has_comment = regcmt != None or rptcmt != None
    self.close_tag(has_comment)
    if regcmt != None and len(regcmt) > 0:
        self.export_regular_cmt(regcmt)
    if rptcmt != None and len(rptcmt) > 0:
        self.export_repeatable_cmt(rptcmt)
    if (has_comment):
        self.end_element(BIT_MASK)
def export_bookmarks(self):
    """
    Exports marked location descriptions as BOOKMARK elements.
    """
    found = False
    timer = time.clock()
    # NOTE(review): slots 0..1024 inclusive are probed; confirm this limit
    # against the bookmark slot count of the IDA API in use.
    for slot in range(0, 1025):
        address = idc.get_bookmark(slot)
        description = idc.get_bookmark_desc(slot)
        if address == BADADDR:
            continue
        if description == None:
            continue
        if found == False:
            # open the enclosing BOOKMARKS element lazily, on first hit
            found = True
            self.update_status(BOOKMARKS)
            self.start_element(BOOKMARKS, True)
        self.start_element(BOOKMARK)
        self.write_address_attribute(ADDRESS, address)
        self.write_attribute(DESCRIPTION, description)
        self.close_tag()
    if found:
        self.end_element(BOOKMARKS)
    self.display_cpu_time(timer)
def export_c_comments(self):
    """
    Exports block and end-of-line comments entered in the decompiler
    interface.

    No-op when the Hex-Rays decompiler is not available.
    """
    if self.hexrays == False:
        return
    functions = idautils.Functions()
    if functions == None:
        return
    for addr in functions:
        try:
            if ida_segment.is_spec_ea(addr):
                continue
            ccmts = ida_hexrays.restore_user_cmts(addr)
            if ccmts == None:
                continue
            p = ida_hexrays.user_cmts_begin(ccmts)
            while p != ida_hexrays.user_cmts_end(ccmts):
                cmk = ida_hexrays.user_cmts_first(p)
                cmv = ida_hexrays.user_cmts_second(p)
                # positions up to ITP_COLON anchor at the end of a line;
                # later positions are block ("pre") comments
                if cmk.itp < (ida_hexrays.ITP_COLON + 1):
                    self.export_comment(cmk.ea, "end-of-line", cmv.c_str())
                else:
                    self.export_comment(cmk.ea, "pre", cmv.c_str())
                p = ida_hexrays.user_cmts_next(p)
            ida_hexrays.user_cmts_free(ccmts)
        except:
            # NOTE(review): bare except keeps the export running if hexrays
            # fails on one function; it also hides real bugs — consider
            # narrowing to Exception and logging.
            continue
def export_code(self):
    """
    Exports the address ranges of code sequences as CODE_BLOCK(s)
    with START and END address attributes.

    Walks from the first code address, cutting a block at the next data
    item, unknown byte, or segment boundary, whichever comes first.
    """
    addr = self.min_ea
    if idc.is_code(idc.get_full_flags(addr)) == False:
        addr = ida_bytes.next_that(addr, self.max_ea, idc.is_code)
    if (addr == BADADDR):
        return
    self.update_status(CODE)
    timer = time.clock()
    # nearest non-code items ahead; min(data, unknown) bounds the block
    data = ida_bytes.next_that(addr, self.max_ea, idc.is_data)
    unknown = ida_bytes.next_unknown(addr, self.max_ea)
    self.start_element(CODE, True)
    while (addr != BADADDR):
        start = addr
        end = min(data, unknown)
        if (end == BADADDR):
            # no more data/unknown items: block runs to segment or db end
            if (ida_segment.getseg(start).end_ea < self.max_ea):
                codeend = ida_segment.getseg(start).end_ea - 1
                addr = ida_segment.getseg(idc.next_addr(codeend)).start_ea
                if idc.is_code(idc.get_full_flags(addr)) == False:
                    addr = ida_bytes.next_that(addr, self.max_ea,
                                               idc.is_code)
            else:
                codeend = self.max_ea - 1
                addr = BADADDR
        else:
            if (ida_segment.getseg(start).end_ea < end):
                # segment boundary comes first: cut the block there
                codeend = ida_segment.getseg(start).end_ea - 1
                addr = ida_segment.getseg(idc.next_addr(codeend)).start_ea
                if idc.is_code(ida_bytes.get_full_flags(addr)) == False:
                    addr = ida_bytes.next_that(addr, self.max_ea,
                                               idc.is_code)
            else:
                # block ends at the last code item before the non-code item
                codeend = idc.get_item_end(ida_bytes.prev_that(end,
                                           start, idc.is_code)) - 1
                addr = ida_bytes.next_that(end, self.max_ea, idc.is_code)
            # advance the data/unknown scouts past the new position
            if (data < addr):
                data = ida_bytes.next_that(addr, self.max_ea,
                                           idc.is_data)
            if (unknown < addr):
                unknown = ida_bytes.next_unknown(addr, self.max_ea)
        self.start_element(CODE_BLOCK)
        self.write_address_attribute(START, start)
        self.write_address_attribute(END, codeend)
        self.close_tag()
    self.end_element(CODE)
    self.display_cpu_time(timer)
def export_comment(self, addr, cmt_type, cmt):
    """
    Exports a <COMMENT> element with ADDRESS and TYPE attributes.
    The comment is exported as the element text (parsed character data).

    Args:
        addr: Integers representing address of comment.
        cmt_type: String indicating the comment type.
        cmt: String containing the comment.
    """
    self.start_element(COMMENT)
    self.write_address_attribute(ADDRESS, addr)
    self.write_attribute(TYPE, cmt_type)
    self.close_tag(True)
    # tag_remove seems to be losing last character
    # work around is to add a space
    cmt_text = ida_lines.tag_remove(cmt + ' ')
    self.write_text(cmt_text)
    self.end_element(COMMENT, False)
def export_comments(self):
    """
    Exports all comments in the IDA database as <COMMENT> elements.

    Covers regular/repeatable comments, anterior/posterior ("extra")
    lines, and decompiler comments (via export_c_comments).
    """
    addr = self.min_ea
    if ida_bytes.has_cmt(idc.get_full_flags(addr)) == False:
        addr = ida_bytes.next_that(addr, self.max_ea, ida_bytes.has_cmt)
    if (addr == BADADDR):
        return
    self.update_status(COMMENTS)
    timer = time.clock()
    self.start_element(COMMENTS, True)
    # pass 1: regular and repeatable comments
    while (addr != BADADDR):
        cmt = idc.get_cmt(addr, False)
        if (cmt != None):
            self.export_comment(addr, "end-of-line", cmt)
        cmt = idc.get_cmt(addr, True)
        if (cmt != None):
            self.export_comment(addr, "repeatable", cmt)
        addr = ida_bytes.next_that(addr, self.max_ea, ida_bytes.has_cmt)
    # pass 2: anterior ("pre") and posterior ("post") extra comment lines
    addr = self.min_ea
    if ida_bytes.has_extra_cmts(idc.get_full_flags(addr)) == False:
        addr = ida_bytes.next_that(
            addr, self.max_ea, ida_bytes.has_extra_cmts)
    while (addr != BADADDR):
        extra = idc.get_extra_cmt(addr, idc.E_PREV)
        if (extra != None):
            self.export_extra_comment(addr, "pre", idc.E_PREV)
        extra = idc.get_extra_cmt(addr, idc.E_NEXT)
        if (extra != None):
            self.export_extra_comment(addr, "post", idc.E_NEXT)
        addr = ida_bytes.next_that(
            addr, self.max_ea, ida_bytes.has_extra_cmts)
    self.export_c_comments()
    self.end_element(COMMENTS)
    self.display_cpu_time(timer)
def export_data(self):
    """
    Exports the data items in the database as <DEFINED_DATA> elements.

    Each item is written with ADDRESS, DATATYPE and SIZE attributes;
    arrays get a "type[count]" datatype, and demangled type info is
    attached as a nested typeinfo comment when available.
    """
    addr = self.min_ea
    if idc.is_data(idc.get_full_flags(addr)) == False:
        addr = ida_bytes.next_that(addr, self.max_ea, idc.is_data)
    if (addr == BADADDR):
        return
    timer = time.clock()
    self.update_status(DATA)
    self.start_element(DATA, True)
    while (addr != BADADDR):
        f = idc.get_full_flags(addr)
        if ida_bytes.is_align(f) == True:
            # alignment directives are not exported as data
            addr = ida_bytes.next_that(addr, self.max_ea, idc.is_data)
            continue
        dtype = self.get_datatype(addr)
        size = idc.get_item_size(addr)
        ti = ida_nalt.opinfo_t()
        msize = ida_bytes.get_data_elsize(addr, f, ti)
        if ida_bytes.is_struct(f) == True:
            s = idc.get_struc_id(dtype)
            msize = idc.get_struc_size(s)
        if msize == 0:
            msize = 1
        if idc.is_strlit(f) == False and size != msize:
            # BUG FIX: use floor division — under Python 3 "/" yields a
            # float and the element count must be an integer
            dtype = "%s[%d]" % (dtype, size // msize)
        self.start_element(DEFINED_DATA)
        self.write_address_attribute(ADDRESS, addr)
        self.write_attribute(DATATYPE, dtype)
        self.write_numeric_attribute(SIZE, size * self.cbsize)
        # TODO consider using GetTrueNameEx and Demangle
        demangled = ida_name.get_demangled_name(addr,
            DEMANGLED_TYPEINFO, self.inf.demnames, idc.GN_STRICT)
        outbuf = ''
        # TODO: How to handle print_type for data mangled names?
        #outbuf = idaapi.print_type(addr, False)
        if demangled == "'string'":
            # BUG FIX: the original used "demangled == None", a no-op
            # comparison; the intent is to discard the bogus pseudo-name
            demangled = None
        has_typeinfo = ((demangled != None and len(demangled) > 0) or
                        (outbuf != None and len(outbuf) > 0))
        # TODO export_data: add DISPLAY_SETTINGS
        self.close_tag(has_typeinfo)
        if has_typeinfo == True:
            if demangled != None and len(demangled) > 0:
                self.export_typeinfo_cmt(demangled)
            elif len(outbuf) > 0:
                self.export_typeinfo_cmt(outbuf)
            self.end_element(DEFINED_DATA)
        addr = ida_bytes.next_that(addr, self.max_ea, idc.is_data)
    self.end_element(DATA)
    self.display_cpu_time(timer)
def export_datatypes(self):
    """
    Exports the structures and enums in IDA database.
    """
    # skip if no structures/unions to export
    # NOTE(review): enums are also skipped when get_struc_qty() == 0 —
    # confirm that a database with enums but no structs is not expected.
    if idc.get_struc_qty() == 0:
        return
    self.update_status(DATATYPES)
    timer = time.clock()
    self.start_element(DATATYPES, True)
    self.export_structures()
    self.export_enums()
    self.end_element(DATATYPES)
    self.display_cpu_time(timer)
def export_enum_member(self, cid, bf, mask, radix, signness):
    """
    Exports a member of an enum.

    Args:
        cid: Integer representing id of enum member
        bf: Boolean indicates if a bitfield
        mask: Integer representing bitmask if bitfield
        radix: Integer representing numeric display format
        signness: Boolean indicating if signed value
    """
    cname = ida_enum.get_enum_member_name(cid)
    if cname == None or len(cname) == 0:
        return
    regcmt = ida_enum.get_enum_member_cmt(cid, False)
    rptcmt = ida_enum.get_enum_member_cmt(cid, True)
    # the element stays open only when comments must be nested inside
    has_comment = regcmt != None or rptcmt != None
    self.start_element(ENUM_ENTRY)
    self.write_attribute(NAME, cname)
    value = ida_enum.get_enum_member_value(cid)
    self.write_numeric_attribute(VALUE, value, radix, signness)
    # BIT_MASK attribute not currently supported for ENUM_ENTRY
    # if bf == True:
    #    self.write_numeric_attribute(BIT_MASK, mask)
    self.close_tag(has_comment)
    if regcmt != None and len(regcmt) > 0:
        self.export_regular_cmt(regcmt)
    if rptcmt != None and len(rptcmt) > 0:
        self.export_repeatable_cmt(rptcmt)
    if (has_comment):
        self.end_element(ENUM_ENTRY)
def export_enum_members(self, eid, bf, eflags):
    """
    Exports the members of an enum.
    This function can only be called by IDA versions newer than 6.3

    Args:
        eid: Integer representing id of enum
        bf: Boolean indicates if a bitfield
        eflags: Integer representing the enum flags
    """
    mask = 0xFFFFFFFF
    if bf == True:
        mask = idc.get_first_bmask(eid)
    first = True
    for n in range(idc.get_enum_size(eid)):
        # walk member values within the current mask
        if (first == True):
            value = ida_enum.get_first_enum_member(eid, mask)
            first = False
        else:
            value = ida_enum.get_next_enum_member(eid, value, mask)
        (cid, serial) = ida_enum.get_first_serial_enum_member(eid, value, mask)
        main_cid = cid
        # export every serial (duplicate-valued) member for this value
        while cid != BADNODE:
            self.export_enum_member(cid, bf, mask,
                                    ida_bytes.get_radix(eflags, 0),
                                    self.is_signed_data(eflags))
            last_value = ida_enum.get_last_enum_member(eid, mask)
            if value == last_value:
                # end of this mask's values: advance to the next bitmask
                # ENUM BIT_MASK exporting not currently supported
                #self.export_bitmask(eid, mask)
                mask = idc.get_next_bmask(eid, mask)
                first = True
            (cid, serial) = ida_enum.get_next_serial_enum_member(serial, main_cid)
def export_enum_reference(self, addr, op):
"""
Exports the enum reference for an operand at an address.
Args:
addr: Integer representing the instruction address.
op: Integer representing the operand index (0-based)
"""
(eid, serial) = ida_bytes.get_enum_id(addr, op)
insn = ida_ua.insn_t()
ida_ua.decode_insn(insn, addr)
value = insn.ops[op].value
cid = BADNODE
last = idc.get_last_bmask(eid)
if idc.is_bf(eid) == True:
last = idc.get_last_bmask(eid)
mask = idc.get_first_bmask(eid)
while cid == BADNODE:
cid = ida_enum.get_enum_member(eid, (value & mask), 0, mask)
if cid != BADNODE or mask == last:
break
mask = idc.get_next_bmask(eid, mask)
else:
cid = ida_enum.get_enum_member(eid, value, 0, last)
if cid == BADNODE:
return
self.start_element(EQUATE_REFERENCE)
self.write_address_attribute(ADDRESS, addr)
self.write_numeric_attribute(OPERAND_INDEX, op, 10)
self.write_numeric_attribute(
VALUE, ida_enum.get_enum_member_value(cid))
cname = ida_enum.get_enum_member_name(cid)
if cname != None | |
just a nice shortcut method to something I seemed to do a lot
return -- unicode
'''
return 'http{}://{}.com'.format(
's' if random.choice([True, False]) else '',
get_ascii()
)
def get_str(str_size=0, chars=None):
    '''
    generate a random unicode string

    if chars is None, this can generate up to a 4-byte utf-8 unicode string, which can
    break legacy utf-8 things

    str_size -- integer -- how long you want the string to be
    chars -- sequence -- the characters you want the string to use, if this is None, it
        will default to pretty much the entire unicode range of characters
    return -- unicode
    '''
    if str_size == 0:
        str_size = random.randint(3, 20)
    sg = None
    if chars is None:
        # chars can be any range in unicode (based off of table 3.7 of Unicode 6.2.0
        # pg 42 - http://www.unicode.org/versions/Unicode6.2.0/ch03.pdf
        # via: http://stackoverflow.com/questions/1477294/generate-random-utf-8-string-in-python
        byte_range = lambda first, last: range(first, last+1)
        first_values = list(byte_range(0x00, 0x7F)) + list(byte_range(0xC2, 0xF4))
        trailing_values = list(byte_range(0x80, 0xBF))
        def random_utf8_seq():
            # build one well-formed UTF-8 byte sequence (1-4 bytes); the
            # lead byte decides how many continuation bytes follow
            while True:
                first = random.choice(first_values)
                if first <= 0x7F: # U+0000...U+007F
                    return bytearray([first])
                elif (first >= 0xC2) and (first <= 0xDF): # U+0080...U+07FF
                    return bytearray([first, random.choice(trailing_values)])
                elif first == 0xE0: # U+0800...U+0FFF
                    return bytearray([first, random.choice(byte_range(0xA0, 0xBF)), random.choice(trailing_values)])
                elif (first >= 0xE1) and (first <= 0xEC): # U+1000...U+CFFF
                    return bytearray([first, random.choice(trailing_values), random.choice(trailing_values)])
                elif first == 0xED: # U+D000...U+D7FF
                    return bytearray([first, random.choice(byte_range(0x80, 0x9F)), random.choice(trailing_values)])
                elif (first >= 0xEE) and (first <= 0xEF): # U+E000...U+FFFF
                    return bytearray([first, random.choice(trailing_values), random.choice(trailing_values)])
                else:
                    # 4-byte lead bytes are only usable on wide unicode
                    # builds; on a narrow build the loop retries with a
                    # new lead byte instead of returning
                    if sys.maxunicode > 65535:
                        if first == 0xF0: # U+10000...U+3FFFF
                            return bytearray(
                                [
                                    first,
                                    random.choice(byte_range(0x90, 0xBF)),
                                    random.choice(trailing_values),
                                    random.choice(trailing_values)
                                ]
                            )
                        elif (first >= 0xF1) and (first <= 0xF3): # U+40000...U+FFFFF
                            return bytearray(
                                [
                                    first,
                                    random.choice(trailing_values),
                                    random.choice(trailing_values),
                                    random.choice(trailing_values)
                                ]
                            )
                        elif first == 0xF4: # U+100000...U+10FFFF
                            return bytearray(
                                [
                                    first,
                                    random.choice(byte_range(0x80, 0x8F)),
                                    random.choice(trailing_values),
                                    random.choice(trailing_values)
                                ]
                            )
        sg = (random_utf8_seq().decode('utf-8') for c in range(str_size))
    else:
        # we have a defined set of chars
        sg = (random.choice(chars) for c in range(str_size))
    s = ''.join(sg)
    return s
# common aliases
get_unicode = get_str
get_string = get_str
def get_hex(str_size=0):
    '''
    generate a random string consisting only of hexadecimal characters

    str_size -- integer -- how long you want the string to be (0 = random length)
    return -- unicode
    '''
    hex_chars = string.hexdigits.lower()
    return get_str(str_size=str_size, chars=hex_chars)
def get_ascii(str_size=0):
    '''
    generate a random string of ascii letters and digits only

    str_size -- integer -- how long you want the string to be (0 = random length)
    return -- unicode
    '''
    return get_str(str_size=str_size, chars=string.ascii_letters + string.digits)

# aliases kept for backwards compatibility
get_ascii_str = get_ascii
get_ascii_string = get_ascii
get_alphanum = get_ascii
get_alphanum_str = get_ascii
get_alphanum_string = get_ascii
get_alphanumeric = get_ascii
get_alphanumeric_str = get_ascii
get_alphanumeric_string = get_ascii
def get_hash(str_size=32):
    """Returns a random hash, if you want an md5 use get_md5(), if you want an
    uuid use get_uuid()

    :param str_size: int, how many characters the returned hash string has
    :returns: str, a random alphanumeric string of str_size characters
    """
    return get_ascii(str_size)
def get_md5(val=""):
    """Return an md5 hash of val, if no val then return a random md5 hash

    :param val: string, the value you want to md5 hash
    :returns: string, the md5 hash as a 32 char hex string
    """
    if not val:
        # no value given, hash a random UUID instead
        val = get_uuid()
    if is_py2:
        return hashlib.md5(str(val)).hexdigest()
    # Python 3: text must be encoded before hashing, bytes pass straight through
    if getattr(val, "encode", None):
        return hashlib.md5(val.encode("utf-8")).hexdigest()
    return hashlib.md5(val).hexdigest()
def get_uuid():
    """Generate a random version-4 UUID.

    :returns: str, a canonical 36 character UUID string such as
        '3088d703-6ad0-4d62-b0d3-0ff824a707f5'
    """
    return str(uuid.uuid4())
def get_float(min_size=None, max_size=None):
    """return a random float

    sames as the random method but automatically sets min and max

    :param min_size: float, the minimum float size you want
    :param max_size: float, the maximum float size you want
    :returns: float, a random value between min_size and max_size
    """
    # default unset bounds to the platform's float limits
    lo = sys.float_info.min if min_size is None else min_size
    hi = sys.float_info.max if max_size is None else max_size
    return random.uniform(lo, hi)
def get_posfloat(max_size=None):
    """Similar to get_float but the random float will always be positive

    :param max_size: float, the maximum float size
    :returns: float, a random float between 0.0 and max_size
    """
    return get_float(0.0, max_size)
# aliases kept for backwards compatibility
get_positive_float = get_posfloat
get_positivefloat = get_posfloat
def get_unique_float(min_size=None, max_size=None):
    '''
    get a random unique float

    no different than random.uniform() except it automatically can set range, and
    guarrantees that no 2 floats are the same

    return -- float
    '''
    global _previous_floats
    while True:
        candidate = get_float(min_size, max_size)
        if candidate not in _previous_floats:
            _previous_floats.add(candidate)
            # we cap the seen-set at MAX_UNIQUE values so memory stays bounded
            if len(_previous_floats) > environ.MAX_UNIQUE:
                _previous_floats.pop()
            return candidate
def get_digits(count, n=None):
    """return a string value that contains count digits

    :param count: int, how many digits you want, so if you pass in 4, you would get
        4 digits
    :param n: int, if you already have a value and want it to for sure by count digits
    :returns: string, this returns a string because the digits might start with
        zero
    """
    largest = int("9" * count)
    if n is None:
        n = get_int(0, largest)
    elif n > largest:
        raise ValueError("n={} has more than {} digits".format(n, count))
    # zero-pad on the left out to count characters
    return "{{:0>{}}}".format(count).format(n)

# aliases kept for backwards compatibility
get_digit = get_digits
get_count_digits = get_digits
def get_posint(max_size=2**31-1):
    """
    just return a positive 32-bit integer, this is basically a wrapper around
    random.randint where you don't have to specify a minimum (or a maximum if you
    don't want)
    """
    return random.randint(1, max_size)

# aliases kept for backwards compatibility
get_positive_int = get_posint
get_positive_integer = get_posint
get_posinteger = get_posint
get_pint = get_posint
def get_int(min_size=1, max_size=sys.maxsize):
    """Return a random integer in [min_size, max_size], both inclusive."""
    return random.randint(min_size, max_size)
get_integer=get_int
def get_int32(min_size=1):
    """returns a 32-bit positive integer"""
    max_int32 = (1 << 31) - 1
    return random.randint(min_size, max_int32)
get_integer32=get_int32
def get_int64(min_size=1):
    """returns up to a 64-bit positive integer"""
    max_int64 = (1 << 63) - 1
    return random.randint(min_size, max_int64)
get_integer64=get_int64
def get_unique_int(min_size=1, max_size=sys.maxsize):
    '''
    get a random unique integer

    no different than random.randint except that it guarrantees no int will be
    the same, and also you don't have to set a range, it will default to all max
    int size

    :raises ValueError: if no unseen integer could be found

    return -- integer
    '''
    global _previous_ints
    i = 0
    found = False
    # there are (max_size - min_size + 1) candidate values, so allow that many
    # attempts before giving up.  BUG FIX: the original range(max_size - min_size)
    # was off by one and always raised when min_size == max_size, even though
    # that single value may never have been returned before.
    for _ in range(max_size - min_size + 1):
        i = random.randint(min_size, max_size)
        if i not in _previous_ints:
            found = True
            _previous_ints.add(i)
            # we cap the list at 100000 unique values
            if len(_previous_ints) > environ.MAX_UNIQUE:
                _previous_ints.pop()
            break
    if not found:
        raise ValueError("no unique ints from {} to {} could be found".format(min_size, max_size))
    return i

get_uniq_int = get_unique_int
get_uniq_integer = get_unique_int
get_unique_integer = get_unique_int
def get_counter(start=1, step=1):
    """Because sometimes you just want to count, this is just a wrapper around
    itertools.count

    :Example:
        c = testdata.get_counter()
        c() # 1
        c() # 2
        c() # 3

    :param start: int, the number to start at
    :param step: int, the increment each time the callback is called
    :returns: callable, everytime you invoke it it will increment by step
    """
    counter = itertools.count(start, step)

    def _tick():
        # advance the shared iterator by one step and return the new value
        return next(counter)

    return _tick
def get_range(max_size=10):
    """Because sometimes you just want a random range

    https://github.com/Jaymon/testdata/issues/74

    :param max_size: int, the max range stop value you want
    :returns: range that can be iterated
    """
    if yes():
        # half the time: start at 1, stop somewhere in [1, max_size + 1]
        start, stop = 1, get_int(1, max_size + 1)
    else:
        # otherwise: start at 0, stop somewhere in [1, max_size]
        start, stop = 0, get_int(max_size=max_size)
    return range(start, stop)
def get_list(callback, max_size=100):
    """Create a list filled with values returned from callback

    https://github.com/Jaymon/testdata/issues/73

    :param callback: callable, each item in the list will be populated by calling this
    :param max_size: int, the maximum size of the list
    :returns: list, the randomly generated list
    """
    # one callback invocation per slot of a randomly sized range
    return [callback() for _ in get_range(max_size)]
def get_dict(*keys, **kv):
    """Create a dict filled with key/values returned from kv

    https://github.com/Jaymon/testdata/issues/73

    :param keys: explicit key names to populate with random value generators
    :param kv: dict, each key/callable will be used to generate a random dict key/val
    :returns: dict, the randomly generated dict
    """
    def _random_callback():
        # value generator: either a 5-word string maker or get_int
        return (lambda: get_words(5)) if yes() else get_int

    if keys:
        kv = {k: _random_callback() for k in keys}
    if not kv:
        # no keys or kv given: invent a handful of random ascii keys
        kv = {get_ascii_string(): _random_callback() for _ in get_range(5)}
    return {k: callback() for k, callback in kv.items()}
def get_ascii_words(count=0, as_str=True, **kwargs):
    """Like get_words() but restricted to the module's ASCII word list."""
    return get_words(count, as_str, words=_ascii_words, **kwargs)
def get_ascii_word():
    """Return one random word from the module's ASCII word list."""
    return get_words(1, as_str=True, words=_ascii_words)
def get_unicode_words(count=0, as_str=True, **kwargs):
    """Like get_words() but drawn from the module's unicode word list."""
    return get_words(count, as_str, words=_unicode_words, **kwargs)
get_uni_words = get_unicode_words
def get_unicode_word():
    """Return one random word from the module's unicode word list."""
    return get_words(1, as_str=True, words=_unicode_words)
get_uni_word = get_unicode_word
def get_words(count=0, as_str=True, words=None, **kwargs):
'''get some amount of random words
:param count: integer, how many words you want, 0 means a random amount (at most 20)
:param as_str: boolean, True to return as string, false to return as list of words
:param words: list, a list of | |
1.0, 1.0], [1.0, 1.0, 1.0, 1.0]], [[1.0, 1.0, 1.0, 1.0], [1.0,
1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]], [[[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]], [[1.0, 1.0, 1.0,
1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]], [[[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]], [[1.0,
1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]], [[1.0, 1.0, 1.0,
1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]], [[[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]], [[1.0,
1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]], [[[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0,
1.0]], [[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sin_taggedData_rank0(self):
    """sin() on rank-0 tagged Data: default and tagged values must both
    match the precomputed references within RES_TOL."""
    arg=Data(-24.7105931613,self.functionspace)
    arg.setTaggedValue(1,-37.638505349)
    res=sin(arg)
    ref=Data(0.40972088744,self.functionspace)
    ref.setTaggedValue(1,0.0605693981609)
    self.assertTrue(isinstance(res,Data),"wrong type of result.")
    self.assertEqual(res.getShape(),(),"wrong shape of result.")
    self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sin_taggedData_rank1(self):
    """sin() on rank-1 tagged Data: default and tagged arrays must both
    match the precomputed references within RES_TOL."""
    arg=Data(numpy.array([19.860974255803598, 48.899013130941427]),self.functionspace)
    arg.setTaggedValue(1,numpy.array([14.319017737469665, -59.326252904429587]))
    res=sin(arg)
    ref=Data(numpy.array([0.84758534887649317, -0.97919776342443343]),self.functionspace)
    ref.setTaggedValue(1,numpy.array([0.98351066065067827, -0.3560220138624291]))
    self.assertTrue(isinstance(res,Data),"wrong type of result.")
    self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
    self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
   def test_sin_taggedData_rank2(self):
      """sin() on rank-2 tagged Data: checks result type, shape (4, 5), and values within RES_TOL (auto-generated)."""
      arg=Data(numpy.array([[37.890743553866486, -52.175642782800914, 61.917008025503975, -8.538416676807941,
-94.304749798245496], [-17.787570828089727, -19.048274463511873, -8.2634570563295142, -56.253500812466228, 87.627404284894396],
[-14.454217499387354, 73.713310630128319, -52.818033941567855, 90.807246316901796, 59.632923220807299], [2.3430650859352511,
56.726750975618302, -69.98474018040875, -30.128841460819984, 0.11683572211893534]]),self.functionspace)
      arg.setTaggedValue(1,numpy.array([[-60.059131288860598, -78.931098378024842, -99.522738887570867, -9.6007074071729619,
-66.250286193785655], [15.651568400631106, 57.654505938017678, -21.858524591969015, -92.849176312405305, -45.214082756051297],
[-85.045751900057368, 10.170104148330267, 85.540180625403167, 34.743740334373229, 27.680023474288177], [72.313181060961483,
-93.451973592336017, 68.715544032783157, -57.013152797460179, 69.395677045629242]]))
      res=sin(arg)
      # Reference values precomputed by the test generator.
      ref=Data(numpy.array([[0.19046098975424755, -0.94296657311066345, -0.7924680880494267, -0.77477635663664268,
-0.056939378452443026], [0.87332421967504115, -0.1974132538348578, -0.91732979816211846, 0.29089958624583467,
-0.33083665313437571], [-0.95015908369497537, -0.99349574918962724, -0.55556403598677151, 0.29450799309098907,
0.057305786038470398], [0.71632946014175625, 0.17714342493014262, -0.76413661669322097, 0.96002319680218495,
0.11657009080686483]]),self.functionspace)
      ref.setTaggedValue(1,numpy.array([[0.36056248350542847, 0.38137387117559401, 0.84588700623357704, 0.17502331279847,
0.2733177892111176], [0.056364979209719938, 0.89384032979663164, -0.13223553506078178, 0.98521137495670197,
-0.94308411592712293], [0.22091275938263169, -0.67821155091384655, -0.65726119089937152, -0.18514670365491534,
0.5599375367095778], [-0.056519892938693105, 0.7144278132655969, -0.38895255454538685, -0.44796245325739548,
0.2769693862212248]]))
      self.assertTrue(isinstance(res,Data),"wrong type of result.")
      self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
      self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
   def test_sin_taggedData_rank3(self):
      """sin() on rank-3 tagged Data: checks result type, shape (6, 2, 2), and values within RES_TOL (auto-generated)."""
      arg=Data(numpy.array([[[-55.804968616645148, 58.560992915206214], [42.652192703056301, -91.42882116095241]],
[[39.310441995226739, 33.870993341596233], [80.401033234710297, 73.000828209637689]], [[5.7360515152169285,
82.330874482727353], [-75.426134234758621, 7.5453684113771118]], [[-19.895965390103115, -88.950469683568315],
[31.355932404642459, 36.487846363447858]], [[67.666456279782437, 58.020389340319895], [-37.89476101041673,
-42.399630457776482]], [[-23.830782444196501, 6.0849055767691738], [91.294861085921525,
-52.847710860098182]]]),self.functionspace)
      arg.setTaggedValue(1,numpy.array([[[-18.125779887526264, -95.420123122001257], [-8.1568796731757516,
32.219735537825017]], [[26.245851241680057, 96.102520961925848], [14.02431043315147, -9.9572364002324321]],
[[17.181359346351925, 47.963801042849468], [-95.527667200507665, -64.204019349910141]], [[-98.658267090216341,
-7.0939733146426107], [-41.783037015039959, -46.517744129299096]], [[-57.202627940362859, 79.223818560607498],
[-70.017222005175654, 23.987327490175844]], [[71.375583584624991, 89.788775552486129], [98.882752617270086,
21.455679838723768]]]))
      res=sin(arg)
      # Reference values precomputed by the test generator.
      ref=Data(numpy.array([[[0.67701499649890673, 0.90409941974537544], [-0.97117328078000487, 0.31706594876811195]],
[[0.99917861697072197, 0.63385392022976472], [-0.95812352836612924, -0.67738144661254696]], [[-0.52024157266400017,
0.60476080407034305], [-0.027906925031102141, 0.95275570243286156]], [[-0.8656310935760867, -0.83375573809919057],
[-0.059958148294456545, -0.93606199112953326]], [[-0.99254113222478446, 0.99509611012034227], [-0.19440335819459723,
0.99992954803650558]], [[0.96408030914008547, -0.19698305893325982], [-0.18755672118757361,
-0.53064744896026339]]]),self.functionspace)
      ref.setTaggedValue(1,numpy.array([[[0.66221880833880609, -0.92166239454618104], [-0.95447604942411934,
0.72000463780845769]], [[0.89707725485516432, 0.95995773196345946], [0.9936384606300962, 0.50765297031738676]],
[[-0.99526034460150747, -0.74458387237142676], [-0.95798362538691173, -0.98033782538997782]], [[0.95477283132130442,
-0.72483027934968591], [0.80893159374577939, -0.56970402250150143]], [[-0.6083342836862955, -0.63189999754289639],
[-0.78468240482370322, -0.9108809171944825]], [[0.77140583862359613, 0.96806942866170043], [-0.99700488005111876,
0.51024422262880564]]]))
      self.assertTrue(isinstance(res,Data),"wrong type of result.")
      self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
      self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
   def test_sin_taggedData_rank4(self):
      """sin() on rank-4 tagged Data: checks result type, shape (3, 2, 3, 4), and values within RES_TOL (auto-generated)."""
      arg=Data(numpy.array([[[[95.057014858860498, -50.023546676067944, -19.520550887074563, -14.915416158905259],
[-72.276262048791182, -57.778697211921127, -10.589425732964969, 25.789691603835237], [71.432158308304565, 83.59773672148529,
32.062292384526415, -22.527648292677569]], [[-39.074624167039062, 92.303231204531414, -1.4192851682411742,
-68.883629510294497], [46.825962360174429, 85.58307574133471, 83.176640836526758, -93.888543574320394], [59.195891546840528,
-43.884372920271829, 46.885208516026694, -24.330067940056807]]], [[[-85.391419005371418, -52.993590690134319, 41.1653245235631,
70.963880995127738], [-6.8099927112965162, 14.755258748362692, -77.400445539133742, -3.6276152651411877], [-88.775588041032492,
89.080955577757038, 0.97522108268380236, 11.904044693769748]], [[28.114564123404421, 17.406751514150216, -90.824155259332073,
66.590378374684491], [-23.314357242078572, 66.459924224373196, 1.190010463508969, -19.129294185825657], [12.759163310131783,
94.16098679455763, -80.470912052594556, -87.769040453881502]]], [[[-68.103901459227245, 96.524362598603318,
-3.2834594710336376, -25.520289808877067], [56.393892750276962, 17.548302326605253, 15.721717465663659, 76.20380788007958],
[-65.13810360798314, -4.9406764890286041, 65.373960553505867, -11.670204391287569]], [[54.171569268655503, 53.359368061868707,
-46.012260984758143, -78.151318891542985], [20.615711960999178, 40.160242458140658, -80.640118059335776, -94.602105820605374],
[58.356391780158305, -78.074396086921837, 69.50163735189372, -68.547938015025153]]]]),self.functionspace)
      arg.setTaggedValue(1,numpy.array([[[[1.279534719127895, 77.967895548901566, 56.093855457217416, 55.241022797731574],
[-99.18622666243968, -10.886097986772711, 44.708474069781573, -26.616286906326849], [-92.350922530980355, 54.858168650538886,
51.906834850649233, 77.865437338097109]], [[24.233171382130436, -49.713594059906626, 75.407909417958365, 86.691179170294532],
[96.871157363423322, 23.148017134014822, -29.421912304159292, -58.976308318504977], [-5.4545343262298189, -89.036199846063681,
-83.742519983327739, 35.372319522991887]]], [[[-95.511576008994936, -83.10806319229718, 63.825192259702391, 92.80910275607684],
[44.426319323500707, 88.815074429332554, -18.021325932633019, -69.917789857742505], [-84.319087816871672, -30.317627038327316,
-38.345827346198959, -81.91833965828738]], [[11.186751110650022, -54.257619696250828, 84.729270493118236, -8.0244377640246114],
[77.805655721275429, -14.229050163525699, 32.671007471850089, -96.882778316793122], [-56.456065533953058, -25.01675593935984,
65.68053871510449, -14.266571167222295]]], [[[-39.965547886942353, 19.317802794261738, 80.566440631464729, 43.745566353754214],
[28.366421988006579, 68.970448204763755, -64.475182800936267, 20.331319130101249], [-87.117125888478327, 91.992851667866603,
30.281916963531046, -39.27414258485895]], [[93.364522015716602, 78.717156004724472, 61.222491284093536, 86.104631528043967],
[29.395392816847448, 16.532063410538484, 10.319065205651995, 10.917748038478663], [-92.263775558488874, 50.00911791017316,
-6.6661922286034354, -51.536766809586055]]]]))
      res=sin(arg)
      # Reference values precomputed by the test generator.
      ref=Data(numpy.array([[[[0.72375966950305115, 0.23958247784190015, -0.62176555507714748, -0.71214373694684574],
[0.019629755360536964, -0.94249864392330207, 0.9186492389762253, 0.61070482468498899], [0.73418989352805997,
0.9408956015682427, 0.60228932671485913, 0.51113062555018463]], [[-0.98099259974089892, -0.93096397739276848,
-0.98854412433116323, 0.22934906491349744], [0.29353958469160507, -0.68897514951619387, 0.99714636265035372,
0.35155913660386867], [0.47447618324952018, 0.097767803237166412, 0.23642150489763264, 0.71921602354647907]]],
[[[0.53829894600069828, -0.4018025366804851, -0.31894868102661073, 0.96159352277239407], [-0.50277617521839357,
0.8149857013480003, -0.90837056606621547, 0.46711279434815001], [-0.7249719991634016, 0.89850915924785046, 0.82782593247756842,
-0.61495265106171171]], [[0.15909090210144514, -0.99182021016560207, -0.27830764816239967, -0.57853598042401821],
[0.96950642440138313, -0.46751584043254807, 0.92837285606475217, -0.27610409657055596], [0.19160059401890014,
-0.086683885904533534, 0.93579131574343599, 0.19430985516438759]]], [[[0.84743598014352139, 0.76110153119788515,
0.14139142291111614, -0.37791993461054291], [-0.15415780695875661, -0.96389276338977681, -0.013753764053628931,
0.72123543134514545], [-0.74152608990617153, 0.97405537665333219, 0.56421749260196419, 0.78093804490852481]],
[[-0.69217200421492153, 0.04768895464089825, -0.89642177035913229, -0.37879826270669459], [0.98097790580385125,
0.62915222544178329, 0.86305700540000696, -0.34695851228481017], [0.97206368588434766, -0.44879839287225581,
0.37704058161193998, 0.53718857715535373]]]]),self.functionspace)
      ref.setTaggedValue(1,numpy.array([[[[0.9578823534926072, 0.54124816795457964, -0.43929370911716231,
-0.96557502749159507], [0.97455749834004068, 0.99401345246169626, 0.66401590677623057, -0.99619607619697281],
[0.94731148656605035, -0.99284417199371577, 0.99751195316042607, 0.62441237853622522]], [[-0.78305944951663353,
0.52429620121273146, 0.0096855803621312825, -0.95610712166060408], [0.49533020111560083, -0.9155481075478985,
0.91177208072521287, -0.65482579444370448], [0.73702027966623906, -0.8779702346494217, -0.88218472628424938,
-0.72758863915572014]]], [[[-0.95324448144023388, -0.9896294977803074, 0.83785349282853971, -0.99128491043904499],
[0.42957507779222781, 0.75159719546211767, 0.73673567820434016, -0.71927034677937474], [-0.48285656690859402,
0.89043473057109679, -0.60256841133763539, -0.23472014974367561]], [[-0.98178130166608535, 0.75163971078732728,
0.093593967784617274, -0.98550749523114423], [0.66996424044290459, -0.99578170573160452, 0.95057449576530817,
-0.48520180467023327], [0.092469940703161432, 0.11572541384732027, 0.2887366377307638, -0.99163895037731464]]],
[[[-0.7676438791646546, 0.45132255753166978, -0.89789686094785226, -0.23452586033429529], [-0.091958006320412053,
-0.14408689254970225, -0.99737060586631121, 0.99603916939064607], [0.74960719408299126, -0.77496816002780011,
-0.9061156382123059, -0.99999103487825647]], [[-0.77281036418314564, -0.17641158915267149, -0.99925644477650222,
-0.95848189929893357], [-0.90056129662048501, -0.73393688041745886, -0.77976304128985197, -0.99697306576558797],
[0.91583747858958031, -0.25356559568768045, -0.37371120994166129, -0.95548059670784435]]]]))
      self.assertTrue(isinstance(res,Data),"wrong type of result.")
      self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
      self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_cos_taggedData_rank0(self):
arg=Data(52.3923651613,self.functionspace)
arg.setTaggedValue(1,92.6499316384)
res=cos(arg)
ref=Data(-0.527866301451,self.functionspace)
ref.setTaggedValue(1,-0.0270483432209)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_cos_taggedData_rank1(self):
arg=Data(numpy.array([-74.897126998165533, 76.673400450800756]),self.functionspace)
arg.setTaggedValue(1,numpy.array([99.065445380314515, -86.748306948983256]))
res=cos(arg)
ref=Data(numpy.array([0.87705625402072684, 0.29133259267605394]),self.functionspace)
ref.setTaggedValue(1,numpy.array([0.10508243263067833, 0.34712991573165969]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
   def test_cos_taggedData_rank2(self):
      """cos() on rank-2 tagged Data: checks result type, shape (4, 5), and values within RES_TOL (auto-generated)."""
      arg=Data(numpy.array([[40.593544538866865, -8.8839015039393558, -49.468879573084436, -24.708042838510465,
20.413703995745891], [-79.108713409558405, -68.647136982462371, -80.858963259372672, -43.373193372132903, -19.507573187625411],
[64.214585816318845, -78.826300537435486, 57.661889712775803, 95.493641862455291, -48.386749127960769], [67.334847000926004,
-34.70671409523483, -36.873199353443709, 3.6386929918643176, 35.181153901083945]]),self.functionspace)
      arg.setTaggedValue(1,numpy.array([[-30.787970015064928, 82.074060959202797, 25.479756845345577, 10.895119259966464,
63.74412167304564], [-60.035262414428935, 54.332578347635263, 18.293985264200202, -9.7571535510820695, -70.419305661969503],
[-66.629926110044835, -43.57208065884415, 57.437026616340574, 20.73240225691022, -80.496461940478952], [19.883318148806438,
-98.729450313914597, 73.172600335425471, -53.114967436072469, 41.781624603862156]]))
      res=cos(arg)
      # Reference values precomputed by the test generator.
      ref=Data(numpy.array([[-0.96961115090719441, -0.85725773398423355, 0.69913962613000602, 0.91116305149951837,
0.0066482036132297587], [-0.84249563945044104, 0.8925167529418252, 0.68043098049226469, 0.82016110495020733,
0.79120632545455827], [0.18695697167168232, -0.95924330257900359, 0.44177331900046657, 0.3192463084776716,
-0.3030933270774539], [-0.20786820316301155, -0.98889106925546555, 0.67788641598410604, -0.87896904245554386,
-0.81176118995632829]]),self.functionspace)
      ref.setTaggedValue(1,numpy.array([[0.80922972543055438, 0.92389756193357364, 0.9403918665642913, -0.10028616065108438,
0.6119531025181365], [-0.9410747774886985, -0.60143262098807782, 0.8495995978399381, -0.94526988131298229,
0.26338463011163266], [-0.79214526943475394, 0.91703450582859369, 0.63068646834096875, -0.30701028605243086,
0.3763461069696134], [0.5115898554852758, -0.22867682408200724, -0.60902205590663616, -0.95763905321643927,
-0.5890447354610614]]))
      self.assertTrue(isinstance(res,Data),"wrong type of result.")
      self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
      self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
   def test_cos_taggedData_rank3(self):
      """cos() on rank-3 tagged Data: checks result type, shape (6, 2, 2), and values within RES_TOL (auto-generated)."""
      arg=Data(numpy.array([[[-36.309518950317376, 0.93081070250144649], [31.019427711604664, -74.09863093545404]],
[[-38.496677203305893, -85.824133574935331], [95.203836891504238, 22.838846451350705]], [[60.75609230931488,
6.003670139700219], [-31.49567872236139, -63.206983059929222]], [[-9.6812822737183666, 0.078728886948780996],
[66.900652835446493, -94.869473621923703]], [[-6.6770163744116076, 22.876520146740972], [-55.737787303088737,
6.2425399184533319]], [[-81.429470177177521, -81.6116687923749], [-97.082967034525325,
-67.37269287178016]]]),self.functionspace)
      arg.setTaggedValue(1,numpy.array([[[-76.097111647315714, 55.656786197463788], [11.140883121429439, 54.147885791873421]],
[[-24.180524234728694, -45.703945118544723], [42.10979532559989, -22.79785029244421]], [[67.322737034238003,
18.304852118006011], [7.015704936158869, -94.401853589660817]], [[35.279952362677818, -7.8217175297602637],
[-81.23891082515344, 54.069639284286751]], [[4.2395499199061106, -11.974337349322099], [-77.095389819359994,
26.711493864407473]], [[-66.565935528207518, 41.011773246282445], [-62.114425668075299, -64.456999774045073]]]))
      res=cos(arg)
      # Reference values precomputed by the test generator.
      ref=Data(numpy.array([[[0.18021343448473101, 0.59718391060744369], [0.92241876797360978, 0.26789121482669265]],
[[0.69845114643777229, -0.53934947535935607], [0.57674535988171327, -0.66171093596184249]], [[-0.48377631503826568,
0.96118931155712628], [0.99682147963778267, 0.93046000296873854]], [[-0.967282744906935, 0.99690248160545425],
[-0.60007048402362761, 0.81289300751647198]], [[0.92344574856902162, -0.63300189293768494], [0.68886045878833047,
0.99917408990060674]], [[0.96843093014337922, 0.99756913767256539], [-0.95342403093885042,
-0.17070899404470352]]]),self.functionspace)
      ref.setTaggedValue(1,numpy.array([[[0.76555810931252977, 0.62794880060935965], [0.14479801574845574,
-0.73792168383216183]], [[0.57987832415218665, -0.15028015665535041], [-0.29714910041836201, -0.69188358205701828]],
[[-0.21969811854411034, 0.85528080687409014], [0.74349177468513394, 0.98815406589512933]], [[-0.75019910814961466,
0.032258506831785543], [0.90368477270236081, -0.78841749829099372]], [[-0.45541567307928488, 0.82980773284067688],
[-0.12603373471688831, -0.0079562249516561077]], [[-0.82955287573817371, -0.9854033904934385], [0.75349957000523238,
-0.054323621236985108]]]))
      self.assertTrue(isinstance(res,Data),"wrong type of result.")
      self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
      self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
   def test_cos_taggedData_rank4(self):
      """cos() on rank-4 tagged Data: checks result type, shape (3, 2, 3, 4), and values within RES_TOL (auto-generated)."""
      arg=Data(numpy.array([[[[37.610264246462435, -85.560668463311075, 92.790982866326573, -21.753674410293172],
[-23.164181244709354, 64.496397223384463, 69.277186049494105, 6.3927475799028457], [67.583896168477764, 36.111360823700437,
30.266912701944563, -54.963319263159384]], [[-58.145969583496672, -97.225443498755453, -56.934313916342269,
35.421162068171839], [65.866615855863898, -57.072034755161027, -95.488754117534285, 81.149953518095799], [-18.30949886526929,
-89.680457620572071, -73.87886392983259, 81.259366551703209]]], [[[1.8677491996480029, 36.828382975770609, -80.40672114911041,
-49.292595896369647], [-37.983864569797767, 35.583525872048824, -42.451157688857613, 33.755615612774108], [32.674252940671579,
90.058275023987306, -96.26155980692819, -90.500098763836021]], [[90.079955965660446, -70.687430685137031, -51.111371179982747,
-74.109677346578138], [-32.896920002886091, 62.26499948195692, -59.833741060334056, 11.794198300820895], [43.437452546746755,
97.455115222231768, 87.354131572829402, 3.2818247457694412]]], [[[78.306182680183269, -64.892175839143391, -55.104588214315342,
-96.744717049677931], [-38.337933398739985, -72.796076467431135, 60.379171901212146, -81.927733276050247], [63.885059436029167,
-31.980639093805863, -57.261994523508044, 17.357515328643643]], [[77.429908518363192, 9.5882415367278355, 72.484182388500756,
63.089077313098954], [84.07047179403375, -21.092477779767819, 41.614178023999727, -98.204118862286279], [-71.275012546567766,
78.730240012789466, -11.573247145900382, 33.098945113087012]]]]),self.functionspace)
      arg.setTaggedValue(1,numpy.array([[[[-40.533710954365063, -21.161469670738327, -69.120302774135837, -14.38267699844107],
[-91.158843533364944, -85.491074434980831, 87.152587959079909, -33.044835488961624], [-68.672525163755367, -1.8217843916724235,
-33.6594071845862, -94.719797609019921]], [[5.7039466733317994, 69.091962753216535, 42.000508648719546, 63.142145355765422],
[79.524244986771464, 62.133683756888729, -63.061242691543782, 51.048740976244147], [-88.653022332832293, -81.214225577010723,
35.550248226917518, 76.160743630564809]]], [[[-63.524226576744191, -56.896009941669014, 63.19522201987138, 66.388629592533931],
[-56.646135485855687, 8.9406032636504307, 89.111063185551444, 12.201705041404125], [64.844923341968638, 93.705153189621086,
42.451679671109446, 55.611996897559266]], [[-50.4500969589295, -56.48304920853591, -43.397487648713806, 24.970484957830536],
[10.247946263340424, 53.164372653170489, 20.567237785266812, 9.4104989925598801], [-56.157152366568909, 42.552761052044843,
56.078516299029076, 18.940543407164128]]], [[[-33.632224346804193, -69.405810068119834, 44.225943185591831,
95.213025790079087], [-38.509288601106675, -62.938695142627999, 82.460256045254965, -40.372955049612315], [71.091785922673608,
-67.332900637102753, 99.968681344820283, 87.597127665814384]], [[-15.352405373769315, 13.607690117097107, -27.035258608117374,
-88.065123343235953], [46.351984421658017, 40.175457656434133, 90.498104230403385, -29.926375524616702], [89.955509906700911,
75.738059235642481, 92.170833583735543, 28.373336853066405]]]]))
      res=cos(arg)
      # Reference values precomputed by the test generator.
      ref=Data(numpy.array([[[[0.99605564800414159, -0.74003978479165422, 0.11375282452021319, -0.97193527337579688],
[-0.38735756213150352, -0.09361056558100582, 0.98688284383678593, 0.99400405568827155], [0.039643724768732305,
-0.016953880290015939, 0.40938737891293392, -0.014551661058647967]], [[-0.026502388661538694, -0.98659374258249288,
0.92655557103089836, -0.6496724166496719], [-0.99429911442146879, 0.86614127304686683, 0.32387445454567942,
0.86207036562540851], [0.85767934528605649, -0.14455871961558475, 0.051413892338749011, 0.91225420946081004]]],
[[[-0.29260780746660703, 0.64426928163414932, 0.29180026978390428, 0.56291609637951678], [0.95973114604298926,
-0.51823819807950822, 0.039646471705651949, -0.69533990552752001], [0.3074098932007972, -0.49938120217558235,
-0.42863722843891311, -0.82188268941215192]], [[-0.51804635050563663, -0.0015959786891496938, 0.66306628166652848,
0.27851730967463495], [0.089682205138485488, 0.84359470875913611, -0.98972429938704287, 0.71639675290673688],
[0.85520784080680545, -0.99783970980023062, 0.81938284847117593, -0.9901835826774219]]], [[[-0.9728315762339087,
-0.47020799835491661, 0.12637793045273601, -0.79930700568503443], [0.80279896236009785, -0.85799367135344373,
-0.77193843948759455, 0.96981526124126383], [0.49478722014669613, 0.84474246657231211, 0.75618918968201176,
0.078674345855532332]], [[-0.4447440791197817, -0.98666955220849251, -0.97422171127622192, 0.96709984785995873],
[-0.7299620834657633, -0.62265062060091214, -0.71548829876243725, -0.68605277637160145], [-0.55567766942870023,
-0.98192413256230948, 0.54607588753058456, -0.11198684728536197]]]]),self.functionspace)
      ref.setTaggedValue(1,numpy.array([[[[-0.9532464104225401, -0.67511267180012213, 0.99998614310380007,
-0.24305111882757385], [-0.99861396263192814, -0.78501698113748208, 0.68804387880332429, -0.058079923319158462],
[0.90367832948274707, -0.24836118666524837, -0.62308777688597838, 0.89065254831191276]], [[0.8368796543112903,
0.99973376956186366, -0.39951907575386714, 0.95224436545888613], [-0.55333911666953939, 0.76602026469360152,
0.97380536659847572, 0.70861811863632929], [0.77224567755776974, 0.89284034215998742, -0.54640627400327102,
0.72309766743732995]]], [[[0.76973301984773468, 0.94028075556243462, 0.93470472120159609, -0.91504217164360968],
[0.99525378084999272, -0.8850594108800458, 0.4117079619272091, 0.93424308242332088], [-0.42799571611881249,
0.85635538858959426, 0.040168038134317727, 0.59247321999590674]], [[0.98300708870591891, 0.99784787493267124,
0.83381726388896149, 0.9868653057824498], [-0.67990129583749026, -0.97069204733006775, -0.14635792168284548,
-0.9998980572655366], [0.92433186130108524, 0.14079090200582819, 0.89149968128133894, 0.99586349366046734]]],
[[[-0.60159909873717321, 0.9580229239875917, 0.97046484832780555, 0.56921448509653549], [0.68937039860779081,
0.99429781336097167, 0.71172390998364865, -0.89258527400759391], [-0.39489284771819805, -0.20977165731745406,
0.84603990208711677, 0.93324045680470225]], [[-0.93745242426842468, 0.5050818721371243, -0.32567073631884641,
0.99495120989406094], [-0.71658296354855922, -0.78676444613630847, -0.82074485588505508, 0.081155964310960879],
[-0.40786936285205416, 0.94280949490341759, -0.48481330596382211, -0.99510320754671833]]]]))
      self.assertTrue(isinstance(res,Data),"wrong type of result.")
      self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
      self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_tan_taggedData_rank0(self):
arg=Data(81.2949649872,self.functionspace)
arg.setTaggedValue(1,12.3613553191)
res=tan(arg)
ref=Data(-0.406904128478,self.functionspace)
ref.setTaggedValue(1,-0.207936773642)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_tan_taggedData_rank1(self):
arg=Data(numpy.array([-95.851047486395899, -87.804262570020512]),self.functionspace)
arg.setTaggedValue(1,numpy.array([35.849126399037175, 13.927401673303507]))
| |
# -*- coding: utf-8 -*-
"""Aula_Python_8.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1wdj-lQTeUJ8_6rHRe5B1DXqY6O_PRltP
#Funções, variáveis globais e classes
"""
a = 5. # this is a global (module-level) variable
if a > 0:
  b = 10. # another global variable: an `if` block does not create a new scope in Python
def func(c):
  # Demonstrates scoping: reads globals a and b; parameter c and d are local to this call.
  d = 0. # this is a local variable
  print('a=',a,' b=',b)
  print('c=',c,' d=',d)
func(3.) # calling the function func
print('a=',a,' b=',b) # a and b still exist, because they are globals
print('c=',c,' d=',d) # c and d no longer exist -> deliberately raises NameError to illustrate locals
"""Quando usamos funções corretamente, é muito comum acabar passando vários argumentos para a função. Por exemplo:
> def particula(indice,x,y,z,vx,vy,vz,m,q,s,nome,taxa_decaimento):
Uma alternativa para evitar passar tantas variáveis é usar variáveis globais.
"""
a = 5
def funcao_boa(b):
  # "good function": assignment creates a LOCAL 'a'; the global is untouched
  a = 10 * b # this is a local variable 'a'
  print("Na funcao_boa 'a' é:", a)
def funcao_hacky(b):
  # "hacky function": `global` makes the assignment rebind the module-level 'a'
  global a
  a = 10 * b # this rebinds the global variable 'a'
  print('Na funcao_hacky "a" é:', a)
print("Antes de iniciar 'a' é:", a)
funcao_boa(2)
print("Após funcao_boa 'a' é:", a) # unchanged: funcao_boa only touched its local
funcao_hacky(2)
print('Após funcao_hacky "a" é:', a) # changed: funcao_hacky rebound the global
"""Entretanto, nem sempre é recomendado usar variáves globais. Uma solução melhor é usar **Classes**.
#Uma brevíssima introdução a Classes em Python
Em Python, toda informação que já usamos é representada na forma de um objeto. Por exemplo, o número 1 é um objeto da classe int, 2.75 é um objeto da classe float e assim por diante.
Classes permitem organizar dados e funcionalidades juntos. Criar uma nova classe significa criar um novo “tipo” de objeto, permitindo que novas “instâncias” (exemplos) desse tipo sejam produzidas. Esse mecanismo de classes constitui a chamada **programação orientada a objetos**. Linguagens orientadas a objetos como o Python permitem a definição de novas classes quando se programa.
Classes são abstrações de alguma 'coisa'. Essa 'coisa' possui um estado e um comportamento. Um estado é definido por um conjunto de variáveis, que chamamos de **atributos**. Os estados podem ser modificados por meio de ações sobre o objeto, que definem o seu comportamento. Essas ações correspondem a funções e são chamadas de **métodos**. Então, cada instância de uma classe pode ter atributos anexados a ela ou métodos.
Por exemplo, podemos definir uma classe Veiculo.
Os atributos podem ser:
* Tipo (avião, carro, barco, ...)
* Ano
* Modelo
* Cor
* Número de passageiros que comporta
* velocidade
Os métodos podem ser:
* acelera(vel): acelera até velocidade vel
* pare(): faz velocidade = 0
Assim como definições de funções (com comando def), é preciso definir e executar as classes antes para que elas tenham qualquer efeito.
"""
# class definition: the simplest possible class, with no attributes or methods yet
class Carro:
  """An empty car class; instances can still be created and given attributes later."""
  pass
"""A classe Carro não possui nenhum atributo nem métodos, mas com ela já podemos criar objetos, ou seja, novas instâncias desta classe.
Para a instanciação de uma classe (“invocar” um objeto classe), usamos a mesma sintaxe de chamar uma função. É como se o objeto classe do exemplo acima fosse uma função sem parâmetros, que devolve uma nova instância da classe.
"""
onix = Carro() # instantiate the class: call it like a function to get a new object
hb20 = Carro() # each call produces an independent instance
onix # evaluating the object shows its repr (Colab cell output)
"""Além de instanciação, as classes também permitem outro tipo de operação: as referências a atributos. Para criar atributos, basta fazer atribuições usando objeto.atributo e usar os atributos como variáveis. """
# Attributes are created on the fly by assigning to objeto.atributo.
onix.ano = 2017
onix.cor = 'preto'
onix.modelo = 'Onix'
hb20.ano = 2015
hb20.cor = 'branco'
hb20.modelo = 'HB20'
hb20.ano += 5 # attributes behave like ordinary variables
print(hb20.ano)
"""
A operação de instanciação (“invocar” um objeto classe) acima criou um objeto vazio. Mas também podemos criar novos objetos com um estado inicial já pré-determinado. No caso da classe Carro, podemos deixar as propriedades de modelo, ano e cor como atributos da classe e inicializá-las quando um objeto é criado (instanciado). Para isso, usamos na definição da classe um método especial chamado \_\_init\_\_(), conhecido como construtor da classe.
"""
class Carro:
  """A car whose model, year and color are fixed at construction time."""

  def __init__(self, m, a, c):
    """Store model *m*, year *a* and color *c* on the new instance."""
    self.modelo = m
    self.ano = a
    self.cor = c
# New instances are created and the attributes 'modelo', 'ano' and 'cor'
# are set automatically from the values passed to the constructor.
renegade = Carro('Renegade',2019, 'marrom')
t_cross = Carro('T-Cross',2021,'cinza')
renegade.ano += 2 # we can read and modify the attributes of the renegade object
print(renegade.ano)
"""Você deve estar se perguntando o que é esse tal de *self*.
Cada método de uma classe recebe como primeiro argumento uma referência à instância que chama o método. Isso permite que o objeto acesse os seus próprios atributos e métodos. Por convenção, chamamos esse primeiro argumento de self.
Desta forma, ao definirmos qualquer método dentro da classe, um atributo pode ser criado, acessado ou modificado usando self.atributo.
**Métodos**
"""
class Carro:
  """A car with a speed limit that prints its state after every speed change.

  Methods are invoked explicitly through the class (``Carro.metodo(obj, ...)``)
  to make the passing of the instance as the first argument visible.
  """

  def __init__(self, m, a, c, vm):
    """Create a stopped car: model *m*, year *a*, color *c*, top speed *vm*."""
    self.modelo = m
    self.ano = a
    self.cor = c
    self.vel = 0 # every car object starts at rest
    self.vmax = vm # speed limit (maximum)

  def imprime_info(self):
    """Print a one-line description of the car's current state."""
    if self.vel == 0: # stopped: show model and color
      msg = f'O carro {self.modelo} {self.cor} está parado.'
    elif self.vel < self.vmax:
      msg = f'O {self.modelo} {self.cor} está andando a {self.vel:.1f} km/h'
    else:
      msg = f'Perigo! O {self.modelo} {self.cor} está desgovernado!'
    print(msg)

  def acelera(self, v):
    """Set the speed to *v*, capped at the car's maximum, then report."""
    self.vel = min(v, self.vmax)
    Carro.imprime_info(self)

  def para(self):
    """Stop the car and report."""
    self.vel = 0
    Carro.imprime_info(self)
g = Carro('Gol', 1998, 'preto', 180.) # g is an instance of Carro
Carro.acelera(g,80.) # calls Carro's acelera method explicitly through the class
"""O método *acelera* tem dois argumentos: *self* e *v*. Quando o método é executado, *self* é o objeto g e *v* é 60. """
Carro.para(g) # calls Carro's para method explicitly through the class
"""Note que na chamada do método *para* apenas a instância *self* é passada, já que este método só recebe um argumento.
Essa notação deixa explícita a passagem dos objetos como primeiro argumento de cada método, mas ela é redundante, pois todo objeto sabe a que classe ele pertence. (Mas lembre do Zen do Python: 'Explícito é melhor que implícito')
Uma maneira mais enxuta de chamar os métodos é semelhante a dos atributos (usando '.'). Como o primeiro argumento é sempre o próprio objeto, ele pode ser evitado. Por exemplo:
"""
g.acelera(50.) # instance call: g is passed automatically as self, only v is given
print(g.vel)
"""Nesse caso em que o objeto é diretamente invocado, o argumento para self é desnecessário e só *v* é passado.
"""
g.para() # instance call with no extra arguments
print(g.vel)
"""Abaixo mostramos o mesmo código que define a classe de antes, só que agora com uma notação mais enxuta. """
class Carro:
  """A car with a speed limit that reports its state after every speed change.

  Same behaviour as the earlier version, but methods call each other via
  ``self.metodo()`` — the idiomatic, more concise notation.
  """

  def __init__(self, m, a, c, vm):
    """Create a stopped car: model *m*, year *a*, color *c*, top speed *vm*."""
    self.modelo = m
    self.ano = a
    self.cor = c
    self.vel = 0 # cars start at rest
    self.vmax = vm # speed limit (maximum)

  def imprime_info(self):
    """Print a one-line description of the car's current state."""
    if self.vel == 0: # stopped: show model and color
      print( f'O carro {self.modelo} {self.cor} está parado.')
    elif self.vel < self.vmax:
      print( f'O {self.modelo} {self.cor} está andando a {self.vel:.1f} km/h')
    else:
      print( f'Perigo! O {self.modelo} {self.cor} está desgovernado!')

  def acelera(self, v):
    """Accelerate to *v* (capped at vmax) and report the new state."""
    self.vel = v if v <= self.vmax else self.vmax
    self.imprime_info() # report the instance's new state

  def para(self):
    """Bring the car to a stop and report."""
    self.vel = 0
    self.imprime_info() # report the instance's new state
"""# Caminhante Aleatório
Um modelo físico fundamental na Mecânica Estatística é o do Caminhante Aleatório. Na sua versão mais simples, trata-se de uma partícula movendo-se em um espaço unidimensional que a tempos fixos (por exemplo, t=1,2,3,4,...) desloca-se com um passo l de tamanho fixo (por exemplo, l=1) com igual probabilidade para um lado ou para o outro nesse espaço unidimensional.
Podemos usar a ideia de classes em Python para explorar o movimento de caminhantes aleatórios.
"""
import random as rand
class RandomWalker:
    """One-dimensional random walker taking unit steps left or right."""

    x = 0  # every walker starts at the origin

    def mov(self):
        """Take a single step: +1 or -1 with equal probability."""
        step = 2 * rand.randint(0, 1) - 1
        self.x += step
# Create one instance (one walker) of the RandomWalker class.
c1 = RandomWalker()
print(f"Posição inicial de c1, c1.x = {c1.x:2d}") # check the starting position
for i in range(10): # move c1 by 10 steps
    c1.mov()
print(f"Posição de c1 após 10 passos c1.x = {c1.x:2d}" ) # check the new position
# Create a second, independent walker c2.
c2 = RandomWalker()
print(f"Posição inicial de c2, c2.x= {c2.x:2d}") # check c2's starting position
for i in range(10): # move c2 by 10 steps
    c2.mov()
print(f"Posição de c2 após 10 passos c2.x = {c2.x:2d}") # check c2's new position
# Build a list of M walkers.
M =10
c = list(RandomWalker() for i in range(M))
# Access the position of the fifth element of the list.
print(f"A posição inicial do elemento 5 da lista é: {c[4].x:2d}")
# Move every walker in the list N steps (the loop continues below).
N=20
for i | |
<filename>scripts/us_bls/cpi/generate_csv_mcf.py<gh_stars>10-100
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Generates the CSVs, StatisticalVariable MCFs, and template MCFs for importing
US Bureau of Labor Statistics CPI-U, CPI-W, and C-CPI-U series into Data Commons.
Only monthly series for the US as a whole and not for parts of the US are
generated. The semi-annually series overlap with the monthly series so they
are not generated.
The script relies heavily on the CSVs provided by BLS that contain information,
such as whether the series are seasonally adjusted, about series of a
particular type, e.g., https://download.bls.gov/pub/time.series/cu/cu.series.
The DataFrames loaded from these CSVs are often referred to as "info_df"
in the script.
Running the script generates these files:
- CSVs
- cpi_u.csv
- cpi_w.csv
- c_cpi_u.csv
- Node MCFs
- cpi_u.mcf
- Contains StatisticalVariables for CPI-U series.
- cpi_w.mcf
- c_cpi_u.mcf
- pop_type_enums.mcf
- Contains populationType enums for all three types of series.
- unit_enums.mcf
- Contains unit enums for all three types of series.
- Template MCFs
- cpi_u.tmcf
- Contains the template MCF for CPI-U series.
- cpi_w.tmcf
- c_cpi_u.tmcf
The CSVs have these columns:
- value
- Observation values for the series.
- date
- Dates of the observations. For monthly series, the dates are of the form
"YYYY-MM" For semi-annually series, the format is the same and the dates
are the last months of the half years, i.e., June and December.
- duration
- Observation periods of the series. The durations are "P1M" and "P6M" for
monthly series and semi-annually series respectively.
- statvar
- DCIDs of the StatisticalVariables measured by the series.
- unit
- DCIDs of the units of the observations.
Usage: python3 generate_csv_mcf.py
'''
import re
import io
import dataclasses
from typing import Set, List, Tuple, Iterable
import requests
import frozendict
import pandas as pd
# Base URL for BLS time-series flat-file downloads.
# BUG FIX: the original value ended with a trailing "/" while every usage
# below prepends its own "/" (f"{_PREFIX}/cu/..."), producing double-slash
# URLs such as ".../time.series//cu/cu.series"; the trailing slash is dropped.
_PREFIX = "https://download.bls.gov/pub/time.series"
# From series types to lists of CSV URLs containing series of those types
SERIES_TYPES_TO_DATA_URLS = frozendict.frozendict({
    "cpi_u": (f"{_PREFIX}/cu/cu.data.1.AllItems",
              f"{_PREFIX}/cu/cu.data.11.USFoodBeverage",
              f"{_PREFIX}/cu/cu.data.12.USHousing",
              f"{_PREFIX}/cu/cu.data.13.USApparel",
              f"{_PREFIX}/cu/cu.data.14.USTransportation",
              f"{_PREFIX}/cu/cu.data.15.USMedical",
              f"{_PREFIX}/cu/cu.data.16.USRecreation",
              f"{_PREFIX}/cu/cu.data.17.USEducationAndCommunication",
              f"{_PREFIX}/cu/cu.data.18.USOtherGoodsAndServices",
              f"{_PREFIX}/cu/cu.data.20.USCommoditiesServicesSpecial"),
    "cpi_w": (f"{_PREFIX}/cw/cw.data.1.AllItems",
              f"{_PREFIX}/cw/cw.data.11.USFoodBeverage",
              f"{_PREFIX}/cw/cw.data.12.USHousing",
              f"{_PREFIX}/cw/cw.data.13.USApparel",
              f"{_PREFIX}/cw/cw.data.14.USTransportation",
              f"{_PREFIX}/cw/cw.data.15.USMedical",
              f"{_PREFIX}/cw/cw.data.16.USRecreation",
              f"{_PREFIX}/cw/cw.data.17.USEducationAndCommunication",
              f"{_PREFIX}/cw/cw.data.18.USOtherGoodsAndServices",
              f"{_PREFIX}/cw/cw.data.20.USCommoditiesServicesSpecial"),
    "c_cpi_u": (f"{_PREFIX}/su/su.data.1.AllItems",)
})
# From series types to URLs of CSVs describing the series
SERIES_TYPES_TO_INFO_URLS = frozendict.frozendict({
    "cpi_u": f"{_PREFIX}/cu/cu.series",
    "cpi_w": f"{_PREFIX}/cw/cw.series",
    "c_cpi_u": f"{_PREFIX}/su/su.series"
})
# From series types to URLs of CSVs containing mappings from
# item code to item name
SERIES_TYPES_TO_EXPENDITURE_TYPES_URLS = frozendict.frozendict({
    "cpi_u": f"{_PREFIX}/cu/cu.item",
    "cpi_w": f"{_PREFIX}/cw/cw.item",
    "c_cpi_u": f"{_PREFIX}/su/su.item"
})
@dataclasses.dataclass(frozen=True)
class SeriesInfo:
    """Information about a series. For descriptions of the fields, see
    Section 4 of {_PREFIX}/cu/cu.txt.
    """
    survey_abbreviation: str  # "CU", "CW" or "SU"
    seasonal_code: str  # "S" (seasonally adjusted) or "U" (unadjusted)
    periodicity_code: str  # "R" (monthly) or "S" (semi-annual)
    area_code: str  # 4-character area code; "0000" means the US as a whole
    item_code: str  # expenditure item code
    series_id: str  # the full series ID the fields were parsed from

    def __post_init__(self):
        """Validates the fields after init."""
        self._validate()

    def _validate(self) -> None:
        """Validates the fields.
        Raises:
            ValueError: Some field(s) is invalid.
        """
        if (not self.series_id or len(self.series_id) < 11 or
                len(self.series_id) > 17):
            self._raise_validation_error("invalid series_id")
        if self.survey_abbreviation not in ("SU", "CU", "CW"):
            # BUG FIX: message previously read "nvalid survey_abbreviation".
            self._raise_validation_error(
                f"invalid survey_abbreviation: {self.survey_abbreviation}")
        if self.seasonal_code not in ("S", "U"):
            # BUG FIX: message previously reported survey_abbreviation
            # instead of the seasonal_code that actually failed validation.
            self._raise_validation_error(
                f"invalid seasonal_code: {self.seasonal_code}")
        if self.periodicity_code not in ("R", "S"):
            self._raise_validation_error(
                f"invalid periodicity_code: {self.periodicity_code}")
        if (not self.area_code or len(self.area_code) != 4):
            self._raise_validation_error(f"invalid area_code: {self.area_code}")

    def _raise_validation_error(self, message: str) -> None:
        """Raises a ValueError prefixed with the series ID."""
        raise ValueError(f"{self.series_id}: {message}")

    def is_us(self) -> bool:
        """Returns if the series is for US as a whole and
        not for parts of US."""
        return self.area_code == "0000"

    def is_monthly(self) -> bool:
        """Returns if the series is monthly."""
        return self.periodicity_code == "R"

    def is_semiannually(self) -> bool:
        """Returns if the series is semi-annually."""
        return self.periodicity_code == "S"

    def get_mmethod(self) -> str:
        """Returns the DCID of the measurement method for this series."""
        # "SU" is the chained C-CPI-U survey; the others are unchained.
        if self.survey_abbreviation == "SU":
            return "BLSChained"
        return "BLSUnchained"

    def get_pop_type(self) -> str:
        """Returns the DCID of the population type for this series."""
        return f"BLSItem/{self.item_code}"

    def get_consumer(self) -> str:
        """Returns the DCID of the consumer for this series."""
        if self.survey_abbreviation == "CW":
            return "UrbanWageEarnerAndClericalWorker"
        return "UrbanConsumer"

    def get_mqual(self) -> str:
        """Returns the DCID of the measurement qualifier for this series."""
        if self.seasonal_code == "S":
            return "BLSSeasonallyAdjusted"
        return "BLSSeasonallyUnadjusted"

    def get_statvar(self) -> str:
        """Returns the DCID of the statistical variable for this series."""
        return ("ConsumerPriceIndex_"
                f"{self.get_pop_type()}_"
                f"{self.get_consumer()}_"
                f"{self.get_mqual()}")

    def get_unit(self, info_df: pd.DataFrame) -> Tuple[str, str]:
        """Returns the DCID of the unit for this series and a description
        of the unit.
        Args:
            info_df: DataFrame containing information about the series.
        Raises:
            ValueError: The base period obtained from the dataframe is invalid.
        """
        row = info_df[info_df["series_id"] == self.series_id]
        num_rows = row.shape[0]
        if num_rows != 1:
            self._raise_validation_error(f"found {num_rows} in info_df")
        base = row["base_period"].iloc[0]
        # base is described in one of three ways:
        # "YYYY=100", e.g., "1967=100",
        # "YYYY-YY=100", e.g., "1982-84=100", or
        # "MONTH YYYY=100", e.g., "DECEMBER 2009=100"
        if not re.fullmatch(r"\d{4}=100|\d{4}-\d{2}=100|[A-Z]+ \d{4}=100",
                            base):
            self._raise_validation_error(f"invalid base_period: {base}")
        if " " in base:
            # "MONTH YYYY=100" form.
            month, year, _ = re.split(r"[ =]", base)
            month = month.lower().title()
            return (f"IndexPointBasePeriod{month}{year}Equals100",
                    f"The reference base is {month} {year} equals 100.")
        elif "-" in base:
            # "YYYY-YY=100" form; expand the 2-digit end year to 4 digits.
            year_start, year_end, _ = re.split(r"[-=]", base)
            year_end = year_start[:2] + year_end
            return (
                f"IndexPointBasePeriod{year_start}To{year_end}Equals100",
                f"The reference base is {year_start} to {year_end} equals 100.")
        # "YYYY=100" form.
        year, _ = base.split("=")
        return (f"IndexPointBasePeriod{year}Equals100",
                f"The reference base is {year} equals 100.")
def parse_series_id(series_id: str) -> SeriesInfo:
    """Break a BLS series ID into its components. See Section 4 of
    {_PREFIX}/cu/cu.txt for the layout of series IDs."""
    survey = series_id[:2]
    seasonal = series_id[2]
    periodicity = series_id[3]
    area = series_id[4:8]
    item = series_id[8:]
    return SeriesInfo(survey_abbreviation=survey,
                      seasonal_code=seasonal,
                      periodicity_code=periodicity,
                      area_code=area,
                      item_code=item,
                      series_id=series_id)
def generate_unit_enums(info_df: pd.DataFrame, targets: Set[str]) -> Set[str]:
    """Returns a set of enum definitions for the units required by the series
    identified by their IDs in "targets".
    Args:
        info_df: DataFrame containing information about
            all the series in targets.
        targets: Set of series IDs to generate unit enums for.
    """
    enum_defs = set()
    for series_id in targets:
        unit_dcid, unit_desc = parse_series_id(series_id).get_unit(info_df)
        enum_defs.add((f"Node: dcid:{unit_dcid}\n"
                       "typeOf: dcs:UnitOfMeasure\n"
                       f"description: \"{unit_desc}\"\n"
                       "descriptionUrl: \"https://www.bls.gov/cpi/"
                       "technical-notes/home.htm\"\n\n"))
    return enum_defs
def generate_pop_type_enums(url: str, targets: Set[str]) -> Set[str]:
    """Returns a set of enum definitions for the population types required
    by the series identified by their IDs in "targets".
    Args:
        url: URL to the CSV containing the mappings from item codes to item
            names needed by the type of the series in "targets".
        targets: Set of series IDs to generate population
            type enums for.
    Raises:
        ValueError: Some series(s) does not have an item code mapping.
    """
    df = _download_df(url, sep="\t", usecols=("item_code", "item_name"))
    if "item_code" not in df.columns or "item_name" not in df.columns:
        raise ValueError("item_code or/and item_name columns missing")
    df = df[["item_code", "item_name"]]
    # Every target series must have an item_code mapping, i.e., an enum
    # defined for its population type.
    known_codes = set(df["item_code"])
    for series_id in targets:
        info = parse_series_id(series_id)
        if info.item_code not in known_codes:
            raise ValueError(
                f"{info} does not have an item_code mapping")
    return {
        (f"Node: dcid:BLSItem/{row.item_code}\n"
         "typeOf: dcs:EconomicProductEnum\n"
         f"name: \"{row.item_name}\"\n\n")
        for row in df.itertuples(index=False)
    }
def write_csv(urls: Iterable[str], dest: str, info_df: pd.DataFrame,
              targets: Set[str]) -> pd.DataFrame:
    """Writes out the CSV containing series of a particular type, e.g., CPI-U.
    Args:
        urls: URLs to the CSVs containing the series.
        dest: Path to the output CSV.
        info_df: DataFrame containing information about the series.
        targets: Series to include in the output CSV.
    Returns:
        The combined DataFrame that was written to "dest".
    """
    # BUG FIX: DataFrame.append was deprecated in pandas 1.4 and removed in
    # pandas 2.0; collect the per-URL frames and concatenate them once.
    # (The return annotation was also wrong: the function returns the frame.)
    frames = [_generate_csv(url, info_df, targets) for url in urls]
    result = pd.concat(frames) if frames else pd.DataFrame()
    result.to_csv(dest, index=False)
    return result
def _download_df(url: str,
                 sep: str = "\t",
                 usecols: Tuple[str] = None) -> pd.DataFrame:
    """Downloads a CSV from a URL and loads it into a DataFrame.
    Args:
        url: URL to the CSV.
        sep: Separators used by the CSV. Can be a regex pattern.
        usecols: Columns to keep.
    """
    response = requests.get(url)
    response.raise_for_status()
    df = pd.read_csv(io.StringIO(response.text),
                     sep=sep,
                     dtype="str",
                     usecols=usecols)
    # BLS headers sometimes carry stray whitespace; normalize column names.
    return df.rename(columns=lambda col: col.strip())
def _generate_csv(url: str, info_df: pd.DataFrame,
targets: List[str]) -> pd.DataFrame:
"""Returns a DataFrame containing series obtained from "url" and specified
by "targets".
Args:
url: URL to a CSV containing some of the series in "targets".
info_df: DataFrame containing informatino about the series.
targets: Series to include in the return DataFrame.
Returns:
A DataFrame | |
/= temperature
with timings("dec.sample"):
local_order_idxs = Categorical(logits=filtered_logits).sample()
all_local_order_idxs.append(local_order_idxs)
with timings("dec.order_idxs"):
# skip clamp_and_mask since it is handled elsewhere and is slow
global_order_idxs = local_order_idxs_to_global(
local_order_idxs, cand_idxs, clamp_and_mask=False
)
all_global_order_idxs.append(global_order_idxs)
with timings("dec.order_emb"):
sampled_order_input = global_order_idxs.masked_fill(
global_order_idxs == EOS_IDX, 0
)
if teacher_force_orders is None:
order_input = sampled_order_input
else:
order_input = torch.where(
teacher_force_orders[:, step] == NO_ORDER_ID,
sampled_order_input,
teacher_force_orders[:, step],
)
order_emb = self.order_embedding(order_input)
if self.featurize_output:
order_emb += self.order_feat_lin(self.order_feats[order_input])
if self.relfeat_output:
order_enc = order_enc + order_emb[:, None] * alignments[:, :, None]
with timings("dec.fin"):
stacked_global_order_idxs = torch.stack(all_global_order_idxs, dim=1)
stacked_local_order_idxs = torch.stack(all_local_order_idxs, dim=1)
stacked_logits = cat_pad_sequences(
[x.unsqueeze(1) for x in all_logits],
seq_dim=2,
cat_dim=1,
pad_value=LOGIT_MASK_VAL,
)
r = stacked_global_order_idxs, stacked_local_order_idxs, stacked_logits
# logging.debug(f"Timings[dec, {enc.shape[0]}x{step}] {timings}")
return r
def _pad_last_dims(tensor, partial_new_shape, pad_value):
assert len(tensor.shape) >= len(partial_new_shape), (tensor.shape, partial_new_shape)
new_shape = list(tensor.shape)[: len(tensor.shape) - len(partial_new_shape)] + list(
partial_new_shape
)
new_tensor = tensor.new_full(new_shape, pad_value)
new_tensor[[slice(None, D) for D in tensor.shape]].copy_(tensor)
return new_tensor
def top_p_filtering(
    logits: torch.Tensor, top_p: Union[float, torch.Tensor], min_tokens_to_keep=1
) -> torch.Tensor:
    """Filter a distribution of logits using nucleus (top-p) filtering.
    From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
    Args:
        logits: tensor of shape [batch_size, vocab]. Logits distribution shape
        top_p: float or tensor of shape [batch_size, 1]. Keep the top tokens
            with cumulative probability >= top_p (nucleus filtering). Nucleus
            filtering is described in Holtzman et al.
            (http://arxiv.org/abs/1904.09751)
        min_tokens_to_keep: int, make sure we keep at least
            min_tokens_to_keep per batch example in the output
    Returns:
        Boolean tensor of shape [batch_size, vocab] marking elements to remove.
    """
    sorted_logits, sorted_indices = torch.sort(logits, descending=True)
    cdf = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
    # In sorted order, drop tokens once the cumulative probability passes
    # top_p (a token exactly at 0 probability mass is kept).
    remove_sorted = cdf > top_p
    if min_tokens_to_keep > 1:
        # Keep at least min_tokens_to_keep (set min_tokens_to_keep - 1 slots,
        # since the shift below protects one more).
        remove_sorted[..., :min_tokens_to_keep] = 0
    # Shift right so the first token past the threshold is also kept.
    remove_sorted[..., 1:] = remove_sorted[..., :-1].clone()
    remove_sorted[..., 0] = 0
    # Scatter the sorted-order mask back to the original vocabulary order.
    return remove_sorted.scatter(1, sorted_indices, remove_sorted)
class DiplomacyModelEncoder(nn.Module):
    """Two-tower graph encoder: one stack of DiplomacyModelBlocks for the
    board state and one for the previous orders. The tower outputs are
    concatenated channel-wise and, optionally, mixed by further merged blocks.
    """

    def __init__(
        self,
        *,
        board_state_size,  # 35
        prev_orders_size,  # 40
        inter_emb_size,  # 120
        num_blocks,  # 16
        A,  # 81x81
        dropout,
        learnable_A=False,
        residual_linear=False,
        merged_gnn=False,
        layerdrop=0,
        use_global_pooling=False,
    ):
        """Builds the encoder.

        Args:
            board_state_size: Channels of the board-state input.
            prev_orders_size: Channels of the previous-orders input.
            inter_emb_size: Channels of every intermediate block.
            num_blocks: Blocks per tower.
            A: Adjacency matrix shared by all graph-conv blocks.
            dropout: Dropout rate passed to each block.
            learnable_A: Whether A is trained (passed through to the blocks).
            residual_linear: Adds a linear skip path inside each block.
            merged_gnn: Adds num_blocks // 2 blocks over the concatenated towers.
            layerdrop: Probability of skipping a block during training (0 disables).
            use_global_pooling: Enables global pooling inside each block.
        """
        super().__init__()
        # board state blocks: the first block changes the channel width
        # (no residual), the remaining num_blocks - 1 are residual.
        self.board_blocks = nn.ModuleList()
        self.board_blocks.append(
            DiplomacyModelBlock(
                in_size=board_state_size,
                out_size=inter_emb_size,
                A=A,
                residual=False,
                learnable_A=learnable_A,
                dropout=dropout,
                residual_linear=residual_linear,
                use_global_pooling=use_global_pooling,
            )
        )
        for _ in range(num_blocks - 1):
            self.board_blocks.append(
                DiplomacyModelBlock(
                    in_size=inter_emb_size,
                    out_size=inter_emb_size,
                    A=A,
                    residual=True,
                    learnable_A=learnable_A,
                    dropout=dropout,
                    residual_linear=residual_linear,
                    use_global_pooling=use_global_pooling,
                )
            )
        # LayerDrop-style stochastic depth: a fixed-seed RNG decides per
        # forward pass which blocks to skip during training.
        if layerdrop > 1e-5:
            assert 0 < layerdrop <= 1.0, layerdrop
            self.layerdrop_rng = np.random.RandomState(0)
        else:
            self.layerdrop_rng = None
        self.layerdrop = layerdrop
        # prev orders blocks: same architecture as the board tower.
        self.prev_orders_blocks = nn.ModuleList()
        self.prev_orders_blocks.append(
            DiplomacyModelBlock(
                in_size=prev_orders_size,
                out_size=inter_emb_size,
                A=A,
                residual=False,
                learnable_A=learnable_A,
                dropout=dropout,
                residual_linear=residual_linear,
                use_global_pooling=use_global_pooling,
            )
        )
        for _ in range(num_blocks - 1):
            self.prev_orders_blocks.append(
                DiplomacyModelBlock(
                    in_size=inter_emb_size,
                    out_size=inter_emb_size,
                    A=A,
                    residual=True,
                    learnable_A=learnable_A,
                    dropout=dropout,
                    residual_linear=residual_linear,
                    use_global_pooling=use_global_pooling,
                )
            )
        self.merged_gnn = merged_gnn
        if self.merged_gnn:
            # Optional joint tower over the concatenated (2x channels) embedding.
            self.merged_blocks = nn.ModuleList()
            for _ in range(num_blocks // 2):
                self.merged_blocks.append(
                    DiplomacyModelBlock(
                        in_size=2 * inter_emb_size,
                        out_size=2 * inter_emb_size,
                        A=A,
                        residual=True,
                        learnable_A=learnable_A,
                        dropout=dropout,
                        residual_linear=residual_linear,
                        use_global_pooling=use_global_pooling,
                    )
                )

    def forward(self, x_bo, x_po):
        """Encodes board-state features x_bo and previous-order features x_po
        into a single state embedding (channel-wise concatenation of the towers).
        """
        def apply_blocks_with_layerdrop(blocks, tensor):
            for i, block in enumerate(blocks):
                # Never drop block 0: it changes the channel width.
                drop = (
                    i > 0
                    and self.training
                    and self.layerdrop_rng is not None
                    and self.layerdrop_rng.uniform() < self.layerdrop
                )
                if drop:
                    # To make distributed training happy we still need grads
                    # for all of the skipped block's params, so add a zero
                    # term that touches every parameter.
                    dummy = sum(w.sum() * 0 for w in block.parameters())
                    tensor = dummy + tensor
                else:
                    tensor = block(tensor)
            return tensor

        y_bo = apply_blocks_with_layerdrop(self.board_blocks, x_bo)
        y_po = apply_blocks_with_layerdrop(self.prev_orders_blocks, x_po)
        state_emb = torch.cat([y_bo, y_po], -1)
        if self.merged_gnn:
            state_emb = apply_blocks_with_layerdrop(self.merged_blocks, state_emb)
        return state_emb
class DiplomacyModelBlock(nn.Module):
    """One graph-conv -> batch-norm -> ReLU -> dropout block, optionally with
    a residual connection, a linear skip path, and global average pooling."""

    def __init__(
        self,
        *,
        in_size,
        out_size,
        A,
        dropout,
        residual=True,
        learnable_A=False,
        residual_linear=False,
        use_global_pooling=False,
    ):
        super().__init__()
        self.graph_conv = GraphConv(in_size, out_size, A, learnable_A=learnable_A)
        self.batch_norm = nn.BatchNorm1d(A.shape[0])
        self.dropout = nn.Dropout(dropout or 0.0)
        self.residual = residual
        self.residual_linear = residual_linear
        if residual_linear:
            self.residual_lin = nn.Linear(in_size, out_size)
        self.use_global_pooling = use_global_pooling
        if use_global_pooling:
            self.post_pool_lin = nn.Linear(out_size, out_size, bias=False)

    def forward(self, x):
        # x: [batch, location, channel]
        out = self.graph_conv(x)
        if self.residual_linear:
            out += self.residual_lin(x)
        out = self.batch_norm(out)
        if self.use_global_pooling:
            # Global average pool over locations, transform, and add the
            # result back as per-channel biases.
            pooled = torch.mean(out, dim=1, keepdim=True)
            pooled = self.dropout(pooled)
            pooled = self.post_pool_lin(pooled)
            out += pooled
        out = F.relu(out)
        out = self.dropout(out)
        if self.residual:
            out += x
        return out
class TransformerEncoder(nn.Module):
    """Transformer alternative to the graph encoder: concatenates the board
    state and previous orders, projects them, adds a learned positional bias
    and runs a stack of standard transformer encoder layers."""

    def __init__(
        self, *, board_state_size, prev_orders_size, spatial_size, inter_emb_size, encoder_cfg
    ):
        super().__init__()
        # Torch's encoder layer requires that input size == output size and
        # that it equals num_heads * channels_per_head, so the input size
        # must divide evenly by the head count. Due to historical accident,
        # inter_emb_size is only half of the actual internal channel count,
        # hence the doubling.
        num_heads = encoder_cfg.num_heads
        d_model = inter_emb_size * 2
        channels_per_head = d_model // num_heads
        assert d_model == channels_per_head * num_heads
        self.initial_linear = nn.Linear(
            board_state_size + prev_orders_size, d_model, bias=False
        )
        self.initial_positional_bias = nn.Parameter(he_init((spatial_size, d_model)))
        self.blocks = nn.ModuleList(
            nn.TransformerEncoderLayer(
                d_model=d_model,
                nhead=encoder_cfg.num_heads,
                dim_feedforward=encoder_cfg.ff_channels,
                dropout=encoder_cfg.dropout,
            )
            for _ in range(encoder_cfg.num_blocks)
        )
        layerdrop = encoder_cfg.layerdrop
        if layerdrop is not None and layerdrop > 1e-5:
            assert 0 < layerdrop <= 1.0, layerdrop
            # Fixed-seed RNG for LayerDrop-style stochastic depth.
            self.layerdrop_rng = np.random.RandomState(0)
        else:
            self.layerdrop_rng = None
        self.layerdrop = layerdrop

    def forward(self, x_bo, x_po):
        h = torch.cat([x_bo, x_po], -1)
        h = self.initial_linear(h)
        h = h + self.initial_positional_bias
        # h: [batch, spatial, d_model], but torch's transformer layers want
        # [spatial, batch, d_model].
        h = h.transpose(0, 1).contiguous()
        for block in self.blocks:
            skip = (
                self.training
                and self.layerdrop_rng is not None
                and self.layerdrop_rng.uniform() < self.layerdrop
            )
            if skip:
                # To make distributed training happy we need grads for all
                # params, so add a zero term touching every parameter.
                h = sum(w.sum() * 0 for w in block.parameters()) + h
            else:
                h = block(h)
        return h.transpose(0, 1).contiguous()
def he_init(shape):
    """He/Kaiming-style normal initializer: std = sqrt(2 / fan_in), where
    fan_in is the second-to-last dim (or the last for 1-D shapes)."""
    if len(shape) >= 2:
        fan_in = shape[-2]
    else:
        fan_in = shape[-1]
    return torch.randn(shape) * math.sqrt(2.0 / fan_in)
class GraphConv(nn.Module):
    """Graph convolution computing A @ (x @ W) + b over the 81 map locations."""

    def __init__(self, in_size, out_size, A, learnable_A=False):
        super().__init__()
        # A: adjacency matrix, shape (81, 81); optionally trained.
        self.A = nn.Parameter(A).requires_grad_(learnable_A)
        # One weight matrix per location (per-node weights).
        self.W = nn.Parameter(he_init((len(self.A), in_size, out_size)))
        self.b = nn.Parameter(torch.zeros(1, 1, out_size))

    def forward(self, x):
        """Computes A*x*W + b.
        x -> (B, 81, in_size); returns (B, 81, out_size).
        """
        h = x.transpose(0, 1)        # (b, N, in) -> (N, b, in)
        h = torch.matmul(h, self.W)  # (N, b, in) @ (N, in, out) -> (N, b, out)
        h = h.transpose(0, 1)        # (N, b, out) -> (b, N, out)
        h = torch.matmul(self.A, h)  # mix neighbors: (N, N) @ (b, N, out)
        return h + self.b
class ValueDecoder(nn.Module):
    """Maps the flattened encoder output to a length-7 distribution
    (one value per power) that sums to 1."""

    def __init__(
        self,
        *,
        inter_emb_size,
        spatial_size,
        dropout,
        init_scale=1.0,
        softmax=False,
    ):
        super().__init__()
        emb_flat_size = spatial_size * inter_emb_size * 2
        self.prelin = nn.Linear(emb_flat_size, inter_emb_size)
        self.lin = nn.Linear(inter_emb_size, len(POWERS))
        self.dropout = nn.Dropout(dropout if dropout is not None else 0.0)
        self.softmax = softmax
        # Scale down the final layer's init so early value estimates are small.
        torch.nn.init.xavier_normal_(self.lin.weight, gain=init_scale)
        bound = init_scale / (len(POWERS) ** 0.5)
        torch.nn.init.uniform_(self.lin.bias, -bound, bound)

    def forward(self, enc):
        """Returns [B, 7] FloatTensor summing to 1 across dim=1"""
        batch = enc.shape[0]
        hidden = self.prelin(enc.view(batch, -1))
        hidden = self.dropout(F.relu(hidden))
        scores = self.lin(hidden)
        if self.softmax:
            return F.softmax(scores, -1)
        # Square-and-normalize keeps outputs non-negative and summing to 1.
        squared = scores ** 2
        return squared / squared.sum(dim=1, keepdim=True)
def compute_order_features():
"""Returns a [13k x D] tensor where each row contains (one-hot) features for one order in the vocabulary.
"""
order_vocabulary = get_order_vocabulary()
# assert order_vocabulary[0] == EOS_TOKEN
# order_vocabulary = order_vocabulary[1:] # we'll fix this up at the end
order_split = [o.split() for o in order_vocabulary]
# fixup strange stuff in the dataset
for s in order_split:
# fixup "A SIL S A PRU"
if len(s) == 5 and s[2] == "S":
s.append("H")
# fixup "A SMY - ROM VIA"
if len(s) == 5 and s[-1] == "VIA":
s.pop()
loc_idx = {loc: i | |
= _splom_part_boxes(
box, n,
x, col, x_tickvals, x_ticktext, x_range,
y, row, y_tickvals, y_ticktext, y_range,
background_opacity=0.05,
)
for s in shapes:
fig.add_shape(s)
if refpoint is not None:
shapes = _splom_part_ref_point(
n,
x, x_range, x_tickvals, x_ticktext, refpoint.get(col, None),
y, y_range, y_tickvals, y_ticktext, refpoint.get(row, None),
)
for s in shapes: fig.add_shape(s)
if colnum == 1:
fig.update_yaxes(
range=y_range,
row=rownum,
col=colnum,
)
if not row_titles_top:
fig.update_yaxes(
title_text=scope.tagged_shortname(row, wrap_width=18),
row=rownum,
col=colnum,
)
if y_ticktext is not None:
fig.update_yaxes(
row=rownum,
col=colnum,
tickmode = 'array',
ticktext = y_ticktext,
tickvals = y_tickvals,
)
if rownum == len(rows):
fig.update_xaxes(
title_text=scope.tagged_shortname(col, wrap_width=18),
row=rownum,
col=colnum,
range=x_range,
)
if x_ticktext is not None:
fig.update_xaxes(
row=rownum,
col=colnum,
tickmode='array',
ticktext=x_ticktext,
tickvals=x_tickvals,
)
fig.update_layout(margin=dict(
l=10, r=10, t=30 if row_titles_top else 10, b=10,
))
metadata = dict(
rows=rows,
cols=cols,
selected_color=selected_color,
unselected_color=unselected_color,
use_gl=use_gl,
row_titles=row_titles,
size=size,
refpoint=refpoint,
marker_size=marker_size,
)
if isinstance(mass, int):
metadata['mass'] = mass
fig.update_layout(meta=metadata)
if figure_class is not None:
fig = figure_class(fig)
return fig
from ...scope.box import Bounds
def _splom_marker_opacity(
data_index,
selection,
mass=1000,
):
if isinstance(mass, int):
from ...viz import ScatterMass
mass = ScatterMass(mass)
data_index = pandas.Index(data_index)
if selection is None:
marker_opacity = mass.get_opacity(data_index)
else:
mo = [
mass.get_opacity(data_index[~selection]),
mass.get_opacity(data_index[selection]),
]
marker_opacity = pandas.Series(data=mo[0], index=data_index)
marker_opacity[selection] = mo[1]
return marker_opacity
def _splom_part_boxes(
        box, ax_num,
        x, x_label, x_tickvals, x_ticktext, x_range,
        y, y_label, y_tickvals, y_ticktext, y_range,
        background_opacity=0.2,
):
    """Build plotly rectangle shapes visualizing `box` thresholds on one
    scatterplot-matrix cell (the axis pair numbered `ax_num`).

    Returns a list of go.layout.Shape: shaded background rectangles first,
    then outlined foreground rectangles. Returns an empty list when `box`
    is None or neither axis label has a threshold in `box`.
    """
    background_shapes, foreground_shapes = [], []
    if box is None: return []
    if x_label in box.thresholds or y_label in box.thresholds:
        # --- X extent(s) of the rectangle ---
        x_lo, x_hi = None, None
        thresh = box.thresholds.get(x_label)
        if isinstance(thresh, Bounds):
            # Continuous bounds: a single (lo, hi) pair; either end may be None.
            x_lo, x_hi = thresh
        if isinstance(thresh, set):
            # Categorical selection: one narrow band (+/-0.33 around the tick)
            # per selected category.
            x_lo, x_hi = [], []
            for tickval, ticktext in zip(x_tickvals, x_ticktext):
                if ticktext in thresh:
                    x_lo.append(tickval -0.33)
                    x_hi.append(tickval +0.33)
        if x_range is None:
            x_range = [x.min(), x.max()]
            # No explicit axis range: extend open edges slightly beyond the data.
            x_width_buffer = (x_range[1] - x_range[0]) * 0.02
        else:
            # NOTE(review): the buffer sign flips when an explicit range is
            # given, pulling open edges slightly inside the range —
            # presumably to keep the rectangle visible within fixed axes;
            # confirm intent.
            x_width_buffer = -(x_range[1] - x_range[0]) * 0.02
        # Open-ended bounds default to the (buffered) axis extremes.
        if x_lo is None:
            x_lo = x_range[0] - x_width_buffer
        if x_hi is None:
            x_hi = x_range[1] + x_width_buffer
        # Normalize scalars to lists so both cases iterate uniformly below.
        if not isinstance(x_lo, list):
            x_lo = [x_lo]
        if not isinstance(x_hi, list):
            x_hi = [x_hi]
        # --- Y extent(s) of the rectangle (mirror of the X logic) ---
        y_lo, y_hi = None, None
        thresh = box.thresholds.get(y_label)
        if isinstance(thresh, Bounds):
            y_lo, y_hi = thresh
        if isinstance(thresh, set):
            y_lo, y_hi = [], []
            for tickval, ticktext in zip(y_tickvals, y_ticktext):
                if ticktext in thresh:
                    y_lo.append(tickval -0.33)
                    y_hi.append(tickval +0.33)
        if y_range is None:
            y_range = [y.min(), y.max()]
            y_width_buffer = (y_range[1] - y_range[0]) * 0.02
        else:
            y_width_buffer = -(y_range[1] - y_range[0]) * 0.02
        if y_lo is None:
            y_lo = y_range[0] - y_width_buffer
        if y_hi is None:
            y_hi = y_range[1] + y_width_buffer
        if not isinstance(y_lo, list):
            y_lo = [y_lo]
        if not isinstance(y_hi, list):
            y_hi = [y_hi]
        # One rectangle per (x band, y band) combination.
        x_pairs = list(zip(x_lo, x_hi))
        y_pairs = list(zip(y_lo, y_hi))
        background_shapes += [
            # Rectangle background color
            go.layout.Shape(
                type="rect",
                xref=f"x{ax_num}",
                yref=f"y{ax_num}",
                x0=x_pair[0],
                y0=y_pair[0],
                x1=x_pair[1],
                y1=y_pair[1],
                line=dict(
                    width=0,
                ),
                fillcolor=colors.DEFAULT_BOX_BG_COLOR,
                opacity=background_opacity,
                layer="below",
            )
            for x_pair in x_pairs
            for y_pair in y_pairs
        ]
        foreground_shapes += [
            # Rectangle reference to the axes
            go.layout.Shape(
                type="rect",
                xref=f"x{ax_num}",
                yref=f"y{ax_num}",
                x0=x_pair[0],
                y0=y_pair[0],
                x1=x_pair[1],
                y1=y_pair[1],
                line=dict(
                    width=2,
                    color=colors.DEFAULT_BOX_LINE_COLOR,
                ),
                fillcolor='rgba(0,0,0,0)',
                opacity=1.0,
            )
            for x_pair in x_pairs
            for y_pair in y_pairs
        ]
    return background_shapes + foreground_shapes
def _splom_part_ref_point(
        ax_num,
        x, x_range, x_tickvals, x_ticktext, x_refpoint,
        y, y_range, y_tickvals, y_ticktext, y_refpoint,
):
    """Build plotly line shapes marking a reference point on one
    scatterplot-matrix cell (axis pair `ax_num`).

    An x reference value becomes a vertical line spanning the y range;
    a y reference value becomes a horizontal line spanning the x range.
    Either may be None, yielding zero, one or two line shapes.
    """
    foreground_shapes = []
    if x_refpoint is not None:
        # Booleans are displayed as categories, so convert to their label.
        if isinstance(x_refpoint, (bool, numpy.bool_)):
            x_refpoint = str(x_refpoint)
        # Categorical axis: map the label to its numeric tick position.
        if x_tickvals is not None and x_ticktext is not None:
            for tickval, ticktext in zip(x_tickvals, x_ticktext):
                if ticktext == x_refpoint:
                    x_refpoint = tickval
                    break
        if y_range is None:
            y_range = [y.min(), y.max()]
            # Make sure the line also covers a y reference point that lies
            # outside the data's y extent.
            if y_refpoint is not None:
                if y_refpoint < y_range[0]:
                    y_range[0] = y_refpoint
                if y_refpoint > y_range[1]:
                    y_range[1] = y_refpoint
            y_width_buffer = (y_range[1] - y_range[0]) * 0.02
        else:
            # NOTE(review): buffer sign flips for explicit ranges, shrinking
            # the line slightly inside the axes — same convention as
            # _splom_part_boxes; confirm intent.
            y_width_buffer = -(y_range[1] - y_range[0]) * 0.02
        y_lo = y_range[0] - y_width_buffer
        y_hi = y_range[1] + y_width_buffer
        # Vertical line at x_refpoint spanning the (buffered) y range.
        foreground_shapes.append(
            go.layout.Shape(
                type="line",
                xref=f"x{ax_num}",
                yref=f"y{ax_num}",
                y0=y_lo,
                x0=x_refpoint,
                y1=y_hi,
                x1=x_refpoint,
                **colors.DEFAULT_REF_LINE_STYLE,
            )
        )
    if y_refpoint is not None:
        # Mirror of the x branch: horizontal line at y_refpoint.
        if isinstance(y_refpoint, (bool, numpy.bool_)):
            y_refpoint = str(y_refpoint)
        if y_tickvals is not None and y_ticktext is not None:
            for tickval, ticktext in zip(y_tickvals, y_ticktext):
                if ticktext == y_refpoint:
                    y_refpoint = tickval
                    break
        if x_range is None:
            x_range = [x.min(), x.max()]
            if x_refpoint is not None:
                if x_refpoint < x_range[0]:
                    x_range[0] = x_refpoint
                if x_refpoint > x_range[1]:
                    x_range[1] = x_refpoint
            x_width_buffer = (x_range[1] - x_range[0]) * 0.02
        else:
            x_width_buffer = -(x_range[1] - x_range[0]) * 0.02
        x_lo = x_range[0] - x_width_buffer
        x_hi = x_range[1] + x_width_buffer
        foreground_shapes.append(
            go.layout.Shape(
                type="line",
                xref=f"x{ax_num}",
                yref=f"y{ax_num}",
                x0=x_lo,
                y0=y_refpoint,
                x1=x_hi,
                y1=y_refpoint,
                **colors.DEFAULT_REF_LINE_STYLE,
            )
        )
    return foreground_shapes
def update_splom_figure(
        scope,
        data,
        fig,
        selection,
        box,
        mass=None,
        rows=None,
        cols=None,
        selected_color=None,
        unselected_color=None,
        size=None,
):
    """Update an existing scatterplot-matrix figure for a new selection/box.

    Re-colors and re-weights the markers for `selection`, redraws the box
    threshold rectangles for `box`, and — if `rows`/`cols` changed — rebuilds
    the whole figure via new_splom_figure.

    Args:
        scope: Project scope used for axis info and default colors.
        data: DataFrame backing the figure.
        fig: Existing figure (dict-like) produced by new_splom_figure.
        selection: Boolean mask over data.index, or None for no selection.
        box: Box whose thresholds are drawn as rectangles, or None.
        mass: Point-mass budget for marker opacity; None uses the stored value.
        rows, cols: New dimension lists; None keeps the current ones.
        selected_color, unselected_color: Marker colors; None uses stored values.
        size: Figure size, used only when the figure is rebuilt.

    Returns:
        The updated figure object (the same object unless rebuilt).
    """
    # Settings from the original build are stashed in the figure's meta dict.
    existing_rows = fig['layout']['meta']['rows']
    existing_cols = fig['layout']['meta']['cols']
    if selected_color is None:
        selected_color = fig['layout']['meta'].get('selected_color', colors.DEFAULT_HIGHLIGHT_COLOR)
    if unselected_color is None:
        unselected_color = fig['layout']['meta'].get('unselected_color', colors.DEFAULT_BASE_COLOR)
    if mass is None:
        mass = fig['layout']['meta'].get('mass', 250)
    # Detect whether the set of plotted dimensions changed.
    change_dims = False
    if rows is None:
        rows = existing_rows
    else:
        if rows != existing_rows:
            change_dims = True
    if cols is None:
        cols = existing_cols
    else:
        if cols != existing_cols:
            change_dims = True
    if change_dims:
        # Dimension change: rebuild from scratch and copy the result over,
        # carrying forward the stored display settings.
        new_fig = new_splom_figure(
            scope,
            data,
            rows=rows,
            cols=cols,
            use_gl=fig['layout']['meta'].get('use_gl', True),
            mass=mass,
            row_titles=fig['layout']['meta'].get('row_titles', 'side'),
            size=size if size is not None else fig['layout']['meta'].get('size', None),
            selection=selection,
            box=box,
            refpoint=fig['layout']['meta'].get('refpoint', None),
            figure_class=None,
            on_select=None,  # lambda *a: self._on_select_from_histogram(*a,name=col)
            on_deselect=None,  # lambda *a: self._on_deselect_from_histogram(*a,name=col)
            selected_color=selected_color,
            unselected_color=unselected_color,
            marker_size=fig['layout']['meta'].get('marker_size', 3),
        )
        fig['data'] = new_fig['data']
        fig['layout'] = new_fig['layout']
        return fig
    # Preserve non-box line shapes (e.g. reference lines) across the update.
    existing_lines = fig_existing_lines(fig)
    marker_opacity = _splom_marker_opacity(
        data.index,
        selection,
        mass=mass,
    )
    if selection is None:
        marker_color = None
    else:
        marker_color = pandas.Series(data=unselected_color, index=data.index)
        marker_color[selection] = selected_color
    # n counts axis pairs (used as the shape axis number); trace_n indexes
    # into fig['data'], which must match new_splom_figure's trace layout.
    n = 0
    trace_n = 0
    box_shapes = []
    for rownum, row in enumerate(rows, start=1):
        for colnum, col in enumerate(cols, start=1):
            if row == col:
                # Diagonal cells occupy 3 traces in the figure — skip them.
                n += 1
                trace_n += 3
                continue
            if marker_color is None:
                color = _pick_color(scope, row, col)
            else:
                color = marker_color
            # Categorical values are jittered into numeric positions.
            x = perturb_categorical_df(data, col)
            y = perturb_categorical_df(data, row)
            x_ticktext, x_tickvals, x_range = axis_info(data[col], range_padding=0.3)
            y_ticktext, y_tickvals, y_range = axis_info(data[row], range_padding=0.3)
            fig['data'][trace_n]['x'] = x
            fig['data'][trace_n]['y'] = y
            fig['data'][trace_n]['marker']['color'] = color
            fig['data'][trace_n]['marker']['opacity'] = marker_opacity
            n += 1
            trace_n += 1
            box_shapes.extend(_splom_part_boxes(
                box, n,
                x, col, x_tickvals, x_ticktext, x_range,
                y, row, y_tickvals, y_ticktext, y_range,
                background_opacity=0.05,
            ))
    fig['layout']['shapes'] = box_shapes + existing_lines
    return fig
import datashader as ds
def _hue_mix(selected_array, unselected_array, selected_rgb, unselected_rgb):
    """Blend selected and unselected density rasters into one RGBA image.

    Each pixel's hue interpolates between *unselected_rgb* and
    *selected_rgb* according to the fraction of points at that pixel that
    are selected, while the alpha channel encodes total point density
    (normalized against the 99th percentile and clipped to [0, 1]).
    When the input colors appear to be 0-255 values, the returned RGB
    channels are scaled back to 0-255 as well.
    """
    from matplotlib.colors import LinearSegmentedColormap

    sel_rgb = numpy.asanyarray(selected_rgb)
    unsel_rgb = numpy.asanyarray(unselected_rgb)
    scale_to_255 = sel_rgb.max() > 1 or unsel_rgb.max() > 1
    if scale_to_255:
        # Normalize 0-255 colors to 0-1 before handing them to matplotlib.
        sel_rgb = sel_rgb / 255
        unsel_rgb = unsel_rgb / 255
    sel = numpy.asanyarray(selected_array)
    unsel = numpy.asanyarray(unselected_array)
    density = sel + unsel
    # Fraction of the points at each pixel that are selected; the small
    # epsilon avoids division by zero in empty pixels.
    fraction_selected = numpy.nan_to_num(sel / (density + 0.00001))
    density = density / numpy.percentile(density, 99)
    density = numpy.clip(density, 0, 1)
    colormap = LinearSegmentedColormap.from_list("BlOr", [unsel_rgb, sel_rgb])
    rgba = colormap(fraction_selected)
    if scale_to_255:
        rgba = numpy.round(rgba * 255)
    # Replace the colormap's alpha with the density-derived alpha.
    rgba[..., -1] = density
    return rgba
def _get_bins_and_range(ticktext, label, in_range, scope):
bins = 20
range_ = in_range
if ticktext is not None:
bins = len(ticktext) * 2 + 1
range_ = (in_range[0] - 0.25, in_range[1] + 0.25)
else:
param = scope[label]
try:
range_ = (param.min, param.max)
except AttributeError:
pass
try:
this_type = scope.get_dtype(label)
except:
this_type = 'float'
if this_type == 'int':
if param.max - param.min + 1 <= bins * 4:
bins = param.max - param.min + 1
range_ = (param.min-0.5, param.max+0.5)
return bins, range_
def new_hmm_figure(
scope,
data,
rows="LX",
cols="M",
row_titles='top',
size=150,
selection=None,
box=None,
refpoint=None,
figure_class=None,
on_select=None, # lambda *a: self._on_select_from_histogram(*a,name=col)
on_deselect=None, # lambda *a: self._on_deselect_from_histogram(*a,name=col)
selected_color=None,
unselected_color=None,
emph_selected=True,
show_points=50,
marker_size=5,
):
if unselected_color is None:
unselected_color = colors.DEFAULT_BASE_COLOR_RGB
else:
unselected_color = colors.interpret_color(unselected_color)
if selected_color is None:
selected_color = colors.DEFAULT_HIGHLIGHT_COLOR_RGB
else:
selected_color = colors.interpret_color(selected_color)
selected_color_str = ", ".join(str(int(i)) for i in selected_color)
unselected_color_str = ", ".join(str(int(i)) for i in unselected_color)
def _make_axis_list(j):
if isinstance(j, str):
if set('XLM').issuperset(j.upper()):
use = []
for i in j.upper():
if i=='X':
use += scope.get_uncertainty_names()
elif i=='L':
use += scope.get_lever_names()
if i=='M':
use += scope.get_measure_names()
return use
return [j]
return j
rows = _make_axis_list(rows)
cols = _make_axis_list(cols)
row_titles_top = (row_titles=='top')
subplot_titles = []
specs = []
for rownum, row in enumerate(rows, start=1):
specs.append([])
for colnum, col in enumerate(cols, start=1):
specs[-1].append({
# "type": "xy",
# 'l':0.03,
# 'r':0.03,
# 't':0.03,
# 'b':0.03,
})
if colnum == 1 and row_titles_top:
subplot_titles.append(scope.tagged_shortname(row, wrap_width=18))
else:
subplot_titles.append(None)
fig = make_subplots(
rows=len(rows), cols=len(cols),
shared_xaxes=True,
shared_yaxes=True,
vertical_spacing=(0.18 if row_titles_top else 0.1)/len(rows),
horizontal_spacing=0.1/len(cols),
subplot_titles=subplot_titles,
specs=specs,
)
if row_titles_top:
for rowtitle in fig['layout']['annotations']:
rowtitle['x'] = 0
rowtitle['xanchor'] = 'left'
fig['layout']['height'] = size * len(rows) + 75
fig['layout']['width'] = size * len(cols) + 100
if figure_class is not None:
fig = figure_class(fig)
experiment_name = "Experiment"
if data.index.name:
experiment_name = data.index.name
if selection is None:
selection = pandas.Series(True, index=data.index)
if selection is not None:
n_selected = numpy.sum(selection)
n_unselected = numpy.sum(~selection)
else:
n_selected = 0
n_unselected = len(data)
n = 0
extra_y_ax = len(rows) * len(cols)
# saved_bins = {}
for rownum, row in enumerate(rows, start=1):
for colnum, col in enumerate(cols, start=1):
n += 1
# x = perturb_categorical_df(data, col, suffix="numberize")
# y = perturb_categorical_df(data, row, suffix="numberize")
x_points = perturb_categorical_df(data, col)
y_points = perturb_categorical_df(data, row)
x_ticktext, x_tickvals, x_range = axis_info(data[col], range_padding=0.25, epsilon=0.25)
y_ticktext, y_tickvals, y_range = axis_info(data[row], range_padding=0.25, epsilon=0.25)
if row == col:
extra_y_ax += 1
import scipy.stats
try:
kde0 = scipy.stats.gaussian_kde(data[~selection][row])
kde1 = scipy.stats.gaussian_kde(data[selection][row])
except TypeError:
kde0 = scipy.stats.gaussian_kde(data[~selection][row].cat.codes)
kde1 = scipy.stats.gaussian_kde(data[selection][row].cat.codes)
x_fill = numpy.linspace(*x_range, 200)
y_0 = kde0(x_fill)
y_1 = kde1(x_fill)
topline = max(y_0.max(), y_1.max())
y_range_kde = (-0.07*topline, 1.07*topline)
layout_updates = {}
layout_updates[f'yaxis{extra_y_ax}'] = dict(
domain=fig['layout'][f'yaxis{n}']['domain'],
anchor=f'free',
showticklabels=False,
range=y_range_kde,
)
fig.update_layout(**layout_updates)
fig.add_trace(
go.Scatter(
x=[],
y=[],
mode='markers',
showlegend=False,
),
row=rownum, col=colnum,
)
fig.add_trace(
go.Scatter(
x=x_fill,
y=y_0,
yaxis=f"y{extra_y_ax}",
xaxis=f"x{n}",
showlegend=False,
line_color=f'rgb({unselected_color_str})',
fill='tozeroy',
)
)
fig.add_trace(
go.Scatter(
x=x_fill,
y=y_1,
yaxis=f"y{extra_y_ax}",
xaxis=f"x{n}",
showlegend=False,
line_color=f'rgb({selected_color_str})',
fill='tozeroy',
)
)
else:
x_bins, x_range_ = _get_bins_and_range(x_ticktext, col, x_range, scope)
y_bins, y_range_ = _get_bins_and_range(y_ticktext, row, y_range, scope)
# saved_bins[(rownum, colnum)] = (x_bins, x_range_, y_bins, y_range_)
cvs = ds.Canvas(plot_width=x_bins, plot_height=y_bins, x_range=x_range_, y_range=y_range_)
_col = f"_{col}_perturb" if f"_{col}_perturb" in data.columns else col
_row = f"_{row}_perturb" if f"_{row}_perturb" in data.columns else row
agg1 = cvs.points(data[selection], _col, _row)
agg0 = cvs.points(data[~selection], _col, _row)
if x_ticktext is not None:
x_arr = data[col].to_numpy().astype('U')
x_hovertag = "%{meta[2]}"
else:
x_arr = None
x_hovertag = "%{x:.3s}"
if y_ticktext is not None:
y_arr = data[row].to_numpy().astype('U')
y_hovertag = "%{meta[3]}" if x_hovertag=="%{meta[2]}" else "%{meta[2]}"
else:
y_arr = None
y_hovertag = "%{y:.3s}"
hovertemplate = (
f"<b>{scope.shortname(col)}</b>: {x_hovertag}<br>" +
f"<b>{scope.shortname(row)}</b>: {y_hovertag}" +
"<extra>%{meta[0]} selected<br>%{meta[1]} unselected</extra>"
)
agg0_arr = numpy.asanyarray(agg0)
agg1_arr = numpy.asanyarray(agg1)
wtype_def = [
('ns', agg1_arr.dtype),
('nu', agg0_arr.dtype),
]
if x_arr is not None:
wtype_def.append(
('x', x_arr.dtype)
)
if y_arr is not None:
wtype_def.append(
('y', y_arr.dtype)
)
wtype = numpy.dtype(wtype_def)
meta = numpy.empty(agg0_arr.shape, dtype=wtype)
meta['ns'] = agg1_arr
meta['nu'] = agg0_arr
if x_ticktext is not None:
meta[:,1::2]['x']=x_ticktext
if y_ticktext is not None:
meta[1::2,:]['y']=numpy.asarray(y_ticktext)[:,None]
y_label, x_label = agg0.dims[0], agg0.dims[1]
# np.datetime64 is not handled correctly by go.Heatmap
for ax in [x_label, y_label]:
if numpy.issubdtype(agg0.coords[ax].dtype, numpy.datetime64):
agg0.coords[ax] = agg0.coords[ax].astype(str)
x = agg0.coords[x_label]
y = agg0.coords[y_label]
if not emph_selected:
fig.add_trace(
go.Image(
z=_hue_mix(agg1, agg0, selected_color, unselected_color),
x0=float(x[0]),
dx=float(x[1]-x[0]),
y0=float(y[0]),
dy=float(y[1]-y[0]),
hovertemplate=hovertemplate,
meta=meta,
colormodel='rgba',
),
row=rownum, col=colnum,
)
else:
zmax = max(numpy.percentile(agg0_arr, 98), numpy.percentile(agg1_arr, 98))
agg0_arr = agg0_arr.astype(numpy.float64)
agg0_arr[agg0_arr==0] = numpy.nan
fig.add_trace(
go.Heatmap(
x=x,
y=y,
z=agg0_arr,
showlegend=False,
hovertemplate=hovertemplate,
meta=meta,
coloraxis=f"coloraxis{n*2}",
hoverongaps=False,
zmax=zmax,
zmin=0,
),
row=rownum, col=colnum,
)
agg1_arr = agg1_arr.astype(numpy.float64)
agg1_arr[agg1_arr == 0] = numpy.nan
fig.add_trace(
go.Heatmap(
x=x,
y=y,
z=agg1_arr,
showlegend=False,
hovertemplate=hovertemplate,
meta=meta,
coloraxis=f"coloraxis{n * 2 - 1}",
hoverongaps=False,
zmax=zmax,
zmin=0,
),
row=rownum, col=colnum,
)
if n_selected <= show_points:
_x_points_selected = x_points[selection]
_y_points_selected = y_points[selection]
else:
_x_points_selected = [None]
_y_points_selected = [None]
if x_ticktext is not None or y_ticktext is not None:
hovertemplate_s = (
f'<b>{scope.shortname(row)}</b>: %{{meta[1]}}<br>' +
f'<b>{scope.shortname(col)}</b>: %{{meta[2]}}' +
f'<extra>{experiment_name} %{{meta[0]}}</extra>'
)
meta_s = data[selection][[row, col]].reset_index().to_numpy()
else:
hovertemplate_s = (
f'<b>{scope.shortname(row)}</b>: %{{y}}<br>' +
f'<b>{scope.shortname(col)}</b>: %{{x}}' +
f'<extra>{experiment_name} %{{meta}}</extra>'
)
meta_s = data[selection].index
fig.add_trace(
go.Scatter(
x=_x_points_selected,
y=_y_points_selected,
mode='markers',
marker=dict(
color=f'rgb({selected_color_str})',
size=marker_size,
),
showlegend=False,
hovertemplate=hovertemplate_s,
meta=meta_s,
),
row=rownum, col=colnum,
)
fig.update_layout({
f"coloraxis{n*2-1}": {
'showscale': False,
'colorscale': [
[0.0, f'rgba({selected_color_str}, 0.0)'],
[0.5, f'rgba({selected_color_str}, 0.6)'],
[1.0, f'rgba({selected_color_str}, 1.0)'],
],
'cmax':zmax,
'cmin':0,
},
f"coloraxis{n*2}": {
'showscale': False,
'colorscale': [
[0.0, f'rgba({unselected_color_str}, 0.0)'],
[0.5, f'rgba({unselected_color_str}, 0.6)'],
[1.0, f'rgba({unselected_color_str}, 1.0)'],
],
'cmax': zmax,
'cmin': 0,
},
})
if on_select is not None:
fig.data[-1].on_selection(lambda *args: on_select(col, row, *args))
if on_deselect is not None:
fig.data[-1].on_deselect(lambda *args: on_deselect(col, row, *args))
if box is not None:
shapes = _splom_part_boxes(
box, n,
x, col, x_tickvals, x_ticktext, x_range,
y, row, y_tickvals, y_ticktext, y_range,
background_opacity=0.05,
)
for s in shapes:
fig.add_shape(s)
if refpoint is not None:
shapes = _splom_part_ref_point(
n,
x, x_range, x_tickvals, x_ticktext, refpoint.get(col, None),
y, y_range, y_tickvals, y_ticktext, refpoint.get(row, None),
)
for s in shapes:
fig.add_shape(s)
if colnum == 1:
fig.update_yaxes(
range=y_range,
row=rownum,
col=colnum,
)
if not row_titles_top:
fig.update_yaxes(
title_text=scope.tagged_shortname(row, wrap_width=18),
row=rownum,
col=colnum,
)
if y_ticktext is not None:
fig.update_yaxes(
row=rownum,
col=colnum,
tickmode = 'array',
ticktext = y_ticktext,
tickvals = y_tickvals,
)
# elif (colnum-1)%3==0 and len(cols)>4:
# fig.update_yaxes(
# title_text=scope.tagged_shortname(row, wrap_width=18),
# title_font_size=7,
# title_standoff=0,
# row=rownum,
# col=colnum,
# )
if rownum == len(rows):
fig.update_xaxes(
title_text=scope.tagged_shortname(col, wrap_width=18),
row=rownum,
col=colnum,
range=x_range,
)
if x_ticktext is not None:
fig.update_xaxes(
row=rownum,
col=colnum,
tickmode='array',
ticktext=x_ticktext,
tickvals=x_tickvals,
)
# elif | |
"""DHCPv6 Relay Agent"""
# pylint: disable=invalid-name,line-too-long
import pytest
import srv_control
import misc
import srv_msg
import references
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.relay
@pytest.mark.kea_only
def test_v6_relay_interface_local_and_relay_interface_in_the_same_subnet():
    """Configure one subnet with "relay", "interface" and interface-id at once.

    The subnet is given a local interface, a relay ip-address and an
    interface-id simultaneously; the server process is then observed
    during its configuration phase (presumably this combination is
    rejected at config time — confirm against the helper's semantics).
    """
    misc.test_setup()
    srv_control.config_srv_subnet('2001:db8:1::/64', '2001:db8:1::1-2001:db8:1::10')
    # Both a relay address and a local interface on the same subnet.
    srv_control.add_line_to_subnet('0', ',"relay": {"ip-address": "fdf8:f53e:61e4::18"}')
    srv_control.add_line_to_subnet('0', ',"interface":"$(SERVER_IFACE)"')
    srv_control.set_conf_parameter_subnet('interface-id', '"abc"', '0')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    # Watch the server while it processes this configuration.
    srv_control.start_srv_during_process('DHCP', 'configuration')
    references.references_check('Kea')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.relay
@pytest.mark.kea_only
def test_v6_relay_interface_two_subnets():
    """Select between two subnets by the relay interface-id option.

    Subnet 0 is tagged with interface-id "abc" and subnet 1 with "xyz".
    A relayed SOLICIT carrying each id must be answered with an address
    from the corresponding subnet's pool.
    """
    misc.test_setup()
    srv_control.config_srv_subnet('2001:db8:1::/64', '2001:db8:1::1-2001:db8:1::10')
    srv_control.set_conf_parameter_subnet('interface-id', '"abc"', '0')
    srv_control.config_srv_another_subnet_no_interface('2001:db8:2::/64',
                                                       '2001:db8:2::1-2001:db8:2::10')
    srv_control.set_conf_parameter_subnet('interface-id', '"xyz"', '1')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    # Exchange 1: relay with interface-id "abc" -> expect subnet 0.
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    srv_msg.client_sets_value('RelayAgent', 'ifaceid', 'abc')
    srv_msg.client_does_include('RelayAgent', None, 'interface-id')
    srv_msg.create_relay_forward()
    misc.pass_criteria()
    # Options: 18 = interface-id, 9 = relay message, 1/2 = client/server id,
    # 3 = IA_NA, sub-option 5 = IA address.
    srv_msg.send_wait_for_message('MUST', None, 'RELAYREPLY')
    srv_msg.response_check_include_option('Response', None, '18')
    srv_msg.response_check_include_option('Response', None, '9')
    srv_msg.response_check_option_content('Response', '9', None, 'Relayed', 'Message')
    srv_msg.response_check_include_option('Relayed Message', None, '1')
    srv_msg.response_check_include_option('Relayed Message', None, '2')
    srv_msg.response_check_include_option('Relayed Message', None, '3')
    srv_msg.response_check_option_content('Relayed Message', '3', None, 'sub-option', '5')
    srv_msg.response_check_suboption_content('Relayed Message',
                                             '5',
                                             '3',
                                             None,
                                             'addr',
                                             '2001:db8:1::1')
    # Exchange 2: relay with interface-id "xyz" -> expect subnet 1.
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    srv_msg.client_sets_value('RelayAgent', 'ifaceid', 'xyz')
    srv_msg.client_does_include('RelayAgent', None, 'interface-id')
    srv_msg.create_relay_forward()
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'RELAYREPLY')
    srv_msg.response_check_include_option('Response', None, '18')
    srv_msg.response_check_include_option('Response', None, '9')
    srv_msg.response_check_option_content('Response', '9', None, 'Relayed', 'Message')
    srv_msg.response_check_include_option('Relayed Message', None, '1')
    srv_msg.response_check_include_option('Relayed Message', None, '2')
    srv_msg.response_check_include_option('Relayed Message', None, '3')
    srv_msg.response_check_option_content('Relayed Message', '3', None, 'sub-option', '5')
    srv_msg.response_check_suboption_content('Relayed Message',
                                             '5',
                                             '3',
                                             None,
                                             'addr',
                                             '2001:db8:2::1')
    references.references_check('Kea')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.relay
@pytest.mark.kea_only
def test_v6_relay_relayaddress_two_subnets():
    """Select between two subnets by the relay link-address.

    Each subnet declares a "relay" ip-address; a relayed SOLICIT whose
    link-address equals that value must be answered from the matching
    subnet's pool.
    """
    misc.test_setup()
    # NOTE(review): both pool strings were corrupted into invalid IPv6
    # literals; restored to the in-subnet ranges implied by the leases
    # asserted at the end of each exchange.
    srv_control.config_srv_subnet('2001:db8:1::/64', '2001:db8:1::1-2001:db8:1::10')
    srv_control.add_line_to_subnet('0', ',"relay": {"ip-address": "fdf8:f53e:61e4::18"}')
    srv_control.config_srv_another_subnet_no_interface('2001:db8:2::/64',
                                                       '2001:db8:2::1-2001:db8:2::10')
    srv_control.add_line_to_subnet('1', ',"relay": {"ip-address": "fc00:db20:35b:7399::5"}')
    srv_control.set_conf_parameter_subnet('interface-id', '"xyz"', '1')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    # Exchange 1: link-address matches subnet 0's relay -> subnet 0 pool.
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    srv_msg.client_sets_value('RelayAgent', 'linkaddr', 'fdf8:f53e:61e4::18')
    srv_msg.client_does_include('RelayAgent', None, 'interface-id')
    srv_msg.create_relay_forward()
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'RELAYREPLY')
    srv_msg.response_check_include_option('Response', None, '18')
    srv_msg.response_check_include_option('Response', None, '9')
    srv_msg.response_check_option_content('Response', '9', None, 'Relayed', 'Message')
    srv_msg.response_check_include_option('Relayed Message', None, '1')
    srv_msg.response_check_include_option('Relayed Message', None, '2')
    srv_msg.response_check_include_option('Relayed Message', None, '3')
    srv_msg.response_check_option_content('Relayed Message', '3', None, 'sub-option', '5')
    srv_msg.response_check_suboption_content('Relayed Message',
                                             '5',
                                             '3',
                                             None,
                                             'addr',
                                             '2001:db8:1::1')
    # Exchange 2: link-address matches subnet 1's relay -> subnet 1 pool.
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    srv_msg.client_sets_value('RelayAgent', 'linkaddr', 'fc00:db20:35b:7399::5')
    srv_msg.client_does_include('RelayAgent', None, 'interface-id')
    srv_msg.create_relay_forward()
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'RELAYREPLY')
    srv_msg.response_check_include_option('Response', None, '18')
    srv_msg.response_check_include_option('Response', None, '9')
    srv_msg.response_check_option_content('Response', '9', None, 'Relayed', 'Message')
    srv_msg.response_check_include_option('Relayed Message', None, '1')
    srv_msg.response_check_include_option('Relayed Message', None, '2')
    srv_msg.response_check_include_option('Relayed Message', None, '3')
    srv_msg.response_check_option_content('Relayed Message', '3', None, 'sub-option', '5')
    srv_msg.response_check_suboption_content('Relayed Message',
                                             '5',
                                             '3',
                                             None,
                                             'addr',
                                             '2001:db8:2::1')
    references.references_check('Kea')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.relay
@pytest.mark.kea_only
@pytest.mark.disabled
def test_v6_relay_relayaddress_interface_id_just_one_matching():
    """Subnet requires both relay address and interface-id; only id matches.

    The subnet declares both a relay ip-address and an interface-id.
    The relayed SOLICIT carries a matching interface-id ("xyz") but a
    non-matching link-address, so no address may be assigned: the reply
    must carry status code 2 (NoAddrsAvail) instead of an IA address.
    """
    misc.test_setup()
    srv_control.config_srv_subnet('2001:db8:1::/64', '2001:db8:1::1-2001:db8:1::10')
    srv_control.set_conf_parameter_subnet('interface-id', '"xyz"', '0')
    srv_control.add_line_to_subnet('0', ',"relay": {"ip-address": "fdf8:f53e:61e4::18"}')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    # link-address differs from the configured relay address; only the
    # interface-id matches.
    srv_msg.client_sets_value('RelayAgent', 'linkaddr', 'fc00:db20:35b:7399::5')
    srv_msg.client_sets_value('RelayAgent', 'ifaceid', 'xyz')
    srv_msg.client_does_include('RelayAgent', None, 'interface-id')
    srv_msg.create_relay_forward()
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'RELAYREPLY')
    srv_msg.response_check_include_option('Response', None, '18')
    srv_msg.response_check_include_option('Response', None, '9')
    srv_msg.response_check_option_content('Response', '9', None, 'Relayed', 'Message')
    srv_msg.response_check_include_option('Relayed Message', None, '1')
    srv_msg.response_check_include_option('Relayed Message', None, '2')
    srv_msg.response_check_include_option('Relayed Message', None, '3')
    # No IA address (sub-option 5); status code (sub-option 13) value 2.
    srv_msg.response_check_option_content('Relayed Message', '3', 'NOT ', 'sub-option', '5')
    srv_msg.response_check_option_content('Relayed Message', '3', None, 'sub-option', '13')
    srv_msg.response_check_suboption_content('Relayed Message',
                                             '13',
                                             '3',
                                             None,
                                             'statuscode',
                                             '2')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.relay
@pytest.mark.kea_only
@pytest.mark.disabled
def test_v6_relay_relayaddress_interface_id_just_one_matching_2():
    """Subnet requires both relay address and interface-id; only address matches.

    The relayed SOLICIT carries the matching link-address but a
    non-matching interface-id ("abc" vs configured "xyz"), so no address
    may be assigned: the reply must carry status code 2 (NoAddrsAvail).
    """
    misc.test_setup()
    srv_control.config_srv_subnet('2001:db8:1::/64', '2001:db8:1::1-2001:db8:1::10')
    srv_control.set_conf_parameter_subnet('interface-id', '"xyz"', '0')
    srv_control.add_line_to_subnet('0', ',"relay": {"ip-address": "fdf8:f53e:61e4::18"}')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    # link-address matches the configured relay; interface-id does not.
    srv_msg.client_sets_value('RelayAgent', 'linkaddr', 'fdf8:f53e:61e4::18')
    srv_msg.client_sets_value('RelayAgent', 'ifaceid', 'abc')
    srv_msg.client_does_include('RelayAgent', None, 'interface-id')
    srv_msg.create_relay_forward()
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'RELAYREPLY')
    srv_msg.response_check_include_option('Response', None, '18')
    srv_msg.response_check_include_option('Response', None, '9')
    srv_msg.response_check_option_content('Response', '9', None, 'Relayed', 'Message')
    srv_msg.response_check_include_option('Relayed Message', None, '1')
    srv_msg.response_check_include_option('Relayed Message', None, '2')
    srv_msg.response_check_include_option('Relayed Message', None, '3')
    # No IA address (sub-option 5); status code (sub-option 13) value 2.
    srv_msg.response_check_option_content('Relayed Message', '3', 'NOT ', 'sub-option', '5')
    srv_msg.response_check_option_content('Relayed Message', '3', None, 'sub-option', '13')
    srv_msg.response_check_suboption_content('Relayed Message',
                                             '13',
                                             '3',
                                             None,
                                             'statuscode',
                                             '2')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.relay
@pytest.mark.kea_only
@pytest.mark.disabled
def test_v6_relay_relayaddress_interface_id_just_one_matching_3():
    """Link-address inside the subnet but interface-id not matching.

    The subnet declares a relay ip-address and interface-id "xyz". The
    relayed SOLICIT uses a link-address that lies within the subnet
    prefix (but is not the relay address) and interface-id "abc", so no
    address may be assigned: expect status code 2 (NoAddrsAvail).
    """
    misc.test_setup()
    # NOTE(review): the pool's upper bound was corrupted into an address
    # outside 2001:db8:1::/64; restored to an in-subnet range.
    srv_control.config_srv_subnet('2001:db8:1::/64', '2001:db8:1::1-2001:db8:1::10')
    srv_control.set_conf_parameter_subnet('interface-id', '"xyz"', '0')
    srv_control.add_line_to_subnet('0', ',"relay": {"ip-address": "fdf8:f53e:61e4::18"}')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    # In-subnet link-address, mismatching interface-id.
    srv_msg.client_sets_value('RelayAgent', 'linkaddr', '2001:db8:1::1000')
    srv_msg.client_sets_value('RelayAgent', 'ifaceid', 'abc')
    srv_msg.client_does_include('RelayAgent', None, 'interface-id')
    srv_msg.create_relay_forward()
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'RELAYREPLY')
    srv_msg.response_check_include_option('Response', None, '18')
    srv_msg.response_check_include_option('Response', None, '9')
    srv_msg.response_check_option_content('Response', '9', None, 'Relayed', 'Message')
    srv_msg.response_check_include_option('Relayed Message', None, '1')
    srv_msg.response_check_include_option('Relayed Message', None, '2')
    srv_msg.response_check_include_option('Relayed Message', None, '3')
    # No IA address (sub-option 5); status code (sub-option 13) value 2.
    srv_msg.response_check_option_content('Relayed Message', '3', 'NOT ', 'sub-option', '5')
    srv_msg.response_check_option_content('Relayed Message', '3', None, 'sub-option', '13')
    srv_msg.response_check_suboption_content('Relayed Message',
                                             '13',
                                             '3',
                                             None,
                                             'statuscode',
                                             '2')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.relay
@pytest.mark.kea_only
def test_v6_relay_relayaddress_interface_id_two_subnets():
    """Two subnets share one relay address; interface-id disambiguates.

    Both subnets declare the same relay ip-address but different
    interface-ids ("xyz" for subnet 0, "abc" for subnet 1); the
    interface-id carried by the relayed SOLICIT selects the pool.
    """
    misc.test_setup()
    srv_control.config_srv_subnet('2001:db8:1::/64', '2001:db8:1::1-2001:db8:1::10')
    srv_control.set_conf_parameter_subnet('interface-id', '"xyz"', '0')
    srv_control.add_line_to_subnet('0', ',"relay": {"ip-address": "fdf8:f53e:61e4::18"}')
    srv_control.config_srv_another_subnet_no_interface('2001:db8:2::/64',
                                                       '2001:db8:2::1-2001:db8:2::10')
    srv_control.set_conf_parameter_subnet('interface-id', '"abc"', '1')
    srv_control.add_line_to_subnet('1', ',"relay": {"ip-address": "fdf8:f53e:61e4::18"}')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    # Exchange 1: shared relay address + interface-id "xyz" -> subnet 0.
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    srv_msg.client_sets_value('RelayAgent', 'linkaddr', 'fdf8:f53e:61e4::18')
    srv_msg.client_sets_value('RelayAgent', 'ifaceid', 'xyz')
    srv_msg.client_does_include('RelayAgent', None, 'interface-id')
    srv_msg.create_relay_forward()
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'RELAYREPLY')
    srv_msg.response_check_include_option('Response', None, '18')
    srv_msg.response_check_include_option('Response', None, '9')
    srv_msg.response_check_option_content('Response', '9', None, 'Relayed', 'Message')
    srv_msg.response_check_include_option('Relayed Message', None, '1')
    srv_msg.response_check_include_option('Relayed Message', None, '2')
    srv_msg.response_check_include_option('Relayed Message', None, '3')
    srv_msg.response_check_option_content('Relayed Message', '3', None, 'sub-option', '5')
    srv_msg.response_check_suboption_content('Relayed Message',
                                             '5',
                                             '3',
                                             None,
                                             'addr',
                                             '2001:db8:1::1')
    # Exchange 2: same relay address + interface-id "abc" -> subnet 1.
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    srv_msg.client_sets_value('RelayAgent', 'linkaddr', 'fdf8:f53e:61e4::18')
    srv_msg.client_sets_value('RelayAgent', 'ifaceid', 'abc')
    srv_msg.client_does_include('RelayAgent', None, 'interface-id')
    srv_msg.create_relay_forward()
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'RELAYREPLY')
    srv_msg.response_check_include_option('Response', None, '18')
    srv_msg.response_check_include_option('Response', None, '9')
    srv_msg.response_check_option_content('Response', '9', None, 'Relayed', 'Message')
    srv_msg.response_check_include_option('Relayed Message', None, '1')
    srv_msg.response_check_include_option('Relayed Message', None, '2')
    srv_msg.response_check_include_option('Relayed Message', None, '3')
    srv_msg.response_check_option_content('Relayed Message', '3', None, 'sub-option', '5')
    srv_msg.response_check_suboption_content('Relayed Message',
                                             '5',
                                             '3',
                                             None,
                                             'addr',
                                             '2001:db8:2::1')
    references.references_check('Kea')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.relay
@pytest.mark.kea_only
def test_v6_relay_relayaddress_interface_id_two_subnets_2():
    """Two subnets with distinct relay addresses and interface-ids.

    Each relayed SOLICIT carries both the link-address and interface-id
    of one subnet; the server must lease from the matching pool.
    """
    misc.test_setup()
    srv_control.config_srv_subnet('2001:db8:1::/64', '2001:db8:1::1-2001:db8:1::10')
    srv_control.set_conf_parameter_subnet('interface-id', '"xyz"', '0')
    srv_control.add_line_to_subnet('0', ',"relay": {"ip-address": "fc00:db20:35b:7399::5"}')
    # NOTE(review): the pool's upper bound below was corrupted into an
    # invalid IPv6 literal; restored to the in-subnet range implied by
    # the asserted lease 2001:db8:2::1.
    srv_control.config_srv_another_subnet_no_interface('2001:db8:2::/64',
                                                       '2001:db8:2::1-2001:db8:2::10')
    srv_control.set_conf_parameter_subnet('interface-id', '"abc"', '1')
    srv_control.add_line_to_subnet('1', ',"relay": {"ip-address": "fdf8:f53e:61e4::18"}')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    # Exchange 1: subnet 0's relay address + interface-id "xyz".
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    srv_msg.client_sets_value('RelayAgent', 'linkaddr', 'fc00:db20:35b:7399::5')
    srv_msg.client_sets_value('RelayAgent', 'ifaceid', 'xyz')
    srv_msg.client_does_include('RelayAgent', None, 'interface-id')
    srv_msg.create_relay_forward()
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'RELAYREPLY')
    srv_msg.response_check_include_option('Response', None, '18')
    srv_msg.response_check_include_option('Response', None, '9')
    srv_msg.response_check_option_content('Response', '9', None, 'Relayed', 'Message')
    srv_msg.response_check_include_option('Relayed Message', None, '1')
    srv_msg.response_check_include_option('Relayed Message', None, '2')
    srv_msg.response_check_include_option('Relayed Message', None, '3')
    srv_msg.response_check_option_content('Relayed Message', '3', None, 'sub-option', '5')
    srv_msg.response_check_suboption_content('Relayed Message',
                                             '5',
                                             '3',
                                             None,
                                             'addr',
                                             '2001:db8:1::1')
    # Exchange 2: subnet 1's relay address + interface-id "abc".
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    srv_msg.client_sets_value('RelayAgent', 'linkaddr', 'fdf8:f53e:61e4::18')
    srv_msg.client_sets_value('RelayAgent', 'ifaceid', 'abc')
    srv_msg.client_does_include('RelayAgent', None, 'interface-id')
    srv_msg.create_relay_forward()
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'RELAYREPLY')
    srv_msg.response_check_include_option('Response', None, '18')
    srv_msg.response_check_include_option('Response', None, '9')
    srv_msg.response_check_option_content('Response', '9', None, 'Relayed', 'Message')
    srv_msg.response_check_include_option('Relayed Message', None, '1')
    srv_msg.response_check_include_option('Relayed Message', None, '2')
    srv_msg.response_check_include_option('Relayed Message', None, '3')
    srv_msg.response_check_option_content('Relayed Message', '3', None, 'sub-option', '5')
    srv_msg.response_check_suboption_content('Relayed Message',
                                             '5',
                                             '3',
                                             None,
                                             'addr',
                                             '2001:db8:2::1')
    references.references_check('Kea')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.relay
@pytest.mark.kea_only
def test_v6_relay_relayaddress_not_matching():
    """No subnet matches the relay link-address -> NoAddrsAvail.

    The only subnet declares a relay ip-address; the relayed SOLICIT
    uses a link-address that matches neither the relay address nor the
    subnet prefix, so the reply must carry status code 2 (NoAddrsAvail).
    """
    misc.test_setup()
    # NOTE(review): the pool's lower bound was corrupted into an invalid
    # IPv6 literal; restored to an in-subnet range.
    srv_control.config_srv_subnet('2001:db8:1::/64', '2001:db8:1::1-2001:db8:1::10')
    srv_control.add_line_to_subnet('0', ',"relay": {"ip-address": "fc00:db20:35b:7399::5"}')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    # link-address in a different prefix than the configured subnet.
    srv_msg.client_sets_value('RelayAgent', 'linkaddr', '2001:db8:2::100')
    srv_msg.client_does_include('RelayAgent', None, 'interface-id')
    srv_msg.create_relay_forward()
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'RELAYREPLY')
    srv_msg.response_check_include_option('Response', None, '18')
    srv_msg.response_check_include_option('Response', None, '9')
    srv_msg.response_check_option_content('Response', '9', None, 'Relayed', 'Message')
    srv_msg.response_check_include_option('Relayed Message', None, '1')
    srv_msg.response_check_include_option('Relayed Message', None, '2')
    srv_msg.response_check_include_option('Relayed Message', None, '3')
    # Status code (sub-option 13) value 2 = NoAddrsAvail.
    srv_msg.response_check_option_content('Relayed Message', '3', None, 'sub-option', '13')
    srv_msg.response_check_suboption_content('Relayed Message',
                                             '13',
                                             '3',
                                             None,
                                             'statuscode',
                                             '2')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.relay
@pytest.mark.kea_only
def test_v6_relay_relayaddress_within_subnet():
    """Link-address inside the subnet prefix selects the subnet.

    The subnet declares a relay ip-address, but the relayed SOLICIT's
    link-address lies within the subnet prefix itself; the test expects
    the subnet to be selected and an address leased from its pool.
    """
    misc.test_setup()
    # NOTE(review): the pool's upper bound was corrupted into an invalid
    # IPv6 literal; restored to the in-subnet range implied by the
    # asserted lease 2001:db8:1::1.
    srv_control.config_srv_subnet('2001:db8:1::/64', '2001:db8:1::1-2001:db8:1::10')
    srv_control.add_line_to_subnet('0', ',"relay": {"ip-address": "fc00:db20:35b:7399::5"}')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    # link-address within 2001:db8:1::/64 (not the configured relay address).
    srv_msg.client_sets_value('RelayAgent', 'linkaddr', '2001:db8:1::100')
    srv_msg.client_does_include('RelayAgent', None, 'interface-id')
    srv_msg.create_relay_forward()
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'RELAYREPLY')
    srv_msg.response_check_include_option('Response', None, '18')
    srv_msg.response_check_include_option('Response', None, '9')
    srv_msg.response_check_option_content('Response', '9', None, 'Relayed', 'Message')
    srv_msg.response_check_include_option('Relayed Message', None, '1')
    srv_msg.response_check_include_option('Relayed Message', None, '2')
    srv_msg.response_check_include_option('Relayed Message', None, '3')
    srv_msg.response_check_option_content('Relayed Message', '3', None, 'sub-option', '5')
    srv_msg.response_check_suboption_content('Relayed Message',
                                             '5',
                                             '3',
                                             None,
                                             'addr',
                                             '2001:db8:1::1')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.relay
@pytest.mark.kea_only
def test_v6_relay_interface_one_subnet_not_matching_id():
    """Relayed SOLICIT with a non-matching interface-id gets NoAddrsAvail.

    The only subnet is tagged with interface-id "xyz" while the relay
    forward carries "abc", so no address may be assigned: the reply
    must carry status code 2 (NoAddrsAvail) instead of an IA address.
    """
    misc.test_setup()
    srv_control.config_srv_subnet('2001:db8:1::/64', '2001:db8:1::1-2001:db8:1::10')
    srv_control.set_conf_parameter_subnet('interface-id', '"xyz"', '0')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    # interface-id "abc" does not match the configured "xyz".
    srv_msg.client_sets_value('RelayAgent', 'ifaceid', 'abc')
    srv_msg.client_does_include('RelayAgent', None, 'interface-id')
    srv_msg.create_relay_forward()
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'RELAYREPLY')
    srv_msg.response_check_include_option('Response', None, '18')
    srv_msg.response_check_include_option('Response', None, '9')
    srv_msg.response_check_option_content('Response', '9', None, 'Relayed', 'Message')
    srv_msg.response_check_include_option('Relayed Message', None, '1')
    srv_msg.response_check_include_option('Relayed Message', None, '2')
    srv_msg.response_check_include_option('Relayed Message', None, '3')
    # Status code (sub-option 13) value 2 = NoAddrsAvail.
    srv_msg.response_check_option_content('Relayed Message', '3', None, 'sub-option', '13')
    srv_msg.response_check_suboption_content('Relayed Message',
                                             '13',
                                             '3',
                                             None,
                                             'statuscode',
                                             '2')
    references.references_check('Kea')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.relay
@pytest.mark.kea_only
def test_v6_relay_interface_two_subnets_direct_client():
    """Two subnets: one relay-only (interface-id), one for direct clients.

    Part 1: a relayed SOLICIT with a non-matching interface-id ("xyz" vs the
    configured "abc") must be refused with status-code 2 (NoAddrsAvail).
    Part 2: a direct (non-relayed) SOLICIT must be served from the second
    subnet, i.e. NOT get an address from the relay-only pool.
    """
    misc.test_setup()
    # NOTE(review): the pool literals in the original file were corrupted by
    # repository anonymization (they did not lie inside the declared /64s);
    # reconstructed here as in-subnet pools consistent with the assertions
    # below -- confirm against the upstream Kea forge test.
    srv_control.config_srv_subnet('2001:db8:1::/64', '2001:db8:1::1-2001:db8:1::10')
    srv_control.set_conf_parameter_subnet('interface-id', '"abc"', '0')
    srv_control.config_srv_another_subnet_no_interface('2001:db8:2::/64',
                                                       '2001:db8:2::1-2001:db8:2::10')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    # Part 1: relayed SOLICIT with mismatching interface-id "xyz".
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    srv_msg.client_sets_value('RelayAgent', 'ifaceid', 'xyz')
    srv_msg.client_does_include('RelayAgent', None, 'interface-id')
    srv_msg.create_relay_forward()
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'RELAYREPLY')
    srv_msg.response_check_include_option('Response', None, '18')
    srv_msg.response_check_include_option('Response', None, '9')
    srv_msg.response_check_option_content('Response', '9', None, 'Relayed', 'Message')
    srv_msg.response_check_include_option('Relayed Message', None, '1')
    srv_msg.response_check_include_option('Relayed Message', None, '2')
    srv_msg.response_check_include_option('Relayed Message', None, '3')
    srv_msg.response_check_option_content('Relayed Message', '3', None, 'sub-option', '13')
    srv_msg.response_check_suboption_content('Relayed Message',
                                             '13',
                                             '3',
                                             None,
                                             'statuscode',
                                             '2')
    # Part 2: direct client must be served, but not from the relay-only pool.
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
    srv_msg.response_check_suboption_content('Response',
                                             '5',
                                             '3',
                                             'NOT ',
                                             'addr',
                                             '2001:db8:1::1')
    references.references_check('Kea')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.relay
@pytest.mark.kea_only
def test_v6_relay_interface_two_subnets_same_interface_id():
misc.test_setup()
# that is basically misconfiguration!
srv_control.config_srv_subnet('2001:db8:1::/64', '2001:db8:1::1-2001:db8:1::1')
srv_control.set_conf_parameter_subnet('interface-id', '"abc"', '0')
srv_control.config_srv_another_subnet_no_interface('2001:db8:2::/64',
'2001:db8:2::11-2001:db8:2::20')
srv_control.set_conf_parameter_subnet('interface-id', '"abc"', '1')
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
# just saving server-id - start
misc.test_procedure()
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
misc.test_procedure()
srv_msg.client_save_option('server-id')
# just saving server-id - end
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
srv_msg.client_sets_value('RelayAgent', 'ifaceid', 'abc')
srv_msg.client_does_include('RelayAgent', None, 'interface-id')
srv_msg.create_relay_forward()
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'RELAYREPLY')
srv_msg.response_check_include_option('Response', None, '18')
srv_msg.response_check_include_option('Response', None, '9')
srv_msg.response_check_option_content('Response', '9', None, 'Relayed', 'Message')
srv_msg.response_check_include_option('Relayed Message', None, '1')
srv_msg.response_check_include_option('Relayed Message', None, '2')
srv_msg.response_check_include_option('Relayed Message', None, '3')
srv_msg.response_check_option_content('Relayed Message', '3', None, 'sub-option', '5')
srv_msg.response_check_suboption_content('Relayed Message',
'5',
'3',
None,
'addr',
'2001:db8:1::1')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_sets_value('Client', 'IA_Address', '2001:db8:1::1')
srv_msg.client_does_include('Client', None, 'IA_Address')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_add_saved_option('DONT ')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('REQUEST')
srv_msg.client_sets_value('RelayAgent', 'ifaceid', 'abc')
srv_msg.client_does_include('RelayAgent', None, 'interface-id')
srv_msg.create_relay_forward()
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'RELAYREPLY')
srv_msg.response_check_include_option('Response', None, '18')
srv_msg.response_check_include_option('Response', None, '9')
srv_msg.response_check_option_content('Response', '9', None, 'Relayed', 'Message')
srv_msg.response_check_include_option('Relayed Message', None, '1')
srv_msg.response_check_include_option('Relayed Message', None, '2')
srv_msg.response_check_include_option('Relayed Message', None, '3')
srv_msg.response_check_option_content('Relayed Message', '3', None, 'sub-option', '5')
srv_msg.response_check_suboption_content('Relayed Message',
'5',
'3',
| |
# -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
from Tea.model import TeaModel
from typing import Dict, List
class GetPropertyInfoHeaders(TeaModel):
    """Request headers for the GetPropertyInfo call: free-form common
    headers plus the DingTalk access-token header."""
    def __init__(
        self,
        common_headers: Dict[str, str] = None,
        x_acs_dingtalk_access_token: str = None,
    ):
        self.common_headers = common_headers
        self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token

    def validate(self):
        # No constraints on header values.
        pass

    def to_map(self):
        mapped = super().to_map()
        if mapped is not None:
            return mapped
        payload = {}
        for attr, key in (('common_headers', 'commonHeaders'),
                          ('x_acs_dingtalk_access_token', 'x-acs-dingtalk-access-token')):
            value = getattr(self, attr)
            if value is not None:
                payload[key] = value
        return payload

    def from_map(self, m: dict = None):
        data = m or {}
        for attr, key in (('common_headers', 'commonHeaders'),
                          ('x_acs_dingtalk_access_token', 'x-acs-dingtalk-access-token')):
            if data.get(key) is not None:
                setattr(self, attr, data.get(key))
        return self
class GetPropertyInfoRequest(TeaModel):
    """Query parameters for GetPropertyInfo."""
    def __init__(
        self,
        property_corp_id: str = None,
    ):
        # dingCropId of the property-management corp.
        self.property_corp_id = property_corp_id

    def validate(self):
        pass

    def to_map(self):
        mapped = super().to_map()
        if mapped is not None:
            return mapped
        payload = {}
        if self.property_corp_id is not None:
            payload['propertyCorpId'] = self.property_corp_id
        return payload

    def from_map(self, m: dict = None):
        data = m or {}
        if data.get('propertyCorpId') is not None:
            self.property_corp_id = data.get('propertyCorpId')
        return self
class GetPropertyInfoResponseBody(TeaModel):
    """Payload returned by GetPropertyInfo: corp identity and its admin."""
    # (attribute, wire key) pairs, in serialization order.
    def __init__(
        self,
        name: str = None,
        org_id: int = None,
        admin_name: str = None,
        admin_user_id: str = None,
        unified_social_credit: str = None,
    ):
        self.name = name
        self.org_id = org_id
        self.admin_name = admin_name
        self.admin_user_id = admin_user_id
        self.unified_social_credit = unified_social_credit

    def validate(self):
        pass

    def _field_pairs(self):
        return (('name', 'name'),
                ('org_id', 'orgId'),
                ('admin_name', 'adminName'),
                ('admin_user_id', 'adminUserId'),
                ('unified_social_credit', 'unifiedSocialCredit'))

    def to_map(self):
        mapped = super().to_map()
        if mapped is not None:
            return mapped
        payload = {}
        for attr, key in self._field_pairs():
            value = getattr(self, attr)
            if value is not None:
                payload[key] = value
        return payload

    def from_map(self, m: dict = None):
        data = m or {}
        for attr, key in self._field_pairs():
            if data.get(key) is not None:
                setattr(self, attr, data.get(key))
        return self
class GetPropertyInfoResponse(TeaModel):
    """Wire-level response wrapper: transport headers plus typed body."""
    def __init__(
        self,
        headers: Dict[str, str] = None,
        body: GetPropertyInfoResponseBody = None,
    ):
        self.headers = headers
        self.body = body

    def validate(self):
        # Both parts are mandatory on a decoded response.
        self.validate_required(self.headers, 'headers')
        self.validate_required(self.body, 'body')
        if self.body:
            self.body.validate()

    def to_map(self):
        mapped = super().to_map()
        if mapped is not None:
            return mapped
        payload = {}
        if self.headers is not None:
            payload['headers'] = self.headers
        if self.body is not None:
            payload['body'] = self.body.to_map()
        return payload

    def from_map(self, m: dict = None):
        data = m or {}
        if data.get('headers') is not None:
            self.headers = data.get('headers')
        if data.get('body') is not None:
            self.body = GetPropertyInfoResponseBody().from_map(data.get('body'))
        return self
class GetSpaceIdByTypeHeaders(TeaModel):
    """Request headers for GetSpaceIdByType: free-form common headers plus
    the DingTalk access-token header."""
    def __init__(
        self,
        common_headers: Dict[str, str] = None,
        x_acs_dingtalk_access_token: str = None,
    ):
        self.common_headers = common_headers
        self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token

    def validate(self):
        # No constraints on header values.
        pass

    def to_map(self):
        mapped = super().to_map()
        if mapped is not None:
            return mapped
        payload = {}
        for attr, key in (('common_headers', 'commonHeaders'),
                          ('x_acs_dingtalk_access_token', 'x-acs-dingtalk-access-token')):
            value = getattr(self, attr)
            if value is not None:
                payload[key] = value
        return payload

    def from_map(self, m: dict = None):
        data = m or {}
        for attr, key in (('common_headers', 'commonHeaders'),
                          ('x_acs_dingtalk_access_token', 'x-acs-dingtalk-access-token')):
            if data.get(key) is not None:
                setattr(self, attr, data.get(key))
        return self
class GetSpaceIdByTypeRequest(TeaModel):
    """Query parameters for GetSpaceIdByType."""
    def __init__(
        self,
        department_type: str = None,
    ):
        # Department type.
        self.department_type = department_type

    def validate(self):
        pass

    def to_map(self):
        mapped = super().to_map()
        if mapped is not None:
            return mapped
        payload = {}
        if self.department_type is not None:
            payload['departmentType'] = self.department_type
        return payload

    def from_map(self, m: dict = None):
        data = m or {}
        if data.get('departmentType') is not None:
            self.department_type = data.get('departmentType')
        return self
class GetSpaceIdByTypeResponseBody(TeaModel):
    """Payload returned by GetSpaceIdByType."""
    def __init__(
        self,
        refer_id: int = None,
    ):
        # Department id.
        self.refer_id = refer_id

    def validate(self):
        pass

    def to_map(self):
        mapped = super().to_map()
        if mapped is not None:
            return mapped
        payload = {}
        if self.refer_id is not None:
            payload['referId'] = self.refer_id
        return payload

    def from_map(self, m: dict = None):
        data = m or {}
        if data.get('referId') is not None:
            self.refer_id = data.get('referId')
        return self
class GetSpaceIdByTypeResponse(TeaModel):
    """Wire-level response wrapper: transport headers plus typed body."""
    def __init__(
        self,
        headers: Dict[str, str] = None,
        body: GetSpaceIdByTypeResponseBody = None,
    ):
        self.headers = headers
        self.body = body

    def validate(self):
        # Both parts are mandatory on a decoded response.
        self.validate_required(self.headers, 'headers')
        self.validate_required(self.body, 'body')
        if self.body:
            self.body.validate()

    def to_map(self):
        mapped = super().to_map()
        if mapped is not None:
            return mapped
        payload = {}
        if self.headers is not None:
            payload['headers'] = self.headers
        if self.body is not None:
            payload['body'] = self.body.to_map()
        return payload

    def from_map(self, m: dict = None):
        data = m or {}
        if data.get('headers') is not None:
            self.headers = data.get('headers')
        if data.get('body') is not None:
            self.body = GetSpaceIdByTypeResponseBody().from_map(data.get('body'))
        return self
class ListSubSpaceHeaders(TeaModel):
    """Request headers for ListSubSpace: free-form common headers plus
    the DingTalk access-token header."""
    def __init__(
        self,
        common_headers: Dict[str, str] = None,
        x_acs_dingtalk_access_token: str = None,
    ):
        self.common_headers = common_headers
        self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token

    def validate(self):
        # No constraints on header values.
        pass

    def to_map(self):
        mapped = super().to_map()
        if mapped is not None:
            return mapped
        payload = {}
        for attr, key in (('common_headers', 'commonHeaders'),
                          ('x_acs_dingtalk_access_token', 'x-acs-dingtalk-access-token')):
            value = getattr(self, attr)
            if value is not None:
                payload[key] = value
        return payload

    def from_map(self, m: dict = None):
        data = m or {}
        for attr, key in (('common_headers', 'commonHeaders'),
                          ('x_acs_dingtalk_access_token', 'x-acs-dingtalk-access-token')):
            if data.get(key) is not None:
                setattr(self, attr, data.get(key))
        return self
class ListSubSpaceRequest(TeaModel):
    """Query parameters for ListSubSpace: the resident corp and the parent
    space whose children are listed."""
    def __init__(
        self,
        resident_corp_id: str = None,
        refer_id: int = None,
    ):
        self.resident_corp_id = resident_corp_id
        self.refer_id = refer_id

    def validate(self):
        pass

    def to_map(self):
        mapped = super().to_map()
        if mapped is not None:
            return mapped
        payload = {}
        for attr, key in (('resident_corp_id', 'residentCorpId'),
                          ('refer_id', 'referId')):
            value = getattr(self, attr)
            if value is not None:
                payload[key] = value
        return payload

    def from_map(self, m: dict = None):
        data = m or {}
        for attr, key in (('resident_corp_id', 'residentCorpId'),
                          ('refer_id', 'referId')):
            if data.get(key) is not None:
                setattr(self, attr, data.get(key))
        return self
class ListSubSpaceResponseBodySpaceList(TeaModel):
    """One sub-space entry returned by ListSubSpace."""
    def __init__(
        self,
        refer_id: int = None,
        space_name: str = None,
        tag_code: str = None,
        type: str = None,
        floor: str = None,
        is_virtual: int = None,
        billing_area: float = None,
        building_area: float = None,
        house_state: int = None,
        parent_refer_id: int = None,
    ):
        self.refer_id = refer_id
        self.space_name = space_name
        self.tag_code = tag_code
        # For a building: 1 high-rise / 2 low-rise / 3 villa / 4 other.
        # For a house: 1 residence / 2 apartment / 3 townhouse / 4 flat /
        # 5 stacked villa / 6 villa / 7 shop / 8 office / 9 business premises / 10 other.
        self.type = type
        self.floor = floor
        self.is_virtual = is_virtual
        self.billing_area = billing_area
        self.building_area = building_area
        # House state: 0 vacant / 1 unclaimed / 2 occupied / 3 locked-empty / 4 renovating.
        self.house_state = house_state
        self.parent_refer_id = parent_refer_id

    def validate(self):
        pass

    def _field_pairs(self):
        # (attribute, wire key) pairs, in serialization order.
        return (('refer_id', 'referId'),
                ('space_name', 'spaceName'),
                ('tag_code', 'tagCode'),
                ('type', 'type'),
                ('floor', 'floor'),
                ('is_virtual', 'isVirtual'),
                ('billing_area', 'billingArea'),
                ('building_area', 'buildingArea'),
                ('house_state', 'houseState'),
                ('parent_refer_id', 'parentReferId'))

    def to_map(self):
        mapped = super().to_map()
        if mapped is not None:
            return mapped
        payload = {}
        for attr, key in self._field_pairs():
            value = getattr(self, attr)
            if value is not None:
                payload[key] = value
        return payload

    def from_map(self, m: dict = None):
        data = m or {}
        for attr, key in self._field_pairs():
            if data.get(key) is not None:
                setattr(self, attr, data.get(key))
        return self
class ListSubSpaceResponseBody(TeaModel):
    """Payload returned by ListSubSpace: the list of sub-spaces."""
    def __init__(
        self,
        space_list: List[ListSubSpaceResponseBodySpaceList] = None,
    ):
        # Result entries.
        self.space_list = space_list

    def validate(self):
        for entry in self.space_list or []:
            if entry:
                entry.validate()

    def to_map(self):
        mapped = super().to_map()
        if mapped is not None:
            return mapped
        payload = {}
        # 'spaceList' is always present, even when the list is empty/None.
        payload['spaceList'] = [
            entry.to_map() if entry else None
            for entry in (self.space_list or [])
        ]
        return payload

    def from_map(self, m: dict = None):
        data = m or {}
        self.space_list = []
        raw_entries = data.get('spaceList')
        if raw_entries is not None:
            self.space_list = [
                ListSubSpaceResponseBodySpaceList().from_map(entry)
                for entry in raw_entries
            ]
        return self
class ListSubSpaceResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: ListSubSpaceResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, | |
<filename>contrapartes/views.py
from django.shortcuts import render
from .models import *
from .forms import *
from notas.models import *
from notas.forms import *
from agendas.models import *
from agendas.forms import *
from foros.forms import *
from publicaciones.models import *
from publicaciones.forms import *
from galerias.models import *
from galerias.forms import *
from catalogo.models import *
from catalogo.forms import *
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import get_object_or_404, redirect
from django.forms import inlineformset_factory
from django.core.mail import send_mail, EmailMultiAlternatives
from django.template.loader import render_to_string
# Create your views here.
@login_required
def perfil_editar(request,template='admin/editar_user.html'):
    """Edit the logged-in user's account data and profile (avatar).

    GET renders unbound forms; a valid POST saves both forms and redirects
    to /accounts/profile/; an invalid POST falls through and re-renders the
    bound forms (with errors) via the final render().
    """
    # NOTE(review): `object` shadows the builtin; kept as-is because
    # locals() below is the template context, so renaming would change keys.
    object = get_object_or_404(UserProfile, user=request.user)
    if request.method == 'POST':
        form = UserForm(request.POST, instance=request.user)
        form_avatar = UserProfileForm(request.POST,files=request.FILES,instance=object)
        if form.is_valid() and form_avatar.is_valid():
            form.save()
            form_avatar.save()
            return HttpResponseRedirect('/accounts/profile/')
    else:
        form = UserForm(instance=request.user)
        form_avatar = UserProfileForm(instance=object)
    return render(request, template, locals())
@login_required
def editar_contraparte(request, slug, template='admin/editar_contraparte.html'):
    """Edit a Contraparte and its inline Redes (social links) formset.

    On a valid POST the Contraparte is re-assigned to the requesting user
    and the view redirects to /accounts/profile/.
    """
    # NOTE(review): no ownership check -- any authenticated user can edit any
    # Contraparte by slug, and saving re-assigns `user` to the requester.
    # TODO confirm this is intended.
    contra = get_object_or_404(Contraparte, slug=slug)
    # Up to 11 Redes rows attached to the contraparte.
    FormSetInit = inlineformset_factory(Contraparte, Redes, form=RedesFrom,extra=11,max_num=11)
    if request.method == 'POST':
        form = ContraparteForms(data=request.POST,instance=contra,files=request.FILES)
        formset = FormSetInit(request.POST,request.FILES,instance=contra)
        if form.is_valid() and formset.is_valid():
            form_uncommited = form.save(commit=False)
            form_uncommited.user = request.user
            form_uncommited.save()
            formset.save()
            return HttpResponseRedirect('/accounts/profile/')
    else:
        form = ContraparteForms(instance=contra)
        formset = FormSetInit(instance=contra)
    # locals() is the template context (form, formset, contra, ...).
    return render(request, template, locals())
@login_required
def notas_contraparte(request, template='admin/notaadmin.html'):
    """List the current user's notas plus per-tema counts.

    `dic_temas` maps each Temas instance to the number of the user's notas
    tagged with it; temas with zero notas are omitted.
    """
    object_list = Notas.objects.filter(user_id = request.user.id)
    dic_temas = {}
    # One COUNT query per tema; fine for a small Temas table.
    for tema in Temas.objects.all():
        count = Notas.objects.filter(temas = tema,user = request.user).count()
        if count != 0:
            dic_temas[tema] = count
    return render(request, template, locals())
@login_required
def redactar_notas_contraparte(request, template='admin/redactar_notaadmin.html'):
    """Create a nota owned by the current user; if published, e-mail every
    other registered user and redirect to the nota list.

    NOTE(review): when the nota is not published, or the e-mail raises, the
    view falls through to the final render() with no redirect -- the nota is
    saved but the user stays on the form (double-submit risk). TODO confirm.
    """
    if request.method == 'POST':
        form = NotasForms(request.POST, request.FILES)
        if form.is_valid():
            nota = form.save(commit=False)
            nota.user = request.user
            nota.correo_enviado = False
            nota.save()
            form.save_m2m()
            if nota.publicada == True:
                try:
                    # '<EMAIL>' is a placeholder left by repository
                    # anonymization -- TODO restore the real sender address.
                    subject, from_email = 'Nueva nota', '<EMAIL>'
                    text_content = render_to_string('email/nota.txt', {'nota': nota,})
                    # NOTE(review): the HTML alternative renders the same .txt
                    # template; presumably 'email/nota.html' was intended.
                    html_content = render_to_string('email/nota.txt', {'nota': nota,})
                    list_mail = UserProfile.objects.exclude(user__id = request.user.id).values_list('user__email',flat=True)
                    msg = EmailMultiAlternatives(subject, text_content, from_email, list_mail)
                    msg.attach_alternative(html_content, "text/html")
                    msg.send()
                    # `enviado` only reaches the template via locals().
                    enviado = 1
                    nota.correo_enviado = True
                    nota.save()
                    return HttpResponseRedirect('/contrapartes/notas/')
                except:
                    # NOTE(review): bare except swallows every failure,
                    # including the redirect path; consider logging.
                    pass
    else:
        form = NotasForms()
    return render(request, template, locals())
@login_required
def filtro_temas_contra(request, temas, template='admin/notaadmin.html'):
    """List the current user's notas filtered by tema name (newest first).

    NOTE(review): unlike notas_contraparte, `dic_temas` here counts notas of
    ALL users and keeps zero counts -- confirm whether that asymmetry is
    intentional.
    """
    object_list = Notas.objects.filter(user_id = request.user.id,temas__nombre = temas).order_by('-id')
    dic_temas = {}
    for tema in Temas.objects.all():
        count = Notas.objects.filter(temas = tema).count()
        dic_temas[tema] = count
    return render(request, template, locals())
@login_required
def editar_nota(request, slug, template='admin/editar_nota.html'):
    """Edit a nota; on first publication e-mail every other registered user.

    The `correo_enviado` flag guards against re-sending the announcement on
    later edits. A valid POST always redirects to the nota list.
    """
    # NOTE(review): no ownership check -- any authenticated user can edit any
    # nota by slug, and saving re-assigns `user` to the requester. TODO confirm.
    object = get_object_or_404(Notas, slug=slug)
    if request.method == 'POST':
        form = NotasForms(request.POST, request.FILES, instance=object)
        if form.is_valid():
            form_uncommited = form.save()
            form_uncommited.user = request.user
            form_uncommited.save()
            # Send the announcement only once: published and not yet mailed.
            if form_uncommited.publicada == True and form_uncommited.correo_enviado == False:
                try:
                    # '<EMAIL>' is an anonymization placeholder -- TODO restore.
                    subject, from_email = 'Nueva nota', '<EMAIL>'
                    text_content = render_to_string('email/nota.txt', {'nota': form_uncommited,})
                    # NOTE(review): HTML alternative renders the .txt template;
                    # presumably 'email/nota.html' was intended.
                    html_content = render_to_string('email/nota.txt', {'nota': form_uncommited,})
                    list_mail = UserProfile.objects.exclude(user__id = request.user.id).values_list('user__email',flat=True)
                    msg = EmailMultiAlternatives(subject, text_content, from_email, list_mail)
                    msg.attach_alternative(html_content, "text/html")
                    msg.send()
                    form_uncommited.correo_enviado = True
                    form_uncommited.save()
                    return HttpResponseRedirect('/contrapartes/notas/')
                except:
                    # NOTE(review): bare except hides mail failures; consider logging.
                    pass
            return HttpResponseRedirect('/contrapartes/notas/')
    else:
        form = NotasForms(instance=object)
    return render(request, template, locals())
@login_required
def eliminar_notas_contraparte(request, slug):
    """Delete the current user's nota identified by ``slug``.

    Fix: the original deleted any nota matching the slug, letting one
    logged-in user remove another's nota (IDOR). The queryset is now scoped
    to ``user=request.user``, matching the filter used by the listing view.
    A non-matching slug is simply a no-op.
    """
    Notas.objects.filter(slug=slug, user=request.user).delete()
    return HttpResponseRedirect('/contrapartes/notas/')
@login_required
def eventos_contraparte(request, template='admin/list_eventos.html'):
    """List the current user's eventos (Agendas); context via locals()."""
    object_list = Agendas.objects.filter(user_id = request.user.id)
    return render(request, template, locals())
@login_required
def nuevo_evento_contraparte(request, template='admin/nuevo_evento.html'):
    """Create an evento with its inline agenda items and documents; if
    public, e-mail every other registered user and redirect to the list.

    NOTE(review): when the evento is not public, or the e-mail raises, the
    view falls through to render() without redirecting even though the
    evento was saved (double-submit risk). TODO confirm.
    """
    # Up to 12 agenda rows and 6 attached documents per evento.
    FormSetInit = inlineformset_factory(Agendas,AgendaEvento,form=AgendaEventoForm,extra=12,max_num=12)
    FormSetInit2 = inlineformset_factory(Agendas,DocumentosEvento,form=DocuForm,extra=6,max_num=6)
    if request.method == 'POST':
        form = AgendaForm(request.POST, request.FILES)
        formset = FormSetInit(request.POST,request.FILES)
        formset2 = FormSetInit2(request.POST,request.FILES)
        if form.is_valid() and formset.is_valid() and formset2.is_valid():
            evento = form.save(commit=False)
            evento.user = request.user
            evento.correo_enviado = False
            evento.save()
            # Attach each inline row to the freshly saved evento.
            instances = formset.save(commit=False)
            for instance in instances:
                instance.evento = evento
                instance.save()
            formset.save_m2m()
            instances2 = formset2.save(commit=False)
            for instance2 in instances2:
                instance2.evento = evento
                instance2.save()
            formset2.save_m2m()
            if evento.publico == True:
                try:
                    # '<EMAIL>' is an anonymization placeholder -- TODO restore.
                    subject, from_email = 'Nuevo evento', '<EMAIL>'
                    text_content = render_to_string('email/evento.txt', {'evento': evento,})
                    # NOTE(review): HTML alternative renders the .txt template;
                    # presumably 'email/evento.html' was intended.
                    html_content = render_to_string('email/evento.txt', {'evento': evento,})
                    list_mail = UserProfile.objects.exclude(user__id = request.user.id).values_list('user__email',flat=True)
                    msg = EmailMultiAlternatives(subject, text_content, from_email, list_mail)
                    msg.attach_alternative(html_content, "text/html")
                    msg.send()
                    evento.correo_enviado = True
                    evento.save()
                    return HttpResponseRedirect('/contrapartes/eventos/')
                except:
                    # NOTE(review): bare except hides mail failures; consider logging.
                    pass
    else:
        form = AgendaForm()
        formset = FormSetInit()
        formset2 = FormSetInit2()
    return render(request, template, locals())
@login_required
def eliminar_evento_contraparte(request, slug):
    """Delete the current user's evento identified by ``slug``.

    Fixes two issues in the original: (1) ``get(...).delete()`` raised an
    unhandled ``DoesNotExist`` (HTTP 500) for an unknown slug -- ``filter``
    makes it a no-op, matching the sibling delete views; (2) the deletion is
    now scoped to ``user=request.user`` so a user cannot delete someone
    else's evento by guessing its slug (IDOR).
    """
    Agendas.objects.filter(slug=slug, user=request.user).delete()
    return HttpResponseRedirect('/contrapartes/eventos/')
@login_required
def editar_evento(request, slug, template='admin/editar_evento.html'):
    """Edit an evento plus its inline agenda items and documents.

    Fix: the original reset ``evento.correo_enviado = False`` immediately
    before testing ``correo_enviado == False``, so editing a public evento
    re-mailed every user on every save. The stored flag is now honoured
    (same one-shot pattern as ``editar_nota``), so the announcement is sent
    at most once. A valid POST always redirects to the evento list.
    """
    # NOTE(review): no ownership check -- any authenticated user can edit any
    # evento by slug. TODO confirm this is intended.
    object = get_object_or_404(Agendas, slug=slug)
    FormSetInit = inlineformset_factory(Agendas,AgendaEvento,form=AgendaEventoForm,extra=12,max_num=12)
    FormSetInit2 = inlineformset_factory(Agendas,DocumentosEvento,form=DocuForm,extra=6,max_num=6)
    if request.method == 'POST':
        form = AgendaForm(request.POST, request.FILES,instance=object)
        formset = FormSetInit(request.POST,request.FILES,instance=object)
        formset2 = FormSetInit2(request.POST,request.FILES,instance=object)
        if form.is_valid() and formset.is_valid() and formset2.is_valid():
            evento = form.save(commit=False)
            evento.user = request.user
            evento.save()
            # Re-attach each inline row to the evento.
            instances = formset.save(commit=False)
            for instance in instances:
                instance.evento = evento
                instance.save()
            formset.save_m2m()
            instances2 = formset2.save(commit=False)
            for instance2 in instances2:
                instance2.evento = evento
                instance2.save()
            formset2.save_m2m()
            # Send the announcement only once: public and not yet mailed.
            if evento.publico == True and evento.correo_enviado == False:
                try:
                    # '<EMAIL>' is an anonymization placeholder -- TODO restore.
                    subject, from_email = 'Nuevo evento', '<EMAIL>'
                    text_content = render_to_string('email/evento.txt', {'evento': evento,})
                    html_content = render_to_string('email/evento.txt', {'evento': evento,})
                    list_mail = UserProfile.objects.exclude(user__id = request.user.id).values_list('user__email',flat=True)
                    msg = EmailMultiAlternatives(subject, text_content, from_email, list_mail)
                    msg.attach_alternative(html_content, "text/html")
                    msg.send()
                    evento.correo_enviado = True
                    evento.save()
                    return HttpResponseRedirect('/contrapartes/eventos/')
                except:
                    # NOTE(review): bare except hides mail failures; consider logging.
                    pass
            return HttpResponseRedirect('/contrapartes/eventos/')
    else:
        form = AgendaForm(instance=object)
        formset = FormSetInit(instance=object)
        formset2 = FormSetInit2(instance=object)
    return render(request, template, locals())
#foros
@login_required
def list_foros(request, template='admin/list_foros.html'):
    """List all foros (newest first) plus the current user's own foros.

    `datetime` is imported further down in this module (L17592 area); it is
    bound by module import time, so it is available here at call time.
    """
    current_date = datetime.date.today()
    object_list = Foros.objects.order_by('-id')
    mis_foros = Foros.objects.filter(contraparte = request.user.id).order_by('-id')
    return render(request, template, locals())
@login_required
def eliminar_foro(request, id):
    """Delete the current user's foro identified by ``id``.

    Fix: the original deleted any foro by primary key with no ownership
    check (IDOR). The queryset is now scoped to ``contraparte=request.user``
    (the owner field set by ``agregar_foro``); a non-matching id is a no-op.
    NOTE(review): if staff are meant to delete any foro, add a role check
    instead of relaxing this filter.
    """
    Foros.objects.filter(id=id, contraparte=request.user).delete()
    return HttpResponseRedirect('/contrapartes/foros/')
@login_required
def editar_foro(request, id, template='admin/editar_foro.html'):
    """Edit a foro; a valid POST redirects to the foro list.

    NOTE(review): no ownership check, and saving re-assigns `contraparte`
    to the requester -- editing someone else's foro silently takes it over.
    TODO confirm this is intended.
    """
    object = get_object_or_404(Foros, id=id)
    if request.method == 'POST':
        form = ForosForm(request.POST, request.FILES, instance=object)
        if form.is_valid():
            form_uncommited = form.save()
            form_uncommited.contraparte = request.user
            form_uncommited.save()
            return HttpResponseRedirect('/contrapartes/foros/')
    else:
        form = ForosForm(instance=object)
    return render(request, template, locals())
@login_required
def ver_foro(request, id, template='admin/ver_foro.html'):
    """Show a foro with its aportes and accept a new aporte via POST.

    On a valid POST the aporte is saved and all other registered users are
    e-mailed, then the view redirects back to itself.
    NOTE(review): if the e-mail raises, the bare except swallows it and the
    redirect, so the view re-renders although the aporte was saved
    (double-submit risk). TODO confirm.
    """
    current_date = datetime.date.today()
    discusion = get_object_or_404(Foros, id=id)
    aportes = Aportes.objects.filter(foro = id).order_by('-id')
    if request.method == 'POST':
        form = AporteForm(request.POST)
        if form.is_valid():
            aporte = form.save(commit=False)
            aporte.foro = discusion
            aporte.user = request.user
            aporte.save()
            try:
                # '<EMAIL>' is an anonymization placeholder -- TODO restore.
                subject, from_email = 'Nuevo aporte al foro ' + discusion.nombre, '<EMAIL>'
                text_content = render_to_string('email/aporte.txt', {'aporte': aporte,})
                # NOTE(review): HTML alternative renders the .txt template;
                # presumably 'email/aporte.html' was intended.
                html_content = render_to_string('email/aporte.txt', {'aporte': aporte,})
                list_mail = UserProfile.objects.exclude(user__id = request.user.id).values_list('user__email',flat=True)
                msg = EmailMultiAlternatives(subject, text_content, from_email, list_mail)
                msg.attach_alternative(html_content, "text/html")
                msg.send()
                return redirect('ver-foro', id=discusion.id)
            except:
                pass
    else:
        form = AporteForm()
    return render(request, template, locals())
from interteam.tasks import *
import datetime
@login_required
def agregar_foro(request, template='admin/nuevo_foro.html'):
    """Create a foro owned by the current user.

    If the foro opens today, the announcement is e-mailed immediately;
    otherwise a Celery task (send_mail_foro) is scheduled for the opening
    date. NOTE(review): `eta` is given `foro.apertura`, which the
    comparison with `datetime.date.today()` suggests is a date, not a
    datetime -- confirm Celery handles that as intended.
    """
    if request.method == 'POST':
        form = ForosForm(request.POST, request.FILES)
        if form.is_valid():
            foro = form.save(commit=False)
            foro.contraparte = request.user
            foro.correo_enviado = False
            foro.save()
            hoy = datetime.date.today()
            if foro.apertura == hoy and foro.correo_enviado == False:
                try:
                    # '<EMAIL>' is an anonymization placeholder -- TODO restore.
                    subject, from_email = 'Nuevo foro', '<EMAIL>'
                    text_content = render_to_string('email/foro.txt', {'foro': foro,})
                    # NOTE(review): HTML alternative renders the .txt template;
                    # presumably 'email/foro.html' was intended.
                    html_content = render_to_string('email/foro.txt', {'foro': foro,})
                    list_mail = UserProfile.objects.exclude(user__id = request.user.id).values_list('user__email',flat=True)
                    msg = EmailMultiAlternatives(subject, text_content, from_email, list_mail)
                    msg.attach_alternative(html_content, "text/html")
                    msg.send()
                    foro.correo_enviado = True
                    foro.save()
                    return HttpResponseRedirect('/contrapartes/foros/')
                except:
                    # NOTE(review): bare except -- a mail failure leaves the saved
                    # foro on the form page with no redirect. Consider logging.
                    pass
            else:
                # `id` shadows the builtin; kept for the locals() context.
                id = foro.id
                user = request.user.id
                send_mail_foro.apply_async((id,user),eta=foro.apertura)
                return HttpResponseRedirect('/contrapartes/foros/')
    else:
        form = ForosForm()
    return render(request, template, locals())
#publicaciones
@login_required
def publicaciones_contraparte(request, template='admin/list_publicaciones.html'):
    """List the current user's publicaciones, newest first."""
    object_list = Publicacion.objects.filter(usuario = request.user.id).order_by('-id')
    return render(request, template, locals())
@login_required
def eliminar_publicacion(request, id):
    """Delete the current user's publicacion identified by ``id``.

    Fix: the original deleted any publicacion by primary key with no
    ownership check (IDOR). The queryset is now scoped to
    ``usuario=request.user`` (the owner field used by the listing view);
    a non-matching id is a no-op.
    """
    Publicacion.objects.filter(id=id, usuario=request.user).delete()
    return HttpResponseRedirect('/contrapartes/publicaciones/')
@login_required
def editar_publicacion(request, id, template='admin/editar_publicacion.html'):
    """Edit a publicacion with its inline archivo/audio/video formsets.

    On first publication (publicada and not yet mailed) every other
    registered user is e-mailed; `correo_enviado` prevents re-sending on
    later edits. A valid POST always redirects to the publicacion list.
    """
    # NOTE(review): no ownership check -- any authenticated user can edit any
    # publicacion by id, and saving re-assigns `usuario`. TODO confirm.
    object = get_object_or_404(Publicacion, id=id)
    # Up to 9 archivos, 6 audios and 6 videos per publicacion.
    FormSetInit = inlineformset_factory(Publicacion,ArchivosPublicacion,form=ArchivosPubliForm,extra=9,max_num=9)
    FormSetInit2 = inlineformset_factory(Publicacion,AudiosPublicacion,form=AudiosPubliForm,extra=6,max_num=6)
    FormSetInit3 = inlineformset_factory(Publicacion,VideosPublicacion,form=VideosPubliForm,extra=6,max_num=6)
    if request.method == 'POST':
        form = PublicacionForm(request.POST, request.FILES, instance=object)
        formset = FormSetInit(request.POST,request.FILES, instance=object)
        formset2 = FormSetInit2(request.POST,request.FILES, instance=object)
        formset3 = FormSetInit3(request.POST,request.FILES, instance=object)
        if form.is_valid() and formset.is_valid() and formset2.is_valid() and formset3.is_valid():
            form_uncommited = form.save()
            form_uncommited.usuario = request.user
            form_uncommited.save()
            # Re-attach each inline row to the publicacion.
            instances = formset.save(commit=False)
            for instance in instances:
                instance.publicacion = form_uncommited
                instance.save()
            formset.save_m2m()
            instances2 = formset2.save(commit=False)
            for instance2 in instances2:
                instance2.publicacion = form_uncommited
                instance2.save()
            formset2.save_m2m()
            instances3 = formset3.save(commit=False)
            for instance3 in instances3:
                instance3.publicacion = form_uncommited
                instance3.save()
            formset3.save_m2m()
            # Send the announcement only once: published and not yet mailed.
            if form_uncommited.publicada == True and form_uncommited.correo_enviado == False:
                try:
                    # '<EMAIL>' is an anonymization placeholder -- TODO restore.
                    subject, from_email = 'Nueva publicación', '<EMAIL>'
                    text_content = render_to_string('email/publicacion.txt', {'publi': form_uncommited,})
                    # NOTE(review): HTML alternative renders the .txt template;
                    # presumably 'email/publicacion.html' was intended.
                    html_content = render_to_string('email/publicacion.txt', {'publi': form_uncommited,})
                    list_mail = UserProfile.objects.exclude(user__id = request.user.id).values_list('user__email',flat=True)
                    msg = EmailMultiAlternatives(subject, text_content, from_email, list_mail)
                    msg.attach_alternative(html_content, "text/html")
                    msg.send()
                    form_uncommited.correo_enviado = True
                    form_uncommited.save()
                    return HttpResponseRedirect('/contrapartes/publicaciones/')
                except:
                    # NOTE(review): bare except hides mail failures; consider logging.
                    pass
            return HttpResponseRedirect('/contrapartes/publicaciones/')
    else:
        form = PublicacionForm(instance=object)
        formset = FormSetInit(instance=object)
        formset2 = FormSetInit2(instance=object)
        formset3 = FormSetInit3(instance=object)
    return render(request, template, locals())
@login_required
def agregar_publicacion(request, template='admin/nueva_publicacion.html'):
    """Create a Publicacion with inline file, audio and video attachments.

    On a valid POST the publication is stored for the current user; when it
    is flagged ``publicada`` a one-time notification e-mail goes to every
    other registered user (guarded by ``correo_enviado``).  Mail delivery is
    best-effort: on failure the saved publication is kept and the page is
    re-rendered instead of redirecting.
    """
    FormSetInit = inlineformset_factory(Publicacion, ArchivosPublicacion, form=ArchivosPubliForm, extra=9, max_num=9)
    FormSetInit2 = inlineformset_factory(Publicacion, AudiosPublicacion, form=AudiosPubliForm, extra=6, max_num=6)
    FormSetInit3 = inlineformset_factory(Publicacion, VideosPublicacion, form=VideosPubliForm, extra=6, max_num=6)
    if request.method == 'POST':
        form = PublicacionForm(request.POST, request.FILES)
        formset = FormSetInit(request.POST, request.FILES)
        formset2 = FormSetInit2(request.POST, request.FILES)
        formset3 = FormSetInit3(request.POST, request.FILES)
        if form.is_valid() and formset.is_valid() and formset2.is_valid() and formset3.is_valid():
            publi = form.save(commit=False)
            publi.usuario = request.user
            publi.correo_enviado = False
            publi.save()
            # Attach every inline object (files, audios, videos) to the
            # freshly saved publication; one loop replaces three copies.
            for fs in (formset, formset2, formset3):
                for inline_obj in fs.save(commit=False):
                    inline_obj.publicacion = publi
                    inline_obj.save()
                fs.save_m2m()
            if publi.publicada and not publi.correo_enviado:
                try:
                    subject, from_email = 'Nueva publicación', '<EMAIL>'
                    # NOTE(review): the HTML alternative renders the same
                    # .txt template — confirm whether an .html one exists.
                    text_content = render_to_string('email/publicacion.txt', {'publi': publi,})
                    html_content = render_to_string('email/publicacion.txt', {'publi': publi,})
                    list_mail = UserProfile.objects.exclude(user__id=request.user.id).values_list('user__email', flat=True)
                    msg = EmailMultiAlternatives(subject, text_content, from_email, list_mail)
                    msg.attach_alternative(html_content, "text/html")
                    msg.send()
                    publi.correo_enviado = True
                    publi.save()
                    return HttpResponseRedirect('/contrapartes/publicaciones/')
                except Exception:
                    # Was a bare ``except:``; keep best-effort delivery but
                    # stop swallowing SystemExit/KeyboardInterrupt.
                    pass
    else:
        form = PublicacionForm()
        formset = FormSetInit()
        formset2 = FormSetInit2()
        formset3 = FormSetInit3()
    return render(request, template, locals())
@login_required
def editar_aporte(request, id, template='admin/editar_aporte.html'):
    """Edit an existing Aportes entry; authorship is reasserted on save."""
    object = get_object_or_404(Aportes, id=id)
    if request.method != 'POST':
        # Plain GET: show the bound edit form.
        form = AporteForm(instance=object)
        return render(request, template, locals())
    form = AporteForm(request.POST, request.FILES, instance=object)
    if form.is_valid():
        form_uncommited = form.save()
        form_uncommited.user = request.user
        form_uncommited.save()
        return redirect('ver-foro', id=object.foro.id)
    return render(request, template, locals())
@login_required
def eliminar_aporte(request, id):
    """Delete an Aportes entry and return to its parent forum.

    Uses get_object_or_404 (as the other views here do) so a missing id
    yields a 404 instead of an unhandled ``Aportes.DoesNotExist`` (500).
    """
    aporte = get_object_or_404(Aportes, id=id)
    foro = aporte.foro.id  # remember the forum before the row disappears
    aporte.delete()
    return redirect('ver-foro', id=foro)
@login_required
def agregar_comentario(request, id, template='admin/comentario.html'):
    """Attach a new comment to the Aportes entry *id*.

    After saving, a best-effort notification e-mail goes to every other
    user; a mail failure keeps the saved comment and re-renders the form
    instead of redirecting.
    """
    object = get_object_or_404(Aportes, id=id)
    if request.method == 'POST':
        form = ComentarioForm(request.POST, request.FILES)
        if form.is_valid():
            form_uncommited = form.save(commit=False)
            form_uncommited.aporte = object
            form_uncommited.usuario = request.user
            form_uncommited.save()
            try:
                subject, from_email = 'Nuevo comentario al foro ' + object.foro.nombre, '<EMAIL>'
                text_content = render_to_string('email/comentario.txt', {'object': form_uncommited,})
                html_content = render_to_string('email/comentario.txt', {'object': form_uncommited,})
                list_mail = UserProfile.objects.exclude(user__id=request.user.id).values_list('user__email', flat=True)
                msg = EmailMultiAlternatives(subject, text_content, from_email, list_mail)
                msg.attach_alternative(html_content, "text/html")
                msg.send()
                enviado = 1
                return redirect('ver-foro', id=object.foro.id)
            except Exception:
                # Was a bare ``except:``; keep best-effort delivery but
                # stop swallowing SystemExit/KeyboardInterrupt.
                pass
    else:
        form = ComentarioForm()
    return render(request, template, locals())
@login_required
def editar_comentario(request, id, template='admin/comentario.html'):
    """Edit an existing comment; its parent aporte and author are reasserted."""
    object = get_object_or_404(Comentarios, id=id)
    if request.method != 'POST':
        form = ComentarioForm(instance=object)
        return render(request, template, locals())
    form = ComentarioForm(request.POST, request.FILES, instance=object)
    if form.is_valid():
        form_uncommited = form.save()
        form_uncommited.aporte = object.aporte
        form_uncommited.usuario = request.user
        form_uncommited.save()
        return redirect('ver-foro', id=object.aporte.foro.id)
    return render(request, template, locals())
@login_required
def eliminar_comentario(request, id):
    """Delete a comment and go back to its forum.

    get_object_or_404 turns a missing id into a 404 instead of letting
    ``Comentarios.DoesNotExist`` bubble up as a 500.
    """
    comentario = get_object_or_404(Comentarios, id=id)
    foro = comentario.aporte.foro.id  # resolve before the row is deleted
    comentario.delete()
    return redirect('ver-foro', id=foro)
#galerias
@login_required
def galerias_contraparte(request, template='admin/list_galerias.html'):
    """List the current user's image and video galleries, newest first."""
    uid = request.user.id
    object_list_img = GaleriaImagenes.objects.filter(user=uid).order_by('-id')
    object_list_vid = GaleriaVideos.objects.filter(user=uid).order_by('-id')
    return render(request, template, locals())
@login_required
def eliminar_galeria_img(request, id):
    """Delete the image gallery with the given id (silent no-op if absent)."""
    # The old unused ``img`` local only held the delete() return value.
    GaleriaImagenes.objects.filter(id=id).delete()
    return HttpResponseRedirect('/contrapartes/galerias/')
@login_required
def agregar_galeria_img(request, template='admin/galeria_img.html'):
    """Create a GaleriaImagenes for the current user with up to 12 images."""
    FormSetInit = inlineformset_factory(GaleriaImagenes, Imagenes, form=ImagenesForm, extra=12, max_num=12)
    if request.method != 'POST':
        form = GaleriaImagenesForm()
        formset = FormSetInit()
        return render(request, template, locals())
    form = GaleriaImagenesForm(request.POST, request.FILES)
    formset = FormSetInit(request.POST, request.FILES)
    if form.is_valid() and formset.is_valid():
        galeria = form.save(commit=False)
        galeria.user = request.user
        galeria.save()
        # Point every inline image at the gallery just created.
        for imagen in formset.save(commit=False):
            imagen.imagenes = galeria
            imagen.save()
        formset.save_m2m()
        return HttpResponseRedirect('/contrapartes/galerias/')
    return render(request, template, locals())
@login_required
def editar_galeria_img(request, id, template='admin/galeria_img.html'):
    """Edit an existing image gallery together with its inline images."""
    object = get_object_or_404(GaleriaImagenes, id=id)
    FormSetInit = inlineformset_factory(GaleriaImagenes, Imagenes, form=ImagenesForm, extra=12, max_num=12)
    if request.method != 'POST':
        form = GaleriaImagenesForm(instance=object)
        formset = FormSetInit(instance=object)
        return render(request, template, locals())
    form = GaleriaImagenesForm(data=request.POST, instance=object, files=request.FILES)
    formset = FormSetInit(request.POST, request.FILES, instance=object)
    if form.is_valid() and formset.is_valid():
        form_uncommited = form.save(commit=False)
        form_uncommited.save()
        formset.save()
        return HttpResponseRedirect('/contrapartes/galerias/')
    return render(request, template, locals())
@login_required
def agregar_galeria_vid(request, template='admin/nueva_galeria_vid.html'):
    """Register a new video gallery owned by the current user."""
    if request.method != 'POST':
        form = GaleriaVideosForm()
        return render(request, template, locals())
    form = GaleriaVideosForm(request.POST, request.FILES)
    if form.is_valid():
        publi = form.save(commit=False)
        publi.user = request.user
        publi.save()
        return HttpResponseRedirect('/contrapartes/galerias/')
    return render(request, template, locals())
@login_required
def eliminar_video(request, id):
    """Delete the video gallery with the given id (silent no-op if absent)."""
    # The old unused ``img`` local only held the delete() return value.
    GaleriaVideos.objects.filter(id=id).delete()
    return HttpResponseRedirect('/contrapartes/galerias/')
@login_required
def editar_video(request, id, template='admin/nueva_galeria_vid.html'):
    """Edit a video-gallery entry; ownership is reasserted on save."""
    object = get_object_or_404(GaleriaVideos, id=id)
    if request.method != 'POST':
        form = GaleriaVideosForm(instance=object)
        return render(request, template, locals())
    form = GaleriaVideosForm(request.POST, request.FILES, instance=object)
    if form.is_valid():
        form_uncommited = form.save()
        form_uncommited.user = request.user
        form_uncommited.save()
        return HttpResponseRedirect('/contrapartes/galerias/')
    return render(request, template, locals())
@login_required
def mensajes(request, template='admin/mensajes.html'):
    """Compose a message to other users and e-mail each recipient a copy.

    ``enviado`` is exposed to the template (1 = mail sent, 0 = not sent).
    It is now initialised up front, so an invalid form or a mail failure
    can no longer leave it undefined in the rendering context.
    """
    enviado = 0
    if request.method == 'POST':
        form = MensajeForm(request.POST, request.FILES)
        # Recipients never include the sender.
        form.fields['user'].queryset = User.objects.exclude(id=request.user.id)
        if form.is_valid():
            form_uncommited = form.save(commit=False)
            form_uncommited.usuario = request.user
            form_uncommited.save()
            form.save_m2m()  # persist the recipient m2m after commit=False
            try:
                subject, from_email = 'Nuevo mensaje ', '<EMAIL>'
                text_content = render_to_string('email/mensaje.txt', {'object': form_uncommited,})
                html_content = render_to_string('email/mensaje.txt', {'object': form_uncommited,})
                list_mail = [user.email for user in form_uncommited.user.all()]
                msg = EmailMultiAlternatives(subject, text_content, from_email, list_mail)
                msg.attach_alternative(html_content, "text/html")
                msg.send()
                enviado = 1
            except Exception:
                # Was a bare ``except:``; keep best-effort delivery but
                # stop swallowing SystemExit/KeyboardInterrupt.
                pass
    else:
        form = MensajeForm()
        form.fields['user'].queryset = User.objects.exclude(id=request.user.id)
    return render(request, template, locals())
@login_required
def estadisticas(request, template='admin/estadisticas.html'):
    """Per-organisation activity counters (notes, events, forums,
    contributions, comments, galleries, publications) for the stats page."""
    dic_notas = {}
    dic_foros = {}
    dic_aportes = {}
    dic_coment = {}
    list_resumen = []
    for org in Contraparte.objects.all():
        siglas = org.siglas
        conteo = Notas.objects.filter(user__userprofile__contraparte=org).count()
        conteo_foros = Foros.objects.filter(contraparte__userprofile__contraparte=org).count()
        conteo_aportes = Aportes.objects.filter(user__userprofile__contraparte=org).count()
        conteo_coment = Comentarios.objects.filter(usuario__userprofile__contraparte=org).count()
        conteo_eventos = Agendas.objects.filter(user__userprofile__contraparte=org).count()
        conteo_img = GaleriaImagenes.objects.filter(user__userprofile__contraparte=org).count()
        conteo_vid = GaleriaVideos.objects.filter(user__userprofile__contraparte=org).count()
        conteo_publi = Publicacion.objects.filter(usuario__userprofile__contraparte=org).count()
        dic_notas[siglas] = conteo
        dic_foros[siglas] = conteo_foros
        dic_aportes[siglas] = conteo_aportes
        dic_coment[siglas] = conteo_coment
        # One summary row per organisation for the table view.
        list_resumen.append((siglas, conteo, conteo_eventos, conteo_foros,
                             conteo_aportes, conteo_coment, conteo_img,
                             conteo_vid, conteo_publi))
    return render(request, template, locals())
@login_required
def catalogo(request, template='admin/lista_catalogo.html'):
    """List the current user's products, newest first."""
    object_list = Producto.objects.filter(user=request.user.id).order_by('-id')
    return render(request, template, locals())
@login_required
def eliminar_producto(request, id):
    """Delete the product with the given id (silent no-op if absent)."""
    # The old unused ``prod`` local only held the delete() return value.
    Producto.objects.filter(id=id).delete()
    return HttpResponseRedirect('/contrapartes/catalogo/')
@login_required
def agregar_producto(request, template='admin/agregar_producto.html'):
FormSetInit = inlineformset_factory(Producto, Propuesta_valor, form=Propuesta_valorForm,extra=1)
FormSetInit2 = inlineformset_factory(Producto, FotosProducto, form=FotosProductoForm,extra=1,max_num=4)
FormSetInit3 = inlineformset_factory(Producto, ArchivosProducto, form=ArchivosProductoForm,extra=1)
FormSetInit4 = inlineformset_factory(Producto, RedesProducto, form=RedesFormProducto,extra=1)
if request.method == 'POST':
form = ProductoForm(request.POST, request.FILES)
formset = FormSetInit(request.POST)
formset2 = FormSetInit2(request.POST,request.FILES)
formset3 = FormSetInit3(request.POST,request.FILES)
formset4 = FormSetInit4(request.POST,request.FILES)
if form.is_valid() and formset.is_valid() and formset2.is_valid() and | |
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
from tkinter import scrolledtext
from tkinter import messagebox
from PIL import ImageTk, Image
import requests
import pickle
import numpy as np
import base64
from io import BytesIO
import json
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import os
matplotlib.use("TkAgg")
# ---------------------------Login Screen--------------------------------
def login_window():
    """Show the login window and route a valid username to the main UI.

    Both buttons POST the typed username to the backend; the only
    difference is the endpoint and the dialog shown on failure, so the
    shared flow lives in one helper instead of two near-identical inner
    functions.  The requests now build on the ``url`` base that was
    previously defined but never used.
    """
    global login_screen
    global username
    global url
    url = "http://127.0.0.1:5000"

    def attempt(endpoint, success_msg, on_failure):
        # POST the username; on 200 hide the login window and open the UI.
        r = requests.post(url + endpoint, json=username.get())
        if r.status_code == 200:
            print(success_msg.format(username.get()))
            login_screen.withdraw()
            data_interface_window(username.get())
        else:
            on_failure()

    # Login command
    def validateLogin():
        attempt("/api/login", "{} is logged in", username_not_recognized)

    # New user command
    def validateNewUser():
        attempt("/api/new_user", "{} is a new user", username_already_exists)

    # Screen layout
    login_screen = Tk()
    login_screen.title("Login")
    login_screen.geometry('300x200')
    login_screen.grid_columnconfigure(0, weight=1)
    login_screen.grid_columnconfigure(1, weight=1)
    login_screen.grid_rowconfigure(0, weight=3)
    login_screen.grid_rowconfigure(3, weight=1)
    login_screen.grid_rowconfigure(5, weight=3)
    usernameLabel = Label(login_screen, text="Enter Username:")
    usernameLabel.grid(row=1, column=0, columnspan=2)
    username = StringVar()
    usernameEntry = Entry(login_screen, textvariable=username)
    usernameEntry.grid(row=2, column=0, columnspan=2)
    loginButton = Button(login_screen, text="Login", command=validateLogin)
    loginButton.grid(row=4, column=0)
    newButton = Button(login_screen, text="New User", command=validateNewUser)
    newButton.grid(row=4, column=1)
    login_screen.mainloop()
    return
# -------------------------Invalid Login Screen-----------------------------
def username_not_recognized():
    """Popup telling the user the login name is unknown."""
    # Renamed from ``exit`` so the builtin is no longer shadowed.
    def close():
        invalid_screen.destroy()
        return
    # Screen layout
    invalid_screen = Toplevel(login_screen)
    invalid_screen.title("Invalid")
    invalid_screen.geometry('200x100')
    Label(invalid_screen, text="Username not recognized.").pack()
    Button(invalid_screen, text="Ok", command=close).pack()
# -----------------------Invalid Registration Screen---------------------------
def username_already_exists():
    """Popup telling the user the requested new username is taken."""
    # Renamed from ``exit`` so the builtin is no longer shadowed.
    def close():
        invalid_screen.destroy()
        return
    # Screen layout
    invalid_screen = Toplevel(login_screen)
    invalid_screen.title("Invalid")
    invalid_screen.geometry('200x100')
    Label(invalid_screen, text="Username already exists.").pack()
    Button(invalid_screen, text="Ok", command=close).pack()
# ------------------------------Main UI Window---------------------------------
def data_interface_window(username='NA'):
# Set-up UI window
global window
window = Toplevel(login_screen)
window.title("{}'s Image Processing " # Sets window title
"Application.".format(username))
window.geometry('500x500') # Sets window size
# Create tab control
tab_control = ttk.Notebook(window)
tab_control.pack(expand=1, fill="both")
upload_tab = ttk.Frame(tab_control)
display_tab = ttk.Frame(tab_control)
download_tab = ttk.Frame(tab_control)
metrics_tab = ttk.Frame(tab_control)
# Label tabs
tab_control.add(upload_tab, text="Upload")
tab_control.add(display_tab, text="Display")
tab_control.add(download_tab, text="Download")
tab_control.add(metrics_tab, text="User Metrics")
# ----------------------------Upload tab--------------------------------
def sort_files(new_files, out_files):
# Returns sorted list of all elements with no repeated elements
for filepath in new_files:
if filepath not in all_files:
out_files.append(filepath)
# Returns all files wanting to read as tuple of string paths
return sorted(out_files)
# Appends only new files selected to display window
def display_files(root_box, files):
# Deletes current box and displays new files in alphabetical order
root_box.delete('1.0', END)
for filename in sorted(files):
head, tail = os.path.split(filename)
root_box.insert(END, tail+'\n')
return
# Function to choose files wanted to open
def choose_files(out_files):
# Open file selection box
ftypes = [('.png (Portable Graphics Format)', '*.png'),
('.jpeg (Joint Photographic Experts Group)', '*jpeg'),
('.tiff (Tagged Image File Format)', '*.tiff'),
('.zip (Compressed File Format)', '*.zip')]
new_files = filedialog.askopenfilenames(filetypes=ftypes)
# Sort for non-repeated list of files
out_files = sort_files(new_files, out_files) # Sorts files.
# Display all files selected
display_files(file_display, out_files) # Displays image names
# Allow selection of upload button as files are selected
if out_files:
upload_btn.config(state='normal',
bg='white',
fg='black')
return out_files
# Reset all files chosen upon upload
def reset_selection(files):
removable_files = tuple(files)
for filepath in removable_files:
files.remove(filepath)
# Function if select upload files button
def upload_files(files, processing):
# Submit post request to validate files to upload
# (including processing) and presence in dictionary
new_url = url + "/api/validate_images"
validate_dict = {"username": username,
"filepaths": files,
"processing": processing.get()}
r = requests.post(new_url, json=validate_dict)
out_dict = r.json()
if r.status_code != 200:
return
# Parse through dictionary to isolate files to upload.
present_images = out_dict["present"]
new_images = out_dict["not present"]
# Call function to display top level tab of files present and those
# uploading.
# If Continue button, move forward and delete display/reset file
# selection/disable upload. If not, simply return.
flag = False
for values in present_images.values():
if len(values) > 0:
flag = True
if flag:
images_already_present(present_images)
# For filepath not present - submit post request of files.
new_url = url + "/api/upload_images"
store_dict = {"username": username,
"images": new_images}
r = requests.post(new_url, json=store_dict)
status = r.json()
print(status)
# Reset GUI file download display and file selection
file_display.delete('1.0', END)
reset_selection(files)
upload_btn.config(state='disabled',
bg='grey',
fg='black')
return
# Choose File Section
all_files = [] # Stores all filepaths of files wanting to upload.
file_display = scrolledtext.ScrolledText(upload_tab, # Display files
width=50,
height=5)
file_display.grid(column=1, row=1) # Location to display files
file_btn = Button(upload_tab, # Choose files button
text="Choose Files",
bg="white",
fg="black",
command=lambda: choose_files(all_files))
file_btn.grid(column=2, row=1) # Choose file button location
# Choose Processing Type Section
processing_type = StringVar() # Variable for processing type
processing_type.set('_histogramEqualized')
hist_process = Radiobutton(upload_tab,
text='Histogram Equalization',
variable=processing_type,
value='_histogramEqualized')
hist_process.grid(column=1,
row=2,
sticky=W,
pady=5,
padx=100)
cont_stretch = Radiobutton(upload_tab,
text='Contrast Stretching',
variable=processing_type,
value='_contrastStretched')
cont_stretch.grid(column=1,
row=3,
sticky=W,
pady=5,
padx=100)
log_comp = Radiobutton(upload_tab,
text='Log Compression',
variable=processing_type,
value='_logCompressed')
log_comp.grid(column=1,
row=4,
sticky=W,
pady=5,
padx=100)
inv_img = Radiobutton(upload_tab,
text='Invert Image',
variable=processing_type,
value='_invertedImage')
inv_img.grid(column=1,
row=5,
sticky=W,
pady=5,
padx=100)
# Upload Selection Section
upload_btn = Button(upload_tab,
text="Upload Files",
bg="grey", # Set to grey when disabled
fg="black",
command=lambda: upload_files(all_files,
processing_type),
state="disabled")
upload_btn.grid(column=1, # Choose file button location
row=6,
sticky=W,
pady=5,
padx=100)
# ----------------------------Display tab---------------------------------
    def left_display():
        """Fetch image 1 plus its metrics/histograms and render them in the
        left column (column 0) of the display tab."""
        # A combobox choice is required before anything can be fetched.
        if image_name_1.get() == '':
            messagebox.showerror("Error", "Please select an option first")
            return
        # NOTE(review): .strip("") strips nothing — probably meant .strip().
        fetch_image_url = "http://127.0.0.1:5000/api/fetch_image/"\
            + username + "/" + image_name_1.get().strip("")
        print(fetch_image_url)
        image_file = requests.get(fetch_image_url)
        image_file = image_file.json()
        fetch_metrics_url = "http://127.0.0.1:5000/api/get_image_metrics/"\
            + username + "/" + image_name_1.get()
        print(fetch_metrics_url)
        image_metrics = requests.get(fetch_metrics_url)
        image_metrics = image_metrics.json()
        # Metrics arrive indexed as [cpu_time, size string, timestamp]
        # (see the reshape below, which parses index 1 as "WxHxC").
        cpu = ttk.Label(display_tab, text="CPU Time: "
                        "{}".format(image_metrics[0]))
        size = ttk.Label(display_tab, text="Size: "
                         "{}".format(image_metrics[1]))
        timestamp = ttk.Label(display_tab, text="Timestamp: "
                              "{}".format(image_metrics[2]))
        timestamp.grid(column=0, row=5, pady=5)
        cpu.grid(column=0, row=6, pady=5)
        size.grid(column=0, row=7, pady=5)
        size_format = image_metrics[1]
        size_list = size_format.split("x")
        image_file = np.asarray(image_file)
        image_file = image_file.astype(np.uint8)
        # Rebuild the (height, width, channels) array from the flat JSON list.
        reshape_arg = (int(size_list[1]), int(size_list[0]), int(size_list[2]))
        image_file = image_file.reshape(reshape_arg)
        histo_url = "http://127.0.0.1:5000/api/histo/"\
            + username + "/" + image_name_1.get().strip("")
        histo = requests.get(histo_url)
        histo = histo.json()
        red = histo[0]
        green = histo[1]
        blue = histo[2]
        # One bar chart per colour channel, stacked in rows 8-10.
        figure = Figure(figsize=(0.5, 0.5), dpi=100)
        plot = figure.add_subplot(1, 1, 1)
        plot.bar(np.arange(0, len(red)), red)
        canvas = FigureCanvasTkAgg(figure, display_tab)
        canvas.get_tk_widget().grid(row=8, column=0)
        figure2 = Figure(figsize=(0.5, 0.5), dpi=100)
        plot2 = figure2.add_subplot(1, 1, 1)
        plot2.bar(np.arange(0, len(green)), green)
        canvas = FigureCanvasTkAgg(figure2, display_tab)
        canvas.get_tk_widget().grid(row=9, column=0)
        figure3 = Figure(figsize=(0.5, 0.5), dpi=100)
        plot3 = figure3.add_subplot(1, 1, 1)
        plot3.bar(np.arange(0, len(blue)), blue)
        canvas = FigureCanvasTkAgg(figure3, display_tab)
        canvas.get_tk_widget().grid(row=10, column=0)
        # NOTE(review): Image.ANTIALIAS is removed in Pillow >= 10
        # (Image.LANCZOS is the modern spelling) — confirm the pinned Pillow.
        image_display = Image.fromarray(image_file, 'RGB')
        image_display = image_display.resize((100, 100), Image.ANTIALIAS)
        render = ImageTk.PhotoImage(image_display)
        # Keep a reference on the widget so Tk does not garbage-collect it.
        img = Label(display_tab, image=render)
        img.image = render
        img.grid(column=0, row=4, pady=5)
        return
    def right_display():
        """Fetch image 2 plus its metrics/histograms and render them in the
        right column (column 2) of the display tab."""
        # A combobox choice is required before anything can be fetched.
        if image_name_2.get() == '':
            messagebox.showerror("Error", "Please select an option first")
            return
        fetch_image_url = "http://127.0.0.1:5000/api/fetch_image/"\
            + username + "/" + image_name_2.get()
        image_file = requests.get(fetch_image_url)
        image_file = image_file.json()
        fetch_metrics_url = "http://127.0.0.1:5000/api/get_image_metrics/"\
            + username + "/" + image_name_2.get()
        image_metrics = requests.get(fetch_metrics_url)
        image_metrics = image_metrics.json()
        # Metrics arrive indexed as [cpu_time, size string, timestamp]
        # (see the reshape below, which parses index 1 as "WxHxC").
        cpu = ttk.Label(display_tab, text="CPU Time: "
                        "{}".format(image_metrics[0]))
        size = ttk.Label(display_tab, text="Size: "
                         "{}".format(image_metrics[1]))
        timestamp = ttk.Label(display_tab, text="Timestamp: "
                              "{}".format(image_metrics[2]))
        timestamp.grid(column=2, row=5, pady=5)
        cpu.grid(column=2, row=6, pady=5)
        size.grid(column=2, row=7, pady=5)
        size_format = image_metrics[1]
        size_list = size_format.split("x")
        image_file = np.asarray(image_file)
        image_file = image_file.astype(np.uint8)
        # Rebuild the (height, width, channels) array from the flat JSON list.
        reshape_arg = (int(size_list[1]), int(size_list[0]), int(size_list[2]))
        image_file = image_file.reshape(reshape_arg)
        # NOTE(review): .strip("") strips nothing — probably meant .strip().
        histo_url = "http://127.0.0.1:5000/api/histo/"\
            + username + "/" + image_name_2.get().strip("")
        histo = requests.get(histo_url)
        histo = histo.json()
        red = histo[0]
        green = histo[1]
        blue = histo[2]
        # One bar chart per colour channel, stacked in rows 8-10.
        figure = Figure(figsize=(0.5, 0.5), dpi=100)
        plot = figure.add_subplot(1, 1, 1)
        plot.bar(np.arange(0, len(red)), red)
        canvas = FigureCanvasTkAgg(figure, display_tab)
        canvas.get_tk_widget().grid(row=8, column=2)
        figure2 = Figure(figsize=(0.5, 0.5), dpi=100)
        plot2 = figure2.add_subplot(1, 1, 1)
        plot2.bar(np.arange(0, len(green)), green)
        canvas = FigureCanvasTkAgg(figure2, display_tab)
        canvas.get_tk_widget().grid(row=9, column=2)
        figure3 = Figure(figsize=(0.5, 0.5), dpi=100)
        plot3 = figure3.add_subplot(1, 1, 1)
        plot3.bar(np.arange(0, len(blue)), blue)
        canvas = FigureCanvasTkAgg(figure3, display_tab)
        canvas.get_tk_widget().grid(row=10, column=2)
        # NOTE(review): Image.ANTIALIAS is removed in Pillow >= 10
        # (Image.LANCZOS is the modern spelling) — confirm the pinned Pillow.
        image_display = Image.fromarray(image_file, 'RGB')
        image_display = image_display.resize((100, 100), Image.ANTIALIAS)
        render = ImageTk.PhotoImage(image_display)
        # Keep a reference on the widget so Tk does not garbage-collect it.
        img = Label(display_tab, image=render)
        img.image = render
        img.grid(column=2, row=4, pady=5)
        return
def refresh_list2():
get_image_list_url = "http://127.0.0.1:5000/api/get_all_images/"\
+ username
image_list = requests.get(get_image_list_url)
image_list = image_list.json()
display_sel_2['values'] = image_list
return
def refresh_list1():
get_image_list_url = "http://127.0.0.1:5000/api/get_all_images/"\
+ username
image_list = requests.get(get_image_list_url)
image_list = image_list.json()
# image_list = image_list.strip('][').split(',')
display_sel_1['values'] = image_list
return
ttk.Separator(display_tab, orient=VERTICAL).grid(column=1, row=1,
rowspan=10, sticky='ns')
choice_1 = ttk.Label(display_tab, text="Choose picture 1 from below")
choice_1.grid(column=0, row=1, padx=50, pady=5)
choice_2 = ttk.Label(display_tab, text="Choose picture 2 from below")
choice_2.grid(column=2, row=1, padx=50, pady=5)
image_name_1 = StringVar()
display_sel_1 = ttk.Combobox(display_tab, textvariable=image_name_1,
postcommand=refresh_list1)
display_sel_1.grid(column=0, row=2, pady=5)
# display_sel_1['values'] = image_list
display_sel_1.state(['readonly'])
image_name_2 = StringVar()
display_sel_2 | |
<reponame>pyansys/pyaedt
# Setup paths for module imports
from __future__ import division # noreorder
import math
from _unittest.conftest import BasisTest
from pyaedt.application.Variables import decompose_variable_value
from pyaedt.application.Variables import Variable
from pyaedt.generic.general_methods import isclose
from pyaedt.modeler.GeometryOperators import GeometryOperators
from pyaedt import MaxwellCircuit
# Import required modules
try:
import pytest # noqa: F401
except ImportError:
import _unittest_ironpython.conf_unittest as pytest # noqa: F401
class TestClass(BasisTest, object):
    def setup_class(self):
        # Create the shared AEDT test application once for the whole class.
        BasisTest.my_setup(self)
        self.aedtapp = BasisTest.add_app(self, "Test_09")
    def teardown_class(self):
        # Release the AEDT application/project opened in setup_class.
        BasisTest.my_teardown(self)
def test_01_set_globals(self):
var = self.aedtapp.variable_manager
self.aedtapp["$Test_Global1"] = "5rad"
self.aedtapp["$Test_Global2"] = -1.0
self.aedtapp["$Test_Global3"] = "0"
self.aedtapp["$Test_Global4"] = "$Test_Global2*$Test_Global1"
independent = self.aedtapp._variable_manager.independent_variable_names
dependent = self.aedtapp._variable_manager.dependent_variable_names
val = var["$Test_Global4"]
assert val.numeric_value == -5.0
assert "$Test_Global1" in independent
assert "$Test_Global2" in independent
assert "$Test_Global3" in independent
assert "$Test_Global4" in dependent
pass
def test_01_set_var_simple(self):
var = self.aedtapp.variable_manager
self.aedtapp["Var1"] = "1rpm"
var_1 = self.aedtapp["Var1"]
var_2 = var["Var1"].expression
assert var_1 == var_2
assert isclose(var["Var1"].numeric_value, 1.0)
pass
def test_02_test_formula(self):
self.aedtapp["Var1"] = 3
self.aedtapp["Var2"] = "12deg"
self.aedtapp["Var3"] = "Var1 * Var2"
self.aedtapp["$PrjVar1"] = "2*pi"
self.aedtapp["$PrjVar2"] = 45
self.aedtapp["$PrjVar3"] = "sqrt(34 * $PrjVar2/$PrjVar1 )"
v = self.aedtapp.variable_manager
for var_name in v.variable_names:
print("{} = {}".format(var_name, self.aedtapp[var_name]))
pass
tol = 1e-9
c2pi = math.pi * 2.0
assert abs(v["$PrjVar1"].numeric_value - c2pi) < tol
assert abs(v["$PrjVar3"].numeric_value - math.sqrt(34 * 45.0 / c2pi)) < tol
assert abs(v["Var3"].numeric_value - 3.0 * 12.0) < tol
assert v["Var3"].units == "deg"
def test_03_test_evaluated_value(self):
self.aedtapp["p1"] = "10mm"
self.aedtapp["p2"] = "20mm"
self.aedtapp["p3"] = "p1 * p2"
v = self.aedtapp.variable_manager
eval_p3_nom = v._app.get_evaluated_value("p3")
assert isclose(eval_p3_nom, 0.0002)
v_app = self.aedtapp.variable_manager
assert v_app["p1"].read_only == False
v_app["p1"].read_only = True
assert v_app["p1"].read_only == True
assert v_app["p1"].hidden == False
v_app["p1"].hidden = True
assert v_app["p1"].hidden == True
assert v_app["p2"].description == ""
v_app["p2"].description = "myvar"
assert v_app["p2"].description == "myvar"
assert v_app["p2"].expression == "20mm"
v_app["p2"].expression = "5rad"
assert v_app["p2"].expression == "5rad"
def test_04_set_variable(self):
assert self.aedtapp.variable_manager.set_variable("p1", expression="10mm")
assert self.aedtapp["p1"] == "10mm"
assert self.aedtapp.variable_manager.set_variable("p1", expression="20mm", overwrite=False)
assert self.aedtapp["p1"] == "10mm"
assert self.aedtapp.variable_manager.set_variable("p1", expression="30mm")
assert self.aedtapp["p1"] == "30mm"
assert self.aedtapp.variable_manager.set_variable(
variable_name="p2",
expression="10mm",
readonly=True,
hidden=True,
description="This is a description of this variable",
)
assert self.aedtapp.variable_manager.set_variable("$p1", expression="10mm")
def test_05_variable_class(self):
v = Variable("4mm")
num_value = v.numeric_value
assert num_value == 4.0
v = v.rescale_to("meter")
test = v.evaluated_value
assert v.numeric_value == 0.004
v = Variable("100cel")
v.rescale_to("fah")
assert v.numeric_value == 212.0
pass
def test_06_multiplication(self):
v1 = Variable("10mm")
v2 = Variable(3)
v3 = Variable("3mA")
v4 = Variable("40V")
v5 = Variable("100NewtonMeter")
v6 = Variable("1000rpm")
tol = 1e-4
result_1 = v1 * v2
result_2 = v2 * v3
result_3 = v3 * v4
result_4 = v4 * v3
result_5 = v4 * 24.0 * v3
result_6 = v5 * v6
result_7 = v6 * v5
result_8 = (v5 * v6).rescale_to("kW")
assert result_1.numeric_value == 30.0
assert result_1.unit_system == "Length"
assert result_2.numeric_value == 9.0
assert result_2.units == "mA"
assert result_2.unit_system == "Current"
assert result_3.numeric_value == 0.12
assert result_3.units == "W"
assert result_3.unit_system == "Power"
assert result_4.numeric_value == 0.12
assert result_4.units == "W"
assert result_4.unit_system == "Power"
assert result_5.numeric_value == 2.88
assert result_5.units == "W"
assert result_5.unit_system == "Power"
assert abs(result_6.numeric_value - 10471.9755) / result_6.numeric_value < tol
assert result_6.units == "W"
assert result_6.unit_system == "Power"
assert abs(result_7.numeric_value - 10471.9755) / result_4.numeric_value < tol
assert result_7.units == "W"
assert result_7.unit_system == "Power"
assert abs(result_8.numeric_value - 10.4719755) / result_8.numeric_value < tol
assert result_8.units == "kW"
assert result_8.unit_system == "Power"
def test_07_addition(self):
v1 = Variable("10mm")
v2 = Variable(3)
v3 = Variable("3mA")
v4 = Variable("10A")
try:
v1 + v2
assert False
except AssertionError:
pass
try:
v2 + v1
assert False
except AssertionError:
pass
result_1 = v2 + v2
result_2 = v3 + v4
result_3 = v3 + v3
assert result_1.numeric_value == 6.0
assert result_1.unit_system == "None"
assert result_2.numeric_value == 10.003
assert result_2.units == "A"
assert result_2.unit_system == "Current"
assert result_3.numeric_value == 6.0
assert result_3.units == "mA"
assert result_3.unit_system == "Current"
def test_08_subtraction(self):
v1 = Variable("10mm")
v2 = Variable(3)
v3 = Variable("3mA")
v4 = Variable("10A")
try:
v1 - v2
assert False
except AssertionError:
pass
try:
v2 - v1
assert False
except AssertionError:
pass
result_1 = v2 - v2
result_2 = v3 - v4
result_3 = v3 - v3
assert result_1.numeric_value == 0.0
assert result_1.unit_system == "None"
assert result_2.numeric_value == -9.997
assert result_2.units == "A"
assert result_2.unit_system == "Current"
assert result_3.numeric_value == 0.0
assert result_3.units == "mA"
assert result_3.unit_system == "Current"
def test_09_specify_units(self):
# Scaling of the unit system "Angle"
angle = Variable("1rad")
angle.rescale_to("deg")
assert isclose(angle.numeric_value, 57.29577951308232)
angle.rescale_to("degmin")
assert isclose(angle.numeric_value, 57.29577951308232 * 60.0)
angle.rescale_to("degsec")
assert isclose(angle.numeric_value, 57.29577951308232 * 3600.0)
# Convert 200Hz to Angular speed numerically
omega = Variable(200 * math.pi * 2, "rad_per_sec")
assert omega.unit_system == "AngularSpeed"
assert isclose(omega.value, 1256.6370614359173)
omega.rescale_to("rpm")
assert isclose(omega.numeric_value, 12000.0)
omega.rescale_to("rev_per_sec")
assert isclose(omega.numeric_value, 200.0)
# test speed times time equals diestance
v = Variable("100m_per_sec")
assert v.unit_system == "Speed"
v.rescale_to("feet_per_sec")
assert isclose(v.numeric_value, 328.08398950131)
v.rescale_to("feet_per_min")
assert isclose(v.numeric_value, 328.08398950131 * 60)
v.rescale_to("miles_per_sec")
assert isclose(v.numeric_value, 0.06213711723534)
v.rescale_to("miles_per_minute")
assert isclose(v.numeric_value, 3.72822703412)
v.rescale_to("miles_per_hour")
assert isclose(v.numeric_value, 223.69362204724)
t = Variable("20s")
distance = v * t
assert distance.unit_system == "Length"
assert distance.evaluated_value == "2000.0meter"
distance.rescale_to("in")
assert isclose(distance.numeric_value, 2000 / 0.0254)
def test_10_division(self):
    """Division across unit systems, following the Xxx_divide_Yyy rules:

    Power/Voltage -> Current, Voltage/Current -> Resistance,
    None/Time -> Freq, Time/None -> Time, Current/Torque -> None,
    and Power/Torque -> AngularSpeed (summed here with an rpm value).
    """
    power = Variable("10W")
    voltage = Variable("40V")
    one_second = Variable("1s")
    small_current = Variable("5mA")
    torque = Variable("100NewtonMeter")
    rpm_speed = Variable("1000rpm")
    tol = 1e-4
    amps = power / voltage
    assert amps.numeric_value == 0.25
    assert amps.units == "A"
    assert amps.unit_system == "Current"
    ohms = voltage / amps
    assert ohms.numeric_value == 160.0
    assert ohms.units == "ohm"
    assert ohms.unit_system == "Resistance"
    freq = 3 / one_second
    assert freq.numeric_value == 3.0
    assert freq.units == "Hz"
    assert freq.unit_system == "Freq"
    half_second = one_second / 2
    assert abs(half_second.numeric_value - 0.5) < tol
    assert half_second.units == "s"
    assert half_second.unit_system == "Time"
    dimensionless = small_current / torque
    assert abs(dimensionless.numeric_value - 0.00005) < tol
    assert dimensionless.units == ""
    assert dimensionless.unit_system == "None"
    total_speed = power / torque + rpm_speed
    assert abs(total_speed.numeric_value - 104.8198) / total_speed.numeric_value < tol
    assert total_speed.units == "rad_per_sec"
    assert total_speed.unit_system == "AngularSpeed"
def test_11_delete_variable(self):
    """Deleting an existing design variable reports success."""
    manager = self.aedtapp.variable_manager
    assert manager.delete_variable("Var1")
def test_12_decompose_variable_value(self):
    """decompose_variable_value() splits a literal into (number, unit string)."""
    cases = [
        ("3.123456m", (3.123456, "m")),
        ("3m", (3, "m")),
        ("3", (3, "")),
        ("3.", (3.0, "")),
        ("3.123456m2", (3.123456, "m2")),
        ("3.123456Nm-2", (3.123456, "Nm-2")),
        ("3.123456kg2m2", (3.123456, "kg2m2")),
        ("3.123456kgm2", (3.123456, "kgm2")),
    ]
    for literal, expected in cases:
        assert decompose_variable_value(literal) == expected
def test_13_postprocessing(self):
    """Creation and evaluation of post-processing variables."""
    mgr = self.aedtapp.variable_manager
    assert mgr.set_variable("test_post1", 10, postprocessing=True)
    # This assignment must be rejected, the next (post-processing) one accepted.
    assert not mgr.set_variable("test2", "v1+1")
    assert mgr.set_variable("test2", "test_post1+1", postprocessing=True)
    evaluated = GeometryOperators.parse_dim_arg(
        mgr["test2"].evaluated_value, variable_manager=mgr
    )
    assert evaluated == 11
def test_14_intrinsics(self):
    """A variable assigned the intrinsic "Freq" tracks the design frequency."""
    self.aedtapp["fc"] = "Freq"
    assert self.aedtapp["fc"] == "Freq"
    fc_variable = self.aedtapp.variable_manager.dependent_variables["fc"]
    assert fc_variable.numeric_value == 1e9
def test_15_arrays(self):
    """Array-valued variables can be indexed through another variable."""
    app = self.aedtapp
    app["arr_index"] = 0
    app["arr1"] = "[1, 2, 3]"   # array given as a string literal
    app["arr2"] = [1, 2, 3]     # array given as a Python list
    app["getvalue1"] = "arr1[arr_index]"
    app["getvalue2"] = "arr2[arr_index]"
    for name in ("getvalue1", "getvalue2"):
        assert app.variable_manager[name].numeric_value == 1.0
def test_16_maxwell_circuit_variables(self):
    """Variables on a MaxwellCircuit design behave like app variables."""
    circuit = MaxwellCircuit()
    circuit["var2"] = "10mm"
    assert circuit["var2"] == "10mm"
    manager = circuit.variable_manager
    assert "var2" in manager.variable_names
    assert manager.independent_variables["var2"].units == "mm"
    # Angle-typed values round-trip unchanged.
    circuit["var3"] = "10deg"
    circuit["var4"] = "10rad"
    assert circuit["var3"] == "10deg"
    assert circuit["var4"] == "10rad"
def test_17_project_sweep_variable(self):
    """Project-level ($-prefixed) variables combine; material sweeps add them."""
    app = self.aedtapp
    app["$my_proj_test"] = "1mm"
    app["$my_proj_test2"] = 2
    app["$my_proj_test3"] = "$my_proj_test*$my_proj_test2"
    product = app.variable_manager["$my_proj_test3"]
    assert product.units == "mm"
    assert product.numeric_value == 2.0
    # A material sweep creates $-prefixed dependent variables per property.
    app.materials.add_material_sweep(["copper", "aluminum"], "sweep_alu")
    assert "$sweep_alupermittivity" in app.variable_manager.dependent_variables
def test_18_test_optimization_properties(self):
    """Optimization/tuning/statistical/sensitivity setup of a design variable.

    Each analysis flag starts disabled and can be enabled; each range/step
    property exposes a default derived from the variable value ("10mm")
    and accepts an override.
    """
    var = "v1"
    self.aedtapp[var] = "10mm"
    v = self.aedtapp.variable_manager

    def switch_on(flag):
        # Flag starts disabled, then sticks once enabled.
        assert not getattr(v[var], flag)
        setattr(v[var], flag, True)
        assert getattr(v[var], flag)

    def roundtrip(prop, default, override):
        # Property exposes its default, then keeps the assigned override.
        assert getattr(v[var], prop) == default
        setattr(v[var], prop, override)
        assert getattr(v[var], prop) == override

    switch_on("is_optimization_enabled")
    roundtrip("optimization_min_value", "5mm", "4mm")
    roundtrip("optimization_max_value", "15mm", "14mm")
    switch_on("is_tuning_enabled")
    roundtrip("tuning_min_value", "5mm", "4mm")
    roundtrip("tuning_max_value", "15mm", "14mm")
    roundtrip("tuning_step_value", "1mm", "0.5mm")
    switch_on("is_statistical_enabled")
    switch_on("is_sensitivity_enabled")
    roundtrip("sensitivity_min_value", "5mm", "4mm")
    roundtrip("sensitivity_max_value", "15mm", "14mm")
    roundtrip("sensitivity_initial_disp", "1mm", "0.5mm")
def test_19_test_optimization_global_properties(self):
var = "$v1"
self.aedtapp[var] = "10mm"
v = self.aedtapp.variable_manager
assert not v[var].is_optimization_enabled
v[var].is_optimization_enabled = True
assert v[var].is_optimization_enabled
| |
if isess.state:
isess.reset_state()
serialize_isession_and_close_db_session(isess)
return build_json_response({}, 204)
# Set identity at this moment for the interactive session
@app.route(nis_api_base + "/isession/identity", methods=["PUT"])
def interactive_session_set_identity():
    """Authenticate ("login") the current InteractiveSession.

    Application identification uses "user"/"password" query parameters; a
    token-based branch (request headers "token"/"auth_service") exists but is
    unreachable while application_identification is hardcoded to True.
    If an identity is already set the call fails, except in testing mode when
    the same user is re-submitted.

    :return: 200 with {"identity": <id>} on success, 401 with {} otherwise.
    """
    # Recover InteractiveSession
    # NOTE(review): leftover CORS-preflight experiment, kept commented out.
    # if request.method=="OPTIONS":
    #     r = build_json_response({}, 200)
    #     h = r.headers
    #     h['Access-Control-Allow-Origin'] = "http://localhost:4200"
    #     h['Access-Control-Allow-Methods'] = "PUT,POST,DELETE,GET,OPTIONS"
    #     h['Access-Control-Max-Age'] = str(21600)
    #     h['Access-Control-Allow-Credentials'] = "true"
    #     h['Access-Control-Allow-Headers'] = "Content-Type, Authorization, Content-Length, X-Requested-With"
    #     return r
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        # Deserialization failed; the helper already built the error response.
        return isess
    # If there is a current identity, issue an error. First "unidentify"
    if isess.get_identity_id():
        testing = is_testing_enabled()
        # Testing mode: re-identifying as the already-active user succeeds.
        if testing and request.args.get("user") and isess.get_identity_id() == request.args.get("user"):
            result = True
        else:
            result = False
    else:
        # Two types of identification: external, using OAuth tokens, or application, using user+password
        application_identification = True
        if application_identification:
            # NOTE(review): when no "user" query parameter is supplied,
            # "result" is never assigned and the response line below raises
            # NameError -- TODO confirm and guard.
            if request.args.get("user"):
                testing = is_testing_enabled()
                result = isess.identify({"user": request.args.get("user"),
                                         "password": request.args.get("password", None)
                                         },
                                        testing=testing
                                        )
        else:
            # TODO Check the validity of the token using the right Authentication service
            result = isess.identify({"token": request.headers.get("token"),
                                     "service": request.headers.get("auth_service")
                                     }
                                    )
    serialize_isession_and_close_db_session(isess)
    r = build_json_response({"identity": isess.get_identity_id()} if result else {},
                            200 if result else 401)
    return r
@app.route(nis_api_base + "/isession/identity", methods=["GET"])
def interactive_session_get_identity():
    """Return the identity currently attached to the InteractiveSession."""
    session = deserialize_isession_and_prepare_db_session()
    if session and isinstance(session, Response):
        # Deserialization failed; propagate the prepared error response.
        return session
    return build_json_response({"identity": session.get_identity_id()})
# Set to anonymous user again (or "logout")
@app.route(nis_api_base + "/isession/identity", methods=["DELETE"])
def interactive_session_remove_identity():
    """Drop the current identity ("logout"), reverting to anonymous.

    Refused with 401 while a reproducible session is open.  A session that
    was never identified is returned unchanged and is not re-serialized.
    """
    session = deserialize_isession_and_prepare_db_session()
    if session and isinstance(session, Response):
        return session
    if not session.get_identity_id():
        # Nothing to undo; do not touch the persisted session either.
        return build_json_response({"identity": session.get_identity_id()})
    if session.reproducible_session_opened():
        # Logging out while a reproducible session is open would orphan it.
        response = build_json_response(
            {"error": "There is an open Reproducible Session. Close it first."}, 401)
    else:
        session.unidentify()
        response = build_json_response({"identity": session.get_identity_id()})
    serialize_isession_and_close_db_session(session)
    return response
# Close interactive session (has to log out if some identity is active)
@app.route(nis_api_base + "/isession", methods=["DELETE"])
def interactive_session_close():
    """Terminate the InteractiveSession and invalidate the Flask session."""
    session = deserialize_isession_and_prepare_db_session(False)
    if session:
        session.quit()
    # Wipe the cookie-backed session and mark it for invalidation.
    flask_session.clear()
    flask_session["__invalidate__"] = True
    return build_json_response({})
@app.route(nis_api_base + '/isession/generator.json', methods=['POST'])
def convert_generator_to_json_generator():
    """
    Send the file to the service
    Convert to native
    Return it in JSON format

    :return: JSON response (200) with the natively converted generator
    :raises Exception: if the submitted file is empty
    """
    # Check Interactive Session is Open. If not, open it
    isess = deserialize_isession_and_prepare_db_session(False)
    if not isess:
        isess = InteractiveSession(DBSession)
        testing = is_testing_enabled()
        if testing:
            # NOTE(review): "<PASSWORD>" is a placeholder left by a credential
            # scrubber; this line is not valid Python until the real test
            # password constant is restored.
            result = isess.identify({"user": "test_user", "password": <PASSWORD>}, testing=True)
    # Receive file
    generator_type, content_type, buffer, _, _ = receive_file_submission(request)
    if len(buffer) == 0:
        raise Exception("No content was received. Please check the original file exists.")
    # Convert the submitted generator to the native representation
    output = convert_generator_to_native(generator_type, content_type, buffer)
    # Return the conversion
    r = build_json_response(output, 200)
    serialize_isession_and_close_db_session(isess)
    return r
@app.route(nis_api_base + '/isession/generator.to_dc.xml', methods=['POST'])
def convert_generator_to_dublin_core():
    """
    Send the file to the service
    Convert to native
    Return the Dublin Core XML record

    :return: XML response (200) built from the first "metadata" command,
             or a JSON error (401) if no metadata command is found
    :raises Exception: if the submitted file is empty
    """
    # Check Interactive Session is Open. If not, open it
    isess = deserialize_isession_and_prepare_db_session(False)
    if not isess:
        isess = InteractiveSession(DBSession)
        testing = is_testing_enabled()
        if testing:
            # NOTE(review): "<PASSWORD>" is a placeholder left by a credential
            # scrubber; this line is not valid Python until the real test
            # password constant is restored.
            result = isess.identify({"user": "test_user", "password": <PASSWORD>}, testing=True)
    # Receive file
    generator_type, content_type, buffer, _, _ = receive_file_submission(request)
    if len(buffer) == 0:
        raise Exception("No content was received. Please check the original file exists.")
    output = convert_generator_to_native(generator_type, content_type, buffer)
    # Elaborate the Dublin Core record from the first "metadata" command, if any
    xml = None
    for c in output:
        if "command" in c and c["command"] == "metadata" and "content" in c:
            xml = generate_dublin_core_xml(c["content"])
            break
    # Return the conversion
    if xml:
        r = Response(xml,
                     mimetype="text/xml",
                     status=200)
    else:
        r = build_json_response({"message": "Could not elaborate Dublin Core XML record from the input generator"}, 401)
    serialize_isession_and_close_db_session(isess)
    return r
# -- Reproducible Sessions --

@app.route(nis_api_base + "/isession/rsession", methods=["POST"])
def reproducible_session_open():
    """Open a ReproducibleSession inside the current InteractiveSession.

    Parameters can arrive as an uploaded JSON body, as form fields, or as
    query parameters -- checked in that order, later sources used only if
    the earlier ones set nothing:
      * uuid: case study version UUID to open (None -> brand new case study)
      * read_version_state: recover the version's saved state (default True)
      * create_new: "case_study" | "version" | other -> CreateNew.NO (default)
      * allow_saving: allow persisting on close (default True; then an
        authenticated identity is required)

    :return: 204 on success, 401 with an error message otherwise.
    """
    def read_parameters(dd):
        # Fill the outer-scope parameter variables from dict-like "dd",
        # leaving untouched any parameter not present in it.
        nonlocal uuid2, read_uuid_state, create_new, allow_saving
        # Read query parameters
        uuid2 = dd.get("uuid")
        if "read_version_state" in dd:
            read_uuid_state = dd["read_version_state"]
            read_uuid_state = str2bool(read_uuid_state)
        if "create_new" in dd:
            create_new = str(dd["create_new"])
            if create_new.lower() in ["1", "case_study", "casestudy"]:
                create_new = CreateNew.CASE_STUDY
            elif create_new.lower() in ["2", "version", "case_study_version"]:
                create_new = CreateNew.VERSION
            else:
                create_new = CreateNew.NO
        if "allow_saving" in dd:
            allow_saving = dd["allow_saving"]
            allow_saving = allow_saving.lower() == "true"

    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # Check identity
    identity = isess.get_identity_id()
    # Retrieve parameters (None means "not provided yet")
    uuid2 = None
    read_uuid_state = None
    create_new = None
    allow_saving = None
    # First, read uploaded JSON (either a multipart file or the raw body)
    if len(request.files) > 0:
        for k in request.files:
            buffer = bytes(request.files[k].stream.getbuffer())
            content_type = request.files[k].content_type
            break
    else:
        buffer = bytes(io.BytesIO(request.get_data()).getbuffer())
        if "Content-Type" in request.headers:
            content_type = request.headers["Content-Type"]
    if buffer:
        read_parameters(json.loads(buffer))
    # Fallbacks: form fields, then the query string (only if nothing set yet)
    if not uuid2 and not read_uuid_state and not create_new and not allow_saving:
        read_parameters(request.form)
    if not uuid2 and not read_uuid_state and not create_new and not allow_saving:
        read_parameters(request.args)
    # Defaults for the parameters still unset
    if read_uuid_state is None:
        read_uuid_state = True
    if create_new is None:
        create_new = CreateNew.NO
    if allow_saving is None:
        allow_saving = True
    # Persistent object to open: None (new case study), UUID (case study version)
    if isess.reproducible_session_opened():
        r = build_json_response({"error": "There is an open Reproducible Session. Close it first."}, 401)
    else:
        if allow_saving and not identity:
            r = build_json_response({"error": "When 'allow_saving==true' an identity is required."}, 401)
        else:
            try:
                # TODO New, not checked
                isess.reset_state()
                isess.open_reproducible_session(case_study_version_uuid=uuid2,
                                                recover_previous_state=read_uuid_state,
                                                cr_new=create_new,
                                                allow_saving=allow_saving
                                                )
                r = build_json_response({}, 204)
            except Exception as e:
                s = "Exception trying to open reproducible session: " + str(e)
                logger.error(s)
                r = build_json_response({"error": s}, 401)
    serialize_isession_and_close_db_session(isess)
    return r
@app.route(nis_api_base + "/isession/rsession", methods=["DELETE"])
def reproducible_session_save_close():  # Close the ReproducibleSession, with the option of saving it
    """Close the open ReproducibleSession, optionally persisting it first.

    Query parameters:
      * save_before_close: "true" to persist before closing (default False)
      * cs_uuid: attach the saved version to this existing case study (optional)
      * cs_name: name for the case study version (optional)

    :return: 200 with session/version/case-study UUIDs, 401 on error.
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # A reproducible session must be open, signal it if not,
    save = request.args.get("save_before_close", "False")
    if save:
        save = str2bool(save)
    else:
        save = False
    # The reproducible session, if saved, could be assigned to an existing case study
    cs_uuid = request.args.get("cs_uuid", None)
    if cs_uuid:
        cs_uuid = str(cs_uuid)
    # If specified, it is the name for the case study Version
    cs_name = request.args.get("cs_name", None)
    if cs_name:
        cs_name = str(cs_name)
    # Close reproducible session
    if not isess.reproducible_session_opened():
        r = build_json_response({"error": "There is no open Reproducible Session. Cannot close"}, 401)
    else:
        try:
            uuid_, v_uuid, cs_uuid = isess.close_reproducible_session(issues=None,
                                                                      output=None,
                                                                      save=save,
                                                                      from_web_service=True,
                                                                      cs_uuid=cs_uuid,
                                                                      cs_name=cs_name)
            r = build_json_response({"session_uuid": str(uuid_),
                                     "version_uuid": str(v_uuid),
                                     "case_study_uuid": str(cs_uuid)
                                     },
                                    200)
        except Exception as e:
            s = "Exception trying to close reproducible session: " + str(e)
            logger.error(s)
            r = build_json_response({"error": s}, 401)
    serialize_isession_and_close_db_session(isess)
    return r
@app.route(nis_api_base + "/isession/rsession", methods=["GET"])
def reproducible_session_get_status():
    """Report whether a ReproducibleSession is currently open."""
    session = deserialize_isession_and_prepare_db_session()
    if session and isinstance(session, Response):
        return session
    status = "rsession_open" if session.reproducible_session_opened() else "rsession_closed"
    return build_json_response(status, 200)
@app.route(nis_api_base + "/isession/rsession/command_generators/<order>", methods=["GET"])
def reproducible_session_get_command_generator(order):  # Return one of the command generators
    """Return the content of the <order>-th command generator (0-based index).

    :param order: index (URL path segment) into the open reproducible
                  session's ws_commands list
    :return: the raw command content with its original MIME type, or a JSON
             error if the index is out of range / no session is open
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    order = int(order)
    # A reproducible session must be open, signal about it if not
    if isess.reproducible_session_opened():
        if order < len(isess.reproducible_session.ws_commands):
            c = isess.reproducible_session.ws_commands[order]
            r = Response(c.content, mimetype=c.content_type)
        else:
            # BUG FIX: the out-of-range message previously read the nonexistent
            # attribute "commands" (the list indexed above is "ws_commands"),
            # raising AttributeError instead of returning this error response.
            r = build_json_response({"error":
                                     "Command number " + str(order) +
                                     " requested, only " + str(len(isess.reproducible_session.ws_commands)) + " available."})
    else:
        r = build_json_response("No open reproducible Session", 200)
    return r
# ----------------------------------------------------------------------------------------------------------------------
# State management: save, list, get, delete ("update"" is "save", overwrite always)
# ----------------------------------------------------------------------------------------------------------------------
@app.route(nis_api_base + "/isession/rsession/state", methods=["PUT"])
def reproducible_session_save_state():  # Save state
    """
    Save or overwrite state in-memory to a file at the backend side
    Receives a "code" Query parameter with the name for the saved state file (which must be unique, unless an overwrite
    is wanted)
    :return: Empty if everything is ok, Error if there is an issue
    """
    def ensure_dir(file_path):
        # Create the parent directory of "file_path" if it does not exist.
        # NOTE(review): this creates the *parent* of the given path; if
        # CASE_STUDIES_DIR carries no trailing separator, the directory
        # itself may not be created -- verify configuration.
        directory = os.path.dirname(file_path)
        if not os.path.exists(directory):
            os.makedirs(directory)
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # A reproducible session must be open, signal about it if not
    if isess.reproducible_session_opened():
        if isess.state:
            code = request.args.get("code", None)
            try:
                # Validate "code" as a simple identifier (it becomes a file name)
                string_to_ast(simple_ident, code)
            except Exception:
                # BUG FIX: was a bare "except:", which also swallowed
                # SystemExit/KeyboardInterrupt; only ordinary exceptions
                # (missing or malformed "code") should map to None here.
                code = None
            if code is None:
                r = build_json_response({"error": "Query parameter 'code' is mandatory"}, 401)
            else:
                cs_path = nexinfosys.get_global_configuration_variable("CASE_STUDIES_DIR")
                ensure_dir(cs_path)
                # Save state
                s = serialize_state(isess.state)
                with open(cs_path + os.sep + code + ".state_serialized", "wt") as f:
                    f.write(s)
                r = build_json_response({}, 204)
        else:
            # No in-memory state: nothing to persist, still a success (204)
            r = build_json_response({}, 204)
    else:
        r = build_json_response({"error": "Cannot save state, no open reproducible session"}, 401)
    return r
@app.route(nis_api_base + "/isession/rsession/state", methods=["DELETE"])
def reproducible_session_delete_state(): # Delete state
"""
Delete a saved state
Receives a "code" Query parameter with the name for the saved state file to delete
:return: Empty if everything is ok, Error if there is an | |
crop the feature map to the same size as the x_f2 after upsamling
# 360//32
# print('After upsample:', x_f2.size(), x_f3.size(), x_f4.size())
# h
h_min = min(x_f2.size(2), x_f3.size(2), x_f4.size(2))
ds2 = x_f2.size(2) - h_min
ds3 = x_f3.size(2) - h_min
ds4 = x_f4.size(2) - h_min
# print(ds3//2, x_f2.size(2)-(ds3-ds3//2))
# print(ds4//2, x_f2.size(2)-(ds4-ds4//2))
x_f2 = x_f2[:, :, ds2//2:x_f2.size(2)-(ds2-ds2//2), :]
x_f3 = x_f3[:, :, ds3//2:x_f3.size(2)-(ds3-ds3//2), :]
x_f4 = x_f4[:, :, ds4//2:x_f4.size(2)-(ds4-ds4//2), :]
# print(x_f2.size(), x_f3.size(), x_f4.size())
x_concat = torch.cat((x_f2, x_f3, x_f4), dim=1)
y = self.decoder_conv1(x_concat)
y = self.decoder_conv2(y)
return y
class LWShuffleNetV2_single_ASPP(nn.Module):
    """ShuffleNetV2 backbone with multi-stage feature aggregation and one
    LWASPP module applied after concatenating the aggregated features.

    Stage-3 and stage-4 outputs are brought to the stage-2 resolution (1/8)
    with PixelShuffle -- which divides the channel count by the square of the
    upscale factor -- then concatenated, reduced 2x by a 1x1 conv, passed
    through LWASPP and finally through two 1x1 decoder convs.
    """

    def __init__(self, width_mult=1.0):
        """Build the network.

        :param width_mult: ShuffleNetV2 width multiplier
                           (0.5, 1.0, 1.5 or 2.0)
        :raises ValueError: for any other width multiplier
        """
        super(LWShuffleNetV2_single_ASPP, self).__init__()
        print('LWShuffleNetV2_single_ASPP, width_mult={}'.format(width_mult))
        self.stage_repeats = [4, 8, 4]
        # index 0 is invalid and should never be called.
        # only used for indexing convenience.
        if width_mult == 0.5:
            self.stage_out_channels = [-1, 24, 48, 96, 192, 1024]
        elif width_mult == 1.0:
            self.stage_out_channels = [-1, 24, 116, 232, 464, 1024]
        elif width_mult == 1.5:
            self.stage_out_channels = [-1, 24, 176, 352, 704, 1024]
        elif width_mult == 2.0:
            self.stage_out_channels = [-1, 24, 224, 488, 976, 2048]
        else:
            raise ValueError(
                """{} groups is not supported for
                   1x1 Grouped Convolutions""".format(width_mult))
        # building first layer (stride-2 stem conv + stride-2 max pool -> 1/4)
        input_channel = self.stage_out_channels[1]
        self.conv1 = nn.Sequential(nn.Conv2d(3, input_channel, 3, 2, 1, bias=False),
                                   nn.BatchNorm2d(input_channel),
                                   nn.ReLU(inplace=True))
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.features = []
        # building inverted residual blocks; the first block of every stage
        # downsamples (stride 2), the rest keep resolution
        for idxstage in range(len(self.stage_repeats)):
            numrepeat = self.stage_repeats[idxstage]
            output_channel = self.stage_out_channels[idxstage + 2]
            for i in range(numrepeat):
                if i == 0:
                    self.features.append(InvertedResidual(input_channel, output_channel, 2, 2))
                else:
                    self.features.append(InvertedResidual(input_channel, output_channel, 1, 1))
                input_channel = output_channel
        # group the blocks per stage (4 / 8 / 4 blocks)
        self.stage_2 = nn.Sequential(*self.features[0:4])
        self.stage_3 = nn.Sequential(*self.features[4:12])
        self.stage_4 = nn.Sequential(*self.features[12:16])
        # Multi-stage Feature Aggregation: bring stage-3/4 maps to the
        # stage-2 resolution; PixelShuffle(r) divides channels by r*r.
        self.upsample_s3 = nn.PixelShuffle(2)
        self.upsample_s4 = nn.PixelShuffle(4)
        # Channels after concatenation: c2 + c3//4 + c4//16
        if width_mult == 0.5:
            _c_cat_in = 84    # 48 + 96//4 + 192//16
        elif width_mult == 1.0:
            _c_cat_in = 203   # 116 + 232//4 + 464//16
        elif width_mult == 1.5:
            _c_cat_in = 308   # 176 + 352//4 + 704//16
        elif width_mult == 2.0:
            # BUG FIX: width_mult == 2.0 passed the stage_out_channels branch
            # above but left _c_cat_in unassigned, so construction crashed
            # with NameError; same c2 + c3//4 + c4//16 arithmetic applies.
            _c_cat_in = 407   # 224 + 488//4 + 976//16
        _c_cat_out = _c_cat_in // 2
        self.conv_cat = nn.Sequential(nn.Conv2d(_c_cat_in, _c_cat_out, 1, bias=True),
                                      nn.BatchNorm2d(_c_cat_out),
                                      nn.ReLU())
        _c_aspp_in, _c_aspp, _c_aspp_out = _c_cat_out, _c_cat_out // 2, _c_cat_out
        self.aspp = LWASPP(c_in=_c_aspp_in, c=_c_aspp, c_out=_c_aspp_out, dilation=[1, 2, 4, 8], global_pool=False)
        # Decoder: presumably LWASPP concatenates its 4 dilation branches,
        # giving c_out * 4 channels -- TODO confirm against LWASPP.
        _c = _c_aspp_out * 4
        self.decoder_conv1 = nn.Sequential(nn.Conv2d(_c, _c, kernel_size=1, bias=True),
                                           nn.BatchNorm2d(_c),
                                           nn.ReLU())
        self.decoder_conv2 = nn.Sequential(nn.Conv2d(_c, _c, kernel_size=1, bias=True),
                                           nn.BatchNorm2d(_c),
                                           nn.ReLU())

    def forward(self, x):
        """Return the aggregated 1/8-resolution feature map for input x."""
        x = self.conv1(x)      # 1/2
        f1 = self.maxpool(x)   # 1/4
        f2 = self.stage_2(f1)  # 1/8
        f3 = self.stage_3(f2)  # 1/16
        f4 = self.stage_4(f3)  # 1/32
        f3 = self.upsample_s3(f3)  # back to 1/8
        f4 = self.upsample_s4(f4)  # back to 1/8
        # Crop along the height axis (dim 2) so the three maps align: odd
        # input sizes can leave them off by a pixel after down/up-sampling.
        # NOTE(review): only height is cropped; widths are assumed to already
        # match -- confirm for the input sizes used.
        h_min = min(f2.size(2), f3.size(2), f4.size(2))
        ds2 = f2.size(2) - h_min
        ds3 = f3.size(2) - h_min
        ds4 = f4.size(2) - h_min
        x_f2 = f2[:, :, ds2 // 2:f2.size(2) - (ds2 - ds2 // 2), :]
        x_f3 = f3[:, :, ds3 // 2:f3.size(2) - (ds3 - ds3 // 2), :]
        x_f4 = f4[:, :, ds4 // 2:f4.size(2) - (ds4 - ds4 // 2), :]
        y = torch.cat((x_f2, x_f3, x_f4), dim=1)
        y = self.conv_cat(y)
        y = self.aspp(y)
        y = self.decoder_conv1(y)
        y = self.decoder_conv2(y)
        return y
# Params: 1491373, 1.49M
# Flops: 3043214400.0, 3.04G
# LWASPP
# Params: 1288941, 1.29M
# Flops: 2845684800.0, 2.85G
# light-weight decoder
# Params: 451973, 451.97K
# Flops: 762806400.0, 762.81M
class LWNetwork(nn.Module):
def __init__(self, width_mult=1.0, numkeypoints=18, numlims=19, multistage=0, backbone=None, head=None): # Mod by Jie
super(LWNetwork, self).__init__()
if backbone == 'LWShuffleNetV2_baseline':
self.network = ShuffleNetV2(width_mult=width_mult)
if width_mult == 1.0:
c_pose_in = 464
elif width_mult == 0.5:
c_pose_in = 192
elif backbone == 'LWShuffleNetV2_HR_cat':
self.network = LWShuffleNetV2_HR_cat(width_mult=width_mult)
if width_mult == 1.0:
# c_pose_in = 616
c_pose_in = 412
elif width_mult == 0.5:
c_pose_in = 192
elif backbone == 'LWShuffleNetV2_HR_catv2':
self.network = LWShuffleNetV2_HR_catv2(width_mult=width_mult)
if width_mult == 1.0:
# c_pose_in = 616
# c_pose_in = 388
# c_pose_in = 420
# c_pose_in = 452
c_pose_in = 580
elif width_mult == 0.5:
c_pose_in = 192
elif backbone == 'LWShuffleNetV2_HRv2':
self.network = LWShuffleNetV2_HRv2(width_mult=width_mult)
if width_mult == 1.0:
c_pose_in = 256
elif backbone == 'LWShuffleNetV2_baseline_v1':
self.network = ShuffleNetV2_v1(width_mult=width_mult)
if width_mult == 1.0:
c_pose_in = 464
elif width_mult == 0.5:
c_pose_in = 192
elif backbone == 'LWShuffleNetV2_baseline_v1_cat':
self.network = ShuffleNetV2_v1_cat(width_mult=width_mult)
if width_mult == 1.0:
c_pose_in = 812
elif width_mult == 0.5:
c_pose_in = 192
elif backbone == 'LWShuffleNetV2_v1_16_cat':
self.network = LWShuffleNetV2_v1_cat(width_mult=width_mult)
if width_mult == 1.0:
c_pose_in = 464
# elif width_mult == 0.5:
# c_pose_in = 192
elif backbone == 'LWShuffleNetV2_baseline_v2':
self.network = ShuffleNetV2_v2(width_mult=width_mult)
if width_mult == 1.0:
c_pose_in = 464
elif width_mult == 0.5:
c_pose_in = 192
elif backbone == 'LWShuffleNetV2_baseline_v3':
self.network = ShuffleNetV2_v3(width_mult=width_mult)
if width_mult == 1.0:
c_pose_in = 464
elif width_mult == 0.5:
c_pose_in = 192
elif backbone == 'LWShuffleNetV2_SingleASPP':
self.network = LWShuffleNetV2_single_ASPP(width_mult=width_mult)
if width_mult == 1.0:
c_pose_in = 404
elif width_mult == 0.5:
c_pose_in = 168
elif backbone == 'LWShuffleNetV2_MultiASPP':
self.network = LWShuffleNetV2_MultiASPP(width_mult=width_mult)
if width_mult == 1.0:
# c_pose_in = 464
c_pose_in = 232
elif width_mult == 0.5:
# c_pose_in = 192
c_pose_in = 96
elif backbone == 'LWShuffleNetV2_mscat':
self.network = LWShuffleNetV2_mscat(width_mult=width_mult)
if width_mult == 1.0:
c_pose_in = 203
elif backbone == 'LWShuffleNetV2_msadd':
self.network = LWShuffleNetV2_msadd(width_mult=width_mult)
if width_mult == 1.0:
c_pose_in = 116
elif backbone == 'ShuffleNetV2_cat':
self.network = ShuffleNetV2_cat(width_mult=width_mult)
if width_mult == 1.0:
c_pose_in = 812
elif backbone == 'ShuffleNetV2_Adaptive_cat':
self.network = ShuffleNetV2_Adaptive_cat(width_mult=width_mult)
if width_mult == 1.0:
c_pose_in = 812
elif backbone == 'ShuffleNetV2_Adaptive_catV2':
self.network = ShuffleNetV2_Adaptive_catV2(width_mult=width_mult)
if width_mult == 1.0:
c_pose_in = 812
elif backbone == 'ShuffleNetV2_add':
self.network = ShuffleNetV2_add(width_mult=width_mult)
if width_mult == 1.0:
c_pose_in = 464
elif backbone == 'LWShuffleNetV2_MultiASPP152':
self.network = LWShuffleNetV2(width_mult=width_mult)
c_pose_in = 152
elif backbone == 'LWShuffleNetV2_SingleASPP192':
self.network = LWShuffleNetV2_single_ASPP(width_mult=width_mult)
c_pose_in = 192
elif backbone == 'LWShuffleNetV2_MultiASPP192':
self.network = LWShuffleNetV2_MultiASPP192(width_mult=width_mult)
c_pose_in = 192
else:
print('Please set the right backbone.')
exit(0)
# head = 'ResidualAdapter'
if head == 'ResidualAdapter':
print("Network head == 'ResidualAdapter'")
self.paf = nn.Sequential(
ResidualAdapter(c_in=c_pose_in, c=c_pose_in//4, c_out=c_pose_in, kernel=3, residual=True),
ResidualAdapter(c_in=c_pose_in, c=c_pose_in//4, c_out=c_pose_in//2, kernel=3, residual=False),
nn.Conv2d(c_pose_in//2, numlims * 2, 1)
)
self.heatmap = nn.Sequential(
ResidualAdapter(c_in=c_pose_in, c=c_pose_in//4, c_out=c_pose_in, kernel=3, residual=True),
ResidualAdapter(c_in=c_pose_in, c=c_pose_in//4, c_out=c_pose_in//2, kernel=3, residual=False),
nn.Conv2d(c_pose_in//2, numkeypoints + 1, 1)
)
if head == 'ResidualAdapterV2':
print("Network head == 'ResidualAdapterV2'")
# remove relu
k = 1
self.paf = nn.Sequential(
ResidualAdapterV2(c_in=c_pose_in, c=c_pose_in//4, c_out=c_pose_in, kernel=k, residual=True),
ResidualAdapterV2(c_in=c_pose_in, c=c_pose_in//4, c_out=c_pose_in//2, kernel=k, residual=False),
nn.Conv2d(c_pose_in//2, numlims * 2, 1)
)
self.heatmap = nn.Sequential(
ResidualAdapterV2(c_in=c_pose_in, c=c_pose_in//4, c_out=c_pose_in, kernel=k, residual=True),
ResidualAdapterV2(c_in=c_pose_in, c=c_pose_in//4, c_out=c_pose_in//2, kernel=k, residual=False),
nn.Conv2d(c_pose_in//2, numkeypoints + 1, 1)
)
if head == 'LWResidualAdapterV2':
print("Network head == 'LWResidualAdapterV2'")
# remove relu
k = 1
c = 96
self.paf = nn.Sequential(
# ResidualAdapterV2(c_in=c_pose_in, c=c_pose_in//4, c_out=c_pose_in, kernel=k, residual=True),
# ResidualAdapterV2(c_in=c_pose_in, c=c_pose_in//8, c_out=c_pose_in//4, kernel=k, residual=False),
ResidualAdapterV3(c_in=c_pose_in, c=c, c_out=(numlims * 2), kernel=k, groups=4, residual=False),
# nn.Conv2d(c_pose_in//4, numlims * 2, 1)
)
self.heatmap = nn.Sequential(
# ResidualAdapterV2(c_in=c_pose_in, c=c_pose_in//4, c_out=c_pose_in, kernel=k, residual=True),
# ResidualAdapterV2(c_in=c_pose_in, c=c_pose_in//8, c_out=c_pose_in//4, kernel=k, residual=False),
ResidualAdapterV3(c_in=c_pose_in, c=c, c_out=(numkeypoints + 1), kernel=k, groups=4, residual=False),
# nn.Conv2d(c_pose_in//4, numkeypoints + 1, 1)
)
elif head == 'ResidualAdapterMix':
print("Network head == 'ResidualAdapterMix'")
k = 1
self.paf = nn.Sequential(
ResidualAdapterMix(c_in=c_pose_in, c=c_pose_in // 4, c_out=c_pose_in, kernel=k, residual=True),
ResidualAdapterMix(c_in=c_pose_in, c=c_pose_in // 4, c_out=c_pose_in // 2, kernel=k, residual=False),
nn.Conv2d(c_pose_in // 2, numlims * 2, 1)
)
self.heatmap = nn.Sequential(
ResidualAdapterMix(c_in=c_pose_in, c=c_pose_in // 4, c_out=c_pose_in, kernel=k, residual=True),
ResidualAdapterMix(c_in=c_pose_in, c=c_pose_in // 4, c_out=c_pose_in // 2, kernel=k, residual=False),
nn.Conv2d(c_pose_in // 2, numkeypoints + 1, 1)
)
else:
self.paf = nn.Conv2d(c_pose_in, numlims * 2, 1)
self.heatmap = nn.Conv2d(c_pose_in, numkeypoints + 1, 1) # channels: background + numkeypoints
# self.th_paf = nn.Tanh()
# | |
0.148462793867),
(-0.521379367272, 0.148410003095),
(-0.527231998581, 0.148356510878),
(-0.533078164867, 0.148302308467),
(-0.538917769111, 0.148247386912),
(-0.54475071301, 0.148191737075),
(-0.550576896933, 0.148135349642),
(-0.556396219867, 0.148078215134),
(-0.562208579378, 0.148020323917),
(-0.568013871552, 0.147961666223),
(-0.573811990953, 0.14790223216),
(-0.579602830565, 0.147842011726),
(-0.585386281743, 0.147780994834),
(-0.591162234156, 0.14771917132),
(-0.596930575736, 0.147656530971),
(-0.602691192617, 0.147593063538),
(-0.608443969083, 0.147528758763),
(-0.614188787505, 0.147463606398),
(-0.614161347554, 0.150392501849),
(-0.614133301122, 0.153320947048),
(-0.614104647435, 0.156248933297),
(-0.614075385706, 0.159176451886),
(-0.614045515132, 0.162103494089),
(-0.614015034894, 0.165030051169),
(-0.613983944162, 0.167956114374),
(-0.613952242089, 0.170881674936),
(-0.613919927811, 0.173806724074),
(-0.613887000454, 0.176731252987),
(-0.613853459124, 0.179655252863),
(-0.613819302918, 0.182578714868),
(-0.613784530912, 0.185501630154),
(-0.613749142172, 0.188423989853),
(-0.613713135747, 0.191345785079),
(-0.613676510671, 0.194267006927),
(-0.613639265965, 0.197187646471),
(-0.613601400633, 0.200107694767),
(-0.613562913666, 0.203027142847),
(-0.613523804039, 0.205945981725),
(-0.613484070712, 0.20886420239),
(-0.613443712632, 0.211781795811),
(-0.61340272873, 0.214698752932),
(-0.613361117921, 0.217615064673),
(-0.613318879108, 0.220530721931),
(-0.613276011177, 0.223445715578),
(-0.613232513, 0.226360036459),
(-0.613188383434, 0.229273675394),
(-0.613143621323, 0.232186623176),
(-0.613098225493, 0.235098870572),
(-0.613052194758, 0.238010408318),
(-0.613005527916, 0.240921227124),
(-0.607276462672, 0.241026766527),
(-0.601539255248, 0.241130955689),
(-0.595794026684, 0.241233808187),
(-0.590040896246, 0.241335337584),
(-0.584279981479, 0.241435557397),
(-0.578511398245, 0.241534481083),
(-0.572735260772, 0.241632122009),
(-0.566951681693, 0.241728493438),
(-0.561160772093, 0.241823608507),
(-0.555362641544, 0.24191748021),
(-0.549557398155, 0.242010121381),
(-0.543745148603, 0.242101544676),
(-0.537925998182, 0.242191762561),
(-0.532100050834, 0.242280787298),
(-0.526267409191, 0.242368630929),
(-0.520428174612, 0.242455305268),
(-0.514582447222, 0.242540821889),
(-0.508730325943, 0.242625192113),
(-0.502871908535, 0.242708427003),
(-0.497007291626, 0.242790537355),
(-0.49113657075, 0.242871533686),
(-0.48525984038, 0.242951426235),
(-0.479377193955, 0.243030224951),
(-0.473488723919, 0.24310793949),
(-0.467594521751, 0.243184579211),
(-0.46169467799, 0.243260153174),
(-0.455789282273, 0.243334670133),
(-0.449878423357, 0.243408138537),
(-0.443962189153, 0.243480566528),
(-0.438040666749, 0.24355196194),
(-0.432113942443, 0.243622332297),
(-0.426182101763, 0.243691684817)]},
69: {'color': 'violet',
'polygon': [(-0.626237567708, 0.240632263132),
(-0.626284376392, 0.237724509284),
(-0.626330528937, 0.234816031026),
(-0.626376026596, 0.231906837718),
(-0.626420870612, 0.228996938696),
(-0.626465062208, 0.226086343263),
(-0.626508602597, 0.223175060696),
(-0.626551492975, 0.220263100245),
(-0.626593734521, 0.217350471132),
(-0.626635328403, 0.214437182554),
(-0.626676275772, 0.211523243681),
(-0.626716577763, 0.20860866366),
(-0.626756235499, 0.205693451612),
(-0.626795250085, 0.202777616636),
(-0.626833622612, 0.199861167805),
(-0.626871354157, 0.196944114174),
(-0.62690844578, 0.194026464771),
(-0.626944898527, 0.191108228606),
(-0.62698071343, 0.188189414667),
(-0.627015891503, 0.185270031922),
(-0.627050433748, 0.182350089319),
(-0.62708434115, 0.179429595787),
(-0.627117614678, 0.176508560238),
(-0.627150255289, 0.173586991564),
(-0.627182263921, 0.170664898641),
(-0.627213641499, 0.167742290326),
(-0.627244388934, 0.164819175463),
(-0.627274507118, 0.161895562878),
(-0.627303996931, 0.158971461381),
(-0.627332859237, 0.156046879771),
(-0.627361094884, 0.153121826829),
(-0.627388704706, 0.150196311324),
(-0.62741568952, 0.147270342013),
(-0.633133353077, 0.147199536459),
(-0.638842529369, 0.147127842585),
(-0.644543089612, 0.147055250668),
(-0.650234902715, 0.146981751166),
(-0.655917835212, 0.146907334745),
(-0.661591751195, 0.146831992319),
(-0.667256512247, 0.146755715075),
(-0.672911977366, 0.146678494515),
(-0.678558002899, 0.146600322489),
(-0.684194442464, 0.146521191235),
(-0.689821146881, 0.14644109342),
(-0.695437964093, 0.146360022174),
(-0.701044739089, 0.146277971142),
(-0.706641313832, 0.146194934521),
(-0.712227527174, 0.146110907108),
(-0.717803214778, 0.146025884348),
(-0.723368209039, 0.145939862379),
(-0.728922339, 0.145852838088),
(-0.734465430268, 0.145764809157),
(-0.739997304932, 0.14567577412),
(-0.745517781473, 0.145585732417),
(-0.751026674683, 0.14549468445),
(-0.756523795572, 0.145402631644),
(-0.762008951281, 0.145309576505),
(-0.767481944992, 0.145215522682),
(-0.772942575836, 0.145120475034),
(-0.778390638803, 0.14502443969),
(-0.783825924642, 0.144927424123),
(-0.789248219777, 0.144829437214),
(-0.794657306201, 0.144730489325),
(-0.800052961384, 0.144630592373),
(-0.805434958177, 0.144529759905),
(-0.805393202435, 0.147406082209),
(-0.805350521398, 0.150281900783),
(-0.805306913941, 0.15315720565),
(-0.805262378924, 0.156031986815),
(-0.805216915192, 0.158906234262),
(-0.805170521578, 0.161779937955),
(-0.805123196897, 0.164653087837),
(-0.805074939952, 0.167525673829),
(-0.805025749534, 0.170397685828),
(-0.804975624418, 0.173269113709),
(-0.804924563367, 0.176139947322),
(-0.804872565132, 0.179010176493),
(-0.80481962845, 0.181879791021),
(-0.804765752048, 0.18474878068),
(-0.804710934639, 0.187617135215),
(-0.804655174927, 0.190484844346),
(-0.804598471601, 0.193351897761),
(-0.804540823344, 0.19621828512),
(-0.804482228825, 0.199083996055),
(-0.804422686705, 0.201949020163),
(-0.804362195632, 0.204813347013),
(-0.804300754249, 0.207676966138),
(-0.804238361186, 0.210539867042),
(-0.804175015067, 0.213402039191),
(-0.804110714506, 0.216263472019),
(-0.804045458109, 0.219124154922),
(-0.803979244477, 0.221984077261),
(-0.8039120722, 0.224843228361),
(-0.803843939863, 0.227701597506),
(-0.803774846046, 0.230559173944),
(-0.803704789321, 0.233415946882),
(-0.803633768255, 0.236271905486),
(-0.798273751424, 0.236432422284),
(-0.792899993074, 0.236591388597),
(-0.787512706938, 0.236748793376),
(-0.782112103066, 0.236904627299),
(-0.776698387889, 0.237058882698),
(-0.771271764283, 0.237211553472),
(-0.765832431629, 0.237362635016),
(-0.76038058588, 0.237512124141),
(-0.754916419618, 0.237660019005),
(-0.749440122117, 0.237806319037),
(-0.743951879408, 0.23795102487),
(-0.738451874337, 0.238094138278),
(-0.732940286623, 0.238235662101),
(-0.727417292924, 0.238375600192),
(-0.721883066891, 0.238513957347),
(-0.716337779233, 0.238650739252),
(-0.71078159777, 0.23878595242),
(-0.705214687493, 0.238919604137),
(-0.699637210626, 0.239051702411),
(-0.694049326677, 0.239182255911),
(-0.688451192498, 0.239311273926),
(-0.682842962341, 0.23943876631),
(-0.677224787913, 0.239564743436),
(-0.671596818431, 0.239689216148),
(-0.665959200678, 0.239812195723),
(-0.660312079054, 0.239933693821),
(-0.654655595632, 0.24005372245),
(-0.648989890209, 0.240172293923),
(-0.643315100359, 0.24028942082),
(-0.637631361483, 0.240405115954),
(-0.631938806863, 0.240519392334),
(-0.626237567708, 0.240632263132)]},
70: {'color': 'skyblue',
'polygon': [(0.783183154053, 0.332197936521),
(0.783289758103, 0.329369819531),
(0.783395405111, 0.326540535923),
(0.783500096439, 0.323710097574),
(0.783603833446, 0.32087851631),
(0.783706617488, 0.318045803909),
(0.783808449916, 0.315211972102),
(0.783909332079, 0.312377032576),
(0.784009265318, 0.309540996969),
(0.784108250971, 0.306703876875),
(0.784206290372, 0.303865683843),
(0.784303384847, 0.30102642938),
(0.784399535718, 0.298186124947),
(0.784494744299, 0.295344781963),
(0.7845890119, 0.292502411807),
(0.784682339823, 0.289659025813),
(0.784774729364, 0.286814635277),
(0.78486618181, 0.283969251453),
(0.784956698443, 0.281122885556),
(0.785046280537, 0.27827554876),
(0.785134929355, 0.275427252203),
(0.785222646156, 0.272578006982),
(0.785309432188, 0.269727824157),
(0.785395288691, 0.266876714753),
(0.785480216896, 0.264024689754),
(0.785564218025, 0.261171760112),
(0.785647293291, 0.258317936741),
(0.785729443897, 0.25546323052),
(0.785810671036, 0.252607652293),
(0.785890975892, 0.24975121287),
(0.785970359636, 0.246893923027),
(0.786048823432, 0.244035793508),
(0.786126368431, 0.241176835021),
(0.780737280765, 0.241316414287),
(0.775335134253, 0.241455109452),
(0.769920109071, 0.241592860782),
(0.764492383523, 0.241729612768),
(0.759052134036, 0.241865313955),
(0.753599535153, 0.241999916759),
(0.748134759526, 0.242133377303),
(0.742657977917, 0.242265655252),
(0.737169359189, 0.242396713655),
(0.731669070312, 0.242526518786),
(0.726157276361, 0.242655039998),
(0.720634140516, 0.242782249575),
(0.715099824069, 0.242908122592),
(0.709554486426, 0.243032636777),
(0.703998285116, 0.243155772381),
(0.698431375793, 0.243277512046),
(0.692853912248, 0.243397840683),
(0.687266046417, 0.243516745351),
(0.681667928391, 0.243634215141),
(0.676059706427, 0.243750241063),
(0.670441526961, 0.243864815937),
(0.66481353462, 0.243977934289),
(0.659175872234, 0.244089592248),
(0.653528680855, 0.244199787451),
(0.64787209977, 0.244308518945),
(0.642206266516, 0.244415787097),
(0.636531316899, 0.244521593509),
(0.63084738501, 0.244625940929),
(0.625154603245, 0.244728833174),
(0.61945310232, 0.244830275049),
(0.613743011296, 0.244930272272),
(0.608024457595, 0.245028831404),
(0.607970176867, 0.247938121998),
(0.607915260059, 0.250846661216),
(0.607859706042, 0.253754439368),
(0.607803513668, 0.256661446736),
(0.607746681774, 0.259567673578),
(0.607689209181, 0.262473110125),
(0.607631094696, 0.265377746578),
(0.607572337107, 0.268281573114),
(0.607512935187, 0.27118457988),
(0.607452887694, 0.274086756996),
(0.60739219337, 0.276988094555),
(0.60733085094, 0.279888582619),
(0.607268859112, 0.282788211222),
(0.607206216581, 0.285686970372),
(0.607142922022, 0.288584850043),
(0.607078974097, 0.291481840182),
(0.607014371451, 0.294377930707),
(0.606949112711, 0.297273111503),
(0.60688319649, 0.300167372427),
(0.606816621383, 0.303060703304),
(0.606749385971, 0.305953093928),
(0.606681488817, 0.308844534061),
(0.606612928467, 0.311735013434),
(0.606543703453, 0.314624521746),
(0.606473812289, 0.317513048663),
(0.606403253473, 0.320400583817),
(0.606332025487, 0.323287116809),
(0.606260126796, 0.326172637205),
(0.606187555849, 0.329057134536),
(0.606114311079, 0.331940598301),
(0.606040390902, 0.334823017963),
(0.605965793717, 0.337704382948),
(0.611659260028, 0.337562741714),
(0.617344059723, 0.337419123172),
(0.62302006659, 0.337273518265),
(0.628687153085, 0.337125918852),
(0.634345190331, 0.336976317785),
(0.639994048106, 0.336824708991),
(0.645633594833, 0.336671087555),
(0.651263697577, 0.336515449809),
(0.656884222035, 0.336357793424),
(0.662495032533, 0.336198117501),
(0.668095992017, 0.336036422673),
(0.673686962056, 0.335872711204),
(0.679267802831, 0.335706987095),
(0.684838373137, 0.335539256195),
(0.690398530379, 0.33536952631),
(0.695948130576, 0.335197807324),
(0.701487028357, 0.335024111319),
(0.707015076963, 0.334848452694),
(0.712532128254, 0.334670848305),
(0.718038032705, 0.33449131759),
(0.723532639421, 0.334309882707),
(0.729015796133, 0.334126568681),
(0.734487349213, 0.333941403547),
(0.739947143679, 0.333754418503),
(0.745395023205, 0.333565648063),
(0.750830830133, 0.333375130222),
(0.756254405488, 0.333182906617),
(0.761665588988, 0.332989022704),
(0.767064219061, 0.332793527924),
(0.772450132866, 0.332596475894),
(0.777823166306, 0.332397924586),
(0.783183154053, 0.332197936521)]},
71: {'color': 'skyblue',
'polygon': [(0.592824401868, 0.338115611055),
(0.592894153534, 0.335231218218),
(0.592963247458, 0.332345775788),
(0.593031685219, 0.329459294265),
(0.593099468375, 0.326571784116),
(0.593166598471, 0.32368325577),
(0.593233077033, 0.320793719626),
(0.593298905572, 0.317903186048),
(0.59336408558, 0.315011665367),
(0.593428618536, 0.312119167882),
(0.593492505901, 0.309225703859),
(0.593555749117, 0.306331283533),
(0.593618349615, 0.303435917106),
(0.593680308804, 0.300539614751),
(0.59374162808, 0.297642386608),
(0.593802308822, 0.294744242788),
(0.593862352393, 0.29184519337),
(0.593921760138, 0.288945248405),
(0.593980533388, 0.286044417913),
(0.594038673457, 0.283142711886),
(0.594096181641, 0.280240140284),
(0.594153059223, 0.277336713042),
(0.594209307469, 0.274432440063),
(0.594264927627, 0.271527331225),
(0.59431992093, 0.268621396376),
(0.594374288597, 0.265714645336),
(0.594428031829, 0.2628070879),
(0.594481151811, 0.259898733832),
(0.594533649712, 0.256989592872),
(0.594585526688, 0.254079674732),
(0.594636783875, 0.251168989099),
(0.594687422397, 0.248257545631),
(0.594737443359, 0.245345353964),
(0.588991624025, 0.245445130023),
(0.583237876218, 0.24554350885),
(0.577476318971, 0.245640500286),
(0.571707069813, 0.245736114626),
(0.565930244796, 0.24583036256),
(0.560145958513, 0.245923255126),
(0.554354324131, 0.246014803662),
(0.548555453403, 0.246105019758),
(0.542749456701, 0.246193915219),
(0.536936443037, 0.246281502014),
(0.531116520085, 0.246367792246),
(0.52528979421, 0.246452798111),
(0.519456370488, 0.246536531863),
(0.513616352734, 0.246619005782),
(0.507769843525, 0.246700232143),
(0.501916944227, 0.246780223184),
(0.496057755015, 0.246858991083),
(0.490192374902, 0.246936547928),
(0.484320901764, 0.247012905697),
(0.478443432361, 0.247088076232),
(0.472560062366, 0.247162071218),
(0.466670886386, 0.247234902168),
(0.460775997991, 0.247306580399),
(0.454875489732, 0.24737711702),
(0.448969453172, 0.247446522916),
(0.443057978907, 0.247514808732),
(0.43714115659, 0.247581984863),
(0.431219074954, 0.247648061442),
(0.42529182184, 0.24771304833),
(0.419359484216, 0.247776955106),
(0.413422148203, 0.247839791061),
(0.407479899095, 0.247901565189),
(0.407447725246, 0.250846440748),
(0.407415166994, 0.253790617742),
(0.407382223837, 0.256734087194),
(0.407348895259, 0.259676840112),
(0.407315180735, 0.262618867479),
(0.407281079729, 0.26556016026),
(0.407246591695, 0.268500709397),
(0.407211716073, 0.271440505812),
(0.407176452295, 0.274379540407),
(0.407140799779, 0.277317804059),
(0.407104757932, 0.280255287627),
(0.40706832615, 0.283191981947),
(0.407031503818, 0.286127877833),
(0.406994290307, 0.289062966078),
(0.406956684978, 0.29199723745),
(0.406918687177, 0.2949306827),
(0.40688029624, 0.297863292551),
(0.40684151149, 0.300795057706),
(0.406802332238, 0.303725968847),
(0.406762757781, 0.306656016629),
(0.406722787404, 0.309585191688),
(0.406682420378, 0.312513484633),
(0.406641655962, 0.315440886052),
(0.406600493402, 0.318367386509),
(0.406558931928, 0.321292976544),
(0.406516970761, 0.324217646671),
(0.406474609103, 0.327141387383),
(0.406431846147, 0.330064189145),
(0.406388681069, 0.332986042401),
(0.406345113031, 0.335906937567),
(0.406301141183, 0.338826865036),
(0.40625676466, 0.341745815172),
(0.412179971103, 0.341657746978),
(0.418098136766, 0.341568176488),
(0.424011172455, 0.341477092051),
(0.429918988173, 0.341384481682),
(0.435821493103, 0.341290333075),
(0.441718595589, 0.341194633609),
(0.447610203122, 0.341097370361),
(0.453496222321, 0.340998530113),
(0.459376558914, 0.34089809937),
(0.465251117723, 0.340796064374),
(0.471119802648, 0.340692411113),
(0.476982516644, 0.340587125349),
(0.48283916171, 0.340480192625),
(0.48868963887, 0.340371598295),
(0.494533848154, 0.340261327538),
(0.500371688582, 0.340149365386),
(0.506203058149, 0.340035696748),
(0.512027853804, 0.339920306435),
(0.517845971439, 0.33980317919),
(0.523657305867, 0.33968429972),
(0.529461750809, 0.339563652725),
(0.535259198876, 0.339441222932),
(0.541049541556, 0.339316995137),
(0.546832669194, 0.339190954235),
(0.55260847098, 0.339063085268),
(0.558376834932, 0.338933373462),
(0.564137647883, 0.338801804277),
(0.569890795462, 0.338668363448),
(0.575636162086, 0.338533037043),
(0.581373630942, 0.338395811507),
(0.587103083973, 0.33825667372),
(0.592824401868, 0.338115611055)]},
72: {'color': 'skyblue',
'polygon': [(0.392552148662, 0.341853804353),
(0.392594654034, 0.338932929598),
(0.392636770863, 0.336011081511),
(0.392678499964, 0.333088269677),
(0.392719842136, 0.330164503656),
(0.392760798166, 0.327239792984),
(0.392801368828, 0.324314147172),
(0.392841554879, 0.321387575707),
(0.392881357067, 0.318460088052),
(0.392920776124, 0.315531693646),
(0.392959812769, 0.312602401904),
(0.392998467708, 0.309672222217),
(0.393036741635, 0.306741163955),
(0.393074635231, 0.303809236462),
(0.393112149164, 0.300876449062),
(0.39314928409, 0.297942811054),
(0.393186040652, 0.295008331716),
(0.393222419481, 0.292073020303),
(0.393258421196, 0.289136886047),
(0.393294046405, 0.28619993816),
(0.393329295704, 0.28326218583),
(0.393364169675, 0.280323638225),
(0.393398668891, 0.277384304489),
(0.393432793913, 0.274444193748),
(0.393466545291, 0.271503315104),
(0.393499923563, 0.268561677639),
(0.393532929255, 0.265619290413),
(0.393565562886, 0.262676162466),
(0.393597824961, 0.259732302817),
(0.393629715975, 0.256787720465),
(0.393661236412, 0.253842424388),
(0.393692386748, 0.250896423542),
(0.393723167446, 0.247949726865),
(0.387765224474, 0.248008549233),
(0.381802727577, 0.248066345722),
(0.375835758045, 0.248123123673),
(0.369864396483, 0.248178890111),
(0.363888722837, 0.248233651746),
(0.357908816407, 0.24828741497),
(0.351924755878, 0.248340185862),
(0.345936619331, 0.248391970187),
(0.339944484269, 0.248442773401),
(0.333948427634, 0.248492600651),
(0.32794852583, 0.248541456783),
(0.321944854736, 0.248589346342),
(0.315937489733, 0.24863627358),
(0.309926505713, 0.24868224246),
(0.303911977108, 0.24872725666),
(0.297893977898, 0.248771319582),
(0.291872581634, 0.248814434357),
(0.285847861455, 0.248856603851),
(0.279819890103, 0.248897830673),
(0.27378873994, 0.248938117184),
(0.267754482963, 0.248977465502),
(0.261717190824, 0.249015877511),
(0.25567693484, 0.249053354869),
(0.249633786011, 0.249089899017),
(0.243587815034, 0.249125511187),
(0.237539092319, 0.249160192411),
(0.231487687998, 0.249193943529),
(0.225433671945, 0.249226765199),
(0.219377113785, 0.249258657905),
(0.213318082908, 0.249289621966),
(0.207256648482, 0.249319657547),
(0.201192879465, 0.249348764664),
(0.201176615898, 0.25231356223),
(0.201160170622, 0.255277687307),
(0.201143543553, 0.258241131328),
(0.201126734606, 0.261203885706),
(0.201109743689, 0.264165941839),
(0.201092570704, 0.267127291106),
(0.201075215548, 0.27008792487),
(0.201057678115, 0.273047834477),
(0.20103995829, 0.276007011253),
(0.201022055954, 0.27896544651),
(0.201003970983, 0.281923131539),
(0.200985703247, 0.284880057615),
(0.200967252608, 0.287836215994),
(0.200948618925, 0.290791597915),
(0.200929802049, 0.293746194599),
(0.200910801826, 0.296699997247),
(0.200891618094, 0.299652997042),
(0.200872250687, 0.30260518515),
(0.20085269943, 0.305556552718),
(0.200832964143, 0.308507090871),
(0.20081304464, 0.311456790719),
(0.200792940725, 0.314405643352),
(0.2007726522, 0.317353639839),
(0.200752178855, 0.320300771231),
(0.200731520476, 0.323247028559),
(0.200710676842, 0.326192402835),
(0.200689647722, 0.329136885049),
(0.200668432881, 0.332080466174),
(0.200647032074, 0.33502313716),
(0.200625445049, 0.337964888938),
(0.200603671548, 0.340905712419),
(0.200581711303, 0.343845598492),
(0.206628630478, 0.343804015378),
(0.212673200897, 0.343761128244),
(0.218715351443, 0.343716935941),
(0.224755010701, 0.343671437128),
(0.230792106946, 0.343624630268),
(0.23682656813, 0.343576513611),
(0.242858321872, 0.34352708519),
(0.248887295445, 0.34347634281),
(0.254913415762, 0.343424284036),
(0.260936609366, 0.343370906192),
(0.266956802416, 0.343316206342),
(0.272973920673, 0.343260181289),
(0.278987889488, 0.343202827562),
(0.284998633789, 0.343144141411),
(0.291006078068, 0.343084118795),
(0.297010146362, 0.343022755379),
(0.303010762247, 0.342960046523),
(0.309007848818, 0.342895987275),
(0.315001328675, 0.342830572368),
(0.320991123913, 0.342763796208),
(0.326977156101, 0.342695652872),
(0.332959346272, 0.342626136101),
(0.338937614905, 0.342555239297),
(0.34491188191, 0.342482955516),
(0.350882066614, 0.342409277463),
(0.356848087745, 0.342334197494),
(0.362809863414, 0.342257707608),
(0.368767311103, 0.342179799448),
(0.374720347644, 0.342100464297),
(0.380668889207, 0.34201969308),
(0.386612851282, 0.341937476363),
(0.392552148662, 0.341853804353)]},
73: {'color': 'skyblue',
'polygon': [(0.186696244661, 0.344049368378),
(0.186716071456, 0.341108651486),
(0.186735724655, 0.338166998322),
(0.186755204503, 0.335224417976),
(0.186774511238, 0.332280919516),
(0.186793645088, 0.329336511992),
(0.186812606275, 0.326391204433),
(0.186831395016, 0.323445005847),
(0.186850011517, 0.320497925223),
(0.186868455982, 0.317549971528),
(0.186886728605, 0.314601153713),
(0.186904829573, 0.311651480708),
(0.186922759069, 0.308700961421),
(0.186940517267, 0.305749604746),
(0.186958104336, 0.302797419553),
(0.186975520439, 0.299844414696),
(0.186992765731, 0.29689059901),
(0.187009840363, 0.293935981311),
(0.187026744479, 0.290980570397),
(0.187043478217, 0.288024375047),
(0.18706004171, 0.285067404021),
(0.187076435083, 0.282109666064),
(0.187092658459, 0.2791511699),
(0.187108711952, 0.276191924236),
(0.187124595672, 0.273231937762),
(0.187140309725, 0.27027121915),
(0.187155854209, 0.267309777054),
(0.187171229219, 0.264347620112),
(0.187186434844, 0.261384756943),
(0.187201471168, 0.25842119615),
(0.18721633827, 0.255456946318),
(0.187231036224, 0.252492016016),
(0.187245565101, | |
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import datetime
import glob
import os
import re
import shutil
import stat
import string
import sys
import tempfile
from ambari_commons.exceptions import FatalException
from ambari_commons.os_check import OSCheck, OSConst
from ambari_commons.os_family_impl import OsFamilyImpl
from ambari_commons.os_utils import run_os_command, search_file, set_file_permissions
from ambari_commons.logging_utils import get_debug_mode, print_info_msg, print_warning_msg, print_error_msg, \
set_debug_mode
from ambari_server.properties import Properties
from ambari_server.userInput import get_validated_string_input
from ambari_server.utils import compare_versions, locate_file
# Runtime OS identification, resolved once at import time via os_check.
OS_VERSION = OSCheck().get_os_major_version()
OS_TYPE = OSCheck.get_os_type()
OS_FAMILY = OSCheck.get_os_family()
# Name of the server PID file (directory is supplied by ServerConfigDefaults.PID_DIR).
PID_NAME = "ambari-server.pid"
# Non-root user setup commands
NR_USER_PROPERTY = "ambari-server.user"
# Mask printed in place of real passwords in console output.
BLIND_PASSWORD = "*****"
# Common messages
PRESS_ENTER_MSG = "Press <enter> to continue."
# ambari.properties keys describing the host operating system.
OS_FAMILY_PROPERTY = "server.os_family"
OS_TYPE_PROPERTY = "server.os_type"
BOOTSTRAP_DIR_PROPERTY = "bootstrap.dir"
# Environment variable that overrides the configuration directory (see get_conf_dir).
AMBARI_CONF_VAR = "AMBARI_CONF_DIR"
AMBARI_PROPERTIES_FILE = "ambari.properties"
AMBARI_KRB_JAAS_LOGIN_FILE = "krb5JAASLogin.conf"
GET_FQDN_SERVICE_URL = "server.fqdn.service.url"
# Output / verbosity / debug property keys.
SERVER_OUT_FILE_KEY = "ambari.output.file.path"
VERBOSE_OUTPUT_KEY = "ambari.output.verbose"
DEBUG_MODE_KEY = "ambari.server.debug"
SUSPEND_START_MODE_KEY = "ambari.server.debug.suspend.start"
# Environment variables
AMBARI_SERVER_LIB = "AMBARI_SERVER_LIB"
JAVA_HOME = "JAVA_HOME"
AMBARI_VERSION_VAR = "AMBARI_VERSION_VAR"
# JDK-related property keys.
JAVA_HOME_PROPERTY = "java.home"
JDK_NAME_PROPERTY = "jdk.name"
JCE_NAME_PROPERTY = "jce.name"
# JDBC: glob patterns used to locate vendor JDBC driver jars on disk.
JDBC_PATTERNS = {"oracle": "*ojdbc*.jar", "mysql": "*mysql*.jar", "mssql": "*sqljdbc*.jar"}
#TODO property used incorrectly in local case, it was meant to be dbms name, not postgres database name,
# has workaround for now, as we don't need dbms name if persistence_type=local
JDBC_DATABASE_PROPERTY = "server.jdbc.database" # E.g., embedded|oracle|mysql|mssql|postgres
JDBC_DATABASE_NAME_PROPERTY = "server.jdbc.database_name" # E.g., ambari. Not used on Windows.
JDBC_HOSTNAME_PROPERTY = "server.jdbc.hostname"
JDBC_PORT_PROPERTY = "server.jdbc.port"
JDBC_POSTGRES_SCHEMA_PROPERTY = "server.jdbc.postgres.schema" # Only for postgres, defaults to same value as DB name
JDBC_USER_NAME_PROPERTY = "server.jdbc.user.name"
JDBC_PASSWORD_PROPERTY = "server.jdbc.user.passwd"
JDBC_PASSWORD_FILENAME = "password.dat"
JDBC_RCA_PASSWORD_FILENAME = "rca_password.dat"
# REST API port configuration.
CLIENT_API_PORT_PROPERTY = "client.api.port"
CLIENT_API_PORT = "8080"
SERVER_VERSION_FILE_PATH = "server.version.file"
PERSISTENCE_TYPE_PROPERTY = "server.persistence.type"
JDBC_DRIVER_PROPERTY = "server.jdbc.driver"
JDBC_DRIVER_PATH_PROPERTY = "server.jdbc.driver.path"
JDBC_URL_PROPERTY = "server.jdbc.url"
# connection pool (age and time are in seconds)
JDBC_CONNECTION_POOL_TYPE = "server.jdbc.connection-pool"
JDBC_CONNECTION_POOL_ACQUISITION_SIZE = "server.jdbc.connection-pool.acquisition-size"
JDBC_CONNECTION_POOL_MAX_AGE = "server.jdbc.connection-pool.max-age"
JDBC_CONNECTION_POOL_MAX_IDLE_TIME = "server.jdbc.connection-pool.max-idle-time"
JDBC_CONNECTION_POOL_MAX_IDLE_TIME_EXCESS = "server.jdbc.connection-pool.max-idle-time-excess"
JDBC_CONNECTION_POOL_IDLE_TEST_INTERVAL = "server.jdbc.connection-pool.idle-test-interval"
# RCA database connection properties (several share key strings with the
# main JDBC properties above).
JDBC_RCA_DATABASE_PROPERTY = "server.jdbc.database"
JDBC_RCA_HOSTNAME_PROPERTY = "server.jdbc.hostname"
JDBC_RCA_PORT_PROPERTY = "server.jdbc.port"
JDBC_RCA_SCHEMA_PROPERTY = "server.jdbc.schema"
JDBC_RCA_DRIVER_PROPERTY = "server.jdbc.rca.driver"
JDBC_RCA_URL_PROPERTY = "server.jdbc.rca.url"
JDBC_RCA_USER_NAME_PROPERTY = "server.jdbc.rca.user.name"
JDBC_RCA_PASSWORD_FILE_PROPERTY = "server.jdbc.rca.user.passwd"
# NOTE(review): "<PASSWORD>" looks like a redacted placeholder rather than a
# real alias value -- confirm against the upstream Ambari source.
JDBC_RCA_PASSWORD_ALIAS = "<PASSWORD>"
### # Windows-specific # ###
JDBC_USE_INTEGRATED_AUTH_PROPERTY = "server.jdbc.use.integrated.auth"
JDBC_RCA_USE_INTEGRATED_AUTH_PROPERTY = "server.jdbc.rca.use.integrated.auth"
### # End Windows-specific # ###
# resources repo configuration
RESOURCES_DIR_PROPERTY = "resources.dir"
# stack repo upgrade
STACK_LOCATION_KEY = 'metadata.path'
# LDAP security
IS_LDAP_CONFIGURED = "ambari.ldap.isConfigured"
LDAP_MGR_PASSWORD_ALIAS = "ambari.ldap.manager.password"
LDAP_MGR_PASSWORD_PROPERTY = "authentication.ldap.managerPassword"
LDAP_MGR_PASSWORD_FILENAME = "ldap-password.dat"
LDAP_MGR_USERNAME_PROPERTY = "authentication.ldap.managerDn"
LDAP_PRIMARY_URL_PROPERTY = "authentication.ldap.primaryUrl"
# SSL truststore
SSL_TRUSTSTORE_PASSWORD_ALIAS = "ambari.ssl.trustStore.password"
SSL_TRUSTSTORE_PATH_PROPERTY = "ssl.trustStore.path"
SSL_TRUSTSTORE_PASSWORD_PROPERTY = "ssl.trustStore.password"
SSL_TRUSTSTORE_TYPE_PROPERTY = "ssl.trustStore.type"
# SSL common
SSL_API = 'api.ssl'
SSL_API_PORT = 'client.api.ssl.port'
DEFAULT_SSL_API_PORT = 8443
# JDK
JDK_RELEASES="java.releases"
VIEWS_DIR_PROPERTY = "views.dir"
#Common setup or upgrade message
SETUP_OR_UPGRADE_MSG = "- If this is a new setup, then run the \"ambari-server setup\" command to create the user\n" \
"- If this is an upgrade of an existing setup, run the \"ambari-server upgrade\" command.\n" \
"Refer to the Ambari documentation for more information on setup and upgrade."
DEFAULT_DB_NAME = "ambari"
class ServerConfigDefaults(object):
  """OS-agnostic defaults for the Ambari server: filesystem locations,
  ownership/permission tables, and standard console messages.

  Fields initialized to an empty string or empty list here are placeholders
  that the OS-specific subclasses override in their ``__init__``.
  """

  def __init__(self):
    # Filesystem layout shared by all platforms.
    self.JAVA_SHARE_PATH = "/usr/share/java"
    self.OUT_DIR = os.path.join(os.sep, "var", "log", "ambari-server")
    self.SERVER_OUT_FILE = os.path.join(self.OUT_DIR, "ambari-server.out")
    self.SERVER_LOG_FILE = os.path.join(self.OUT_DIR, "ambari-server.log")
    self.PID_DIR = os.path.join(os.sep, "var", "run", "ambari-server")
    self.ROOT_FS_PATH = os.sep
    self.JDK_SECURITY_DIR = os.path.join("jre", "lib", "security")
    # Ownership/permissions mapping:
    # (path, permissions, user/group template, recursive) tuples, executed in
    # list order; "{0}" is replaced by the customized ambari-server username.
    self.NR_ADJUST_OWNERSHIP_LIST = []
    self.NR_CHANGE_OWNERSHIP_LIST = []
    # Permission defaults kept as octal-style strings.
    self.MASTER_KEY_FILE_PERMISSIONS = "640"
    self.CREDENTIALS_STORE_FILE_PERMISSIONS = "640"
    self.TRUST_STORE_LOCATION_PERMISSIONS = "640"
    self.DEFAULT_DB_NAME = "ambari"
    # Everything below is platform-specific and supplied by the subclasses;
    # initialize to the empty string so the attributes always exist.
    for placeholder in (
        "JDK_INSTALL_DIR", "JDK_SEARCH_PATTERN", "JAVA_EXE_SUBPATH",
        "SERVER_RESOURCES_DIR", "DEFAULT_CONF_DIR", "DEFAULT_LIBS_DIR",
        "AMBARI_PROPERTIES_BACKUP_FILE", "AMBARI_KRB_JAAS_LOGIN_BACKUP_FILE",
        "NR_USERADD_CMD", "STACK_LOCATION_DEFAULT", "DEFAULT_VIEWS_DIR",
        "keytool_bin_subpath",
        "MESSAGE_SERVER_RUNNING_AS_ROOT", "MESSAGE_ERROR_SETUP_NOT_ROOT",
        "MESSAGE_ERROR_RESET_NOT_ROOT", "MESSAGE_ERROR_UPGRADE_NOT_ROOT",
        "MESSAGE_CHECK_FIREWALL"):
      setattr(self, placeholder, "")
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class ServerConfigDefaultsWindows(ServerConfigDefaults):
  """Windows defaults: paths are relative to the install root and the
  ownership rules use icacls-style permission flags."""

  def __init__(self):
    super(ServerConfigDefaultsWindows, self).__init__()
    # --- JDK discovery ---
    self.JDK_INSTALL_DIR = "C:\\"
    self.JDK_SEARCH_PATTERN = "j[2se|dk|re]*"
    self.JAVA_EXE_SUBPATH = "bin\\java.exe"
    self.keytool_bin_subpath = "bin\\keytool.exe"
    # --- Configuration / resource locations (relative to the install root) ---
    self.DEFAULT_CONF_DIR = "conf"
    self.DEFAULT_LIBS_DIR = "lib"
    self.SERVER_RESOURCES_DIR = "resources"
    self.STACK_LOCATION_DEFAULT = "resources\\stacks"
    self.DEFAULT_VIEWS_DIR = "resources\\views"
    self.AMBARI_PROPERTIES_BACKUP_FILE = "ambari.properties.backup"
    self.AMBARI_KRB_JAAS_LOGIN_BACKUP_FILE = ""  # ToDo: should be adjusted later
    # Ownership/permission rules, applied in list order; "{0}" is replaced by
    # the customized ambari-server username.  "M"/"F" are icacls flags
    # (modify / full control); numeric entries mirror the POSIX octal modes.
    ownership_rules = [
      (self.OUT_DIR, "M", "{0}", True),   # modify access over the log tree
      (self.OUT_DIR, "F", "{0}", False),  # full control on the dir itself
      (self.PID_DIR, "M", "{0}", True),
      (self.PID_DIR, "F", "{0}", False),
      ("bootstrap", "F", "{0}", False),
      ("ambari-env.cmd", "F", "{0}", False),
      ("keystore", "M", "{0}", True),
      ("keystore", "F", "{0}", False),
      ("keystore\\db", "700", "{0}", False),
      ("keystore\\db\\newcerts", "700", "{0}", False),
      ("resources\\stacks", "755", "{0}", True),
      ("resources\\custom_actions", "755", "{0}", True),
      ("conf", "644", "{0}", True),
      ("conf", "755", "{0}", False),
      ("conf\\password.dat", "640", "{0}", False),
      # Note: conf/password.dat is (re)generated later at store_password_file,
      # which is why it appears in this list as well.
    ]
    self.NR_ADJUST_OWNERSHIP_LIST = ownership_rules
    self.NR_USERADD_CMD = "cmd /C net user {0} {1} /ADD"
    # --- User-facing messages ---
    self.MESSAGE_SERVER_RUNNING_AS_ROOT = "Ambari Server running with 'root' privileges."
    self.MESSAGE_ERROR_SETUP_NOT_ROOT = "Ambari-server setup must be run with administrator-level privileges"
    self.MESSAGE_ERROR_RESET_NOT_ROOT = "Ambari-server reset must be run with administrator-level privileges"
    self.MESSAGE_ERROR_UPGRADE_NOT_ROOT = "Ambari-server upgrade must be run with administrator-level privileges"
    self.MESSAGE_CHECK_FIREWALL = "Checking firewall status..."
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class ServerConfigDefaultsLinux(ServerConfigDefaults):
  """Linux (default-family) defaults: absolute FHS paths and octal
  chmod-style permission strings."""

  def __init__(self):
    super(ServerConfigDefaultsLinux, self).__init__()
    # --- JDK discovery ---
    self.JDK_INSTALL_DIR = "/usr/jdk64"
    self.JDK_SEARCH_PATTERN = "jdk*"
    self.JAVA_EXE_SUBPATH = "bin/java"
    self.keytool_bin_subpath = "bin/keytool"
    # --- Configuration / resource locations ---
    self.DEFAULT_CONF_DIR = "/etc/ambari-server/conf"
    self.DEFAULT_LIBS_DIR = "/usr/lib/ambari-server"
    self.SERVER_RESOURCES_DIR = "/var/lib/ambari-server/resources"
    self.STACK_LOCATION_DEFAULT = "/var/lib/ambari-server/resources/stacks"
    self.DEFAULT_VIEWS_DIR = "/var/lib/ambari-server/resources/views"
    self.AMBARI_PROPERTIES_BACKUP_FILE = "ambari.properties.rpmsave"
    self.AMBARI_KRB_JAAS_LOGIN_BACKUP_FILE = "krb5JAASLogin.conf.rpmsave"
    # Permission rules (path, octal mode, owner template, recursive), applied
    # in list order; "{0}" is replaced by the customized ambari-server user.
    ownership_rules = [
      ("/var/log/ambari-server/", "644", "{0}", True),
      ("/var/log/ambari-server/", "755", "{0}", False),
      ("/var/run/ambari-server/", "644", "{0}", True),
      ("/var/run/ambari-server/", "755", "{0}", False),
      ("/var/run/ambari-server/bootstrap", "755", "{0}", False),
      ("/var/lib/ambari-server/ambari-env.sh", "700", "{0}", False),
      ("/var/lib/ambari-server/ambari-sudo.sh", "700", "{0}", False),
      ("/var/lib/ambari-server/keys/", "600", "{0}", True),
      ("/var/lib/ambari-server/keys/", "700", "{0}", False),
      ("/var/lib/ambari-server/keys/db/", "700", "{0}", False),
      ("/var/lib/ambari-server/keys/db/newcerts/", "700", "{0}", False),
      ("/var/lib/ambari-server/keys/.ssh", "700", "{0}", False),
      ("/var/lib/ambari-server/resources/common-services/", "755", "{0}", True),
      ("/var/lib/ambari-server/resources/stacks/", "755", "{0}", True),
      ("/var/lib/ambari-server/resources/custom_actions/", "755", "{0}", True),
      ("/var/lib/ambari-server/resources/host_scripts/", "755", "{0}", True),
      ("/var/lib/ambari-server/resources/views/", "644", "{0}", True),
      ("/var/lib/ambari-server/resources/views/", "755", "{0}", False),
      ("/var/lib/ambari-server/resources/views/work/", "755", "{0}", True),
      ("/etc/ambari-server/conf/", "644", "{0}", True),
      ("/etc/ambari-server/conf/", "755", "{0}", False),
      ("/etc/ambari-server/conf/password.dat", "640", "{0}", False),
      ("/var/lib/ambari-server/keys/pass.txt", "600", "{0}", False),
      ("/etc/ambari-server/conf/ldap-password.dat", "640", "{0}", False),
      ("/var/run/ambari-server/stack-recommendations/", "744", "{0}", True),
      ("/var/run/ambari-server/stack-recommendations/", "755", "{0}", False),
      ("/var/lib/ambari-server/resources/data/", "644", "{0}", False),
      ("/var/lib/ambari-server/resources/data/", "755", "{0}", False),
      ("/var/lib/ambari-server/data/tmp/", "644", "{0}", True),
      ("/var/lib/ambari-server/data/tmp/", "755", "{0}", False),
      ("/var/lib/ambari-server/data/cache/", "600", "{0}", True),
      ("/var/lib/ambari-server/data/cache/", "700", "{0}", False),
      # Note: /etc/ambari-server/conf/password.dat is (re)generated later at
      # store_password_file, which is why it appears in this list as well.
    ]
    self.NR_ADJUST_OWNERSHIP_LIST = ownership_rules
    # Directory trees whose ownership is transferred wholesale to the
    # configured server user.
    self.NR_CHANGE_OWNERSHIP_LIST = [
      ("/var/lib/ambari-server", "{0}", True),
      ("/usr/lib/ambari-server", "{0}", True),
      ("/var/log/ambari-server", "{0}", True),
      ("/var/run/ambari-server", "{0}", True),
      ("/etc/ambari-server", "{0}", True),
    ]
    # Create the service account with a no-login shell; locate_file finds the
    # platform's nologin binary (e.g. under /sbin).
    self.NR_USERADD_CMD = 'useradd -M --comment "{1}" --shell %s -d /var/lib/ambari-server/keys/ {0}' % locate_file('nologin', '/sbin')
    # --- User-facing messages ---
    self.MESSAGE_SERVER_RUNNING_AS_ROOT = "Ambari Server running with administrator privileges."
    self.MESSAGE_ERROR_SETUP_NOT_ROOT = "Ambari-server setup should be run with root-level privileges"
    self.MESSAGE_ERROR_RESET_NOT_ROOT = "Ambari-server reset should be run with root-level privileges"
    self.MESSAGE_ERROR_UPGRADE_NOT_ROOT = "Ambari-server upgrade must be run with root-level privileges"
    self.MESSAGE_CHECK_FIREWALL = "Checking firewall status..."
# Shared server-configuration defaults used when building the commands below.
configDefaults = ServerConfigDefaults()

# Security
# Property keys looked up in ambari.properties for master-key / encryption setup.
SECURITY_KEYS_DIR = "security.server.keys_dir"
SECURITY_MASTER_KEY_LOCATION = "security.master.key.location"
SECURITY_KEY_IS_PERSISTED = "security.master.key.ispersisted"
# Environment variable that may carry the master key instead of a file.
SECURITY_KEY_ENV_VAR_NAME = "AMBARI_SECURITY_MASTER_KEY"
SECURITY_MASTER_KEY_FILENAME = "master"
SECURITY_IS_ENCRYPTION_ENABLED = "security.passwords.encryption.enabled"
SECURITY_KERBEROS_JASS_FILENAME = "krb5JAASLogin.conf"
# Command templates for the Java CredentialProvider / MasterKeyService tools.
# Placeholders: {0} = java binary, {1} = classpath, {2}..{4} = tool arguments.
# Output is redirected into the server out file.
SECURITY_PROVIDER_GET_CMD = "{0} -cp {1} " + \
                            "org.apache.ambari.server.security.encryption" + \
                            ".CredentialProvider GET {2} {3} {4} " + \
                            "> " + configDefaults.SERVER_OUT_FILE + " 2>&1"
SECURITY_PROVIDER_PUT_CMD = "{0} -cp {1} " + \
                            "org.apache.ambari.server.security.encryption" + \
                            ".CredentialProvider PUT {2} {3} {4} " + \
                            "> " + configDefaults.SERVER_OUT_FILE + " 2>&1"
SECURITY_PROVIDER_KEY_CMD = "{0} -cp {1} " + \
                            "org.apache.ambari.server.security.encryption" + \
                            ".MasterKeyServiceImpl {2} {3} {4} " + \
                            "> " + configDefaults.SERVER_OUT_FILE + " 2>&1"
def get_conf_dir():
    """Return the Ambari configuration directory.

    Uses the directory named by the AMBARI_CONF_VAR environment variable
    when it is set; otherwise reports and returns the built-in default.
    """
    conf_dir = os.environ.get(AMBARI_CONF_VAR)
    if conf_dir is not None:
        return conf_dir
    default_conf_dir = configDefaults.DEFAULT_CONF_DIR
    print_info_msg(AMBARI_CONF_VAR + " is not set, using default " + default_conf_dir)
    return default_conf_dir
def find_properties_file():
    """Locate the ambari.properties file on the configuration search path.

    Returns the full path of the properties file.
    Raises FatalException(1, ...) when the file cannot be found.
    """
    conf_file = search_file(AMBARI_PROPERTIES_FILE, get_conf_dir())
    if conf_file is None:
        err = 'File %s not found in search path $%s: %s' % (AMBARI_PROPERTIES_FILE,
                                                            AMBARI_CONF_VAR, get_conf_dir())
        # print() is valid on both Python 2 and 3; the original bare
        # "print err" statement is a SyntaxError under Python 3.
        print(err)
        raise FatalException(1, err)
    else:
        print_info_msg('Loading properties from ' + conf_file)
    return conf_file
# Load ambari properties and return dict with values
def get_ambari_properties():
conf_file = find_properties_file()
| |
self.old_context = None # Mapping filename
# headers corresponding to the new context
self.new_headers = headers.HeaderGenerator(None,None,None) # Abstract for pylint, replaced later
# comparison variables
self.compare_prior = None # bool
self.old_headers = headers.HeaderGenerator(None,None,None) # Abstract for pylint, replaced later
self.old_bestrefs_name = None # info str identifying comparison results source, .pmap filename or text
self.pickle_headers = None # any headers loaded from pickle files
if self.args.remote_bestrefs:
os.environ["CRDS_MODE"] = "remote"
self.datasets_since = self.args.datasets_since
self.active_header = None # new or old header last processed with bestrefs
    def complex_init(self):
        """Complex init tasks run inside any --pdb environment, also unfortunately --profile.

        Determines the instruments and dataset cut-off date to process, then
        loads the new (and optionally old) header sources.  Returns False when
        a --diffs-only run finds no affected instruments, True otherwise.
        """
        assert not (self.args.sync_references and self.readonly_cache), "Readonly cache, cannot fetch references."
        self.new_context, self.old_context = self.setup_contexts()
        # Support 0 to 1 mutually exclusive source modes and/or any number of pickles
        exclusive_source_modes = [self.args.files, self.args.datasets, self.args.instruments,
                                  self.args.diffs_only, self.args.all_instruments]
        # Count how many of the mutually exclusive modes were actually given.
        source_modes = len(exclusive_source_modes) - exclusive_source_modes.count(None)
        using_pickles = int(bool(self.args.load_pickles))
        assert source_modes <= 1 and (source_modes + using_pickles) >= 1, \
            "Must specify one of: --files, --datasets, --instruments, --all-instruments, --diffs-only and/or --load-pickles."
        if self.args.diffs_only:
            # Restrict processing to instruments affected by old->new context diffs.
            assert self.new_context and self.old_context, \
                "--diffs-only only works for context-to-context bestrefs."
            differ = diff.MappingDifferencer(
                self.observatory, self.old_context, self.new_context,
                include_header_diffs=True, hide_boring_diffs=True)
            self.affected_instruments = differ.get_affected()
            log.info("Mapping differences from", repr(self.old_context),
                     "-->", repr(self.new_context), "affect:\n",
                     log.PP(self.affected_instruments))
            self.instruments = self.affected_instruments.keys()
            if not self.instruments:
                # Nothing to do; caller can short-circuit the run.
                log.info("No instruments were affected.")
                return False
            if (self.args.datasets_since == "auto" and
                (differ.header_modified() or differ.files_deleted())):
                # Header-only changes / deletions can affect any date, so the
                # USEAFTER-based "auto" cut-off cannot be trusted here.
                log.info("Checking all dates due to header changes or file deletions.")
                self.args.datasets_since = MIN_DATE
        elif self.args.instruments:
            self.instruments = self.args.instruments
        elif self.args.all_instruments:
            # All supported instruments, minus pseudo-instrument entries.
            instruments = list(self.obs_pkg.INSTRUMENTS)
            for instr in ("all","system","synphot"):
                if instr in instruments:
                    instruments.remove(instr)
            self.instruments = instruments
        else:
            self.instruments = []
        if self.args.datasets_since == "auto":
            # Per-instrument cut-off dates derived from reference USEAFTERs.
            datasets_since = self.auto_datasets_since()
        else:
            datasets_since = self.args.datasets_since
        # headers corresponding to the new context
        self.new_headers = self.init_headers(self.new_context, datasets_since)
        self.compare_prior, self.old_headers, self.old_bestrefs_name = self.init_comparison(datasets_since)
        if not self.compare_prior:
            log.info("No comparison context or source comparison requested.")
        if self.args.files and not self.args.update_bestrefs:
            log.info("No file header updates requested; dry run. Use --update-bestrefs to update FITS headers.")
        return True
@property
def only_ids(self):
return self._normalized(self.args.only_ids) or None
@property
def drop_ids(self):
return self._normalized(self.args.drop_ids)
def _normalized(self, id_list):
if id_list:
return [self.normalize_id(dataset) for dataset in id_list]
else:
return []
def normalize_id(self, dataset):
"""Convert a given `dataset` ID to uppercase. For the sake of simplicity convert
simple IDs into unassociated exposure IDs in <exposure>:<exposure> form. This is a
convenience for JWST where currently the <product> term is always identical to
<exposure>. Where they're different as-in associated exposures for HST, you must
specify the drop ID fully to avoid misinterpretation as an unassociated exposure.
"""
dataset = dataset.upper()
if ":" not in dataset:
dataset = dataset + ":" + dataset
return dataset
    def auto_datasets_since(self):
        """Support --datasets-since="auto" and compute min EXPTIME for all references determined by diffs.
        Returns { instrument: EXPTIME, ... }
        """
        datasets_since = {}
        self.oldctx = crds.get_pickled_mapping(self.old_context)  # reviewed
        self.newctx = crds.get_pickled_mapping(self.new_context)  # reviewed
        for instrument in self.oldctx.selections:
            old_imap = self.oldctx.get_imap(instrument)
            new_imap = self.newctx.get_imap(instrument)
            # References added to / removed from this instrument's mapping.
            added_references = diff.get_added_references(old_imap, new_imap)
            deleted_references = diff.get_deleted_references(old_imap, new_imap)
            # MAX_DATE acts as "no constraint" so min() below picks real dates.
            added_exp_time = deleted_exp_time = MAX_DATE
            if added_references:
                added_exp_time = matches.get_minimum_exptime(new_imap, added_references)
            if deleted_references:
                deleted_exp_time = matches.get_minimum_exptime(old_imap, deleted_references)
            exp_time = min(added_exp_time, deleted_exp_time)
            if exp_time != MAX_DATE:  # if a USEAFTER min found, remember it.
                datasets_since[instrument] = exp_time
        log.info("Possibly affected --datasets-since dates determined by",
                 repr(self.old_context), "-->", repr(self.new_context), "are:\n", log.PP(datasets_since))
        return datasets_since
    def add_args(self):
        """Add bestrefs script-specific command line parameters."""
        # --- context selection ---
        self.add_argument("-n", "--new-context", dest="new_context",
                          help="Compute the updated best references using this context. "
                               "Uses current operational context by default.",
                          default=None, type=cmdline.mapping_spec)
        self.add_argument("-o", "--old-context", dest="old_context",
                          help="Compare bestrefs recommendations from two contexts.",
                          metavar="OLD_CONTEXT", default=None, type=cmdline.mapping_spec)
        self.add_argument("--fetch-old-headers", dest="fetch_old_headers", action="store_true",
                          help="Fetch old headers in accord with old parameter lists.   Slower,  avoid unless required.")
        # --- mutually exclusive dataset source modes (see complex_init) ---
        self.add_argument("-f", "--files", nargs="+", metavar="FILES", default=None,
                          help="Dataset files to compute best references for and optionally update headers.")
        self.add_argument("-d", "--datasets", nargs="+", metavar="IDs", default=None,
                          help="Dataset ids to consult database for matching parameters and old results.")
        self.add_argument("--all-instruments", action="store_true", default=None,
                          help="Compute best references for cataloged datasets for all supported instruments in database.")
        self.add_argument("-i", "--instruments", nargs="+", metavar="INSTRUMENTS", default=None,
                          help="Instruments to compute best references for, all historical datasets in database.")
        # --- pickle / json header caches ---
        self.add_argument("-p", "--load-pickles", nargs="*", default=None,
                          help="Load dataset headers and prior bestrefs from pickle files,  in worst-to-best update order.  Can also load .json files.")
        self.add_argument("-a", "--save-pickle", default=None,
                          help="Write out the combined dataset headers to the specified pickle file.  Can also store .json file.")
        # --- reference type filtering ---
        self.add_argument("-t", "--types", nargs="+",  metavar="REFERENCE_TYPES",  default=(),
                          help="Explicitly define the list of reference types to process, --skip-types also still applies.")
        self.add_argument("-k", "--skip-types", nargs="+",  metavar="SKIPPED_REFERENCE_TYPES",  default=(),
                          help="A list of reference types which should not be processed,  defaulting to nothing.")
        self.add_argument("--all-types", action="store_true",
                          help="Evaluate every reference file type regardless of dataset exposure type.")
        self.add_argument("--diffs-only", action="store_true", default=None,
                          help="For context-to-context comparison, choose only instruments and types from context differences.")
        self.add_argument("--datasets-since", default=None, type=reformat_date_or_auto,
                          help="Cut-off date for datasets, none earlier than this.  Use 'auto' to exploit reference USEAFTER.  OFF by default.")
        # --- comparison / update behavior ---
        self.add_argument("-c", "--compare-source-bestrefs", dest="compare_source_bestrefs", action="store_true",
                          help="Compare new bestrefs recommendations to recommendations from data source,  files or database.")
        self.add_argument("--update-pickle", action="store_true",
                          help="Replace source bestrefs with CRDS bestrefs in output pickle.  For setting up regression tests.")
        self.add_argument("--only-ids", nargs="*", default=None, dest="only_ids", metavar="IDS",
                          help="If specified, process only the listed dataset ids.")
        self.add_argument("--drop-ids", nargs="*", default=[], dest="drop_ids", metavar="IDS",
                          help="If specified, skip these dataset ids.")
        self.add_argument("-u", "--update-bestrefs",  dest="update_bestrefs", action="store_true",
                          help="Update sources with new best reference recommendations.")
        # --- reporting options ---
        self.add_argument("--print-affected", dest="print_affected", action="store_true",
                          help="Print names of products for which the new context would assign new references for some exposure.")
        self.add_argument("--print-affected-details", action="store_true",
                          help="Include instrument and affected types in addition to compound names of affected exposures.")
        self.add_argument("--print-new-references", action="store_true",
                          help="Prints one line per reference file change.  If no comparison requested,  prints all bestrefs.")
        self.add_argument("--print-update-counts", action="store_true",
                          help="Prints dictionary of update counts by instrument and type,  status on updated files.")
        self.add_argument("--print-error-headers", action="store_true",
                          help="For each tracked error,  print out the corresponding dataset header for offline analysis.")
        # --- cache / remote execution ---
        self.add_argument("-r", "--remote-bestrefs", action="store_true",
                          help="Compute best references on CRDS server,  convenience for env var CRDS_MODE='remote'")
        self.add_argument("-m", "--sync-mappings", default="1", dest="sync_mappings", type=int,
                          help="Fetch the required context mappings to the local cache.  Defaults TRUE.")
        # NOTE(review): "refefences" typo below is in the user-visible help text;
        # left untouched in this documentation-only pass.
        self.add_argument("-s", "--sync-references", default="0", dest="sync_references", type=int,
                          help="Fetch the refefences recommended by new context to the local cache. Defaults FALSE.")
        # --- error semantics ---
        self.add_argument("--differences-are-errors", action="store_true",
                          help="Treat recommendation differences between new context and original source as errors.")
        self.add_argument("--allow-bad-rules", action="store_true",
                          help="Only warn if a context which is marked 'bad' is used, otherwise error.")
        self.add_argument("--allow-bad-references", action="store_true",
                          help="Only warn if a reference which is marked bad is recommended, otherwise error.")
        self.add_argument("--undefined-differences-matter", action="store_true",
                          help="If not set, a transition from UNDEFINED to anything else is not considered a difference error.")
        self.add_argument("--na-differences-matter", action="store_true",
                          help="If not set,  either CDBS or CRDS recommending N/A is OK to mismatch.")
        # --- convenience abbreviations for common flag combinations ---
        self.add_argument("-g", "--regression", action="store_true",
                          help="Abbreviation for --compare-source-bestrefs --differences-are-errors --dump-unique-errors --stats")
        self.add_argument("--check-context", action="store_true",
                          help="Abbreviation for --undefined-differences-matter "
                               "--na-differences-matter --dump-unique-errors --stats")
        self.add_argument("--affected-datasets", action="store_true",
                          help="Abbreviation for --diffs-only --datasets-since=auto --undefined-differences-matter "
                               "--na-differences-matter --print-update-counts --print-affected --dump-unique-errors --stats")
        self.add_argument("-z", "--optimize-tables", action="store_true",
                          help="If set, apply row-based optimizations to screen out inconsequential table updates.")
        self.add_argument("--eliminate-duplicate-cases", action="store_true",
                          help="Categorize unique bestrefs results as errors to determine representative test cases...  Replaces normal error counts with coverage counts and ids.")
        # Mixin contributes the --dump-unique-errors family of arguments.
        cmdline.UniqueErrorsMixin.add_args(self)
def setup_contexts(self):
"""Determine and cache the new and comparison .pmap's for this run."""
if self.args.new_context is None:
log.verbose("Using default new context", repr(self.default_context),
"for computing updated best references.", verbosity=25)
new_context = self.default_context
else:
log.verbose("Using explicit new context", repr(self.args.new_context),
"for computing updated best references.", verbosity=25)
new_context = self.resolve_context(self.args.new_context)
if self.args.old_context is not None:
log.verbose("Using explicit old context", repr(self.args.old_context), verbosity=25)
old_context = self.resolve_context(self.args.old_context)
else:
old_context = None
if self.server_info.effective_mode != "remote":
if old_context is not None and not os.path.dirname(old_context):
self.dump_mappings([old_context])
if not os.path.dirname(new_context):
self.dump_mappings([new_context])
return new_context, old_context
    @utils.cached  # memoized: each (name, context, instrument) is checked once
    def warn_bad_context(self, name, context, instrument):
        """Issue a warning if `context` of named `name` is a known bad file."""
        # Get subset of bad files contained by this context.
        if context is None:
            return
        bad_contained = heavy_client.get_bad_mappings_in_context(self.observatory, context, instrument)
        if bad_contained:
            # Escalate to a tracked error unless bad rules are explicitly allowed.
            if not config.ALLOW_BAD_RULES:
                self.log_and_track_error("ALL", "ALL", "ALL", name, "=", repr(context),
                                         "is bad or contains bad rules.  Use is not recommended,  results may not be scientifically valid.")
            else:
                log.warning(name, "=", repr(context),
                            "is bad or contains bad rules.  Use is not recommended,  results may not be scientifically valid.")
            log.verbose(name, "=", repr(context), "contains bad rules", repr(bad_contained))
def warn_bad_reference(self, dataset, instrument, filekind, | |
{ self.model_name}.')
cur_stats = self.latest_round.get_curation_stats()
self.json_stats['curation_summary'] = cur_stats
def make_changes_over_time(self):
"""Add changes to model over time to json_stats."""
logger.info(f'Comparing changes over time for {self.model_name}.')
self.json_stats['changes_over_time'] = {
'number_of_statements': self.get_over_time(
'model_summary', 'number_of_statements'),
'number_of_raw_papers': self.get_over_time(
'paper_summary', 'number_of_raw_papers'),
'number_of_assembled_papers': self.get_over_time(
'paper_summary', 'number_of_assembled_papers'),
'dates': self.get_dates()}
def get_over_time(self, section, metrics, mc_type='pysb'):
logger.info(f'Getting changes over time in {metrics} '
f'for {self.model_name}.')
# First available stats
if not self.previous_json_stats:
previous_data = []
else:
previous_data = (
self.previous_json_stats['changes_over_time'].get(metrics, []))
previous_data.append(self.json_stats[section][metrics])
return previous_data
def save_to_s3(self):
date_str = self.latest_round.date_str
stats_key = (
f'model_stats/{self.model_name}/model_stats_{date_str}.json')
super().save_to_s3_key(stats_key)
def _get_latest_round(self):
latest_key = find_latest_s3_file(
self.bucket, f'results/{self.model_name}/model_manager_',
extension='.pkl')
if latest_key is None:
logger.info(f'Could not find a key to the latest model manager '
f'for {self.model_name} model.')
return
logger.info(f'Loading latest round from {latest_key}')
mr = ModelRound.load_from_s3_key(latest_key, bucket=self.bucket,
load_estmts=True)
return mr
def _get_previous_round(self):
if not self.previous_json_stats:
logger.info('Not loading previous round without previous stats')
return
previous_key = (f'results/{self.model_name}/model_manager_'
f'{self.previous_date_str}.pkl')
if previous_key is None:
logger.info(f'Could not find a key to the previous model manager '
f'for {self.model_name} model.')
return
logger.info(f'Loading previous round from {previous_key}')
mr = ModelRound.load_from_s3_key(previous_key, bucket=self.bucket)
return mr
def _get_previous_json_stats(self):
key = find_latest_s3_file(
self.bucket, f'model_stats/{self.model_name}/model_stats_', '.json')
# This is the first time statistics is generated for this model
if key is None:
logger.info(f'Could not find a key to the previous statistics ')
return
# If stats for this date exists, previous stats is the second latest
if strip_out_date(key) == self.latest_round.date_str:
logger.info(f'Statistics for latest round already exists')
key = find_nth_latest_s3_file(
1, self.bucket, f'model_stats/{self.model_name}/model_stats_',
'.json')
# Store the date string to find previous round with it
self.previous_date_str = strip_out_date(key)
logger.info(f'Loading earlier statistics from {key}')
previous_json_stats = load_json_from_s3(self.bucket, key)
return previous_json_stats
class TestStatsGenerator(StatsGenerator):
    """Generates statistic for a given test round.
    Parameters
    ----------
    model_name : str
        A name of a model the tests were run against.
    test_corpus_str : str
        A name of a test corpus the model was tested against.
    latest_round : emmaa.analyze_tests_results.TestRound
        An instance of a TestRound to generate statistics for. If not given,
        will be generated by loading test results from s3.
    previous_round : emmaa.analyze_tests_results.TestRound
        A different instance of a TestRound to find delta between two rounds.
        If not given, will be generated by loading test results from s3.
    previous_json_stats : list[dict]
        A JSON-formatted dictionary containing test statistics for previous
        test round.
    Attributes
    ----------
    json_stats : dict
        A JSON-formatted dictionary containing test statistics.
    """

    def __init__(self, model_name, test_corpus_str='large_corpus_tests',
                 latest_round=None, previous_round=None,
                 previous_json_stats=None, bucket=EMMAA_BUCKET_NAME):
        # test_corpus must be set before super().__init__ because the base
        # initializer triggers the round-loading helpers below which use it.
        self.test_corpus = test_corpus_str
        super().__init__(model_name, latest_round, previous_round,
                         previous_json_stats, bucket)

    def make_stats(self):
        """Check if two latest test rounds were found and add statistics to
        json_stats dictionary. If both latest round and previous round
        were passed or found on s3, a dictionary will have three key-value
        pairs: test_round_summary, tests_delta, and changes_over_time.
        """
        if not self.latest_round:
            logger.info(f'Latest round for {self.model_name} is not found.')
            return
        if self.previous_json_stats and not self.previous_round:
            logger.info(f'Latest stats are found but latest round is not.')
            return
        logger.info(f'Generating stats for {self.model_name}.')
        self.make_test_summary()
        self.make_tests_delta()
        self.make_changes_over_time()

    def make_test_summary(self):
        """Add latest test round summary to json_stats."""
        logger.info(f'Generating test summary for {self.model_name}.')
        self.json_stats['test_round_summary'] = {
            'test_data': self.latest_round.json_results[0].get('test_data'),
            'number_applied_tests': self.latest_round.get_total_applied_tests(),
            'all_test_results': self.latest_round.english_test_results,
            'path_stmt_counts': self.latest_round.get_path_stmt_counts()}
        # Pass counts and ratios are recorded per model-checker type.
        for mc_type in self.latest_round.mc_types_results:
            self.json_stats['test_round_summary'][mc_type] = {
                'number_passed_tests': (
                    self.latest_round.get_number_passed_tests(mc_type)),
                'passed_ratio': self.latest_round.passed_over_total(mc_type)}

    def make_tests_delta(self):
        """Add tests delta between two latest test rounds to json_stats."""
        logger.info(f'Generating tests delta for {self.model_name}.')
        date = self.latest_round.date_str[:10]
        test_name = None
        test_data = self.latest_round.json_results[0].get('test_data')
        if test_data:
            test_name = test_data.get('name')
        if not self.previous_round:
            # No baseline: record an empty delta.
            tests_delta = {
                'applied_hashes_delta': {'added': [], 'removed': []}}
        else:
            applied_delta = self.latest_round.find_delta_hashes(
                self.previous_round, 'applied_tests')
            tests_delta = {
                'applied_hashes_delta': applied_delta}
            # Compose (and log) the tweet text for newly applied tests.
            msg = _make_twitter_msg(
                self.model_name, 'applied_tests', applied_delta, date,
                test_corpus=self.test_corpus, test_name=test_name)
            if msg:
                logger.info(msg)
        for mc_type in self.latest_round.mc_types_results:
            if not self.previous_round or mc_type not in \
                    self.previous_round.mc_types_results:
                # This mc_type has no baseline to diff against.
                tests_delta[mc_type] = {
                    'passed_hashes_delta': {'added': [], 'removed': []}}
            else:
                passed_delta = self.latest_round.find_delta_hashes(
                    self.previous_round, 'passed_tests', mc_type=mc_type)
                tests_delta[mc_type] = {
                    'passed_hashes_delta': passed_delta}
                msg = _make_twitter_msg(
                    self.model_name, 'passed_tests', passed_delta, date,
                    mc_type, test_corpus=self.test_corpus, test_name=test_name)
                if msg:
                    logger.info(msg)
        self.json_stats['tests_delta'] = tests_delta

    def make_changes_over_time(self):
        """Add changes to tests over time to json_stats."""
        logger.info(f'Comparing changes over time for {self.model_name}.')
        self.json_stats['changes_over_time'] = {
            'number_applied_tests': self.get_over_time(
                'test_round_summary', 'number_applied_tests'),
            'dates': self.get_dates()}
        for mc_type in self.latest_round.mc_types_results:
            self.json_stats['changes_over_time'][mc_type] = {
                'number_passed_tests': self.get_over_time(
                    'test_round_summary', 'number_passed_tests', mc_type),
                'passed_ratio': self.get_over_time(
                    'test_round_summary', 'passed_ratio', mc_type)}

    def get_over_time(self, section, metrics, mc_type='pysb'):
        """Extend the historical series of `metrics` with the latest value."""
        logger.info(f'Getting changes over time in {metrics} '
                    f'for {self.model_name}.')
        # Not mc_type relevant data
        if metrics == 'number_applied_tests':
            # First available stats
            if not self.previous_json_stats:
                previous_data = []
            else:
                previous_data = (
                    self.previous_json_stats['changes_over_time'][metrics])
            previous_data.append(self.json_stats[section][metrics])
        # Mc_type relevant data
        else:
            # First available stats
            if not self.previous_json_stats:
                previous_data = []
            else:
                # This mc_type wasn't available in previous stats
                if mc_type not in \
                        self.previous_json_stats['changes_over_time']:
                    previous_data = []
                else:
                    previous_data = (
                        self.previous_json_stats[
                            'changes_over_time'][mc_type][metrics])
            previous_data.append(self.json_stats[section][mc_type][metrics])
        return previous_data

    def save_to_s3(self):
        """Upload the generated test statistics JSON under a dated S3 key."""
        date_str = self.latest_round.date_str
        stats_key = (f'stats/{self.model_name}/test_stats_{self.test_corpus}_'
                     f'{date_str}.json')
        super().save_to_s3_key(stats_key)

    def _get_latest_round(self):
        """Load the most recent TestRound for this corpus from S3, or None."""
        latest_key = find_latest_s3_file(
            self.bucket,
            f'results/{self.model_name}/results_{self.test_corpus}',
            extension='.json')
        if latest_key is None:
            logger.info(f'Could not find a key to the latest test results '
                        f'for {self.model_name} model.')
            return
        logger.info(f'Loading latest round from {latest_key}')
        tr = TestRound.load_from_s3_key(latest_key, bucket=self.bucket)
        return tr

    def _get_previous_round(self):
        """Load the TestRound matching the previous statistics' date from S3."""
        if not self.previous_json_stats:
            logger.info('Not loading previous round without previous stats')
            return
        previous_key = (f'results/{self.model_name}/results_{self.test_corpus}'
                        f'_{self.previous_date_str}.json')
        # NOTE(review): previous_key is a freshly built f-string and can never
        # be None, so the check below is unreachable dead code — confirm and
        # remove in a follow-up.
        if previous_key is None:
            logger.info(f'Could not find a key to the previous test results '
                        f'for {self.model_name} model.')
            return
        logger.info(f'Loading previous round from {previous_key}')
        tr = TestRound.load_from_s3_key(previous_key, bucket=self.bucket)
        return tr

    def _get_previous_json_stats(self):
        """Load the most recent previously generated test stats JSON from S3."""
        key = find_latest_s3_file(
            self.bucket,
            f'stats/{self.model_name}/test_stats_{self.test_corpus}_', '.json')
        # This is the first time statistics is generated for this model
        if key is None:
            logger.info(f'Could not find a key to the previous statistics ')
            return
        # If stats for this date exists, previous stats is the second latest
        if strip_out_date(key) == self.latest_round.date_str:
            logger.info(f'Statistics for latest round already exists')
            key = find_nth_latest_s3_file(
                1, self.bucket,
                f'stats/{self.model_name}/test_stats_{self.test_corpus}_',
                '.json')
        # Store the date string to find previous round with it
        self.previous_date_str = strip_out_date(key)
        logger.info(f'Loading earlier statistics from {key}')
        previous_json_stats = load_json_from_s3(self.bucket, key)
        return previous_json_stats
def generate_stats_on_s3(
        model_name, mode, test_corpus_str='large_corpus_tests',
        upload_stats=True, bucket=EMMAA_BUCKET_NAME):
    """Generate statistics for latest round of model update or tests.

    Parameters
    ----------
    model_name : str
        A name of EmmaaModel.
    mode : str
        Type of stats to generate (model or tests)
    test_corpus_str : str
        A name of a test corpus.
    upload_stats : Optional[bool]
        Whether to upload latest statistics about model and a test.
        Default: True
    bucket : str
        Name of the S3 bucket to read from and write to.

    Returns
    -------
    The stats generator instance after its stats have been computed.
    """
    if mode == 'model':
        generator = ModelStatsGenerator(model_name, bucket=bucket)
    elif mode == 'tests':
        generator = TestStatsGenerator(model_name, test_corpus_str,
                                       bucket=bucket)
    else:
        raise TypeError('Mode must be either model or tests')
    generator.make_stats()
    # Optionally upload stats to S3
    if upload_stats:
        generator.save_to_s3()
    return generator
def _make_twitter_msg(model_name, msg_type, delta, date, mc_type=None,
test_corpus=None, test_name=None, new_papers=None):
if len(delta['added']) == 0:
logger.info(f'No {msg_type} delta found')
return
if not test_name:
test_name = test_corpus
plural = 's' if len(delta['added']) > 1 else ''
if msg_type == 'stmts':
if not new_papers:
logger.info(f'No new papers found')
return
else:
paper_plural = 's' if new_papers > 1 else ''
msg = (f'Today I read {new_papers} new publication{paper_plural} '
f'and learned {len(delta["added"])} new mechanism{plural}. '
f'See https://emmaa.indra.bio/dashboard/{model_name}'
f'?tab=model&date={date}#addedStmts for more '
'details.')
elif msg_type == 'applied_tests':
msg = (f'Today I applied {len(delta["added"])} new test{plural} in '
f'the {test_name}. See '
f'https://emmaa.indra.bio/dashboard/{model_name}?tab=tests'
f'&test_corpus={test_corpus}&date={date}#newAppliedTests for '
'more details.')
elif msg_type == 'passed_tests' and mc_type:
msg = (f'Today I explained {len(delta["added"])} new '
f'observation{plural} in the {test_name} with my '
f'{TWITTER_MODEL_TYPES[mc_type]} model. See '
f'https://emmaa.indra.bio/dashboard/{model_name}?tab=tests'
f'&test_corpus={test_corpus}&date={date}#newPassedTests for '
'more details.')
else:
raise TypeError(f'Invalid message type: {msg_type}.')
return msg
def tweet_deltas(model_name, test_corpora, date, bucket=EMMAA_BUCKET_NAME):
model_stats, _ = get_model_stats(model_name, 'model', date=date)
test_stats_by_corpus = {}
for test_corpus in test_corpora:
test_stats, _ = get_model_stats(model_name, 'test', tests=test_corpus,
date=date)
if not test_stats:
logger.info(f'Could not find test stats for {test_corpus}')
test_stats_by_corpus[test_corpus] = test_stats
if not model_stats or not test_stats_by_corpus:
logger.warning('Stats are not found, not tweeting')
return
config = load_config_from_s3(model_name, bucket)
twitter_key = config.get('twitter')
twitter_cred = get_credentials(twitter_key)
if not twitter_cred:
logger.warning('Twitter credentials are not found, not tweeting')
# Model message
stmts_delta = model_stats['model_delta']['statements_hashes_delta']
paper_delta = model_stats['paper_delta']['raw_paper_ids_delta']
new_papers = len(paper_delta['added'])
stmts_msg = _make_twitter_msg(model_name, 'stmts', stmts_delta, date,
new_papers=new_papers)
if stmts_msg:
logger.info(stmts_msg)
if twitter_cred:
update_status(stmts_msg, twitter_cred)
# Tests messages
for test_corpus, test_stats in test_stats_by_corpus.items():
test_name = None
test_data = test_stats['test_round_summary'].get('test_data')
if test_data:
test_name = test_data.get('name')
for k, v in test_stats['tests_delta'].items():
| |
this code has been
# compiled into the module it is possible to get
# an IOError that doesn't match the IOError from
# Python parse time resulting in an IOError
# exception being raised. Consequently we just
# catch all exceptions.
pass
except ImportError:
pass
try:
while True:
self._input_descriptions=[]
if using_readline:
# we drop completion cache because it contains
# table and column names which could have changed
# with last executed SQL
self._completion_cache=None
self._using_readline=True
try:
command=self.getcompleteline()
if command is None: # EOF
return
self.process_complete_line(command)
except:
self._append_input_description()
try:
self.handle_exception()
except UnicodeDecodeError:
self.handle_exception()
finally:
if using_readline:
readline.set_completer(old_completer)
readline.set_history_length(256)
readline.write_history_file(os.path.expanduser(self.history_file))
def handle_exception(self):
"""Handles the current exception, printing a message to stderr as appropriate.
It will reraise the exception if necessary (eg if bail is true)"""
eclass,eval,etb=sys.exc_info() # py2&3 compatible way of doing this
if isinstance(eval, SystemExit):
eval._handle_exception_saw_this=True
raise
self._out_colour()
self.write(self.stderr, self.colour.error)
if isinstance(eval, KeyboardInterrupt):
self.handle_interrupt()
text="Interrupted"
else:
text=str(eval)
if not text.endswith("\n"):
text=text+"\n"
if len(self._input_descriptions):
for i in range(len(self._input_descriptions)):
if i==0:
pref="At "
else:
pref=" "*i+"From "
self.write(self.stderr, pref+self._input_descriptions[i]+"\n")
self.write(self.stderr, text)
if self.exceptions:
stack=[]
while etb:
stack.append(etb.tb_frame)
etb = etb.tb_next
for frame in stack:
self.write(self.stderr, "\nFrame %s in %s at line %d\n" %
(frame.f_code.co_name, frame.f_code.co_filename,
frame.f_lineno))
vars=list(frame.f_locals.items())
vars.sort()
for k,v in vars:
try:
v=repr(v)[:80]
except:
v="<Unable to convert to string>"
self.write(self.stderr, "%10s = %s\n" % (k,v))
self.write(self.stderr, "\n%s: %s\n" % (eclass, repr(eval)))
self.write(self.stderr, self.colour.error_)
eval._handle_exception_saw_this=True
if self.bail:
raise
    def process_sql(self, sql, bindings=None, internal=False, summary=None):
        """Processes SQL text consisting of one or more statements
        :param sql: SQL to execute
        :param bindings: bindings for the *sql*
        :param internal: If True then this is an internal execution
          (eg the .tables or .database command). When executing
          internal sql timings are not shown nor is the SQL echoed.
        :param summary: If not None then should be a tuple of two
          items. If the ``sql`` returns any data then the first item
          is printed before the first row, and the second item is
          printed after the last row. An example usage is the .find
          command which shows table names.
        """
        cur=self.db.cursor()
        # we need to know when each new statement is executed
        state={'newsql': True, 'timing': None}
        def et(cur, sql, bindings):
            # Exec tracer callback: invoked before each individual statement
            # runs, so per-statement echo and timing happen here.
            state['newsql']=True
            # if time reporting, do so now
            if not internal and self.timer:
                if state['timing']:
                    self.display_timing(state['timing'], self.get_resource_usage())
            # print statement if echo is on
            if not internal and self.echo:
                # ? should we strip leading and trailing whitespace? backslash quote stuff?
                if bindings:
                    self.write(self.stderr, "%s [%s]\n" % (sql, bindings))
                else:
                    self.write(self.stderr, sql+"\n")
            # save resource from beginning of command (ie don't include echo time above)
            if not internal and self.timer:
                state['timing']=self.get_resource_usage()
            return True  # True tells the cursor to proceed with execution
        cur.setexectrace(et)
        # processing loop
        try:
            for row in cur.execute(sql, bindings):
                if state['newsql']:
                    # First row of a new statement: emit summary and header.
                    # summary line?
                    if summary:
                        self._output_summary(summary[0])
                    # output a header always
                    cols=[h for h,d in cur.getdescription()]
                    self.output(True, cols)
                    state['newsql']=False
                self.output(False, row)
            # newsql is False only if at least one row was produced.
            if not state['newsql'] and summary:
                self._output_summary(summary[1])
        except:
            # If echo is on and the sql to execute is a syntax error
            # then the exec tracer won't have seen it so it won't be
            # printed and the user will be wondering exactly what sql
            # had the error.  We look in the traceback and deduce if
            # the error was happening in a prepare or not.  Also we
            # need to ignore the case where SQLITE_SCHEMA happened and
            # a reprepare is being done since the exec tracer will
            # have been called in that situation.
            if not internal and self.echo:
                tb=sys.exc_info()[2]
                last=None
                while tb:
                    last=tb.tb_frame
                    tb=tb.tb_next
                # Innermost frame in sqlite3_prepare means the statement never
                # reached the exec tracer; echo it ourselves before re-raising.
                if last and last.f_code.co_name=="sqlite3_prepare" \
                   and last.f_code.co_filename.endswith("statementcache.c") \
                   and "sql" in last.f_locals:
                    self.write(self.stderr, last.f_locals["sql"]+"\n")
            raise
        if not internal and self.timer:
            self.display_timing(state['timing'], self.get_resource_usage())
def process_command(self, cmd):
    """Processes a dot command. It is split into parts using the
    `shlex.split
    <http://docs.python.org/library/shlex.html#shlex.split>`__
    function which is roughly the same method used by Unix/POSIX
    shells.

    :param cmd: complete command line, including the leading dot
    :raises Error: if no ``command_<name>`` method exists for the
      named command
    """
    if self.echo:
        self.write(self.stderr, cmd+"\n")
    # shlex.split is broken with unicode on Python 2, so round-trip
    # through utf8 bytes there
    if sys.version_info<(3,0):
        cmd=cmd.encode("utf8")
        cmd=[c.decode("utf8") for c in shlex.split(cmd)]
    else:
        cmd=shlex.split(cmd)
    assert cmd[0][0]=="."
    cmd[0]=cmd[0][1:]
    fn=getattr(self, "command_"+cmd[0], None)
    if not fn:
        raise self.Error("Unknown command \"%s\". Enter \".help\" for help" % (cmd[0],))
    # fix: the return value was previously bound to an unused local
    # (``res``); dot commands produce no useful result, so just call
    fn(cmd[1:])
###
### Commands start here
###
def _boolean_command(self, name, cmd):
"Parse and verify boolean parameter"
if len(cmd)!=1 or cmd[0].lower() not in ("on", "off"):
raise self.Error(name+" expected ON or OFF")
return cmd[0].lower()=="on"
# Note that doc text is used for generating help output.
def command_backup(self, cmd):
    """backup ?DB? FILE: Backup DB (default "main") to FILE
    Copies the contents of the current database to FILE
    overwriting whatever was in FILE. If you have attached databases
    then you can specify their name instead of the default of "main".
    The backup is done at the page level - SQLite copies the pages
    as is. There is no round trip through SQL code.
    """
    if len(cmd) not in (1, 2):
        raise self.Error("Backup takes one or two parameters")
    if len(cmd) == 2:
        dbname, fname = cmd
    else:
        dbname, fname = "main", cmd[0]
    # page-level copy into a fresh connection on the target file
    dest = apsw.Connection(fname)
    backup = dest.backup("main", self.db, dbname)
    try:
        while not backup.done:
            backup.step()
    finally:
        backup.finish()
    dest.close()
def command_bail(self, cmd):
    """bail ON|OFF: Stop after hitting an error (default OFF)
    If an error is encountered while processing commands or SQL
    then exit. (Note this is different than SQLite shell which
    only exits for errors in SQL.)
    """
    # handle_exception() re-raises the active exception when self.bail
    # is set, which is what terminates the shell
    self.bail=self._boolean_command("bail", cmd)
def command_colour(self, cmd=[]):
    """colour SCHEME: Selects a colour scheme
    Residents of both countries that have not adopted the metric
    system may also spell this command without a 'u'. If using a
    colour terminal in interactive mode then output is
    automatically coloured to make it more readable. Use 'off' to
    turn off colour, and no name or 'default' for the default.
    """
    if len(cmd) > 1:
        raise self.Error("Too many colour schemes")
    # no argument (or an empty one) selects the default scheme
    scheme = cmd[0] if cmd and cmd[0] else "default"
    if scheme not in self._colours:
        raise self.Error("No such colour scheme: " + scheme)
    self.colour_scheme = scheme
    self._out_colour()

command_color=command_colour
def command_databases(self, cmd):
    """databases: Lists names and files of attached databases
    """
    if cmd:
        raise self.Error("databases command doesn't take any parameters")
    # run the pragma under a temporary column-mode output
    # configuration, restoring the caller's settings afterwards
    self.push_output()
    self.header = True
    self.output = self.output_column
    self.truncate = False
    self.widths = [3, 15, 58]
    try:
        self.process_sql("pragma database_list", internal=True)
    finally:
        self.pop_output()
def command_dump(self, cmd):
"""dump ?TABLE? [TABLE...]: Dumps all or specified tables in SQL text format
The table name is treated as like pattern so you can use % as
a wildcard. You can use dump to make a text based backup of
the database. It is also useful for comparing differences or
making the data available to other databases. Indices and
triggers for the table(s) are also dumped. Finally views
matching the table pattern name are dumped (it isn't possible
to work out which views access which table and views can
access multiple tables anyway).
Note that if you are dumping virtual tables such as used by
the FTS3 module then they may use other tables to store
information. For example if you create a FTS3 table named
*recipes* then it also creates *recipes_content*,
*recipes_segdir* etc. Consequently to dump this example
correctly use::
.dump recipes recipes_%
If the database is empty or no tables/views match then there
is no output.
"""
# Simple tables are easy to dump. More complicated is dealing
# with virtual tables, foreign keys etc.
# Lock the database while doing the dump so nothing changes
# under our feet
self.process_sql("BEGIN IMMEDIATE", internal=True)
# Used in comment() - see issue 142
outputstrtype=str
if sys.version_info<(3,0):
outputstrtype=unicode
# Python 2.3 can end up with nonsense like "en_us" so we fall
# back to ascii in that case
outputstrencoding=getattr(self.stdout, "encoding", "ascii")
try:
codecs.lookup(outputstrencoding)
except:
outputstrencoding="ascii"
def unicodify(s):
if not isinstance(s, outputstrtype):
# See issue 142 - it may not be in an expected encoding
return s.decode(outputstrencoding, "replace")
return s
try:
# first pass -see if virtual tables or foreign keys are in
# use. If they are we emit pragmas to deal with them, but
# prefer not to emit them
v={"virtuals": False,
"foreigns": False}
def check(name, sql):
if name.lower().startswith("sqlite_"):
return False
sql=sql.lower()
if re.match(r"^\s*create\s+virtual\s+.*", sql):
v["virtuals"]=True
# pragma table_info doesn't tell us if foreign keys
# are involved so we guess if any the | |
import numpy as np
import torch
import unittest
from itertools import combinations
import os
import heat as ht
from .test_suites.basic_test import TestCase
class TestStatistics(TestCase):
def test_argmax(self):
    """Validate ht.argmax against torch on local and distributed tensors.

    Covers reductions along major/minor axes of a local 3D tensor,
    implicit flattening of a split 1D tensor, reductions along and
    across the split axis of 2D tensors, the ``out=`` parameter, and
    type/value errors for malformed ``axis`` arguments.
    """
    torch.manual_seed(1)
    data = ht.random.randn(3, 4, 5)
    # 3D local tensor, major axis
    result = ht.argmax(data, axis=0)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.dtype, ht.int64)
    self.assertEqual(result._DNDarray__array.dtype, torch.int64)
    self.assertEqual(result.shape, (4, 5))
    self.assertEqual(result.lshape, (4, 5))
    self.assertEqual(result.split, None)
    self.assertTrue((result._DNDarray__array == data._DNDarray__array.argmax(0)).all())
    # 3D local tensor, minor axis (negative index), keepdim retains the axis
    result = ht.argmax(data, axis=-1, keepdim=True)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.dtype, ht.int64)
    self.assertEqual(result._DNDarray__array.dtype, torch.int64)
    self.assertEqual(result.shape, (3, 4, 1))
    self.assertEqual(result.lshape, (3, 4, 1))
    self.assertEqual(result.split, None)
    self.assertTrue(
        (result._DNDarray__array == data._DNDarray__array.argmax(-1, keepdim=True)).all()
    )
    # 1D split tensor, no axis: global argmax over the flattened tensor
    data = ht.arange(-10, 10, split=0)
    result = ht.argmax(data)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.dtype, ht.int64)
    self.assertEqual(result._DNDarray__array.dtype, torch.int64)
    self.assertEqual(result.shape, (1,))
    self.assertEqual(result.lshape, (1,))
    self.assertEqual(result.split, None)
    self.assertTrue(
        (result._DNDarray__array == torch.tensor([19], device=self.device.torch_device))
    )
    # 2D split tensor, along the axis: reduction is local per process
    data = ht.array(ht.random.randn(4, 5), is_split=0)
    result = ht.argmax(data, axis=1)
    expected = torch.argmax(data._DNDarray__array, dim=1)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.dtype, ht.int64)
    self.assertEqual(result._DNDarray__array.dtype, torch.int64)
    self.assertEqual(result.shape, (ht.MPI_WORLD.size * 4,))
    self.assertEqual(result.lshape, (4,))
    self.assertEqual(result.split, 0)
    self.assertTrue((result._DNDarray__array == expected).all())
    # 2D split tensor, across the axis: requires communication
    size = ht.MPI_WORLD.size * 2
    data = ht.tril(ht.ones((size, size), split=0), k=-1)
    result = ht.argmax(data, axis=0)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.dtype, ht.int64)
    self.assertEqual(result._DNDarray__array.dtype, torch.int64)
    self.assertEqual(result.shape, (size,))
    self.assertEqual(result.lshape, (size,))
    self.assertEqual(result.split, None)
    # skip test on gpu; argmax works different
    if not (torch.cuda.is_available() and result.device == ht.gpu):
        self.assertTrue((result._DNDarray__array != 0).all())
    # 2D split tensor, across the axis, output tensor
    size = ht.MPI_WORLD.size * 2
    data = ht.tril(ht.ones((size, size), split=0), k=-1)
    output = ht.empty((size,))
    result = ht.argmax(data, axis=0, out=output)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(output.dtype, ht.int64)
    self.assertEqual(output._DNDarray__array.dtype, torch.int64)
    self.assertEqual(output.shape, (size,))
    self.assertEqual(output.lshape, (size,))
    self.assertEqual(output.split, None)
    # skip test on gpu; argmax works different
    if not (torch.cuda.is_available() and output.device == ht.gpu):
        self.assertTrue((output._DNDarray__array != 0).all())
    # check exceptions
    with self.assertRaises(TypeError):
        data.argmax(axis=(0, 1))
    with self.assertRaises(TypeError):
        data.argmax(axis=1.1)
    with self.assertRaises(TypeError):
        data.argmax(axis="y")
    with self.assertRaises(ValueError):
        ht.argmax(data, axis=-4)
def test_argmin(self):
    """Validate ht.argmin against torch on local and distributed tensors.

    Mirror of ``test_argmax``: global reduction, major/minor axes of a
    local 3D tensor, reductions along and across the split axis of 2D
    tensors, the ``out=`` parameter, and malformed ``axis`` errors.
    """
    torch.manual_seed(1)
    data = ht.random.randn(3, 4, 5)
    # 3D local tensor, no axis: global argmin over the flattened tensor
    result = ht.argmin(data)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.dtype, ht.int64)
    self.assertEqual(result._DNDarray__array.dtype, torch.int64)
    self.assertEqual(result.shape, (1,))
    self.assertEqual(result.lshape, (1,))
    self.assertEqual(result.split, None)
    self.assertTrue((result._DNDarray__array == data._DNDarray__array.argmin()).all())
    # 3D local tensor, major axis
    result = ht.argmin(data, axis=0)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.dtype, ht.int64)
    self.assertEqual(result._DNDarray__array.dtype, torch.int64)
    self.assertEqual(result.shape, (4, 5))
    self.assertEqual(result.lshape, (4, 5))
    self.assertEqual(result.split, None)
    self.assertTrue((result._DNDarray__array == data._DNDarray__array.argmin(0)).all())
    # 3D local tensor, minor axis (negative index), keepdim retains the axis
    result = ht.argmin(data, axis=-1, keepdim=True)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.dtype, ht.int64)
    self.assertEqual(result._DNDarray__array.dtype, torch.int64)
    self.assertEqual(result.shape, (3, 4, 1))
    self.assertEqual(result.lshape, (3, 4, 1))
    self.assertEqual(result.split, None)
    self.assertTrue(
        (result._DNDarray__array == data._DNDarray__array.argmin(-1, keepdim=True)).all()
    )
    # 2D split tensor, along the axis: reduction is local per process
    data = ht.array(ht.random.randn(4, 5), is_split=0)
    result = ht.argmin(data, axis=1)
    expected = torch.argmin(data._DNDarray__array, dim=1)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.dtype, ht.int64)
    self.assertEqual(result._DNDarray__array.dtype, torch.int64)
    self.assertEqual(result.shape, (ht.MPI_WORLD.size * 4,))
    self.assertEqual(result.lshape, (4,))
    self.assertEqual(result.split, 0)
    self.assertTrue((result._DNDarray__array == expected).all())
    # 2D split tensor, across the axis: requires communication
    size = ht.MPI_WORLD.size * 2
    data = ht.triu(ht.ones((size, size), split=0), k=1)
    result = ht.argmin(data, axis=0)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.dtype, ht.int64)
    self.assertEqual(result._DNDarray__array.dtype, torch.int64)
    self.assertEqual(result.shape, (size,))
    self.assertEqual(result.lshape, (size,))
    self.assertEqual(result.split, None)
    # skip test on gpu; argmin works different
    if not (torch.cuda.is_available() and result.device == ht.gpu):
        self.assertTrue((result._DNDarray__array != 0).all())
    # 2D split tensor, across the axis, output tensor
    size = ht.MPI_WORLD.size * 2
    data = ht.triu(ht.ones((size, size), split=0), k=1)
    output = ht.empty((size,))
    result = ht.argmin(data, axis=0, out=output)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(output.dtype, ht.int64)
    self.assertEqual(output._DNDarray__array.dtype, torch.int64)
    self.assertEqual(output.shape, (size,))
    self.assertEqual(output.lshape, (size,))
    self.assertEqual(output.split, None)
    # skip test on gpu; argmin works different
    if not (torch.cuda.is_available() and output.device == ht.gpu):
        self.assertTrue((output._DNDarray__array != 0).all())
    # check exceptions
    with self.assertRaises(TypeError):
        data.argmin(axis=(0, 1))
    with self.assertRaises(TypeError):
        data.argmin(axis=1.1)
    with self.assertRaises(TypeError):
        data.argmin(axis="y")
    with self.assertRaises(ValueError):
        ht.argmin(data, axis=-4)
def test_cov(self):
    """Compare ht.cov against numpy.cov on the iris dataset.

    Exercises rowvar, ddof, bias and the two-argument form; some
    branches are guarded on the MPI world size because the fixtures
    are only valid for particular process counts.
    """
    x = ht.array([[0, 2], [1, 1], [2, 0]], dtype=ht.float, split=1).T
    if x.comm.size < 3:
        cov = ht.cov(x)
        actual = ht.array([[1, -1], [-1, 1]], split=0)
        self.assertTrue(ht.equal(cov, actual))
        # cross-check against numpy on real data loaded from disk
        data = np.loadtxt("heat/datasets/data/iris.csv", delimiter=";")
        np_cov = np.cov(data[:, 0], data[:, 1:3], rowvar=False)
        htdata = ht.load("heat/datasets/data/iris.csv", sep=";", split=0)
        ht_cov = ht.cov(htdata[:, 0], htdata[:, 1:3], rowvar=False)
        comp = ht.array(np_cov, dtype=ht.float)
        self.assertTrue(ht.allclose(comp - ht_cov, 0, atol=1e-4))
        np_cov = np.cov(data, rowvar=False)
        ht_cov = ht.cov(htdata, rowvar=False)
        self.assertTrue(ht.allclose(ht.array(np_cov, dtype=ht.float) - ht_cov, 0, atol=1e-4))
        np_cov = np.cov(data, rowvar=False, ddof=1)
        ht_cov = ht.cov(htdata, rowvar=False, ddof=1)
        self.assertTrue(ht.allclose(ht.array(np_cov, dtype=ht.float) - ht_cov, 0, atol=1e-4))
        np_cov = np.cov(data, rowvar=False, bias=True)
        ht_cov = ht.cov(htdata, rowvar=False, bias=True)
        self.assertTrue(ht.allclose(ht.array(np_cov, dtype=ht.float) - ht_cov, 0, atol=1e-4))
        if 1 < x.comm.size < 5:
            # same comparison with the data split along the columns
            htdata = ht.load("heat/datasets/data/iris.csv", sep=";", split=1)
            np_cov = np.cov(data, rowvar=False)
            ht_cov = ht.cov(htdata, rowvar=False)
            self.assertTrue(ht.allclose(ht.array(np_cov, dtype=ht.float), ht_cov, atol=1e-4))
            # two-argument form with rowvar=True
            np_cov = np.cov(data, data, rowvar=True)
            htdata = ht.load("heat/datasets/data/iris.csv", sep=";", split=0)
            ht_cov = ht.cov(htdata, htdata, rowvar=True)
            self.assertTrue(ht.allclose(ht.array(np_cov, dtype=ht.float), ht_cov, atol=1e-4))
            htdata = ht.load("heat/datasets/data/iris.csv", sep=";", split=0)
            # check exceptions: shape mismatches, wrong types, bad ddof
            with self.assertRaises(RuntimeError):
                ht.cov(htdata[1:], rowvar=False)
            with self.assertRaises(RuntimeError):
                ht.cov(htdata, htdata[1:], rowvar=False)
            with self.assertRaises(TypeError):
                ht.cov(np_cov)
            with self.assertRaises(TypeError):
                ht.cov(htdata, np_cov)
            with self.assertRaises(TypeError):
                ht.cov(htdata, ddof="str")
            with self.assertRaises(ValueError):
                ht.cov(ht.zeros((1, 2, 3)))
            with self.assertRaises(ValueError):
                ht.cov(htdata, ht.zeros((1, 2, 3)))
            with self.assertRaises(ValueError):
                ht.cov(htdata, ddof=10000)
def test_average(self):
    """Validate ht.average against numpy.average.

    Covers the global average, per-axis averages, weighted averages
    over split tensors (1D and 3D weights, ``returned=True``), tuple
    axes, and the documented error cases.
    """
    data = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
    ht_array = ht.array(data, dtype=float)
    comparison = np.asanyarray(data)
    # check global average
    avg = ht.average(ht_array)
    self.assertIsInstance(avg, ht.DNDarray)
    self.assertEqual(avg.shape, ())
    self.assertEqual(avg.lshape, ())
    self.assertEqual(avg.split, None)
    self.assertEqual(avg.dtype, ht.float32)
    self.assertEqual(avg._DNDarray__array.dtype, torch.float32)
    self.assertEqual(avg.numpy(), np.average(comparison))
    # average along first axis
    avg_vertical = ht.average(ht_array, axis=0)
    self.assertIsInstance(avg_vertical, ht.DNDarray)
    self.assertEqual(avg_vertical.shape, (3,))
    self.assertEqual(avg_vertical.lshape, (3,))
    self.assertEqual(avg_vertical.split, None)
    self.assertEqual(avg_vertical.dtype, ht.float32)
    self.assertEqual(avg_vertical._DNDarray__array.dtype, torch.float32)
    self.assertTrue((avg_vertical.numpy() == np.average(comparison, axis=0)).all())
    # average along second axis
    avg_horizontal = ht.average(ht_array, axis=1)
    self.assertIsInstance(avg_horizontal, ht.DNDarray)
    self.assertEqual(avg_horizontal.shape, (4,))
    self.assertEqual(avg_horizontal.lshape, (4,))
    self.assertEqual(avg_horizontal.split, None)
    self.assertEqual(avg_horizontal.dtype, ht.float32)
    self.assertEqual(avg_horizontal._DNDarray__array.dtype, torch.float32)
    self.assertTrue((avg_horizontal.numpy() == np.average(comparison, axis=1)).all())
    # check weighted average over all float elements of split 3d tensor, across split axis
    random_volume = ht.array(
        torch.randn((3, 3, 3), dtype=torch.float64, device=self.device.torch_device), is_split=1
    )
    size = random_volume.comm.size
    random_weights = ht.array(
        torch.randn((3 * size,), dtype=torch.float64, device=self.device.torch_device), split=0
    )
    avg_volume = ht.average(random_volume, weights=random_weights, axis=1)
    np_avg_volume = np.average(random_volume.numpy(), weights=random_weights.numpy(), axis=1)
    self.assertIsInstance(avg_volume, ht.DNDarray)
    self.assertEqual(avg_volume.shape, (3, 3))
    self.assertEqual(avg_volume.lshape, (3, 3))
    self.assertEqual(avg_volume.dtype, ht.float64)
    self.assertEqual(avg_volume._DNDarray__array.dtype, torch.float64)
    self.assertEqual(avg_volume.split, None)
    # NOTE(review): .all() reduces each side to a single bool/scalar,
    # so this compares truthiness, not elementwise closeness — verify
    self.assertAlmostEqual(avg_volume.numpy().all(), np_avg_volume.all())
    avg_volume_with_cumwgt = ht.average(
        random_volume, weights=random_weights, axis=1, returned=True
    )
    self.assertIsInstance(avg_volume_with_cumwgt, tuple)
    self.assertIsInstance(avg_volume_with_cumwgt[1], ht.DNDarray)
    self.assertEqual(avg_volume_with_cumwgt[1].gshape, avg_volume_with_cumwgt[0].gshape)
    self.assertEqual(avg_volume_with_cumwgt[1].split, avg_volume_with_cumwgt[0].split)
    # check weighted average over all float elements of split 3d tensor (3d weights)
    random_weights_3d = ht.array(
        torch.randn((3, 3, 3), dtype=torch.float64, device=self.device.torch_device), is_split=1
    )
    avg_volume = ht.average(random_volume, weights=random_weights_3d, axis=1)
    # NOTE(review): the numpy reference below uses the 1D random_weights,
    # not random_weights_3d — looks like copy/paste; confirm intent
    np_avg_volume = np.average(random_volume.numpy(), weights=random_weights.numpy(), axis=1)
    self.assertIsInstance(avg_volume, ht.DNDarray)
    self.assertEqual(avg_volume.shape, (3, 3))
    self.assertEqual(avg_volume.lshape, (3, 3))
    self.assertEqual(avg_volume.dtype, ht.float64)
    self.assertEqual(avg_volume._DNDarray__array.dtype, torch.float64)
    self.assertEqual(avg_volume.split, None)
    self.assertAlmostEqual(avg_volume.numpy().all(), np_avg_volume.all())
    avg_volume_with_cumwgt = ht.average(
        random_volume, weights=random_weights, axis=1, returned=True
    )
    self.assertIsInstance(avg_volume_with_cumwgt, tuple)
    self.assertIsInstance(avg_volume_with_cumwgt[1], ht.DNDarray)
    self.assertEqual(avg_volume_with_cumwgt[1].gshape, avg_volume_with_cumwgt[0].gshape)
    self.assertEqual(avg_volume_with_cumwgt[1].split, avg_volume_with_cumwgt[0].split)
    # check average over all float elements of split 3d tensor, tuple axis
    random_volume = ht.random.randn(3, 3, 3, split=0)
    avg_volume = ht.average(random_volume, axis=(1, 2))
    self.assertIsInstance(avg_volume, ht.DNDarray)
    self.assertEqual(avg_volume.shape, (3,))
    self.assertEqual(avg_volume.lshape[0], random_volume.lshape[0])
    self.assertEqual(avg_volume.dtype, ht.float32)
    self.assertEqual(avg_volume._DNDarray__array.dtype, torch.float32)
    self.assertEqual(avg_volume.split, 0)
    # check weighted average over all float elements of split 5d tensor, along split axis
    random_5d = ht.random.randn(random_volume.comm.size, 2, 3, 4, 5, split=0)
    axis = random_5d.split
    random_weights = ht.random.randn(random_5d.gshape[axis], split=0)
    avg_5d = random_5d.average(weights=random_weights, axis=axis)
    self.assertIsInstance(avg_5d, ht.DNDarray)
    self.assertEqual(avg_5d.gshape, (2, 3, 4, 5))
    self.assertLessEqual(avg_5d.lshape[1], 3)
    self.assertEqual(avg_5d.dtype, ht.float32)
    self.assertEqual(avg_5d._DNDarray__array.dtype, torch.float32)
    self.assertEqual(avg_5d.split, None)
    # check exceptions
    with self.assertRaises(TypeError):
        ht.average(comparison)
    with self.assertRaises(TypeError):
        ht.average(random_5d, weights=random_weights.numpy(), axis=axis)
    with self.assertRaises(TypeError):
        ht.average(random_5d, weights=random_weights, axis=None)
    with self.assertRaises(NotImplementedError):
        ht.average(random_5d, weights=random_weights, axis=(1, 2))
    random_weights = ht.random.randn(random_5d.gshape[axis], random_5d.gshape[axis + 1])
    with self.assertRaises(TypeError):
        ht.average(random_5d, weights=random_weights, axis=axis)
    random_shape_weights = ht.random.randn(random_5d.gshape[axis] + 1)
    with self.assertRaises(ValueError):
        ht.average(random_5d, weights=random_shape_weights, axis=axis)
    zero_weights = ht.zeros((random_5d.gshape[axis]), split=0)
    with self.assertRaises(ZeroDivisionError):
        ht.average(random_5d, weights=zero_weights, axis=axis)
    weights_5d_split_mismatch = ht.ones(random_5d.gshape, split=-1)
    with self.assertRaises(NotImplementedError):
        ht.average(random_5d, weights=weights_5d_split_mismatch, axis=axis)
    with self.assertRaises(TypeError):
        ht_array.average(axis=1.1)
    with self.assertRaises(TypeError):
        ht_array.average(axis="y")
    with self.assertRaises(ValueError):
        ht.average(ht_array, axis=-4)
def test_max(self):
data = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
ht_array = ht.array(data)
comparison = torch.tensor(data, device=self.device.torch_device)
# check global max
maximum = ht.max(ht_array)
self.assertIsInstance(maximum, ht.DNDarray)
self.assertEqual(maximum.shape, (1,))
self.assertEqual(maximum.lshape, (1,))
self.assertEqual(maximum.split, None)
self.assertEqual(maximum.dtype, ht.int64)
self.assertEqual(maximum._DNDarray__array.dtype, torch.int64)
self.assertEqual(maximum, 12)
# maximum along first axis
maximum_vertical = ht.max(ht_array, axis=0)
self.assertIsInstance(maximum_vertical, ht.DNDarray)
self.assertEqual(maximum_vertical.shape, (3,))
self.assertEqual(maximum_vertical.lshape, (3,))
self.assertEqual(maximum_vertical.split, None)
self.assertEqual(maximum_vertical.dtype, ht.int64)
self.assertEqual(maximum_vertical._DNDarray__array.dtype, torch.int64)
self.assertTrue(
(maximum_vertical._DNDarray__array == comparison.max(dim=0, keepdim=True)[0]).all()
)
# maximum along second axis
maximum_horizontal = ht.max(ht_array, axis=1, keepdim=True)
self.assertIsInstance(maximum_horizontal, ht.DNDarray)
self.assertEqual(maximum_horizontal.shape, (4, 1))
self.assertEqual(maximum_horizontal.lshape, (4, 1))
self.assertEqual(maximum_horizontal.split, None)
self.assertEqual(maximum_horizontal.dtype, ht.int64)
self.assertEqual(maximum_horizontal._DNDarray__array.dtype, torch.int64)
self.assertTrue(
(maximum_horizontal._DNDarray__array == comparison.max(dim=1, keepdim=True)[0]).all()
)
# check max over all float elements of split 3d tensor, across split axis
size = ht.MPI_WORLD.size
random_volume = ht.random.randn(3, 3 * size, 3, split=1)
maximum_volume = ht.max(random_volume, axis=1)
self.assertIsInstance(maximum_volume, ht.DNDarray)
self.assertEqual(maximum_volume.shape, (3, 3))
self.assertEqual(maximum_volume.lshape, (3, 3))
self.assertEqual(maximum_volume.dtype, ht.float32)
self.assertEqual(maximum_volume._DNDarray__array.dtype, torch.float32)
self.assertEqual(maximum_volume.split, None)
# check max over all float elements of split 3d tensor, tuple axis
random_volume = | |
<reponame>AnythingTechPro/toontown-otp-original
"""
* Copyright (C) <NAME> - All Rights Reserved
* Written by <NAME> <<EMAIL>>, August 17th, 2017
* Licensing information can found in 'LICENSE', which is part of this source code package.
"""
import random
from panda3d.direct import DCPacker
from realtime import io, types
from game.OtpDoGlobals import *
from direct.directnotify.DirectNotifyGlobal import directNotify
class Shard(object):
    """Record describing a single district (shard).

    Attributes:
        channel: channel id the shard is reachable on
        name: human-readable district name
        population: current avatar population count
    """

    def __init__(self, channel, name, population):
        self.channel = channel
        self.name = name
        self.population = population

    def __repr__(self):
        # debugging aid only; nothing in the code relies on this format
        return '%s(channel=%r, name=%r, population=%r)' % (
            self.__class__.__name__, self.channel, self.name, self.population)
class ShardManager(object):
    """Registry of active shards keyed by their channel id."""

    def __init__(self):
        # channel -> Shard
        self._shards = {}

    @property
    def shards(self):
        return self._shards

    def has_shard(self, channel):
        """Return True if a shard is registered under *channel*."""
        return channel in self._shards

    def add_shard(self, channel, name, population):
        """Register a new shard; silently ignored if *channel* is taken."""
        if channel not in self._shards:
            self._shards[channel] = Shard(channel, name, population)

    def remove_shard(self, channel):
        """Forget the shard registered under *channel*; no-op if unknown."""
        self._shards.pop(channel, None)

    def get_shard(self, channel):
        """Return the Shard for *channel*, or None when unknown."""
        return self._shards.get(channel)

    def get_shards(self):
        """Return all registered Shard objects."""
        return self._shards.values()
class StateObject(object):
notify = directNotify.newCategory('StateObject')
def __init__(self, network, do_id, parent_id, zone_id, dc_class, has_other, di):
    """Create a state object and unpack its required fields.

    Parameters:
        network: owning network service; used for channel registration
        do_id: distributed object id of this object
        parent_id: channel of the AI/parent the object lives under
        zone_id: zone the object is located in
        dc_class: DCClass describing the object's fields
        has_other: whether the object also carries "other" fields
        di: datagram iterator positioned at the packed required values
    """
    self._network = network
    self._do_id = do_id
    # *_old_* attributes track the previous value whenever the
    # corresponding property setter assigns a new one
    self._old_owner_id = 0
    self._owner_id = 0
    self._old_parent_id = 0
    self._parent_id = parent_id
    self._old_zone_id = 0
    self._zone_id = zone_id
    self._dc_class = dc_class
    self._has_other = has_other
    # field index -> unpacked argument values
    self._required_fields = {}
    self._other_fields = {}
    field_packer = DCPacker()
    field_packer.set_unpack_data(di.get_remaining_bytes())
    # NOTE: xrange makes this module Python 2 only
    for field_index in xrange(self._dc_class.get_num_inherited_fields()):
        field = self._dc_class.get_inherited_field(field_index)
        if not field:
            self.notify.error('Failed to unpack required field: %d dclass: %s, unknown field!' % (
                field_index, self._dc_class.get_name()))
        # molecular and non-required fields are not present in the
        # packed required data, so skip them
        if field.as_molecular_field() or not field.is_required():
            continue
        field_packer.begin_unpack(field)
        field_args = field.unpack_args(field_packer)
        field_packer.end_unpack()
        self._required_fields[field.get_number()] = field_args
    # receive datagrams addressed directly to this object's id
    self._network.register_for_channel(self._do_id)
# Accessors. Each mutable attribute (owner/parent/zone) also exposes an
# *old_* companion; the setters record the previous value there before
# overwriting, so message handlers can refer to where the object was.
@property
def do_id(self):
    return self._do_id

@property
def old_owner_id(self):
    return self._old_owner_id

@property
def owner_id(self):
    return self._owner_id

@owner_id.setter
def owner_id(self, owner_id):
    # remember the previous owner before switching
    self._old_owner_id = self._owner_id
    self._owner_id = owner_id

@property
def old_parent_id(self):
    return self._old_parent_id

@property
def parent_id(self):
    return self._parent_id

@parent_id.setter
def parent_id(self, parent_id):
    # remember the previous parent (AI channel) before switching
    self._old_parent_id = self._parent_id
    self._parent_id = parent_id

@property
def old_zone_id(self):
    return self._old_zone_id

@property
def zone_id(self):
    return self._zone_id

@zone_id.setter
def zone_id(self, zone_id):
    # remember the previous zone before switching
    self._old_zone_id = self._zone_id
    self._zone_id = zone_id

@property
def dc_class(self):
    return self._dc_class

@property
def has_other(self):
    return self._has_other
def append_required_data(self, datagram):
    """Pack every stored required field value onto *datagram*.

    Values were captured in ``__init__``; they are packed in the
    iteration order of the ``_required_fields`` dict.
    """
    field_packer = DCPacker()
    for index in self._required_fields:
        field = self._dc_class.get_field_by_index(index)
        if not field:
            # bug fix: the message previously interpolated an undefined
            # local ``field_name`` which raised NameError instead of
            # reporting the real problem
            self.notify.error('Failed to append required data for field: %d dclass: %s, unknown field!' % (
                index, self._dc_class.get_name()))
        field_packer.begin_pack(field)
        field.pack_args(field_packer, self._required_fields[field.get_number()])
        field_packer.end_pack()
    datagram.append_data(field_packer.get_string())
def append_other_data(self, datagram):
    """Pack every stored "other" field value onto *datagram*.

    Mirror of :meth:`append_required_data` for the optional fields.
    """
    field_packer = DCPacker()
    for index in self._other_fields:
        field = self._dc_class.get_field_by_index(index)
        if not field:
            # bug fix: the message previously interpolated an undefined
            # local ``field_name`` which raised NameError instead of
            # reporting the real problem
            self.notify.error('Failed to append other data for field: %d dclass: %s, unknown field!' % (
                index, self._dc_class.get_name()))
        field_packer.begin_pack(field)
        field.pack_args(field_packer, self._other_fields[field.get_number()])
        field_packer.end_pack()
    datagram.append_data(field_packer.get_string())
def setup(self):
    """Announce this object's existence by broadcasting a generate
    message for its current location."""
    self.handle_send_generate_broadcast()
def handle_internal_datagram(self, sender, message_type, di):
    """Dispatch an internal state-server message to its handler;
    unknown message types are logged and dropped."""
    dispatch = {
        types.STATESERVER_OBJECT_SET_OWNER: self.handle_set_owner,
        types.STATESERVER_OBJECT_SET_AI: self.handle_set_ai,
        types.STATESERVER_OBJECT_SET_ZONE: self.handle_set_zone,
    }
    handler = dispatch.get(message_type)
    if handler is None:
        self.notify.warning('Received unknown message type %d for state object %d!' % (
            message_type, self._do_id))
        return
    handler(sender, di)
def handle_set_owner(self, sender, di):
    # update the object's new owner id so that owner
    # can now send field updates for this object...
    # (the owner_id property setter also records the previous
    # owner in _old_owner_id)
    self.owner_id = di.get_uint64()
def handle_set_ai(self, sender, di):
    """Move this object to a new AI (parent channel).

    Notifies the old AI of the departure, generates the object on the
    new AI, and — when the request came from a client agent rather
    than a shard — acknowledges the move back to the sender.
    """
    new_parent_id = di.get_uint64()
    # check to see if the request provides a new AI channel
    # in which the object can live under...
    if new_parent_id == self._parent_id:
        self.notify.warning('Failed to change to parent: %d for object: %d, object did not change parent\'s!' % (
            new_parent_id, self._do_id))
        return
    # update the object's new parent so that we know
    # which AI the object lives under...
    # (the setter stores the previous value in _old_parent_id)
    self.parent_id = new_parent_id
    # tell the object's old AI that they have left and are
    # moving to an new AI...
    if self._old_parent_id:
        datagram = io.NetworkDatagram()
        datagram.add_header(self._old_parent_id, self._do_id,
            types.STATESERVER_OBJECT_CHANGING_AI)
        datagram.add_uint64(self._do_id)
        self._network.handle_send_connection_datagram(datagram)
    # tell the new AI that the object has arrived,
    # this will generate the object on the new AI...
    self.handle_send_ai_generate()
    # the sender of this message was a client agent handler,
    # a client requested it's object move AI's...
    # so let's send a response to say that the object has
    # successfully moved AI's
    if not self._network.shard_manager.has_shard(sender):
        datagram = io.NetworkDatagram()
        datagram.add_header(sender, self._do_id,
            types.STATESERVER_OBJECT_SET_AI_RESP)
        self._network.handle_send_connection_datagram(datagram)
def handle_set_zone(self, sender, di):
    """Move this object to a new zone and swap its interest set.

    Order matters here: old-zone objects are deleted, quiet-zone
    objects are generated, the AI is told about the location change,
    the owner is told the zone switch is complete, and only then are
    the new zone's objects generated.
    """
    # update the object's new zone so that the object
    # will be located within that new interests...
    # (the setter stores the previous value in _old_zone_id)
    self.zone_id = di.get_uint32()
    # delete any existing objects within our new interest set,
    # exclude our own object since thats a local object...
    self.handle_delete_objects(excludes=[self._do_id])
    self.handle_send_delete_broadcast(excludes=[self._do_id])
    # send generates for the quite zone objects before we change the avatar's
    # zone so that they always have interest in those objects...
    self.handle_send_generates(quietZone=True, excludes=[self._do_id])
    # tell our current AI channel that we're changing location
    # and that they need to update their instance of the object...
    self.handle_send_changing_location()
    # if we have an owner, tell them that we've sent all of the initial zone
    # objects in the new interest set...
    self.handle_send_set_zone(self._owner_id, self._zone_id, self._old_zone_id)
    # generate any new objects within our new interest set,
    # exclude our own object since thats a local object...
    self.handle_send_generates(excludes=[self._do_id])
    self.handle_send_generate_broadcast(excludes=[self._do_id])
def handle_update_field(self, sender, channel, di):
    """Validate, forward, and locally apply a field update for this object.

    Flow: resolve the field from its index, snapshot the raw payload,
    enforce send permissions (ownsend/clsend for client senders), forward
    the update (point-to-point or broadcast), then unpack the arguments to
    refresh our required/other field caches.
    """
    field_id = di.get_uint16()
    field = self._dc_class.get_field_by_index(field_id)
    if not field:
        self.notify.warning('Failed to update field: %d dclass: %s, unknown field!' % (
            field_id, self._dc_class.get_name()))
        return
    # Copy the field update payload so we can unpack it more than once:
    # first to forward it, later to update the state server's own view.
    # The permission checks must run before anything is forwarded.
    datagram = io.NetworkDatagram()
    datagram.append_data(di.get_remaining_bytes())
    # Fresh iterator over the copied payload for the forwarding path.
    di = io.NetworkDatagramIterator(datagram)
    # Ensure this field is not a bogus field.
    if field.is_bogus_field():
        self.notify.debug('Cannot handle field update for field: %s dclass: %s, field is bogus!' % (
            field.get_name(), self._dc_class.get_name()))
        return
    if not self._network.shard_manager.has_shard(sender):
        # Sender is not a shard, i.e. the update came from a client agent.
        avatar_id = self._network.get_avatar_id_from_connection_channel(sender)
        if not avatar_id:
            # NOTE(review): avatar_id is falsy (0/None) on this path, so the
            # %d below always logs a zero -- confirm the intended message.
            self.notify.warning('Cannot handle field update for field: %s dclass: %s, unknown avatar: %d!' % (
                field.get_name(), self._dc_class.get_name(), avatar_id))
            return
        # Client senders may only send owner-sendable or client-sendable fields.
        if field.is_ownsend():
            if sender != self._owner_id:
                self.notify.warning('Cannot handle field update for field: %s dclass: %s, field not sendable!' % (
                    field.get_name(), self._dc_class.get_name()))
                return
        else:
            if not field.is_clsend():
                self.notify.warning('Cannot handle field update for field: %s dclass: %s, field not sendable!' % (
                    field.get_name(), self._dc_class.get_name()))
                return
        if not field.is_broadcast():
            self.handle_send_update(field, sender, self._parent_id, di)
        else:
            # NOTE(review): excludes compares against state-object do_ids in
            # the broadcast helper, yet an avatar_id is passed here -- verify
            # that avatar ids and do ids share the same number space.
            self.handle_send_update_broadcast(field, sender, di, excludes=[avatar_id])
    else:
        # Sender is a shard (AI); forward on its behalf via our parent.
        if not field.is_broadcast():
            self.handle_send_update(field, self._parent_id, channel, di)
        else:
            self.handle_send_update_broadcast(field, self._parent_id, di, excludes=[self._do_id])
    # Unpack the field arguments so that we can update our view of the
    # object's required or other field dictionaries.
    di = io.NetworkDatagramIterator(datagram)
    # If the iterator is empty the field has no arguments and there is
    # nothing to cache.
    if not di.get_remaining_size():
        return
    field_packer = DCPacker()
    field_packer.set_unpack_data(di.get_remaining_bytes())
    field_packer.begin_unpack(field)
    field_args = field.unpack_args(field_packer)
    field_packer.end_unpack()
    # Store the new arguments in the appropriate cache: required fields are
    # sent with every generate, ram fields persist as "other" data.
    if field.is_required():
        self._required_fields[field.get_number()] = field_args
    elif field.is_ram():
        self._other_fields[field.get_number()] = field_args
def handle_send_changing_location(self):
    """Notify our parent (AI) channel that this object is changing location."""
    notice = io.NetworkDatagram()
    notice.add_header(self._parent_id, self._do_id,
        types.STATESERVER_OBJECT_CHANGING_LOCATION)
    # Body: doId, then the new (parent, zone) pair, all uint32.
    for value in (self._do_id, self._parent_id, self._zone_id):
        notice.add_uint32(value)
    self._network.handle_send_connection_datagram(notice)
def handle_send_set_zone(self, channel, zone_id, old_zone_id):
    """Send a SET_ZONE response (old zone first, then new zone) to `channel`."""
    response = io.NetworkDatagram()
    response.add_header(channel, self._do_id,
        types.STATESERVER_OBJECT_SET_ZONE_RESP)
    # Wire order is old zone followed by new zone.
    response.add_uint32(old_zone_id)
    response.add_uint32(zone_id)
    self._network.handle_send_connection_datagram(response)
def handle_send_update(self, field, sender, channel, di):
    """Forward a single field update for this object to `channel`.

    The remaining bytes of `di` (if any) are re-packed as the literal
    field value on behalf of `sender`.
    """
    # Pack the field payload first; the datagram is assembled afterwards.
    packer = DCPacker()
    packer.begin_pack(field)
    if di.get_remaining_size():
        packer.pack_literal_value(di.get_remaining_bytes())
    packer.end_pack()
    update = io.NetworkDatagram()
    update.add_header(channel, sender,
        types.STATESERVER_OBJECT_UPDATE_FIELD)
    update.add_uint32(self._do_id)
    update.add_uint16(field.get_number())
    update.append_data(packer.get_string())
    self._network.handle_send_connection_datagram(update)
def handle_send_update_broadcast(self, field, sender, di, excludes=None):
    """Broadcast a field update to every owned object sharing our location.

    Sends the update to the owner channel of each state object in our
    (parent, zone), then to our own parent channel when we have no owner.

    Parameters
    ----------
    field : DC field being updated
    sender : channel the update is sent on behalf of
    di : datagram iterator positioned at the field payload
    excludes : list of do_ids to skip, or None for no exclusions
    """
    # BUGFIX: the default was a shared mutable list (`excludes=[]`), which
    # every call without the argument would alias; use None as sentinel.
    if excludes is None:
        excludes = []
    for state_object in self._network.object_manager.state_objects.values():
        if state_object.do_id in excludes:
            continue
        if state_object.parent_id == self._parent_id and state_object.zone_id == self._zone_id:
            # Only objects with an owner channel can receive the update.
            if not state_object.owner_id:
                continue
            self.handle_send_update(field, sender, state_object.owner_id, di)
    if self._do_id in excludes:
        return
    # No owner of our own: route the update through our parent (AI) instead.
    if not self._owner_id:
        self.handle_send_update(field, sender, self._parent_id, di)
def handle_send_ai_generate(self):
    """Generate this object on its AI (parent) channel.

    Chooses the REQUIRED or REQUIRED_OTHER enter message depending on
    whether the object carries "other" fields.
    """
    if self._has_other:
        message_type = types.STATESERVER_OBJECT_ENTER_AI_WITH_REQUIRED_OTHER
    else:
        message_type = types.STATESERVER_OBJECT_ENTER_AI_WITH_REQUIRED
    datagram = io.NetworkDatagram()
    datagram.add_header(self._parent_id, self._network.channel, message_type)
    datagram.add_uint32(self._do_id)
    datagram.add_uint32(self._parent_id)
    datagram.add_uint32(self._zone_id)
    datagram.add_uint16(self._dc_class.get_number())
    if self._has_other:
        self.append_other_data(datagram)
    else:
        self.append_required_data(datagram)
    self._network.handle_send_connection_datagram(datagram)
def handle_send_generate(self, channel):
    """Generate this object for `channel` (enter-location message).

    Uses the REQUIRED or REQUIRED_OTHER variant depending on whether the
    object carries "other" fields.
    """
    if self._has_other:
        message_type = types.STATESERVER_OBJECT_ENTER_LOCATION_WITH_REQUIRED_OTHER
    else:
        message_type = types.STATESERVER_OBJECT_ENTER_LOCATION_WITH_REQUIRED
    datagram = io.NetworkDatagram()
    datagram.add_header(channel, self._network.channel, message_type)
    # NOTE: doId/parent are 64-bit here, unlike the 32-bit AI generate path.
    datagram.add_uint64(self._do_id)
    datagram.add_uint64(self._parent_id)
    datagram.add_uint32(self._zone_id)
    datagram.add_uint16(self._dc_class.get_number())
    if self._has_other:
        self.append_other_data(datagram)
    else:
        self.append_required_data(datagram)
    self._network.handle_send_connection_datagram(datagram)
def handle_send_generate_broadcast(self, excludes=None):
    """Generate this object for the owner of every object in our location.

    Parameters
    ----------
    excludes : list of do_ids to skip, or None for no exclusions
    """
    # BUGFIX: replaced the shared mutable default (`excludes=[]`) with a
    # None sentinel; the old default list was shared between calls.
    if excludes is None:
        excludes = []
    for state_object in self._network.object_manager.state_objects.values():
        if state_object.do_id in excludes:
            continue
        if state_object.parent_id == self._parent_id and state_object.zone_id == self._zone_id:
            # Only objects with an owner channel can receive the generate.
            if not state_object.owner_id:
                continue
            self.handle_send_generate(state_object.owner_id)
def handle_send_generates(self, quietZone=False, excludes=None):
    """Send our owner a generate for every object sharing our location.

    Parameters
    ----------
    quietZone : when False, objects sitting in the old quiet zone are skipped
    excludes : list of do_ids to skip, or None for no exclusions
    """
    # BUGFIX: replaced the shared mutable default (`excludes=[]`) with a
    # None sentinel; the old default list was shared between calls.
    if excludes is None:
        excludes = []
    for state_object in self._network.object_manager.state_objects.values():
        if state_object.do_id in excludes:
            continue
        if state_object.zone_id == OTP_ZONE_ID_OLD_QUIET_ZONE and not quietZone:
            continue
        if state_object.parent_id == self._parent_id and state_object.zone_id == self._zone_id:
            state_object.handle_send_generate(self._owner_id)
def handle_send_delete(self, channel):
    """Tell `channel` to delete this object from RAM."""
    deletion = io.NetworkDatagram()
    deletion.add_header(channel, self._network.channel,
        types.STATESERVER_OBJECT_DELETE_RAM)
    deletion.add_uint64(self._do_id)
    self._network.handle_send_connection_datagram(deletion)
def handle_send_delete_broadcast(self, cleanup=False, excludes=[]):
for state_object in self._network.object_manager.state_objects.values():
if state_object.do_id in excludes:
continue
if (state_object.parent_id == self._old_parent_id or state_object.zone_id == self._old_zone_id) or (cleanup and | |
16-bit (half) sort."""
ctx = _get_ctx(ctx)
return FPSortRef(Z3_mk_fpa_sort_half(ctx.ref()), ctx)
def Float32(ctx=None):
    """Floating-point 32-bit (single) sort."""
    context = _get_ctx(ctx)
    return FPSortRef(Z3_mk_fpa_sort_32(context.ref()), context)
def FloatSingle(ctx=None):
    """Floating-point 32-bit (single) sort."""
    context = _get_ctx(ctx)
    return FPSortRef(Z3_mk_fpa_sort_single(context.ref()), context)
def Float64(ctx=None):
    """Floating-point 64-bit (double) sort."""
    context = _get_ctx(ctx)
    return FPSortRef(Z3_mk_fpa_sort_64(context.ref()), context)
def FloatDouble(ctx=None):
    """Floating-point 64-bit (double) sort."""
    context = _get_ctx(ctx)
    return FPSortRef(Z3_mk_fpa_sort_double(context.ref()), context)
def Float128(ctx=None):
    """Floating-point 128-bit (quadruple) sort."""
    context = _get_ctx(ctx)
    return FPSortRef(Z3_mk_fpa_sort_128(context.ref()), context)
def FloatQuadruple(ctx=None):
    """Floating-point 128-bit (quadruple) sort."""
    context = _get_ctx(ctx)
    return FPSortRef(Z3_mk_fpa_sort_quadruple(context.ref()), context)
class FPRMSortRef(SortRef):
    """Floating-point rounding mode sort."""
def is_fp_sort(s):
    """Check whether `s` is a Z3 floating-point sort.

    >>> is_fp_sort(FPSort(8, 24))
    True
    >>> is_fp_sort(IntSort())
    False
    """
    return isinstance(s, FPSortRef)
def is_fprm_sort(s):
    """Check whether `s` is a Z3 floating-point rounding mode sort.

    >>> is_fprm_sort(FPSort(8, 24))
    False
    >>> is_fprm_sort(RNE().sort())
    True
    """
    return isinstance(s, FPRMSortRef)
### FP Expressions
class FPRef(ExprRef):
    """Floating-point expressions."""

    def sort(self):
        """Return the sort of the floating-point expression `self`.

        >>> x = FP('1.0', FPSort(8, 24))
        >>> x.sort()
        FPSort(8, 24)
        >>> x.sort() == FPSort(8, 24)
        True
        """
        return FPSortRef(Z3_get_sort(self.ctx_ref(), self.as_ast()), self.ctx)

    def ebits(self):
        """Retrieves the number of bits reserved for the exponent in the FloatingPoint expression `self`.

        >>> b = FPSort(8, 24)
        >>> b.ebits()
        8
        """
        # Stray trailing semicolon removed.
        return self.sort().ebits()

    def sbits(self):
        """Retrieves the number of bits reserved for the significand in the FloatingPoint expression `self`.

        >>> b = FPSort(8, 24)
        >>> b.sbits()
        24
        """
        # BUGFIX(doc): the docstring previously said "exponent" (copy-paste
        # from ebits); this accessor returns the significand width.
        return self.sort().sbits()

    def as_string(self):
        """Return a Z3 floating point expression as a Python string."""
        return Z3_ast_to_string(self.ctx_ref(), self.as_ast())

    def __le__(self, other):
        """Create the Z3 expression `self <= other`."""
        return fpLEQ(self, other, self.ctx)

    def __lt__(self, other):
        """Create the Z3 expression `self < other`."""
        return fpLT(self, other, self.ctx)

    def __ge__(self, other):
        """Create the Z3 expression `self >= other`."""
        return fpGEQ(self, other, self.ctx)

    def __gt__(self, other):
        """Create the Z3 expression `self > other`."""
        return fpGT(self, other, self.ctx)

    def __add__(self, other):
        """Create the Z3 expression `self + other`.

        >>> x = FP('x', FPSort(8, 24))
        >>> y = FP('y', FPSort(8, 24))
        >>> x + y
        x + y
        >>> (x + y).sort()
        FPSort(8, 24)
        """
        [a, b] = _coerce_fp_expr_list([self, other], self.ctx)
        return fpAdd(_dflt_rm(), a, b, self.ctx)

    def __radd__(self, other):
        """Create the Z3 expression `other + self`.

        >>> x = FP('x', FPSort(8, 24))
        >>> 10 + x
        1.25*(2**3) + x
        """
        [a, b] = _coerce_fp_expr_list([other, self], self.ctx)
        return fpAdd(_dflt_rm(), a, b, self.ctx)

    def __sub__(self, other):
        """Create the Z3 expression `self - other`.

        >>> x = FP('x', FPSort(8, 24))
        >>> y = FP('y', FPSort(8, 24))
        >>> x - y
        x - y
        >>> (x - y).sort()
        FPSort(8, 24)
        """
        [a, b] = _coerce_fp_expr_list([self, other], self.ctx)
        return fpSub(_dflt_rm(), a, b, self.ctx)

    def __rsub__(self, other):
        """Create the Z3 expression `other - self`.

        >>> x = FP('x', FPSort(8, 24))
        >>> 10 - x
        1.25*(2**3) - x
        """
        [a, b] = _coerce_fp_expr_list([other, self], self.ctx)
        return fpSub(_dflt_rm(), a, b, self.ctx)

    def __mul__(self, other):
        """Create the Z3 expression `self * other`.

        >>> x = FP('x', FPSort(8, 24))
        >>> y = FP('y', FPSort(8, 24))
        >>> x * y
        x * y
        >>> (x * y).sort()
        FPSort(8, 24)
        >>> 10 * y
        1.25*(2**3) * y
        """
        [a, b] = _coerce_fp_expr_list([self, other], self.ctx)
        return fpMul(_dflt_rm(), a, b, self.ctx)

    def __rmul__(self, other):
        """Create the Z3 expression `other * self`.

        >>> x = FP('x', FPSort(8, 24))
        >>> y = FP('y', FPSort(8, 24))
        >>> x * y
        x * y
        >>> x * 10
        x * 1.25*(2**3)
        """
        [a, b] = _coerce_fp_expr_list([other, self], self.ctx)
        return fpMul(_dflt_rm(), a, b, self.ctx)

    def __pos__(self):
        """Create the Z3 expression `+self`."""
        return self

    def __neg__(self):
        """Create the Z3 expression `-self`.

        >>> x = FP('x', Float32())
        >>> -x
        -x
        """
        return fpNeg(self)

    def __div__(self, other):
        """Create the Z3 expression `self / other`.

        >>> x = FP('x', FPSort(8, 24))
        >>> y = FP('y', FPSort(8, 24))
        >>> x / y
        x / y
        >>> (x / y).sort()
        FPSort(8, 24)
        >>> 10 / y
        1.25*(2**3) / y
        """
        [a, b] = _coerce_fp_expr_list([self, other], self.ctx)
        return fpDiv(_dflt_rm(), a, b, self.ctx)

    def __rdiv__(self, other):
        """Create the Z3 expression `other / self`.

        >>> x = FP('x', FPSort(8, 24))
        >>> y = FP('y', FPSort(8, 24))
        >>> x / y
        x / y
        >>> x / 10
        x / 1.25*(2**3)
        """
        [a, b] = _coerce_fp_expr_list([other, self], self.ctx)
        return fpDiv(_dflt_rm(), a, b, self.ctx)

    def __truediv__(self, other):
        """Create the Z3 expression division `self / other` (Python 3)."""
        return self.__div__(other)

    def __rtruediv__(self, other):
        """Create the Z3 expression division `other / self` (Python 3)."""
        return self.__rdiv__(other)

    def __mod__(self, other):
        """Create the Z3 expression mod `self % other`."""
        return fpRem(self, other)

    def __rmod__(self, other):
        """Create the Z3 expression mod `other % self`."""
        return fpRem(other, self)
class FPRMRef(ExprRef):
    """Floating-point rounding mode expressions."""

    def as_string(self):
        """Return the rounding-mode expression `self` as a Python string."""
        return Z3_ast_to_string(self.ctx_ref(), self.as_ast())
def RoundNearestTiesToEven(ctx=None):
    """Return the round-nearest-ties-to-even rounding mode."""
    context = _get_ctx(ctx)
    return FPRMRef(Z3_mk_fpa_round_nearest_ties_to_even(context.ref()), context)
def RNE(ctx=None):
    """Shorthand for RoundNearestTiesToEven."""
    return RoundNearestTiesToEven(ctx)
def RoundNearestTiesToAway(ctx=None):
    """Return the round-nearest-ties-to-away rounding mode."""
    context = _get_ctx(ctx)
    return FPRMRef(Z3_mk_fpa_round_nearest_ties_to_away(context.ref()), context)
def RNA(ctx=None):
    """Shorthand for RoundNearestTiesToAway."""
    return RoundNearestTiesToAway(ctx)
def RoundTowardPositive(ctx=None):
    """Return the round-toward-positive (toward +oo) rounding mode."""
    context = _get_ctx(ctx)
    return FPRMRef(Z3_mk_fpa_round_toward_positive(context.ref()), context)
def RTP(ctx=None):
    """Shorthand for RoundTowardPositive."""
    return RoundTowardPositive(ctx)
def RoundTowardNegative(ctx=None):
    """Return the round-toward-negative (toward -oo) rounding mode."""
    context = _get_ctx(ctx)
    return FPRMRef(Z3_mk_fpa_round_toward_negative(context.ref()), context)
def RTN(ctx=None):
    """Shorthand for RoundTowardNegative."""
    return RoundTowardNegative(ctx)
def RoundTowardZero(ctx=None):
    """Return the round-toward-zero (truncation) rounding mode."""
    context = _get_ctx(ctx)
    return FPRMRef(Z3_mk_fpa_round_toward_zero(context.ref()), context)
def RTZ(ctx=None):
    """Shorthand for RoundTowardZero."""
    return RoundTowardZero(ctx)
def is_fprm(a):
    """Check whether `a` is a Z3 floating-point rounding mode expression.

    >>> rm = RNE()
    >>> is_fprm(rm)
    True
    >>> rm = 1.0
    >>> is_fprm(rm)
    False
    """
    return isinstance(a, FPRMRef)
def is_fprm_value(a):
    """Return `True` if `a` is a Z3 floating-point rounding mode numeral value."""
    if not is_fprm(a):
        return False
    return _is_numeral(a.ctx, a.ast)
### FP Numerals
class FPNumRef(FPRef):
    """Floating-point numerals.

    NOTE: the method descriptions below were previously bare strings placed
    *before* each method in the class body; only the first one became the
    class docstring and none were attached to their methods.  They are now
    real method docstrings (doctest-style examples were converted to prose
    so the z3 doctest harness does not pick up previously-unchecked output).
    """

    def sign(self):
        """Return the sign of the numeral: True iff it is negative.

        >>> x = FPVal(+1.0, FPSort(8, 24))
        >>> x.sign()
        False
        >>> x = FPVal(-1.0, FPSort(8, 24))
        >>> x.sign()
        True
        """
        l = (ctypes.c_int)()
        # Idiom fix: was `... == False`.
        if not Z3_fpa_get_numeral_sign(self.ctx.ref(), self.as_ast(), byref(l)):
            raise Z3Exception("error retrieving the sign of a numeral.")
        return l.value != 0

    def sign_as_bv(self):
        """Return the sign of the numeral as a bit-vector expression.

        Remark: NaN's are invalid arguments.
        """
        return BitVecNumRef(Z3_fpa_get_numeral_sign_bv(self.ctx.ref(), self.as_ast()), self.ctx)

    def significand(self):
        """Return the significand of the numeral as a decimal string,
        e.g. '1.25' for FPVal(2.5, FPSort(8, 24))."""
        return Z3_fpa_get_numeral_significand_string(self.ctx.ref(), self.as_ast())

    def significand_as_long(self):
        """Return the significand of the numeral as an unsigned 64-bit integer.

        Raises Z3Exception when the significand cannot be retrieved.
        (The old comment's example value of 1.25 was wrong: this accessor
        returns an integer, not the decimal significand.)
        """
        ptr = (ctypes.c_ulonglong * 1)()
        if not Z3_fpa_get_numeral_significand_uint64(self.ctx.ref(), self.as_ast(), ptr):
            raise Z3Exception("error retrieving the significand of a numeral.")
        return ptr[0]

    def significand_as_bv(self):
        """Return the significand of the numeral as a bit-vector expression.

        Remark: NaN are invalid arguments.
        """
        return BitVecNumRef(Z3_fpa_get_numeral_significand_bv(self.ctx.ref(), self.as_ast()), self.ctx)

    def exponent(self, biased=True):
        """Return the exponent of the numeral as a string,
        e.g. '1' for FPVal(2.5, FPSort(8, 24))."""
        return Z3_fpa_get_numeral_exponent_string(self.ctx.ref(), self.as_ast(), biased)

    def exponent_as_long(self, biased=True):
        """Return the exponent of the numeral as a signed 64-bit integer.

        Raises Z3Exception when the exponent cannot be retrieved.
        """
        ptr = (ctypes.c_longlong * 1)()
        if not Z3_fpa_get_numeral_exponent_int64(self.ctx.ref(), self.as_ast(), ptr, biased):
            raise Z3Exception("error retrieving the exponent of a numeral.")
        return ptr[0]

    def exponent_as_bv(self, biased=True):
        """Return the exponent of the numeral as a bit-vector expression.

        Remark: NaNs are invalid arguments.
        """
        return BitVecNumRef(Z3_fpa_get_numeral_exponent_bv(self.ctx.ref(), self.as_ast(), biased), self.ctx)

    def isNaN(self):
        """Indicate whether the numeral is a NaN."""
        return Z3_fpa_is_numeral_nan(self.ctx.ref(), self.as_ast())

    def isInf(self):
        """Indicate whether the numeral is +oo or -oo."""
        return Z3_fpa_is_numeral_inf(self.ctx.ref(), self.as_ast())

    def isZero(self):
        """Indicate whether the numeral is +zero or -zero."""
        return Z3_fpa_is_numeral_zero(self.ctx.ref(), self.as_ast())

    def isNormal(self):
        """Indicate whether the numeral is normal."""
        return Z3_fpa_is_numeral_normal(self.ctx.ref(), self.as_ast())

    def isSubnormal(self):
        """Indicate whether the numeral is subnormal."""
        return Z3_fpa_is_numeral_subnormal(self.ctx.ref(), self.as_ast())

    def isPositive(self):
        """Indicate whether the numeral is positive."""
        return Z3_fpa_is_numeral_positive(self.ctx.ref(), self.as_ast())

    def isNegative(self):
        """Indicate whether the numeral is negative."""
        return Z3_fpa_is_numeral_negative(self.ctx.ref(), self.as_ast())

    def as_string(self):
        """Return the numeral as a string of the form 'FPVal(<value>, <sort>)'."""
        s = Z3_get_numeral_string(self.ctx.ref(), self.as_ast())
        return ("FPVal(%s, %s)" % (s, self.sort()))
def is_fp(a):
    """Check whether `a` is a Z3 floating-point expression.

    >>> b = FP('b', FPSort(8, 24))
    >>> is_fp(b)
    True
    >>> is_fp(b + 1.0)
    True
    >>> is_fp(Int('x'))
    False
    """
    return isinstance(a, FPRef)
def is_fp_value(a):
    """Check whether `a` is a Z3 floating-point numeral value.

    >>> b = FP('b', FPSort(8, 24))
    >>> is_fp_value(b)
    False
    >>> b = FPVal(1.0, FPSort(8, 24))
    >>> b
    1
    >>> is_fp_value(b)
    True
    """
    if not is_fp(a):
        return False
    return _is_numeral(a.ctx, a.ast)
def FPSort(ebits, sbits, ctx=None):
"""Return a Z3 floating-point sort of the given sizes. If `ctx=None`, | |
<gh_stars>0
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Functional tests running the TPC-DS workload
#
import pytest
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.test_dimensions import (
create_single_exec_option_dimension,
is_supported_insert_format)
class TestTpcdsQuery(ImpalaTestSuite):
  """Functional tests running the TPC-DS workload (decimal_v2 disabled)."""

  @classmethod
  def get_workload(cls):
    return 'tpcds'

  @classmethod
  def add_test_dimensions(cls):
    super(TestTpcdsQuery, cls).add_test_dimensions()
    cls.ImpalaTestMatrix.add_constraint(lambda v:
        v.get_value('table_format').file_format not in ['rc', 'hbase', 'kudu'] and
        v.get_value('table_format').compression_codec in ['none', 'snap'] and
        v.get_value('table_format').compression_type != 'record')
    cls.ImpalaTestMatrix.add_mandatory_exec_option('decimal_v2', 0)
    if cls.exploration_strategy() != 'exhaustive':
      # Cut down on the execution time for these tests in core by running only
      # against parquet.
      cls.ImpalaTestMatrix.add_constraint(lambda v:
          v.get_value('table_format').file_format in ['parquet'])
    # NOTE(review): the flattened original did not make the scope of this
    # constraint unambiguous; placed unconditionally to match upstream style.
    cls.ImpalaTestMatrix.add_constraint(lambda v:
        v.get_value('exec_option')['batch_size'] == 0)

  def _run_query(self, suffix, vector):
    # DRY helper: every test below ran the same
    # `self.run_test_case(self.get_workload() + suffix, vector)` expression.
    self.run_test_case(self.get_workload() + suffix, vector)

  @pytest.mark.execute_serially
  # Marked serially to make sure it runs first.
  def test_tpcds_count(self, vector):
    self.run_test_case('count', vector)

  def test_tpcds_q1(self, vector):
    self._run_query('-q1', vector)

  def test_tpcds_q2(self, vector):
    self._run_query('-q2', vector)

  def test_tpcds_q3(self, vector):
    self._run_query('-q3', vector)

  def test_tpcds_q4(self, vector):
    self._run_query('-q4', vector)

  def test_tpcds_q6(self, vector):
    self._run_query('-q6', vector)

  def test_tpcds_q7(self, vector):
    self._run_query('-q7', vector)

  def test_tpcds_q8(self, vector):
    self._run_query('-q8', vector)

  def test_tpcds_q9(self, vector):
    self._run_query('-q9', vector)

  def test_tpcds_q10a(self, vector):
    self._run_query('-q10a', vector)

  def test_tpcds_q11(self, vector):
    self._run_query('-q11', vector)

  def test_tpcds_q12(self, vector):
    self._run_query('-q12', vector)

  def test_tpcds_q13(self, vector):
    self._run_query('-q13', vector)

  def test_tpcds_q15(self, vector):
    self._run_query('-q15', vector)

  def test_tpcds_q16(self, vector):
    self._run_query('-q16', vector)

  def test_tpcds_q17(self, vector):
    self._run_query('-q17', vector)

  def test_tpcds_q18a(self, vector):
    self._run_query('-q18a', vector)

  def test_tpcds_q19(self, vector):
    self._run_query('-q19', vector)

  def test_tpcds_q20(self, vector):
    self._run_query('-q20', vector)

  def test_tpcds_q21(self, vector):
    self._run_query('-q21', vector)

  def test_tpcds_q23_1(self, vector):
    self._run_query('-q23-1', vector)

  def test_tpcds_q23_2(self, vector):
    self._run_query('-q23-2', vector)

  def test_tpcds_q24_1(self, vector):
    self._run_query('-q24-1', vector)

  def test_tpcds_q24_2(self, vector):
    self._run_query('-q24-2', vector)

  def test_tpcds_q25(self, vector):
    self._run_query('-q25', vector)

  def test_tpcds_q26(self, vector):
    self._run_query('-q26', vector)

  def test_tpcds_q29(self, vector):
    self._run_query('-q29', vector)

  def test_tpcds_q30(self, vector):
    self._run_query('-q30', vector)

  def test_tpcds_q32(self, vector):
    self._run_query('-q32', vector)

  def test_tpcds_q33(self, vector):
    self._run_query('-q33', vector)

  def test_tpcds_q34(self, vector):
    self._run_query('-q34', vector)

  def test_tpcds_q37(self, vector):
    self._run_query('-q37', vector)

  def test_tpcds_q39_1(self, vector):
    self._run_query('-q39-1', vector)

  def test_tpcds_q39_2(self, vector):
    self._run_query('-q39-2', vector)

  def test_tpcds_q40(self, vector):
    self._run_query('-q40', vector)

  def test_tpcds_q41(self, vector):
    self._run_query('-q41', vector)

  def test_tpcds_q42(self, vector):
    self._run_query('-q42', vector)

  def test_tpcds_q43(self, vector):
    self._run_query('-q43', vector)

  def test_tpcds_q44(self, vector):
    self._run_query('-q44', vector)

  def test_tpcds_q46(self, vector):
    self._run_query('-q46', vector)

  def test_tpcds_q47(self, vector):
    self._run_query('-q47', vector)

  def test_tpcds_q48(self, vector):
    self._run_query('-q48', vector)

  def test_tpcds_q50(self, vector):
    self._run_query('-q50', vector)

  def test_tpcds_q51(self, vector):
    self._run_query('-q51', vector)

  def test_tpcds_q51a(self, vector):
    self._run_query('-q51a', vector)

  def test_tpcds_q52(self, vector):
    self._run_query('-q52', vector)

  def test_tpcds_q53(self, vector):
    self._run_query('-q53', vector)

  def test_tpcds_q54(self, vector):
    self._run_query('-q54', vector)

  def test_tpcds_q55(self, vector):
    self._run_query('-q55', vector)

  def test_tpcds_q56(self, vector):
    self._run_query('-q56', vector)

  def test_tpcds_q57(self, vector):
    self._run_query('-q57', vector)

  def test_tpcds_q58(self, vector):
    self._run_query('-q58', vector)

  def test_tpcds_q59(self, vector):
    self._run_query('-q59', vector)

  def test_tpcds_q60(self, vector):
    self._run_query('-q60', vector)

  def test_tpcds_q61(self, vector):
    self._run_query('-q61', vector)

  def test_tpcds_q62(self, vector):
    self._run_query('-q62', vector)

  def test_tpcds_q63(self, vector):
    self._run_query('-q63', vector)

  def test_tpcds_q64(self, vector):
    self._run_query('-q64', vector)

  def test_tpcds_q65(self, vector):
    self._run_query('-q65', vector)

  def test_tpcds_q67a(self, vector):
    self._run_query('-q67a', vector)

  def test_tpcds_q68(self, vector):
    self._run_query('-q68', vector)

  def test_tpcds_q69(self, vector):
    self._run_query('-q69', vector)

  def test_tpcds_q70a(self, vector):
    self._run_query('-q70a', vector)

  def test_tpcds_q71(self, vector):
    self._run_query('-q71', vector)

  def test_tpcds_q72(self, vector):
    self._run_query('-q72', vector)

  def test_tpcds_q73(self, vector):
    self._run_query('-q73', vector)

  def test_tpcds_q74(self, vector):
    self._run_query('-q74', vector)

  def test_tpcds_q75(self, vector):
    self._run_query('-q75', vector)

  def test_tpcds_q76(self, vector):
    self._run_query('-q76', vector)

  def test_tpcds_q77a(self, vector):
    self._run_query('-q77a', vector)

  def test_tpcds_q78(self, vector):
    self._run_query('-q78', vector)

  def test_tpcds_q79(self, vector):
    self._run_query('-q79', vector)

  def test_tpcds_q80a(self, vector):
    self._run_query('-q80a', vector)

  def test_tpcds_q81(self, vector):
    self._run_query('-q81', vector)

  def test_tpcds_q82(self, vector):
    self._run_query('-q82', vector)

  def test_tpcds_q83(self, vector):
    self._run_query('-q83', vector)

  def test_tpcds_q84(self, vector):
    self._run_query('-q84', vector)

  def test_tpcds_q85(self, vector):
    self._run_query('-q85', vector)

  def test_tpcds_q86a(self, vector):
    self._run_query('-q86a', vector)

  def test_tpcds_q88(self, vector):
    self._run_query('-q88', vector)

  def test_tpcds_q89(self, vector):
    self._run_query('-q89', vector)

  def test_tpcds_q91(self, vector):
    self._run_query('-q91', vector)

  def test_tpcds_q92(self, vector):
    self._run_query('-q92', vector)

  def test_tpcds_q94(self, vector):
    self._run_query('-q94', vector)

  def test_tpcds_q95(self, vector):
    self._run_query('-q95', vector)

  def test_tpcds_q96(self, vector):
    self._run_query('-q96', vector)

  def test_tpcds_q97(self, vector):
    self._run_query('-q97', vector)

  def test_tpcds_q98(self, vector):
    self._run_query('-q98', vector)

  def test_tpcds_q99(self, vector):
    self._run_query('-q99', vector)
class TestTpcdsDecimalV2Query(ImpalaTestSuite):
@classmethod
def get_workload(cls):
return 'tpcds'
@classmethod
def add_test_dimensions(cls):
super(TestTpcdsDecimalV2Query, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_constraint(lambda v:
v.get_value('table_format').file_format not in ['rc', 'hbase', 'kudu'] and
v.get_value('table_format').compression_codec in ['none', 'snap'] and
v.get_value('table_format').compression_type != 'record')
if cls.exploration_strategy() != 'exhaustive':
# Cut down on the execution time for these tests in core by running only
# against parquet.
cls.ImpalaTestMatrix.add_constraint(lambda v:
v.get_value('table_format').file_format in ['parquet'])
cls.ImpalaTestMatrix.add_constraint(lambda v:
v.get_value('exec_option')['batch_size'] == 0)
def test_tpcds_q1(self, vector):
self.run_test_case(self.get_workload() + '-decimal_v2-q1', vector)
def test_tpcds_q2(self, vector):
self.run_test_case(self.get_workload() + '-decimal_v2-q2', vector)
def test_tpcds_q3(self, vector):
self.run_test_case(self.get_workload() + '-decimal_v2-q3', vector)
def test_tpcds_q4(self, vector):
self.run_test_case(self.get_workload() + '-decimal_v2-q4', vector)
def test_tpcds_q5(self, vector):
self.run_test_case(self.get_workload() + '-decimal_v2-q5', vector)
def test_tpcds_q6(self, vector):
self.run_test_case(self.get_workload() + '-decimal_v2-q6', vector)
def test_tpcds_q7(self, vector):
self.run_test_case(self.get_workload() + '-decimal_v2-q7', vector)
def test_tpcds_q8(self, vector):
self.run_test_case(self.get_workload() + '-decimal_v2-q8', vector)
def test_tpcds_q9(self, vector):
self.run_test_case(self.get_workload() + '-decimal_v2-q9', vector)
def test_tpcds_q10a(self, vector):
self.run_test_case(self.get_workload() + '-decimal_v2-q10a', vector)
def test_tpcds_q11(self, vector):
self.run_test_case(self.get_workload() + '-decimal_v2-q11', vector)
def test_tpcds_q12(self, vector):
self.run_test_case(self.get_workload() + '-decimal_v2-q12', vector)
def test_tpcds_q13(self, vector):
self.run_test_case(self.get_workload() + '-decimal_v2-q13', vector)
def test_tpcds_q15(self, vector):
self.run_test_case(self.get_workload() + '-decimal_v2-q15', vector)
def test_tpcds_q16(self, vector):
self.run_test_case(self.get_workload() + '-decimal_v2-q16', vector)
# TPC-DS decimal_v2 query tests q17-q60: generated boilerplate, one method per
# query so each query reports as an individual test result. Each test runs the
# pre-canned test-case file named '<workload>-decimal_v2-q<N>' against the
# supplied test vector.
def test_tpcds_q17(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q17', vector)
def test_tpcds_q18(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q18', vector)
def test_tpcds_q18a(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q18a', vector)
def test_tpcds_q19(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q19', vector)
def test_tpcds_q20(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q20', vector)
def test_tpcds_q21(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q21', vector)
def test_tpcds_q22(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q22', vector)
def test_tpcds_q22a(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q22a', vector)
def test_tpcds_q25(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q25', vector)
def test_tpcds_q26(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q26', vector)
def test_tpcds_q27(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q27', vector)
def test_tpcds_q29(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q29', vector)
def test_tpcds_q30(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q30', vector)
def test_tpcds_q31(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q31', vector)
def test_tpcds_q32(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q32', vector)
def test_tpcds_q33(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q33', vector)
def test_tpcds_q34(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q34', vector)
def test_tpcds_q36(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q36', vector)
def test_tpcds_q37(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q37', vector)
def test_tpcds_q38(self, vector):
    # q38 runs a rewritten variant of the query (note the '-rewrite' suffix).
    self.run_test_case(self.get_workload() + '-decimal_v2-q38-rewrite', vector)
def test_tpcds_q39_1(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q39-1', vector)
def test_tpcds_q39_2(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q39-2', vector)
def test_tpcds_q40(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q40', vector)
def test_tpcds_q41(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q41', vector)
def test_tpcds_q42(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q42', vector)
def test_tpcds_q43(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q43', vector)
def test_tpcds_q45(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q45', vector)
def test_tpcds_q46(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q46', vector)
def test_tpcds_q47(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q47', vector)
def test_tpcds_q48(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q48', vector)
def test_tpcds_q50(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q50', vector)
def test_tpcds_q51(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q51', vector)
def test_tpcds_q51a(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q51a', vector)
def test_tpcds_q52(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q52', vector)
def test_tpcds_q53(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q53', vector)
def test_tpcds_q54(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q54', vector)
def test_tpcds_q55(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q55', vector)
def test_tpcds_q56(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q56', vector)
def test_tpcds_q57(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q57', vector)
def test_tpcds_q58(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q58', vector)
def test_tpcds_q59(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q59', vector)
def test_tpcds_q60(self, vector):
    self.run_test_case(self.get_workload() + '-decimal_v2-q60', vector)
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
microwave_fov_figures.py
Created on Tue Oct 26 16:11:23 2021
@author: thayer
"""
import pandas as pd
import matplotlib.pyplot as plt
import os
import numpy as np
import vtk
from vtk.util.numpy_support import vtk_to_numpy, numpy_to_vtk
import sys
sys.path.append('/home/thayer/Desktop/DavidCS/ubuntu_partition/code/pydar/')
import pydar
# SSMI incidence angle in degrees, used below to re-aim the SSMI beam
# orientation labels. NOTE(review): a later comment says SSMI measurements
# were requested at 55 degrees, but this is set to 45 -- confirm which value
# is intended.
ssmi_inc = 45
# Load scans
# Two RiSCAN lidar projects (17 Apr and 22 Apr 2020) bracketing the period
# of interest. Assumes the data lives at ../data/RS relative to the working
# directory -- confirm before running.
project_path = "../data/RS"
project_names = ["mosaic_rs_170420.RiSCAN",
                 "mosaic_rs_220420.RiSCAN",]
scan_area = pydar.ScanArea(project_path, project_names=project_names,
                           import_mode='read_scan',
                           las_fieldnames=['Points', 'PointId', 'Classification',
                                           'Reflectance'], class_list='all')
# Apply each project's saved registration so all scans share one coordinate
# frame.
for project_name in project_names:
    scan_area.project_dict[project_name].read_transforms()
    scan_area.project_dict[project_name].apply_transforms([
        'current_transform'])
# Get the labels
# The instrument labels (beam center/orientation points) were picked in
# ScanPos008 of the 17 Apr project.
ss = scan_area.project_dict["mosaic_rs_170420.RiSCAN"].scan_dict[
    'ScanPos008']
ss.load_labels()
labels = ss.get_labels()
# %% Create a dataframe to organize this
# One row per radiometer channel: instrument subcategory, which labeled mount
# location it uses, center frequency, polarization, and full beam width in
# degrees.
_beam_rows = [
    ('HUTRAD', 'left',   '10.7 GHz', 'V',    9.1),
    ('HUTRAD', 'left',   '10.7 GHz', 'H',    6.6),
    ('HUTRAD', 'middle', '18.7 GHz', 'V',    8.6),
    ('HUTRAD', 'middle', '18.7 GHz', 'H',    6.4),
    ('HUTRAD', 'right',  '6.9 GHz',  'V',    14.8),
    ('HUTRAD', 'right',  '6.9 GHz',  'H',    11.2),
    ('SSMI',   'ssmi',   '89 GHz',   'None', 5.88),
    ('SSMI',   'ssmi19', '19 GHz',   'None', 6.0),
]
df = pd.DataFrame(_beam_rows,
                  columns=['subcategory', 'location', 'frequency',
                           'polarization', 'beam_width'])
# %% modify labels dataframe accordingly
# Reshape the raw label table into one row per (subcategory, location) with
# 'ctr' and 'ori' point columns, then join onto df.
labels = labels.reset_index()
# Pack the transformed coordinates into a single list-valued 'point' column.
labels['point'] = np.vstack((labels['x_trans'], labels['y_trans'],
                             labels['z_trans'])).T.tolist()
labels.drop(columns=['category', 'project_name', 'scan_name', 'x', 'y', 'z',
                     'x_trans', 'y_trans', 'z_trans']
            , inplace=True)
# Label ids encode '<location>_<type>' where type is 'ctr' or 'ori'.
labels['location'] = labels['id'].apply(lambda x: x.split('_')[0])
labels['type'] = labels['id'].apply(lambda x: x.split('_')[1])
labels.drop(columns=['id'], inplace=True)
# Pivot so each row has both the ctr and ori points for one location.
labels = labels.pivot(index=['subcategory', 'location'], columns='type')
labels.columns = labels.columns.droplevel()
labels = labels.reset_index()
# Janna and Philip asked for SSMI measurements to all be made from a fixed
# incidence angle; the simplest way to do this is to adjust the ori points.
# NOTE(review): the request was reportedly 55 degrees, but ssmi_inc is 45 --
# confirm which is intended.
for i in np.arange(labels.shape[0]):
    if labels.at[i, 'subcategory']=='SSMI':
        # Adjust the z value of the orientation point so the beam points
        # ssmi_inc degrees above the ctr point; the ctr-to-ori distance is
        # 0.1 m.
        labels.at[i, 'ori'][2] = (0.1*np.sin(ssmi_inc*np.pi/180) +
                                  labels.at[i,'ctr'][2])
# Cartesian product df
# Duplicate every channel row once per scan date.
df = df.merge(pd.DataFrame({'project_name': project_names}), how='cross')
# and merge to get dataframe we want
df = df.merge(labels)
# %% Let's extract points for each beam
# For each (channel, date) row, carve out the lidar points falling inside an
# infinite cone along the beam axis with half-angle = beam_width / 2.
df['points'] = None
for i in np.arange(df.shape[0]):
    # Cone is only oriented along x axis, so we need to create the appropriate
    # transform to align beam axis with x axis.
    transform = vtk.vtkTransform()
    transform.PostMultiply()
    transform.Translate(-np.array(df.at[i, 'ctr']))
    vec = np.array(df.at[i, 'ori'])-np.array(df.at[i, 'ctr'])
    # Yaw about z, then pitch about y; the /.1 relies on the 0.1 m
    # ctr-to-ori spacing to turn vec[2] into a sine.
    transform.RotateZ(-np.arctan2(vec[1], vec[0])*180/np.pi)
    transform.RotateY(np.arcsin(vec[2]/.1)*180/np.pi)
    # Create cone, vtk's cone angle is half the beam width I think
    cone = vtk.vtkCone()
    cone.SetTransform(transform)
    cone.SetAngle(df.at[i,'beam_width']/2)
    # Extract points inside this cone
    extractPoints = vtk.vtkExtractPoints()
    extractPoints.SetImplicitFunction(cone)
    extractPoints.SetInputData(scan_area.project_dict[df.at[i,'project_name']]
                               .get_merged_points())
    extractPoints.Update()
    df.at[i, 'points'] = extractPoints.GetOutput()
# %% Extract z values of points
# Pull just the z coordinate of each extracted point set for height analysis.
df['pts_z'] = None
for i in np.arange(df.shape[0]):
    df.at[i, 'pts_z'] = vtk_to_numpy(df.at[i, 'points'].GetPoints()
                                     .GetData())[:,2]
    # Print the per-beam point count as a quick sanity check.
    print(df.at[i, 'pts_z'].shape)
# %% Now create plots looking at how the surface changed
# One histogram panel per unique (frequency, polarization) channel, overlaying
# the 17 Apr (blue) and 22 Apr (red) height distributions.
f, axs = plt.subplots(2, 4, sharex=True, sharey=True, figsize=(20, 8))
axs = np.ravel(axs)
df_titles = df[['frequency', 'polarization']].drop_duplicates().reset_index(
    drop=True)
for i in range(df_titles.shape[0]):
    # Apr. 17
    frequency = df_titles.at[i, "frequency"]
    polarization = df_titles.at[i, "polarization"]
    pts = df.query('project_name == "mosaic_rs_170420.RiSCAN" and '
                   'frequency == @frequency and '
                   'polarization == @polarization').pts_z
    # Mean surface height in this beam on 17 Apr (single matching row).
    m0 = np.mean(pts.values[0])
    axs[i].hist(pts, density=True, color='b', alpha=.7)
    # Apr. 22
    frequency = df_titles.at[i, "frequency"]
    polarization = df_titles.at[i, "polarization"]
    pts = df.query('project_name == "mosaic_rs_220420.RiSCAN" and '
                   'frequency == @frequency and '
                   'polarization == @polarization').pts_z
    m1 = np.mean(pts.values[0])
    axs[i].hist(pts, density=True, color='r', alpha=.7)
    axs[i].set_title(frequency + ' pol: ' + polarization)
    # Annotate the change in mean surface height between the two dates.
    axs[i].text(.6, .6, "mean change:\n" + str(round(m1-m0, 2)) + " m",
                transform=axs[i].transAxes)
axs[0].set_ylim([0, 50])
axs[4].set_xlabel('Surface Height (m)')
axs[4].set_ylabel('PDF Density')
f.savefig(os.path.join('..', 'figures', 'per_beam_heights.png'))
# %% Add columns corresponding to statistics on pts_z
# Summary statistics per (channel, date) row for the printed table below.
df['point count'] = df['pts_z'].apply(lambda x: x.shape[0])
df['mean height'] = df['pts_z'].apply(lambda x: np.mean(x))
df['std height'] = df['pts_z'].apply(lambda x: np.std(x))
df['date'] = df['project_name'].apply(lambda x: pydar.mosaic_date_parser(x))
print(df[['date', 'frequency', 'polarization', 'point count', 'mean height',
          'std height']])
# %% helper function
def cameraCallback(obj, event):
    """Print the active camera's position, focal point, and roll.

    Registered as a 'UserEvent' observer on the interactor so the user can
    press 'u' to dump the current viewpoint to stdout and copy it into the
    snapshot cell below.
    """
    # Resolve the active camera once instead of re-walking the
    # window -> renderers -> first renderer chain for every line.
    camera = (obj.GetRenderWindow().GetRenderers().GetFirstRenderer()
              .GetActiveCamera())
    print("Camera Pos: " + str(camera.GetPosition()))
    print("Focal Point: " + str(camera.GetFocalPoint()))
    print("Roll: " + str(camera.GetRoll()))
# %% Examine beams in 3D rendering, particularly, did right beam change
# Interactive visualization: the 22 Apr point cloud colored by elevation with
# translucent cones marking a subset of the beams.
z_min = -2.35
z_max = -1.85
#beam_h = 2
pdata = scan_area.project_dict['mosaic_rs_220420.RiSCAN'].get_merged_points()
# Create vertices
vertexGlyphFilter = vtk.vtkVertexGlyphFilter()
vertexGlyphFilter.SetInputData(pdata)
vertexGlyphFilter.Update()
# # Create elevation filter
elevFilter = vtk.vtkSimpleElevationFilter()
elevFilter.SetInputConnection(vertexGlyphFilter.GetOutputPort())
# needed to prevent simpleelevationfilter from overwriting
# Classification array
elevFilter.Update()
# Create mapper, hardcode LUT for now
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(elevFilter.GetOutputPort())
mapper.SetLookupTable(pydar.mplcmap_to_vtkLUT(z_min, z_max))
mapper.SetScalarRange(z_min, z_max)
mapper.SetScalarVisibility(1)
# Create Actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# Create renderer
renderer = vtk.vtkRenderer()
renderer.AddActor(actor)
# Add beams as a cone sources
# Presumably rows 1, 5, 9, 13, 15 are the 22 Apr entries for selected
# channels (the cross merge alternates dates) -- verify against df ordering.
for i in [1, 5, 9, 13, 15]:
    coneSource = vtk.vtkConeSource()
    coneSource.SetAngle(df.at[i, 'beam_width']/2)
    ctr = np.array(df.at[i, 'ctr'])
    ori = np.array(df.at[i, 'ori'])
    # Set the beam height as the greatest distance from beam to point
    # NOTE(review): the expression actually computes the RMS distance from
    # ctr to the beam's points, not the greatest distance -- comment/code
    # mismatch; confirm intent.
    beam_h = np.sqrt(np.mean(((vtk_to_numpy(df.at[i, 'points'].GetPoints().GetData())
                               - ctr)**2).sum(axis=1)))
    coneSource.SetHeight(beam_h)
    # (ori - ctr) has length 0.1 m, so *10 normalizes it; the cone center is
    # placed half a beam length behind ctr along the beam axis.
    coneSource.SetCenter(ctr - beam_h*10*(ori - ctr)/2)
    coneSource.SetDirection(ori - ctr)
    coneSource.SetResolution(50)
    coneSource.CappingOff()
    coneSource.Update()
    coneMapper = vtk.vtkPolyDataMapper()
    coneMapper.SetInputConnection(coneSource.GetOutputPort())
    coneActor = vtk.vtkActor()
    coneActor.SetMapper(coneMapper)
    coneActor.GetProperty().SetOpacity(0.5)
    renderer.AddActor(coneActor)
scalarBar = vtk.vtkScalarBarActor()
scalarBar.SetLookupTable(pydar.mplcmap_to_vtkLUT(z_min, z_max))
renderer.AddActor2D(scalarBar)
# Create RenderWindow and interactor, set style to trackball camera
renderWindow = vtk.vtkRenderWindow()
renderWindow.SetSize(1500, 1000)
renderWindow.AddRenderer(renderer)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renderWindow)
style = vtk.vtkInteractorStyleTrackballCamera()
iren.SetInteractorStyle(style)
iren.Initialize()
renderWindow.Render()
# Pressing 'u' fires a UserEvent, dumping the viewpoint via cameraCallback so
# it can be pasted into the snapshot cell below.
iren.AddObserver('UserEvent', cameraCallback)
iren.Start()
# %% repeat to save snapshot
# Offscreen re-render of the previous cell with a fixed camera (values copied
# from cameraCallback output) so the figure can be saved to PNG.
# NOTE(review): this duplicates the interactive cell nearly line-for-line;
# could be factored into a helper.
Camera_Pos = (7.633547124715694, 4.645905542839899, -0.09970566876032372)
Focal_Point = (20.93980822692692, 3.3339772969789756, -2.179099777456295)
Roll = 91.47122262160124
z_min = -2.35
z_max = -1.85
#beam_h = 2
pdata = scan_area.project_dict['mosaic_rs_220420.RiSCAN'].get_merged_points()
# Create vertices
vertexGlyphFilter = vtk.vtkVertexGlyphFilter()
vertexGlyphFilter.SetInputData(pdata)
vertexGlyphFilter.Update()
# # Create elevation filter
elevFilter = vtk.vtkSimpleElevationFilter()
elevFilter.SetInputConnection(vertexGlyphFilter.GetOutputPort())
# needed to prevent simpleelevationfilter from overwriting
# Classification array
elevFilter.Update()
# Create mapper, hardcode LUT for now
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(elevFilter.GetOutputPort())
mapper.SetLookupTable(pydar.mplcmap_to_vtkLUT(z_min, z_max))
mapper.SetScalarRange(z_min, z_max)
mapper.SetScalarVisibility(1)
# Create Actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# Create renderer
renderer = vtk.vtkRenderer()
renderer.AddActor(actor)
# Add beams as a cone sources
for i in [1, 5, 9, 13, 15]:
    coneSource = vtk.vtkConeSource()
    coneSource.SetAngle(df.at[i, 'beam_width']/2)
    ctr = np.array(df.at[i, 'ctr'])
    ori = np.array(df.at[i, 'ori'])
    # Set the beam height as the greatest distance from beam to point
    # NOTE(review): as above, this is the RMS distance, not the greatest.
    beam_h = np.sqrt(np.mean(((vtk_to_numpy(df.at[i, 'points'].GetPoints().GetData())
                               - ctr)**2).sum(axis=1)))
    coneSource.SetHeight(beam_h)
    coneSource.SetCenter(ctr - beam_h*10*(ori - ctr)/2)
    coneSource.SetDirection(ori - ctr)
    coneSource.SetResolution(50)
    coneSource.CappingOff()
    coneSource.Update()
    coneMapper = vtk.vtkPolyDataMapper()
    coneMapper.SetInputConnection(coneSource.GetOutputPort())
    coneActor = vtk.vtkActor()
    coneActor.SetMapper(coneMapper)
    coneActor.GetProperty().SetOpacity(0.5)
    renderer.AddActor(coneActor)
scalarBar = vtk.vtkScalarBarActor()
scalarBar.SetLookupTable(pydar.mplcmap_to_vtkLUT(z_min, z_max))
renderer.AddActor2D(scalarBar)
# Create RenderWindow
renderWindow = vtk.vtkRenderWindow()
renderWindow.SetSize(1500, 1000)
renderWindow.AddRenderer(renderer)
# Create Camera
camera = vtk.vtkCamera()
camera.SetFocalPoint(Focal_Point)
camera.SetPosition(Camera_Pos)
camera.SetRoll(Roll)
renderer.SetActiveCamera(camera)
renderWindow.Render()
# Screenshot image to save
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(renderWindow)
w2if.Update()
writer = vtk.vtkPNGWriter()
writer.SetFileName(os.path.join('..', 'figures', 'april_22_w_beams.png'))
writer.SetInputData(w2if.GetOutput())
writer.Write()
# Tear down the render window explicitly since no interactor owns it.
renderWindow.Finalize()
del renderWindow
# %% Finally, let's look at incident angles, first we need to create normals
# Estimate per-point surface normals using up to max_nn neighbors within
# radius meters, for both scan dates.
radius = 0.1
max_nn = 10
for project_name in project_names:
    scan_area.project_dict[project_name].create_normals(radius=radius,
                                                        max_nn=max_nn)
# %% Repeat point extraction, now this brings normals
# left and right are from the perspective looking in the direction
# that the instrument is looking!!
# Re-fetch the raw labels: the earlier `labels` frame was pivoted and merged
# into df, so it no longer has the original shape.
labels = ss.get_labels()
# One row per radiometer channel, identical content to the earlier df:
# subcategory, labeled mount location, frequency, polarization, and full beam
# width in degrees.
_beam_rows_n = [
    ('HUTRAD', 'left',   '10.7 GHz', 'V',    9.1),
    ('HUTRAD', 'left',   '10.7 GHz', 'H',    6.6),
    ('HUTRAD', 'middle', '18.7 GHz', 'V',    8.6),
    ('HUTRAD', 'middle', '18.7 GHz', 'H',    6.4),
    ('HUTRAD', 'right',  '6.9 GHz',  'V',    14.8),
    ('HUTRAD', 'right',  '6.9 GHz',  'H',    11.2),
    ('SSMI',   'ssmi',   '89 GHz',   'None', 5.88),
    ('SSMI',   'ssmi19', '19 GHz',   'None', 6.0),
]
df_n = pd.DataFrame(_beam_rows_n,
                    columns=['subcategory', 'location', 'frequency',
                             'polarization', 'beam_width'])
# %% modify labels dataframe accordingly
# Same reshaping as earlier in the script: one row per (subcategory,
# location) with 'ctr' and 'ori' point columns, then join onto df_n.
labels = labels.reset_index()
labels['point'] = np.vstack((labels['x_trans'], labels['y_trans'],
                             labels['z_trans'])).T.tolist()
labels.drop(columns=['category', 'project_name', 'scan_name', 'x', 'y', 'z',
                     'x_trans', 'y_trans', 'z_trans']
            , inplace=True)
# Label ids encode '<location>_<type>' where type is 'ctr' or 'ori'.
labels['location'] = labels['id'].apply(lambda x: x.split('_')[0])
labels['type'] = labels['id'].apply(lambda x: x.split('_')[1])
labels.drop(columns=['id'], inplace=True)
labels = labels.pivot(index=['subcategory', 'location'], columns='type')
labels.columns = labels.columns.droplevel()
labels = labels.reset_index()
# Janna and Philip asked for SSMI measurements to all be made from a fixed
# incidence angle; the simplest way to do this is to adjust the ori points.
# NOTE(review): the request was reportedly 55 degrees, but ssmi_inc is 45 --
# confirm which is intended.
for i in np.arange(labels.shape[0]):
    if labels.at[i, 'subcategory']=='SSMI':
        # Adjust the z value of the orientation point so the beam points
        # ssmi_inc degrees above the ctr point; the ctr-to-ori distance is
        # 0.1 m.
        labels.at[i, 'ori'][2] = (0.1*np.sin(ssmi_inc*np.pi/180) +
                                  labels.at[i,'ctr'][2])
# Cartesian product df_n
df_n = df_n.merge(pd.DataFrame({'project_name': project_names}), how='cross')
# and merge to get dataframe we want
df_n = df_n.merge(labels)
# %% Let's extract points for each beam
# Same extraction as earlier, but the merged point clouds now carry the
# normals computed above.
df_n['points'] = None
for i in np.arange(df_n.shape[0]):
    # Cone is only oriented along x axis, so we need to create the appropriate
    # transform to align beam axis with x axis.
    transform = vtk.vtkTransform()
    transform.PostMultiply()
    transform.Translate(-np.array(df_n.at[i, 'ctr']))
    vec = np.array(df_n.at[i, 'ori'])-np.array(df_n.at[i, 'ctr'])
    # Yaw about z, then pitch about y; /.1 relies on the 0.1 m ctr-to-ori
    # spacing.
    transform.RotateZ(-np.arctan2(vec[1], vec[0])*180/np.pi)
    transform.RotateY(np.arcsin(vec[2]/.1)*180/np.pi)
    # Create cone, vtk's cone angle is half the beam width I think
    cone = vtk.vtkCone()
    cone.SetTransform(transform)
    cone.SetAngle(df_n.at[i,'beam_width']/2)
    # Extract points inside this cone
    extractPoints = vtk.vtkExtractPoints()
    extractPoints.SetImplicitFunction(cone)
    extractPoints.SetInputData(scan_area.project_dict[df_n.at[i,'project_name']]
                               .get_merged_points())
    extractPoints.Update()
    df_n.at[i, 'points'] = extractPoints.GetOutput()
# %% Now compute the incidence angle for each point/beam
df_n['incidence angle'] = None
for i in np.arange(df_n.shape[0]):
    # BUG FIX: this previously read 'ctr' from df instead of df_n. The two
    # frames happen to hold identical ctr values here (both merge the same
    # labels in the same order), but the loop iterates df_n and must be
    # self-contained over it.
    ctr = np.array(df_n.at[i, 'ctr'])
    pts = vtk_to_numpy(df_n.at[i, 'points'].GetPoints().GetData())
    # Unit vectors from the beam origin to each extracted point.
    vec = pts - ctr
    vec = vec/np.sqrt((vec**2).sum(axis=1))[:, np.newaxis]
    nrm = vtk_to_numpy(df_n.at[i, 'points'].GetPointData().GetNormals())
    # Angle between the ray and the point normal, in degrees, shifted by -90.
    # NOTE(review): this yields the angle from the surface plane rather than
    # from the normal unless the normals are signed consistently -- confirm
    # the convention intended for 'incidence angle'.
    df_n.at[i, 'incidence angle'] = (np.arccos((vec*nrm).sum(axis=1))*180
                                     /np.pi) - 90
# %% now plot
f, axs = plt.subplots(2, 4, sharex=True, sharey=True, figsize=(20, 8))
axs = np.ravel(axs)
df_titles = df_n[['frequency', 'polarization']].drop_duplicates().reset_index(
drop=True)
for i in range(df_titles.shape[0]):
# Apr. 17
frequency = df_titles.at[i, "frequency"]
polarization = df_titles.at[i, "polarization"]
inc = df_n.query('project_name == "mosaic_rs_170420.RiSCAN" and '
'frequency == @frequency and '
'polarization == @polarization')['incidence angle']
m0 = np.mean(inc.values[0])
axs[i].hist(inc, density=True, color='b', alpha=.7)
# Apr. 22
frequency = df_titles.at[i, "frequency"]
polarization = df_titles.at[i, | |
<reponame>EnjoyLifeFund/macHighSierra-py36-pkgs
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2017 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bigip_gtm_wide_ip
short_description: Manages F5 BIG-IP GTM wide ip.
description:
- Manages F5 BIG-IP GTM wide ip.
version_added: "2.0"
options:
lb_method:
description:
- Specifies the load balancing method used to select a pool in this wide
IP. This setting is relevant only when multiple pools are configured
for a wide IP.
required: True
choices:
- round-robin
- ratio
- topology
- global-availability
name:
description:
- Wide IP name. This name must be formatted as a fully qualified
domain name (FQDN). You can also use the alias C(wide_ip) but this
is deprecated and will be removed in a future Ansible version.
required: True
aliases:
- wide_ip
type:
description:
- Specifies the type of wide IP. GTM wide IPs need to be keyed by query
type in addition to name, since pool members need different attributes
depending on the response RDATA they are meant to supply. This value
is required if you are using BIG-IP versions >= 12.0.0.
choices:
- a
- aaaa
- cname
- mx
- naptr
- srv
version_added: 2.4
state:
description:
- When C(present) or C(enabled), ensures that the Wide IP exists and
is enabled. When C(absent), ensures that the Wide IP has been
removed. When C(disabled), ensures that the Wide IP exists and is
disabled.
default: present
choices:
- present
- absent
- disabled
- enabled
version_added: 2.4
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
extends_documentation_fragment: f5
requirements:
- f5-sdk
author:
- <NAME> (@caphrim007)
'''
EXAMPLES = '''
- name: Set lb method
bigip_gtm_wide_ip:
server: "lb.mydomain.com"
user: "admin"
password: "<PASSWORD>"
lb_method: "round-robin"
name: "my-wide-ip.example.com"
delegate_to: localhost
'''
RETURN = '''
lb_method:
description: The new load balancing method used by the wide IP.
returned: changed
type: string
sample: "topology"
state:
description: The new state of the wide IP.
returned: changed
type: string
sample: "disabled"
'''
import re
from ansible.module_utils.f5_utils import (
AnsibleF5Client,
AnsibleF5Parameters,
HAS_F5SDK,
F5ModuleError,
iControlUnexpectedHTTPError
)
from distutils.version import LooseVersion
class Parameters(AnsibleF5Parameters):
    """Maps module options to/from the F5 API representation.

    Values live in self._values (provided by AnsibleF5Parameters); the
    properties below translate between Ansible option names/values and the
    names/values the BIG-IP REST API expects.
    """

    # Attributes compared against device state to detect a needed update.
    updatables = ['lb_method']

    # Attributes reported back to the user in the module result.
    returnables = ['name', 'lb_method', 'state']

    # Attributes (API-side names) sent to the device on create/modify.
    api_attributes = ['poolLbMode', 'enabled', 'disabled']

    def to_return(self):
        """Build the dict of returnable values for the module result."""
        result = {}
        for returnable in self.returnables:
            result[returnable] = getattr(self, returnable)
        result = self._filter_params(result)
        return result

    def api_params(self):
        """Build the dict of API-side parameters for create/modify calls."""
        result = {}
        for api_attribute in self.api_attributes:
            # api_map (from the base class) can alias an API attribute to a
            # differently-named property on this object.
            if self.api_map is not None and api_attribute in self.api_map:
                result[api_attribute] = getattr(self, self.api_map[api_attribute])
            else:
                result[api_attribute] = getattr(self, api_attribute)
        result = self._filter_params(result)
        return result

    @property
    def lb_method(self):
        """Validated/normalized load-balancing method.

        Raises F5ModuleError for removed methods; rewrites the deprecated
        underscore spellings to their hyphenated API equivalents, recording
        a deprecation warning in self._values['__warnings'].
        """
        # Methods that are accepted by the argument spec for backward
        # compatibility but no longer supported by this module.
        deprecated = [
            'return_to_dns', 'null', 'static_persist', 'vs_capacity',
            'least_conn', 'lowest_rtt', 'lowest_hops', 'packet_rate', 'cpu',
            'hit_ratio', 'qos', 'bps', 'drop_packet', 'explicit_ip',
            'connection_rate', 'vs_score'
        ]
        if self._values['lb_method'] is None:
            return None
        lb_method = str(self._values['lb_method'])
        if lb_method in deprecated:
            raise F5ModuleError(
                "The provided lb_method is not supported"
            )
        elif lb_method == 'global_availability':
            if self._values['__warnings'] is None:
                self._values['__warnings'] = []
            self._values['__warnings'].append(
                dict(
                    msg='The provided lb_method is deprecated',
                    version='2.4'
                )
            )
            lb_method = 'global-availability'
        elif lb_method == 'round_robin':
            if self._values['__warnings'] is None:
                self._values['__warnings'] = []
            self._values['__warnings'].append(
                dict(
                    msg='The provided lb_method is deprecated',
                    version='2.4'
                )
            )
            lb_method = 'round-robin'
        return lb_method

    @lb_method.setter
    def lb_method(self, value):
        self._values['lb_method'] = value

    @property
    def collection(self):
        """f5-sdk collection attribute name for this wide IP type.

        e.g. type 'a' lives in the 'a_s' collection on tm.gtm.wideips.
        """
        type_map = dict(
            a='a_s',
            aaaa='aaaas',
            cname='cnames',
            mx='mxs',
            naptr='naptrs',
            srv='srvs'
        )
        if self._values['type'] is None:
            return None
        wideip_type = self._values['type']
        return type_map[wideip_type]

    @property
    def type(self):
        # DNS record type of the wide IP (a, aaaa, cname, ...), as a str.
        if self._values['type'] is None:
            return None
        return str(self._values['type'])

    @property
    def name(self):
        """Wide IP name; must look like an FQDN (two dots minimum)."""
        if self._values['name'] is None:
            return None
        if not re.search(r'.*\..*\..*', self._values['name']):
            raise F5ModuleError(
                "The provided name must be a valid FQDN"
            )
        return self._values['name']

    @property
    def poolLbMode(self):
        # API-side alias: BIG-IP calls the LB method 'poolLbMode'.
        return self.lb_method

    @poolLbMode.setter
    def poolLbMode(self, value):
        self.lb_method = value

    @property
    def state(self):
        # 'enabled' is treated as 'present' for the CRUD state machine; the
        # enabled/disabled flags below carry the enablement detail.
        if self._values['state'] == 'enabled':
            return 'present'
        return self._values['state']

    @property
    def enabled(self):
        """Tri-state enablement flag sent to the API (True/False/None)."""
        if self._values['state'] == 'disabled':
            return False
        elif self._values['state'] in ['present', 'enabled']:
            return True
        elif self._values['enabled'] is True:
            return True
        else:
            return None

    @property
    def disabled(self):
        """Tri-state disablement flag sent to the API (True/False/None)."""
        if self._values['state'] == 'disabled':
            return True
        elif self._values['state'] in ['present', 'enabled']:
            return False
        elif self._values['disabled'] is True:
            return True
        else:
            return None
class ModuleManager(object):
    """Dispatches module execution to the manager matching the device's
    TMOS version: wide IPs are untyped before 12.0.0 and typed after."""

    def __init__(self, client):
        self.client = client

    def exec_module(self):
        """Run the module via the version-appropriate manager."""
        kind = 'untyped' if self.version_is_less_than_12() else 'typed'
        return self.get_manager(kind).exec_module()

    def get_manager(self, type):
        """Return the manager instance for the given kind ('typed' or
        'untyped'); any other value yields None."""
        if type == 'typed':
            return TypedManager(self.client)
        elif type == 'untyped':
            return UntypedManager(self.client)

    def version_is_less_than_12(self):
        """True when the connected device runs TMOS older than 12.0.0."""
        return LooseVersion(self.client.api.tmos_version) < LooseVersion('12.0.0')
class BaseManager(object):
    """Shared CRUD state machine for wide IPs.

    Subclasses (Typed/UntypedManager) supply the device operations:
    exists(), create_on_device(), update_on_device(), remove_from_device(),
    read_current_from_device().
    """

    def __init__(self, client):
        self.client = client
        # Device-side state, populated lazily by read_current_from_device().
        self.have = None
        # Desired state from the module parameters.
        self.want = Parameters(self.client.module.params)
        # Accumulated diff, reported back to the user.
        self.changes = Parameters()

    def _set_changed_options(self):
        # On create: every supplied returnable counts as a change.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = Parameters(changed)

    def _update_changed_options(self):
        # On update: diff wanted vs device values; returns True when an
        # update is needed.
        changed = {}
        for key in Parameters.updatables:
            if getattr(self.want, key) is not None:
                attr1 = getattr(self.want, key)
                attr2 = getattr(self.have, key)
                if attr1 != attr2:
                    changed[key] = attr1
        # Enable/disable transitions are tracked via 'state' since
        # enabled/disabled are not in updatables.
        if self.want.state == 'disabled' and self.have.enabled:
            changed['state'] = self.want.state
        elif self.want.state in ['present', 'enabled'] and self.have.disabled:
            changed['state'] = self.want.state
        if changed:
            self.changes = Parameters(changed)
            return True
        return False

    def exec_module(self):
        """Drive the present/absent state machine and build the result dict."""
        changed = False
        result = dict()
        state = self.want.state
        try:
            # 'disabled' implies the resource must exist, hence present().
            if state in ["present", "disabled"]:
                changed = self.present()
            elif state == "absent":
                changed = self.absent()
        except iControlUnexpectedHTTPError as e:
            # Normalize REST errors to the module's error type.
            raise F5ModuleError(str(e))
        changes = self.changes.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations()
        return result

    def _announce_deprecations(self):
        # Surface any deprecation warnings recorded by Parameters properties.
        warnings = []
        if self.want:
            warnings += self.want._values.get('__warnings', [])
        if self.have:
            warnings += self.have._values.get('__warnings', [])
        for warning in warnings:
            self.client.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )

    def present(self):
        # lb_method is mandatory when ensuring presence (needed for create).
        if self.want.lb_method is None:
            raise F5ModuleError(
                "The 'lb_method' option is required when state is 'present'"
            )
        if self.exists():
            return self.update()
        else:
            return self.create()

    def create(self):
        self._set_changed_options()
        # In check mode report the would-be change without touching the device.
        if self.client.check_mode:
            return True
        self.create_on_device()
        return True

    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False

    def update(self):
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.client.check_mode:
            return True
        self.update_on_device()
        return True

    def absent(self):
        if self.exists():
            return self.remove()
        return False

    def remove(self):
        if self.client.check_mode:
            return True
        self.remove_from_device()
        # Verify the deletion actually took effect.
        if self.exists():
            raise F5ModuleError("Failed to delete the Wide IP")
        return True
class UntypedManager(BaseManager):
    """Manages wide IPs on BIG-IP < 12.0.0, where all wide IPs live in a
    single untyped ``wideips.wideip`` collection."""

    def exists(self):
        """Return True when the wide IP is already configured on the device."""
        return self.client.api.tm.gtm.wideips.wideip.exists(
            name=self.want.name,
            partition=self.want.partition
        )

    def update_on_device(self):
        """Push the changed API parameters to the existing wide IP."""
        params = self.want.api_params()
        # BUG FIX: this was 'wideips.wipeip.load' (typo), which raised an
        # AttributeError from the f5-sdk on every update attempt.
        result = self.client.api.tm.gtm.wideips.wideip.load(
            name=self.want.name,
            partition=self.want.partition
        )
        result.modify(**params)

    def read_current_from_device(self):
        """Load the device-side resource and wrap its attrs for diffing."""
        resource = self.client.api.tm.gtm.wideips.wideip.load(
            name=self.want.name,
            partition=self.want.partition
        )
        result = resource.attrs
        return Parameters(result)

    def create_on_device(self):
        """Create the wide IP with the wanted API parameters."""
        params = self.want.api_params()
        self.client.api.tm.gtm.wideips.wideip.create(
            name=self.want.name,
            partition=self.want.partition,
            **params
        )

    def remove_from_device(self):
        """Delete the wide IP if it can be loaded."""
        result = self.client.api.tm.gtm.wideips.wideip.load(
            name=self.want.name,
            partition=self.want.partition
        )
        if result:
            result.delete()
class TypedManager(BaseManager):
    """Manages wide IPs on BIG-IP >= 12.0.0, where wide IPs are stored in
    per-record-type collections (a, aaaa, cname, mx, naptr, srv)."""

    def __init__(self, client):
        super(TypedManager, self).__init__(client)
        # The record type selects the API collection, so it is mandatory here.
        if self.want.type is None:
            raise F5ModuleError(
                "The 'type' option is required for BIG-IP instances "
                "greater than or equal to 12.x"
            )

    def _resource(self):
        """Navigate to api.tm.gtm.wideips.<collection>.<type>.

        This three-step lookup was previously duplicated in every CRUD
        method below; factored out for consistency.
        """
        wideips = self.client.api.tm.gtm.wideips
        collection = getattr(wideips, self.want.collection)
        return getattr(collection, self.want.type)

    def exists(self):
        """Return True when the typed wide IP exists on the device."""
        return self._resource().exists(
            name=self.want.name,
            partition=self.want.partition
        )

    def update_on_device(self):
        """Push the changed API parameters to the existing wide IP."""
        params = self.want.api_params()
        result = self._resource().load(
            name=self.want.name,
            partition=self.want.partition
        )
        result.modify(**params)

    def read_current_from_device(self):
        """Load the device-side resource and wrap its attrs for diffing."""
        result = self._resource().load(
            name=self.want.name,
            partition=self.want.partition
        )
        result = result.attrs
        return Parameters(result)

    def create_on_device(self):
        """Create the typed wide IP with the wanted API parameters."""
        params = self.want.api_params()
        self._resource().create(
            name=self.want.name,
            partition=self.want.partition,
            **params
        )

    def remove_from_device(self):
        """Delete the typed wide IP if it can be loaded."""
        result = self._resource().load(
            name=self.want.name,
            partition=self.want.partition
        )
        if result:
            result.delete()
class ArgumentSpec(object):
def __init__(self):
deprecated = [
'return_to_dns', 'null', 'round_robin', 'static_persist',
'global_availability', 'vs_capacity', 'least_conn', 'lowest_rtt',
'lowest_hops', 'packet_rate', 'cpu', 'hit_ratio', 'qos', 'bps',
'drop_packet', 'explicit_ip', 'connection_rate', 'vs_score'
]
supported = [
'round-robin', 'topology', 'ratio', 'global-availability'
]
lb_method_choices = deprecated + supported
self.supports_check_mode = True
self.argument_spec = dict(
lb_method=dict(
required=False,
choices=lb_method_choices,
default=None
),
name=dict(
required=True,
aliases=['wide_ip']
),
type=dict(
required=False,
default=None,
choices=[
'a', 'aaaa', 'cname', 'mx', | |
<reponame>Haiiliin/PyAbaqus<gh_stars>1-10
from abaqusConstants import *
from .AreaStyle import AreaStyle
from .LineStyle import LineStyle
class Area:
"""The Area object is used to display a rectangular area in an XYPlot. The Area object has
no constructor. Area objects are automatically created whenever a XYPlot, Chart,
PlotTitle, or Legend objects are created.
Attributes
----------
inset: Boolean
A Boolean specifying whether the area is inset or occupies a reserved area. The default
value is OFF.
positionMethod: SymbolicConstant
A SymbolicConstant specifying how the area is positioned. Possible values are AUTO_ALIGN
and MANUAL. The default value is AUTO_ALIGN.
alignment: SymbolicConstant
A SymbolicConstant specifying the relative position of the area in its parent when
**positionMethod=AUTO_ALIGN**. Possible values are:
- BOTTOM_LEFT
- BOTTOM_CENTER
- BOTTOM_RIGHT
- CENTER_LEFT
- CENTER
- CENTER_RIGHT
- TOP_LEFT
- TOP_CENTER
- TOP_RIGHT
The default value is BOTTOM_LEFT.
sizeMethod: SymbolicConstant
A SymbolicConstant specifying how the area size is defined. Possible values are
AUTOMATIC and MANUAL. The default value is AUTOMATIC.
width: float
A Float specifying the width of the area in mm. The default value is 1.0.
height: float
A Float specifying the height of the area in mm. The default value is 1.0.
widthScale: float
A Float specifying the scale as a fraction of the width of the available area when the
sizeMethod=MANUAL. The valid range is (0, 1). The default value is 1.0.
heightScale: float
A Float specifying the scale as a fraction of the height of the available area when the
**sizeMethod=MANUAL**. The valid range is (0, 1). The default value is 1.0.
pl: float
A Float specifying the left padding of the area in mm. The default value is 1.0.
pr: float
A Float specifying the right padding of the area in mm. The default value is 1.0.
pt: float
A Float specifying the top padding of the area in mm. The default value is 1.0.
pb: float
A Float specifying the bottom padding of the area in mm. The default value is 1.0.
style: AreaStyle
An :py:class:`~abaqus.XY.AreaStyle.AreaStyle` object specifying whether and how to fill the area.
border: LineStyle
A :py:class:`~abaqus.XY.LineStyle.LineStyle` object specifying whether and how to draw the border of the area.
origin: tuple[float]
A pair of Floats specifying the X- and Y-offsets in millimeters from the lower-left
corner of the XYPlot.
originOffset: tuple[float]
A pair of Floats specifying the X- and Y-offsets of the origin as a fraction of the
available area. The **originOffset** argument is ignored unless **positionMethod=MANUAL**.
The default value is (-1, 0). The valid range for each float is (0, 1).
Notes
-----
This object can be accessed by:
.. code-block:: python
import visualization
session.charts[name].area
session.charts[name].gridArea
session.charts[name].legend.area
session.defaultChartOptions.gridArea
session.defaultChartOptions.legend.area
session.defaultPlot.area
session.defaultPlot.title.area
session.xyPlots[name].area
session.xyPlots[name].charts[name].area
session.xyPlots[name].charts[name].gridArea
session.xyPlots[name].charts[name].legend.area
session.xyPlots[name].title.area
"""
# A Boolean specifying whether the area is inset or occupies a reserved area. The default
# value is OFF.
inset: Boolean = OFF
# A SymbolicConstant specifying how the area is positioned. Possible values are AUTO_ALIGN
# and MANUAL. The default value is AUTO_ALIGN.
positionMethod: SymbolicConstant = AUTO_ALIGN
# A SymbolicConstant specifying the relative position of the area in its parent when
# *positionMethod*=AUTO_ALIGN. Possible values are:
# - BOTTOM_LEFT
# - BOTTOM_CENTER
# - BOTTOM_RIGHT
# - CENTER_LEFT
# - CENTER
# - CENTER_RIGHT
# - TOP_LEFT
# - TOP_CENTER
# - TOP_RIGHT
# The default value is BOTTOM_LEFT.
alignment: SymbolicConstant = BOTTOM_LEFT
# A SymbolicConstant specifying how the area size is defined. Possible values are
# AUTOMATIC and MANUAL. The default value is AUTOMATIC.
sizeMethod: SymbolicConstant = AUTOMATIC
# A Float specifying the width of the area in mm. The default value is 1.0.
width: float = 1
# A Float specifying the height of the area in mm. The default value is 1.0.
height: float = 1
# A Float specifying the scale as a fraction of the width of the available area when the
# sizeMethod=MANUAL. The valid range is (0, 1). The default value is 1.0.
widthScale: float = 1
# A Float specifying the scale as a fraction of the height of the available area when the
# *sizeMethod*=MANUAL. The valid range is (0, 1). The default value is 1.0.
heightScale: float = 1
# A Float specifying the left padding of the area in mm. The default value is 1.0.
pl: float = 1
# A Float specifying the right padding of the area in mm. The default value is 1.0.
pr: float = 1
# A Float specifying the top padding of the area in mm. The default value is 1.0.
pt: float = 1
# A Float specifying the bottom padding of the area in mm. The default value is 1.0.
pb: float = 1
# An AreaStyle object specifying whether and how to fill the area.
style: AreaStyle = AreaStyle()
# A LineStyle object specifying whether and how to draw the border of the area.
border: LineStyle = LineStyle()
# A pair of Floats specifying the X- and Y-offsets in millimeters from the lower-left
# corner of the XYPlot.
origin: tuple[float] = ()
# A pair of Floats specifying the X- and Y-offsets of the origin as a fraction of the
# available area. The *originOffset* argument is ignored unless *positionMethod*=MANUAL.
# The default value is (-1, 0). The valid range for each float is (0, 1).
originOffset: tuple[float] = ()
def setValues(self, area: 'Area' = None, style: AreaStyle = AreaStyle(), border: LineStyle = LineStyle(),
positionMethod: SymbolicConstant = AUTO_ALIGN,
alignment: SymbolicConstant = BOTTOM_LEFT, sizeMethod: SymbolicConstant = AUTOMATIC,
originOffset: tuple[float] = (), widthScale: float = 1, heightScale: float = 1,
inset: Boolean = OFF, pl: float = 1, pr: float = 1, pt: float = 1, pb: float = 1):
"""This method modifies the Area object.
Parameters
----------
area
An Area object from which attributes are to be copied.
style
An AreaStyle object.
border
A LineStyle object.
positionMethod
A SymbolicConstant specifying how the area is positioned. Possible values are AUTO_ALIGN
and MANUAL. The default value is AUTO_ALIGN.
alignment
A SymbolicConstant specifying the relative position of the area in its parent when
*positionMethod*=AUTO_ALIGN. Possible values are:
- BOTTOM_LEFT
- BOTTOM_CENTER
- BOTTOM_RIGHT
- CENTER_LEFT
- CENTER
- CENTER_RIGHT
- TOP_LEFT
- TOP_CENTER
- TOP_RIGHT
The default value is BOTTOM_LEFT.
sizeMethod
A SymbolicConstant specifying how the area size is defined. Possible values are
AUTOMATIC and MANUAL. The default value is AUTOMATIC.
originOffset
A pair of Floats specifying the X- and Y-offsets of the origin as a fraction of the
available area. The *originOffset* argument is ignored unless *positionMethod*=MANUAL.
The default value is (-1, 0). The valid range for each float is (0, 1).
widthScale
A Float specifying the scale as a fraction of the width of the available area when the
sizeMethod=MANUAL. The valid range is (0, 1). The default value is 1.0.
heightScale
A Float specifying the scale as a fraction of the height of the available area when the
*sizeMethod*=MANUAL. The valid range is (0, 1). The default value is 1.0.
inset
A Boolean specifying whether the area is inset or occupies a reserved area. The default
value is OFF.
pl
A Float specifying the left padding of the area in mm. The default value is 1.0.
pr
A Float specifying the right padding of the area in mm. The default value is 1.0.
pt
A Float specifying the top padding of the area in mm. The default value is 1.0. | |
not support Index from Snapshot",
1572: "Storage unit must be specified for this operation",
1573: "Backup image cannot be expired because its SLP processing is not yet complete",
1574: "Data Classification name cannot be 'Any' while creating new data classification",
1575: "Data Classification auto creation failed",
1576: "Topology validation failed",
1577: "Storage unit in the SLP does not match the accelerator attribute in policy",
1578: "Invalid window close options",
1579: "One or more images were not processed because the window closed",
1580: "VMware policy with PFI enabled requires an SLP",
1581: "Non-application consistent VMware policy is not compatible with snapdupe operations",
1582: "Application consistent VMware policy requires VM quiesce",
1583: "VMware policy with PFI enabled requires VIP auto discovery",
1584: "VMware policy with 'Persistent Frozen Image' enabled requires schedule type of Full Backup",
1585: "Backup image cannot be expired because not all dependent copies are expired",
1586: "SLP operation was canceled",
1587: "Storage lifecycle policy cannot have both target and untarget replication to remote master",
1588: "Target master server is already used in one of the replications to remote master Explanation: Recommended Action: Define all targeted replication operations with distinct target master server. Click here to view technical notes and other information in the Veritas Knowledge Base about this status code.",
1589: "Cannot connect to specified target master server",
1590: "Cannot find specified target import SLP",
1591: "No import SLP(s) found with compatible replication target device.",
1592: "Trusted master servers are being referred by one or more Storage Lifecycle Policies (SLPs) on the source or target domain.",
1593: "Replication Director for VMware policy requires mapped backups",
1594: "Failed to determine disk media ID",
1596: "Select a storage lifecycle policy that has no snapshot operation as a policy’s Storage Destination",
1597: "Replication Director for Oracle policy requires an SLP",
1598: "Oracle policy with PFI and FI enabled requires an SLP",
1599: "Application schedule storage selection cannot be a snapshot SLP",
1600: "The Policy storage is a snapshot SLP and the Application schedule does not override the policy storage selection. Snapshot SLP storage is not allowed on an Application schedule.",
1601: "Full schedule requires a snapshot SLP",
1602: "The Policy storage is not a snapshot SLP and the Full schedule does not override the policy storage selection. Snapshot SLP storage is required on the Full schedule.",
1603: "Failed to save target SLP volume information",
1604: "No import SLP(s) found with compatible data class.",
1800: "Invalid client list",
1915: "Cannot delete instance group that contains instances (delete or move instances first)",
1916: "Database error, cannot access the instance repository",
1917: "Cannot add instance group, this group name is already in use",
1918: "Cannot find a group by this name",
1919: "This instance or instance group was modified by another process, refresh before editing",
1920: "An instance with this name and client already exists",
1921: "The specified instance cannot be found",
1922: "Domain is a required field for Windows instances",
1925: "The requested operation(s) failed",
1926: "The entry specified already exists",
1927: "The entry specified does not exist",
1928: "The credentials for 1 or more instances could not be verified",
2000: "Unable to allocate new media for backup, storage unit has none available.",
2001: "No drives are available for this job",
2002: "Invalid STU identifier type",
2003: "Drive is not allocated.",
2004: "Drive is already allocated",
2005: "MDS has received an invalid message from a media server.",
2006: "NDMP credentials are not defined in EMM.",
2007: "Storage unit is not compatible with requesting job",
2008: "All compatible drive paths are down",
2009: "All compatible drive paths are down but media is available",
2010: "Job type is invalid",
2011: "The media server reported a system error",
2012: "Media has conflicts in EMM",
2013: "Error record insert failed",
2014: "Media is not assigned",
2015: "Media is expired",
2016: "Media is assigned to another server",
2017: "Media needs to be unmounted from a drive",
2018: "Number of cleanings is invalid",
2019: "Media is in a drive that is not configured on local system",
2020: "Robotic library is down on server",
2021: "Allocation record insert failed",
2022: "Allocation status record insert failed",
2023: "Allocation identifier is not known to EMM",
2024: "Allocation request update failed",
2025: "Allocation request delete failed",
2026: "Allocation status request delete failed",
2027: "Media server is not active",
2028: "Media is reserved",
2029: "EMM database is inconsistent",
2030: "Insufficient disk space or high water mark would be exceeded",
2031: "Media is not defined in EMM",
2032: "Media is in use according to EMM",
2033: "Media has been misplaced",
2034: "Retry the allocation request later",
2035: "Request needs to pend",
2036: "Drive is in a robotic library that is up",
2037: "Drive is not ready",
2038: "Media loaded in drive is not write-enabled",
2039: "SCSI reservation conflict detected",
2040: "Maximum job count has been reached for the storage unit",
2041: "Storage unit is down",
2042: "Density mismatch detected",
2043: "Requested slot is empty",
2044: "Media is assigned to another application",
2045: "Storage unit is disabled since max job count is less than 1",
2046: "Media is unmountable",
2047: "Media is write protected",
2048: "Media is in use by the ACS robotic library",
2049: "Media not found in the ACS robotic library",
2050: "ACS media has an unreadable external label",
2051: "ACS media is not in the drive's domain",
2052: "An ACS Library Storage Module (LSM) is offline",
2053: "Media is in an inaccessible drive",
2054: "Media is in a drive that is currently in a DOWN state",
2055: "ACS physical drive is not available",
2056: "The file name used for the mount request already exists",
2057: "The scan host of the drive is not active",
2058: "LTID needs to be restarted on media servers before the device can be used",
2059: "The robotic library is not available",
2060: "Media needs to be rewound or unmounted from a drive",
2061: "The host is not an active node of a cluster",
2062: "Throttled job count has been reached for the storage unit",
2063: "Server is not licensed for the Remote Client Option",
2064: "Job history indicates that no media is available",
2065: "Job history indicates that no drive is available",
2066: "Disk pool not found",
2067: "Disk volume not found",
2068: "Disk volume mount point not found",
2069: "Disk volume mount point record insert failed",
2070: "The specified mount path will not fit in the allocated space",
2071: "Unable to find any storage servers for the request",
2072: "Invalid operation on static mount point",
2073: "Disk pool is down",
2074: "Disk volume is down",
2075: "Fibre Transport resources are not available",
2076: "DSM returned an unexpected error",
2078: "The maximum number of mounts for the disk volume have been exceeded",
2079: "DSM has detected that an invalid file system is mounted on the volume",
2080: "Disk volume has no max writers count",
2081: "Disk volume has no max readers count",
2082: "The drive needs to be marked as available",
2083: "The media affinity group is not defined in EMM",
2084: "Media affinity group record insert failed",
2085: "Disk volume is not available",
2086: "Disk volume cannot be used for more than one copy in the same job",
2087: "Media allocation would exceed maximum partially full media limit",
2088: "Cleaning media is not available",
2089: "FT client is not running",
2090: "FT client has no devices configured",
2091: "FT client devices are offline",
2092: "FT server devices for client are offline",
2093: "No FT servers for this client are running",
2094: "STU cannot run Lifecycle backups",
2095: "STU cannot run VMware backup",
2096: "NDMP operation does not support multiple inline copies",
2097: "Storage unit group does not exist in EMM configuration",
2098: "Media pool is not eligible for this job",
2099: "Required drive or drive path is not configured",
2100: "Maximum number of mounts has been exceeded for tape media",
2101: "Media server not found in EMM database",
2102: "Storage unit does not support spanning",
2103: "Media server mismatch",
2104: "Storage units are not available",
2105: "Storage unit requested for replication job is not replication capable",
2106: "Disk storage server is down",
2107: "Requested media server does not have credentials or is not configured for the storage server",
2108: "Requested NDMP machine does not have credentials or is not configured in NetBackup",
2109: "Requested Fibre Transport client machine was not found in NetBackup configuration",
2110: "Requested machine is not configured in NetBackup",
2111: "All storage units are configured with On Demand Only and are not eligible for jobs requesting ANY storage unit",
2112: "NetBackup media server version is too low for the operation",
2113: "Invalid or no disk array credentials are added for vserver",
2504: "Direct expiration | |
width;
:param `ySize`: if not ``None``, the new L{ImageBar} height.
"""
if not isinstance(colour, wx.Colour):
colour = wx.NamedColour(colour)
if self._hasBitmap:
bitmap = self._hasBitmap
else:
bitmap = zoombackgrey.GetImage()
if xSize is not None:
self._size = wx.Size(xSize, ySize)
bitmap.Rescale(self._size.width, self._size.height/2)
r1, g1, b1 = self._startColour.Red(), self._startColour.Green(), self._startColour.Blue()
r2, g2, b2 = colour.Red(), colour.Green(), colour.Blue()
fr = (r1 > 0 and [float(r2)/r1] or [0])[0]
fg = (g1 > 0 and [float(g2)/g1] or [0])[0]
fb = (b1 > 0 and [float(b2)/b1] or [0])[0]
bitmap = bitmap.AdjustChannels(fr, fg, fb, 1)
self._bitmap = bitmap.ConvertToBitmap()
self._endColour = colour
def GetBarColour(self):
""" Returns the background button bar colour. """
return self._endColour
class ZoomBarEvent(wx.PyCommandEvent):
    """ Command event emitted by L{ZoomBar} when one of its buttons is activated. """

    def __init__(self, eventType, eventId=1):
        """
        Default class constructor.

        :param `eventType`: the event type;
        :param `eventId`: the event identifier.
        """
        wx.PyCommandEvent.__init__(self, eventType, eventId)
        self._eventType = eventType

    def SetSelection(self, selection):
        """
        Stores the index of the activated button.

        :param `selection`: an integer indicating the currently selected button.
        """
        self._selection = selection

    def GetSelection(self):
        """ Returns the index of the activated button. """
        return self._selection

    def SetLabel(self, label):
        """
        Stores the text label of the activated button.

        :param `label`: the text label of the selected button.
        """
        self._label = label

    def GetLabel(self):
        """ Returns the text label of the activated button. """
        return self._label
class ZoomBar(wx.PyControl):
"""
    ZoomBar is a class that *approximately* mimics the behaviour of the Mac Dock,
inside a `wx.Panel`.
This is the main class implementation.
"""
    def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize,
                 name="ZoomBar"):
        """
        Default class constructor.

        :param `parent`: the L{ZoomBar} parent. Must not be ``None``;
        :param `id`: window identifier. A value of -1 indicates a default value;
        :param `pos`: the control position. A value of (-1, -1) indicates a default position,
         chosen by either the windowing system or wxPython, depending on platform;
        :param `size`: the control size. A value of (-1, -1) indicates a default size,
         chosen by either the windowing system or wxPython, depending on platform;
        :param `name`: the window name.
        """
        wx.PyControl.__init__(self, parent, id, pos, size, style=wx.BORDER_THEME)
        # Zoom from the center. If True button zooms upwards.
        self._centerZoom = False
        # Whether you want reflections or not
        self._showReflections = True
        # Allows us to nudge a reflection closer to original
        self._nudgeReflection = 0
        # Extension of the reflection. BMP or PNG etc.
        # NOTE(review): the comment above refers to an attribute that is no longer
        # assigned here -- confirm it was removed intentionally.
        # Initial size of the buttons
        self._buttonSize = 48
        # Show labels on hovering
        self._showLabels = True
        # used internally
        self._noResize = False
        # Normal buttons and their (optional) reflection counterparts.
        self._buttons = []
        self._reflectionButtons = []
        # Background bar bitmap drawn behind the buttons.
        self._imgBar = ImageBar()
        # Indices of the previously / currently hovered buttons (-1 = none).
        self._previousHit = -1
        self._currentHit = -1
        # Run an initial "mouse left" pass once the control is fully constructed.
        wx.CallLater(200, self.OnLeaveWindow, None)
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
        self.Bind(wx.EVT_SIZE, self.OnSize)
        self.Bind(wx.EVT_MOTION, self.OnMotion)
        self.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeaveWindow)
        self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
        self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
        if wx.Platform == "__WXMSW__":
            # On Windows a double click does not produce a second EVT_LEFT_DOWN,
            # so treat it as a plain left click.
            self.Bind(wx.EVT_LEFT_DCLICK, self.OnLeftDown)
        # Everything is painted in OnPaint; suppress the system background erase.
        self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
def DoGetBestSize(self):
"""
Gets the size which best suits the window: for a control, it would be the
minimal size which doesn't truncate the control, for a panel - the same size
as it would have after a call to `Fit()`.
"""
xSize = self._buttonSize*len(self._buttons) + len(self._buttons) + self._buttonSize
ySize = self._buttonSize*2
if self._showLabels:
dc = wx.ClientDC(self)
dummy, yextent = dc.GetTextExtent("Ajgt")
ySize += yextent
return wx.Size(xSize+50, ySize+20)
# reposition the buttons
def Reposition(self, toButton):
"""
Repositions all the buttons inside the L{ZoomBar}.
:param `toButton`: the button currently hovered by the mouse (and hence
zoomed).
"""
nLeft = toButton._left
nRight = toButton._left + toButton._width + 1
nButton = self._buttons.index(toButton)
# do any buttons on the right
for n in xrange(nButton + 1, len(self._buttons)):
oButton = self._buttons[n]
oButton._left = nRight
if self._showReflections:
oButtonR = self._reflectionButtons[n]
oButtonR._left = nRight
nRight = nRight + oButton._width + 1
# Reset
nLeft = toButton._left
# now to the left
if nButton > 0:
# only for 2nd and more
for n in xrange(nButton-1, -1, -1):
oButton = self._buttons[n]
oButton._left = nLeft - (oButton._width + 1)
if self._showReflections:
oButtonR = self._reflectionButtons[n]
oButtonR._left = oButton._left
nLeft = oButton._left
# method to add required buttons
def AddButton(self, normalBmp, reflectionBmp=wx.NullBitmap, label="", disabledBmp=wx.NullBitmap,
disabledReflectionBmp=wx.NullBitmap):
"""
Adds a button to L{ZoomBar}.
:param `normalBmp`: the button main bitmap, an instance of `wx.Bitmap`;
:param `reflectionBmp`: a bitmap representing a reflection of the main bitmap,
an instance of `wx.Bitmap`;
:param `label`: the button label;
:param `disabledBmp`: the button main bitmap when the button is in a disabled
state, an instance of `wx.Bitmap`;
:param `disabledReflectionBmp`: a bitmap representing a reflection of the main bitmap,
when the button is in a disabled state, an instance of `wx.Bitmap`.
"""
button = ZoomBarImage(self, normalBmp, disabledBmp, label)
button.SetSize(self._buttonSize, self._buttonSize)
button._centerZoom = (self._showReflections and [False] or [self._centerZoom])[0]
self._buttons.append(button)
self.InitialReposition()
if self._showReflections and reflectionBmp.IsOk():
rbutton = ZoomBarImage(self, reflectionBmp, disabledReflectionBmp)
rbutton.SetSize(self._buttonSize, self._buttonSize)
rbutton._centerzoom = False
rbutton._isAReflection = True
self._reflectionButtons.append(rbutton)
return button
def AddSeparator(self, normalBmp, reflectionBmp=wx.NullBitmap):
"""
Adds a separator to L{ZoomBar}.
:param `normalBmp`: the separator main bitmap, an instance of `wx.Bitmap`;
:param `reflectionBmp`: a bitmap representing a reflection of the main bitmap,
an instance of `wx.Bitmap`.
"""
button = self.AddButton(normalBmp, reflectionBmp)
button._isSeparator = True
def SetZoomFactor(self, zoom):
"""
Sets the zoom factor for all the buttons. Larger number gives a greater zoom
effect.
:param `zoom`: a floating point number, greater than or equal to 1.0.
"""
if zoom < 1:
raise Exception("The zoom factor must be greater or equal to 1")
for button in self._buttons:
button._zoomFactor = zoom
self._zoomFactor = zoom
self.DoLayout()
def GetZoomFactor(self):
""" Returns the current zoom factor. """
return self._zoomFactor
def SetCenterZoom(self, center=True):
"""
Sets to zoom from the center.
:param `center`: if ``True`` button zooms upwards.
"""
self._centerZoom = center
for button in self._buttons:
button._centerZoom = (self._showReflections and [False] or [self._centerZoom])[0]
self.DoLayout()
def GetCenterZoom(self):
""" Returns ``True`` if buttons zoom upwards. """
return self._centerZoom
def SetShowReflections(self, show):
"""
Sets whether to show reflections or not.
:param `show`: ``True`` to show reflections, ``False`` otherwise.
"""
self._showReflections = show
self.DoLayout()
def GetShowReflections(self):
""" Returns ``True`` if reflections bitmap are currently shown. """
return self._showReflections
def SetShowLabels(self, show):
"""
Sets whether to show button labels or not.
:param `show`: ``True`` to show button labels, ``False`` otherwise.
"""
self._showLabels = show
self.DoLayout()
def GetShowLabels(self):
""" Returns ``True`` if button labels are currently shown. """
return self._showLabels
def SetBarColour(self, colour):
"""
Sets the background button bar colour.
:param `colour`: an instance of `wx.Colour`;
"""
self._imgBar.SetBarColour(colour)
self.Refresh()
def GetBarColour(self):
""" Returns the background button bar colour. """
return self._imgBar.GetBarColour()
def SetButtonSize(self, size):
"""
Sets the original button size.
:param `size`: the new (not-zoomed) button size, in pixels.
"""
self._buttonSize = size
self.DoLayout()
def GetButtonSize(self):
""" Returns the original (not zoomed) button size, in pixels. """
return self._buttonSize
def EnableButton(self, index, enable=True):
"""
Enables/disables the button at position `index`.
:param `index`: the index of the button to enable/disable;
:param `enable`: ``True`` to enable the button, ``False`` to disable it.
"""
if index < 0 or index >= len(self._buttons):
return False
self._buttons[index].Enable(enable)
self.Refresh()
return True
def IsButtonEnabled(self, index):
"""
Returns ``True`` if the button at position `index` is enabled, ``False``
otherwise.
:param `index`: the index of the button to check.
"""
if index < 0 or index >= len(self._buttons):
return False
return self._buttons[index].IsEnabled()
def DoLayout(self):
""" Common method to re-layout L{ZoomBar}. """
self.ResetSize()
self.GetContainingSizer().Layout()
self.Refresh()
def ResetSize(self):
"""
Resets all the button sizes and positions, recalculating the optimal L{ZoomBar}
size.
"""
xSize = self._buttonSize*len(self._buttons) + len(self._buttons) + self._buttonSize
ySize = self._buttonSize*2
self._imgBar.SetSize(xSize+self._buttonSize, ySize)
for button in self._buttons:
button.LoopScales(self._buttonSize)
if self._showLabels:
dc = wx.ClientDC(self)
dummy, yextent = dc.GetTextExtent("Ajgt")
ySize += yextent
if self._showReflections:
ySize += self._buttonSize/2
if self._centerZoom:
ySize += self._buttonSize
size = wx.Size(xSize+50, ySize)
self.SetInitialSize(size)
self.SnapToBottom(size)
# Sets up the initial buttons and sizes them from the center
def InitialReposition(self):
"""
Sets up the initial buttons and sizes them from the center.
"""
# repositions the button centrally
# odd buttons one is central - even, half by half
if not self._buttons:
return
size = self.GetSize()
oButton = self._buttons[0]
totalWidth = oButton._width*len(self._buttons) + len(self._buttons)
| |
<reponame>automl/nes<gh_stars>10-100
import os
import argparse
import dill as pickle
import matplotlib.pyplot as plt
import matplotlib
import pandas as pd
import numpy as np
from math import pi
from matplotlib.ticker import FormatStrFormatter
from pathlib import Path
import seaborn as sns
from nes.ensemble_selection.config import BUDGET, PLOT_EVERY
# Non-interactive backend: figures are only written to disk, never displayed.
# NOTE(review): called after `pyplot` is imported; recent matplotlib allows
# switching the backend at this point -- confirm for the pinned version.
matplotlib.use("Agg")
# Global styling shared by every figure produced by this script.
plt.rcParams['axes.grid'] = True
plt.rcParams['grid.linestyle'] = 'dotted'
plt.rcParams['font.size'] = 14
# Command-line interface: where to load evaluation pickles from, where to save
# plots, and which methods / ensemble sizes / dataset / runs to include.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--esa",
    type=str,
    default="beam_search",
    help="Ensemble selection algorithm. See nes/ensemble_selection/esas.py. Default: beam_search.",
)
parser.add_argument(
    "--Ms", type=int, nargs="+", help="A sequence of ensemble sizes (M's) to plot.",
)
parser.add_argument(
    "--save_dir", type=str, help="Directory to save plots.",
)
parser.add_argument(
    "--load_plotting_data_dir",
    type=str,
    help="Directory where outputs of evaluate_ensembles.py are saved.",
)
parser.add_argument(
    "--methods", type=str, nargs="+", help="A sequence of method names to plot."
)
parser.add_argument(
    "--dataset", choices=["cifar10", "cifar100", "fmnist", "imagenet", "tiny"], type=str, help="Dataset."
)
# Default [''] means "no run subdirectory": paths below join with an empty string.
parser.add_argument(
    "--runs", type=str, default=[''], nargs='+', help="Subdirectories in load_plotting_data_dir over which to average runs."
)
args = parser.parse_args()
# ---------------------------------------------------------------------------- #
# Helper functions #
# ---------------------------------------------------------------------------- #
def merge_and_fill_trajectories(pandas_data_frames, default_value=None):
# merge all trajectories keeping all time steps
df = pd.DataFrame().join(pandas_data_frames, how='outer')
# forward fill to make it a propper step function
df = df.fillna(method='ffill')
if default_value is None:
# backward fill to replace the NaNs for the early times by the
# performance of a random configuration
df = df.fillna(method='bfill')
else:
df = df.fillna(default_value)
return df
def get_trajectories(losses, iterations):
    """
    Put per-run loss trajectories onto a common time grid.

    Parameters
    ----------
    losses : list of list of float
        One loss trajectory per run.
    iterations : list of list
        The time/iteration stamps for each run, parallel to `losses`.

    Returns
    -------
    tuple of numpy.ndarray
        ``(times, values)`` where `times` is the union of all time stamps and
        `values` has shape ``(n_runs, len(times))`` with step-function filling.
    """
    # One single-column DataFrame per run, indexed by its own time stamps.
    dfs = [
        pd.DataFrame({str(i): loss}, index=iteration)
        for i, (loss, iteration) in enumerate(zip(losses, iterations))
    ]
    df = merge_and_fill_trajectories(dfs, default_value=None)
    # (Removed a dead `if df.empty: pass` branch; it had no effect.)
    return np.array(df.index), np.array(df.T)
def _invert(x, limits):
"""inverts a value x on a scale from
limits[0] to limits[1]"""
return limits[1] - (x - limits[0])
def _scale_data(data, ranges):
"""scales data[1:] to ranges[0],
inverts if the scale is reversed"""
for d, (y1, y2) in zip(data[1:], ranges[1:]):
assert (y1 <= d <= y2) or (y2 <= d <= y1)
x1, x2 = ranges[0]
d = data[0]
if x1 > x2:
d = _invert(d, (x1, x2))
x1, x2 = x2, x1
sdata = [d]
for d, (y1, y2) in zip(data[1:], ranges[1:]):
if y1 > y2:
d = _invert(d, (y1, y2))
y1, y2 = y2, y1
sdata.append((d-y1) / (y2-y1)
* (x2 - x1) + x1)
return sdata
def cart2pol(x, y):
    """Convert Cartesian coordinates (x, y) to polar (rho, phi)."""
    radius = np.sqrt(x ** 2 + y ** 2)
    angle = np.arctan2(y, x)
    return (radius, angle)
def pol2cart(rho, phi):
    """Convert polar coordinates (rho, phi) to Cartesian (x, y)."""
    return (rho * np.cos(phi), rho * np.sin(phi))
class ComplexRadar():
    """
    Radar ("spider") chart in which every variable has its own, independently
    scaled radial axis.

    From: https://datascience.stackexchange.com/questions/6084/how-do-i-create-a-complex-radar-chart
    """
    def __init__(self, fig, variables, ranges,
                 n_ordinate_levels=4):
        # One spoke per variable; the half-step offset rotates the whole chart
        # so labels sit between gridlines.
        angles = np.arange(0, 360, 360./len(variables)) + 360./len(variables)/2 # added offset to rotate whole thing
        # One overlaid polar axes per variable; each carries its own r-scale.
        axes = [fig.add_axes([0.1,0.1,0.9,0.9],polar=True,
                label = "axes{}".format(i))
                for i in range(len(variables))]
        l, text = axes[0].set_thetagrids(angles,
                                         labels=variables)
        # this doesnt seem to work for polar plots, i.e. it doesn't rotate anything
        # [txt.set_rotation(angle - 180) for txt, angle
        # in zip(text, angles)]
        # attempting this instead (actually realized this is done below...)
        # Re-draw each spoke label as a text artist rotated to follow its spoke.
        labels = []
        for label, angle in zip(axes[0].get_xticklabels(), angles):
            x,y = label.get_position()
            lab = axes[0].text(x,y, label.get_text(), transform=label.get_transform(),
                               ha=label.get_ha(), va=label.get_va())
            lab.set_rotation(angle - 90 if angle < 180 else angle + 90)
            labels.append(lab)
        axes[0].set_xticklabels([])
        # Hide everything except the radial grids on the secondary axes.
        for ax in axes[1:]:
            ax.patch.set_visible(False)
            ax.grid("off")
            ax.xaxis.set_visible(False)
        for i, ax in enumerate(axes):
            grid = np.linspace(*ranges[i],
                               num=n_ordinate_levels)
            gridlabel = ["{}".format(round(x,2))
                         for x in grid]
            if ranges[i][0] > ranges[i][1]:
                # Reversed range: left as a debug print -- inversion is handled
                # later in _scale_data rather than here.
                print(grid)
                #grid = grid[::-1] # hack to invert grid
                # gridlabels aren't reversed
            gridlabel[0] = "" # clean up origin
            gridlabel[-1] = ""
            ax.set_rgrids(grid, labels=gridlabel,
                          angle=angles[i])
            #ax.spines["polar"].set_visible(False)
            ax.set_ylim(*ranges[i])
            # NOTE(review): `tick.label` is deprecated in newer matplotlib
            # (use `tick.label1`) -- works on the version this repo pins.
            for tick in ax.xaxis.get_major_ticks():
                tick.label.set_fontsize(10)
            for tick in ax.yaxis.get_major_ticks():
                tick.label.set_fontsize(10)
        # variables for plotting: closed angle loop, ranges, and the main axes.
        self.angle = np.deg2rad(np.r_[angles, angles[0]])
        self.ranges = ranges
        self.ax = axes[0]
    def plot(self, data, annotate=False, *args, **kw):
        # Scale `data` onto the first axis' range and close the polygon.
        sdata = _scale_data(data, self.ranges)
        self.ax.plot(self.angle, np.r_[sdata, sdata[0]], *args, **kw)
    def fill(self, data, *args, **kw):
        # Same as `plot` but fills the polygon interior.
        sdata = _scale_data(data, self.ranges)
        self.ax.fill(self.angle, np.r_[sdata, sdata[0]], *args, **kw)
# ===================================
# Plot things
# ===================================
# Pretty axis labels for each metric key.
metric_label = {"loss": "NLL", "error": "Error", "ece": "ECE"}
# Line colour per method (keyed by the pool name used in the pickle files).
colors = {
    "nes_rs": "forestgreen",
    "deepens_rs": "dodgerblue",
    "nes_re": "crimson",
    "deepens_darts": "black",
    "deepens_gdas": "cyan",
    "deepens_minimum": "dodgerblue",
    "deepens_amoebanet": "darkorange",
}
# Marker shape per method.
markers = {
    'nes_rs': 'v',
    'deepens_rs': 'h',
    'nes_re': 'x',
    'deepens_minimum': '^',
    'deepens_darts': '<',
    'deepens_gdas': '.',
    'deepens_amoebanet': '>'
}
# Legend label per method.
label_names = {
    'nes_rs': 'NES-RS',
    'deepens_rs': 'DeepEns (RS)',
    'nes_re': 'NES-RE',
    'deepens_minimum': 'DeepEns (best arch.)',
    'deepens_darts': 'DeepEns (DARTS)',
    'deepens_gdas': 'DeepEns (GDAS)',
    'deepens_amoebanet': 'DeepEns (AmoebaNet)',
}
# Human-readable titles for the evaluation attributes stored in the pickles.
ens_attr_to_title = {
    "evals": "Ensemble",
    "avg_baselearner_evals": "Average baselearner",
    "oracle_evals": "Oracle ensemble",
}
SAVE_DIR = args.save_dir
#data_types = ["test", "val"]
# Only the test split and severity 0 (clean data) are plotted here.
data_type = "test"
ens_attrs = ["evals", "avg_baselearner_evals", "oracle_evals", "disagreement"]
#severities = range(6) if (args.dataset in ["cifar10", "cifar100", "tiny"]) else range(1)
severity = 0
# Fixed ensemble size used for the radar chart.
M = 3
# Radar-chart data: one entry per method for each of the six radial axes
# (np.zeros yields float placeholders that are filled in below).
data_dict = {
    'methods': [x for x in args.methods],
    'Ensemble NLL': list(np.zeros(len(args.methods))),
    'Error': list(np.zeros(len(args.methods))),
    'ECE': list(np.zeros(len(args.methods))),
    'Avg. base learner NLL': list(np.zeros(len(args.methods))),
    'Oracle NLL': list(np.zeros(len(args.methods))),
    '1 - Pred. Disagr.': list(np.zeros(len(args.methods)))
}
# Maps metric keys from the pickles to the data_dict axis names above.
metric_to_dict = {'loss': 'Ensemble NLL', 'error': 'Error', 'ece': 'ECE'}
# Populate data_dict: for every (ensemble attribute, metric) pair, read each
# method's pickled plotting data and store the final (largest-budget) value
# at that method's row index.
for ens_attr in ens_attrs:
    for metric in ["loss", "error", "ece",
                   "normalized_disagreement"]:
        # XOR guard: the "disagreement" attribute is only processed with the
        # disagreement metric, and vice versa.
        if (ens_attr == "disagreement") ^ ("disagreement" in metric):
            continue
        for pool_name in args.methods:
            if pool_name in ["nes_rs", "nes_re"]:
                # NES methods: average the metric trajectory over all runs.
                xs = []
                ys = []
                for plot_dir in [os.path.join(args.load_plotting_data_dir, p) for p in args.runs]:
                    with open(
                        os.path.join(
                            plot_dir,
                            f"plotting_data__esa_{args.esa}_M_{M}_pool_{pool_name}.pickle",
                        ),
                        "rb",
                    ) as f:
                        plotting_data = pickle.load(f)
                    x = plotting_data[str(M)][str(severity)][ens_attr][args.esa][
                        pool_name
                    ].x
                    yy = plotting_data[str(M)][str(severity)][ens_attr][args.esa][
                        pool_name
                    ].y
                    y = [item[data_type][str(severity)][metric] for item in yy]
                    xs.append(x)
                    ys.append(y)
                assert len(xs) == len(ys)
                # NOTE(review): set(xs) requires each x to be hashable (e.g. a
                # tuple); this would raise TypeError if x were a list — confirm.
                assert len(set(xs)) == 1
                y = np.array(ys).mean(axis=0)
                # plot only the last value
                idx = data_dict['methods'].index(pool_name)
                if (metric in ["loss", "error", "ece"]) and (ens_attr == 'evals'):
                    data_dict[metric_to_dict[metric]][idx] = y[-1]
                elif (metric == 'loss') and (ens_attr ==
                                             'avg_baselearner_evals'):
                    data_dict['Avg. base learner NLL'][idx] = y[-1]
                elif (metric == 'loss') and (ens_attr ==
                                             'oracle_evals'):
                    data_dict['Oracle NLL'][idx] = y[-1]
                elif (metric == 'normalized_disagreement') and (ens_attr ==
                                                                'disagreement'):
                    data_dict['1 - Pred. Disagr.'][idx] = 1 - y[-1]
            elif pool_name in ["deepens_darts", "deepens_pcdarts",
                               "deepens_amoebanet", "deepens_gdas",
                               "deepens_minimum"]:
                # Fixed-architecture deep ensembles: a single scalar per metric,
                # loaded from the first run's directory when runs are given.
                if args.runs != ['']:
                    with open(
                        os.path.join(
                            args.load_plotting_data_dir,
                            args.runs[0],
                            f"plotting_data__M_{M}_pool_{pool_name}.pickle",
                        ),
                        "rb",
                    ) as f:
                        plotting_data = pickle.load(f)
                else:
                    with open(
                        os.path.join(
                            args.load_plotting_data_dir,
                            f"plotting_data__M_{M}_pool_{pool_name}.pickle",
                        ),
                        "rb",
                    ) as f:
                        plotting_data = pickle.load(f)
                yy = plotting_data[str(M)][str(severity)][ens_attr][pool_name].y
                y = yy[data_type][str(severity)][metric]
                # plot only the last value
                idx = data_dict['methods'].index(pool_name)
                if (metric in ["loss", "error", "ece"]) and (ens_attr == 'evals'):
                    data_dict[metric_to_dict[metric]][idx] = y
                elif (metric == 'loss') and (ens_attr ==
                                             'avg_baselearner_evals'):
                    data_dict['Avg. base learner NLL'][idx] = y
                elif (metric == 'loss') and (ens_attr ==
                                             'oracle_evals'):
                    data_dict['Oracle NLL'][idx] = y
                elif (metric == 'normalized_disagreement') and (ens_attr ==
                                                                'disagreement'):
                    data_dict['1 - Pred. Disagr.'][idx] = 1 - y
            elif pool_name in ["deepens_rs"]:
                # DeepEns (RS): pad each run's trajectory to the full budget,
                # align them with get_trajectories, then take the final mean.
                xs = []
                ys = []
                for plot_dir in [os.path.join(args.load_plotting_data_dir, p) for p in args.runs]:
                    with open(
                        os.path.join(
                            plot_dir,
                            f"plotting_data__M_{M}_pool_{pool_name}.pickle",
                        ),
                        "rb",
                    ) as f:
                        plotting_data = pickle.load(f)
                    x = plotting_data[str(M)][str(severity)][ens_attr][pool_name].x
                    yy = plotting_data[str(M)][str(severity)][ens_attr][pool_name].y
                    y = [item[data_type][str(severity)][metric] for item in yy]
                    # extend line until end of plot.
                    x = x + [BUDGET]
                    y = y + [y[-1]]
                    xs.append(x)
                    ys.append(y)
                x, all_pools = get_trajectories(ys, xs)
                mean = np.mean(all_pools, axis=0)
                # plot only the last value
                idx = data_dict['methods'].index(pool_name)
                if (metric in ["loss", "error", "ece"]) and (ens_attr == 'evals'):
                    data_dict[metric_to_dict[metric]][idx] = mean[-1]
                elif (metric == 'loss') and (ens_attr ==
                                             'avg_baselearner_evals'):
                    data_dict['Avg. base learner NLL'][idx] = mean[-1]
                elif (metric == 'loss') and (ens_attr ==
                                             'oracle_evals'):
                    data_dict['Oracle NLL'][idx] = mean[-1]
                elif (metric == 'normalized_disagreement') and (ens_attr ==
                                                                'disagreement'):
                    data_dict['1 - Pred. Disagr.'][idx] = mean[-1]
# Build the radar chart: one axis per metric column, one polygon per method.
categories = list(data_dict.keys())
categories.remove('methods')
N = len(data_dict['methods'])
df = pd.DataFrame(data_dict)
########################
fig = plt.figure(figsize=(6, 6))
# Axis (min, max) per category, in data_dict column order:
#loss, error, ece, avg, dis, orc
ranges = [(1.75, 2.0), (0.45, 0.51), (0.02, 0.035), (1.98, 2.32), (1.28, 1.65), (0., 0.2)]
#ranges = [(2.0, 1.5), (0.51, 0.45), (0.035, 0.02), (1.9, 2.5), (0.8, 1.0)]
radar = ComplexRadar(fig, categories, ranges)
# Draw one outline + translucent fill per method row.
for i in range(N):
    values=df.loc[i].drop('methods').values.flatten().tolist()
    # import pdb; pdb.set_trace()
    # idx_to_modify = categories.index("Pred. Disagr.")
    # values_after_modif = [v if idx != idx_to_modify else 1 - v for idx, v in enumerate(values)]
    # NOTE(review): `annotate=False if i==0 else False` always yields False.
    radar.plot(values, annotate=False if i==0 else False,
               linewidth=2, color=colors[data_dict['methods'][i]], linestyle='solid',
               marker=markers[data_dict['methods'][i]], markersize=8,
               label=label_names[data_dict['methods'][i]])
    radar.fill(values, color=colors[data_dict['methods'][i]], alpha=.05)
# Put a legend below current axis
radar.ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05),
                fancybox=True, shadow=False, ncol=3)
plt.grid(color='#AAAAAA')
radar.ax.set_facecolor('#FAFAFA')
radar.ax.spines['polar'].set_color('#222222')
# Go through labels and adjust alignment based on where
# it is in the circle.
# Flip angles on the left half of the circle so the label text stays upright.
angles = np.linspace(0,2*np.pi,len(radar.ax.get_xticklabels())+1)
angles[np.cos(angles) < 0] = angles[np.cos(angles) < 0] + np.pi
angles = np.rad2deg(angles)
labels = []
for label, angle in zip(radar.ax.get_xticklabels(), angles):
label.set_horizontalalignment('center')
x,y = label.get_position()
lab = radar.ax.text(x,y, label.get_text(), transform=label.get_transform(),
ha=label.get_ha(), va=label.get_va())
if label.get_text() in ['Error', 'Avg. base learner NLL', 'Ensemble NLL']:
offset = 180
| |
self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class QueryServiceGroupMessageReadStatusRequest(TeaModel):
    """Request model for querying read/unread status of a service-group message."""

    def __init__(
        self,
        ding_isv_org_id: int = None,
        ding_org_id: int = None,
        ding_token_grant_type: int = None,
        ding_suite_key: str = None,
        open_team_id: str = None,
        open_conversation_id: str = None,
        open_msg_task_id: str = None,
        next_token: str = None,
        max_results: int = None,
    ):
        self.ding_isv_org_id = ding_isv_org_id
        self.ding_org_id = ding_org_id
        self.ding_token_grant_type = ding_token_grant_type
        self.ding_suite_key = ding_suite_key
        # Open team ID.
        self.open_team_id = open_team_id
        # Open conversation (group) ID.
        self.open_conversation_id = open_conversation_id
        # Open message task ID.
        self.open_msg_task_id = open_msg_task_id
        # Pagination cursor; empty/None means start from the beginning.
        self.next_token = next_token
        # Max records per page (optional; server applies a default; capped at 100).
        self.max_results = max_results

    def validate(self):
        pass

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        # Emit each non-None field under its wire-format key.
        for key, value in (
            ('dingIsvOrgId', self.ding_isv_org_id),
            ('dingOrgId', self.ding_org_id),
            ('dingTokenGrantType', self.ding_token_grant_type),
            ('dingSuiteKey', self.ding_suite_key),
            ('openTeamId', self.open_team_id),
            ('openConversationId', self.open_conversation_id),
            ('openMsgTaskId', self.open_msg_task_id),
            ('nextToken', self.next_token),
            ('maxResults', self.max_results),
        ):
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        # Copy each present wire-format key onto its attribute.
        for key, attr in (
            ('dingIsvOrgId', 'ding_isv_org_id'),
            ('dingOrgId', 'ding_org_id'),
            ('dingTokenGrantType', 'ding_token_grant_type'),
            ('dingSuiteKey', 'ding_suite_key'),
            ('openTeamId', 'open_team_id'),
            ('openConversationId', 'open_conversation_id'),
            ('openMsgTaskId', 'open_msg_task_id'),
            ('nextToken', 'next_token'),
            ('maxResults', 'max_results'),
        ):
            value = m.get(key)
            if value is not None:
                setattr(self, attr, value)
        return self
class QueryServiceGroupMessageReadStatusResponseBodyRecords(TeaModel):
    """One read/unread record of a service-group message receiver."""

    def __init__(
        self,
        receiver_user_id: str = None,
        receiver_union_id: str = None,
        read_status: int = None,
        receiver_name: str = None,
        receiver_ding_talk_id: str = None,
    ):
        # Set when the receiver is an internal employee of the organization.
        self.receiver_user_id = receiver_user_id
        # Set when the receiver is NOT an employee of the organization.
        self.receiver_union_id = receiver_union_id
        # Read status: 1 = read, 0 = unread.
        self.read_status = read_status
        # Receiver's nickname.
        self.receiver_name = receiver_name
        # Receiver's dingtalkId.
        self.receiver_ding_talk_id = receiver_ding_talk_id

    def validate(self):
        pass

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        # Emit each non-None field under its wire-format key.
        for key, value in (
            ('receiverUserId', self.receiver_user_id),
            ('receiverUnionId', self.receiver_union_id),
            ('readStatus', self.read_status),
            ('receiverName', self.receiver_name),
            ('receiverDingTalkId', self.receiver_ding_talk_id),
        ):
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        # Copy each present wire-format key onto its attribute.
        for key, attr in (
            ('receiverUserId', 'receiver_user_id'),
            ('receiverUnionId', 'receiver_union_id'),
            ('readStatus', 'read_status'),
            ('receiverName', 'receiver_name'),
            ('receiverDingTalkId', 'receiver_ding_talk_id'),
        ):
            value = m.get(key)
            if value is not None:
                setattr(self, attr, value)
        return self
class QueryServiceGroupMessageReadStatusResponseBody(TeaModel):
    """Paged response body with read/unread records of a group message."""

    def __init__(
        self,
        total_count: int = None,
        next_token: str = None,
        max_results: int = None,
        records: List[QueryServiceGroupMessageReadStatusResponseBodyRecords] = None,
    ):
        # Total number of matching records (optional; may be omitted by server).
        self.total_count = total_count
        # Cursor for the next call; empty means all data has been read.
        self.next_token = next_token
        # Maximum number of records returned in this call.
        self.max_results = max_results
        # List of read/unread records.
        self.records = records

    def validate(self):
        # Validate every non-empty nested record.
        if self.records:
            for record in self.records:
                if record:
                    record.validate()

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        for key, value in (
            ('totalCount', self.total_count),
            ('nextToken', self.next_token),
            ('maxResults', self.max_results),
        ):
            if value is not None:
                result[key] = value
        # 'records' is always emitted, even when self.records is None
        # (keeps the wire format identical to the generated code).
        result['records'] = []
        if self.records is not None:
            for record in self.records:
                result['records'].append(record.to_map() if record else None)
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for key, attr in (
            ('totalCount', 'total_count'),
            ('nextToken', 'next_token'),
            ('maxResults', 'max_results'),
        ):
            value = m.get(key)
            if value is not None:
                setattr(self, attr, value)
        # records is always reset, then repopulated from the payload if present.
        self.records = []
        if m.get('records') is not None:
            for item in m.get('records'):
                self.records.append(
                    QueryServiceGroupMessageReadStatusResponseBodyRecords().from_map(item)
                )
        return self
class QueryServiceGroupMessageReadStatusResponse(TeaModel):
    """Full API response: transport headers plus the parsed body."""

    def __init__(
        self,
        headers: Dict[str, str] = None,
        body: QueryServiceGroupMessageReadStatusResponseBody = None,
    ):
        self.headers = headers
        self.body = body

    def validate(self):
        # Both headers and body are mandatory on a response.
        self.validate_required(self.headers, 'headers')
        self.validate_required(self.body, 'body')
        if self.body:
            self.body.validate()

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        headers, body = self.headers, self.body
        if headers is not None:
            result['headers'] = headers
        if body is not None:
            result['body'] = body.to_map()
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('body') is not None:
            self.body = QueryServiceGroupMessageReadStatusResponseBody().from_map(m['body'])
        return self
class AddLibraryHeaders(TeaModel):
    """Request headers for the AddLibrary API call."""

    def __init__(
        self,
        common_headers: Dict[str, str] = None,
        x_acs_dingtalk_access_token: str = None,
    ):
        self.common_headers = common_headers
        self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token

    def validate(self):
        pass

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        for key, value in (
            ('commonHeaders', self.common_headers),
            ('x-acs-dingtalk-access-token', self.x_acs_dingtalk_access_token),
        ):
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for key, attr in (
            ('commonHeaders', 'common_headers'),
            ('x-acs-dingtalk-access-token', 'x_acs_dingtalk_access_token'),
        ):
            value = m.get(key)
            if value is not None:
                setattr(self, attr, value)
        return self
class AddLibraryRequest(TeaModel):
    """Request model for creating a knowledge library."""

    def __init__(
        self,
        ding_token_grant_type: int = None,
        ding_isv_org_id: int = None,
        ding_suite_key: str = None,
        ding_org_id: int = None,
        open_team_ids: List[str] = None,
        title: str = None,
        description: str = None,
        type: str = None,
        source: str = None,
        source_primary_key: str = None,
        user_id: str = None,
    ):
        self.ding_token_grant_type = ding_token_grant_type
        self.ding_isv_org_id = ding_isv_org_id
        self.ding_suite_key = ding_suite_key
        self.ding_org_id = ding_org_id
        # List of open team IDs.
        self.open_team_ids = open_team_ids
        # Knowledge-library name.
        self.title = title
        # Knowledge-library description.
        self.description = description
        # Library type: INTERNAL (internal library) or EXTERNAL (external library).
        self.type = type
        # Knowledge source.
        self.source = source
        # Unique key identifying the library in the source system.
        self.source_primary_key = source_primary_key
        # Staff (employee) ID.
        self.user_id = user_id

    def validate(self):
        pass

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        # Emit each non-None field under its wire-format key.
        for key, value in (
            ('dingTokenGrantType', self.ding_token_grant_type),
            ('dingIsvOrgId', self.ding_isv_org_id),
            ('dingSuiteKey', self.ding_suite_key),
            ('dingOrgId', self.ding_org_id),
            ('openTeamIds', self.open_team_ids),
            ('title', self.title),
            ('description', self.description),
            ('type', self.type),
            ('source', self.source),
            ('sourcePrimaryKey', self.source_primary_key),
            ('userId', self.user_id),
        ):
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        # Copy each present wire-format key onto its attribute.
        for key, attr in (
            ('dingTokenGrantType', 'ding_token_grant_type'),
            ('dingIsvOrgId', 'ding_isv_org_id'),
            ('dingSuiteKey', 'ding_suite_key'),
            ('dingOrgId', 'ding_org_id'),
            ('openTeamIds', 'open_team_ids'),
            ('title', 'title'),
            ('description', 'description'),
            ('type', 'type'),
            ('source', 'source'),
            ('sourcePrimaryKey', 'source_primary_key'),
            ('userId', 'user_id'),
        ):
            value = m.get(key)
            if value is not None:
                setattr(self, attr, value)
        return self
class AddLibraryResponseBody(TeaModel):
    """Response body of the AddLibrary API: a single success flag."""

    def __init__(
        self,
        success: bool = None,
    ):
        # Whether the call succeeded.
        self.success = success

    def validate(self):
        pass

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.success is not None:
            result['success'] = self.success
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        value = m.get('success')
        if value is not None:
            self.success = value
        return self
class AddLibraryResponse(TeaModel):
    """Full AddLibrary API response: transport headers plus the parsed body."""

    def __init__(
        self,
        headers: Dict[str, str] = None,
        body: AddLibraryResponseBody = None,
    ):
        self.headers = headers
        self.body = body

    def validate(self):
        # Both headers and body are mandatory on a response.
        self.validate_required(self.headers, 'headers')
        self.validate_required(self.body, 'body')
        if self.body:
            self.body.validate()

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        headers, body = self.headers, self.body
        if headers is not None:
            result['headers'] = headers
        if body is not None:
            result['body'] = body.to_map()
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('body') is not None:
            self.body = AddLibraryResponseBody().from_map(m['body'])
        return self
class QueryGroupHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: | |
= gate.calc_proj_ineq_constraint()
# Assert
expected_choi = np.array(
[[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]]
)
npt.assert_almost_equal(actual.to_choi_matrix(), expected_choi, decimal=14)
expected_hs = np.array(
[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, -1, 0], [0, 0, 0, -1]]
)
npt.assert_almost_equal(actual.hs, expected_hs, decimal=15)
assert actual.composite_system is c_sys
assert actual.is_physicality_required is gate.is_physicality_required
assert actual.is_estimation_object is gate.is_estimation_object
assert actual.on_para_eq_constraint is gate.on_para_eq_constraint
assert actual.on_algo_eq_constraint is gate.on_algo_eq_constraint
assert actual.on_algo_ineq_constraint is gate.on_algo_ineq_constraint
assert actual.eps_proj_physical is gate.eps_proj_physical
def test_calc_proj_ineq_constraint_with_var(self):
e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
c_sys = CompositeSystem([e_sys])
gate = get_x(c_sys)
# case 1: on_para_eq_constraint: default(True)
actual = gate.calc_proj_ineq_constraint_with_var(c_sys, gate.to_var())
expected = np.array([0, 1, 0, 0, 0, 0, -1, 0, 0, 0, 0, -1], dtype=np.float64)
npt.assert_almost_equal(actual, expected, decimal=15)
# case 2: on_para_eq_constraint=True
actual = gate.calc_proj_ineq_constraint_with_var(
c_sys, gate.to_var(), on_para_eq_constraint=True
)
expected = np.array([0, 1, 0, 0, 0, 0, -1, 0, 0, 0, 0, -1], dtype=np.float64)
npt.assert_almost_equal(actual, expected, decimal=15)
# case 3: on_para_eq_constraint=False
var = np.array([1, 0, 0, 0, 0, 1, 0, 0, 0, 0, -1, 0, 0, 0, 0, -1])
actual = gate.calc_proj_ineq_constraint_with_var(
c_sys, var, on_para_eq_constraint=False
)
expected = np.array(
[1, 0, 0, 0, 0, 1, 0, 0, 0, 0, -1, 0, 0, 0, 0, -1],
dtype=np.float64,
)
npt.assert_almost_equal(actual, expected, decimal=15)
def test_convert_var_to_stacked_vector(self):
e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
c_sys = CompositeSystem([e_sys])
gate = get_x(c_sys)
expected = np.array(
[1, 0, 0, 0, 0, 1, 0, 0, 0, 0, -1, 0, 0, 0, 0, -1], dtype=np.float64
)
# case 1: on_para_eq_constraint: default(True)
var = np.array([0, 1, 0, 0, 0, 0, -1, 0, 0, 0, 0, -1], dtype=np.float64)
actual = gate.convert_var_to_stacked_vector(c_sys, var)
npt.assert_almost_equal(actual, expected, decimal=15)
# Case 2: on_para_eq_constraint=True
var = np.array([0, 1, 0, 0, 0, 0, -1, 0, 0, 0, 0, -1], dtype=np.float64)
actual = gate.convert_var_to_stacked_vector(
c_sys, var, on_para_eq_constraint=True
)
npt.assert_almost_equal(actual, expected, decimal=15)
# Case 3: on_para_eq_constraint=False
var = np.array(
[1, 0, 0, 0, 0, 1, 0, 0, 0, 0, -1, 0, 0, 0, 0, -1], dtype=np.float64
)
actual = gate.convert_var_to_stacked_vector(
c_sys, var, on_para_eq_constraint=False
)
npt.assert_almost_equal(actual, expected, decimal=15)
def test_convert_stacked_vector_to_var(self):
e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
c_sys = CompositeSystem([e_sys])
gate = get_x(c_sys)
stacked_vector = np.array(
[1, 0, 0, 0, 0, 1, 0, 0, 0, 0, -1, 0, 0, 0, 0, -1], dtype=np.float64
)
# case 1: on_para_eq_constraint: default(True)
actual = gate.convert_stacked_vector_to_var(c_sys, stacked_vector)
expected = np.array([0, 1, 0, 0, 0, 0, -1, 0, 0, 0, 0, -1], dtype=np.float64)
npt.assert_almost_equal(actual, expected, decimal=15)
# Case 2: on_para_eq_constraint=True
actual = gate.convert_stacked_vector_to_var(
c_sys, stacked_vector, on_para_eq_constraint=True
)
expected = np.array([0, 1, 0, 0, 0, 0, -1, 0, 0, 0, 0, -1], dtype=np.float64)
npt.assert_almost_equal(actual, expected, decimal=15)
# Case 3: on_para_eq_constraint=False
actual = gate.convert_stacked_vector_to_var(
c_sys, stacked_vector, on_para_eq_constraint=False
)
expected = np.array(
[1, 0, 0, 0, 0, 1, 0, 0, 0, 0, -1, 0, 0, 0, 0, -1], dtype=np.float64
)
npt.assert_almost_equal(actual, expected, decimal=15)
def test_is_tp():
    """Verify is_tp accepts a TP gate and rejects a non-TP HS matrix.

    Fixes: removed an unused local ``Gate(...)`` construction and replaced
    the un-idiomatic ``== True`` / ``== False`` comparisons with direct
    truth-value assertions.
    """
    e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
    c_sys = CompositeSystem([e_sys])
    # case: TP
    z = get_z(c_sys)
    assert is_tp(c_sys, z.hs)
    # case: not TP
    hs = np.array(
        [[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], dtype=np.float64
    )
    assert not is_tp(c_sys, hs)
def test_is_cp():
    """Verify is_cp accepts the Pauli gates and rejects a non-CP HS matrix.

    Fixes: replaced un-idiomatic ``== True`` / ``== False`` comparisons with
    direct truth-value assertions and folded the three identical CP checks
    into one loop.
    """
    e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
    c_sys = CompositeSystem([e_sys])
    # case: CP — each Pauli gate's HS matrix passes the check
    for make_gate in (get_x, get_y, get_z):
        assert is_cp(c_sys, make_gate(c_sys).hs)
    # case: not CP
    hs = np.array(
        [[-1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], dtype=np.float64
    )
    gate = Gate(c_sys, hs, is_physicality_required=False)
    assert not is_cp(c_sys, gate.hs)
def test_to_hs_from_choi():
    """Round-trip HS -> Choi -> HS for the Pauli gates and an arbitrary
    (non-physical) HS matrix."""
    e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
    c_sys = CompositeSystem([e_sys])
    # Cases 1-3: the X, Y, Z gates recover their HS matrix exactly.
    for make_gate in (get_x, get_y, get_z):
        gate = make_gate(c_sys)
        recovered = to_hs_from_choi(c_sys, gate.to_choi_matrix())
        npt.assert_almost_equal(recovered, gate.hs, decimal=15)
    # Case 4: an arbitrary non-physical HS matrix (looser tolerance).
    hs = np.array(
        [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]],
        dtype=np.float64,
    )
    gate = Gate(c_sys=c_sys, hs=hs, is_physicality_required=False)
    recovered = to_hs_from_choi(c_sys, gate.to_choi_matrix())
    npt.assert_almost_equal(recovered, gate.hs, decimal=14)
def test_to_hs_from_choi_with_dict():
    """Round-trip HS -> Choi -> HS via the dict-based converter for the
    Pauli gates."""
    e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
    c_sys = CompositeSystem([e_sys])
    for make_gate in (get_x, get_y, get_z):
        gate = make_gate(c_sys)
        recovered = to_hs_from_choi_with_dict(c_sys, gate.to_choi_matrix())
        npt.assert_almost_equal(recovered, gate.hs, decimal=15)
def test_to_hs_from_choi_with_sparsity():
    """Round-trip HS -> Choi -> HS via the sparsity-aware converter for the
    Pauli gates."""
    e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
    c_sys = CompositeSystem([e_sys])
    for make_gate in (get_x, get_y, get_z):
        gate = make_gate(c_sys)
        recovered = to_hs_from_choi_with_sparsity(c_sys, gate.to_choi_matrix())
        npt.assert_almost_equal(recovered, gate.hs, decimal=15)
def test_to_hs_from_kraus_matrices():
    """The Kraus representation of each Pauli gate converts back to the
    original HS matrix."""
    e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
    c_sys = CompositeSystem([e_sys])
    for make_gate in (get_x, get_y, get_z):
        gate = make_gate(c_sys)
        kraus = gate.to_kraus_matrices()
        npt.assert_almost_equal(
            to_hs_from_kraus_matrices(c_sys, kraus), gate.hs, decimal=15
        )
def test_convert_var_index_to_gate_index():
    """Var indices map to gate entry (3, 3): index 11 with the equality
    constraint (default and explicit) and index 15 without it."""
    e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
    c_sys = CompositeSystem([e_sys])
    assert convert_var_index_to_gate_index(c_sys, 11) == (3, 3)
    assert convert_var_index_to_gate_index(
        c_sys, 11, on_para_eq_constraint=True
    ) == (3, 3)
    assert convert_var_index_to_gate_index(
        c_sys, 15, on_para_eq_constraint=False
    ) == (3, 3)
def test_convert_gate_index_to_var_index():
    """Gate entry (3, 3) maps to var index 11 with the equality constraint
    (default and explicit) and to index 15 without it."""
    e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
    c_sys = CompositeSystem([e_sys])
    assert convert_gate_index_to_var_index(c_sys, (3, 3)) == 11
    assert convert_gate_index_to_var_index(
        c_sys, (3, 3), on_para_eq_constraint=True
    ) == 11
    assert convert_gate_index_to_var_index(
        c_sys, (3, 3), on_para_eq_constraint=False
    ) == 15
def test_convert_var_to_gate():
    """convert_var_to_gate rebuilds the same HS matrix from the reduced
    12-element var (equality constraint on) and the full 16-element var
    (constraint off)."""
    e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
    c_sys = CompositeSystem([e_sys])
    expected = np.array(
        [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, -1, 0], [0, 0, 0, -1]], dtype=np.float64
    )
    reduced_var = np.array(
        [0, 1, 0, 0, 0, 0, -1, 0, 0, 0, 0, -1], dtype=np.float64
    )
    full_var = np.array(
        [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, -1, 0, 0, 0, 0, -1], dtype=np.float64
    )
    cases = [
        (reduced_var, {}),                                  # Case 1: default
        (reduced_var, {"on_para_eq_constraint": True}),     # Case 2: explicit True
        (full_var, {"on_para_eq_constraint": False}),       # Case 3: full vector
    ]
    for var, kwargs in cases:
        actual = convert_var_to_gate(c_sys, var, **kwargs)
        npt.assert_almost_equal(actual.hs, expected, decimal=15)
def test_convert_var_to_gate_2q():
# Arrange
e_sys0 = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
e_sys1 = ElementalSystem(1, matrix_basis.get_normalized_pauli_basis())
c_sys_2q = CompositeSystem([e_sys0, e_sys1])
# Case 1:
# Arrange
| |
val)
def add_sub_time(time_name, sub_name, val):
    """Accumulate one timing sample under time_dict[time_name][sub_name].

    Each entry is a (count, total_time) tuple; a missing sub-entry starts
    at (0, 0).
    """
    bucket = time_dict[time_name]
    count, total = bucket.get(sub_name, (0, 0))
    bucket[sub_name] = (count + 1, total + val)
# --------------- Distributed matrix multiplication -------------
def matmul(a, b, reduction=False, out=None):
    """Distributed matrix multiply of ramba arrays ``a`` and ``b``.

    Dispatches to one of several strategies depending on how the operands and
    the output are distributed across workers:

    * 1-D @ 1-D: scalar dot product computed as ``(a * b).sum()``.
    * Output not distributed: per-worker partial products reduced on the
      driver, with three sub-cases depending on how ``b`` is distributed
      relative to ``a``.
    * General case: one remote "matmul" call per worker writes the
      distributed output in place.

    Parameters
    ----------
    a, b :
        Operands; 1-D inputs are promoted to 2-D and the result reshaped
        back at the end.  Only 1-D and 2-D inputs are supported.
    reduction : bool
        Internal flag used by the recursive partial-product path; when True
        the general path returns the async worker handles instead of waiting
        for results (the recursive caller waits on them itself).
    out :
        Optional preallocated output array; its shape must match the
        computed output shape.

    Returns
    -------
    The product array (a scalar for the 1-D/1-D case), or a list of async
    worker handles when ``reduction`` is True on the general path.
    """
    dprint(1, "starting matmul", a.shape, b.shape)
    pre_matmul_start_time = timer()
    ashape = a.shape
    bshape = b.shape
    # Handle the 1D x 1D case.
    if len(ashape) == 1 and len(bshape) == 1:
        assert ashape[0] == bshape[0]
        # shortcut
        return (a * b).sum()
    # Promote 1-D operands to 2-D; remember which axis to strip at the end.
    aextend = False
    bextend = False
    if len(ashape) == 1:
        aextend = True
        a = reshape(a, (1, ashape[0]))
        ashape = a.shape
    if len(bshape) == 1:
        # b is only flagged, not reshaped; the shape check below still holds.
        bextend = True
    if len(ashape) > 2 or len(bshape) > 2:
        print("matmul for matrices higher than 2 dimensions not currently supported.")
        assert 0
    assert ashape[1] == bshape[0]
    if bextend:
        out_shape = (ashape[0],)
    else:
        out_shape = (ashape[0], bshape[1])
    if out is not None:
        assert out.shape == out_shape
        out_ndarray = out
    else:
        out_ndarray = empty(out_shape, dtype=np.result_type(a.dtype, b.dtype))
    pre_matmul_end_time = timer()
    tprint(
        2,
        "pre_matmul_total_time:",
        pre_matmul_end_time - pre_matmul_start_time,
        ashape,
        bshape,
    )
    # Unique tags for the a/b send-receive exchanges between workers.
    a_send_recv = uuid.uuid4()
    b_send_recv = uuid.uuid4()
    # If the output is not distributed then do the compute localized and then reduce.
    if not reduction and do_not_distribute(out_shape):
        dprint(2, "matmul output is not distributed and is not recursive")
        # Flush any pending deferred ops before inspecting distributions.
        if ntiming >= 1:
            sync_start_time = timer()
            sync()
            sync_end_time = timer()
            tprint(
                2,
                "matmul_sync_total_time:",
                sync_end_time - sync_start_time,
                ashape,
                bshape,
            )
        else:
            deferred_op.do_ops()
        matmul_start_time = timer()
        adivs = shardview.distribution_to_divisions(a.distribution)
        bdivs = shardview.distribution_to_divisions(b.distribution)
        dprint(4, "matmul adivs:", adivs, "\n", adivs[:, :, 0], "\n", adivs[:, :, 1])
        dprint(3, "matmul bdivs:", bdivs, "\n", bdivs[:, :, 0])
        if do_not_distribute(bshape):
            # Case 1: b is small enough to replicate — ship the whole local
            # copy of b to every worker alongside that worker's piece of a.
            dprint(2, "matmul b matrix is not distributed")
            blocal = b.asarray()
            dprint(2, "blocal", blocal.shape, blocal)
            worker_info = []
            workers = []
            matmul_workers = []
            reduction_slicing_start_time = timer()
            reduction_slicing_end_time = timer()
            launch_start_time = timer()
            matmul_workers = remote_async_call_all(
                "matmul",
                out_shape,
                out_ndarray.gid,
                out_ndarray.distribution,
                a.gid,
                a.size,
                a.distribution,
                blocal,
                0,
                0,
                bextend,
                a_send_recv,
                b_send_recv,
            )
            launch_end_time = timer()
            launch_total = launch_end_time - launch_start_time
            worker_timings = get_results(matmul_workers)
            post_get_end_time = timer()
            if not fast_reduction:
                # Driver-side reduction: sum each worker's partial result
                # (slot 11 of the per-worker result tuple).
                reduce_start_time = timer()
                redres = functools.reduce(operator.add, [x[11] for x in worker_timings])
                reduce_end_time = timer()
                out_ndarray[:] = fromarray(redres)
                fromarray_end_time = timer()
                if ntiming >= 1:
                    sync()
                    sync_end_time = timer()
                    tprint(
                        2,
                        "driver_reduction_time:",
                        sync_end_time - reduce_start_time,
                        reduce_end_time - reduce_start_time,
                        fromarray_end_time - reduce_end_time,
                        sync_end_time - fromarray_end_time,
                    )
            matmul_end_time = timer()
            tprint(
                2,
                "matmul_total_time:",
                matmul_end_time - matmul_start_time,
                ashape,
                bshape,
                reduction_slicing_end_time - reduction_slicing_start_time,
                launch_total,
                post_get_end_time - reduction_slicing_end_time,
                matmul_end_time - post_get_end_time,
            )
            # Per-worker timing breakdown (12-tuple per worker).
            for worker_data in worker_timings:
                (
                    worker_num,
                    worker_total,
                    compute_comm,
                    comm_time,
                    len_arange,
                    len_brange,
                    exec_time,
                    a_send_stats,
                    a_recv_stats,
                    b_send_stats,
                    b_recv_stats,
                    _,
                ) = worker_data
                tprint(
                    3,
                    "reduction matmul_worker:",
                    worker_num,
                    worker_total,
                    compute_comm,
                    comm_time,
                    exec_time,
                    len_arange,
                    len_brange,
                    a_send_stats,
                    a_recv_stats,
                    b_send_stats,
                    b_recv_stats,
                )
            add_time("matmul_b_c_not_dist", matmul_end_time - pre_matmul_start_time)
            add_sub_time(
                "matmul_b_c_not_dist",
                "pre",
                pre_matmul_end_time - pre_matmul_start_time,
            )
            add_sub_time("matmul_b_c_not_dist", "launch", launch_total)
            add_sub_time(
                "matmul_b_c_not_dist",
                "compute_comm",
                max([x[2] for x in worker_timings]),
            )
            add_sub_time(
                "matmul_b_c_not_dist", "comm", max([x[3] for x in worker_timings])
            )
            add_sub_time(
                "matmul_b_c_not_dist", "exec", max([x[6] for x in worker_timings])
            )
            if aextend:
                # a was promoted from 1-D; drop the leading unit axis again.
                return reshape(out_ndarray, (out_shape[1],))
            else:
                return out_ndarray
        elif (
            np.array_equal(adivs[:, :, 1], bdivs[:, :, 0])
            and np.min(adivs[:, 0, 0]) == np.max(adivs[:, 0, 0])
            and np.min(adivs[:, 1, 0]) == np.max(adivs[:, 1, 0])
        ):
            # Case 2: a's inner distribution matches b's outer distribution,
            # so every worker already holds matching operand pieces.
            dprint(
                2,
                "matmul b matrix is distributed and has same inner distribution as the a matrix outer distribution",
            )
            adivs_shape = adivs.shape
            assert adivs_shape[0] == num_workers
            worker_info = []
            workers = []
            matmul_workers = []
            reduction_slicing_start_time = timer()
            reduction_slicing_end_time = timer()
            launch_start_time = timer()
            matmul_workers = remote_async_call_all(
                "matmul",
                out_shape,
                out_ndarray.gid,
                out_ndarray.distribution,
                a.gid,
                a.size,
                a.distribution,
                b.gid,
                b.size,
                b.distribution,
                bextend,
                a_send_recv,
                b_send_recv,
            )
            launch_end_time = timer()
            launch_total = launch_end_time - launch_start_time
            worker_timings = get_results(matmul_workers)
            post_get_end_time = timer()
            if not fast_reduction:
                # Driver-side reduction of per-worker partial results.
                reduce_start_time = timer()
                redres = functools.reduce(operator.add, [x[11] for x in worker_timings])
                reduce_end_time = timer()
                out_ndarray[:] = fromarray(redres)
                fromarray_end_time = timer()
                if ntiming >= 1:
                    sync()
                    sync_end_time = timer()
                    tprint(
                        2,
                        "driver_reduction_time:",
                        sync_end_time - reduce_start_time,
                        reduce_end_time - reduce_start_time,
                        fromarray_end_time - reduce_end_time,
                        sync_end_time - fromarray_end_time,
                    )
            matmul_end_time = timer()
            tprint(
                2,
                "matmul_total_time:",
                matmul_end_time - matmul_start_time,
                ashape,
                bshape,
                "slicing_time",
                0,
                "launch_time",
                launch_total,
                "get_results_time",
                post_get_end_time - launch_end_time,
            )
            for worker_data in worker_timings:
                (
                    worker_num,
                    worker_total,
                    compute_comm,
                    comm_time,
                    len_arange,
                    len_brange,
                    exec_time,
                    a_send_stats,
                    a_recv_stats,
                    b_send_stats,
                    b_recv_stats,
                    _,
                ) = worker_data
                tprint(
                    3,
                    "reduction matmul_worker:",
                    worker_num,
                    worker_total,
                    compute_comm,
                    comm_time,
                    exec_time,
                    len_arange,
                    len_brange,
                    a_send_stats,
                    a_recv_stats,
                    b_send_stats,
                    b_recv_stats,
                )
            add_time(
                "matmul_c_not_dist_a_b_dist_match",
                matmul_end_time - pre_matmul_start_time,
            )
            add_sub_time(
                "matmul_c_not_dist_a_b_dist_match",
                "pre",
                pre_matmul_end_time - pre_matmul_start_time,
            )
            add_sub_time("matmul_c_not_dist_a_b_dist_match", "launch", launch_total)
            add_sub_time(
                "matmul_c_not_dist_a_b_dist_match",
                "reduction",
                max([x[3] for x in worker_timings]),
            )
            add_sub_time(
                "matmul_c_not_dist_a_b_dist_match",
                "exec",
                max([x[6] for x in worker_timings]),
            )
            if aextend:
                return reshape(out_ndarray, (out_shape[1],))
            else:
                return out_ndarray
        else:
            # Case 3: distributions do not line up.  Recursively multiply each
            # worker-aligned slice pair into its own full-size partial result,
            # then sum the partials on the driver.
            # print("not simple case:", out_shape, adivs, adivs[:,:,1], bdivs, bdivs[:,:,0], np.array_equal(adivs[:,:,1], bdivs[:,:,0]))
            dprint(
                2,
                "matmul b matrix is distributed but a is not distributed across 2nd dimension nor b across inner dimension",
            )
            adivs_shape = adivs.shape
            divisions = np.empty((adivs_shape[0], 2, len(out_shape)), dtype=np.int64)
            starts = np.zeros(len(out_shape), dtype=np.int64)
            ends = np.array(list(out_shape), dtype=np.int64)
            # the ends are inclusive, not one past the last index
            ends -= 1
            assert adivs_shape[0] == num_workers
            worker_info = []
            workers = []
            partials = []
            reduction_slicing_start_time = timer()
            for i in range(adivs_shape[0]):
                shardview.make_uni_dist(divisions, i, starts, ends)
                # One zero-filled, full-output-shape accumulator per worker.
                partial_matmul_res = zeros(
                    out_shape,
                    distribution=shardview.divisions_to_distribution(divisions),
                )
                aslice_struct = (
                    slice(adivs[i, 0, 0], adivs[i, 1, 0] + 1),
                    slice(adivs[i, 0, 1], adivs[i, 1, 1] + 1),
                )
                aslice = a[aslice_struct]
                if bextend:
                    bslice_struct = (slice(adivs[i, 0, 1], adivs[i, 1, 1] + 1),)
                    bslice = b[bslice_struct]
                    cslice_struct = (slice(adivs[i, 0, 0], adivs[i, 1, 0] + 1),)
                    cslice = partial_matmul_res[cslice_struct]
                else:
                    bslice_struct = (
                        slice(adivs[i, 0, 1], adivs[i, 1, 1] + 1),
                        slice(0, out_shape[1]),
                    )
                    bslice = b[bslice_struct]
                    cslice_struct = (
                        slice(adivs[i, 0, 0], adivs[i, 1, 0] + 1),
                        slice(0, out_shape[1]),
                    )
                    cslice = partial_matmul_res[cslice_struct]
                dprint(
                    2,
                    "matmul part:",
                    i,
                    partial_matmul_res,
                    aslice_struct,
                    bslice_struct,
                    cslice_struct,
                )
                partials.append(partial_matmul_res)
                worker_info.append((aslice, bslice, cslice, partial_matmul_res))
            reduction_slicing_end_time = timer()
            # Just so that the partial results zeros are there.
            deferred_op.do_ops()
            matmul_workers = []
            launch_start_time = timer()
            for wi in worker_info:
                aslice, bslice, cslice, partial_matmul_res = wi
                # Recursive call with reduction=True returns async handles.
                matmul_workers.extend(
                    matmul(aslice, bslice, reduction=True, out=cslice)
                )
            launch_end_time = timer()
            worker_timings = get_results(matmul_workers)
            post_get_end_time = timer()
            if ndebug > 2:
                pasarray = [x.asarray() for x in partials]
                for p in pasarray:
                    print("partial result:", p)
            # Sum the per-worker partial products into the final output.
            out_ndarray[:] = functools.reduce(operator.add, partials)
            if ntiming >= 1:
                sync()
            matmul_end_time = timer()
            tprint(
                2,
                "matmul_total_time:",
                matmul_end_time - matmul_start_time,
                ashape,
                bshape,
                reduction_slicing_end_time - reduction_slicing_start_time,
                launch_end_time - launch_start_time,
                post_get_end_time - launch_end_time,
                matmul_end_time - post_get_end_time,
            )
            for worker_data in worker_timings:
                (
                    worker_num,
                    worker_total,
                    compute_comm,
                    comm_time,
                    len_arange,
                    len_brange,
                    exec_time,
                    a_send_stats,
                    a_recv_stats,
                    b_send_stats,
                    b_recv_stats,
                    _,
                ) = worker_data
                tprint(
                    3,
                    "reduction matmul_worker:",
                    worker_num,
                    worker_total,
                    compute_comm,
                    comm_time,
                    exec_time,
                    len_arange,
                    len_brange,
                    a_send_stats,
                    a_recv_stats,
                    b_send_stats,
                    b_recv_stats,
                )
            add_time(
                "matmul_c_not_dist_a_b_dist_non_match",
                matmul_end_time - pre_matmul_start_time,
            )
            if aextend:
                return reshape(out_ndarray, (out_shape[1],))
            else:
                return out_ndarray
    # General case: the output is distributed (or this is a recursive
    # reduction call); one remote "matmul" per worker does the work in place.
    if not reduction:
        if ntiming >= 1:
            sync_start_time = timer()
            sync()
            sync_end_time = timer()
            tprint(
                2,
                "matmul_sync_total_time:",
                sync_end_time - sync_start_time,
                ashape,
                bshape,
            )
        else:
            deferred_op.do_ops()
    matmul_start_time = timer()
    dprint(
        4,
        "matmul a:",
        ashape,
        a.distribution,
        shardview.distribution_to_divisions(a.distribution),
    )
    dprint(4, "matmul b:", bshape, shardview.distribution_to_divisions(b.distribution))
    out_distribution = shardview.distribution_to_divisions(out_ndarray.distribution)
    dprint(4, "matmul out:", out_shape, out_distribution)
    # a_send_recv = uuid.uuid4()
    # b_send_recv = uuid.uuid4()
    end_compute_comm_set = timer()
    # matmul_workers = remote_call_all("matmul", out_ndarray.gid,
    #                                  a.gid, a.size, a.distribution,
    #                                  b.gid, b.size, b.distribution, bextend,
    #                                  a_send_recv, b_send_recv)
    launch_start_time = timer()
    matmul_workers = remote_async_call_all(
        "matmul",
        out_ndarray.gid,
        out_ndarray.size,
        out_ndarray.distribution,
        a.gid,
        a.size,
        a.distribution,
        b.gid,
        b.size,
        b.distribution,
        bextend,
        a_send_recv,
        b_send_recv,
    )
    launch_end_time = timer()
    launch_total = launch_end_time - launch_start_time
    if reduction:
        # The recursive caller waits on these handles itself.
        return matmul_workers
    else:
        # worker_timings = ray.get(matmul_workers)
        worker_timings = get_results(matmul_workers)
        matmul_end_time = timer()
        tprint(
            2, "matmul_total_time:", matmul_end_time - matmul_start_time, ashape, bshape
        )
        for worker_data in worker_timings:
            (
                worker_num,
                worker_total,
                compute_comm,
                comm_time,
                len_arange,
                len_brange,
                exec_time,
                a_send_stats,
                a_recv_stats,
                b_send_stats,
                b_recv_stats,
                _,
            ) = worker_data
            tprint(
                3,
                "matmul_worker:",
                worker_num,
                worker_total,
                compute_comm,
                comm_time,
                exec_time,
                len_arange,
                len_brange,
                a_send_stats,
                a_recv_stats,
                b_send_stats,
                b_recv_stats,
            )
        add_time("matmul_general", matmul_end_time - pre_matmul_start_time)
        add_sub_time(
            "matmul_general", "pre", pre_matmul_end_time - pre_matmul_start_time
        )
        add_sub_time("matmul_general", "launch", launch_total)
        add_sub_time(
            "matmul_general", "compute_comm", max([x[2] for x in worker_timings])
        )
        add_sub_time("matmul_general", "comm", max([x[3] for x in worker_timings]))
        add_sub_time("matmul_general", "exec", max([x[6] for x in worker_timings]))
        if aextend:
            return reshape(out_ndarray, (out_shape[1],))
        else:
            return out_ndarray
def matmul_summary():
print(get_timing_str())
if ntiming > 0:
print("registering | |
import pprint
from collections import Counter
import json
import sys
from nl4dv.utils import constants, error_codes, helpers
import os
import pandas as pd
import requests
class DataGenie:
"""
Pre-process data attributes into different categories
"""
def __init__(self, nl4dv_instance):
# nl4dv instance
self.nl4dv_instance = nl4dv_instance
# set label attribute
self.set_label_attribute(label_attribute=self.nl4dv_instance.label_attribute)
# set ignore list
self.set_ignore_words(ignore_words=self.nl4dv_instance.ignore_words)
# set custom stopwords list
self.set_reserve_words(reserve_words=self.nl4dv_instance.reserve_words)
# Other initializations
self.data_attribute_map = dict()
self.data = list()
self.rows = 0
# Set the Data if passed data_url or data_value is not None
if self.nl4dv_instance.data_url is not None:
self.set_data(data_url = self.nl4dv_instance.data_url)
elif self.nl4dv_instance.data_value is not None:
self.set_data(data_value = self.nl4dv_instance.data_value)
# Set the Aliases if passed alias_url or alias_value is not None
if self.nl4dv_instance.alias_url is not None:
self.set_alias_map(alias_url = self.nl4dv_instance.alias_url)
elif self.nl4dv_instance.alias_value is not None:
self.set_alias_map(alias_value = self.nl4dv_instance.alias_value)
# pprint.pprint(self.data_attribute_map['Running Time']) #% FOR CODE UNDERSTAND %
# pprint.pprint(self.data_attribute_map['Release Year']) #% FOR CODE UNDERSTAND %
# Update the attribute datatypes that were not correctly detected by NL4DV
def set_attribute_datatype(self, attr_type_obj):
# Set new datatype
for attribute, data_type in attr_type_obj.items():
if data_type in constants.attribute_types.values():
self.data_attribute_map[attribute]['dataType'] = data_type
self.populate_dataset_meta_for_attr(attribute, data_type)
else:
helpers.cond_print("Invalid Target DataType. Choose from " + str(constants.attribute_types.values()), debug=True)
sys.exit(error_codes.BAD_INPUT_ATTRIBUTE_DATA_TYPE)
# Set Label attribute for the dataset, i.e. one that defines what the dataset is about.
# e.g. "Correlate horsepower and MPG for sports car models" should NOT apply an explicit attribute for models since there are two explicit attributes already present.
def set_label_attribute(self, label_attribute):
self.nl4dv_instance.label_attribute = label_attribute
# WORDS that should be IGNORED in the query, i.e. NOT lead to the detection of attributes and tasks
# `Movie` in movies dataset
# `Car` in cars dataset
def set_ignore_words(self, ignore_words):
self.nl4dv_instance.ignore_words = ignore_words
# Custom STOPWORDS that should NOT removed from the query, as they might be present in the domain.
# e.g. `A` in grades dataset
def set_reserve_words(self, reserve_words):
self.nl4dv_instance.reserve_words = reserve_words
    # Sets the Dataset
    def set_data(self, data_url=None, data_value=None):
        ## type: (str, object) -> None
        """
        Load the dataset and infer each attribute's datatype.

        Accepts either a URL/path (`data_url`; .csv, .tsv or .json) or an
        in-memory value (`data_value`; DataFrame, list of records, or dict).
        Resets `data_attribute_map`, `data` and `rows`, then classifies every
        attribute as temporal/quantitative/nominal by majority vote over the
        rows and populates its domain/summary metadata.
        """
        # A freshly passed source overrides the stored one; otherwise reuse it.
        self.nl4dv_instance.data_url = data_url if data_url is not None else self.nl4dv_instance.data_url
        self.nl4dv_instance.data_value = data_value if data_value is not None else self.nl4dv_instance.data_value
        self.nl4dv_instance.data_url_type = None
        # initialize values
        self.data_attribute_map = dict()
        self.data = list()
        self.rows = 0
        if self.nl4dv_instance.data_url is not None:
            # Possible Local FILE or HTTP URL
            # NOTE(review): file type is decided by extension only; an
            # unrecognized extension silently leaves self.data empty.
            if self.nl4dv_instance.data_url.lower().endswith('.csv'):
                self.nl4dv_instance.data_url_type = "csv"
                self.data = pd.read_csv(self.nl4dv_instance.data_url, sep=',').to_dict('records')
            elif self.nl4dv_instance.data_url.lower().endswith('.tsv'):
                self.nl4dv_instance.data_url_type = "tsv"
                self.data = pd.read_csv(self.nl4dv_instance.data_url, sep='\t').to_dict('records')
            elif self.nl4dv_instance.data_url.lower().endswith('.json'):
                self.nl4dv_instance.data_url_type = "json"
                self.data = pd.read_json(self.nl4dv_instance.data_url).to_dict('records')
        elif self.nl4dv_instance.data_value is not None:
            # In-memory data: normalize every accepted form to a list of
            # {column: value} record dicts.
            if isinstance(data_value, pd.DataFrame):
                self.data = data_value.to_dict('records')
            elif isinstance(data_value, list):
                self.data = data_value
            elif isinstance(data_value, dict):
                self.data = pd.DataFrame(data_value).to_dict('records')
        # pprint.pprint(self.data) #% FOR CODE UNDERSTAND %
        # Set number of rows in the dataset
        self.rows = len(self.data)
        # print(pd.read_csv(self.nl4dv_instance.data_url, sep=',').to_dict('records')) #% FOR CODE UNDERSTAND %
        # print(self.data) #% FOR CODE UNDERSTAND %
        # print(self.data[0]) #% FOR CODE UNDERSTAND %
        # initialize properties in Attribute Map
        # Attribute names are taken from the FIRST record only.
        if len(self.data) > 0:
            for attr in self.data[0].keys():
                # Don't consider attribute names that are empty or just whitespaces
                if attr and attr.strip():
                    self.data_attribute_map[attr] = {
                        'domain': set(),
                        'domainMeta': dict(),
                        'isLabelAttribute': attr == self.nl4dv_instance.label_attribute,
                        'summary': dict(),
                        'dataTypeList': list(), # temporary to determine datatype
                        'dataType': '',
                        'dataTypeMeta': dict(), # Used for for e.g., temporal attributes when they conform to a certain format
                        'aliases': list(),
                    }
        # infer attribute datatypes and compute summary (range, domain)
        # Each row casts one "vote" per attribute (T-<regex_id>, Q, or N).
        for datum in self.data:
            for attr in self.data_attribute_map.keys():
                # Check for Datetime
                is_date, unformatted_date_obj = helpers.isdate(datum[attr])
                if is_date:
                    attr_datatype_for_majority = constants.attribute_types['TEMPORAL'] + "-" + str(unformatted_date_obj["regex_id"])
                # Check for Numeric (float, int)
                elif helpers.isfloat(datum[attr]) or helpers.isint(datum[attr]):
                    attr_datatype_for_majority = constants.attribute_types['QUANTITATIVE']
                # Otherwise set as Nominal
                else:
                    attr_datatype_for_majority = constants.attribute_types['NOMINAL']
                # Append the list of attribute types for each data row to take best decision on heterogeneous data with multiple datatypes
                self.data_attribute_map[attr]['dataTypeList'].append(attr_datatype_for_majority)
        # Determine the Datatype based on majority of values.
        # Also Override a few datatypes set above based on rules such as NOMINAL to ORDINAL if all values are unique such as Sr. 1, Sr. 2, ...
        for attr in self.data_attribute_map:
            # By default, set the attribute datatype to the most common attribute
            attr_datatype = Counter(self.data_attribute_map[attr]['dataTypeList']).most_common(1)[0][0]
            # If attr_datatype is Temporal (e.g., T-1, T-2, T-n where 'n' corresponds to the n'th index of the date_regex array.
            # Then: PROCESS this and eventually strip the '-n' from the datatype
            if not (attr_datatype in [constants.attribute_types['QUANTITATIVE'], constants.attribute_types['NOMINAL']]):
                # If there is at least one instance of 'T-2' (DD*MM*YY(YY)), in the `dataTypeList`, set the regex_id to this, even if 'T-1' is the majority.
                if 'T-2' in self.data_attribute_map[attr]['dataTypeList']:
                    attr_datatype = 'T-2'
                # Strip the '-n' from the datatype
                attr_datatype_split = attr_datatype.split("-")
                attr_datatype = attr_datatype_split[0]
                # Remember which date regex matched this temporal attribute.
                self.data_attribute_map[attr]['dataTypeMeta'] = {
                    "regex_id": attr_datatype_split[1]
                }
                # Add raw data to the domain's metadata. Only for Temporal Attributes.
                if not 'raw' in self.data_attribute_map[attr]['domainMeta']:
                    self.data_attribute_map[attr]['domainMeta']['raw'] = set()
            # Set the final data type
            self.data_attribute_map[attr]['dataType'] = attr_datatype
            # Update the dataset metadata for each attribute
            self.populate_dataset_meta_for_attr(attr, attr_datatype)
        # print(self.data_attribute_map['Title']) #% FOR CODE UNDERSTAND %
# Sets the Alias Map
def set_alias_map(self, alias_value=None, alias_url=None):
# type: (dict, str) -> None
"""
User can choose to manually initialize aliases
"""
self.nl4dv_instance.alias_url= alias_url
self.nl4dv_instance.alias_value = alias_value
if self.nl4dv_instance.alias_url is not None:
if os.path.isfile(self.nl4dv_instance.alias_url):
self.nl4dv_instance.alias_value = json.load(open(self.nl4dv_instance.alias_url, 'r', encoding='utf-8'))
else:
self.nl4dv_instance.alias_value = json.loads(requests.get(self.nl4dv_instance.alias_url).text)
if self.nl4dv_instance.alias_value is not None:
for attr in self.nl4dv_instance.alias_value.keys():
if attr in self.data_attribute_map:
self.data_attribute_map[attr]['aliases'].extend(self.nl4dv_instance.alias_value[attr])
    # Set Domain, Ranges for Attributes of different datatypes
    def populate_dataset_meta(self, attr, attr_val, attr_datatype):
        """Fold one cell value into `attr`'s domain and summary statistics.

        Quantitative values update min/max, temporal values update start/end
        (after regex-driven parsing), and everything else is treated as a
        categorical value with per-value counts.
        """
        if attr_datatype == constants.attribute_types['QUANTITATIVE']:
            # Non-numeric strays become NaN (filtered out again later by
            # format_data_attribute_map's NaN removal).
            try:
                attr_val = float(attr_val)
            except Exception as e:
                attr_val = float('NaN')
            self.data_attribute_map[attr]['domain'].add(attr_val)
            # Compute Max and Min of the attribute values
            if 'min' not in self.data_attribute_map[attr]['summary']:
                self.data_attribute_map[attr]['summary']['min'] = float("inf")
            if 'max' not in self.data_attribute_map[attr]['summary']:
                self.data_attribute_map[attr]['summary']['max'] = float("-inf")
            if attr_val > self.data_attribute_map[attr]['summary']['max']:
                self.data_attribute_map[attr]['summary']['max'] = attr_val
            if attr_val < self.data_attribute_map[attr]['summary']['min']:
                self.data_attribute_map[attr]['summary']['min'] = attr_val
        elif attr_datatype == constants.attribute_types['TEMPORAL']:
            is_date, unformatted_date_obj = helpers.isdate(attr_val)
            parsed_attr_val = None
            if is_date:
                # Try every candidate format for the matched regex; the first
                # one that parses wins.
                for format in constants.date_regexes[unformatted_date_obj['regex_id']][0]:
                    parsed_attr_val = helpers.format_str_to_date("/".join(unformatted_date_obj["regex_matches"]), format)
                    if parsed_attr_val is not None:
                        # Keep both the parsed value and the raw source string.
                        self.data_attribute_map[attr]['domain'].add(parsed_attr_val)
                        self.data_attribute_map[attr]['domainMeta']['raw'].add(attr_val)
                        break
            if parsed_attr_val is not None:
                # Compute Max and Min of the attribute datetime
                if 'start' not in self.data_attribute_map[attr]['summary']:
                    self.data_attribute_map[attr]['summary']['start'] = parsed_attr_val
                if 'end' not in self.data_attribute_map[attr]['summary']:
                    self.data_attribute_map[attr]['summary']['end'] = parsed_attr_val
                # print(parsed_status, attr_val, parsed_attr_val, self.data_attribute_map[attr]['summary']['end'])
                if parsed_attr_val > self.data_attribute_map[attr]['summary']['end']:
                    self.data_attribute_map[attr]['summary']['end'] = parsed_attr_val
                if parsed_attr_val < self.data_attribute_map[attr]['summary']['start']:
                    self.data_attribute_map[attr]['summary']['start'] = parsed_attr_val
        else:
            # Nominal/ordinal: store as string and count occurrences per value.
            attr_val = str(attr_val)
            self.data_attribute_map[attr]['domain'].add(attr_val)
            # Compute # occurrences of attribute values
            if 'group_counts' not in self.data_attribute_map[attr]['summary']:
                self.data_attribute_map[attr]['summary']['group_counts'] = dict()
            if attr_val not in self.data_attribute_map[attr]['summary']['group_counts']:
                self.data_attribute_map[attr]['summary']['group_counts'][attr_val] = 0
            self.data_attribute_map[attr]['summary']['group_counts'][attr_val] += 1
# Set Domain, Ranges for single attribute
def populate_dataset_meta_for_attr(self, attr, attr_datatype):
# Re-initialize the dataTypeList to re-set the domain based on the new Prototype
self.data_attribute_map[attr]['dataTypeList'] = list()
# For each value in the dataset, add to the attribute map (domain, range)
self.data_attribute_map[attr]['domain'] = set()
for datum in self.data:
self.populate_dataset_meta(attr, datum[attr], attr_datatype)
# If an attribute has (almost) no repeating value, then mark it as the label attribute.
# eg. primary/unique key of the table? Car1 , Car2, Car3, ...
# Almost == 90% (heuristic-based)
if attr_datatype == constants.attribute_types['NOMINAL'] and len(self.data_attribute_map[attr]['domain']) > 0.9 * self.rows:
self.nl4dv_instance.label_attribute = attr
self.data_attribute_map[attr]['isLabelAttribute'] = True
# Sort domain values, remove unwanted keys, etc.
self.format_data_attribute_map(attr, attr_datatype)
    def format_data_attribute_map(self, attr, attr_datatype):
        """Normalize the collected domain for `attr` and drop temporary keys."""
        # Remove NANs which are supposedly NOT unique
        # (NaN != NaN, so `x == x` filters float NaNs and keeps everything else.)
        self.data_attribute_map[attr]['domain'] = set(filter(lambda x: x == x, self.data_attribute_map[attr]['domain']))
        # Set > Sorted List > Set
        # NOTE(review): the ordering produced by sorted() is discarded by the
        # surrounding set(), but sorted() raises TypeError on unorderable
        # mixed types (e.g. str + float), which is what routes execution into
        # the except branch below that stringifies the domain — keep it.
        try:
            if attr_datatype == constants.attribute_types['QUANTITATIVE']:
                self.data_attribute_map[attr]['domain'] = set(sorted([float(a) for a in self.data_attribute_map[attr]['domain']]))
            elif attr_datatype == constants.attribute_types['TEMPORAL']:
                self.data_attribute_map[attr]['domain'] = set(sorted(self.data_attribute_map[attr]['domain']))
                self.data_attribute_map[attr]['domainMeta']['raw'] = set(sorted(self.data_attribute_map[attr]['domainMeta']['raw']))
            else:
                self.data_attribute_map[attr]['domain'] = set(sorted(self.data_attribute_map[attr]['domain']))
        except Exception as e:
            # works for hybrid numbers + strings combinations
            self.data_attribute_map[attr]['domain'] = set(sorted([str(a) for a in self.data_attribute_map[attr]['domain']]))
        # Delete the keys that are now redundant
        self.delete_unwanted_keys(attr, attr_datatype)
# Generic method to delete keys based on datatype
def delete_unwanted_keys(self, attr, attr_datatype):
# Delete the list of datatypes, not needed anymore
if 'dataTypeList' in self.data_attribute_map[attr]:
del self.data_attribute_map[attr]['dataTypeList']
if attr_datatype == constants.attribute_types['NOMINAL'] or attr_datatype == constants.attribute_types['ORDINAL']:
self.del_temporal_keys(attr)
self.del_quantitative_keys(attr)
elif attr_datatype == constants.attribute_types['QUANTITATIVE']:
self.del_temporal_keys(attr)
self.del_nominal_keys(attr)
elif attr_datatype == constants.attribute_types['TEMPORAL']:
self.del_quantitative_keys(attr)
self.del_nominal_keys(attr)
# Delete Keys from attribute | |
import conf
import numpy
import random
from Shake import *
def CALC(IPN,NVAC,KGAS,LGAS,ELECEN,ISHELL,ICON):
# IMPLICIT #real*8(A-H,O-Z)
# IMPLICIT #integer*8(I-N)
# SCR=""\
# SCR1=""
global IFIRST,ESHK,ELECN,JVAC,R1
ESHK=0.0
JVAC=0.0
def get_globals():
NDVEC=conf.NDVEC
MSUM=conf.MSUM
MCOMP=conf.MCOMP
MRAYL=conf.MRAYL
MPAIR=conf.MPAIR
MPHOT=conf.MPHOT
MVAC=conf.MVAC
ELEV=conf.ELEV
NSDEG=conf.NSDEG
AA=conf.AA
BB=conf.BB
SCR=conf.SCR
SCR1=conf.SCR1
PRSH=conf.PRSH
ESH=conf.ESH
AUG=conf.AUG
RAD=conf.RAD
PRSHBT=conf.PRSHBT
IZ=conf.IZ
INIOCC=conf.INIOCC
ISHLMX=conf.ISHLMX
AMZ=conf.AMZ
NOCC=conf.NOCC
AUGR=conf.AUGR
RADR=conf.RADR
IONSUM=conf.IONSUM
IFLSUM=conf.IFLSUM
ESTORE=conf.ESTORE
EPHOTON=conf.EPHOTON
DRXE=conf.DRXE
DRYE=conf.DRYE
DRZE=conf.DRZE
DRX=conf.DRX
DRY=conf.DRY
DRZ=conf.DRZ
globals().update(locals())
get_globals()
def update_globals():
conf.NDVEC=NDVEC
conf.MSUM=MSUM
conf.MCOMP=MCOMP
conf.MRAYL=MRAYL
conf.MPAIR=MPAIR
conf.MPHOT=MPHOT
conf.MVAC=MVAC
conf.ELEV=ELEV
conf.NSDEG=NSDEG
conf.AA=AA
conf.BB=BB
conf.SCR,SCR1=SCR,SCR1
conf.PRSH=PRSH
conf.ESH=ESH
conf.AUG=AUG
conf.RAD=RAD
conf.PRSHBT=PRSHBT
conf.IZ=IZ
conf.INIOCC=INIOCC
conf.ISHLMX=ISHLMX
conf.AMZ=AMZ
conf.NOCC=NOCC
conf.AUGR=AUGR
conf.RADR=RADR
conf.IONSUM=IONSUM
conf.IFLSUM=IFLSUM
conf.ESTORE=ESTORE
conf.EPHOTON=EPHOTON
conf.DRXE=DRXE
conf.DRYE=DRYE
conf.DRZE=DRZE
conf.DRX=DRX
conf.DRY=DRY
conf.DRZ=DRZ
globals().update(locals())
#DIMENSION
TEMP=[0 for x in range(17+1)]
TEMP1=[0 for x in range(289+1)]
#
# CALCULATE CASCADE IN GAS KGAS AND MOLECULAR COMPONENT LGAS
# WITH INTIAL ENERGY DEPOSIT ELECEN AND SHELL VACANCY CREATED AT ISHELL
#
# INITIAL PHOTON DIRECTION DRX, DRY AND DRZ
DRXINIT=DRXE[int(NVAC)][1]
DRYINIT=DRYE[int(NVAC)][1]
DRZINIT=DRZE[int(NVAC)][1]
ISHELLST=ISHELL
def GOTO2(IPN,NVAC,KGAS,LGAS,ELECEN,ISHELL,ICON):
global IFIRST,ESHK,ELECN,JVAC
if(ICON==2 and IONSUM[int(NVAC)] == 1):
return
# GO INTO SECOND BETA LOOP
print("calc 104 ICON,IONSUM[int(NVAC)],ISECOND= ",ICON,IONSUM[int(NVAC)],ISECOND)
if(ICON == 3 and IONSUM[int(NVAC)] == 1 and ISECOND == 1):
GOTO66(IPN,NVAC,KGAS,LGAS,ELECEN,ISHELL,ICON)
print("calc 107 ICON,IFIRST,JVAC,ISECOND= ",ICON,IFIRST,JVAC,ISECOND)
if(ICON == 3 and IFIRST == 1 and JVAC == 0 and ISECOND == 2):
return 1
# C
update_globals()
UPDATE(KGAS,LGAS,ISHELL)
# C CHOOSE FLUORESCENCE OR AUGER TRANSITION
TSUM=0.0
for I in range(1,17+1):
TSUM=TSUM+RADR[KGAS][LGAS][ISHELL][I]
for J in range(1,17+1):
TSUM=TSUM+AUGR[KGAS][LGAS][ISHELL][I][J]
# 10 CONTINUE
# C NO MORE TRANSITIONS POSSIBLE
if(TSUM == 0.0 and ICON == 3 and ISECOND == 1):
globals().update(locals())
GOTO66(IPN,NVAC,KGAS,LGAS,ELECEN,ISHELL,ICON)
if(TSUM == 0.0):
return 1
# C NORMALISE TO 1.0
for I in range(1,17+1):
RADR[KGAS][LGAS][ISHELL][I]=RADR[KGAS][LGAS][ISHELL][I]/TSUM
for J in range(1,17+1):
AUGR[KGAS][LGAS][ISHELL][I][J]=AUGR[KGAS][LGAS][ISHELL][I][J]/TSUM
# 11 CONTINUE
# C CREATE CUMULATIVE SUM ARRAY
TEMP[1]=RADR[KGAS][LGAS][ISHELL][1]
for I in range(2,17+1):
TEMP[I]=RADR[KGAS][LGAS][ISHELL][I]+TEMP[I-1]
# 12 CONTINUE
TEMP1[1]=AUGR[KGAS][LGAS][ISHELL][1][1]
for I in range(2,17+1):
TEMP1[I]=AUGR[KGAS][LGAS][ISHELL][I][1]+TEMP1[I-1]
# 13 CONTINUE
for J in range(1,16+1):
for I in range(1,17+1):
TEMP1[I+(J*17)]=AUGR[KGAS][LGAS][ISHELL][I][(J+1)]+TEMP1[I+(J*17)-1]
# 14 CONTINUE
# C FIND FLUORESCENCE OR AUGER TRANSITION
# 15
R1=random.uniform(0.0,1.0)
for I in range(1,17+1):
if(R1 < TEMP[I]):
# C STORE PHOTON ENERGY AND ANGLE THEN UPDATE NOCC
IFLSUM[int(NVAC)]=IFLSUM[int(NVAC)]+1
EPHOTON[int(NVAC)][IFLSUM[int(NVAC)]]=ELEV[ISHELL][IZ[KGAS][LGAS]]-ELEV[I][IZ[KGAS][LGAS]]
if(ICON == 2):
EPHOTON[int(NVAC)][IFLSUM[int(NVAC)]]=ELEV[ISHELL][IZ[KGAS][LGAS]+1]-ELEV[I][IZ[KGAS][LGAS]+1]
if(ICON == 3):
EPHOTON[int(NVAC)][IFLSUM[int(NVAC)]]=ELEV[ISHELL][IZ[KGAS][LGAS]+2]-ELEV[I][IZ[KGAS][LGAS]+2]
if(EPHOTON[int(NVAC)][IFLSUM[int(NVAC)]] < 0.0):
# WRITE(6,545)
# 545
print(' PHOTON ENERGY=%.3f NVAC=%d IFLSUM=%d IN CALC'%(EPHOTON[int(NVAC)][IFLSUM[int(NVAC)]],IFLSUM[int(NVAC)],NVAC))
ELEFT=ELEFT-DABS(EPHOTON[int(NVAC)][IFLSUM[int(NVAC)]])
if(ELEFT < 0.0):
GOTO100()
# C RANDOM EMISSION DIRECTION
R3=random.uniform(0.0,1.0)
THET=numpy.arccos(1.0-2.0*R3)
R3=random.uniform(0.0,1.0)
PHI=TWOPI*R3
# C CALC DIRECTION COSINES OF FLUORESCENCE
DRX[int(NVAC)][IFLSUM[int(NVAC)]]=numpy.sin(THET)*numpy.cos(PHI)
DRY[int(NVAC)][IFLSUM[int(NVAC)]]=numpy.sin(THET)*numpy.sin(PHI)
DRZ[int(NVAC)][IFLSUM[int(NVAC)]]=numpy.cos(THET)
# C
NOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]+1
NOCC[KGAS][LGAS][I]=NOCC[KGAS][LGAS][I]-1
# C FIND LOWEST VACANCY
update_globals()
VACANCY(KGAS,LGAS,ISHELL,ILAST)
if(ILAST == 1):
# C NO MORE TRANSITIONS POSSIBLE
# C SECOND ELECTRON IN DOUBLE BETA DECAY
if(ICON == 3 and ISECOND == 1):
globals().update(locals())
GOTO66(IPN,NVAC,KGAS,LGAS,ELECEN,ISHELL,ICON)
return
# ENDif
globals().update(locals())
GOTO2(IPN,NVAC,KGAS,LGAS,ELECEN,ISHELL,ICON)
# ENDif
# 16 CONTINUE
globals().update(locals())
return 1
def GOTO4(IPN,NVAC,KGAS,LGAS,ELECEN,ISHELL,ICON):
    """Cascade loop (translated Fortran label 4): process one shake-off /
    Auger / Coster-Kronig step for vacancy index NVAC in gas KGAS,
    molecular component LGAS.

    ICON selects the event type used elsewhere in this file:
    1 = photon, 2 = beta decay, 3 = double beta decay.

    All state is shared through module globals via
    ``globals().update(locals())`` (a direct translation of the Fortran
    COMMON blocks), so every local name here is also a global and MUST NOT
    be renamed.  Returns the result of GOTO100/GOTO2 (truthy = cascade
    complete) or falls through returning None.
    """
    global IFIRST,ESHK,ELECN,JVAC
    globals().update(locals())
    # CHECK FOR ELECTRON SHAKEOFF
    IFIRST=IFIRST+1
    if(IFIRST > 1):
        # Not the first pass for this vacancy: resume from the last
        # stored electron energy.
        ELECN=ESTORE[int(NVAC)][int(IONSUM[int(NVAC)])]
    globals().update(locals())
    ISHELL,ELECN,KGAS,LGAS,ESHK,ICON,IFIRST,JVAC=SHAKE(ISHELL,ELECN,KGAS,LGAS,ESHK,ICON,IFIRST,JVAC)
    globals().update(locals())
    # CALCULATE ENERGY OF ELECTRON
    print("calc 203 JVAC=",JVAC)
    if(JVAC == 0):
        # No shake-off vacancy produced: fall through to the transition
        # selection below.
        pass
    else:
        if(IFIRST == 1):
            # INITIAL ELECTRON + SHAKEOFF
            if(ICON == 1):
                ELECN=ELECN-ESHK-ELEV[JVAC][IZ[int(KGAS)][int(LGAS)]]
            if(ICON == 2):
                # NOTE(review): mixed indexing style ELEV[JVAC,(...)] here vs
                # ELEV[JVAC][(...)] elsewhere -- only valid if ELEV is a numpy
                # array; confirm against its declaration.
                ELECN=ELECN-ESHK-ELEV[JVAC,(IZ[KGAS][int(LGAS)]+1)]
            if(ICON == 2 or ICON == 3):
                ISHELL=JVAC
            if(ICON == 3):
                ELECN=ELECN-ESHK-ELEV[JVAC][(IZ[int(KGAS)][int(LGAS)]+2)]
            # PRIMARY ELECTRON
            ESTORE[int(NVAC)][int(IONSUM[int(NVAC)])]=ELECN
        # endif
        if(ICON == 1 and IFIRST != 1):
            ESTORE[int(NVAC)][int(IONSUM[int(NVAC)])]=ESTORE[int(NVAC)][int(IONSUM[int(NVAC)])]-ESHK-ELEV[JVAC][IZ[int(KGAS)][int(LGAS)]]
        # endif
        IONSUM[int(NVAC)]=IONSUM[int(NVAC)]+1
        # MAXIMUM ION CHARGE STATE =28
        if(IONSUM[int(NVAC)]> 28):
            #WRITE(6,99) IONSUM[int(NVAC)]
            #99
            print(' WARNING ION CHARGE LIMITED TO 28+ IN THIS VERSION')
            sys.exit()
        # endif
        # SHAKE ELECTRON
        ESTORE[int(NVAC)][int(IONSUM[int(NVAC)])]=ESHK
        if(ICON == 1):
            ELEFT=ELEFT-ESHK-ELEV[JVAC][IZ[int(KGAS)][int(LGAS)]]
        if(ICON == 2):
            ELEFT=ELEFT-ESHK-ELEV[JVAC,(IZ[KGAS,LGAS]+1)]
        if(ICON == 3):
            ELEFT=ELEFT-ESHK-ELEV[JVAC][(IZ[int(KGAS)][int(LGAS)]+2)]
        if(ELEFT < 0.0):
            # Energy budget exhausted: restart bookkeeping at label 100.
            globals().update(locals())
            complete=GOTO100(IPN,NVAC,KGAS,LGAS,ELECEN,ISHELL,ICON)
            return complete
        # RANDOM EMISSION DIRECTION
        R3=random.uniform(0.0,1.0)
        THET=numpy.arccos(1.0-2.0*R3)
        R3=random.uniform(0.0,1.0)
        PHI=TWOPI*R3
        DRXE[int(NVAC)][int(IONSUM[int(NVAC)])]=numpy.sin(THET)*numpy.cos(PHI)
        DRYE[int(NVAC)][int(IONSUM[int(NVAC)])]=numpy.sin(THET)*numpy.sin(PHI)
        DRZE[int(NVAC)][int(IONSUM[int(NVAC)])]=numpy.cos(THET)
    # RETURN IF NO SHAKE OFF WITH BETA DECAY
    complete=GOTO2(IPN,NVAC,KGAS,LGAS,ELECEN,ISHELL,ICON)
    if(complete):
        return 1
    # Re-scan loop (translated backwards GOTO): counter116 is set to 1 when a
    # negative-energy transition forces a re-sample of R1, restarting the scan.
    # R1/TEMP/TEMP1 are globals populated before this point in the cascade.
    counter116=1
    while(counter116):
        counter116=0
        R2=R1-TEMP[17]
        for J in range(1,17+1):
            if(counter116):
                break
            for I in range(1,17+1):
                if(R2 < TEMP1[I+((J-1)*17)]):
                    # AUGER OR COSTER KRONIG
                    # STORE EJECTED ELECTRON AND UPDATE NOCC
                    ETEMP=ELEV[ISHELL][IZ[int(KGAS)][int(LGAS)]]-(ELEV[I][IZ[int(KGAS)][int(LGAS)]]+ELEV[I][IZ[int(KGAS)][int(LGAS)]+1])*0.5-(ELEV[J][IZ[int(KGAS)][int(LGAS)]]+ELEV[J][IZ[int(KGAS)][int(LGAS)]+1])*0.5
                    if(ICON == 2):
                        ETEMP=ELEV[ISHELL][IZ[int(KGAS)][int(LGAS)]+1]-(ELEV[I][IZ[int(KGAS)][int(LGAS)]+1]+ELEV[I][IZ[int(KGAS)][int(LGAS)]+2])*0.5-(ELEV[J][IZ[int(KGAS)][int(LGAS)]+1]+ELEV[J][IZ[int(KGAS)][int(LGAS)]+2])*0.5
                    if(ICON == 3):
                        ETEMP=ELEV[ISHELL][IZ[int(KGAS)][int(LGAS)]+2]-(ELEV[I][IZ[int(KGAS)][int(LGAS)]+2]+ELEV[I][IZ[int(KGAS)][int(LGAS)]+3])*0.5-(ELEV[J][IZ[int(KGAS)][int(LGAS)]+2]+ELEV[J][IZ[int(KGAS)][int(LGAS)]+3])*0.5
                    if(ETEMP < 0.0):
                        # DO NOT ALLOW NEGATIVE ENERGY TRANSITIONS
                        # Re-sample R1 until it selects a non-fluorescence
                        # branch, then restart the outer scan.
                        counter117=1
                        while(counter117):
                            counter117=0
                            R1=random.uniform(0.0,1.0)
                            if(R1 < TEMP[17]):
                                counter117=1
                        counter116=1
                        break
                    # endif
                    IONSUM[int(NVAC)]=IONSUM[int(NVAC)]+1
                    if(IONSUM[int(NVAC)]> 28):
                        print(' IONSUM LIMITED TO 28 IN THIS VERSION IONSUM=',IONSUM[int(NVAC)],' IN CALC')
                        sys.exit()
                    # endif
                    ESTORE[int(NVAC)][int(IONSUM[int(NVAC)])]=ETEMP
                    ELEFT=ELEFT-ETEMP
                    if(ELEFT < 0.0):
                        # NOTE(review): unlike the branch above, this call's
                        # return value is discarded and execution continues;
                        # confirm this mirrors the original Fortran flow.
                        GOTO100(IPN,NVAC,KGAS,LGAS,ELECEN,ISHELL,ICON)
                    # RANDOM EMISSION DIRECTION
                    R3=random.uniform(0.0,1.0)
                    THET=numpy.arccos(1.0-2.0*R3)
                    R3=random.uniform(0.0,1.0)
                    PHI=TWOPI*R3
                    DRXE[int(NVAC)][int(IONSUM[int(NVAC)])]=numpy.sin(THET)*numpy.cos(PHI)
                    DRYE[int(NVAC)][int(IONSUM[int(NVAC)])]=numpy.sin(THET)*numpy.sin(PHI)
                    DRZE[int(NVAC)][int(IONSUM[int(NVAC)])]=numpy.cos(THET)
                    NOCC[int(KGAS)][int(LGAS)][ISHELL]=NOCC[int(KGAS)][int(LGAS)][ISHELL]+1
                    NOCC[int(KGAS)][int(LGAS)][I]=NOCC[int(KGAS)][int(LGAS)][I]-1
                    NOCC[int(KGAS)][int(LGAS)][J]=NOCC[int(KGAS)][int(LGAS)][J]-1
                    # FIND LOWEST VACANCY
                    VACANCY(KGAS,LGAS,ISHELL,ILAST)
                    if(ILAST == 1):
                        # NO MORE TRANSITIONS POSSIBLE
                        # SECOND ELECTRON IN DOUBLE BETA DECAY
                        if(ICON == 3 and ISECOND == 1):
                            GOTO66(IPN,NVAC,KGAS,LGAS,ELECEN,ISHELL,ICON)
                            update_globals()
                            return
                        # endif
                        GOTO4(IPN,NVAC,KGAS,LGAS,ELECEN,ISHELL,ICON)
                    # endif
    globals().update(locals())
def GOTO66(IPN,NVAC,KGAS,LGAS,ELECEN,ISHELL,ICON):
    """Label 66: launch the second electron of a double beta decay.

    Stores the second electron (energy ESECOND, direction THESEC/PHISEC --
    globals set by GOTO100) in the ESTORE/DRXE/DRYE/DRZE arrays, resets the
    cascade flags, and re-enters the cascade loop via GOTO4.

    Note: ISECOND/ISHELL/IFIRST assignments only reach module scope through
    the ``globals().update(locals())`` call below -- do not remove it.
    Always returns 1.
    """
    global IFIRST,ESHK,ELECN,JVAC
    IONSUM[int(NVAC)]=IONSUM[int(NVAC)]+1
    ESTORE[int(NVAC)][int(IONSUM[int(NVAC)])]=ESECOND
    # Second electron is emitted back-to-back with the first (angles computed
    # in GOTO100 as THESEC = pi - THET, PHISEC = PHI +/- pi).
    DRXE[int(NVAC)][int(IONSUM[int(NVAC)])]=numpy.sin(THESEC)*numpy.cos(PHISEC)
    DRYE[int(NVAC)][int(IONSUM[int(NVAC)])]=numpy.sin(THESEC)*numpy.sin(PHISEC)
    DRZE[int(NVAC)][int(IONSUM[int(NVAC)])]=numpy.cos(THESEC)
    ELECN=ESECOND
    ISECOND=2
    ISHELL=0
    IFIRST=0
    # LOOP AROUND CASCADE
    globals().update(locals())
    GOTO4(IPN,NVAC,KGAS,LGAS,ELECEN,ISHELL,ICON)
    return 1
def GOTO100(IPN,NVAC,KGAS,LGAS,ELECEN,ISHELL,ICON):
    """Label 100: (re)initialise the cascade for vacancy NVAC.

    Resets the occupation array NOCC from INIOCC, stores the primary
    electron's energy and emission direction according to the event type
    (ICON 1 = photon/photoelectron, 2 = beta decay, 3 = double beta decay),
    then enters the cascade loop via GOTO4.

    Shares state through module globals via ``globals().update(locals())``;
    locals here must not be renamed.  Returns GOTO4's completion flag.
    """
    print("calcn ISHELL=", ISHELL)
    global IFIRST,ESHK,ELECN,JVAC
    complete=0
    ELEFT=ELECEN
    ISHELL=ISHELLST
    API=numpy.arccos(-1.00)
    TWOPI=2.00*API
    ISECOND=1
    IFIRST=0
    # SET STARTING ARRAY NOCC EQUAL TO INIOCC
    for I in range(1,17+1):
        NOCC[int(KGAS)][int(LGAS)][I]=INIOCC[int(KGAS)][int(LGAS)][I]
    # PHOTONS
    print("344 calc ICON=",ICON)
    if(ICON == 1):
        IONSUM[int(NVAC)]=1
        IFLSUM[int(NVAC)]=0
        # STORE INITIAL PHOTOELECTRON ENERGY AND ANGLE
        ESTORE[int(NVAC)][1]=ELECEN-ELEV[ISHELL][IZ[int(KGAS)][int(LGAS)]]
        ELECN=ESTORE[int(NVAC)][1]
        ELEFT=ELEFT-ESTORE[int(NVAC)][1]
        NOCC[int(KGAS)][int(LGAS)][ISHELL]=NOCC[int(KGAS)][int(LGAS)][ISHELL]-1
        # ENTRY FOR COMPTON ELECTRON.....
        if(NVAC <= MCOMP[IPN]):
            # IF COMPTON EVENT ELECTRON ANGLE FROM COMPTON (ALREADY STORED)
            globals().update(locals())
            complete=GOTO4(IPN,NVAC,KGAS,LGAS,ELECEN,ISHELL,ICON)
            return complete
        # endif
        # USE PHOTOELCTRON ANGULAR DISTRIBUTION
        APE=AA[ISHELL]
        BPE=BB[ISHELL]
        # NOTE(review): ANGGEN is called Fortran-style; THET cannot be
        # modified through an argument in Python, so THET presumably comes
        # back via a global -- confirm against ANGGEN's definition.
        ANGGEN(APE,BPE,THET)
        if(THET < 0.0):
            THET=THET+API
        R3=random.uniform(0.0,1.0)
        PHI=TWOPI*R3
        # INITIAL PHOTON DIRECTION DRXINIT, DRYINIT AND DRZINIT
        DRCOS(DRXINIT,DRYINIT,DRZINIT,THET,PHI,DRXX,DRYY,DRZZ)
        DRXE[int(NVAC)][1]=DRXX
        DRYE[int(NVAC)][1]=DRYY
        DRZE[int(NVAC)][1]=DRZZ
        globals().update(locals())
        complete=GOTO4(IPN,NVAC,KGAS,LGAS,ELECEN,ISHELL,ICON)
        return complete
    # endif
    if(ICON == 2):
        # BETA DECAY
        IONSUM[int(NVAC)]=1
        IFLSUM[int(NVAC)]=0
        ISHELL=0
        ELECN=ELECEN
        ESTORE[int(NVAC)][1]=ELECN
        # NDVEC (global) selects the beta emission direction convention.
        if(NDVEC == 2):
            # RANDOM EMISSION DIRECTION
            R3=random.uniform(0.0,1.0)
            THET=numpy.arccos(1.0-2.0*R3)
        elif(NDVEC == 0):
            # RANDOM EMISSION IN THE X-Y PLANE
            THET=API/2.0
        elif(NDVEC == 1):
            # EMISSION ALONG Z AXIS
            THET=0.00
        elif(NDVEC == -1):
            # EMISSION ALONG -Z AXIS
            THET=numpy.arccos(-1.00)
        else:
            print(' ERROR NDVEC NOT CORRECT SUBROUTINE STOPPED:')
            sys.exit()
        # endif
        R3=random.uniform(0.0,1.0)
        PHI=TWOPI*R3
        DRXE[int(NVAC)][1]=numpy.sin(THET)*numpy.cos(PHI)
        DRYE[int(NVAC)][1]=numpy.sin(THET)*numpy.sin(PHI)
        DRZE[int(NVAC)][1]=numpy.cos(THET)
    # endif
    # DOUBLE BETA DECAY
    if(ICON == 3):
        IONSUM[int(NVAC)]=1
        IFLSUM[int(NVAC)]=0
        ISHELL=0
        ELECN=ELECEN
        ESTORE[int(NVAC)][1]=ELECN
        # Second electron carries the same energy; emitted back-to-back below.
        ESECOND=ELECN
        if(NDVEC == 2):
            # RANDOM EMISSION DIRECTION
            R3=random.uniform(0.0,1.0)
            THET=numpy.arccos(1.0-2.0*R3)
        elif(NDVEC == 0):
            # RANDOM EMISSION IN THE X-Y PLANE
            THET=API/2.0
        elif(NDVEC == 1):
            # EMISSION ALONG Z AXIS
            THET=0.00
        elif(NDVEC == -1):
            # EMISSION ALONG -Z AXIS
            THET=numpy.arccos(-1.00)
        else:
            print(' ERROR NDVEC NOT CORRECT SUBROUTINE STOPPED:')
            sys.exit()
        # endif
        R3=random.uniform(0.0,1.0)
        PHI=TWOPI*R3
        DRXE[int(NVAC)][1]=numpy.sin(THET)*numpy.cos(PHI)
        DRYE[int(NVAC)][1]=numpy.sin(THET)*numpy.sin(PHI)
        DRZE[int(NVAC)][1]=numpy.cos(THET)
    # endif
    #
    # Opposite direction for the second double-beta electron (used by GOTO66).
    # NOTE(review): for ICON == 1 execution returned above; THET/PHI here are
    # only defined for ICON 2/3 -- other ICON values would raise NameError.
    THESEC=API-THET
    if(PHI < API):
        PHISEC=API+PHI
    else:
        PHISEC=PHI-API
    # endif
    globals().update(locals())
    print("calc IFIRST=",IFIRST)
    complete=GOTO4(IPN,NVAC,KGAS,LGAS,ELECEN,ISHELL,ICON)
    print("got this ",complete)
    return complete
globals().update(locals())
GOTO66(IPN,NVAC,KGAS,LGAS,ELECEN,ISHELL,ICON)
globals().update(locals())
complete=GOTO100(IPN,NVAC,KGAS,LGAS,ELECEN,ISHELL,ICON)
if(complete):
return
print(' ERROR IN CASCADE 0')
sys.exit()
# end
def CALC1(IPN,NVAC,KGAS,LGAS,ELECEN,ISHELL,L1):
# IMPLICIT #real*8(A-H,O-Z)
# IMPLICIT #integer*8(I-N)
# SCR=""
# SCR1=""
#COMMON/GENCAS/
global ELEV#[17,79]
global NSDEG#(17)
global AA#[17]
global BB#[17]
global SCR,SCR1
#COMMON/MIXC/
global PRSH#(6,3,17,17)
global ESH#(6,3,17)
global AUG#(6,3,17,17,17)
global RAD#[6,3,17,17]
global PRSHBT#(6,3,17)
global IZ#[6,3]
global INIOCC#(6,3,17)
global ISHLMX#(6,3)
global AMZ#[6,3]
#COMMON/UPD/
global NOCC#(6,3,17)
global AUGR#(6,3,17,17,17)
global RADR#(6,3,17,17)
#COMMON/CALCAS/
global IONSUM0#(10)
global IFLSUM0#(10)
global ESTORE0#(10,28)
global EPHOTON0#(10,28)
global DRXE0#(10,28)
global DRYE0#(10,28)
global DRZE0#(10,28)
global DRX0#(10,28)
global DRY0#(10,28)
global DRZ0#(10,28)
#COMMON/CALCAS1/
global IONSUM#(10)
global IFLSUM#(10)
global ESTORE#(10,28)
global EPHOTON#(10,28)
global DRXE#(10,28)
global DRYE#(10,28)
global DRZE#(10,28)
global DRX#(10,28)
global DRY#(10,28)
global DRZ#[10,28]
#DIMENSION
TEMP=[0 for x in range(17)]
TEMP1=[0 for x in range(289)]
#
# CALCULATE CASCADE IN GAS KGAS AND MOLECULAR COMPONENT LGAS
# WITH INTIAL ENERGY DEPOSIT ELECEN AND SHELL VACANCY CREATED AT ISHELL
#
ISTART=IONSUM[int(NVAC)]
ISTARTF=IFLSUM[int(NVAC)]
API=numpy.arccos(-1.00)
TWOPI=2.00*API
def GOTO100(IPN,NVAC,KGAS,LGAS,ELECEN,ISHELL,ICON):
ELEFT=ELECEN
INIT=1
# SET STARTING ARRAY NOCC EQUAL TO INIOCC
for I in range(1,17+1):
NOCC[int(KGAS)][int(LGAS)][I]=INIOCC[int(KGAS)][int(LGAS)][I]
IONSUM[int(NVAC)]=ISTART+1
IFLSUM[int(NVAC)]=ISTARTF
# STORE PHOTOELECTRON ENERGY AND ANGLE
ESTORE[int(NVAC)][int(IONSUM[int(NVAC)])]=ELECEN-ELEV[ISHELL][IZ[int(KGAS)][int(LGAS)]]
ELECN=ESTORE[int(NVAC)][int(IONSUM[int(NVAC)])]
ELEFT=ELEFT-ELECN
NOCC[int(KGAS)][int(LGAS)][ISHELL]=NOCC[int(KGAS)][int(LGAS)][ISHELL]-1
# USE PHOTELECTRON ANGULAR DISTRIBUTION
APE=AA[ISHELL]
BPE=BB[ISHELL]
ANGGEN(APE,BPE,THET)
if(THET < 0.0):
THET=THET+API
R3=random.uniform(0.0,1.0)
PHI=TWOPI*R3
DRCOS(DRX0[int(NVAC)][L1],DRY0[int(NVAC)][L1],DRZ0[int(NVAC)][L1],THET,PHI,DRXX,DRYY,DRZZ)
DRXE[int(NVAC)][int(IONSUM[int(NVAC)])]=DRXX
DRYE[int(NVAC)][int(IONSUM[int(NVAC)])]=DRYY
DRZE[int(NVAC)][int(IONSUM[int(NVAC)])]=DRZZ
# LOOP AROUND CASCADE
def GOTO4(IPN,NVAC,KGAS,LGAS,ELECEN,ISHELL,ICON):
# CHECK FOR ELECTRON SHAKEOFF
IDUM=1
if(INIT > 1):
ELECN=ESTORE[int(NVAC)][int(IONSUM[int(NVAC)])]
INSUM=IONSUM[int(NVAC)]
globals().update(locals())
SHAKE(ISHELL,ELECN,KGAS,LGAS,ESHK,IDUM,INSUM,JVAC)
# CALCULATE ENERGY OF ELECTRON
if(JVAC == 0):
pass
else:
# ELECTRON + SHAKEOFF
ELECN=ELECN-ESHK-ELEV[JVAC][IZ[int(KGAS)][int(LGAS)]]
ESTORE[int(NVAC)][int(IONSUM[int(NVAC)])]=ELECN
IONSUM[int(NVAC)]=IONSUM[int(NVAC)]+1
# MAXIMUM ION CHARGE STATE =28
if(IONSUM[int(NVAC)]> 28) :
print(' 1ST GEN LIMITED TO 28 IN THIS VERSION IONSUM=',IONSUM[int(NVAC)])
sys.exit()
# endif
ESTORE[int(NVAC)][int(IONSUM[int(NVAC)])]=ESHK
ELEFT=ELEFT-ESHK-ELEV[JVAC][IZ[KGAS,LGAS]]
if(ELEFT < 0.0):
globals().update(locals())
complete=GOTO100(IPN,NVAC,KGAS,LGAS,ELECEN,ISHELL,ICON)
return 1
# | |
dynamic_descriptor):
self._tags["dynamic_descriptor"] = dynamic_descriptor
self._order.append("dynamic_descriptor")
class Track2PurchaseCorrection(mpgTransaction):
    """Builds a 'us_track2_purchasecorrection' request from an order id and txn number."""

    def __init__(self, order_id, txn_number):
        self._Request = "us_track2_purchasecorrection"
        self._order = ["order_id", "txn_number"]
        self._tags = dict(order_id=order_id, txn_number=txn_number)
class Track2Forcepost(mpgTransaction):
    """Builds a 'us_track2_forcepost' request (track2 + pan + auth code)."""

    def __init__(self, order_id, amount, track2, pan, expdate, pos_code, auth_code):
        self._Request = "us_track2_forcepost"
        self._order = ["order_id", "amount", "track2", "pan", "expdate", "pos_code", "auth_code"]
        self._tags = dict(order_id=order_id, amount=amount, track2=track2, pan=pan,
                          expdate=expdate, pos_code=pos_code, auth_code=auth_code)

    def setCustId(self, cust_id):
        self._order.append("cust_id")
        self._tags["cust_id"] = cust_id

    def setDynamicDescriptor(self, dynamic_descriptor):
        self._order.append("dynamic_descriptor")
        self._tags["dynamic_descriptor"] = dynamic_descriptor
class Track2Refund(mpgTransaction):
    """Builds a 'us_track2_refund' request against a prior transaction number."""

    def __init__(self, order_id, amount, txn_number):
        self._Request = "us_track2_refund"
        self._order = ["order_id", "amount", "txn_number"]
        self._tags = dict(order_id=order_id, amount=amount, txn_number=txn_number)

    def setDynamicDescriptor(self, dynamic_descriptor):
        self._order.append("dynamic_descriptor")
        self._tags["dynamic_descriptor"] = dynamic_descriptor
class Track2IndependentRefund(mpgTransaction):
    """Builds a 'us_track2_ind_refund' request (refund without a prior txn)."""

    def __init__(self, order_id, amount, track2, pan, expdate, pos_code):
        self._Request = "us_track2_ind_refund"
        self._order = ["order_id", "amount", "track2", "pan", "expdate", "pos_code"]
        self._tags = dict(order_id=order_id, amount=amount, track2=track2,
                          pan=pan, expdate=expdate, pos_code=pos_code)

    def setCustId(self, cust_id):
        self._order.append("cust_id")
        self._tags["cust_id"] = cust_id

    def setDynamicDescriptor(self, dynamic_descriptor):
        self._order.append("dynamic_descriptor")
        self._tags["dynamic_descriptor"] = dynamic_descriptor
#Cavv Transactions
class CavvPurchase(mpgTransaction):
    """Builds a 'us_cavv_purchase' request; 'cvd'/'avs' tags are pre-seeded to None."""

    def __init__(self, order_id, amount, pan, expdate, cavv):
        self._Request = "us_cavv_purchase"
        self._order = ["order_id", "amount", "pan", "expdate", "cavv"]
        self._tags = dict(order_id=order_id, amount=amount, pan=pan,
                          expdate=expdate, cavv=cavv, cvd=None, avs=None)

    def setCustId(self, cust_id):
        self._order.append("cust_id")
        self._tags["cust_id"] = cust_id

    def setCvdInfo(self, cvdInfo):
        self._order.append("cvd")
        self._tags["cvd"] = cvdInfo

    def setAvsInfo(self, avsInfo):
        self._order.append("avs")
        self._tags["avs"] = avsInfo

    def setCustInfo(self, custInfo):
        self._order.append("CustInfo")
        self._tags["CustInfo"] = custInfo

    def setDynamicDescriptor(self, dynamic_descriptor):
        self._order.append("dynamic_descriptor")
        self._tags["dynamic_descriptor"] = dynamic_descriptor
class CavvPreauth(mpgTransaction):
    """Builds a 'us_cavv_preauth' request; 'cvd'/'avs' tags are pre-seeded to None."""

    def __init__(self, order_id, amount, pan, expdate, cavv):
        self._Request = "us_cavv_preauth"
        self._order = ["order_id", "amount", "pan", "expdate", "cavv"]
        self._tags = dict(order_id=order_id, amount=amount, pan=pan,
                          expdate=expdate, cavv=cavv, cvd=None, avs=None)

    def setCustId(self, cust_id):
        self._order.append("cust_id")
        self._tags["cust_id"] = cust_id

    def setCvdInfo(self, cvdInfo):
        self._order.append("cvd")
        self._tags["cvd"] = cvdInfo

    def setAvsInfo(self, avsInfo):
        self._order.append("avs")
        self._tags["avs"] = avsInfo

    def setCustInfo(self, custInfo):
        self._order.append("CustInfo")
        self._tags["CustInfo"] = custInfo

    def setDynamicDescriptor(self, dynamic_descriptor):
        self._order.append("dynamic_descriptor")
        self._tags["dynamic_descriptor"] = dynamic_descriptor
#ACH transactions
class ACHDebit(mpgTransaction):
    """Builds a 'us_ach_debit' request; the 'cust_info' tag is pre-seeded to None."""

    def __init__(self, order_id, amount, ach_info):
        self._Request = "us_ach_debit"
        self._order = ["order_id", "amount", "cust_info", "ach_info"]
        self._tags = dict(order_id=order_id, amount=amount,
                          cust_info=None, ach_info=ach_info)

    def setCustId(self, cust_id):
        self._order.append("cust_id")
        self._tags["cust_id"] = cust_id

    def setCustInfo(self, cust_info):
        self._order.append("cust_info")
        self._tags["cust_info"] = cust_info

    def setRecur(self, recur):
        self._order.append("recur")
        self._tags["recur"] = recur
class ACHReversal(mpgTransaction):
    """Builds a 'us_ach_reversal' request from an order id and txn number."""

    def __init__(self, order_id, txn_number):
        self._Request = "us_ach_reversal"
        self._order = ["order_id", "txn_number"]
        self._tags = dict(order_id=order_id, txn_number=txn_number)
class ACHCredit(mpgTransaction):
    """Builds a 'us_ach_credit' request."""

    def __init__(self, order_id, amount, ach_info):
        self._Request = "us_ach_credit"
        self._order = ["order_id", "amount", "ach_info"]
        self._tags = dict(order_id=order_id, amount=amount, ach_info=ach_info)

    def setCustId(self, cust_id):
        self._order.append("cust_id")
        self._tags["cust_id"] = cust_id
class ACHFiEnquiry(mpgTransaction):
    """Builds a 'us_ach_fi_enquiry' request keyed by routing number."""

    def __init__(self, routing_num):
        self._Request = "us_ach_fi_enquiry"
        self._order = ["routing_num"]
        self._tags = dict(routing_num=routing_num)
class ACHInfo(mpgTransaction):
    """ACH account data ('ach_info') built from either 4 keyed fields
    (sec, routing_num, account_num, account_type) or 6 check-reader fields
    (sec, micr, dl_num, magstripe, image_front, image_back).

    Any other argument count yields an empty tag set.
    """
    # Whether the data came from a check reader (the 6-argument form).
    __checkFromReader = False

    def __init__(self, *vargs):
        self._Request = "ach_info"
        if len(vargs) == 4:
            # Manually keyed account details.
            self.__checkFromReader = False
            self._order = ["sec", "routing_num", "account_num", "account_type"]
            self._tags = dict(zip(self._order, vargs))
        elif len(vargs) == 6:
            # Data captured from a check reader.
            self.__checkFromReader = True
            self._order = ["sec", "micr", "dl_num", "magstripe", "image_front", "image_back"]
            self._tags = dict(zip(self._order, vargs))
        else:
            # BUG FIX: this branch previously assigned ``self.tags`` (no
            # underscore), leaving ``self._tags`` unset -- every setter and
            # the rest of the class use ``_tags``.
            self._tags = {}
            self._order = []

    def setCustFirstName(self, cust_first_name):
        self._tags["cust_first_name"] = cust_first_name
        self._order.append("cust_first_name")

    def setCustLastName(self, cust_last_name):
        self._tags["cust_last_name"] = cust_last_name
        self._order.append("cust_last_name")

    def setCustAddress1(self, cust_address1):
        self._tags["cust_address1"] = cust_address1
        self._order.append("cust_address1")

    def setCustAddress2(self, cust_address2):
        self._tags["cust_address2"] = cust_address2
        self._order.append("cust_address2")

    def setCustCity(self, cust_city):
        self._tags["cust_city"] = cust_city
        self._order.append("cust_city")

    def setCustState(self, cust_state):
        self._tags["cust_state"] = cust_state
        self._order.append("cust_state")

    def setCustZip(self, cust_zip):
        self._tags["cust_zip"] = cust_zip
        self._order.append("cust_zip")

    def setCheckNum(self, check_num):
        self._tags["check_num"] = check_num
        self._order.append("check_num")

    def setMicr(self, micr):
        self._tags["micr"] = micr
        self._order.append("micr")
#Contactless
class ContactlessPurchase(mpgTransaction):
    """Builds a 'us_contactless_purchase' request."""

    def __init__(self, order_id, amount, track2, pan, expdate, pos_code):
        self._Request = "us_contactless_purchase"
        self._order = ["order_id", "amount", "track2", "pan", "expdate", "pos_code"]
        self._tags = dict(order_id=order_id, amount=amount, track2=track2,
                          pan=pan, expdate=expdate, pos_code=pos_code)

    def setCustId(self, cust_id):
        self._order.append("cust_id")
        self._tags["cust_id"] = cust_id

    def setCommcardInvoice(self, commcard_invoice):
        self._order.append("commcard_invoice")
        self._tags["commcard_invoice"] = commcard_invoice

    def setCommcardTaxAmount(self, commcard_tax_amount):
        self._order.append("commcard_tax_amount")
        self._tags["commcard_tax_amount"] = commcard_tax_amount

    def setDynamicDescriptor(self, dynamic_descriptor):
        self._order.append("dynamic_descriptor")
        self._tags["dynamic_descriptor"] = dynamic_descriptor
class ContactlessRefund(mpgTransaction):
    """Builds a 'us_contactless_refund' request against a prior txn number."""

    def __init__(self, order_id, amount, track2, pos_code, txn_number):
        self._Request = "us_contactless_refund"
        self._order = ["order_id", "amount", "track2", "pos_code", "txn_number"]
        self._tags = dict(order_id=order_id, amount=amount, track2=track2,
                          pos_code=pos_code, txn_number=txn_number)

    def setDynamicDescriptor(self, dynamic_descriptor):
        self._order.append("dynamic_descriptor")
        self._tags["dynamic_descriptor"] = dynamic_descriptor
class ContactlessPurchaseCorrection(mpgTransaction):
    """Builds a 'us_contactless_purchasecorrection' request."""

    def __init__(self, order_id, txn_number):
        self._Request = "us_contactless_purchasecorrection"
        self._order = ["order_id", "txn_number"]
        self._tags = dict(order_id=order_id, txn_number=txn_number)
#Encrypted Track2
class EncTrack2Purchase(mpgTransaction):
    """Builds a 'us_enc_track2_purchase' request; the 'avs' tag is pre-seeded to None."""

    def __init__(self, order_id, amount, enc_track2, pos_code, device_type):
        self._Request = "us_enc_track2_purchase"
        self._order = ["order_id", "amount", "enc_track2", "pos_code", "device_type"]
        self._tags = dict(order_id=order_id, amount=amount, enc_track2=enc_track2,
                          pos_code=pos_code, device_type=device_type, avs=None)

    def setCustId(self, cust_id):
        self._order.append("cust_id")
        self._tags["cust_id"] = cust_id

    def setCommcardInvoice(self, commcard_invoice):
        self._order.append("commcard_invoice")
        self._tags["commcard_invoice"] = commcard_invoice

    def setCommcardTaxAmount(self, commcard_tax_amount):
        self._order.append("commcard_tax_amount")
        self._tags["commcard_tax_amount"] = commcard_tax_amount

    def setAvsInfo(self, avsInfo):
        self._order.append("avs")
        self._tags["avs"] = avsInfo

    def setDynamicDescriptor(self, dynamic_descriptor):
        self._order.append("dynamic_descriptor")
        self._tags["dynamic_descriptor"] = dynamic_descriptor
class EncTrack2Preauth(mpgTransaction):
    """Builds a 'us_enc_track2_preauth' request; the 'avs' tag is pre-seeded to None."""

    def __init__(self, order_id, amount, enc_track2, pos_code, device_type):
        self._Request = "us_enc_track2_preauth"
        self._order = ["order_id", "amount", "enc_track2", "pos_code", "device_type"]
        self._tags = dict(order_id=order_id, amount=amount, enc_track2=enc_track2,
                          pos_code=pos_code, device_type=device_type, avs=None)

    def setCustId(self, cust_id):
        self._order.append("cust_id")
        self._tags["cust_id"] = cust_id

    def setCommcardInvoice(self, commcard_invoice):
        self._order.append("commcard_invoice")
        self._tags["commcard_invoice"] = commcard_invoice

    def setCommcardTaxAmount(self, commcard_tax_amount):
        self._order.append("commcard_tax_amount")
        self._tags["commcard_tax_amount"] = commcard_tax_amount

    def setAvsInfo(self, avsInfo):
        self._order.append("avs")
        self._tags["avs"] = avsInfo

    def setDynamicDescriptor(self, dynamic_descriptor):
        self._order.append("dynamic_descriptor")
        self._tags["dynamic_descriptor"] = dynamic_descriptor
class EncTrack2IndependentRefund(mpgTransaction):
    """Builds a 'us_enc_track2_ind_refund' request."""

    def __init__(self, order_id, amount, enc_track2, pos_code, device_type):
        self._Request = "us_enc_track2_ind_refund"
        self._order = ["order_id", "amount", "enc_track2", "pos_code", "device_type"]
        self._tags = dict(order_id=order_id, amount=amount, enc_track2=enc_track2,
                          pos_code=pos_code, device_type=device_type)

    def setCustId(self, cust_id):
        self._order.append("cust_id")
        self._tags["cust_id"] = cust_id

    def setDynamicDescriptor(self, dynamic_descriptor):
        self._order.append("dynamic_descriptor")
        self._tags["dynamic_descriptor"] = dynamic_descriptor
class EncTrack2Forcepost(mpgTransaction):
    """Builds a 'us_enc_track2_forcepost' request."""

    def __init__(self, order_id, amount, enc_track2, pos_code, device_type, auth_code):
        self._Request = "us_enc_track2_forcepost"
        self._order = ["order_id", "amount", "enc_track2", "pos_code", "device_type", "auth_code"]
        self._tags = dict(order_id=order_id, amount=amount, enc_track2=enc_track2,
                          pos_code=pos_code, device_type=device_type, auth_code=auth_code)

    def setCustId(self, cust_id):
        self._order.append("cust_id")
        self._tags["cust_id"] = cust_id

    def setDynamicDescriptor(self, dynamic_descriptor):
        self._order.append("dynamic_descriptor")
        self._tags["dynamic_descriptor"] = dynamic_descriptor
#Encrypted Non eCom
class EncPurchase(mpgTransaction):
    """Builds a 'us_enc_purchase' request."""

    def __init__(self, order_id, amount, enc_track2, crypt_type, device_type):
        self._Request = "us_enc_purchase"
        self._order = ["order_id", "amount", "enc_track2", "crypt_type", "device_type"]
        self._tags = dict(order_id=order_id, amount=amount, enc_track2=enc_track2,
                          crypt_type=crypt_type, device_type=device_type)

    def setCustId(self, cust_id):
        self._order.append("cust_id")
        self._tags["cust_id"] = cust_id

    def setCvdInfo(self, cvdInfo):
        self._order.append("cvd")
        self._tags["cvd"] = cvdInfo

    def setAvsInfo(self, avsInfo):
        self._order.append("avs")
        self._tags["avs"] = avsInfo

    def setCustInfo(self, custInfo):
        self._order.append("CustInfo")
        self._tags["CustInfo"] = custInfo

    def setRecur(self, recur):
        self._order.append("recur")
        self._tags["recur"] = recur

    def setCommcardInvoice(self, commcard_invoice):
        self._order.append("commcard_invoice")
        self._tags["commcard_invoice"] = commcard_invoice

    def setCommcardTaxAmount(self, commcard_tax_amount):
        self._order.append("commcard_tax_amount")
        self._tags["commcard_tax_amount"] = commcard_tax_amount

    def setDynamicDescriptor(self, dynamic_descriptor):
        self._order.append("dynamic_descriptor")
        self._tags["dynamic_descriptor"] = dynamic_descriptor
class EncPreauth(mpgTransaction):
    """Builds a 'us_enc_preauth' request."""

    def __init__(self, order_id, amount, enc_track2, crypt_type, device_type):
        self._Request = "us_enc_preauth"
        self._order = ["order_id", "amount", "enc_track2", "crypt_type", "device_type"]
        self._tags = dict(order_id=order_id, amount=amount, enc_track2=enc_track2,
                          crypt_type=crypt_type, device_type=device_type)

    def setCustId(self, cust_id):
        self._order.append("cust_id")
        self._tags["cust_id"] = cust_id

    def setCvdInfo(self, cvdInfo):
        self._order.append("cvd")
        self._tags["cvd"] = cvdInfo

    def setAvsInfo(self, avsInfo):
        self._order.append("avs")
        self._tags["avs"] = avsInfo

    def setCustInfo(self, custInfo):
        self._order.append("CustInfo")
        self._tags["CustInfo"] = custInfo

    def setCommcardInvoice(self, commcard_invoice):
        self._order.append("commcard_invoice")
        self._tags["commcard_invoice"] = commcard_invoice

    def setCommcardTaxAmount(self, commcard_tax_amount):
        self._order.append("commcard_tax_amount")
        self._tags["commcard_tax_amount"] = commcard_tax_amount

    def setDynamicDescriptor(self, dynamic_descriptor):
        self._order.append("dynamic_descriptor")
        self._tags["dynamic_descriptor"] = dynamic_descriptor
class EncIndRefund(mpgTransaction):
    """Builds a 'us_enc_ind_refund' request."""

    def __init__(self, order_id, amount, enc_track2, crypt_type, device_type):
        self._Request = "us_enc_ind_refund"
        self._order = ["order_id", "amount", "enc_track2", "crypt_type", "device_type"]
        self._tags = dict(order_id=order_id, amount=amount, enc_track2=enc_track2,
                          crypt_type=crypt_type, device_type=device_type)

    def setCustId(self, cust_id):
        self._order.append("cust_id")
        self._tags["cust_id"] = cust_id

    def setDynamicDescriptor(self, dynamic_descriptor):
        self._order.append("dynamic_descriptor")
        self._tags["dynamic_descriptor"] = dynamic_descriptor
class EncForcePost(mpgTransaction):
    """Builds a 'us_enc_forcepost' request."""

    def __init__(self, order_id, amount, enc_track2, auth_code, crypt_type, device_type):
        self._Request = "us_enc_forcepost"
        self._order = ["order_id", "amount", "enc_track2", "auth_code", "crypt_type", "device_type"]
        self._tags = dict(order_id=order_id, amount=amount, enc_track2=enc_track2,
                          auth_code=auth_code, crypt_type=crypt_type, device_type=device_type)

    def setCustId(self, cust_id):
        self._order.append("cust_id")
        self._tags["cust_id"] = cust_id

    def setDynamicDescriptor(self, dynamic_descriptor):
        self._order.append("dynamic_descriptor")
        self._tags["dynamic_descriptor"] = dynamic_descriptor
class EncCardVerification(mpgTransaction):
    """Builds a 'us_enc_card_verification' request (avs object stored under 'avs_info')."""

    def __init__(self, order_id, enc_track2, device_type, avs):
        self._Request = "us_enc_card_verification"
        self._order = ["order_id", "enc_track2", "device_type", "avs_info"]
        self._tags = dict(order_id=order_id, enc_track2=enc_track2,
                          device_type=device_type, avs_info=avs)

    def setCustId(self, cust_id):
        self._order.append("cust_id")
        self._tags["cust_id"] = cust_id

    def setDynamicDescriptor(self, dynamic_descriptor):
        self._order.append("dynamic_descriptor")
        self._tags["dynamic_descriptor"] = dynamic_descriptor

    def setCvdInfo(self, cvdInfo):
        self._order.append("cvd")
        self._tags["cvd"] = cvdInfo

    def setAvsInfo(self, avsInfo):
        self._order.append("avs")
        self._tags["avs"] = avsInfo
#Pinless Debit Transactions
class PinlessDebitPurchase(mpgTransaction):
    """Builds a 'us_pinless_debit_purchase' request."""

    def __init__(self, order_id, amount, pan, expdate, presentation_type,
                 intended_use, p_account_number):
        self._Request = "us_pinless_debit_purchase"
        self._order = ["order_id", "amount", "pan", "expdate",
                       "presentation_type", "intended_use", "p_account_number"]
        self._tags = dict(order_id=order_id, amount=amount, pan=pan, expdate=expdate,
                          presentation_type=presentation_type,
                          intended_use=intended_use,
                          p_account_number=p_account_number)

    def setCustId(self, cust_id):
        self._order.append("cust_id")
        self._tags["cust_id"] = cust_id

    def setCustInfo(self, custInfo):
        self._order.append("CustInfo")
        self._tags["CustInfo"] = custInfo

    def setRecur(self, recur):
        self._order.append("recur")
        self._tags["recur"] = recur
class PinlessDebitRefund(mpgTransaction):
    """Builds a pinless debit refund; the request name depends on the module-level `country`."""

    def __init__(self, order_id, amount, txn_number):
        self._Request = "pinless_debit_refund" if country == "Canada" else "us_pinless_debit_refund"
        self._order = ["order_id", "amount", "txn_number"]
        self._tags = dict(order_id=order_id, amount=amount, txn_number=txn_number)
#Administrative Transactions
class OpenTotals(mpgTransaction):
    """Builds an open-totals request; the request name depends on the module-level `country`."""

    def __init__(self, ecr_number):
        self._Request = "opentotals" if country == "Canada" else "us_opentotals"
        self._order = ["ecr_number"]
        self._tags = dict(ecr_number=ecr_number)
class BatchClose(mpgTransaction):
    """Builds a batch-close request; the request name depends on the module-level `country`."""

    def __init__(self, ecr_number):
        self._Request = "batchclose" if country == "Canada" else "us_batchclose"
        self._order = ["ecr_number"]
        self._tags = dict(ecr_number=ecr_number)
#Recuring Transactions
class Recur(mpgTransaction):
    """Builds a 'recur' (recurring billing schedule) element."""

    def __init__(self, recur_unit, start_now, start_date, num_recurs, period, recur_amount):
        self._Request = "recur"
        self._order = ["recur_unit", "start_now", "start_date",
                       "num_recurs", "period", "recur_amount"]
        self._tags = dict(recur_unit=recur_unit, start_now=start_now,
                          start_date=start_date, num_recurs=num_recurs,
                          period=period, recur_amount=recur_amount)
class RecurUpdate(mpgTransaction):
    """Builds a recurring-billing update request; the request name depends on the
    module-level `country`. All fields except `order_id` are optional setters."""

    def __init__(self, order_id):
        self._Request = "recur_update" if country == "Canada" else "us_recur_update"
        self._order = ["order_id"]
        self._tags = dict(order_id=order_id)

    def setCustId(self, cust_id):
        self._order.append("cust_id")
        self._tags["cust_id"] = cust_id

    def setRecurAmount(self, recur_amount):
        self._order.append("recur_amount")
        self._tags["recur_amount"] = recur_amount

    def setPan(self, pan):
        self._order.append("pan")
        self._tags["pan"] = pan

    def setExpDate(self, expdate):
        self._order.append("expdate")
        self._tags["expdate"] = expdate

    def setAddNumRecurs(self, add_num_recurs):
        self._order.append("add_num_recurs")
        self._tags["add_num_recurs"] = add_num_recurs

    def setTotalNumRecurs(self, total_num_recurs):
        self._order.append("total_num_recurs")
        self._tags["total_num_recurs"] = total_num_recurs

    def setHold(self, hold):
        self._order.append("hold")
        self._tags["hold"] = hold

    def setTerminate(self, terminate):
        self._order.append("terminate")
        self._tags["terminate"] = terminate

    def setAvsStreetNumber(self, avs_street_number):
        # Note: the wire tag is "avs_street_num", not "..._number".
        self._order.append("avs_street_num")
        self._tags["avs_street_num"] = avs_street_number

    def setAvsStreetName(self, avs_street_name):
        self._order.append("avs_street_name")
        self._tags["avs_street_name"] = avs_street_name

    def setAvsZipcode(self, avs_zipcode):
        self._order.append("avs_zipcode")
        self._tags["avs_zipcode"] = avs_zipcode

    def setPAccountNumber(self, p_account_number):
        self._order.append("p_account_number")
        self._tags["p_account_number"] = p_account_number

    def setPresentationType(self, presentation_type):
        self._order.append("presentation_type")
        self._tags["presentation_type"] = presentation_type
#EFraud
class CvdInfo(mpgTransaction):
def __init__(self, cvd_indicator, cvd_value):
self._Request | |
from __future__ import print_function
import collections
import math
import os
import pickle
import sys
import time
import numpy
import torch
from sklearn.utils import compute_class_weight
from torch.nn.utils import clip_grad_norm
from torch.utils.data import DataLoader
from nldrp.dnn.config import DNN_BASE_PATH
from nldrp.dnn.logger.experiment import Metric, Experiment
from nldrp.dnn.logger.inspection import Inspector
from nldrp.dnn.util.multi_gpu import get_gpu_id
def sort_batch(lengths):
    """
    Sort a batch by sequence length, longest first.

    Useful for variable-length inputs that will be packed into
    PackedSequences.

    Args:
        lengths (torch.Tensor): 1-D tensor with the length of each sample.

    Returns:
        tuple:
            - the lengths sorted in descending order,
            - ``sort(iterable)``: reorders an iterable the same way,
            - ``unsort(iterable)``: restores an iterable to its original order.
    """
    batch_size = lengths.size(0)
    ascending, ascending_idx = lengths.sort()
    _, original_idx = ascending_idx.sort(0, descending=True)
    # Index that flips a tensor along dim 0, turning ascending into descending.
    reverse_idx = torch.linspace(batch_size - 1, 0, batch_size).long()

    def sort(iterable):
        if iterable.is_cuda:
            return iterable[ascending_idx.cuda(get_gpu_id())][
                reverse_idx.cuda(get_gpu_id())]
        return iterable[ascending_idx][reverse_idx]

    def unsort(iterable):
        if iterable.is_cuda:
            return iterable[reverse_idx.cuda(get_gpu_id())][
                original_idx.cuda(get_gpu_id())][
                reverse_idx.cuda(get_gpu_id())]
        return iterable[reverse_idx][original_idx][reverse_idx]

    return ascending[reverse_idx], sort, unsort
def epoch_progress(loss, epoch, batch, batch_size, dataset_size):
    """Draw an in-place console progress bar for the current epoch.

    Writes a carriage-return-prefixed bar plus the current batch loss to
    stdout, and emits a final newline once the last batch is reached.
    """
    total_batches = math.ceil(float(dataset_size) / batch_size)
    samples_seen = batch * batch_size
    bar_len = 40
    filled = int(round(bar_len * samples_seen / float(dataset_size)))
    bar = '=' * filled + '-' * (bar_len - filled)
    status = 'Epoch {}, Batch Loss ({}): {:.4f}'.format(epoch, batch, loss)
    sys.stdout.write("\r \r [{}] ...{}".format(bar, status))
    sys.stdout.flush()
    if batch == total_batches:
        print()
def get_class_labels(y):
    """
    Return the sorted unique class labels found in ``y``.

    :param y: list of labels, e.g. ['positive', 'negative', 'positive', ...]
    :return: numpy array of the sorted unique class labels
    """
    labels = numpy.unique(y)
    return labels
def get_class_weights(y):
    """
    Return the normalized ('balanced') weight for each class in `y`,
    inversely proportional to the class frequencies.

    :param y: list of true labels (the labels must be hashable)
    :return: dict mapping each class label to its weight
    """
    classes = numpy.unique(y)
    # keyword arguments: the positional form of compute_class_weight
    # was removed in scikit-learn 0.24 (keyword-only since then)
    weights = compute_class_weight(class_weight='balanced',
                                   classes=classes, y=y)
    return dict(zip(classes, weights))
def class_weigths(targets, to_pytorch=False):
    """
    Compute balanced class weights for `targets`.

    Args:
        targets: list of true labels.
        to_pytorch (bool): if True, return a FloatTensor of weights
            ordered by sorted class label.

    Returns:
        torch.FloatTensor of weights when `to_pytorch` is True,
        otherwise the array of unique class labels.
    """
    weight_by_class = get_class_weights(targets)
    labels = get_class_labels(targets)
    if to_pytorch:
        ordered_weights = [weight_by_class[label] for label in sorted(labels)]
        return torch.FloatTensor(ordered_weights)
    # NOTE(review): the non-pytorch path returns the *labels*, not the
    # weights -- preserved as-is; confirm against callers.
    return labels
def _get_predictions(posteriors, task):
"""
Args:
posteriors (numpy.array):
Returns:
"""
if task == "clf":
if posteriors.shape[1] > 1:
predicted = numpy.argmax(posteriors, 1)
else:
predicted = numpy.clip(numpy.sign(posteriors), a_min=0,
a_max=None)
elif task == "multi-clf":
predicted = numpy.clip(numpy.sign(posteriors), a_min=0,
a_max=None)
elif task == "reg":
predicted = posteriors
else:
raise ValueError
return predicted
def predict(model, pipeline, dataloader, task,
            mode="eval",
            label_transformer=None):
    """
    Pass a dataset (dataloader) through the model and collect predictions.

    Args:
        model: the (torch) model to evaluate.
        pipeline (callable): called as ``pipeline(model, batch)``; must
            return ``(outputs, labels, attentions, loss)``.
        dataloader (DataLoader): torch DataLoader used for evaluating
            the performance of the model.
        task (str): passed through to ``_get_predictions`` ("clf",
            "multi-clf" or "reg").
        mode (str): operation mode of the model:
            - "eval"  : disable regularization layers
            - "train" : enable regularization layers (MC evaluation)
        label_transformer: optional object with an ``inverse`` method,
            applied to both labels and predictions.

    Returns:
        tuple: (average loss, (y_true, y_pred), posteriors, attentions).
    """
    if mode == "eval":
        model.eval()
    elif mode == "train":
        model.train()
    else:
        raise ValueError
    posteriors = []
    y_pred = []
    y = []
    attentions = []
    total_loss = 0
    for i_batch, sample_batched in enumerate(dataloader, 1):
        outputs, labels, atts, loss = pipeline(model, sample_batched)
        if loss is not None:
            # NOTE(review): loss.data[0] is the pre-0.4 PyTorch scalar
            # API; on modern torch this would be loss.item() -- the
            # project appears pinned to the old API.
            total_loss += loss.data[0]
        # get the model posteriors
        posts = outputs.data.cpu().numpy()
        # get the actual predictions (classes and so on...)
        if len(posts.shape) == 1:
            # single sample: add a batch axis so _get_predictions sees 2-D
            predicted = _get_predictions(numpy.expand_dims(posts, axis=0), task)
        else:
            predicted = _get_predictions(posts, task)
        # to numpy
        labels = labels.data.cpu().numpy().squeeze().tolist()
        predicted = predicted.squeeze().tolist()
        posts = posts.squeeze().tolist()
        if atts is not None:
            atts = atts.data.cpu().numpy().squeeze().tolist()
        # squeeze() collapses a batch of one to a scalar; re-wrap in lists.
        # NOTE(review): collections.Iterable was removed in Python 3.10
        # (moved to collections.abc) -- fine on the interpreter this
        # project targets.
        if not isinstance(labels, collections.Iterable):
            labels = [labels]
            predicted = [predicted]
            posts = [posts]
            if atts is not None:
                atts = [atts]
        # make transformations to the predictions
        if label_transformer is not None:
            labels = [label_transformer.inverse(x) for x in labels]
            labels = numpy.array(labels)
            predicted = [label_transformer.inverse(x) for x in predicted]
            predicted = numpy.array(predicted)
        y.extend(labels)
        y_pred.extend(predicted)
        posteriors.extend(posts)
        if atts is not None:
            attentions.extend(atts)
    # NOTE(review): i_batch is undefined if the dataloader is empty
    avg_loss = total_loss / i_batch
    return avg_loss, (y, y_pred), posteriors, attentions
def mc_predict(model, pipeline, dataloader, task, label_transformer=None,
               runs=100):
    """
    Monte-Carlo prediction: run `predict` several times with the
    regularization layers enabled ("train" mode) and average the
    posteriors over the runs.

    Args:
        model: the model to evaluate.
        pipeline (callable): forward pass used by `predict`.
        dataloader (DataLoader): the dataset to predict on.
        task (str): task type, forwarded to `_get_predictions`.
        label_transformer: optional label decoder, forwarded to `predict`.
        runs (int): number of stochastic forward passes.

    Returns:
        tuple: (mean loss over runs, (y_true, predictions)).
    """
    y_true = None
    run_posteriors = []
    run_losses = []
    for _ in range(runs):
        loss, (y_true, _), posts, _ = predict(model, pipeline, dataloader,
                                              task, "train",
                                              label_transformer)
        run_posteriors.append(posts)
        run_losses.append(loss)
    # stack to (runs, samples, ...) so we can average over the run axis
    mean_posteriors = numpy.array(run_posteriors).mean(axis=0)
    predictions = _get_predictions(mean_posteriors, task)
    return numpy.mean(run_losses), (y_true, predictions)
class LabelTransformer:
    """Bidirectional mapping between raw labels and their encoded ids."""

    def __init__(self, map, inv_map=None):
        """
        Args:
            map (dict): mapping from label to id.
            inv_map (dict): optional mapping from id back to label;
                derived from `map` when omitted.
        """
        self.map = map
        self.inv_map = {v: k for k, v in map.items()} if inv_map is None \
            else inv_map

    def transform(self, label):
        """Encode a raw label to its id."""
        return self.map[label]

    def inverse(self, label):
        """Decode an id back to the raw label."""
        return self.inv_map[label]
class MetricWatcher:
    """
    Monitors a metric produced by a Trainer and reports whether the
    latest value is an improvement over the best one seen so far.
    """

    def __init__(self, metric, mode="min", base=None):
        """
        Args:
            metric (str): key of the metric to watch in `scores`.
            mode (str): "min" if lower is better, "max" if higher is better.
            base: optional initial best value.
        """
        self.best = base
        self.metric = metric
        self.mode = mode
        # mapping metric-name -> list of values; filled by the Trainer
        self.scores = None

    def has_improved(self):
        """Return True (and update `best`) if the latest value improves on it."""
        value = self.scores[self.metric][-1]
        # first observation (or a NaN baseline) always counts as improvement
        if self.best is None or math.isnan(self.best):
            self.best = value
            return True
        if (self.mode == "min" and value < self.best) or (
                self.mode == "max" and value > self.best):
            self.best = value
            return True
        return False
class EarlyStop(MetricWatcher):
    """Stops training once the monitored metric stops improving."""

    def __init__(self, metric, mode="min", patience=0):
        """
        Args:
            metric (str): the metric to monitor.
            mode (str, optional): Possible values {"min","max"}.
                - "min": improvement means the metric decreased.
                - "max": improvement means the metric increased.
            patience (int): how many epochs to wait for the performance
                to improve before stopping.
        """
        MetricWatcher.__init__(self, metric, mode)
        self.patience = patience
        self.patience_left = patience
        self.best = None

    def stop(self):
        """Return True when training should stop (patience exhausted)."""
        if self.has_improved():
            # improvement: reset the countdown
            self.patience_left = self.patience
        else:
            self.patience_left -= 1
        print(
            "patience left:{}, best({})".format(self.patience_left, self.best))
        return self.patience_left < 0
class Checkpoint(MetricWatcher):
    """
    Saves a checkpoint (the model and its config) every time the
    monitored metric improves.
    """

    def __init__(self, name, model, metric, model_conf, mode="min",
                 dir=None,
                 base=None,
                 timestamp=False,
                 scorestamp=False,
                 keep_best=False):
        """
        Args:
            name (str): the name of the model.
            model (nn.Module): the model to checkpoint.
            metric (str): the metric to monitor.
            model_conf: the model's config; saved alongside the weights so
                related data (e.g. word embeddings) can be reloaded.
            mode (str, optional): Possible values {"min","max"}.
                - "min": save the model if the monitored metric is decreased.
                - "max": save the model if the monitored metric is increased.
            dir (str): directory for the checkpoint files
                (defaults to DNN_BASE_PATH/trained).
            base: initial best value of the metric.
            timestamp (bool): if True add a timestamp to the checkpoint files.
            scorestamp (bool): if True add the score to the checkpoint files.
            keep_best (bool): if True keep only the best checkpoint on disk.
        """
        MetricWatcher.__init__(self, metric, mode, base)
        self.name = name
        self.dir = os.path.join(DNN_BASE_PATH, "trained") if dir is None \
            else dir
        self.model = model
        self.model_conf = model_conf
        self.timestamp = timestamp
        self.scorestamp = scorestamp
        self.keep_best = keep_best
        self.last_saved = None

    def _define_cp_name(self):
        """Compose the checkpoint base name from name/score/date parts."""
        parts = [self.name]
        if self.scorestamp:
            parts.append("{:.4f}".format(self.best))
        if self.timestamp:
            parts.append(time.strftime("%Y-%m-%d_%H:%M"))
        return "_".join(parts)

    def _save_checkpoint(self):
        """
        Write the checkpoint to disk: the model itself plus its config,
        pruning the previous files when `keep_best` is set.
        """
        if not os.path.exists(self.dir):
            os.makedirs(self.dir)
        base_name = self._define_cp_name()
        file_cp = os.path.join(self.dir, base_name + ".model")
        file_conf = os.path.join(self.dir, base_name + ".conf")
        # keep only the best checkpoint: remove the previous files
        if self.keep_best and self.last_saved is not None:
            os.remove(self.last_saved["model"])
            os.remove(self.last_saved["config"])
        self.last_saved = {
            "model": file_cp,
            "config": file_conf
        }
        torch.save(self.model, file_cp)
        with open(file_conf, 'wb') as f:
            pickle.dump(self.model_conf, f)

    def check(self):
        """Save a checkpoint if the monitored metric has improved."""
        if self.has_improved():
            print("Improved model ({}:{:.4f})! "
                  "Saving checkpoint...".format(self.metric, self.best))
            self._save_checkpoint()
class Trainer:
def __init__(self, model,
train_set,
optimizer,
pipeline,
config,
train_batch_size=128,
eval_batch_size=512,
task="clf",
use_exp=False,
inspect_weights=False,
metrics=None,
val_set=None,
eval_train=True,
checkpoint=None,
early_stopping=None):
"""
The Trainer is responsible for training a model.
It is a stateful object.
It holds a set of variables that helps us to abstract
the training process.
Args:
use_exp (bool): | |
from .stage01_isotopomer_peakSpectrum_postgresql_models import *
from SBaaS_LIMS.lims_experiment_postgresql_models import *
from SBaaS_LIMS.lims_sample_postgresql_models import *
from SBaaS_base.sbaas_base_query_update import sbaas_base_query_update
from SBaaS_base.sbaas_base_query_drop import sbaas_base_query_drop
from SBaaS_base.sbaas_base_query_initialize import sbaas_base_query_initialize
from SBaaS_base.sbaas_base_query_insert import sbaas_base_query_insert
from SBaaS_base.sbaas_base_query_select import sbaas_base_query_select
from SBaaS_base.sbaas_base_query_delete import sbaas_base_query_delete
from SBaaS_base.sbaas_template_query import sbaas_template_query
class stage01_isotopomer_peakSpectrum_query(sbaas_template_query):
def initialize_supportedTables(self):
'''Set the supported tables dict for
'''
tables_supported = {'data_stage01_isotopomer_peakSpectrum':data_stage01_isotopomer_peakSpectrum,
};
self.set_supportedTables(tables_supported);
def initialize_dataStage01_isotopomer_peakSpectrum(self):
try:
data_stage01_isotopomer_peakSpectrum.__table__.create(self.engine,True);
except SQLAlchemyError as e:
print(e);
def drop_dataStage01_isotopomer_peakSpectrum(self):
try:
data_stage01_isotopomer_peakSpectrum.__table__.drop(self.engine,True);
except SQLAlchemyError as e:
print(e);
def reset_dataStage01_isotopomer_peakSpectrum(self,experiment_id_I = None):
try:
if experiment_id_I:
reset = self.session.query(data_stage01_isotopomer_peakSpectrum).filter(data_stage01_isotopomer_peakSpectrum.experiment_id.like(experiment_id_I)).delete(synchronize_session=False);
self.session.commit();
except SQLAlchemyError as e:
print(e);
# query sample names from data_stage01_isotopomer_peakSpectrum
def get_sampleNames_experimentIDAndSampleType_peakSpectrum(self,experiment_id_I,sample_type_I):
'''Querry sample names (i.e. unknowns) that are used from
the experiment'''
try:
sample_names = self.session.query(data_stage01_isotopomer_peakSpectrum.sample_name).filter(
data_stage01_isotopomer_peakSpectrum.experiment_id.like(experiment_id_I),
data_stage01_isotopomer_peakSpectrum.sample_type.like(sample_type_I)).group_by(
data_stage01_isotopomer_peakSpectrum.sample_name).order_by(
data_stage01_isotopomer_peakSpectrum.sample_name.asc()).all();
sample_names_O = [];
for sn in sample_names: sample_names_O.append(sn.sample_name);
return sample_names_O;
except SQLAlchemyError as e:
print(e);
def get_sampleNames_experimentIDAndSampleTypeAndSampleNameAbbreviation_peakSpectrum(self,experiment_id_I,sample_type_I,sample_name_abbreviation_I):
'''Querry sample names (i.e. unknowns) that are used from
the experiment'''
try:
sample_names = self.session.query(data_stage01_isotopomer_peakSpectrum.sample_name).filter(
data_stage01_isotopomer_peakSpectrum.experiment_id.like(experiment_id_I),
data_stage01_isotopomer_peakSpectrum.sample_type.like(sample_type_I),
data_stage01_isotopomer_peakSpectrum.sample_name_abbreviation.like(sample_name_abbreviation_I)).group_by(
data_stage01_isotopomer_peakSpectrum.sample_name).order_by(
data_stage01_isotopomer_peakSpectrum.sample_name.asc()).all();
sample_names_O = [];
for sn in sample_names: sample_names_O.append(sn.sample_name);
return sample_names_O;
except SQLAlchemyError as e:
print(e);
def get_sampleNameAndDilution_experimentIDAndTimePointAndSampleNameAbbreviationAndScanType_peakSpectrum(self,experiment_id_I,time_point_I,sample_name_abbreviation_I,scan_type_I,sample_replicate_I):
'''Querry sample name and dilution from the experiment
by time-point, sample name abbreviation, scan type, and replicate numbers'''
try:
sample_name = self.session.query(data_stage01_isotopomer_peakSpectrum.sample_name,
sample.sample_dilution).filter(
data_stage01_isotopomer_peakSpectrum.sample_name_abbreviation.like(sample_name_abbreviation_I),
data_stage01_isotopomer_peakSpectrum.time_point.like(time_point_I),
data_stage01_isotopomer_peakSpectrum.scan_type.like(scan_type_I),
data_stage01_isotopomer_peakSpectrum.experiment_id.like(experiment_id_I),
data_stage01_isotopomer_peakSpectrum.replicate_number == sample_replicate_I,
data_stage01_isotopomer_peakSpectrum.used_,
data_stage01_isotopomer_peakSpectrum.sample_name.like(sample.sample_name)).group_by(
data_stage01_isotopomer_peakSpectrum.sample_name,
sample.sample_dilution).all();
sample_name_O = None;
dilution_O = None;
if not sample_name:
print('no sample name and dilution found for experiment_id\ttime_point\tsample_name_abbreviation\tscan_type\tsample_replicate');
print((experiment_id_I + '\t'+ time_point_I + '\t'+ sample_name_abbreviation_I + '\t'+ scan_type_I + '\t'+ str(sample_replicate_I)));
else:
sample_name_O = sample_name[0][0];
dilution_O = sample_name[0][1];
return sample_name_O,dilution_O;
except SQLAlchemyError as e:
print(e);
# query sample name abbreviations from data_stage01_isotopomer_peakSpectrum
def get_sampleNameAbbreviations_experimentIDAndSampleTypeAndTimePoint_peakSpectrum(self,experiment_id_I,sample_type_I,time_point_I):
'''Querry sample name abbreviations from the experiment by sample type and time point'''
try:
sample_name_abbreviations = self.session.query(sample_description.sample_name_abbreviation).filter(
data_stage01_isotopomer_peakSpectrum.time_point.like(time_point_I),
data_stage01_isotopomer_peakSpectrum.sample_type.like(sample_type_I),
data_stage01_isotopomer_peakSpectrum.experiment_id.like(experiment_id_I),
data_stage01_isotopomer_peakSpectrum.sample_name.like(sample.sample_name),
sample.sample_id.like(sample_description.sample_id),
data_stage01_isotopomer_peakSpectrum.used_).group_by(
sample_description.sample_name_abbreviation).order_by(
sample_description.sample_name_abbreviation.asc()).all();
sample_name_abbreviations_O = [];
if not sample_name_abbreviations: print(('no sample name abbreviations found for experiment: ' + experiment_id_I\
+ ' and time-point: ' + time_point_I + ' and sample type: ' + sample_type_I));
else:
for sna in sample_name_abbreviations:
sample_name_abbreviations_O.append(sna[0]);
return sample_name_abbreviations_O;
except SQLAlchemyError as e:
print(e);
def get_sampleNameAbbreviationsAndTimePointAndReplicateNumber_experimentIDAndSampleName_peakSpectrum(self,experiment_id_I,sample_name_I):
'''Querry sample name abbreviations, time points and replicate numbers from
the experiment by sample name'''
try:
sample_name_abbreviations = self.session.query(sample_description.sample_name_abbreviation,
sample_description.time_point,
sample_description.sample_replicate).filter(
data_stage01_isotopomer_peakSpectrum.sample_name.like(sample_name_I),
data_stage01_isotopomer_peakSpectrum.experiment_id.like(experiment_id_I),
data_stage01_isotopomer_peakSpectrum.sample_name.like(sample.sample_name),
sample.sample_id.like(sample_description.sample_id)).group_by(
sample_description.sample_name_abbreviation,
sample_description.time_point,
sample_description.sample_replicate).order_by(
sample_description.sample_name_abbreviation.asc()).all();
sample_name_abbreviations_O = None;
time_points_O = None;
sample_replicates_O = None;
if not sample_name_abbreviations: exit('bad query result: get_sampleNameAbbreviationsAndOther_experimentIDAndSampleName_peakSpectrum');
sample_name_abbreviations_O=sample_name_abbreviations[0][0];
time_points_O=sample_name_abbreviations[0][1];
sample_replicates_O=sample_name_abbreviations[0][2];
return sample_name_abbreviations_O,time_points_O,sample_replicates_O;
except SQLAlchemyError as e:
print(e);
def get_sampleNameAbbreviationsAndOther_experimentIDAndSampleName_peakSpectrum(self,experiment_id_I,sample_name_I):
'''Querry sample name abbreviations, time points, dilutions, and replicate numbers from
the experiment by sample name'''
try:
sample_name_abbreviations = self.session.query(sample_description.sample_name_abbreviation,
sample_description.time_point,
sample.sample_dilution,
sample_description.sample_replicate).filter(
data_stage01_isotopomer_peakSpectrum.sample_name.like(sample_name_I),
data_stage01_isotopomer_peakSpectrum.experiment_id.like(experiment_id_I),
data_stage01_isotopomer_peakSpectrum.sample_name.like(sample.sample_name),
sample.sample_id.like(sample_description.sample_id)).group_by(
sample_description.sample_name_abbreviation,
sample_description.time_point,
sample.sample_dilution,
sample_description.sample_replicate).order_by(
sample_description.sample_name_abbreviation.asc()).all();
sample_name_abbreviations_O = None;
time_points_O = None;
dilutions_O = None;
sample_replicates_O = None;
if not sample_name_abbreviations: exit('bad query result: get_sampleNameAbbreviationsAndOther_experimentIDAndSampleName_peakSpectrum');
sample_name_abbreviations_O=sample_name_abbreviations[0][0];
time_points_O=sample_name_abbreviations[0][1];
dilutions_O=sample_name_abbreviations[0][2];
sample_replicates_O=sample_name_abbreviations[0][3];
return sample_name_abbreviations_O,time_points_O,dilutions_O,sample_replicates_O;
except SQLAlchemyError as e:
print(e);
# query time_points from data_stage01_isotopomer_peakSpectrum
def get_timePoints_experimentID_peakSpectrum(self,experiment_id_I):
'''time points from the experiment'''
try:
timepoints = self.session.query(
sample_description.time_point).filter(
data_stage01_isotopomer_peakSpectrum.experiment_id.like(experiment_id_I),
data_stage01_isotopomer_peakSpectrum.sample_name.like(sample.sample_name),
sample.sample_id.like(sample_description.sample_id),
data_stage01_isotopomer_peakSpectrum.used_).group_by(
sample_description.time_point).order_by(
sample_description.time_point.asc()).all();
time_points_O = [];
sample_replicates_O = None;
if not timepoints: print(('no time points found for experiment: ' + experiment_id_I));
else:
for tp in timepoints:
time_points_O.append(tp[0]);
return time_points_O;
except SQLAlchemyError as e:
print(e);
# query met_id, precursor formula from data_stage01_isotopomer_peakSpectrum
def get_metIDAndPrecursorFormulaAndScanType_experimentIDAndSampleName_peakSpectrum(self,experiment_id_I,sample_name_I):
'''Querry met_id, precursor formula that are used for the experiment'''
try:
component_names = self.session.query(data_stage01_isotopomer_peakSpectrum.met_id,
data_stage01_isotopomer_peakSpectrum.precursor_formula,
data_stage01_isotopomer_peakSpectrum.scan_type).filter(
data_stage01_isotopomer_peakSpectrum.sample_name.like(sample_name_I),
data_stage01_isotopomer_peakSpectrum.experiment_id.like(experiment_id_I)).group_by(
data_stage01_isotopomer_peakSpectrum.met_id,
data_stage01_isotopomer_peakSpectrum.precursor_formula,
data_stage01_isotopomer_peakSpectrum.scan_type).order_by(
data_stage01_isotopomer_peakSpectrum.met_id.asc(),
data_stage01_isotopomer_peakSpectrum.precursor_formula).all();
met_ids_O = [];
precursor_formulas_O = [];
scan_type_O = [];
if not component_names: exit('bad query result: get_metIDAndPrecursorFormula_experimentIDAndSampleName_peakSpectrum');
for cn in component_names:
met_ids_O.append(cn.met_id);
precursor_formulas_O.append(cn.precursor_formula);
scan_type_O.append(cn.scan_type);
return met_ids_O, precursor_formulas_O, scan_type_O;
except SQLAlchemyError as e:
print(e);
def get_metIDAndPrecursorFormula_experimentIDAndSampleNameAndScanType_peakSpectrum(self,experiment_id_I,sample_name_I,scan_type_I):
'''Querry met_id, precursor formula that are used for the experiment'''
try:
component_names = self.session.query(data_stage01_isotopomer_peakSpectrum.met_id,
data_stage01_isotopomer_peakSpectrum.precursor_formula).filter(
data_stage01_isotopomer_peakSpectrum.sample_name.like(sample_name_I),
data_stage01_isotopomer_peakSpectrum.experiment_id.like(experiment_id_I),
data_stage01_isotopomer_peakSpectrum.scan_type.like(scan_type_I)).group_by(
data_stage01_isotopomer_peakSpectrum.met_id,
data_stage01_isotopomer_peakSpectrum.precursor_formula).order_by(
data_stage01_isotopomer_peakSpectrum.met_id.asc(),
data_stage01_isotopomer_peakSpectrum.precursor_formula).all();
met_ids_O = [];
precursor_formulas_O = [];
if not component_names: exit('bad query result: get_metIDAndPrecursorFormula_experimentIDAndSampleNameAndScanType_peakSpectrum');
for cn in component_names:
met_ids_O.append(cn.met_id);
precursor_formulas_O.append(cn.precursor_formula);
return met_ids_O, precursor_formulas_O;
except SQLAlchemyError as e:
print(e);
def get_metIDAndPrecursorFormulaAndMass_experimentIDAndSampleNameAndScanType_peakSpectrum(self,experiment_id_I,sample_name_I,scan_type_I):
'''Querry met_id, precursor formula that are used for the experiment'''
try:
component_names = self.session.query(data_stage01_isotopomer_peakSpectrum.met_id,
data_stage01_isotopomer_peakSpectrum.precursor_formula,
data_stage01_isotopomer_peakSpectrum.precursor_mass).filter(
data_stage01_isotopomer_peakSpectrum.sample_name.like(sample_name_I),
data_stage01_isotopomer_peakSpectrum.experiment_id.like(experiment_id_I),
data_stage01_isotopomer_peakSpectrum.scan_type.like(scan_type_I)).group_by(
data_stage01_isotopomer_peakSpectrum.met_id,
data_stage01_isotopomer_peakSpectrum.precursor_formula,
data_stage01_isotopomer_peakSpectrum.precursor_mass).order_by(
data_stage01_isotopomer_peakSpectrum.met_id.asc(),
data_stage01_isotopomer_peakSpectrum.precursor_mass.asc(),
data_stage01_isotopomer_peakSpectrum.precursor_formula).all();
met_ids_O = [];
precursor_formulas_O = [];
precursor_mass_O = [];
if not component_names: exit('bad query result: get_metIDAndPrecursorFormulaAndMass_experimentIDAndSampleNameAndScanType_peakSpectrum');
for cn in component_names:
met_ids_O.append(cn.met_id);
precursor_formulas_O.append(cn.precursor_formula);
precursor_mass_O.append(cn.precursor_mass);
return met_ids_O, precursor_formulas_O, precursor_mass_O;
except SQLAlchemyError as e:
print(e);
def get_metID_experimentIDAndTimePointAndSampleNameAbbreviationAndScanTypeAndReplicate_peakSpectrum(self,experiment_id_I,time_point_I,sample_name_abbreviation_I,scan_type_I,sample_replicate_I):
'''Querry met_ids that are used for the experiment
by time-point, sample name abbreviation, scan type, and replicate numbers'''
try:
component_names = self.session.query(data_stage01_isotopomer_peakSpectrum.met_id).filter(
data_stage01_isotopomer_peakSpectrum.sample_name_abbreviation.like(sample_name_abbreviation_I),
data_stage01_isotopomer_peakSpectrum.time_point.like(time_point_I),
data_stage01_isotopomer_peakSpectrum.scan_type.like(scan_type_I),
data_stage01_isotopomer_peakSpectrum.experiment_id.like(experiment_id_I),
data_stage01_isotopomer_peakSpectrum.replicate_number == sample_replicate_I,
data_stage01_isotopomer_peakSpectrum.used_).group_by(
data_stage01_isotopomer_peakSpectrum.met_id).order_by(
data_stage01_isotopomer_peakSpectrum.met_id.asc()).all();
met_ids_O = [];
if not component_names:
print('no met ids found for experiment_id\ttime_point\tsample_name_abbreviation\tscan_type\tsample_replicate');
print((experiment_id_I + '\t'+ time_point_I + '\t'+ sample_name_abbreviation_I + '\t'+ scan_type_I + '\t'+ str(sample_replicate_I)));
else:
for cn in component_names:
met_ids_O.append(cn[0]);
return met_ids_O;
except SQLAlchemyError as e:
print(e);
def get_metID_experimentIDAndSampleNameAndScanType_peakSpectrum(self,experiment_id_I,sample_name_I,scan_type_I):
'''Querry met_ids that are used for the experiment'''
try:
component_names = self.session.query(data_stage01_isotopomer_peakSpectrum.met_id).filter(
data_stage01_isotopomer_peakSpectrum.sample_name.like(sample_name_I),
data_stage01_isotopomer_peakSpectrum.experiment_id.like(experiment_id_I),
data_stage01_isotopomer_peakSpectrum.scan_type.like(scan_type_I)).group_by(
data_stage01_isotopomer_peakSpectrum.met_id).order_by(
data_stage01_isotopomer_peakSpectrum.met_id.asc()).all();
met_ids_O = [];
if not component_names: exit('bad query result: get_metID_experimentIDAndSampleNameAndScanType_peakSpectrum');
for cn in component_names:
met_ids_O.append(cn[0]);
return met_ids_O;
except SQLAlchemyError as e:
print(e);
def get_precursorFormulaAndMass_experimentIDAndTimePointAndSampleNameAbbreviationAndScanTypeAndReplicateAndMetID_peakSpectrum(self,experiment_id_I,time_point_I,sample_name_abbreviation_I,scan_type_I,sample_replicate_I,met_id_I):
'''Querry met_ids that are used for the experiment
by time-point, sample name abbreviation, scan type, replicate numbers, and met_ids'''
try:
component_names = self.session.query(data_stage01_isotopomer_peakSpectrum.precursor_formula,
data_stage01_isotopomer_peakSpectrum.precursor_mass).filter(
data_stage01_isotopomer_peakSpectrum.met_id.like(met_id_I),
data_stage01_isotopomer_peakSpectrum.sample_name_abbreviation.like(sample_name_abbreviation_I),
data_stage01_isotopomer_peakSpectrum.time_point.like(time_point_I),
data_stage01_isotopomer_peakSpectrum.scan_type.like(scan_type_I),
data_stage01_isotopomer_peakSpectrum.experiment_id.like(experiment_id_I),
data_stage01_isotopomer_peakSpectrum.replicate_number == sample_replicate_I,
data_stage01_isotopomer_peakSpectrum.used_).group_by(
data_stage01_isotopomer_peakSpectrum.precursor_formula,
data_stage01_isotopomer_peakSpectrum.precursor_mass).order_by(
data_stage01_isotopomer_peakSpectrum.precursor_mass.asc(),
data_stage01_isotopomer_peakSpectrum.precursor_formula).all();
precursor_formulas_O = [];
precursor_mass_O = [];
if not component_names:
print('no precursor formula nor precursor mass found for experiment_id\ttime_point\tsample_name_abbreviation\tscan_type\tsample_replicate\tmet id');
print((experiment_id_I + '\t'+ time_point_I + '\t'+ sample_name_abbreviation_I + '\t'+ scan_type_I + '\t'+ str(sample_replicate_I) + '\t'+ met_id_I));
else:
for cn in component_names:
precursor_formulas_O.append(cn.precursor_formula);
precursor_mass_O.append(cn.precursor_mass);
return precursor_formulas_O, precursor_mass_O;
except SQLAlchemyError as e:
print(e);
def get_precursorFormulaAndMass_experimentIDAndSampleNameAndMetIDAndScanType_peakSpectrum(self,experiment_id_I,sample_name_I,met_id_I,scan_type_I):
'''Querry precursor formulas and masses that are used for the experiment'''
try:
component_names = self.session.query(data_stage01_isotopomer_peakSpectrum.precursor_formula,
data_stage01_isotopomer_peakSpectrum.precursor_mass).filter(
data_stage01_isotopomer_peakSpectrum.met_id.like(met_id_I),
data_stage01_isotopomer_peakSpectrum.sample_name.like(sample_name_I),
data_stage01_isotopomer_peakSpectrum.experiment_id.like(experiment_id_I),
data_stage01_isotopomer_peakSpectrum.scan_type.like(scan_type_I)).group_by(
data_stage01_isotopomer_peakSpectrum.precursor_formula,
data_stage01_isotopomer_peakSpectrum.precursor_mass).order_by(
data_stage01_isotopomer_peakSpectrum.precursor_mass.asc(),
data_stage01_isotopomer_peakSpectrum.precursor_formula).all();
precursor_formulas_O = [];
precursor_mass_O = [];
if not component_names: exit('bad query result: get_precursorFormulaAndMass_experimentIDAndSampleNameAndScanType_peakSpectrum');
for cn in component_names:
precursor_formulas_O.append(cn.precursor_formula);
precursor_mass_O.append(cn.precursor_mass);
return precursor_formulas_O, precursor_mass_O;
except SQLAlchemyError as e:
print(e);
# query scan types for data_stage01_peakSpectrum
def get_scanType_experimentIDAndTimePointSampleNameAbbreviation_peakSpectrum(self,experiment_id_I,time_point_I,sample_name_abbreviation_I):
'''Querry scan type that are used for the experiment by time-point and sample name abbreviation'''
try:
scantypes = self.session.query(data_stage01_isotopomer_peakSpectrum.scan_type).filter(
data_stage01_isotopomer_peakSpectrum.time_point.like(time_point_I),
data_stage01_isotopomer_peakSpectrum.sample_name_abbreviation.like(sample_name_abbreviation_I),
data_stage01_isotopomer_peakSpectrum.experiment_id.like(experiment_id_I),
data_stage01_isotopomer_peakSpectrum.used_).group_by(
data_stage01_isotopomer_peakSpectrum.scan_type).order_by(
data_stage01_isotopomer_peakSpectrum.scan_type.asc()).all();
scan_type_O = [];
if not scantypes:
print('no scan types found for experiment_id\ttime_point\tsample_name_abbreviation');
print((experiment_id_I + '\t'+ time_point_I + '\t'+ sample_name_abbreviation_I));
else:
for st in scantypes:
scan_type_O.append(st[0]);
return scan_type_O;
except SQLAlchemyError as e:
print(e);
def get_scanType_experimentIDAndSampleName_peakSpectrum(self,experiment_id_I,sample_name_I):
'''Querry scan type that are used for the experiment'''
try:
component_names = self.session.query(data_stage01_isotopomer_peakSpectrum.scan_type).filter(
data_stage01_isotopomer_peakSpectrum.sample_name.like(sample_name_I),
data_stage01_isotopomer_peakSpectrum.experiment_id.like(experiment_id_I)).group_by(
data_stage01_isotopomer_peakSpectrum.scan_type).order_by(
data_stage01_isotopomer_peakSpectrum.scan_type.asc()).all();
scan_type_O = [];
if not component_names: exit('bad query result: get_metIDAndPrecursorFormula_experimentIDAndSampleName_peakSpectrum');
for cn in component_names:
scan_type_O.append(cn[0]);
return scan_type_O;
except SQLAlchemyError as e:
print(e);
# query replicate numbers for data_stage01_peakSpectrum
def get_replicateNumber_experimentIDAndTimePointAndSampleNameAbbreviationAndScanType_peakSpectrum(self,experiment_id_I,time_point_I,sample_name_abbreviation_I,scan_type_I):
'''Querry replicate numbers from the experiment
by time-point, sample name abbreviation and scan type'''
try:
replicates = self.session.query(data_stage01_isotopomer_peakSpectrum.replicate_number).filter(
data_stage01_isotopomer_peakSpectrum.sample_name_abbreviation.like(sample_name_abbreviation_I),
data_stage01_isotopomer_peakSpectrum.time_point.like(time_point_I),
data_stage01_isotopomer_peakSpectrum.scan_type.like(scan_type_I),
data_stage01_isotopomer_peakSpectrum.experiment_id.like(experiment_id_I),
data_stage01_isotopomer_peakSpectrum.used_).group_by(
data_stage01_isotopomer_peakSpectrum.replicate_number).order_by(
data_stage01_isotopomer_peakSpectrum.replicate_number.asc()).all();
sample_replicates_O = [];
if not replicates:
print('no replicates found for experiment_id\ttime_point\tsample_name_abbreviation\tscan_type');
print((experiment_id_I + '\t'+ time_point_I + '\t'+ sample_name_abbreviation_I + '\t'+ scan_type_I));
else:
for r in replicates:
sample_replicates_O.append(r[0]);
return sample_replicates_O;
except SQLAlchemyError as e:
print(e);
# query product formulas
def get_productFormulas_experimentIDAndTimePointAndSampleNameAbbreviationAndScanTypeAndReplicateAndMetIDAndPrecursorFormula_peakSpectrum(self,experiment_id_I,time_point_I,sample_name_abbreviation_I,scan_type_I,sample_replicate_I,met_id_I,precursor_formula_I):
'''Querry product formulas that are used for the experiment
by time-point, sample name abbreviation, scan type, replicate numbers, met_ids, and precursor formula'''
try:
data = self.session.query(data_stage01_isotopomer_peakSpectrum.product_formula).filter(
data_stage01_isotopomer_peakSpectrum.precursor_formula.like(precursor_formula_I),
data_stage01_isotopomer_peakSpectrum.met_id.like(met_id_I),
data_stage01_isotopomer_peakSpectrum.sample_name_abbreviation.like(sample_name_abbreviation_I),
data_stage01_isotopomer_peakSpectrum.time_point.like(time_point_I),
data_stage01_isotopomer_peakSpectrum.scan_type.like(scan_type_I),
data_stage01_isotopomer_peakSpectrum.experiment_id.like(experiment_id_I),
data_stage01_isotopomer_peakSpectrum.replicate_number == sample_replicate_I,
data_stage01_isotopomer_peakSpectrum.used_).group_by(
data_stage01_isotopomer_peakSpectrum.product_formula).order_by(
data_stage01_isotopomer_peakSpectrum.product_formula.asc()).all();
product_formulas_O = [];
if not data:
print('no product formulas found for experiment_id\ttime_point\tsample_name_abbreviation\tscan_type\tsample_replicate\tmet id\tprecursor formula');
print((experiment_id_I + '\t'+ time_point_I + '\t'+ sample_name_abbreviation_I + '\t'+ scan_type_I + '\t'+ str(sample_replicate_I) + '\t'+ met_id_I + '\t'+ precursor_formula_I));
else:
for d in data:
product_formulas_O.append(d.product_formula);
return product_formulas_O;
except SQLAlchemyError as e:
print(e);
def get_productFormulas_experimentIDAndSampleNameAndMetIDAndPrecursorFormulaAndScanType_peakSpectrum(self,experiment_id_I,sample_name_I,met_id_I,precursor_formula_I,scan_type_I):
'''Querry peak data for a specific experiment_id, sample_name, met_id, and precursor_formula'''
try:
data = self.session.query(data_stage01_isotopomer_peakSpectrum.product_formula).filter(
data_stage01_isotopomer_peakSpectrum.sample_name.like(sample_name_I),
data_stage01_isotopomer_peakSpectrum.experiment_id.like(experiment_id_I),
data_stage01_isotopomer_peakSpectrum.met_id.like(met_id_I),
data_stage01_isotopomer_peakSpectrum.precursor_formula.like(precursor_formula_I),
data_stage01_isotopomer_peakSpectrum.scan_type.like(scan_type_I),
data_stage01_isotopomer_peakSpectrum.used_).group_by(
data_stage01_isotopomer_peakSpectrum.product_formula).order_by(
data_stage01_isotopomer_peakSpectrum.product_formula.asc()).all();
product_formulas_O = [];
if not data:
print(('No product formulas found for sample_name: ' + sample_name_I + ', met_id: ' + met_id_I + ', and precursor_formula: ' + precursor_formula_I));
return product_formulas_O;
else:
for d in data:
product_formulas_O.append(d.product_formula);
return product_formulas_O;
except SQLAlchemyError as e:
print(e);
# query normalized intensity from data_stage01_isotopomer_peakSpectrum
def get_normalizedIntensity_experimentIDAndSampleAbbreviationAndTimePointAndReplicateNumberAndMetIDAndPrecursorFormulaAndMassAndScanType_peakSpectrum(self,experiment_id_I,sample_name_abbreviation_I,time_point_I,replicate_number_I,met_id_I,precursor_formula_I,precursor_mass_I,scan_type_I):
'''Querry peak data for a specific experiment_id, sample_name, met_id, and scan type'''
try:
data | |
<reponame>sensen1/sage
r"""
Unique Representation
Abstract classes for cached and unique representation behavior.
.. SEEALSO::
:class:`sage.structure.factory.UniqueFactory`
AUTHORS:
- <NAME> (2008): Original version.
- <NAME> (2013-02): Separate cached and unique representation.
- <NAME>. King (2013-08): Extended documentation.
What is a cached representation?
================================
Instances of a class have a *cached representation behavior* when several
instances constructed with the same arguments share the same memory
representation. For example, calling twice::
sage: G = SymmetricGroup(6)
sage: H = SymmetricGroup(6)
to create the symmetric group on six elements gives back the same
object::
sage: G is H
True
This is a standard design pattern. Besides saving memory, it allows for
sharing cached data (say representation theoretical information about a
group). And of course a look-up in the cache is faster than the creation of a
new object.
Implementing a cached representation
------------------------------------
Sage provides two standard ways to create a cached representation:
:class:`CachedRepresentation` and
:class:`~sage.structure.factory.UniqueFactory`. Note that, in spite of its
name, :class:`~sage.structure.factory.UniqueFactory` does not ensure *unique*
representation behaviour, which will be explained below.
Using :class:`CachedRepresentation`
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
It is often very easy to use :class:`CachedRepresentation`: One simply writes
a Python class and adds :class:`CachedRepresentation` to the list of base
classes. If one does so, then the arguments used to create an instance of this
class will by default also be used as keys for the cache::
sage: from sage.structure.unique_representation import CachedRepresentation
sage: class C(CachedRepresentation):
....: def __init__(self, a, b=0):
....: self.a = a
....: self.b = b
....: def __repr__(self):
....: return "C(%s, %s)"%(self.a, self.b)
sage: a = C(1)
sage: a is C(1)
True
In addition, pickling just works, provided that Python is able to look up the
class. Hence, in the following two lines, we explicitly put the class into the
``__main__`` module. This is needed in doctests, but not in an interactive
session::
sage: import __main__
sage: __main__.C = C
sage: loads(dumps(a)) is a
True
Often, this very easy approach is sufficient for applications. However, there
are some pitfalls. Since the arguments are used for caching, all arguments
must be hashable, i.e., must be valid as dictionary keys::
sage: C((1,2))
C((1, 2), 0)
sage: C([1,2])
Traceback (most recent call last):
...
TypeError: unhashable type: 'list'
In addition, equivalent ways of providing the arguments are *not*
automatically normalised when forming the cache key, and hence different but
equivalent arguments may yield distinct instances::
sage: C(1) is C(1,0)
False
sage: C(1) is C(a=1)
False
sage: repr(C(1)) == repr(C(a=1))
True
It should also be noted that the arguments are compared by equality, not by
identity. This is often desired, but can imply subtle problems. For example,
since ``C(1)`` already is in the cache, and since the unit elements in
different finite fields are all equal to the integer one, we find::
sage: GF(5)(1) == 1 == GF(3)(1)
True
sage: C(1) is C(GF(3)(1)) is C(GF(5)(1))
True
But ``C(2)`` is not in the cache, and the number two is not equal in different
finite fields (i.e., ``GF(5)(2) == GF(3)(2)`` returns ``False``), even
though it is equal to the number two in the ring of integers
(``GF(5)(2) == 2 == GF(3)(2)`` returns ``True``; equality is not transitive
when comparing elements of *distinct* algebraic structures!). Hence, we
have::
sage: GF(5)(2) == GF(3)(2)
False
sage: C(GF(3)(2)) is C(GF(5)(2))
False
Normalising the arguments
.........................
:class:`CachedRepresentation` uses the metaclass
:class:`~sage.misc.classcall_metaclass.ClasscallMetaclass`. Its
``__classcall__`` method is a
:class:`~sage.misc.cachefunc.WeakCachedFunction`. This function creates an
instance of the given class using the given arguments, unless it finds the
result in the cache. This has the following implications:
- The arguments must be valid dictionary keys (i.e., they must be hashable;
see above).
- It is a weak cache, hence, if the user does not keep a reference to the
resulting instance, then it may be removed from the cache during garbage
collection.
- It is possible to preprocess the input arguments by implementing a
``__classcall__`` or a ``__classcall_private__`` method, but in order to
benefit from caching, :meth:`CachedRepresentation.__classcall__` should at
some point be called.
.. NOTE::
For technical reasons, it is needed that ``__classcall__`` respectively
``__classcall_private__`` are "static methods", i.e., they are callable
objects that do not bind to an instance or class. For example, a
:class:`~sage.misc.cachefunc.cached_function` can be used here, because it
is callable, but does not bind to an instance or class, because it has no
``__get__()`` method. A usual Python function, however, has a
``__get__()`` method and would thus under normal circumstances bind to an
instance or class, and thus the instance or class would be passed to the
function as the first argument. To prevent a callable object from being
bound to the instance or class, one can prepend the ``@staticmethod``
decorator to the definition; see :class:`staticmethod`.
For more on Python's ``__get__()`` method, see:
    https://docs.python.org/3/howto/descriptor.html
.. WARNING::
If there is preprocessing, then the preprocessed arguments
passed to :meth:`CachedRepresentation.__classcall__` must be invariant
under the preprocessing. That is to say, preprocessing the input
arguments twice must have the same effect as preprocessing the input
arguments only once. That is to say, the preprocessing must be idempotent.
The reason for this warning lies in the way pickling is implemented. If the
preprocessed arguments are passed to
:meth:`CachedRepresentation.__classcall__`, then the resulting instance will
store the *preprocessed* arguments in some attribute, and will use them for
pickling. If the pickle is unpickled, then preprocessing is applied to the
preprocessed arguments---and this second round of preprocessing must not
change the arguments further, since otherwise a different instance would be
created.
We illustrate the warning by an example. Imagine that one has instances that
are created with an integer-valued argument, but only depend on the *square*
of the argument. It would be a mistake to square the given argument during
preprocessing::
sage: class WrongUsage(CachedRepresentation):
....: @staticmethod
....: def __classcall__(cls, n):
....: return super(WrongUsage,cls).__classcall__(cls, n^2)
....: def __init__(self, n):
....: self.n = n
....: def __repr__(self):
....: return "Something(%d)"%self.n
sage: import __main__
sage: __main__.WrongUsage = WrongUsage # This is only needed in doctests
sage: w = WrongUsage(3); w
Something(9)
sage: w._reduction
(<class '__main__.WrongUsage'>, (9,), {})
Indeed, the reduction data are obtained from the preprocessed argument. By
consequence, if the resulting instance is pickled and unpickled, the argument
gets squared *again*::
sage: loads(dumps(w))
Something(81)
Instead, the preprocessing should only take the absolute value of the given
argument, while the squaring should happen inside of the ``__init__`` method,
where it won't mess with the cache::
sage: class BetterUsage(CachedRepresentation):
....: @staticmethod
....: def __classcall__(cls, n):
....: return super(BetterUsage, cls).__classcall__(cls, abs(n))
....: def __init__(self, n):
....: self.n = n^2
....: def __repr__(self):
....: return "SomethingElse(%d)"%self.n
sage: __main__.BetterUsage = BetterUsage # This is only needed in doctests
sage: b = BetterUsage(3); b
SomethingElse(9)
sage: loads(dumps(b)) is b
True
sage: b is BetterUsage(-3)
True
In our next example, we create a cached representation class ``C`` that
returns an instance of a sub-class ``C1`` or ``C2`` depending on the given
arguments. This is implemented in a static ``__classcall_private__`` method of
``C``, letting it choose the sub-class according to the given arguments. Since
a ``__classcall_private__`` method will be ignored on sub-classes, the caching
of :class:`CachedRepresentation` is available to both ``C1`` and ``C2``. But
for illustration, we overload the static ``__classcall__`` method on ``C2``,
doing some argument preprocessing. We also create a sub-class ``C2b`` of
``C2``, demonstrating that the ``__classcall__`` method is used on the
sub-class (in contrast to a ``__classcall_private__`` method!). ::
sage: class C(CachedRepresentation):
....: @staticmethod
....: def __classcall_private__(cls, n, implementation=0):
....: if not implementation:
....: return C.__classcall__(cls, n)
....: if implementation==1:
....: return C1(n)
....: if implementation>1:
....: return C2(n,implementation)
....: def __init__(self, n):
....: self.n = n
....: def __repr__(self):
....: return "C(%d, 0)"%self.n
sage: class C1(C):
....: def __repr__(self):
....: return "C1(%d)"%self.n
sage: class C2(C):
....: @staticmethod
....: def __classcall__(cls, n, implementation=0):
....: if implementation:
....: return super(C2, cls).__classcall__(cls, (n,)*implementation)
....: return super(C2, cls).__classcall__(cls, n)
....: def __init__(self, t):
....: self.t = t
....: def __repr__(self):
....: return "C2(%s)"%repr(self.t)
sage: class C2b(C2):
....: def __repr__(self):
....: return "C2b(%s)"%repr(self.t)
sage: __main__.C2 = C2 # not needed in an interactive session
sage: __main__.C2b = C2b
In the above example, ``C`` drops the argument ``implementation`` if it
evaluates to ``False``, and since the cached ``__classcall__`` is called in
this case, we have::
sage: C(1)
C(1, 0)
sage: C(1) is C(1,0)
True
sage: C(1) is C(1,0) is C(1,None) is C(1,[])
True
(Note that we | |
<reponame>jeffbass/yin-yang-ranch
"""data_tools: data tools including classes, methods and attributes
Provides a variety of classes to hold, transfer, analyze, transform and query
the various data in the data library and in the imagehubs accessible to the
librarian.
Copyright (c) 2018 by <NAME>.
License: MIT, see LICENSE for more details.
"""
import sys
import pprint
import logging
import threading
import subprocess
from time import sleep
from pathlib import Path
from datetime import datetime
from collections import deque
from helpers.utils import YamlOptionsError
class HubData:
""" Methods and attributes to transfer data from imagehub data files
Provides methods for Librarian to access imagehub data, including event
logs and images stored by the imagehub.
Parameters:
settings (Settings object): settings object created from YAML file
"""
    def __init__(self, settings):
        """Set up imagehub event-log access and start the log-watching thread.

        Validates the log directory named in ``settings``, loads the recent
        log files into ``self.event_data``, then starts a daemon thread
        (target ``self.watch_for_new_log_lines``) that checks the current
        log file for newly appended lines every ``log_check_interval``
        seconds.

        Parameters:
            settings: options object; only ``settings.log_directory`` (the
                imagehub event-log directory path) is read here.

        Raises:
            YamlOptionsError: if the log directory does not exist or is not
                a directory.
        """
        # log directory and log file refer to the event log of the imagehub
        ld = Path(settings.log_directory)
        if not ld.exists():
            raise YamlOptionsError('Log directory in YAML file does not exist.')
        elif not ld.is_dir():
            raise YamlOptionsError('Log directory in YAML file is not a directory.')
        self.log_dir = ld
        self.max_days = 3 # Number of days of hub log files to be loaded
        self.max_history = 300 # Maximum size of the event_deque history
        self.event_data = {} # see description in load_log_data function
        self.newest_log_line = '' # keep track of last text line read from log
        self.line_count = 0 # total lines read into event_data since program startup; useful for librarian status
        # RLock because event_data is read/written from this thread and the
        # watcher thread started below.
        self.event_data_lock = threading.RLock()
        self.load_log_data(self.log_dir, self.max_days) # initial load of self.event_data
        # pprint.pprint(self.event_data)
        # start thread to receive & add data to self.event_data as new lines
        # are added to the imagehub log.
        self.log_check_interval = 2 # seconds: how often check for added log lines
        t = threading.Thread(target=self.watch_for_new_log_lines)
        # print('Starting watch_for_new_log_lines thread.')
        t.daemon = True # allows this thread to be auto-killed on program exit
        t.name = 'watch_for_new_log_lines' # naming the thread helps with debugging
        t.start()
        """ # this is the block of lines used to test self.add_new_log_lines()
        print('Total number of lines read from all log files:', self.line_count)
        print('BEFORE call to add_new_log_lines: Newest log line:', self.newest_log_line)
        # testing the add lines modules
        query = input('Add some lines to imagehub.log, then press enter. ')
        self.add_new_log_lines()
        print('AFTER call to add_new_log_lines: Newest log line:', self.newest_log_line)
        pprint.pprint(self.event_data)
        print('Total number of lines read from all log files:', self.line_count)
        print('End of test')
        sys.exit() """
def load_log_data(self, ld, max_days):
""" read the imagehub log file(s), loading the event_deque
This method reads event lines from the log files. It always reads the
current log file. It also reads up to "max_days" additional log files.
Event log files are created by the imagehub.py program. They are created
using the Python logging module and rotate daily at midnight.
Event log files are "rotated" using Python's TimedRotatingFileHandler:
This means the imagehub log files have names like:
lf.log, lf.log.2020-10-22, lf.log.2020-10-21, lf.log.2020-10-20, ...
where:
lf.log is the "current log" that is currently updated by imagehub.
lf.log.<<date>> is the name pattern for the logs rotated each day.
The log files are loaded in time order. The oldest log file (up to
'max_days' old) is loaded. Then the next oldest log file is loaded,
then the next oldest log file until the current_log file, which is
always loaded last. The lines from each log file are loaded into the
event_data deque by the self.load_log_event_lines method.
Parameters:
ld (PosixPath): imagehub log directory containing event log files
max_days (int): number of additional log file day(s) to load
"""
all_logs = list(ld.glob('*log*')) # all files that have *log* in them
current_log = list(ld.glob('*log')) # current log ends in 'log'
if not current_log:
raise YamlOptionsError('There is no file ending in "log".')
elif len(current_log) > 1:
raise YamlOptionsError('More than one file ending in "log".')
else:
current_log = current_log[0] # now current log is PosixPath file
self.log_file = str(current_log) # string version of log file name
all_logs.remove(current_log) # keep only the 'dated' logs
logs_to_load = list()
if all_logs: # we have at least one 'dated' log...
# ...so get the most recent 'max_days' of them
all_logs.sort(reverse=True)
logs_to_load = all_logs[:self.max_days] # most recent ones
logs_to_load.sort() # sort them in time order: oldest to newest
logs_to_load.append(current_log) # append the current log last
for log in logs_to_load:
with open(log, 'r') as f:
lines = f.readlines()
self.load_log_event_lines(lines)
def load_log_event_lines(self, lines):
""" loads lines from a log file into the event_data dict()
Loads event lines from the log files. Loads one line at
a time, adding the event data to the self.event_data dict() which is a
nested dictionary. Example data values from self.event_data:
node event deque of tuples of data values
| |
event_data['barn']['motion'] values[0] = (datetime, 'moving') # current
values[1] = (datetime, 'moving') # previous
values[2] = (datetime, 'moving') # earlier
Each data tuple is (datetime, event_value) where each
event_value is a measure like "77 degrees" or a state like "motion".
This deque is of fixed length, so as new data points are left_appended,
those data points beyond max_history are discarded from the event_data
dictionary (but not from the event log files; those are "read only"
from the perspective of the librarian; they are written ONLY by the
imagehub program).
Parameters:
lines (list): lines from an imagehub event log file
"""
for line in lines:
self.line_count += 1
# node_tuple is (node, event, when, value)
node_tuple = self.parse_log_line(line) # returns "None" if invalid
if node_tuple: # only load a valid node_tuple that is not "None"
self.load_log_event(node_tuple)
self.newest_log_line = lines[-1]
def load_log_event(self, node_tuple):
""" load a single node event into the self.event_data dict()
Creates a single entry in the self.event_data dict() which holds all
the recent events logged from imagenodes.
'node_tuple' objects are parsed from imagehub log lines by the method
self.parse_log_line(). This method creates entries in self.event_data.
node event deque of tuples of data values
| |
event_data['barn']['motion'] values[0] = (datetime, 'moving') # current
values[1] = (datetime, 'moving') # previous
values[2] = (datetime, 'moving') # earlier
Each data tuple is (datetime, event_value) where each
event_value is a measure like "77 degrees" or a state like "motion".
This deque is of fixed length, so as new data points are left_appended,
those data points beyond max_history are discarded from the event_data
dictionary (but not from the event log files; those are "read only"
from the perspective of the librarian; they are written ONLY by the
imagehub program).
All string values in the tuple are stripped of whitespace and converted
to lower case: 'node', 'event', 'value'.
'when' is a datetime value and is stored as is.
Parameters:
node_tuple (tuple): parsed values from a single event log line
"""
# node_tuple is (node, event, when, value)
node = node_tuple[0].strip().lower()
event = node_tuple[1].strip().lower()
when = node_tuple[2]
value = node_tuple[3].strip().lower()
with self.event_data_lock:
if node not in self.event_data:
self.event_data[node] = {}
if event not in self.event_data[node]:
self.event_data[node][event] = deque(maxlen=self.max_history)
self.event_data[node][event].appendleft((when, value))
def parse_log_line(self, line):
""" parse a single line from a log file returning a tuple of values
Parses a single event line of text from a log file and returns a tuple
(node_name, event_type, <<datetime>>, event_value)
An event_value is a measure like "77 degrees" or a state like "motion".
This deque is of fixed length, so as new data points are left_appended,
those data points beyond 'max_history' are discarded from the event_data
dictionary (but not from the event log files; those are "read only"
from the perspective of the librarian; they are written ONLY by the
imagehub program).
Example:
Input Log data lines like these:
2020-06-09 18:27:11,776 ~ Driveway Mailbox|motion|moving
2020-06-09 18:33:15,788 ~ Barn|Temp|83 F
Return tuples like these:
(Driveway Mailbox, motion, <<datetime>>, moving)
(Barn, Temp, <<datetime>>, 83)
Parameters:
line (str): a single log line read from a log file
Returns:
tuple (node, event, when, value)
OR
None # if there is not a valid datetime in beginning of line
"""
two_parts = line.split('~')
part1 = two_parts[0].strip()
try:
when = datetime.strptime(part1, "%Y-%m-%d %H:%M:%S,%f")
except ValueError:
return None # Every valid line has a valid datetime
part2 = two_parts[1].rstrip(' F\n').strip().split('|')
if len(part2) < | |
# This software was developed by employees of the National Institute of
# Standards and Technology (NIST), an agency of the Federal Government.
# Pursuant to title 17 United States Code Section 105, works of NIST employees
# are not subject to copyright protection in the United States and are
# considered to be in the public domain. Permission to freely use, copy,
# modify, and distribute this software and its documentation without fee is
# hereby granted, provided that this notice and disclaimer of warranty appears
# in all copies.
#
# THE SOFTWARE IS PROVIDED 'AS IS' WITHOUT ANY WARRANTY OF ANY KIND, EITHER
# EXPRESSED, IMPLIED, OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, ANY WARRANTY
# THAT THE SOFTWARE WILL CONFORM TO SPECIFICATIONS, ANY IMPLIED WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND FREEDOM FROM
# INFRINGEMENT, AND ANY WARRANTY THAT THE DOCUMENTATION WILL CONFORM TO THE
# SOFTWARE, OR ANY WARRANTY THAT THE SOFTWARE WILL BE ERROR FREE. IN NO EVENT
# SHALL NIST BE LIABLE FOR ANY DAMAGES, INCLUDING, BUT NOT LIMITED TO, DIRECT,
# INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES, ARISING OUT OF, RESULTING FROM,
# OR IN ANY WAY CONNECTED WITH THIS SOFTWARE, WHETHER OR NOT BASED UPON
# WARRANTY, CONTRACT, TORT, OR OTHERWISE, WHETHER OR NOT INJURY WAS SUSTAINED
# BY PERSONS OR PROPERTY OR OTHERWISE, AND WHETHER OR NOT LOSS WAS SUSTAINED
# FROM, OR AROSE OUT OF THE RESULTS OF, OR USE OF, THE SOFTWARE OR SERVICES
# PROVIDED HEREUNDER. Distributions of NIST software should also include
# copyright and licensing statements of any third-party software that are
# legally bundled with the code in compliance with the conditions of those
# licenses.
import contextlib
import copy
from ctypes import ArgumentError
from dataclasses import dataclass
import importlib
import inspect
import os
import sys
import time
import traceback
from functools import wraps, partial
from pathlib import Path
from typing import Any
import pandas as pd
from . import _device as core
from . import util as util
EMPTY = inspect.Parameter.empty
@contextlib.contextmanager
def null_context(owner):
    """No-op context manager: yield *owner* unchanged, with no setup or cleanup."""
    yield owner
class NeverRaisedException(BaseException):
    """Exception type that no code raises.

    NOTE(review): presumably used as a default "exceptions to catch" value so
    that an ``except NeverRaisedException`` clause matches nothing; its call
    sites are outside this chunk -- confirm.
    """
    pass
_INSPECT_SKIP_PARAMETER_KINDS = (
# remember only named keywords, not "*args" and "**kwargs" in call signatures
inspect._ParameterKind.VAR_KEYWORD,
inspect._ParameterKind.VAR_POSITIONAL,
)
def _filter_signature_parameters(params: dict):
    """Return a name->Parameter dict of the individually named parameters.

    Skips the first entry (the bound self/owner parameter) and any
    *args/**kwargs entries; insertion order of the rest is preserved.
    """
    named = list(params.values())[1:]  # drop the bound/owner parameter
    return {
        param.name: param
        for param in named
        if param.kind not in _INSPECT_SKIP_PARAMETER_KINDS
    }
class notify:
    """Singleton notification handler shared by all Rack instances.

    All state is class-level: the class acts as a global registry of
    callback handlers for three event streams ('calls', 'returns',
    'iteration').  Handlers receive a dict with keys
    name/owner/old/new; 'old' is always None here.
    """
    # the global mapping of references to notification callbacks,
    # keyed by event-stream name
    _handlers = dict(returns=set(), calls=set(), iteration=set())
    # owners currently muted via hold_owner_notifications()
    _owner_hold_list = set()
    @classmethod
    def clear(cls):
        # drop every registered handler on all three streams
        cls._handlers = dict(returns=set(), calls=set(), iteration=set())
    @classmethod
    def hold_owner_notifications(cls, *owners):
        """Temporarily suppress all event notifications from *owners*."""
        for owner in owners:
            cls._owner_hold_list.add(owner)
    @classmethod
    def allow_owner_notifications(cls, *owners):
        """Re-enable notifications for owners previously held."""
        for owner in owners:
            try:
                cls._owner_hold_list.remove(owner)
            except KeyError:
                # owner was not held; nothing to do
                pass
    @classmethod
    def return_event(cls, owner, returned: dict):
        """Broadcast the dict returned by an owner's method to 'returns' handlers."""
        if owner in cls._owner_hold_list:
            return
        if not isinstance(returned, dict):
            raise TypeError(f"returned data was {repr(returned)}, which is not a dict")
        for handler in cls._handlers["returns"]:
            handler(dict(name=owner._owned_name, owner=owner, old=None, new=returned))
    @classmethod
    def call_event(cls, owner, parameters: dict):
        """Broadcast the keyword arguments of a method call to 'calls' handlers."""
        if owner in cls._owner_hold_list:
            return
        if not isinstance(parameters, dict):
            raise TypeError(
                f"parameters data was {repr(parameters)}, which is not a dict"
            )
        for handler in cls._handlers["calls"]:
            handler(dict(name=owner._owned_name, owner=owner, old=None, new=parameters))
    @classmethod
    def call_iteration_event(
        cls, owner, index: int, step_name: str = None, total_count: int = None
    ):
        """Broadcast per-row progress (index/step_name/total_count) to 'iteration' handlers."""
        if owner in cls._owner_hold_list:
            return
        for handler in cls._handlers["iteration"]:
            handler(
                dict(
                    name=owner._owned_name,
                    owner=owner,
                    old=None,
                    new=dict(index=index, step_name=step_name, total_count=total_count),
                )
            )
    @classmethod
    def observe_returns(cls, handler):
        """Register *handler* (any callable) for 'returns' events."""
        if not callable(handler):
            raise AttributeError(f"{repr(handler)} is not callable")
        cls._handlers["returns"].add(handler)
    @classmethod
    def observe_calls(cls, handler):
        """Register *handler* (any callable) for 'calls' events."""
        if not callable(handler):
            raise AttributeError(f"{repr(handler)} is not callable")
        cls._handlers["calls"].add(handler)
    @classmethod
    def observe_call_iteration(cls, handler):
        """Register *handler* (any callable) for 'iteration' events."""
        if not callable(handler):
            raise AttributeError(f"{repr(handler)} is not callable")
        cls._handlers["iteration"].add(handler)
    @classmethod
    def unobserve_returns(cls, handler):
        """Unregister a 'returns' handler; raises KeyError if it is not registered."""
        if not callable(handler):
            raise AttributeError(f"{repr(handler)} is not callable")
        cls._handlers["returns"].remove(handler)
    @classmethod
    def unobserve_calls(cls, handler):
        """Unregister a 'calls' handler; raises KeyError if it is not registered."""
        if not callable(handler):
            raise AttributeError(f"{repr(handler)} is not callable")
        cls._handlers["calls"].remove(handler)
    @classmethod
    def unobserve_call_iteration(cls, handler):
        """Unregister an 'iteration' handler; raises KeyError if it is not registered."""
        if not callable(handler):
            raise AttributeError(f"{repr(handler)} is not callable")
        cls._handlers["iteration"].remove(handler)
class CallSignatureTemplate:
    """Resolve a "template" callable whose signature supplies keyword parameters.

    The wrapped target may be None, a plain callable, or a placeholder
    object whose ``__name__`` is looked up on an owner's ``_ownables``
    at resolution time.
    """
    def __init__(self, target):
        # target: None, a callable, or a placeholder resolved per-owner later
        self.target = target
    def get_target(self, owner):
        """Return the concrete callable for *owner*, or None if not decidable yet.

        Raises:
            TypeError: if the name resolves to a non-callable object.
        """
        if self.target is None or callable(self.target):
            return self.target
        if isinstance(owner, util.Ownable) and not hasattr(self.target, "__name__"):
            # might have no signature yet if it has not been claimed by its owner
            return None
        target = owner._ownables.get(self.target.__name__, self.target)
        if not callable(target):
            # Fix: the previous message used getattr(..., '__name__') with the
            # *string* '__name__' as the fallback, so it printed the literal
            # text "__name__" instead of the object's name.
            display_name = getattr(
                target, "_owned_name", getattr(target, "__name__", repr(target))
            )
            raise TypeError(f"'{display_name}' is not callable")
        return target
    def get_keyword_parameters(self, owner, skip_names):
        """Return the keyword-only parameters of the resolved target's
        ``__call__`` signature, excluding any names in *skip_names*."""
        template_sig = inspect.signature(self.get_target(owner).__call__)
        return {
            name: p
            for name, p in template_sig.parameters.items()
            if name not in skip_names and p.kind in (p.KEYWORD_ONLY,)
        }
class MethodTaggerDataclass:
    """Base class for dataclass-style method decorators.

    Subclasses decorated with ``@dataclass`` act as decorators: calling an
    instance on a function stashes the instance's annotated field values in
    the shared ``pending`` dict, keyed by the decorated function, and
    returns the function unchanged.
    """
    # class-level registry shared by every tagger subclass
    pending = {}
    def __call__(self, func):
        tags = {name: getattr(self, name) for name in self.__annotations__}
        self.pending.setdefault(func, {}).update(tags)
        return func
@dataclass
class rack_input_table(MethodTaggerDataclass):
    """Tag a method defined in a Rack to support execution from a flat table.

    In practice, this often means a very long argument list.

    Arguments:
        table_path: location of the input table
    """
    table_path: str
@dataclass
class rack_kwargs_template(MethodTaggerDataclass):
    """Tag a method defined in a Rack to replace its ``**kwargs`` argument
    with the keyword-only arguments found in the signature of a template
    callable.

    Arguments:
        template: callable whose keyword-only arguments replace the tagged
            method's variable keyword arguments (``**kwargs``)
    """
    template: callable = None
class rack_kwargs_skip(MethodTaggerDataclass):
    """Tag a method defined in a Rack to omit the named arguments when its
    ``**kwargs`` are expanded from a template (see ``rack_kwargs_template``).

    Arguments:
        *arg_names: argument names to omit
    """
    # annotated so MethodTaggerDataclass.__call__ records it as a tag value
    # NOTE(review): stores a tuple despite the ``list`` annotation
    skip: list = None
    def __init__(self, *arg_names):
        self.skip = arg_names
class RackMethod(util.Ownable):
"""a wrapper that is applied behind the scenes in Rack classes to support introspection"""
def __init__(self, owner, name: str, kwdefaults: dict = {}):
super().__init__()
# def ownable(obj, name):
# return isinstance(getattr(self._owner, name), util.Ownable)
self._owner = owner
cls = owner.__class__
obj = getattr(cls, name)
# overwrite the namespace with tags from the table input
tags = MethodTaggerDataclass.pending.pop(obj, {})
if isinstance(obj, RackMethod):
self._wrapped = obj._wrapped
self._kwdefaults = dict(obj._kwdefaults)
self._callable_template = obj._callable_template
self.tags = obj.tags
else:
self._wrapped = obj
self._kwdefaults = kwdefaults
self._callable_template = CallSignatureTemplate(tags.pop("template", None))
self.tags = tags
# self.__call__.__name__ = self.__name__ = obj.__name__
# self.__qualname__ = obj.__qualname__
self.__doc__ = obj.__doc__
self.__name__ = name
self.__qualname__ = getattr(obj, "__qualname__", obj.__class__.__qualname__)
self._apply_signature()
setattr(owner, name, self)
def iterate_from_csv(self, path):
"""call the BoundSequence for each row in a csv table.
keyword argument names are taken from the column header
(0th row). keyword values are taken from corresponding column in
each row.
"""
table = pd.read_csv(path, index_col=0)
for i, row in enumerate(table.index):
util.logger.info(
f"{self._owned_name} from '{str(path)}' "
f"- '{row}' ({i+1}/{len(table.index)})"
)
notify.call_iteration_event(self, i, row, len(table.index))
yield row, self(**table.loc[row].to_dict())
debug = None
@classmethod
def from_method(self, method):
"""make a new RackMethod instance by copying another"""
return RackMethod(method._owner, method.__name__, method._kwdefaults)
    def __copy__(self):
        # copy.copy() support: build a fresh wrapper around the same target
        return self.from_method(self)
    def __deepcopy__(self, memo=None):
        # copy.deepcopy() support: identical to __copy__; the owner and the
        # wrapped callable are intentionally shared, not duplicated
        return self.from_method(self)
    def __owner_subclass__(self, owner_cls):
        # allow the owner class a chance to set up self.
        super().__owner_subclass__(owner_cls)
        # re-derive __signature__ now that the owner subclass is known
        self._apply_signature()
    def _apply_signature(self):
        """Recompute and install ``self.__signature__``.

        Combines the wrapped method's signature with (a) keyword-default
        overrides from ``self._kwdefaults``, (b) keyword-only parameters
        expanded from the callable template (replacing ``**kwargs``), and
        (c) the skip-list from the ``skip`` tag.  Also records the Device
        dependencies the wrapped method accesses on its owner.

        ``__owner_subclass__`` must have been called first so that
        introspection on ``self._callable_template`` can resolve.
        """
        self.__call__ = util.copy_func(self.__call__)
        # note the devices needed to execute this function
        if isinstance(self._owner, Rack):
            annotations = getattr(self._owner, "__annotations__", {})
            # set logic to identify Device dependencies: annotated attributes
            # that the wrapped method actually accesses
            available = {getattr(self._owner, name) for name in annotations}
            accessed = {
                getattr(self._owner, name)
                for name in util.accessed_attributes(self._wrapped)
                if not name.startswith("_") and hasattr(self._owner, name)
            }
            self.dependencies = available.intersection(accessed)
        else:
            self.dependencies = set()
        # get the signature, apply parameter defaults from self._kwdefaults
        sig = inspect.signature(self._wrapped)
        params = dict(sig.parameters)
        # replace the **kwargs with specific keywords from the template function
        skip_param_names = list(self.tags.get("skip", []))
        if self._callable_template.get_target(self._owner) is not None:
            # find the VAR_KEYWORD (**kwargs) parameter to be replaced
            for kws_name, param in params.items():
                if param.kind is param.VAR_KEYWORD:
                    break
            else:
                raise ArgumentError(
                    f'cannot apply keyword arguments template to "{self._owned_name or self.__name__}", which does not accept keyword arguments'
                )
            try:
                template_params = self._callable_template.get_keyword_parameters(
                    self._owner, skip_param_names
                )
            except TypeError:
                # template not resolvable yet; leave the signature unchanged
                pass
            else:
                # drop **kwargs itself and splice in the template's keywords
                skip_param_names.append(kws_name)
                params = dict(params, **template_params)
        # apply updated defaults
        sig = sig.replace(
            parameters=(
                p.replace(default=self._kwdefaults.get(name, p.default))
                for name, p in params.items()
                if name not in skip_param_names
            )
        )
        # set the call signature shown by help(), or with ? in ipython/jupyter
        self.__signature__ = self.__call__.__signature__ = sig
# | |
= profile_df[profile_df.z > -3.0]
if (profile_df.z.min() < datum) & (profile_df.z.max() > datum):
site_profiles.append(profile_df)
# If list of profiles contain valid data
if len(site_profiles) > 0:
# Combine individual profiles into a single dataframe
profiles_df = pd.concat(site_profiles)
# Reproject coords to Albers
trans = Transformer.from_crs('EPSG:32756', 'EPSG:3577', always_xy=True)
profiles_df['x'], profiles_df['y'] = trans.transform(
profiles_df.x.values, profiles_df.y.values)
# Compute origin and end points for each profile
start_xy = profiles_df.groupby(['id'], as_index=False).first()[['id', 'x', 'y']]
end_xy = profiles_df.groupby(['id'], as_index=False).last()[['id', 'x', 'y']]
start_xy = start_xy.rename({'x': 'start_x', 'y': 'start_y'}, axis=1)
end_xy = end_xy.rename({'x': 'end_x', 'y': 'end_y'}, axis=1)
# Join origin and end points into dataframe
profiles_df = pd.merge(left=profiles_df, right=start_xy)
profiles_df = pd.merge(left=profiles_df, right=end_xy)
# Compute chainage
profiles_df['distance'] = profiles_df.apply(
lambda x: Point(x.start_x, x.start_y).distance(Point(x.x, x.y)), axis=1)
# Drop profiles that have been assigned incorrect profile IDs.
# To do this, we use a correlation test to determine whether x
# and y coordinates within each individual profiles fall along a
# straight line. If a profile has a low correlation (e.g. less
# than 99.9), it is likely that multiple profile lines have been
# incorrectly labelled with a single profile ID.
valid_profiles = lambda x: x[['x', 'y']].corr().abs().iloc[0, 1] > 0.99
drop = (~profiles_df.groupby('id').apply(valid_profiles)).sum()
profiles_df = profiles_df.groupby('id').filter(valid_profiles)
if drop.sum() > 0: print(f'\nDropping invalid profiles: {drop:<80}')
# Restrict profiles to data that falls ocean-ward of the top of
# the foredune (the highest point in the profile) to remove
# spurious validation points, e.g. due to a non-shoreline lagoon
# at the back of the profile
foredune_dist = profiles_df.groupby(['id', 'date']).apply(
lambda x: x.distance.loc[x.z.idxmax()]).reset_index(name='foredune_dist')
profiles_df = pd.merge(left=profiles_df, right=foredune_dist)
profiles_df = profiles_df.loc[(profiles_df.distance >=
profiles_df.foredune_dist)]
# Find location and distance to water for datum height (e.g. 0 m AHD)
intercept_df = profiles_df.groupby(['id', 'date']).apply(
waterline_intercept, z_val=datum).dropna()
# If the output contains data
if len(intercept_df.index) > 0:
# Join into dataframe
shoreline_dist = intercept_df.join(
profiles_df.groupby(['id', 'date']).first())
# Compute validation slope and join into dataframe
slope = val_slope(profiles_df, intercept_df, datum=datum)
shoreline_dist = shoreline_dist.join(slope.rename('slope'))
# Keep required columns
shoreline_dist = shoreline_dist[['beach', 'section', 'profile', 'name',
'source', 'foredune_dist', 'slope',
'start_x', 'start_y',
'end_x', 'end_y', f'{datum}_dist',
f'{datum}_x', f'{datum}_y']]
# Export to file
shoreline_dist.to_csv(fname_out)
else:
print(f'Skipping {fname_out:<80}', end='\r')
def preprocess_sadew(fname, datum=0, overwrite=False):
    """Pre-process a South Australia DEW hydrographic survey CSV into a
    standardised shoreline-position CSV (``output_data/sadew_<name>.csv``).

    Parameters
    ----------
    fname : str
        Path to the wide-format input CSV (one column per survey date).
    datum : int or float, optional
        Elevation (e.g. 0 m AHD) at which shoreline intercepts are computed.
    overwrite : bool, optional
        If True, regenerate the output file even if it already exists.
    """
    # Derive output filename from the input file's stem
    name = Path(fname).stem.split('_')[-1].lower().replace(' ', '')
    fname_out = f'output_data/sadew_{name}.csv'
    print(f'Processing {fname_out:<80}', end='\r')

    # Only do the work if the output is missing or overwrite was requested
    if not os.path.exists(fname_out) or overwrite:

        # Load data and set nodata values to NaN
        wide_df = pd.read_csv(fname).replace(-9999, np.nan)
        # FIX: raw string for the regex — '\.' is an invalid escape sequence
        # (DeprecationWarning on modern Python); r'\.' matches a literal dot
        # and drops pandas' duplicated-column suffixes ('x.1', ...).
        wide_df = wide_df.loc[:, ~wide_df.columns.str.contains(r'\.')]
        wide_df.columns = wide_df.columns.str.lower()
        wide_df = wide_df.rename({'easting': 'x', 'northing': 'y'}, axis=1)

        # Determine coordinate transform to use. These profile IDs are in
        # MGA zone 54 (EPSG:28354); all others use zone 53 (EPSG:28353).
        # Stored as a set for O(1) membership testing.
        utm_54_profiles = {730007, 715055, 200001, 710019, 615010, 725004,
                           725038, 200047, 200074, 200020, 200031, 710030,
                           620005, 200046, 710001, 620016, 735007, 200004,
                           710018, 625002, 200038, 730003, 710006, 725072,
                           715009, 200128, 725033, 200017, 200126, 200034,
                           200060, 200030, 615006, 710031, 200053, 735002,
                           200056, 200049, 200028, 200057, 615003, 715003,
                           735006, 200127, 725029, 725010, 200025, 200033,
                           200042, 730010, 200043, 200040, 730004, 200012,
                           200051, 710023, 620008, 725071, 625003, 730009,
                           615005, 615007, 615002, 200055, 730006, 735004,
                           200007, 715056, 200059, 625001, 200008, 735005,
                           715004, 200054, 730001, 725009, 710022, 725028,
                           730008, 200019, 200044, 200050, 200032, 200036,
                           710029, 730002, 200010, 615011, 200052, 200026,
                           710025, 200021, 200068, 200002, 715006, 725001,
                           200037, 200013, 710026, 620006, 710027, 200048,
                           620010, 730005, 710024, 200035, 200006, 620007,
                           710032, 200122, 725006, 201057, 200005, 725005,
                           710021, 200129, 615009, 715062, 710017, 200024,
                           735003, 200045, 200029, 620009, 200039, 200015,
                           200058, 200124, 620004, 620002, 200041, 620012,
                           625004, 615004, 725031, 200003, 725008, 200011}
        crs = 'EPSG:28354' if wide_df.profile[0] in utm_54_profiles else 'EPSG:28353'

        # Reproject coords to Australian Albers
        trans = Transformer.from_crs(crs, 'EPSG:3577', always_xy=True)
        wide_df['x'], wide_df['y'] = trans.transform(
            wide_df.x.values, wide_df.y.values)

        # Reshape into long format with each observation on a new row
        profile_df = pd.melt(wide_df.drop('sample_no', axis=1),
                             id_vars=['x', 'y', 'profile'],
                             value_name='z').dropna()

        # Extract date info from the former column headers (first character
        # is a prefix letter; remainder parses as DDMMYYYY)
        profile_df['date'] = profile_df['variable'].str[1:].str.strip()
        profile_df['date'] = pd.to_datetime(profile_df['date'],
                                            format='%d%m%Y',
                                            errors='coerce')
        profile_df = profile_df.drop('variable', axis=1)

        # Restrict to post 1987 and pre 2020
        profile_df = profile_df[(profile_df.date.dt.year > 1987) &
                                (profile_df.date.dt.year < 2020)]

        # Add unique ID column
        profile_df['beach'] = 'sadew'
        profile_df['section'] = 'all'
        profile_df['profile'] = profile_df['profile'].astype(str)
        profile_df['id'] = (profile_df.beach + '_' +
                            profile_df.section + '_' +
                            profile_df.profile)
        profile_df['source'] = 'hydrographic survey'
        profile_df['name'] = 'sadew'

        # Compute origin/end points for each profile.
        # NOTE(review): these take x/y from the first/last row of the whole
        # file by column position (2 and 3), not per profile — assumed
        # intentional for single-profile input files; TODO confirm.
        profile_df = profile_df.assign(start_x=wide_df.iloc[0, 2],
                                       start_y=wide_df.iloc[0, 3],
                                       end_x=wide_df.iloc[-1, 2],
                                       end_y=wide_df.iloc[-1, 3])

        # Compute chainage (straight-line distance from the profile origin)
        profile_df['distance'] = profile_df.apply(
            lambda x: math.hypot(x.x - x.start_x, x.y - x.start_y), axis=1)

        # Find location and distance to water for datum height (e.g. 0 m AHD)
        intercept_df = profile_df.groupby(['id', 'date']).apply(
            waterline_intercept, z_val=datum).dropna()

        # If the output contains data
        if len(intercept_df.index) > 0:

            # Join per-profile metadata onto the intercepts
            shoreline_dist = intercept_df.join(
                profile_df.groupby(['id', 'date']).first())

            # Compute validation slope and join into dataframe
            slope = val_slope(profile_df, intercept_df, datum=datum)
            shoreline_dist = shoreline_dist.join(slope.rename('slope'))

            # Keep required columns
            shoreline_dist = shoreline_dist[['beach', 'section', 'profile', 'name',
                                             'source', 'slope', 'start_x', 'start_y',
                                             'end_x', 'end_y', f'{datum}_dist',
                                             f'{datum}_x', f'{datum}_y']]

            # Export to file
            shoreline_dist.to_csv(fname_out)

    else:
        # FIX: report the output filename like the sibling preprocess_*
        # functions do (previously this printed the input path instead).
        print(f'Skipping {fname_out:<80}', end='\r')
def preprocess_sunshinecoast(site, datum=0, overwrite=False):
    """Pre-process Sunshine Coast survey spreadsheets for one site into a
    standardised shoreline-position CSV
    (``output_data/sunshinecoast_<beach>.csv``).

    Parameters
    ----------
    site : str
        Site directory name; characters after the first two become the
        beach name.
    datum : int or float, optional
        Elevation (e.g. 0 m AHD) at which shoreline intercepts are computed.
    overwrite : bool, optional
        If True, regenerate the output file even if it already exists.
    """
    # Standardise beach name from site name
    beach = site[2:].replace(' ', '').lower()
    fname_out = f'output_data/sunshinecoast_{beach}.csv'
    print(f'Processing {fname_out:<80}', end='\r')
    # Test if file exists
    if not os.path.exists(fname_out) or overwrite:
        # Obtain list of files
        file_list = glob.glob(f'input_data/sunshinecoast/Survey Database correct data/*{site}/**/*.xlsx',
                              recursive=True)
        # Output list to hold data
        site_profiles = []
        for i, survey_fname in enumerate(file_list):
            # Load data; rows 3+ of the first two columns are distance/z
            # pairs (rows 0-2 hold header metadata — see iloc reads below)
            survey_data = pd.read_excel(survey_fname)
            profile_df = survey_data.iloc[3:, :2].astype('float32')
            profile_df.columns = ['distance', 'z']
            # Get date from the filename string (normalising separators and
            # stripping revision suffixes before parsing the first 10 chars)
            date = (os.path.basename(survey_fname)
                    .replace('_', '/')
                    .replace('a', '')
                    .replace('b', '')
                    .replace('00', '01')[0:10])
            profile_df['date'] = pd.to_datetime(date)
            profile_df['beach'] = beach
            profile_df['section'] = 'na'
            # NOTE(review): split('/') assumes POSIX path separators and a
            # fixed directory depth — fragile on Windows; TODO confirm.
            profile_df['profile'] = survey_fname.split('/')[4]
            profile_df['name'] = 'sunshinecoast'
            profile_df['source'] = 'hydrographic survey'
            profile_df['id'] = (profile_df.beach + '_' +
                                profile_df.section + '_' +
                                profile_df.profile)
            # Assign header metadata (fixed spreadsheet cell positions)
            profile_df['start_x'] = survey_data.iloc[2, 2]
            profile_df['start_y'] = survey_data.iloc[2, 3]
            profile_df['bearing'] = survey_data.iloc[1, 3]
            # Fix Kings Beach: profile ID lives one directory deeper and
            # the bearing is hard-coded
            if 'KB' in survey_fname.split('/')[4]:
                profile_df['profile'] = survey_fname.split('/')[5]
                profile_df['bearing'] = 125.7
                profile_df['id'] = (profile_df.beach + '_' +
                                    profile_df.section + '_' +
                                    profile_df.profile)
            # Compute transect end point 8000 units out along the bearing
            profile_df['end_y'], profile_df['end_x'] = dist_angle(
                profile_df['start_x'].iloc[0],
                profile_df['start_y'].iloc[0],
                8000,
                profile_df['bearing'].iloc[0])
            # Filter to drop pre-1987 and deep water samples, add to list if any
            # data is available above 0 MSL
            if ((profile_df.date.min().year > 1987) &
                    (profile_df.z.min() < datum) &
                    (profile_df.z.max() > datum)):
                site_profiles.append(profile_df)
        # If list of profiles contain valid data
        if len(site_profiles) > 0:
            # Combine into a single dataframe
            profiles_df = pd.concat(site_profiles)
            # Add coordinates at every supplied distance along transects
            profiles_df[['x', 'y']] = profiles_df.apply(
                lambda x: pd.Series(dist_along_transect(
                    x.distance, x.start_x, x.start_y, x.end_x, x.end_y)), axis=1)
            # Convert coordinates to Australian Albers (from UTM zone 56S)
            trans = Transformer.from_crs('EPSG:32756', 'EPSG:3577', always_xy=True)
            profiles_df['start_x'], profiles_df['start_y'] = trans.transform(
                profiles_df['start_x'].values, profiles_df['start_y'].values)
            profiles_df['end_x'], profiles_df['end_y'] = trans.transform(
                profiles_df['end_x'].values, profiles_df['end_y'].values)
            profiles_df['x'], profiles_df['y'] = trans.transform(
                profiles_df['x'].values, profiles_df['y'].values)
            # Readjust distance measurements to Australian Albers. This ensures they
            # are a valid comparison against the Albers-based DEA Coastlines distances
            profiles_df['distance'] = profiles_df.apply(
                lambda x: Point(x.start_x, x.start_y).distance(Point(x.x, x.y)), axis=1)
            # Find location and distance to water for datum height (e.g. 0 m AHD)
            intercept_df = profiles_df.groupby(['id', 'date']).apply(
                waterline_intercept, z_val=datum).dropna()
            # If the output contains data
            if len(intercept_df.index) > 0:
                # Join per-profile metadata (modal value per group) onto
                # the intercepts
                shoreline_dist = intercept_df.join(
                    profiles_df.groupby(['id', 'date']).agg(
                        lambda x: pd.Series.mode(x).iloc[0]))
                # Compute validation slope and join into dataframe
                slope = val_slope(profiles_df, intercept_df, datum=datum)
                shoreline_dist = shoreline_dist.join(slope.rename('slope'))
                # Keep required columns
                shoreline_dist = shoreline_dist[['beach', 'section', 'profile', 'name',
                                                 'source', 'slope', 'start_x', 'start_y',
                                                 'end_x', 'end_y', f'{datum}_dist',
                                                 f'{datum}_x', f'{datum}_y']]
                # Export to file
                shoreline_dist.to_csv(fname_out)
    else:
        print(f'Skipping {fname_out:<80}', end='\r')
def preprocess_tasmarc(site, datum=0, overwrite=True):
def _tasmarc_metadata(profile):
# Open file
with open(profile, 'r') as profile_data:
# Load header data (first 20 rows starting with "#")
header = takewhile(lambda x: x.startswith(('#', '&', ' | |
get locked out message
response = self.client.get(ADMIN_LOGIN_URL)
self.assertEqual(response.status_code, 302)
self.assertEqual(response["Location"], "http://localhost/othe/login/")
@patch("defender.config.LOCKOUT_URL", "/o/login/")
def test_failed_login_redirect_to_url_local(self):
""" Test to make sure that after lockout we send to the correct
redirect URL """
for i in range(0, config.FAILURE_LIMIT):
response = self._login()
# Check if we are in the same login page
self.assertContains(response, LOGIN_FORM_KEY)
lockout_url = "/o/login/"
# So, we shouldn't have gotten a lock-out yet.
# But we should get one now, check redirect make sure it is valid.
response = self._login()
self.assertEqual(response.status_code, 302)
self.assertEqual(response["Location"], lockout_url)
# doing a get should also get locked out message
response = self.client.get(ADMIN_LOGIN_URL)
self.assertEqual(response.status_code, 302)
self.assertEqual(response["Location"], lockout_url)
@patch("defender.config.LOCKOUT_TEMPLATE", "defender/lockout.html")
def test_failed_login_redirect_to_template(self):
""" Test to make sure that after lockout we send to the correct
template """
for i in range(0, config.FAILURE_LIMIT):
response = self._login()
# Check if we are in the same login page
self.assertContains(response, LOGIN_FORM_KEY)
# So, we shouldn't have gotten a lock-out yet.
# But we should get one now, check template make sure it is valid.
response = self._login()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "defender/lockout.html")
# doing a get should also get locked out message
response = self.client.get(ADMIN_LOGIN_URL)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "defender/lockout.html")
@patch("defender.config.COOLOFF_TIME", 0)
def test_failed_login_no_cooloff(self):
""" failed login no cooloff """
for i in range(0, config.FAILURE_LIMIT):
response = self._login()
# Check if we are in the same login page
self.assertContains(response, LOGIN_FORM_KEY)
# So, we shouldn't have gotten a lock-out yet.
# But we should get one now, check redirect make sure it is valid.
response = self._login()
self.assertContains(response, self.PERMANENT_LOCKED_MESSAGE)
# doing a get should also get locked out message
response = self.client.get(ADMIN_LOGIN_URL)
self.assertContains(response, self.PERMANENT_LOCKED_MESSAGE)
def test_login_attempt_model(self):
    """A failed login persists exactly one AccessAttempt row with a
    usable string representation."""
    self.assertContains(self._login(), LOGIN_FORM_KEY)
    self.assertEqual(AccessAttempt.objects.count(), 1)
    self.assertIsNotNone(str(AccessAttempt.objects.all()[0]))
def test_is_valid_ip(self):
    """is_valid_ip() accepts well-formed IPv4/IPv6 and rejects junk."""
    accepted = [
        "192.168.0.1",
        "172.16.31.10",
        "8.8.8.8",
        "127.0.0.1",
        "2001:0db8:85a3:0000:0000:8a2e:0370:7334",
        "2001:db8:85a3:0:0:8a2e:370:7334",
        "2001:db8:85a3::8a2e:370:7334",
        "::ffff:192.0.2.128",
        "::ffff:8.8.8.8",
    ]
    rejected = ["fish", None, "", "0x41.0x41.0x41.0x41", "192.168.100.34.y"]
    for address in accepted:
        self.assertEqual(utils.is_valid_ip(address), True)
    for address in rejected:
        self.assertEqual(utils.is_valid_ip(address), False)
def test_parse_redis_url(self):
    """parse_redis_url() extracts host, db, password, and port from a
    redis:// URL (with and without password percent-decoding)."""
    # (url, password_quote, expected HOST, DB, PASSWORD, PORT)
    cases = [
        # full regular
        ("redis://user:password@localhost2:1234/2", False,
         "localhost2", 2, "password", 1234),
        # full non local
        ("redis://user:pass@www.localhost.com:1234/2", False,
         "www.localhost.com", 2, "<PASSWORD>", 1234),
        # no user name
        ("redis://password@localhost2:1234/2", False,
         "localhost2", 2, None, 1234),
        # no user name, with colon
        ("redis://:password@localhost2:1234/2", False,
         "localhost2", 2, "password", 1234),
        # empty URL falls back to defaults
        (None, False, "localhost", 0, None, 6379),
        # no db
        ("redis://:password@localhost2:1234", False,
         "localhost2", 0, "password", 1234),
        # no password
        ("redis://localhost2:1234/0", False,
         "localhost2", 0, None, 1234),
        # password with special characters, password_quote=True
        ("redis://:calmkart%23%40%21@localhost:6379/0", True,
         "localhost", 0, "<PASSWORD>#@!", 6379),
        # password without special characters, password_quote=True
        ("redis://:password@localhost2:1234", True,
         "localhost2", 0, "password", 1234),
    ]
    for url, password_quote, host, db, password, port in cases:
        conf = parse_redis_url(url, password_quote)
        self.assertEqual(conf.get("HOST"), host)
        self.assertEqual(conf.get("DB"), db)
        self.assertEqual(conf.get("PASSWORD"), password)
        self.assertEqual(conf.get("PORT"), port)
@patch("defender.config.DEFENDER_REDIS_NAME", "default")
def test_get_redis_connection_django_conf(self):
""" get the redis connection """
redis_client = get_redis_connection()
self.assertIsInstance(redis_client, Redis)
@patch("defender.config.DEFENDER_REDIS_NAME", "bad-key")
def test_get_redis_connection_django_conf_wrong_key(self):
""" see if we get the correct error """
error_msg = "The cache bad-key was not found on the django " "cache settings."
self.assertRaisesMessage(KeyError, error_msg, get_redis_connection)
def test_get_ip_address_from_request(self):
    """get_ip_address_from_request() normalises REMOTE_ADDR and falls
    back to 127.0.0.1 for invalid or missing values."""
    cases = [
        ("1.2.3.4", "1.2.3.4"),
        ("1.2.3.4 ", "1.2.3.4"),            # surrounding whitespace is stripped
        ("192.168.100.34.y", "127.0.0.1"),  # malformed address -> localhost
        ("cat", "127.0.0.1"),               # garbage -> localhost
    ]
    for remote_addr, expected in cases:
        request = HttpRequest()
        request.META["REMOTE_ADDR"] = remote_addr
        self.assertEqual(utils.get_ip_address_from_request(request), expected)
    # No REMOTE_ADDR at all also falls back to localhost.
    self.assertEqual(
        utils.get_ip_address_from_request(HttpRequest()), "127.0.0.1"
    )
@patch("defender.config.BEHIND_REVERSE_PROXY", True)
@patch("defender.config.REVERSE_PROXY_HEADER", "HTTP_X_PROXIED")
def test_get_ip_reverse_proxy_custom_header(self):
""" make sure the ip is correct behind reverse proxy """
req = HttpRequest()
req.META["HTTP_X_PROXIED"] = "1.2.3.4"
self.assertEqual(utils.get_ip(req), "1.2.3.4")
req = HttpRequest()
req.META["HTTP_X_PROXIED"] = "1.2.3.4, 5.6.7.8, 127.0.0.1"
self.assertEqual(utils.get_ip(req), "1.2.3.4")
req = HttpRequest()
req.META["REMOTE_ADDR"] = "1.2.3.4"
self.assertEqual(utils.get_ip(req), "1.2.3.4")
@patch("defender.config.BEHIND_REVERSE_PROXY", True)
@patch("defender.config.REVERSE_PROXY_HEADER", "HTTP_X_REAL_IP")
def test_get_user_attempts(self):
""" Get the user attempts make sure they are correct """
ip_attempts = random.randint(3, 12)
username_attempts = random.randint(3, 12)
for i in range(0, ip_attempts):
utils.increment_key(utils.get_ip_attempt_cache_key("1.2.3.4"))
for i in range(0, username_attempts):
utils.increment_key(utils.get_username_attempt_cache_key("foobar"))
req = HttpRequest()
req.POST["username"] = "foobar"
req.META["HTTP_X_REAL_IP"] = "1.2.3.4"
self.assertEqual(
utils.get_user_attempts(req), max(ip_attempts, username_attempts)
)
req = HttpRequest()
req.POST["username"] = "foobar"
req.META["HTTP_X_REAL_IP"] = "5.6.7.8"
self.assertEqual(utils.get_user_attempts(req), username_attempts)
req = HttpRequest()
req.POST["username"] = "barfoo"
req.META["HTTP_X_REAL_IP"] = "1.2.3.4"
self.assertEqual(utils.get_user_attempts(req), ip_attempts)
def test_admin(self):
    """ test the admin pages for this app """
    # The import itself is the test: a broken ModelAdmin registration
    # would raise here. The bare name reference keeps the import "used".
    from .admin import AccessAttemptAdmin
    AccessAttemptAdmin
def test_unblock_view_user_with_plus(self):
    """The unblock-username admin view resolves for usernames containing
    a plus sign; ``reverse()`` raises NoReverseMatch on failure.

    Regression test for #GH76.
    """
    url = reverse(
        "defender_unblock_username_view", kwargs={"username": "user+<EMAIL>"}
    )
def test_unblock_view_user_with_special_symbols(self):
    """The unblock-username admin view resolves for usernames containing
    an exclamation mark; ``reverse()`` raises NoReverseMatch on failure."""
    url = reverse(
        "defender_unblock_username_view", kwargs={"username": "user!<EMAIL>"}
    )
def test_decorator_middleware(self):
    """Lockout still triggers when both the middleware and the decorator
    wrap the login view.

    watch_login runs twice per attempt here (once via middleware, once
    via the decorator), halving the effective attempts before lockout —
    hence the halved loop bound relative to other tests.
    """
    for _ in range(int(config.FAILURE_LIMIT)):
        self.assertContains(self._login(), LOGIN_FORM_KEY)
    # The next attempt trips the lockout...
    self.assertContains(self._login(), self.LOCKED_MESSAGE)
    # ...and a plain GET shows the lockout message too.
    self.assertContains(self.client.get(ADMIN_LOGIN_URL), self.LOCKED_MESSAGE)
def test_get_view(self):
    """GET requests are never counted as login failures."""
    for _ in range(config.FAILURE_LIMIT):
        self.assertContains(self.client.get(ADMIN_LOGIN_URL), LOGIN_FORM_KEY)
    # Even past the failure limit, a GET does not trigger the lockout.
    self.assertNotContains(
        self.client.get(ADMIN_LOGIN_URL), self.LOCKED_MESSAGE
    )
@patch("defender.config.USE_CELERY", True)
def test_use_celery(self):
""" Check that use celery works """
self.assertEqual(AccessAttempt.objects.count(), 0)
for i in range(0, int(config.FAILURE_LIMIT)):
response = self._login()
# Check if we are in the same login page
self.assertContains(response, LOGIN_FORM_KEY)
# So, we shouldn't have gotten a lock-out yet.
# But we should get one now
response = self._login()
self.assertContains(response, self.LOCKED_MESSAGE)
self.assertEqual(AccessAttempt.objects.count(), config.FAILURE_LIMIT + 1)
self.assertIsNotNone(str(AccessAttempt.objects.all()[0]))
@patch("defender.config.LOCKOUT_BY_IP_USERNAME", True)
def test_lockout_by_ip_and_username(self):
""" Check that lockout still works when locking out by
IP and Username combined """
username = "testy"
for i in range(0, config.FAILURE_LIMIT):
response = self._login(username=username)
# Check if we are in the same login page
self.assertContains(response, LOGIN_FORM_KEY)
# So, we shouldn't have gotten a lock-out yet.
# But we should get one now
response = self._login(username=username)
self.assertContains(response, self.LOCKED_MESSAGE)
# We shouldn't get a lockout message when attempting to use no username
response = self.client.get(ADMIN_LOGIN_URL)
self.assertContains(response, LOGIN_FORM_KEY)
# We shouldn't get a lockout message when attempting to use a
# different username
response = self._login()
self.assertContains(response, LOGIN_FORM_KEY)
# Successful login should not clear IP lock
self._login(username=VALID_USERNAME, password=<PASSWORD>)
# We should still be locked out for the locked
# username using the same IP
response = self._login(username=username)
self.assertContains(response, self.LOCKED_MESSAGE)
# We shouldn't get a lockout message when attempting to use a
# different ip address
ip = "172.16.31.10"
response = self._login(username=VALID_USERNAME, remote_addr=ip)
# Check if we are in the same login page
self.assertContains(response, LOGIN_FORM_KEY)
@patch("defender.config.DISABLE_IP_LOCKOUT", True)
def test_disable_ip_lockout(self):
""" Check that lockout still works when we disable IP Lock out """
username = "testy"
# try logging in with the same IP, but different username
# we shouldn't be blocked.
# same IP different, usernames
ip = "172.16.31.10"
for i in range(0, config.FAILURE_LIMIT + 10):
login_username = "{0}{1}".format(username, i)
response = self._login(username=login_username, remote_addr=ip)
# Check if we are in the same login page
self.assertContains(response, LOGIN_FORM_KEY)
# So, we shouldn't have gotten a lock-out yet.
# same username with same IP
for i in range(0, config.FAILURE_LIMIT):
response = self._login(username=username)
# Check if we are in the same login page
self.assertContains(response, LOGIN_FORM_KEY)
# But we should get one now
# same username | |
offset_to_section: FileOffset
) -> None:
i_offset = (4 - (offset_to_section)) % 4
self.proto_ids: List[DexProtoId] = [
DexProtoId(
shorty=self.strings[self._parse_uint(data[i : i + 4])],
return_type=self.type_ids[self._parse_uint(data[i + 4 : i + 8])],
parameters=self.type_lists[
cast(FileOffset, self._parse_uint(data[i + 8 : i + 12]))
]
if self._parse_uint(data[i + 8 : i + 12])
else list(),
)
for i in range(i_offset, size * 12 + i_offset, 12)
]
for proto in self.proto_ids:
if len(proto.shorty) - 1 != len(proto.parameters):
log_error("Shorty does not match parameters")
def parse_field_ids(
    self, data: bytes, size: int, offset_to_section: FileOffset
) -> None:
    """Decode the field_ids section into :class:`DexFieldId` records.

    Each record is 8 bytes: class index (ushort), type index (ushort),
    name index (uint). Records start on a 4-byte boundary relative to the
    section offset.
    """
    start = (4 - (offset_to_section)) % 4
    parsed = []
    for rec in range(start, start + size * 8, 8):
        class_type = self.type_ids[self._parse_ushort(data[rec : rec + 2])]
        field_type = self.type_ids[self._parse_ushort(data[rec + 2 : rec + 4])]
        field_name = self.strings[self._parse_uint(data[rec + 4 : rec + 8])]
        parsed.append(DexFieldId(class_=class_type, type_=field_type, name=field_name))
    self.field_ids = parsed
def parse_method_ids(
    self, data: bytes, size: int, offset_to_section: FileOffset
) -> None:
    """Decode the method_ids section into :class:`DexMethodId` records.

    Each record is 8 bytes: class index (ushort), proto index (ushort),
    name index (uint). Records start on a 4-byte boundary relative to the
    section offset.
    """
    start = (4 - (offset_to_section)) % 4
    parsed = []
    for rec in range(start, start + size * 8, 8):
        owner = self.type_ids[self._parse_ushort(data[rec : rec + 2])]
        proto = self.proto_ids[self._parse_ushort(data[rec + 2 : rec + 4])]
        name = self.strings[self._parse_uint(data[rec + 4 : rec + 8])]
        parsed.append(DexMethodId(class_=owner, proto=proto, name=name))
    self.method_ids = parsed
def parse_class_defs(
    self, data: bytes, size: int, offset_to_section: FileOffset
) -> None:
    """Decode the class_defs section (32-byte records) into DexClassDef
    objects, resolving indices/offsets against previously parsed sections.

    Sentinel handling: NO_INDEX means "absent" for superclass/source_file;
    a zero offset means "absent" for interfaces/class_data/static_values.
    """
    # Records are aligned to 4 bytes relative to the section offset.
    i_offset = (4 - (offset_to_section)) % 4
    self.class_defs = list()
    for i in range(i_offset, size * 32 + i_offset, 32):
        cdef = DexClassDef(
            class_type=self.type_ids[self._parse_uint(data[i : i + 4])],
            access_flags=AccessFlag(self._parse_uint(data[i + 4 : i + 8]), "class"),
            superclass=self.type_ids[self._parse_uint(data[i + 8 : i + 12])]
            if self._parse_uint(data[i + 8 : i + 12]) != NO_INDEX
            else None,
            interfaces=self.type_lists[
                cast(FileOffset, self._parse_uint(data[i + 12 : i + 16]))
            ]
            if self._parse_uint(data[i + 12 : i + 16]) != 0
            else None,
            source_file=self.strings[self._parse_uint(data[i + 16 : i + 20])]
            if self._parse_uint(data[i + 16 : i + 20]) != NO_INDEX
            else None,
            annotations=cast(FileOffset, self._parse_uint(data[i + 20 : i + 24])),
            class_data=self.class_data_items[
                self._parse_uint(data[i + 24 : i + 28])
            ]
            if self._parse_uint(data[i + 24 : i + 28]) != 0
            else None,
            static_values=self.encoded_arrays[
                self._parse_uint(data[i + 28 : i + 32])
            ]
            if self._parse_uint(data[i + 28 : i + 32])
            else list(),
        )
        # Right now static_values is truncated like it is in the dex file.
        # It could pad out the array like the spec says, but I think that
        # would be harder
        # if cdef.class_data is not None:
        #     for i in range(
        #         len(cdef.static_values), len(cdef.class_data.static_fields)
        #     ):
        #         cdef.static_values.append(None)
        self.class_defs.append(cdef)
def parse_call_site_ids(
    self, data: bytes, size: int, offset_to_section: FileOffset
) -> None:
    """Decode the call_site_ids section: ``size`` uints, each the file
    offset of a call_site_item.

    FIX (consistency): honour the 4-byte alignment of the section start
    the same way the sibling parsers (parse_field_ids, parse_method_ids,
    parse_class_defs, ...) do. Previously ``offset_to_section`` was
    ignored, so the table was mis-read whenever the section did not begin
    on a 4-byte boundary.
    """
    i_offset = (4 - (offset_to_section)) % 4
    self.call_site_ids = [
        self._parse_uint(data[i : i + 4])
        for i in range(i_offset, size * 4 + i_offset, 4)
    ]
def parse_method_handles(
    self, data: bytes, size: int, offset_to_section: FileOffset
) -> None:
    """Decode the method_handles section (8-byte records) into
    DexMethodHandle objects.

    Per the dex format specification, method-handle types 0x00-0x03 are
    *field* accessors (static/instance put/get) whose id indexes
    ``field_ids``, while types 0x04 and above are *method* invokers whose
    id indexes ``method_ids``.

    FIX: the original chose the table from the field_or_method_id *index*
    (``id_ <= 0x3``) instead of the handle *type*, and had the mapping
    inverted (small indices were treated as methods). The handle type now
    selects the table, matching the spec.
    """
    i_offset = (4 - (offset_to_section)) % 4
    self.method_handles: List[DexMethodHandle] = list()
    for i in range(i_offset, size * 8 + i_offset, 8):
        raw_type = self._parse_ushort(data[i : i + 2])
        method_handle_type = MethodHandleType(raw_type)
        id_ = self._parse_ushort(data[i + 2 : i + 4])
        # Types 0x00-0x03 reference field_ids; 0x04+ reference method_ids.
        if raw_type <= 0x3:
            target = self.field_ids[id_]
        else:
            target = self.method_ids[id_]
        self.method_handles.append(
            DexMethodHandle(
                type_=method_handle_type,
                field_or_method_id=target,
            )
        )
def parse_type_lists(
    self, data: bytes, size: int, offset_to_section: FileOffset
) -> None:
    """Decode the type_lists section into a mapping of absolute file
    offset -> list of DexType.

    Each type_list is a uint count followed by that many ushort type
    indices; lists are 4-byte aligned within the file, so padding may
    precede each one.
    """
    self.type_lists: Dict[FileOffset, List[DexType]] = dict()
    i = 0
    for num in range(size):
        # Advance past padding so (i + offset_to_section) is 4-byte aligned.
        i += (4 - (i + offset_to_section)) % 4
        type_list_size = self._parse_uint(data[i : i + 4])
        # Key by absolute file offset so other sections can reference it.
        self.type_lists[cast(FileOffset, offset_to_section + i)] = [
            self.type_ids[self._parse_ushort(data[j : j + 2])]
            for j in range(i + 4, i + 4 + type_list_size * 2, 2)
        ]
        i += 4 + type_list_size * 2
def _parse_encoded_fields(
    self, data: bytes, size: int
) -> Tuple[List[DexEncodedField], int]:
    """Decode ``size`` encoded_field records from ``data``.

    Returns a tuple of (decoded fields, number of bytes consumed).
    Field indices are delta-encoded ULEB128 values relative to the
    previous record.
    """
    fields: List[DexEncodedField] = list()
    cursor = 0
    field_index = 0
    for _ in range(size):
        delta, idx_len = parse_uleb128(data[cursor : cursor + 5])
        field_index += delta
        flags, flags_len = parse_uleb128(
            data[cursor + idx_len : cursor + idx_len + 5]
        )
        fields.append(
            DexEncodedField(self.field_ids[field_index], AccessFlag(flags, "field"))
        )
        cursor += idx_len + flags_len
    return fields, cursor
def _parse_encoded_methods(
    self, data: bytes, size: int
) -> Tuple[List[DexEncodedMethod], int]:
    """Decode ``size`` encoded_method records from ``data``.

    Returns (decoded methods, bytes consumed). Method indices are
    delta-encoded ULEB128 values. Side effect: records the method's
    instruction offset on the shared DexMethodId (``_insns_off``),
    warning if two code blocks claim the same method.
    """
    methods = list()
    i = 0
    method_idx = 0
    for num in range(size):
        method_idx_diff, off1 = parse_uleb128(data[i : i + 5])
        method_idx += method_idx_diff
        access_flags, off2 = parse_uleb128(data[i + off1 : i + off1 + 5])
        code_off, off3 = cast(
            Tuple[FileOffset, int],
            parse_uleb128(data[i + off1 + off2 : i + off1 + off2 + 5]),
        )
        method = self.method_ids[method_idx]
        # code_off + 16 skips the 16-byte code_item header to the insns
        # array (see parse_code_items, which uses the same offset).
        if method._insns_off is not None and method._insns_off != code_off + 16:
            log_warn(
                f"More than 1 code block assigned to same method {method} with 2nd code block at {code_off}"
            )
        else:
            method._insns_off = cast(FileOffset, code_off + 16)
        methods.append(
            DexEncodedMethod(
                method=method,
                access_flags=AccessFlag(access_flags, "method"),
                # A zero code_off marks an abstract/native method (no body).
                code=self.code_items[code_off] if code_off else None,
            )
        )
        i += off1 + off2 + off3
    return methods, i
def parse_class_data(
    self, data: bytes, size: int, offset_to_section: FileOffset
) -> None:
    """Decode the class_data section into {absolute file offset:
    DexClassData}.

    Each item is four ULEB128 counts (static fields, instance fields,
    direct methods, virtual methods) followed by that many variable-length
    records; the running cursor ``i`` tracks bytes consumed.
    """
    self.class_data_items: Dict[int, DexClassData] = dict()
    i = 0
    for num in range(size):
        # Keyed by absolute offset so class_defs can look items up.
        class_data_off = offset_to_section + i
        static_fields_size, off = parse_uleb128(data[i : i + 5])
        i += off
        instance_fields_size, off = parse_uleb128(data[i : i + 5])
        i += off
        direct_methods_size, off = parse_uleb128(data[i : i + 5])
        i += off
        virtual_methods_size, off = parse_uleb128(data[i : i + 5])
        i += off
        static_fields: List[DexEncodedField] = list()
        instance_fields: List[DexEncodedField] = list()
        direct_methods: List[DexEncodedMethod] = list()
        virtual_methods: List[DexEncodedMethod] = list()
        # The 5 * 2 / 5 * 3 slice bounds are upper limits: at most two
        # (fields) or three (methods) 5-byte ULEB128s per record.
        if static_fields_size:
            static_fields, off = self._parse_encoded_fields(
                data[i : i + 5 * 2 * static_fields_size], static_fields_size
            )
            i += off
        if instance_fields_size:
            instance_fields, off = self._parse_encoded_fields(
                data[i : i + 5 * 2 * instance_fields_size], instance_fields_size
            )
            i += off
        if direct_methods_size:
            direct_methods, off = self._parse_encoded_methods(
                data[i : i + 5 * 3 * direct_methods_size], direct_methods_size
            )
            i += off
        if virtual_methods_size:
            virtual_methods, off = self._parse_encoded_methods(
                data[i : i + 5 * 3 * virtual_methods_size], virtual_methods_size
            )
            i += off
        self.class_data_items[class_data_off] = DexClassData(
            static_fields, instance_fields, direct_methods, virtual_methods
        )
    def parse_code_items(
        self, data: bytes, size: int, offset_to_section: FileOffset
    ) -> None:
        """Parse the code_item section of the DEX file.

        Populates ``self.code_items``, mapping each item's absolute file
        offset to a DexCodeItem (including its try/catch items), and
        initializes ``self.pseudoinstructions``.

        Args:
            data: raw bytes of the code_item section.
            size: number of code_items contained in the section.
            offset_to_section: absolute file offset where the section starts.
        """
        self.code_items: Dict[FileOffset, DexCodeItem] = dict()
        self.pseudoinstructions: PseudoInstructions = cast(PseudoInstructions, dict())
        i = 0  # running byte cursor into `data`
        for num in range(size):
            code_item_off: FileOffset = cast(FileOffset, offset_to_section + i)
            # insns_size counts 16-bit code units, not bytes (note the * 2
            # scaling below).
            insns_size_ = self._parse_uint(data[i + 12 : i + 16])
            code_item = DexCodeItem(
                registers_size=self._parse_ushort(data[i : i + 2]),
                ins_size=self._parse_ushort(data[i + 2 : i + 4]),
                outs_size=self._parse_ushort(data[i + 4 : i + 6]),
                tries_size=self._parse_ushort(data[i + 6 : i + 8]),
                debug_info=cast(
                    FileOffset, self._parse_uint(data[i + 8 : i + 12])
                ),  # TODO debug_info_item offset
                insns_size=insns_size_,
                # insns is stored as an array of endian-sensitive shorts
                insns=b"".join(
                    [
                        pack(">H", self._parse_ushort(data[j : j + 2]))
                        for j in range(i + 16, i + 16 + insns_size_ * 2, 2)
                    ]
                ),
                _insns_off=cast(FileOffset, i + 16 + offset_to_section),
                tries=list(),  # try/catch items get filled in below
            )
            i += (
                16
                + 2 * code_item.insns_size
                # "two bytes of padding... only present if tries_size is
                # non-zero and insns_size is odd."
                + 2 * (code_item.tries_size and (code_item.insns_size % 2))
            )
            # This part is very confusing, sorry
            if code_item.tries_size:
                # try_items must start 4-byte aligned within the file.
                assert (i + offset_to_section) % 4 == 0
                tries_off = i
                # Parse handlers first and then come back for tries
                i += code_item.tries_size * 8  # each try_item is 8 bytes
                # Parse handlers
                encoded_catch_handler_list_off = i
                encoded_catch_handler_list_size, off = parse_uleb128(data[i : i + 5])
                i += off
                # Maps handler offset (relative to the start of the handler
                # list) -> handler; try_items reference handlers this way.
                handler_list = dict()
                for num2 in range(encoded_catch_handler_list_size):
                    encoded_handler_off = i - encoded_catch_handler_list_off
                    # SLEB128: a non-positive size signals that a catch-all
                    # address follows the |size| typed handlers.
                    encoded_catch_handler_size, off = parse_sleb128(data[i : i + 5])
                    i += off
                    handlers = list()
                    for num3 in range(abs(encoded_catch_handler_size)):
                        type_idx, off = parse_uleb128(data[i : i + 5])
                        i += off
                        addr, off = parse_uleb128(data[i : i + 5])
                        i += off
                        handlers.append((self.type_ids[type_idx], addr))
                    if encoded_catch_handler_size <= 0:
                        catch_all_addr, off = cast(
                            Tuple[BytecodeAddress, int], parse_uleb128(data[i : i + 5])
                        )
                        i += off
                        encoded_handler = DexEncodedCatchHandler(
                            size=abs(encoded_catch_handler_size),
                            handlers=handlers,
                            catch_all_addr=catch_all_addr,
                        )
                    else:
                        encoded_handler = DexEncodedCatchHandler(
                            size=encoded_catch_handler_size, handlers=handlers
                        )
                    handler_list[encoded_handler_off] = encoded_handler
                # Parse tries
                for num2 in range(code_item.tries_size):
                    code_item.tries.append(
                        DexTryItem(
                            start_addr=cast(
                                BytecodeAddress,
                                self._parse_uint(data[tries_off : tries_off + 4]),
                            ),
                            insn_count=self._parse_ushort(
                                data[tries_off + 4 : tries_off + 6]
                            ),
                            handler=handler_list[
                                self._parse_ushort(data[tries_off + 6 : tries_off + 8])
                            ],
                        )
                    )
                    tries_off += 8
            self.code_items[code_item_off] = code_item
# Disassemble | |
#!/usr/bin/python
"""
(C) Copyright 2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
from logging import getLogger
from ClusterShell.NodeSet import NodeSet
class TelemetryUtils():
# pylint: disable=too-many-nested-blocks
"""Defines a object used to verify telemetry information."""
ENGINE_CONTAINER_METRICS = [
"engine_pool_container_handles",
"engine_pool_ops_cont_close",
"engine_pool_ops_cont_destroy",
"engine_pool_ops_cont_open"]
ENGINE_POOL_METRICS = [
"engine_pool_entries_dtx_batched_degree",
"engine_pool_entries_dtx_batched_total",
"engine_pool_ops_akey_enum",
"engine_pool_ops_akey_punch",
"engine_pool_ops_compound",
"engine_pool_ops_dkey_enum",
"engine_pool_ops_dkey_punch",
"engine_pool_ops_dtx_abort",
"engine_pool_ops_dtx_check",
"engine_pool_ops_dtx_commit",
"engine_pool_ops_dtx_refresh",
"engine_pool_ops_ec_agg",
"engine_pool_ops_ec_rep",
"engine_pool_ops_fetch",
"engine_pool_ops_key_query",
"engine_pool_ops_migrate",
"engine_pool_ops_obj_enum",
"engine_pool_ops_obj_punch",
"engine_pool_ops_obj_sync",
"engine_pool_ops_recx_enum",
"engine_pool_ops_tgt_akey_punch",
"engine_pool_ops_tgt_dkey_punch",
"engine_pool_ops_tgt_punch",
"engine_pool_ops_tgt_update",
"engine_pool_ops_update",
"engine_pool_pool_handles",
"engine_pool_resent",
"engine_pool_restarted",
"engine_pool_started_at",
"engine_pool_xferred_fetch",
"engine_pool_xferred_update"]
ENGINE_EVENT_METRICS = [
"engine_events_dead_ranks",
"engine_events_last_event_ts",
"engine_servicing_at",
"engine_started_at"]
ENGINE_IO_DTX_COMMITTABLE_METRICS = [
"engine_io_dtx_committable",
"engine_io_dtx_committable_max",
"engine_io_dtx_committable_mean",
"engine_io_dtx_committable_min",
"engine_io_dtx_committable_stddev"]
ENGINE_IO_DTX_COMMITTED_METRICS = [
"engine_io_dtx_committed",
"engine_io_dtx_committed_max",
"engine_io_dtx_committed_mean",
"engine_io_dtx_committed_min",
"engine_io_dtx_committed_stddev"]
ENGINE_IO_LATENCY_FETCH_METRICS = [
"engine_io_latency_fetch",
"engine_io_latency_fetch_max",
"engine_io_latency_fetch_mean",
"engine_io_latency_fetch_min",
"engine_io_latency_fetch_stddev"]
ENGINE_IO_LATENCY_UPDATE_METRICS = [
"engine_io_latency_update",
"engine_io_latency_update_max",
"engine_io_latency_update_mean",
"engine_io_latency_update_min",
"engine_io_latency_update_stddev"]
ENGINE_IO_OPS_AKEY_ENUM_METRICS = [
"engine_io_ops_akey_enum_active",
"engine_io_ops_akey_enum_active_max",
"engine_io_ops_akey_enum_active_mean",
"engine_io_ops_akey_enum_active_min",
"engine_io_ops_akey_enum_active_stddev"]
ENGINE_IO_OPS_AKEY_ENUM_LATENCY_METRICS = [
"engine_io_ops_akey_enum_latency",
"engine_io_ops_akey_enum_latency_max",
"engine_io_ops_akey_enum_latency_mean",
"engine_io_ops_akey_enum_latency_min",
"engine_io_ops_akey_enum_latency_stddev"]
ENGINE_IO_OPS_AKEY_PUNCH_ACTIVE_METRICS = [
"engine_io_ops_akey_punch_active",
"engine_io_ops_akey_punch_active_max",
"engine_io_ops_akey_punch_active_mean",
"engine_io_ops_akey_punch_active_min",
"engine_io_ops_akey_punch_active_stddev"]
ENGINE_IO_OPS_AKEY_PUNCH_LATENCY_METRICS = [
"engine_io_ops_akey_punch_latency",
"engine_io_ops_akey_punch_latency_max",
"engine_io_ops_akey_punch_latency_mean",
"engine_io_ops_akey_punch_latency_min",
"engine_io_ops_akey_punch_latency_stddev"]
ENGINE_IO_OPS_COMPOUND_ACTIVE_METRICS = [
"engine_io_ops_compound_active",
"engine_io_ops_compound_active_max",
"engine_io_ops_compound_active_mean",
"engine_io_ops_compound_active_min",
"engine_io_ops_compound_active_stddev"]
ENGINE_IO_OPS_COMPOUND_LATENCY_METRICS = [
"engine_io_ops_compound_latency",
"engine_io_ops_compound_latency_max",
"engine_io_ops_compound_latency_mean",
"engine_io_ops_compound_latency_min",
"engine_io_ops_compound_latency_stddev"]
ENGINE_IO_OPS_DKEY_ENUM_ACTIVE_METRICS = [
"engine_io_ops_dkey_enum_active",
"engine_io_ops_dkey_enum_active_max",
"engine_io_ops_dkey_enum_active_mean",
"engine_io_ops_dkey_enum_active_min",
"engine_io_ops_dkey_enum_active_stddev"]
ENGINE_IO_OPS_DKEY_ENUM_LATENCY_METRICS = [
"engine_io_ops_dkey_enum_latency",
"engine_io_ops_dkey_enum_latency_max",
"engine_io_ops_dkey_enum_latency_mean",
"engine_io_ops_dkey_enum_latency_min",
"engine_io_ops_dkey_enum_latency_stddev"]
ENGINE_IO_OPS_DKEY_PUNCH_ACTIVE_METRICS = [
"engine_io_ops_dkey_punch_active",
"engine_io_ops_dkey_punch_active_max",
"engine_io_ops_dkey_punch_active_mean",
"engine_io_ops_dkey_punch_active_min",
"engine_io_ops_dkey_punch_active_stddev"]
ENGINE_IO_OPS_DKEY_PUNCH_LATENCY_METRICS = [
"engine_io_ops_dkey_punch_latency",
"engine_io_ops_dkey_punch_latency_max",
"engine_io_ops_dkey_punch_latency_mean",
"engine_io_ops_dkey_punch_latency_min",
"engine_io_ops_dkey_punch_latency_stddev"]
ENGINE_IO_OPS_EC_AGG_ACTIVE_METRICS = [
"engine_io_ops_ec_agg_active",
"engine_io_ops_ec_agg_active_max",
"engine_io_ops_ec_agg_active_mean",
"engine_io_ops_ec_agg_active_min",
"engine_io_ops_ec_agg_active_stddev"]
ENGINE_IO_OPS_EC_AGG_LATENCY_METRICS = [
"engine_io_ops_ec_agg_latency",
"engine_io_ops_ec_agg_latency_max",
"engine_io_ops_ec_agg_latency_mean",
"engine_io_ops_ec_agg_latency_min",
"engine_io_ops_ec_agg_latency_stddev"]
ENGINE_IO_OPS_EC_REP_ACTIVE_METRICS = [
"engine_io_ops_ec_rep_active",
"engine_io_ops_ec_rep_active_max",
"engine_io_ops_ec_rep_active_mean",
"engine_io_ops_ec_rep_active_min",
"engine_io_ops_ec_rep_active_stddev"]
ENGINE_IO_OPS_EC_REP_LATENCY_METRICS = [
"engine_io_ops_ec_rep_latency",
"engine_io_ops_ec_rep_latency_max",
"engine_io_ops_ec_rep_latency_mean",
"engine_io_ops_ec_rep_latency_min",
"engine_io_ops_ec_rep_latency_stddev"]
ENGINE_IO_OPS_FETCH_ACTIVE_METRICS = [
"engine_io_ops_fetch_active",
"engine_io_ops_fetch_active_max",
"engine_io_ops_fetch_active_mean",
"engine_io_ops_fetch_active_min",
"engine_io_ops_fetch_active_stddev"]
ENGINE_IO_OPS_KEY_QUERY_ACTIVE_METRICS = [
"engine_io_ops_key_query_active",
"engine_io_ops_key_query_active_max",
"engine_io_ops_key_query_active_mean",
"engine_io_ops_key_query_active_min",
"engine_io_ops_key_query_active_stddev"]
ENGINE_IO_OPS_KEY_QUERY_LATENCY_METRICS = [
"engine_io_ops_key_query_latency",
"engine_io_ops_key_query_latency_max",
"engine_io_ops_key_query_latency_mean",
"engine_io_ops_key_query_latency_min",
"engine_io_ops_key_query_latency_stddev"]
ENGINE_IO_OPS_MIGRATE_ACTIVE_METRICS = [
"engine_io_ops_migrate_active",
"engine_io_ops_migrate_active_max",
"engine_io_ops_migrate_active_mean",
"engine_io_ops_migrate_active_min",
"engine_io_ops_migrate_active_stddev"]
ENGINE_IO_OPS_MIGRATE_LATENCY_METRICS = [
"engine_io_ops_migrate_latency",
"engine_io_ops_migrate_latency_max",
"engine_io_ops_migrate_latency_mean",
"engine_io_ops_migrate_latency_min",
"engine_io_ops_migrate_latency_stddev"]
ENGINE_IO_OPS_OBJ_ENUM_ACTIVE_METRICS = [
"engine_io_ops_obj_enum_active",
"engine_io_ops_obj_enum_active_max",
"engine_io_ops_obj_enum_active_mean",
"engine_io_ops_obj_enum_active_min",
"engine_io_ops_obj_enum_active_stddev"]
ENGINE_IO_OPS_OBJ_ENUM_LATENCY_METRICS = [
"engine_io_ops_obj_enum_latency",
"engine_io_ops_obj_enum_latency_max",
"engine_io_ops_obj_enum_latency_mean",
"engine_io_ops_obj_enum_latency_min",
"engine_io_ops_obj_enum_latency_stddev"]
ENGINE_IO_OPS_OBJ_PUNCH_ACTIVE_METRICS = [
"engine_io_ops_obj_punch_active",
"engine_io_ops_obj_punch_active_max",
"engine_io_ops_obj_punch_active_mean",
"engine_io_ops_obj_punch_active_min",
"engine_io_ops_obj_punch_active_stddev"]
ENGINE_IO_OPS_OBJ_PUNCH_LATENCY_METRICS = [
"engine_io_ops_obj_punch_latency",
"engine_io_ops_obj_punch_latency_max",
"engine_io_ops_obj_punch_latency_mean",
"engine_io_ops_obj_punch_latency_min",
"engine_io_ops_obj_punch_latency_stddev"]
ENGINE_IO_OPS_OBJ_SYNC_ACTIVE_METRICS = [
"engine_io_ops_obj_sync_active",
"engine_io_ops_obj_sync_active_max",
"engine_io_ops_obj_sync_active_mean",
"engine_io_ops_obj_sync_active_min",
"engine_io_ops_obj_sync_active_stddev"]
ENGINE_IO_OPS_OBJ_SYNC_LATENCY_METRICS = [
"engine_io_ops_obj_sync_latency",
"engine_io_ops_obj_sync_latency_max",
"engine_io_ops_obj_sync_latency_mean",
"engine_io_ops_obj_sync_latency_min",
"engine_io_ops_obj_sync_latency_stddev"]
ENGINE_IO_OPS_RECX_ENUM_ACTIVE_METRICS = [
"engine_io_ops_recx_enum_active",
"engine_io_ops_recx_enum_active_max",
"engine_io_ops_recx_enum_active_mean",
"engine_io_ops_recx_enum_active_min",
"engine_io_ops_recx_enum_active_stddev"]
ENGINE_IO_OPS_RECX_ENUM_LATENCY_METRICS = [
"engine_io_ops_recx_enum_latency",
"engine_io_ops_recx_enum_latency_max",
"engine_io_ops_recx_enum_latency_mean",
"engine_io_ops_recx_enum_latency_min",
"engine_io_ops_recx_enum_latency_stddev"]
ENGINE_IO_OPS_TGT_AKEY_PUNCH_ACTIVE_METRICS = [
"engine_io_ops_tgt_akey_punch_active",
"engine_io_ops_tgt_akey_punch_active_max",
"engine_io_ops_tgt_akey_punch_active_mean",
"engine_io_ops_tgt_akey_punch_active_min",
"engine_io_ops_tgt_akey_punch_active_stddev"]
ENGINE_IO_OPS_TGT_AKEY_PUNCH_LATENCY_METRICS = [
"engine_io_ops_tgt_akey_punch_latency",
"engine_io_ops_tgt_akey_punch_latency_max",
"engine_io_ops_tgt_akey_punch_latency_mean",
"engine_io_ops_tgt_akey_punch_latency_min",
"engine_io_ops_tgt_akey_punch_latency_stddev"]
ENGINE_IO_OPS_TGT_DKEY_PUNCH_ACTIVE_METRICS = [
"engine_io_ops_tgt_dkey_punch_active",
"engine_io_ops_tgt_dkey_punch_active_max",
"engine_io_ops_tgt_dkey_punch_active_mean",
"engine_io_ops_tgt_dkey_punch_active_min",
"engine_io_ops_tgt_dkey_punch_active_stddev"]
ENGINE_IO_OPS_TGT_DKEY_PUNCH_LATENCY_METRICS = [
"engine_io_ops_tgt_dkey_punch_latency",
"engine_io_ops_tgt_dkey_punch_latency_max",
"engine_io_ops_tgt_dkey_punch_latency_mean",
"engine_io_ops_tgt_dkey_punch_latency_min",
"engine_io_ops_tgt_dkey_punch_latency_stddev"]
ENGINE_IO_OPS_TGT_PUNCH_ACTIVE_METRICS = [
"engine_io_ops_tgt_punch_active",
"engine_io_ops_tgt_punch_active_max",
"engine_io_ops_tgt_punch_active_mean",
"engine_io_ops_tgt_punch_active_min",
"engine_io_ops_tgt_punch_active_stddev"]
ENGINE_IO_OPS_TGT_PUNCH_LATENCY_METRICS = [
"engine_io_ops_tgt_punch_latency",
"engine_io_ops_tgt_punch_latency_max",
"engine_io_ops_tgt_punch_latency_mean",
"engine_io_ops_tgt_punch_latency_min",
"engine_io_ops_tgt_punch_latency_stddev"]
ENGINE_IO_OPS_TGT_UPDATE_ACTIVE_METRICS = [
"engine_io_ops_tgt_update_active",
"engine_io_ops_tgt_update_active_max",
"engine_io_ops_tgt_update_active_mean",
"engine_io_ops_tgt_update_active_min",
"engine_io_ops_tgt_update_active_stddev"]
ENGINE_IO_OPS_UPDATE_ACTIVE_METRICS = [
"engine_io_ops_update_active",
"engine_io_ops_update_active_max",
"engine_io_ops_update_active_mean",
"engine_io_ops_update_active_min",
"engine_io_ops_update_active_stddev"]
ENGINE_IO_METRICS = ENGINE_IO_DTX_COMMITTABLE_METRICS +\
ENGINE_IO_DTX_COMMITTED_METRICS +\
ENGINE_IO_LATENCY_FETCH_METRICS +\
ENGINE_IO_LATENCY_UPDATE_METRICS +\
ENGINE_IO_OPS_AKEY_ENUM_METRICS +\
ENGINE_IO_OPS_AKEY_ENUM_LATENCY_METRICS +\
ENGINE_IO_OPS_AKEY_PUNCH_ACTIVE_METRICS +\
ENGINE_IO_OPS_AKEY_PUNCH_LATENCY_METRICS +\
ENGINE_IO_OPS_COMPOUND_ACTIVE_METRICS +\
ENGINE_IO_OPS_COMPOUND_LATENCY_METRICS +\
ENGINE_IO_OPS_DKEY_ENUM_ACTIVE_METRICS +\
ENGINE_IO_OPS_DKEY_ENUM_LATENCY_METRICS +\
ENGINE_IO_OPS_DKEY_PUNCH_ACTIVE_METRICS +\
ENGINE_IO_OPS_DKEY_PUNCH_LATENCY_METRICS +\
ENGINE_IO_OPS_EC_AGG_ACTIVE_METRICS +\
ENGINE_IO_OPS_EC_AGG_LATENCY_METRICS +\
ENGINE_IO_OPS_EC_REP_ACTIVE_METRICS +\
ENGINE_IO_OPS_EC_REP_LATENCY_METRICS +\
ENGINE_IO_OPS_FETCH_ACTIVE_METRICS +\
ENGINE_IO_OPS_KEY_QUERY_ACTIVE_METRICS +\
ENGINE_IO_OPS_KEY_QUERY_LATENCY_METRICS +\
ENGINE_IO_OPS_MIGRATE_ACTIVE_METRICS +\
ENGINE_IO_OPS_MIGRATE_LATENCY_METRICS +\
ENGINE_IO_OPS_OBJ_ENUM_ACTIVE_METRICS +\
ENGINE_IO_OPS_OBJ_ENUM_LATENCY_METRICS +\
ENGINE_IO_OPS_OBJ_PUNCH_ACTIVE_METRICS +\
ENGINE_IO_OPS_OBJ_PUNCH_LATENCY_METRICS +\
ENGINE_IO_OPS_OBJ_SYNC_ACTIVE_METRICS +\
ENGINE_IO_OPS_OBJ_SYNC_LATENCY_METRICS +\
ENGINE_IO_OPS_RECX_ENUM_ACTIVE_METRICS +\
ENGINE_IO_OPS_RECX_ENUM_LATENCY_METRICS +\
ENGINE_IO_OPS_TGT_AKEY_PUNCH_ACTIVE_METRICS +\
ENGINE_IO_OPS_TGT_AKEY_PUNCH_LATENCY_METRICS +\
ENGINE_IO_OPS_TGT_DKEY_PUNCH_ACTIVE_METRICS +\
ENGINE_IO_OPS_TGT_DKEY_PUNCH_LATENCY_METRICS +\
ENGINE_IO_OPS_TGT_PUNCH_ACTIVE_METRICS +\
ENGINE_IO_OPS_TGT_PUNCH_LATENCY_METRICS +\
ENGINE_IO_OPS_TGT_UPDATE_ACTIVE_METRICS +\
ENGINE_IO_OPS_UPDATE_ACTIVE_METRICS
ENGINE_NET_METRICS = [
"engine_net_ofi_sockets_failed_addr",
"engine_net_ofi_sockets_req_timeout",
"engine_net_ofi_sockets_uri_lookup_timeout",
"engine_net_uri_lookup_other",
"engine_net_uri_lookup_self"]
ENGINE_RANK_METRICS = [
"engine_rank"]
GO_METRICS = [
"go_gc_duration_seconds",
"go_goroutines",
"go_info",
"go_memstats_alloc_bytes",
"go_memstats_alloc_bytes_total",
"go_memstats_buck_hash_sys_bytes",
"go_memstats_frees_total",
"go_memstats_gc_cpu_fraction",
"go_memstats_gc_sys_bytes",
"go_memstats_heap_alloc_bytes",
"go_memstats_heap_idle_bytes",
"go_memstats_heap_inuse_bytes",
"go_memstats_heap_objects",
"go_memstats_heap_released_bytes",
"go_memstats_heap_sys_bytes",
"go_memstats_last_gc_time_seconds",
"go_memstats_lookups_total",
"go_memstats_mallocs_total",
"go_memstats_mcache_inuse_bytes",
"go_memstats_mcache_sys_bytes",
"go_memstats_mspan_inuse_bytes",
"go_memstats_mspan_sys_bytes",
"go_memstats_next_gc_bytes",
"go_memstats_other_sys_bytes",
"go_memstats_stack_inuse_bytes",
"go_memstats_stack_sys_bytes",
"go_memstats_sys_bytes",
"go_threads"]
PROCESS_METRICS = [
"process_cpu_seconds_total",
"process_max_fds",
"process_open_fds",
"process_resident_memory_bytes",
"process_start_time_seconds",
"process_virtual_memory_bytes",
"process_virtual_memory_max_bytes"]
ENGINE_NVME_METRICS = [
"engine_nvme_<id>_commands_checksum_mismatch",
"engine_nvme_<id>_commands_ctrl_busy_time",
"engine_nvme_<id>_commands_data_units_read",
"engine_nvme_<id>_commands_data_units_written",
"engine_nvme_<id>_commands_host_read_cmds",
"engine_nvme_<id>_commands_host_write_cmds",
"engine_nvme_<id>_commands_media_errs",
"engine_nvme_<id>_commands_read_errs",
"engine_nvme_<id>_commands_unmap_errs",
"engine_nvme_<id>_commands_write_errs",
"engine_nvme_<id>_power_cycles",
"engine_nvme_<id>_power_on_hours",
"engine_nvme_<id>_read_only_warn",
"engine_nvme_<id>_reliability_avail_spare",
"engine_nvme_<id>_reliability_avail_spare_threshold",
"engine_nvme_<id>_reliability_avail_spare_warn",
"engine_nvme_<id>_reliability_percentage_used",
"engine_nvme_<id>_reliability_reliability_warn",
"engine_nvme_<id>_temp_crit_time",
"engine_nvme_<id>_temp_current",
"engine_nvme_<id>_temp_warn",
"engine_nvme_<id>_temp_warn_time",
"engine_nvme_<id>_unsafe_shutdowns",
"engine_nvme_<id>_volatile_mem_warn",
"engine_nvme_<id>_vendor_program_fail_cnt_norm",
"engine_nvme_<id>_vendor_program_fail_cnt_raw",
"engine_nvme_<id>_vendor_erase_fail_cnt_norm",
"engine_nvme_<id>_vendor_erase_fail_cnt_raw",
"engine_nvme_<id>_vendor_wear_leveling_cnt_norm",
"engine_nvme_<id>_vendor_wear_leveling_cnt_min",
"engine_nvme_<id>_vendor_wear_leveling_cnt_max",
"engine_nvme_<id>_vendor_wear_leveling_cnt_avg",
"engine_nvme_<id>_vendor_endtoend_err_cnt_raw",
"engine_nvme_<id>_vendor_crc_err_cnt_raw",
"engine_nvme_<id>_vendor_media_wear_raw",
"engine_nvme_<id>_vendor_host_reads_raw",
"engine_nvme_<id>_vendor_crc_workload_timer_raw",
"engine_nvme_<id>_vendor_thermal_throttle_status_raw",
"engine_nvme_<id>_vendor_thermal_throttle_event_cnt",
"engine_nvme_<id>_vendor_retry_buffer_overflow_cnt",
"engine_nvme_<id>_vendor_pll_lock_loss_cnt",
"engine_nvme_<id>_vendor_nand_bytes_written",
"engine_nvme_<id>_vendor_host_bytes_written"]
def __init__(self, dmg, servers):
"""Create a TelemetryUtils object.
Args:
dmg (DmgCommand): the DmgCommand object configured to communicate
with the servers
servers (list): a list of server host names
"""
self.log = getLogger(__name__)
self.dmg = dmg
self.hosts = NodeSet.fromlist(servers)
def get_all_server_metrics_names(self, server, with_pools=False):
"""Get all the telemetry metrics names for this server.
Args:
server (DaosServerCommand): the server from which to determine what
metrics will be available
Returns:
list: all of the telemetry metrics names for this server
"""
all_metrics_names = list(self.ENGINE_EVENT_METRICS)
all_metrics_names.extend(self.ENGINE_IO_METRICS)
all_metrics_names.extend(self.ENGINE_NET_METRICS)
all_metrics_names.extend(self.ENGINE_RANK_METRICS)
all_metrics_names.extend(self.GO_METRICS)
all_metrics_names.extend(self.PROCESS_METRICS)
if with_pools:
all_metrics_names.extend(self.ENGINE_POOL_METRICS)
all_metrics_names.extend(self.ENGINE_CONTAINER_METRICS)
# Add NVMe metrics for any NVMe devices configured for this server
for nvme_list in server.manager.job.get_engine_values("bdev_list"):
for nvme in nvme_list if nvme_list is not None else []:
# Replace the '<id>' placeholder with the actual NVMe ID
nvme_id = nvme.replace(":", "_").replace(".", "_")
nvme_metrics = [
name.replace("<id>", nvme_id)
for name in self.ENGINE_NVME_METRICS]
all_metrics_names.extend(nvme_metrics)
return all_metrics_names
def list_metrics(self):
"""List the available metrics for each host.
Returns:
dict: a dictionary of host keys linked to a list of metric names
"""
info = {}
self.log.info("Listing telemetry metrics from %s", self.hosts)
for host in self.hosts:
data = self.dmg.telemetry_metrics_list(host=host)
info[host] = []
if "response" in data:
if "available_metric_sets" in data["response"]:
for entry in data["response"]["available_metric_sets"]:
if "name" in entry:
info[host].append(entry["name"])
return info
def get_metrics(self, name):
"""Obtain the specified metric information for each host.
Args:
name (str): Comma-separated list of metric names to query.
Returns:
dict: a dictionary of host keys linked to metric data for each
metric name specified
"""
info = {}
self.log.info("Querying telemetry metric %s from %s", name, self.hosts)
for host in self.hosts:
data = self.dmg.telemetry_metrics_query(host=host, metrics=name)
info[host] = {}
if "response" in data:
if "metric_sets" in data["response"]:
for entry in data["response"]["metric_sets"]:
info[host][entry["name"]] = {
"description": entry["description"],
"metrics": entry["metrics"]
}
return info
def get_container_metrics(self):
"""Get the container telemetry metrics.
Returns:
dict: dictionary of dictionaries of container metric names and
values per server host key
"""
data = {}
info = self.get_metrics(",".join(self.ENGINE_CONTAINER_METRICS))
self.log.info("Container Telemetry Information")
for host in info:
data[host] = {name: 0 for name in self.ENGINE_CONTAINER_METRICS}
for name in self.ENGINE_CONTAINER_METRICS:
if name in info[host]:
for metric in info[host][name]["metrics"]:
self.log.info(
" %s (%s): %s (%s)",
info[host][name]["description"], name,
metric["value"], host)
data[host][name] = metric["value"]
return data
    def get_pool_metrics(self, specific_metrics=None):
        """Get the pool telemetry metrics.

        Args:
            specific_metrics (list, optional): list of specific pool metrics
                to query. Defaults to None, which queries every metric in
                ENGINE_POOL_METRICS.

        Returns:
            dict: dictionary of dictionaries of pool metric names and
                values per server host key
        """
        data = {}
        if specific_metrics is None:
            specific_metrics = self.ENGINE_POOL_METRICS
        info = self.get_metrics(",".join(specific_metrics))
        self.log.info("Pool Telemetry Information")
        for name in specific_metrics:
            for index, host in enumerate(info):
                if name in info[host]:
                    # Log the table header only when the first host reports
                    # this metric
                    if index == 0:
                        self.log.info(
                            " %s (%s):",
                            name, info[host][name]["description"])
                        self.log.info(
                            " %-12s %-4s %-6s %s",
                            "Host", "Rank", "Target", "Value")
                    if name not in data:
                        data[name] = {}
                    if host not in data[name]:
                        data[name][host] = {}
                    # Record values keyed by metric name / host / rank /
                    # target; metrics without both rank and target labels are
                    # skipped
                    for metric in info[host][name]["metrics"]:
                        if "labels" in metric:
                            if ("rank" in metric["labels"]
                                    and "target" in metric["labels"]):
                                rank = metric["labels"]["rank"]
                                target = metric["labels"]["target"]
                                if rank not in data[name][host]:
                                    data[name][host][rank] = {}
                                if target not in data[name][host][rank]:
                                    data[name][host][rank][target] = {}
                                data[name][host][rank][target] = \
                                    metric["value"]
                                self.log.info(
                                    " %-12s %-4s %-6s %s",
                                    host, rank, target, metric["value"])
        return data
def get_io_metrics(self, test_metrics=None):
"""Get the io telemetry metrics.
Args:
test_metrics (str list, optional): Comma-separated list of metric
names to query. By default, test_metrics is entire
ENGINE_IO_METRICS.
Returns:
dict: dictionary of dictionaries of container metric names and
values per server host key
"""
data = {}
if test_metrics is None:
test_metrics = self.ENGINE_IO_METRICS
info = self.get_metrics(",".join(test_metrics))
self.log.info("Telemetry Information")
for name in test_metrics:
for index, host in enumerate(info):
if name in info[host]:
if index == 0:
self.log.info(
" %s (%s):",
name, info[host][name]["description"])
self.log.info(
" %-12s %-4s %-6s %-6s %s",
"Host", "Rank", "Target", "Size", "Value")
if name not in data:
data[name] = {}
if host not in data[name]:
data[name][host] = {}
for metric in info[host][name]["metrics"]:
if "labels" in metric:
if ("rank" in metric["labels"]
and "target" in metric["labels"]
and "size" in metric["labels"]):
rank = metric["labels"]["rank"]
target = metric["labels"]["target"]
size = metric["labels"]["size"]
if rank not in data[name][host]:
data[name][host][rank] = {}
if target not in data[name][host][rank]:
data[name][host][rank][target] = {}
data[name][host][rank][target][size] = \
metric["value"]
self.log.info(
" %-12s %-4s %-6s %-6s %s",
host, rank, target, size, metric["value"])
elif ("rank" in metric["labels"]
and "target" in metric["labels"]):
rank = metric["labels"]["rank"]
target = metric["labels"]["target"]
if rank not in data[name][host]:
data[name][host][rank] = {}
if target not in data[name][host][rank]:
data[name][host][rank][target] = {}
data[name][host][rank][target]["-"] = \
metric["value"]
self.log.info(
" %-12s %-4s %-6s %-6s %s",
| |
nan ],
[ 810, 27.8132, 0.1891, 33.0672, 0.1590, 36.2760, 0.1450, 6.2147, 0.8461, 2.16e-15, 1.98e-15, 2.11e-15, nan ],
[ 820, 28.2188, 0.1910, 33.6859, 0.1600, 36.1652, 0.1490, 6.2148, 0.8671, 2.23e-15, 2.21e-15, 2.10e-15, nan ],
[ 830, 28.7670, 0.1919, 34.5118, 0.1600, 37.0519, 0.1490, 6.1770, 0.8938, 2.17e-15, 2.30e-15, 2.43e-15, nan ],
[ 840, 28.8193, 0.1962, 34.2255, 0.1652, 36.9444, 0.1531, 6.2139, 0.9100, 2.00e-15, 2.19e-15, 2.10e-15, nan ],
[ 850, 29.0849, 0.1991, 34.8935, 0.1659, 37.1343, 0.1559, 6.2383, 0.9282, 2.78e-15, 2.56e-15, 2.56e-15, nan ],
[ 860, 29.6307, 0.2000, 36.3985, 0.1628, 37.7814, 0.1569, 10.6788, 0.5550, 2.25e-15, 2.66e-15, 2.31e-15, nan ],
[ 870, 30.6520, 0.1979, 36.2926, 0.1671, 38.6057, 0.1571, 10.6404, 0.5701, 2.03e-15, 2.28e-15, 2.03e-15, nan ],
[ 880, 31.1723, 0.1991, 37.1841, 0.1669, 39.7995, 0.1559, 10.6284, 0.5839, 2.34e-15, 2.34e-15, 2.33e-15, nan ],
[ 890, 31.2480, 0.2031, 37.9790, 0.1671, 40.4609, 0.1569, 10.5439, 0.6020, 2.43e-15, 2.33e-15, 2.31e-15, nan ],
[ 900, 31.3645, 0.2069, 37.7069, 0.1721, 40.5729, 0.1600, 10.6553, 0.6092, 2.84e-15, 2.38e-15, 2.49e-15, nan ],
[ 1000, 34.3959, 0.2329, 42.1641, 0.1900, 44.5096, 0.1800, 10.3911, 0.7710, 3.31e-15, 2.96e-15, 3.23e-15, nan ],
[ 1100, 37.5751, 0.2580, 45.9392, 0.2110, 28.3516, 0.3419, 10.6180, 0.9129, 3.19e-15, 3.24e-15, 3.24e-15, nan ],
[ 1200, 41.3494, 0.2789, 51.3031, 0.2248, 30.9130, 0.3731, 9.4306, 1.2231, 3.04e-15, 3.44e-15, 3.23e-15, nan ],
[ 1300, 44.2153, 0.3061, 54.3797, 0.2489, 32.6278, 0.4148, 8.5346, 1.5860, 3.33e-15, 3.15e-15, 3.22e-15, nan ],
[ 1400, 48.4453, 0.3240, 60.6235, 0.2589, 34.6511, 0.4530, 8.1877, 1.9171, 3.45e-15, 4.07e-15, 3.90e-15, nan ],
[ 1500, 52.2273, 0.3450, 65.4878, 0.2751, 37.5425, 0.4799, 8.4392, 2.1350, 3.54e-15, 2.77e-15, 3.06e-15, nan ],
[ 1600, 55.3994, 0.3700, 68.7839, 0.2980, 38.8171, 0.5281, 8.3468, 2.4559, 3.14e-15, 3.04e-15, 2.70e-15, nan ],
[ 1700, 59.1816, 0.3910, 73.4730, 0.3150, 40.1730, 0.5760, 8.3938, 2.7568, 3.76e-15, 3.66e-15, 3.27e-15, nan ],
[ 1800, 63.4073, 0.4091, 75.8765, 0.3419, 42.6694, 0.6080, 7.8635, 3.2990, 3.33e-15, 3.62e-15, 3.41e-15, nan ],
[ 1900, 65.5636, 0.4408, 77.4614, 0.3731, 44.9489, 0.6430, 8.4414, 3.4239, 5.07e-15, 5.00e-15, 4.89e-15, nan ],
[ 2000, 64.4213, 0.4971, 76.9733, 0.4160, 44.4910, 0.7198, 8.2323, 3.8900, 3.93e-15, 4.00e-15, 3.81e-15, nan ],
[ 2100, 66.4934, 0.5310, 78.1017, 0.4520, 35.0238, 1.0080, 8.4584, 4.1740, 3.76e-15, 3.51e-15, 3.81e-15, nan ],
[ 2200, 67.2658, 0.5760, 78.7757, 0.4919, 37.2910, 1.0390, 8.1846, 4.7340, 4.15e-15, 4.37e-15, 4.17e-15, nan ],
[ 2300, 68.2886, 0.6201, 78.4189, 0.5400, 38.4290, 1.1020, 7.7971, 5.4312, 4.35e-15, 4.95e-15, 4.55e-15, nan ],
[ 2400, 70.8145, 0.6511, 78.6795, 0.5860, 40.0237, 1.1520, 8.4681, 5.4450, 5.39e-15, 5.73e-15, 6.04e-15, nan ],
[ 2500, 71.1808, 0.7029, 78.7987, 0.6349, 42.0018, 1.1911, 8.4583, 5.9149, 5.28e-15, 5.28e-15, 4.73e-15, nan ],
[ 2600, 71.4831, 0.7570, 79.6906, 0.6790, 42.5415, 1.2720, 8.2829, 6.5329, 5.84e-15, 4.69e-15, 5.07e-15, nan ],
[ 2700, 73.4757, 0.7942, 79.2834, 0.7360, 44.0987, 1.3232, 8.3515, 6.9871, 4.90e-15, 4.24e-15, 4.53e-15, nan ],
[ 2800, 75.4393, 0.8318, 80.4670, 0.7799, 40.5371, 1.5481, 8.4630, 7.4151, 6.08e-15, 5.87e-15, 5.04e-15, nan ],
[ 2900, 75.6332, 0.8900, 81.6008, 0.8249, 45.9760, 1.4641, 8.5134, 7.9069, 4.26e-15, 4.58e-15, 4.71e-15, nan ],
[ 3000, 77.3728, 0.9310, 82.5071, 0.8731, 46.1213, 1.5619, 8.2487, 8.7330, 5.07e-15, 4.46e-15, 5.37e-15, nan ],
[ 3100, 78.2474, 0.9830, 83.6872, 0.9191, 38.5533, 1.9951, 8.4146, 9.1410, 4.85e-15, 5.16e-15, 4.85e-15, nan ],
[ 3200, 80.2799, 1.0209, 84.0691, 0.9749, 40.7152, 2.0130, 9.0613, 9.0449, 5.78e-15, 5.55e-15, 5.57e-15, nan ],
[ 3300, 82.0040, 1.0629, 85.5344, 1.0190, 41.5236, 2.0990, 8.5677, 10.1731, 5.83e-15, 6.03e-15, 6.08e-15, nan ],
[ 3400, 82.8304, 1.1170, 86.3892, 1.0710, 42.1118, 2.1970, 8.4019, 11.0118, 6.40e-15, 5.89e-15, 6.04e-15, nan ],
[ 3500, 84.0765, 1.1661, 87.4560, 1.1210, 43.9852, 2.2290, 8.4583, 11.5912, 5.33e-15, 4.88e-15, 4.68e-15, nan ],
[ 3600, 86.2161, 1.2031, 87.8882, 1.1802, 44.6889, 2.3210, 8.3011, 12.4950, 4.98e-15, 5.08e-15, 5.21e-15, nan ],
[ 3700, 88.2894, 1.2410, 87.6495, 1.2500, 44.7030, 2.4509, 8.4580, 12.9540, 7.14e-15, 6.15e-15, 6.24e-15, nan ],
[ 3800, 89.5800, 1.2901, 88.6138, 1.3041, 45.7669, 2.5251, 8.2229, 14.0541, 4.79e-15, 5.35e-15, 5.03e-15, nan ],
[ 3900, 88.0882, 1.3819, 88.5926, 1.3740, 47.1605, 2.5811, 8.4119, 14.4708, 6.18e-15, 6.43e-15, 5.60e-15, nan ],
[ 4000, 88.3052, 1.4501, 88.6697, 1.4441, 46.9551, 2.7270, 8.4542, 15.1460, 8.13e-15, 7.03e-15, 7.12e-15, nan ],
[ 4100, 87.9315, 1.5299, 87.9863, 1.5290, 38.9035, 3.4580, 7.9547, 16.9120, 6.49e-15, 6.29e-15, 6.51e-15, nan ],
[ 4200, 89.1197, 1.5841, 88.4013, 1.5969, 41.2679, 3.4208, 8.3409, 16.9251, 7.64e-15, 7.16e-15, 7.21e-15, nan ],
[ 4300, 90.4984, 1.6351, 88.5613, 1.6708, 42.8647, 3.4521, 8.4257, 17.5619, 6.87e-15, 7.02e-15, 7.41e-15, nan ],
[ 4400, 90.7211, 1.7078, 89.6696, 1.7278, 44.4453, 3.4859, 8.4524, 18.3301, 6.15e-15, 6.33e-15, 6.45e-15, nan ],
[ 4500, 91.8643, 1.7641, 89.7773, 1.8051, 43.9881, 3.6840, 8.4744, 19.1228, 6.38e-15, 6.61e-15, 6.14e-15, nan ],
[ 4600, 91.9290, 1.8420, 89.9725, 1.8821, 45.5459, 3.7179, 8.2800, 20.4511, 7.94e-15, 7.22e-15, 6.91e-15, nan ],
[ 4700, 92.6933, 1.9071, 89.4180, 1.9770, 45.8679, 3.8540, 8.5943, 20.5691, 6.17e-15, 6.62e-15, 6.32e-15, nan ],
[ 4800, 92.8374, 1.9860, 90.2060, 2.0440, 46.0950, 3.9999, 8.5913, 21.4610, 7.29e-15, 6.77e-15, 6.09e-15, nan ],
[ 4900, 92.7695, 2.0711, 89.9128, 2.1369, 47.4890, 4.0460, 8.6061, 22.3260, 8.60e-15, 7.83e-15, 7.27e-15, nan ],
[ 5000, 94.2399, 2.1229, 90.6463, 2.2070, 47.7827, 4.1869, 8.4930, 23.5560, 7.46e-15, 6.79e-15, 6.76e-15, nan ],
[ 5100, 93.9627, 2.2151, 90.5328, 2.2991, 48.8150, 4.2639, 8.5444, 24.3599, 8.21e-15, 7.87e-15, 7.45e-15, nan ],
[ 5200, 95.7861, 2.2590, 91.8411, 2.3561, 43.3106, 4.9961, 8.6132, 25.1222, 7.62e-15, 8.81e-15, 8.19e-15, nan ],
[ 5300, 96.6391, 2.3260, 91.8292, 2.4478, 43.8700, 5.1239, 8.6323, 26.0398, 8.09e-15, 7.39e-15, 7.75e-15, nan ],
[ 5400, 96.6636, 2.4140, 91.7606, 2.5430, 43.4870, 5.3658, 8.3082, 28.0859, 7.52e-15, 7.13e-15, 7.19e-15, nan ],
[ 5500, 98.2769, 2.4631, 92.1073, 2.6281, 44.8771, 5.3940, 8.5884, 28.1851, 7.80e-15, 7.50e-15, 7.40e-15, nan ],
[ 5600, 99.3439, 2.5260, 91.3909, 2.7459, 46.0795, 5.4460, 8.6243, 29.0978, 6.73e-15, 6.82e-15, 8.29e-15, nan ],
[ 5700, 99.3776, 2.6162, 92.3580, 2.8150, 45.5959, 5.7020, 8.6504, 30.0550, 7.51e-15, 7.66e-15, 7.82e-15, nan ],
[ 5800, 98.4276, 2.7349, 91.7192, 2.9349, 47.2510, 5.6970, 8.5195, 31.5969, 9.77e-15, 9.50e-15, 8.77e-15, nan ],
[ 5900, 98.7764, 2.8200, 92.4236, 3.0138, 47.3160, 5.8870, 8.3164, 33.4940, 7.59e-15, 6.96e-15, 7.36e-15, nan ],
[ 6000, 99.5765, 2.8930, 92.8076, 3.1040, 48.7497, 5.9092, 8.7234, 33.0229, 7.65e-15, 8.34e-15, 8.60e-15, nan ],
[ 6100, 99.4876, 2.9929, 92.8181, 3.2079, 48.4696, 6.1431, 8.6965, 34.2381, 8.56e-15, 8.41e-15, 8.09e-15, nan ],
[ 6200, 99.4791, 3.0921, 92.7094, 3.3178, 43.7604, 7.0291, 8.1059, 37.9469, 8.77e-15, 7.33e-15, 7.14e-15, nan ],
[ 6300, 99.6851, 3.1860, 92.5386, 3.4320, 44.4876, 7.1390, 8.6725, 36.6211, 7.80e-15, 7.06e-15, 7.83e-15, nan ],
[ 6400, 100.0227, 3.2768, 91.8863, 3.5670, 44.3699, 7.3869, 9.1806, 35.7010, 7.25e-15, 7.02e-15, 6.82e-15, nan ],
[ 6500, 101.4090, 3.3338, 91.9229, 3.6778, 44.7602, 7.5531, 8.5447, 39.5660, 8.65e-15, 8.89e-15, 8.45e-15, nan ],
[ 6600, 101.1739, 3.4451, 92.4825, 3.7689, 45.8210, 7.6070, 8.1708, 42.6590, 7.22e-15, 7.57e-15, 7.65e-15, nan ],
[ 6700, 100.8701, 3.5610, 92.6737, 3.8760, 46.0465, 7.8008, 8.4464, 42.5270, 8.01e-15, 9.00e-15, 9.09e-15, nan ],
[ 6800, 100.8185, 3.6700, 92.0844, 4.0181, 46.2384, 8.0020, 8.4518, 43.7779, 9.13e-15, 8.55e-15, 8.76e-15, nan ],
[ 6900, 101.6718, 3.7470, 93.1433, 4.0901, 47.3950, 8.0380, 8.2127, 46.3870, 8.09e-15, 8.61e-15, 7.92e-15, nan ],
[ 7000, 101.7396, 3.8538, 92.6020, 4.2341, 47.8559, 8.1930, 8.3000, 47.2391, 7.82e-15, 7.37e-15, 7.80e-15, nan ],
[ 7100, 100.5908, 4.0100, 92.9631, 4.3390, 48.2321, 8.3630, 8.2241, 49.0470, 8.14e-15, 8.94e-15, 7.90e-15, nan ],
[ 7200, 102.6748, 4.0400, 92.3621, 4.4911, 44.0384, 9.4192, 8.3794, 49.5028, 7.41e-15, 8.43e-15, 7.95e-15, nan ],
[ 7300, 103.5481, 4.1180, 93.5106, 4.5600, 45.1557, 9.4430, 7.8723, 54.1658, 7.77e-15, 7.85e-15, 7.61e-15, nan ],
[ 7400, 103.5855, 4.2300, 92.8751, 4.7178, 44.9871, 9.7399, 8.0235, 54.6110, 7.64e-15, 7.63e-15, 7.41e-15, nan ],
[ 7500, 104.7447, 4.2970, 93.0875, 4.8351, 44.9330, 10.0169, 8.0909, 55.6290, 8.62e-15, 8.71e-15, 8.97e-15, nan ],
[ 7600, 105.2096, 4.3929, 93.0488, 4.9670, 45.5739, 10.1411, 8.1013, 57.0488, 7.76e-15, 8.67e-15, 7.84e-15, nan ],
[ 7700, 103.5130, 4.5831, 92.9306, 5.1050, 46.4924, 10.2041, 7.9385, 59.7610, 9.77e-15, 1.01e-14, 1.06e-14, nan ],
[ 7800, 102.8119, 4.7350, 93.2434, 5.2209, 47.0710, 10.3421, 7.4230, 65.5820, 8.48e-15, 8.56e-15, 8.45e-15, nan ],
[ 7900, 103.3468, 4.8320, 92.9415, 5.3730, 47.4058, 10.5340, 7.6943, 64.9021, 8.30e-15, 9.71e-15, 9.28e-15, nan ],
[ 8000, 103.8981, 4.9288, 93.4148, 5.4820, 47.4734, 10.7870, 7.5299, 68.0079, 8.46e-15, 9.30e-15, 9.18e-15, nan ],
[ 8100, 105.2037, 4.9901, 93.6307, 5.6069, 48.7991, 10.7579, 7.5352, 69.6700, 8.61e-15, 8.64e-15, 8.84e-15, nan ],
[ 8200, 105.0613, 5.1210, 93.2253, 5.7712, 44.0891, 12.2030, 7.3522, 73.1781, 8.71e-15, 8.97e-15, 9.19e-15, nan ],
[ 8300, 105.4159, 5.2290, 93.4097, 5.9011, 44.0101, 12.5248, 7.2936, 75.5761, 1.17e-14, 9.67e-15, 9.57e-15, nan ],
[ 8400, 104.9563, 5.3792, 93.4906, 6.0389, 45.1844, 12.4950, 7.6251, 74.0421, 8.88e-15, 9.15e-15, 9.60e-15, nan ],
[ 8500, 106.2316, 5.4419, 93.9856, 6.1510, 46.3293, 12.4781, 7.5885, 76.1809, 8.81e-15, 8.94e-15, 9.20e-15, nan ],
[ 8600, 105.6940, 5.5990, 93.0888, 6.3572, 46.4503, 12.7401, 7.5041, 78.8610, 9.75e-15, 8.71e-15, 8.47e-15, nan ],
[ 8700, 104.6890, 5.7850, 94.1153, 6.4349, 46.6549, 12.9809, 7.5398, 80.3242, 1.13e-14, 1.08e-14, 1.11e-14, nan ],
[ 8800, 105.9910, 5.8460, 93.0504, 6.6590, 46.7251, 13.2611, 7.9902, 77.5480, 1.13e-14, 1.15e-14, 1.23e-14, nan ],
[ 8900, 106.0730, 5.9750, 93.7439, 6.7608, 47.3292, 13.3910, 8.1088, 78.1600, 1.01e-14, 1.07e-14, 1.08e-14, nan ],
[ 9000, 106.9842, 6.0580, 93.9407, 6.8991, 48.0369, 13.4919, 7.9592, 81.4290, 1.03e-14, 1.01e-14, 1.06e-14, nan ],
[ 10000, 107.9048, 7.4151, 94.7767, | |
<filename>dev/lum_eff.py
import numpy as np
import matplotlib.pyplot as plt
from dev.output import getO
from wmpl.PythonNRLMSISE00.nrlmsise_00_header import *
from wmpl.PythonNRLMSISE00.nrlmsise_00 import *
from wmpl.Utils.TrajConversions import jd2Date, jd2LST
from wmpl.MetSim.MetSimErosionCyTools import *
from wmpl.Utils.PecinaCeplechaFunction import *
import pyximport
pyximport.install(setup_args={'include_dirs':[np.get_include()]})
def getAtmDensity(lat, lon, height, jd):
    """ For the given heights, returns the atmospheric density from NRLMSISE-00 model.
    More info: https://github.com/magnific0/nrlmsise-00/blob/master/nrlmsise-00.h
    Arguments:
        lat: [float] Latitude in radians.
        lon: [float] Longitude in radians.
        height: [float] Height in meters.
        jd: [float] Julian date.
    Return:
        [float] Atmosphere density in kg/m^3.
    """
    # Init the input array
    inp = nrlmsise_input()
    # Convert the given Julian date to datetime
    dt = jd2Date(jd, dt_obj=True)
    # Get the day of year
    doy = dt.timetuple().tm_yday
    # Get the second in day (seconds elapsed since local midnight of `dt`)
    midnight = dt.replace(hour=0, minute=0, second=0, microsecond=0)
    sec = (dt - midnight).seconds
    # Calculate the Local sidereal time (degrees)
    lst, _ = jd2LST(jd, np.degrees(lon))
    ### INPUT PARAMETERS ###
    ##########################################################################################################
    # Set year (no effect)
    inp.year = 0
    # Day of year
    inp.doy = doy
    # Seconds in a day
    inp.sec = sec
    # Altitude in kilometers
    inp.alt = height/1000.0
    # Geodetic latitude (deg)
    inp.g_lat = np.degrees(lat)
    # Geodetic longitude (deg)
    inp.g_long = np.degrees(lon)
    # Local apparent solar time (hours)
    # NOTE(review): local *sidereal* time / 15 is supplied where the model expects local
    # apparent *solar* time -- confirm this approximation is intended.
    inp.lst = lst/15
    # f107, f107A, and ap effects are neither large nor well established below 80 km and these parameters
    # should be set to 150., 150., and 4. respectively.
    # 81 day average of 10.7 cm radio flux (centered on DOY)
    inp.f107A = 150
    # Daily 10.7 cm radio flux for previous day
    inp.f107 = 150
    # Magnetic index (daily)
    inp.ap = 4
    ##########################################################################################################
    # Init the flags array
    flags = nrlmsise_flags()
    # Set output in kilograms and meters
    flags.switches[0] = 1
    # Set all switches to ON
    for i in range(1, 24):
        flags.switches[i] = 1
    # Array containing the following magnetic values:
    #   0 : daily AP
    #   1 : 3 hr AP index for current time
    #   2 : 3 hr AP index for 3 hrs before current time
    #   3 : 3 hr AP index for 6 hrs before current time
    #   4 : 3 hr AP index for 9 hrs before current time
    #   5 : Average of eight 3 hr AP indicies from 12 to 33 hrs prior to current time
    #   6 : Average of eight 3 hr AP indicies from 36 to 57 hrs prior to current time
    # NOTE(review): `aph` is filled below but never attached to `inp` (e.g. inp.ap_a),
    # so it currently has no effect on the model -- confirm whether that is intended.
    aph = ap_array()
    # Set all AP indices to 100
    for i in range(7):
        aph.a[i] = 100
    # Init the output array
    # OUTPUT VARIABLES:
    #     d[0] - HE NUMBER DENSITY(CM-3)
    #     d[1] - O NUMBER DENSITY(CM-3)
    #     d[2] - N2 NUMBER DENSITY(CM-3)
    #     d[3] - O2 NUMBER DENSITY(CM-3)
    #     d[4] - AR NUMBER DENSITY(CM-3)
    #     d[5] - TOTAL MASS DENSITY(GM/CM3) [includes d[8] in td7d]
    #     d[6] - H NUMBER DENSITY(CM-3)
    #     d[7] - N NUMBER DENSITY(CM-3)
    #     d[8] - Anomalous oxygen NUMBER DENSITY(CM-3)
    #     t[0] - EXOSPHERIC TEMPERATURE
    #     t[1] - TEMPERATURE AT ALT
    out = nrlmsise_output()
    # Evaluate the atmosphere with the given parameters
    gtd7(inp, flags, out)
    # Get the total mass density
    atm_density = out.d[5]
    return atm_density
def atmDensityPoly(ht, dens_co):
    """ Calculates the atmospheric density in kg/m^3 from a polynomial fit.
    Arguments:
        ht: [double] Height in meters.
        dens_co: [ndarray] Seven polynomial coefficients (degree 6), lowest order first.
    Return:
        [double] Atmosphere density at height ht (kg/m^3).
    """
    # The fit models log10(density) as a polynomial in (height / 10^6 m).
    # Accumulate the terms lowest order first, matching the explicit sum.
    scaled_ht = ht/1e6
    log10_dens = 0.0
    for order in range(7):
        log10_dens += dens_co[order]*scaled_ht**order
    return 10**log10_dens
def massLoss(K, sigma, m, rho_atm, v):
    """ Single-body mass loss differential equation; returns dm/dt.
    Arguments:
        K: [double] Shape-density coefficient (m^2/kg^(2/3)).
        sigma: [double] Ablation coefficient (s^2/m^2).
        m: [double] Mass (kg).
        rho_atm: [double] Atmosphere density (kg/m^3).
        v: [double] Velocity (m/s).
    Return:
        dm/dt: [double] Mass loss in kg/s (negative while ablating).
    """
    # dm/dt = -K * sigma * m^(2/3) * rho_atm * v^3
    ablation = K*sigma*m**(2/3.0)
    return -(ablation*rho_atm*v**3)
def deceleration(K, m, rho_atm, v):
    """ Single-body drag equation; returns the deceleration dv/dt.
    Arguments:
        K: [double] Shape-density coefficient (m^2/kg^(2/3)).
        m: [double] Mass (kg).
        rho_atm: [double] Atmosphere density (kg/m^3).
        v: [double] Velocity (m/s).
    Return:
        dv/dt: [double] Deceleration in m/s^2 (negative while decelerating).
    """
    # dv/dt = -K * m^(-1/3) * rho_atm * v^2, evaluated left to right so the
    # floating-point result matches the classical single-body formulation.
    inv_cbrt_mass = m**(-1/3.0)
    return -K*inv_cbrt_mass*rho_atm*v**2
def curve():
    """Return a hard-coded reference curve as an (N, 3) numpy array.

    NOTE(review): the column meanings are not stated anywhere in this file.
    From the values they appear to be (time-like coordinate, height [km],
    cumulative length [km]) -- confirm against the code that produced them.
    """
    return np.array([[-2.0001,78.502,0],
        [-1.9951,78.402,0.146627919],
        [-1.9901,78.302,0.293255837],
        [-1.9851,78.202,0.439883756],
        [-1.9801,78.102,0.586511674],
        [-1.9751,78.002,0.733139593],
        [-1.9701,77.902,0.879767511],
        [-1.9651,77.802,1.02639543],
        [-1.9601,77.702,1.173023349],
        [-1.9551,77.602,1.319651267],
        [-1.9501,77.502,1.466279186],
        [-1.9451,77.402,1.612907104],
        [-1.9401,77.302,1.759535023],
        [-1.935,77.2,1.9090955],
        [-1.93,77.1,2.055723418],
        [-1.925,77,2.202351337],
        [-1.92,76.9,2.348979255],
        [-1.915,76.8,2.495607174],
        [-1.91,76.7,2.642235093],
        [-1.905,76.6,2.788863011],
        [-1.9,76.5,2.93549093],
        [-1.895,76.4,3.082118848],
        [-1.89,76.3,3.228746767],
        [-1.885,76.2,3.375374685],
        [-1.88,76.1,3.522002604],
        [-1.875,76,3.668630522],
        [-1.87,75.9,3.815258441],
        [-1.865,75.8,3.96188636],
        [-1.86,75.7,4.108514278],
        [-1.855,75.6,4.255142197],
        [-1.85,75.5,4.401770115],
        [-1.845,75.4,4.548398034],
        [-1.84,75.3,4.695025952],
        [-1.835,75.2,4.841653871],
        [-1.83,75.1,4.98828179],
        [-1.825,75,5.134909708],
        [-1.82,74.9,5.281537627],
        [-1.815,74.8,5.428165545],
        [-1.81,74.7,5.574793464],
        [-1.805,74.6,5.721421382],
        [-1.8,74.5,5.868049301],
        [-1.795,74.4,6.014677219],
        [-1.79,74.3,6.161305138],
        [-1.785,74.2,6.307933057],
        [-1.78,74.1,6.454560975],
        [-1.775,74,6.601188894],
        [-1.77,73.9,6.747816812],
        [-1.765,73.8,6.894444731],
        [-1.76,73.7,7.041072649],
        [-1.755,73.6,7.187700568],
        [-1.75,73.5,7.334328487],
        [-1.745,73.4,7.480956405],
        [-1.74,73.3,7.627584324],
        [-1.735,73.2,7.774212242],
        [-1.73,73.1,7.920840161],
        [-1.725,73,8.067468079],
        [-1.72,72.9,8.214095998],
        [-1.715,72.8,8.360723917],
        [-1.71,72.7,8.507351835],
        [-1.705,72.6,8.653979754],
        [-1.7,72.5,8.800607672],
        [-1.695,72.4,8.947235591],
        [-1.69,72.3,9.093863509],
        [-1.685,72.2,9.240491428],
        [-1.68,72.1,9.387119346],
        [-1.675,72,9.533747265],
        [-1.67,71.9,9.680375184],
        [-1.665,71.8,9.827003102],
        [-1.66,71.7,9.973631021],
        [-1.655,71.6,10.12025894],
        [-1.65,71.5,10.26688686],
        [-1.645,71.4,10.41351478],
        [-1.64,71.3,10.56014269],
        [-1.635,71.2,10.70677061],
        [-1.63,71.1,10.85339853],
        [-1.625,71,11.00002645],
        [-1.62,70.9,11.14665437],
        [-1.615,70.8,11.29328229],
        [-1.61,70.7,11.43991021],
        [-1.605,70.6,11.58653812],
        [-1.6,70.5,11.73316604],
        [-1.595,70.4,11.87979396],
        [-1.59,70.3,12.02642188],
        [-1.585,70.2,12.1730498],
        [-1.58,70.1,12.31967772],
        [-1.575,70,12.46630564],
        [-1.57,69.9,12.61293355],
        [-1.565,69.8,12.75956147],
        [-1.56,69.7,12.90618939],
        [-1.555,69.6,13.05281731],
        [-1.55,69.5,13.19944523],
        [-1.545,69.4,13.34607315],
        [-1.54,69.3,13.49270107],
        [-1.535,69.2,13.63932898],
        [-1.53,69.1,13.7859569],
        [-1.525,69,13.93258482],
        [-1.52,68.9,14.07921274],
        [-1.515,68.8,14.22584066],
        [-1.51,68.7,14.37246858],
        [-1.505,68.6,14.5190965],
        [-1.5,68.5,14.66572441],
        [-1.495,68.4,14.81235233],
        [-1.49,68.3,14.95898025],
        [-1.485,68.2,15.10560817],
        [-1.48,68.1,15.25223609],
        [-1.475,68,15.39886401],
        [-1.47,67.9,15.54549193],
        [-1.465,67.8,15.69211984],
        [-1.46,67.7,15.83874776],
        [-1.455,67.6,15.98537568],
        [-1.45,67.5,16.1320036],
        [-1.445,67.4,16.27863152],
        [-1.44,67.3,16.42525944],
        [-1.435,67.2,16.57188736],
        [-1.43,67.1,16.71851527],
        [-1.425,67,16.86514319],
        [-1.42,66.9,17.01177111],
        [-1.415,66.8,17.15839903],
        [-1.41,66.7,17.30502695],
        [-1.405,66.6,17.45165487],
        [-1.4,66.5,17.59828279],
        [-1.395,66.4,17.7449107],
        [-1.39,66.3,17.89153862],
        [-1.385,66.2,18.03816654],
        [-1.38,66.1,18.18479446],
        [-1.375,66,18.33142238],
        [-1.37,65.9,18.4780503],
        [-1.365,65.8,18.62467822],
        [-1.36,65.7,18.77130613],
        [-1.355,65.6,18.91793405],
        [-1.35,65.5,19.06456197],
        [-1.345,65.4,19.21118989],
        [-1.34,65.3,19.35781781],
        [-1.335,65.2,19.50444573],
        [-1.33,65.1,19.65107365],
        [-1.325,65,19.79770156],
        [-1.32,64.9,19.94432948],
        [-1.315,64.8,20.0909574],
        [-1.31,64.7,20.23758532],
        [-1.305,64.6,20.38421324],
        [-1.3,64.5,20.53084116],
        [-1.295,64.4,20.67746908],
        [-1.29,64.3,20.82409699],
        [-1.285,64.2,20.97072491],
        [-1.28,64.1,21.11735283],
        [-1.275,64,21.26398075],
        [-1.27,63.9,21.41060867],
        [-1.265,63.8,21.55723659],
        [-1.26,63.7,21.70386451],
        [-1.255,63.6,21.85049242],
        [-1.25,63.5,21.99712034],
        [-1.245,63.4,22.14374826],
        [-1.24,63.3,22.29037618],
        [-1.235,63.2,22.4370041],
        [-1.23,63.1,22.58363202],
        [-1.225,63,22.73025994],
        [-1.22,62.9,22.87688785],
        [-1.215,62.8,23.02351577],
        [-1.21,62.7,23.17014369],
        [-1.205,62.6,23.31677161],
        [-1.2,62.5,23.46339953],
        [-1.195,62.4,23.61002745],
        [-1.19,62.3,23.75665537],
        [-1.185,62.2,23.90328328],
        [-1.18,62.1,24.0499112],
        [-1.175,62,24.19653912],
        [-1.17,61.9,24.34316704],
        [-1.165,61.8,24.48979496],
        [-1.16,61.7,24.63642288],
        [-1.155,61.6,24.7830508],
        [-1.15,61.5,24.92967871],
        [-1.145,61.4,25.07630663],
        [-1.14,61.3,25.22293455],
        [-1.135,61.2,25.36956247],
        [-1.13,61.1,25.51619039],
        [-1.125,61,25.66281831],
        [-1.12,60.9,25.80944623],
        [-1.115,60.8,25.95607414],
        [-1.11,60.7,26.10270206],
        [-1.105,60.6,26.24932998],
        [-1.1,60.5,26.3959579],
        [-1.095,60.4,26.54258582],
        [-1.09,60.3,26.68921374],
        [-1.085,60.2,26.83584166],
        [-1.08,60.1,26.98246957],
        [-1.075,60,27.12909749],
        [-1.07,59.9,27.27572541],
        [-1.065,59.8,27.42235333],
        [-1.06,59.7,27.56898125],
        [-1.055,59.6,27.71560917],
        [-1.05,59.5,27.86223709],
        [-1.045,59.4,28.008865],
        [-1.04,59.3,28.15549292],
        [-1.035,59.2,28.30212084],
        [-1.03,59.1,28.44874876],
        [-1.025,59,28.59537668],
        [-1.02,58.9,28.7420046],
        [-1.015,58.8,28.88863252],
        [-1.01,58.7,29.03526043],
        [-1.005,58.6,29.18188835],
        [-1,58.5,29.32851627],
        [-0.995,58.4,29.47514419],
        [-0.99,58.3,29.62177211],
        [-0.985,58.2,29.76840003],
        [-0.98,58.1,29.91502795],
        [-0.975,58,30.06165586],
        [-0.97,57.9,30.20828378],
        [-0.965,57.8,30.3549117],
        [-0.96,57.7,30.50153962],
        [-0.955,57.6,30.64816754],
        [-0.95,57.5,30.79479546],
        [-0.945,57.4,30.94142338],
        [-0.94,57.3,31.08805129],
        [-0.935,57.2,31.23467921],
        [-0.93,57.1,31.38130713],
        [-0.925,57,31.52793505],
        [-0.92,56.9,31.67456297],
        [-0.915,56.8,31.82119089],
        [-0.91,56.7,31.96781881],
        [-0.905,56.6,32.11444672],
        [-0.9,56.5,32.26107464],
        [-0.895,56.4,32.40770256],
        [-0.89,56.3,32.55433048],
        [-0.885,56.2,32.7009584],
        [-0.88,56.1,32.84758632],
        [-0.875,56,32.99421424],
        [-0.87,55.9,33.14084215],
        [-0.865,55.8,33.28747007],
        [-0.86,55.7,33.43409799],
        [-0.855,55.6,33.58072591],
        [-0.85,55.5,33.72735383],
        [-0.845,55.4,33.87398175],
        [-0.84,55.3,34.02060967],
        [-0.835,55.2,34.16723758],
        [-0.83,55.1,34.3138655],
        [-0.825,55,34.46049342],
        [-0.82,54.9,34.60712134],
        [-0.815,54.8,34.75374926],
        [-0.81,54.7,34.90037718],
        [-0.805,54.6,35.0470051],
        [-0.8,54.5,35.19363301],
        [-0.795,54.4,35.34026093],
        [-0.79,54.3,35.48688885],
        [-0.785,54.2,35.63351677],
        [-0.78,54.1,35.78014469],
        [-0.7749,53.998,35.92970516],
        [-0.7699,53.898,36.07633308],
        [-0.7649,53.798,36.222961],
        [-0.7599,53.698,36.36958892],
        [-0.7549,53.598,36.51621684],
        [-0.7499,53.498,36.66284476],
        [-0.7449,53.398,36.80947268],
        [-0.7399,53.298,36.95610059],
        [-0.7349,53.198,37.10272851],
        [-0.7299,53.098,37.24935643],
        [-0.7249,52.998,37.39598435],
        [-0.7199,52.898,37.54261227],
        [-0.7149,52.798,37.68924019],
        [-0.71,52.7,37.83293555],
        [-0.705,52.6,37.97956347],
        [-0.7,52.5,38.12619139],
        [-0.695,52.4,38.2728193],
        [-0.69,52.3,38.41944722],
        [-0.685,52.2,38.56607514],
        [-0.68,52.1,38.71270306],
        [-0.675,52,38.85933098],
        [-0.67,51.9,39.0059589],
        [-0.665,51.8,39.15258681],
        [-0.66,51.7,39.29921473],
        [-0.655,51.6,39.44584265],
        [-0.65,51.5,39.59247057],
        [-0.645,51.4,39.73909849],
        [-0.64,51.3,39.88572641],
        [-0.635,51.2,40.03235433],
        [-0.63,51.1,40.17898224],
        [-0.625,51,40.32561016],
        [-0.62,50.9,40.47223808],
        [-0.615,50.8,40.618866],
        [-0.61,50.7,40.76549392],
        [-0.605,50.6,40.91212184],
        [-0.6,50.5,41.05874976],
        [-0.595,50.4,41.20537767],
        [-0.59,50.3,41.35200559],
        [-0.585,50.2,41.49863351],
        [-0.58,50.1,41.64526143],
        [-0.575,50,41.79188935],
        [-0.57,49.9,41.93851727],
        [-0.565,49.8,42.08514519],
        [-0.56,49.7,42.2317731],
        [-0.555,49.6,42.37840102],
        [-0.55,49.5,42.52502894],
        [-0.545,49.4,42.67165686],
        [-0.54,49.3,42.81828478],
        [-0.535,49.2,42.9649127],
        [-0.53,49.1,43.11154062],
        [-0.525,49,43.25816853],
        [-0.52,48.9,43.40479645],
        [-0.515,48.8,43.55142437],
        [-0.51,48.7,43.69805229],
        [-0.505,48.6,43.84468021],
        [-0.5,48.5,43.99130813],
        [-0.495,48.4,44.13793605],
        [-0.49,48.3,44.28456396],
        [-0.485,48.2,44.43119188],
        [-0.48,48.1,44.5778198],
        [-0.475,48,44.72444772],
        [-0.47,47.9,44.87107564],
        [-0.465,47.8,45.01770356],
        [-0.46,47.7,45.16433148],
        [-0.455,47.6,45.31095939],
        [-0.45,47.5,45.45758731],
        [-0.445,47.4,45.60421523],
        [-0.44,47.3,45.75084315],
        [-0.435,47.2,45.89747107],
        [-0.43,47.1,46.04409899],
        [-0.425,47,46.19072691],
        [-0.42,46.9,46.33735482],
        [-0.415,46.8,46.48398274],
        [-0.41,46.7,46.63061066],
        [-0.405,46.6,46.77723858],
        [-0.4,46.5,46.9238665],
        [-0.395,46.4,47.07049442],
        [-0.39,46.3,47.21712234],
        [-0.385,46.2,47.36375025],
        [-0.38,46.1,47.51037817],
        [-0.375,46,47.65700609],
        [-0.37,45.9,47.80363401],
        [-0.365,45.8,47.95026193],
        [-0.36,45.7,48.09688985],
        [-0.355,45.6,48.24351777],
        [-0.35,45.5,48.39014568],
        [-0.345,45.4,48.5367736],
        [-0.34,45.3,48.68340152],
        [-0.335,45.2,48.83002944],
        [-0.33,45.1,48.97665736],
        [-0.325,45,49.12328528],
        [-0.32,44.9,49.2699132],
        [-0.315,44.8,49.41654111],
        [-0.31,44.7,49.56316903],
        [-0.305,44.6,49.70979695],
        [-0.3,44.5,49.85642487],
        [-0.295,44.4,50.00305279],
        [-0.29,44.3,50.14968071],
        [-0.285,44.2,50.29630863],
        [-0.28,44.1,50.44293654],
        [-0.275,44,50.58956446],
        [-0.27,43.9,50.73619238],
        [-0.265,43.8,50.8828203],
        [-0.26,43.7,51.02944822],
        [-0.255,43.6,51.17607614],
        [-0.25,43.5,51.32270406],
        [-0.245,43.4,51.46933197],
        [-0.24,43.3,51.61595989],
        [-0.235,43.2,51.76258781],
        [-0.23,43.1,51.90921573],
        [-0.225,43,52.05584365],
        [-0.22,42.9,52.20247157],
        [-0.215,42.8,52.34909949],
        [-0.21,42.7,52.4957274],
        [-0.205,42.6,52.64235532],
        [-0.2,42.5,52.78898324],
        [-0.195,42.4,52.93561116],
        [-0.19,42.3,53.08223908],
        [-0.185,42.2,53.228867],
        [-0.18,42.1,53.37549492],
        [-0.175,42,53.52212283],
        [-0.17,41.9,53.66875075],
        [-0.165,41.8,53.81537867],
        [-0.16,41.7,53.96200659],
        [-0.155,41.6,54.10863451],
        [-0.15,41.5,54.25526243],
        [-0.145,41.4,54.40189035],
        [-0.14,41.3,54.54851826],
        [-0.135,41.2,54.69514618],
        [-0.13,41.1,54.8417741],
        [-0.125,41,54.98840202],
        [-0.12,40.9,55.13502994],
        [-0.115,40.8,55.28165786],
        [-0.11,40.7,55.42828578],
        [-0.105,40.6,55.57491369],
        [-0.1,40.5,55.72154161],
        [-0.095,40.4,55.86816953],
        [-0.09,40.3,56.01479745],
        [-0.085,40.2,56.16142537],
        [-0.08,40.1,56.30805329],
        [-0.075,40,56.45468121],
        [-0.07,39.9,56.60130912],
        [-0.065,39.8,56.74793704],
        [-0.06,39.7,56.89456496],
        [-0.055,39.6,57.04119288],
        [-0.05,39.5,57.1878208],
        [-0.045,39.4,57.33444872],
        [-0.04,39.3,57.48107664],
        [-0.035,39.2,57.62770455],
        [-0.03,39.1,57.77433247],
        [-0.025,39,57.92096039],
        [-0.02,38.9,58.06758831],
        [-0.015,38.8,58.21421623],
        [-0.01,38.7,58.36084415],
        [-0.005,38.6,58.50747207],
        [0,38.5,58.65409998],
        [0.005,38.4,58.8007279],
        [0.01,38.3,58.94735582],
        [0.015,38.2,59.09398374],
        [0.02,38.1,59.24061166],
        [0.025,38,59.38723958],
        [0.03,37.9,59.5338675],
        [0.035,37.8,59.68049541],
        [0.04,37.7,59.82712333],
        [0.045,37.6,59.97375125],
        [0.05,37.5,60.12037917],
        [0.055,37.4,60.26700709],
        [0.06,37.3,60.41363501],
        [0.065,37.2,60.56026293],
        [0.07,37.1,60.70689084],
        [0.075,37,60.85351876],
        [0.08,36.9,61.00014668],
        [0.085,36.8,61.1467746],
        [0.09,36.7,61.29340252],
        [0.095,36.6,61.44003044],
        [0.1,36.5,61.58665836],
        [0.105,36.4,61.73328627],
        [0.11,36.3,61.87991419],
        [0.115,36.2,62.02654211],
        [0.12,36.1,62.17317003],
        [0.125,36,62.31979795],
        [0.13,35.9,62.46642587],
        [0.135,35.8,62.61305379],
        [0.14,35.7,62.7596817],
        [0.145,35.6,62.90630962],
        [0.15,35.5,63.05293754],
        [0.155,35.4,63.19956546],
        [0.16,35.3,63.34619338],
        [0.165,35.2,63.4928213],
        [0.17,35.1,63.63944922],
        [0.175,35,63.78607713],
        [0.18,34.9,63.93270505],
        [0.185,34.8,64.07933297],
        [0.19,34.7,64.22596089],
        [0.195,34.6,64.37258881],
        [0.2,34.5,64.51921673],
        [0.205,34.4,64.66584465],
        [0.21,34.3,64.81247256],
        [0.215,34.2,64.95910048],
        [0.22,34.1,65.1057284],
        [0.225,34,65.25235632],
        [0.23,33.9,65.39898424],
        [0.235,33.8,65.54561216],
        [0.24,33.7,65.69224008],
        [0.245,33.6,65.83886799],
        [0.25,33.5,65.98549591],
        [0.255,33.4,66.13212383],
        [0.26,33.3,66.27875175],
        [0.265,33.2,66.42537967],
        [0.27,33.1,66.57200759],
        [0.275,33,66.7186355],
        [0.28,32.9,66.86526342],
        [0.285,32.8,67.01189134],
        [0.29,32.7,67.15851926],
        [0.295,32.6,67.30514718],
        [0.3,32.5,67.4517751],
        [0.305,32.4,67.59840302],
        [0.31,32.3,67.74503093],
        [0.315,32.2,67.89165885],
        [0.32,32.1,68.03828677],
        [0.325,32,68.18491469],
        [0.33,31.9,68.33154261],
        [0.335,31.8,68.47817053],
        [0.34,31.7,68.62479845],
        [0.345,31.6,68.77142636],
        [0.35,31.5,68.91805428],
        [0.355,31.4,69.0646822],
        [0.36,31.3,69.21131012],
        [0.365,31.2,69.35793804],
        [0.37,31.1,69.50456596],
        [0.375,31,69.65119388],
        [0.38,30.9,69.79782179],
        [0.385,30.8,69.94444971],
        [0.39,30.7,70.09107763],
        [0.395,30.6,70.23770555],
        [0.4,30.5,70.38433347],
        [0.405,30.4,70.53096139],
        [0.41,30.3,70.67758931],
        [0.415,30.2,70.82421722],
        [0.42,30.1,70.97084514],
        [0.425,30,71.11747306],
        [0.43,29.9,71.26410098],
        [0.435,29.8,71.4107289],
        [0.44,29.7,71.55735682],
        [0.445,29.6,71.70398474],
        [0.45,29.5,71.85061265],
        [0.455,29.4,71.99724057],
        [0.46,29.3,72.14386849],
        [0.465,29.2,72.29049641],
        [0.47,29.1,72.43712433],
        [0.475,29,72.58375225],
        [0.48,28.9,72.73038017],
        [0.485,28.8,72.87700808],
        [0.49,28.7,73.023636],
        [0.495,28.6,73.17026392],
        [0.5,28.5,73.31689184]])
# Polynomial coefficients for log10(atmosphere density), as consumed by
# atmDensityPoly() (lowest order first; highest-order term zeroed).
dens_co = np.array([-9.02726494,
        0.108986696,
        -0.0005189,
        -2.0646e-5,
        1.93881e-7,
        -4.7231e-10, 0])
# Initial meteoroid state: velocity (m/s) and mass (kg).
v_init = 27760
mass_init = 4500
# Ablation coefficient (s^2/m^2).
sigma = 0.01/1e6
# Shape-density coefficient for an assumed bulk density of 2500 kg/m^3.
K = 0.7/(2500)**(2/3)
# Integration heights, from 85.5 km down to the ground.
height_list = np.linspace(85500, 0, 4000)
v = v_init
m = mass_init
t = luminousEfficiency(3, 1, v_init, mass_init)
# Rows appended below: [height (m), tau (%), velocity, mass, dynamic pressure].
tau_list = []
tau_list.append([85500, t*100, v_init, mass_init, 1000])
# Height step (negative, since height_list is descending).
dh = height_list[1] - height_list[0]
# Event geolocation and epoch for the atmosphere model.
lat = np.radians(45.7024)
lon = np.radians(26.9007)
jd = 2457029.54582
# NOTE(review): `sudden_loss`, `random` and `counter` are never used below -- confirm leftovers.
sudden_loss = 850
import random
counter = 0
for h in height_list:
    dens = getAtmDensity(lat, lon, h, jd)
    dv = deceleration(K, m, dens, v)
    dm = massLoss(K, sigma, m, dens, v)
    # Path length of one height step, assuming a 47 deg zenith angle.
    dl = dh/(np.cos(np.radians(47)))
    dt = np.abs(dl/v)
    # Forward-Euler update of velocity and mass.
    vel = v + dv*dt
    mass = m + dm*dt
    # Hand-tuned discrete mass losses (fragmentation events) at fixed heights.
    if np.abs(h - 53500) <= 10:
        mass -= 30
    if 42000 <= h <= 48000:
        mass -= 11.39
    if np.abs(h - 44700) <= 10:
        print("1")
        mass -= 300
    if np.abs(h - 43200) <= 10:
        print("2")
        mass -= 500
    if np.abs(h - 42000) <= 10:
        print("3")
        mass -= 100
    if np.abs(h - 41000) <= 10:
        print("4")
        mass -= 10
    if h <= 42000:
        mass -= 1.1
    # Luminous efficiency and dynamic pressure of the updated state.
    t = luminousEfficiency(3, 1, vel, mass)
    dyn = dens*vel**2
    tau_list.append([h, t*100, vel, mass, dyn])
    v = vel
    m = mass
tau_list = np.array(tau_list)
# Reference data points (two columns).
# NOTE(review): column semantics are not stated here -- confirm against the plotting code.
data = np.array([[0.009858000137488755, 4429.609300890416],
        [0.8667387208628936, 4195.558528162144],
        [0.8792236851030195, 3763.90373001826],
        [1.389495494373136, 3112.6032895675453],
        [1.4095105244410164, 2309.1798560776215],
        [1.6496480740980206, 2247.345998883871],
        [1.6734104799051615, 1579.16783936594],
        [1.74677120801992, 644.8077600240982],
        [1.9867199185099855, 610.7375420334304],
        [2.1036881784318844, 180.0539902913994],
        [2.3251963220650094, 165.97370651171852],
        [2.7213387683753085, 34.383159035209836],
        [3.007882518043102, 3.6137389607345267],
        [3.051209653167377, 0.9557705280602697],
        [3.1397452344877848, 0.19799768745384114]])
# Heights, masses and velocities from the output module (dev.output.getO).
ho, mo, vo = getO()
to_list = []
dyn_list = []
for ii in range(len(ho)):
    dens = getAtmDensity(lat, lon, ho[ii], jd)
    t = luminousEfficiency(3, 1, vo[ii], mo[ii])
    dyn = dens*vo[ii]**2
    to_list.append(t)
    dyn_list.append(dyn)
    print(ho[ii], dyn/1e6)
new_plots = np.array([[101912.0595578985, 4999.9660015101645, 27759.987698420442],
[101816.28818611383, 4999.965486559112, 27759.987439805253],
[101720.51465989438, 4999.964962922354, 27759.9871767204],
[101624.73897926809, 4999.96443044635, 27759.98690908686],
[101528.96114426314, 4999.9638889747985, 27759.9866368242],
[101433.18115490804, 4999.9633383485925, 27759.986359850536],
[101337.3990112316, 4999.9627784057675, 27759.98607808252],
[101241.61471326285, 4999.962208981452, 27759.985791435294],
[101145.82826103122, 4999.961629907813, 27759.9854998225],
[101050.03965456637, 4999.9610410140085, 27759.9852031562],
[100954.2488938983, 4999.960442126128, 27759.98490134691],
[100858.45597905734, 4999.959833067143, 27759.98459430352],
[100762.66091007413, 4999.959213656848, 27759.9842819333],
[100666.86368697963, 4999.9585837118075, 27759.983964141826],
[100571.06430980515, 4999.957943045296, 27759.983640833023],
[100475.26277858234, 4999.957291467241, 27759.98331190907],
[100379.45909334318, 4999.956628784164, 27759.982977270392],
[100283.65325412001, 4999.955954799121, 27759.982636815635],
[100187.84526094556, 4999.95526931164, 27759.982290441632],
[100092.03511385286, 4999.954572117658, 27759.98193804337],
[99996.22281287538, 4999.95386300946, 27759.981579513948],
[99900.40835804692, 4999.953141775613, 27759.98121474455],
[99804.59174940169, 4999.952408200902, 27759.980843624424],
[99708.77298697429, 4999.95166206626, 27759.980466040834],
[99612.95207079971, 4999.950903148705, 27759.980081879017],
[99517.12900091337, 4999.950131221269, 27759.979691022174],
[99421.30377735107, 4999.9493460529275, 27759.979293351415],
[99325.47640014905, 4999.948547408529, 27759.97888874571],
[99229.646869344, 4999.947735048721, 27759.9784770819],
[99133.81518497299, 4999.946908729882, 27759.978058234585],
[99037.98134707358, 4999.946068204039, 27759.977632076163],
[98942.14535568377, 4999.945213218797, 27759.97719847674],
[98846.30721084202, 4999.944343517261, 27759.97675730411],
[98750.46691258725, 4999.943458837954, 27759.976308423706],
[98654.62446095886, 4999.942558914742, 27759.97585169856],
[98558.77985599675, 4999.941643476751, 27759.97538698927],
[98462.9330977413, 4999.940712248282, 27759.974914153958],
[98367.0841862334, 4999.939764948731, 27759.974433048214],
[98271.23312151444, 4999.938801292502, 27759.973943525052],
[98175.37990362634, 4999.93782098892, 27759.973445434884],
[98079.52453261154, 4999.936823742144, 27759.972938625455],
[97983.66700851303, 4999.9358092510765, 27759.972422941813],
[97887.80733137434, 4999.934777209272, 27759.971898226242],
[97791.94550123956, 4999.9337273048495, 27759.97136431824],
[97696.08151815335, 4999.932659220391, 27759.970821054452],
[97600.21538216097, 4999.9315726328505, 27759.970268268615],
[97504.34709330821, 4999.930467213458, 27759.96970579154],
[97408.47665164154, 4999.929342627617, 27759.96913345102],
[97312.60405720797, 4999.92819853481, 27759.96855107182],
[97216.72931005515, 4999.927034588489, 27759.96795847558],
[97120.85241023138, 4999.92585043598, 27759.967355480807],
[97024.9733577856, 4999.924645718373, 27759.966741902786],
[96929.09215276736, 4999.923420070417, 27759.96611755354],
[96833.20879522694, 4999.922173120411, 27759.965482241776],
[96737.32328521526, 4999.920904490094, 27759.964835772822],
[96641.43562278393, 4999.919613794534, 27759.96417794858],
[96545.54580798527, 4999.918300642013, 27759.96350856744],
[96449.6538408723, 4999.9169646339105, 27759.962827424264],
[96353.75972149879, 4999.91560536459, 27759.962134310284],
[96257.86344991923, 4999.914222421276, 27759.96142901307],
[96161.96502618886, 4999.912815383934, 27759.96071131645],
[96066.0644503637, 4999.911383825148, 27759.95998100045],
[95970.16172250055, 4999.9099273099955, 27759.959237841245],
[95874.25684265698, 4999.908445395921, 27759.95848161106],
[95778.34981089138, 4999.906937632606, 27759.957712078143],
[95682.44062726296, 4999.905403561838, 27759.956929006683],
[95586.52929183176, 4999.90384271738, 27759.956132156713],
[95490.61580465866, 4999.9022546248325, 27759.955321284087],
[95394.70016580545, 4999.900638801498, 27759.95449614038],
[95298.78237533473, 4999.89899475624, 27759.953656472833],
[95202.86243331006, 4999.897321989348, 27759.952802024254],
[95106.94033979585, 4999.895619992383, 27759.951932532982],
[95011.01609485749, 4999.8938882480425, 27759.951047732775],
[94915.08969856128, 4999.892126230005, 27759.95014735276],
[94819.16115097448, 4999.890333402784, 27759.949231117338],
[94723.23045216534, 4999.888509221573, 27759.948298746127],
[94627.29760220309, 4999.8866531320955, 27759.947349953854],
[94531.36260115798, 4999.884764570439, 27759.9463844503],
[94435.42544910125, 4999.882842962904, 27759.9454019402],
[94339.48614610525, 4999.880887725836, 27759.94440212317],
[94243.54469224335, 4999.878898265467, 27759.943384693604],
[94147.60108759, 4999.8768739777415, 27759.94234934064],
[94051.65533222075, 4999.874814248153, 27759.94129574799],
[93955.70742621229, 4999.872718451567, 27759.940223593927],
[93859.75736964244, 4999.870585952053, 27759.939132551153],
[93763.80516259016, 4999.868416102701, 27759.938022286722],
[93667.8508051356, 4999.866208245446, 27759.93689246195],
[93571.89429736014, 4999.863961710881, 27759.93574273231],
[93475.93563934631, 4999.861675818078, 27759.934572747352],
[93379.97483117794, 4999.859349874395, 27759.933382150582],
[93284.0118729401, 4999.8569831752875, 27759.932170579395],
[93188.04676471914, 4999.854575004115, 27759.930937664947],
[93092.0795066027, 4999.852124631944, 27759.929683032078],
[92996.1100986798, 4999.849631317352, 27759.928406299194],
[92900.13854104074, 4999.847094306219, 27759.92710707816],
[92804.16483377725, 4999.844512831531, 27759.925784974213],
[92708.18897698242, 4999.841886113163, 27759.924439585833],
[92612.21097075079, 4999.839213357677, 27759.923070504654],
[92516.23081517832, 4999.836493758101, 27759.921677315335],
[92420.24851036245, 4999.833726493716, 27759.92025959547],
[92324.26405640211, 4999.8309107298355, 27759.918816915455],
[92228.27745339775, 4999.82804561758, 27759.917348838375],
[92132.28870145137, 4999.825130293652, 27759.91585491991],
[92036.29780066655, 4999.82216388011, 27759.914334708188],
[91940.30475114845, 4999.819145484127, 27759.912787743677],
[91844.30955300386, 4999.816074197764, 27759.911213559062],
[91748.31220634123, 4999.812949097723, 27759.909611679133],
[91652.31271127069, 4999.809769245108, 27759.907981620643],
[91556.31106790407, 4999.806533685181, 27759.906322892184],
[91460.30727635496, 4999.803241447106, 27759.904634994076],
[91364.30133673869, 4999.7998915437, 27759.902917418207],
[91268.2932491724, 4999.796482971176, 27759.901169647932],
[91172.28301377509, 4999.793014708885, 27759.899391157916],
[91076.27063066754, 4999.789485719046, 27759.89758141401],
[90980.2560999725, 4999.785894946486, 27759.895739873107],
[90884.23942181458, 4999.782241318365, 27759.893865983006],
[90788.22059632039, 4999.778523743901, 27759.891959182274],
[90692.1996236185, 4999.7747411140945, 27759.89001890009],
[90596.1765038395, 4999.770892301444, 27759.888044556115],
[90500.15123711604, 4999.766976159662, 27759.886035560332],
[90404.12382358285, 4999.762991523381, 27759.88399131292],
[90308.0942633768, 4999.758937207867, 27759.881911204062],
[90212.0625566369, 4999.754812008717, 27759.87979461383],
[90116.02870350434, 4999.750614701556, 27759.87764091201],
[90019.99270412259, 4999.746344041737, 27759.875449457948],
[89923.95455863731, 4999.741998764028, 27759.873219600402],
[89827.91426719654, 4999.737577582299, 27759.870950677356],
[89731.8718299506, 4999.7330791892055, 27759.86864201588],
[89635.82724705224, 4999.728502255864, 27759.866292931958],
[89539.78051865658, 4999.72384543153, 27759.863902730314],
[89443.73164492124, 4999.719107343263, 27759.86147070424],
[89347.68062600632, 4999.714286595598, 27759.858996135437],
[89251.62746207447, 4999.709381770202, 27759.85647829383],
[89155.57215329091, 4999.704391425532, 27759.853916437394],
[89059.5146998235, 4999.69931409649, 27759.851309811977],
[88963.45510184276, 4999.694148294068, 27759.848657651102],
[88867.39335952191, 4999.688892504995, 27759.84595917583],
[88771.32947303695, 4999.683545191373, 27759.843213594508],
[88675.26344256666, 4999.678104790314, 27759.840420102635],
[88579.19526829268, 4999.67256971357, 27759.837577882645],
[88483.1249503995, 4999.666938347157, 27759.83468610372],
[88387.05248907462, 4999.661209050978, 27759.831743921604],
[88290.97788450847, 4999.655380158439, 27759.828750478384],
[88194.90113689452, 4999.649449976059, 27759.82570490231],
[88098.82224642935, 4999.64341678308, 27759.822606307585],
[88002.74121331265, 4999.637278831066, 27759.819453794153],
[87906.6580377473, 4999.631034343502, 27759.816246447506],
[87810.57271993943, 4999.624681515384, 27759.812983338466],
[87714.48526009843, 4999.61821851281, 27759.809663522963],
[87618.39565843703, 4999.611643472563, 27759.806286041847],
[87522.30391517137, 4999.604954501683, 27759.802849920638],
[87426.21003052105, 4999.598149677047, 27759.79935416932],
[87330.11400470912, 4999.591227044932, 27759.79579778213],
[87234.01583796223, 4999.584184620576, 27759.792179737313],
[87137.91553051063, 4999.577020387742, 27759.7884989969],
[87041.81308258824, 4999.569732298264, 27759.78475450649],
[86945.70849443272, 4999.562318271598, 27759.780945194994],
[86849.6017662855, 4999.554776194362, 27759.77706997441],
[86753.49289839188, 4999.547103919873, 27759.773127739594],
[86657.38189100105, 4999.539299267679, 27759.76911736801],
[86561.26874436616, 4999.531360023085, 27759.765037719477],
[86465.15345874442, 4999.523283936673, 27759.760887635926],
[86369.03603439713, 4999.515068723818, 27759.756665941164],
[86272.91647158974, 4999.5067120642, 27759.75237144061],
[86176.7947705919, 4999.498211601303, 27759.74800292103],
[86080.67093167763, 4999.489564941921, 27759.743559150305],
[85984.54495512522, 4999.480769655645, 27759.739038877135],
[85888.41684121743, 4999.471823274358, 27759.734440830813],
[85792.28659024152, 4999.46272329171, 27759.729763720923],
[85696.15420248928, 4999.453467162601, 27759.725006237102],
[85600.01967825716, 4999.444052302648, 27759.720167048734],
[85503.88301784631, 4999.434476087654, 27759.715244804705],
[85407.74422156265, 4999.424735853066, 27759.710238133102],
[85311.60328971696, 4999.414828893429, 27759.705145640943],
[85215.46022262497, 4999.404752461837, 27759.699965913886],
[85119.31502060736, 4999.394503769371, 27759.694697515963],
[85023.16768398992, 4999.384079984541, 27759.689338989247],
[84927.01821310358, 4999.373478232714, 27759.683888853593],
[84830.86660828454, 4999.362695595539, 27759.678345606346],
[84734.71286987429, 4999.351729110369, 27759.672707722],
[84638.55699821969, 4999.340575769673, 27759.666973651936],
[84542.39899367311, 4999.329232520442, 27759.66114182411],
[84446.23885659248, 4999.317696263594, 27759.655210642715],
[84350.07658734135, 4999.305963853368, 27759.649178487918],
[84253.91218628902, 4999.294032096712, 27759.64304371549],
[84157.74565381059, 4999.281897752669, 27759.636804656526],
[84061.57699028708, 4999.269557531754, 27759.630459617118],
[83965.40619610548, 4999.257008095326, 27759.624006878013],
[83869.23327165887, 4999.244246054953, 27759.61744469432],
[83773.0582173465, 4999.231267971771, 27759.610771295123],
[83676.88103357387, 4999.218070355839, 27759.60398488321],
[83580.70172075286, 4999.204649665485, 27759.597083634693],
[83484.52027930177, 4999.191002306647, 27759.59006569868],
[83388.33670964547, 4999.177124632208, 27759.58292919693],
[83292.15101221547, 4999.163012941325, 27759.575672223524],
[83195.96318745002, 4999.148663478754, 27759.56829284448],
[83099.77323579419, 4999.134072434163, 27759.560789097435],
[83003.5811577, 4999.119235941441, 27759.553158991264],
[82907.38695362651, 4999.1041500780075, 27759.54540050573],
[82811.19062403993, 4999.088810864107, 27759.53751159112],
[82714.99216941372, 4999.073214262098, 27759.529490167883],
[82618.79159022868, 4999.057356175745, 27759.521334126246],
[82522.58888697308, 4999.04123244949, 27759.513041325867],
[82426.38406014277, 4999.024838867731, 27759.50460959543],
[82330.17711024123, 4999.008171154085, 27759.496036732282],
[82233.9680377798, 4998.99122497065, 27759.48732050205],
[82137.75684327768, 4998.973995917258, 27759.47845863825],
[82041.54352726207, 4998.956479530724, 27759.4694488419],
[81945.32809026833, 4998.938671284084, 27759.460288781123],
[81849.11053284006, 4998.920566585835, 27759.45097609077],
[81752.89085552921, 4998.90216077916, 27759.441508372],
[81656.66905889622, 4998.883449141152, 27759.43188319187],
[81560.44514351014, 4998.864426882027, 27759.42209808296],
[81464.21910994872, 4998.845089144341, 27759.412150542943],
[81367.9909587986, 4998.825431002185, 27759.402038034168],
[81271.76069065536, 4998.805447460388, 27759.39175798327],
[81175.52830612367, 4998.785133453705, 27759.38130778072],
[81079.29380581748, 4998.7644838460055, 27759.37068478042],
[80983.05719036004, 4998.743493429446, 27759.359886299273],
[80886.81846038411, 4998.722156923647, 27759.34890961676],
[80790.57761653207, 4998.700468974856, 27759.337751974497],
[80694.33465945606, | |
import tensorflow as tf
import numpy as np
from utils import unroll_data
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, SimpleRNN, GRU, Dropout, LSTM, LeakyReLU, Lambda
from keras.initializers import glorot_uniform # Or your initializer of choice
from keras import regularizers
from keras.optimizers import *
from models.utils import fast_mvnorm_diagonal_logprob
print("TensorFlow Version: {}".format(tf.__version__))
print("Keras Version: {}".format(keras.__version__))
# Cap TensorFlow's intra-/inter-op thread pools (TF1-style API) so the session
# does not oversubscribe CPU cores.
config = tf.ConfigProto()
config.intra_op_parallelism_threads = 4
config.inter_op_parallelism_threads = 4
# NOTE(review): the session is created for its side effect only and never bound
# to a name; presumably later Keras calls pick up a default session — confirm.
tf.Session(config=config)
# run a check that tensorflow works on import
def check_tf():
    """Smoke-test TensorFlow at import time.

    Builds a tiny 2x3 @ 3x2 matmul graph and runs it in a throwaway session;
    raises if the TF installation is broken.
    """
    a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
    b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
    c = tf.matmul(a, b)
    with tf.Session() as sess:
        sess.run(c)
    # BUG FIX: was a Python-2 `print` statement (SyntaxError on Python 3 and
    # inconsistent with the print() calls used elsewhere in this file).
    print("TensorFlow Check Passed")

check_tf()
def reset_weights(session, model):
    """Re-randomize *model* in place.

    Re-runs the kernel initializer op (under *session*) for every layer that
    exposes a ``kernel_initializer`` attribute; layers without one are skipped.
    """
    for layer in model.layers:
        if not hasattr(layer, "kernel_initializer"):
            continue
        layer.kernel.initializer.run(session=session)
def map_variance(samples, df0, scale0):
    """MAP estimate of the observation variance.

    Assumes a scaled inverse-chi-squared prior over the variance and a
    Gaussian likelihood; `df` and `scale` parameterize the posterior
    (Bayesian Data Analysis, ch. 2, Gelman et al.).

    samples: N-length array or NxD array of (residual) samples
    df0: prior degrees of freedom
    scale0: prior scale parameter

    returns: float or D-length array — the mode of the posterior,
        df * scale / (df + 2).
    """
    # np.shape(...)[0] is the sample count for both 1-D and 2-D input;
    # np.var(..., axis=0) likewise handles both (per-column variance for 2-D).
    n = np.shape(samples)[0]
    v = np.var(samples, axis=0)

    # posterior parameters of the scaled inverse-chi-squared
    df = df0 + n
    scale = (df0 * scale0 + n * v) / df

    # BUG FIX: was `df * scale / (df * 2)`; the mode of a scaled
    # inverse-chi-squared(df, scale) distribution is df * scale / (df + 2).
    return df * scale / (df + 2)
class LinearEvent(object):
""" this is the base clase of the event model """
def __init__(self, d, var_df0, var_scale0, optimizer=None, n_epochs=10, init_model=False,
kernel_initializer='glorot_uniform', l2_regularization=0.00, batch_size=32, prior_log_prob=0.0,
reset_weights=False, batch_update=True, optimizer_kwargs=None):
"""
:param d: dimensions of the input space
"""
self.d = d
self.f_is_trained = False
self.f0_is_trained = False
self.f0 = np.zeros(d)
self.x_history = [np.zeros((0, self.d))]
self.prior_probability = prior_log_prob
if (optimizer is None) and (optimizer_kwargs is None):
optimizer = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0, amsgrad=False)
elif (optimizer is None) and not (optimizer_kwargs is None):
optimizer = Adam(**optimizer_kwargs)
elif (optimizer is not None) and (type(optimizer) != str):
optimizer = optimizer()
self.compile_opts = dict(optimizer=optimizer, loss='mean_squared_error')
self.kernel_initializer = kernel_initializer
self.kernel_regularizer = regularizers.l2(l2_regularization)
self.n_epochs = n_epochs
self.batch_size = batch_size
self.var_df0 = var_df0
self.var_scale0 = var_scale0
self.d = d
self.reset_weights = reset_weights
self.batch_update = batch_update
self.training_pairs = []
self.prediction_errors = np.zeros((0, self.d), dtype=np.float)
self.model_weights = None
# initialize the covariance with the mode of the prior distribution
self.Sigma = np.ones(d) * var_df0 * var_scale0 / (var_df0 + 2)
self.is_visited = False # governs the special case of model's first prediction (i.e. with no experience)
# switch for inheritance -- don't want to init the model for sub-classes
if init_model:
self.init_model()
def init_model(self):
self._compile_model()
self.model_weights = self.model.get_weights()
return self.model
def _compile_model(self):
self.model = Sequential([
Dense(self.d, input_shape=(self.d,), use_bias=True, kernel_initializer=self.kernel_initializer,
kernel_regularizer=self.kernel_regularizer),
Activation('linear')
])
self.model.compile(**self.compile_opts)
def set_model(self, sess, model):
self.sess = sess
self.model = model
self.do_reset_weights()
def reestimate(self):
self.do_reset_weights()
self.estimate()
def do_reset_weights(self):
# self._compile_model()
reset_weights(self.sess, self.model)
self.model_weights = self.model.get_weights()
def update(self, X, Xp, update_estimate=True):
"""
Parameters
----------
X: NxD array-like data of inputs
y: NxD array-like data of outputs
Returns
-------
None
"""
if X.ndim > 1:
X = X[-1, :] # only consider last example
assert X.ndim == 1
assert X.shape[0] == self.d
assert Xp.ndim == 1
assert Xp.shape[0] == self.d
x_example = X.reshape((1, self.d))
xp_example = Xp.reshape((1, self.d))
# concatenate the training example to the active event token
self.x_history[-1] = np.concatenate([self.x_history[-1], x_example], axis=0)
# also, create a list of training pairs (x, y) for efficient sampling
# picks random time-point in the history
self.training_pairs.append(tuple([x_example, xp_example]))
if update_estimate:
self.estimate()
self.f_is_trained = True
def update_f0(self, Xp, update_estimate=True):
self.update(np.zeros(self.d), Xp, update_estimate=update_estimate)
self.f0_is_trained = True
# precompute f0 for speed
self.f0 = self._predict_f0()
def get_variance(self):
# Sigma is stored as a vector corresponding to the entries of the diagonal covariance matrix
return self.Sigma
def predict_next(self, X):
"""
wrapper for the prediction function that changes the prediction to the identity function
for untrained models (this is an initialization technique)
"""
if not self.f_is_trained:
if np.ndim(X) > 1:
return np.copy(X[-1, :]).reshape(1, -1)
return np.copy(X).reshape(1, -1)
return self._predict_next(X)
def _predict_next(self, X):
"""
Parameters
----------
X: 1xD array-like data of inputs
Returns
-------
y: 1xD array of prediction vectors
"""
if X.ndim > 1:
X0 = X[-1, :]
else:
X0 = X
self.model.set_weights(self.model_weights)
return self.model.predict(np.reshape(X0, newshape=(1, self.d)))
def predict_f0(self):
"""
wrapper for the prediction function that changes the prediction to the identity function
for untrained models (this is an initialization technique)
N.B. This answer is cached for speed
"""
return self.f0
def _predict_f0(self):
return self._predict_next(np.zeros(self.d))
def log_likelihood_f0(self, Xp):
if not self.f0_is_trained:
return self.prior_probability
# predict the initial point
Xp_hat = self.predict_f0()
# return the probability
return fast_mvnorm_diagonal_logprob(Xp.reshape(-1) - Xp_hat.reshape(-1), self.Sigma)
def log_likelihood_next(self, X, Xp):
if not self.f_is_trained:
return self.prior_probability
Xp_hat = self.predict_next(X)
return fast_mvnorm_diagonal_logprob(Xp.reshape(-1) - Xp_hat.reshape(-1), self.Sigma)
def log_likelihood_sequence(self, X, Xp):
if not self.f_is_trained:
return self.prior_probability
Xp_hat = self.predict_next_generative(X)
return fast_mvnorm_diagonal_logprob(Xp.reshape(-1) - Xp_hat.reshape(-1), self.Sigma)
# create a new cluster of scenes
def new_token(self):
if len(self.x_history) == 1 and self.x_history[0].shape[0] == 0:
# special case for the first cluster which is already created
return
self.x_history.append(np.zeros((0, self.d)))
def predict_next_generative(self, X):
self.model.set_weights(self.model_weights)
# the LDS is a markov model, so these functions are the same
return self.predict_next(X)
def run_generative(self, n_steps, initial_point=None):
self.model.set_weights(self.model_weights)
if initial_point is None:
x_gen = self._predict_f0()
else:
x_gen = np.reshape(initial_point, (1, self.d))
for ii in range(1, n_steps):
x_gen = np.concatenate([x_gen, self.predict_next_generative(x_gen[:ii, :])])
return x_gen
def estimate(self):
if self.reset_weights:
self.do_reset_weights()
else:
self.model.set_weights(self.model_weights)
n_pairs = len(self.training_pairs)
if self.batch_update:
def draw_sample_pair():
# draw a random cluster for the history
idx = np.random.randint(n_pairs)
return self.training_pairs[idx]
else:
# for online sampling, just use the last training sample
def draw_sample_pair():
return self.training_pairs[-1]
# run batch gradient descent on all of the past events!
for _ in range(self.n_epochs):
# draw a set of training examples from the history
x_batch = []
xp_batch = []
for _ in range(self.batch_size):
x_sample, xp_sample = draw_sample_pair()
# these data aren't
x_batch.append(x_sample)
xp_batch.append(xp_sample)
x_batch = np.reshape(x_batch, (self.batch_size, self.d))
xp_batch = np.reshape(xp_batch, (self.batch_size, self.d))
self.model.train_on_batch(x_batch, xp_batch)
# cache the model weights
self.model_weights = self.model.get_weights()
# Update Sigma
x_train_0, xp_train_0 = self.training_pairs[-1]
xp_hat = self.model.predict(x_train_0)
self.prediction_errors = np.concatenate([self.prediction_errors, xp_train_0 - xp_hat], axis=0)
if np.shape(self.prediction_errors)[0] > 1:
self.Sigma = map_variance(self.prediction_errors, self.var_df0, self.var_scale0)
class NonLinearEvent(LinearEvent):
    """Event model with a one-hidden-layer MLP: Dense(n_hidden) -> Dropout -> Dense(d)."""

    def __init__(self, d, var_df0, var_scale0, n_hidden=None, hidden_act='tanh', batch_size=32,
                 optimizer=None, n_epochs=10, init_model=False, kernel_initializer='glorot_uniform',
                 l2_regularization=0.00, dropout=0.50, prior_log_prob=0.0, reset_weights=False,
                 batch_update=True,
                 optimizer_kwargs=None):
        # delegate the shared bookkeeping to the base class; the Keras model is
        # only built after the hidden-layer hyperparameters have been stored
        super(NonLinearEvent, self).__init__(
            d, var_df0, var_scale0, optimizer=optimizer, n_epochs=n_epochs,
            init_model=False, kernel_initializer=kernel_initializer, batch_size=batch_size,
            l2_regularization=l2_regularization, prior_log_prob=prior_log_prob,
            reset_weights=reset_weights, batch_update=batch_update,
            optimizer_kwargs=optimizer_kwargs)

        # default the hidden width to the input dimensionality
        self.n_hidden = d if n_hidden is None else n_hidden
        self.hidden_act = hidden_act
        self.dropout = dropout

        if init_model:
            self.init_model()

    def _compile_model(self):
        """Build and compile the MLP."""
        hidden_layer = Dense(self.n_hidden, input_shape=(self.d,), activation=self.hidden_act,
                             kernel_regularizer=self.kernel_regularizer,
                             kernel_initializer=self.kernel_initializer)
        output_layer = Dense(self.d, activation='linear',
                             kernel_regularizer=self.kernel_regularizer,
                             kernel_initializer=self.kernel_initializer)
        self.model = Sequential([hidden_layer, Dropout(self.dropout), output_layer])
        self.model.compile(**self.compile_opts)
class NonLinearEvent_normed(NonLinearEvent):
    """NonLinearEvent whose output is L2-normalized to unit length."""

    def __init__(self, d, var_df0, var_scale0, n_hidden=None, hidden_act='tanh',
                 optimizer=None, n_epochs=10, init_model=False, kernel_initializer='glorot_uniform',
                 l2_regularization=0.00, dropout=0.50, prior_log_prob=0.0, reset_weights=False, batch_size=32,
                 batch_update=True, optimizer_kwargs=None):
        NonLinearEvent.__init__(self, d, var_df0, var_scale0, optimizer=optimizer, n_epochs=n_epochs,
                                l2_regularization=l2_regularization, batch_size=batch_size,
                                kernel_initializer=kernel_initializer, init_model=False,
                                prior_log_prob=prior_log_prob, reset_weights=reset_weights,
                                batch_update=batch_update, optimizer_kwargs=optimizer_kwargs)

        if n_hidden is None:
            n_hidden = d
        self.n_hidden = n_hidden
        self.hidden_act = hidden_act
        self.dropout = dropout

        if init_model:
            self.init_model()

    def _compile_model(self):
        """Build the MLP and append an output-normalizing Lambda layer."""
        # BUG FIX: keras.backend was never imported in this module, so `K` in
        # the Lambda below was an undefined name (NameError at build time).
        # Import it locally; the lambda closes over it.
        from keras import backend as K

        self.model = Sequential()
        self.model.add(Dense(self.n_hidden, input_shape=(self.d,), activation=self.hidden_act,
                             kernel_regularizer=self.kernel_regularizer,
                             kernel_initializer=self.kernel_initializer))
        self.model.add(Dropout(self.dropout))
        self.model.add(Dense(self.d, activation='linear',
                             kernel_regularizer=self.kernel_regularizer,
                             kernel_initializer=self.kernel_initializer))
        # project every prediction onto the unit sphere
        self.model.add(Lambda(lambda x: K.l2_normalize(x, axis=-1)))
        self.model.compile(**self.compile_opts)
class StationaryEvent(LinearEvent):
    """Event model whose prediction ignores the current scene entirely."""

    def _predict_next(self, X):
        """
        Parameters
        ----------
        X: 1xD array-like data of inputs (unused -- the prediction is stationary)

        Returns
        -------
        y: 1xD array of prediction vectors
        """
        origin = np.zeros((1, self.d))
        return self.model.predict(origin)
class RecurentLinearEvent(LinearEvent):
# RNN which is initialized once and then trained using stochastic gradient descent
# i.e. each new scene is a single example batch of size 1
def __init__(self, d, var_df0, var_scale0, t=3,
optimizer=None, n_epochs=10, l2_regularization=0.00, batch_size=32,
kernel_initializer='glorot_uniform', init_model=False, prior_log_prob=0.0, reset_weights=False,
batch_update=True, optimizer_kwargs=None):
#
# D = dimension of single input / output example
# t = number of time steps to unroll back in time for the recurrent layer
# n_hidden1 = # of nodes in first hidden layer
# n_hidden2 = # of nodes in second hidden layer
# hidden_act1 = activation f'n of first hidden layer
# hidden_act2 = activation f'n of second hidden layer
# sgd_kwargs = arguments for the stochastic gradient descent algorithm
# n_epochs = how many gradient descent steps | |
All inherited terms by all these terms will be added in this function
using the provided ontology object so that each node will be represented by the union of all
the terms inherited by the terms annotated to it. After that step, the term IDs are simply
treated as words in a vocabulary, and the same approach as with n-grams is used to generate
the distance matrix.
Args:
ids_to_annotations (dict): A mapping between IDs and a list of ontology term ID strings.
ontology (Ontology): Ontology object with all necessary fields.
metric (str): A string indicating which distance metric should be used (e.g., cosine).
tfidf (bool, optional): Whether to use TFIDF weighting or not.
**kwargs: All the keyword arguments that can be passed to sklearn.feature_extraction.CountVectorizer()
Returns:
oats.pairwise.SquarePairwiseDistances: Distance matrix and accompanying information.
"""
# Generate the vector representations of each set of annotations by first inheriting terms then converting to strings.
ids_to_term_lists = {i:list(set(flatten([ontology.inherited(term_id) for term_id in term_list]))) for i,term_list in ids_to_annotations.items()}
ids_to_joined_term_strings = {i:" ".join(term_list).strip() for i,term_list in ids_to_term_lists.items()}
joined_term_strings_list = ids_to_joined_term_strings.values()
vectors, vectorizer = vectorize_with_ngrams(joined_term_strings_list, tfidf=tfidf, **kwargs)
joined_term_strings_to_vector_mapping = {term_list_string:vector for term_list_string,vector in zip(joined_term_strings_list,vectors)}
# Send the relevant functions and arguments to the general-case method for generating the distance matrix object.
to_vector_function = lambda term_list_string, mapping=joined_term_strings_to_vector_mapping: mapping[term_list_string]
to_vector_kwargs = {}
return(_pairwise_square_general_case(
ids_to_something=ids_to_joined_term_strings,
to_vector_now=to_vector_function,
to_vector_now_kwargs=to_vector_kwargs,
to_vector_later=_for_new_texts_get_annotations_vector,
to_vector_later_kwargs={"vectorizer":vectorizer, "ontology":ontology},
metric=metric))
# def pairwise_rectangular_precomputed_vectors(ids_to_vectors_1, ids_to_vectors_2, metric):
# """docstring
# """
# vectors = []
# row_index_in_matrix_to_id = {}
# col_index_in_matrix_to_id = {}
# id_to_row_index_in_matrix = {}
# id_to_col_index_in_matrix = {}
# row_in_matrix = 0
# for identifier,vector in ids_to_vectors_1.items():
# vectors.append(vector)
# row_index_in_matrix_to_id[row_in_matrix] = identifier
# id_to_row_index_in_matrix[identifier] = row_in_matrix
# row_in_matrix = row_in_matrix+1
# col_in_matrix = 0
# for identifier,vector in ids_to_vectors_2.items():
# vectors.append(vector)
# col_index_in_matrix_to_id[col_in_matrix] = identifier
# id_to_col_index_in_matrix[identifier] = col_in_matrix
# col_in_matrix = col_in_matrix+1
# all_vectors = vectors
# row_vectors = all_vectors[:len(ids_to_vectors_1)]
# col_vectors = all_vectors[len(ids_to_vectors_1):]
# row_id_to_vector_dict = ids_to_vectors_1
# col_id_to_vector_dict = ids_to_vectors_2
# matrix = cdist(row_vectors, col_vectors, metric)
# edgelist = _rectangular_adjacency_matrix_to_edgelist(matrix, row_index_in_matrix_to_id, col_index_in_matrix_to_id)
# # Create and return a SquarePairwiseDistances object containing the edgelist, matrix, and dictionaries.
# return(RectangularPairwiseDistances(
# metric_str = metric,
# vectorizing_function = None,
# vectorizing_function_kwargs = None,
# edgelist = edgelist,
# row_vector_dictionary = row_id_to_vector_dict,
# col_vector_dictionary = col_id_to_vector_dict,
# vectorizer_object = None,
# id_to_row_index=id_to_row_index_in_matrix,
# id_to_col_index=id_to_col_index_in_matrix,
# row_index_to_id=row_index_in_matrix_to_id,
# col_index_to_id=col_index_in_matrix_to_id,
# array=matrix))
# def pairwise_rectangular_doc2vec(model, ids_to_texts_1, ids_to_texts_2, metric):
# """docstring
# """
# vectors = []
# row_index_in_matrix_to_id = {}
# col_index_in_matrix_to_id = {}
# id_to_row_index_in_matrix = {}
# id_to_col_index_in_matrix = {}
# row_in_matrix = 0
# for identifier,description in ids_to_texts_1.items():
# inferred_vector = _infer_document_vector_from_doc2vec(description, model)
# vectors.append(inferred_vector)
# row_index_in_matrix_to_id[row_in_matrix] = identifier
# id_to_row_index_in_matrix[identifier] = row_in_matrix
# row_in_matrix = row_in_matrix+1
# col_in_matrix = 0
# for identifier,description in ids_to_texts_2.items():
# inferred_vector = _infer_document_vector_from_doc2vec(description, model)
# vectors.append(inferred_vector)
# col_index_in_matrix_to_id[col_in_matrix] = identifier
# id_to_col_index_in_matrix[identifier] = col_in_matrix
# col_in_matrix = col_in_matrix+1
# all_vectors = vectors
# row_vectors = all_vectors[:len(ids_to_texts_1)]
# col_vectors = all_vectors[len(ids_to_texts_1):]
# row_id_to_vector_dict = {row_index_in_matrix_to_id[i]:vector for i,vector in enumerate(row_vectors)}
# col_id_to_vector_dict = {col_index_in_matrix_to_id[i]:vector for i,vector in enumerate(col_vectors)}
# matrix = cdist(row_vectors, col_vectors, metric)
# edgelist = _rectangular_adjacency_matrix_to_edgelist(matrix, row_index_in_matrix_to_id, col_index_in_matrix_to_id)
# # Create and return a SquarePairwiseDistances object containing the edgelist, matrix, and dictionaries.
# return(RectangularPairwiseDistances(
# metric_str = metric,
# vectorizing_function = _infer_document_vector_from_doc2vec,
# vectorizing_function_kwargs = {"model":model},
# edgelist = edgelist,
# row_vector_dictionary = row_id_to_vector_dict,
# col_vector_dictionary = col_id_to_vector_dict,
# vectorizer_object = None,
# id_to_row_index=id_to_row_index_in_matrix,
# id_to_col_index=id_to_col_index_in_matrix,
# row_index_to_id=row_index_in_matrix_to_id,
# col_index_to_id=col_index_in_matrix_to_id,
# array=matrix))
# def pairwise_rectangular_word2vec(model, ids_to_texts_1, ids_to_texts_2, metric, method="mean"):
# """
# docstring
# """
# vectors = []
# row_index_in_matrix_to_id = {}
# col_index_in_matrix_to_id = {}
# id_to_row_index_in_matrix = {}
# id_to_col_index_in_matrix = {}
# row_in_matrix = 0
# for identifier,description in ids_to_texts_1.items():
# vector = _infer_document_vector_from_word2vec(description, model, method)
# vectors.append(vector)
# row_index_in_matrix_to_id[row_in_matrix] = identifier
# id_to_row_index_in_matrix[identifier] = row_in_matrix
# row_in_matrix = row_in_matrix+1
# col_in_matrix = 0
# for identifier,description in ids_to_texts_2.items():
# vector = _infer_document_vector_from_word2vec(description, model, method)
# vectors.append(vector)
# col_index_in_matrix_to_id[col_in_matrix] = identifier
# id_to_col_index_in_matrix[identifier] = col_in_matrix
# col_in_matrix = col_in_matrix+1
# all_vectors = vectors
# row_vectors = all_vectors[:len(ids_to_texts_1)]
# col_vectors = all_vectors[len(ids_to_texts_1):]
# row_id_to_vector_dict = {row_index_in_matrix_to_id[i]:vector for i,vector in enumerate(row_vectors)}
# col_id_to_vector_dict = {col_index_in_matrix_to_id[i]:vector for i,vector in enumerate(col_vectors)}
# matrix = cdist(row_vectors, col_vectors, metric)
# edgelist = _rectangular_adjacency_matrix_to_edgelist(matrix, row_index_in_matrix_to_id, col_index_in_matrix_to_id)
# return(RectangularPairwiseDistances(
# metric_str = metric,
# vectorizing_function = _infer_document_vector_from_word2vec,
# vectorizing_function_kwargs = {"model":model, "method":method},
# edgelist = edgelist,
# row_vector_dictionary = row_id_to_vector_dict,
# col_vector_dictionary = col_id_to_vector_dict,
# vectorizer_object = None,
# id_to_row_index=id_to_row_index_in_matrix,
# id_to_col_index=id_to_col_index_in_matrix,
# row_index_to_id=row_index_in_matrix_to_id,
# col_index_to_id=col_index_in_matrix_to_id,
# array=matrix))
# def pairwise_rectangular_bert(model, tokenizer, ids_to_texts_1, ids_to_texts_2, metric, method, layers):
# """
# docstring
# """
# vectors = []
# row_index_in_matrix_to_id = {}
# col_index_in_matrix_to_id = {}
# id_to_row_index_in_matrix = {}
# id_to_col_index_in_matrix = {}
# row_in_matrix = 0
# for identifier,description in ids_to_texts_1.items():
# inferred_vector = _infer_document_vector_from_bert(description, model, tokenizer, method, layers)
# vectors.append(inferred_vector)
# row_index_in_matrix_to_id[row_in_matrix] = identifier
# id_to_row_index_in_matrix[identifier] = row_in_matrix
# row_in_matrix = row_in_matrix+1
# col_in_matrix = 0
# for identifier,description in ids_to_texts_2.items():
# inferred_vector = _infer_document_vector_from_bert(description, model, tokenizer, method, layers)
# vectors.append(inferred_vector)
# col_index_in_matrix_to_id[col_in_matrix] = identifier
# id_to_col_index_in_matrix[identifier] = col_in_matrix
# col_in_matrix = col_in_matrix+1
# all_vectors = vectors
# row_vectors = all_vectors[:len(ids_to_texts_1)]
# col_vectors = all_vectors[len(ids_to_texts_1):]
# row_id_to_vector_dict = {row_index_in_matrix_to_id[i]:vector for i,vector in enumerate(row_vectors)}
# col_id_to_vector_dict = {col_index_in_matrix_to_id[i]:vector for i,vector in enumerate(col_vectors)}
# matrix = cdist(row_vectors, col_vectors, metric)
# edgelist = _rectangular_adjacency_matrix_to_edgelist(matrix, row_index_in_matrix_to_id, col_index_in_matrix_to_id)
# return(RectangularPairwiseDistances(
# metric_str = metric,
# vectorizing_function = _infer_document_vector_from_bert,
# vectorizing_function_kwargs = {"model":model, "tokenizer":tokenizer, "method":method, "layers":layers},
# edgelist = edgelist,
# row_vector_dictionary = row_id_to_vector_dict,
# col_vector_dictionary = col_id_to_vector_dict,
# vectorizer_object = None,
# id_to_row_index=id_to_row_index_in_matrix,
# id_to_col_index=id_to_col_index_in_matrix,
# row_index_to_id=row_index_in_matrix_to_id,
# col_index_to_id=col_index_in_matrix_to_id,
# array=matrix))
# def pairwise_rectangular_ngrams(ids_to_texts_1, ids_to_texts_2, metric, tfidf=False, **kwargs):
# """
# docstring
# """
# descriptions = []
# row_index_in_matrix_to_id = {}
# col_index_in_matrix_to_id = {}
# id_to_row_index_in_matrix = {}
# id_to_col_index_in_matrix = {}
# row_in_matrix = 0
# for identifier,description in ids_to_texts_1.items():
# descriptions.append(description)
# row_index_in_matrix_to_id[row_in_matrix] = identifier
# id_to_row_index_in_matrix[identifier] = row_in_matrix
# row_in_matrix = row_in_matrix+1
# col_in_matrix = 0
# for identifier,description in ids_to_texts_2.items():
# descriptions.append(description)
# col_index_in_matrix_to_id[col_in_matrix] = identifier
# id_to_col_index_in_matrix[identifier] = col_in_matrix
# col_in_matrix = col_in_matrix+1
# all_vectors,vectorizer = strings_to_numerical_vectors(descriptions, tfidf=tfidf, **kwargs)
# row_vectors = all_vectors[:len(ids_to_texts_1)]
# col_vectors = all_vectors[len(ids_to_texts_1):]
# row_id_to_vector_dict = {row_index_in_matrix_to_id[i]:vector for i,vector in enumerate(row_vectors)}
# col_id_to_vector_dict = {col_index_in_matrix_to_id[i]:vector for i,vector in enumerate(col_vectors)}
# matrix = cdist(row_vectors, col_vectors, metric)
# edgelist = _rectangular_adjacency_matrix_to_edgelist(matrix, row_index_in_matrix_to_id, col_index_in_matrix_to_id)
# return(RectangularPairwiseDistances(
# metric_str = metric,
# vectorizing_function = _get_ngrams_vector,
# vectorizing_function_kwargs = {"countvectorizer":vectorizer},
# edgelist = edgelist,
# row_vector_dictionary = row_id_to_vector_dict,
# col_vector_dictionary = col_id_to_vector_dict,
# vectorizer_object = vectorizer,
# id_to_row_index=id_to_row_index_in_matrix,
# id_to_col_index=id_to_col_index_in_matrix,
# row_index_to_id=row_index_in_matrix_to_id,
# col_index_to_id=col_index_in_matrix_to_id,
# array=matrix))
# def pairwise_rectangular_topic_model(ids_to_texts_1, ids_to_texts_2, metric, seed=124134, num_topics=10, algorithm="LDA", **kwargs):
# """
# docstring
# """
# # Fitting the topic model using the provided parameters and this dataset of text descriptions.
# vectorizer = TfidfVectorizer(**kwargs)
# if algorithm.lower() == "lda":
# model = LDA(n_components=num_topics, random_state=seed)
# elif algorithm.lower() == "nmf":
# model = NMF(n_components=num_topics, random_state=seed)
# else:
# raise ValueError("algorithm argument is invalid")
# descriptions = []
# row_index_in_matrix_to_id = {}
# col_index_in_matrix_to_id = {}
# id_to_row_index_in_matrix = {}
# id_to_col_index_in_matrix = {}
# row_in_matrix = 0
# for identifier,description in ids_to_texts_1.items():
# descriptions.append(description)
# row_index_in_matrix_to_id[row_in_matrix] = identifier
# id_to_row_index_in_matrix[identifier] = row_in_matrix
# row_in_matrix = row_in_matrix+1
# col_in_matrix = 0
# for identifier,description in ids_to_texts_2.items():
# descriptions.append(description)
# col_index_in_matrix_to_id[col_in_matrix] = identifier
# id_to_col_index_in_matrix[identifier] = col_in_matrix
# col_in_matrix = col_in_matrix+1
# # Apply distance metric over all the vectors to yield a matrix.
# ngram_vectors = vectorizer.fit_transform(descriptions).toarray()
# topic_vectors = model.fit_transform(ngram_vectors)
# all_vectors = topic_vectors
# row_vectors = all_vectors[:len(ids_to_texts_1)]
# col_vectors = all_vectors[len(ids_to_texts_1):]
# row_id_to_vector_dict = {row_index_in_matrix_to_id[i]:vector for i,vector in enumerate(row_vectors)}
# col_id_to_vector_dict = {col_index_in_matrix_to_id[i]:vector for i,vector in enumerate(col_vectors)}
# matrix = cdist(row_vectors, col_vectors, metric)
# edgelist = _rectangular_adjacency_matrix_to_edgelist(matrix, row_index_in_matrix_to_id, col_index_in_matrix_to_id)
# return(RectangularPairwiseDistances(
# metric_str = metric,
# vectorizing_function = _get_topic_model_vector,
# vectorizing_function_kwargs = {"countvectorizer":vectorizer, "topic_model":model},
# edgelist = edgelist,
# row_vector_dictionary = row_id_to_vector_dict,
# col_vector_dictionary = col_id_to_vector_dict,
# vectorizer_object = vectorizer,
# id_to_row_index=id_to_row_index_in_matrix,
# id_to_col_index=id_to_col_index_in_matrix,
# row_index_to_id=row_index_in_matrix_to_id,
# col_index_to_id=col_index_in_matrix_to_id,
# array=matrix))
# def pairwise_rectangular_annotations(ids_to_annotations_1, ids_to_annotations_2, ontology, metric, tfidf=False, **kwargs):
# """
# docstring
# """
# joined_term_strings = []
# row_index_in_matrix_to_id = {}
# col_index_in_matrix_to_id = {}
# id_to_row_index_in_matrix = {}
# id_to_col_index_in_matrix = {}
# row_in_matrix = 0
# for identifier,term_list in ids_to_annotations_1.items():
# print(term_list)
# term_list = [ontology.inherited(x) for x in term_list]
# print(term_list)
# term_list = flatten(term_list)
# print(term_list)
# term_list = list(set(term_list))
# print(term_list)
# joined_term_string = " ".join(term_list).strip()
# joined_term_strings.append(joined_term_string)
# row_index_in_matrix_to_id[row_in_matrix] = identifier
# id_to_row_index_in_matrix[identifier] = row_in_matrix
# row_in_matrix = row_in_matrix+1
# col_in_matrix = 0
# for identifier,term_list in ids_to_annotations_2.items():
# term_list = [ontology.inherited(x) for x in term_list]
# term_list = flatten(term_list)
# term_list = list(set(term_list))
# joined_term_string = " ".join(term_list).strip()
# joined_term_strings.append(joined_term_string)
# col_index_in_matrix_to_id[col_in_matrix] = identifier
# id_to_col_index_in_matrix[identifier] = col_in_matrix
# col_in_matrix = col_in_matrix+1
# # Find all the pairwise values for the distance matrix.
# all_vectors,vectorizer = strings_to_numerical_vectors(joined_term_strings, tfidf=tfidf, **kwargs)
# row_vectors = all_vectors[:len(ids_to_annotations_1)]
# col_vectors = all_vectors[len(ids_to_annotations_1):]
# row_id_to_vector_dict = {row_index_in_matrix_to_id[i]:vector for i,vector in enumerate(row_vectors)}
# col_id_to_vector_dict = {col_index_in_matrix_to_id[i]:vector for i,vector in enumerate(col_vectors)}
# matrix = cdist(row_vectors, col_vectors, metric)
# edgelist = _rectangular_adjacency_matrix_to_edgelist(matrix, row_index_in_matrix_to_id, col_index_in_matrix_to_id)
# return(RectangularPairwiseDistances(
# metric_str = metric,
# vectorizing_function = _get_annotations_vector,
# vectorizing_function_kwargs = {"countvectorizer":vectorizer, "ontology":ontology},
# edgelist = edgelist,
# row_vector_dictionary = row_id_to_vector_dict,
# col_vector_dictionary = col_id_to_vector_dict,
# vectorizer_object = vectorizer,
# id_to_row_index=id_to_row_index_in_matrix,
# id_to_col_index=id_to_col_index_in_matrix,
# row_index_to_id=row_index_in_matrix_to_id,
# col_index_to_id=col_index_in_matrix_to_id,
# array=matrix))
# def elemwise_list_precomputed_vectors(vector_list_1, vector_list_2, metric_function):
# """
# docstring
# """
# assert len(vector_list_1) == len(vector_list_2)
# vector_pairs = zip(vector_list_1, vector_list_2)
# distances_list = [metric_function(vector_pair[0],vector_pair[1]) for vector_pair in vector_pairs]
# assert len(distances_list) == len(vector_list_1)
# return(distances_list)
# def elemwise_list_doc2vec(model, text_list_1, text_list_2, metric_function):
# """
# docstring
# """
# assert len(text_list_1) == len(text_list_2)
# descriptions = []
# descriptions.extend(text_list_1)
# descriptions.extend(text_list_2)
# all_vectors = [_infer_document_vector_from_doc2vec(description, model) for description in descriptions]
# | |
{
atomicAdd(&(NNZ_EACH_ROW[m_c+1]), 1);
FLAGS[i+1] = 1;
C_DATA = (C)(A_DATA[i_a] * B_DATA[i_b]);
C_INDICES = n_c;
}
''',
'cupy_multiply_by_csr_step1',
preamble=_GET_ROW_ID_ + _FIND_INDEX_HOLDING_COL_IN_ROW_
)
@cupy._util.memoize(for_each_device=True)
def cupy_multiply_by_csr_step2():
    """Return the (per-device memoized) kernel for the compaction pass.

    FLAGS is expected to be the cumulative sum of the step-1 validity flags, so
    FLAGS[i] < FLAGS[i+1] exactly when entry i survived step 1; FLAGS[i] is then
    the destination index of C_DATA/C_INDICES in the compacted D arrays.
    """
    return cupy.ElementwiseKernel(
        'T C_DATA, I C_INDICES, raw I FLAGS',
        'raw D D_DATA, raw I D_INDICES',
        '''
        int j = FLAGS[i];
        if (j < FLAGS[i+1]) {
            D_DATA[j] = (D)(C_DATA);
            D_INDICES[j] = C_INDICES;
        }
        ''',
        'cupy_multiply_by_csr_step2'
    )
_BINOPT_MAX_ = '''
__device__ inline O binopt(T in1, T in2) {
return max(in1, in2);
}
'''
_BINOPT_MIN_ = '''
__device__ inline O binopt(T in1, T in2) {
return min(in1, in2);
}
'''
_BINOPT_EQ_ = '''
__device__ inline O binopt(T in1, T in2) {
return (in1 == in2);
}
'''
_BINOPT_NE_ = '''
__device__ inline O binopt(T in1, T in2) {
return (in1 != in2);
}
'''
_BINOPT_LT_ = '''
__device__ inline O binopt(T in1, T in2) {
return (in1 < in2);
}
'''
_BINOPT_GT_ = '''
__device__ inline O binopt(T in1, T in2) {
return (in1 > in2);
}
'''
_BINOPT_LE_ = '''
__device__ inline O binopt(T in1, T in2) {
return (in1 <= in2);
}
'''
_BINOPT_GE_ = '''
__device__ inline O binopt(T in1, T in2) {
return (in1 >= in2);
}
'''
def binopt_csr(a, b, op_name):
    """Apply the pointwise binary operator *op_name* to two CSR matrices.

    Operands with a single row and/or column are (virtually) broadcast
    against the other operand's shape.  Comparison operators produce a
    boolean-valued matrix; ``_maximum_``/``_minimum_`` keep the promoted
    input dtype.  Entries whose result is zero are dropped, so the
    returned CSR matrix stays properly sparse.

    Raises ValueError for an unrecognized *op_name*.
    """
    check_shape_for_pointwise_op(a.shape, b.shape)
    a_m, a_n = a.shape
    b_m, b_n = b.shape
    m, n = max(a_m, b_m), max(a_n, b_n)
    # nnz of each operand after broadcasting it to the output shape
    a_nnz = a.nnz * (m // a_m) * (n // a_n)
    b_nnz = b.nnz * (m // b_m) * (n // b_n)
    a_info = cupy.zeros(a_nnz + 1, dtype=a.indices.dtype)
    b_info = cupy.zeros(b_nnz + 1, dtype=b.indices.dtype)
    a_valid = cupy.zeros(a_nnz, dtype=numpy.int8)
    b_valid = cupy.zeros(b_nnz, dtype=numpy.int8)
    c_indptr = cupy.zeros(m + 1, dtype=a.indptr.dtype)
    in_dtype = numpy.promote_types(a.dtype, b.dtype)
    a_data = a.data.astype(in_dtype, copy=False)
    b_data = b.data.astype(in_dtype, copy=False)
    # Device-code snippet and output dtype for every supported operator.
    dispatch = {
        '_maximum_': (_BINOPT_MAX_, in_dtype),
        '_minimum_': (_BINOPT_MIN_, in_dtype),
        '_eq_': (_BINOPT_EQ_, numpy.bool_),
        '_ne_': (_BINOPT_NE_, numpy.bool_),
        '_lt_': (_BINOPT_LT_, numpy.bool_),
        '_gt_': (_BINOPT_GT_, numpy.bool_),
        '_le_': (_BINOPT_LE_, numpy.bool_),
        '_ge_': (_BINOPT_GE_, numpy.bool_),
    }
    if op_name not in dispatch:
        raise ValueError('invalid op_name: {}'.format(op_name))
    binopt_src, out_dtype = dispatch[op_name]
    funcs = _GET_ROW_ID_ + binopt_src
    a_tmp_data = cupy.empty(a_nnz, dtype=out_dtype)
    b_tmp_data = cupy.empty(b_nnz, dtype=out_dtype)
    a_tmp_indices = cupy.empty(a_nnz, dtype=a.indices.dtype)
    b_tmp_indices = cupy.empty(b_nnz, dtype=b.indices.dtype)
    total = a_nnz + b_nnz
    # Step 1: evaluate the operator per entry, mark surviving (non-zero)
    # results, and count the number of outputs per row / per element.
    cupy_binopt_csr_step1(op_name, preamble=funcs)(
        m, n,
        a.indptr, a.indices, a_data, a_m, a_n, a.nnz, a_nnz,
        b.indptr, b.indices, b_data, b_m, b_n, b.nnz, b_nnz,
        a_info, a_valid, a_tmp_indices, a_tmp_data,
        b_info, b_valid, b_tmp_indices, b_tmp_data,
        c_indptr, size=total)
    # Prefix sums convert per-element counts into scatter offsets.
    a_info = cupy.cumsum(a_info, dtype=a_info.dtype)
    b_info = cupy.cumsum(b_info, dtype=b_info.dtype)
    c_indptr = cupy.cumsum(c_indptr, dtype=c_indptr.dtype)
    c_nnz = int(c_indptr[-1])
    c_indices = cupy.empty(c_nnz, dtype=a.indices.dtype)
    c_data = cupy.empty(c_nnz, dtype=out_dtype)
    # Step 2: scatter surviving entries into the output CSR arrays.
    cupy_binopt_csr_step2(op_name)(
        a_info, a_valid, a_tmp_indices, a_tmp_data, a_nnz,
        b_info, b_valid, b_tmp_indices, b_tmp_data, b_nnz,
        c_indices, c_data, size=total)
    return csr_matrix((c_data, c_indices, c_indptr), shape=(m, n))
@cupy._util.memoize(for_each_device=True)
def cupy_binopt_csr_step1(op_name, preamble=''):
    """Return the step-1 kernel for binopt_csr, memoized per device.

    One thread is launched per (broadcast-expanded) non-zero of A and of B.
    Each thread locates its row/column, binary-searches the *other* matrix's
    row for a matching column, evaluates ``binopt`` (supplied via *preamble*),
    and — if the result is non-zero — records the value/column and bumps the
    per-row output count (C_INFO) plus the scatter bookkeeping arrays.
    A-threads always evaluate; B-threads only evaluate when A has no entry at
    that position, so each output slot is produced exactly once.

    ``preamble`` must provide ``get_row_id`` and ``binopt``.
    """
    name = 'cupy_binopt_csr' + op_name + 'step1'
    return cupy.ElementwiseKernel(
        '''
        int32 M, int32 N,
        raw I A_INDPTR, raw I A_INDICES, raw T A_DATA,
        int32 A_M, int32 A_N, int32 A_NNZ_ACT, int32 A_NNZ,
        raw I B_INDPTR, raw I B_INDICES, raw T B_DATA,
        int32 B_M, int32 B_N, int32 B_NNZ_ACT, int32 B_NNZ
        ''',
        '''
        raw I A_INFO, raw B A_VALID, raw I A_TMP_INDICES, raw O A_TMP_DATA,
        raw I B_INFO, raw B B_VALID, raw I B_TMP_INDICES, raw O B_TMP_DATA,
        raw I C_INFO
        ''',
        '''
        if (i >= A_NNZ + B_NNZ) return;
        const int *MY_INDPTR, *MY_INDICES; int *MY_INFO; const T *MY_DATA;
        const int *OP_INDPTR, *OP_INDICES; int *OP_INFO; const T *OP_DATA;
        int MY_M, MY_N, MY_NNZ_ACT, MY_NNZ;
        int OP_M, OP_N, OP_NNZ_ACT, OP_NNZ;
        signed char *MY_VALID; I *MY_TMP_INDICES; O *MY_TMP_DATA;
        int my_j;
        if (i < A_NNZ) {
            // in charge of one of non-zero element of sparse matrix A
            my_j = i;
            MY_INDPTR = &(A_INDPTR[0]); OP_INDPTR = &(B_INDPTR[0]);
            MY_INDICES = &(A_INDICES[0]); OP_INDICES = &(B_INDICES[0]);
            MY_INFO = &(A_INFO[0]); OP_INFO = &(B_INFO[0]);
            MY_DATA = &(A_DATA[0]); OP_DATA = &(B_DATA[0]);
            MY_M = A_M; OP_M = B_M;
            MY_N = A_N; OP_N = B_N;
            MY_NNZ_ACT = A_NNZ_ACT; OP_NNZ_ACT = B_NNZ_ACT;
            MY_NNZ = A_NNZ; OP_NNZ = B_NNZ;
            MY_VALID = &(A_VALID[0]);
            MY_TMP_DATA= &(A_TMP_DATA[0]);
            MY_TMP_INDICES = &(A_TMP_INDICES[0]);
        } else {
            // in charge of one of non-zero element of sparse matrix B
            my_j = i - A_NNZ;
            MY_INDPTR = &(B_INDPTR[0]); OP_INDPTR = &(A_INDPTR[0]);
            MY_INDICES = &(B_INDICES[0]); OP_INDICES = &(A_INDICES[0]);
            MY_INFO = &(B_INFO[0]); OP_INFO = &(A_INFO[0]);
            MY_DATA = &(B_DATA[0]); OP_DATA = &(A_DATA[0]);
            MY_M = B_M; OP_M = A_M;
            MY_N = B_N; OP_N = A_N;
            MY_NNZ_ACT = B_NNZ_ACT; OP_NNZ_ACT = A_NNZ_ACT;
            MY_NNZ = B_NNZ; OP_NNZ = A_NNZ;
            MY_VALID = &(B_VALID[0]);
            MY_TMP_DATA= &(B_TMP_DATA[0]);
            MY_TMP_INDICES = &(B_TMP_INDICES[0]);
        }
        int _min, _max, _mid;
        // get column location
        int my_col;
        int my_j_act = my_j;
        if (MY_M == 1 && MY_M < M) {
            if (MY_N == 1 && MY_N < N) my_j_act = 0;
            else my_j_act = my_j % MY_NNZ_ACT;
        } else {
            if (MY_N == 1 && MY_N < N) my_j_act = my_j / N;
        }
        my_col = MY_INDICES[my_j_act];
        if (MY_N == 1 && MY_N < N) {
            my_col = my_j % N;
        }
        // get row location
        int my_row = get_row_id(my_j_act, 0, MY_M - 1, &(MY_INDPTR[0]));
        if (MY_M == 1 && MY_M < M) {
            if (MY_N == 1 && MY_N < N) my_row = my_j / N;
            else my_row = my_j / MY_NNZ_ACT;
        }
        int op_row = my_row;
        int op_row_act = op_row;
        if (OP_M == 1 && OP_M < M) {
            op_row_act = 0;
        }
        int op_col = 0;
        _min = OP_INDPTR[op_row_act];
        _max = OP_INDPTR[op_row_act + 1] - 1;
        int op_j_act = _min;
        bool op_nz = false;
        if (_min <= _max) {
            if (OP_N == 1 && OP_N < N) {
                op_col = my_col;
                op_nz = true;
            }
            else {
                _mid = (_min + _max) / 2;
                op_col = OP_INDICES[_mid];
                while (_min < _max) {
                    if (op_col < my_col) {
                        _min = _mid + 1;
                    } else if (op_col > my_col) {
                        _max = _mid;
                    } else {
                        break;
                    }
                    _mid = (_min + _max) / 2;
                    op_col = OP_INDICES[_mid];
                }
                op_j_act = _mid;
                if (op_col == my_col) {
                    op_nz = true;
                } else if (op_col < my_col) {
                    op_col = N;
                    op_j_act += 1;
                }
            }
        }
        int op_j = op_j_act;
        if (OP_M == 1 && OP_M < M) {
            if (OP_N == 1 && OP_N < N) {
                op_j = (op_col + N * op_row) * OP_NNZ_ACT;
            } else {
                op_j = op_j_act + OP_NNZ_ACT * op_row;
            }
        } else {
            if (OP_N == 1 && OP_N < N) {
                op_j = op_col + N * op_j_act;
            }
        }
        if (i < A_NNZ || !op_nz) {
            T my_data = MY_DATA[my_j_act];
            T op_data = 0;
            if (op_nz) op_data = OP_DATA[op_j_act];
            O out;
            if (i < A_NNZ) out = binopt(my_data, op_data);
            else out = binopt(op_data, my_data);
            if (out != static_cast<O>(0)) {
                MY_VALID[my_j] = 1;
                MY_TMP_DATA[my_j] = out;
                MY_TMP_INDICES[my_j] = my_col;
                atomicAdd( &(C_INFO[my_row + 1]), 1 );
                atomicAdd( &(MY_INFO[my_j + 1]), 1 );
                atomicAdd( &(OP_INFO[op_j]), 1 );
            }
        }
        ''',
        name, preamble=preamble,
    )
@cupy._util.memoize(for_each_device=True)
def cupy_binopt_csr_step2(op_name):
    """Return the step-2 (scatter) kernel for binopt_csr, memoized per device.

    After step 1's counts have been prefix-summed, A_INFO/B_INFO hold each
    surviving entry's destination slot; this kernel copies valid entries
    from the temporary arrays into the final C_INDICES/C_DATA arrays.
    """
    name = 'cupy_binopt_csr' + op_name + 'step2'
    return cupy.ElementwiseKernel(
        '''
        raw I A_INFO, raw B A_VALID, raw I A_TMP_INDICES, raw O A_TMP_DATA,
        int32 A_NNZ,
        raw I B_INFO, raw B B_VALID, raw I B_TMP_INDICES, raw O B_TMP_DATA,
        int32 B_NNZ
        ''',
        'raw I C_INDICES, raw O C_DATA',
        '''
        if (i < A_NNZ) {
            int j = i;
            if (A_VALID[j]) {
                C_INDICES[A_INFO[j]] = A_TMP_INDICES[j];
                C_DATA[A_INFO[j]] = A_TMP_DATA[j];
            }
        } else if (i < A_NNZ + B_NNZ) {
            int j = i - A_NNZ;
            if (B_VALID[j]) {
                C_INDICES[B_INFO[j]] = B_TMP_INDICES[j];
                C_DATA[B_INFO[j]] = B_TMP_DATA[j];
            }
        }
        ''',
        name,
    )
def csr2dense(a, order):
    """Convert CSR matrix *a* to a dense cupy array.

    ``order`` selects the memory layout of the result: 'C' (row-major)
    or anything else for Fortran (column-major) indexing.  Duplicate
    entries at the same position are accumulated (the kernel uses +=).
    """
    rows, cols = a.shape
    dense = cupy.zeros((rows, cols), dtype=a.dtype, order=order)
    kernel = cupy_csr2dense()
    kernel(rows, cols, a.indptr, a.indices, a.data, (order == 'C'), dense)
    return dense
@cupy._util.memoize(for_each_device=True)
def cupy_csr2dense():
    """Return a per-device-memoized kernel scattering CSR entries densely.

    One thread per non-zero: the row is recovered from INDPTR via
    ``get_row_id`` (binary search, from the preamble), and the value is
    accumulated (+=) into the C- or Fortran-ordered output.
    """
    return cupy.ElementwiseKernel(
        'int32 M, int32 N, raw I INDPTR, I INDICES, T DATA, bool C_ORDER',
        'raw T OUT',
        '''
        int row = get_row_id(i, 0, M - 1, &(INDPTR[0]));
        int col = INDICES;
        if (C_ORDER) {
            OUT[col + N * row] += DATA;
        } else {
            OUT[row + M * col] += DATA;
        }
        ''',
        'cupy_csr2dense',
        preamble=_GET_ROW_ID_
    )
def dense2csr(a):
    """Convert a dense array to CSR via cuSPARSE.

    Prefers the generic 'denseToSparse' API when the installed cuSPARSE
    provides it, falling back to the legacy dense2csr routine.
    NOTE: dtypes outside float32/float64/complex64/complex128 fall
    through and implicitly return None (original behavior preserved).
    """
    if a.dtype.char not in 'fdFD':
        return None
    if cusparse.check_availability('denseToSparse'):
        return cusparse.denseToSparse(a, format='csr')
    return cusparse.dense2csr(a)
| |
<filename>Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/courseware/tests/test_access.py
"""
Test the access control framework
"""
import datetime
import itertools
from unittest.mock import Mock, patch
import pytest
import ddt
import pytz
from ccx_keys.locator import CCXLocator
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.test import TestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
from django.urls import reverse
from milestones.tests.utils import MilestonesTestCaseMixin
from opaque_keys.edx.locator import CourseLocator
import lms.djangoapps.courseware.access as access
import lms.djangoapps.courseware.access_response as access_response
from lms.djangoapps.courseware.masquerade import CourseMasquerade
from lms.djangoapps.courseware.tests.helpers import LoginEnrollmentTestCase, masquerade_as_group_member
from lms.djangoapps.ccx.models import CustomCourseForEdX
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from openedx.core.djangoapps.waffle_utils.testutils import WAFFLE_TABLES
from openedx.features.content_type_gating.models import ContentTypeGatingConfig
from common.djangoapps.student.models import CourseEnrollment
from common.djangoapps.student.roles import CourseCcxCoachRole, CourseStaffRole
from common.djangoapps.student.tests.factories import (
AdminFactory,
AnonymousUserFactory,
CourseEnrollmentAllowedFactory,
CourseEnrollmentFactory
)
from common.djangoapps.student.tests.factories import BetaTesterFactory
from common.djangoapps.student.tests.factories import GlobalStaffFactory
from common.djangoapps.student.tests.factories import InstructorFactory
from common.djangoapps.student.tests.factories import StaffFactory
from common.djangoapps.student.tests.factories import UserFactory
from common.djangoapps.util.milestones_helpers import fulfill_course_milestone, set_prerequisite_courses
from xmodule.course_module import (
CATALOG_VISIBILITY_ABOUT,
CATALOG_VISIBILITY_CATALOG_AND_ABOUT,
CATALOG_VISIBILITY_NONE
)
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import (
TEST_DATA_SPLIT_MODULESTORE,
ModuleStoreTestCase,
SharedModuleStoreTestCase
)
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.partitions.partitions import MINIMUM_STATIC_PARTITION_ID, Group, UserPartition
QUERY_COUNT_TABLE_BLACKLIST = WAFFLE_TABLES
# pylint: disable=protected-access
class CoachAccessTestCaseCCX(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test if user is coach on ccx.
    """
    MODULESTORE = TEST_DATA_SPLIT_MODULESTORE

    @classmethod
    def setUpClass(cls):
        """
        Set up course for tests
        """
        super().setUpClass()
        cls.course = CourseFactory.create()

    def setUp(self):
        """
        Set up tests
        """
        super().setUp()
        # Create ccx coach account
        self.coach = AdminFactory.create(password="<PASSWORD>")
        self.client.login(username=self.coach.username, password="<PASSWORD>")
        # assign role to coach
        role = CourseCcxCoachRole(self.course.id)
        role.add_users(self.coach)
        self.request_factory = RequestFactory()

    def make_ccx(self):
        """
        create ccx
        """
        # Create a CCX backed by the shared course, with self.coach as coach.
        ccx = CustomCourseForEdX(
            course_id=self.course.id,
            coach=self.coach,
            display_name="Test CCX"
        )
        ccx.save()
        # Grant the coach role on the CCX-scoped locator and enroll the coach.
        ccx_locator = CCXLocator.from_course_locator(self.course.id, str(ccx.id))
        role = CourseCcxCoachRole(ccx_locator)
        role.add_users(self.coach)
        CourseEnrollment.enroll(self.coach, ccx_locator)
        return ccx_locator

    def test_has_ccx_coach_role(self):
        """
        Assert that user has coach access on ccx.
        """
        ccx_locator = self.make_ccx()
        # user have access as coach on ccx
        assert access.has_ccx_coach_role(self.coach, ccx_locator)
        # user dont have access as coach on ccx
        self.setup_user()
        assert not access.has_ccx_coach_role(self.user, ccx_locator)

    def test_ccx_coach_has_staff_role(self):
        """
        Assert that user has staff access on ccx.
        """
        ccx_locator = self.make_ccx()
        # coach user has access as staff on ccx
        assert access.has_access(self.coach, 'staff', ccx_locator)
        # basic user doesn't have staff access on ccx..
        self.setup_user()
        assert not access.has_access(self.user, 'staff', ccx_locator)
        # until we give her a staff role.
        CourseStaffRole(ccx_locator).add_users(self.user)
        assert access.has_access(self.user, 'staff', ccx_locator)

    def test_access_student_progress_ccx(self):
        """
        Assert that only a coach can see progress of student.
        """
        ccx_locator = self.make_ccx()
        student = UserFactory()
        # Enroll user
        CourseEnrollment.enroll(student, ccx_locator)
        # Test for access of a coach
        resp = self.client.get(reverse('student_progress', args=[str(ccx_locator), student.id]))
        assert resp.status_code == 200
        # Assert access of a student
        # NOTE(review): '<PASSWORD>' is a scrubbed placeholder; presumably the
        # factory's default password in the real source — confirm against repo.
        self.client.login(username=student.username, password='<PASSWORD>')
        resp = self.client.get(reverse('student_progress', args=[str(ccx_locator), self.coach.id]))
        assert resp.status_code == 404
@ddt.ddt
class AccessTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase, MilestonesTestCaseMixin):
"""
Tests for the various access controls on the student dashboard
"""
TOMORROW = 'tomorrow'
YESTERDAY = 'yesterday'
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
DATES = {
TOMORROW: datetime.datetime.now(pytz.utc) + datetime.timedelta(days=1),
YESTERDAY: datetime.datetime.now(pytz.utc) - datetime.timedelta(days=1),
None: None,
}
    def setUp(self):
        """Create one course plus one user of every access level used below."""
        super().setUp()
        self.course = CourseFactory.create(org='edX', course='toy', run='test_run')
        self.anonymous_user = AnonymousUserFactory()
        self.beta_user = BetaTesterFactory(course_key=self.course.id)
        self.student = UserFactory()
        self.global_staff = UserFactory(is_staff=True)
        self.course_staff = StaffFactory(course_key=self.course.id)
        self.course_instructor = InstructorFactory(course_key=self.course.id)
        self.staff = GlobalStaffFactory()
    def verify_access(self, mock_unit, student_should_have_access, expected_error_type=None):
        """ Verify the expected result from _has_access_descriptor """
        # An anonymous user stands in for "student-level" access here.
        response = access._has_access_descriptor(self.anonymous_user, 'load', mock_unit, course_key=self.course.id)
        assert student_should_have_access == bool(response)
        if expected_error_type is not None:
            # On denial, the response carries a typed error with an error code.
            assert isinstance(response, expected_error_type)
            assert response.to_json()['error_code'] is not None
        # Course staff must always be able to load the unit regardless.
        assert access._has_access_descriptor(self.course_staff, 'load', mock_unit, course_key=self.course.id)
    def test_has_staff_access_to_preview_mode(self):
        """
        Test that preview mode is only accessible by staff users.
        """
        course_key = self.course.id
        CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
        for user in [self.global_staff, self.course_staff, self.course_instructor]:
            assert access.has_staff_access_to_preview_mode(user, course_key)
        assert not access.has_staff_access_to_preview_mode(self.student, course_key)
        # we don't want to restrict a staff user, masquerading as student,
        # to access preview mode.
        # Note that self.student now have access to preview mode,
        # `is_masquerading_as_student == True` means user is staff and is
        # masquerading as a student.
        with patch('lms.djangoapps.courseware.access.is_masquerading_as_student') as mock_masquerade:
            mock_masquerade.return_value = True
            for user in [self.global_staff, self.course_staff, self.course_instructor, self.student]:
                assert access.has_staff_access_to_preview_mode(user, course_key)
    def test_administrative_accesses_to_course_for_user(self):
        """
        Test types of admin accesses to a course
        """
        course_key = self.course.id
        # `administrative_accesses_to_course_for_user` returns accesses in tuple as
        # (`global_staff`, `course_staff`, `course_instructor`).
        # Order matters here, for example `True` at first index in tuple essentially means
        # given user is a global staff.
        for count, user in enumerate([self.global_staff, self.course_staff, self.course_instructor]):
            assert access.administrative_accesses_to_course_for_user(user, course_key)[count]
        # A plain student must have no administrative access at all.
        assert not any(access.administrative_accesses_to_course_for_user(self.student, course_key))
    def test_student_has_access(self):
        """
        Tests course student have right access to content w/o preview.
        """
        course_key = self.course.id
        chapter = ItemFactory.create(category="chapter", parent_location=self.course.location)
        overview = CourseOverview.get_from_id(course_key)
        # Enroll student to the course
        CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
        # The same objects are checked with preview off (access granted)
        # and preview on (access denied for a non-staff student).
        modules = [
            self.course,
            overview,
            chapter,
        ]
        with patch('lms.djangoapps.courseware.access.in_preview_mode') as mock_preview:
            mock_preview.return_value = False
            for obj in modules:
                assert bool(access.has_access(self.student, 'load', obj, course_key=self.course.id))
        with patch('lms.djangoapps.courseware.access.in_preview_mode') as mock_preview:
            mock_preview.return_value = True
            for obj in modules:
                assert not bool(access.has_access(self.student, 'load', obj, course_key=self.course.id))
    @patch('lms.djangoapps.courseware.access.in_preview_mode', Mock(return_value=True))
    def test_has_access_with_preview_mode(self):
        """
        Tests particular user's can access content via has_access in preview mode.
        """
        # Staff-level users can see staff content in preview; students cannot.
        assert bool(access.has_access(self.global_staff, 'staff', self.course, course_key=self.course.id))
        assert bool(access.has_access(self.course_staff, 'staff', self.course, course_key=self.course.id))
        assert bool(access.has_access(self.course_instructor, 'staff', self.course, course_key=self.course.id))
        assert not bool(access.has_access(self.student, 'staff', self.course, course_key=self.course.id))
        assert not bool(access.has_access(self.student, 'load', self.course, course_key=self.course.id))
        # When masquerading is true, user should not be able to access staff content
        with patch('lms.djangoapps.courseware.access.is_masquerading_as_student') as mock_masquerade:
            mock_masquerade.return_value = True
            assert not bool(access.has_access(self.global_staff, 'staff', self.course, course_key=self.course.id))
            assert not bool(access.has_access(self.student, 'staff', self.course, course_key=self.course.id))
    @patch('lms.djangoapps.courseware.access_utils.in_preview_mode', Mock(return_value=True))
    def test_has_access_in_preview_mode_with_group(self):
        """
        Test that a user masquerading as a member of a group sees appropriate content in preview mode.
        """
        # Note about UserPartition and UserPartition Group IDs: these must not conflict with IDs used
        # by dynamic user partitions.
        partition_id = MINIMUM_STATIC_PARTITION_ID
        group_0_id = MINIMUM_STATIC_PARTITION_ID + 1
        group_1_id = MINIMUM_STATIC_PARTITION_ID + 2
        user_partition = UserPartition(
            partition_id, 'Test User Partition', '',
            [Group(group_0_id, 'Group 1'), Group(group_1_id, 'Group 2')],
            scheme_id='cohort'
        )
        self.course.user_partitions.append(user_partition)
        self.course.cohort_config = {'cohorted': True}
        # Chapter is gated to group 0 only.
        chapter = ItemFactory.create(category="chapter", parent_location=self.course.location)
        chapter.group_access = {partition_id: [group_0_id]}
        modulestore().update_item(self.course, ModuleStoreEnum.UserID.test)
        # User should not be able to preview when masquerading as student (and not in the group above).
        with patch('lms.djangoapps.courseware.access.get_user_role') as mock_user_role:
            mock_user_role.return_value = 'student'
            assert not bool(access.has_access(self.global_staff, 'load', chapter, course_key=self.course.id))
        # Should be able to preview when in staff or instructor role.
        for mocked_role in ['staff', 'instructor']:
            with patch('lms.djangoapps.courseware.access.get_user_role') as mock_user_role:
                mock_user_role.return_value = mocked_role
                assert bool(access.has_access(self.global_staff, 'load', chapter, course_key=self.course.id))
        # Now install masquerade group and set staff as a member of that.
        assert 200 == masquerade_as_group_member(self.global_staff, self.course, partition_id, group_0_id)
        # Can load the chapter since user is in the group.
        assert bool(access.has_access(self.global_staff, 'load', chapter, course_key=self.course.id))
        # Move the user to be a part of the second group.
        assert 200 == masquerade_as_group_member(self.global_staff, self.course, partition_id, group_1_id)
        # Cannot load the chapter since user is in a different group.
        assert not bool(access.has_access(self.global_staff, 'load', chapter, course_key=self.course.id))
    def test_has_access_to_course(self):
        """Verify staff/instructor access for every user type incl. None/anonymous."""
        assert not access._has_access_to_course(None, 'staff', self.course.id)
        assert not access._has_access_to_course(self.anonymous_user, 'staff', self.course.id)
        assert not access._has_access_to_course(self.anonymous_user, 'instructor', self.course.id)
        # Global staff implies both staff and instructor access.
        assert access._has_access_to_course(self.global_staff, 'staff', self.course.id)
        assert access._has_access_to_course(self.global_staff, 'instructor', self.course.id)
        # A user has staff access if they are in the staff group
        assert access._has_access_to_course(self.course_staff, 'staff', self.course.id)
        assert not access._has_access_to_course(self.course_staff, 'instructor', self.course.id)
        # A user has staff and instructor access if they are in the instructor group
        assert access._has_access_to_course(self.course_instructor, 'staff', self.course.id)
        assert access._has_access_to_course(self.course_instructor, 'instructor', self.course.id)
        # A user does not have staff or instructor access if they are
        # not in either the staff or the the instructor group
        assert not access._has_access_to_course(self.student, 'staff', self.course.id)
        assert not access._has_access_to_course(self.student, 'instructor', self.course.id)
        # Unknown action strings also yield no access.
        assert not access._has_access_to_course(self.student, 'not_staff_or_instructor', self.course.id)
    def test__has_access_string(self):
        """Check the 'global' string target and that bad actions raise ValueError."""
        user = Mock(is_staff=True)
        # Non-'global' targets are never granted, regardless of staff status.
        assert not access._has_access_string(user, 'staff', 'not_global')
        user._has_global_staff_access.return_value = True
        assert access._has_access_string(user, 'staff', 'global')
        self.assertRaises(ValueError, access._has_access_string, user, 'not_staff', 'global')
    # Each tuple: (action, student expected, course staff expected, instructor expected)
    @ddt.data(
        ('load', False, True, True),
        ('staff', False, True, True),
        ('instructor', False, False, True)
    )
    @ddt.unpack
    def test__has_access_error_desc(self, action, expected_student, expected_staff, expected_instructor):
        """Verify per-role access to an error descriptor; unknown actions raise."""
        descriptor = Mock()
        for (user, expected_response) in (
                (self.student, expected_student),
                (self.course_staff, expected_staff),
                (self.course_instructor, expected_instructor)
        ):
            assert bool(access._has_access_error_desc(user, action, descriptor, self.course.id)) == expected_response
        with pytest.raises(ValueError):
            access._has_access_error_desc(self.course_instructor, 'not_load_or_staff', descriptor, self.course.id)
    def test__has_access_descriptor(self):
        """Smoke-test descriptor access for valid actions; unknown actions raise."""
        # TODO: override DISABLE_START_DATES and test the start date branch of the method
        user = Mock()
        descriptor = Mock(user_partitions=[])
        descriptor._class_tags = {}
        descriptor.merged_group_access = {}
        # Always returns true because DISABLE_START_DATES is set in test.py
        assert access._has_access_descriptor(user, 'load', descriptor)
        assert access._has_access_descriptor(user, 'instructor', descriptor)
        with pytest.raises(ValueError):
            access._has_access_descriptor(user, 'not_load_or_staff', descriptor)
@ddt.data(
(True, None, access_response.VisibilityError),
(False, None),
(True, YESTERDAY, access_response.VisibilityError),
(False, YESTERDAY),
(True, TOMORROW, access_response.VisibilityError),
(False, TOMORROW, access_response.StartDateError)
)
@ddt.unpack
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test__has_access_descriptor_staff_lock(self, visible_to_staff_only, start, expected_error_type=None):
"""
Tests that "visible_to_staff_only" overrides start date.
"""
expected_access = expected_error_type is None
mock_unit = Mock(location=self.course.location, user_partitions=[])
mock_unit._class_tags = {} # Needed for detached | |
from build.management.commands.base_build import Command as BaseBuild
from build.management.commands.build_homology_models_zip import Command as UploadModel
from django.db.models import Q
from django.conf import settings
from protein.models import Protein, ProteinConformation, ProteinAnomaly, ProteinState, ProteinSegment, ProteinFamily
from residue.models import Residue
from residue.functions import dgn, ggn
from structure.models import *
from structure.functions import HSExposureCB, PdbStateIdentifier, update_template_source, StructureSeqNumOverwrite
from common.alignment import AlignedReferenceTemplate, GProteinAlignment
from common.definitions import *
from common.models import WebLink
from signprot.models import SignprotComplex
import structure.structural_superposition as sp
import structure.assign_generic_numbers_gpcr as as_gn
import structure.homology_models_tests as tests
from structure.signprot_modeling import SignprotModeling
from structure.homology_modeling_functions import GPCRDBParsingPDB, ImportHomologyModel, Remodeling
import Bio.PDB as PDB
from modeller import *
from modeller.automodel import *
from collections import OrderedDict
import glob
import os
import shlex
import logging
import pprint
from io import StringIO, BytesIO
import sys
import re
import zipfile
import shutil
import math
from copy import deepcopy
from datetime import datetime, date
import yaml
import traceback
import subprocess
# Module-level setup: logging, well-known paths and lookup tables shared by
# the management command below.
startTime = datetime.now()
logger = logging.getLogger('homology_modeling')
hdlr = logging.FileHandler('./logs/homology_modeling.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)
structure_path = './structure/'
pir_path = os.sep.join([structure_path, 'PIR'])
path = "./structure/homology_models/"
build_date = date.today()
# Heavy-atom count per amino-acid one-letter code ('-' = gap).
atom_num_dict = {'E':9, 'S':6, 'Y':12, 'G':4, 'A':5, 'V':7, 'M':8, 'L':8, 'I':8, 'T':7, 'F':11, 'H':10, 'K':9,
                 'D':8, 'C':6, 'R':11, 'P':7, 'Q':9, 'N':8, 'W':14, '-':0}
gprotein_segments = ProteinSegment.objects.filter(proteinfamily='Alpha')
gprotein_segment_slugs = [i.slug for i in gprotein_segments]
# PDB codes with residue-numbering issues, keyed by wt_pdb_lookup filenames.
structures_with_num_issues = [i.split('.')[0] for i in os.listdir(os.sep.join([settings.DATA_DIR, 'structure_data', 'wt_pdb_lookup']))]
# NOTE(review): blanket warning suppression hides all warnings module-wide,
# not just the noisy third-party ones — consider narrowing the filter.
import warnings
warnings.filterwarnings("ignore")
class Command(BaseBuild):
help = 'Build automated chimeric GPCR homology models'
    def add_arguments(self, parser):
        """Register command-line options for the homology-model build command."""
        super(Command, self).add_arguments(parser=parser)
        parser.add_argument('--update', help='Upload model to GPCRdb, overwrites existing entry', default=False,
                            action='store_true')
        parser.add_argument('-r', help='''Run program for specific receptor(s) by giving UniProt common name as
                                          argument (e.g. 5ht2a_human) or build revised crystal by giving PDB code (e.g. 4K5Y)''',
                            default=False, type=str, nargs='+')
        parser.add_argument('-z', help='Create zip file of model directory containing all built models', default=False,
                            action='store_true')
        parser.add_argument('-c', help='Select GPCR class (A, B1, B2, C, F)', default=False)
        parser.add_argument('-x', help='Select crystal structure refinement for all crystals in the db', default=False, action='store_true')
        parser.add_argument('--purge', help='Purge all existing records', default=False, action='store_true')
        parser.add_argument('--purge_zips', help='Purge all zips in homology_models_zip dir', default=False, action='store_true')
        parser.add_argument('-i', help='Number of MODELLER iterations for model building', default=1, type=int)
        parser.add_argument('--test_run', action='store_true', help='Build only a test set of homology models ', default=False)
        parser.add_argument('--debug', help='Debugging mode', default=False, action='store_true')
        parser.add_argument('--state', help='Specify state in debug mode', default=False, type=str, nargs='+')
        parser.add_argument('--complex', help='Build GPCR complex', default=False, action='store_true')
        parser.add_argument('--signprot', help='Specify signaling protein with UniProt name', default=False, type=str)
        parser.add_argument('--n_c_term', help='Model N- and C-termini', default=False, action='store_true')
        parser.add_argument('--force_main_temp', help='Build model using this xtal as main template', default=False, type=str)
        parser.add_argument('--fast_refinement', help='Chose fastest refinement option in MODELLER', default=False, action='store_true')
        parser.add_argument('--keep_hetatoms', help='Keep hetero atoms from main template, this includes ligands', default=False, action='store_true')
        parser.add_argument('--rerun', help='Skip models with matching zip archives and only run the missing models.', default=False, action='store_true')
    def handle(self, *args, **options):
        """Main entry point: select receptors/states, build models (possibly in
        parallel), report missing models, and optionally archive the output.
        """
        self.debug = options['debug']
        # Ensure the working directories exist before any worker touches them.
        if not os.path.exists('./structure/homology_models/'):
            os.mkdir('./structure/homology_models')
        if not os.path.exists('./structure/PIR/'):
            os.mkdir('./structure/PIR')
        if not os.path.exists('./static/homology_models'):
            os.mkdir('./static/homology_models')
        # Truncate the progress file; workers append finished entries to it.
        open('./structure/homology_models/done_models.txt','w').close()
        if options['update']:
            self.update = True
        else:
            self.update = False
        if options['complex']:
            self.complex = True
        else:
            self.complex = False
        if not options['signprot']:
            self.signprot = False
        else:
            self.signprot = options['signprot']
        self.force_main_temp = options['force_main_temp']
        self.fast_refinement = options['fast_refinement']
        self.keep_hetatoms = options['keep_hetatoms']
        self.rerun = options['rerun']
        # Maps the -c class letter to the GPCRdb family slug prefix.
        GPCR_class_codes = {'A':'001', 'B1':'002', 'B2':'003', 'C':'004', 'D1':'005', 'F':'006', 'T':'007'}
        self.modeller_iterations = options['i']
        self.build_all = False
        # Build all
        if options['purge']:
            # if updating all, then delete existing
            print("Delete existing homology model db entries")
            StructureModel.objects.all().delete()
        if options['purge_zips']:
            print("Delete existing local homology model zips")
            hommod_zip_path = './structure/homology_models_zip/'
            if os.path.exists(hommod_zip_path):
                files = os.listdir(hommod_zip_path)
                for f in files:
                    # unlink plain files; fall back to rmtree for directories
                    try:
                        os.unlink(hommod_zip_path+f)
                    except:
                        shutil.rmtree(hommod_zip_path+f)
        self.custom_selection = False
        # Receptor selection: explicit list (-r), refined structures only (-x),
        # everything (no -c), or a single GPCR class (-c).
        if options['r']:
            all_receptors = Protein.objects.filter(entry_name__in=options['r'])
            self.custom_selection = True
        # Only refined structures
        elif options['x']:
            structs = Structure.objects.filter(annotated=True).order_by('pdb_code__index')
            all_receptors = [i.protein_conformation.protein for i in structs]
        # Build all
        elif options['c']==False:
            self.build_all = True
            all_receptors = Protein.objects.filter(parent__isnull=True, accession__isnull=False, species__common_name='Human').filter(Q(family__slug__istartswith='001') |
                                                                                                                                     Q(family__slug__istartswith='002') |
                                                                                                                                     Q(family__slug__istartswith='003') |
                                                                                                                                     Q(family__slug__istartswith='004') |
                                                                                                                                     Q(family__slug__istartswith='005') |
                                                                                                                                     Q(family__slug__istartswith='006') |
                                                                                                                                     Q(family__slug__istartswith='007')).order_by('entry_name')
            structs = Structure.objects.filter(annotated=True).order_by('pdb_code__index')
            all_receptors = list(all_receptors)+[i.protein_conformation.protein for i in structs]
        elif options['c'].upper() not in GPCR_class_codes:
            raise AssertionError('Error: Incorrect class name given. Use argument -c with class name A, B1, B2, D1, C, F or T')
        # Build one class
        else:
            all_receptors = Protein.objects.filter(parent__isnull=True, accession__isnull=False, species__common_name='Human',
                                                   family__slug__istartswith=GPCR_class_codes[options['c'].upper()])
        self.receptor_list, self.receptor_list_entry_names = [],[]
        # Find proteins and states for which there is no xtal yet
        for r in all_receptors:
            # No accession -> this is a structure entry; refine in its own state.
            if r.accession==None:
                self.receptor_list.append([r, Structure.objects.get(pdb_code__index=r.entry_name.upper()).state.name])
                continue
            for st in self.get_states_to_model(r):
                self.receptor_list.append([r, st])
        # Only specified states
        if options['state']:
            self.receptor_list = [i for i in self.receptor_list if i[1] in options['state']]
        self.receptor_list_entry_names = [i[0].entry_name for i in self.receptor_list]
        # Test run, only 5 models
        if options['test_run']:
            self.receptor_list = self.receptor_list[:5]
            self.receptor_list_entry_names = self.receptor_list_entry_names[:5]
        # Model building
        print("receptors to do",len(self.receptor_list))
        self.processors = options['proc']
        self.prepare_input(options['proc'], self.receptor_list)
        # Cleanup
        # NOTE(review): this collects entries that ARE in done_models.txt but
        # NOT in the receptor list and labels them "missing" — presumably the
        # intended check is the reverse (receptors with no done entry); verify.
        missing_models = []
        with open('./structure/homology_models/done_models.txt') as f:
            for i in f.readlines():
                if i.split('\n')[0] not in self.receptor_list_entry_names:
                    missing_models.append(i.split('\n')[0])
        if len(missing_models)==0:
            print('All models were run')
        else:
            print('Missing models:')
            print(missing_models)
            # new_args = shlex.split('/env/bin/python3 manage.py build_homology_models -r {} -p {} -s {}'.format(' '.join(missing_models, options['proc'], options['s'])))
        os.remove('./structure/homology_models/done_models.txt')
        # Make zip file for archiving
        os.chdir('./structure/')
        if options['z']==True:
            if self.complex:
                zipf = zipfile.ZipFile('../static/homology_models/GPCRdb_complex_homology_models_{}.zip'.format(str(build_date)),'w',zipfile.ZIP_DEFLATED)
            else:
                zipf = zipfile.ZipFile('../static/homology_models/GPCRdb_homology_models_{}.zip'.format(str(build_date)),'w',zipfile.ZIP_DEFLATED)
            for root, dirs, files in os.walk('homology_models'):
                for f in files:
                    # 'post' files are intermediate refinement output; skip them.
                    if 'post' not in f:
                        zipf.write(os.path.join(root, f))
            zipf.close()
            if not self.debug:
                # Remove working dirs after archiving unless debugging.
                shutil.rmtree('homology_models')
                shutil.rmtree('PIR')
def main_func(self, positions, iteration, count, lock):
    """Multiprocessing worker: consume (receptor, state) jobs and build models.

    Parameters
    ----------
    positions : sequence of int
        Job indices assigned to this worker; only ``positions[0]`` is used,
        to derive a processor id for log messages.
    iteration : unused
        Kept for the shared worker call signature.
    count : multiprocessing.Value
        Shared counter of jobs already claimed across workers.
    lock : multiprocessing.Lock
        Guards ``count`` so each job is claimed exactly once.
    """
    processor_id = round(self.processors*positions[0]/len(self.receptor_list))+1
    i = 0
    while count.value<len(self.receptor_list):
        i += 1
        # Claim the next job atomically. The while-condition above is checked
        # without the lock, so re-check here: otherwise two workers that both
        # saw the last remaining job could increment past the end of the list
        # and raise IndexError.
        with lock:
            if count.value >= len(self.receptor_list):
                break
            receptor = self.receptor_list[count.value]
            count.value +=1
        # RERUN: if a model zip file already exists, skip it and move to the next
        if self.rerun:
            # Init temporary model object for checks regarding signaling protein complexes etc.
            temp_model_check = HomologyModeling(receptor[0].entry_name, receptor[1], [receptor[1]], iterations=self.modeller_iterations, complex_model=self.complex, signprot=self.signprot, debug=self.debug,
                                                force_main_temp=self.force_main_temp, fast_refinement=self.fast_refinement, keep_hetatoms=self.keep_hetatoms)
            path = './structure/complex_models_zip/' if temp_model_check.complex else './structure/homology_models_zip/'
            # Differentiate between structure refinement and homology modeling
            if temp_model_check.revise_xtal:
                filepath = "{}*{}_refined_*.zip".format(path, receptor[0].entry_name.upper())
            else:
                filepath = "{}*{}_{}_*.zip".format(path, receptor[0].entry_name, receptor[1])
            # Check if model zip file exists
            if len(glob.glob(filepath)) > 0:
                continue
        mod_startTime = datetime.now()
        logger.info('Generating model for \'{}\' ({})... ({} out of {}) (processor:{} count:{})'.format(receptor[0].entry_name, receptor[1],count.value, len(self.receptor_list),processor_id,i))
        chm = CallHomologyModeling(receptor[0].entry_name, receptor[1], iterations=self.modeller_iterations, debug=self.debug,
                                   update=self.update, complex_model=self.complex, signprot=self.signprot, force_main_temp=self.force_main_temp, keep_hetatoms=self.keep_hetatoms)
        chm.run(fast_refinement=self.fast_refinement)
        logger.info('Model finished for \'{}\' ({})... (processor:{} count:{}) (Time: {})'.format(receptor[0].entry_name, receptor[1],processor_id,i,datetime.now() - mod_startTime))
def get_states_to_model(self, receptor):
    """Return the activation-state names of *receptor* that still need a model.

    A state qualifies when at least one annotated structure of another
    receptor in the same class exists for that state, but *receptor* itself
    has no structure in that state.
    """
    # When a specific main template is forced for a custom selection, model
    # only the state of that forced template structure.
    if self.force_main_temp and self.custom_selection:
        return [Structure.objects.get(protein_conformation__protein__entry_name=self.force_main_temp.lower()).state.name]
    rec_class = ProteinFamily.objects.get(name=receptor.get_protein_class())
    # Adhesion (B2) receptors may borrow templates from both class B1 and B2.
    if rec_class.name=='Class B2 (Adhesion)':
        rec_class = ProteinFamily.objects.filter(name__in=['Class B1 (Secretin)', 'Class B2 (Adhesion)'])
        structs_in_class = Structure.objects.filter(annotated=True).filter(Q(protein_conformation__protein__parent__family__slug__startswith=rec_class[0].slug) |
                                                                          Q(protein_conformation__protein__parent__family__slug__startswith=rec_class[1].slug))
    else:
        structs_in_class = Structure.objects.filter(protein_conformation__protein__parent__family__slug__startswith=rec_class.slug, annotated=True)
    # States observed in the class, excluding this receptor's own structures
    # and the catch-all 'Other' state.
    possible_states = structs_in_class.exclude(protein_conformation__protein__parent=receptor).exclude(state__name='Other').values_list('state__name', flat=True).distinct()
    if len(possible_states)==0:
        # Taste 2 receptors with no class templates fall back to class A.
        # NOTE(review): if the B2 branch was taken above, rec_class is a
        # QuerySet here and `.name` would raise AttributeError — presumably
        # unreachable because B1+B2 always have structures; confirm.
        if rec_class.name=='Class T (Taste 2)':
            rec_class = ProteinFamily.objects.get(name='Class A (Rhodopsin)')
            structs_in_class = Structure.objects.filter(protein_conformation__protein__parent__family__slug__startswith=rec_class.slug, annotated=True)
            possible_states = structs_in_class.exclude(protein_conformation__protein__parent=receptor).exclude(state__name='Other').values_list('state__name', flat=True).distinct()
    structs = structs_in_class.filter(protein_conformation__protein__parent=receptor)
    li1 = list(possible_states)  # states modelable from class templates
    li2 = []                     # of those, states the receptor already has structures for
    for s in structs:
        if s.state.name in li1 and s.state.name not in li2:
            li2.append(s.state.name)
    # Symmetric difference; since li2 is built as a subset of li1 this yields
    # the template-supported states with no experimental structure yet.
    li_dif = [i for i in li1 + li2 if i not in li1 or i not in li2]
    return li_dif
class CallHomologyModeling():
def __init__(self, receptor, state, iterations=1, debug=False, update=False, complex_model=False, signprot=False, force_main_temp=False, keep_hetatoms=False, no_remodeling=False):
    """Store the settings for a single homology-modeling run.

    receptor : str
        Receptor entry name to model (e.g. 'adrb2_human').
    state : str
        Activation state name to model.
    iterations : int
        Number of MODELLER refinement iterations.
    debug : bool
        Verbose mode; run() suppresses BioPDB stdout only when False.
    update : bool
        Passed through to the modeling pipeline as an update flag.
    complex_model : bool
        Build a receptor/signaling-protein complex model.
    signprot : str or bool
        Signaling protein entry name for complex models, or False.
    force_main_temp : str or bool
        Entry name of a forced main template structure, or False.
    keep_hetatoms : bool
        Keep heteroatom records from the template.
    no_remodeling : bool
        Skip the loop-remodeling step after the clash/break test.
    """
    self.receptor = receptor
    self.state = state
    self.modeller_iterations = iterations
    self.debug = debug
    self.update = update
    self.complex = complex_model
    self.signprot = signprot
    self.force_main_temp = force_main_temp
    self.keep_hetatoms = keep_hetatoms
    self.no_remodeling = no_remodeling
def run(self, import_receptor=False, fast_refinement=False):
try:
# seq_nums_overwrite_cutoff_dict = {'4PHU':2000, '4LDL':1000, '4LDO':1000, '4QKX':1000, '5JQH':1000, '5TZY':2000, '6D26':2000, '6D27':2000, '6CSY':1000}
##### Ignore output from that can come from BioPDB! #####
if not self.debug:
_stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
Homology_model = HomologyModeling(self.receptor, self.state, [self.state], iterations=self.modeller_iterations, complex_model=self.complex, signprot=self.signprot, debug=self.debug,
force_main_temp=self.force_main_temp, fast_refinement=fast_refinement, keep_hetatoms=self.keep_hetatoms)
if import_receptor:
ihm = ImportHomologyModel(self.receptor, self.signprot)
model, templates, similarities = ihm.find_files()
Homology_model.alignment.reference_dict, Homology_model.alignment.template_dict, Homology_model.alignment.alignment_dict, Homology_model.main_pdb_array = ihm.parse_model(model)
Homology_model.template_source = ihm.parse_template_source(templates)
Homology_model.similarity_table_all = ihm.parse_similarities(similarities)
Homology_model.disulfide_pairs = ihm.find_disulfides()
Homology_model.trimmed_residues = []
Homology_model.main_template_preferred_chain = 'R'
Homology_model.main_structure = Homology_model.template_source['TM1']['1x50'][0]
cm = SignprotModeling(Homology_model.main_structure, self.signprot, Homology_model.template_source, Homology_model.trimmed_residues, Homology_model.alignment, Homology_model.main_pdb_array, Homology_model.debug)
cm.run()
Homology_model.template_source = cm.template_source
Homology_model.trimmed_residues = cm.trimmed_residues
Homology_model.alignment = cm.a
Homology_model.main_pdb_array = cm.main_pdb_array
Homology_model.target_signprot = cm.target_signprot
Homology_model.signprot_protconf = cm.signprot_protconf
Homology_model.signprot_complex = cm.signprot_complex
else:
alignment = Homology_model.run_alignment([self.state])
Homology_model.build_homology_model(alignment)
print('icl3', Homology_model.icl3_delete)
Homology_model.build_homology_model_second_part()
formatted_model = Homology_model.format_final_model()
if formatted_model==None:
raise ValueError('Incorrect assignment of generic numbers in {} {}'.format(self.receptor, self.state))
# # if Homology_model.changes_on_db:
ssno = StructureSeqNumOverwrite(Homology_model.main_structure)
if len(ssno.pdb_wt_table)>0:
ssno.seq_num_overwrite('wt')
# Run clash and break test
p = PDB.PDBParser()
post_model = p.get_structure('model','./structure/homology_models/{}.pdb'.format(Homology_model.modelname))
if Homology_model.signprot:
hse = HSExposureCB(post_model, radius=11, check_chain_breaks=True, check_knots=True, receptor=self.receptor, signprot=Homology_model.signprot, check_hetatoms=True)
# Run remodeling
if len(hse.remodel_resis)>0 and not self.no_remodeling and not Homology_model.revise_xtal:
rm = Remodeling('./structure/homology_models/{}.pdb'.format(Homology_model.modelname), gaps=hse.remodel_resis, receptor=self.receptor, signprot=Homology_model.signprot,
icl3_delete=Homology_model.icl3_delete)
rm.make_pirfile()
rm.run()
logger.info('Remodeled {} {} at {}'.format(self.receptor, Homology_model.signprot, hse.remodel_resis))
with open('./structure/homology_models/{}.pdb'.format(Homology_model.modelname), 'r') as remodeled_pdb:
formatted_model = remodeled_pdb.read()
formatted_model = Homology_model.format_final_model()
else:
hse = HSExposureCB(post_model, radius=11, check_chain_breaks=True, receptor=self.receptor, check_hetatoms=True)
# Remove not interacting HETATM residues
if self.debug:
print('HETRESIS to remove: {}'.format(hse.hetresis_to_remove))
if len(hse.hetresis_to_remove)>0:
post_model2 = p.get_structure('model','./structure/homology_models/{}.pdb'.format(Homology_model.modelname))
for | |
<reponame>ttagu99/naver_hack<filename>main_secls.py<gh_stars>1-10
# -*- coding: utf_8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import argparse
import time
import nsml
import numpy as np
from nsml import DATASET_PATH
import keras
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Flatten, Activation, Concatenate
from keras.layers import Conv2D, MaxPooling2D
from keras.callbacks import ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, BatchNormalization,Input, GlobalMaxPooling2D
from keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint
from keras import backend as K
from data_loader import train_data_loader
from keras.applications.xception import Xception
from keras.applications.densenet import DenseNet121, DenseNet169, DenseNet201
from keras.applications.nasnet import NASNetMobile
from keras.applications.resnet50 import ResNet50
from keras.applications.nasnet import NASNetLarge
from keras.applications.mobilenetv2 import MobileNetV2
from keras.applications.inception_resnet_v2 import InceptionResNetV2
from classification_models.resnet import ResNet18, SEResNet18
from classification_models.senet import SEResNeXt50, SEResNeXt101
from keras.models import Model,load_model
from keras.optimizers import Adam, SGD
from keras import Model, Input
from keras.layers import Layer, multiply, Lambda
from keras.utils import to_categorical
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
import imgaug as ia
from imgaug import augmenters as iaa
import random
from keras.utils.training_utils import multi_gpu_model
from keras.preprocessing.image import ImageDataGenerator
import pandas as pd
import tensorflow as tf
from keras.losses import categorical_crossentropy
from se_inception_resnet_v2 import SEInceptionResNetV2
def bind_model(model):
    """Register the save/load/infer hooks for *model* with NSML."""

    def save(dir_name):
        # Persist weights only; the architecture is rebuilt from code.
        os.makedirs(dir_name, exist_ok=True)
        model.save_weights(os.path.join(dir_name, 'model'))
        print('model saved!')

    def load(file_path):
        model.load_weights(file_path)
        print('model loaded!')

    def infer(queries, _):
        """Rank up to 1000 reference images for every query image."""
        test_path = DATASET_PATH + '/test/test_data'
        ref_dir = os.path.join(test_path, 'reference')
        db = [os.path.join(test_path, 'reference', fname) for fname in os.listdir(ref_dir)]
        # Reduce full paths to bare ids (drop directories and extensions),
        # then sort so results are deterministic.
        strip_id = lambda p: p.split('/')[-1].split('.')[0]
        queries = sorted(strip_id(q) for q in queries)
        db = sorted(strip_id(d) for d in db)
        queries, query_vecs, references, reference_vecs, indices = get_feature(model, queries, db, (299,299))
        retrieval_results = {}
        for row, query in zip(indices, queries):
            retrieval_results[query] = [references[k] for k in row][:1000]
        print('done')
        return list(zip(range(len(retrieval_results)), retrieval_results.items()))

    # DONOTCHANGE: These hooks are reserved for nsml.
    nsml.bind(save=save, load=load, infer=infer)
def l2_normalize(v):
    """Normalize each row of a 2-D array to unit Euclidean length.

    Rows whose norm is exactly zero are returned as all-zero rows instead of
    producing NaN/inf from a division by zero (the previously commented-out
    guard handled only the scalar case and was unreachable for row norms).
    """
    v = np.asarray(v, dtype=float)
    norm = np.linalg.norm(v, axis=1)
    # Substitute 1 for zero norms: 0/1 keeps the row at zero, no warning.
    norm[norm == 0] = 1.0
    return v / norm[:, None]
# data preprocess
def get_feature(model, queries, db, img_size):
    """Extract, post-process and rank retrieval descriptors.

    Pipeline: CNN features for query/reference images -> R-MAC sum-pooled
    descriptors concatenated with GAP features -> L2 norm -> PCA(1024) ->
    query expansion (QE) -> database augmentation (DBA) -> cosine ranking.

    Returns (queries, query_vecs, db, reference_vecs, qe_indices) where
    qe_indices[i] ranks the references for query i, best first.
    """
    # img_size = (224, 224)
    batch_size = 200
    #topResultsQE=5
    L=3  # number of R-MAC pyramid levels

    def aug_out_featurewise_val(img):
        # Per-channel standardisation with fixed RGB means / stds.
        img = img.astype('float32')
        img[:,:,0] = (img[:,:,0] - 144.618226)/(56.817922)#/r_std
        img[:,:,1] = (img[:,:,1] - 132.191131)/(57.820693)#/g_std
        img[:,:,2] = (img[:,:,2] - 119.101468)/(60.085783)#/b_std
        return img

    test_path = DATASET_PATH + '/test/test_data'
    # Expose both the input and the output of the 'GAP_LAST' layer: the input
    # feeds the R-MAC extraction, the output is the plain GAP descriptor.
    intermediate_layer_model = Model(inputs=model.input, outputs=[model.get_layer('GAP_LAST').input,model.get_layer('GAP_LAST').output])
    test_datagen = ImageDataGenerator(preprocessing_function=aug_out_featurewise_val)
    query_generator = test_datagen.flow_from_directory(
        directory=test_path,
        target_size=img_size,
        classes=['query'],
        color_mode="rgb",
        batch_size=batch_size,
        class_mode=None,
        shuffle=False
    )
    query_vecs, gap_query_vecs = intermediate_layer_model.predict_generator(query_generator, steps=len(query_generator),workers=4)
    reference_generator = test_datagen.flow_from_directory(
        directory=test_path,
        target_size=img_size,
        classes=['reference'],
        color_mode="rgb",
        batch_size=batch_size,
        class_mode=None,
        shuffle=False
    )
    reference_vecs, gap_reference_vecs = intermediate_layer_model.predict_generator(reference_generator, steps=len(reference_generator),workers=4)
    # ------------------ DB images: reading, descripting and whitening -----------------------
    # NOTE(review): extractRMAC and sumPooling are not defined or imported in
    # this file as shown — presumably project helpers; confirm their source.
    DbMAC = extractRMAC(reference_vecs, intermediate_layer_model, True, L)
    DbMAC = np.array(DbMAC)
    DbMAC_sumpool = sumPooling(DbMAC, reference_vecs.shape[0], False)
    print('DbMAC_sumpool lenght',len(DbMAC_sumpool))
    # ------------------- query images: reading, descripting and whitening -----------------------
    queryMAC = extractRMAC(query_vecs, intermediate_layer_model, True, L)
    queryMAC = np.array(queryMAC)
    queryMAC_sumpool = sumPooling(queryMAC, query_vecs.shape[0], False)
    print('queryMAC_sumpool lenght',len(queryMAC_sumpool))
    DbMAC_sumpool = np.array(DbMAC_sumpool)
    DbMAC_sumpool = DbMAC_sumpool.squeeze()
    queryMAC_sumpool = np.array(queryMAC_sumpool)
    queryMAC_sumpool = queryMAC_sumpool.squeeze()
    ######################################
    # queryMAC = queryMAC.squeeze()
    # DbMAC = DbMAC.squeeze()
    # print('DbMAC.shape',DbMAC.shape)
    ### query regions - db regions l2_nor
    # queryMAC = l2_normalize(queryMAC)
    # DbMAC = l2_normalize(DbMAC)
    # region_number = queryMAC.shape[0]//query_vecs.shape[0]
    # print('DbMAC.shape',DbMAC.shape, 'region number', region_number)
    # query_con_rmac = queryMAC.reshape((queryMAC.shape[0]//region_number, queryMAC.shape[1]*region_number))
    # db_con_rmac = DbMAC.reshape((DbMAC.shape[0]//region_number, DbMAC.shape[1]*region_number))
    # print('query_con_rmac.shape',query_con_rmac.shape,'db_con_rmac.shape',db_con_rmac.shape)
    # # pca decom_l2
    # all_vecs = np.concatenate([query_con_rmac, db_con_rmac])
    # print('pca rmac all')
    # all_pca_vecs = PCA(256).fit_transform(all_vecs)
    # print('pca rmac all l2')
    # all_pca_vecs = l2_normalize(all_pca_vecs)
    # print('query_con_rmac')
    # query_con_rmac = all_pca_vecs[:query_con_rmac.shape[0],:]
    # print('db_con_rmac')
    # db_con_rmac = all_pca_vecs[query_con_rmac.shape[0]:,:]
    # print('reshape concate per image', db_con_rmac.shape)
    #######################################
    ## # query regions - db regions simimlarity
    # # pca decom_l2
    # all_vecs = np.concatenate([queryMAC, DbMAC])
    # print('pca rmac all')
    # all_pca_vecs = PCA(128).fit_transform(all_vecs)
    # all_pca_vecs = l2_normalize(all_pca_vecs)
    # queryMAC = all_pca_vecs[:queryMAC.shape[0],:]
    # DbMAC = all_pca_vecs[queryMAC.shape[0]:,:]
    # region_number = queryMAC.shape[0]//query_vecs.shape[0]
    # print('DbMAC.shape',DbMAC.shape, 'region number', region_number)
    # #concate
    # query_con_rmac = queryMAC.reshape((queryMAC.shape[0]//region_number, queryMAC.shape[1]*region_number))
    # db_con_rmac = DbMAC.reshape((DbMAC.shape[0]//region_number, DbMAC.shape[1]*region_number))
    # query_con_rmac = l2_normalize(query_con_rmac)
    # db_con_rmac = l2_normalize(db_con_rmac)
    # print('reshape concate per image', db_con_rmac.shape)
    # pca decom_l2
    #all_vecs = np.concatenate([query_con_rmac, db_con_rmac])
    #print('pca rmac all second')
    #all_pca_vecs = PCA(256).fit_transform(all_vecs)
    #all_pca_vecs = l2_normalize(all_pca_vecs)
    #query_con_rmac = all_pca_vecs[:query_con_rmac.shape[0],:]
    #db_con_rmac = all_pca_vecs[query_con_rmac.shape[0]:,:]
    ######################################
    # l2
    #queryMAC_sumpool = l2_normalize(queryMAC_sumpool)
    #DbMAC_sumpool = l2_normalize(DbMAC_sumpool)
    gap_query_vecs = l2_normalize(gap_query_vecs)
    gap_reference_vecs = l2_normalize(gap_reference_vecs)
    # Final descriptor: R-MAC sum-pool concatenated with the GAP feature.
    query_vecs = np.concatenate([queryMAC_sumpool,gap_query_vecs],axis=1)
    reference_vecs = np.concatenate([DbMAC_sumpool, gap_reference_vecs],axis=1)
    # l2 normalization
    query_vecs = l2_normalize(query_vecs)
    reference_vecs = l2_normalize(reference_vecs)
    # pca
    # NOTE(review): PCA is fitted jointly on queries+references (transductive).
    all_vecs = np.concatenate([query_vecs, reference_vecs])
    all_pca_vecs = PCA(1024).fit_transform(all_vecs)
    query_vecs = all_pca_vecs[:query_vecs.shape[0],:]
    reference_vecs = all_pca_vecs[query_vecs.shape[0]:,:]
    # l2 normalization
    query_vecs = l2_normalize(query_vecs)
    reference_vecs = l2_normalize(reference_vecs)
    # Calculate cosine similarity for QE
    # Query expansion: blend each query with its top-19 references using
    # log-spaced weights that sum to 1.
    qe_iter = 1
    qe_number = 19
    weights = np.logspace(0, -1.5, (qe_number+1))
    weights /= weights.sum()
    pre_sim_matrix = np.dot(query_vecs, reference_vecs.T)
    pre_indices = np.argsort(pre_sim_matrix, axis=1) #lower first
    pre_indices = np.flip(pre_indices, axis=1) #higher first
    for i in range(query_vecs.shape[0]):
        query_vecs[i] *= weights[0]
        for refidx in range(qe_number):
            query_vecs[i] += reference_vecs[pre_indices[i][refidx]]*weights[refidx+1]
    # after query expanstion l2 normalization
    query_vecs = l2_normalize(query_vecs)
    # Calculate cosine similarity for DBA
    # Database augmentation: symmetric blend of each reference with its
    # top-9 (already expanded) queries.
    dba_iter = 1
    dba_number = 9
    weights = np.logspace(0, -1.5, (dba_number+1))
    weights /= weights.sum()
    pre_sim_matrix = np.dot(reference_vecs, query_vecs.T)
    pre_indices = np.argsort(pre_sim_matrix, axis=1) #lower first
    pre_indices = np.flip(pre_indices, axis=1) #higher first
    for i in range(reference_vecs.shape[0]):
        reference_vecs[i] *= weights[0]
        for refidx in range(dba_number):
            reference_vecs[i] += query_vecs[pre_indices[i][refidx]]*weights[refidx+1]
    # after database augment l2 normalization
    reference_vecs = l2_normalize(reference_vecs)
    # LAST Calculate cosine similarity
    qe_sim_matrix = np.dot(query_vecs, reference_vecs.T)
    qe_indices = np.argsort(qe_sim_matrix, axis=1)
    qe_indices = np.flip(qe_indices, axis=1)  # best match first
    return queries, query_vecs, db, reference_vecs, qe_indices
def build_model(backbone=None, input_shape=(224, 224, 3), use_imagenet='imagenet', num_classes=1383, base_freeze=True, opt=None, NUM_GPU=1, use_gap_net=False):
    """Build and compile a softmax classifier on top of a Keras backbone.

    Parameters
    ----------
    backbone : callable
        Keras application constructor, e.g. ``SEResNeXt101``.
    input_shape : tuple
        (height, width, channels) of the network input.
    use_imagenet : str or None
        ``weights`` argument forwarded to the backbone.
    num_classes : int
        Size of the final softmax layer.
    opt : keras optimizer, optional
        Defaults to a fresh ``SGD()`` per call. (The original signature used
        ``opt=SGD()`` — a default evaluated once at import time, so every
        call relying on the default shared one optimizer instance and its
        state across otherwise independent models.)
    use_gap_net : bool
        If True, concatenate multi-scale GAP features into 'GAP_LAST'.
    base_freeze, NUM_GPU :
        Accepted for signature compatibility; not used in this function.
    """
    if opt is None:
        opt = SGD()
    base_model = backbone(input_shape=input_shape, weights=use_imagenet, include_top=False)  # , classes=NCATS)
    x = base_model.output
    if use_gap_net:
        # Multi-scale descriptor from intermediate feature maps.
        # NOTE(review): the hard-coded layer indices (594, 260, 16, 9) assume
        # a specific backbone architecture — confirm before reuse.
        gap1 = GlobalAveragePooling2D(name='GAP_1')(base_model.layers[594].output)
        gap2 = GlobalAveragePooling2D(name='GAP_2')(base_model.layers[260].output)
        gap3 = GlobalAveragePooling2D(name='GAP_3')(base_model.layers[16].output)
        gap4 = GlobalAveragePooling2D(name='GAP_4')(base_model.layers[9].output)
        gap = GlobalAveragePooling2D(name='GAP_0')(x)
        g_con = Concatenate(name='GAP_LAST')([gap, gap1, gap2, gap3, gap4])
        g_con = Dropout(rate=0.5)(g_con)
    else:
        gap = GlobalAveragePooling2D(name='GAP_LAST')(x)
        g_con = Dropout(rate=0.5)(gap)
    predict = Dense(num_classes, activation='softmax', name='last_softmax')(g_con)
    model = Model(inputs=base_model.input, outputs=predict)
    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
    return model
class DataGenerator(keras.utils.Sequence):
    """Keras Sequence yielding (augmented image batch, one-hot label batch)."""

    def __init__(self, image_paths, input_shape, labels, batch_size, aug_seq, num_classes, use_aug = True, shuffle = True, mean=None):
        """Store generator settings and build the first epoch's index order.

        image_paths : array of file paths; indexed with fancy indexing, so it
            is assumed to be a numpy array — TODO confirm at call sites.
        input_shape : (height, width, channels) of the yielded batches.
        labels : one-hot label matrix aligned with image_paths.
        batch_size : samples per batch.
        aug_seq : imgaug augmentation sequence applied when use_aug is True.
        num_classes : number of label columns.
        use_aug : whether to apply aug_seq to each batch.
        shuffle : reshuffle sample order at every epoch end.
        mean : per-channel mean forwarded to normal_inputs().
        """
        self.image_paths = image_paths
        self.input_shape = input_shape
        self.batch_size = batch_size
        self.labels = labels
        self.aug_seq = aug_seq
        self.use_aug = use_aug
        self.num_classes = num_classes
        self.shuffle = shuffle
        # on_epoch_end() only builds self.indexes and does not read self.mean,
        # so calling it before the assignment below is safe.
        self.on_epoch_end()
        self.mean = mean

    def __len__(self):
        """Number of batches per epoch (last batch is padded, see on_epoch_end)."""
        return int(np.ceil(len(self.image_paths) / float(self.batch_size)))

    def __getitem__(self, index):
        """Generate one full batch of data."""
        batch_features = np.zeros((self.batch_size, self.input_shape[0], self.input_shape[1], self.input_shape[2]))
        features = []
        batch_labels = np.zeros((self.batch_size, self.num_classes))
        indexes = self.indexes[index*self.batch_size:(index+1)* self.batch_size]
        files = self.image_paths[indexes]
        # NOTE(review): read_image_batch and normal_inputs are not defined or
        # imported in this file as shown — presumably project helpers; confirm.
        features = read_image_batch(files, (self.input_shape[0], self.input_shape[1]))
        features = np.array(features)
        if self.use_aug == True:
            batch_features[:,:,:,:] = self.aug_seq.augment_images(features)
        else:
            batch_features[:,:,:,:] = features
        batch_labels[:,:] = self.labels[indexes]
        batch_features = normal_inputs(batch_features,self.mean)
        return batch_features, batch_labels

    def on_epoch_end(self):
        """Rebuild (and optionally shuffle) the sample index order.

        Pads the index list with random duplicates so that every batch,
        including the last one, contains exactly batch_size samples.
        """
        org_idx = np.arange(len(self.image_paths))
        mod_idx = np.random.choice(org_idx, (self.__len__()*self.batch_size) - len(self.image_paths))
        self.indexes = np.concatenate([org_idx,mod_idx])
        if self.shuffle == True:
            np.random.shuffle(self.indexes)
class report_nsml(keras.callbacks.Callback):
    """Keras callback reporting epoch metrics to NSML and saving a checkpoint.

    prefix : str -- checkpoint name prefix.
    seed : int -- run seed embedded in the checkpoint name.
    """

    def __init__(self, prefix, seed):
        self.prefix = prefix
        self.seed = seed

    def on_epoch_end(self, epoch, logs=None):
        # `logs=None` replaces the mutable default `logs={}` (shared dict
        # across calls); Keras passes logs explicitly, but the default must
        # still be safe for direct calls.
        if logs is None:
            logs = {}
        nsml.report(summary=True, epoch=epoch, loss=logs.get('loss'), val_loss=logs.get('val_loss'),acc=logs.get('acc'),val_acc=logs.get('val_acc'))
        nsml.save(self.prefix +'_'+ str(self.seed)+'_' +str(epoch))
if __name__ == '__main__':
args = argparse.ArgumentParser()
# hyperparameters
args.add_argument('--epoch', type=int, default=50)
args.add_argument('--batch_size', type=int, default=26)
args.add_argument('--num_classes', type=int, default=1383)
# DONOTCHANGE: They are reserved for nsml
args.add_argument('--mode', type=str, default='train', help='submit일때 해당값이 test로 설정됩니다.')
args.add_argument('--iteration', type=str, default='0',
help='fork 명령어를 입력할때의 체크포인트로 설정됩니다. 체크포인트 옵션을 안주면 마지막 wall time 의 model 을 가져옵니다.')
args.add_argument('--pause', type=int, default=0, help='model 을 load 할때 1로 설정됩니다.')
config = args.parse_args()
sometimes = lambda aug: iaa.Sometimes(0.5, aug)
lesssometimes = lambda aug: iaa.Sometimes(0.3, aug)
seq = iaa.Sequential(
[
iaa.SomeOf((0, 3),[
iaa.Fliplr(0.5), # horizontally flip 50% of all images
iaa.Flipud(0.2), # vertically flip 20% of all images
sometimes(iaa.CropAndPad(
percent=(-0.1, 0.2),
pad_mode=['reflect']
)),
sometimes( iaa.OneOf([
iaa.Affine(rotate=0),
iaa.Affine(rotate=90),
iaa.Affine(rotate=180),
iaa.Affine(rotate=270)
])),
sometimes(iaa.Affine(
scale={"x": (0.7, 1.3), "y": (0.7, 1.3)},
translate_percent={"x": (-0.1, 0.1), "y": (-0.1, 0.1)},
rotate=(-45, 45), # rotate by -45 to +45 degrees
shear=(-5, 5),
order=[0, 1], # use nearest neighbour or bilinear interpolation (fast)
mode=['reflect']
)),
lesssometimes( iaa.SomeOf((0, 5),
[
iaa.OneOf([
iaa.GaussianBlur((0, 3.0)),
iaa.AverageBlur(k=(2, 7)),
iaa.MedianBlur(k=(3, 5)),
]),
iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)),
iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)),
sometimes(iaa.OneOf([
iaa.EdgeDetect(alpha=(0, 0.7)),
iaa.DirectedEdgeDetect(
alpha=(0, 0.7), direction=(0.0, 1.0)
),
])),
iaa.AdditiveGaussianNoise(
loc=0, scale=(0.0, 0.05*255), per_channel=0.5
),
iaa.OneOf([
iaa.Dropout((0.01, 0.1), per_channel=0.5),
iaa.CoarseDropout(
(0.03, 0.15), size_percent=(0.02, 0.05),
per_channel=0.2
),
]),
iaa.Invert(0.05, per_channel=True), # invert color channels
iaa.Add((-10, 10), per_channel=0.5),
iaa.Multiply((0.5, 1.5), per_channel=0.5),
iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5),
iaa.Grayscale(alpha=(0.0, 1.0)),
sometimes(
iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)
),
sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.05)))
],
random_order=True
)),
]),
],
random_order=True
)
# training parameters
nb_epoch = config.epoch
batch_size = config.batch_size #inception resnetv2 299 60 , seresnext101 299 26
num_classes = config.num_classes
input_shape = (299,299,3)#(299,299,3)#(224, 224, 3) # input | |
<filename>emd_waveform_fig345.py
#!/usr/bin/python
# vim: set expandtab ts=4 sw=4:
# %% -----------------------------------------------------
#
# This script runs the simulations and analysis of the noisy 12Hz oscillator
# seen in figures 3, 4 and 5. The oscillation is generated and some general EMD
# and wavelet frequency metrics are computed. The three figures are then
# generated using these variables.
# %% -----------------------------------------------------
# Imports and definitions
import os
import emd
import sails
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal, stats, ndimage
from emd_waveform_utils import config
import matplotlib
matplotlib.rc('font', serif=config['fontname'])
# %% ---------------------------------------------------
# Define systems from Feynman Vol 1 50-6
def linear_system(x, K):
    """Linear system: scale the input signal *x* by gain *K*."""
    scaled = x * K
    return scaled
def nonlinear_system(x, K, eta=.43, power=2):
    """Non-linear system: scale *x* by *K* after adding a polynomial
    waveform distortion of strength *eta* and order *power*."""
    distortion = eta * x ** power
    return K * (x + distortion)
# %% ---------------------------------------------------
# Generate simulated data
# Create 60 seconds of data at 12Hz
peak_freq = 12
sample_rate = 512
seconds = 60
noise_std = None
x = emd.utils.ar_simulate(peak_freq, sample_rate, seconds,
                          noise_std=noise_std, random_seed=42, r=.99)
x = x * 1e-5
t = np.linspace(0, seconds, seconds*sample_rate)
# Apply linear and non-linear equations and add noise
# NOTE(review): the additive noise below uses the unseeded global RNG, so
# these signals are not reproducible across runs even though ar_simulate is
# seeded — confirm whether that is intended.
x_linear_raw = linear_system(x, K=1)
x_nonlinear_raw = nonlinear_system(x, K=1, eta=2)
x_linear = x_linear_raw + np.random.randn(len(t), 1)*2e-2
x_nonlinear = x_nonlinear_raw + np.random.randn(len(t), 1)*2e-2
# %% ---------------------------------------------------
# Run frequency analyses
# Welch's Periodogram
f, pxx_linear = signal.welch(x_linear[:, 0], fs=sample_rate, nperseg=2048)
f, pxx_nonlinear = signal.welch(x_nonlinear[:, 0], fs=sample_rate, nperseg=2048)
# EMD: mask sift with a 120-cycles-per-sample starting mask frequency.
sift_config = {'imf_opts': {'sd_thresh': 5e-2},
               'mask_freqs': 120/sample_rate,
               'mask_amp_mode': 'ratio_sig',
               'mask_step_factor': 2.5}
imf_linear = emd.sift.mask_sift(x_linear, **sift_config)
imf_nonlinear = emd.sift.mask_sift(x_nonlinear, **sift_config)
# Instantaneous phase/frequency/amplitude via the Hilbert transform.
IP_linear, IF_linear, IA_linear = emd.spectra.frequency_transform(imf_linear, sample_rate, 'hilbert')
IP_nonlinear, IF_nonlinear, IA_nonlinear = emd.spectra.frequency_transform(imf_nonlinear, sample_rate, 'hilbert')
def my_range(x):
    """Return the peak-to-peak range (max minus min) of *x*."""
    lo, hi = x.min(), x.max()
    return hi - lo
def asc2desc(x):
    """Ascending to Descending ratio ( A / A+D ).

    Returns the fraction of the cycle spent ascending (start->peak plus
    trough->end); NaN when no peak or trough can be located. The original
    computed an unused `desc = tt - pt` local — removed, since the cycle
    length len(x) already equals asc + desc.
    """
    pt = emd.cycles.cf_peak_sample(x, interp=True)
    tt = emd.cycles.cf_trough_sample(x, interp=True)
    if (pt is None) or (tt is None):
        return np.nan
    asc = pt + (len(x) - tt)
    return asc / len(x)
def peak2trough(x):
    """Peak to trough ratio ( P / P+T ).

    NaN when no descending zero-crossing can be located.
    """
    dz = emd.cycles.cf_descending_zero_sample(x, interp=True)
    return np.nan if dz is None else dz / len(x)
# Per-cycle metrics on IMF-3 of the linear signal.
Cl = emd.cycles.Cycles(IP_linear[:, 2])
Cl.compute_cycle_metric('max_amp', IA_linear[:, 2], np.max)
Cl.compute_cycle_metric('max_if', IF_linear[:, 2], np.max)
Cl.compute_cycle_metric('if_range', IF_linear[:, 2], my_range)
# Per-cycle metrics on IMF-3 of the non-linear signal; the shape metrics
# (asc2desc, peak2trough) are only needed for the non-linear case.
Cn = emd.cycles.Cycles(IP_nonlinear[:, 2])
Cn.compute_cycle_metric('max_amp', IA_nonlinear[:, 2], np.max)
Cn.compute_cycle_metric('max_if', IF_nonlinear[:, 2], np.max)
Cn.compute_cycle_metric('if_range', IF_nonlinear[:, 2], my_range)
Cn.compute_cycle_metric('asc2desc', imf_nonlinear[:, 2], asc2desc)
Cn.compute_cycle_metric('peak2trough', imf_nonlinear[:, 2], peak2trough)
# Keep only well-formed, high-amplitude cycles with plausible frequencies.
conditions = ['is_good==1', 'max_amp>0.04', 'if_range<8', 'max_if<18']
# Phase-aligned instantaneous frequency across the retained cycles.
pa_linear, phase_x = emd.cycles.phase_align(IP_linear[:, 2], IF_linear[:, 2],
                                            cycles=Cl.iterate(conditions=conditions))
pa_nonlinear, phase_x = emd.cycles.phase_align(IP_nonlinear[:, 2], IF_nonlinear[:, 2],
                                               cycles=Cn.iterate(conditions=conditions))
df_nonlinear = Cn.get_metric_dataframe(conditions=conditions)
# %% --------------------------------------------------
# Time-frequency transform
# Hilbert-Huang Transform: 1-D energy spectra normalised by signal length.
edges, centres = emd.spectra.define_hist_bins(0, 40, 64)
spec_linear = emd.spectra.hilberthuang_1d(IF_linear, IA_linear, edges, mode='energy')/x_linear.shape[0]
spec_nonlinear = emd.spectra.hilberthuang_1d(IF_nonlinear, IA_nonlinear, edges, mode='energy')/x_nonlinear.shape[0]
# Carrier frequency histogram definition
edges, bins = emd.spectra.define_hist_bins(2, 35, 64, 'linear')
# Compute the 2d Hilbert-Huang transform (power over time x carrier frequency)
hht_linear = emd.spectra.hilberthuang(IF_linear[:, 2], IA_linear[:, 2], edges, mode='amplitude')
hht_nonlinear = emd.spectra.hilberthuang(IF_nonlinear[:, 2], IA_nonlinear[:, 2], edges, mode='amplitude')
# Smooth HHTs to help visualisation
# NOTE(review): the smoothing widths differ (0.5 vs 1) between conditions —
# confirm this asymmetry is intentional.
hht_linear = ndimage.gaussian_filter(hht_linear, .5)
hht_nonlinear = ndimage.gaussian_filter(hht_nonlinear, 1)
# Compute 2d wavelet transform for comparison with the HHT.
cwt_linear = sails.wavelet.morlet(x_linear[:, 0], bins, sample_rate, normalise='simple', ret_mode='amplitude')
cwt_nonlinear = sails.wavelet.morlet(x_nonlinear[:, 0], bins, sample_rate, normalise='simple', ret_mode='amplitude')
# %% --------------------------------------------------
# FIGURE 3 - Example system with time-frequency transforms
def decorate_ax(ax):
    """Hide the top and right spines of a matplotlib axes."""
    for side in ('top', 'right'):
        ax.spines[side].set_visible(False)
# Window of samples shown in figure 3.
inds = np.arange(7550, 8550)
width = config['3col_width'] / 25.4
height = width
# Per-sample mask of cycles passing the quality conditions.
matches = Cn.get_matching_cycles(conditions)
goods = emd._cycles_support.project_cycles_to_samples(matches, Cn.cycle_vect)[:, 0]
plt.figure(figsize=(width*2, height*2))
# Plot time-series
plt.axes([.1, .5, .875, .45], frameon=False)
plt.xticks([])
plt.yticks([])
plt.plot(x_nonlinear[inds]+0.5, 'k')
plt.plot(imf_nonlinear[inds, 2:].sum(axis=1)-0.25, 'g')
plt.text(-50, 1, 'Cycle #', verticalalignment='center', horizontalalignment='right')
plt.text(-50, 0.5, 'Signal', verticalalignment='center', horizontalalignment='right')
plt.text(-50, -.2, 'IMF-3', verticalalignment='center', horizontalalignment='right')
# Instantaneous Phase
ip = IP_nonlinear[inds, 2]
# NOTE(review): the next two assignments are immediately overwritten by the
# two after them, so the phase-jump term of bad_cycles is dead code — confirm
# whether the logical_or version was the intended definition.
bad_cycles = np.logical_or(np.diff(ip) < -3, goods[inds[:-1]] == False)
bad_cycles = np.r_[bad_cycles, True]
bad_cycles = goods[inds[:-1]] == False
bad_cycles = np.r_[bad_cycles, True]
# Blank phase wraps so the trace is not drawn across the 2*pi jumps.
ip[np.where(np.diff(ip) < -3)[0]] = np.nan
to_plot = ip/15 - 1.15  # scale/offset phase into the stacked-axes layout
plt.plot(to_plot)
#to_plot[:np.where(np.isnan(to_plot))[0][17]] = np.nan
to_plot[bad_cycles == False] = np.nan
plt.plot(to_plot, 'r')  # rejected cycles highlighted in red
mn = np.nanmin(to_plot)
mx = np.nanmax(to_plot)
# Hand-drawn axis and gridlines for the phase trace.
plt.plot([-25, -25], [mn, mx], 'k')
plt.plot([-35, len(inds)], [mx, mx], color=[.8, .8, .8], linewidth=.5)
plt.plot([-35, len(inds)], [np.mean((mn, mx)), np.mean((mn, mx))], color=[.8, .8, .8], linewidth=.5)
plt.plot([-35, len(inds)], [mn, mn], color=[.8, .8, .8], linewidth=.5)
plt.text(-30, mx, 'pi', verticalalignment='center', horizontalalignment='right')
plt.text(-30, np.mean((mn, mx)), '0', verticalalignment='center', horizontalalignment='right')
plt.text(-30, mn, '-pi', verticalalignment='center', horizontalalignment='right')
plt.text(-105, np.mean((mn, mx)), 'Instantaneous\nPhase (rads)', ha='center', va='center', rotation=0)
# Instantaneous Frequency
frange = emd._cycles_support.project_cycles_to_samples(Cn.metrics['if_range'], Cn.cycle_vect)[:, 0]
iif = IF_nonlinear[inds, 2].copy()
#iif[goods==0] = np.nan
iif[bad_cycles] = np.nan
to_plot = iif/20 - 2.15  # scale/offset frequency into the stacked-axes layout
plt.plot(to_plot)
freq_range = np.array([8, 12, 16])
freq_range_conv = freq_range/20 - 2.2
mn = np.nanmin(to_plot)
mx = np.nanmax(to_plot)
# Hand-drawn axis and gridlines for the frequency trace.
plt.plot([-25, -25], [mn, mx], 'k')
plt.plot([-35, len(inds)], [mx, mx], color=[.8, .8, .8], linewidth=.5)
plt.plot([-35, len(inds)], [np.mean((mn, mx)), np.mean((mn, mx))], color=[.8, .8, .8], linewidth=.5)
plt.plot([-35, len(inds)], [mn, mn], color=[.8, .8, .8], linewidth=.5)
for ii in range(3):
    plt.text(-30, freq_range_conv[ii], '{0}Hz'.format(freq_range[ii]),
             verticalalignment='center', horizontalalignment='right')
plt.text(-105, freq_range_conv[1], 'Instantaneous\nFrequency (Hz)', ha='center', va='center', rotation=0)
# Cycle Boundaries: vertical lines plus cycle numbers along the top.
yl = plt.ylim()
cycle_bounds = np.where(np.diff(Cn.cycle_vect[inds, 0]) > .5)[0]
for ii in range(len(cycle_bounds)):
    plt.plot([cycle_bounds[ii], cycle_bounds[ii]], [-2.2, 1.4], color=[.8, .8, .8], linewidth=.5)
    if ii < len(cycle_bounds)-1:
        plt.text( (cycle_bounds[ii]+cycle_bounds[ii+1])/2, 1, str(ii+1), horizontalalignment='center')
plt.ylim(yl)
plt.xlim(-55, 896)
# Hilbert-Huang Transform panel: time-frequency power with cycle boundaries.
tt = np.linspace(0, len(inds)/sample_rate, len(inds))  # window time axis (s)
plt.axes([.15, .275, .825, .2])
pcm = plt.pcolormesh(tt, bins, hht_nonlinear[:, inds], cmap='hot_r', vmin=0, vmax=.175)
yl = plt.ylim()
for ii in range(len(cycle_bounds)):
    # BUG FIX: the second x-coordinate previously used `t` (the full-length
    # time vector, indexed with a window-relative index) instead of `tt`,
    # drawing slanted/misplaced boundary lines. Both ends must use `tt`,
    # matching the identical loop in the wavelet panel below.
    plt.plot([tt[cycle_bounds[ii]], tt[cycle_bounds[ii]]], [0, bins[-1]], color=[.8, .8, .8], linewidth=.5)
plt.ylim(yl)
for tag in ['top', 'right']:
    plt.gca().spines[tag].set_visible(False)
plt.gca().set_xticklabels([])
plt.ylabel('Frequency (Hz)')
plt.xlim(0, 1.75)
ax = plt.axes([.97, .285, .015, .18])  # colourbar axes
cb = plt.colorbar(pcm, cax=ax)
ax.yaxis.set_ticks_position('left')
cb.set_label('Power')
# Wavelet Transform panel: same layout as the HHT panel, for comparison.
plt.axes([.15, .05, .825, .2])
pcm = plt.pcolormesh(tt, bins, cwt_nonlinear[:, inds], cmap='hot_r', vmin=0, vmax=.175)
yl = plt.ylim()
for ii in range(len(cycle_bounds)):
    plt.plot([tt[cycle_bounds[ii]], tt[cycle_bounds[ii]]], [0, bins[-1]], color=[.8, .8, .8], linewidth=.5)
plt.ylim(yl)
for tag in ['top', 'right']:
    plt.gca().spines[tag].set_visible(False)
plt.ylabel('Frequency (Hz)')
plt.xlabel('Time (seconds)')
plt.xlim(0, 1.75)
ax = plt.axes([.97, .06, .015, .18])  # colourbar axes
cb = plt.colorbar(pcm, cax=ax)
ax.yaxis.set_ticks_position('left')
cb.set_label('Power')
outname = os.path.join(config['figdir'], 'emd_fig3_simu_decomp.png')
plt.savefig(outname, dpi=300, transparent=True)
# %% --------------------------------------------------
# FIGURE 4 - PHASE ALIGNMENT IN SIMULATION
# Get temporally aligned waveforms and instantaneous frequencies.
# NOTE(review): the loop variable `inds` below clobbers the figure-3 sample
# window of the same name; nothing after this point appears to reuse the old
# value, but confirm before reordering sections.
waveform_linear = np.zeros((100, Cl.ncycles))*np.nan
instfreq_linear = np.zeros((100, Cl.ncycles))*np.nan
for ii, inds in Cl.iterate(conditions=conditions):
    waveform_linear[:len(inds), ii] = imf_linear[inds, 2]
    instfreq_linear[:len(inds), ii] = IF_linear[inds, 2]
ctrl_linear = emd.cycles.get_control_points(imf_linear[:, 2], Cl.iterate(conditions=conditions), interp=True)
ctrl_mets_linear = emd.cycles.get_control_point_metrics(ctrl_linear)
waveform_nonlinear = np.zeros((100, Cn.ncycles))*np.nan
instfreq_nonlinear = np.zeros((100, Cn.ncycles))*np.nan
for ii, inds in Cn.iterate(conditions=conditions):
    waveform_nonlinear[:len(inds), ii] = imf_nonlinear[inds, 2]
    instfreq_nonlinear[:len(inds), ii] = IF_nonlinear[inds, 2]
ctrl_nonlinear = emd.cycles.get_control_points(imf_nonlinear[:, 2], Cn.iterate(conditions=conditions), interp=True)
ctrl_mets_nonlinear = emd.cycles.get_control_point_metrics(ctrl_nonlinear)
# Sort cycles by duration (descending) using the last control point.
I = np.argsort(ctrl_nonlinear[:, 4])[::-1]
# Segment image: samples of each cycle labelled by quarter (1..4).
segments = np.zeros((ctrl_nonlinear.shape[0], 60))*np.nan
for ii in range(ctrl_nonlinear.shape[0]):
    for jj in range(1, ctrl_nonlinear.shape[1]):
        # Round segments to ints for visualisation
        segments[ii, int(np.floor(ctrl_nonlinear[ii, jj-1])):int(np.ceil(ctrl_nonlinear[ii, jj]))] = jj
# Figure start
width = config['2col_width'] / 25.4  # mm -> inches
height = width
plt.figure(figsize=(width*2, height*2))
# Plot control point segments (cycles sorted by duration)
plt.axes([.1, .1, .2, .65])
plt.pcolormesh(segments[I, :])
plt.xticks(np.linspace(0, 40, 3))
decorate_ax(plt.gca())
plt.ylabel('Cycles (sorted)')
plt.xticks(np.linspace(0, 0.08*sample_rate, 5), np.linspace(0, 80, 5).astype(int))
plt.xlabel('Time (ms)')
# Small legend axes: one sinusoid quarter per segment colour
plt.axes([.1, .775, .144, .075], frameon=False)
plt.xticks([])
plt.yticks([])
cols = plt.cm.viridis(np.linspace(0, 1, 4))
for ii in range(4):
    xvals = np.linspace(0, .25)+.25*ii
    plt.plot(xvals, np.sin(2*np.pi*xvals), linewidth=3, color=cols[ii, :])
# Plot control point metrics
plt.axes([.31, .1, .1, .65])
plt.plot(ctrl_mets_nonlinear[0][I], np.arange(len(ctrl_mets_nonlinear[0])), '.')
plt.plot(ctrl_mets_nonlinear[1][I], np.arange(len(ctrl_mets_nonlinear[0])), '+')
plt.plot(np.zeros_like(ctrl_mets_nonlinear[1][I]), np.arange(len(ctrl_mets_nonlinear[0])), 'k', linewidth=.5)
plt.xlim(-.3, .3)
plt.ylim(0, ctrl_nonlinear.shape[0])
plt.yticks([])
decorate_ax(plt.gca())
# Histograms of the two control-point ratio metrics
plt.axes([.31, .775, .1, .15])
plt.hist(ctrl_mets_nonlinear[0], np.linspace(-1, 1), alpha=.5)
plt.hist(ctrl_mets_nonlinear[1], np.linspace(-1, 1), alpha=.5)
plt.xlim(-.3, .3)
plt.xticks(np.linspace(-.25, .25, 3), [])
plt.legend(['Peak/Trough', 'Ascent/Descent'], frameon=False,
           fontsize=8, loc='center', bbox_to_anchor=(0.175, 0.45, 1, 1))
plt.ylim(0, 250)
decorate_ax(plt.gca())
plt.title('Control-Point Ratios')
# Plot temporally aligned instantaneous frequency
plt.axes([.5, .1, .2, .65])
plt.pcolormesh(instfreq_nonlinear[:, I].T)
decorate_ax(plt.gca())
plt.xticks(np.linspace(0, 0.08*sample_rate, 5), np.linspace(0, 80, 5).astype(int))
plt.xlabel('Time (ms)')
plt.xlim(0, 60)
plt.axes([.5, .775, .2, .15])
#plt.plot(instfreq_nonlinear, color=[.8, .8, .8])
plt.plot(np.nanmean(instfreq_nonlinear, axis=1))
decorate_ax(plt.gca())
plt.title('Cycle-Onset Aligned IF')
plt.xlim(0, 60)
plt.xticks(np.linspace(0, 0.08*sample_rate, 5), [])
# Plot phase aligned instantaneous frequency
plt.axes([.75, .1, .2, .65])
pcm = plt.pcolormesh(pa_nonlinear[:, I].T)
plt.xticks(np.arange(5)*12, ['-pi', '-pi/2', '0', 'pi/2', 'pi'])
plt.xlabel('Theta Phase')
plt.yticks(np.arange(6)*100, [])
plt.axes([.75, .775, .2, .15])
#plt.plot(pa_nonlinear[:, :-1], color=[.8, .8, .8])
plt.plot(np.nanmean(pa_nonlinear[:, :-1], axis=1))
plt.xlim(0, 48)
decorate_ax(plt.gca())
plt.xticks(np.arange(5)*12, [])
plt.title('Phase-Aligned IF')
# Inst. freq colourbar (shared by the two aligned-IF images)
ax = plt.axes([.685, .45, .015, .18])
cb = plt.colorbar(pcm, cax=ax)
ax.yaxis.set_ticks_position('left')
plt.title('Instantaneous\nFrequency (Hz)', fontsize=9)
outname = os.path.join(config['figdir'], 'emd_fig4_simu_phasealign.png')
plt.savefig(outname, dpi=300, transparent=True)
# %% --------------------------------------------------
# FIGURE 4 - PHASE ALIGNMENT IN SIMULATION : REVISED
I2 = I[::5]  # subsample every 5th cycle to keep the scatter panels readable
width = config['2col_width'] / 25.4   # mm -> inches
height = config['3col_width'] / 25.4
col_height = 0.45
top_height = 0.3
# Figure start
plt.figure(figsize=(width*3, height*2))
# Plot control point positions per cycle (sorted by duration)
plt.axes([.1, .1, .2, col_height])
#plt.pcolormesh(segments[I2, :])
plt.plot(ctrl_nonlinear[I2, 1], np.arange(len(I2)), '^')
plt.plot(ctrl_nonlinear[I2, 2], np.arange(len(I2)), 'x')
plt.plot(ctrl_nonlinear[I2, 3], np.arange(len(I2)), 'v')
plt.plot(ctrl_nonlinear[I2, 4], np.arange(len(I2)), '.')
plt.legend(['Peak', 'Desc', 'Trough', 'Asc'], frameon=False, loc='center', bbox_to_anchor=(0.4, 0.2, 1, 1))
plt.xticks(np.linspace(0, 64, 5), (np.linspace(0, 125, 5)).astype(int))
plt.xlabel('Time (ms)')
plt.xlim(0, 64)
plt.ylim(0, len(I2))
plt.ylabel('# Cycle (Sorted by duration)')
decorate_ax(plt.gca())
# Control-point ratio histograms; 0.5 marks a perfect sinusoid
plt.axes([.1, .6, .2, top_height-0.05])
plt.plot((0.5, 0.5), (0, 800), 'k--')
#plt.hist(ctrl_mets_nonlinear[0][I], np.linspace(-1, 1), alpha=.5)
#plt.hist(ctrl_mets_nonlinear[1][I], np.linspace(-1, 1), alpha=.5)
plt.hist(df_nonlinear['peak2trough'].values, np.linspace(0, 1), alpha=0.5)
plt.hist(df_nonlinear['asc2desc'].values, np.linspace(0, 1), alpha=0.5)
#plt.xticks(np.linspace(-.25, .25, 3))
plt.legend(['Sinusoid', 'Peak/Trough', 'Ascent/Descent'], frameon=False,
           fontsize=10, loc='center', bbox_to_anchor=(0.5, 0.4, 1, 1))
decorate_ax(plt.gca())
plt.xlim(1/3, 2/3)
plt.ylim(0, 250)
plt.title('Control-Point Ratios\n')
plt.xlabel('Ratio')
plt.ylabel('Num Cycles')
# Plot temporally aligned instantaneous frequency
plt.axes([.425, .1, .2, col_height])
plt.imshow(instfreq_nonlinear[:64, I2].T, interpolation='nearest', vmin=6, vmax=14, aspect='auto', origin='lower')
decorate_ax(plt.gca())
plt.xticks(np.linspace(0, 64, 5), (np.linspace(0, 125, 5)).astype(int))
plt.xlabel('Time (ms)')
plt.xlim(0, 64)
# Mean IF with standard error of the mean across cycles
plt.axes([.425, .6, .2, top_height/2])
mn = np.nanmean(instfreq_nonlinear[:, I], axis=1)
sem = np.nanstd(instfreq_nonlinear[:, I], axis=1)
sem = sem / np.sqrt(np.sum(np.isnan(instfreq_nonlinear[:, I]) == False, axis=1))
plt.errorbar(np.arange(100), mn, yerr=sem, errorevery=4)
decorate_ax(plt.gca())
plt.xticks(np.linspace(0, 64, 5), (np.linspace(0, 125, 5)).astype(int))
plt.xlim(0, 64)
plt.legend(['Avg IF (std-error of mean)'], loc='center', bbox_to_anchor=(0.3, 0.5, 1, 1), frameon=False)
plt.ylabel('Instantaneous\nFrequency (Hz)')
# Average waveform above the aligned-IF image
plt.axes([.425, .8, .2, 0.075])
plt.plot(np.nanmean(waveform_nonlinear[:, I], axis=1), 'k')
for tag in ['top', 'right', 'bottom']:
    plt.gca().spines[tag].set_visible(False)
plt.xticks([])
plt.ylim(-0.1, 0.1)
plt.legend(['Avg Waveform'], loc='center', bbox_to_anchor=(0.3, 0.5, 1, 1), frameon=False)
plt.xlim(0, 64)
plt.ylabel(r'Amplitude (a.u.)')
plt.title('Cycle-Onset Alignment\n\n')#\nInstantaneous. Frequency\n(std-error of mean)')
# Plot phase aligned instantaneous frequency
plt.axes([.75, .1, .2, col_height])
pcm = plt.imshow(pa_nonlinear[:, I2].T, interpolation='nearest', vmin=6, vmax=14, aspect='auto', origin='lower')
decorate_ax(plt.gca())
plt.xticks(np.arange(5)*12, ['-pi', '-pi/2', '0', 'pi/2', 'pi'])
plt.xlabel('Theta Phase (rads)')
plt.axes([.75, .6, .2, top_height/2])
mn = np.nanmean(pa_nonlinear[:, I], axis=1)
sem = np.nanstd(pa_nonlinear[:, I], axis=1) / np.sqrt(I.shape[0])
plt.errorbar(np.arange(48), mn, yerr=sem, errorevery=2)
plt.xlim(0, 48)
decorate_ax(plt.gca())
plt.xticks(np.arange(5)*12, ['-pi', '-pi/2', '0', 'pi/2', 'pi'])
plt.ylabel('Instantaneous\nFrequency (Hz)')
plt.legend(['Avg IF | |
, _ ("dec") : 12, _ ("december") : 12, 12 : "dec"
}
_Type            = datetime.date       # underlying immutable datetime type wrapped in `_body`
_default_format  = "%Y-%m-%d"          # default strftime/parse format
_kind            = "date"              # keyword used when constructing `_Type`
_init_arg_names  = ("year", "month", "day")
_timetuple_slice = lambda s, tt : tt [:3]   # keep only (year, month, day) of a timetuple

# Accepts ISO `YYYY-MM-DD` / `YYYY/MM/DD`, `DD.MM.YYYY` (numeric or named
# month, same separator enforced via the `\2` backreference) and
# `Month DD, YYYY` forms.
date_pattern = Multi_Regexp \
    ( r"(?P<year> \d{4,4})"
      r"([-/]?)"
      r"(?P<month> \d{2,2})"
      r"\2"
      r"(?P<day> \d{2,2})"
    , r"(?P<day> \d{1,2})"
      r"([-./])"
      r"(?P<month> \d{1,2} | [a-z]{3,})"
      r"\2"
      r"(?P<year> \d{4,4})"
    , r"(?P<month> [a-z]{3,})"
      r"\s"
      r"(?P<day> \d{1,2})"
      r",\s*"
      r"(?P<year> \d{4,4})"
    , flags = re.VERBOSE | re.IGNORECASE
    )

day        = property (TFL.Getter._body.day)
is_weekday = property (lambda s : s.weekday < 5)              # Mon..Fri -> True
month      = property (TFL.Getter._body.month)
wk_ordinal = property (lambda s : (s.ordinal - s.weekday) // 7)   # ordinal number of the week
year       = property (TFL.Getter._body.year)

yad = None ### set for negative `day` arguments

from _CAL.Delta import Date_Delta as Delta
@Once_Property
def delta_T (self) :
    """Arithmetic difference between Terrestrial Dynamical Time and UT in
       seconds.

    >>> Date (1988).delta_T
    56.0
    >>> Date (1995).delta_T
    61.0
    >>> Date (2000).delta_T
    64.0
    >>> Date (2007).delta_T
    65.0
    >>> Date (2010).delta_T
    67.0
    >>> Date (2050).delta_T
    93.0
    >>> Date (2051).delta_T
    Traceback (most recent call last):
      ...
    ValueError: Algorithm is restricted to 1800..2050, fails for 2051
    >>> [Date (y).delta_T for y in
    ...     (1800, 1802, 1822, 1830, 1990, 1972, 1950)]
    [14.0, 12.0, 10.0, 7.0, 57.0, 43.0, 27.0]
    """
    ### see http://sunearth.gsfc.nasa.gov/eclipse/SEcat5/deltat.html
    ### and http://sunearth.gsfc.nasa.gov/eclipse/SEcat5/deltatpoly.html
    ### see <NAME>, ISBN 0-943396-61-1, p.80
    y = self.year
    t = y - 2000.
    if -19 <= t < 5 :
        ### NASA polynomial fit for 1981..2004 (and projection to 2004)
        return round \
            ( 63.86
            + t * ( 0.3345
                  + t * ( -0.060374
                        + t * ( 0.0017275
                              + t * ( 0.000651814
                                    + t * 0.00002373599
                                    )
                              )
                        )
                  )
            )
    elif -200 <= t <= -3 :
        ### historical fit 1800..1997, evaluated in julian centuries from 1900
        t = (self.JD - Date (1900).JD) / 36525.
        return round \
            ( horner
                ( t
                , ( -1.02, 91.02, 265.90, -839.16, -1545.20
                  , 3603.62, 4385.98, -6993.23, -6090.04
                  , 6298.12, 4102.86, -2137.64, -1081.51
                  )
                )
            )
    elif 5 <= t <= 50 :
        ### quadratic extrapolation for 2005..2050
        return round (62.92 + t * (0.32217 + t * 0.005589))
    else :
        raise ValueError \
            ("Algorithm is restricted to 1800..2050, fails for %s" % (y, ))
# end def delta_T
@classmethod
def from_julian (cls, jd, kind = "CJD") :
    """Create an instance from julian day number `jd` of the given `kind`.

    Kinds ending in `"S"` are second-based and are converted back to
    whole days before the ordinal lookup.
    """
    shifted = int (jd) - cls.JD_offset [kind]
    days    = shifted // 86400 if kind.endswith ("S") else shifted
    return cls.from_ordinal (days)
# end def from_julian
@classmethod
def from_ordinal (cls, ordinal) :
    """Create an instance from the proleptic-Gregorian `ordinal` (Rata Die)."""
    body = cls._Type.fromordinal (ordinal)
    return cls (** {cls._kind : body})
# end def from_ordinal
@classmethod
def from_string (cls, s, check_tail = True) :
    """Parse `s` via `date_pattern` and return a new instance.

    Raise `ValueError` if `s` doesn't match or, with `check_tail`, if
    anything but trailing whitespace follows the match.
    """
    match = cls.date_pattern.match (s)
    if not match :
        raise ValueError (s)
    if check_tail and match.end () != len (s.rstrip ()) :
        raise ValueError (s)
    return cls (** cls._from_string_match_kw (s, match))
# end def from_string
@classmethod
def month_from_string (cls, s) :
    """Convert month name or month number string `s` into an int in 1..12.

    Localized/abbreviated names are normalized via `CAL.G8R.Months.LC`
    and looked up in `cls.months`; unknown names fall back to numeric
    parsing, which is range-checked.
    """
    v = CAL.G8R.Months.LC (s)
    try :
        result = cls.months [v]
    except KeyError :
        try :
            result = int (s)
        except Exception as exc :
            error = exc
        else :
            error = None
        # only the numeric fallback needs validating; the name lookup
        # can only yield legal values
        if error or not (1 <= result <= 12) :
            raise ValueError ("Illegal value for month: '%s'" % s)
    return result
# end def month_from_string
@classmethod
def str_dates_in_range (cls, after, before, str_dates) :
    """Yield `(date, str)` for all elements of `str_dates` in `(before, after)`.

    Unparsable entries are skipped silently; the interval is open at
    both ends and each bound is only applied when it is truthy.
    """
    for text in str_dates :
        try :
            date = cls.from_string (text)
        except ValueError :
            continue          # not a recognizable date: skip
        if after and date <= after :
            continue
        if before and date >= before :
            continue
        yield date, text
# end def str_dates_in_range
@Once_Property
def JC_J2000 (self) :
    """Julian Century relative to 2000 (fractional centuries since J2000.0)."""
    return (self.JD - 2451545.0) / 36525.0
# end def JC_J2000
@Once_Property
def julian_epoch (self) :
    """Epoch based on julian years (365.25 days each) relative to 2000.0."""
    return 2000.0 + self.JD2000 / 365.25
# end def julian_epoch
@Once_Property
def month_name (self) :
    """Abbreviated month name for the current locale (strftime `%b`)."""
    return self.strftime ("%b")
# end def month_name
@Once_Property
def ordinal (self) :
    """Rata Die (based on January 1, 1)."""
    return self._body.toordinal ()
# end def ordinal
@Once_Property
def periods (self) :
    """Dict mapping period name to the (first-day, last-day) pair of the
       week, month, quarter, and year containing `self`.
    """
    w = self - self.weekday                             # Monday of this week
    m = self.replace (day = 1)                          # first of this month
    q = m.replace (month = (self.quarter - 1) * 3 + 1)  # first of this quarter
    y = m.replace (month = 1)                           # January 1
    result = dict \
        ( week    = (w, w + 6)
        , month   = (m, m.inc_month (1) - 1)
        , quarter = (q, q.inc_month (3) - 1)
        , year    = (y, y.replace (year = y.year + 1) - 1)
        )
    return result
# end def periods
@Once_Property
def quarter (self) :
    """Quarter of the year, in range 1..4."""
    return (self.month - 1) // 3 + 1
# end def quarter
@Once_Property
def rjd (self) :
    """Relative julian day (day of year, based on January 1 of `self.year`)."""
    return self._body.timetuple ().tm_yday
# end def rjd
@Once_Property
def tuple (self) :
    """`time.struct_time` for this date (time fields zeroed)."""
    return self._body.timetuple ()
# end def tuple
@Once_Property
def week (self) :
    """ISO-8601 week number of this date."""
    return self._body.isocalendar () [1]
# end def week
@Once_Property
def weekday (self) :
    """Day of week as int, Monday == 0 .. Sunday == 6."""
    return self._body.weekday ()
# end def weekday
def inc_month (self, delta) :
    """Return a copy of `self` shifted by `delta` months, carrying into
       the year when the result leaves the range 1..12.
    """
    total          = self.month - 1 + delta
    years, month_0 = divmod (total, 12)
    if years :
        return self.replace (month = month_0 + 1, year = self.year + years)
    return self.replace (month = month_0 + 1)
# end def inc_month
def replace (self, ** kw) :
    """Return a copy with the fields given in `kw` replaced.

    For dates created with a negative `day` argument (counting from the
    end of the month, remembered in `self.yad`), the day-from-end is
    re-resolved for the new month/year unless an explicit `day` is passed.
    """
    if self.yad is None or "day" in kw :
        result = self.__super.replace (** kw)
    else :
        kw ["day"] = 1   # placeholder; the real day is computed from `yad` below
        yad    = self.yad
        result = self.__super.replace (** kw)
        result._body = result._body.replace \
            (day = self._day_from_end (yad, result.month, result.year))
        result.yad = yad
    return result
# end def replace
def _day_from_end (self, yad, month, year) :
    """Resolve negative day index `yad` (-1 == last day of month) for `month`/`year`."""
    from _CAL.Year import Year   # local import avoids a cyclic module dependency
    return Year (year).mmap [month].days [yad].number
# end def _day_from_end
@classmethod
def _from_string_match_kw (cls, s, match) :
    """Build the constructor kw dict from a `date_pattern` match on `s`.

    Month names are translated to their number; all values become ints.
    """
    assert match
    kw = {}
    for key, value in pyk.iteritems (match.groupdict ()) :
        if not value :
            continue          # group did not participate in the match
        if key == "month" :
            value = cls.month_from_string (value)
        kw [key] = int (value)
    return kw
# end def _from_string_match_kw
def _new_object (self, kw) :
    """Create the wrapped `_Type` instance; a negative `day` counts from month end."""
    d = kw ["day"]
    if d < 0 :
        kw ["day"] = self._day_from_end (d, kw ["month"], kw ["year"])
        self.yad   = d   # remember the day-from-end so `replace` can re-apply it
    return self.__super._new_object (kw)
# end def _new_object
def __getattr__ (self, name) :
    """Compute julian-day attributes (the keys of `JD_offset`) lazily.

    Kinds ending in `"S"` are second-based. The computed value is cached
    on the instance via `setattr`, so `__getattr__` runs at most once per
    attribute name.
    """
    if name in self.JD_offset :
        result = self.ordinal + self.JD_offset [name]
        if name.endswith ("S") :
            result *= 86400   # days -> seconds
        setattr (self, name, result)
    else :
        result = self.__super.__getattr__ (name)
    return result
# end def __getattr__
def __add__ (self, rhs) :
    """Add delta `rhs` (delta object or number of days) returning a new date."""
    delta = self._delta (rhs)
    return delta.dt_op (self, operator.add)
# end def __add__
def __sub__ (self, rhs) :
    """Subtract a delta (returning a new date) or another date (returning a `Delta`)."""
    delta = self._delta (rhs)
    if isinstance (delta, CAL._Delta_) :
        result = delta.dt_op (self, operator.sub)
    else :
        # `rhs` is date-like: unwrap it and return the difference as `Delta`
        if hasattr (rhs, "_body") :
            rhs = rhs._body
        result = self.Delta (** {self.Delta._kind : self._body - rhs})
    return result
# end def __sub__
# end class Date
class Date_M (CAL._Mutable_DTW_) :
    """Mutable date object.

    Note that augmented assignment mutates in place, so aliases observe
    the change:

    >>> d1 = d2 = Date_M (2004, 10, 14)
    >>> print (d1, d2)
    2004-10-14 2004-10-14
    >>> d1 += 1
    >>> print (d1, d2)
    2004-10-15 2004-10-15
    """

    Class = Date   # immutable wrapper class backing this mutable facade

# end class Date_M
class _Date_Arg_ (TFL.CAO.Str) :
    """Argument or option with a (calendary) date value"""

    _real_name = "Date"            # name under which TFL.CAO publishes this type
    _CAL_Type  = Date              # class instantiated by `cook`
    _delta_pat = Regexp ("^[-+]")  # a leading sign marks a relative delta

    def cook (self, value, cao = None) :
        """Convert command-line string `value` into a `_CAL_Type` instance.

        `"now"` yields today; a value starting with `+`/`-` is parsed as
        a relative delta added to today; anything else is parsed as an
        absolute date.  An empty value cooks to `None`.
        """
        T = self._CAL_Type
        if value == "now" :
            result = T ()
        elif value :
            if self._delta_pat.match (value) :
                import _CAL.Relative_Delta
                delta  = CAL.Relative_Delta.from_string (value.lstrip ("+"))
                now    = T ()
                result = now + delta
                # a time-valued delta silently upgrades the result to a
                # Date_Time instance; reject that explicitly
                if type (result) is not T :
                    raise TypeError \
                        ( "Wrong delta %r forces Date_Time '%s', "
                          "need Date instead"
                        % (value, result)
                        )
            else :
                result = T.from_string (value)
        else :
            result = None
        return result
    # end def cook

# end class _Date_Arg_
def _main (cmd) :
from _TFL.Caller import Scope
### Usage example for `-regexp` and `-xformat`::
### for f in *.tex; do
### VCMove $f $(python /swing/python/Date.py -regexp '(?P<prefix> .*)_(?P<date> \d{2}-[A-Za-z][a-z]{2}-\d{4}|\d{8})\.?(?P<ext> .*)' -xformat '%(date)s_%(prefix)s.%(ext)s' $f)
### done
if cmd.regexp :
regexp = Regexp (cmd.regexp, re.VERBOSE)
if regexp.search (cmd.base_date) :
base_date = | |
<reponame>hitliaomq/pyemto<filename>pyemto/latticeinputs/latticeinputs.py
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 3 14:25:06 2014
@author: <NAME>
@author: <NAME>
"""
import sys
import numpy as np
import pyemto.common.common as common
class Latticeinputs:
"""Class which is used to communicate with the Bmdl, Kstr and Shape classes.
:returns: None
:rtype: None
"""
def __init__(self):
    """Create the four sub-input generators (BMDL, KSTR, SHAPE and batch).

    Imports are kept local so that the sub-modules are only loaded when a
    Latticeinputs object is actually constructed.
    """
    # Import necessary packages
    from pyemto.latticeinputs.bmdl import Bmdl
    from pyemto.latticeinputs.kstr import Kstr
    from pyemto.latticeinputs.shape import Shape
    from pyemto.latticeinputs.batch import Batch

    self.bmdl = Bmdl()
    self.kstr = Kstr()
    self.shape = Shape()
    self.batch = Batch()
    return
def set_values(self, **kwargs):
    """Forward each keyword argument to every sub-input object that has it.

    A value may be applied to several of the Bmdl/Kstr/Shape/Batch
    objects; if none of them has the attribute a warning is printed.

    :param **kwargs: attribute name/value pairs to distribute
    :type **kwargs: dict
    :returns: None
    :rtype: None
    """
    for key, value in kwargs.items():
        matched = False
        # Try every sub-input object; more than one may accept the key.
        for target in (self.bmdl, self.kstr, self.shape, self.batch):
            if hasattr(target, key):
                target.set_values(key, value)
                matched = True
        if not matched:
            print(
                'WARNING: Neither Bmdl(), Kstr(), Shape() nor Batch_lattice()' +\
                ' classes have the attribute \'{0}\''.format(key))
    return
def distortion(self, lat=None, dist=None, ca=None, index=None, deltas=None, dmaxs=None,
relax=True, relax_index=None, basis=None):
"""A function which sets various class data to create distorted lattice structures.
Distorted lattices are used to calculate elastic constants.
An integer *index* is used to specify for which delta and dmax value we
want to generate the distortion input file data.
Default naming convention for the structure files:
======= = ====
bcc bco = bcco
bcc fco = bccm
fcc bco = fccm
fcc fco = fcco
======= = ====
===== ==================================================================
lat Original undistorted lattice
dist Which distortion we want to calculate.
Possible values: 'ortho' or 'mono' for lat='bcc', 'fcc' or 'hcp'.
ca c over a for hcp structures.
index Index specifying an element in the delta array.
Possible values: 0,1,2,3,4 or 5. 0 = No distortion.
delta Array of displacement values. Optional, default value
is good enough almost always (if not always).
dmax Array of 'dmax' values for KSTR. They have to be chosen in
such fashion so as to keep the number of lattice vectors
constant (or as close to a constant as possible) for all
of the distorted lattices. Optional, only needed when
a custom delta array is used.
===== ==================================================================
:param lat: The original, undistorted lattice (Default value = None)
:type lat: str
:param dist: The type of distortion (Default value = None)
:type dist: str
:param ca: hcp c/a ratio (Default value = None)
:type ca: float
:param index: Index for selecting a delta and dmax from the arrays (Default value = None)
:type index: int
:param deltas: List of delta values (Default value = None)
:type deltas: np.array(float)
:param dmaxs: List of dmax values (Default value = None)
:type dmaxs: np.array(float)
:returns: None
:rtype: None
"""
default_deltas = np.linspace(0.0, 0.05, 6)
delta_tol = 1.0E-6
# Mission critical parameters
# if folder == None:
# folder = "./"
if lat is None:
sys.exit('latticeinputs.distortion(): \'lat\' has to be given!')
elif lat == 'hcp' and ca is None:
sys.exit(
'latticeinputs.distortion(): \'ca\' (c over a) has to be given for hcp structures!')
if dist is None:
sys.exit(
'latticeinputs.distortion(): \'dist\' (distortion) has to be given!')
if index is None:
sys.exit(
'latticeinputs.distortion(): \'index\'' +\
' (delta = delta_array[index]) has to be given!')
defaultDelta = False
defaultDmax = False
if deltas is None:
deltas = default_deltas
defaultDelta = True
else:
# Check whether the input delta array is equivalent to the default
same_delta_count = 0
if len(deltas) == len(default_deltas):
for i in range(len(deltas)):
if np.abs(deltas[i] - default_deltas[i]) < delta_tol:
same_delta_count += 1
if same_delta_count == len(deltas):
deltas = default_deltas
defaultDelta = True
delta = deltas[index]
if defaultDelta == False and dmaxs == None:
sys.exit(
'latticeinputs.distortion(): \'dmax\' has to be' +\
' given when custom \'delta\' values are used!')
elif defaultDelta == False and dmaxs != None:
dmax = dmaxs[index]
elif defaultDelta == True and dmaxs != None:
dmax = dmaxs[index]
# Default dmax will be used
elif defaultDelta == True and dmaxs == None:
if lat == 'bcc':
if dist == 'ortho':
dmax_dict = {
0: 2.25, 1: 2.25, 2: 2.25, 3: 2.25, 4: 2.25, 5: 2.25}
dmax = dmax_dict[index]
elif dist == 'mono':
dmax_dict = {
0: 1.59, 1: 1.59, 0o2: 1.59, 3: 1.59, 4: 1.59, 5: 1.59}
dmax = dmax_dict[index]
elif lat == 'fcc':
if dist == 'ortho':
#dmax_dict = {0:1.6,1:1.6,2:1.6,3:1.6,4:1.6,5:1.6}
# High-accuracy
dmax_dict = {
0: 1.9, 1: 1.9, 2: 1.9, 3: 1.9, 4: 1.9, 5: 1.9}
dmax = dmax_dict[index]
elif dist == 'mono':
#dmax_dict = {0:2.40,1:2.40,2:2.30,3:2.22,4:2.21,5:2.20}
# High-accuracy
dmax_dict = {
0: 2.70, 1: 2.70, 2: 2.70, 3: 2.70, 4: 2.70, 5: 2.65}
dmax = dmax_dict[index]
elif lat == 'hcp':
if dist == 'ortho':
dmax_dict = {
0: 2.52, 1: 2.49, 2: 2.455, 3: 2.43, 4: 2.4, 5: 2.4}
dmax = dmax_dict[index]
#elif dist == 'mono':
# dmax_dict = {
# 0: 2.51, 1: 2.51, 2: 2.51, 3: 2.49, 4: 2.51, 5: 2.49}
# dmax = dmax_dict[index]
elif dist == 'mono':
dmax_dict = {
0: 2.43, 1: 2.435, 2: 2.43, 3: 2.43, 4: 2.445, 5: 2.44}
dmax = dmax_dict[index]
elif lat == 'sc':
if dist == 'ortho':
dmax_dict = {0:2.3,1:2.3,2:2.3,3:2.3,4:2.4,5:2.35}
dmax = dmax_dict[index]
elif dist == 'mono':
dmax_dict = {0:2.3,1:2.3,2:2.3,3:2.3,4:2.35,5:2.35}
dmax = dmax_dict[index]
# With hcp elastic constants, due to the two-atom basis, we have to
# relax the position of the second atom in order to get accurate results.
if relax is True and relax_index is None:
sys.exit(
'latticeinputs.distortion(): \'relax_index\' has to be given, when relax=True!')
#hcpo_disp = np.sqrt(3.0)/6.0*(1.0-delta)/(1.0+delta)
#hcpo_relax = np.linspace(-hcpo_disp,hcpo_disp,5)
#
#hcpm_disp = 2*np.sqrt(3.0)/np.sqrt(1+delta**2)/(1-delta**2)
#hcpm_relax = np.linspace(-hcpm_disp,hcpm_disp,5)
# Details can be found in Vitos' book pages 104-110.
if lat == 'bcc' and dist == 'ortho':
self.set_values(jobname_lat='bcco{0}'.format(index))
self.set_values(lat='bco', dmax=dmax)
latparams = [
1.0, (1.0 - delta) / (1.0 + delta), 1.0 / (1.0 + delta) / (1.0 - delta**2)]
latvectors = [90.0, 90.0, 90.0]
basis = [0.0, 0.0, 0.0]
self.set_values(
latparams=latparams, latvectors=latvectors, basis=basis)
elif lat == 'bcc' and dist == 'mono':
self.set_values(jobname_lat='bccm{0}'.format(index))
self.set_values(lat='fco', dmax=dmax)
latparams = [1.0, (1.0 - delta) / (1.0 + delta),
1.0 / (1.0 + delta) / (1.0 - delta**2) / np.sqrt(2.0)]
latvectors = [90.0, 90.0, 90.0]
basis = [0.0, 0.0, 0.0]
self.set_values(
latparams=latparams, latvectors=latvectors, basis=basis)
elif lat == 'fcc' and dist == 'ortho':
self.set_values(jobname_lat='fcco{0}'.format(index))
self.set_values(lat='fco', dmax=dmax)
latparams = [
1.0, (1.0 - delta) / (1.0 + delta), 1.0 / (1.0 + delta) / (1.0 - delta**2)]
latvectors = [90.0, 90.0, 90.0]
basis = [0.0, 0.0, 0.0]
self.set_values(
latparams=latparams, latvectors=latvectors, basis=basis)
elif lat == 'fcc' and dist == 'mono':
self.set_values(jobname_lat='fccm{0}'.format(index))
self.set_values(lat='bco', dmax=dmax)
latparams = [1.0, (1.0 - delta) / (1.0 + delta),
np.sqrt(2.0) / (1.0 + delta) / (1.0 - delta**2)]
latvectors = [90.0, 90.0, 90.0]
basis = [0.0, 0.0, 0.0]
self.set_values(latparams=latparams, latvectors=latvectors, basis=basis)
elif lat == 'hcp' and dist == 'ortho':
self.set_values(jobname_lat='hcpo{0}'.format(index))
self.set_values(lat='baco',dmax=dmax)
bao = np.sqrt(3.0)*(1.0-delta)/(1.0+delta)
cao = ca/(1.0+delta)/(1.0-delta**2)
latparams = [1.0,bao,cao]
latvectors = [90.0,90.0,90.0]
pos1 = [0.0,0.0,0.0]
pos2 = [0.0,latparams[1]/3.0,latparams[2]/2.0]
if relax:
hcpo_disp = bao/6.0/2.0
hcpo_relax = np.linspace(0.0,hcpo_disp,5)
pos2[1] += hcpo_relax[relax_index]
basis = [pos1,pos2]
self.set_values(latparams=latparams,latvectors=latvectors,basis=basis)
elif lat == 'hcp' and dist == 'mono':
self.set_values(jobname_lat='hcpm{0}'.format(index))
# The following out-commented lines are valid when
# on wants to describe the distorted structure
# as a simple monoclinic with a four atom basis.
# Look Vitos' book page 110.
self.set_values(lat='sm',dmax=dmax)
# WARNING!!
# gamma = the gamma angle = the beta angle in the standard/conventional definition.
gam = np.arccos(2*delta/(1+delta**2))/np.pi*180.0
bam = ca # Distorted b over a
cam = np.sqrt(3.0)/np.sqrt(1.0+delta**2)/(1.0-delta**2) # Distorted c over a
latparams = [1.0,bam,cam]
#bs1 = [1.0,0.0,0.0]
#bs2 = [2.0*delta/(1.0+delta**2)*ca,(1.0-delta**2)/(1.0+delta**2)*ca,0.0]
#bs3 = [0.0,0.0,cam]
#latvectors = [bs1,bs2,bs3]
latvectors = [90,90,gam]
pos1 = [0.0,0.0,0.0]
pos2 = [ca*delta/(1.0+delta**2),ca*(1.0-delta**2)/(1.0+delta**2)/2.0,-cam/3.0]
pos3 = [0.5,0.0,-cam/2.0]
pos4 = [pos2[0]+pos3[0],pos2[1]+pos3[1],pos2[2]+pos3[2]]
basis = [pos1,pos2,pos3,pos4]
# The following lines give the distorted structure
# as a base-centered monoclinic with a two-atom basis.
"""
self.set_values(lat='bacm',dmax=dmax)
# WARNING!!
# gamma = the gamma angle = the beta angle in the standard/conventional definition.
gam = np.arccos(2*delta/(1+delta**2))
| |
<filename>tilings/strategies/requirement_placement.py
import abc
from collections import defaultdict
from functools import reduce
from itertools import chain, product
from typing import Dict, Iterable, Iterator, List, Optional, Set, Tuple, cast
from comb_spec_searcher import DisjointUnionStrategy, StrategyFactory
from comb_spec_searcher.exception import StrategyDoesNotApply
from comb_spec_searcher.strategies import Rule
from permuta import Perm
from permuta.misc import DIR_EAST, DIR_NORTH, DIR_SOUTH, DIR_WEST, DIRS
from tilings import GriddedPerm, Tiling
from tilings.algorithms import RequirementPlacement
# Public strategy factories exported by this module.
__all__ = [
    "PatternPlacementFactory",
    "RequirementPlacementFactory",
    "RowAndColumnPlacementFactory",
    "AllPlacementsFactory",
]

# A cell of a tiling, as an (x, y) == (column, row) pair.
Cell = Tuple[int, int]
class RequirementPlacementStrategy(DisjointUnionStrategy[Tiling, GriddedPerm]):
def __init__(
    self,
    gps: Iterable[GriddedPerm],
    indices: Iterable[int],
    direction: int,
    own_col: bool = True,
    own_row: bool = True,
    ignore_parent: bool = False,
    include_empty: bool = False,
):
    """Strategy placing, for each gp, the point at the paired index in the
    extreme `direction`; with `include_empty` an extra first child avoids
    all of the gps.
    """
    self.gps = tuple(gps)
    self.indices = tuple(indices)
    self.direction = direction
    self.own_row, self.own_col = own_row, own_col
    self.include_empty = include_empty
    # Cells a point can be placed into, in a canonical (sorted) order;
    # this fixes the correspondence between cells and child tilings.
    self._placed_cells = tuple(
        sorted(set(gp.pos[idx] for idx, gp in zip(self.indices, self.gps)))
    )
    # With several gps (or an explicit avoiding child) a child may be empty.
    possibly_empty = self.include_empty or len(self.gps) > 1
    super().__init__(ignore_parent=ignore_parent, possibly_empty=possibly_empty)
def _placed_cell(self, idx: int) -> Cell:
    """Return the cell placed given the index of the child."""
    return self._placed_cells[idx]
def _child_idx(self, idx: int):
    """Return the index of the child given the index of gps placed into."""
    return self._placed_cells.index(self.gps[idx].pos[self.indices[idx]])
def placement_class(self, tiling: Tiling) -> RequirementPlacement:
    """Return the placement algorithm configured for this strategy's rows/cols."""
    return RequirementPlacement(tiling, own_col=self.own_col, own_row=self.own_row)
def decomposition_function(self, tiling: Tiling) -> Tuple[Tiling, ...]:
    """Return the child tilings obtained by placing the point.

    When `include_empty` is set, the first child is `tiling` with all of
    `self.gps` added as obstructions (the avoiding case).
    """
    placement_class = self.placement_class(tiling)
    placed_tilings = placement_class.place_point_of_req(
        self.gps, self.indices, self.direction
    )
    if self.include_empty:
        return (tiling.add_obstructions(self.gps),) + placed_tilings
    return placed_tilings
def extra_parameters(
    self, comb_class: Tiling, children: Optional[Tuple[Tiling, ...]] = None,
) -> Tuple[Dict[str, str], ...]:
    """For each child, map parent assumption-parameters to child parameters.

    Only assumptions that survive on a child (mapped gps remain) get an
    entry in that child's dict.
    """
    if not comb_class.extra_parameters:
        return super().extra_parameters(comb_class, children)
    if children is None:
        children = self.decomposition_function(comb_class)
        if children is None:
            raise StrategyDoesNotApply("Strategy does not apply")
    algo = self.placement_class(comb_class)
    extra_parameters: Tuple[Dict[str, str], ...] = tuple({} for _ in children)
    if self.include_empty:
        # The avoiding child: assumptions map across directly, then drop
        # any gridded perms killed by the new obstructions.
        child = children[0]
        for assumption in comb_class.assumptions:
            mapped_assumption = child.forward_map_assumption(assumption).avoiding(
                child.obstructions
            )
            if mapped_assumption.gps:
                parent_var = comb_class.get_parameter(assumption)
                child_var = child.get_parameter(mapped_assumption)
                extra_parameters[0][parent_var] = child_var
    # Placed children: assumptions must be stretched around the placed cell.
    for idx, (cell, child) in enumerate(
        zip(self._placed_cells, children[1:] if self.include_empty else children)
    ):
        mapped_assumptions = [
            child.forward_map_assumption(ass)
            for ass in algo.stretched_assumptions(cell)
        ]
        for assumption, mapped_assumption in zip(
            comb_class.assumptions, mapped_assumptions
        ):
            if mapped_assumption.gps:
                parent_var = comb_class.get_parameter(assumption)
                child_var = child.get_parameter(mapped_assumption)
                extra_parameters[idx + 1 if self.include_empty else idx][
                    parent_var
                ] = child_var
    return extra_parameters
def direction_string(self):
    """Return the English adjective for this strategy's placement direction.

    Raises ValueError for a direction outside permuta's DIRS; previously
    the method fell off the end and returned None, which surfaced later
    as a confusing TypeError during string concatenation in formal_step.
    """
    names = {
        DIR_EAST: "rightmost",
        DIR_NORTH: "topmost",
        DIR_WEST: "leftmost",
        DIR_SOUTH: "bottommost",
    }
    try:
        return names[self.direction]
    except KeyError:
        raise ValueError("Invalid direction: {}".format(self.direction))
def formal_step(self):
    """Return a human-readable description of this placement.

    The wording specializes progressively: single point in a cell,
    localized pattern, general pattern, whole column/row, and finally
    the fully general multi-requirement form.
    """
    placing = "{}lacing the {} ".format(
        "p" if (self.own_col and self.own_row) else "partially p",
        self.direction_string(),
    )
    if len(self.gps) == 1:
        gp = self.gps[0]
        index = self.indices[0]
        if len(gp) == 1:
            return placing + "point in cell {}".format(gp.pos[index])
        if gp.is_localized():
            return placing + "{} point in {} in cell {}".format(
                (index, gp.patt[index]), gp.patt, gp.pos[index],
            )
        return placing + "{} point in {}".format((index, gp.patt[index]), gp)
    if all(len(gp) == 1 for gp in self.gps):
        # A set of single points: check whether they share a column or row.
        col_indices = set(x for x, _ in [gp.pos[0] for gp in self.gps])
        if len(col_indices) == 1:
            return placing + "point in column {}".format(col_indices.pop())
        row_indices = set(y for _, y in [gp.pos[0] for gp in self.gps])
        if len(row_indices) == 1:
            return placing + "point in row {}".format(row_indices.pop())
    return placing + "point at indices {} from the requirement ({})".format(
        self.indices, ", ".join(str(gp) for gp in self.gps),
    )
def backward_cell_map(self, placed_cell: Cell, cell: Cell) -> Cell:
    """Map a cell of a placed child tiling back onto the parent tiling.

    Placing a point stretches the tiling by two columns and/or two rows
    around ``placed_cell``; this undoes that shift for ``cell``.
    """
    x, y = cell
    if self.own_col:
        if x > placed_cell[0] + 1:
            x -= 2
        elif x == placed_cell[0] + 1:
            x -= 1
    if self.own_row:
        if y > placed_cell[1] + 1:
            y -= 2
        elif y == placed_cell[1] + 1:
            y -= 1
    return x, y
def forward_gp_map(self, gp: GriddedPerm, forced_index: int) -> GriddedPerm:
    """Map ``gp`` onto the stretched tiling given the placed point's index."""
    forced_val = gp.patt[forced_index]
    new_pos: List[Cell] = []
    for idx, (x, y) in enumerate(gp.pos):
        if idx == forced_index:
            # The placed point moves into the freshly inserted column/row.
            if self.own_col:
                x += 1
            if self.own_row:
                y += 1
        else:
            # Points to the right / above the placed point shift past both
            # inserted columns / rows.
            if self.own_col and idx >= forced_index:
                x += 2
            if self.own_row and gp.patt[idx] >= forced_val:
                y += 2
        new_pos.append((x, y))
    return GriddedPerm(gp.patt, new_pos)
def backward_map(
    self,
    tiling: Tiling,
    gps: Tuple[Optional[GriddedPerm], ...],
    children: Optional[Tuple[Tiling, ...]] = None,
) -> GriddedPerm:
    """Map a gridded perm from the relevant child back onto ``tiling``."""
    if children is None:
        children = self.decomposition_function(tiling)
    idx = DisjointUnionStrategy.backward_map_index(gps)
    gp: GriddedPerm = children[idx].backward_map(cast(GriddedPerm, gps[idx]))
    if self.include_empty:
        if idx == 0:
            # The "empty" child is not stretched, so no cell shift is needed.
            return gp
        idx -= 1
    placed_cell = self._placed_cell(idx)
    unstretched = [self.backward_cell_map(placed_cell, cell) for cell in gp.pos]
    return GriddedPerm(gp.patt, unstretched)
def forward_map(
    self,
    tiling: Tiling,
    gp: GriddedPerm,
    children: Optional[Tuple[Tiling, ...]] = None,
) -> Tuple[Optional[GriddedPerm], ...]:
    """
    Map ``gp`` to the unique child containing it.

    Returns a tuple with one slot per child: the mapped gridded perm in
    the slot of the owning child and ``None`` everywhere else.
    """
    indices = gp.forced_point_of_requirement(self.gps, self.indices, self.direction)
    if children is None:
        children = self.decomposition_function(tiling)
    if indices is None:
        # gp avoids the requirement, so it belongs to the first child.
        return (children[0].forward_map(gp),) + tuple(
            None for _ in range(len(children) - 1)
        )
    gps_index, forced_index = indices
    child_index = self._child_idx(gps_index)
    if self.include_empty:
        child_index += 1
    gp = self.forward_gp_map(gp, forced_index)
    # BUG FIX: the trailing padding previously used ``len(children) - 1``,
    # which yielded a tuple of length ``len(children) + child_index`` whenever
    # ``child_index > 0``.  Pad so the result has exactly one slot per child.
    return (
        tuple(None for _ in range(child_index))
        + (children[child_index].forward_map(gp),)
        + tuple(None for _ in range(len(children) - child_index - 1))
    )
def __str__(self) -> str:
    """Return a short human readable description of the strategy."""
    return "requirement placement strategy"
def __repr__(self) -> str:
    # Constructor-style repr listing the attributes that define the strategy.
    return (
        f"RequirementPlacementStrategy(gps={self.gps}, "
        f"indices={self.indices}, direction={self.direction}, "
        f"own_col={self.own_col}, own_row={self.own_row}, "
        f"ignore_parent={self.ignore_parent}, "
        f"include_empty={self.include_empty})"
    )
def to_jsonable(self) -> dict:
    """Return a dictionary form of the strategy."""
    d: dict = super().to_jsonable()
    # These base-class keys do not apply to this strategy.
    for redundant in ("workable", "inferrable", "possibly_empty"):
        d.pop(redundant)
    d.update(
        gps=tuple(gp.to_jsonable() for gp in self.gps),
        indices=self.indices,
        direction=self.direction,
        own_col=self.own_col,
        own_row=self.own_row,
        include_empty=self.include_empty,
    )
    return d
@classmethod
def from_dict(cls, d: dict) -> "RequirementPlacementStrategy":
    """Inverse of :meth:`to_jsonable`."""
    raw_gps = d.pop("gps")
    gps = tuple(map(GriddedPerm.from_dict, raw_gps))
    return cls(gps=gps, **d)
class AbstractRequirementPlacementFactory(StrategyFactory[Tiling]):
    """
    Base class for requirement placement on tilings.

    It will create batch rules based on placing the direction most points
    at indices i1, ..., ik in the gridded perms g1, ..., gk, respectively.

    The point placements yielded are determined by the
    'req_indices_and_directions_to_place' function, which subclasses
    must implement.
    """

    def __init__(
        self,
        partial: bool = False,
        ignore_parent: bool = False,
        dirs: Iterable[int] = tuple(DIRS),
        include_empty: bool = False,
    ):
        """Store the placement options shared by all placement factories."""
        # Fail fast on a typoed direction constant.
        assert all(d in DIRS for d in dirs), "Got an invalid direction"
        self.partial = partial
        self.ignore_parent = ignore_parent
        self.dirs = tuple(dirs)
        self.include_empty = include_empty

    @abc.abstractmethod
    def req_indices_and_directions_to_place(
        self, tiling: Tiling
    ) -> Iterator[Tuple[Tuple[GriddedPerm, ...], Tuple[int, ...], int]]:
        """
        Iterator over all requirement lists, indices and directions to place.
        """

    def req_placements(self, tiling: Tiling) -> Tuple[RequirementPlacement, ...]:
        """
        Return the RequirementPlacement classes used to place the points.
        """
        if self.partial:
            # Partial placement: one placement per axis (row-only, col-only).
            req_placements: Tuple[RequirementPlacement, ...] = (
                RequirementPlacement(tiling, own_row=False),
                RequirementPlacement(tiling, own_col=False),
            )
        else:
            req_placements = (RequirementPlacement(tiling),)
        return req_placements

    def __call__(self, comb_class: Tiling, **kwargs) -> Iterator[Rule]:
        """Yield one rule per (placement, requirement, direction) combination."""
        for req_placement, (gps, indices, direction) in product(
            self.req_placements(comb_class),
            self.req_indices_and_directions_to_place(comb_class),
        ):
            # Skip directions this placement cannot handle and points that
            # are already placed on the tiling.
            if (
                direction in req_placement.directions
                and not req_placement.already_placed(gps, indices)
            ):
                strategy = RequirementPlacementStrategy(
                    gps,
                    indices,
                    direction,
                    own_row=req_placement.own_row,
                    own_col=req_placement.own_col,
                    ignore_parent=self.ignore_parent,
                    include_empty=self.include_empty,
                )
                children = req_placement.place_point_of_req(gps, indices, direction)
                if self.include_empty:
                    # First child is the tiling avoiding the requirement.
                    children = (comb_class.add_obstructions(gps),) + children
                yield strategy(comb_class, children)

    def to_jsonable(self) -> dict:
        """Return a dictionary form of the factory."""
        d: dict = super().to_jsonable()
        d["partial"] = self.partial
        d["ignore_parent"] = self.ignore_parent
        d["dirs"] = self.dirs
        d["include_empty"] = self.include_empty
        return d

    @classmethod
    def from_dict(cls, d: dict) -> "AbstractRequirementPlacementFactory":
        """Inverse of :meth:`to_jsonable`."""
        return cls(**d)
class PatternPlacementFactory(AbstractRequirementPlacementFactory):
"""
Strategy that places a single forced point of a gridded permutation.
Yield all possible rules coming from placing a point of a pattern that
occurs as a subpattern of requirement containing a single pattern.
INPUTS:
- `point_only`: only place point for length 1 subpattern.
- `partial`: places only the point on its own row or its own column.
- `ignore_parent`: indicate if the rule should ignore parent
- `dirs`: The directions used for placement (default to all
directions).
The possible directions are:
- `permuta.misc.DIR_NORTH`
- `permuta.misc.DIR_SOUTH`
- `permuta.misc.DIR_EAST`
- `permuta.misc.DIR_WEST`
"""
def __init__(
    self,
    point_only: bool = False,
    partial: bool = False,
    ignore_parent: bool = False,
    dirs: Iterable[int] = tuple(DIRS),
):
    """Store the placement options; see the class docstring for their meaning."""
    # Fail fast on a typoed direction constant.
    assert all(d in DIRS for d in dirs), "Got an invalid direction"
    self.point_only = point_only
    super().__init__(partial=partial, ignore_parent=ignore_parent, dirs=dirs)
def req_indices_and_directions_to_place(
    self, tiling: Tiling
) -> Iterator[Tuple[Tuple[GriddedPerm, ...], Tuple[int, ...], int]]:
    """
    Yield the (requirement, indices, direction) triples to place.

    With ``point_only`` set, only length one gridded perms in positive
    cells are placed; otherwise every subperm of every size-one
    requirement list is considered.
    """
    if self.point_only:
        for cell in tiling.positive_cells:
            req = (GriddedPerm(Perm((0,)), (cell,)),)
            for direction in self.dirs:
                yield req, (0,), direction
    else:
        # Deduplicate subperms shared between requirements.
        subgps = set(
            chain.from_iterable(
                req[0].all_subperms(proper=False)
                for req in tiling.requirements
                if len(req) == 1
            )
        )
        for gp in subgps:
            for index in range(len(gp)):
                for direction in self.dirs:
                    yield (gp,), (index,), direction
def __str__(self) -> str:
s = "partial " if self.partial else ""
s | |
# source repository: jfarrimo/lol-logwatcher
#!/usr/bin/env python
'''
Copyright (c) 2012 Lolapps, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are
permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list
of conditions and the following disclaimer in the documentation and/or other materials
provided with the distribution.
THIS SOFTWARE IS PROVIDED BY LOLAPPS, INC. ''AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LOLAPPS, INC. OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those of the
authors and should not be interpreted as representing official policies, either expressed
or implied, of Lolapps, Inc..
--------------------------------------------------------------------------------------------
differ.py scans log files, pulls errors, and writes them to a database.
Usage is as follows:
differ.py
To debug (scan a single file once completely, print results, and exit), you can use:
differ.py <file name>
TODO: fix logging output. It doesn't seem to play nicely with shell
redirection.
'''
import os
import pickle
import re
import stat
import sys
import time
import differdb
import util
import syslog_client
from settings import *
# Pre-compiled patterns (regex strings come from settings):
# ERROR_PATTERN marks the first line of an error block,
# END_PATTERN marks a line that terminates one.
ERROR_PATTERN = re.compile(ERROR_RE)
END_PATTERN = re.compile(ERROR_END_RE)
def file_scan():
    """Collect every eligible log file under the configured TARGETS.

    Returns a pair ``(filelist, largefilelist)``; the second list holds
    files that exceed the size limit and are handled separately.
    """
    filelist = []
    largefilelist = []
    for target in TARGETS:
        regular, oversized = get_file_listing(target)
        filelist.extend(regular)
        largefilelist.extend(oversized)
    return filelist, largefilelist
def get_file_listing(target):
    """Return ``(loglist, largeloglist)`` for a single target path.

    ``target`` may be a file (classified directly) or a directory (each
    entry is classified); anything else yields two empty lists.
    """
    loglist = []
    largeloglist = []
    if os.path.isfile(target):
        check_and_classify_file(target, loglist, largeloglist)
    elif os.path.isdir(target):
        for entry in os.listdir(target):
            check_and_classify_file(os.path.join(target, entry), loglist, largeloglist)
    return loglist, largeloglist
def check_and_classify_file(filename, loglist, largeloglist):
    """Append ``filename`` to ``loglist`` or ``largeloglist`` if eligible.

    Blacklisted files, files that vanish mid-rotation, and unwanted
    filetypes are silently skipped.
    """
    if filename in BLACKLIST:
        # There are some files we just don't want to ever touch.
        return
    try:
        stats = os.stat(filename)
    except OSError:
        # Log rotation can remove the file between listing and stat;
        # if we can't stat it, just move along.
        return
    extension = filename.split('.')[-1]
    if extension in IGNORE_FILETYPES:
        return
    if extension in VALID_FILETYPES or filename in TARGETS:
        if stats[stat.ST_SIZE] > MAX_FILE_SIZE:
            largeloglist.append(filename)
        else:
            loglist.append(filename)
def get_log_dict(filename):
    """Load the pickled state dictionary from ``filename``.

    Returns an empty dict when the state file is missing or unreadable
    (e.g. on a freshly provisioned host).
    """
    try:
        # ``with`` guarantees the handle is closed even if unpickling fails;
        # the previous version leaked the descriptor on a corrupt state file.
        with open(filename, 'r') as picklefile:
            return pickle.load(picklefile)
    except Exception:
        # Missing or unreadable state file: start with a fresh dictionary.
        return {}
def write_log_dict(filename, logdict):
    """Pickle ``logdict`` to ``filename``.

    Counterpart of :func:`get_log_dict`.
    """
    # ``with`` closes the handle even if pickling raises; the previous
    # version leaked the descriptor in that case.
    with open(filename, 'w') as picklefile:
        pickle.dump(logdict, picklefile)
def submit_errors(error_msg):
    """Send out notifications for the accumulated error messages.

    Emails the errors (when DIFFER_EMAIL_ERRORS is set) and records a
    one-line note on DIFFERLOGHOST's syslog.  Returns True immediately
    when there is nothing to submit.
    """
    # If there's no error messages, just get out of here
    if not error_msg:
        util.write_log('nothing to submit')
        return True
    myhost = util.get_differ_hostname()
    # email out the message
    if DIFFER_EMAIL_ERRORS:
        subject = 'Differ ERRORS: %s' % myhost
        util.mail_it(RCPT_TO, MAIL_FROM, subject, error_msg, '<EMAIL>')
    # send to the syslog on DIFFERLOGHOST the fact that we sent out an error
    # helpful for perhaps getting a quick look at how many servers
    # were sending out errors
    human_time = time.strftime('%Y%m%d %H:%M', time.localtime())
    try:
        syslog_msg = '%s errors submitted at %s' % (myhost, human_time)
        c = syslog_client.syslog_client((DIFFERLOGHOST, 514))
        c.log(syslog_msg, facility='local4', priority='info')
    except Exception:
        # Syslog delivery is best-effort; never let it break the scan.
        # (Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit
        # still propagate.)
        pass
def process_completed_error(local_err_msg, lolfly_error, debug, db_inject):
    """Parse a completed error message and record it on ``lolfly_error``.

    Optionally pretty-prints the record and/or injects it into the
    differ DB.  Returns the parsed
    ``(location, line_number, method, exception)`` tuple.
    """
    # Parse out the error and gather its components.
    location, line_number, method, exception = util.parse_error_string(local_err_msg)
    # Populate the record, truncating each field to its configured limit.
    lolfly_error.error_msg = util.smart_truncate(
        local_err_msg, length=MAX_MSG_LENGTH, suffix=MAX_MSG_SUFFIX)
    lolfly_error.exception = util.smart_truncate(
        exception, length=MAX_EXC_LENGTH, suffix=MAX_EXC_SUFFIX)
    lolfly_error.line_number = line_number
    lolfly_error.location = util.smart_truncate(
        location, length=MAX_LOCATION_LENGTH, suffix=MAX_LOCATION_SUFFIX)
    lolfly_error.method = method
    if debug:
        lolfly_error.print_pretty()
    if db_inject:
        lolfly_error.differ_db_inject()
    return location, line_number, method, exception
def scan_file(filename, differ_db, log_pos=0, debug=False, db_inject=False):
    """Scan ``filename`` from byte offset ``log_pos`` for error blocks.

    State machine: a line matching ERROR_PATTERN opens an error block;
    the block closes when an end/date pattern is seen or after MAX_LINES
    lines, whichever comes first.  Each completed block is handed to
    ``process_completed_error``.  Returns ``(new_log_pos, error_msg)``
    where ``error_msg`` accumulates every captured block.

    NOTE(review): the ``tail > 0`` / ``tail == 0`` comparisons rely on
    Python 2 semantics when ``tail`` is None (None compares False against
    numbers); under Python 3 this would raise TypeError.
    NOTE(review): ``logfile`` is never explicitly closed.
    """
    error_msg = ''
    local_err_msg = ''
    # Check if we have permissions to even read the file
    # in question
    if not os.access(filename, os.R_OK):
        util.write_log('%s unable to read due to permissions' % filename)
        return log_pos, error_msg
    logfile = open(filename, 'r')
    logfile.seek(log_pos)
    # tail counts remaining lines to capture for the current block;
    # None means "not currently inside an error block".
    tail = None
    gotmatch = False
    lolfly_error = differdb.LolflyError(filename, differ_db)
    for line in logfile:
        if ERROR_PATTERN.search(line) and util.check_valid_error(line):
            # We match, start outputting.
            if tail is None:
                util.write_log('got match in file : %s' % filename)
            # Reset the countdown on every matching line.
            tail = MAX_LINES
            local_err_msg += util.smart_truncate(line, length=MAX_LINE_LENGTH,
                                                 suffix=MAX_LINE_SUFFIX)
            log_pos = logfile.tell()
            tail -= 1
            gotmatch = True
        elif gotmatch and (re.match(PASTE_DATE_FORMAT, line) or
                           END_PATTERN.search(line) or
                           re.match(PYLONS_DATE_FORMAT, line)):
            # An end marker (or a new timestamped entry) closes the block:
            # add on to the local_err_msg
            # and then update the bigger message
            local_err_msg += util.smart_truncate(line, length=MAX_LINE_LENGTH,
                                                 suffix=MAX_LINE_SUFFIX)
            error_msg += local_err_msg
            process_completed_error(local_err_msg, lolfly_error, debug, db_inject)
            # reset variables
            lolfly_error.initialize()
            local_err_msg = ''
            tail = None
            gotmatch = False
        elif tail > 0:
            # Still inside a block: keep capturing lines.
            local_err_msg += util.smart_truncate(line, length=MAX_LINE_LENGTH,
                                                 suffix=MAX_LINE_SUFFIX)
            log_pos = logfile.tell()
            tail -= 1
        elif tail == 0:
            # Captured MAX_LINES without seeing an end marker: flush the block.
            # add on to the local_err_msg
            # and then update the bigger message
            error_msg += local_err_msg
            process_completed_error(local_err_msg, lolfly_error, debug, db_inject)
            # reset variables
            lolfly_error.initialize()
            local_err_msg = ''
            tail = None
            gotmatch = False
        else:
            # Outside any block: just advance the resume offset.
            log_pos = logfile.tell()
    # Flush a block left open at end of file.
    error_msg += local_err_msg
    if local_err_msg:
        process_completed_error(local_err_msg, lolfly_error, debug, db_inject)
    return log_pos, error_msg
def alert_large_log(filename, differ_db, debug=False, db_inject=False):
    """Record an error noting that ``filename`` was skipped for being too big.

    Returns the message so the caller can fold it into its report.
    """
    message = "Log file %s ignored by differv2 because it is too large" % filename
    lolfly_error = differdb.LolflyError(filename, differ_db)
    lolfly_error.error_msg = message
    lolfly_error.file_name = filename
    lolfly_error.exception = None
    if debug:
        lolfly_error.print_pretty()
    if db_inject:
        lolfly_error.differ_db_inject()
    return message
def update_logdict(loglist, oldlogdict):
    """Rebuild the state dictionary for the current set of log files.

    Stale files (older than MAX_MTIME) are recorded as fully read; files
    whose inode changed (log rotation) restart at offset 0; known files
    keep their previous state; new files start at offset 0.  Rebuilding
    from scratch ensures we don't carry over stale entries.
    """
    newlogdict = {}
    # Hoisted out of the loop: the cutoff is the same for every file.
    min_mtime = int(time.time() - MAX_MTIME)
    for log in loglist:
        stats = os.stat(log)
        inode = stats[stat.ST_INO]
        if stats[stat.ST_MTIME] < min_mtime:
            # Older file: mark it fully read so it is never rescanned.
            newlogdict[log] = {'log_pos': stats[stat.ST_SIZE], 'inode': inode}
        elif log in oldlogdict:  # ``has_key`` is Python-2 only; ``in`` works on both
            if inode != oldlogdict[log]['inode']:
                # Same path, new inode: the file was rotated, scan from the top.
                newlogdict[log] = {'log_pos': 0, 'inode': inode}
                util.write_log('inode on %s has changed, will scan' % log)
            else:
                newlogdict[log] = oldlogdict[log]
        else:
            # Normal new file: start at the beginning.
            newlogdict[log] = {'log_pos': 0, 'inode': inode}
    return newlogdict
def run_scan():
    """Run one full differ pass: scan every eligible log and submit errors.

    Loads the previous scan state, scans each log from its saved offset,
    emails/syslogs any captured errors, and persists the new state.
    """
    # only create this once for the scan run
    differ_db = differdb.DifferDB()
    loglist, largeloglist = file_scan()
    error_msg = ''
    # process log files that are too big
    for log in largeloglist:
        error_msg = error_msg + alert_large_log(log, differ_db, db_inject=True)
    # Each time we run, we want to re-build our log dictionary. This
    # helps to ensure we don't carry over stale data.
    logdict = get_log_dict(STATEFILE)
    logdict = update_logdict(loglist, logdict)
    for log in loglist:
        log_pos = logdict[log]['log_pos']
        log_pos, error_log = scan_file(log, differ_db, log_pos=log_pos, db_inject=True)
        if error_log:
            # Wrap each file's errors so the notification is readable.
            error_msg += '==> Start errors from : %s\n' % log
            error_msg += error_log
            error_msg += '==> End errors from %s\n' % log
        # Persist where we stopped so the next run resumes from here.
        stats = os.stat(log)
        inode = stats[stat.ST_INO]
        logdict[log]['log_pos'] = log_pos
        logdict[log]['inode'] = inode
    submit_errors(error_msg)
    write_log_dict(STATEFILE, logdict)
def main():
""" Just your run of the mill basic loop. All the logic is elsewhere
so that | |
", MOmega0, " and peak duration ", nrwf.internal_EstimatePeakL2M2Emission[key[0]][key[1]])
except:
good_sim = good_sim_list[0] # pick the first one. Note we will want to reduce /downselect the lookup process
group = good_sim[0]
param = good_sim[1]
else:
group = NR_group
param = NR_param
print(" Identified matching NR simulation ", group, param)
mtot = P.m1 + P.m2
q = P.m2/P.m1
# Load the catalog
wfP = nrwf.WaveformModeCatalog(group, param, \
clean_initial_transient=True,clean_final_decay=True, shift_by_extraction_radius=True,perturbative_extraction_full=perturbative_extraction_full,perturbative_extraction=perturbative_extraction,lmax=Lmax,align_at_peak_l2_m2_emission=True, build_strain_and_conserve_memory=True,use_provided_strain=use_provided_strain)
# Overwrite the parameters in wfP to set the desired scale
wfP.P.m1 = mtot/(1+q)
wfP.P.m2 = mtot*q/(1+q)
wfP.P.dist =distMpcRef*1e6*lal.PC_SI # fiducial distance
wfP.P.approx = P.approx
wfP.P.deltaT = P.deltaT
wfP.P.deltaF = P.deltaF
wfP.P.fmin = P.fmin
hlms = wfP.hlmoff( deltaT=P.deltaT,force_T=1./P.deltaF,hybrid_use=hybrid_use,hybrid_method=hybrid_method) # force a window. Check the time
hlms_conj = wfP.conj_hlmoff( deltaT=P.deltaT,force_T=1./P.deltaF,hybrid_use=hybrid_use) # force a window. Check the time
if rosDebugMessages:
print("NR variant: Length check: ",hlms[(2,2)].data.length, first_data.data.length)
# Remove memory modes (ALIGNED ONLY: Dangerous for precessing spins)
if no_memory and wfP.P.SoftAlignedQ():
for key in hlms.keys():
if key[1]==0:
hlms[key].data.data *=0.
hlms_conj[key].data.data *=0.
elif hasEOB and use_external_EOB:
print(" Using external EOB interface (Bernuzzi) ")
# Code WILL FAIL IF LAMBDA=0
P.taper = lsu.lsu_TAPER_START
lambda_crit=1e-3 # Needed to have adequate i/o output
if P.lambda1<lambda_crit:
P.lambda1=lambda_crit
if P.lambda2<lambda_crit:
P.lambda2=lambda_crit
if P.deltaT > 1./16384:
print(" Bad idea to use such a low sampling rate for EOB tidal ")
wfP = eobwf.WaveformModeCatalog(P,lmax=Lmax)
hlms = wfP.hlmoff(force_T=1./P.deltaF,deltaT=P.deltaT)
# Reflection symmetric
hlms_conj = wfP.conj_hlmoff(force_T=1./P.deltaF,deltaT=P.deltaT)
# Code will not make the EOB waveform shorter, so the code can fail if you have insufficient data, later
print(" External EOB length check ", hlms[(2,2)].data.length, first_data.data.length, first_data.data.length*P.deltaT)
print(" External EOB length check (in M) ", end=' ')
print(" Comparison EOB duration check vs epoch vs window size (sec) ", wfP.estimateDurationSec(), -hlms[(2,2)].epoch, 1./hlms[(2,2)].deltaF)
assert hlms[(2,2)].data.length ==first_data.data.length
if rosDebugMessagesDictionary["DebugMessagesLong"]:
hlmT_ref = lsu.DataInverseFourier(hlms[(2,2)])
print(" External EOB: Time offset of largest sample (should be zero) ", hlms[(2,2)].epoch + np.argmax(np.abs(hlmT_ref.data.data))*P.deltaT)
elif useNR: # NR signal required
mtot = P.m1 + P.m2
# Load the catalog
wfP = nrwf.WaveformModeCatalog(NR_group, NR_param, \
clean_initial_transient=True,clean_final_decay=True, shift_by_extraction_radius=True,
lmax=Lmax,align_at_peak_l2_m2_emission=True,use_provided_strain=use_provided_strain)
# Overwrite the parameters in wfP to set the desired scale
q = wfP.P.m2/wfP.P.m1
wfP.P.m1 *= mtot/(1+q)
wfP.P.m2 *= mtot*q/(1+q)
wfP.P.dist =distMpcRef*1e6*lal.PC_SI # fiducial distance.
hlms = wfP.hlmoff( deltaT=P.deltaT,force_T=1./P.deltaF) # force a window
else:
print(" No waveform available ")
import sys
sys.exit(0)
if not(ignore_threshold is None) and (not ROM_use_basis):
crossTermsFiducial = ComputeModeCrossTermIP(hlms,hlms, psd_dict[detectors[0]],
P.fmin, fMax,
1./2./P.deltaT, P.deltaF, analyticPSD_Q, inv_spec_trunc_Q, T_spec,verbose=verbose)
theWorthwhileModes = IdentifyEffectiveModesForDetector(crossTermsFiducial, ignore_threshold, detectors)
# Make sure worthwhile modes satisfy reflection symmetry! Do not truncate egregiously!
theWorthwhileModes = theWorthwhileModes.union( set([(p,-q) for (p,q) in theWorthwhileModes]))
print(" Worthwhile modes : ", theWorthwhileModes)
hlmsNew = {}
hlmsConjNew = {}
for pair in theWorthwhileModes:
hlmsNew[pair]=hlms[pair]
hlmsConjNew[pair] = hlms_conj[pair]
hlms =hlmsNew
hlms_conj= hlmsConjNew
if len(hlms.keys()) == 0:
print(" Failure ")
import sys
sys.exit(0)
# Print statistics on timeseries provided
if verbose:
print(" Mode npts(data) npts epoch epoch/deltaT ")
for mode in hlms.keys():
print(mode, first_data.data.length, hlms[mode].data.length, hlms[mode].data.length*P.deltaT, hlms[mode].epoch, hlms[mode].epoch/P.deltaT)
for det in detectors:
# This is the event time at the detector
t_det = ComputeArrivalTimeAtDetector(det, P.phi, P.theta,event_time_geo)
# The is the difference between the time of the leading edge of the
# time window we wish to compute the likelihood in, and
# the time corresponding to the first sample in the rholms
rho_epoch = data_dict[det].epoch - hlms[list(hlms.keys())[0]].epoch
t_shift = float(float(t_det) - float(t_window) - float(rho_epoch))
# assert t_shift > 0 # because NR waveforms may start at any time, they don't always have t_shift > 0 !
# tThe leading edge of our time window of interest occurs
# this many samples into the rholms
N_shift = int( t_shift / P.deltaT + 0.5 ) # be careful about rounding: might be one sample off!
# Number of samples in the window [t_ref - t_window, t_ref + t_window]
N_window = int( 2 * t_window / P.deltaT )
# Compute cross terms < h_lm | h_l'm' >
crossTerms[det] = ComputeModeCrossTermIP(hlms, hlms, psd_dict[det], P.fmin,
fMax, 1./2./P.deltaT, P.deltaF, analyticPSD_Q,
inv_spec_trunc_Q, T_spec,verbose=verbose)
crossTermsV[det] = ComputeModeCrossTermIP(hlms_conj, hlms, psd_dict[det], P.fmin,
fMax, 1./2./P.deltaT, P.deltaF, analyticPSD_Q,
inv_spec_trunc_Q, T_spec,prefix="V",verbose=verbose)
# Compute rholm(t) = < h_lm(t) | d >
rholms[det] = ComputeModeIPTimeSeries(hlms, data_dict[det],
psd_dict[det], P.fmin, fMax, 1./2./P.deltaT, N_shift, N_window,
analyticPSD_Q, inv_spec_trunc_Q, T_spec)
rhoXX = rholms[det][list(rholms[det].keys())[0]]
# The vector of time steps within our window of interest
# for which we have discrete values of the rholms
# N.B. I don't do simply rho_epoch + t_shift, b/c t_shift is the
# precise desired time, while we round and shift an integer number of
# steps of size deltaT
t = np.arange(N_window) * P.deltaT\
+ float(rho_epoch + N_shift * P.deltaT )
if verbose:
print("For detector", det, "...")
print("\tData starts at %.20g" % float(data_dict[det].epoch))
print("\trholm starts at %.20g" % float(rho_epoch))
print("\tEvent time at detector is: %.18g" % float(t_det))
print("\tInterpolation window has half width %g" % t_window)
print("\tComputed t_shift = %.20g" % t_shift)
print("\t(t_shift should be t_det - t_window - t_rholm = %.20g)" %\
(t_det - t_window - float(rho_epoch)))
print("\tInterpolation starts at time %.20g" % t[0])
print("\t(Should start at t_event - t_window = %.20g)" %\
(float(rho_epoch + N_shift * P.deltaT)))
# The minus N_shift indicates we need to roll left
# to bring the desired samples to the front of the array
if not skip_interpolation:
rholms_intp[det] = InterpolateRholms(rholms[det], t,verbose=verbose)
else:
rholms_intp[det] = None
if not ROM_use_basis:
return rholms_intp, crossTerms, crossTermsV, rholms, None
else:
return rholms_intp, crossTerms, crossTermsV, rholms, acatHere # labels are misleading for use_rom_basis
def ReconstructPrecomputedLikelihoodTermsROM(P,acat_rom,rho_intp_rom,crossTerms_rom, crossTermsV_rom, rho_rom,verbose=True):
    """
    Using a set of ROM coefficients for hlm[lm] = coef[l,m,basis] w[basis], reconstructs <h[lm]|data>, <h[lm]|h[l'm']>
    Requires ROM also be loaded in top level, for simplicity

    Returns the same 5-tuple layout as PrecomputeLikelihoodTerms:
        (rholms_intp, crossTerms, crossTermsV, rholms, None)
    """
    # Extract coefficients for the intrinsic parameters P
    coefs = acat_rom.coefficients(P)
    # Identify available modes
    modelist = acat_rom.modes_available
    detectors = crossTerms_rom.keys()
    rholms = {}
    rholms_intp = {}
    crossTerms = {}
    crossTermsV = {}
    # Reproduce rholms and rholms_intp
    for det in detectors:
        rholms[det] = {}
        rholms_intp[det] = {}
        for mode in modelist:
            # Basis indices contributing to this (l, m) mode
            indx_list_ok = [indx for indx in coefs.keys() if indx[0] == mode[0] and indx[1] == mode[1]]
            # Discrete case: accumulate the coefficient-weighted basis series.
            indx0 = indx_list_ok[0]
            rhoTS = lal.CreateCOMPLEX16TimeSeries("rho", rho_rom[det][indx0].epoch, rho_rom[det][indx0].f0,
                                                  rho_rom[det][indx0].deltaT, rho_rom[det][indx0].sampleUnits,
                                                  rho_rom[det][indx0].data.length)
            rhoTS.data.data = np.zeros(rho_rom[det][indx0].data.length)  # problems with data initialization common with LAL
            fn_list_here = []
            wt_list_here = []
            for indx in indx_list_ok:
                rhoTS.data.data += np.conj(coefs[indx]) * rho_rom[det][indx].data.data
                wt_list_here.append(np.conj(coefs[indx]))
                # BUG FIX: this previously *overwrote* the list with a single
                # function, so only the last basis function was retained.
                fn_list_here.append(rho_intp_rom[det][indx])
            rholms[det][mode] = rhoTS
            # Interpolated case: rho_lm(t) = sum_k conj(c_k) * rho_basis_k(t).
            # BUG FIX: the old lambda bound fns/wts as defaults but its body
            # referenced the *free* loop variables (classic late binding), so
            # every mode evaluated the last mode's weights; it also used
            # np.array(map(...)), which is broken under Python 3.
            wt_list_here = np.array(wt_list_here)
            rholms_intp[det][mode] = (
                lambda t, fns=tuple(fn_list_here), wts=wt_list_here:
                    sum(wt * fn(t) for wt, fn in zip(wts, fns))
            )
            if verbose:
                print(" factored_likelihood: ROM: interpolated timeseries ", det, mode, " built from ", len(fn_list_here), " basis functions")
    # Reproduce crossTerms, crossTermsV
    for det in detectors:
        crossTerms[det] = {}
        crossTermsV[det] = {}
        for mode1 in modelist:
            indx_list_ok1 = [indx for indx in coefs.keys() if indx[0] == mode1[0] and indx[1] == mode1[1]]
            for mode2 in modelist:
                indx_list_ok2 = [indx for indx in coefs.keys() if indx[0] == mode2[0] and indx[1] == mode2[1]]
                # U_{12} = sum conj(c_1) c_2 U_rom ;  V_{12} = sum c_1 c_2 V_rom
                crossTerms[det][(mode1, mode2)] = np.sum(np.array([np.conj(coefs[indx1]) * coefs[indx2] * crossTerms_rom[det][(indx1, indx2)] for indx1 in indx_list_ok1 for indx2 in indx_list_ok2]))
                crossTermsV[det][(mode1, mode2)] = np.sum(np.array([coefs[indx1] * coefs[indx2] * crossTermsV_rom[det][(indx1, indx2)] for indx1 in indx_list_ok1 for indx2 in indx_list_ok2]))
                if verbose:
                    print(" : U populated ", (mode1, mode2), " = ", crossTerms[det][(mode1, mode2)])
                    print(" : V populated ", (mode1, mode2), " = ", crossTermsV[det][(mode1, mode2)])
    return rholms_intp, crossTerms, crossTermsV, rholms, None  # Same return pattern as Precompute...
def FactoredLogLikelihood(extr_params, rholms,rholms_intp, crossTerms, crossTermsV, Lmax,interpolate=True):
"""
Compute the log-likelihood = -1/2 < d - h | d - h > from:
- extr_params is an object containing values of all extrinsic parameters
- rholms_intp is a dictionary of interpolating functions < h_lm(t) | d >
- crossTerms is a dictionary of < h_lm | h_l'm' >
- Lmax is the largest l-index of any h_lm mode considered
N.B. rholms_intp and crossTerms are the first two outputs of the function
'PrecomputeLikelihoodTerms'
"""
# Sanity checks
assert rholms_intp.keys() == crossTerms.keys()
detectors = rholms_intp.keys()
RA = extr_params.phi
DEC = extr_params.theta
tref = extr_params.tref # geocenter time
phiref = extr_params.phiref
|
# (Extraneous dataset-viewer residue, commented out so the file stays parseable:)
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.