<filename>nltk/compat.py
# -*- coding: utf-8 -*-
# Natural Language Toolkit: Compatibility
#
# Copyright (C) 2001-2015 NLTK Project
#
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import absolute_import, print_function
import os
import sys
import types
from functools import wraps
# Python 2/3 compatibility layer. Based on six.
PY3 = sys.version_info[0] == 3
PY26 = sys.version_info[:2] == (2, 6)
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
get_im_class = lambda meth: meth.__self__.__class__
xrange = range
_iterkeys = "keys"
_itervalues = "values"
_iteritems = "items"
from imp import reload
raw_input = input
imap = map
izip = zip
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
import html.entities as htmlentitydefs
from urllib.request import (urlopen, ProxyHandler, build_opener,
install_opener, getproxies, HTTPPasswordMgrWithDefaultRealm,
ProxyBasicAuthHandler, ProxyDigestAuthHandler, Request,
url2pathname)
from urllib.error import HTTPError, URLError
from urllib.parse import quote_plus, unquote_plus, urlencode
from collections import Counter
from datetime import timezone
UTC = timezone.utc
from tempfile import TemporaryDirectory
unichr = chr
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
import operator
int2byte = operator.methodcaller("to_bytes", 1, "big")
else:
def b(s):
return s
def u(s):
return unicode(s, "unicode_escape")
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
get_im_class = lambda meth: meth.im_class
xrange = xrange
_iterkeys = "iterkeys"
_itervalues = "itervalues"
_iteritems = "iteritems"
reload = reload
raw_input = raw_input
from itertools import imap, izip
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
BytesIO = StringIO
import htmlentitydefs
from urllib2 import (urlopen, HTTPError, URLError,
ProxyHandler, build_opener, install_opener,
HTTPPasswordMgrWithDefaultRealm, ProxyBasicAuthHandler,
ProxyDigestAuthHandler, Request)
from urllib import getproxies, quote_plus, unquote_plus, urlencode, url2pathname
# Map py3-style tkinter module names onto the py2 package structure using an import hook (PEP 302)
class TkinterPackage(object):
def __init__(self):
self.mod = __import__("Tkinter")
self.__path__ = ["nltk_py2_tkinter_package_path"]
def __getattr__(self, name):
return getattr(self.mod, name)
class TkinterLoader(object):
def __init__(self):
# module name mapping from py3 to py2
self.module_map = {
"tkinter": "Tkinter",
"tkinter.filedialog": "tkFileDialog",
"tkinter.font": "tkFont",
"tkinter.messagebox": "tkMessageBox",
}
def find_module(self, name, path=None):
# we are only interested in tkinter modules listed
# in self.module_map
if name in self.module_map:
return self
def load_module(self, name):
if name not in sys.modules:
if name == 'tkinter':
mod = TkinterPackage()
else:
mod = __import__(self.module_map[name])
sys.modules[name] = mod
return sys.modules[name]
sys.meta_path.insert(0, TkinterLoader())
from datetime import tzinfo, timedelta
ZERO = timedelta(0)
HOUR = timedelta(hours=1)
# A UTC class for python 2.7
class UTC(tzinfo):
"""UTC"""
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
UTC = UTC()
unichr = unichr
int2byte = chr
import csv
import codecs
import cStringIO
class UnicodeWriter:
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
see https://docs.python.org/2/library/csv.html
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", errors='replace', **kwds):
# Redirect output to a queue
self.queue = cStringIO.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
encoder_cls = codecs.getincrementalencoder(encoding)
self.encoder = encoder_cls(errors=errors)
def encode(self, data):
if isinstance(data, basestring):
return data.encode("utf-8")
else:
return data
def writerow(self, row):
self.writer.writerow([self.encode(s) for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data, 'replace')
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
import warnings as _warnings
import os as _os
from tempfile import mkdtemp
class TemporaryDirectory(object):
"""Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
example:
with TemporaryDirectory() as tmpdir:
...
Upon exiting the context, the directory and everything contained
in it are removed.
http://stackoverflow.com/questions/19296146/tempfile-temporarydirectory-context-manager-in-python-2-7
"""
def __init__(self, suffix="", prefix="tmp", dir=None):
self._closed = False
self.name = None # Handle mkdtemp raising an exception
self.name = mkdtemp(suffix, prefix, dir)
def __repr__(self):
return "<{} {!r}>".format(self.__class__.__name__, self.name)
def __enter__(self):
return self.name
def cleanup(self, _warn=False):
if self.name and not self._closed:
try:
self._rmtree(self.name)
except (TypeError, AttributeError) as ex:
# Issue #10188: Emit a warning on stderr
# if the directory could not be cleaned
# up due to missing globals
if "None" not in str(ex):
raise
print("ERROR: {!r} while cleaning up {!r}".format(ex, self,),
file=sys.stderr)
return
self._closed = True
if _warn:
self._warn("Implicitly cleaning up {!r}".format(self),
ResourceWarning)
def __exit__(self, exc, value, tb):
self.cleanup()
def __del__(self):
# Issue a ResourceWarning if implicit cleanup needed
self.cleanup(_warn=True)
# XXX (ncoghlan): The following code attempts to make
# this class tolerant of the module nulling out process
# that happens during CPython interpreter shutdown
# Alas, it doesn't actually manage it. See issue #10188
_listdir = staticmethod(_os.listdir)
_path_join = staticmethod(_os.path.join)
_isdir = staticmethod(_os.path.isdir)
_islink = staticmethod(_os.path.islink)
_remove = staticmethod(_os.remove)
_rmdir = staticmethod(_os.rmdir)
_warn = _warnings.warn
def _rmtree(self, path):
# Essentially a stripped down version of shutil.rmtree. We can't
# use globals because they may be None'ed out at shutdown.
for name in self._listdir(path):
fullname = self._path_join(path, name)
try:
isdir = self._isdir(fullname) and not self._islink(fullname)
except OSError:
isdir = False
if isdir:
self._rmtree(fullname)
else:
try:
self._remove(fullname)
except OSError:
pass
try:
self._rmdir(path)
except OSError:
pass
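# ----------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): downstream
# NLTK code imports these helpers so the same source runs on Python 2 and 3.
# The names below mirror the definitions above; `value` is a hypothetical
# variable.
#
#     from nltk.compat import PY3, b, u, text_type, binary_type, string_types
#
#     assert isinstance(b("abc"), binary_type)    # str on py2, bytes on py3
#     assert isinstance(u("caf\xe9"), text_type)  # unicode on py2, str on py3
#     if isinstance(value, string_types):         # basestring on py2, str on py3
#         value = text_type(value)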
if PY26:
from operator import itemgetter
from heapq import nlargest
from itertools import repeat, ifilter
class Counter(dict):
'''Dict subclass for counting hashable objects. Sometimes called a bag
or multiset. Elements are stored as dictionary keys and their counts
are stored as dictionary values.
>>> Counter('zyzygy')
Counter({'y': 3, 'z': 2, 'g': 1})
'''
def __init__(self, iterable=None, **kwds):
'''Create a new, empty Counter object. And if given, count elements
from an input iterable. Or, initialize the count from another mapping
of elements to their counts.
>>> Counter() # a new, empty counter
>>> Counter('gallahad') # a new counter from an iterable
>>> Counter({'a': 4, 'b': 2}) # a new counter from a mapping
>>> Counter(a=4, b=2) # a new counter from keyword args
'''
self.update(iterable, **kwds)
def __missing__(self, key):
return 0
def most_common(self, n=None):
'''List the n most common elements and their counts from the most
common to the least. If n is None, then list all element counts.
>>> Counter('abracadabra').most_common(3)
[('a', 5), ('r', 2), ('b', 2)]
'''
if n is None:
return sorted(self.iteritems(), key=itemgetter(1), reverse=True)
return nlargest(n, self.iteritems(), key=itemgetter(1))
def elements(self):
'''Iterator over elements repeating each as many times as its count.
>>> c = Counter('ABCABC')
>>> sorted(c.elements())
['A', 'A', 'B', 'B', 'C', 'C']
If an element's count has been set to zero or is a negative number,
elements() will ignore it.
'''
for elem, count in self.iteritems():
for _ in repeat(None, count):
yield elem
# Override dict methods where the meaning changes for Counter
# objects.
@classmethod
def fromkeys(cls, iterable, v=None):
raise NotImplementedError(
'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
def update(self, iterable=None, **kwds):
'''Like dict.update() but add counts instead of replacing them.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.update('witch') # add elements from another iterable
>>> d = Counter('watch')
>>> c.update(d) # add elements from another counter
>>> c['h'] # four 'h' in which, witch, and watch
4
'''
if iterable is not None:
if hasattr(iterable, 'iteritems'):
if self:
self_get = self.get
for elem, count in iterable.iteritems():
self[elem] = self_get(elem, 0) + count
else:
# fast path when counter is empty
dict.update(self, iterable)
else:
self_get = self.get
for elem in iterable:
self[elem] = self_get(elem, 0) + 1
if kwds:
self.update(kwds)
def copy(self):
'Like dict.copy() but returns a Counter instance instead of a dict.'
return Counter(self)
def __delitem__(self, elem):
'Like dict.__delitem__() but does not raise KeyError for missing values.'
if elem in self:
dict.__delitem__(self, elem)
def __repr__(self):
if not self:
return '%s()' % self.__class__.__name__
items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
return '%s({%s})' % (self.__class__.__name__, items)
# Multiset-style mathematical operations discussed in:
# Knuth TAOCP Volume II section 4.6.3 exercise 19
# and at http://en.wikipedia.org/wiki/Multiset
#
# Outputs guaranteed to only include positive counts.
#
# To strip negative and zero counts, add-in an empty counter:
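# Illustrative doctest-style sketch of those multiset operations (standard
# Counter behaviour; the operator definitions themselves are cut off by the
# excerpt above):
#
#     >>> Counter('abbb') + Counter('bcc')   # add counts
#     Counter({'b': 4, 'c': 2, 'a': 1})
#     >>> Counter('abbb') - Counter('bcc')   # subtract, keeping only positive counts
#     Counter({'b': 2, 'a': 1})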
+ 1)
]
nodal_forces_y = aux_nodal_forces[1 :: self.number_dof] - nodal_shaft_weight
elm_forces_y = np.zeros_like(elm_weight)
elm_forces_y[:, 0] = nodal_forces_y[:-1]
elm_forces_y[-1, 1] = -nodal_forces_y[-1]
elm_forces_y += elm_weight
# locate and collect bearing and disk forces
aux_df = aux_rotor.df.loc[
(aux_rotor.df["type"] != "ShaftElement")
& (aux_rotor.df["shaft_number"] == i)
]
for j, row in aux_df.iterrows():
if row["n"] == n_max:
force = -np.round(elm_forces_y[-1, 1], 1)
else:
force = np.round(elm_forces_y[int(row["n"]) - n_min, 0], 1)
if row["type"] == "DiskElement":
DskForce_nodal["node_" + str(int(row["n"]))] = force
DskForce_tag[row["tag"]] = force
elif row["type"] == "BearingElement":
BrgForce_nodal["node_" + str(int(row["n"]))] = force
BrgForce_tag[row["tag"]] = force
if not pd.isna(row["n_link"]):
BrgForce_nodal["node_" + str(int(row["n_link"]))] = -force
# Calculate shearing force
# Each line represents an element, each column a station from the element
aux_Vx = np.zeros_like(elm_weight)
for j in range(aux_Vx.shape[0]):
if j == 0:
aux_Vx[j] = [elm_forces_y[j, 0], sum(elm_forces_y[j])]
elif j == aux_Vx.shape[0] - 1:
aux_Vx[j, 0] = aux_Vx[j - 1, 1] + elm_forces_y[j, 0]
aux_Vx[j, 1] = elm_forces_y[j, 1]
else:
aux_Vx[j, 0] = aux_Vx[j - 1, 1] + elm_forces_y[j, 0]
aux_Vx[j, 1] = aux_Vx[j, 0] + elm_forces_y[j, 1]
aux_Vx = -aux_Vx
# Calculate bending moment
# Each line represents an element, each column a station from the element
aux_Mx = np.zeros_like(aux_Vx)
for j in range(aux_Mx.shape[0]):
if j == 0:
aux_Mx[j] = [0, 0.5 * sum(aux_Vx[j]) * np.diff(aux_Vx_axis[j])]
if j == aux_Mx.shape[0] - 1:
aux_Mx[j] = [-0.5 * sum(aux_Vx[j]) * np.diff(aux_Vx_axis[j]), 0]
else:
aux_Mx[j, 0] = aux_Mx[j - 1, 1]
aux_Mx[j, 1] = aux_Mx[j, 0] + 0.5 * sum(aux_Vx[j]) * np.diff(
aux_Vx_axis[j]
)
# flattening arrays
aux_Vx = aux_Vx.flatten()
aux_Vx_axis = aux_Vx_axis.flatten()
aux_Mx = aux_Mx.flatten()
Vx.append(aux_Vx)
Vx_axis.append(aux_Vx_axis)
Mx.append(aux_Mx)
self.disk_forces_nodal = DskForce_nodal
self.bearing_forces_nodal = BrgForce_nodal
self.bearing_forces_tag = BrgForce_tag
self.disk_forces_tag = DskForce_tag
self.w_shaft = [
sum(self.df_shaft.loc[self.df_shaft.shaft_number == i, "m"]) * (-g)
for i in sh_num
]
results = StaticResults(
disp_y,
Vx,
Mx,
self.w_shaft,
self.disk_forces_nodal,
self.bearing_forces_nodal,
nodes,
nodes_pos,
Vx_axis,
)
return results
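# Illustrative sketch (not part of the original class, simplified to treat
# every element alike): the shear force is accumulated element by element
# from the element force pairs and then negated, as in the loop above.
#
#     import numpy as np
#     elm_forces_y = np.array([[-10.0, -10.0], [-10.0, -10.0], [-10.0, -10.0]])
#     Vx = np.zeros_like(elm_forces_y)
#     Vx[0] = [elm_forces_y[0, 0], elm_forces_y[0].sum()]
#     for j in range(1, len(Vx)):
#         Vx[j, 0] = Vx[j - 1, 1] + elm_forces_y[j, 0]
#         Vx[j, 1] = Vx[j, 0] + elm_forces_y[j, 1]
#     Vx = -Vx   # sign convention used by run_static
#     # The bending moment then follows by trapezoidal integration of Vx over
#     # each element length: 0.5 * (Vx_left + Vx_right) * dx.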
def summary(self):
"""Plot the rotor summary.
This function creates a summary of the main parameters and attributes of the
rotor model. The data is presented in a table format.
Returns
-------
results : ross.SummaryResults class
An instance of the SummaryResults class to build the summary table
Examples
--------
>>> rotor = rotor_example()
>>> table = rotor.summary().plot()
>>> # to display the plot use the command:
>>> # show(table)
"""
self.run_static()
forces = self.bearing_forces_tag
results = SummaryResults(
self.df_shaft,
self.df_disks,
self.df_bearings,
self.nodes_pos,
forces,
self.CG,
self.Ip,
self.tag,
)
return results
@classmethod
def from_section(
cls,
leng_data,
idl_data,
odl_data,
idr_data=None,
odr_data=None,
material_data=None,
disk_data=None,
brg_seal_data=None,
min_w=None,
max_w=None,
rated_w=None,
nel_r=1,
tag=None,
):
"""Build rotor from sections.
This class method is an alternative way to build rotors from separate
sections. Each section is divided into the same number (nel_r) of shaft elements.
Parameters
----------
leng_data : list
List with the lengths of rotor regions.
idl_data : list
List with the inner diameters of rotor regions (Left Station).
odl_data : list
List with the outer diameters of rotor regions (Left Station).
idr_data : list, optional
List with the inner diameters of rotor regions (Right Station).
Default is equal to idl_data (cylindrical element).
odr_data : list, optional
List with the outer diameters of rotor regions (Right Station).
Default is equal to odl_data (cylindrical element).
material_data : ross.material or list of ross.material
Defines a single material for all sections or each section can be
defined by a material individually.
disk_data : dict, optional
Dict holding disk data.
Example : disk_data=DiskElement.from_geometry(n=2,
material=steel,
width=0.07,
i_d=0,
o_d=0.28
)
***See 'disk_element.py' docstring for more information***
brg_seal_data : dict, optional
Dict holding lists of bearing and seal data.
Example : brg_seal_data=BearingElement(n=1, kxx=1e6, cxx=0,
kyy=1e6, cyy=0, kxy=0,
cxy=0, kyx=0, cyx=0)
***See 'bearing_seal_element.py' docstring for more information***
nel_r : int, optional
Number of elements per shaft region.
Default is 1.
tag : str
A tag for the rotor
Raises
------
ValueError
Error raised if the list sizes do not match.
AttributeError
Error raised if the shaft material is not defined.
Returns
-------
A rotor object
Example
-------
>>> from ross.materials import steel
>>> rotor = Rotor.from_section(leng_data=[0.5,0.5,0.5],
... odl_data=[0.05,0.05,0.05],
... idl_data=[0,0,0],
... material_data=steel,
... disk_data=[DiskElement.from_geometry(n=1, material=steel, width=0.07, i_d=0, o_d=0.28),
... DiskElement.from_geometry(n=2, material=steel, width=0.07, i_d=0, o_d=0.35)],
... brg_seal_data=[BearingElement(n=0, kxx=1e6, cxx=0, kyy=1e6, cyy=0, kxy=0, cxy=0, kyx=0, cyx=0),
... BearingElement(n=3, kxx=1e6, cxx=0, kyy=1e6, cyy=0, kxy=0, cxy=0, kyx=0, cyx=0)],
... nel_r=1)
>>> modal = rotor.run_modal(speed=0)
>>> modal.wn.round(4)
array([ 85.7634, 85.7634, 271.9326, 271.9326, 718.58 , 718.58 ])
"""
if len(leng_data) != len(odl_data) or len(leng_data) != len(idl_data):
raise ValueError(
"The lists size do not match (leng_data, odl_data and idl_data)."
)
if material_data is None:
raise AttributeError("Please define a material or a list of materials")
if idr_data is None:
idr_data = idl_data
if odr_data is None:
odr_data = odl_data
else:
if len(leng_data) != len(odr_data) or len(leng_data) != len(idr_data):
raise ValueError(
"The lists size do not match (leng_data, odr_data and idr_data)."
)
def rotor_regions(nel_r):
"""Subroutine to discretize each rotor region into n elements.
Parameters
----------
nel_r : int
Number of elements per region
Returns
-------
regions : list
List with elements
"""
regions = []
shaft_elements = []
disk_elements = []
bearing_elements = []
try:
if len(leng_data) != len(material_data):
raise IndexError(
"material_data size does not match size of other lists"
)
# loop through rotor regions
for i, leng in enumerate(leng_data):
le = leng / nel_r
for j in range(nel_r):
idl = (idr_data[i] - idl_data[i]) * j * le / leng + idl_data[i]
odl = (odr_data[i] - odl_data[i]) * j * le / leng + odl_data[i]
idr = (idr_data[i] - idl_data[i]) * (
j + 1
) * le / leng + idl_data[i]
odr = (odr_data[i] - odl_data[i]) * (
j + 1
) * le / leng + odl_data[i]
shaft_elements.append(
ShaftElement(
le,
idl,
odl,
idr,
odr,
material=material_data[i],
shear_effects=True,
rotary_inertia=True,
gyroscopic=True,
)
)
except TypeError:
for i, leng in enumerate(leng_data):
le = leng / nel_r
for j in range(nel_r):
idl = (idr_data[i] - idl_data[i]) * j * le / leng + idl_data[i]
odl = (odr_data[i] - odl_data[i]) * j * le / leng + odl_data[i]
idr = (idr_data[i] - idl_data[i]) * (
j + 1
) * le / leng + idl_data[i]
odr = (odr_data[i] - odl_data[i]) * (
j + 1
) * le / leng + odl_data[i]
shaft_elements.append(
ShaftElement(
le,
idl,
odl,
idr,
odr,
material=material_data,
shear_effects=True,
rotary_inertia=True,
gyroscopic=True,
)
)
regions.extend([shaft_elements])
for DiskEl in disk_data:
aux_DiskEl = deepcopy(DiskEl)
aux_DiskEl.n = nel_r * DiskEl.n
aux_DiskEl.n_l = nel_r * DiskEl.n_l
aux_DiskEl.n_r = nel_r * DiskEl.n_r
disk_elements.append(aux_DiskEl)
for Brg_SealEl in brg_seal_data:
aux_Brg_SealEl = deepcopy(Brg_SealEl)
aux_Brg_SealEl.n = nel_r * Brg_SealEl.n
aux_Brg_SealEl.n_l = nel_r * Brg_SealEl.n_l
aux_Brg_SealEl.n_r = nel_r * Brg_SealEl.n_r
bearing_elements.append(aux_Brg_SealEl)
regions.append(disk_elements)
regions.append(bearing_elements)
return regions
regions = rotor_regions(nel_r)
shaft_elements = regions[0]
disk_elements = regions[1]
bearing_elements = regions[2]
return cls(
shaft_elements,
disk_elements,
bearing_elements,
min_w=min_w,
max_w=max_w,
rated_w=rated_w,
tag=tag,
)
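# Worked example of the diameter interpolation used in rotor_regions above
# (hypothetical numbers): a conical region with odl = 0.05 m, odr = 0.09 m
# and leng = 0.4 m split into nel_r = 4 elements of length le = 0.1 m gives,
# for element j,
#     odl_j = (odr - odl) * j * le / leng + odl
# i.e. left outer diameters 0.05, 0.06, 0.07, 0.08 m for j = 0..3, and right
# diameters one step further (0.06 ... 0.09 m), so consecutive elements share
# their stations.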
class CoAxialRotor(Rotor):
r"""A rotor object.
This class will create a system of co-axial rotors with the shaft,
disk, bearing and seal elements provided.
Parameters
----------
shafts : list of lists
Each list of shaft elements builds a different shaft. The number of
lists sets the number of shafts.
disk_elements : list
List with the disk elements
bearing_elements : list
List with the bearing elements
point_mass_elements: list
List with the point mass elements
shaft_start_pos : list
List indicating the initial node position for each shaft.
Default is zero for each shaft created.
tag : str
A tag for the rotor
Returns
-------
A rotor object.
Attributes
----------
nodes : list
List of the model's nodes.
nodes_pos : list
List with nodal spatial location.
CG : float
Center of gravity
Examples
--------
>>> import ross as rs
>>> steel = rs.materials.steel
>>> i_d = 0
>>> o_d = 0.05
>>> n = 10
>>> L = [0.25 for _ in range(n)]
>>> axial_shaft = [rs.ShaftElement(l, i_d, o_d, material=steel) for l in L]
>>> i_d = 0.15
>>> o_d = 0.20
>>> n = 6
>>> L = [0.25 for _ in range(n)]
>>> coaxial_shaft = [rs.ShaftElement(l, i_d, o_d, material=steel) for l in L]
A SymbolicConstant specifying the type of solver. Possible values are DIRECT and
ITERATIVE. The default value is DIRECT.
matrixStorage
A SymbolicConstant specifying the type of matrix storage. Possible values are SYMMETRIC,
UNSYMMETRIC, and SOLVER_DEFAULT. The default value is SOLVER_DEFAULT.
amplitude
A SymbolicConstant specifying the amplitude variation for loading magnitudes during the
step. Possible values are STEP and RAMP. The default value is RAMP.
extrapolation
A SymbolicConstant specifying the type of extrapolation to use in determining the
incremental solution for a nonlinear analysis. Possible values are NONE, LINEAR, and
PARABOLIC. The default value is LINEAR.
fullyPlastic
A String specifying the region being monitored for fully plastic behavior. The default
value is an empty string.
noStop
A Boolean specifying whether to accept the solution to an increment after the maximum
number of iterations allowed has been completed, even if the equilibrium tolerances are
not satisfied. The default value is OFF. Warning: You should set *noStop*=ON only in
special cases when you have a thorough understanding of how to interpret the results.
maintainAttributes
A Boolean specifying whether to retain attributes from an existing step with the same
name. The default value is False.
useLongTermSolution
A Boolean specifying whether to obtain the fully relaxed long-term elastic solution with
time-domain viscoelasticity or the long-term elastic-plastic solution for two-layer
viscoplasticity. The default value is OFF.
solutionTechnique
A SymbolicConstant specifying the technique used for solving nonlinear equations.
Possible values are FULL_NEWTON and QUASI_NEWTON. The default value is FULL_NEWTON.
reformKernel
An Int specifying the number of quasi-Newton iterations allowed before the kernel matrix
is reformed. The default value is 8.
convertSDI
A SymbolicConstant specifying whether to force a new iteration if severe discontinuities
occur during an iteration. Possible values are PROPAGATED, CONVERT_SDI_OFF, and
CONVERT_SDI_ON. The default value is PROPAGATED.
adaptiveDampingRatio
A Float specifying the maximum allowable ratio of the stabilization energy to the total
strain energy and can be used only if *stabilizationMethod* is not NONE. The default
value is 0.05.
continueDampingFactors
A Boolean specifying whether this step will carry over the damping factors from the
results of the preceding general step. This parameter must be used in conjunction with
the *adaptiveDampingRatio* parameter. The default value is OFF.
Returns
-------
step: StaticStep
A StaticStep object.
"""
self.steps[name] = step = StaticStep(name, previous, description, timePeriod, nlgeom, stabilizationMethod,
stabilizationMagnitude, adiabatic, timeIncrementationMethod, maxNumInc,
initialInc, minInc, maxInc, matrixSolver, matrixStorage, amplitude,
extrapolation, fullyPlastic, noStop, maintainAttributes,
useLongTermSolution, solutionTechnique, reformKernel, convertSDI,
adaptiveDampingRatio, continueDampingFactors)
return step
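# Minimal usage sketch (assumes an existing model 'Model-1' and the default
# 'Initial' step; illustrative only, not part of the original module):
#
#     from abaqus import mdb
#     from abaqusConstants import ON
#
#     step = mdb.models['Model-1'].StaticStep(
#         name='Step-1', previous='Initial',
#         description='general static loading', nlgeom=ON,
#         initialInc=0.1, minInc=1e-5, maxNumInc=1000)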
def SteadyStateDirectStep(self, name: str, previous: str, frequencyRange: SteadyStateDirectFrequencyArray,
description: str = '', factorization: SymbolicConstant = COMPLEX,
scale: SymbolicConstant = LOGARITHMIC, matrixStorage: SymbolicConstant = SOLVER_DEFAULT,
maintainAttributes: Boolean = False, subdivideUsingEigenfrequencies: Boolean = OFF,
frictionDamping: Boolean = OFF) -> SteadyStateDirectStep:
"""This method creates a SteadyStateDirectStep object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].SteadyStateDirectStep
Parameters
----------
name
A String specifying the repository key.
previous
A String specifying the name of the previous step. The new step appears after this step
in the list of analysis steps.
frequencyRange
A SteadyStateDirectFrequencyArray object.
description
A String specifying a description of the new step. The default value is an empty string.
factorization
A SymbolicConstant specifying whether damping terms are to be ignored so that a real,
rather than a complex, system matrix is factored. Possible values are REAL_ONLY and
COMPLEX. The default value is COMPLEX.
scale
A SymbolicConstant specifying whether a logarithmic or linear scale is used for output.
Possible values are LOGARITHMIC and LINEAR. The default value is LOGARITHMIC.
matrixStorage
A SymbolicConstant specifying the type of matrix storage. Possible values are SYMMETRIC,
UNSYMMETRIC, and SOLVER_DEFAULT. The default value is SOLVER_DEFAULT.
maintainAttributes
A Boolean specifying whether to retain attributes from an existing step with the same
name. The default value is False.
subdivideUsingEigenfrequencies
A Boolean specifying whether to subdivide each frequency range using the
eigenfrequencies of the system. The default value is OFF.
frictionDamping
A Boolean specifying whether to add to the damping matrix contributions due to friction
effects. The default value is OFF.
Returns
-------
step: SteadyStateDirectStep
A SteadyStateDirectStep object.
"""
self.steps[name] = step = SteadyStateDirectStep(name, previous, frequencyRange, description, factorization,
scale, matrixStorage, maintainAttributes,
subdivideUsingEigenfrequencies, frictionDamping)
return step
def SteadyStateModalStep(self, name: str, previous: str, frequencyRange: SteadyStateModalFrequencyArray,
description: str = '', scale: SymbolicConstant = LOGARITHMIC,
directDamping: DirectDamping = DirectDamping(),
compositeDamping: CompositeDamping = CompositeDamping(),
rayleighDamping: RayleighDamping = RayleighDamping(),
structuralDamping: StructuralDamping = StructuralDamping(),
directDampingByFrequency: DirectDampingByFrequency = DirectDampingByFrequency(),
rayleighDampingByFrequency: RayleighDampingByFrequency = RayleighDampingByFrequency(),
structuralDampingByFrequency: StructuralDampingByFrequency = StructuralDampingByFrequency(),
maintainAttributes: Boolean = False,
subdivideUsingEigenfrequencies: Boolean = ON) -> SteadyStateModalStep:
"""This method creates a SteadyStateModalStep object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].SteadyStateModalStep
Parameters
----------
name
A String specifying the repository key.
previous
A String specifying the name of the previous step. The new step appears after this step
in the list of analysis steps.
frequencyRange
A SteadyStateModalFrequencyArray object.
description
A String specifying a description of the new step. The default value is an empty string.
scale
A SymbolicConstant specifying whether a logarithmic or linear scale is used for output.
Possible values are LOGARITHMIC and LINEAR. The default value is LOGARITHMIC.
directDamping
A DirectDamping object.
compositeDamping
A CompositeDamping object.
rayleighDamping
A RayleighDamping object.
structuralDamping
A StructuralDamping object.
directDampingByFrequency
A DirectDampingByFrequency object.
rayleighDampingByFrequency
A RayleighDampingByFrequency object.
structuralDampingByFrequency
A StructuralDampingByFrequency object.
maintainAttributes
A Boolean specifying whether to retain attributes from an existing step with the same
name. The default value is False.
subdivideUsingEigenfrequencies
A Boolean specifying whether to subdivide each frequency range using the
eigenfrequencies of the system. The default value is ON.
Returns
-------
step: SteadyStateModalStep
A SteadyStateModalStep object.
"""
self.steps[name] = step = SteadyStateModalStep(name, previous, frequencyRange, description, scale,
directDamping, compositeDamping, rayleighDamping,
structuralDamping, directDampingByFrequency,
rayleighDampingByFrequency, structuralDampingByFrequency,
maintainAttributes, subdivideUsingEigenfrequencies)
return step
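# Minimal usage sketch (assumes a model 'Model-1' with a preceding frequency
# extraction step 'Frequency-1'; the (lower, upper, nPoints, bias) layout of
# each frequencyRange entry is an assumption, not confirmed by this excerpt):
#
#     step = mdb.models['Model-1'].SteadyStateModalStep(
#         name='SSD-Modal', previous='Frequency-1',
#         frequencyRange=((20.0, 200.0, 20, 3.0),),
#         scale=LOGARITHMIC, subdivideUsingEigenfrequencies=ON)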
def SteadyStateSubspaceStep(self, name: str, previous: str, frequencyRange: SteadyStateSubspaceFrequencyArray,
description: str = '', factorization: SymbolicConstant = COMPLEX,
scale: SymbolicConstant = LOGARITHMIC, matrixStorage: SymbolicConstant = SOLVER_DEFAULT,
maintainAttributes: Boolean = False, subdivideUsingEigenfrequencies: Boolean = ON,
projection: SymbolicConstant = ALL_FREQUENCIES, maxDampingChange: float = 0,
maxStiffnessChange: float = 0,
frictionDamping: Boolean = OFF) -> SteadyStateSubspaceStep:
"""This method creates a SteadyStateSubspaceStep object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].SteadyStateSubspaceStep
Parameters
----------
name
A String specifying the repository key.
previous
A String specifying the name of the previous step. The new step appears after this step
in the list of analysis steps.
frequencyRange
A SteadyStateSubspaceFrequencyArray object.
description
A String specifying a description of the new step. The default value is an empty string.
factorization
A SymbolicConstant specifying whether damping terms are to be ignored so that a real,
rather than a complex, system matrix is factored. Possible values are REAL_ONLY and
COMPLEX. The default value is COMPLEX.
scale
A SymbolicConstant specifying whether a logarithmic or linear scale is used for output.
Possible values are LOGARITHMIC and LINEAR. The default value is LOGARITHMIC.
matrixStorage
A SymbolicConstant specifying the type of matrix storage. Possible values are SYMMETRIC,
UNSYMMETRIC, and SOLVER_DEFAULT. The default value is SOLVER_DEFAULT.
maintainAttributes
A Boolean specifying whether to retain attributes from an existing step with the same
name. The default value is False.
subdivideUsingEigenfrequencies
A Boolean specifying whether to subdivide each frequency range using the
eigenfrequencies of the system. The default value is ON.
projection
A SymbolicConstant specifying how often to perform subspace projections onto the modal
subspace. Possible values are ALL_FREQUENCIES, CONSTANT, EIGENFREQUENCY,
PROPERTY_CHANGE, and RANGE. The default value is ALL_FREQUENCIES.
maxDampingChange
A Float specifying the maximum relative change in damping material properties before a
new projection is to be performed. The default value is 0.1.
maxStiffnessChange
A Float specifying the maximum relative change in stiffness material properties before a
new projection is to be performed. The default value is 0.1.
frictionDamping
A Boolean specifying whether to add to the damping matrix contributions due to friction
effects. The default value is OFF.
Returns
-------
step: SteadyStateSubspaceStep
A SteadyStateSubspaceStep object.
# initial concentration
if call_libsbml(sbml.isSetInitialAmount):
self.distribution_init_concentration = self.model.distribution_init_concentrations.create()
annots.extend(['distribution_init_concentration.id',
'distribution_init_concentration.name',
'distribution_init_concentration.distribution',
'distribution_init_concentration.mean',
'distribution_init_concentration.std',
'distribution_init_concentration.units',
'distribution_init_concentration.identifiers',
'distribution_init_concentration.comments'])
LibSbmlInterface.get_annotations(self, LibSbmlInterface.gen_nested_attr_paths(annots), sbml, objs)
if structure:
for st in self.model.species_types:
if st != self.species_type and st.structure and st.structure.serialize() == structure.serialize():
self.species_type.structure = st.structure
break
class DistributionInitConcentration(obj_tables.Model, SbmlModelMixin):
""" Distribution of the initial concentration of a species
at the beginning of each cell cycle
Attributes:
id (:obj:`str`): identifier equal to `dist-init-conc-{species.id}`
name (:obj:`str`): name
model (:obj:`Model`): model
species (:obj:`Species`): species
distribution (:obj:`pronto.Term`): distribution
mean (:obj:`float`): mean concentration in a population of single cells at the
beginning of each cell cycle
std (:obj:`float`): standard deviation of the concentration in a population of
single cells at the beginning of each cell cycle
units (:obj:`unit_registry.Unit`): units; default units is `M`
identifiers (:obj:`list` of :obj:`Identifier`): identifiers
conclusions (:obj:`list` of :obj:`Conclusion`): conclusions
comments (:obj:`str`): comments
references (:obj:`list` of :obj:`Reference`): references
"""
id = StringAttribute(primary=True, unique=True)
name = StringAttribute()
model = ManyToOneAttribute(Model, related_name='distribution_init_concentrations',
verbose_related_name='Initial species concentrations')
species = OneToOneAttribute(Species, min_related=1, related_name='distribution_init_concentration',
verbose_related_name='Initial species concentration')
distribution = OntoTermAttribute(onto,
namespace='WC',
terms=onto['WC:random_distribution'].subclasses(),
default=onto['WC:normal_distribution'])
mean = FloatAttribute(min=0)
std = FloatAttribute(min=0, verbose_name='Standard deviation')
units = UnitAttribute(unit_registry,
choices=(
unit_registry.parse_units('molecule'),
unit_registry.parse_units('M'),
unit_registry.parse_units('mM'),
unit_registry.parse_units('uM'),
unit_registry.parse_units('nM'),
unit_registry.parse_units('pM'),
unit_registry.parse_units('fM'),
unit_registry.parse_units('aM'),
),
default=unit_registry.parse_units('M'))
identifiers = IdentifierManyToManyAttribute(related_name='distribution_init_concentrations',
verbose_related_name='Initial species concentrations')
evidence = EvidenceManyToManyAttribute('Evidence', related_name='distribution_init_concentrations',
verbose_related_name='Initial species concentrations')
conclusions = ManyToManyAttribute('Conclusion', related_name='distribution_init_concentrations',
verbose_related_name='Initial species concentrations')
comments = CommentAttribute()
references = ManyToManyAttribute('Reference', related_name='distribution_init_concentrations',
verbose_related_name='Initial species concentrations')
class Meta(obj_tables.Model.Meta):
# unique_together = (('species', ), )
attribute_order = ('id', 'name', 'species',
'distribution', 'mean', 'std', 'units',
'identifiers', 'evidence', 'conclusions', 'comments', 'references')
verbose_name = 'Initial species concentration'
frozen_columns = 1
children = {
'submodel': ('identifiers', 'evidence', 'conclusions', 'references'),
'core_model': ('species',
'identifiers', 'evidence', 'conclusions', 'references'),
}
child_attrs = {
'sbml': ('id', 'name', 'model', 'species', 'distribution', 'mean', 'std', 'units', 'identifiers', 'comments'),
'wc_sim': ('id', 'model', 'species', 'distribution', 'mean', 'std', 'units'),
}
def gen_id(self):
""" Generate string representation
Returns:
:obj:`str`: string representation
"""
return 'dist-init-conc-{}'.format(self.species.id)
def validate(self):
""" Check that the distribution of initial concentrations
at the beginning of each cell cycle is valid
* Validate that identifier is equal to `dist-init-conc-{species.id}`
Returns:
:obj:`InvalidObject` or None: `None` if the object is valid,
otherwise return a list of errors as an instance of `InvalidObject`
"""
invalid_obj = super(DistributionInitConcentration, self).validate()
if invalid_obj:
errors = invalid_obj.attributes
else:
errors = []
if self.species and self.id != self.gen_id():
errors.append(InvalidAttribute(self.Meta.attributes['id'],
['Id must be {}'.format(self.gen_id())]))
if errors:
return InvalidObject(self, errors)
return None
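# Illustrative sketch of the id convention enforced by validate() above
# (hypothetical species 'h2o[c]'; not part of the original module):
#
#     conc = model.distribution_init_concentrations.create(
#         species=h2o_c, mean=55.0,
#         units=unit_registry.parse_units('M'))
#     conc.id = conc.gen_id()           # 'dist-init-conc-h2o[c]'
#     assert conc.validate() is None    # any other id makes validate() return an InvalidObject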
class ObservableExpression(obj_tables.Model, Expression, SbmlModelMixin):
""" A mathematical expression of Observables and Species
The expression used by a `Observable`.
Attributes:
expression (:obj:`str`): mathematical expression for an Observable
_parsed_expression (:obj:`ParsedExpression`): an analyzed `expression`; not an `obj_tables.Model`
species (:obj:`list` of :obj:`Species`): Species used by this Observable expression
observables (:obj:`list` of :obj:`Observable`): other Observables used by this Observable expression
Related attributes:
* observable (:obj:`Observable`): observable
"""
expression = LongStringAttribute(primary=True, unique=True, default='')
species = ManyToManyAttribute(Species, related_name='observable_expressions')
observables = ManyToManyAttribute('Observable', related_name='observable_expressions')
class Meta(obj_tables.Model.Meta, Expression.Meta):
table_format = TableFormat.cell
expression_term_models = ('Species', 'Observable')
expression_is_linear = True
expression_unit_registry = unit_registry
children = {
'submodel': ('species', 'observables'),
'core_model': ('species', 'observables'),
}
child_attrs = {
'sbml': ('expression', 'species', 'observables'),
'wc_sim': ('expression', 'species', 'observables'),
}
def serialize(self):
""" Generate string representation
Returns:
:obj:`str`: string representation
"""
return Expression.serialize(self)
@classmethod
def deserialize(cls, value, objects):
""" Deserialize value
Args:
value (:obj:`str`): String representation
objects (:obj:`dict`): dictionary of objects, grouped by model
Returns:
:obj:`tuple` of :obj:`ObservableExpression`, `InvalidAttribute` or `None`:
tuple of cleaned value and cleaning error
"""
return Expression.deserialize(cls, value, objects)
def validate(self):
""" Check that the observable is valid
* Check that the expression is a linear function
Returns:
:obj:`InvalidObject` or None: `None` if the object is valid,
otherwise return a list of errors as an instance of `InvalidObject`
"""
return Expression.validate(self, self.observable)
def merge_attrs(self, other, other_objs_in_self, self_objs_in_other):
""" Merge attributes of two objects
Args:
other (:obj:`obj_tables.Model`): other model
other_objs_in_self (:obj:`dict`): dictionary that maps instances of objects in another model to objects
in a model
self_objs_in_other (:obj:`dict`): dictionary that maps instances of objects in a model to objects
in another model
"""
super(ObservableExpression, self).merge_attrs(other, other_objs_in_self, self_objs_in_other)
Expression.merge_attrs(self, other, other_objs_in_self, self_objs_in_other)
class Observable(obj_tables.Model, SbmlAssignmentRuleMixin):
""" Observable: a linear function of other Observbles and Species
Attributes:
id (:obj:`str`): unique id
name (:obj:`str`): name
model (:obj:`Model`): model
expression (:obj:`ObservableExpression`): mathematical expression for an Observable
units (:obj:`unit_registry.Unit`): units of expression
identifiers (:obj:`list` of :obj:`Identifier`): identifiers
conclusions (:obj:`list` of :obj:`Conclusion`): conclusions
comments (:obj:`str`): comments
references (:obj:`list` of :obj:`Reference`): references
Related attributes:
* observable_expressions (:obj:`list` of :obj:`ObservableExpression`): observable expressions
* function_expressions (:obj:`list` of :obj:`FunctionExpression`): function expressions
* rate_law_expressions (:obj:`list` of :obj:`RateLawExpression`): rate law expressions
* stop_condition_expressions (:obj:`list` of :obj:`StopConditionExpression`): stop condition expressions
"""
id = SlugAttribute()
name = StringAttribute()
model = ManyToOneAttribute(Model, related_name='observables')
expression = OneToOneExpressionAttribute(ObservableExpression, related_name='observable',
min_related=1, min_related_rev=1)
units = UnitAttribute(unit_registry,
choices=(unit_registry.parse_units('molecule'),),
default=unit_registry.parse_units('molecule'))
identifiers = IdentifierManyToManyAttribute(related_name='observables')
evidence = EvidenceManyToManyAttribute('Evidence', related_name='observables')
conclusions = ManyToManyAttribute('Conclusion', related_name='observables')
comments = CommentAttribute()
references = ManyToManyAttribute('Reference', related_name='observables')
class Meta(obj_tables.Model.Meta, ExpressionExpressionTermMeta):
attribute_order = ('id', 'name', 'expression', 'units',
'identifiers', 'evidence', 'conclusions', 'comments', 'references')
expression_term_model = ObservableExpression
expression_term_units = 'units'
children = {
'submodel': ('expression', 'identifiers', 'evidence', 'conclusions', 'references'),
'core_model': ('expression', 'identifiers', 'evidence', 'conclusions', 'references'),
}
child_attrs = {
'sbml': ('id', 'name', 'model', 'expression', 'units', 'identifiers', 'comments'),
'wc_sim': ('id', 'model', 'expression', 'units'),
}
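# Illustrative sketch (hypothetical identifiers; not part of the original
# module): an observable is a linear combination of Species and other
# Observables, deserialized from its string form via ObservableExpression.
#
#     objects = {Species: {'atp[c]': atp_c, 'adp[c]': adp_c}, Observable: {}}
#     expr, error = ObservableExpression.deserialize('2 * atp[c] + adp[c]', objects)
#     assert error is None
#     obs = model.observables.create(id='adenine_pool', expression=expr,
#                                    units=unit_registry.parse_units('molecule'))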
class FunctionExpression(obj_tables.Model, Expression, SbmlModelMixin):
""" A mathematical expression of Functions, Observbles, Parameters and Python functions
The expression used by a :obj:`Function`.
Attributes:
expression (:obj:`str`): mathematical expression for a Function
_parsed_expression (:obj:`ParsedExpression`): an analyzed `expression`; not an `obj_tables.Model`
species (:obj:`list` of :obj:`Species`): Species used by this function expression
observables (:obj:`list` of :obj:`Observable`): Observables used by this function expression
parameters (:obj:`list` of :obj:`ParameterRenamed`): Parameters used by this function expression
functions (:obj:`list` of :obj:`Function`): other Functions used by this function expression
compartments (:obj:`list` of :obj:`Compartment`): Compartments used by this stop condition expression
Related attributes:
* function (:obj:`Function`): function
"""
expression = LongStringAttribute(primary=True, unique=True, default='')
parameters = ManyToManyAttribute('ParameterRenamed', related_name='function_expressions')
species = ManyToManyAttribute(Species, related_name='function_expressions')
observables = ManyToManyAttribute(Observable, related_name='function_expressions')
functions = ManyToManyAttribute('Function', related_name='function_expressions')
compartments = ManyToManyAttribute(Compartment, related_name='function_expressions')
class Meta(obj_tables.Model.Meta, Expression.Meta):
table_format = TableFormat.cell
expression_term_models = ('ParameterRenamed', 'Species', 'Observable', 'Function', 'Compartment')
expression_unit_registry = unit_registry
children = {
'submodel': ('parameters', 'species', 'observables', 'functions', 'compartments'),
'core_model': ('parameters', 'species', 'observables', 'functions', 'compartments'),
}
child_attrs = {
'sbml': ('expression', 'parameters', 'species', 'observables', 'functions', 'compartments'),
'wc_sim': ('expression', 'parameters', 'species', 'observables', 'functions', 'compartments'),
}
def serialize(self):
""" Generate string representation
Returns:
:obj:`str`: string representation
"""
return Expression.serialize(self)
@classmethod
def deserialize(cls, value, objects):
""" Deserialize value
Args:
value (:obj:`str`): String representation
objects (:obj:`dict`): dictionary of objects, grouped by model
Returns:
:obj:`tuple` of :obj:`FunctionExpression`, `InvalidAttribute` or `None`: tuple of cleaned value
and cleaning error
"""
return Expression.deserialize(cls, value, objects)
def validate(self):
""" Check that the function is valid
* Check that the expression is a valid Python function
Returns:
:obj:`InvalidObject` or None: `None` if the object is valid,
otherwise return a list of errors as an instance of `InvalidObject`
"""
return Expression.validate(self, self.function)
def merge_attrs(self, other, other_objs_in_self, self_objs_in_other):
""" Merge attributes of two objects
Args:
other (:obj:`obj_tables.Model`): other model
other_objs_in_self (:obj:`dict`): dictionary that maps instances of objects in another model to objects
in a model
self_objs_in_other (:obj:`dict`): dictionary that maps instances of objects in a model to objects
in another model
"""
super(FunctionExpression, self).merge_attrs(other, other_objs_in_self, self_objs_in_other)
Expression.merge_attrs(self, other, other_objs_in_self, self_objs_in_other)
class Function(obj_tables.Model, SbmlAssignmentRuleMixin):
""" Function: a mathematical expression of Functions, Observbles, Parameters and Python functions
Attributes:
id (:obj:`str`): unique id
name (:obj:`str`): name
model (:obj:`Model`): model
expression (:obj:`FunctionExpression`): mathematical expression for a Function
units (:obj:`unit_registry.Unit`): units
identifiers (:obj:`list` of :obj:`Identifier`): identifiers
conclusions (:obj:`list` of :obj:`Conclusion`): conclusions
comments (:obj:`str`): comments
references (:obj:`list` of :obj:`Reference`): references
Related attributes:
* function_expressions (:obj:`list` of :obj:`FunctionExpression`): function expressions
* rate_law_expressions (:obj:`list` of :obj:`RateLawExpression`): rate law expressions
* stop_condition_expressions (:obj:`list` of :obj:`StopConditionExpression`): stop condition expressions
"""
id = SlugAttribute()
name = StringAttribute()
model = ManyToOneAttribute(Model, related_name='functions')
expression = OneToOneExpressionAttribute(FunctionExpression, related_name='function',
min_related=1, min_related_rev=1)
units = UnitAttribute(unit_registry)
identifiers = IdentifierManyToManyAttribute(related_name='functions')
evidence = EvidenceManyToManyAttribute('Evidence', related_name='functions')
conclusions = ManyToManyAttribute('Conclusion', related_name='functions')
comments = CommentAttribute()
references = ManyToManyAttribute('Reference', related_name='functions')
class Meta(obj_tables.Model.Meta, ExpressionExpressionTermMeta):
attribute_order = ('id', 'name', 'expression', 'units',
'identifiers', 'evidence', 'conclusions', 'comments', 'references')
expression_term_model = FunctionExpression
from manim import *
class cir(Scene):
def construct(self):
circle = Circle(radius = 1, color = BLUE, fill_opacity = 0.5)
tex= Text("now we have to discuss circle")
tex.to_corner(UP+LEFT)
self.play(Write(tex))
self.wait(2)
self.add(circle)
self.wait(2)
self.remove(circle)
self.wait(2)
# p1= circle.point_at_angle(90*DEGREES)
# s1= Square(side_length=0.25).move_to(p1)
# self.add(s1)
# self.wait(2)
triangle = Triangle()
cirg = Circle().surround(triangle, buffer_factor=2)
goup1= Group(cirg, triangle)
self.add(cirg)
self.add(goup1)
self.wait(2)
line = Line()
circle2 = Circle().surround(line, buffer_factor=3)
group2 = Group(line, circle2)
group4 = Group(goup1, group2).arrange(buff =1)
self.add(group4)
self.wait(2)
"""
class MovingAngle(Scene):
def construct(self):
rotation_center = LEFT
theta_tracker = ValueTracker(110)
line1 = Line(LEFT, RIGHT)
line_moving = Line(LEFT, RIGHT)
line_ref = line_moving.copy()
line_moving.rotate(
theta_tracker.get_value() * DEGREES, about_point=rotation_center
)
a = Angle(line1, line_moving, radius=0.5, other_angle=False)
te = MathTex(r"\theta").move_to(
Angle(
line1, line_moving, radius=0.5 + 3 * SMALL_BUFF, other_angle=False
).point_from_proportion(0.5)
)
self.add(line1, line_moving, a, te)
self.wait()
line_moving.add_updater(
lambda x: x.become(line_ref.copy()).rotate(
theta_tracker.get_value() * DEGREES, about_point=rotation_center
)
)
a.add_updater(
lambda x: x.become(Angle(line1, line_moving, radius=0.5, other_angle=False))
)
te.add_updater(
lambda x: x.move_to(
Angle(
line1, line_moving, radius=0.5 + 3 * SMALL_BUFF, other_angle=False
).point_from_proportion(0.5)
)
)
self.play(theta_tracker.animate.set_value(40))
self.play(theta_tracker.animate.increment_value(140))
self.play(te.animate.set_color(RED), run_time=0.5)
self.play(theta_tracker.animate.set_value(350))
class ValueTrackerExample(Scene):
def construct(self):
number_line = NumberLine()
pointer = Vector(DOWN)
label = MathTex("x").add_updater(lambda m: m.next_to(pointer, UP))
pointer_value = ValueTracker(0)
pointer.add_updater(
lambda m: m.next_to(
number_line.n2p(pointer_value.get_value()),
UP
)
)
self.add(number_line, pointer,label)
self.play(pointer_value.animate.set_value(5)),
self.wait()
self.play(pointer_value.animate.set_value(3))
class text(Scene):
def construct(self):
text0 = Text('hello world', slant = ITALIC)
text1 = Text('hello world', t2s={'world':ITALIC})
text2 = Text('hello world', weight = BOLD)
text3 = Text('hello world', t2w = {'world':BOLD})
list_of_content = Tex(
'$\\bullet$ a \\\\',
'$\\bullet$ b \\\\',
'$\\bullet$ c \\\\'
)
list2 =BulletedList(
'hello musa',
'hello world',
'bangladesh', dot_scale_factor=7
)
# self.add(list_of_content)
self.add(list2)
# for i,mobj in enumerate(self.mobjects):
# mobj.shift(DOWN*(i-1))
# self.add(list1)
class text2(Scene):
def construct(self):
text1 = Text('Google', t2c={'[:1]': '#3174f0',
'[1:2]': '#e53125',
'[2:3]': '#fbb003',
'[3:4]': '#3174f0',
'[5:]': '#e53125'},
size = 1.2).scale(3)
self.add(text1)
class TextAlignment(Scene):
def construct(self):
title = Text("K-means", color=WHITE)
title.scale_in_place(1)
self.add(title.to_edge(UP))
t1 = Text("1. Measuring").set_color(WHITE)
t1.next_to(ORIGIN, direction=ORIGIN, aligned_edge=LEFT)
t2 = Text("2. Clustering").set_color(WHITE)
t2.next_to(t1, direction=DOWN, aligned_edge=LEFT)
t3 = Text("3. Regression").set_color(WHITE)
t3.next_to(t2, direction=DOWN, aligned_edge=LEFT)
t4 = Text("4. Prediction").set_color(WHITE)
t4.next_to(t3, direction=DOWN, aligned_edge=LEFT)
x = VGroup(t1, t2, t3, t4).scale_in_place(0.7)
x.set_opacity(0.5)
x.submobjects[1].set_opacity(1)
self.add(x)
class test(Scene):
def construct(self):
self.embed()
class MatrixExamples(Scene):
def construct(self):
m0 = Matrix([["2", "0"], ["-1", "1"]])
m1 = Matrix([["1", "0"], ["0", "1"]],
left_bracket="\\big(",
right_bracket="\\big)")
m2 = DecimalMatrix(
[["3.456", "2.122"], ["33.2244", "12.33"]],
element_to_mobject_config={"num_decimal_places": 2},
left_bracket="\\{",
right_bracket="\\}")
self.add(m0.shift(LEFT - (3, 0, 0)))
self.add(m1)
self.add(m2.shift(RIGHT + (3, 0, 0)))
class pythag(Scene):
def construct(self):
left_square, right_square = Square(), Square()
VGroup(left_square, right_square)\
.scale(2)\
.arrange_submobjects(RIGHT, buff = 2)
dots = [
left_square.point_from_proportion(i * 1/4 + 1/16)
for i in range(4)
]
dots_corners = [
left_square.point_from_proportion(i * 1/4)
for i in range(4)
]
grid = NumberPlane()
triangles = VGroup(*[
Polygon(
dots[i],
dots_corners[i],
dots[i-1],
stroke_width=0,
fill_opacity=0.7
)
for i in range(4)
])
# pe = Dot(dots[0])
self.add(grid)
sq = Square()
# self.add(sq)
self.add(left_square)
self.add(right_square)
# self.remove(sq)
# self.play(ShowCreation(Polygon([0,0,0],[3,0,0],[0,3,0],stroke_width=0.5,fill_opacity=0.3)), run_time = 5)
# for i in range(4):
# self.add(Dot(dots[i]))
# self.add(Dot(dots_corners[i]))
# self.play(ShowCreation(triangles), run_time = 3)
dots2 = [
right_square.point_from_proportion(i * 1/4 + j * 1/16)
for i,j in zip(range(4),[1,3,3,1])
]
dots_corners2 = [
right_square.point_from_proportion(i * 1/4)
for i in range(4)
]
middle = np.array([
dots2[0][0],
dots2[1][1],
0
])
for i in range(4):
self.add(Dot(dots2[i], color = GREEN))
self.wait()
self.add(Dot(dots_corners2[i]))
self.wait()
self.add(Dot(middle, color = RED))
self.wait()
all_rectangles = VGroup(*[
Polygon(
dots_corners2[i],
dots2[i],
middle,
dots2[i-1],
)
for i in range(4)
])
rectangles = all_rectangles[0::2]
# for i in range(len(rectangles)):
# self.add(rectangles[i])
# self.wait(2)
squares = all_rectangles[1::2]
total_points = 4
rect_dot = [
[
rectangles[i].points[total_points*j]
for j in range(4)
]
for i in range(2)
]
for j in rect_dot:
for i in [0,1,2,3]:
self.add(
Dot(j[i], color = YELLOW)
)
self.wait()
triangles2 = VGroup(*[
Polygon(
rect[i+1],
rect[i],
rect[i-1],
fill_opacity=0.7
)
for rect in rect_dot
for i in [0,2]
])
self.play(ShowCreation(triangles2))
theorem = TexMobject("c^2","=","a^2","+","b^2",color=BLUE).to_edge(DOWN)
parts_theorem = VGroup(
TexMobject("a^2").move_to(left_square),
TexMobject("b^2").move_to(squares[0]),
TexMobject("c^2").move_to(squares[1])
)
# self.play(ShowCreation(parts_theorem), run_time = 3)
#print(len(triangles2))
self.play(
*list(map(
DrawBorderThenFill,
[left_square,right_square,triangles.copy()
])), run_time = 20
)
self.play(
*[
ApplyMethod(
triangles[i].move_to,
triangles2[i].get_center()
)
for i in range(len(triangles))
], run_time = 30
)
self.play(
Rotate(triangles[1],-PI/2),
Rotate(triangles[2],PI/2),
)
self.play(
ShowCreation(squares),
Write(parts_theorem)
)
#"""
"""
class BoxAnimation(Scene):
def construct(self):
#Set objects
box=Box()
note=NoteBox()
label=TexMobject("A",color=BLACK)
#Set properties
note.set_height(label.get_height()*2)
note.move_to(box)
label.move_to(note)
self.play(DrawBorderThenFill(box))
self.wait()
self.play(FadeIn(note))
note.add_updater(lambda d: d.move_to(box.box_center()))
self.play(Write(label))
label.add_updater(lambda d: d.move_to(note))
self.play(box.shift,DOWN*3+LEFT*2,path_arc=PI/4)
self.wait()
self.play(open_box(box))
self.play(box.shift,UP*4.5+RIGHT*4)
self.wait()
self.play(close_box(box))
self.wait()
class OpeningManim(Scene):
def construct(self):
title = Text("")
basel = MathTex(r"\sum_{n=1}^\infty \frac{1}{n^2} = \frac{\pi^2}{6}")
VGroup(title, basel).arrange(DOWN)
self.play(
Write(title),
FadeInFrom(basel, UP),
)
self.wait()
transform_title = Tex("That was a transform")
transform_title.to_corner(UP + LEFT)
self.play(
Transform(title, transform_title),
LaggedStart(*[FadeOutAndShift(obj, direction=DOWN) for obj in basel]),
)
self.wait()
grid = NumberPlane()
grid_title = Tex("This is a grid")
grid_title.scale(1.5)
grid_title.move_to(transform_title)
self.add(grid, grid_title) # Make sure title is on top of grid
self.play(
FadeOut(title),
FadeInFrom(grid_title, direction=DOWN),
ShowCreation(grid, run_time=3, lag_ratio=0.1),
)
self.wait()
grid_transform_title = Tex(
r"That was a non-linear function \\ applied to the grid"
)
grid_transform_title.move_to(grid_title, UL)
grid.prepare_for_nonlinear_transform()
self.play(
grid.animate.apply_function(
lambda p: p
+ np.array(
[
np.sin(p[1]),
np.sin(p[0]),
0,
]
)
),
run_time=3,
)
self.wait()
self.play(Transform(grid_title, grid_transform_title))
self.wait()
class NumericalMatrixMultiplication(Scene):
CONFIG = {
"left_matrix": [[1, 2], [3, 4]],
"right_matrix": [[5, 6], [7, 8]],
"use_parens": True,
}
def construct(self):
left_matrix = [[1, 2], [3, 4]]
right_matrix = [[5, 6], [7, 8]]
use_parens = True
left_string_matrix, right_string_matrix = [
np.array(matrix)
for matrix in (left_matrix, right_matrix)
]
if right_string_matrix.shape[0] != left_string_matrix.shape[1]:
raise Exception("Incompatible shapes for matrix multiplication")
left = Matrix(left_string_matrix)
col = left.get_columns()
col1 = Matrix(col)
right = Matrix(right_string_matrix).shift(2*DOWN)
# kkk = matrix_to_mobject(left)
self.add(col1)
self.wait(4)
# result = self.get_result_matrix(
# left_string_matrix, right_string_matrix
# )
self.play(ShowCreation(left))
self.wait(2)
# self.organize_matrices(left, right, result)
# self.animate_product(left, right, result)
self.play(Transform(left, right), run_time = 10)
VMobject
"""
"""
def get_result_matrix(self, left, right):
(m, k), n = left.shape, right.shape[1]
mob_matrix = np.array([VGroup()]).repeat(m * n).reshape((m, n))
for a in range(m):
for b in range(n):
template = "(%s)(%s)" if self.use_parens else "%s%s"
parts = [
prefix + template % (left[a][c], right[c][b])
for c in range(k)
for prefix in ["" if c == 0 else "+"]
]
mob_matrix[a][b] = Tex(parts, next_to_buff=0.1)
return Matrix(mob_matrix)
def add_lines(self, left, right):
line_kwargs = {
"color": BLUE,
"stroke_width": 2,
}
left_rows = [
VGroup(*row) for row in left.get_mob_matrix()
]
h_lines = VGroup()
for row in left_rows[:-1]:
h_line = Line(row.get_left(), row.get_right(), **line_kwargs)
h_line.next_to(row, DOWN, buff=left.v_buff / 2.)
h_lines.add(h_line)
right_cols = [
VGroup(*col) for col in np.transpose(right.get_mob_matrix())
]
v_lines = VGroup()
for col in right_cols[:-1]:
v_line = Line(col.get_top(), col.get_bottom(), **line_kwargs)
v_line.next_to(col, RIGHT, buff=right.h_buff / 2.)
v_lines.add(v_line)
self.play(ShowCreation(h_lines))
self.play(ShowCreation(v_lines))
self.wait()
self.show_frame()
def organize_matrices(self, left, right, result):
equals = Tex("=")
everything = VGroup(left, right, equals, result)
everything.arrange()
everything.set_width(FRAME_WIDTH - 1)
self.add(everything)
def animate_product(self, left, right, result):
l_matrix = left.get_mob_matrix()
r_matrix = right.get_mob_matrix()
result_matrix = result.get_mob_matrix()
circle = Circle(
radius=l_matrix[0][0].get_height(),
color=GREEN
)
circles = VGroup(*[
entry.get_point_mobject()
for entry in (l_matrix[0][0], r_matrix[0][0])
])
(m, k), n = l_matrix.shape, r_matrix.shape[1]
for mob in result_matrix.flatten():
mob.set_color(BLACK)
lagging_anims = []
for a in range(m):
for b in range(n):
for c in range(k):
l_matrix[a][c].set_color(YELLOW)
r_matrix[c][b].set_color(YELLOW)
for c in range(k):
start_parts = VGroup(
l_matrix[a][c].copy(),
r_matrix[c][b].copy()
)
result_entry = result_matrix[a][b].split()[c]
new_circles = VGroup(*[
circle.copy().shift(part.get_center())
for part in start_parts.split()
])
self.play(Transform(circles, new_circles))
self.play(
Transform(
start_parts,
result_entry.copy().set_color(YELLOW),
path_arc=-np.pi / 2,
lag_ratio=0,
),
*lagging_anims
)
result_entry.set_color(YELLOW)
self.remove(start_parts)
lagging_anims = [
ApplyMethod(result_entry.set_color, WHITE)
]
for c in range(k):
l_matrix[a][c].set_color(WHITE)
r_matrix[c][b].set_color(WHITE)
self.play(FadeOut(circles), *lagging_anims)
self.wait()
"""
"""
class OpeningQuote(Scene):
def construct(self):
words = TexText("``The introduction of numbers as \\\\ coordinates is an act of violence.''", t2c = {"coordinates": GREEN})
words.to_edge(UP)
for mob in words.submobjects[27:27+11]:
mob.set_color(GREEN)
author = TexText("-<NAME>")
author.set_color(YELLOW)
author.next_to(words, DOWN, buff = 0.5)
self.play(FadeIn(words))
self.wait(1)
self.play(Write(author, run_time = 4))
self.wait()
def construct(self):
plane = NumberPlane()
labels = VGroup(*plane.get_axis_labels())
vector = Vector(["1","2"])
self.add(vector)
self.wait(5)
coordinates = vector_coordinate_label(vector)
symbol = Tex("\\vec{\\textbf{v}}")
        symbol.shift(0.5*(RIGHT+UP))
self.play(ShowCreation(
plane, lag_ratio = 1, run_time= 3
))
"""
class NewYearPost(MovingCameraScene):
def construct(self):
self.camera_frame.move_to(3 * UP)
text = MathTex(
r" s(t) &=\left( \begin{array}{c} "
r"x(t)"
r"\\ y(t)"
r"\end{array} \right)"
r"\\ &=\left( \begin{array}{c} "
r"v_0 t \cos(\theta)"
r"\\ v_0 t \sin(\theta) - \frac{1}{2}gt^2"
r"\end{array} \right)"
)
text.to_corner(DL).shift(3 * UP)
def func(t):
v0 = 10
theta = 0.85 * PI / 2
g = 9.81
return np.array(
                (v0 * t * np.cos(theta),
                 v0 * t * np.sin(theta) - 0.5 * g * t ** 2,
                 0)
            )
# -*- coding: utf-8 -*-
"""Handling of bv_maker configuration (bv_maker.cfg)."""
from __future__ import absolute_import, division
from __future__ import print_function, unicode_literals
import glob
from optparse import OptionParser
import os
import shlex
from socket import gethostname # for use in eval()'d expressions
import sys
import traceback
import six
from six.moves import reload_module
import brainvisa.maker.brainvisa_projects as brainvisa_projects
import brainvisa.maker.components_definition
from brainvisa.maker.environment import normalize_path
from brainvisa.maker.environment import replace_vars
from brainvisa.maker.environment import VarReplacementType
from brainvisa.maker.utils import global_installer_variables
from brainvisa.maker.version import version as brainvisa_cmake_version
default_subprocess_timeout = 3600 * 6 # default subprocess timeout is 6 hours
def check_filter_condition(filters):
# for now, use python eval()
expression = ' '.join(filters)
# replace %(var)s vars
vars = global_installer_variables()
expression = expression % vars
try:
res = bool(eval(expression))
except Exception:
traceback.print_exc()
raise
return res
class GlobalConfiguration(object):
def __init__(self, argv):
usage = '''%prog [options] [ command [command options] ]...
This program is for the management of source retrieval, configuration and compilation of BrainVISA projects.
In order to work, the commands svn and svnadmin must be installed on your system. On some Linux systems they are in two separate packages (e.g. subversion and subversion-tools).
Commands:
* info: Just output info about configured components.
* sources: Create or update selected source directories from Subversion
repository.
* status: Display a summary of the status of all source repositories.
* configure: Create and configure selected build directories with CMake.
* build: compile all selected build directories.
* doc: Generate documentation (sphinx, doxygen, docbook, epydoc).
* testref: Execute tests in a special mode to generate machine-specific
reference files (this is needed by some tests).
* test: Execute tests using ctest.
* pack: Generate binary packages.
* install_pack: Install binary packages.
* testref_pack: Create the machine-specific reference files for tests in
installed binary package.
* test_pack: Run tests in installed binary packages.
* publish_pack: Publish binary packages.
To get help for a specific command, use -h option of the command. Example: "%prog build -h".
To get help on how to configure and write a bv_maker configuration file, see:
http://brainvisa.info/brainvisa-cmake/compile_existing.html
config file syntax:
http://brainvisa.info/brainvisa-cmake/configuration.html
and more generally:
http://brainvisa.info/brainvisa-cmake/
'''
defaultConfigurationFile = os.environ.get('BRAINVISA_BVMAKER_CFG')
if defaultConfigurationFile is None:
defaultConfigurationFile = os.path.join(
os.environ['USERPROFILE' if sys.platform.startswith('win')
else 'HOME'], '.brainvisa', 'bv_maker.cfg')
parser = OptionParser(usage=usage)
parser.add_option('-d', '--directory', dest='directories',
help='Restrict actions to a selected directory. May be used several times to process several directories.',
metavar='DIR', action='append', default=[])
parser.add_option('-c', '--config', dest='configuration_file',
help='specify configuration file. Default ="' +
defaultConfigurationFile + '"',
metavar='CONFIG', default=None)
parser.add_option('-s', '--sources', dest='sources_directories',
help='directory containing sources',
metavar='DIR', action='append', default=[])
parser.add_option('-b', '--build', dest='build_directory',
help='build directory',
metavar='DIR', default=None)
parser.add_option('--username', dest='username',
help='specify user login to use with the svn server',
metavar='USERNAME', default='')
parser.add_option('-e', '--email', action='store_true',
help='Use email notification (if configured in the '
'general section of the configuration file)')
parser.add_option('--def', '--only-if-default', action='store_true',
dest='in_config', default=False,
help='apply only steps which are defined as default '
'steps in the bv_maker.cfg config file. Equivalent '
'to passing --only-if-default to every substep '
'which supports it.')
parser.add_option(
'-v', '--verbose', dest='verbose', action='store_true',
help='show as much information as possible')
parser.add_option(
'--version', dest='version', action='store_true',
help='show bv_maker (brainvisa-cmake) version number')
(options, args) = parser.parse_args(argv)
if args:
raise ValueError('Invalid option: %s' % args[0])
if options.version:
print('bv_maker version:', brainvisa_cmake_version)
sys.exit(0)
packages = []
self.sourcesDirectories = {}
self.buildDirectories = {}
self.packageDirectories = {}
self.publicationDirectories = {}
self.general_section = GeneralSection(self)
self.directories = options.directories
for i in ('configuration_file', 'username', 'verbose'):
setattr(self, i, getattr(options, i))
bd = None # build directory supplied on the command-line
if options.build_directory:
if not options.configuration_file:
cf = os.path.join(options.build_directory, 'bv_maker.cfg')
if os.path.exists(cf):
options.configuration_file = cf
else:
options.configuration_file = defaultConfigurationFile
os.environ['BV_MAKER_BUILD'] = options.build_directory
reload_module(brainvisa.maker.components_definition)
reload_module(brainvisa_projects)
from brainvisa.maker import build
bd = build.BuildDirectory(options.build_directory, self)
for sd in options.sources_directories:
if os.path.exists(os.path.join(sd, 'project_info.cmake')) \
or glob.glob(os.path.join(sd, 'python', '*', 'info.py')) \
or glob.glob(os.path.join(sd, '*', 'info.py')):
bd.addConfigurationLine('directory ' + sd)
else:
bd.addConfigurationLine('brainvisa all * ' + sd)
elif not options.configuration_file:
options.configuration_file = defaultConfigurationFile
if options.configuration_file \
and os.path.exists(options.configuration_file):
with open(options.configuration_file, 'rb') as f:
self.parse_config_file(f, options, extra_build_dir=bd)
if options.verbose:
print('variables initialized')
# store options and args
self.options = options
self.args = args
def parse_config_file(self, f, options, extra_build_dir=None):
"""Read configuration from an file object (opened in binary mode)."""
from brainvisa.maker import build
from brainvisa.maker import sources
lineCount = 0
currentDirectoryObject = None
source_dirs = []
build_dirs = [extra_build_dir] if extra_build_dir else []
package_dirs = []
publication_dirs = []
condition_stack = []
for line in f:
lineCount += 1
line = line.strip()
try:
line = line.decode()
except UnicodeError:
line = line.decode('utf-8')
# skip comments
if not line or line[0] == '#' or line.startswith('//'):
continue
try:
if line[0] == '[':
if line[-1] != ']':
raise SyntaxError()
l = line[1:-1].split(None, 1)
if len(l) != 2 and l[0] not in ('if', 'endif', 'else',
'general'):
raise SyntaxError()
if l[0] == 'if' \
and currentDirectoryObject is not None \
and len(l) >= 2:
if len(condition_stack) != 0 \
and condition_stack[-1] is False:
# if an upstream condition is already false,
# all the subtree is false (but we still have
# to parse it to count if/endif/else
# occurrences in it)
condition_stack.append(False)
else:
try:
condition_stack.append(
check_filter_condition(l[1:]))
except Exception:
print('error in condition, line %d:'
% lineCount, file=sys.stderr)
print(line, file=sys.stderr)
raise
elif l[0] == 'endif' and len(l) == 1:
if len(condition_stack) > 0:
# ends the filtered section
condition_stack.pop()
else:
                            raise SyntaxError('[endif] clause is not preceded '
                                              'by [if] clause.')
elif l[0] == 'else' and len(l) == 1:
if len(condition_stack) > 0:
# inverts the filtered section, only if its
# parent condition is True
if len(condition_stack) == 1 \
or condition_stack[-2]:
condition_stack[-1] \
= not condition_stack[-1]
else:
                            raise SyntaxError('[else] clause is not preceded by '
                                              '[if] clause.')
elif len(condition_stack) == 0 \
or False not in condition_stack:
if self.verbose:
print(' processing line %s:' % str(lineCount),
repr(line))
sys.stdout.flush()
if l[0] == 'source':
currentDirectoryObject = sources.SourceDirectory(
l[1].strip(),
self)
source_dirs.append(currentDirectoryObject)
elif l[0] == 'build':
currentDirectoryObject = build.BuildDirectory(
l[1].strip(),
self)
build_dirs.append(currentDirectoryObject)
elif l[0] == 'virtualenv':
currentDirectoryObject = build.VirtualenvDirectory(
l[1].strip(),
self)
build_dirs.append(currentDirectoryObject)
elif l[0] == 'package':
from brainvisa.maker import installer
currentDirectoryObject = installer.PackageDirectory(
l[1].strip(),
self)
package_dirs.append(currentDirectoryObject)
elif l[0] == 'package_publication':
from brainvisa.maker import installer
currentDirectoryObject = installer.PublicationDirectory(
l[1].strip(),
self)
publication_dirs.append(currentDirectoryObject)
elif l[0] == 'general' and len(l) == 1:
currentDirectoryObject = self.general_section
else:
raise SyntaxError()
elif len(condition_stack) == 0 \
or False not in condition_stack:
if currentDirectoryObject is None:
raise SyntaxError()
if self.verbose:
print(' processing line %s:' % str(lineCount),
repr(line))
sys.stdout.flush()
currentDirectoryObject.addConfigurationLine(line)
except SyntaxError as e:
# FIXME: SyntaxError should be reserved for Python syntax
# errors, use a custom exception instead
                msg = e.args[0] if e.args else ''
if msg == '':
msg = 'Syntax error'
raise SyntaxError('%s in ' % msg + repr(
options.configuration_file) + ' on line '
+ str(lineCount))
if len(condition_stack) > 0:
            raise RuntimeError('some [if] clauses remain unclosed by [endif].')
if options.verbose:
print('configuration file %s parsed'
% repr(options.configuration_file))
for r, sections in ((None, [self.general_section]), \
('sourcesDirectories', source_dirs), \
('buildDirectories', build_dirs), \
('packageDirectories', package_dirs), \
('publicationDirectories', publication_dirs)):
for s in sections:
# Variable initialization is done after parsing of sections
# because it may depend on the complete parsing of other
# sections
if not hasattr(s, '_initialized') or not s._initialized:
s.init_vars()
registry = getattr(self, r, None) if r else None
if registry is not None:
# Register section
registry[s.directory] = s
if isinstance(s, sources.SourceDirectory):
# Parses source configuration
s.parseSourceConfiguration()
if options.verbose:
print(getattr(s, 'directory', 'general'), 'options:')
for o in s._validOptions:
print(' ', o, '=', getattr(s, o, None))
class ConfigVariableParser(object):
_validOptions = set()
_validAdditiveOptions = set()
_path_variables = set()
_variables_with_replacements = set()
_variables_with_env_only_replacements = set()
_variables_with_python_only_replacements = set()
def __new__(cls, *args):
# Initialize the class properties
for prop in cls._validOptions.union(cls._validAdditiveOptions):
cls.property_init(prop)
return super(ConfigVariableParser, cls).__new__(cls)
def __init__(self, *args):
super(ConfigVariableParser, self).__init__(*args)
self._property_info = {}
self._property_recursivity = {}
for prop in self._validOptions.union(self._validAdditiveOptions):
self._property_info[prop] = \
(self.get_replacement_type(prop),
self.is_path(prop))
@classmethod
def property_init(cls, name, doc = None):
from functools import partial
# Declare option as a property
setattr(cls, name,
property(partial(getattr(cls, '_property_get'), name),
partial(getattr(cls, '_property_set'), name),
partial(getattr(cls, '_property_del'), name),
doc if doc else 'property ' + name))
@staticmethod
def _property_has(name, config_parser):
return name in config_parser._property_info
@staticmethod
def _property_set(name, config_parser, value):
setattr(config_parser, '_' + name, value)
#print('val_type is', type(v), 'for value:', value)
@staticmethod
def _property_get_origin(name, config_parser):
        return getattr(config_parser, '_' + name)
@staticmethod
def _property_get(name, config_parser):
info = config_parser._property_info.get(name, None)
if info is None:
            raise RuntimeError('Property %s is not declared' % name)
from __future__ import unicode_literals
import uuid
from django.contrib import auth
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from django.db.models.functions import Concat
from django.utils import six
import django_filters
from django_filters.filterset import FilterSetMetaclass
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.filters import BaseFilterBackend
import taggit
from waldur_core.core import filters as core_filters
from waldur_core.core import models as core_models
from waldur_core.core.filters import BaseExternalFilter, ExternalFilterBackend
from waldur_core.logging.filters import ExternalAlertFilterBackend
from waldur_core.structure import models
from waldur_core.structure import SupportedServices
from waldur_core.structure.managers import filter_queryset_for_user
User = auth.get_user_model()
class ScopeTypeFilterBackend(DjangoFilterBackend):
""" Scope filters:
* ?scope = ``URL``
* ?scope_type = ``string`` (can be list)
"""
content_type_field = 'content_type'
scope_param = 'scope_type'
scope_models = {
'customer': models.Customer,
'service': models.Service,
'project': models.Project,
'service_project_link': models.ServiceProjectLink,
'resource': models.ResourceMixin
}
@classmethod
def get_scope_type(cls, model):
for scope_type, scope_model in cls.scope_models.items():
if issubclass(model, scope_model):
return scope_type
@classmethod
def _get_scope_models(cls, types):
for scope_type, scope_model in cls.scope_models.items():
if scope_type in types:
try:
for submodel in scope_model.get_all_models():
yield submodel
except AttributeError:
yield scope_model
@classmethod
def _get_scope_content_types(cls, types):
return ContentType.objects.get_for_models(*cls._get_scope_models(types)).values()
def filter_queryset(self, request, queryset, view):
if self.scope_param in request.query_params:
content_types = self._get_scope_content_types(request.query_params.getlist(self.scope_param))
return queryset.filter(**{'%s__in' % self.content_type_field: content_types})
return queryset
class GenericRoleFilter(BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
return filter_queryset_for_user(queryset, request.user)
class GenericUserFilter(BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
user_uuid = request.query_params.get('user_uuid')
if not user_uuid:
return queryset
try:
uuid.UUID(user_uuid)
except ValueError:
return queryset.none()
try:
user = User.objects.get(uuid=user_uuid)
except User.DoesNotExist:
return queryset.none()
return filter_queryset_for_user(queryset, user)
class CustomerFilter(django_filters.FilterSet):
name = django_filters.CharFilter(
lookup_expr='icontains',
)
native_name = django_filters.CharFilter(
lookup_expr='icontains',
)
abbreviation = django_filters.CharFilter(
lookup_expr='icontains',
)
contact_details = django_filters.CharFilter(
lookup_expr='icontains',
)
o = django_filters.OrderingFilter(
fields=('name', 'abbreviation', 'contact_details', 'native_name', 'registration_code')
)
class Meta(object):
model = models.Customer
fields = [
'name',
'abbreviation',
'contact_details',
'native_name',
'registration_code',
]
class ExternalCustomerFilterBackend(ExternalFilterBackend):
pass
class ProjectTypeFilter(django_filters.FilterSet):
name = django_filters.CharFilter(lookup_expr='icontains')
class Meta(object):
model = models.ProjectType
fields = ['name']
class ProjectFilter(django_filters.FilterSet):
customer = django_filters.UUIDFilter(
name='customer__uuid',
distinct=True,
)
customer_name = django_filters.CharFilter(
name='customer__name',
distinct=True,
lookup_expr='icontains'
)
customer_native_name = django_filters.CharFilter(
name='customer__native_name',
distinct=True,
lookup_expr='icontains'
)
customer_abbreviation = django_filters.CharFilter(
name='customer__abbreviation',
distinct=True,
lookup_expr='icontains'
)
name = django_filters.CharFilter(lookup_expr='icontains')
description = django_filters.CharFilter(lookup_expr='icontains')
o = django_filters.OrderingFilter(
fields=(
('name', 'name'),
('created', 'created'),
('customer__name', 'customer_name'),
('customer__native_name', 'customer_native_name'),
('customer__abbreviation', 'customer_abbreviation'),
)
)
class Meta(object):
model = models.Project
fields = [
'name',
'customer', 'customer_name', 'customer_native_name', 'customer_abbreviation',
'description',
'created',
]
class CustomerUserFilter(DjangoFilterBackend):
def filter_queryset(self, request, queryset, view):
customer_uuid = request.query_params.get('customer_uuid')
if not customer_uuid:
return queryset
try:
uuid.UUID(customer_uuid)
except ValueError:
return queryset.none()
return queryset.filter(
Q(customerpermission__customer__uuid=customer_uuid,
customerpermission__is_active=True) |
Q(projectpermission__project__customer__uuid=customer_uuid,
projectpermission__is_active=True)
).distinct()
class ProjectUserFilter(DjangoFilterBackend):
def filter_queryset(self, request, queryset, view):
project_uuid = request.query_params.get('project_uuid')
if not project_uuid:
return queryset
try:
uuid.UUID(project_uuid)
except ValueError:
return queryset.none()
return queryset.filter(
projectpermission__project__uuid=project_uuid,
projectpermission__is_active=True
).distinct()
class BaseUserFilter(django_filters.FilterSet):
full_name = django_filters.CharFilter(lookup_expr='icontains')
username = django_filters.CharFilter()
native_name = django_filters.CharFilter(lookup_expr='icontains')
job_title = django_filters.CharFilter(lookup_expr='icontains')
email = django_filters.CharFilter(lookup_expr='icontains')
is_active = django_filters.BooleanFilter()
class Meta(object):
model = User
fields = [
'full_name',
'native_name',
'organization',
'organization_approved',
'email',
'phone_number',
'description',
'job_title',
'username',
'civil_number',
'is_active',
'registration_method',
]
class UserFilter(BaseUserFilter):
o = django_filters.OrderingFilter(
fields=('full_name', 'native_name', 'organization',
'organization_approved', 'email', 'phone_number',
'description', 'job_title', 'username',
'is_active', 'registration_method')
)
class UserConcatenatedNameOrderingBackend(DjangoFilterBackend):
""" Filter user by concatenated full_name + username with ?o=concatenated_name """
def filter_queryset(self, request, queryset, view):
queryset = self._filter_queryset(request, queryset, view)
return BaseUserFilter(request.query_params, queryset=queryset, request=request).qs
def _filter_queryset(self, request, queryset, view):
if 'o' not in request.query_params:
return queryset
if request.query_params['o'] == 'concatenated_name':
order_by = 'concatenated_name'
elif request.query_params['o'] == '-concatenated_name':
order_by = '-concatenated_name'
else:
return queryset
return queryset.annotate(concatenated_name=Concat('full_name', 'username')).order_by(order_by)
class UserPermissionFilter(django_filters.FilterSet):
user = django_filters.UUIDFilter(name='user__uuid')
user_url = core_filters.URLFilter(
view_name='user-detail',
name='user__uuid',
)
username = django_filters.CharFilter(
name='user__username',
lookup_expr='exact',
)
full_name = django_filters.CharFilter(
name='user__full_name',
lookup_expr='icontains',
)
native_name = django_filters.CharFilter(
name='user__native_name',
lookup_expr='icontains',
)
o = django_filters.OrderingFilter(
fields=(
('user__username', 'username'),
('user__full_name', 'full_name'),
('user__native_name', 'native_name'),
('user__email', 'email'),
('expiration_time', 'expiration_time'),
('created', 'created'),
('role', 'role'),
)
)
class ProjectPermissionFilter(UserPermissionFilter):
class Meta(object):
fields = ['role']
model = models.ProjectPermission
customer = django_filters.UUIDFilter(
name='project__customer__uuid',
)
project = django_filters.UUIDFilter(
name='project__uuid',
)
project_url = core_filters.URLFilter(
view_name='project-detail',
name='project__uuid',
)
class CustomerPermissionFilter(UserPermissionFilter):
class Meta(object):
fields = ['role']
model = models.CustomerPermission
customer = django_filters.UUIDFilter(
name='customer__uuid',
)
customer_url = core_filters.URLFilter(
view_name='customer-detail',
name='customer__uuid',
)
class SshKeyFilter(django_filters.FilterSet):
uuid = django_filters.UUIDFilter()
user_uuid = django_filters.UUIDFilter(name='user__uuid')
name = django_filters.CharFilter(lookup_expr='icontains')
o = django_filters.OrderingFilter(fields=('name',))
class Meta(object):
model = core_models.SshPublicKey
fields = [
'name',
'fingerprint',
'uuid',
'user_uuid',
'is_shared',
]
class ServiceTypeFilter(django_filters.Filter):
def filter(self, qs, value):
value = SupportedServices.get_filter_mapping().get(value)
return super(ServiceTypeFilter, self).filter(qs, value)
class ServiceSettingsFilter(django_filters.FilterSet):
name = django_filters.CharFilter(lookup_expr='icontains')
type = ServiceTypeFilter()
state = core_filters.StateFilter()
class Meta(object):
model = models.ServiceSettings
fields = ('name', 'type', 'state', 'shared')
class ServiceSettingsScopeFilterBackend(core_filters.GenericKeyFilterBackend):
def get_related_models(self):
return models.ResourceMixin.get_all_models()
def get_field_name(self):
return 'scope'
class ServiceFilterMetaclass(FilterSetMetaclass):
""" Build a list of supported resource via serializers definition.
See SupportedServices for details.
"""
def __new__(mcs, name, bases, args):
service_filter = super(ServiceFilterMetaclass, mcs).__new__(mcs, name, bases, args)
model = args['Meta'].model
if not model._meta.abstract:
SupportedServices.register_service_filter(args['Meta'].model, service_filter)
return service_filter
class BaseServiceFilter(six.with_metaclass(ServiceFilterMetaclass, django_filters.FilterSet)):
customer = django_filters.UUIDFilter(name='customer__uuid')
name = django_filters.CharFilter(name='settings__name', lookup_expr='icontains')
name_exact = django_filters.CharFilter(name='settings__name', lookup_expr='exact')
project = core_filters.URLFilter(view_name='project-detail', name='projects__uuid', distinct=True)
project_uuid = django_filters.UUIDFilter(name='projects__uuid', distinct=True)
settings = core_filters.URLFilter(view_name='servicesettings-detail', name='settings__uuid', distinct=True)
shared = django_filters.BooleanFilter(name='settings__shared', distinct=True)
type = ServiceTypeFilter(name='settings__type')
tag = django_filters.ModelMultipleChoiceFilter(
name='settings__tags__name',
to_field_name='name',
lookup_expr='in',
queryset=taggit.models.Tag.objects.all(),
)
# rtag - required tag, support for filtration by tags using AND operation
# ?rtag=t1&rtag=t2 - will filter instances that have both t1 and t2.
rtag = django_filters.ModelMultipleChoiceFilter(
name='settings__tags__name',
to_field_name='name',
queryset=taggit.models.Tag.objects.all(),
conjoined=True,
)
class Meta(object):
model = models.Service
fields = ('name', 'name_exact', 'project_uuid', 'customer', 'project', 'settings', 'shared', 'type', 'tag', 'rtag')
class BaseServiceProjectLinkFilter(django_filters.FilterSet):
service_uuid = django_filters.UUIDFilter(name='service__uuid')
customer_uuid = django_filters.UUIDFilter(name='service__customer__uuid')
project_uuid = django_filters.UUIDFilter(name='project__uuid')
project = core_filters.URLFilter(view_name='project-detail', name='project__uuid')
class Meta(object):
model = models.ServiceProjectLink
fields = ()
class ResourceFilterMetaclass(FilterSetMetaclass):
""" Build a list of supported resource via serializers definition.
See SupportedServices for details.
"""
def __new__(cls, name, bases, args):
resource_filter = super(ResourceFilterMetaclass, cls).__new__(cls, name, bases, args)
SupportedServices.register_resource_filter(args['Meta'].model, resource_filter)
return resource_filter
class BaseResourceFilter(six.with_metaclass(ResourceFilterMetaclass,
django_filters.FilterSet)):
def __init__(self, *args, **kwargs):
super(BaseResourceFilter, self).__init__(*args, **kwargs)
self.filters['o'] = django_filters.OrderingFilter(fields=self.ORDERING_FIELDS)
# customer
customer = django_filters.UUIDFilter(name='service_project_link__service__customer__uuid')
customer_uuid = django_filters.UUIDFilter(name='service_project_link__service__customer__uuid')
customer_name = django_filters.CharFilter(
name='service_project_link__service__customer__name', lookup_expr='icontains')
customer_native_name = django_filters.CharFilter(
name='service_project_link__project__customer__native_name', lookup_expr='icontains')
customer_abbreviation = django_filters.CharFilter(
name='service_project_link__project__customer__abbreviation', lookup_expr='icontains')
# project
project = django_filters.UUIDFilter(name='service_project_link__project__uuid')
project_uuid = django_filters.UUIDFilter(name='service_project_link__project__uuid')
project_name = django_filters.CharFilter(name='service_project_link__project__name', lookup_expr='icontains')
# service
service_uuid = django_filters.UUIDFilter(name='service_project_link__service__uuid')
service_name = django_filters.CharFilter(name='service_project_link__service__settings__name', lookup_expr='icontains')
# service settings
service_settings_uuid = django_filters.UUIDFilter(name='service_project_link__service__settings__uuid')
service_settings_name = django_filters.CharFilter(name='service_project_link__service__settings__name',
lookup_expr='icontains')
# resource
name = django_filters.CharFilter(lookup_expr='icontains')
name_exact = django_filters.CharFilter(name='name', lookup_expr='exact')
description = django_filters.CharFilter(lookup_expr='icontains')
state = core_filters.MappedMultipleChoiceFilter(
choices=[(representation, representation) for db_value, representation in core_models.StateMixin.States.CHOICES],
choice_mappings={representation: db_value for db_value, representation in core_models.StateMixin.States.CHOICES},
)
uuid = django_filters.UUIDFilter(lookup_expr='exact')
tag = django_filters.ModelMultipleChoiceFilter(
name='tags__name',
label='tag',
to_field_name='name',
lookup_expr='in',
queryset=taggit.models.Tag.objects.all(),
)
rtag = django_filters.ModelMultipleChoiceFilter(
name='tags__name',
label='rtag',
to_field_name='name',
queryset=taggit.models.Tag.objects.all(),
conjoined=True,
)
ORDERING_FIELDS = (
('name', 'name'),
('state', 'state'),
('service_project_link__project__customer__name', 'customer_name'),
('service_project_link__project__customer__native_name', 'customer_native_name'),
('service_project_link__project__customer__abbreviation', 'customer_abbreviation'),
('service_project_link__project__name', 'project_name'),
('service_project_link__service__settings__name', 'service_name'),
('service_project_link__service__uuid', 'service_uuid'),
('created', 'created'),
)
class Meta(object):
model = models.ResourceMixin
fields = (
# customer
'customer', 'customer_uuid', 'customer_name', 'customer_native_name', 'customer_abbreviation',
# project
'project', 'project_uuid', 'project_name',
# service
'service_uuid', 'service_name',
# service settings
'service_settings_name', 'service_settings_uuid',
# resource
'name', 'name_exact', 'description', 'state', 'uuid', 'tag', 'rtag',
)
class TagsFilter(BaseFilterBackend):
""" Tags ordering. Filtering for complex tags.
Example:
?tag__license-os=centos7 - will filter objects with tag "license-os:centos7".
    The following parameters can be defined on the view:
- tags_filter_db_field - name of tags field in database. Default: tags.
- tags_filter_request_field - name of tags in request. Default: tag.
"""
def filter_queryset(self, request, queryset, view):
self.db_field = getattr(view, 'tags_filter_db_field', 'tags')
self.request_field = getattr(view, 'tags_filter_request_field', 'tag')
queryset = self._filter(request, queryset)
queryset = self._order(request, queryset)
return queryset
def _filter(self, request, queryset):
for key in request.query_params.keys():
item_name = self._get_item_name(key)
if item_name:
value = request.query_params.get(key)
filter_kwargs = {
self.db_field + '__name__startswith': item_name,
self.db_field + '__name__icontains': value,
}
queryset = queryset.filter(**filter_kwargs)
return queryset
def _order(self, request, queryset):
order_by = request.query_params.get('o')
item_name = self._get_item_name(order_by)
if item_name:
filter_kwargs = {self.db_field + '__name__startswith': item_name}
queryset = queryset.filter(**filter_kwargs).order_by(self.db_field + '__name')
return queryset
def _get_item_name(self, key):
prefix = self.request_field + '__'
if key and key.startswith(prefix):
return key[len(prefix):]
class StartTimeFilter(BaseFilterBackend):
"""
In PostgreSQL NULL values come *last* with ascending sort order.
In MySQL NULL values come *first* with ascending sort order.
This filter provides unified sorting for both databases.
"""
def filter_queryset(self, request, queryset, view):
order = request.query_params.get('o', None)
if order == 'start_time':
queryset = queryset.extra(select={
'is_null': 'CASE WHEN start_time IS NULL THEN 0 ELSE 1 END'}) \
.order_by('is_null', 'start_time')
elif order == '-start_time':
queryset = queryset.extra(select={
'is_null': 'CASE WHEN start_time IS NULL THEN 0 ELSE 1 END'}) \
.order_by('-is_null', '-start_time')
return queryset
class BaseServicePropertyFilter(django_filters.FilterSet):
name = django_filters.CharFilter(name='name', lookup_expr='icontains')
name_exact = django_filters.CharFilter(name='name', lookup_expr='exact')
class Meta(object):
fields = ('name', 'name_exact')
class ServicePropertySettingsFilter(BaseServicePropertyFilter):
settings_uuid = django_filters.UUIDFilter(name='settings__uuid')
settings = core_filters.URLFilter(view_name='servicesettings-detail', name='settings__uuid', distinct=True)
class Meta(BaseServicePropertyFilter.Meta):
fields = BaseServicePropertyFilter.Meta.fields + ('settings_uuid', 'settings')
class AggregateFilter(BaseExternalFilter):
"""
Filter by aggregate
"""
    def filter(self,
<gh_stars>1-10
""" Script to change versioning of files (eg. manifest.yml) for executors
[encoders, crafters, indexers, rankers, evaluators, classifiers etc.].
It also adds the required jina version.
Commits the change in the branch and raises a PR for the executor.
Then attempts to automatically merge the PRs
"""
import glob
import os
import sys
import time
import traceback
from typing import List, Optional, Tuple, Dict
import git
import github
import pkg_resources
import requests
import semver
from github import Github, Repository
from github.Issue import Issue
from github.PullRequest import PullRequest
from ruamel.yaml import YAML
WAIT_BETWEEN_PR_CHECKS = 5 * 60
TIME_WAIT_PR_CREATE = 30
FIX_MODULE_TEMPLATE = 'fix module '
COMPARISON_TYPES = ["major", "minor", "patch"]
TAG_IN_ISSUES = os.environ.get('TAG_IN_ISSUES', '')
MODULES_REPO = os.environ.get('MODULES_REPO')
# Use .get()/defaults so the validation below can report a helpful error
# instead of crashing with KeyError/AttributeError when a variable is unset.
GITHUB_TOKEN = os.environ.get('GITHUB_TOKEN')
COMPARISON_LEVEL = os.environ.get('COMPARISON_LEVEL')
TEST_AGAIN = os.getenv('TEST_AGAIN', '').lower() == 'true'
FORCE_RECHECK_PR = os.getenv('FORCE_RECHECK_PR', '').lower() == 'true'
print(f'TEST_AGAIN = {TEST_AGAIN}')
print(f'FORCE_RECHECK_PR = {FORCE_RECHECK_PR}')
if MODULES_REPO is None:
print(f'Error: MODULES_REPO needs to be set. Exiting...')
sys.exit(1)
if GITHUB_TOKEN is None:
print(f'Error: GITHUB_TOKEN needs to be set. Exiting...')
sys.exit(1)
if COMPARISON_LEVEL is None:
print(f'Error: COMPARISON_LEVEL needs to be set. Exiting...')
sys.exit(1)
if COMPARISON_LEVEL not in COMPARISON_TYPES:
print(f'Error: COMPARISON_LEVEL needs to be one of {COMPARISON_TYPES}')
sys.exit(1)
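# Illustrative environment for running this script (values below are made up;
# only the variable names come from the code above):
#   export MODULES_REPO=some-org/jina-hub      # target GitHub repository
#   export GITHUB_TOKEN=<token with PR push access>
#   export COMPARISON_LEVEL=minor              # one of: major, minor, patch
#   export TEST_AGAIN=false
#   export FORCE_RECHECK_PR=false
#   export TAG_IN_ISSUES='@maintainer-handle'  # optional, appended to issue bodies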
GITHUB_API_HEADERS = {
    'Authorization': f'token {GITHUB_TOKEN}',
'Accept': 'application/vnd.github.v3+json'
}
# this one has PR push access
g = Github(GITHUB_TOKEN)
print(f'rate limit status: {g.get_rate_limit()}')
yaml = YAML()
def get_pr_from_gh(pr_name, all_prs):
"""try to obtain existing open PR with name in GH"""
prs = [
pr for pr in all_prs
if pr_name in pr.title
]
if len(prs) > 1:
print(f'Warning: Too many PRs matched query "{pr_name}": {[p.html_url for p in prs]}. Returning first.')
return prs[0]
elif len(prs) == 1:
pr = prs[0]
print(f'Found existing PR for {pr_name}: {pr.html_url}')
return prs[0]
else:
print(f'Couldn\'t retrieve PR for module version and jina version. Will create...')
return None
def check_pr_is_valid(pr, module, module_version, jina_core_version) -> Optional[PullRequest]:
"""Check whether PR is open and valid to be re-checked.
Otherwise, open a new one on the new version to be tested.
Could also be that we force the re-check of a closed PR that matches the version
:param pr: the PR object
:param module: the directory name
:param module_version: version of the module
:param jina_core_version: the new Jina core version to check
:return: PR, if to be re-checked
"""
print(f'PR found for {module} on Jina core v{jina_core_version} ({pr.html_url}) ...', )
if FORCE_RECHECK_PR:
print('Will rename as [old] and try again...')
pr.edit(
title=f'[old] {pr.title}'
)
else:
if pr.state == 'open':
# something must've stopped us from closing it
# make sure we close it
print('PR was open. Will handle now...')
return pr
else:
print('Warning: Module has already been tested. Skipping...')
def create_pr(manifest_path, requirements_path, module, jina_core_version, hub_repo, hub_origin, gh_hub_repo,
all_prs) -> Optional[PullRequest]:
"""for each module with manifest.yml attempts to open a PR for testing specific jina version returns None (if no
need to open new PR), old PR (if found and 'open'), new PR (if versions haven't been tested before)
"""
# noinspection PyTypeChecker
pr = None
timestamp = time.time()
print(f'handling {module}...')
module_version = None
with open(manifest_path) as fp:
info = yaml.load(fp)
module_version = info['version']
# in order to trigger a PR we make a 'dummy' change to the timestamp
info['timestamp'] = timestamp
# means this module version + jina version has been tested before
# NOTE: DO NOT MODIFY THIS AS IT'S NEEDED FOR SEARCHING ON GITHUB
pr_name = build_pr_name(jina_core_version, module, module_version)
pr: PullRequest = get_pr_from_gh(pr_name, all_prs)
if pr and check_pr_is_valid(pr, module, module_version, jina_core_version):
return pr
with open(manifest_path, 'w') as fp:
yaml.dump(info, fp)
if os.path.exists(requirements_path):
new_requirements = []
update = False
with open(requirements_path, 'r') as fp:
requirements = pkg_resources.parse_requirements(fp)
for req in requirements:
if 'jina' in str(req):
update = True
new_requirements.append(f'jina=={jina_core_version}')
else:
new_requirements.append(str(req))
if update:
with open(requirements_path, 'w') as fp:
fp.write('\n'.join(new_requirements))
br_name = ''
try:
print('preparing the branch ...')
br_name = f'chore-{module.lower()}-{module_version}-core-{jina_core_version.replace(".", "-")}'
# delete any existing branch if exists
# if we don't find the PR (above), then it must've been
# renamed (for a good reason)
delete_remote_branch(br_name, expect_exists=False)
new_branch = hub_repo.create_head(br_name)
new_branch.checkout()
print(f'bumping version for timestamp {timestamp} and committing to {new_branch}...')
hub_repo.git.add(update=True)
# NOTE limited to 72 characters by commit lint
hub_repo.index.commit(f'chore: bump {module}')
hub_repo.git.push('--set-upstream', hub_origin, hub_repo.head.ref)
print('making a PR ...')
body_string = f'Due to the release of jina core v{jina_core_version}, this draft PR is created in order to ' \
f'trigger an automatic build & push of the module '
pr = gh_hub_repo.create_pull(
title=pr_name,
body=body_string,
head=br_name,
base='master',
draft=True
)
except github.GithubException as e:
print('caught GH exception')
print(f'Error: {repr(e), type(e), e.data.get("message")}')
print(f'Retry limit reached? {g.get_rate_limit()}')
except Exception as e:
        # generic exceptions do not carry GithubException's .data payload
        print(f'Error: {repr(e), type(e)}')
print(f'Retry limit reached? {g.get_rate_limit()}')
raise e
finally:
hub_repo.git.checkout('master')
if br_name:
hub_repo.delete_head(br_name, force=True)
return pr
def build_pr_name(jina_core_version, module, module_version):
pr_name = f'chore: testing/building {module} ({module_version}) on new jina core: {jina_core_version}'
if COMPARISON_LEVEL == 'major':
version = semver.parse(jina_core_version)
pr_name = f'chore: testing/building {module} ({module_version}) on new jina core: {version["major"]}.'
elif COMPARISON_LEVEL == 'minor':
version = semver.parse(jina_core_version)
pr_name = f'chore: testing/building {module} ({module_version}) on new jina core: {version["major"]}.{version["minor"]}.'
return pr_name
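# Worked example (illustrative module/version names): for module 'DummyEncoder'
# at version 0.0.5 and jina core 1.2.3, build_pr_name() yields
#   patch: 'chore: testing/building DummyEncoder (0.0.5) on new jina core: 1.2.3'
#   minor: 'chore: testing/building DummyEncoder (0.0.5) on new jina core: 1.2.'
#   major: 'chore: testing/building DummyEncoder (0.0.5) on new jina core: 1.'
# so the PR title only encodes the version components relevant to COMPARISON_LEVEL.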
def all_checks_passed(runs: Optional[List[Dict]]) -> Optional[bool]:
"""
check whether all checks from a PR head ref have completed and passed
"""
if runs is None:
return None
for c in runs:
if c['status'] == 'completed':
if c['conclusion'] == 'failure':
return False
else:
return None
return True
def get_checks_for_pr(sha) -> Optional[List[Dict]]:
result = requests.get(
f'https://api.github.com/repos/{MODULES_REPO}/commits/{sha}/check-runs',
headers=GITHUB_API_HEADERS
)
checks = result.json()
if 'check_runs' not in checks.keys():
print(f'Warning: "check_runs" not in PR JSON from Github API. Will retry...')
return None
pr_checks = checks['check_runs']
print(
f'Got {len(pr_checks)} runs to check for PR: \n{[(r["name"], r["status"], r["conclusion"]) for r in pr_checks]}')
return pr_checks
def open_issue(pr, pr_checks, hub_repo: Repository, module, jina_core_version):
"""opens an issue for the PR with the failed checks (if not already open)"""
issue_name = f'{FIX_MODULE_TEMPLATE}{module}'
existing_issue_for_pr = [
i for i in list(hub_repo.get_issues(state='open'))
if i.title == issue_name
]
if len(existing_issue_for_pr) > 0:
print(f'Found existing issue: {existing_issue_for_pr}')
return existing_issue_for_pr[0]
else:
# open the issue
body = f"""
**[This is an automated issue opened as part of the hub modules update GH action. DO NOT EDIT THIS DESCRIPTION]**
Could not build module {module} for Jina core version {jina_core_version} because some of the checks failed:
```
{[(c['name'], c['status'], c['conclusion']) for c in pr_checks]}
```
See {pr.html_url} for more info. {TAG_IN_ISSUES}
"""
issue = hub_repo.create_issue(
title=issue_name,
body=body,
)
print(f'opened issue at {issue.html_url}')
return issue
def delete_remote_branch(br_name, expect_exists=True):
req = requests.delete(f'https://api.github.com/repos/{MODULES_REPO}/git/refs/heads/{br_name}',
headers=GITHUB_API_HEADERS)
if req.status_code == 204:
if expect_exists:
print(f'branch {br_name} deleted')
else:
print(f'Warning: {br_name} existed and was deleted')
elif expect_exists:
print(f'WARNING: Output from attempting to delete branch. code {req.status_code}, json: {req.json()}')
return
def close_related_issue(pr, hub_repo, module):
issue_name = f'{FIX_MODULE_TEMPLATE}{module}'
existing_issues_for_pr: List[Issue] = [
i for i in list(hub_repo.get_issues(state='open'))
if i.title == issue_name
]
if len(existing_issues_for_pr) > 0:
print(f'Found existing issue: {existing_issues_for_pr}')
if len(existing_issues_for_pr) > 1:
print(f'Warning: Found too many matching issues. Will close them all.')
for i in existing_issues_for_pr:
i.create_comment(f'Closing issue as build succeeded on PR {pr.html_url}')
i.edit(state='closed')
return
def handle_prs(prs_modules: List[Tuple[PullRequest, str]], hub_repo, jina_core_version):
"""
traverses list of open PRs. Confirms whether checks have passed or not. If not, opens issues
"""
# noinspection PyBroadException
if len(prs_modules) == 0:
return
# noinspection PyBroadException
try:
# allow for checks to be initiated. It's not instantaneous
print(f'waiting for 2 mins. before continuing...')
time.sleep(120)
new_prs = []
while len(prs_modules) > 0:
for i, pr_module in enumerate(prs_modules):
print(f'rate limit status: {g.get_rate_limit()}')
pr = None
try:
pr = pr_module[0]
module = pr_module[1]
print(f'Checking PR {pr} ( {pr.html_url} )...')
br_name = pr.head.ref
last_commit = sorted(list(pr.get_commits()), key=lambda t: t.commit.author.date)[-1]
sha = last_commit.sha
pr_checks: Optional[List[Dict]] = get_checks_for_pr(sha)
checks_passed = all_checks_passed(pr_checks)
if checks_passed is None:
print(f'Not all checks have completed for {br_name}. Skipping and will attempt later...')
new_prs.append((pr, module))
else:
if checks_passed:
print(f'All checks completed and passed for {br_name}. Commenting and closing...')
pr.create_issue_comment(
f'Automatic build successful. Image has been built and deployed.'
)
# close any open issues related to this module using the template
close_related_issue(pr, hub_repo, module)
else:
print(
f'warning: not all checks have passed for {br_name}. '
f'Will open issue and abandon trying.')
issue = open_issue(pr, pr_checks, hub_repo, module, jina_core_version)
pr.create_issue_comment(
f'Automatic build failed in this PR and we '
f'have opened an issue here: {issue.html_url}. '
f'Closing this PR. '
)
pr.edit(state='closed')
delete_remote_branch(br_name, expect_exists=True)
except Exception as e:
print(f'Error handling pr {pr}. Error: {repr(e)}')
                    # starting the checking
<filename>Models/model_convPN.py
# Importation of libraries
import math
import torch
import numpy as np
from Models.model_utils import *
import pdb
##############################################
## SLP POOLING BLOCK ##
##############################################
class SLP_Pooling(torch.autograd.Function):
@staticmethod
def forward(ctx, x, x_complete, indexes, weight1, weight2, bias, use_x, pooling_operation, zerotensor, batchtensor, type_tensor='float', max_value=10**10):
with torch.no_grad():
batch_size, num_points, dim_features = x.size()
_, _, dim_features_complete = x_complete.size()
_, nb_subsampled_points, num_neighbors = indexes.size()
output_dim_features, _ = weight1.size()
if type(batch_size) == torch.Tensor:
batch_size = batch_size.item()
if type(num_points) == torch.Tensor:
num_points = num_points.item()
if type(dim_features) == torch.Tensor:
dim_features = dim_features.item()
if type(dim_features_complete) == torch.Tensor:
dim_features_complete = dim_features_complete.item()
if type(nb_subsampled_points) == torch.Tensor:
nb_subsampled_points = nb_subsampled_points.item()
if type(num_neighbors) == torch.Tensor:
num_neighbors = num_neighbors.item()
if type(output_dim_features) == torch.Tensor:
output_dim_features = output_dim_features.item()
x_complete_ = x_complete.contiguous().view(batch_size * num_points, dim_features_complete)
x_complete_ = torch.mm(x_complete_, weight1.transpose(0, 1)) + bias
x_complete_ = x_complete_.view(batch_size, num_points, output_dim_features)
if use_x:
x_ = x.view(batch_size * num_points, dim_features)
x_ = torch.mm(x_, weight2.transpose(0, 1))
x_ = x_.view(batch_size, num_points, output_dim_features)
x_complete_ = x_complete_ + x_
x_complete_ = torch.cat((zerotensor.expand(batch_size, 1, output_dim_features), x_complete_), dim=1)
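            # Note on the padding trick (added comment): a row of zeros is prepended
            # along the point axis and the gathered indexes are shifted by +1 below,
            # so padded neighbor entries (index -1) pick up the zero row; the
            # '== 0' / '!= 0' masks later identify exactly those entries.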
indexes_ = indexes.contiguous().view(batch_size, nb_subsampled_points * num_neighbors,1).expand(batch_size, nb_subsampled_points * num_neighbors, output_dim_features) + 1
x_complete_ = torch.gather(x_complete_, 1, indexes_).view(batch_size, nb_subsampled_points, num_neighbors, output_dim_features)
if use_x and (num_points == nb_subsampled_points):
x_complete_ = x_complete_ - x_.unsqueeze(2)
elif use_x:
x_ = torch.cat((zerotensor.expand(batch_size, 1, output_dim_features), x_), dim=1)
indexes__ = indexes[:,:,0].contiguous().view(batch_size, nb_subsampled_points, 1).expand(batch_size, nb_subsampled_points, output_dim_features) + 1
x_ = torch.gather(x_, 1, indexes__).view(batch_size, nb_subsampled_points, 1, output_dim_features)
x_complete_ = x_complete_ - x_
if pooling_operation == 'max':
mask = indexes_.view(batch_size, nb_subsampled_points, num_neighbors, output_dim_features) == 0
if type_tensor == 'float':
mask = mask.float()
elif type_tensor == 'double':
mask = mask.double()
x_complete_ = x_complete_ - max_value*mask
x_complete_, indexes_max = torch.max(x_complete_, dim=2)
elif pooling_operation == 'sum':
mask = indexes_.view(batch_size, nb_subsampled_points, num_neighbors, output_dim_features) != 0
if type_tensor == 'float':
mask = mask.float()
elif type_tensor == 'double':
mask = mask.double()
x_complete_ = x_complete_ * mask
x_complete_ = torch.sum(x_complete_, dim=2)
indexes_max = None
ctx.save_for_backward(x, x_complete, indexes, weight1, weight2, indexes_max, zerotensor)
ctx.use_x = use_x
ctx.pooling_operation = pooling_operation
ctx.type_tensor = type_tensor
return x_complete_
@staticmethod
def backward(ctx, grad_output, retain_variables=False):
with torch.no_grad():
x, x_complete, indexes, weight1, weight2, indexes_max, zerotensor = ctx.saved_tensors
use_x = ctx.use_x
pooling_operation = ctx.pooling_operation
# Dimension
batch_size, num_points, dim_features = x.size()
_, _, dim_features_complete = x_complete.size()
_, nb_subsampled_points, num_neighbors = indexes.size()
output_dim_features, _ = weight1.size()
if type(batch_size) == torch.Tensor:
batch_size = batch_size.item()
if type(num_points) == torch.Tensor:
num_points = num_points.item()
if type(dim_features) == torch.Tensor:
dim_features = dim_features.item()
if type(dim_features_complete) == torch.Tensor:
dim_features_complete = dim_features_complete.item()
if type(nb_subsampled_points) == torch.Tensor:
nb_subsampled_points = nb_subsampled_points.item()
if type(num_neighbors) == torch.Tensor:
num_neighbors = num_neighbors.item()
if type(output_dim_features) == torch.Tensor:
output_dim_features = output_dim_features.item()
# Backward in the maxpooling
if pooling_operation == 'max':
tensor = zerotensor.view(1, 1, 1, 1).expand(batch_size, nb_subsampled_points, num_neighbors, output_dim_features).clone()
grad_output = tensor.scatter_(2, indexes_max.unsqueeze(2), grad_output.unsqueeze(2))
elif pooling_operation == 'sum':
grad_output = grad_output.unsqueeze(2).expand(batch_size, nb_subsampled_points, num_neighbors, output_dim_features).clone()
mask = indexes.contiguous().view(batch_size, nb_subsampled_points, num_neighbors, 1).expand(batch_size, nb_subsampled_points, num_neighbors, output_dim_features) == -1
grad_output[mask] = 0
grad_output = grad_output.contiguous().view(batch_size, nb_subsampled_points * num_neighbors, output_dim_features)
indexes = indexes.contiguous().view(batch_size, nb_subsampled_points * num_neighbors, 1).expand(batch_size, nb_subsampled_points * num_neighbors, output_dim_features) + 1
# Bring the gradient down to the good shape
value_grad_x_complete = zerotensor.view(1, 1, 1).expand(batch_size, num_points + 1, output_dim_features).clone()
value_grad_x_complete.scatter_add_(1, indexes, grad_output)
value_grad_x_complete = value_grad_x_complete[:,1:,:]
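            # (Added comment) scatter_add_ accumulates the per-neighbor gradients back
            # onto their source points; thanks to the +1 index shift, gradients of
            # padded (-1) entries land in row 0, which the slice above then drops.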
if use_x:
grad_output = torch.sum(grad_output.view(batch_size, nb_subsampled_points, num_neighbors, output_dim_features), dim=2, keepdim=False)
if num_points == nb_subsampled_points:
value_grad_x = value_grad_x_complete - grad_output
else:
value_grad_x = zerotensor.view(1, 1, 1).expand(batch_size, num_points + 1, output_dim_features).clone()
value_grad_x.scatter_(1, indexes.view(batch_size, nb_subsampled_points, num_neighbors, output_dim_features)[:,:,0,:], grad_output)
value_grad_x = value_grad_x[:,1:,:]
value_grad_x = value_grad_x_complete - value_grad_x
# Gradient with respect to weight1
grad_weight1 = torch.bmm(value_grad_x_complete.transpose(1,2), x_complete)
grad_weight1 = torch.sum(grad_weight1, dim=0)
# Gradient with respect to weight2
if use_x:
grad_weight2 = torch.bmm(value_grad_x.transpose(1, 2), x)
grad_weight2 = torch.sum(grad_weight2, dim=0)
else:
grad_weight2 = None
# Gradient with respect to the bias
grad_bias = torch.sum(value_grad_x_complete, dim=[0, 1])
# Gradient with respect to x
if use_x:
value_grad_x = value_grad_x.view(batch_size * num_points, output_dim_features)
grad_x = torch.mm(value_grad_x,weight2).view(batch_size, num_points, dim_features)
else:
grad_x = zerotensor.view(1, 1, 1).repeat(batch_size, num_points, dim_features)
# Gradient with respect to x_complete
value_grad_x_complete = value_grad_x_complete.contiguous().view(batch_size * num_points, output_dim_features)
grad_x_complete = torch.mm(value_grad_x_complete, weight1).view(batch_size, num_points, dim_features_complete)
return grad_x, grad_x_complete, None, grad_weight1, grad_weight2, grad_bias, None, None, None, None, None
if __name__ == '__main__':
print('Testing the gradients of the SLP_Pooling layer')
# Building the input tensors
device = torch.device("cpu")
num_batch = 4
num_points = 32
dim_features = 3
dim_features_complete = 6
output_dim_features = 8
x = torch.autograd.Variable(torch.randn(num_batch, num_points, dim_features)).double().to(device)
x.requires_grad = True
x_complete = torch.autograd.Variable(torch.randn(num_batch, num_points, dim_features_complete)).double().to(device)
x_complete.requires_grad = True
weight1 = torch.nn.Parameter(torch.randn(output_dim_features, dim_features_complete), requires_grad=True).double().to(device)
weight2 = torch.nn.Parameter(torch.randn(output_dim_features, dim_features), requires_grad=True).double().to(device)
bias = torch.nn.Parameter(torch.randn(output_dim_features), requires_grad=True).double().to(device)
use_x = True
pooling_operation = 'sum'
zerotensor = torch.zeros([1]).double().to(device)
batchtensor = torch.arange(num_batch).to(device)
type_tensor = 'double'
# Sampling
model = Sampling(num_points // 2)
indexes_downsampling = model(x.float())
x_ = model.extract_values(x.float(), indexes_downsampling)
# Grouping
nb_neighbours = 2
sampling_method = 'query_ball'
patch_radius = 0.2
num_points_training = 10000
model = Grouping(nb_neighbours, sampling_method, use_x, num_batch, patch_radius=patch_radius)
indexes = model(x.float(), x_)
# Test of forward/backward pass
try:
slp_pooling = SLP_Pooling.apply
torch.autograd.gradcheck(slp_pooling, (x, x_complete, indexes, weight1, weight2, bias, use_x, pooling_operation, zerotensor, batchtensor, type_tensor), raise_exception=True)
print('Test on SLP_Pooling: Success')
except:
print('Test on SLP_Pooling: Failure')
raise
class SLP_Pooling_Module(torch.nn.Module):
def __init__(self):
super(SLP_Pooling_Module, self).__init__()
self.type_tensor = 'float'
def forward(self, x, x_complete, indexes, weight1, weight2, bias, use_x, pooling_operation, zerotensor, batchtensor, max_value=10**10):
x_complete_ = SLP_Pooling.apply(x, x_complete, indexes, weight1, weight2, bias, use_x, pooling_operation, zerotensor, batchtensor, self.type_tensor)
return x_complete_
##############################################
## TRANSITION BLOCK ##
##############################################
class TransitionBlock(torch.nn.Module):
def __init__(self, nb_scales, input_channels, list_dim_slp):
super(TransitionBlock, self).__init__()
self.nb_scales = nb_scales
self.list_dim_slp = list_dim_slp
self.input_channels = input_channels
self.list_slp = torch.nn.ModuleList()
self.list_bn = torch.nn.ModuleList()
for i in range(self.nb_scales):
last_channel = input_channels[i]
for out_channel in self.list_dim_slp[i]:
self.list_slp.append(torch.nn.Conv1d(last_channel, out_channel, 1, bias=True).float())
self.list_bn.append(torch.nn.BatchNorm1d(out_channel).float())
last_channel = out_channel
def update_decay(self, bn_decay_value):
for i in range(len(self.list_bn)):
self.list_bn[i].momentum = bn_decay_value
def forward(self, list_x_complete, bn_decay_value=None):
if bn_decay_value is not None:
self.update_decay(bn_decay_value)
cpt = 0
list_x_complete_ = []
for i in range(self.nb_scales):
x_complete = list_x_complete[i].transpose(1, 2)
for j in range(len(self.list_dim_slp[i])):
x_complete = self.list_slp[cpt](x_complete)
x_complete = self.list_bn[cpt](x_complete)
x_complete = torch.nn.functional.relu(x_complete)
cpt += 1
x_complete = x_complete.transpose(1, 2)
list_x_complete_.append(x_complete)
return list_x_complete_
class ConvolutionBlock(torch.nn.Module):
def __init__(self, nb_scales, batch_size, spatial_channels, input_channels, patch_radius, nb_neighbours, sampling_method, pooling_operation,
require_grouping, use_x, list_dim_slp, use_crosslinks, use_reslinks):
super(ConvolutionBlock, self).__init__()
self.nb_scales = nb_scales
self.batch_size = batch_size
self.spatial_channels = spatial_channels
self.input_channels = input_channels
self.patch_radius = patch_radius
self.nb_neighbours = nb_neighbours
self.sampling_method = sampling_method
self.pooling_operation = pooling_operation
self.require_grouping = require_grouping
self.use_x = use_x
self.list_dim_slp = list_dim_slp
self.use_crosslinks = use_crosslinks
self.use_reslinks = use_reslinks
# Grouping if needed
self.groupinglist = torch.nn.ModuleList()
for i in range(self.nb_scales):
self.groupinglist.append(Grouping(self.nb_neighbours[i], self.sampling_method[i], self.use_x[i], self.batch_size, patch_radius=self.patch_radius[i]))
# Single Layer Perceptron
self.slppool = torch.nn.ModuleList()
self.slppool_weight1 = torch.nn.ParameterList()
self.slppool_weight2 = torch.nn.ParameterList()
self.slppool_bias = torch.nn.ParameterList()
self.bnlist = torch.nn.ModuleList()
for i in range(self.nb_scales):
self.slppool.append(SLP_Pooling_Module())
weight1 = torch.nn.Parameter(torch.zeros(self.list_dim_slp[i], self.input_channels[i]).float(), requires_grad=True)
self.slppool_weight1.append(weight1)
if self.use_x[i]:
weight2 = torch.nn.Parameter(torch.zeros(self.list_dim_slp[i], self.spatial_channels).float(), requires_grad=True)
self.slppool_weight2.append(weight2)
else:
self.slppool_weight2.append(None)
bias = torch.nn.Parameter(torch.zeros(self.list_dim_slp[i]).float(), requires_grad=True)
self.slppool_bias.append(bias)
self.bnlist.append(torch.nn.BatchNorm1d(self.list_dim_slp[i]).float())
# Cross-Connection
if self.use_crosslinks:
self.slp_crossconnection_downsampling = torch.nn.ModuleList()
self.bn_crossconnection_downsampling = torch.nn.ModuleList()
self.slp_crossconnection_upsampling = torch.nn.ModuleList()
self.bn_crossconnection_upsampling = torch.nn.ModuleList()
for i in range(self.nb_scales):
if i < self.nb_scales - 1:
self.slp_crossconnection_downsampling.append(torch.nn.Conv1d(self.list_dim_slp[i], self.list_dim_slp[i + 1], 1, bias=True).float())
self.bn_crossconnection_downsampling.append(torch.nn.BatchNorm1d(self.list_dim_slp[i + 1]).float())
if i>0:
self.slp_crossconnection_upsampling.append(torch.nn.Conv1d(self.list_dim_slp[i], self.list_dim_slp[i - 1], 1, bias=True).float())
self.bn_crossconnection_upsampling.append(torch.nn.BatchNorm1d(self.list_dim_slp[i - 1]).float())
# Utils
self.list_indexes_grouping = None
self.zerotensor = torch.nn.Parameter(torch.zeros([1]).float(), requires_grad=False)
self.batchtensor = torch.nn.Parameter(torch.arange(self.batch_size), requires_grad=False)
self.reset_parameters()
def reset_parameters(self):
for i in range(self.nb_scales):
# Initializing weight1
torch.nn.init.kaiming_uniform_(self.slppool_weight1[i], a=math.sqrt(5))
if self.use_x[i]:
# Initializing weight2
torch.nn.init.kaiming_uniform_(self.slppool_weight2[i], a=math.sqrt(5))
# Initializing the bias
bound = 1 / math.sqrt(self.slppool_bias[i].size(0))
torch.nn.init.uniform_(self.slppool_bias[i], -bound, bound)
def update_decay(self, bn_decay_value):
if self.use_crosslinks:
for i in range(self.nb_scales - 1):
self.bn_crossconnection_downsampling[i].momentum = bn_decay_value
self.bn_crossconnection_upsampling[i].momentum = bn_decay_value
def forward(self, list_x, list_x_complete, list_indexes_downsampling, list_indexes_upsampling, list_weights_upsampling,
list_indexes_grouping=None, bn_decay_value=None):
if bn_decay_value is not None:
self.update_decay(bn_decay_value)
batch_size, _, _ = list_x[0].size()
if type(batch_size) == torch.Tensor:
batch_size = batch_size.item()
batchtensor = self.batchtensor[:batch_size]
if self.require_grouping:
list_indexes_grouping = []
list_x_complete_ = []
for i in range(self.nb_scales):
# Grouping
if self.require_grouping:
indexes = self.groupinglist[i](list_x[i], list_x[i])
# Saving the list of indexes
list_indexes_grouping.append(indexes)
else:
indexes = list_indexes_grouping[i]
# Convolution: SLP+Pooling
x_complete_ = self.slppool[i](list_x[i], list_x_complete[i], indexes, self.slppool_weight1[i], self.slppool_weight2[i],
self.slppool_bias[i], self.use_x[i], self.pooling_operation[i], self.zerotensor, batchtensor)
x_complete_ = x_complete_.transpose(1, 2)
x_complete_ = self.bnlist[i](x_complete_).transpose(1, 2)
x_complete_ = torch.nn.functional.relu(x_complete_)
list_x_complete_.append(x_complete_)
self.list_indexes_grouping = list_indexes_grouping
# Cross-Connection
offset = 0
offset_sampling = 0
if len(list_indexes_downsampling) > self.nb_scales:
offset_sampling += 1
if self.use_crosslinks:
for i in range(self.nb_scales):
x_complete = list_x_complete_[i]
if i < self.nb_scales-1:
x_complete_ = self.slp_crossconnection_upsampling[i](list_x_complete_[i + 1].transpose(1, 2))
x_complete_ = self.bn_crossconnection_upsampling[i](x_complete_).transpose(1, 2)
x_complete_ =
<reponame>quarkme84/rootplots
# -*- coding: utf-8 -*-
"""
Note:
The classes defined in this module do **NOT** provide drawing capabilities, so there is **NO** draw() nor paint()
method. Use other libraries like matplotlib to plot the data. But for your convenience, this package provides a
module for easy plotting using matplotlib. See :py:mod:`qksplot.mpl`.
"""
import math
import warnings
from typing import List, Sequence, Optional
from . import hist as h
__all__ = 'ProfileND', 'Profile1D', 'Profile2D', 'Profile3D'
class ProfileND(h.HistND):
""" Profile histograms are used to compute the mean value of Y and its error for each bin in X.
For each bin we have::
- The mean of Y is the sum of all values in that bin divided by the number of entries per bin.
- The error on X is the width of the bin divided by 2.
- The error on Y is the standard deviation divided by the sqrt(number_of_entries_per_bin).
Args:
dim (int): the number of dimensions of the profile
minBin (array like): an array containing the minimum value of lower edge of bins for each dimension.
maxBin (array like): an array containing the maximum value of upper edge of bins for each dimension.
nBins (array like): an array containing the number of bins for each dimension.
title (string): the title of the profile.
minValue (float): the minimum value accepted to fill a bin, i.e. the lowest
value accepted on Y (vertical axis). This filter is not used when minValue is None.
maxValue (float): the maximum value accepted to fill a bin, i.e. the highest
value accepted on Y (vertical axis). This filter is not used when maxValue is None.
"""
_sumWeightedValues: float
_sumWeightedValues2: float
_binSumWeightsValues2: List[float]
def __init__(self, dim: int, minBin: Sequence, maxBin: Sequence, nBins: Sequence, minValue: float=None,
maxValue: float=None, title=str()):
h.HistND.__init__(self, dim, minBin, maxBin, nBins, title)
self._minValue = minValue
self._maxValue = maxValue
############################################
# Values computed during each fill
# some stats
self._sumWeightedValues = 0 # Total Sum of weight*Y
self._sumWeightedValues2 = 0 # Total Sum of weight*Y*Y
self._binsValues = [] # contains the values per cell (global linear bin)
self._binSumWeightsValues2 = [] # array of sum of weighted squared values per cell
for v in range(self.cells):
self._binsValues.append(0)
self._binSumWeightsValues2.append(0)
@property
def minY(self):
return self._minValue
@property
def maxY(self):
return self._maxValue
def _get_std_deviation(self, j: int):
H = self._binsValues[j] # sum of weight*value in cell j
L = self._binsEntries[j] # number of entries in cell j
E = self._binSumWeightsValues2[j] # sum of weight*value*value in cell j
if L == 0:
return 0
# std deviation = sqrt(E/L - (H/L)**2), i.e. sqrt(rms**2 - mean**2)
q = E*L - H*H
return math.sqrt(q)/L
def get_cell_content(self, i: int) -> float:
""" Returns the content of the cell 'i'
Args:
i (int): a valid cell index (aka global linear bin)
Returns:
float or 0.0 if the position is outside cells ranges
"""
n = h.HistND.get_cell_content(self, i)
if n == 0.0:
return 0.0
else:
return self._binsValues[i] / n
def get_cells_contents(self, includeEmptyBins=False) -> List[float]:
""" Returns the contents of all bins (cells) per each dimension.
By default it **does not** include empty cells.
Args:
includeEmptyBins (bool): if empty bins are included in the list.
If True then empty bins are included, otherwise they are NOT included
Returns:
list of float.
"""
result = []
for i_cell in range(self.cells):
L = self._binsEntries[i_cell]
if L == 0:
if includeEmptyBins:
result.append(0)
else:
continue
else:
result.append(self._binsValues[i_cell] / L)
return result
def get_cell_content_error(self, i: int) -> float:
"""Returns the error value associated with cell 'i'
The error per cell is the standard deviation (in that cell) divided by the sqrt(number_of_entries_per_cell).
Args:
i (int): a valid cell index (aka global linear bin)
Returns:
float.
"""
L = self._binsEntries[i]
if L == 0:
return 0.0
else:
return self._get_std_deviation(i) / math.sqrt(self._binsEntries[i])
def get_cells_contents_errors(self, includeEmptyBins=False) -> List[float]:
""" Returns a list containing the errors on the Y axis associated to each bin.
By default it **does not** include empty bins.
The error on Y per bin is the standard deviation (in that bin) divided by the sqrt(number_of_entries_per_bin).
Args:
includeEmptyBins (bool): if empty bins are included in the list.
If True then empty bins are included, otherwise they are NOT included
Returns:
list.
"""
result = []
for iBin in range(self.cells):
L = self._binsEntries[iBin]
if L == 0:
if includeEmptyBins:
result.append(0)
else:
continue
else:
result.append(self._get_std_deviation(iBin) / math.sqrt(self._binsEntries[iBin]))
return result
def fill_cell(self, i_cell: int, **kwargs) -> int:
""" Fill the profile using global cell index.
Args:
i_cell (int): the global bin index (cell index).
kwargs (Dict): currently this dict can accept 3 keys:
weight (float) - the weight to fill in. Defaults to 1.0
error_per_bin(bool) - whether to compute weights per bin. Default is True
value (float) - to specify the value to fill the cell. (**Mandatory**)
Warning:
This is an expensive operation because it computes the bin indexes (it decomposes the cell index) and positions
per axis before filling weights per bin. It is recommended to use the other fill_* methods.
Returns:
int. The **index of the affected cell (global linear bin) or -1** if no cell was found or the input values
are not valid (underflow or overflow)
See Also:
:py:meth:`fill_pos`, :py:meth:`fill_bins`
"""
value = kwargs.get("value", None)
if value is None:
raise ValueError("Need a 'value' argument")
if self._minValue is not None and value < self._minValue: # ignore filtered data
return -1
if self._maxValue is not None and value > self._maxValue: # ignore filtered data
return -1
i_cell = super(ProfileND, self).fill_cell(i_cell, **kwargs)
weight = kwargs.get("weight", 1.0)
error_per_bin = kwargs.get("error_per_bin", True)
# we consider error_per_bin as false when fill_cell() is called indirectly, either from overridden methods of
# this class or from methods in HistND: HistND.fill_pos() or HistND.fill_bins()..
#
# consequently, we consider error_per_bin is true (the default value) when fill_cell() is called directly by
# the user and we must do these computations..
#
# NB: otherwise the fill() will contain illegal data and fail when computing standard_deviation()
if i_cell >= 0 and error_per_bin:
self._binsValues[i_cell] += weight * value
self._binSumWeightsValues2[i_cell] += weight * value * value
self._sumWeightedValues += weight * value
self._sumWeightedValues2 += weight * value * value
return i_cell
def fill_bins(self, *args, **kwargs) -> int:
""" Fill the histogram using bin indexes.
Args:
args (a list of parameters): the bin indexes for each dimension. The number of arguments must be the same as
the number of dimensions. Or pass one argument as a sequence (same size as number of dimensions).
kwargs : this dict can accept 2 keys:
weight (float) - the weight to fill in. Defaults to 1.0
value (float) - to specify the value to fill the cell. (**Mandatory**)
Returns:
int. The **index of the affected cell (global linear bin) or -1** if no cell was found or the input values
are not valid
See Also:
:py:meth:`fill_pos`, :py:meth:`fill_cell`
"""
value: Optional[float] = kwargs.get("value", None)
if value is None:
raise ValueError("Argument 'value' can not be None")
if self._minValue is not None and value < self._minValue: # ignore filtered data
return -1
if self._maxValue is not None and value > self._maxValue: # ignore filtered data
return -1
i_cell = super(ProfileND, self).fill_bins(*args, **kwargs)
weight = kwargs.get("weight", 1.0)
if i_cell >= 0:
self._binsValues[i_cell] += weight * value
self._binSumWeightsValues2[i_cell] += weight * value * value
self._sumWeightedValues += weight * value
self._sumWeightedValues2 += weight * value * value
return i_cell
def fill_pos(self, *args, **kwargs) -> int:
""" Fill the profile using a position.
Args:
args (a list of parameters): the positions on each dimension. The number of arguments must be the same as
the number of dimensions. Or pass one argument as a sequence (same size as number of dimensions).
kwargs : this dict can accept 2 keys:
weight (float) - the weight to fill in. Defaults to 1.0
value (float) - to specify the value to fill the cell. (**Mandatory**)
Returns:
int. The **index of the affected cell (global linear bin) or -1** if no cell was found or the input values
are not valid
See Also:
:py:meth:`fill_cell`, :py:meth:`fill_bins`
"""
# Mirrors fill_bins() above: filter on the value, delegate to HistND.fill_pos,
# then accumulate the per-cell weighted sums (reconstructed; the original body was truncated).
value: Optional[float] = kwargs.get("value", None)
if value is None:
raise ValueError("Argument 'value' can not be None")
if self._minValue is not None and value < self._minValue: # ignore filtered data
return -1
if self._maxValue is not None and value > self._maxValue: # ignore filtered data
return -1
i_cell = super(ProfileND, self).fill_pos(*args, **kwargs)
weight = kwargs.get("weight", 1.0)
if i_cell >= 0:
self._binsValues[i_cell] += weight * value
self._binSumWeightsValues2[i_cell] += weight * value * value
self._sumWeightedValues += weight * value
self._sumWeightedValues2 += weight * value * value
return i_cell
for (
entity_token_start,
entity_token_end,
), special_token_id in token_span_with_special_token_ids:
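# Insert at the end index first so that the (smaller) start index is still valid
# after the first insertion shifts the tail of first_ids.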
first_ids = (
first_ids[:entity_token_end] + [special_token_id] + first_ids[entity_token_end:]
)
first_ids = (
first_ids[:entity_token_start]
+ [special_token_id]
+ first_ids[entity_token_start:]
)
elif self.task == "entity_span_classification":
if not (
isinstance(entity_spans, list)
and len(entity_spans) > 0
and isinstance(entity_spans[0], tuple)
):
raise ValueError(
"Entity spans should be provided as a list of tuples, "
"each tuple containing the start and end character indices of an entity"
)
first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(
text, entity_spans
)
first_entity_ids = [self.entity_mask_token_id] * len(entity_spans)
else:
raise ValueError(f"Task {self.task} not supported")
return (
first_ids,
second_ids,
first_entity_ids,
second_entity_ids,
first_entity_token_spans,
second_entity_token_spans,
)
def _batch_prepare_for_model(
self,
batch_ids_pairs,
batch_entity_ids_pairs,
batch_entity_token_spans_pairs,
add_special_tokens=True,
padding_strategy=PaddingStrategy.DO_NOT_PAD,
truncation_strategy=TruncationStrategy.DO_NOT_TRUNCATE,
max_length=None,
max_entity_length=None,
stride=0,
pad_to_multiple_of=None,
return_tensors=None,
return_token_type_ids=None,
return_attention_mask=None,
return_overflowing_tokens=False,
return_special_tokens_mask=False,
return_length=False,
verbose=True,
):
batch_outputs = {}
for input_ids, entity_ids, entity_token_span_pairs in zip(
batch_ids_pairs, batch_entity_ids_pairs, batch_entity_token_spans_pairs
):
first_ids, second_ids = input_ids
first_entity_ids, second_entity_ids = entity_ids
first_entity_token_spans, second_entity_token_spans = entity_token_span_pairs
outputs = self.prepare_for_model(
first_ids,
second_ids,
entity_ids=first_entity_ids,
pair_entity_ids=second_entity_ids,
entity_token_spans=first_entity_token_spans,
pair_entity_token_spans=second_entity_token_spans,
add_special_tokens=add_special_tokens,
padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward
truncation=truncation_strategy.value,
max_length=max_length,
max_entity_length=max_entity_length,
stride=stride,
pad_to_multiple_of=None, # we pad in batch afterward
return_attention_mask=False, # we pad in batch afterward
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_length=return_length,
return_tensors=None, # We convert the whole batch to tensors at the end
prepend_batch_axis=False,
verbose=verbose,
)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
batch_outputs = self.pad(
batch_outputs,
padding=padding_strategy.value,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
return batch_outputs
def prepare_for_model(
self,
ids,
pair_ids=None,
entity_ids=None,
pair_entity_ids=None,
entity_token_spans=None,
pair_entity_token_spans=None,
add_special_tokens=True,
padding=False,
truncation=False,
max_length=None,
max_entity_length=None,
stride=0,
pad_to_multiple_of=None,
return_tensors=None,
return_token_type_ids=None,
return_attention_mask=None,
return_overflowing_tokens=False,
return_special_tokens_mask=False,
return_offsets_mapping=False,
return_length=False,
verbose=True,
prepend_batch_axis=False,
**kw,
):
(
padding_strategy,
truncation_strategy,
max_length,
kw,
) = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kw,
)
# Compute lengths
pair = bool(pair_ids is not None)
len_ids = len(ids)
len_pair_ids = len(pair_ids) if pair else 0
if return_token_type_ids and not add_special_tokens:
raise ValueError(
"Asking to return token_type_ids while setting add_special_tokens to False "
"results in an undefined behavior. Please set add_special_tokens to True or "
"set return_token_type_ids to None."
)
if (
return_overflowing_tokens
and truncation_strategy == TruncationStrategy.LONGEST_FIRST
and pair_ids is not None
):
raise ValueError(
"Not possible to return overflowing tokens for pair of sequences with the "
"`longest_first`. Please select another truncation strategy than `longest_first`, "
"for instance `only_second` or `only_first`."
)
if return_token_type_ids is None:
return_token_type_ids = "token_type_ids" in self.model_input_names
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
encoded_inputs = {}
total_len = (
len_ids
+ len_pair_ids
+ (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)
)
# Truncation: Handle max sequence length and max_entity_length
overflowing_tokens = []
if (
truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE
and max_length
and total_len > max_length
):
ids, pair_ids, overflowing_tokens = self.truncate_sequences(
ids,
pair_ids=pair_ids,
num_tokens_to_remove=total_len - max_length,
truncation_strategy=truncation_strategy,
stride=stride,
)
if return_overflowing_tokens:
encoded_inputs["overflowing_tokens"] = overflowing_tokens
encoded_inputs["num_truncated_tokens"] = total_len - max_length
if add_special_tokens:
sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
entity_token_offset = 1 # 1 * <s> token
pair_entity_token_offset = len(ids) + 3 # 1 * <s> token & 2 * <sep> tokens
else:
sequence = ids + pair_ids if pair else ids
token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else [])
entity_token_offset = 0
pair_entity_token_offset = len(ids)
encoded_inputs["input_ids"] = sequence
if return_token_type_ids:
encoded_inputs["token_type_ids"] = token_type_ids
if return_special_tokens_mask:
if add_special_tokens:
encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
else:
encoded_inputs["special_tokens_mask"] = [0] * len(sequence)
if not max_entity_length:
max_entity_length = self.max_entity_length
if entity_ids is not None:
total_entity_len = 0
num_invalid_entities = 0
valid_entity_ids = [
ent_id
for ent_id, span in zip(entity_ids, entity_token_spans)
if span[1] <= len(ids)
]
valid_entity_token_spans = [span for span in entity_token_spans if span[1] <= len(ids)]
total_entity_len += len(valid_entity_ids)
num_invalid_entities += len(entity_ids) - len(valid_entity_ids)
valid_pair_entity_ids, valid_pair_entity_token_spans = None, None
if pair_entity_ids is not None:
valid_pair_entity_ids = [
ent_id
for ent_id, span in zip(pair_entity_ids, pair_entity_token_spans)
if span[1] <= len(pair_ids)
]
valid_pair_entity_token_spans = [
span for span in pair_entity_token_spans if span[1] <= len(pair_ids)
]
total_entity_len += len(valid_pair_entity_ids)
num_invalid_entities += len(pair_entity_ids) - len(valid_pair_entity_ids)
if num_invalid_entities != 0:
logger.warning(
f"{num_invalid_entities} entities are ignored because their entity spans are invalid due to the truncation of input tokens"
)
if (
truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE
and total_entity_len > max_entity_length
):
(
valid_entity_ids,
valid_pair_entity_ids,
overflowing_entities,
) = self.truncate_sequences(
valid_entity_ids,
pair_ids=valid_pair_entity_ids,
num_tokens_to_remove=total_entity_len - max_entity_length,
truncation_strategy=truncation_strategy,
stride=stride,
)
valid_entity_token_spans = valid_entity_token_spans[: len(valid_entity_ids)]
if valid_pair_entity_token_spans is not None:
valid_pair_entity_token_spans = valid_pair_entity_token_spans[
: len(valid_pair_entity_ids)
]
if return_overflowing_tokens:
encoded_inputs["overflowing_entities"] = overflowing_entities
encoded_inputs["num_truncated_entities"] = total_entity_len - max_entity_length
final_entity_ids = (
valid_entity_ids + valid_pair_entity_ids
if valid_pair_entity_ids
else valid_entity_ids
)
encoded_inputs["entity_ids"] = list(final_entity_ids)
entity_position_ids = []
entity_start_positions = []
entity_end_positions = []
for (token_spans, offset) in (
(valid_entity_token_spans, entity_token_offset),
(valid_pair_entity_token_spans, pair_entity_token_offset),
):
if token_spans is not None:
for start, end in token_spans:
start += offset
end += offset
position_ids = list(range(start, end))[: self.max_mention_length]
position_ids += [-1] * (self.max_mention_length - end + start)
entity_position_ids.append(position_ids)
entity_start_positions.append(start)
entity_end_positions.append(end - 1)
encoded_inputs["entity_position_ids"] = entity_position_ids
if self.task == "entity_span_classification":
encoded_inputs["entity_start_positions"] = entity_start_positions
encoded_inputs["entity_end_positions"] = entity_end_positions
if return_token_type_ids:
encoded_inputs["entity_token_type_ids"] = [0] * len(encoded_inputs["entity_ids"])
self._eventual_warn_about_too_long_sequence(
encoded_inputs["input_ids"], max_length, verbose
)
if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
encoded_inputs = self.pad(
encoded_inputs,
max_length=max_length,
max_entity_length=max_entity_length,
padding=padding_strategy.value,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
if return_length:
encoded_inputs["length"] = len(encoded_inputs["input_ids"])
batch_outputs = BatchEncoding(
encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis
)
return batch_outputs
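# Usage sketch (illustrative only; assumes `tok` is an instance of this tokenizer class,
# configured with task="entity_span_classification" and a loaded vocabulary):
#   text = "Beyonce lives in Los Angeles"
#   entity_spans = [(0, 7), (17, 28)]            # character-level spans
#   enc = tok(text, entity_spans=entity_spans)
#   enc["entity_ids"], enc["entity_position_ids"], enc["entity_start_positions"]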
def pad(
self,
encoded_inputs,
padding=True,
max_length=None,
max_entity_length=None,
pad_to_multiple_of=None,
return_attention_mask=None,
return_tensors=None,
verbose=True,
):
if isinstance(encoded_inputs, (list, tuple)) and isinstance(
encoded_inputs[0], (dict, BatchEncoding)
):
encoded_inputs = {
key: [example[key] for example in encoded_inputs]
for key in encoded_inputs[0].keys()
}
if self.model_input_names[0] not in encoded_inputs:
raise ValueError(
"You should supply an encoding or a list of encodings to this method "
f"that includes {self.model_input_names[0]}, but you provided {list(encoded_inputs.keys())}"
)
required_input = encoded_inputs[self.model_input_names[0]]
if not required_input:
if return_attention_mask:
encoded_inputs["attention_mask"] = []
return encoded_inputs
first_element = required_input[0]
if isinstance(first_element, (list, tuple)):
index = 0
while len(required_input[index]) == 0:
index += 1
if index < len(required_input):
first_element = required_input[index][0]
if not isinstance(first_element, (int, list, tuple)):
if is_tf_available() and _is_tensorflow(first_element):
return_tensors = "tf" if return_tensors is None else return_tensors
elif is_torch_available() and _is_torch(first_element):
return_tensors = "pt" if return_tensors is None else return_tensors
elif isinstance(first_element, np.ndarray):
return_tensors = "np" if return_tensors is None else return_tensors
else:
raise ValueError(
f"type of {first_element} unknown: {type(first_element)}. "
f"Should be one of a python, numpy, pytorch or tensorflow object."
)
for key, value in encoded_inputs.items():
encoded_inputs[key] = to_py_obj(value)
padding_strategy, _, max_length, _ = self._get_padding_truncation_strategies(
padding=padding, max_length=max_length, verbose=verbose
)
if max_entity_length is None:
max_entity_length = self.max_entity_length
required_input = encoded_inputs[self.model_input_names[0]]
if required_input and not isinstance(required_input[0], (list, tuple)):
encoded_inputs = self._pad(
encoded_inputs,
max_length=max_length,
max_entity_length=max_entity_length,
padding_strategy=padding_strategy,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
return BatchEncoding(encoded_inputs, tensor_type=return_tensors)
batch_size = len(required_input)
if any(len(v) != batch_size for v in encoded_inputs.values()):
raise ValueError(
"Some items in the output dictionary have a different batch size than others."
)
if padding_strategy == PaddingStrategy.LONGEST:
max_length = max(len(inputs) for inputs in required_input)
max_entity_length = (
max(len(inputs) for inputs in encoded_inputs["entity_ids"])
if "entity_ids" in encoded_inputs
else 0
)
padding_strategy = PaddingStrategy.MAX_LENGTH
batch_outputs = {}
for i in range(batch_size):
inputs = dict((k, v[i]) for k, v in encoded_inputs.items())
outputs = self._pad(
inputs,
max_length=max_length,
max_entity_length=max_entity_length,
padding_strategy=padding_strategy,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
return BatchEncoding(batch_outputs, tensor_type=return_tensors)
def _pad(
self,
encoded_inputs,
max_length=None,
max_entity_length=None,
padding_strategy=PaddingStrategy.DO_NOT_PAD,
pad_to_multiple_of=None,
return_attention_mask=None,
):
entities_provided = bool("entity_ids" in encoded_inputs)
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
if padding_strategy == PaddingStrategy.LONGEST:
max_length = len(encoded_inputs["input_ids"])
if entities_provided:
max_entity_length = len(encoded_inputs["entity_ids"])
if (
max_length is not None
and pad_to_multiple_of is not None
and (max_length % pad_to_multiple_of != 0)
):
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
if (
entities_provided
and max_entity_length is not None
and pad_to_multiple_of is not None
and (max_entity_length % pad_to_multiple_of != 0)
):
max_entity_length = ((max_entity_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
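# Round up to the next multiple, e.g. max_length=37 with pad_to_multiple_of=8 becomes 40.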
needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and (
len(encoded_inputs["input_ids"]) != max_length
or (entities_provided and len(encoded_inputs["entity_ids"]) != max_entity_length)
)
if return_attention_mask and "attention_mask" not in encoded_inputs:
encoded_inputs["attention_mask"] = [1] * len(encoded_inputs["input_ids"])
if (
entities_provided
and return_attention_mask
and "entity_attention_mask" not in encoded_inputs
):
encoded_inputs["entity_attention_mask"] = [1] * len(encoded_inputs["entity_ids"])
if needs_to_be_padded:
difference = max_length - len(encoded_inputs["input_ids"])
if entities_provided:
<filename>_v5_proc_cv2dnn_ssd.py<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# COPYRIGHT (C) 2014-2020 <NAME>.
# This software is released under the MIT License.
# https://github.com/konsan1101
# Thank you for keeping the rules.
import sys
import os
import time
import datetime
import codecs
import glob
import queue
import threading
import subprocess
import numpy as np
import cv2
# External program
qExt_face = '__ext_face.bat'
# Common routines
import _v5__qRiKi
qRiKi = _v5__qRiKi.qRiKi_class()
import _v5__qFunc
qFunc = _v5__qFunc.qFunc_class()
import _v5__qLog
qLog = _v5__qLog.qLog_class()
qPLATFORM = qRiKi.getValue('qPLATFORM' )
qRUNATTR = qRiKi.getValue('qRUNATTR' )
qHOSTNAME = qRiKi.getValue('qHOSTNAME' )
qUSERNAME = qRiKi.getValue('qUSERNAME' )
qPath_pictures = qRiKi.getValue('qPath_pictures' )
qPath_videos = qRiKi.getValue('qPath_videos' )
qPath_cache = qRiKi.getValue('qPath_cache' )
qPath_sounds = qRiKi.getValue('qPath_sounds' )
qPath_icons = qRiKi.getValue('qPath_icons' )
qPath_fonts = qRiKi.getValue('qPath_fonts' )
qPath_log = qRiKi.getValue('qPath_log' )
qPath_work = qRiKi.getValue('qPath_work' )
qPath_rec = qRiKi.getValue('qPath_rec' )
qPath_s_ctrl = qRiKi.getValue('qPath_s_ctrl' )
qPath_s_inp = qRiKi.getValue('qPath_s_inp' )
qPath_s_wav = qRiKi.getValue('qPath_s_wav' )
qPath_s_jul = qRiKi.getValue('qPath_s_jul' )
qPath_s_STT = qRiKi.getValue('qPath_s_STT' )
qPath_s_TTS = qRiKi.getValue('qPath_s_TTS' )
qPath_s_TRA = qRiKi.getValue('qPath_s_TRA' )
qPath_s_play = qRiKi.getValue('qPath_s_play' )
qPath_v_ctrl = qRiKi.getValue('qPath_v_ctrl' )
qPath_v_inp = qRiKi.getValue('qPath_v_inp' )
qPath_v_jpg = qRiKi.getValue('qPath_v_jpg' )
qPath_v_detect = qRiKi.getValue('qPath_v_detect' )
qPath_v_cv = qRiKi.getValue('qPath_v_cv' )
qPath_v_photo = qRiKi.getValue('qPath_v_photo' )
qPath_v_msg = qRiKi.getValue('qPath_v_msg' )
qPath_d_ctrl = qRiKi.getValue('qPath_d_ctrl' )
qPath_d_play = qRiKi.getValue('qPath_d_play' )
qPath_d_prtscn = qRiKi.getValue('qPath_d_prtscn' )
qPath_d_movie = qRiKi.getValue('qPath_d_movie' )
qPath_d_upload = qRiKi.getValue('qPath_d_upload' )
qBusy_dev_cpu = qRiKi.getValue('qBusy_dev_cpu' )
qBusy_dev_com = qRiKi.getValue('qBusy_dev_com' )
qBusy_dev_mic = qRiKi.getValue('qBusy_dev_mic' )
qBusy_dev_spk = qRiKi.getValue('qBusy_dev_spk' )
qBusy_dev_cam = qRiKi.getValue('qBusy_dev_cam' )
qBusy_dev_dsp = qRiKi.getValue('qBusy_dev_dsp' )
qBusy_dev_scn = qRiKi.getValue('qBusy_dev_scn' )
qBusy_s_ctrl = qRiKi.getValue('qBusy_s_ctrl' )
qBusy_s_inp = qRiKi.getValue('qBusy_s_inp' )
qBusy_s_wav = qRiKi.getValue('qBusy_s_wav' )
qBusy_s_STT = qRiKi.getValue('qBusy_s_STT' )
qBusy_s_TTS = qRiKi.getValue('qBusy_s_TTS' )
qBusy_s_TRA = qRiKi.getValue('qBusy_s_TRA' )
qBusy_s_play = qRiKi.getValue('qBusy_s_play' )
qBusy_v_ctrl = qRiKi.getValue('qBusy_v_ctrl' )
qBusy_v_inp = qRiKi.getValue('qBusy_v_inp' )
qBusy_v_QR = qRiKi.getValue('qBusy_v_QR' )
qBusy_v_jpg = qRiKi.getValue('qBusy_v_jpg' )
qBusy_v_CV = qRiKi.getValue('qBusy_v_CV' )
qBusy_d_ctrl = qRiKi.getValue('qBusy_d_ctrl' )
qBusy_d_inp = qRiKi.getValue('qBusy_d_inp' )
qBusy_d_QR = qRiKi.getValue('qBusy_d_QR' )
qBusy_d_rec = qRiKi.getValue('qBusy_d_rec' )
qBusy_d_telework = qRiKi.getValue('qBusy_d_telework' )
qBusy_d_play = qRiKi.getValue('qBusy_d_play' )
qBusy_d_browser = qRiKi.getValue('qBusy_d_browser' )
qBusy_d_upload = qRiKi.getValue('qBusy_d_upload' )
qRdy__s_force = qRiKi.getValue('qRdy__s_force' )
qRdy__s_fproc = qRiKi.getValue('qRdy__s_fproc' )
qRdy__s_sendkey = qRiKi.getValue('qRdy__s_sendkey' )
qRdy__v_mirror = qRiKi.getValue('qRdy__v_mirror' )
qRdy__v_reader = qRiKi.getValue('qRdy__v_reader' )
qRdy__v_sendkey = qRiKi.getValue('qRdy__v_sendkey' )
qRdy__d_reader = qRiKi.getValue('qRdy__d_reader' )
qRdy__d_sendkey = qRiKi.getValue('qRdy__d_sendkey' )
class proc_cv2dnn_ssd:
def __init__(self, name='thread', id='0', runMode='debug',
procMode='640x480', ):
self.runMode = runMode
self.procMode = procMode
procWidth, procHeight = qFunc.getResolution(procMode)
self.procWidth = procWidth
self.procHeight= procHeight
self.breakFlag = threading.Event()
self.breakFlag.clear()
self.name = name
self.id = id
self.proc_id = '{0:10s}'.format(name).replace(' ', '_')
self.proc_id = self.proc_id[:-2] + '_' + str(id)
if (runMode == 'debug'):
self.logDisp = True
else:
self.logDisp = False
qLog.log('info', self.proc_id, 'init', display=self.logDisp, )
self.proc_s = None
self.proc_r = None
self.proc_main = None
self.proc_beat = None
self.proc_last = None
self.proc_step = '0'
self.proc_seq = 0
def __del__(self, ):
qLog.log('info', self.proc_id, 'bye!', display=self.logDisp, )
def begin(self, ):
#qLog.log('info', self.proc_id, 'start')
self.fileRun = qPath_work + self.proc_id + '.run'
self.fileRdy = qPath_work + self.proc_id + '.rdy'
self.fileBsy = qPath_work + self.proc_id + '.bsy'
qFunc.statusSet(self.fileRun, False)
qFunc.statusSet(self.fileRdy, False)
qFunc.statusSet(self.fileBsy, False)
self.proc_s = queue.Queue()
self.proc_r = queue.Queue()
self.proc_main = threading.Thread(target=self.main_proc, args=(self.proc_s, self.proc_r, ))
self.proc_beat = time.time()
self.proc_last = time.time()
self.proc_step = '0'
self.proc_seq = 0
self.proc_main.setDaemon(True)
self.proc_main.start()
def abort(self, waitMax=5, ):
qLog.log('info', self.proc_id, 'stop', display=self.logDisp, )
self.breakFlag.set()
chktime = time.time()
while (not self.proc_beat is None) and ((time.time() - chktime) < waitMax):
time.sleep(0.25)
chktime = time.time()
while (os.path.exists(self.fileRun)) and ((time.time() - chktime) < waitMax):
time.sleep(0.25)
def put(self, data, ):
self.proc_s.put(data)
return True
def checkGet(self, waitMax=5, ):
chktime = time.time()
while (self.proc_r.qsize() == 0) and ((time.time() - chktime) < waitMax):
time.sleep(0.10)
data = self.get()
return data
def get(self, ):
if (self.proc_r.qsize() == 0):
return ['', '']
data = self.proc_r.get()
self.proc_r.task_done()
return data
def main_proc(self, cn_r, cn_s, ):
# Logging
qLog.log('info', self.proc_id, 'start', display=self.logDisp, )
qFunc.statusSet(self.fileRun, True)
self.proc_beat = time.time()
# Initial setup
self.proc_step = '1'
# Model definition files (frozen .pb graph, .pbtxt text graph, and class labels)
file_config = 'cv2dnn/ssd/frozen_inference_graph.pb'
file_weights = 'cv2dnn/ssd/ssd_mobilenet_v2_coco_2018_03_29.pbtxt'
file_labels = 'cv2dnn/ssd/labels.txt'
threshold_score = 0.25
threshold_nms = 0.4
qLog.log('debug', self.proc_id, 'Loading Network.....', )
model = cv2.dnn.readNetFromTensorflow(file_config, file_weights)
qLog.log('debug', self.proc_id, 'Network successfully loaded', )
# Class names the model was trained on
classNames = {}
classColors = {}
r = codecs.open(file_labels, 'r', 'utf-8')
i = 0
for t in r:
t = t.replace('\n', '')
t = t.replace('\r', '')
classNames[i] = str(t).strip()
classColors[i] = np.random.randint(low=0, high=255, size=3, dtype='uint8')
i += 1
# FPS measurement
qFPS_class = _v5__qFunc.qFPS_class()
qFPS_last = time.time()
# Main wait loop
self.proc_step = '5'
while (self.proc_step == '5'):
self.proc_beat = time.time()
# Check for a stop request
if (self.breakFlag.is_set()):
self.breakFlag.clear()
self.proc_step = '9'
break
# Fetch from the input queue
if (cn_r.qsize() > 0):
cn_r_get = cn_r.get()
inp_name = cn_r_get[0]
inp_value = cn_r_get[1]
cn_r.task_done()
else:
inp_name = ''
inp_value = ''
if (cn_r.qsize() > 1) or (cn_s.qsize() > 20):
qLog.log('warning', self.proc_id, 'queue overflow warning!, ' + str(cn_r.qsize()) + ', ' + str(cn_s.qsize()))
# Set ready status
if (qFunc.statusCheck(self.fileRdy) == False):
qFunc.statusSet(self.fileRdy, True)
# Status response
if (inp_name.lower() == '_status_'):
self.proc_last = time.time()
self.proc_seq += 1
if (self.proc_seq > 9999):
self.proc_seq = 1
out_name = inp_name
out_value = '_ready_'
cn_s.put([out_name, out_value])
# Receive an image
if (inp_name.lower() == '[img]'):
# Execution counter
self.proc_last = time.time()
self.proc_seq += 1
if (self.proc_seq > 9999):
self.proc_seq = 1
# Set busy status
if (qFunc.statusCheck(self.fileBsy) == False):
qFunc.statusSet(self.fileBsy, True)
# Load the image
image = inp_value.copy()
image_height, image_width = image.shape[:2]
# Pad the input image to a square
if (image_width > image_height):
image_size = image_width
inp_image = np.zeros((image_width,image_width,3), np.uint8)
offset = int((image_width-image_height)/2)
inp_image[offset:offset+image_height, 0:image_width] = image.copy()
out_image = inp_image.copy()
elif (image_height > image_width):
image_size = image_height
inp_image = np.zeros((image_height,image_height,3), np.uint8)
offset = int((image_height-image_width)/2)
inp_image[0:image_height, offset:offset+image_width] = image.copy()
out_image = inp_image.copy()
else:
image_size = image_width
inp_image = image.copy()
out_image = inp_image.copy()
# Set the image as the network input
blob = cv2.dnn.blobFromImage(inp_image, size=(300, 300), swapRB=True)
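# blobFromImage resizes the square crop to 300x300 (the SSD input size) and
# swapRB converts OpenCV's BGR channel order to the RGB order the model expects.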
model.setInput(blob)
# Run object detection on the image
output = model.forward()
pass_classids = []
pass_scores = []
pass_boxes = []
# Person search
nowTime = datetime.datetime.now()
stamp = nowTime.strftime('%Y%m%d.%H%M%S')
hit_count = 0
hit_img = None
# output has shape [1, 1, 100, 7]; keep only the last two dimensions
detections = output[0, 0, :, :]
# Each detection holds [?, class id, confidence, x_start, y_start, x_end, y_end].
for detection in detections:
# Keep detections whose confidence is at least threshold_score.
score = detection[2]
if (score >= threshold_score):
# Multiply by the image size to get the four corner coordinates of the bounding box.
axis = detection[3:7] * (image_size, image_size, image_size, image_size)
# Convert from float to int and unpack into variables (astype(int); np.int was removed in newer NumPy).
(start_x, start_y, end_x, end_y) = axis.astype(int)[:4]
left = int(start_x)
top = int(start_y)
width = int(end_x - start_x)
height = int(end_y - start_y)
# Keep only boxes within a plausible size range.
if (width >= image_size/20) and (width <= image_size/2) \
and (height >= image_size/30) and (height <= image_size/1.5):
classid = detection[1]
pass_classids.append(classid)
pass_scores.append(float(score))
pass_boxes.append([left, top, width, height])
# Use non-maximum suppression to drop overlapping boxes.
indices = cv2.dnn.NMSBoxes(pass_boxes, pass_scores, float(0.8), float(threshold_nms))
if (len(indices)<3):
indices = cv2.dnn.NMSBoxes(pass_boxes, pass_scores, float(0.5), float(threshold_nms))
if (len(indices)<3):
indices = cv2.dnn.NMSBoxes(pass_boxes, pass_scores, float(0.0), float(threshold_nms))
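# The score threshold passed to NMSBoxes is relaxed in steps (0.8 -> 0.5 -> 0.0)
# until at least three boxes survive, so weak detections are kept only when needed.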
# Loop over the surviving detections
for i in indices:
i = i[0]
classid = pass_classids[i]
score = pass_scores[i]
box = pass_boxes[i]
left = box[0]
top = box[1]
width = box[2]
height = box[3]
# Look up the class name and its draw color.
class_name = classNames[classid]
class_color = [ int(c) for c in classColors[classid] ]
label = class_name + ' {0:.2f}'.format(score)
# (image, top-left corner, bottom-right corner, color, line thickness)
cv2.rectangle(out_image, (left, top), (left+width, top+height), class_color, thickness=2)
# (image, text, origin, font, font scale, color)
t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_COMPLEX_SMALL, 1 , 1)[0]
x = left + t_size[0] + 3
y = top + t_size[1] + 4
cv2.rectangle(out_image, (left, top), (x, y), class_color, -1)
cv2.putText(out_image, label, (left, top + t_size[1] + 1), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255,255,255), 1)
# Output the recognized regions
if (score >= 0.5):
if (class_name == 'person') \
or (class_name == 'car'):
# Output the result
out_name = '[array]'
#out_value = inp_image[top:top+height, left:left+width].copy()
out_value = out_image[top:top+height, left:left+width].copy()
cn_s.put([out_name, out_value])
# Keep the person crop
if (class_name == 'person'):
hit_count += 1
if (hit_count == 1):
hit_img = inp_image[top:top+height, left:left+width].copy()
# Write to file
fn0 = stamp + '.person.jpg'
fn1 = qPath_rec + fn0
fn2 = qPath_v_detect + fn0
if (not os.path.exists(fn1)) and (not os.path.exists(fn2)):
try:
cv2.imwrite(fn1, hit_img)
cv2.imwrite(fn2, hit_img)
# External program
if (self.runMode == 'debug') \
or (self.runMode == 'reception'):
if (os.name == 'nt'):
if (os.path.exists(qExt_face)):
ext_face = subprocess.Popen([qExt_face, qPath_rec, fn0, ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
except Exception as e:
pass
# Restore the output image to the original aspect ratio
if (image_width > image_height):
offset = int((image_width-image_height)/2)
out_image = out_image[offset:offset+image_height, 0:image_width].copy()
elif (image_height > image_width):
offset = int((image_height-image_width)/2)
out_image = out_image[0:image_height, offset:offset+image_width].copy()
# FPS measurement
fps = qFPS_class.get()
if ((time.time() - qFPS_last) > 5):
qFPS_last = time.time()
# Output the result (fps)
out_name = '_fps_'
out_value = '{:.2f}'.format(fps)
cn_s.put([out_name, out_value])
# Output the result (resolution)
out_height, out_width = out_image.shape[:2]
out_name = '_reso_'
out_value = str(out_width) + 'x' + str(out_height)
cn_s.put([out_name, out_value])
# Output the result
out_name = '[img]'
out_value = out_image.copy()
cn_s.put([out_name, out_value])
#time.sleep(0.50)
# Clear busy status
qFunc.statusSet(self.fileBsy, False)
# Idling
slow = False
if (qFunc.statusCheck(qBusy_dev_cpu ) == True):
slow = True
if (qFunc.statusCheck(qBusy_dev_cam ) == True):
slow = True
if (qFunc.statusCheck(qBusy_d_play ) == True) \
or (qFunc.statusCheck(qBusy_d_browser) == True):
slow = True
if (slow == True):
time.sleep(1.00)
else:
if (cn_r.qsize() == 0):
time.sleep(0.25)
else:
time.sleep(0.05)
# Shutdown processing
if (True):
# Clear ready status
qFunc.statusSet(self.fileRdy, False)
# Clear busy status
qFunc.statusSet(self.fileBsy, False)
# Drain the queues
while (cn_r.qsize() > 0): # reconstructed from the drain pattern used above (source truncated here)
cn_r_get = cn_r.get()
cn_r.task_done()
the *scad2d*s:
next_indent: str = indent + " "
scad2d: Scad2D
for scad2d in scad2ds:
scad2d.scad_lines_append(scad_lines, next_indent)
# Output the closing '}':
scad_lines.append(f"{indent}}}")
# Module2d.use_module_get():
def use_module_get(self) -> "UseModule2D":
"""Return the UseModule associated with Module2D."""
module2d: Module2D = self
use_module: UseModule2D = module2d.use_module
return use_module
# Polygon:
class Polygon(Scad2D):
"""Represents an OpenScad `polygon` command."""
# Polygon.__init__():
def __init__(self, name: str, simple_polygons: "List[SimplePolygon]",
convexity: int = -1, lock=True) -> None:
"""Initialize an OpenSCAD polygon command.
Initialize a *Polygon* object to initially contain a list of
*simple_polygons*. If *lock* is *True*, no additional
*SimplePolygon*'s can be appended to *polygon* (i.e. *self*);
otherwise both the *Polygon*.*append*() and the
*Polygon*.*extend*() methods can be used to append additional
*SimplePolygon*'s. The *Polygon*.*lock*() method forces *polygon*
to be locked and it can not be unlocked afterwards.
Args:
*name*: (*str*): The name of OpenSCAD polygon command.
*simple_polygons* (*List*[*SimplePolygon*]): A list of
*SimplePolygon*'s to install into *polygon*
(i.e. *self*). Each of these *SimplePolygon*"s must
be locked.
*convexity* (*int*): A number to estimate the complexity
of the polygon. Higher numbers are needed for
accurate rendering of complex polygons. If no value is
provided, a reasonable default is used.
*lock* (*bool*): If *True* the initialized *polygon*
object (i.e. *self*) is locked from having additional
*SimplePolygon*'s appended to it; otherwise additional
*SimplePolygon*'s can still be appended.
Raises:
*ValueError*(*str*): if any of the *SimplePolygon*'s in
*simple_polygons* are not locked.
"""
# Validate that all of the *simple_polygons* are locked:
simple_polygon_index: int
simple_polygon: SimplePolygon
for simple_polygon_index, simple_polygon in enumerate(simple_polygons):
if not simple_polygon.is_locked():
raise ValueError(f"SimplePolygon ('{simple_polygon.name}') "
f"at index {simple_polygon_index} is not locked.")
# Initialize the base class and stuff values into *scad_polygon* (i.e. *self*):
super().__init__(name)
self.locked: bool = lock
self.convexity: int = convexity
self.simple_polygons: List[SimplePolygon] = simple_polygons[:]
# Polygon.__getitem__():
def __getitem__(self, index: int) -> "SimplePolygon":
"""Return the selected Polygon.
Args:
*index* (*int*): The index into the *polygon*
(i.e. *self*) *Polygon*'s list to fetch.
Returns:
(*SimplePolygon*) Returns the selected *SimplePolygon*:
"""
# Grab some values from *polygon* (i.e. *self*):
polygon: Polygon = self
simple_polygons: List[SimplePolygon] = polygon.simple_polygons
simple_polygons_size: int = len(simple_polygons)
if index < 0 or index >= simple_polygons_size:
raise IndexError(f"index={index} and it is not in range 0:{simple_polygons_size-1}")
simple_polygon: SimplePolygon = simple_polygons[index]
return simple_polygon
# Polygon.__len__()
def __len__(self):
"""Return the number of SimplePolygon's in the Polygon.
Returns:
(*int*) Returns the number of *SimplePolygon*'s in *polygon*
(i.e. *self*.)
"""
# Grab the *polygons* from *scad_polygon* (i.e. *self*):
polygon: Polygon = self
simple_polygons: List[SimplePolygon] = polygon.simple_polygons
simple_polygons_size: int = len(simple_polygons)
return simple_polygons_size
# Polygon.__str__():
def __str__(self) -> str:
"""Return string for *Polygon."""
# Grab *name* from *polygon* (i.e. *self*) and return formatted string:
polygon: Polygon = self
name: str = polygon.name
simple_polygons: List[SimplePolygon] = polygon.simple_polygons
simple_polygons_size: int = len(simple_polygons)
convexity: int = polygon.convexity
return (f"Polygon('{name}',"
f"len(simple_polygons)={simple_polygons_size},"
f"convexity={convexity})")
# Polygon.append():
def append(self, simple_polygon: "SimplePolygon") -> None:
"""Append a SimplePolygon to the Polygon.
Args:
*simple_polygon*: (*SimplePolygon*): The *SimplePolygon*
to append to *polygon* (i.e. *self*.)
"""
# Grab some values from *polygon* (i.e. *self*):
polygon: Polygon = self
locked: bool = polygon.locked
if locked:
raise ValueError(f"Polygon '{polygon.name}' is locked and can not be appended to.")
simple_polygons: List[SimplePolygon] = polygon.simple_polygons
simple_polygons.append(simple_polygon)
# Polygon.duplicates_find():
def duplicates_find(self) -> List[List["SimplePolygon"]]:
"""Return simple polygons list that have same bounding boxes."""
# Unpack some values from *polygon* (i.e. *self*):
polygon: Polygon = self
simple_polygons: List[SimplePolygon] = polygon.simple_polygons
# Iterate over *simple_polygons* and populate *unique_bounding_boxes*:
# The tuple has (sw_corner, ne_corner, List[matches]):
bounding_box: Tuple[P2D, P2D, List[SimplePolygon]]
bounding_boxes: List[Tuple[P2D, P2D, List[SimplePolygon]]] = []
simple_polygon: SimplePolygon
for simple_polygon in simple_polygons:
current_sw_corner: P2D
current_ne_corner: P2D
current_sw_corner, current_ne_corner = simple_polygon.bounding_box_get()
# Sweep through *bounding_boxes* looking for a match:
for bounding_box in bounding_boxes:
# Unpack *bounding_box*:
sw_corner: P2D
ne_corner: P2D
matches: List[SimplePolygon]
sw_corner, ne_corner, matches = bounding_box
if current_sw_corner.is_close(sw_corner) and current_ne_corner.is_close(ne_corner):
# We have a match, so append this to the current bounding box:
matches.append(simple_polygon)
break
else:
# No match, create a new entry:
bounding_boxes.append((current_sw_corner, current_ne_corner, [simple_polygon]))
# Sweep through *bounding_boxes* and generate the *final_duplicates* list:
final_duplicates: List[List[SimplePolygon]] = []
for bounding_box in bounding_boxes:
duplicate_simple_polygons: List[SimplePolygon] = bounding_box[2]
if len(duplicate_simple_polygons) > 1:
final_duplicates.append(duplicate_simple_polygons)
return final_duplicates
# Polygon.extend():
def extend(self, additional_simple_polygons: "List[SimplePolygon]") -> None:
"""Append a list of SimplePolygon's to the Polygon.
Args:
*additional_simple_polygons*: (*List*[*SimplePolygon*]):
The list of *SimplePolygon*'s to append to
*scad_polygon* (i.e. *self*.)
"""
# Grab some values from *polygon* (i.e. *self*):
polygon: Polygon = self
locked: bool = polygon.locked
if locked:
raise ValueError(f"Polygon '{polygon.name}' is locked and can not be extended.")
simple_polygons: List[SimplePolygon] = polygon.simple_polygons
simple_polygons.extend(additional_simple_polygons)
# Polygon.lock():
def lock(self):
"""Lock Polygon from further expansion."""
polygon: Polygon = self
polygon.locked = True
# Polygon.reposition():
def reposition(self, center: P2D, rotate: float, translate: P2D) -> "Polygon":
"""Return repositioned copy."""
# Unpack *polygon* (i.e. *self*):
polygon: Polygon = self
name: str = polygon.name
simple_polygons: List[SimplePolygon] = polygon.simple_polygons
repositioned_polygon: Polygon = Polygon(f"{name} Repositioned Polygon", [], lock=False)
simple_polygon: SimplePolygon
for simple_polygon in simple_polygons:
new_simple_polygon: SimplePolygon = simple_polygon.reposition(center, rotate,
translate)
repositioned_polygon.append(new_simple_polygon)
return repositioned_polygon
# Polygon.scad_lines_append():
def scad_lines_append(self, scad_lines: List[str], indent: str) -> None:
"""Append Polygon commands to a lines list.
Args:
*scad_lines* (*List*[*str*]): The lines list to append the
*scad_polygon* (i.e. *self*) to.
*indent* (*str*): The indentatation prefix for each line.
"""
polygon: Polygon = self
simple_polygons: List[SimplePolygon] = polygon.simple_polygons
polygon.polygon_scad_lines_append(simple_polygons, scad_lines, indent)
# Polygon.simple_polygons_get():
def simple_polygons_get(self) -> "List[SimplePolygon]":
"""Return current list of Simple Polygons."""
polygon: Polygon = self
simple_polygons: List[SimplePolygon] = polygon.simple_polygons
# Make a copy and return the result:
simple_polygons = simple_polygons[:]
return simple_polygons
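# Usage sketch (illustrative only; SimplePolygon is defined below, and P2D(x, y)
# construction is assumed from this package's 2D point class):
#   outline = SimplePolygon("Outline", [P2D(0.0, 0.0), P2D(10.0, 0.0), P2D(10.0, 10.0)], lock=True)
#   poly = Polygon("Example Polygon", [outline], lock=False)
#   poly.lock()
#   scad_lines: List[str] = []
#   poly.scad_lines_append(scad_lines, "")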
# SimplePolygon:
class SimplePolygon(Scad2D):
"""Represents a simple closed polygon of points."""
# SimplePolygon.__init__():
def __init__(self, name: str, points: List[P2D] = [],
lock: bool = False, convexity: int = -1) -> None:
"""Initialize a SimplePolygon.
Args:
*name* (*str*): The name for the polygon.
*points* (*List*[*P2D*]): The list of points to initialize
*simple_polygon* (i.e. *self*) with.
*lock* (*bool*): If *True* no additional points can be
appended to *simple_polygon* (i.e. *self*); otherwise
additional points can be appended.
*convexity* (*int*): Specifies the complexity of the
polygon. Larger numbers are needed to render ever more
complex polygons. The default is -1, which causes
a reasonable initial guess to be used.
"""
# Stuff values into *simple_polygon* (i.e. *self*):
# simple_polygon: SimplePolygon = self
self.locked: bool = lock
self.name: str = name
self.points: List[P2D] = points[:] # Copy the contents of *points*
self.convexity: int = 4 if convexity <= 0 else convexity
# SimplePolygon.__getitem__():
def __getitem__(self, index: int) -> P2D:
"""Fetch a point from the SimplePolygon.
Args:
*index* (*int*): Index into the *simple_polygon*
(i.e. *self*) points list.
Returns:
(*P2D*): Return the *index*'th *P2D* from *simple_polygon*.
"""
# Grab some values from *simple_polygon* (i.e. *self*):
simple_polygon: SimplePolygon = self
points: List[P2D] = simple_polygon.points
points_size: int = len(points)
if index < 0 or index >= points_size:
raise IndexError(f"index of {index} is not in range 0 to {points_size-1}")
point: P2D = points[index]
return point
# SimplePolygon.__len__():
def __len__(self) -> int:
"""Return the number of points currently in the Polygon."""
# Grab some values from *simple_polygon* (i.e. *self*):
simple_polygon: SimplePolygon = self
points: List[P2D] = simple_polygon.points
size: int = len(points)
return size
# SimplePolygon.__str__():
def __str__(self):
"""Return a short string representation of a Polygon."""
# Grab some values from *simplepolygon* (i.e. *self*):
simple_polygon: SimplePolygon = self
name: str = simple_polygon.name
points: List[P2D] = simple_polygon.points
selected_points: List[P2D] = points if len(points) <= 2 else [points[0]] + [points[-1]]
join_text: str = ", " if len(points) <= 2 else ", ..., "
selected_point: P2D
selected_point_texts: List[str] = [f"{selected_point}"
for selected_point in selected_points]
selected_points_text = join_text.join(selected_point_texts)
return f"SimplePolygon('{name}', [{selected_points_text}])"
# SimplePolygon.arc_append():
def arc_append(self, center: P2D, radius: float, start_angle: float, end_angle: float,
points_angle: float = 0.0, debug: bool = False) -> None:
"""Append an arc of points to a Polygon.
Args:
*center* (*P*):
The center of the arc.
*radius* (*float*):
The arc radius.
*start_angle* (*float*):
The starting angle for the arc.
*end_angle* (*float*):
The ending angle for the arc.
*points_angle* (*float*) (Optional: defaults to 0.0):
The angular distance between points along the arc.
import json
import math
import random
import webcolors
from alive_progress import alive_bar
import cairo
def do_nothing():
"""Do nothing. A dummy function to use in place of alive_bar."""
pass
class CairoPainter:
"""
A class to interface with the Cairo library to draw the map for a given save file.
"""
def __init__(self, parent):
"""
:param parent: The SaveFileParser used to get useful information.
:type parent: SaveFileParser.
"""
self.parent = parent
self.player_colors = [
(200, 0, 0),
(0, 0, 200),
(100, 0, 0),
(0, 0, 100),
(0, 200, 200),
(200, 0, 200),
(200, 200, 0),
(0, 100, 100),
(100, 0, 100),
(100, 100, 0),
(200, 200, 200),
(100, 100, 100),
(50, 50, 50),
(0, 0, 0),
(0, 200, 0),
(0, 100, 0),
]
def log_message(self, message):
"""
Send a message to the logger.
:param message: The message to log.
:type message: string.
"""
self.parent.log_message(message)
def load_rgb(self, rgb_name, default_value):
"""
Look up an RGB setting by name, resolving named colors via rgb_values.json or webcolors, with a fallback default.
:param rgb_name: The name to look up.
:type rgb_name: string.
:param default_value: A tuple of 3 ints for the RGB values.
:type default_value: (integer, integer, integer).
"""
value = self.load_settings.get(rgb_name, default_value)
if isinstance(value, str):
if value in self.load_settings_rgb_values:
return self.load_settings_rgb_values[value]
try:
return webcolors.name_to_rgb(value)
except ValueError:
return (0, 0, 0)
return value
def load_settings(self, file_path, tile_size):
"""
Update the settings, given a config file path.
:param file_path: The path to the config file.
:type file_path: string.
:param tile_size: Override for tile size.
:type tile_size: integer.
"""
with open("config/rgb_values.json") as file_handle:
self.load_settings_rgb_values = json.load(file_handle)
with open(file_path) as file_handle:
self.load_settings = json.load(file_handle)
self.ds = self.load_settings.get("ds", 25)
self.ss = 2 * self.ds
self.ocean_noise = self.load_settings.get("ocean_noise", 50)
self.height_rgb_low = self.load_rgb("height_rgb_low", (255, 255, 255))
self.height_rgb_high = self.load_rgb("height_rgb_high", (200, 200, 200))
self.road_tile_rgb = self.load_rgb("road_tile_rgb", (100, 100, 100))
self.rail_rgb = self.load_rgb("rail_rgb", (255, 255, 255))
self.road_rgb = self.load_rgb("road_rgb", (255, 255, 255))
self.tram_rgb = self.load_rgb("tram_rgb", (255, 255, 255))
self.railway_rgb = self.load_rgb("railway_rgb", (100, 100, 100))
self.electrified_railway_rgb = self.load_rgb("electrified_railway_rgb", (200, 200, 200))
self.monorail_rgb = self.load_rgb("monorail_rgb", (150, 150, 150))
self.maglev_rgb = self.load_rgb("maglev_rgb", (100, 100, 100))
self.town_building_rgb = self.load_rgb("town_building_rgb", (255, 255, 255))
self.industry_rgb = self.load_rgb("industry_rgb", (255, 165, 0))
self.industry_edge_rgb = self.load_rgb("industry_edge_rgb", (0, 0, 0))
self.torb_rgb = self.load_rgb("torb_rgb", (150, 150, 150))
self.objects_rgb = self.load_rgb("objects_rgb", (150, 150, 150))
self.torb_edge_rgb = self.load_rgb("torb_edge_rgb", (0, 100, 100))
self.water_edge_rgb = self.load_rgb("water_edge_rgb", (0, 0, 255))
self.station_rgb = self.load_rgb("station_rgb", (255, 0, 255))
self.rail_station_rgb = self.load_rgb("rail_station_rgb", (255, 0, 255))
self.bus_station_rgb = self.load_rgb("bus_station_rgb", (255, 0, 255))
self.truck_station_rgb = self.load_rgb("truck_station_rgb", (255, 0, 255))
self.airport_rgb = self.load_rgb("airport_rgb", (255, 255, 0))
self.seaport_rgb = self.load_rgb("seaport_rgb", (0, 255, 255))
self.heliport_rgb = self.load_rgb("heliport_rgb", (255, 255, 0))
self.rail_depot_rgb = self.load_rgb("rail_depot_rgb", (0, 100, 100))
self.road_depot_rgb = self.load_rgb("road_depot_rgb", (0, 100, 100))
self.ship_depot_rgb = self.load_rgb("ship_depot_rgb", (0, 100, 100))
self.screen_mode = self.load_settings.get("screen_mode", "normal")
self.player_colors = self.load_settings.get("player_colors", self.player_colors)
self.reverse_track_rgb = self.load_settings.get("reverse_track_rgb", False)
self.show_signals = self.load_settings.get("show_signals", True)
self.show_roads = self.load_settings.get("show_roads", True)
for index, rgb in enumerate(self.player_colors):
if not isinstance(rgb, str):
continue
if rgb in self.load_settings_rgb_values:
self.player_colors[index] = self.load_settings_rgb_values[rgb]
else:
try:
self.player_colors[index] = webcolors.name_to_rgb(rgb)
except ValueError:
self.player_colors[index] = (0, 0, 0)
if tile_size:
self.ss = tile_size
self.ds = (tile_size - 1) // 2
# Thicknesses of various elements.
self.rail_width = self.ds / 3
self.road_width = self.ds / 2
self.tram_width = self.ds / 5
self.bridge_edge_width = self.ss / 10
self.edge_width = int(0.1 * self.ss)
if self.edge_width % 2 == 0:
self.edge_width += 1
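# Example config fragment (illustrative only; the keys shown are among those read above,
# and color values may be names, presets from config/rgb_values.json, or [r, g, b] lists):
#   {
#     "ds": 25,
#     "ocean_noise": 50,
#     "screen_mode": "normal",
#     "road_tile_rgb": [100, 100, 100],
#     "water_edge_rgb": "blue",
#     "player_colors": ["red", "blue", [0, 200, 200]]
#   }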
def set_rgb(self, rgb):
"""
Set the RGB for the context.
:param rgb: The fill color, expressed as a tuple in the range (0-255, 0-255, 0-255).
:type rgb: (integer, integer, integer).
"""
ctx = self.context
(rgb_r, rgb_g, rgb_b) = rgb
ctx.set_source_rgb(rgb_r / 255, rgb_g / 255, rgb_b / 255)
def set_rgba(self, rgba):
"""
Set the RGBA for the context.
:param rgba: The fill color, expressed as a tuple in the range (0-255, 0-255, 0-255, 0-1).
:type rgba: (integer, integer, integer, integer).
"""
ctx = self.context
(rgba_r, rgba_g, rgba_b, rgba_a) = rgba
ctx.set_source_rgba(rgba_r / 255, rgba_g / 255, rgba_b / 255, rgba_a)
def draw_line(self, x1, y1, x2, y2, rgb, width, round_cap=True):
"""
Draw a line on the context.
:param x1: The x coordinate of the first endpoint.
:type x1: float
:param y1: The y coordinate of the first endpoint.
:type y1: float
:param x2: The x coordinate of the second endpoint.
:type x2: float
:param y2: The y coordinate of the second endpoint.
:type y2: float
:param rgb: The fill color, expressed as a tuple in the range (0-255, 0-255, 0-255).
:type rgb: (integer, integer, integer).
:param width: The width of the line.
:type width: float
:param round_cap: If True, set the line cap to round. Defaults to True.
:type round_cap: Boolean
"""
ctx = self.context
ctx.save()
self.set_rgb(rgb)
ctx.set_line_width(width)
if round_cap:
ctx.set_line_cap(cairo.LINE_CAP_ROUND)
ctx.move_to(x1, y1)
ctx.line_to(x2, y2)
ctx.stroke()
ctx.restore()
def draw_rectangle(self, x, y, w, h, rgb_fill, rgb_stroke=None):
"""
Fill a rectangle on the context.
:param x: The x coordinate of the top left corner.
:type x: float
:param y: The y coordinate of the top left corner.
:type y: float
:param w: The width of the rectangle.
:type w: float
:param h: The height of the rectangle.
:type h: float
:param rgb_fill: The fill color, expressed as a tuple in the range (0-255, 0-255, 0-255).
:type rgb_fill: (integer, integer, integer).
:param rgb_stroke: The stroke color, expressed as a tuple in the range (0-255, 0-255, 0-255).
:type rgb_stroke: (integer, integer, integer).
"""
ctx = self.context
ctx.save()
self.set_rgb(rgb_fill)
ctx.rectangle(x, y, w, h)
ctx.fill()
if rgb_stroke is not None:
self.set_rgb(rgb_stroke)
ctx.rectangle(x, y, w, h)
ctx.stroke()
ctx.restore()
def draw_rectangle_rgba(self, x, y, w, h, rgba):
"""
Fill a rectangle on the context.
:param x: The x coordinate of the top left corner.
:type x: float
:param y: The y coordinate of the top left corner.
:type y: float
:param w: The width of the rectangle.
:type w: float
:param h: The height of the rectangle.
:type h: float
:param rgba: The fill color, expressed as a tuple in the range (0-255, 0-255, 0-255, 0-1).
:type rgba: (integer, integer, integer, integer).
"""
ctx = self.context
ctx.save()
self.set_rgba(rgba)
ctx.rectangle(x, y, w, h)
ctx.fill()
ctx.restore()
def cxy_from_rc(self, row, col):
"""
Get the centre of a tile, given its row and column.
:param row: The row of the tile.
:type row: integer
:param col: The column of the tile.
:type col: integer
:return: The coordinates of the centre of the tile, a tuple of floats.
:rtype: (float, float)
"""
x = (self.parent.ncols - col - 1.0 - 0.5) * self.ss
y = (row - 0.5) * self.ss
cx = int(x + 0.5 * self.ss)
cy = int(y + 0.5 * self.ss)
return cx, cy
def xy_from_tile(self, tile):
"""
Get the top left corner of a tile.
:param tile: The tile to consider.
:type tile: TileObject
:return: The coordinates of the centre of the tile, a tuple of floats.
:rtype: (float, float)
"""
x = int((self.parent.ncols - tile.col - 1 - 0.5) * self.ss)
y = int((tile.row - 0.5) * self.ss)
return x, y
def cxy_from_tile(self, tile):
"""
Get the centre of a tile.
:param tile: The tile to consider.
:type tile: TileObject
:return: The coordinates of the centre of the tile, a tuple of floats.
:rtype: (float, float)
"""
return self.cxy_from_rc(tile.row, tile.col)
def transform_to_tile(self, tile, rotation):
"""
Transform the context to a given tile, with a rotation.
:param tile: The tile to consider.
:type tile: TileObject
:param rotation: The rotation to apply, as a number of quarter turns in the range 0-3.
:type rotation: integer
"""
ctx = self.context
ctx.save()
cx, cy = self.cxy_from_tile(tile)
ctx.translate(cx + 0.5, cy + 0.5)
ctx.rotate(0.5 * math.pi * rotation)
def end_transform_to_tile(self):
"""Restore the context. This should be called after transform_to_tile."""
self.context.restore()
def draw_square(self, tile, rgb_fill, rgb_stroke=None):
"""
Draw a square for a given tile.
:param tile: The tile to consider.
:type tile: TileObject
:param rgb_fill: The fill color, expressed as a tuple in the range (0-255, 0-255, 0-255).
:type rgb_fill: (integer, integer, integer).
:param rgb_stroke: The stroke color, expressed as a tuple in the range (0-255, 0-255, 0-255).
<reponame>KinanZ/open_clip
import os
import time
import json
import numpy as np
import torch
import torch.nn as nn
from sklearn import decomposition
from torch.cuda.amp import autocast
import torch.distributed as dist
import sys
sys.path.append('/misc/student/alzouabk/Thesis/self_supervised_pretraining/open_clip_thesis/src/')
from training.zero_shot import zero_shot_eval
import pdb
import wandb
import logging
def is_master(args):
return (not args.distributed) or args.gpu == 0
def get_weights(labels, class_weights):
weights = torch.ones(labels.shape[0])
for i in range(labels.shape[0]):
sample_label = torch.where(labels[i])[0]
sample_weights = []
for class_label in sample_label:
sample_weights.append(class_weights[class_label.item()])
weights[i] = max(sample_weights)
return weights
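# Illustrative example (hypothetical numbers, not taken from the training data):
# for a multi-hot label row [1, 0, 1] and class_weights {0: 0.5, 1: 0.9, 2: 0.99},
# get_weights picks max(0.5, 0.99) = 0.99, so each sample is weighted by its
# rarest (highest-weight) positive class.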
def get_loss(model, images, loss_img, loss_txt, class_weights, texts, labels, args):
image_features, text_features, logit_scale = model(images, texts)
logit_scale = logit_scale.mean()
if args.distributed and args.aggregate:
world_size = dist.get_world_size()
rank = dist.get_rank()
# We gather tensors from all gpus to get more negatives to contrast with.
gathered_image_features = [
torch.zeros_like(image_features) for _ in range(world_size)
]
gathered_text_features = [
torch.zeros_like(text_features) for _ in range(world_size)
]
gathered_labels = [
torch.zeros_like(labels) for _ in range(world_size)
]
dist.all_gather(gathered_image_features, image_features)
dist.all_gather(gathered_text_features, text_features)
dist.all_gather(gathered_labels, labels)
all_image_features = torch.cat(
[image_features]
+ gathered_image_features[:rank]
+ gathered_image_features[rank + 1:]
)
all_text_features = torch.cat(
[text_features]
+ gathered_text_features[:rank]
+ gathered_text_features[rank + 1:]
)
labels = torch.cat(
[labels]
+ gathered_labels[:rank]
+ gathered_labels[rank + 1:]
)
if args.new_model:
gathered_texts = [torch.zeros_like(texts['input_ids']) for _ in range(world_size)]
dist.all_gather(gathered_texts, texts['input_ids'])
texts = torch.cat(
[texts['input_ids']]
+ gathered_texts[:rank]
+ gathered_texts[rank + 1:]
)
else:
gathered_texts = [torch.zeros_like(texts) for _ in range(world_size)]
dist.all_gather(gathered_texts, texts)
texts = torch.cat(
[texts]
+ gathered_texts[:rank]
+ gathered_texts[rank + 1:]
)
# this is needed to send gradients back everywhere.
logits_per_image = logit_scale * all_image_features @ all_text_features.t()
logits_per_text = logits_per_image.t()
else:
logits_per_image = logit_scale * image_features @ text_features.t()
logits_per_text = logit_scale * text_features @ image_features.t()
if args.Label_grouped: # Basically supervised
ground_truth = torch.zeros(logits_per_image.shape).float()
for i in range(len(logits_per_image)):
mask_same = [j for j in range(len(logits_per_image)) if torch.equal(labels[i], labels[j])]
ground_truth[i][mask_same] = 1
elif args.Healthy_grouped:
ground_truth = torch.eye(len(logits_per_image)).float() # logits_per_image.shape = logits_per_text.shape = ground_truth.shape = batchsize x batchsize
for i in range(len(logits_per_image)):
# instead of an eye matrix we have 1 on the diagonal and 1 if the sample from this column belongs to the healthy class
if labels[i][0] == 1:
mask_same = [j for j in range(len(logits_per_image)) if torch.equal(labels[i], labels[j])]
ground_truth[i][mask_same] = 1
elif args.Healthy_Caption_grouped:
ground_truth = torch.eye(len(logits_per_image)).float() # logits_per_image.shape = logits_per_text.shape = ground_truth.shape = batchsize x batchsize
for i in range(len(logits_per_image)):
if labels[i][0] == 1:
# replace 0 with 1 if the sample in this column belongs to the healthy class
mask_same = [j for j in range(len(logits_per_image)) if torch.equal(labels[i], labels[j])]
ground_truth[i][mask_same] = 1
else:
# replace 0 with 1 if the sample in this column belongs to the same diseased class and has the same caption
mask_same = [j for j in range(len(logits_per_image)) if torch.equal(texts[i], texts[j])]
ground_truth[i][mask_same] = 1
elif args.Caption_grouped:
ground_truth = torch.eye(len(logits_per_image)).float() # logits_per_image.shape = logits_per_text.shape = ground_truth.shape = batchsize x batchsize
for i in range(len(logits_per_image)):
# replace 0 with 1 if the sample in this column belongs to the same class and has the same caption
mask_same = [j for j in range(len(logits_per_image)) if torch.equal(texts[i], texts[j])]
ground_truth[i][mask_same] = 1
else: # Default Clip loss
ground_truth = torch.arange(len(logits_per_image)).long()
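# In this default case the targets are the diagonal indices of the batch similarity
# matrix: image i is matched with text i, as in standard CLIP-style contrastive training.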
weights = get_weights(labels, class_weights)
if args.gpu is not None:
ground_truth = ground_truth.cuda(args.gpu, non_blocking=True)
weights = weights.cuda(args.gpu, non_blocking=True)
loss_vision = loss_img(logits_per_image, ground_truth)
loss_vision = (loss_vision * weights).mean()
loss_text = loss_txt(logits_per_text, ground_truth)
loss_text = (loss_text * weights).mean()
total_loss = (loss_vision + loss_text) / 2
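# Symmetric contrastive objective: the per-sample-weighted image-to-text and
# text-to-image losses are averaged, following the usual CLIP formulation.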
return total_loss
def train(model, data, epoch, optimizer, scaler, scheduler, args, tb_writer=None):
os.environ["WDS_EPOCH"] = str(epoch)
model.train()
dataloader, sampler = data['train'].dataloader, data['train'].sampler
if args.default_loss:
loss_img = nn.CrossEntropyLoss(reduction='none')
loss_txt = nn.CrossEntropyLoss(reduction='none')
else:
loss_img = nn.BCEWithLogitsLoss(reduction='none')
loss_txt = nn.BCEWithLogitsLoss(reduction='none')
if args.use_weights_1:
# class weights where the weight of a class is: 1 - (class_count / total_count)
class_weights = {0: 0.5, 1: 0.995, 2: 0.927, 3: 0.964, 4: 0.989, 5: 0.994, 6: 0.993, 7: 0.997,
8: 0.856, 9: 0.903, 10: 0.998, 11: 0.879, 12: 0.9984, 13: 0.972, 14: 0.988}
elif args.use_weights_2:
# class weights where the weight of a class is: total_count - (num_of_classes / class_count)
class_weights = {0: 0.133, 1: 14.129, 2: 0.913, 3: 1.868, 4: 6.191, 5: 10.805, 6: 9.501, 7: 26.24,
8: 0.461, 9: 0.685, 10: 32.415, 11: 0.552, 12: 30.61, 13: 2.35, 14: 5.681}
else:
class_weights = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0, 5: 1.0, 6: 1.0, 7: 1.0,
8: 1.0, 9: 1.0, 10: 1.0, 11: 1.0, 12: 1.0, 13: 1.0, 14: 1.0}
if args.gpu is not None:
loss_img = loss_img.cuda(args.gpu)
loss_txt = loss_txt.cuda(args.gpu)
if args.distributed and sampler is not None:
sampler.set_epoch(epoch)
num_batches_per_epoch = dataloader.num_batches
end = time.time()
for i, batch in enumerate(dataloader):
step = num_batches_per_epoch * epoch + i
scheduler(step)
optimizer.zero_grad()
images, texts, labels = batch
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
labels = labels.cuda(args.gpu, non_blocking=True)
if args.new_model:
for key in texts:
texts[key] = texts[key].cuda(args.gpu, non_blocking=True)
else:
texts = texts.cuda(args.gpu, non_blocking=True)
data_time = time.time() - end
m = model.module if args.distributed or args.dp else model
# with automatic mixed precision.
if args.precision == "amp":
with autocast():
total_loss = get_loss(model, images, loss_img, loss_txt, class_weights, texts, labels, args)
scaler.scale(total_loss).backward()
scaler.step(optimizer)
scaler.update()
else:
total_loss = get_loss(model, images, loss_img, loss_txt, class_weights, texts, labels, args)
total_loss.backward()
optimizer.step()
# Note: we clamp to 4.6052 = ln(100), as in the original paper.
m.logit_scale.data = torch.clamp(m.logit_scale.data, 0, 4.6052)
batch_time = time.time() - end
end = time.time()
if is_master(args) and (i % 100) == 0:
num_samples = i * len(images) * args.world_size
samples_per_epoch = dataloader.num_samples
percent_complete = 100.0 * i / num_batches_per_epoch
logging.info(
f"Train Epoch: {epoch} [{num_samples}/{samples_per_epoch} ({percent_complete:.0f}%)]\t"
f"Loss: {total_loss.item():.6f}\tData (t) {data_time:.3f}\tBatch (t) {batch_time:.3f}"
f"\tLR: {optimizer.param_groups[0]['lr']:5f}\tlogit_scale {m.logit_scale.data:.3f}"
)
# save train loss / etc.
timestep = epoch * num_batches_per_epoch + i
log_data = {
"loss": total_loss.item(),
"data_time": data_time,
"batch_time": batch_time,
"scale": m.logit_scale.data.item(),
"lr": optimizer.param_groups[0]["lr"]
}
for name, val in log_data.items():
name = "train/" + name
if tb_writer is not None:
tb_writer.add_scalar(name, val, timestep)
if args.wandb:
wandb.log({name: val, 'step': timestep})
def evaluate(model, data, epoch, args, tb_writer=None, steps=None):
if not is_master(args):
return
model.eval()
zero_shot_metrics = zero_shot_eval(model, data, epoch, args)
dataloader = data['val'].dataloader
if args.default_loss:
loss_img = nn.CrossEntropyLoss()
loss_txt = nn.CrossEntropyLoss()
else:
loss_img = nn.BCEWithLogitsLoss()
loss_txt = nn.BCEWithLogitsLoss()
if args.gpu is not None:
loss_img = loss_img.cuda(args.gpu)
loss_txt = loss_txt.cuda(args.gpu)
cumulative_loss = 0.0
num_elements = 0.0
all_image_features, all_text_features, all_labels, all_texts = [], [], [], []
with torch.no_grad():
for batch in dataloader:
images, texts, labels = batch
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
if args.new_model:
for key in texts:
texts[key] = texts[key].cuda(args.gpu, non_blocking=True)
else:
texts = texts.cuda(args.gpu, non_blocking=True)
image_features, text_features, logit_scale = model(images, texts)
if args.new_model:
texts = texts['input_ids']
all_image_features.append(image_features)
all_text_features.append(text_features)
all_labels.append(labels)
all_texts.append(texts)
logit_scale = logit_scale.mean()
logits_per_image = logit_scale * image_features @ text_features.t()
logits_per_text = logits_per_image.t()
if args.Label_grouped:
ground_truth = torch.zeros(logits_per_image.shape).float()
for i in range(len(logits_per_image)):
mask_same = [j for j in range(len(logits_per_image)) if torch.equal(labels[i], labels[j])]
ground_truth[i][mask_same] = 1
elif args.Healthy_grouped:
ground_truth = torch.eye(
len(logits_per_image)).float() # logits_per_image.shape = logits_per_text.shape = ground_truth.shape = batchsize x batchsize
for i in range(len(logits_per_image)):
# instead of an eye matrix we have 1 on the diagonal and 1 if the sample from this column belongs to the healthy class
if labels[i][0] == 1:
mask_same = [j for j in range(len(logits_per_image)) if torch.equal(labels[i], labels[j])]
ground_truth[i][mask_same] = 1
elif args.Healthy_Caption_grouped:
ground_truth = torch.eye(
len(logits_per_image)).float() # logits_per_image.shape = logits_per_text.shape = ground_truth.shape = batchsize x batchsize
for i in range(len(logits_per_image)):
if labels[i][0] == 1:
# replace 0 with 1 if the sample in this column belongs to the healthy class
mask_same = [j for j in range(len(logits_per_image)) if torch.equal(labels[i], labels[j])]
ground_truth[i][mask_same] = 1
else:
# replace 0 with 1 if the sample in this column belongs to the same diseased class and has the same caption
mask_same = [j for j in range(len(logits_per_image)) if torch.equal(texts[i], texts[j])]
ground_truth[i][mask_same] = 1
elif args.Caption_grouped:
ground_truth = torch.eye(
len(logits_per_image)).float() # logits_per_image.shape = logits_per_text.shape = ground_truth.shape = batchsize x batchsize
for i in range(len(logits_per_image)):
# replace 0 with 1 if the sample in this column belongs to the same class and has the same caption
mask_same = [j for j in range(len(logits_per_image)) if torch.equal(texts[i], texts[j])]
ground_truth[i][mask_same] = 1
else:
ground_truth = torch.arange(len(logits_per_image)).long()
if args.gpu is not | |
(40, 60)
else:
self.print_roll(roll, "Something unusual!")
return self.roll_room_unusual_shape_and_size()
self.print_roll(roll, "A {w}x{h} {kind}".format(w=w, h=h, kind=kind))
return aagen.geometry.rectangle_list(w, h)
def roll_room_unusual_shape_and_size(self):
# First we roll the area:
area = self.roll_room_unusual_size()
print("Generating unusual shape (d20)...")
roll = d20()
if roll <= 5:
self.print_roll(roll, "A circle")
# TODO pool/well/shaft
return aagen.geometry.circle_list(area)
# TODO don't allow circular rooms to be truncated
elif roll <= 8:
self.print_roll(roll, "A triangle")
return aagen.geometry.triangle_list(area)
elif roll <= 11:
self.print_roll(roll, "A trapezoid")
return aagen.geometry.trapezoid_list(area)
elif roll <= 13:
self.print_roll(roll, "Something odd...")
# TODO
elif roll <= 15:
self.print_roll(roll, "An oval")
return aagen.geometry.oval_list(area)
# TODO don't allow oval rooms to be truncated
elif roll <= 17:
self.print_roll(roll, "A hexagon")
return aagen.geometry.hexagon_list(area)
elif roll <= 19:
self.print_roll(roll, "An octagon")
return aagen.geometry.octagon_list(area)
else:
self.print_roll(roll, "A cave?")
# TODO
log.info("Rerolling")
return self.roll_room_unusual_shape_and_size()
def roll_room_unusual_size(self):
print("Generating unusual room size (d20)...")
roll = d20()
if roll <= 3:
size = 500
elif roll <= 6:
size = 900
elif roll <= 8:
size = 1300
elif roll <= 10:
size = 2000
elif roll <= 12:
size = 2700
elif roll <= 14:
size = 3400
else:
self.print_roll(roll, "Add 2000 square feet and roll again!")
size = 2000 + self.roll_room_unusual_size()
if roll <= 14:
self.print_roll(roll, "{size} square feet"
.format(roll=roll, size="{:,}".format(size)))
else:
print("+ 2,000 = {size} square feet"
.format(size="{:,}".format(size)))
return size
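# Example (hypothetical rolls): a 7 lands in the 7-8 band and yields 1,300 square feet;
# a 16 adds 2,000 square feet and rerolls, so a follow-up roll of 4 (900) gives 2,900.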
def roll_room_exit_count(self, room):
print("Generating number of exits...")
roll = d20()
exit_kind = (Connection.DOOR if room.kind == Region.ROOM
else Connection.ARCH)
if roll <= 3:
exits = self.exits_if_area_less_than(roll, room, 600, 1, 2)
elif roll <= 6:
exits = self.exits_if_area_less_than(roll, room, 600, 2, 3)
elif roll <= 9:
exits = self.exits_if_area_less_than(roll, room, 600, 3, 4)
elif roll <= 12:
exits = self.exits_if_area_less_than(roll, room, 1200, 0, 1)
elif roll <= 15:
exits = self.exits_if_area_less_than(roll, room, 1600, 0, 1)
elif roll <= 18:
exits = d4()
self.print_roll(roll, "1-4 exits: got {0} exits".format(exits))
else:
exits = 1
exit_kind = (Connection.ARCH if room.kind == Region.ROOM
else Connection.DOOR)
self.print_roll(roll, "1 exit of unusual type ({0})".format(exit_kind))
log.info("Room has {0} {1}s".format(exits, exit_kind))
return (exits, exit_kind)
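# Example (hypothetical): a roll of 5 on an 800 square-foot room falls in the 4-6 band,
# and since 800 > 600 square feet the room gets 3 exits of the default kind.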
def exits_if_area_less_than(self, roll, region, area_filter, less, more):
"""Helper function for roll_room_exit_count()"""
area = region.polygon.area
log.debug("Room area is {0}".format(area))
if area <= area_filter:
self.print_roll(roll, "No larger than {filter} square feet, "
"so it has {exits} exit(s)"
.format(filter=area_filter, exits=less))
return less
else:
self.print_roll(roll, "Larger than {filter} square feet, "
"so it has {exits} exit(s)"
.format(filter=area_filter, exits=more))
return more
def generate_room_exit(self, room, exit_kind, entrance_dir):
exit_dir = self.roll_room_exit_location(entrance_dir)
log.info("Generating a {0} in direction {1} from {2}"
.format(exit_kind, exit_dir, room))
if exit_kind == Connection.DOOR:
width = 10
elif exit_kind == Connection.ARCH:
width = self.roll_passage_width(exit_dir.is_cardinal())
else:
raise RuntimeError("Not sure how to generate a room exit as {0}"
.format(exit_kind))
# Try a full-width exit, and if that fails,
# successively reduce the exit width:
exit_width = width
conn = None
while exit_width > 0:
candidates = self.dungeon_map.find_options_for_connection(
exit_width, room, exit_dir)
log.debug(candidates)
if len(candidates) > 0:
break
if exit_width > 10:
log.warning("Couldn't find any options for a {0}' exit - "
"retry with reduced width".format(exit_width))
exit_width -= 10
if len(candidates) == 0:
print("If there's no available space that doesn't already lead to "
"an already mapped area, it might be a secret or a one-way "
"door, or else we should just try a different wall...")
roll = d20()
if roll <= 5:
self.print_roll(roll, "There's a secret door")
exit_kind = Connection.SECRET
width = 10
new_only = False
elif roll <= 10:
self.print_roll(roll, "It's a one-way door")
exit_kind = Connection.ONEWAY
width = 10
new_only = False
else:
self.print_roll(roll, "Let's try the opposite direction")
exit_dir = exit_dir.rotate(180)
new_only = True # TODO - try new_only, then try not if fails
# TODO refactor this...
exit_width = width
while exit_width > 0:
candidates = self.dungeon_map.find_options_for_connection(
exit_width, room, exit_dir, new_only=new_only)
log.debug(candidates)
if len(candidates) > 0:
break
exit_width -= 10
if len(candidates) == 0:
log.warning("Unable to generate this exit!")
return None
# For now just pick one at random:
candidate = self.select_best_connection(candidates)
room.polygon = aagen.geometry.union(room.polygon, candidate.polygon)
self.dungeon_map.refresh_conglomerate()
conn = Connection(exit_kind, candidate.line, room, exit_dir)
room.add_connection(conn)
self.dungeon_map.add_connection(conn)
if exit_kind == Connection.ARCH:
# Now construct the passage
possible_directions = self.roll_passage_exit_directions(conn.direction)
self.extend_passage(conn, 30, possible_directions)
def roll_room_exit_location(self, base_direction):
print("Generating exit location...")
roll = d20()
if base_direction.is_cardinal():
if roll <= 7:
direction = base_direction
self.print_roll(roll, "Opposite the entrance ({0})"
.format(direction))
elif roll <= 12:
direction = base_direction.rotate(90)
self.print_roll(roll, "To the left of the entrance ({0})"
.format(direction))
elif roll <= 17:
direction = base_direction.rotate(-90)
self.print_roll(roll, "To the right of the entrance ({0})"
.format(direction))
else:
direction = base_direction.rotate(180)
self.print_roll(roll, "On the same side as the entrance ({0})"
.format(direction))
else:
# Exits are always cardinally oriented...
# Numbers below are picked arbitrarily to try and preserve the
# "feel" of the above cardinal table, preferring forward over back.
if roll <= 6:
direction = base_direction.rotate(45)
self.print_roll(roll, "Forward and left ({0})"
.format(direction))
elif roll <= 12:
direction = base_direction.rotate(-45)
self.print_roll(roll, "Forward and right ({0})"
.format(direction))
elif roll <= 16:
direction = base_direction.rotate(135)
self.print_roll(roll, "Back and left ({0})"
.format(direction))
else:
direction = base_direction.rotate(-135)
self.print_roll(roll, "Back and right ({0})"
.format(direction))
log.info("Exit direction: {0}".format(direction))
return direction
def roll_passage_exit_directions(self, base_direction):
print("Generating exit direction for a passage...")
roll = d20()
if roll <= 16:
self.print_roll(roll, "The passage goes straight {0}"
.format(base_direction))
return [base_direction]
elif roll <= 18:
left = base_direction.rotate(45)
right = base_direction.rotate(-45)
self.print_roll(roll, "The passage goes 45 degrees left ({0}), or "
"if that doesn't work, 45 degrees right ({1})"
.format(left, right))
return [left, right]
else:
left = base_direction.rotate(45)
right = base_direction.rotate(-45)
self.print_roll(roll, "The passage goes 45 degrees right ({0}), or "
"if that doesn't work, 45 degrees left ({1})"
.format(right, left))
return [right, left]
def extend_passage(self, connection, distance,
possible_directions=None,
allow_truncation=True,
allow_shortening=True):
"""Grow a passage from the given connection.
Returns the resulting passage or None """
if possible_directions is None:
possible_directions = [connection.direction]
# Order of preference:
# 1) First possible direction, full distance, no truncation
# 2) Second possible direction, full distance, no truncation
# (continue over all possible directions)
# (reduce distance by 10 if permitted, try all directions again)
# If distance reduced to 0 and still no match found:
# Reset to maximum distance, allow truncation, repeat all of the above
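# Worked example (hypothetical): with distance=30, possible_directions=[NE, NW] and the
# default allow_* flags, the attempts run NE@30', NW@30', NE@20', NW@20', NE@10', NW@10'
# with truncation disallowed, then the same sequence again with truncation allowed.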
(xmin, ymin, xmax, ymax) = connection.line.bounds
for truncation in [False, True]:
length = distance
while length > 0:
for direction in possible_directions:
if direction != connection.direction:
# TODO - map.construct_intersection() instead?
(fixup_poly, exit_dict) = (
aagen.geometry.construct_intersection(
connection.line, connection.direction,
[direction], connection.size()
))
base_line = exit_dict[direction]
else:
base_line = connection.line
fixup_poly = aagen.geometry.polygon()
(polygon, endwall) = aagen.geometry.sweep(
base_line, direction, length,
connection.direction)
polygon = aagen.geometry.union(polygon, fixup_poly)
candidate = self.dungeon_map.try_region_as_candidate(
polygon, connection)
if candidate is not None:
if not truncation and candidate.amount_truncated > 0:
log.info("Truncated by {0} - not interested"
.format(candidate.amount_truncated))
candidate = None
if candidate is not None:
selected = candidate
#candidate_regions = (
# self.dungeon_map.find_options_for_region(
# [coords], connection))
#if truncation:
# # Truncation is OK, no filtering needed
# filtered = candidate_regions
#else:
# # We don't want truncated passages
# filtered = []
# for candidate in candidate_regions:
# if candidate.amount_truncated == 0:
# filtered.append(candidate)
#if len(filtered) > 0:
#selected = self.select_best_candidate(filtered)
new_region = Region(Region.PASSAGE,
selected.polygon)
connection.add_region(new_region)
if selected.amount_truncated == 0:
# add Open connection to far end of passage if
# it doesn't collide with the geometry
if (aagen.geometry.intersect(
endwall,
self.dungeon_map.conglomerate_polygon)
.length == 0):
conn2 = Connection(Connection.OPEN,
endwall,
new_region,
direction)
else:
log.info("Not adding Open at end of passage {0}"
"because it intersects the existing "
"dungeon".format(to_string(endwall)))
self.dungeon_map.add_region(new_region)
return new_region
# No luck - try new direction?
pass
# No luck - try reducing length?
if allow_shortening:
length -= 10
log.info("No match found - trying reduced length ({0}')"
.format(length))
else:
break
# No luck - try allowing truncation?
if not allow_truncation:
break
# No luck at all?
log.warning("Unable to extend passage at all!")
return None
def step(self, connection=None):
"""Run another step of the dungeon generation algorithm, either
starting from the specified connection or a | |
<reponame>meteogrid/OWSLib
# -*- coding: iso-8859-15 -*-
# =============================================================================
# Copyright (c) 2004, 2006 <NAME>
# Copyright (c) 2005 Nuxeo SARL <http://nuxeo.com>
#
# Authors : <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# Contact email: <EMAIL>
# =============================================================================
"""
API for Web Map Service (WMS) methods and metadata.
Currently supports only version 1.1.1 of the WMS protocol.
"""
from __future__ import (absolute_import, division, print_function)
import cgi
try: # Python 3
from urllib.parse import urlencode
except ImportError: # Python 2
from urllib import urlencode
import warnings
import six
from owslib.etree import etree
from owslib.util import (openURL, testXMLValue, extract_xml_list,
xmltag_split, OrderedDict, ServiceException,
bind_url)
from owslib.fgdc import Metadata
from owslib.iso import MD_Metadata
from owslib.map.common import WMSCapabilitiesReader
class CapabilitiesError(Exception):
pass
class WebMapService_1_1_1(object):
"""Abstraction for OGC Web Map Service (WMS).
Implements IWebMapService.
"""
def __getitem__(self, name):
''' check contents dictionary to allow dict
like access to service layers
'''
if name in self.__getattribute__('contents'):
return self.__getattribute__('contents')[name]
else:
raise KeyError("No content named %s" % name)
def __init__(self, url, version='1.1.1', xml=None,
username=None,
password=<PASSWORD>,
parse_remote_metadata=False,
headers=None,
timeout=30):
"""Initialize."""
self.url = url
self.username = username
self.password = password
self.version = version
self.timeout = timeout
self.headers = headers
self._capabilities = None
# Authentication handled by Reader
reader = WMSCapabilitiesReader(self.version, url=self.url,
un=self.username, pw=self.password,
headers=headers)
if xml: # read from stored xml
self._capabilities = reader.readString(xml)
else: # read from server
self._capabilities = reader.read(self.url, timeout=self.timeout)
self.request = reader.request
# avoid building capabilities metadata if the
# response is a ServiceExceptionReport
se = self._capabilities.find('ServiceException')
if se is not None:
err_message = str(se.text).strip()
raise ServiceException(err_message)
# build metadata objects
self._buildMetadata(parse_remote_metadata)
def _buildMetadata(self, parse_remote_metadata=False):
"""Set up capabilities metadata objects."""
self.updateSequence = self._capabilities.attrib.get('updateSequence')
# serviceIdentification metadata
serviceelem = self._capabilities.find('Service')
self.identification = ServiceIdentification(serviceelem, self.version)
# serviceProvider metadata
self.provider = ServiceProvider(serviceelem)
# serviceOperations metadata
self.operations = []
for elem in self._capabilities.find('Capability/Request')[:]:
self.operations.append(OperationMetadata(elem))
# serviceContents metadata: our assumption is that services use a
# top-level layer as a metadata organizer, nothing more.
self.contents = OrderedDict()
caps = self._capabilities.find('Capability')
# recursively gather content metadata for all layer elements.
# Only metadata of named layers is stored in WebMapService.contents.
def gather_layers(parent_elem, parent_metadata):
layers = []
for index, elem in enumerate(parent_elem.findall('Layer')):
cm = ContentMetadata(elem, parent=parent_metadata,
index=index + 1,
parse_remote_metadata=parse_remote_metadata)
if cm.id:
if cm.id in self.contents:
warnings.warn('Content metadata for layer "%s" already exists. Using child layer' % cm.id)
layers.append(cm)
self.contents[cm.id] = cm
cm.children = gather_layers(elem, cm)
return layers
gather_layers(caps, None)
# exceptions
self.exceptions = [f.text for f
in self._capabilities.findall('Capability/Exception/Format')]
def items(self):
'''supports dict-like items() access'''
items = []
for item in self.contents:
items.append((item, self.contents[item]))
return items
def getcapabilities(self):
"""Request and return capabilities document from the WMS as a
file-like object.
NOTE: this is effectively redundant now"""
reader = WMSCapabilitiesReader(
self.version, url=self.url, un=self.username, pw=self.password
)
u = self._open(reader.capabilities_url(self.url))
# check for service exceptions, and return
if u.info()['Content-Type'] == 'application/vnd.ogc.se_xml':
se_xml = u.read()
se_tree = etree.fromstring(se_xml)
err_message = str(se_tree.find('ServiceException').text).strip()
raise ServiceException(err_message)
return u
def __build_getmap_request(self, layers=None, styles=None, srs=None, bbox=None,
format=None, size=None, time=None, transparent=False,
bgcolor=None, exceptions=None, **kwargs):
request = {'service': 'WMS', 'version': self.version, 'request': 'GetMap'}
# check layers and styles
assert len(layers) > 0
request['layers'] = ','.join(layers)
if styles:
assert len(styles) == len(layers)
request['styles'] = ','.join(styles)
else:
request['styles'] = ''
# size
request['width'] = str(size[0])
request['height'] = str(size[1])
request['srs'] = str(srs)
request['bbox'] = ','.join([repr(x) for x in bbox])
request['format'] = str(format)
request['transparent'] = str(transparent).upper()
request['bgcolor'] = '0x' + bgcolor[1:7]
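# The line above converts an HTML-style colour such as '#FFFFFF' into the
# '0xFFFFFF' form expected by the WMS BGCOLOR parameter.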
request['exceptions'] = str(exceptions)
if time is not None:
request['time'] = str(time)
if kwargs:
for kw in kwargs:
request[kw]=kwargs[kw]
return request
def getmap(self, layers=None, styles=None, srs=None, bbox=None,
format=None, size=None, time=None, transparent=False,
bgcolor='#FFFFFF',
exceptions='application/vnd.ogc.se_xml',
method='Get',
timeout=None,
**kwargs
):
"""Request and return an image from the WMS as a file-like object.
Parameters
----------
layers : list
List of content layer names.
styles : list
Optional list of named styles, must be the same length as the
layers list.
srs : string
A spatial reference system identifier.
bbox : tuple
(left, bottom, right, top) in srs units.
format : string
Output image format such as 'image/jpeg'.
size : tuple
(width, height) in pixels.
transparent : bool
Optional. Transparent background if True.
bgcolor : string
Optional. Image background color.
method : string
Optional. HTTP DCP method name: Get or Post.
**kwargs : extra arguments
anything else e.g. vendor specific parameters
Example
-------
wms = WebMapService('http://giswebservices.massgis.state.ma.us/geoserver/wms', version='1.1.1')
img = wms.getmap(layers=['massgis:GISDATA.SHORELINES_ARC'],\
styles=[''],\
srs='EPSG:4326',\
bbox=(-70.8, 42, -70, 42.8),\
size=(300, 300),\
format='image/jpeg',\
transparent=True)
out = open('example.jpg', 'wb')
bytes_written = out.write(img.read())
out.close()
"""
try:
base_url = next((m.get('url') for m in self.getOperationByName('GetMap').methods if m.get('type').lower() == method.lower()))
except StopIteration:
base_url = self.url
request = {'version': self.version, 'request': 'GetMap'}
request = self.__build_getmap_request(
layers=layers,
styles=styles,
srs=srs,
bbox=bbox,
format=format,
size=size,
time=time,
transparent=transparent,
bgcolor=bgcolor,
exceptions=exceptions,
**kwargs)
data = urlencode(request)
self.request = bind_url(base_url) + data
u = openURL(base_url, data, method, username=self.username, password=<PASSWORD>, timeout=timeout or self.timeout)
# check for service exceptions, and return
if u.info()['Content-Type'].split(';')[0] in ['application/vnd.ogc.se_xml']:
se_xml = u.read()
se_tree = etree.fromstring(se_xml)
err_message = six.text_type(se_tree.find('ServiceException').text).strip()
raise ServiceException(err_message)
return u
def getfeatureinfo(self,
layers=None,
styles=None,
srs=None,
bbox=None,
format=None,
size=None,
time=None,
transparent=False,
bgcolor='#FFFFFF',
exceptions='application/vnd.ogc.se_xml',
query_layers=None,
xy=None,
info_format=None,
feature_count=20,
method='Get',
timeout=None,
**kwargs
):
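"""Request feature information from the WMS as a file-like object.
Sketch usage (illustrative values only; the layer name, bbox and formats are
assumptions, not taken from a real service):
    resp = wms.getfeatureinfo(layers=['massgis:GISDATA.SHORELINES_ARC'],
                              srs='EPSG:4326',
                              bbox=(-70.8, 42, -70, 42.8),
                              size=(300, 300),
                              format='image/jpeg',
                              query_layers=['massgis:GISDATA.SHORELINES_ARC'],
                              xy=(150, 150),
                              info_format='text/html')
"""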
try:
base_url = next((m.get('url') for m in self.getOperationByName('GetFeatureInfo').methods if m.get('type').lower() == method.lower()))
except StopIteration:
base_url = self.url
# GetMap-Request
request = self.__build_getmap_request(
layers=layers,
styles=styles,
srs=srs,
bbox=bbox,
format=format,
size=size,
time=time,
transparent=transparent,
bgcolor=bgcolor,
exceptions=exceptions,
**kwargs)
# extend to GetFeatureInfo-Request
request['request'] = 'GetFeatureInfo'
if not query_layers:
__str_query_layers = ','.join(layers)
else:
__str_query_layers = ','.join(query_layers)
request['query_layers'] = __str_query_layers
request['x'] = str(xy[0])
request['y'] = str(xy[1])
request['info_format'] = info_format
request['feature_count'] = str(feature_count)
data = urlencode(request)
self.request = bind_url(base_url) + data
u = openURL(base_url, data, method, username=self.username, password=self.password, timeout=timeout or self.timeout)
# check for service exceptions, and return
if u.info()['Content-Type'] == 'application/vnd.ogc.se_xml':
se_xml = u.read()
se_tree = etree.fromstring(se_xml)
err_message = six.text_type(se_tree.find('ServiceException').text).strip()
raise ServiceException(err_message)
return u
def getServiceXML(self):
xml = None
if self._capabilities is not None:
xml = etree.tostring(self._capabilities)
return xml
def getOperationByName(self, name):
"""Return a named content item."""
for item in self.operations:
if item.name == name:
return item
raise KeyError("No operation named %s" % name)
class ServiceIdentification(object):
''' Implements IServiceIdentificationMetadata '''
def __init__(self, infoset, version):
self._root=infoset
self.type = testXMLValue(self._root.find('Name'))
self.version = version
self.title = testXMLValue(self._root.find('Title'))
self.abstract = testXMLValue(self._root.find('Abstract'))
self.keywords = extract_xml_list(self._root.findall('KeywordList/Keyword'))
self.accessconstraints = testXMLValue(self._root.find('AccessConstraints'))
self.fees = testXMLValue(self._root.find('Fees'))
class ServiceProvider(object):
''' Implements IServiceProviderMetadata '''
def __init__(self, infoset):
self._root=infoset
name=self._root.find('ContactInformation/ContactPersonPrimary/ContactOrganization')
if name is not None:
self.name=name.text
else:
self.name=None
self.url=self._root.find('OnlineResource').attrib.get('{http://www.w3.org/1999/xlink}href', '')
#contact metadata
contact = self._root.find('ContactInformation')
## sometimes there is a contact block that is empty, so make
## sure there are children to parse
if contact is not None and contact[:] != []:
self.contact = ContactMetadata(contact)
else:
self.contact = None
def getContentByName(self, name):
"""Return a named content item."""
for item in self.contents:
if item.name == name:
return item
raise KeyError("No content named %s" % name)
def getOperationByName(self, name):
"""Return a named content item."""
for item in self.operations:
if item.name == name:
return item
raise KeyError("No operation named %s" % name)
class ContentMetadata:
"""
Abstraction for WMS layer metadata.
Implements IContentMetadata.
"""
def __init__(self, elem, parent=None, children=None, index=0, parse_remote_metadata=False, timeout=30):
if elem.tag != 'Layer':
raise ValueError('%s should be a Layer' % (elem,))
self.parent = parent
if parent:
self.index = "%s.%d" % (parent.index, index)
else:
self.index = str(index)
self._children = children
self.id = self.name = testXMLValue(elem.find('Name'))
# layer attributes
self.queryable = int(elem.attrib.get('queryable', 0))
self.cascaded = int(elem.attrib.get('cascaded', 0))
self.opaque = int(elem.attrib.get('opaque', 0))
self.noSubsets = int(elem.attrib.get('noSubsets', 0))
self.fixedWidth = int(elem.attrib.get('fixedWidth', 0))
self.fixedHeight = int(elem.attrib.get('fixedHeight', 0))
# title is mandatory property
self.title = None
title = testXMLValue(elem.find('Title'))
if title is not None:
self.title = title.strip()
self.abstract = testXMLValue(elem.find('Abstract'))
# bboxes
b = elem.find('BoundingBox')
self.boundingBox = None
if b is not None:
try: # sometimes the SRS attribute is (wrongly) not provided
srs = b.attrib['SRS']
except KeyError:
srs = None
self.boundingBox = (
float(b.attrib['minx']),
float(b.attrib['miny']),
float(b.attrib['maxx']),
float(b.attrib['maxy']),
srs,
)
elif self.parent:
if hasattr(self.parent, 'boundingBox'):
self.boundingBox = self.parent.boundingBox
# ScaleHint
sh = elem.find('ScaleHint')
self.scaleHint = None
if sh is not None:
if 'min' in sh.attrib and 'max' in sh.attrib:
self.scaleHint = {'min': sh.attrib['min'], 'max': sh.attrib['max']}
attribution = elem.find('Attribution')
if attribution is not None:
self.attribution = dict()
title = attribution.find('Title')
url = attribution.find('OnlineResource')
logo = attribution.find('LogoURL')
if title is not None:
self.attribution['title'] = title.text
"""
Question
--------
How do the core metabolisms of Alphaproteobacteria and Betaproteobacteria compare?
Which neofunctionalised enzymes cause the core metabolism of Alphaproteobacteria or Betaproteobacteria to have increased redundancy? How much do they contribute?
Method
------
- get clades
- get core metabolisms
- calculate "neofunctionalised" ECs
- calculate redundancies
- REPEAT for each clade
- print size of each core metabolism and "neofunctionalised" ECs, including their contribution to redundancy
- print enzyme pairs of neofunctionalisations, which caused the EC to be considered "neofunctionalised", and are in return contributing to redundancy
Result
------
::
core metabolism majority: 80%
neofunctionalisation majority: 0% (this means that gene duplication within a single organism is enough)
type of redundancy: ROBUSTNESS
Alphaproteobacteria:
core metabolism ECs: 215
"neofunctionalised" ECs: 46 (21%)
1.1.1.100
1.1.1.42
1.1.1.85
1.17.7.4
1.2.1.41
1.2.4.1
1.8.1.4
1.8.1.7
2.1.2.2
2.1.3.2
2.1.3.3
2.2.1.1
2.2.1.7
2.3.1.12
2.3.1.39
2.3.1.61
2.4.2.14
2.5.1.90
2.6.1.1
2.6.1.16
2.6.1.9
2.7.4.9
3.1.3.25
3.1.3.7
4.1.1.17
4.1.1.20
4.1.3.-
4.1.3.27
4.3.2.1
4.3.2.2
4.4.1.8
5.3.1.16
6.1.1.12
6.1.1.15
6.1.1.3
6.1.1.4
6.1.1.5
6.1.1.6
6.1.1.9
6.2.1.1
6.3.2.10
6.3.2.13
6.3.2.8
6.3.2.9
6.3.4.18
6.3.5.5
"neofunctionalised" ECs contributing to redundancy: 5 ( 2%)
1.2.4.1 => [1.1.1.40]
2.1.3.3 => [2.1.3.2]
2.2.1.1 => [5.1.3.1, 5.3.1.6]
2.6.1.1 => [2.1.3.2]
4.1.3.- => [2.4.2.-]
Neofunctionalisations contributing to redundancy: 828 (22%)
(aace:A0U92_07025 [1.2.4.1], aace:A0U92_07030 [2.3.1.12]) => [1.1.1.40]
(aak:AA2016_3788 [1.2.4.1], aak:AA2016_3787 [2.3.1.12]) => [1.1.1.40]
(aay:WYH_00471 [1.2.4.1], aay:WYH_00533 [2.3.1.12]) => [1.1.1.40]
(abg:Asbog_01243 [1.2.4.1], abg:Asbog_01244 [2.3.1.12]) => [1.1.1.40]
(acr:Acry_2820 [1.2.4.1], acr:Acry_2821 [2.3.1.12]) => [1.1.1.40]
(ado:A6F68_02189 [1.2.4.1], ado:A6F68_02343 [2.3.1.12]) => [1.1.1.40]
(aep:AMC99_01209 [1.2.4.1], aep:AMC99_01171 [2.3.1.12]) => [1.1.1.40]
(aex:Astex_2920 [1.2.4.1], aex:Astex_2919 [2.3.1.12]) => [1.1.1.40]
(agc:BSY240_693 [1.2.4.1], agc:BSY240_692 [2.3.1.12]) => [1.1.1.40]
(agr:AGROH133_05921 [1.2.4.1], agr:AGROH133_05922 [2.3.1.12]) => [1.1.1.40]
(aht:ANTHELSMS3_03039 [1.2.4.1],aht:ANTHELSMS3_03037 [2.3.1.12])=> [1.1.1.40]
(ahu:A6A40_05280 [1.2.4.1], ahu:A6A40_05275 [2.3.1.12]) => [1.1.1.40]
(amv:ACMV_31580 [1.2.4.1], amv:ACMV_31590 [2.3.1.12]) => [1.1.1.40]
(amx:AM2010_985 [1.2.4.1], amx:AM2010_922 [2.3.1.12]) => [1.1.1.40]
(anh:A6F65_01749 [1.2.4.1], anh:A6F65_01679 [2.3.1.12]) => [1.1.1.40]
(aol:S58_35510 [1.2.4.1], aol:S58_35530 [2.3.1.12]) => [1.1.1.40]
(apb:SAR116_0206 [1.2.4.1], apb:SAR116_0207 [2.3.1.12]) => [1.1.1.40]
(aper:A0U91_02040 [1.2.4.1], aper:A0U91_02045 [2.3.1.12]) => [1.1.1.40]
(apm:HIMB5_00012850 [1.2.4.1], apm:HIMB5_00012840 [2.3.1.12]) => [1.1.1.40]
(apom:CPF11_01350 [1.2.4.1], apom:CPF11_01355 [2.3.1.12]) => [1.1.1.40]
(apt:APA01_12170 [1.2.4.1], apt:APA01_12160 [2.3.1.12]) => [1.1.1.40]
(ara:Arad_2247 [1.2.4.1], ara:Arad_2249 [2.3.1.12]) => [1.1.1.40]
(aro:B0909_06395 [1.2.4.1], aro:B0909_06390 [2.3.1.12]) => [1.1.1.40]
(asv:WG31_06600 [1.2.4.1], asv:WG31_06595 [2.3.1.12]) => [1.1.1.40]
(asz:ASN_3632 [1.2.4.1], asz:ASN_3633 [2.3.1.12]) => [1.1.1.40]
(ati:AL072_12260 [1.2.4.1], ati:AL072_12255 [2.3.1.12]) => [1.1.1.40]
(ato:CIW82_12980 [1.2.4.1], ato:CIW82_12975 [2.3.1.12]) => [1.1.1.40]
(aua:M673_11260 [1.2.4.1], aua:M673_11265 [2.3.1.12]) => [1.1.1.40]
(avi:Avi_2114 [1.2.4.1], avi:Avi_2115 [2.3.1.12]) => [1.1.1.40]
(azc:AZC_1743 [1.2.4.1], azc:AZC_1744 [2.3.1.12]) => [1.1.1.40]
(azl:AZL_015200 [1.2.4.1], azl:AZL_015190 [2.3.1.12]) => [1.1.1.40]
(banc:PU02_1123 [1.2.4.1], banc:PU02_1122 [2.3.1.12]) => [1.1.1.40]
(bapi:BBC0122_009290 [1.2.4.1], bapi:BBC0122_009300 [2.3.1.12]) => [1.1.1.40]
(bara:BA1379B_006780 [1.2.4.1], bara:BA1379B_006770 [2.3.1.12]) => [1.1.1.40]
(barr:Bra60_007650 [1.2.4.1], barr:Bra60_007640 [2.3.1.12]) => [1.1.1.40]
(bart:BJB15x_004370 [1.2.4.1], bart:BJB15x_004380 [2.3.1.12]) => [1.1.1.40]
(barw:BWD162_003840 [1.2.4.1], barw:BWD162_003850 [2.3.1.12]) => [1.1.1.40]
(baus:BAnh1_05010 [1.2.4.1], baus:BAnh1_05020 [2.3.1.12]) => [1.1.1.40]
(bbk:BARBAKC583_0535 [1.2.4.1], bbk:BARBAKC583_0536 [2.3.1.12]) => [1.1.1.40]
(bbt:BBta_4462 [1.2.4.1], bbt:BBta_4460 [2.3.1.12]) => [1.1.1.40]
(bcd:BARCL_0594 [1.2.4.1], bcd:BARCL_0595 [2.3.1.12]) => [1.1.1.40]
(bcet:V910_100859 [1.2.4.1], bcet:V910_100860 [2.3.1.12]) => [1.1.1.40]
(bcet:V910_100859 [1.2.4.1], bcet:V910_200058 [2.3.1.12]) => [1.1.1.40]
(bcs:BCAN_A1147 [1.2.4.1], bcs:BCAN_A1146 [2.3.1.12]) => [1.1.1.40]
(bcs:BCAN_A1147 [1.2.4.1], bcs:BCAN_B0035 [2.3.1.12]) => [1.1.1.40]
(bgr:Bgr_06590 [1.2.4.1], bgr:Bgr_06600 [2.3.1.12]) => [1.1.1.40]
(bhe:BH05760 [1.2.4.1], bhe:BH05770 [2.3.1.12]) => [1.1.1.40]
(bic:LMTR13_19285 [1.2.4.1], bic:LMTR13_19295 [2.3.1.12]) => [1.1.1.40]
(bid:Bind_1506 [1.2.4.1], bid:Bind_1507 [2.3.1.12]) => [1.1.1.40]
(bja:bll4782 [1.2.4.1], bja:bll4779 [2.3.1.12]) => [1.1.1.40]
(bju:BJ6T_49290 [1.2.4.1], bju:BJ6T_49320 [2.3.1.12]) => [1.1.1.40]
(blas:BSY18_579 [1.2.4.1], blas:BSY18_1020 [2.3.1.12]) => [1.1.1.40]
(bme:BMEI0855 [1.2.4.1], bme:BMEI0856 [2.3.1.12]) => [1.1.1.40]
(bme:BMEI0855 [1.2.4.1], bme:BMEII0060 [2.3.1.12]) => [1.1.1.40]
(bmf:BAB1_1151 [1.2.4.1], bmf:BAB1_1150 [2.3.1.12]) => [1.1.1.40]
(bmf:BAB1_1151 [1.2.4.1], bmf:BAB2_0032 [2.3.1.12]) => [1.1.1.40]
(bmr:BMI_I1140 [1.2.4.1], bmr:BMI_II33 [2.3.1.12]) => [1.1.1.40]
(bms:BR1128 [1.2.4.1], bms:BR1127 [2.3.1.12]) => [1.1.1.40]
(bms:BR1128 [1.2.4.1], bms:BRA0033 [2.3.1.12]) => [1.1.1.40]
(bne:DA69_05795 [1.2.4.1], bne:DA69_05785 [2.3.1.12]) => [1.1.1.40]
(bop:AXW83_07560 [1.2.4.1], bop:AXW83_07555 [2.3.1.12]) => [1.1.1.40]
(bos:BSY19_2691 [1.2.4.1], bos:BSY19_2692 [2.3.1.12]) => [1.1.1.40]
(bov:BOV_1086 [1.2.4.1], bov:BOV_1085 [2.3.1.12]) => [1.1.1.40]
(bpp:BPI_I1175 [1.2.4.1], bpp:BPI_I1174 [2.3.1.12]) => [1.1.1.40]
(bpp:BPI_I1175 [1.2.4.1], bpp:BPI_II33 [2.3.1.12]) => [1.1.1.40]
(bqu:BQ04920 [1.2.4.1], bqu:BQ04930 [2.3.1.12]) => [1.1.1.40]
(bra:BRADO4086 [1.2.4.1], bra:BRADO4084 [2.3.1.12]) => [1.1.1.40]
(brad:BF49_6759 [1.2.4.1], brad:BF49_6762 [2.3.1.12]) => [1.1.1.40]
(brc:BCCGELA001_20310 [1.2.4.1],brc:BCCGELA001_20295 [2.3.1.12])=> [1.1.1.40]
(brd:JL11_12170 [1.2.4.1], brd:JL11_12165 [2.3.1.12]) => [1.1.1.40]
(brg:A4249_04935 [1.2.4.1], brg:A4249_04945 [2.3.1.12]) => [1.1.1.40]
(brk:CWS35_25570 [1.2.4.1], brk:CWS35_25560 [2.3.1.12]) => [1.1.1.40]
(brl:BZG35_09905 [1.2.4.1], brl:BZG35_09895 [2.3.1.12]) => [1.1.1.40]
(bro:BRAD285_3292 [1.2.4.1], bro:BRAD285_3294 [2.3.1.12]) => [1.1.1.40]
(brs:S23_33850 [1.2.4.1], brs:S23_33880 [2.3.1.12]) => [1.1.1.40]
(bsb:Bresu_1984 [1.2.4.1], bsb:Bresu_1985 [2.3.1.12]) => [1.1.1.40]
(btr:BT_0862 [1.2.4.1], btr:BT_0863 [2.3.1.12]) => [1.1.1.40]
(bvc:CEP68_16215 [1.2.4.1], bvc:CEP68_16220 [2.3.1.12]) => [1.1.1.40]
(bvl:BF3285c1_1939 [1.2.4.1], bvl:BF3285c1_1938 [2.3.1.12]) => [1.1.1.40]
(bvl:BF3285c1_1939 [1.2.4.1], bvl:BF3285c2_0762 [2.3.1.12]) => [1.1.1.40]
(bvn:BVwin_05150 [1.2.4.1], bvn:BVwin_05160 [2.3.1.12]) => [1.1.1.40]
(bvr:BVIR_1927 [1.2.4.1], bvr:BVIR_1926 [2.3.1.12]) => [1.1.1.40]
(bvv:BHK69_19470 [1.2.4.1], bvv:BHK69_19475 [2.3.1.12]) => [1.1.1.40]
(cak:Caul_2759 [1.2.4.1], cak:Caul_2757 [2.3.1.12]) => [1.1.1.40]
(cbot:ATE48_00565 [1.2.4.1], cbot:ATE48_00550 [2.3.1.12]) => [1.1.1.40]
(ccr:CC_1727 [1.2.4.1], ccr:CC_1729 [2.3.1.12]) => [1.1.1.40]
(cdq:BOQ54_09390 [1.2.4.1], cdq:BOQ54_09395 [2.3.1.12]) => [1.1.1.40]
(ceh:CEW89_06310 [1.2.4.1], ceh:CEW89_05925 [2.3.1.12]) => [1.1.1.40]
(ceh:CEW89_06310 [1.2.4.1], ceh:CEW89_06315 [2.3.1.12]) => [1.1.1.40]
(ceh:CEW89_06310 [1.2.4.1], ceh:CEW89_12460 [2.2.1.7]) => [1.1.1.40]
(chel:AL346_17915 [1.2.4.1], chel:AL346_17920 [2.3.1.12]) => [1.1.1.40]
(chq:AQ619_08905 [1.2.4.1], chq:AQ619_08900 [2.3.1.12]) => [1.1.1.40]
(cid:P73_1703 [1.2.4.1], cid:P73_1702 [2.3.1.12]) => [1.1.1.40]
(cij:WG74_09325 [1.2.4.1], cij:WG74_09505 [2.3.1.12]) => [1.1.1.40]
(cmag:CBW24_10350 [1.2.4.1], cmag:CBW24_10345 [2.3.1.12]) => [1.1.1.40]
(cman:A9D14_07905 [1.2.4.1], cman:A9D14_07640 [2.3.1.12]) => [1.1.1.40]
(cmar:IMCC12053_2102 [1.2.4.1], cmar:IMCC12053_2103 [2.3.1.12]) => [1.1.1.40]
(cmb:CSW64_11565 [1.2.4.1], cmb:CSW64_11555 [2.3.1.12]) => [1.1.1.40]
(cna:AB433_08375 [1.2.4.1], cna:AB433_01620 [2.2.1.7]) => [1.1.1.40]
(cna:AB433_08375 [1.2.4.1], cna:AB433_08610 [2.3.1.12]) => [1.1.1.40]
(con:TQ29_08210 [1.2.4.1], con:TQ29_08205 [2.3.1.12]) => [1.1.1.40]
(cse:Cseg_1968 [1.2.4.1], cse:Cseg_1969 [2.3.1.12]) => [1.1.1.40]
(daa:AKL17_2657 [1.2.4.1], daa:AKL17_2658 [2.3.1.12]) => [1.1.1.40]
(deq:XM25_06740 [1.2.4.1], deq:XM25_06735 [2.3.1.12]) => [1.1.1.40]
(don:BSK21_07650 [1.2.4.1], don:BSK21_07655 [2.3.1.12]) => [1.1.1.40]
(dsh:Dshi_2159 [1.2.4.1], dsh:Dshi_2160 [2.3.1.12]) => [1.1.1.40]
(ead:OV14_2519 [1.2.4.1], ead:OV14_2520 [2.3.1.12]) => [1.1.1.40]
(efv:CHH26_08090 [1.2.4.1], efv:CHH26_08270 [2.3.1.12]) => [1.1.1.40]
(egn:BMF35_a1962 [1.2.4.1], egn:BMF35_a1940 [2.3.1.12]) => [1.1.1.40]
(eli:ELI_06400 [1.2.4.1], eli:ELI_06130 [2.3.1.12]) => [1.1.1.40]
(ery:CP97_09725 [1.2.4.1], ery:CP97_10070 [2.3.1.12]) => [1.1.1.40]
(esj:SJ05684_c12890 [1.2.4.1], esj:SJ05684_c12900 [2.3.1.12]) => [1.1.1.40]
(gal:A0U94_06330 [1.2.4.1], gal:A0U94_06335 [2.3.1.12]) => [1.1.1.40]
(gbe:GbCGDNIH1_1184 [1.2.4.1], gbe:GbCGDNIH1_1185 [2.3.1.12]) => [1.1.1.40]
(gdi:GDI1940 [1.2.4.1], gdi:GDI1941 [2.3.1.12]) => [1.1.1.40]
(gox:GOX2290 [1.2.4.1], gox:GOX2291 [2.3.1.12]) => [1.1.1.40]
(gxl:H845_1900 [1.2.4.1], gxl:H845_1899 [2.3.1.12]) => [1.1.1.40]
(gxy:GLX_07550 [1.2.4.1], gxy:GLX_07540 [2.3.1.12]) => [1.1.1.40]
(hat:RC74_15005 [1.2.4.1], hat:RC74_15000 [2.3.1.12]) => [1.1.1.40]
(hba:Hbal_1687 [1.2.4.1], hba:Hbal_1686 [2.3.1.12]) => [1.1.1.40]
(hbc:AEM38_08000 [1.2.4.1], hbc:AEM38_07995 [2.3.1.12]) => [1.1.1.40]
(hdi:HDIA_1538 [1.2.4.1], hdi:HDIA_1539 [2.3.1.12]) => [1.1.1.40]
(hjo:AY555_08080 [1.2.4.1], hjo:AY555_08085 [2.3.1.12]) => [1.1.1.40]
(hne:HNE_1976 [1.2.4.1], hne:HNE_1975 [2.3.1.12]) => [1.1.1.40]
(hoe:IMCC20628_02090 [1.2.4.1], hoe:IMCC20628_02089 [2.3.1.12]) => [1.1.1.40]
(jan:Jann_1690 [1.2.4.1], jan:Jann_1688 [2.3.1.12]) => [1.1.1.40]
(kba:A0U89_06665 [1.2.4.1], kba:A0U89_06670 [2.3.1.12]) => [1.1.1.40]
(keu:S101446_01150 [1.2.4.1], keu:S101446_01149 [2.3.1.12]) => [1.1.1.40]
(kna:B0W47_03380 [1.2.4.1], kna:B0W47_03375 [2.3.1.12]) => [1.1.1.40]
(kro:BVG79_00934 [1.2.4.1], kro:BVG79_00933 [2.3.1.12]) => [1.1.1.40]
(kvu:EIO_1168 [1.2.4.1], kvu:EIO_1167 [2.3.1.12]) => [1.1.1.40]
(labr:CHH27_06240 [1.2.4.1], labr:CHH27_06235 [2.3.1.12]) => [1.1.1.40]
(lagg:B0E33_26190 [1.2.4.1], lagg:B0E33_26195 [2.3.1.12]) => [1.1.1.40]
(lap:ACP90_14360 [1.2.4.1], lap:ACP90_14365 [2.3.1.12]) => [1.1.1.40]
(lar:lam_319 [1.2.4.1], lar:lam_318 [2.3.1.12]) => [1.1.1.40]
(las:CLIBASIA_02800 [1.2.4.1], las:CLIBASIA_02805 [2.3.1.12]) => [1.1.1.40]
(lcc:B488_11020 [1.2.4.1], lcc:B488_11030 [2.3.1.12]) => [1.1.1.40]
(lmd:METH_09250 [1.2.4.1], lmd:METH_09255 [2.3.1.12]) => [1.1.1.40]
(lvs:LOKVESSMR4R_01694 [1.2.4.1], lvs:LOKVESSMR4R_01695 [2.3.1.12]) => [1.1.1.40]
(maad:AZF01_10055 [1.2.4.1], maad:AZF01_10045 [2.3.1.12]) => [1.1.1.40]
(mag:amb2317 [1.2.4.1], mag:amb2318 [2.3.1.12]) => [1.1.1.40]
(magn:WV31_06605 [1.2.4.1], magn:WV31_06610 [2.3.1.12]) => [1.1.1.40]
(magq:MGMAQ_2051 [1.2.4.1], magq:MGMAQ_2050 [2.3.1.12]) => [1.1.1.40]
(magx:XM1_3336 [1.2.4.1], magx:XM1_3335 [2.3.1.12]) => [1.1.1.40]
(malg:MALG_02099 [1.2.4.1], malg:MALG_02101 [2.3.1.12]) => [1.1.1.40]
(mam:Mesau_04179 [1.2.4.1], mam:Mesau_04178 [2.3.1.12]) => [1.1.1.40]
(mamo:A6B35_19500 [1.2.4.1], mamo:A6B35_19495 [2.3.1.12]) => [1.1.1.40]
(maqu:Maq22A_c16995 [1.2.4.1], maqu:Maq22A_c16980 [2.3.1.12]) => [1.1.1.40]
(mci:Mesci_3696 [1.2.4.1], mci:Mesci_3697 [2.3.1.12]) => [1.1.1.40]
(mes:Meso_1629 [1.2.4.1], mes:Meso_1628 [2.3.1.12]) => [1.1.1.40]
(meso:BSQ44_15320 [1.2.4.1], meso:BSQ44_15310 [2.3.1.12]) => [1.1.1.40]
(mesw:A9K65_019865 [1.2.4.1], mesw:A9K65_019870 [2.3.1.12]) => [1.1.1.40]
(met:M446_5897 [1.2.4.1], met:M446_5900 [2.3.1.12]) => [1.1.1.40]
(meta:Y590_13445 [1.2.4.1], meta:Y590_13455 [2.3.1.12]) => [1.1.1.40]
(mex:Mext_2787 [1.2.4.1], mex:Mext_2789 [2.3.1.12]) => [1.1.1.40]
(mey:TM49_16605 [1.2.4.1], mey:TM49_16600 [2.3.1.12]) => [1.1.1.40]
(mgy:MGMSRv2__0967 [1.2.4.1], mgy:MGMSRv2__0968 [2.3.1.12]) => [1.1.1.40]
(mln:A9174_23275 [1.2.4.1], mln:A9174_23270 [2.3.1.12]) => [1.1.1.40]
(mlo:mlr0384 [1.2.4.1], mlo:mlr0385 [2.3.1.12]) => [1.1.1.40]
(mmed:Mame_02759 [1.2.4.1], mmed:Mame_02760 [2.3.1.12]) => [1.1.1.40]
(mmr:Mmar10_1418 [1.2.4.1], mmr:Mmar10_1420 [2.3.1.12]) => [1.1.1.40]
(mno:Mnod_6516 [1.2.4.1], mno:Mnod_6518 [2.3.1.12]) => [1.1.1.40]
(moc:BB934_27340 [1.2.4.1], moc:BB934_27350 [2.3.1.12]) => [1.1.1.40]
(mop:Mesop_4545 [1.2.4.1], mop:Mesop_4544 [2.3.1.12]) => [1.1.1.40]
(mor:MOC_1206 [1.2.4.1], mor:MOC_1209 [2.3.1.12]) => [1.1.1.40]
(mphy:MCBMB27_00584 [1.2.4.1], mphy:MCBMB27_00587 [2.3.1.12]) => [1.1.1.40]
(mpo:Mpop_2910 [1.2.4.1], mpo:Mpop_2912 [2.3.1.12]) => [1.1.1.40]
(mrd:Mrad2831_0989 [1.2.4.1], mrd:Mrad2831_0992 [2.3.1.12]) => [1.1.1.40]
(msl:Msil_0520 [1.2.4.1], msl:Msil_0521 [2.3.1.12]) => [1.1.1.40]
(mza:B2G69_21495 [1.2.4.1], mza:B2G69_21505 [2.3.1.12]) => [1.1.1.40]
(nao:Y958_07910 [1.2.4.1], nao:Y958_07905 [2.3.1.12]) => [1.1.1.40]
(nar:Saro_1909 [1.2.4.1], nar:Saro_1946 [2.3.1.12]) => [1.1.1.40]
(ncb:C0V82_02645 [1.2.4.1], ncb:C0V82_02650 [2.3.1.12]) => [1.1.1.40]
(nch:A0U93_04645 [1.2.4.1], nch:A0U93_04640 [2.3.1.12]) => [1.1.1.40]
(ngl:RG1141_CH15540 [1.2.4.1], ngl:RG1141_CH15550 [2.3.1.12]) => [1.1.1.40]
(nha:Nham_1750 [1.2.4.1], nha:Nham_1751 [2.3.1.12]) => [1.1.1.40]
(npn:JI59_03450 [1.2.4.1], npn:JI59_03675 [2.3.1.12]) => [1.1.1.40]
(npn:JI59_03450 [1.2.4.1], npn:JI59_10595 [2.2.1.7]) => [1.1.1.40]
(npn:JI59_03450 [1.2.4.1], npn:JI59_22060 [2.3.1.12]) => [1.1.1.40]
(npp:PP1Y_AT26298 [1.2.4.1], npp:PP1Y_AT2120 [2.2.1.7]) => [1.1.1.40]
(npp:PP1Y_AT26298 [1.2.4.1], npp:PP1Y_AT25743 [2.3.1.12]) => [1.1.1.40]
(nre:BES08_04330 [1.2.4.1], nre:BES08_03975 [2.3.1.12]) => [1.1.1.40]
(nre:BES08_04330 [1.2.4.1], | |
# Adding overlay 7 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '7',
'-indexStep', '0',
'-valueStep', '2.0.0.14',
'-value', '2.0.0.14')
ixNet.commit()
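# Note on the overlay pattern used throughout this script: each 'overlay' added to a
# multivalue overrides the value at a single 1-based index ('-index'); with '-count' 1
# and '-indexStep' 0 only that one entry changes, while all other indices keep the base
# pattern configured above (semantics as understood from the IxNetwork low-level API).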
# Adding overlay 8 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '8',
'-indexStep', '0',
'-valueStep', '2.0.0.14',
'-value', '2.0.0.14')
ixNet.commit()
# Adding overlay 9 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '9',
'-indexStep', '0',
'-valueStep', '2.0.0.15',
'-value', '2.0.0.15')
ixNet.commit()
# Adding overlay 10 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '10',
'-indexStep', '0',
'-valueStep', '2.0.0.15',
'-value', '2.0.0.15')
ixNet.commit()
# Adding overlay 11 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '11',
'-indexStep', '0',
'-valueStep', '172.16.17.32',
'-value', '172.16.17.32')
ixNet.commit()
# Adding overlay 12 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '12',
'-indexStep', '0',
'-valueStep', '172.16.17.32',
'-value', '172.16.17.32')
ixNet.commit()
# Adding overlay 13 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '13',
'-indexStep', '0',
'-valueStep', '172.16.58.3',
'-value', '172.16.58.3')
ixNet.commit()
# Adding overlay 14 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '14',
'-indexStep', '0',
'-valueStep', '172.16.58.3',
'-value', '172.16.58.3')
ixNet.commit()
# Adding overlay 15 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '15',
'-indexStep', '0',
'-valueStep', '192.168.3.11',
'-value', '192.168.3.11')
ixNet.commit()
# Adding overlay 16 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '16',
'-indexStep', '0',
'-valueStep', '192.168.3.11',
'-value', '192.168.3.11')
ixNet.commit()
# Adding overlay 17 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '17',
'-indexStep', '0',
'-valueStep', '172.16.58.3',
'-value', '172.16.58.3')
ixNet.commit()
# Adding overlay 18 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '18',
'-indexStep', '0',
'-valueStep', '172.16.58.3',
'-value', '172.16.58.3')
ixNet.commit()
# Adding overlay 19 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '19',
'-indexStep', '0',
'-valueStep', '172.16.58.3',
'-value', '172.16.58.3')
ixNet.commit()
# Adding overlay 20 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '20',
'-indexStep', '0',
'-valueStep', '172.16.58.3',
'-value', '172.16.58.3')
ixNet.commit()
# setting -symbolicPathName
symbolicPathNameMv = ixNet.getAttribute(pccInit2, '-symbolicPathName')
ixNet.add(symbolicPathNameMv, 'string')
ixNet.setMultiAttribute(symbolicPathNameMv + '/string',
'-pattern', 'IXIA LSP {Inc:1,1}')
ixNet.commit()
# setting -includeAssociation
includeAssociationMv = ixNet.getAttribute(pccInit2, '-includeAssociation')
ixNet.add(includeAssociationMv, 'singleValue')
ixNet.setMultiAttribute(includeAssociationMv + '/singleValue',
'-value', 'true')
# Adding overlay 1 for includeAssociation
ovrly = ixNet.add(includeAssociationMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '6',
'-indexStep', '0',
'-valueStep', 'false',
'-value', 'false')
ixNet.commit()
# Adding overlay 2 for includeAssociation
ovrly = ixNet.add(includeAssociationMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '7',
'-indexStep', '0',
'-valueStep', 'false',
'-value', 'false')
ixNet.commit()
# Adding overlay 3 for includeAssociation
ovrly = ixNet.add(includeAssociationMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '8',
'-indexStep', '0',
'-valueStep', 'false',
'-value', 'false')
ixNet.commit()
# Adding overlay 4 for includeAssociation
ovrly = ixNet.add(includeAssociationMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '9',
'-indexStep', '0',
'-valueStep', 'false',
'-value', 'false')
ixNet.commit()
# Adding overlay 5 for includeAssociation
ovrly = ixNet.add(includeAssociationMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '10',
'-indexStep', '0',
'-valueStep', 'false',
'-value', 'false')
ixNet.commit()
# Adding overlay 6 for includeAssociation
ovrly = ixNet.add(includeAssociationMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '16',
'-indexStep', '0',
'-valueStep', 'false',
'-value', 'false')
ixNet.commit()
# Adding overlay 7 for includeAssociation
ovrly = ixNet.add(includeAssociationMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '17',
'-indexStep', '0',
'-valueStep', 'false',
'-value', 'false')
ixNet.commit()
# Adding overlay 8 for includeAssociation
ovrly = ixNet.add(includeAssociationMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '18',
'-indexStep', '0',
'-valueStep', 'false',
'-value', 'false')
ixNet.commit()
# Adding overlay 9 for includeAssociation
ovrly = ixNet.add(includeAssociationMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '19',
'-indexStep', '0',
'-valueStep', 'false',
'-value', 'false')
ixNet.commit()
# Adding overlay 10 for includeAssociation
ovrly = ixNet.add(includeAssociationMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '20',
'-indexStep', '0',
'-valueStep', 'false',
'-value', 'false')
ixNet.commit()
# setting -standbyMode
standbyModeMv = ixNet.getAttribute(pccInit2, '-standbyMode')
ixNet.add(standbyModeMv, 'alternate')
# Adding overlay 1 for standbyMode
ovrly = ixNet.add(standbyModeMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '2',
'-indexStep', '0',
'-valueStep', 'false',
'-value', 'false')
ixNet.commit()
# Adding overlay 2 for standbyMode
ovrly = ixNet.add(standbyModeMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '4',
'-indexStep', '0',
'-valueStep', 'false',
'-value', 'false')
ixNet.commit()
# Adding overlay 3 for standbyMode
ovrly = ixNet.add(standbyModeMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '12',
'-indexStep', '0',
'-valueStep', 'false',
'-value', 'false')
ixNet.commit()
# Adding overlay 4 for standbyMode
ovrly = ixNet.add(standbyModeMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '14',
'-indexStep', '0',
'-valueStep', 'false',
'-value', 'false')
ixNet.commit()
# setting -protectionLsp
protectionLspMv = ixNet.getAttribute(pccInit2, '-protectionLsp')
ixNet.add(protectionLspMv, 'singleValue')
ixNet.setMultiAttribute(protectionLspMv + '/singleValue',
'-value', 'false')
# Adding overlay 1 for protectionLsp
ovrly = ixNet.add(protectionLspMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '1',
'-indexStep', '0',
'-valueStep', 'true',
'-value', 'true')
ixNet.commit()
# Adding overlay 2 for protectionLsp
ovrly = ixNet.add(protectionLspMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '2',
'-indexStep', '0',
'-valueStep', 'true',
'-value', 'true')
ixNet.commit()
# Adding overlay 3 for protectionLsp
ovrly = ixNet.add(protectionLspMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '3',
'-indexStep', '0',
'-valueStep', 'true',
'-value', 'true')
ixNet.commit()
# Adding overlay 4 for protectionLsp
ovrly = ixNet.add(protectionLspMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '4',
'-indexStep', '0',
'-valueStep', 'true',
'-value', 'true')
ixNet.commit()
# Adding overlay 5 for protectionLsp
ovrly = ixNet.add(protectionLspMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '5',
'-indexStep', '0',
'-valueStep', 'true',
'-value', 'true')
ixNet.commit()
# Adding overlay 6 for protectionLsp
ovrly = ixNet.add(protectionLspMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '11',
'-indexStep', '0',
'-valueStep', 'true',
'-value', 'true')
ixNet.commit()
# Adding overlay 7 for protectionLsp
ovrly = ixNet.add(protectionLspMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '12',
'-indexStep', '0',
'-valueStep', 'true',
'-value', 'true')
ixNet.commit()
# Adding overlay 8 for protectionLsp
ovrly = ixNet.add(protectionLspMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '13',
'-indexStep', '0',
'-valueStep', 'true',
'-value', 'true')
ixNet.commit()
# Adding overlay 9 for protectionLsp
ovrly = ixNet.add(protectionLspMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '14',
'-indexStep', '0',
'-valueStep', 'true',
'-value', 'true')
ixNet.commit()
# Adding overlay 10 for protectionLsp
ovrly = ixNet.add(protectionLspMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '15',
'-indexStep', '0',
'-valueStep', 'true',
'-value', 'true')
ixNet.commit()
# setting -associationId
associationIdMv = ixNet.getAttribute(pccInit2, '-associationId')
ixNet.add(associationIdMv, 'singleValue')
ixNet.setMultiAttribute(associationIdMv + '/singleValue',
'-value', '1')
# Adding overlay 1 for associationId
ovrly = ixNet.add(associationIdMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '1',
'-indexStep', '0',
'-valueStep', '11',
'-value', '11')
ixNet.commit()
# Adding overlay 2 for associationId
ovrly = ixNet.add(associationIdMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '2',
'-indexStep', '0',
'-valueStep', '11',
'-value', '11')
ixNet.commit()
# Adding overlay 3 for associationId
ovrly = ixNet.add(associationIdMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '3',
'-indexStep', '0',
'-valueStep', '12',
'-value', '12')
ixNet.commit()
# Adding overlay 4 for associationId
ovrly = ixNet.add(associationIdMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '4',
'-indexStep', '0',
'-valueStep', '12',
'-value', '12')
ixNet.commit()
# Adding overlay 5 for associationId
ovrly = ixNet.add(associationIdMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '5',
'-indexStep', '0',
'-valueStep', '13',
'-value', '13')
ixNet.commit()
# Adding overlay 6 for associationId
ovrly = ixNet.add(associationIdMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '6',
'-indexStep', '0',
'-valueStep', '13',
'-value', '13')
ixNet.commit()
# Adding overlay 7 for associationId
ovrly = ixNet.add(associationIdMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '11',
'-indexStep', '0',
'-valueStep', '111',
'-value', '111')
ixNet.commit()
# Adding overlay 8 for associationId
ovrly = ixNet.add(associationIdMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '12',
'-indexStep', '0',
'-valueStep', '111',
'-value', '111')
ixNet.commit()
# Adding overlay 9 for associationId
ovrly = ixNet.add(associationIdMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '13',
'-indexStep', '0',
'-valueStep', '112',
'-value', '112')
ixNet.commit()
# Adding overlay 10 for associationId
ovrly = ixNet.add(associationIdMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '14',
'-indexStep', '0',
'-valueStep', '112',
'-value', '112')
ixNet.commit()
# Adding overlay 11 for associationId
ovrly = ixNet.add(associationIdMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '15',
'-indexStep', '0',
'-valueStep', '113',
'-value', '113')
ixNet.commit()
# Adding overlay 12 for associationId
ovrly = ixNet.add(associationIdMv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '16',
'-indexStep', '0',
'-valueStep', '113',
'-value', '113')
ixNet.commit()
pccInit2 = pccGroup2+'/pceInitiateLspParameters:10'
# setting -numberOfEroSubObjects
ixNet.setAttribute(pccInit2, '-numberOfEroSubObjects', '1')
ixNet.commit()
# setting -srcEndPointIpv4
srcEndPointIpv4Mv = ixNet.getAttribute(pccInit2, '-srcEndPointIpv4')
ixNet.add(srcEndPointIpv4Mv, 'singleValue')
ixNet.setMultiAttribute(srcEndPointIpv4Mv + '/singleValue',
'-value', '0.0.0.0')
# Adding overlay 1 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '1',
'-indexStep', '0',
'-valueStep', '1.0.0.11',
'-value', '1.0.0.11')
ixNet.commit()
# Adding overlay 2 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '2',
'-indexStep', '0',
'-valueStep', '1.0.0.11',
'-value', '1.0.0.11')
ixNet.commit()
# Adding overlay 3 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '3',
'-indexStep', '0',
'-valueStep', '1.0.0.12',
'-value', '1.0.0.12')
ixNet.commit()
# Adding overlay 4 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '4',
'-indexStep', '0',
'-valueStep', '1.0.0.12',
'-value', '1.0.0.12')
ixNet.commit()
# Adding overlay 5 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '5',
'-indexStep', '0',
'-valueStep', '1.0.0.13',
'-value', '1.0.0.13')
ixNet.commit()
# Adding overlay 6 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '6',
'-indexStep', '0',
'-valueStep', '1.0.0.13',
'-value', '1.0.0.13')
ixNet.commit()
# Adding overlay 7 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '7',
'-indexStep', '0',
'-valueStep', '1.0.0.14',
'-value', '1.0.0.14')
ixNet.commit()
# Adding overlay 8 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '8',
'-indexStep', '0',
'-valueStep', '1.0.0.14',
'-value', '1.0.0.14')
ixNet.commit()
# Adding overlay 9 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '9',
'-indexStep', '0',
'-valueStep', '1.0.0.15',
'-value', '1.0.0.15')
ixNet.commit()
# Adding overlay 10 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '10',
'-indexStep', '0',
'-valueStep', '1.0.0.15',
'-value', '1.0.0.15')
ixNet.commit()
# Adding overlay 11 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '11',
'-indexStep', '0',
'-valueStep', '192.168.127.12',
'-value', '192.168.127.12')
ixNet.commit()
# Adding overlay 12 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '12',
'-indexStep', '0',
'-valueStep', '192.168.127.12',
'-value', '192.168.127.12')
ixNet.commit()
# Adding overlay 13 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '13',
'-indexStep', '0',
'-valueStep', '192.168.3.11',
'-value', '192.168.3.11')
ixNet.commit()
# Adding overlay 14 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '14',
'-indexStep', '0',
'-valueStep', '192.168.3.11',
'-value', '192.168.3.11')
ixNet.commit()
# Adding overlay 15 for srcEndPointIpv4
<reponame>CMUSTRUDEL/flask-browser
"""
Foo
"""
from flask import render_template, flash, redirect, url_for, abort
from flask import request
from werkzeug.urls import url_parse
from flask_paginate import Pagination, get_page_parameter, get_page_args
from flask_login import current_user, login_required
from app import app
from app import db, pmongo
from app.forms import LabelForm
from app.models import User
from app.utils import is_toxic
from datetime import datetime
from bson.objectid import ObjectId
# Sophie's handpicked list of PR ids
from app.survey import survey
from app.queries import query_predicted_issues_all, \
query_predicted_issues_w_comments, \
query_annotations, \
query_annotations_toxic, \
query_individual_annotations, \
query_tolabel, \
query_predicted_prs_all, \
query_predicted_prs_w_comments, \
query_predicted_prs_w_review_comments, \
query_stratified, \
query_survey, \
query_closed,\
query_locked,\
query_deleted,\
query_toxiccomment,\
query_coc, qunlabeled, qlabeled_toxic
def get_page_details():
page = int(request.args.get('page', 1))
per_page = app.config['PER_PAGE_PARAMETER']
offset = (page - 1) * per_page
return (page, per_page, offset)
def get_pagination(page, per_page, per_page_parameter, offset, total):
return Pagination(page=page,
per_page=per_page,
per_page_parameter=per_page_parameter,
offset=offset,
prev_label="Previous",
next_label="Next",
total=total,
css_framework='bootstrap3',
search=False)
def get_label_buttons(label_buttons, endpoint, labels, collection, eid):
"""
Creates endpoints with the prefix 'endpoint' to assign qualitative
analysis codes to a given document 'eid' in a given 'collection'.
The codes are taken from 'labels' (typically part of the app config).
Appends the new endpoints to an existing dictionary 'label_buttons'.
"""
label_buttons.setdefault(eid, [])
for l in labels:
d = {'url':'/%s/%s/%s/%s' % (endpoint, collection, eid, l[1]),
'name':l[0]}
label_buttons[eid].append(d)
return label_buttons
def get_labels(labels, collection, prefix, header, eid):
"""
Retrieves all the unique labels of type 'header' (e.g.,
qualitative_analysis_labels) nested under 'prefix' (e.g.,
'toxicity'), that have been assigned to a given document
'eid' in a given 'collection' (e.g., christian_toxic_issues).
Appends the labels to an existing dictionary 'labels'.
"""
labels.setdefault(eid, [])
r = pmongo.db[collection].find_one({"_id":ObjectId(eid)})
if r:
llist = r.get(prefix, {}).get(header,[])
seen = {}
for label in llist:
if not seen.get((label['user'], label['label']), False):
labels[eid].append(label)
seen[(label['user'], label['label'])] = True
return labels
def get_label_buttons_from_db(label_buttons, endpoint, labels, collection, eid):
"""
Creates endpoints with the prefix 'endpoint' to assign qualitative
analysis codes to a given document 'eid' in a given 'collection'.
The codes are taken from the database (so they can change dynamically).
Appends the new endpoints to an existing dictionary 'label_buttons'.
"""
label_buttons.setdefault(eid, [])
# The 'labels' argument is not currently used
labels = [l for l in pmongo.db['bogdan_toxic_qualitative_analysis'].find()]
for l in labels:
d = {'url':'/%s/%s/%s/%s' % (endpoint, collection, eid, l['label_sanitized']),
'name':l['label_sanitized']}
label_buttons[eid].append(d)
return label_buttons
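# A hypothetical usage sketch (not part of the original views) showing how the helpers
# above compose for one page of documents; the collection name, the `TOXICITY_LABELS`
# config key, and the (name, slug) label shape are assumptions for illustration:
#     label_buttons, labels = {}, {}
#     for doc in docs:
#         eid = str(doc['_id'])
#         label_buttons = get_label_buttons(
#             label_buttons, 'toxiclabel', app.config['TOXICITY_LABELS'],
#             'christian_toxic_issues', eid)
#         labels = get_labels(labels, 'christian_toxic_issues',
#                             'toxicity', 'manual_labels', eid)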
def count_labeled_issues(query):
return pmongo.db["christian_toxic_issues"].count(qlabeled_toxic(query))
@app.route('/pushbacklabel/<table>/<eid>/<label>')
@login_required
def label_pushback_entry(table, eid, label):
score = 0
is_pushback = False
if label=="pushback":
score = 1
is_pushback = True
reason = label
if table == "pull_requests":
table = "christian_toxic_pull_requests"
print(table)
r = pmongo.db[table].find_one_and_update(
{"_id":ObjectId(eid)},
{"$push":
{"pushback.manual_labels":
{'user':current_user.username,
'timestamp':datetime.now(),
'label':reason,
                'is_pushback':score},
},
"$set":
{"has_pb_label":True,
"is_labeled_pushback":is_pushback}
}
)
if not r:
flash(str(eid)+" not found in "+table, category='error')
return redirect(request.referrer)
# if comment, update parent issue
if table == 'christian_toxic_issue_comments':
comment = pmongo.db['issue_comments'].find_one({'_id':ObjectId(eid)})
issue = pmongo.db['issues'].find_one({'owner':comment['owner'],
'repo':comment['repo'],
'number':comment['issue_id']})
r1 = pmongo.db['christian_toxic_issues'].find_one_and_update(
{"_id":ObjectId(issue['_id'])},
{"$set":{
"has_pb_labeled_comment":True,
"has_labeled_pb_comment":is_pushback
},
"$push": { "pushback.manual_labeled_comments": { 'user': current_user.username, 'is_pushback': score } }
}
)
r2 = pmongo.db['christian_toxic_pull_requests'].find_one_and_update(
{"_id":ObjectId(issue['_id'])},
{"$set":{
"has_pb_labeled_comment":True,
"has_labeled_pb_comment":is_pushback
},
"$push": { "pushback.manual_labeled_comments": { 'user':
current_user.username, 'is_pushback': score } }
}
)
if not r1 and not r2:
flash("Parent issue / PR for comment "+str(eid)+" not found", category='error')
return redirect(request.referrer)
# if review comment, update parent PR
if table == 'christian_toxic_pull_request_comments':
comment = pmongo.db['pull_request_comments'].find_one({'_id':ObjectId(eid)})
issue = pmongo.db['pull_requests'].find_one({'owner':comment['owner'],
'repo':comment['repo'],
'number':comment['pullreq_id']})
r1 = pmongo.db['christian_toxic_issues'].find_one_and_update(
{"_id":ObjectId(issue['_id'])},
{"$set":{
"has_pb_labeled_comment":True,
"has_labeled_pb_comment":is_pushback
},
"$push": { "pushback.manual_labeled_comments": { 'user': current_user.username, 'is_pushback': score } }
}
)
r2 = pmongo.db['christian_toxic_pull_requests'].find_one_and_update(
{"_id":ObjectId(issue['_id'])},
{"$set":{
"has_pb_labeled_comment":True,
"has_labeled_pb_comment":is_pushback
},
"$push": { "pushback.manual_labeled_comments": { 'user': current_user.username, 'is_pushback': score } }
}
)
if not r1 and not r2:
flash("Parent issue / PR for comment "+str(eid)+" not found", category='error')
return redirect(request.referrer)
flash(str(eid)+" updated", category='info')
return redirect(request.referrer)
@app.route('/toxiclabel/<table>/<eid>/<label>')
@login_required
def label_toxic_entry(table, eid, label):
score = 0
is_toxic = False
if label=="toxic":
score = 1
is_toxic = True
reason = label
r = pmongo.db[table].find_one_and_update(
{"_id":ObjectId(eid)},
{"$push":
{"toxicity.manual_labels":
{'user':current_user.username,
'timestamp':datetime.now(),
'label':reason,
'is_toxic':score},
},
"$set":
{"is_labeled":True,
"is_labeled_toxic":is_toxic}
}
)
if not r:
flash(str(eid)+" not found in "+table, category='error')
return redirect(request.referrer)
# if comment, update parent issue
if table == 'christian_toxic_issue_comments':
comment = pmongo.db['issue_comments'].find_one({'_id':ObjectId(eid)})
issue = pmongo.db['issues'].find_one({'owner':comment['owner'],
'repo':comment['repo'],
'number':comment['issue_id']})
r1 = pmongo.db['christian_toxic_issues'].find_one_and_update(
{"_id":ObjectId(issue['_id'])},
{"$set":{
"has_labeled_comment":True,
"has_labeled_toxic_comment":is_toxic
},
"$push": { "toxicity.manual_labeled_comments": { 'user': current_user.username, 'is_toxic': score } }
}
)
r2 = pmongo.db['christian_toxic_pull_requests'].find_one_and_update(
{"_id":ObjectId(issue['_id'])},
{"$set":{
"has_labeled_comment":True,
"has_labeled_toxic_comment":is_toxic
},
"$push": { "toxicity.manual_labeled_comments": { 'user': current_user.username, 'is_toxic': score } }
}
)
if not r1 and not r2:
flash("Parent issue / PR for comment "+str(eid)+" not found", category='error')
return redirect(request.referrer)
# if review comment, update parent PR
if table == 'christian_toxic_pull_request_comments':
comment = pmongo.db['pull_request_comments'].find_one({'_id':ObjectId(eid)})
issue = pmongo.db['pull_requests'].find_one({'owner':comment['owner'],
'repo':comment['repo'],
'number':comment['pullreq_id']})
r1 = pmongo.db['christian_toxic_issues'].find_one_and_update(
{"_id":ObjectId(issue['_id'])},
{"$set":{
"has_labeled_comment":True,
"has_labeled_toxic_comment":is_toxic
},
"$push": { "toxicity.manual_labeled_comments": { 'user': current_user.username, 'is_toxic': score } }
}
)
r2 = pmongo.db['christian_toxic_pull_requests'].find_one_and_update(
{"_id":ObjectId(issue['_id'])},
{"$set":{
"has_labeled_comment":True,
"has_labeled_toxic_comment":is_toxic
},
"$push": { "toxicity.manual_labeled_comments": { 'user': current_user.username, 'is_toxic': score } }
}
)
if not r1 and not r2:
flash("Parent issue / PR for comment "+str(eid)+" not found", category='error')
return redirect(request.referrer)
flash(str(eid)+" updated", category='info')
return redirect(request.referrer)
@app.route('/qualitativelabel/<table>/<eid>/<label>')
@login_required
def add_code(table, eid, label):
# flash(label)
label_sanitized = '-'.join(label.lower().split())
r = pmongo.db[table].find_one_and_update(
{"_id":ObjectId(eid)},
{"$push":
{"toxicity.qualitative_analysis_labels":
{'user':current_user.username,
# 'timestamp':datetime.now(),
'label':label,
'label_sanitized':label_sanitized},
},
"$set":
{"is_coded":True}
}
)
if not r:
flash(str(eid)+" not found in "+table, category='error')
return redirect(request.referrer)
# If comment, update parent issue
if table == 'christian_toxic_issue_comments':
comment = pmongo.db['issue_comments'].find_one({'_id':ObjectId(eid)})
issue = pmongo.db['issues'].find_one({'owner':comment['owner'],
'repo':comment['repo'],
'number':comment['issue_id']})
r = pmongo.db['christian_toxic_issues'].find_one_and_update(
{"_id":ObjectId(issue['_id'])},
{"$set":
{"has_coded_comment":True}
}
)
if not r:
flash("Parent issue for comment "+str(eid)+" not found", category='error')
return redirect(request.referrer)
# Add label to list of labels
code = pmongo.db['bogdan_toxic_qualitative_analysis'].find_one({'label':label,
'label_sanitized':label_sanitized})
if not code:
pmongo.db['bogdan_toxic_qualitative_analysis'].insert({'label':label,
'label_sanitized':label_sanitized})
flash(str(eid)+" updated", category='info')
return redirect(request.referrer)
@app.route('/list/<what>', methods=['GET', 'POST'])
@login_required
def list_issues(what):
with_total = False
order = []
disable_coding = False
is_pr = False
top_collection = 'issues'
toxic_top_collection = 'christian_toxic_issues'
comments_collection = 'issue_comments'
toxic_comments_collection = 'christian_toxic_issue_comments'
review_comments_collection = 'pull_request_comments'
toxic_review_comments_collection = 'christian_toxic_pull_request_comments'
prediction_labels = 'TOXICITY_LABELS'
doc_prefix = 'toxicity'
endpoint_prefix = 'toxiclabel'
definition = ''
task_type = 'toxicity'
label_counter = -1
if what == 'classifier_issues_w_comments':
q = query_predicted_issues_w_comments
elif what == 'classifier_issues_all':
q = query_predicted_issues_all
elif what == 'annotated_issues':
q = query_annotations
with_total = True
elif what == 'annotated_issues_toxic':
q = query_annotations_toxic
with_total = True
elif what == 'individual_annotated_issues':
q = query_individual_annotations(current_user.username)
with_total = True
elif what == "label_toxic":
q = query_tolabel(current_user.username)
order = [("random", 1)]
disable_coding = True
elif what == 'classifier_prs_all':
q = query_predicted_prs_all
is_pr = True
elif what == 'classifier_prs_w_comments':
q = query_predicted_prs_w_comments
is_pr = True
elif what == 'classifier_prs_w_review_comments':
q = query_predicted_prs_w_review_comments
is_pr = True
elif what == 'sophie_sampled_prs':
q = query_tolabel(current_user.username)
is_pr = True
toxic_top_collection = 'christian_toxic_pull_requests'
top_collection = 'pull_requests'
elif what == 'sophie_survey_prs':
q = query_survey(current_user.username)
is_pr = True
toxic_top_collection = 'christian_toxic_pull_requests'
top_collection = 'pull_requests'
prediction_labels = 'PUSHBACK_LABELS'
doc_prefix = 'pushback'
definition = 'Definition: the perception of unnecessary interpersonal conflict in code review while a reviewer is blocking a change request'
task_type = 'pushback'
endpoint_prefix = 'pushbacklabel'
elif what == 'sophie_closed':
q = query_closed(current_user.username)
is_pr = True
toxic_top_collection = 'pull_requests'
top_collection = 'pull_requests'
prediction_labels = 'PUSHBACK_LABELS'
doc_prefix = 'pushback'
definition = 'Definition: the perception of unnecessary interpersonal conflict in code review while a reviewer is blocking a change request'
task_type = 'pushback'
endpoint_prefix = 'pushbacklabel'
elif what == 'label_locked':
q = qunlabeled(query_locked)
label_counter = count_labeled_issues(query_locked)
with_total = True
elif what == 'label_deleted':
q = qunlabeled(query_deleted)
label_counter = count_labeled_issues(query_deleted)
with_total = True
elif what == 'label_toxiccomment':
q = qunlabeled(query_toxiccomment)
label_counter = count_labeled_issues(query_toxiccomment)
with_total = True
elif what == 'label_coc':
q = qunlabeled(query_coc)
label_counter = count_labeled_issues(query_coc)
with_total = True
print(q)
page, per_page, offset = get_page_details()
cursor = pmongo.db[toxic_top_collection].find(q, sort=order)
issues_for_render = cursor.skip(offset).limit(per_page)
form = LabelForm()
if form.validate_on_submit():
element_id = form.element_id.data
element_type = form.element_type.data
if element_type == 'issue':
table = 'christian_toxic_issues'
elif element_type == 'issue_comment':
table = 'christian_toxic_issue_comments'
elif element_type == 'pull_request':
table = 'christian_toxic_pull_requests'
elif element_type == 'pull_request_comment':
table = 'christian_toxic_issue_comments'
else:
table = 'christian_toxic_pull_request_comments'
label = '-'.join(form.element_label.data.lower().split())
submit_pressed = form.submit.data # this will be True if Submit was pressed, False otherwise
add_code(table, element_id, label)
# do stuff
return redirect(request.referrer)
issues = {}
toxicity_labels = {}
toxicity_label_buttons = {}
qualitative_labels = {}
qualitative_label_buttons = {}
issue_titles = {}
comments = {}
for tissue in issues_for_render:
issue = pmongo.db[top_collection]\
.find_one({'_id':ObjectId(tissue['_id'])})
        issues[str(tissue['_id'])] = issue
# imports csv library
import csv
# RICS .CSV
ricsFileName = 'Oboz'
ricsFile = open(ricsFileName + '.csv')
ricsReader = csv.reader(ricsFile)
ricsData = list(ricsReader)
# AMAZON .CSV
amzFileName = 'Amazon'
amzFile = open(amzFileName + '.csv')
amzReader = csv.reader(amzFile)
amzData = list(amzReader)
# Number of possible rows to go through from Amazon's CSV file
numRows = (len(amzData) + 1)
numRowsRics = (len(ricsData) + 1)
# Name of blank output csv file
outputFileName = 'output ' + ricsFileName
outputFile = open(outputFileName+'.csv', 'w', newline='')
outputWriter = csv.writer(outputFile)
outputWriter.writerow(['Brand', 'RICS SKU', 'Size', '', 'AMZ SKU', 'AMZ Qty'])
# Counting Variables, j must start at 1 so it starts in the right row.
j = 1
i = 0
# Main
while i < numRows:
if j == (numRowsRics-2):
break
amzSku = (amzData[i][0])
amzQty = (amzData[i][3])
ricsSku = (ricsData[j][2])
ricsSupplier = (ricsData[j][7])
ricsSize = (ricsData[j][15])
ricsWidth = (ricsData[j][16])
ricsQty = (ricsData[j][18])
# All possible SKUs on Amazon (so far)
skuOne = ricsSku + ' ' + ricsSize + ' ' + ricsWidth
skuTwo = ricsSku + ' ' + ricsSize + ricsWidth
skuThree = ricsSku + ' ' + ricsSize + '(' + ricsWidth + ')'
skuFour = ricsSku + ' ' + ricsSize + ' ' + '(' + ricsWidth + ')'
    # Default width forms; the EE/EEE branches below override these when the width is 2E or 3E.
    skuFive = ricsSku + ' ' + ricsSize + ' ' + ricsWidth
    skuSix = ricsSku + ' ' + ricsSize + ricsWidth
    skuSeven = ricsSku + ' ' + ricsSize + '(' + ricsWidth + ')'
    skuEight = ricsSku + ' ' + ricsSize + ' ' + '(' + ricsWidth + ')'
    skuNine = ricsSku + ' ' + ricsSize + ' ' + ricsWidth
    skuTen = ricsSku + ' ' + ricsSize + ricsWidth
    skuEleven = ricsSku + ' ' + ricsSize + '(' + ricsWidth + ')'
    skuTwelve = ricsSku + ' ' + ricsSize + ' ' + '(' + ricsWidth + ')'
# In case some SKUs are named with EE on Amazon
if ricsWidth == "2E":
ricsWidth2 = "EE"
skuOne = ricsSku + ' ' + ricsSize + ' ' + ricsWidth
skuTwo = ricsSku + ' ' + ricsSize + ricsWidth
skuThree = ricsSku + ' ' + ricsSize + '(' + ricsWidth + ')'
skuFour = ricsSku + ' ' + ricsSize + ' ' + '(' + ricsWidth + ')'
skuFive = ricsSku + ' ' + ricsSize + ' ' + ricsWidth2
skuSix = ricsSku + ' ' + ricsSize + ricsWidth2
skuSeven = ricsSku + ' ' + ricsSize + '(' + ricsWidth2 + ')'
skuEight = ricsSku + ' ' + ricsSize + ' ' + '(' + ricsWidth2 + ')'
# In case some SKUs are named with EEE on Amazon
elif ricsWidth == "3E":
ricsWidth3 = "EEE"
skuOne = ricsSku + ' ' + ricsSize + ' ' + ricsWidth
skuTwo = ricsSku + ' ' + ricsSize + ricsWidth
skuThree = ricsSku + ' ' + ricsSize + '(' + ricsWidth + ')'
skuFour = ricsSku + ' ' + ricsSize + ' ' + '(' + ricsWidth + ')'
skuNine = ricsSku + ' ' + ricsSize + ' ' + ricsWidth3
skuTen = ricsSku + ' ' + ricsSize + ricsWidth3
skuEleven = ricsSku + ' ' + ricsSize + '(' + ricsWidth3 + ')'
skuTwelve = ricsSku + ' ' + ricsSize + ' ' + '(' + ricsWidth3 + ')'
# X = SKU
# Y = SIZE
# Z = WIDTH
# Checking if SKU is in form "X Y Z"
if amzSku == skuOne:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# Checking if SKU is in form "X YZ"
elif amzSku == skuTwo:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# Checking if SKU is in form "X Y(Z)"
elif amzSku == skuThree:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# Checking if SKU is in form "X Y (Z)"
elif amzSku == skuFour:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# Checking if SKU is in form "X Y ZZ"
elif amzSku == skuFive:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# Checking if SKU is in form "X YZZ"
elif amzSku == skuSix:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# Checking if SKU is in form "X Y(ZZ)"
elif amzSku == skuSeven:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# Checking if SKU is in form "X Y (ZZ)"
elif amzSku == skuEight:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# Checking if SKU is in form "X Y ZZZ"
elif amzSku == skuNine:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# Checking if SKU is in form "X YZZZ"
elif amzSku == skuTen:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# Checking if SKU is in form "X Y(ZZZ)"
elif amzSku == skuEleven:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# Checking if SKU is in form "X Y (ZZZ)"
elif amzSku == skuTwelve:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
| |
# * Initialize $\mathbf{w} = \mathbf{0}$
# * Repeat $T$ times:
# * for each $(x,y) \in \mathcal{D}$ (in random order):
# * $\tilde{y} = \text{argmax}_{y'\in \mathcal{Y}} \mathbf{Score}_{\textbf{w}, \phi}(x,y') + \mathbf{cost}(y,y')$
# * $\mathbf{w} = \mathbf{w} + \eta(\phi(x,y) - \phi(x,\tilde{y}))$
#
# This is very intuitive – push the weights in the direction of the positive cases. It doesn't require any probability theory. And such loss functions have proven highly effective in many settings. For a more powerful version of this classifier, see [sklearn.linear_model.SGDClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDClassifier.html#sklearn.linear_model.SGDClassifier). With `loss='hinge'`, it should behave much like `BasicSGDClassifier` (but faster!).
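# To make that suggestion concrete, here is a hedged sketch of such a wrapper in the same style as the others in this notebook, built on `sklearn.linear_model.SGDClassifier` with `loss='hinge'`; the particular `max_iter` and `tol` values are illustrative assumptions, not tuned choices.
def fit_sklearn_hinge_sgd_classifier(X, y):
    """Sketch: a hinge-loss SGD classifier from scikit-learn.
    Parameters
    ----------
    X : 2d np.array
        The matrix of features, one example per row.
    y : list
        The list of labels for rows in `X`.
    Returns
    -------
    sklearn.linear_model.SGDClassifier
        A trained `SGDClassifier` instance.
    """
    from sklearn.linear_model import SGDClassifier
    mod = SGDClassifier(loss='hinge', max_iter=100, tol=1e-3)
    mod.fit(X, y)
    return mod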
# ### Wrapper for BasicSGDClassifier
#
# For the sake of our experimental framework, a simple wrapper for `BasicSGDClassifier`:
# In[13]:
def fit_basic_sgd_classifier(X, y):
"""Wrapper for `BasicSGDClassifier`.
Parameters
----------
X : 2d np.array
The matrix of features, one example per row.
y : list
The list of labels for rows in `X`.
Returns
-------
BasicSGDClassifier
A trained `BasicSGDClassifier` instance.
"""
mod = BasicSGDClassifier()
mod.fit(X, y)
return mod
# ### Wrapper for LogisticRegression
#
# As I said above, we likely don't want to rely on `BasicSGDClassifier` (though it does a good job with SST!). Instead, we want to rely on `sklearn`. Here's a simple wrapper for [sklearn.linear_model.LogisticRegression](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) using our
# `build_dataset` paradigm.
# In[14]:
def fit_softmax_classifier(X, y):
"""Wrapper for `sklearn.linear.model.LogisticRegression`. This is
also called a Maximum Entropy (MaxEnt) Classifier, which is more
fitting for the multiclass case.
Parameters
----------
X : 2d np.array
The matrix of features, one example per row.
y : list
The list of labels for rows in `X`.
Returns
-------
    sklearn.linear_model.LogisticRegression
A trained `LogisticRegression` instance.
"""
mod = LogisticRegression(
fit_intercept=True,
solver='liblinear',
multi_class='auto')
mod.fit(X, y)
return mod
# ### Other scikit-learn models
#
# * The [sklearn.linear_model](http://scikit-learn.org/stable/modules/classes.html#module-sklearn.linear_model) package has a number of other classifier models that could be effective for SST.
#
# * The [sklearn.ensemble](http://scikit-learn.org/stable/modules/classes.html#module-sklearn.ensemble) package contains powerful classifiers as well. The theme that runs through all of them is that one can get better results by averaging the predictions of a bunch of more basic classifiers. A [RandomForestClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html#sklearn.ensemble.RandomForestClassifier) will bring some of the power of deep learning models without the optimization challenges (though see [this blog post on some limitations of the current sklearn implementation](https://roamanalytics.com/2016/10/28/are-categorical-variables-getting-lost-in-your-random-forests/)). A wrapper sketch in the style used above follows this list.
#
# * The [sklearn.svm](http://scikit-learn.org/stable/modules/classes.html#module-sklearn.svm) contains variations on Support Vector Machines (SVMs).
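# As a hedged sketch of the ensemble option above, here is a wrapper in the same style as the others; the `n_estimators` value is an illustrative assumption, not a tuned choice.
def fit_random_forest_classifier(X, y):
    """Sketch: wrapper for `sklearn.ensemble.RandomForestClassifier`.
    Parameters
    ----------
    X : 2d np.array
        The matrix of features, one example per row.
    y : list
        The list of labels for rows in `X`.
    Returns
    -------
    sklearn.ensemble.RandomForestClassifier
        A trained `RandomForestClassifier` instance.
    """
    from sklearn.ensemble import RandomForestClassifier
    mod = RandomForestClassifier(n_estimators=100)
    mod.fit(X, y)
    return mod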
# ## Experiments
#
# We now have all the pieces needed to run experiments. And __we're going to want to run a lot of experiments__, trying out different feature functions, taking different perspectives on the data and labels, and using different models.
#
# To make that process efficient and regimented, `sst` contains a function `experiment`. All it does is pull together these pieces and use them for training and assessment. It's complicated, but the flexibility will turn out to be an asset.
# ### Experiment with default values
# In[15]:
_ = sst.experiment(
SST_HOME,
unigrams_phi,
fit_softmax_classifier,
train_reader=sst.train_reader,
assess_reader=None,
train_size=0.7,
class_func=sst.ternary_class_func,
score_func=utils.safe_macro_f1,
verbose=True)
# A few notes on this function call:
#
# * Since `assess_reader=None`, the function reports performance on a random train–test split. Give `sst.dev_reader` as the argument to assess against the `dev` set.
#
# * `unigrams_phi` is the function we defined above. By changing/expanding this function, you can start to improve on the above baseline, perhaps periodically seeing how you do on the dev set. A bigram variant is sketched just after these notes.
#
# * `fit_softmax_classifier` is the wrapper we defined above. To assess new models, simply define more functions like this one. Such functions just need to consume an `(X, y)` constituting a dataset and return a model.
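# As a hedged sketch of one such expansion, here is a bigram feature function. It assumes, as the unigram function is assumed to, that each example arrives as an NLTK tree whose leaves are the tokens; if your examples are plain token lists, drop the `.leaves()` call.
def bigrams_phi(tree):
    """Sketch: token-bigram features for an SST example.
    Parameters
    ----------
    tree : nltk.tree.Tree
        The example to represent (assumed input format).
    Returns
    -------
    Counter
        A map from bigram tuples to their counts in `tree`.
    """
    from collections import Counter
    words = tree.leaves()
    return Counter(zip(words, words[1:]))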
# ### A dev set run
# In[14]:
_ = sst.experiment(
SST_HOME,
unigrams_phi,
fit_softmax_classifier,
class_func=sst.ternary_class_func,
assess_reader=sst.dev_reader)
# ### Assessing BasicSGDClassifier
# In[15]:
_ = sst.experiment(
SST_HOME,
unigrams_phi,
fit_basic_sgd_classifier,
class_func=sst.ternary_class_func,
assess_reader=sst.dev_reader)
# ### Comparison with the baselines from Socher et al. 2013
#
# Where does our default set-up sit with regard to published baselines for the binary problem? (Compare [Socher et al., Table 1](http://www.aclweb.org/anthology/D/D13/D13-1170.pdf).)
# In[16]:
_ = sst.experiment(
SST_HOME,
unigrams_phi,
fit_softmax_classifier,
class_func=sst.binary_class_func,
assess_reader=sst.dev_reader)
# ### A shallow neural network classifier
#
# While we're at it, we might as well see whether adding a hidden layer to our softmax classifier yields any benefits. Whereas `LogisticRegression` is, at its core, computing
#
# $$\begin{align*}
# y &= \textbf{softmax}(xW_{xy} + b_{y})
# \end{align*}$$
#
# the shallow neural network inserts a hidden layer with a non-linear activation applied to it:
#
# $$\begin{align*}
# h &= \tanh(xW_{xh} + b_{h}) \\
# y &= \textbf{softmax}(hW_{hy} + b_{y})
# \end{align*}$$
# In[17]:
def fit_nn_classifier(X, y):
mod = TorchShallowNeuralClassifier(
hidden_dim=50, max_iter=100)
mod.fit(X, y)
return mod
# In[18]:
_ = sst.experiment(
SST_HOME,
unigrams_phi,
fit_nn_classifier,
class_func=sst.binary_class_func)
# It looks like, with enough iterations (and perhaps some fiddling with the activation function and hidden dimensionality), this classifier would meet or exceed the baseline set up by `LogisticRegression`.
# ### A softmax classifier in PyTorch
#
# Our PyTorch modules should support easy modification. For example, to turn `TorchShallowNeuralClassifier` into a `TorchSoftmaxClassifier`, one need only write a new `define_graph` method:
# In[19]:
class TorchSoftmaxClassifier(TorchShallowNeuralClassifier):
def define_graph(self):
return nn.Linear(self.input_dim, self.n_classes_)
# In[20]:
def fit_torch_softmax(X, y):
mod = TorchSoftmaxClassifier(max_iter=100)
mod.fit(X, y)
return mod
# In[21]:
_ = sst.experiment(
SST_HOME,
unigrams_phi,
fit_torch_softmax,
class_func=sst.binary_class_func)
# ## Hyperparameter search
#
# The training process learns __parameters__ — the weights. There are typically lots of other parameters that need to be set. For instance, our `BasicSGDClassifier` has a learning rate parameter and a training iteration parameter. These are called __hyperparameters__. The more powerful `sklearn` classifiers often have many more such hyperparameters. These are outside of the explicitly stated objective, hence the "hyper" part.
#
# So far, we have just set the hyperparameters by hand. However, their optimal values can vary widely between datasets, and choices here can dramatically impact performance, so we would like to set them as part of the overall experimental framework.
# ### utils.fit_classifier_with_crossvalidation
#
# Luckily, `sklearn` provides a lot of functionality for setting hyperparameters via cross-validation. The function `utils.fit_classifier_with_crossvalidation` implements a basic framework for taking advantage of these options.
#
# This method has the same basic shape as `fit_softmax_classifier` above: it takes a dataset as input and returns a trained model. However, to find its favored model, it explores a space of hyperparameters supplied by the user, seeking the optimal combination of settings.
#
# __Note__: this kind of search seems not to have a large impact for SST as we're using it. However, it can matter a lot for other data sets, and it's also an important step to take when trying to publish, since __reviewers are likely to want to check that your comparisons aren't based in part on opportunistic or ill-considered choices for the hyperparameters__.
# ### Example using LogisticRegression
#
# Here's a fairly full-featured use of the above for the `LogisticRegression` model family:
# In[22]:
def fit_softmax_with_crossvalidation(X, y):
"""A MaxEnt model of dataset with hyperparameter
cross-validation. Some notes:
* 'fit_intercept': whether to include the class bias feature.
* 'C': weight for the regularization term (smaller is more regularized).
    * 'penalty': type of regularization -- roughly, 'l1' encourages small
sparse models, and 'l2' encourages the weights to conform to a
gaussian prior distribution.
Other arguments can be cross-validated; see
http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html
Parameters
----------
X : 2d np.array
The matrix of features, one example per row.
y : list
The list of labels for rows in `X`.
Returns
-------
sklearn.linear_model.LogisticRegression
A trained model instance, the best model found.
"""
basemod = LogisticRegression(
fit_intercept=True,
solver='liblinear',
multi_class='auto')
cv = 5
param_grid = {'fit_intercept': [True, False],
'C': [0.4, 0.6, 0.8, 1.0, 2.0, 3.0],
'penalty': ['l1','l2']}
best_mod = utils.fit_classifier_with_crossvalidation(
X, y, basemod, cv, param_grid)
return best_mod
# In[23]:
softmax_experiment = sst.experiment(
SST_HOME,
unigrams_phi,
fit_softmax_with_crossvalidation,
class_func=sst.ternary_class_func)
# ### Example using BasicSGDClassifier
# The models written for this course are also compatible with this framework. They ["duck type"](https://en.wikipedia.org/wiki/Duck_typing) the `sklearn` models by having methods `fit`, `predict`, `get_params`, and `set_params`, and an attribute `params`.
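# To make that interface concrete, here is a minimal, hypothetical skeleton of a duck-typed estimator; the `eta` hyperparameter and the trivial fit/predict logic are assumptions for illustration only.
class DuckTypedEstimatorSketch:
    """Sketch of the interface expected by the cross-validation utility."""
    def __init__(self, eta=0.1):
        self.eta = eta
        self.params = ['eta']
    def fit(self, X, y):
        # A real model would learn weights here; the sketch just memorizes a default label.
        self._default = y[0]
        return self
    def predict(self, X):
        return [self._default for _ in X]
    def get_params(self, deep=True):
        return {p: getattr(self, p) for p in self.params}
    def set_params(self, **params):
        for key, val in params.items():
            setattr(self, key, val)
        return self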
# In[24]:
def fit_basic_sgd_classifier_with_crossvalidation(X, y):
basemod = BasicSGDClassifier()
cv = 5
    param_grid =
<filename>refman/refman.py
# RefMan - A Simple python-based reference manager.
# Author: <NAME> (<EMAIL>)
import argparse
import dataclasses
import hashlib
import json
import logging
import os
from pathlib import Path
import re
import shutil
import subprocess
from typing import Iterable, List, Tuple
from arxiv2bib import arxiv2bib
import bibtexparser
import pandas as pd
import pyperclip
from tqdm import tqdm
import typer
from ._constants import (
EDITOR,
REFMAN_DIR,
PAPER_DIR,
BIB_DB,
BIB_REF,
META_NAME,
BIBTEX_NAME,
NOTES_NAME,
CROSSREF_URL,
ARXIV_BIBTEX_URL,
ARXIV_PDF_URL,
FMT_BIBTEX,
FMT_CITEPROC,
)
from ._utils import (
md5_hexdigest,
is_valid_url,
is_valid_doi,
fmt_arxiv_bibtex,
fix_bibtex,
ua_requester_get,
)
from ._app import init_flask_app
from ._scihub import SciHub
STATUS_HANDLER = None
LOGGER = logging.getLogger(f"refman.{__name__}")
LOGGER.setLevel(logging.DEBUG)
SH = SciHub()
APP = typer.Typer(help="RefMan - A Simple python-based reference manager.")
def update_status(msg: str, level_attr: str = "info"):
global STATUS_HANDLER
if not isinstance(STATUS_HANDLER, tqdm):
# LOGGER.__getattr__(level_attr)
LOGGER.info(msg)
return
STATUS_HANDLER.set_postfix_str(msg)
def progress_with_status(it: Iterable):
global STATUS_HANDLER
ncols = str(min(len(it), 20))
barfmt = "{l_bar}{bar:" + ncols + "}{r_bar}{bar:-" + ncols + "b}"
return (STATUS_HANDLER := tqdm(it, bar_format=barfmt))
def reset_progress_status_handler():
global STATUS_HANDLER
STATUS_HANDLER = None
@dataclasses.dataclass
class Paper:
meta: dict
bibtex: str
pdf_data: bytes = dataclasses.field(default=None)
notes_data: str = dataclasses.field(default=None)
meta_name: str = dataclasses.field(init=False, default=META_NAME)
bibtex_name: str = dataclasses.field(init=False, default=BIBTEX_NAME)
notes_name: str = dataclasses.field(init=False, default=NOTES_NAME)
@property
def paper_path(self):
return REFMAN_DIR / "_".join((self._bibtex_key, md5_hexdigest(self.bibtex)[:7]))
@property
def meta_path(self):
return self.paper_path / self.meta_name
@property
def bibtex_path(self):
return self.paper_path / self.bibtex_name
@property
def pdf_path(self):
return self.paper_path / (self._bibtex_key + ".pdf")
@property
def notes_path(self):
return self.paper_path / self.notes_name
@property
def _bibtex_key(self) -> str:
return self._bibtex_key_from_bibtex_str(self.meta)
@classmethod
def _bibtex_key_from_bibtex_str(cls, meta: dict):
return meta.get("ID")
@classmethod
def _parser(self):
return bibtexparser.bparser.BibTexParser(common_strings=True)
@classmethod
def _update_bibtex_str_key(cls, bibtex_str: str, key: str):
bib = bibtexparser.loads(bibtex_str, cls._parser())
if not bib.entries[0].get("DOI", False):
bib.entries[0]["DOI"] = ""
bib.entries[0]["ID"] = key
return bibtexparser.dumps(bib)
@classmethod
def _fix_bibtex_format(cls, bib_str: str) -> str:
return fix_bibtex(bib_str)
@classmethod
def parse_from_disk(cls, paper_path: Path, read_pdf: bool = False):
"""Returns a `Paper` object from disk."""
update_status(f"{paper_path}: Loading reference from disk.")
with open(paper_path / cls.meta_name, "r") as f:
meta = json.load(f)
with open(paper_path / cls.bibtex_name, "r") as f:
bibtex = f.read()
notes = None
if (paper_path / cls.notes_name).exists():
notes = (paper_path / cls.notes_name).read_text()
pdf_data = None
if read_pdf:
            try:
                pdf_path = next(paper_path.glob("*.pdf"))
                update_status(f"{paper_path}: Found {pdf_path.name}.")
                # Read (not write) the existing PDF; glob already yields the full path.
                with open(pdf_path, "rb") as f:
                    pdf_data = f.read()
            except StopIteration:
                update_status(f"{paper_path}: No PDF found.")
return cls(meta=meta, bibtex=bibtex, pdf_data=pdf_data, notes_data=notes)
@classmethod
def new_paper_from_arxiv(cls, arxiv: str, key: str = None):
"""Adds a new paper to the `papers` dir from an arxiv str"""
update_status(f"{arxiv=}: Retrieving bibtex entry.")
bib_str = ua_requester_get(ARXIV_BIBTEX_URL.format(arxiv=arxiv)).content.decode(
"utf-8"
)
bib_str = fmt_arxiv_bibtex(bib_str)
bib_str = cls._fix_bibtex_format(bib_str)
# Set custom key if requested
if key is not None:
bib_str = cls._update_bibtex_str_key(bib_str, key)
meta = dict(bibtexparser.loads(bib_str, cls._parser()).entries[0])
update_status(f"{arxiv=}: Retrieving PDF.")
pdf_data = ua_requester_get(ARXIV_PDF_URL.format(arxiv=arxiv)).content
paper = cls(meta=meta, bibtex=bib_str, pdf_data=pdf_data)
paper.to_disk()
return paper
@classmethod
def new_paper_from_doi(cls, doi: str, key: str = None, pdf: str = None):
"""Adds a new paper to the `papers` dir from a DOI"""
# Fetch the reference data from cross-ref
if not is_valid_doi(doi):
raise ValueError(f"Provided {doi=} is not a valid DOI.")
update_status(f"{doi=}: Retrieving structured reference info.")
r = ua_requester_get((url := CROSSREF_URL.format(doi=doi, fmt=FMT_CITEPROC)))
if not r.ok:
raise ValueError(
"Could not get citeproc+json from crossref.\n"
f"HTTP Response: {r.status_code}\nDOI: {doi}\nURL: {url}"
)
citeproc_json = dict(r.json())
update_status(f"{doi=}: Retrieving bibtex entry.")
bib_str = cls._fix_bibtex_format(
ua_requester_get(
CROSSREF_URL.format(doi=doi.lower(), fmt=FMT_BIBTEX)
).content.decode("utf-8")
)
update_status(f"{doi=}: Retrieving PDF.")
pdf_data = None
if pdf is not None:
pdf_data = cls._get_pdf_data_from_path(pdf_path=str(pdf))
if pdf_data is None:
pdf_data = cls._get_pdf_data_from_doi(doi, citeproc_json)
# Set custom key if requested
if key is not None:
bib_str = cls._update_bibtex_str_key(bib_str, key)
# Prepare the Paper object
meta = dict(bibtexparser.loads(bib_str, cls._parser()).entries[0])
paper = Paper(meta=meta, bibtex=bib_str, pdf_data=pdf_data)
paper.to_disk()
return paper
@classmethod
def new_paper_from_bibtex(
cls,
bibtex_str: str,
pdf_path: str = None,
key: str = None,
):
"""Adds a new paper to the `papers` dir from a bibtex_str.
Optionally: Associate a pdf with the paper via local path or url
"""
# Set custom key if requested
if key is not None:
bibtex_str = cls._update_bibtex_str_key(bibtex_str, key)
bibtex_str = cls._fix_bibtex_format(bibtex_str)
bib = bibtexparser.loads(bibtex_str, cls._parser())
meta = dict(bib.entries[0])
bibtex_key = cls._bibtex_key_from_bibtex_str(meta)
LOGGER.info(f"{bibtex_key}: Parsing BibTeX string.")
pdf_data = None
if pdf_path is not None:
pdf_data = cls._get_pdf_data_from_path(pdf_path=str(pdf_path))
paper = Paper(meta=meta, bibtex=bibtex_str, pdf_data=pdf_data)
paper.to_disk()
return paper
@classmethod
def _get_pdf_data_from_path(cls, pdf_path: str) -> bytes:
LOGGER.info(f"Getting PDF from path.")
pdf_data = None
if is_valid_url(pdf_path):
r = ua_requester_get(pdf_path)
if "application/pdf" not in r.headers["Content-Type"]:
LOGGER.warning(f"{pdf_path} did not contain a PDF.")
pdf_data = None
else:
LOGGER.info(f"Got PDF from URL.")
pdf_data = r.content
else:
if pdf_path is not None and Path(pdf_path).exists():
with open(pdf_path, "rb") as f:
LOGGER.info(f"Got PDF from DISK.")
pdf_data = f.read()
return pdf_data
@classmethod
def _get_pdf_data_from_doi(cls, doi: str, citeproc_json: dict) -> bytes:
# Try to download from other available sources before sci-hub
if "link" in citeproc_json.keys():
update_status(f"{doi=}: Found PDF link from crossref, attempting download.")
try:
link = next(
l["URL"]
for l in citeproc_json["link"]
if l["content-type"] == "application/pdf"
)
r = ua_requester_get(link)
if "application/pdf" not in r.headers["Content-Type"]:
raise TypeError(f"Link did not contain a PDF.")
update_status(f"{doi=}: Got PDF from CITEPROC.")
return r.content
except StopIteration as e:
update_status(
f"{doi=}: While citeproc+json contains link(s), none link to a PDF."
)
except Exception as e:
update_status(
f"{doi=}: Could not get PDF from citeproc+json. "
f"Reason: {str(e)}"
)
# Download the PDF using sci-hub
update_status(f"{doi=}: Falling back to sci-hub for PDF retrieval.")
try:
pdf_data = SH.fetch(doi)["pdf"]
update_status(f"{doi=}: Got PDF from SCI-HUB.")
return pdf_data
except Exception as e:
LOGGER.warning(
f"For {doi=}: Failed to get PDF from sci-hub. No PDF available."
f"Reason: {str(e)}"
)
return bytes()
def to_disk(self):
self.paper_path.mkdir(exist_ok=True, parents=True)
with open(self.meta_path, "w") as f:
json.dump(self.meta, f)
with open(self.bibtex_path, "w") as f:
f.write(self.bibtex)
if self.notes_data is not None and len(self.notes_data) > 0:
with open(self.notes_path, "w") as f:
f.write(self.notes_data)
if self.pdf_data is not None and len(self.pdf_data) > 0:
with open(self.pdf_path, "wb") as f:
f.write(self.pdf_data)
@dataclasses.dataclass
class RefMan:
db: pd.DataFrame = dataclasses.field(init=False, default=None)
def __post_init__(self):
if not os.getenv("REFMAN_DATA", False):
LOGGER.warning(
f"`REFMAN_DATA` not found in environment variables. Using '{REFMAN_DIR}' as data path."
)
REFMAN_DIR.mkdir(exist_ok=True, parents=True)
LOGGER.info("Parsing database.")
paper_paths = self._paper_paths_list()
if len(paper_paths) > 0:
path_it = progress_with_status(paper_paths)
papers = list(map(Paper.parse_from_disk, path_it))
reset_progress_status_handler()
self.db = pd.DataFrame(map(self._get_paper_meta, papers))
else:
self.db = pd.DataFrame(None)
def _paper_paths_list(self):
return list(map(lambda x: x.parent, REFMAN_DIR.rglob(META_NAME)))
def _get_paper_meta(self, paper: Paper) -> dict:
return {
"year": paper.meta.get("year", ""),
"bibtex_key": str(paper._bibtex_key),
"title": paper.meta.get("title", ""),
"doi": paper.meta.get("doi", "").lower(), # For parsing via DOI
"eprint": paper.meta.get("eprint", ""), # For parsing via arxiv,
"paper_path": str(paper.paper_path),
"bibtex_path": str(paper.bibtex_path),
}
def append_to_db(self, paper: Paper):
self.db = self.db.append(self._get_paper_meta(paper), ignore_index=True)
def remove_from_db(self, column: str, value: str):
self.db = self.db[self.db[column] != value]
def add_using_arxiv(self, arxiv: str, key: str = None):
if (
not arxiv
or not arxiv[:4].isnumeric()
or not arxiv[5:].isnumeric()
or arxiv[4] != "."
):
raise ValueError(f"Invalid {arxiv=}")
if arxiv in list(self.db.get("eprint", list())):
logging.info(f"{arxiv=} already in DB. Nothing to do.")
            path = self.db[self.db["eprint"] == arxiv].iloc[0]["paper_path"]
paper = Paper.parse_from_disk(Path(path))
return paper.meta.get("ID", "")
paper = Paper.new_paper_from_arxiv(arxiv, key)
self.append_to_db(paper)
self._update_db()
return paper.meta.get("ID", "")
def add_using_doi(self, doi: str, key: str, pdf: str):
if not doi:
raise ValueError(f"Invalid: {doi=}")
if doi.lower() in list(self.db.get("doi", list())):
logging.info(f"{doi=} already in DB. Nothing to do.")
path = self.db[self.db["doi"] == doi.lower()].iloc[0]["paper_path"]
paper = Paper.parse_from_disk(Path(path))
return paper.meta.get("ID", "")
paper = Paper.new_paper_from_doi(doi, key, pdf)
self.append_to_db(paper)
self._update_db()
return paper.meta.get("ID", "")
def add_using_bibtex(self, bibtex_str: str, pdf_path: str, key: str = None):
paper = Paper.new_paper_from_bibtex(
bibtex_str=bibtex_str, pdf_path=pdf_path, key=key
)
self.append_to_db(paper)
self._update_db()
return paper.meta.get("ID", "")
def rekey(self, key: str, new_key: str):
paper_path_li = list(REFMAN_DIR.glob(key + "*"))
if len(paper_path_li) > 1:
raise ValueError(
f"Multiple papers matching wildcard: {key + '*'}.\n{paper_path_li=}."
)
if len(paper_path_li) == 0:
raise FileNotFoundError(
f"No papers matching wildcard: {key + '*'}. Exiting."
)
paper_path = paper_path_li[0]
old_paper = Paper.parse_from_disk(paper_path)
new_paper = Paper.new_paper_from_bibtex(
bibtex_str=old_paper.bibtex, pdf_path=old_paper.pdf_path, key=new_key
)
# Remove the old paper
self.remove_paper(old_paper.paper_path.stem, allow_wildcard=True)
self.append_to_db(new_paper)
self._update_db()
return new_paper.meta.get("ID", "")
def meta_edit(self, key: str):
paper_path_li = list(REFMAN_DIR.glob(key + "*"))
if len(paper_path_li) > 1:
raise ValueError(
f"Multiple papers matching wildcard: {key + '*'}.\n{paper_path_li=}."
)
if len(paper_path_li) == 0:
raise FileNotFoundError(
f"No papers matching wildcard: {key + '*'}. Exiting."
)
paper_path = paper_path_li[0]
old_paper = Paper.parse_from_disk(paper_path)
subprocess.call([EDITOR, old_paper.bibtex_path])
with open(old_paper.bibtex_path, "r") as f:
new_bibtex = f.read()
if new_bibtex == old_paper.bibtex:
LOGGER.warning(
"You didn't modify the bibtex! "
"Therefore, there is nothing to do, because the metadata hasn't changed."
)
return old_paper.meta.get("ID", "")
LOGGER.info("Found changes in {old_paper.bibtex_path}. | |
<gh_stars>10-100
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
#
# P A G E B O T
#
# Copyright (c) 2016+ <NAME> + <NAME>
# www.pagebot.io
# Licensed under MIT conditions
#
# Supporting DrawBot, www.drawbot.com
# Supporting Flat, xxyxyz.org/flat
# -----------------------------------------------------------------------------
#
# flatcontext.py
#
import math
from sys import platform
from os import listdir, makedirs
from os.path import exists
#from flat import rgb
from pagebot.constants import (DEFAULT_FONT, DEFAULT_FONT_SIZE, FILETYPE_PDF,
FILETYPE_JPG, FILETYPE_SVG, FILETYPE_PNG, FILETYPE_GIF, LEFT,
DEFAULT_FILETYPE, RGB, CENTER, RIGHT, EXPORT)
from pagebot.contexts.basecontext.basecontext import BaseContext
from pagebot.contexts.basecontext.babelstring import BabelString
from pagebot.contexts.basecontext.babelrun import BabelLineInfo, BabelRunInfo
from pagebot.contexts.flatcontext.flatbuilder import flatBuilder
from pagebot.contexts.flatcontext.flatbezierpath import FlatBezierPath
from pagebot.contexts.flatcontext.flatrundata import FlatRunData
from pagebot.contexts.flatcontext.flatbabeldata import FlatBabelData
from pagebot.filepaths import ROOT_FONT_PATHS
from pagebot.fonttoolbox.fontpaths import getFontPathOfFont
from pagebot.fonttoolbox.objects.font import findFont, Font
from pagebot.mathematics import to255
from pagebot.mathematics.transform3d import Transform3D
from pagebot.toolbox.color import color, Color, noColor, blackColor
from pagebot.toolbox.units import pt, em, upt, units
class FlatContext(BaseContext):
"""The FlatContext implements the Flat functionality within the PageBot
framework.
Because the origin is at the bottom, like in DrawBot and as opposed to
Flat, we need to subtract all vertical coordinates from the page height
before an object gets placed. In case of (bounding) boxes, we also need to
subtract the box height.
TODO: merge getTransformed() and translatePoint()?
* xxyxyz.org/flat
Text behavior:
st = strike(font)
st.size(size, leading=0.0, units=Pt.UNIT)
st.color(color)
st.width(string)
sp = st.span(string)
par = st.paragraph(string)
txt = st.text(string)
placed = page.place()
outl = outlines(string)
par = paragraph(spans)
placed = placetext()
placed.position(x, y)
placed.frame(x, y, w, h)
placed.overflow()
placed.lines()
"""
EXPORT_TYPES = (FILETYPE_PDF, FILETYPE_SVG, FILETYPE_PNG, FILETYPE_JPG)
# Default is point document, currently untested for other units.
UNITS = 'pt'
def __init__(self):
"""Constructor of Flat context.
>>> context = FlatContext()
>>> context.newPage(100, 100)
>>> drawing = context.getDrawing()
>>> drawing.__class__.__name__
'document'
"""
super().__init__()
self.b = flatBuilder
# Save current set of values on gState stack.
self.save()
# Current open shape.
self.shape = None
self.fileType = DEFAULT_FILETYPE
self._drawing = None
self._numberOfPages = 0
self._flatFonts = {} # Caching of {font.path:flatFont}
self.setTransform3D()
def setTransform3D(self):
"""Initializes the transforrmation matrix to be used for translation,
rotation and scaling."""
self.transform3D = Transform3D()
# Drawing.
#
# Like in DrawBot, newDrawing creates a new page, while new page creates
# a drawing if it doesn't exist yet. Drawing size is taken from the element
# level document, from size tuple, from w and h, or is set to 1000 x 1000px
# in that order. Changing of drawing size over multiple pages still needs to
# be supported.
def getDrawingSize(self, w=None, h=None, size=None, doc=None):
"""We assume origin is at the bottom, just like in DrawBot."""
if doc is not None:
w = doc.w
h = doc.h
elif size is not None:
w, h = size
w, h = self._getValidSize(w, h)
# Dimensions not allowed to be None or Flat document won't render.
assert w is not None
assert h is not None
# Converts units to point values. Stores width and height information
# in Flat document.
return upt(w, h)
def _getValidSize(self, w, h):
"""Answer a valid size for FlatContext document and pages. Make
default page size, similar to DrawBot."""
if w is None or w < 0:
w = pt(1000)
if h is None or h < 0:
h = pt(1000)
return units(w), units(h)
def newDrawing(self, w=None, h=None, size=None, doc=None, doPage=True):
"""Creates a new Flat canvas to draw on. Flipped `y`-axis by default to
conform to DrawBot's drawing methods.
>>> context = FlatContext()
>>> context.newPage(100, 100)
>>> drawing = context.getDrawing()
>>> int(drawing.width), int(drawing.height)
(100, 100)
"""
if self._drawing:
self.clear()
wpt, hpt = self.getDrawingSize(w=w, h=h, size=size, doc=doc)
self._drawing = self.b.document(wpt, hpt, units=self.UNITS)
if doPage:
self.newPage(w=w, h=h)
def newPage(self, w=None, h=None, doc=None, **kwargs):
"""Other page sizes than default in self._drawing are ignored in Flat.
NOTE: this generates a flat.page, not to be confused with PageBot page.
TODO: when size changes, keep track of multiple Flat documents.
Alternatively, we can try to resize, but not sure what happens with the
dimensions of previous pages (needs testing).
self._drawing.size(wpt, hpt, units=self.UNITS)
documents::
if w != self.w:
<add document to stack>
if h != self.h
<add document to stack>
>>> from pagebot.toolbox.units import pt
>>> context = FlatContext()
>>> w = h = pt(100)
>>> context.newPage(w, h)
"""
w, h = self.getDrawingSize(w=w, h=h, doc=doc)
if not self._drawing:
self.newDrawing(w=w, h=h, doPage=False)
assert self._drawing
self._numberOfPages += 1
self._drawing.addpage()
def saveDrawing(self, path, multiPage=None):
"""Save the current document to file(s)
>>> import os
>>> from pagebot.toolbox.units import pt
>>> from pagebot.filepaths import getRootPath
>>> from pagebot.toolbox.color import blackColor
>>> # _export/* Files are ignored in git.
>>> exportPath = getRootPath() + '/_export'
>>> if not exists(exportPath): os.makedirs(exportPath)
>>> context = FlatContext()
>>> w = h = pt(100)
>>> x = y = pt(0)
>>> c = blackColor
"""
"""
FIX
>>> context.fileType = FILETYPE_JPG
>>> context.newPage(w, h)
>>> context.fill(c)
>>> context.rect(x, y, w-20, h-20)
# Flat is too strict with color-format match?
>>> context.saveDrawing(exportPath + '/MyTextDocument_F.%s' % FILETYPE_JPG)
>>> context.fileType = FILETYPE_PDF
>>> context.newPage(w, h)
>>> context.fill(c)
>>> context.rect(x, y, w-20, h-20)
# Flat is too strict with color-format match?
>>> context.saveDrawing(exportPath + '/MyTextDocument_F.%s' % FILETYPE_PDF)
>>> context.fileType = FILETYPE_PNG
>>> context.newPage(w, h)
>>> context.fill(c)
>>> context.rect(x, y, w-20, h-20)
>>> context.saveDrawing(exportPath + '/MyTextDocument_F.%s' % FILETYPE_PNG)
>>> context.saveDrawing(exportPath + '/MyTextDocument_F.gif')
[FlatContext] Gif not yet implemented for "MyTextDocument_F.gif"
"""
        if multiPage is None:
multiPage = True
# In case path starts with "_export", make sure that the directory
# exists.
self.checkExportPath(path)
self.fileType = path.split('.')[-1].lower()
if self.fileType == FILETYPE_PNG:
if len(self._drawing.pages) == 1 or not multiPage:
im = self._drawing.pages[0].image(kind=RGB)
im.png(path)
else:
for n, p in enumerate(self._drawing.pages):
pagePath = path.replace('.'+FILETYPE_PNG, '%03d.%s' % (n, FILETYPE_PNG))
p.image(kind=RGB).png(pagePath)
elif self.fileType == FILETYPE_JPG:
if len(self._drawing.pages) == 1 or not multiPage:
self._drawing.pages[0].image(kind=RGB).jpeg(path)
else:
for n, p in enumerate(self._drawing.pages):
                    pagePath = path.replace('.'+FILETYPE_JPG, '%03d.%s' % (n, FILETYPE_JPG))
p.image(kind=RGB).jpeg(pagePath)
elif self.fileType == FILETYPE_SVG:
if len(self._drawing.pages) == 1 or not multiPage:
self._drawing.pages[0].svg(path)
else:
for n, p in enumerate(self._drawing.pages):
pagePath = path.replace('.'+FILETYPE_SVG, '%03d.%s' % (n, FILETYPE_SVG))
p.svg(pagePath)
elif self.fileType == FILETYPE_PDF:
self._drawing.pdf(path) # Cannot render rgba.
elif self.fileType == FILETYPE_GIF:
msg = '[FlatContext] Gif not yet implemented for "%s"' % path.split('/')[-1]
print(msg)
else:
msg = '[FlatContext] File format "%s" is not implemented' % path.split('/')[-1]
raise NotImplementedError(msg)
# Compatibility with DrawBot API.
saveImage = saveDrawing
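    # Note on multi-page export naming, derived from the replace() calls above
    # (example file names are illustrative): saving a multi-page drawing to
    # 'doc.png' writes 'doc000.png', 'doc001.png', ... one file per page; SVG
    # behaves the same way, while PDF keeps all pages in a single file.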
def export(self, fileName, folderName=None, extension=None):
"""Saves file to filename with default folder name and extension."""
if not folderName:
folderName = EXPORT
if not extension:
extension = 'pdf'
path = '%s/%s.%s' % (folderName, fileName, extension)
self.saveImage(path)
# Styles
def setStyles(self, styles):
"""Sets the dictionary of style dictionaries. E.g. to be transformed
into paragraph styles in InDesignContext or to be used as styles
for context strings."""
# Magic variables.
def pageCount(self):
return self._numberOfPages
# Public callbacks.
def endDrawing(self, doc=None):
self._drawing = None
def clear(self):
self.endDrawing()
self._numberOfPages = 0
def getDrawing(self):
"""Returns the drawing object in the current state."""
return self._drawing
'''
def setSize(self, w=None, h=None):
"""Set the initial page size of the context, in case something is drawn
before a document or page is created.
>>> from pagebot.toolbox.units import mm, cm
>>> context = FlatContext()
"""
# FIXME: we should get these values from Flat drawing).
"""
>>> context.w, context.h
(None, None)
>>> context.setSize()
>>> context.w, context.h
(1000pt, 1000pt)
>>> context.setSize(100, 200)
>>> context.w, context.h
(100pt, 200pt)
>>> context.setSize(mm(100), cm(200))
>>> context.w, context.h
(100mm, 200cm)
"""
#self.w, self.h = self._getValidSize(w, h)
# TODO: see if Flat document can be resized, else we should create a
# new one and copy its contents.
'''
def getTmpPage(self, w, h):
drawing = self.b.document(w, h, units=self.UNITS)
drawing.addpage()
return drawing.pages[0]
def _get_page(self):
W = 800
H = 600
if self._drawing is None:
self.newDrawing(w=W, h=H)
if self._numberOfPages == 0:
self.newPage(w=W, h=H)
assert self._drawing
assert self._drawing.pages
return self._drawing.pages[-1]
page = property(_get_page)
def _get_width(self):
return self._drawing.width
width = property(_get_width)
def _get_height(self):
return self._drawing.height
height = property(_get_height)
def getTransformed(self, x, y, z=0):
"""Takes a point and puts it through the transformation matrix,
handling translation, rotation and scaling at once.
>>> context = FlatContext()
>>> w = 800
>>> h = 600
        """
import sys
import time
from options.train_options import TrainOptions
from my_seg_depth.my_data import dataloader
from util.visualizer import Visualizer
from tensorboardX import SummaryWriter
import my_seg_depth.networks as networks
from torch.nn import init
import torch
#from .model import Seg_Depth
#from .networks import G_1,Discriminator,Feature_net,SEG,DEP
import itertools
from util.image_pool import ImagePool
import torch.nn as nn
from util.util import scale_pyramid
import os
import util.util as util
from collections import OrderedDict
class BaseModel():
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def name(self):
return 'BaseModel'
def initialize(self,opt):
self.opt = opt
# self.gpu_ids = opt.gpu_ids
self.is_Train = opt.isTrain
# self.device = torch.device('cuda:{}'.format(self.gpu_ids[0,1])) \
# if self.gpu_ids else torch.device('cpu')
self.save_dir = os.path.join(opt.checkpoints_dir,opt.name)
if opt.resize_or_crop != 'scale_width':
torch.backends.cudnn.benchmark = True
self.loss_names = []
self.model_names = []
self.visual_names = []
self.image_paths = []
def set_input(self,input):
self.input = input
def forward(self):
pass
def setup(self, opt, parser=None):
        if self.is_Train:
self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
        if not self.is_Train or opt.continue_train:
self.load_networks(opt.epoch)
self.print_networks(opt.verbose)
# make models eval mode during test time
def eval(self):
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net_' + name)
net.eval()
def test(self):
with torch.no_grad():
self.forward()
# get image paths
def get_image_paths(self):
return self.image_paths
def optimize_parameters(self,train_or_test):
pass
# update learning rate (called once every epoch)
def update_learning_rate(self):
for scheduler in self.schedulers:
scheduler.step()
lr = self.optimizers[0].param_groups[0]['lr']
print('learning rate = %.7f' % lr)
# return visualization images. train.py will display these images, and save the images to a html
def get_current_visuals(self):
visual_ret = OrderedDict()
for name in self.visual_names:
if isinstance(name, str):
value = getattr(self, name)
if isinstance(value, list):
visual_data=value[-1].data
else:
visual_data=value.data
segname_syn=['syn_seg_l',
'syn_seg_pre']
segname_real=['real_seg_l',
'real_seg_pre']
if (name in segname_syn):
visual_ret[name]=util.label2im(visual_data)
elif (name in segname_real):
visual_ret[name]=util.label2im(visual_data)
else:
visual_ret[name] = util.tensor2im(visual_data)
return visual_ret
# return traning losses/errors. train.py will print out these errors as debugging information
def get_current_losses(self):
errors_ret = OrderedDict()
for name in self.loss_names:
if isinstance(name, str):
# float(...) works for both scalar tensor and float number
errors_ret[name] = float(getattr(self,'loss_'+name))
return errors_ret
# save models to the disk
def save_networks(self, epoch):
for name in self.model_names:
if isinstance(name, str):
save_filename = '%s_net_%s.pth' % (epoch, name)
save_path = os.path.join(self.save_dir, save_filename)
net = getattr(self, 'net_' + name)
torch.save(net.state_dict(), save_path)
def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
key = keys[i]
if i + 1 == len(keys): # at the end, pointing to a parameter/buffer
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'running_mean' or key == 'running_var'):
if getattr(module, key) is None:
state_dict.pop('.'.join(keys))
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'num_batches_tracked'):
state_dict.pop('.'.join(keys))
else:
self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)
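    # Example of what the patch above does (hypothetical key names): for a
    # checkpoint entry like 'model.3.running_mean' whose module is an
    # InstanceNorm layer with running stats disabled (the attribute is None),
    # the key is popped from state_dict; 'num_batches_tracked' keys under
    # InstanceNorm layers are always popped. This keeps checkpoints saved with
    # PyTorch < 0.4 loadable by load_state_dict().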
# load models from the disk
def load_networks(self, epoch):
for name in self.model_names:
if isinstance(name, str):
load_filename = '%s_net_%s.pth' % (epoch, name)
load_path = os.path.join(self.save_dir, load_filename)
                net = getattr(self, 'net_' + name)
if isinstance(net, torch.nn.DataParallel):
net = net.module
print('loading the model from %s' % load_path)
# if you are using PyTorch newer than 0.4 (e.g., built from
# GitHub source), you can remove str() on self.device
state_dict = torch.load(load_path, map_location=str(self.device))
if hasattr(state_dict, '_metadata'):
del state_dict._metadata
# patch InstanceNorm checkpoints prior to 0.4
for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop
self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
net.load_state_dict(state_dict)
# print network information
def print_networks(self, verbose):
print('---------- Networks initialized -------------')
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net_' + name)
num_params = 0
for param in net.parameters():
num_params += param.numel()
if verbose:
print(net)
print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
print('-----------------------------------------------')
    # set requires_grad=False to avoid computation
def set_requires_grad(self, nets, requires_grad=False):
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
def init_weights(net, init_type='normal', gain=0.02):
def init_func(m):
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1:
init.normal_(m.weight.data, 1.0, gain)
init.constant_(m.bias.data, 0.0)
print('initialize network with %s' % init_type)
net.apply(init_func)
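# Minimal usage sketch (illustrative): init_weights(my_net, init_type='kaiming')
# applies kaiming init to every Conv/Linear weight, initializes BatchNorm2d
# weights from N(1.0, gain) with zero bias, and raises NotImplementedError for
# an unknown init_type.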
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
# if len(gpu_ids) > 0:
# assert(torch.cuda.is_available())
# net.to(gpu_ids[0])
# net = torch.nn.DataParallel(net, gpu_ids)
net = net.cuda()
net = nn.DataParallel(net)
init_weights(net, init_type, gain=init_gain)
return net
class Seg_Depth(BaseModel):
def name(self):
return 'Seg_Depth_Model'
def initialize(self,opt):
BaseModel.initialize(self,opt)
self.loss_names=['G1_dis','G1_seg','D_G1',
'G2_dis','G2_seg','D_G2',
'seg_syn','seg_real','dep_syn'
]
self.visual_names = ['syn_img','real_img','syn_seg_l','real_seg_l',
'syn_seg_pre','real_seg_pre',
'syn_dep_l','syn_dep_pre']
if self.is_Train:
self.model_names = ['G_1', 'G_2', 'Dis_en',
'Feature','Seg_de','Dep_de'
]
else: # during test time, only load Gs
self.model_names = ['G_1', 'G_2',
'Feature','Seg_de','Dep_de']
self.net_G_1 = networks.define_G(opt.input_nc, opt.output_nc, opt.netG, opt.init_type, opt.init_gain)
self.net_G_2 = networks.define_G(opt.input_nc, opt.output_nc, opt.netG,
opt.init_type, opt.init_gain)
self.net_Dis_en = networks.define_D(input_nc=128)
# self.net_Feature = networks.Feature_net(input_nc=128,mid_nc =1024)
self.net_Feature = init_net(networks.Feature_net(input_nc=128,mid_nc =1024))
self.net_Seg_de = init_net(networks.SEG(n_cls=28))
self.net_Dep_de = init_net(networks.DEP())
self.optimizer_G_1 = torch.optim.Adam(self.net_G_1.parameters(),
lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_G_2 = torch.optim.Adam(self.net_G_2.parameters(),
lr=opt.lr,betas=(opt.beta1,0.999))
self.optimizer_F = torch.optim.Adam(self.net_Feature.parameters(),
lr=opt.lr,betas=(opt.beta1,0.999))
self.optimizer_Seg = torch.optim.Adam(self.net_Seg_de.parameters(),
lr=opt.lr,betas=(opt.beta1,0.999))
self.optimizer_Dep = torch.optim.Adam(self.net_Dep_de.parameters(),
lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D = torch.optim.Adam(itertools.chain(self.net_Dis_en.parameters()),
lr=opt.lr_D, betas=(opt.beta1, 0.999))
self.syn_imgpool = ImagePool(opt.pool_size)
self.real_imgpool = ImagePool(opt.pool_size)
self.criterionGAN = networks.GANLoss(use_lsgan =not opt.no_lsgan).cuda()
self.criterionSeg = torch.nn.CrossEntropyLoss(size_average=True, ignore_index=255).cuda()
self.criterionDep = torch.nn.L1Loss()
def set_input(self,input):
self.real_img = input['img_real'].cuda()
self.syn_img = input['img_syn'].cuda()
self.real_seg_l = input['seg_l_real'].squeeze(1).cuda()
self.syn_seg_l = input['seg_l_syn'].squeeze(1).cuda()
#print(self.syn_seg_l.shape)
self.syn_dep_l = input['dep_l_syn'].squeeze(1).cuda()
def forward(self):
self.syn_features1 = self.net_G_1(self.syn_img)
self.real_features1 = self.net_G_2(self.real_img)
def forward_features2(self,features1):
self.features2 = self.net_Feature(features1.detach())
return self.features2
def backward_D(self):
pre_s = self.net_Dis_en(self.syn_features1.detach())
self.loss_D_G1 = self.criterionGAN(pre_s,False)
pre_r = self.net_Dis_en(self.real_features1.detach())
self.loss_D_G2 = self.criterionGAN(pre_r,True)
return self.loss_D_G1+self.loss_D_G2
def detach_list(self,list):
for i in list:
i = i.detach()
return list
def backward_Seg(self):
syn_f2,syn_inf = self.net_Feature(self.syn_features1.detach())
real_f2, real_inf = self.net_Feature(self.real_features1.detach())
self.syn_seg_pre = self.net_Seg_de(self.detach_list(syn_f2),syn_inf.detach())
self.real_seg_pre = self.net_Seg_de(self.detach_list(real_f2), real_inf.detach())
self.loss_seg_syn = self.criterionSeg(self.syn_seg_pre,self.syn_seg_l)
self.loss_seg_real = self.criterionSeg(self.real_seg_pre,self.real_seg_l)
return self.loss_seg_real+self.loss_seg_syn
def backward_Dep(self):
syn_f2, syn_inf = self.net_Feature(self.syn_features1.detach())
self.syn_dep_pre = self.net_Dep_de(self.detach_list(syn_f2),syn_inf.detach())
self.loss_dep_syn = self.criterionDep(self.syn_dep_pre,self.syn_dep_l)
return self.loss_dep_syn
def backward_G(self):
#print(self.syn_features1.shape)
pre_s = self.net_Dis_en(self.syn_features1)
self.loss_G1_dis = self.criterionGAN(pre_s, True)
pre_r = self.net_Dis_en(self.real_features1)
self.loss_G2_dis = self.criterionGAN(pre_r, False)
syn_f2, syn_inf = self.net_Feature(self.syn_features1)
real_f2, real_inf = self.net_Feature(self.real_features1)
seg_syn_pre = self.net_Seg_de(syn_f2, syn_inf)
seg_real_pre = self.net_Seg_de(real_f2, real_inf)
#print(seg_real_pre.shape,self.real_seg_l.shape)
self.loss_G1_seg = self.criterionSeg(seg_syn_pre, self.syn_seg_l)
self.loss_G2_seg = self.criterionSeg(seg_real_pre, self.real_seg_l)
# self.loss_G_1 = self.+loss_seg_real
# self.loss_G_2 = loss_G_syn+loss_seg_syn
return self.loss_G1_dis+self.loss_G1_seg,self.loss_G2_dis+self.loss_G2_seg
def optimize_parameters(self,train_or_test):
self.set_requires_grad(self.net_Dis_en, False)
self.set_requires_grad([self.net_G_1, self.net_G_2,
self.net_Seg_de, self.net_Feature, self.net_Dep_de], False)
if (train_or_test=='train'):
print('train')
self.set_requires_grad(self.net_Dis_en,False)
self.set_requires_grad([self.net_G_1,self.net_G_2,
self.net_Seg_de,self.net_Feature,self.net_Dep_de],True)
self.forward()
self.optimizer_G_1.zero_grad()
self.optimizer_G_2.zero_grad()
self.loss_G1,self.loss_G2 =self.backward_G()
if (train_or_test == 'train'):
print('g_update')
self.loss_G1.backward()
self.loss_G2.backward()
self.optimizer_G_1.step()
self.optimizer_G_2.step()
self.optimizer_F.zero_grad()
self.loss_s = self.backward_Seg()
self.loss_d = self.backward_Dep()
self.loss_ff = 5*self.loss_s+self.loss_d
if (train_or_test == 'train'):
            print('F update')
self.loss_ff.backward()
self.optimizer_F.step()
#self.forward()
self.optimizer_Seg.zero_grad()
self.loss_s = self.backward_Seg()
if (train_or_test == 'train'):
self.loss_s.backward()
print('seg update')
self.optimizer_Seg.step()
#self.forward()
self.optimizer_Dep.zero_grad()
self.loss_d = self.backward_Dep()
if (train_or_test == 'train'):
self.loss_d.backward()
print('dep update')
self.optimizer_Dep.step()
if (train_or_test == 'train'):
self.set_requires_grad(self.net_Dis_en, True)
self.set_requires_grad([self.net_G_1, self.net_G_2,
self.net_Seg_de, self.net_Feature, self.net_Dep_de], False)
self.optimizer_D.zero_grad()
self.loss_D = self.backward_D()
self.loss_D.backward()
print('dis update')
self.optimizer_D.step()
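    # Summary of the update schedule implemented above, per training iteration:
    # (1) G_1/G_2 encoders update on adversarial + segmentation losses,
    # (2) the shared Feature net updates on 5*seg_loss + depth_loss,
    # (3) the segmentation decoder and then the depth decoder update separately,
    # (4) the feature discriminator updates last, with all other nets frozen.
    # In 'test' mode the generator/decoder losses are still computed for logging,
    # but no backward()/step() runs and the discriminator step is skipped.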
def create_model_segdepth(opt):
print(opt.model)
model = Seg_Depth()
model.initialize(opt)
print("model [%s] was created." % (model.name()))
return model
if __name__ == '__main__':
opt = TrainOptions().parse()
opt.name = 'synthia_segCycle'
dataset_train = dataloader(opt, train_or_test='train')
dataset_test = dataloader(opt, train_or_test='test')
writer_train = SummaryWriter(log_dir='./summary/my_seg_depth')
writer_test = SummaryWriter(log_dir='./summary/my_seg_depth')
model =create_model_segdepth(opt)
visualizer = Visualizer(opt)
total_steps = 0
global_iter=0
for epoch in range(1,30):
epoch_start_time = time.time()
iter_data_time = time.time()
epoch_iter = 0
for i, data in enumerate(dataset_train):
print(global_iter)
if (global_iter % 200 == 0 and global_iter > 200):
print("validation start")
model.eval()
for ii, data_test in enumerate(dataset_test):
if (ii == 50):
break
with torch.no_grad():
model.set_input(data_test)
model.optimize_parameters(train_or_test='test')
errors = model.get_current_losses()
for name, error in errors.items():
writer_test.add_scalar("{}test/{}".format(opt.name, name), error, global_iter + ii)
images = model.get_current_visuals()
for name, img in images.items():
im = torch.from_numpy(img.transpose([2, 0, 1])).squeeze(0)
writer_test.add_image("{}test/img_{}".format(opt.name, name), im, global_iter + ii)
print("validation done")
global_iter += 1
iter_start_time = time.time()
if total_steps % opt.print_freq == 0:
t_data = iter_start_time - iter_data_time
visualizer.reset()
total_steps += opt.batch_size
epoch_iter += opt.batch_size
model.set_input(data)
model.optimize_parameters(train_or_test='train')
if (global_iter % 50 == 0):
errors = model.get_current_losses()
for name, error in errors.items():
print('------------------')
writer_train.add_scalar("{}train/{}".format(opt.name, name), error, global_iter)
images = model.get_current_visuals()
for name, img in images.items():
print(name,img.shape)
img = img / img.max()
# if len(img.shape)==3:
img = torch.from_numpy(img.transpose([2, 0, 1]))
writer_train.add_image("{}train/img_{}".format(opt.name, name), img, global_iter)
if total_steps % opt.save_latest_freq == 0:
print('saving the latest model (epoch %d, total_steps %d)' %
(epoch, total_steps))
model.save_networks('iter_%d' % total_steps)
iter_data_time = time.time()
        ref = sorted(
{
'function': [root+'/'+root+'/'+root+'/'+root],
'type':TypeError,
'msg':mmsg('F'),
'raised': [False]
}.items()
)
assert re.compile(r'\d+/'+emsg('F')).match(exname)
erec['function'] = [
exobj.decode_call(call) for call in erec['function']
]
assert sorted(erec.items()) == ref
###
# Test property search
###
exobj = putil.exh.ExHandle(
full_cname=True, exclude=['_pytest', 'tests.test_exh']
)
cobj = exh_support_module_2.MyClass(exobj)
cobj.value = 5
cdb = cobj._exhobj._flatten_ex_dict()
assert len(list(cdb.keys())) == 1
key = list(cdb.keys())[0]
item = cdb[key]
assert cobj._exhobj.decode_call(item['function'][0]).endswith(
'exh_support_module_2.MyClass.value(setter)'
)
assert item['msg'] == 'Illegal value'
###
# Test exclude: test without exclusion and with exclusion,
# the function name should be 'None'
###
# Test with function that has a contract decorator
putil.exh.set_exh_obj(
putil.exh.ExHandle(full_cname=True, exclude=['_pytest'])
)
_ = putil.eng.peng(15, 3, False)
cdb = putil.exh.get_exh_obj()._flatten_ex_dict()
for item in cdb.values():
assert item['function'][0]
putil.exh.set_exh_obj(
putil.exh.ExHandle(
full_cname=True, exclude=['_pytest', 'putil.eng']
)
)
_ = putil.eng.peng(15, 3, False)
cdb = putil.exh.get_exh_obj()._flatten_ex_dict()
for item in cdb.values():
assert not item['function'][0]
# Test with function that has an exception in body
import tests.support.exh_support_module_1
putil.exh.set_exh_obj(
putil.exh.ExHandle(full_cname=True, exclude=['_pytest'])
)
tests.support.exh_support_module_1.simple_exception()
cdb = putil.exh.get_exh_obj()._flatten_ex_dict()
for item in cdb.values():
assert item['function'][0]
putil.exh.set_exh_obj(
putil.exh.ExHandle(
full_cname=True,
exclude=['_pytest', 'tests.support.exh_support_module_1']
)
)
tests.support.exh_support_module_1.simple_exception()
cdb = putil.exh.get_exh_obj()._flatten_ex_dict()
for item in cdb.values():
assert not item['function'][0]
def test_raise_exception_if_exceptions(self):
""" Test raise_exception_if method exceptions """
# pylint: disable=W0702
# Helper functions
def check_rec(mname, extype, exmsg):
assert erec['function'].endswith('{0}'.format(mname))
assert erec['type'] == extype
assert erec['msg'] == exmsg
assert erec['raised']
def check_str(exobj, slist, llist):
root = CTNAME+'.test_raise_exception_if_exceptions'
ref = [
'{0}/{0}.func_top/{0}.func_mid/{0}.func_base'.format(root),
'{0}/{0}.func_base'.format(root),
'{0}/{0}.func_mid/{0}.func_base'.format(root)
]
pb = lambda x, nflip: x if nflip else (not x)
alist = ['*' if item else '' for item in llist]
cdb_int = exobj._flatten_ex_dict()
entry = cdb_int[list(cdb_int.keys())[0]]
assert (
[exobj.decode_call(item) for item in entry['function']] == ref
)
assert entry['raised'] == llist
stxt = str(exobj).split('\n')[3:]
plist = [item.endswith(' [raised]') for item in stxt]
assert all([pb(item, sitem) for item, sitem in zip(plist, slist)])
db = exobj.exceptions_db
exmsg = 'RuntimeError (Invalid condition)'
for num in range(0, 3):
assert db[num]['data'] == exmsg+alist[num]
for num, fname in zip(range(0, 3), ref):
assert db[num]['name'] == fname
# Tests
obj = putil.exh.ExHandle()
def func3(cond1=False, cond2=False, cond3=False, cond4=False):
exobj = putil.exh.ExHandle()
items = (
(RuntimeError, 'This is an exception'),
(OSError, 'This is an exception with a *[fname]* field')
)
for num, (extype, exmsg) in enumerate(items):
exobj.add_exception('my_exception'+str(num+1), extype, exmsg)
exobj.raise_exception_if('my_exception1', cond1, edata=None)
edata = {'field':'fname', 'value':'my_file.txt'}
exobj.raise_exception_if('my_exception2', cond2, edata=edata)
if cond3:
exobj.raise_exception_if('my_exception3', False)
edata = {'field':'not_a_field', 'value':'my_file.txt'}
exobj.raise_exception_if('my_exception2', cond4, edata=edata)
return exobj
robj = obj.raise_exception_if
AI(robj, 'exname', exname=5, condition=False)
AI(robj, 'condition', exname='my_exception', condition=5)
items = [
354,
{'field':'my_field'},
{'field':3, 'value':5},
{'value':5},
[{'field':'my_field1', 'value':5}, {'field':'my_field'}],
[{'field':'my_field1', 'value':5}, {'field':3, 'value':5}],
[{'field':'my_field1', 'value':5}, {'value':5}]
]
for item in items:
AI(robj, 'edata', 'my_exception', False, item)
AE(func3, RuntimeError, 'This is an exception', True, False)
exmsg = 'This is an exception with a my_file.txt field'
AE(func3, OSError, exmsg, cond2=True)
exmsg = 'Exception name my_exception3 not found'
AE(func3, ValueError, exmsg, cond3=True)
exmsg = 'Field not_a_field not in exception message'
AE(func3, RuntimeError, exmsg, cond4=True)
# Test that edata=None works
exobj = func3()
cdb = exobj._flatten_ex_dict()
if not cdb:
pytest.fail('_ex_dict empty')
for exname, erec in cdb.items():
mname = 'test_exh.test_raise_exception.func3'
if exname.endswith('/{0}.my_exception1'.format(mname)):
check_rec(mname, RuntimeError, 'This is an exception')
if exname.endswith('/{0}.my_exception2'.format(mname)):
msg = 'This is an exception with a *[fname]* field'
check_rec(mname, OSError, msg)
exobj = putil.exh.ExHandle(full_cname=True)
def func_base(exobj, cond):
""" Test raised field """
exobj.add_exception(
'multi_path_exception', RuntimeError, 'Invalid condition'
)
exobj.raise_exception_if(
exname='multi_path_exception', condition=cond
)
def func_mid(exobj, cond):
""" Add multi-path to exception object """
func_base(exobj, cond)
def func_top(exobj, cond):
""" Add another multi-path to exception object """
func_mid(exobj, cond)
# Mangle "natural" order to test __str__, which
# sorts the function names
func_top(exobj, False)
func_base(exobj, False)
func_mid(exobj, False)
fpointers = [func_mid, func_top, func_base]
slist = [[False, True, False], [False, True, True], [True, True, True]]
llist = [[False, False, True], [True, False, True], [True, True, True]]
for fpointer, sitem, litem in zip(fpointers, slist, llist):
try:
fpointer(exobj, True)
except:
pass
check_str(exobj, sitem, litem)
def test_exceptions_db(self):
""" Test _exceptions_db property behavior """
for full_cname in [True, False]:
            # Function definitions
def func4(exobj):
exobj.add_exception(
'my_exception1', RuntimeError, 'This is exception #1'
)
def func5(exobj):
exobj.add_exception(
'my_exception2',
ValueError,
'This is exception #2, *[result]*'
)
exobj.add_exception(
'my_exception3', TypeError, 'This is exception #3'
)
exobj = putil.exh.ExHandle(full_cname)
func4(exobj)
func5(exobj)
# Actual tests
# Test that property cannot be deleted
AROPROP(exobj, 'exceptions_db')
# Test contents
tdata_in = exobj.exceptions_db
if (not tdata_in) or (len(tdata_in) != 3):
pytest.fail('Erroneous exceptions database')
tdata_out = list()
regtext1 = r'[\w|\W]+/'+CTNAME+'.test_exceptions_db.func4'
regtext2 = r'[\w|\W]+/'+CTNAME+'.test_exceptions_db.func5'
regtext3 = r'\d+/my_exception[2-3]'
cname = CTNAME+'.test_exceptions_db'
for erec in tdata_in:
name = None
if full_cname:
if re.compile(regtext1).match(erec['name']):
name = '{0}.func4'.format(cname)
elif re.compile(regtext2).match(erec['name']):
name = '{0}.func5'.format(cname)
else:
if re.compile(r'\d+/my_exception1').match(erec['name']):
name = '{0}.func4'.format(cname)
elif re.compile(regtext3).match(erec['name']):
name = '{0}.func5'.format(cname)
if not name:
pytest.fail('Exception not found')
tdata_out.append({'name':name, 'data':erec['data']})
ref = [
{
'name':'{0}.func4'.format(cname),
'data':'RuntimeError (This is exception #1)'
},
{
'name':'{0}.func5'.format(cname),
'data':'ValueError (This is exception #2, *[result]*)'
},
{
'name':'{0}.func5'.format(cname),
'data':'TypeError (This is exception #3)'
}
]
assert CLDICTS(tdata_out, ref)
def test_save_callables(self):
""" Test save_callables method behavior """
obj1 = putil.pinspect.Callables([PENG_FNAME])
with putil.misc.TmpFile() as fname1:
with putil.misc.TmpFile() as fname2:
callables_fname1 = fname1
callables_fname2 = fname2
obj1.save(callables_fname1)
obj2 = putil.exh.ExHandle(callables_fname=callables_fname1)
obj2.save_callables(callables_fname2)
obj3 = putil.pinspect.Callables()
obj3.load(callables_fname2)
assert obj1 == obj3
def test_save_callables_exceptions(self):
""" Test save_callables method exceptions """
obj = putil.exh.ExHandle()
AI(obj.save_callables, 'callables_fname', True)
def test_callables_db(self):
""" Test callables_db property behavior """
# Function definitions
def func6(exobj):
exobj.add_exception(
'my_exception', RuntimeError, 'This is an exception'
)
return exobj
# Actual tests
exobj = func6(putil.exh.ExHandle())
# Actual contents of what is returned should be checked
# in pinspect module
assert exobj.callables_db is not None
# Test that property cannot be deleted
AROPROP(exobj, 'callables_db')
def test_callables_separator(self):
""" Test callables_separator property behavior """
exobj = putil.exh.ExHandle()
# Actual contents of what is returned should be checked in
# pinspect module
assert exobj.callables_separator == '/'
# Test that property cannot be deleted
AROPROP(exobj, 'callables_separator')
def test_str(self):
""" Test __str__ method behavior """
for full_cname in [True, False]:
            # Function definitions
def func7(exobj):
exobj.add_exception(
'my_exception7', RuntimeError, 'This is exception #7'
)
exobj.raise_exception_if('my_exception7', False)
def func8(exobj):
exobj.add_exception(
'my_exception8',
ValueError,
'This is exception #8, *[fname]*'
)
exobj.add_exception(
'my_exception9', TypeError, 'This is exception #9'
)
exobj = putil.exh.ExHandle(full_cname)
func7(exobj)
func8(exobj)
# Actual tests
str_in = str(exobj).split('\n\n')
str_out = list()
cname = 'test_exh.TestExHandle.test_str'
for str_element in str_in:
str_list = str_element.split('\n')
if str_list[0].endswith('/my_exception7'):
str_list[0] = (
'Name : {0}.func7/my_exception7'.format(cname)
)
elif str_list[0].endswith('/my_exception8'):
str_list[0] = (
'Name : {0}.func8/my_exception8'.format(cname)
)
elif str_list[0].endswith('/my_exception9'):
str_list[0] = (
'Name : {0}.func8/my_exception9'.format(cname)
)
if str_list[3].endswith('{0}.func7'.format(cname)):
str_list[3] = 'Function: {0}'.format(
'{0}.func7'.format(cname) if full_cname else 'None'
)
elif str_list[3].endswith('{0}.func8'.format(cname)):
str_list[3] = 'Function: {0}'.format(
'{0}.func8'.format(cname) if full_cname else 'None'
)
str_out.append('\n'.join(str_list))
#
str_check = list()
str_check.append(
'Name : '+cname+'.func7/my_exception7\n'
'Type : RuntimeError\n'
'Message : This is exception #7\n'
'Function: {name}'.format(
name='{0}.func7'.format(cname) if full_cname else 'None'
)
)
str_check.append(
'Name : '+cname+'.func8/my_exception8\n'
'Type : ValueError\n'
'Message : This is exception #8, *[fname]*\n'
'Function: {name}'.format(
name='{0}.func8'.format(cname) if full_cname else 'None'
)
)
str_check.append(
'Name : '+cname+'.func8/my_exception9\n'
'Type : TypeError\n'
'Message : This is exception #9\n'
'Function: {name}'.format(
name='{0}.func8'.format(cname) if full_cname else 'None'
)
)
if sorted(str_out) != sorted(str_check):
print('\n\nActual output:\n{text}'.format(
text='\n'.join(sorted(str_out))
)
)
print('\n\nReference output\n{text}'.format(
text='\n'.join(sorted(str_check))
)
)
assert sorted(str_out) == sorted(str_check)
def test_copy(self):
""" Test __copy__ method behavior """
# Functions definition
def funca(exobj):
exobj.add_exception(
'my_exceptionA', RuntimeError, 'This is exception #A'
)
def funcb(exobj):
exobj.add_exception(
'my_exceptionB', ValueError, 'This is exception #B'
)
exobj.add_exception(
'my_exceptionC', TypeError, 'This is exception #C'
)
class Clsc(object):
def __init__(self, exobj):
self._exobj = exobj
self._value = None
def _set_value(self, value):
self._exobj.add_exception(
'my_exceptionD', OSError, 'This is exception #D'
)
self._value = value
value = property(None, _set_value, None, doc='Value property')
source_obj = putil.exh.ExHandle(full_cname=True)
funca(source_obj)
funcb(source_obj)
obj = Clsc(source_obj)
obj.value = 5
# Actual tests
dest_obj = copy.copy(source_obj)
assert source_obj._ex_dict == dest_obj._ex_dict
assert id(source_obj._ex_dict) != id(dest_obj._ex_dict)
assert source_obj._callables_obj == dest_obj._callables_obj
assert id(source_obj._callables_obj) != id(dest_obj._callables_obj)
assert source_obj._clut == dest_obj._clut
assert id(source_obj._clut) != id(dest_obj._clut)
assert source_obj._full_cname == dest_obj._full_cname
assert CLDICTS(source_obj.exceptions_db, dest_obj.exceptions_db)
def test_multiple_paths_to_same_exception(self):
"""
Test that different paths to a single exception definition do not
overwrite each other
"""
def exdef(obj):
obj.add_exception(
'my_exception', | |
<filename>src/blueprints/data_server.py
import functools
import math
from flask import (
Blueprint, flash, g, redirect, render_template, request, session, url_for, jsonify, send_from_directory, current_app
)
from werkzeug.utils import secure_filename, escape
#from modules.data.database.db import get_db, query_db
from blueprints.auth import login_required, verified_required, tos_required
from logger.logger import Logger
from modules.data.database.query_modules import select_query
from modules.data.database import data_helper
from modules.data.database.data_helper import get_user_id_from_char_id
from modules.account.authentication_checks import is_admin, check_if_user_has_character
import os
bp = Blueprint('dataserver', __name__, url_prefix='/dataserver')
def _get_item_details(char_id, equipment_slot="", item_id=-1):
user_id = session["user_id"]
if not check_if_user_has_character(user_id, char_id):
if is_admin():
user_id = get_user_id_from_char_id(char_id)
if user_id < 0:
return jsonify(None)
# Get the character's item piece
try:
items = select_query.select_character_data(char_id, user_id)
except:
Logger().error("ERROR: Items were not found")
return ""
if equipment_slot is not None and equipment_slot != "":
try:
item_id = items['Character_' + equipment_slot]
except:
Logger().error("ERROR: Items were not found")
return ""
# Ensure a proper item_id is given
if item_id < 1:
# TODO: needs better error handling
#raise Exception("Tried to get item details of item with id " + str(item_id))
return jsonify(None)
# Check to see if the item has been approved
if int(select_query.select(("Approved",), "Items", False, "WHERE Item_ID=?", (item_id,))[0]) != 1:
return jsonify(None)
item_query_result = select_query.select_items(item_id)
# Check to see if ID has been assigned
if item_query_result is None:
item_query_result = data_helper.init_item_data()
#TODO: this effect query also shows up in admin.py... Could extract this to a function
if item_query_result is not None:
# Check if item has an effect on it
if item_query_result['Item_Effect1'] is not None and item_query_result['Item_Effect1'] > 0:
# Check to see if ID has been assigned
effect1QueryResult = select_query.select_effect_data(item_query_result['Item_Effect1'])
else:
effect1QueryResult = {'Effect_Name' : 'Effect1', 'Effect_Description' : 'None'}
# Check if item has an effect on it
if item_query_result['Item_Effect2'] is not None and item_query_result['Item_Effect2'] > 0:
# Check to see if ID has been assigned
effect2QueryResult = select_query.select_effect_data(item_query_result['Item_Effect2'])
else:
effect2QueryResult = {'Effect_Name' : 'Effect2', 'Effect_Description' : 'None'}
image = url_for('static', filename='images/no_image.png')
if item_query_result['Item_Picture'] is not None and item_query_result['Item_Picture'] != '' and item_query_result['Item_Picture'] != 'no_image.png':
image = '/imageserver/item/' + item_query_result['Item_Picture']
return data_helper.item_data_to_json(item_query_result, image, effect1QueryResult, effect2QueryResult)
# Equipment Data
@bp.route('/equipmentItemDetails/<int:char_id>/<string:equipment_slot>', methods=('GET', 'POST'))
@login_required
@verified_required
@tos_required
def equipmentItemDetails(char_id, equipment_slot):
# TODO: needs better error handling
return _get_item_details(char_id, equipment_slot=equipment_slot)
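# Example request against the route above (character id and slot name are
# hypothetical, not from this file): GET /dataserver/equipmentItemDetails/3/Helmet
# returns the JSON built by data_helper.item_data_to_json() for the item stored
# in the character's 'Character_Helmet' column, or JSON null when the slot is
# empty or the item has not been approved.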
# Equipment Data
@bp.route('/inventoryItemDetails/<int:char_id>/<int:item_id>', methods=('GET', 'POST'))
@login_required
@verified_required
@tos_required
def inventoryItemDetails(char_id, item_id):
# TODO: needs better error handling
return _get_item_details(char_id, item_id=item_id)
@bp.route('getClassName/<int:char_id>')
@login_required
@verified_required
@tos_required
def get_class(char_id):
user_id = session["user_id"]
if not check_if_user_has_character(user_id, char_id):
if is_admin():
user_id = get_user_id_from_char_id(char_id)
if user_id < 0:
return jsonify(current_value="Err")
query_result = select_query.select_character_class(user_id, char_id)
if query_result is None:
return jsonify(current_value="Error getting Class Name")
return jsonify(current_value=query_result["Class_Name"])
@bp.route('getRaceName/<int:char_id>')
@login_required
@verified_required
@tos_required
def get_race(char_id):
user_id = session["user_id"]
if not check_if_user_has_character(user_id, char_id):
if is_admin():
user_id = get_user_id_from_char_id(char_id)
if user_id < 0:
return jsonify(current_value="Err")
query_result = select_query.select_character_race(user_id, char_id)
if query_result is None:
return jsonify(current_value="Error getting Race Name")
return jsonify(current_value=query_result["Race_Name"])
@bp.route('getAlignmentOptions')
@login_required
@verified_required
@tos_required
def get_alignment_options():
alignments = select_query.get_alignments()
alignment_name_and_id = []
for item in alignments:
alignment_name_and_id.append({'id' : item['Alignment_ID'], 'name' : item['Alignment_Name']})
return jsonify(opts=alignment_name_and_id)
@bp.route('getLevel/<int:char_id>')
@login_required
@verified_required
@tos_required
def get_level(char_id):
field_name = "Character_Level"
user_id = session["user_id"]
if not check_if_user_has_character(user_id, char_id):
if is_admin():
user_id = get_user_id_from_char_id(char_id)
if user_id < 0:
return jsonify(current_value="Err")
query_result = select_query.select_char_fields(user_id, char_id, (field_name,))
if query_result is None:
return jsonify(current_value=0)
return jsonify(current_value=query_result[field_name])
@bp.route('getHealth/<int:char_id>')
@login_required
@verified_required
@tos_required
def get_health(char_id):
field_name = "Character_HP"
user_id = session["user_id"]
if not check_if_user_has_character(user_id, char_id):
if is_admin():
user_id = get_user_id_from_char_id(char_id)
if user_id < 0:
return jsonify(current_value="Err")
query_result = select_query.select_char_fields(user_id, char_id, (field_name,))
if query_result is None:
return jsonify(current_value=0)
return jsonify(current_value=query_result[field_name])
def get_stat_data(char_id : int, character_field : str, item_field : str):
user_id = session["user_id"]
if not check_if_user_has_character(user_id, char_id):
if is_admin():
user_id = get_user_id_from_char_id(char_id)
if user_id < 0:
return jsonify(current_value="Err")
query_result = select_query.select_char_fields(user_id, char_id, (character_field,))
if query_result is None:
return jsonify(current_value=0, base=0)
base_stat = query_result[character_field]
characters = select_query.select_character_data(char_id)
item_id_list = data_helper.get_character_items_id(characters)
stat_additional = 0
for i in item_id_list:
item = select_query.select_item_fields(i, (item_field,))
if item is not None:
stat_additional += item[item_field]
return jsonify(additional=stat_additional, base=base_stat)
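# Shape of the JSON returned by get_stat_data above (illustrative values): a
# character with Character_Strength 14 whose equipped items' Item_Str_Bonus
# values sum to 3 yields {"additional": 3, "base": 14}.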
@bp.route('getMaxHealth/<int:char_id>')
@login_required
@verified_required
@tos_required
def get_max_health(char_id):
return get_stat_data(char_id, 'Character_Max_HP', 'Item_Health_Bonus')
@bp.route('getAC/<int:char_id>')
@login_required
@verified_required
@tos_required
def get_AC(char_id):
return get_stat_data(char_id, 'Character_AC', 'Item_AC_Bonus')
@bp.route('getInitiative/<int:char_id>')
@login_required
@verified_required
@tos_required
def get_initiative(char_id):
return get_stat_data(char_id, 'Character_Initiative', 'Item_Initiative_Bonus')
@bp.route('getAttackBonus/<int:char_id>')
@login_required
@verified_required
@tos_required
def get_attack_bonus(char_id):
return get_stat_data(char_id, 'Character_Attack_Bonus', 'Item_Attack_Bonus')
@bp.route('getStr/<int:char_id>')
@login_required
@verified_required
@tos_required
def get_str(char_id):
return get_stat_data(char_id, 'Character_Strength', 'Item_Str_Bonus')
@bp.route('getDex/<int:char_id>')
@login_required
@verified_required
@tos_required
def get_dex(char_id):
return get_stat_data(char_id, 'Character_Dexterity', 'Item_Dex_Bonus')
@bp.route('getCon/<int:char_id>')
@login_required
@verified_required
@tos_required
def get_con(char_id):
return get_stat_data(char_id, 'Character_Constitution', 'Item_Con_Bonus')
@bp.route('getInt/<int:char_id>')
@login_required
@verified_required
@tos_required
def get_int(char_id):
return get_stat_data(char_id, 'Character_Intelligence', 'Item_Int_Bonus')
@bp.route('getWis/<int:char_id>')
@login_required
@verified_required
@tos_required
def get_wis(char_id):
return get_stat_data(char_id, 'Character_Wisdom', 'Item_Wis_Bonus')
@bp.route('getCha/<int:char_id>')
@login_required
@verified_required
@tos_required
def get_cha(char_id):
return get_stat_data(char_id, 'Character_Charisma', 'Item_Cha_Bonus')
@bp.route('getCurrency/<int:char_id>')
@login_required
@verified_required
@tos_required
def get_currency(char_id):
field_name = "Character_Currency"
user_id = session["user_id"]
if not check_if_user_has_character(user_id, char_id):
if is_admin():
user_id = get_user_id_from_char_id(char_id)
if user_id < 0:
return jsonify(current_value="Err")
query_result = select_query.select_char_fields(user_id, char_id, (field_name,))
if query_result is None:
return jsonify(current_value=0)
return jsonify(current_value=query_result[field_name])
@bp.route('dummyCall')
def dummy_call():
return jsonify(response='I\'m a dummy')
@bp.route('getItemList/<int:item_slot>')
@login_required
@verified_required
@tos_required
def get_item_list(item_slot):
field_names = ("Item_ID", "Item_Name", "Item_Picture", "Rarities_Color", "Approved")
query_result = select_query.select_item_fields_from_item_slot(item_slot, field_names)
# TODO: handle if query_result is none
item_list = []
for item in query_result:
if int(item["Approved"]) == 1:
# Item has been approved, add the item to the list users can pick from
item_data = {}
for key in field_names:
item_data[key] = item[key]
item_list.append(item_data)
slot_name = select_query.select_slot_names(item_slot)["Slots_Name"]
output = {
'slot_name' : slot_name,
'items' : item_list
}
return jsonify(output)
@bp.route('getCurrentEquipedItems/<int:char_id>/<int:item_slot>')
@login_required
@verified_required
@tos_required
def get_current_equiped_items(char_id, item_slot):
user_id = session["user_id"]
if not check_if_user_has_character(user_id, char_id):
if is_admin():
user_id = get_user_id_from_char_id(char_id)
if user_id < 0:
return jsonify(current_value="Err")
characters = select_query.select_char_fields(user_id, char_id)
if characters is None:
return redirect(url_for('character.character_select'))
slot_name = select_query.select_slot_names(item_slot)["Slots_Name"]
multiple = {'Weapon' : 4, 'Ring' : 2, 'Item' : 2, 'Trinket' : 2}
r = 1
slots = []
if slot_name in multiple.keys():
r = int(multiple[slot_name])
for i in range(r):
field_name = "Character_" + slot_name + str(i + 1)
slots.append(field_name)
else:
slots.append("Character_" + slot_name)
item_ids = select_query.select_char_fields(user_id, char_id, tuple(slots))
field_names = ['Item_ID', 'Item_Name', 'Item_Picture', 'Rarities_Color']
item_list = []
item_defaults = {
'Ring' : {'item_name' : 'Ring', 'picture' : 'Ring.png'},
'Item' : {'item_name' : 'Item', 'picture' : 'Item.png'},
'Trinket' : {'item_name' : 'Trinket', 'picture' : 'Trinket.png'}
}
for i in range(r):
temp_dict = {}
temp_dict['Item_ID'] = -1
if slot_name == 'Weapon':
if i % 2 == 1:
temp_dict['Item_Name'] = 'Off Hand '
temp_dict['Item_Picture'] = 'Off_Hand.png'
else:
temp_dict['Item_Name'] = 'Main Hand '
temp_dict['Item_Picture'] = 'Main_Hand.png'
temp_dict['Item_Name'] += str(math.ceil((i + 1) / 2))
else:
temp_dict['Item_Name'] = item_defaults[slot_name]['item_name']
temp_dict['Item_Picture'] = item_defaults[slot_name]['picture']
temp_dict['Rarities_Color'] = '#606060'
item_list.append(temp_dict)
count = 0
for i in item_ids:
join_str = "INNER JOIN Rarities ON Rarities.Rarities_ID=Items.Rarity_ID"
item_data_result = select_query.select(tuple(field_names), "Items", False, "WHERE Item_ID=?", (i,), (join_str,))
if item_data_result is None:
count += 1
continue
temp_data = {}
for name in field_names:
temp_data[name] = item_data_result[name]
item_list[count] = temp_data
count += 1
slot_name = select_query.select_slot_names(item_slot)['Slots_Name']
output = {'slot_name' : slot_name, 'items' : item_list, 'num_of_slots' : r}
return jsonify(output)
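# Default placeholders produced by the loop above when nothing is equipped
# (values follow directly from the code): the Weapon slot's four sub-slots are
# named 'Main Hand 1', 'Off Hand 1', 'Main Hand 2', 'Off Hand 2'; Ring, Item and
# Trinket slots each get two generic entries. Every placeholder uses Item_ID -1
# and the grey rarity color '#606060'.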
@bp.route('getItemsInSlot/<int:char_id>/<string:item_slot>')
@login_required
@verified_required
@tos_required
def get_items_in_slot(char_id, item_slot):
user_id = session["user_id"]
if not check_if_user_has_character(user_id, char_id):
if is_admin():
user_id = get_user_id_from_char_id(char_id)
if user_id < 0:
return jsonify(current_value="Err")
characters = select_query.select_char_fields(user_id, char_id)
if characters is None:
return 'NULL'
item_id_list = data_helper.get_character_items_id(characters)
select_fields = (
"Items.Item_ID", "Items.Item_Weight", "Items.Item_Name", "Rarities.Rarities_Color",
"Slots.Slots_ID", "Inventory.Amount"
)
where_clause = "WHERE Inventory.Character_ID = ? AND Slots.Slots_Name = ?"
joins = (
"INNER JOIN Items on Inventory.Item_ID=Items.Item_ID",
"INNER JOIN Rarities on Rarities.Rarities_ID=Items.Rarity_ID",
"INNER JOIN Slots on Items.Item_Slot=Slots.Slots_ID"
)
query_result = select_query.select(select_fields, "Inventory", True, where_clause, (char_id, item_slot), joins)
items = []
for q in query_result:
item_fields = {
'Item_ID' : q['Item_ID'],
'Item_Weight' : q['Item_Weight'],
'Item_Name' : escape(q['Item_Name']),
'Rarities_Color' : q['Rarities_Color'],
'Amount' : q['Amount'],
'Slots_ID' : int(q['Slots_ID']),
'Is_Equiped' : True if q['Item_ID'] in item_id_list else False
}
items.append(item_fields)
return jsonify(items)
@bp.route('getItemAmount/<int:char_id>/<int:item_id>')
@login_required
@verified_required
@tos_required
def getItemAmount(char_id, item_id):
user_id = session["user_id"]
if not check_if_user_has_character(user_id, char_id):
if is_admin():
user_id = get_user_id_from_char_id(char_id)
if user_id < 0:
return jsonify(current_value="Err")
characters = select_query.select_char_fields(user_id, char_id)
if characters is None:
return redirect(url_for('character.character_select'))
select_fields = ("Inventory.Item_ID", "Amount", "Items.Item_Slot", "Slots.Slots_Name")
joins = (
"LEFT JOIN Items ON Inventory.Item_ID = Items.Item_ID",
"LEFT JOIN Slots ON Items.Item_Slot = Slots.Slots_ID"
)
where_clause = "WHERE Character_ID = ? AND Inventory.Item_ID = ?"
query_result = select_query.select(select_fields, "Inventory", False, where_clause, (char_id, item_id), joins)
return jsonify(
current_value=query_result['Amount'],
item_id=query_result['Item_ID'],
slot_name=query_result['Slots_Name']
)
@bp.route('/itemDetails/<int:item_id>')
@login_required
@verified_required
@tos_required
def getItemDetails(item_id):
select_fields = ("Item_Name", "Item_Picture", "Approved", "Rarities.Rarities_Color")
joins = ("INNER JOIN Rarities ON Rarities.Rarities_ID=Items.Rarity_ID",)
where_clause = "WHERE Item_ID = ?"
result = select_query.select(select_fields, "Items", False, where_clause, (item_id,), joins)
# TODO: do we want this to provide data iff approved, will break inv. equiped I think
#if int(item["Approved"]) == 1:
return jsonify(
        name=result['Item_Name'],
picture=result['Item_Picture'],
color=result['Rarities_Color']
)
@bp.route('/itemFullDetails/<int:item_id>')
@login_required
@verified_required
@tos_required
def getItemFullDetails(item_id):
if is_admin():
# Ensure a proper item_id is given
if item_id < 1:
# TODO: needs better error handling
#raise Exception("Tried to get item details of item with id " + str(item_id))
return jsonify(None)
item_query_result = select_query.select_items(item_id)
# Check to see if ID has been assigned
if item_query_result is None:
item_query_result = data_helper.init_item_data()
        #TODO: this effect query also shows up in admin.py... Could extract this to a function
#!/usr/bin/env python3
#
# Copyright (C) 2017 <NAME> <<EMAIL>>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import os.path
import sys
output_prefix = sys.argv[1] if len(sys.argv) > 1 else ""
current_isa = []
isa_database = dict()
defaults_cache = None
MISA_A = 1 << 0 # Atomic
MISA_B = 1 << 1 # -reserved-
MISA_C = 1 << 2 # Compressed
MISA_D = 1 << 3 # Double-precision float
MISA_E = 1 << 4 # RV32E base ISA
MISA_F = 1 << 5 # Single-precision float
MISA_G = 1 << 6 # Additional std extensions
MISA_H = 1 << 7 # -reserved-
MISA_I = 1 << 8 # RV32I/RV64I/RV128I base ISA
MISA_J = 1 << 9 # -reserved-
MISA_K = 1 << 10 # -reserved-
MISA_L = 1 << 11 # -reserved-
MISA_M = 1 << 12 # Muliply/Divide
MISA_N = 1 << 13 # User-level interrupts
MISA_O = 1 << 14 # -reserved-
MISA_P = 1 << 15 # -reserved-
MISA_Q = 1 << 16 # Quad-precision float
MISA_R = 1 << 17 # -reserved-
MISA_S = 1 << 18 # Supervisor mode
MISA_T = 1 << 19 # -reserved-
MISA_U = 1 << 20 # User mode
MISA_V = 1 << 21 # -reserved-
MISA_W = 1 << 22 # -reserved-
MISA_X = 1 << 23 # Non-std extensions
MISA_Y = 1 << 24 # -reserved-
MISA_Z = 1 << 25 # -reserved-
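# Example masks built from the bits above (standard RISC-V encodings): an
# RV32IMC core corresponds to MISA_I | MISA_M | MISA_C = 0x100 | 0x1000 | 0x4
# = 0x1104; adding atomics and single/double float support also sets MISA_A,
# MISA_F and MISA_D.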
def header(f, instruction, isa_mode=False):
if not isa_mode:
global isa_database
for isa in current_isa:
if isa not in isa_database:
isa_database[isa] = set()
isa_database[isa].add(instruction)
global defaults_cache
defaults_cache = dict()
print("// DO NOT EDIT -- auto-generated from hdl/cpu/riscv_formal/isa/gen.py", file=f)
print("", file=f)
if isa_mode:
print("module rvfi_isa_%s (" % instruction, file=f)
else:
print("module rvfi_instruction_%s (" % instruction, file=f)
print(" input rvfi_valid,", file=f)
print(" input [`RISCV_FORMAL_ILEN - 1 : 0] rvfi_instruction,", file=f)
print(" input [`RISCV_FORMAL_XLEN - 1 : 0] rvfi_pc_rdata,", file=f)
print(" input [`RISCV_FORMAL_XLEN - 1 : 0] rvfi_rs1_rdata,", file=f)
print(" input [`RISCV_FORMAL_XLEN - 1 : 0] rvfi_rs2_rdata,", file=f)
print(" input [`RISCV_FORMAL_XLEN - 1 : 0] rvfi_mem_rdata,", file=f)
print("`ifdef RISCV_FORMAL_CSR_MISA", file=f)
print(" input [`RISCV_FORMAL_XLEN - 1 : 0] rvfi_csr_misa_rdata,", file=f)
print(" output [`RISCV_FORMAL_XLEN - 1 : 0] spec_csr_misa_rmask,", file=f)
print("`endif", file=f)
print("", file=f)
print(" output spec_valid,", file=f)
print(" output spec_trap,", file=f)
print(" output [ 4 : 0] spec_rs1_addr,", file=f)
print(" output [ 4 : 0] spec_rs2_addr,", file=f)
print(" output [ 4 : 0] spec_rd_addr,", file=f)
print(" output [`RISCV_FORMAL_XLEN - 1 : 0] spec_rd_wdata,", file=f)
print(" output [`RISCV_FORMAL_XLEN - 1 : 0] spec_pc_wdata,", file=f)
print(" output [`RISCV_FORMAL_XLEN - 1 : 0] spec_mem_addr,", file=f)
print(" output [`RISCV_FORMAL_XLEN/8 - 1 : 0] spec_mem_rmask,", file=f)
print(" output [`RISCV_FORMAL_XLEN/8 - 1 : 0] spec_mem_wmask,", file=f)
print(" output [`RISCV_FORMAL_XLEN - 1 : 0] spec_mem_wdata", file=f)
print(");", file=f)
defaults_cache["spec_valid"] = "0"
defaults_cache["spec_rs1_addr"] = "0"
defaults_cache["spec_rs2_addr"] = "0"
defaults_cache["spec_rd_addr"] = "0"
defaults_cache["spec_rd_wdata"] = "0"
defaults_cache["spec_pc_wdata"] = "0"
defaults_cache["spec_trap"] = "!misa_ok"
defaults_cache["spec_mem_addr"] = "0"
defaults_cache["spec_mem_rmask"] = "0"
defaults_cache["spec_mem_wmask"] = "0"
defaults_cache["spec_mem_wdata"] = "0"
def assign(f, sig, val):
print(" assign %s = %s;" % (sig, val), file=f)
if sig in defaults_cache:
del defaults_cache[sig]
def misa_check(f, mask, ialign16=False):
print("", file=f)
print("`ifdef RISCV_FORMAL_CSR_MISA", file=f)
print(" wire misa_ok = (rvfi_csr_misa_rdata & `RISCV_FORMAL_XLEN'h %x) == `RISCV_FORMAL_XLEN'h %x;" % (mask, mask), file=f)
print(" assign spec_csr_misa_rmask = `RISCV_FORMAL_XLEN'h %x;" % ((mask|MISA_C) if ialign16 else mask), file=f)
if ialign16:
print(" wire ialign16 = (rvfi_csr_misa_rdata & `RISCV_FORMAL_XLEN'h %x) != `RISCV_FORMAL_XLEN'h 0;" % (MISA_C), file=f)
print("`else", file=f)
print(" wire misa_ok = 1;", file=f)
if ialign16:
print("`ifdef RISCV_FORMAL_COMPRESSED", file=f)
print(" wire ialign16 = 1;", file=f)
print("`else", file=f)
print(" wire ialign16 = 0;", file=f)
print("`endif", file=f)
print("`endif", file=f)
def footer(f):
def default_assign(sig):
if sig in defaults_cache:
print(" assign %s = %s;" % (sig, defaults_cache[sig]), file=f)
if len(defaults_cache) != 0:
print("", file=f)
print(" // default assignments", file=f)
default_assign("spec_valid")
default_assign("spec_rs1_addr")
default_assign("spec_rs2_addr")
default_assign("spec_rd_addr")
default_assign("spec_rd_wdata")
default_assign("spec_pc_wdata")
default_assign("spec_trap")
default_assign("spec_mem_addr")
default_assign("spec_mem_rmask")
default_assign("spec_mem_wmask")
default_assign("spec_mem_wdata")
print("endmodule", file=f)
def format_r(f):
print("", file=f)
print(" // R-type instruction format", file=f)
print(" wire [`RISCV_FORMAL_ILEN-1:0] instruction_padding = rvfi_instruction >> 32;", file=f)
print(" wire [6:0] instruction_funct7 = rvfi_instruction[31:25];", file=f)
print(" wire [4:0] instruction_rs2 = rvfi_instruction[24:20];", file=f)
print(" wire [4:0] instruction_rs1 = rvfi_instruction[19:15];", file=f)
print(" wire [2:0] instruction_funct3 = rvfi_instruction[14:12];", file=f)
print(" wire [4:0] instruction_rd = rvfi_instruction[11: 7];", file=f)
print(" wire [6:0] instruction_opcode = rvfi_instruction[ 6: 0];", file=f)
def format_ra(f):
print("", file=f)
print(" // R-type instruction format (atomics variation)", file=f)
print(" wire [`RISCV_FORMAL_ILEN-1:0] instruction_padding = rvfi_instruction >> 32;", file=f)
print(" wire [6:0] instruction_funct5 = rvfi_instruction[31:27];", file=f)
print(" wire instruction_aq = rvfi_instruction[26];", file=f)
print(" wire instruction_rl = rvfi_instruction[25];", file=f)
print(" wire [4:0] instruction_rs2 = rvfi_instruction[24:20];", file=f)
print(" wire [4:0] instruction_rs1 = rvfi_instruction[19:15];", file=f)
print(" wire [2:0] instruction_funct3 = rvfi_instruction[14:12];", file=f)
print(" wire [4:0] instruction_rd = rvfi_instruction[11: 7];", file=f)
print(" wire [6:0] instruction_opcode = rvfi_instruction[ 6: 0];", file=f)
def format_i(f):
print("", file=f)
print(" // I-type instruction format", file=f)
print(" wire [`RISCV_FORMAL_ILEN-1:0] instruction_padding = rvfi_instruction >> 32;", file=f)
print(" wire [`RISCV_FORMAL_XLEN-1:0] instruction_imm = $signed(rvfi_instruction[31:20]);", file=f)
print(" wire [4:0] instruction_rs1 = rvfi_instruction[19:15];", file=f)
print(" wire [2:0] instruction_funct3 = rvfi_instruction[14:12];", file=f)
print(" wire [4:0] instruction_rd = rvfi_instruction[11: 7];", file=f)
print(" wire [6:0] instruction_opcode = rvfi_instruction[ 6: 0];", file=f)
def format_i_shift(f):
print("", file=f)
print(" // I-type instruction format (shift variation)", file=f)
print(" wire [`RISCV_FORMAL_ILEN-1:0] instruction_padding = rvfi_instruction >> 32;", file=f)
print(" wire [6:0] instruction_funct6 = rvfi_instruction[31:26];", file=f)
print(" wire [5:0] instruction_shamt = rvfi_instruction[25:20];", file=f)
print(" wire [4:0] instruction_rs1 = rvfi_instruction[19:15];", file=f)
print(" wire [2:0] instruction_funct3 = rvfi_instruction[14:12];", file=f)
print(" wire [4:0] instruction_rd = rvfi_instruction[11: 7];", file=f)
print(" wire [6:0] instruction_opcode = rvfi_instruction[ 6: 0];", file=f)
def format_s(f):
print("", file=f)
print(" // S-type instruction format", file=f)
print(" wire [`RISCV_FORMAL_ILEN-1:0] instruction_padding = rvfi_instruction >> 32;", file=f)
print(" wire [`RISCV_FORMAL_XLEN-1:0] instruction_imm = $signed({rvfi_instruction[31:25], rvfi_instruction[11:7]});", file=f)
print(" wire [4:0] instruction_rs2 = rvfi_instruction[24:20];", file=f)
print(" wire [4:0] instruction_rs1 = rvfi_instruction[19:15];", file=f)
print(" wire [2:0] instruction_funct3 = rvfi_instruction[14:12];", file=f)
print(" wire [6:0] instruction_opcode = rvfi_instruction[ 6: 0];", file=f)
def format_sb(f):
print("", file=f)
print(" // SB-type instruction format", file=f)
print(" wire [`RISCV_FORMAL_ILEN-1:0] instruction_padding = rvfi_instruction >> 32;", file=f)
print(" wire [`RISCV_FORMAL_XLEN-1:0] instruction_imm = $signed({rvfi_instruction[31], rvfi_instruction[7], rvfi_instruction[30:25], rvfi_instruction[11:8], 1'b0});", file=f)
print(" wire [4:0] instruction_rs2 = rvfi_instruction[24:20];", file=f)
print(" wire [4:0] instruction_rs1 = rvfi_instruction[19:15];", file=f)
print(" wire [2:0] instruction_funct3 = rvfi_instruction[14:12];", file=f)
print(" wire [6:0] instruction_opcode = rvfi_instruction[ 6: 0];", file=f)
def format_u(f):
print("", file=f)
print(" // U-type instruction format", file=f)
print(" wire [`RISCV_FORMAL_ILEN-1:0] instruction_padding = rvfi_instruction >> 32;", file=f)
print(" wire [`RISCV_FORMAL_XLEN-1:0] instruction_imm = $signed({rvfi_instruction[31:12], 12'b0});", file=f)
print(" wire [4:0] instruction_rd = rvfi_instruction[11:7];", file=f)
print(" wire [6:0] instruction_opcode = rvfi_instruction[ 6:0];", file=f)
def format_uj(f):
print("", file=f)
print(" // UJ-type instruction format", file=f)
print(" wire [`RISCV_FORMAL_ILEN-1:0] instruction_padding = rvfi_instruction >> 32;", file=f)
print(" wire [`RISCV_FORMAL_XLEN-1:0] instruction_imm = $signed({rvfi_instruction[31], rvfi_instruction[19:12], rvfi_instruction[20], rvfi_instruction[30:21], 1'b0});", file=f)
print(" wire [4:0] instruction_rd = rvfi_instruction[11:7];", file=f)
print(" wire [6:0] instruction_opcode = rvfi_instruction[6:0];", file=f)
def format_cr(f):
print("", file=f)
print(" // CI-type instruction format", file=f)
print(" wire [`RISCV_FORMAL_ILEN-1:0] instruction_padding = rvfi_instruction >> 16;", file=f)
print(" wire [3:0] instruction_funct4 = rvfi_instruction[15:12];", file=f)
print(" wire [4:0] instruction_rs1_rd = rvfi_instruction[11:7];", file=f)
print(" wire [4:0] instruction_rs2 = rvfi_instruction[6:2];", file=f)
print(" wire [1:0] instruction_opcode = rvfi_instruction[1:0];", file=f)
def format_ci(f):
print("", file=f)
print(" // CI-type instruction format", file=f)
print(" wire [`RISCV_FORMAL_ILEN-1:0] instruction_padding = rvfi_instruction >> 16;", file=f)
print(" wire [`RISCV_FORMAL_XLEN-1:0] instruction_imm = $signed({rvfi_instruction[12], rvfi_instruction[6:2]});", file=f)
print(" wire [2:0] instruction_funct3 = rvfi_instruction[15:13];", file=f)
print(" wire [4:0] instruction_rs1_rd = rvfi_instruction[11:7];", file=f)
print(" wire [1:0] instruction_opcode = rvfi_instruction[1:0];", file=f)
def format_ci_sp(f):
print("", file=f)
print(" // CI-type instruction format (SP variation)", file=f)
print(" wire [`RISCV_FORMAL_ILEN-1:0] instruction_padding = rvfi_instruction >> 16;", file=f)
print(" wire [`RISCV_FORMAL_XLEN-1:0] instruction_imm = $signed({rvfi_instruction[12], rvfi_instruction[4:3], rvfi_instruction[5], rvfi_instruction[2], rvfi_instruction[6], 4'b0});", file=f)
print(" wire [2:0] instruction_funct3 | |
from geometric_primitives import brick
from geometric_primitives import bricks
import dataset_common
def car_demo():
bricks_ = bricks.Bricks(150)
list_brick_ = []
brick_ = brick.Brick()
brick_.set_position([0, 0, 3])
brick_.set_direction(1)
list_brick_.append(brick_)
'''
for i in range(5):
brick_ = brick.Brick()
brick_.set_position([-1, i * 2 + 1, 3 + (i % 2 == 0)])
brick_.set_direction(0)
list_brick_.append(brick_)
'''
brick_ = brick.Brick()
brick_.set_position([-1, 1, 4])
brick_.set_direction(0)
list_brick_.append(brick_)
brick_ = brick.Brick()
brick_.set_position([-2, 3, 3])
brick_.set_direction(0)
list_brick_.append(brick_)
brick_ = brick.Brick()
brick_.set_position([-1, 5, 4])
brick_.set_direction(0)
list_brick_.append(brick_)
brick_ = brick.Brick()
brick_.set_position([-2, 7, 3])
brick_.set_direction(0)
list_brick_.append(brick_)
brick_ = brick.Brick()
brick_.set_position([-1, 9, 4])
brick_.set_direction(0)
list_brick_.append(brick_)
brick_ = brick.Brick()
brick_.set_position([0, 10, 3])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([2, i * 10, 4])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([4, j * 8 + 1, 2 + (i % 2 == 0)])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(3):
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([i * 2 + 6, j * 10, 3 + (i % 2 == 0)])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([12, j * 8 + 1, 2 + (i % 2 == 0)])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([14, i * 10, 4])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([15, i * 8 + 1, 3])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(4):
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([14 - i * 4, j * 6 + 2, 4])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([1, i * 4 + 3, 3])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(7):
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([i * 2 + 2, j * 2 + 4, 3 + (i % 2 == 0)])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(3):
for j in range(5):
brick_ = brick.Brick()
brick_.set_position([i * 4 + 3, j * 2 + 1, 5])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
for j in range(3):
brick_ = brick.Brick()
brick_.set_position([i * 4 + 7, j * 2 + 3, 6])
brick_.set_direction(1)
list_brick_.append(brick_)
brick_ = brick.Brick()
brick_.set_position([4, 5, 6])
brick_.set_direction(0)
list_brick_.append(brick_)
'''
# Outer Bricks All Covered
for i in range(7):
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([i * 2 + 2, j * 10, 3 + (i % 2 == 0)])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([15, i * 8 + 1, 3])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(7):
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([14 - 2 * i,j * 6 + 2, 3 + (i % 2 == 0)])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([1, i * 4 + 3, 3])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(7):
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([i * 2 + 2, j * 2 + 4, 3 + (i % 2 == 0)])
brick_.set_direction(1)
list_brick_.append(brick_)
brick_ = brick.Brick()
brick_.set_position([15, 5, 3])
brick_.set_direction(0)
list_brick_.append(brick_)
# (12 * 18) 2-Layer Created
# Total Block Used = 27 * 2 = 54 So far
'''
for brick_ in list_brick_:
bricks_.add(brick_)
return bricks_
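# Usage sketch (illustrative comment only; relies on the geometric_primitives package
# imported above):
#   bricks_ = car_demo()   # returns a bricks.Bricks container holding the assembled car model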
def car_ambulance():
bricks_ = bricks.Bricks(200)
list_brick_ = []
brick_ = brick.Brick()
brick_.set_position([0, 0, 3])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(3):
brick_ = brick.Brick()
brick_.set_position([-1, i * 2 + 1, 3 + (i % 2 == 0)])
brick_.set_direction(0)
list_brick_.append(brick_)
brick_ = brick.Brick()
brick_.set_position([0, 6, 3])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(8):
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([i * 2 + 2, j * 6, 3 + (i % 2 == 0)])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([17, i * 4 + 1, 4])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(8):
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([16 - 2 * i, j * 2 + 2, 3 + (i % 2 == 1)])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([-2, i * 4 + 1, 5])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([18, i * 4 + 1, 5])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(4):
for j in range(4):
brick_ = brick.Brick()
brick_.set_position([j * 4 + 3, i * 2, 5])
brick_.set_direction(1)
list_brick_.append(brick_)
brick_ = brick.Brick()
brick_.set_position([0, 3, 5])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(4):
for j in range(4):
brick_ = brick.Brick()
brick_.set_position([j * 4 + 4, i * 2, 6])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([0, i * 6, 6])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([-2 + i * 2, 3, 6])
brick_.set_direction(0)
list_brick_.append(brick_)
brick_ = brick.Brick()
brick_.set_position([1, 3, 3])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(4):
for j in range(4):
brick_ = brick.Brick()
brick_.set_position([j * 4 + 4, i * 2, 7])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(3):
brick_ = brick.Brick()
brick_.set_position([0, i * 2 + 1, 7])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(3):
for j in range(3):
brick_ = brick.Brick()
brick_.set_position([j * 4 + 4, i * 2 + 1, 8])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(4):
brick_ = brick.Brick()
brick_.set_position([0, i * 2, 8])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(3):
brick_ = brick.Brick()
brick_.set_position([0, i * 2 + 1, 9])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([0, i * 2 + 2, 10])
brick_.set_direction(1)
list_brick_.append(brick_)
brick_ = brick.Brick()
brick_.set_position([-1, 3, 11])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(6):
brick_ = brick.Brick()
brick_.set_position([i * 2 + 3, 3, 9])
brick_.set_direction(0)
list_brick_.append(brick_)
brick_ = brick.Brick()
brick_.set_position([16, 3, 8])
brick_.set_direction(0)
list_brick_.append(brick_)
brick_ = brick.Brick()
brick_.set_position([15, 3, 9])
brick_.set_direction(0)
list_brick_.append(brick_)
brick_ = brick.Brick()
brick_.set_position([20, 0, 4])
brick_.set_direction(1)
list_brick_.append(brick_)
brick_ = brick.Brick()
brick_.set_position([20, 6, 4])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
for j in range(4):
brick_ = brick.Brick()
brick_.set_position([j * 4 + 3, i * 8 - 1, 2])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(2):
for j in range(2):
for k in range(2):
brick_ = brick.Brick()
brick_.set_position([i * 8 + 5, j * 10 - 2, k * 2 + 1])
brick_.set_direction(1)
list_brick_.append(brick_)
for brick_ in list_brick_:
bricks_.add(brick_)
return bricks_
def car_sports():
bricks_ = bricks.Bricks(200)
list_brick_ = []
brick_ = brick.Brick()
brick_.set_position([0, 0, 4])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(3):
brick_ = brick.Brick()
brick_.set_position([-1, i * 2 + 1, 3 + (i % 2 == 1)])
brick_.set_direction(0)
list_brick_.append(brick_)
brick_ = brick.Brick()
brick_.set_position([0, 6, 4])
brick_.set_direction(1)
list_brick_.append(brick_)
'''
for i in range(11):
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([i * 2 + 2, j * 6, 3 + (i % 2 == 1)])
brick_.set_direction(1)
list_brick_.append(brick_)
'''
for i in range(2):
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([i * 2 + 2, j * 6, 3 + (i % 2 == 1)])
brick_.set_direction(1)
list_brick_.append(brick_)
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([6, j * 8 - 1, 3])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(5):
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([i * 2 + 8, j * 6, 3 + (i % 2 == 0)])
brick_.set_direction(1)
list_brick_.append(brick_)
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([18, j * 8 - 1, 3])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([i * 2 + 20, j * 6, 3 + (i % 2 == 0)])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([23, i * 4 + 1, 4])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(11):
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([22 - 2 * i, j * 2 + 2, 3 + (i % 2 == 1)])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in | |
# Copyright (C) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, check out LICENSE.md
import functools
import re
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from imaginaire.layers import Conv2dBlock, LinearBlock
from imaginaire.model_utils.gancraft.layers import AffineMod, ModLinear
import imaginaire.model_utils.gancraft.mc_utils as mc_utils
import imaginaire.model_utils.gancraft.voxlib as voxlib
from imaginaire.utils.distributed import master_only_print as print
class RenderMLP(nn.Module):
r""" MLP with affine modulation."""
def __init__(self, in_channels, style_dim, viewdir_dim, mask_dim=680,
out_channels_s=1, out_channels_c=3, hidden_channels=256,
use_seg=True):
super(RenderMLP, self).__init__()
self.use_seg = use_seg
if self.use_seg:
self.fc_m_a = nn.Linear(mask_dim, hidden_channels, bias=False)
self.fc_viewdir = None
if viewdir_dim > 0:
self.fc_viewdir = nn.Linear(viewdir_dim, hidden_channels, bias=False)
self.fc_1 = nn.Linear(in_channels, hidden_channels)
self.fc_2 = ModLinear(hidden_channels, hidden_channels, style_dim, bias=False, mod_bias=True, output_mode=True)
self.fc_3 = ModLinear(hidden_channels, hidden_channels, style_dim, bias=False, mod_bias=True, output_mode=True)
self.fc_4 = ModLinear(hidden_channels, hidden_channels, style_dim, bias=False, mod_bias=True, output_mode=True)
self.fc_sigma = nn.Linear(hidden_channels, out_channels_s)
if viewdir_dim > 0:
self.fc_5 = nn.Linear(hidden_channels, hidden_channels, bias=False)
self.mod_5 = AffineMod(hidden_channels, style_dim, mod_bias=True)
else:
self.fc_5 = ModLinear(hidden_channels, hidden_channels, style_dim,
bias=False, mod_bias=True, output_mode=True)
self.fc_6 = ModLinear(hidden_channels, hidden_channels, style_dim, bias=False, mod_bias=True, output_mode=True)
self.fc_out_c = nn.Linear(hidden_channels, out_channels_c)
self.act = nn.LeakyReLU(negative_slope=0.2)
def forward(self, x, raydir, z, m):
r""" Forward network
Args:
x (N x H x W x M x in_channels tensor): Projected features.
raydir (N x H x W x 1 x viewdir_dim tensor): Ray directions.
z (N x style_dim tensor): Style codes.
m (N x H x W x M x mask_dim tensor): One-hot segmentation maps.
"""
b, h, w, n, _ = x.size()
z = z[:, None, None, None, :]
f = self.fc_1(x)
if self.use_seg:
f = f + self.fc_m_a(m)
# Common MLP
f = self.act(f)
f = self.act(self.fc_2(f, z))
f = self.act(self.fc_3(f, z))
f = self.act(self.fc_4(f, z))
# Sigma MLP
sigma = self.fc_sigma(f)
# Color MLP
if self.fc_viewdir is not None:
f = self.fc_5(f)
f = f + self.fc_viewdir(raydir)
f = self.act(self.mod_5(f, z))
else:
f = self.act(self.fc_5(f, z))
f = self.act(self.fc_6(f, z))
c = self.fc_out_c(f)
return sigma, c
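# Shape sketch for RenderMLP.forward (illustrative comment only; all sizes are arbitrary
# examples, not values mandated by the code above):
#   x: (N, H, W, M, in_channels), raydir: (N, H, W, 1, viewdir_dim),
#   z: (N, style_dim), m: (N, H, W, M, mask_dim)
#   -> sigma: (N, H, W, M, out_channels_s), c: (N, H, W, M, out_channels_c)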
class StyleMLP(nn.Module):
r"""MLP converting style code to intermediate style representation."""
def __init__(self, style_dim, out_dim, hidden_channels=256, leaky_relu=True, num_layers=5, normalize_input=True,
output_act=True):
super(StyleMLP, self).__init__()
self.normalize_input = normalize_input
self.output_act = output_act
fc_layers = []
fc_layers.append(nn.Linear(style_dim, hidden_channels, bias=True))
for i in range(num_layers-1):
fc_layers.append(nn.Linear(hidden_channels, hidden_channels, bias=True))
self.fc_layers = nn.ModuleList(fc_layers)
self.fc_out = nn.Linear(hidden_channels, out_dim, bias=True)
if leaky_relu:
self.act = nn.LeakyReLU(negative_slope=0.2, inplace=True)
else:
self.act = functools.partial(F.relu, inplace=True)
def forward(self, z):
r""" Forward network
Args:
z (N x style_dim tensor): Style codes.
"""
if self.normalize_input:
z = F.normalize(z, p=2, dim=-1)
for fc_layer in self.fc_layers:
z = self.act(fc_layer(z))
z = self.fc_out(z)
if self.output_act:
z = self.act(z)
return z
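# Minimal usage sketch (added for illustration; `_stylemlp_shape_check` is not part of
# the original module API, and the dimensions below are arbitrary):
def _stylemlp_shape_check():
    """Map a batch of 4 style codes of dim 128 to intermediate style vectors of dim 256."""
    mlp = StyleMLP(style_dim=128, out_dim=256)
    z = torch.randn(4, 128)
    return mlp(z).shape  # expected: torch.Size([4, 256])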
class SKYMLP(nn.Module):
r"""MLP converting ray directions to sky features."""
def __init__(self, in_channels, style_dim, out_channels_c=3,
hidden_channels=256, leaky_relu=True):
super(SKYMLP, self).__init__()
self.fc_z_a = nn.Linear(style_dim, hidden_channels, bias=False)
self.fc1 = nn.Linear(in_channels, hidden_channels)
self.fc2 = nn.Linear(hidden_channels, hidden_channels)
self.fc3 = nn.Linear(hidden_channels, hidden_channels)
self.fc4 = nn.Linear(hidden_channels, hidden_channels)
self.fc5 = nn.Linear(hidden_channels, hidden_channels)
self.fc_out_c = nn.Linear(hidden_channels, out_channels_c)
if leaky_relu:
self.act = nn.LeakyReLU(negative_slope=0.2, inplace=True)
else:
self.act = functools.partial(F.relu, inplace=True)
def forward(self, x, z):
r"""Forward network
Args:
x (... x in_channels tensor): Ray direction embeddings.
z (... x style_dim tensor): Style codes.
"""
z = self.fc_z_a(z)
while z.dim() < x.dim():
z = z.unsqueeze(1)
y = self.act(self.fc1(x) + z)
y = self.act(self.fc2(y))
y = self.act(self.fc3(y))
y = self.act(self.fc4(y))
y = self.act(self.fc5(y))
c = self.fc_out_c(y)
return c
class RenderCNN(nn.Module):
r"""CNN converting intermediate feature map to final image."""
def __init__(self, in_channels, style_dim, hidden_channels=256,
leaky_relu=True):
super(RenderCNN, self).__init__()
self.fc_z_cond = nn.Linear(style_dim, 2 * 2 * hidden_channels)
self.conv1 = nn.Conv2d(in_channels, hidden_channels, 1, stride=1, padding=0)
self.conv2a = nn.Conv2d(hidden_channels, hidden_channels, 3, stride=1, padding=1)
self.conv2b = nn.Conv2d(hidden_channels, hidden_channels, 3, stride=1, padding=1, bias=False)
self.conv3a = nn.Conv2d(hidden_channels, hidden_channels, 3, stride=1, padding=1)
self.conv3b = nn.Conv2d(hidden_channels, hidden_channels, 3, stride=1, padding=1, bias=False)
self.conv4a = nn.Conv2d(hidden_channels, hidden_channels, 1, stride=1, padding=0)
self.conv4b = nn.Conv2d(hidden_channels, hidden_channels, 1, stride=1, padding=0)
self.conv4 = nn.Conv2d(hidden_channels, 3, 1, stride=1, padding=0)
if leaky_relu:
self.act = nn.LeakyReLU(negative_slope=0.2, inplace=True)
else:
self.act = functools.partial(F.relu, inplace=True)
def modulate(self, x, w, b):
w = w[..., None, None]
b = b[..., None, None]
return x * (w+1) + b
def forward(self, x, z):
r"""Forward network.
Args:
x (N x in_channels x H x W tensor): Intermediate feature map
z (N x style_dim tensor): Style codes.
"""
z = self.fc_z_cond(z)
adapt = torch.chunk(z, 2 * 2, dim=-1)
y = self.act(self.conv1(x))
y = y + self.conv2b(self.act(self.conv2a(y)))
y = self.act(self.modulate(y, adapt[0], adapt[1]))
y = y + self.conv3b(self.act(self.conv3a(y)))
y = self.act(self.modulate(y, adapt[2], adapt[3]))
y = y + self.conv4b(self.act(self.conv4a(y)))
y = self.act(y)
y = self.conv4(y)
return y
class StyleEncoder(nn.Module):
r"""Style Encoder constructor.
Args:
style_enc_cfg (obj): Style encoder definition file.
"""
def __init__(self, style_enc_cfg):
super(StyleEncoder, self).__init__()
input_image_channels = style_enc_cfg.input_image_channels
num_filters = style_enc_cfg.num_filters
kernel_size = style_enc_cfg.kernel_size
padding = int(np.ceil((kernel_size - 1.0) / 2))
style_dims = style_enc_cfg.style_dims
weight_norm_type = style_enc_cfg.weight_norm_type
self.no_vae = getattr(style_enc_cfg, 'no_vae', False)
activation_norm_type = 'none'
nonlinearity = 'leakyrelu'
base_conv2d_block = \
functools.partial(Conv2dBlock,
kernel_size=kernel_size,
stride=2,
padding=padding,
weight_norm_type=weight_norm_type,
activation_norm_type=activation_norm_type,
# inplace_nonlinearity=True,
nonlinearity=nonlinearity)
self.layer1 = base_conv2d_block(input_image_channels, num_filters)
self.layer2 = base_conv2d_block(num_filters * 1, num_filters * 2)
self.layer3 = base_conv2d_block(num_filters * 2, num_filters * 4)
self.layer4 = base_conv2d_block(num_filters * 4, num_filters * 8)
self.layer5 = base_conv2d_block(num_filters * 8, num_filters * 8)
self.layer6 = base_conv2d_block(num_filters * 8, num_filters * 8)
self.fc_mu = LinearBlock(num_filters * 8 * 4 * 4, style_dims)
if not self.no_vae:
self.fc_var = LinearBlock(num_filters * 8 * 4 * 4, style_dims)
def forward(self, input_x):
r"""SPADE Style Encoder forward.
Args:
input_x (N x 3 x H x W tensor): input images.
Returns:
mu (N x C tensor): Mean vectors.
logvar (N x C tensor): Log-variance vectors.
z (N x C tensor): Style code vectors.
"""
if input_x.size(2) != 256 or input_x.size(3) != 256:
input_x = F.interpolate(input_x, size=(256, 256), mode='bilinear')
x = self.layer1(input_x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.layer5(x)
x = self.layer6(x)
x = x.view(x.size(0), -1)
mu = self.fc_mu(x)
if not self.no_vae:
logvar = self.fc_var(x)
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
z = eps.mul(std) + mu
else:
z = mu
logvar = torch.zeros_like(mu)
return mu, logvar, z
class Base3DGenerator(nn.Module):
r"""Minecraft 3D generator constructor.
Args:
gen_cfg (obj): Generator definition part of the yaml config file.
data_cfg (obj): Data definition part of the yaml config file.
"""
def __init__(self, gen_cfg, data_cfg):
super(Base3DGenerator, self).__init__()
print('Base3DGenerator initialization.')
# ---------------------- Main Network ------------------------
# Exclude some of the features from positional encoding
self.pe_no_pe_feat_dim = getattr(gen_cfg, 'pe_no_pe_feat_dim', 0)
# blk_feat passes through PE
input_dim = (gen_cfg.blk_feat_dim-self.pe_no_pe_feat_dim)*(gen_cfg.pe_lvl_feat*2) + self.pe_no_pe_feat_dim
if (gen_cfg.pe_incl_orig_feat):
input_dim += (gen_cfg.blk_feat_dim-self.pe_no_pe_feat_dim)
print('[Base3DGenerator] Expected input dimensions: ', input_dim)
self.input_dim = input_dim
self.mlp_model_kwargs = gen_cfg.mlp_model_kwargs
self.pe_lvl_localcoords = getattr(gen_cfg, 'pe_lvl_localcoords', 0)
if self.pe_lvl_localcoords > 0:
self.mlp_model_kwargs['poscode_dim'] = self.pe_lvl_localcoords * 2 * 3
# Set pe_lvl_raydir=0 and pe_incl_orig_raydir=False to disable view direction input
input_dim_viewdir = 3*(gen_cfg.pe_lvl_raydir*2)
if (gen_cfg.pe_incl_orig_raydir):
input_dim_viewdir += 3
print('[Base3DGenerator] Expected viewdir input dimensions: ', input_dim_viewdir)
self.input_dim_viewdir = input_dim_viewdir
self.pe_params = [gen_cfg.pe_lvl_feat, gen_cfg.pe_incl_orig_feat,
gen_cfg.pe_lvl_raydir, gen_cfg.pe_incl_orig_raydir]
# Style input dimension
style_dims = gen_cfg.style_dims
self.style_dims = style_dims
interm_style_dims = getattr(gen_cfg, 'interm_style_dims', style_dims)
self.interm_style_dims = interm_style_dims
# ---------------------- Style MLP --------------------------
self.style_net = globals()[gen_cfg.stylenet_model](
style_dims, interm_style_dims, **gen_cfg.stylenet_model_kwargs)
# number of output channels for MLP (before blending)
final_feat_dim = getattr(gen_cfg, 'final_feat_dim', 16)
self.final_feat_dim = final_feat_dim
# ----------------------- Sky Network -------------------------
sky_input_dim_base = 3
# Dedicated sky network input dimensions
sky_input_dim = sky_input_dim_base*(gen_cfg.pe_lvl_raydir_sky*2)
if (gen_cfg.pe_incl_orig_raydir_sky):
sky_input_dim += sky_input_dim_base
print('[Base3DGenerator] Expected sky input dimensions: ', sky_input_dim)
self.pe_params_sky = [gen_cfg.pe_lvl_raydir_sky, gen_cfg.pe_incl_orig_raydir_sky]
self.sky_net = SKYMLP(sky_input_dim, style_dim=interm_style_dims, out_channels_c=final_feat_dim)
# ----------------------- Style Encoder -------------------------
style_enc_cfg = getattr(gen_cfg, 'style_enc', None)
setattr(style_enc_cfg, 'input_image_channels', 3)
setattr(style_enc_cfg, 'style_dims', gen_cfg.style_dims)
self.style_encoder = StyleEncoder(style_enc_cfg)
# ---------------------- Ray Caster -------------------------
self.num_blocks_early_stop = gen_cfg.num_blocks_early_stop
self.num_samples = gen_cfg.num_samples
self.sample_depth = gen_cfg.sample_depth
self.coarse_deterministic_sampling = getattr(gen_cfg, 'coarse_deterministic_sampling', True)
self.sample_use_box_boundaries = getattr(gen_cfg, 'sample_use_box_boundaries', True)
# ---------------------- Blender -------------------------
self.raw_noise_std = getattr(gen_cfg, 'raw_noise_std', 0.0)
self.dists_scale = getattr(gen_cfg, 'dists_scale', 0.25)
self.clip_feat_map = getattr(gen_cfg, 'clip_feat_map', True)
self.keep_sky_out = getattr(gen_cfg, 'keep_sky_out', False)
self.keep_sky_out_avgpool = getattr(gen_cfg, 'keep_sky_out_avgpool', False)
keep_sky_out_learnbg = getattr(gen_cfg, 'keep_sky_out_learnbg', False)
self.sky_global_avgpool = getattr(gen_cfg, 'sky_global_avgpool', False)
if self.keep_sky_out:
self.sky_replace_color = None
if keep_sky_out_learnbg:
sky_replace_color = torch.zeros([final_feat_dim])
sky_replace_color.requires_grad = True
self.sky_replace_color = torch.nn.Parameter(sky_replace_color)
# ---------------------- render_cnn -------------------------
self.denoiser = RenderCNN(final_feat_dim, style_dim=interm_style_dims)
self.pad = gen_cfg.pad
def get_param_groups(self, cfg_opt):
print('[Generator] | |
<gh_stars>0
#!/usr/bin/env python3
# Copyright 2018 <NAME>
# (github.com/santigl)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
from sqliteanalyzer import StorageAnalyzer
# This produces a report with the same format as sqlite_analyzer.
# (https://sqlite.org/sqlanalyze.html)
class SQLite3ClassicReport:
def __init__(self, db_path):
self._database_path = db_path
self._stats = StorageAnalyzer(db_path)
def disk_space_report(self):
print('/** Disk-Space Utilization Report For '
'{}\n'.format(self._database_path))
page_count = self._stats.page_count()
self._stat_line('Page size in bytes', self._stats.page_size())
self._stat_line('Pages in the whole file (measured)',
page_count)
self._stat_line('Pages in the whole file (calculated)',
self._stats.calculated_page_count())
in_use_pages = self._stats.in_use_pages()
in_use_percent = self._percentage(in_use_pages, page_count)
self._stat_line('Pages that store data', in_use_pages,
in_use_percent)
self._stat_line('Pages on the freelist (per header)',
self._stats.freelist_count())
self._stat_line('Pages on the freelist (calculated)',
self._stats.calculated_free_pages())
autovacuum_pages = self._stats.autovacuum_page_count()
self._stat_line('Pages of auto-vacuum overhead',
autovacuum_pages,
self._percentage(autovacuum_pages, page_count))
self._stat_line('Number of tables in the database',
self._stats.ntable())
self._stat_line('Number of indices',
self._stats.nindex())
self._stat_line('Number of defined indices',
self._stats.nmanindex())
self._stat_line('Number of implied indices',
self._stats.nautoindex())
true_file_size = self._stats.file_size()
if self._stats.is_compressed():
self._stat_line('Size of uncompressed content in bytes',
true_file_size)
logical_file_size = self._stats.logical_file_size()
efficiency = self._percentage(true_file_size,
logical_file_size)
self._stat_line('Size of compressed file on disk',
self._stats.file_size(), efficiency)
else:
self._stat_line('Size of the file in bytes:',
true_file_size)
payload = self._stats.payload_size()
payload_percent = self._percentage(payload,
self._stats.file_size())
self._stat_line('Bytes of user payload stored', payload,
payload_percent)
print()
def tables_page_count_report(self):
self._title_line('Page counts for all tables with their '
'indices')
# (We want to display the larger tables first)
page_usages = sorted(self._stats.table_space_usage().items(),
key=lambda k: k[1], reverse=True)
for (table_name, table_pages) in page_usages:
self._stat_line(table_name.upper(), table_pages,
self._percentage(table_pages,
self._stats.page_count()))
print()
def tables_and_indices_page_usage_report(self):
self._title_line('Page counts for all tables '
'and indices separately')
page_counts = []
# Getting the page count for tables...
for table in self._stats.tables():
page_count = self._stats.table_page_count(table,
exclude_indices=True)
page_counts.append((table, page_count))
# ... and for indices:
for index in self._stats.indices():
entry = (index['name'],
self._stats.index_page_count(index['name']))
page_counts.append(entry)
# (We want to display the larger entries,
# table or indices, first)
page_counts.sort(key=lambda k: k[1], reverse=True)
for (name, pages) in page_counts:
percentage = self._percentage(pages,
self._stats.page_count())
self._stat_line(name.upper(), pages, percentage)
print()
def table_details(self, table):
table_name = table.upper()
index_list = self._stats.index_list(table)
if not index_list:
self._title_line('Table {}'.format(table_name))
self._print_stats(self._stats.table_stats(table))
print()
return
# Splitting into 3 parts.
# 1) Tables and indices:
self._title_line('Table {} and all its '
'indices'.format(table_name))
self._print_stats(self._stats.table_stats(table))
print()
# 2) Just tables:
self._title_line('Table {} w/o any '
'indices'.format(table_name))
self._print_stats(self._stats.table_stats(table,
exclude_indices=True))
# 3) Indices:
for index in sorted(index_list, key=lambda k: k.name):
index_name = index.name.upper()
title = 'Index {} of table {}'.format(index_name,
table_name)
self._title_line(title)
index_stats = self._stats.index_stats(index.name)
self._print_stats(index_stats)
print()
def global_usage_report(self):
self._title_line('All tables and indices')
global_stats = self._stats.global_stats()
self._print_stats(global_stats)
print()
self._title_line('All tables')
table_stats = self._stats.global_stats(exclude_indices=True)
self._print_stats(table_stats)
print()
def indices_usage_report(self):
if self._stats.indices():
self._title_line('All indices')
self._print_stats(self._stats.indices_stats())
def tables_details_report(self):
# (We display the larger tables first.)
tables = sorted(self._stats.tables(),
key=self._stats.table_page_count,
reverse=True)
for table in tables:
self.table_details(table)
def _print_stats(self, table_stats):
t = table_stats
self._stat_line('Percentage of total database',
'{:g}%'.format(t['total_pages_percent']))
self._stat_line('Number of entries', t['nentry'])
self._stat_line('Bytes of storage consumed', t['storage'])
self._stat_line('Bytes of payload', t['payload'],
self._percentage(t['payload'], t['storage']))
self._stat_line('Bytes of metadata', t['total_metadata'],
self._percentage(t['total_metadata'],
t['storage']))
if t['cnt'] == 1:
self._stat_line('B-tree depth', t['depth'])
self._stat_line('Average payload per entry',
t['average_payload'])
self._stat_line('Average unused bytes per entry',
t['average_unused'])
self._stat_line('Average metadata per entry',
t['average_metadata'])
if 'avg_fanout' in t:
self._stat_line('Average fanout', t['avg_fanout'])
if t['total_pages'] > 1:
self._stat_line('Non-sequential pages', t['gap_cnt'],
t['fragmentation'])
self._stat_line('Maximum payload per entry', t['mx_payload'])
self._stat_line('Entries that use overflow', t['ovfl_cnt'],
t['ovfl_percent'])
if t['int_pages'] > 0:
self._stat_line('Index pages used', t['int_pages'])
self._stat_line('Primary pages used', t['leaf_pages'])
self._stat_line('Overflow pages used', t['ovfl_pages'])
self._stat_line('Total pages used', t['total_pages'])
if t['int_unused'] > 0:
self._stat_line('Unused bytes on index pages',
t['int_unused'],
t['int_unused_percent'])
self._stat_line('Unused bytes on primary pages',
t['leaf_unused'],
t['leaf_unused_percent'])
self._stat_line('Unused bytes on overflow pages',
t['ovfl_unused'],
t['ovfl_unused_percent'])
self._stat_line('Unused bytes on all pages',
t['total_unused'], t['total_unused_percent'])
print()
def stat_db_dump(self):
print('The entire text of this report can be sourced into any '
'SQL database')
print('engine for further analysis. '
'All of the text above is an SQL comment.')
print('The data used to generate this report follows:')
print('*/')
for line in self._stats.stat_db_dump():
print(line)
@staticmethod
def _title_line(title):
print('*** {} {}\n'.format(title, '*' * (79 - len(title) - 5)))
@classmethod
def _stat_line(cls, description, value, percentage=None):
# Line format: 'Label....... value (pct.)%'
# |----40----|-|-----|-|---10---|
# First 40 characters are description and filling dots (fixed):
dots = '.' * (40 - len(description))
label = description + ' ' + dots
# If it's a float, we want only 2 decimal digits.
if isinstance(value, float):
value = round(value, 2)
# Right-aligning value assuming the percentage, when present,
# will take a 10 character field
value_str = ' {}'.format(value).rjust(len(label)-10-3, '.')
if percentage is None:
print('{}{}'.format(label, value_str))
return
percentage_str = '{:.2f}%'.format(percentage).rjust(10, ' ')
print('{}{}{}'.format(label, value_str, percentage_str))
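# Illustrative calls (hypothetical values, showing the fixed-width layout only):
#   SQLite3ClassicReport._stat_line('Page size in bytes', 4096)
#   SQLite3ClassicReport._stat_line('Pages that store data', 4052, 98.2)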
@staticmethod
def _percentage(value, total):
if total == 0:
return 0
return 100 * value / total
def print_definitions(self):
d = '''Page size in bytes
The number of bytes in a single page of the database file.
Usually 1024.
Number of pages in the whole file
The number of {}-byte pages that go into forming the complete
database
Pages that store data
The number of pages that store data, either as primary B*Tree pages or
as overflow pages. The number at the right is the data pages divided by
the total number of pages in the file.
Pages on the freelist
The number of pages that are not currently in use but are reserved for
future use. The percentage at the right is the number of freelist pages
divided by the total number of pages in the file.
Pages of auto-vacuum overhead
The number of pages that store data used by the database to facilitate
auto-vacuum. This is zero for databases that do not support auto-vacuum.
Number of tables in the database
The number of tables in the database, including the SQLITE_MASTER table
used to store schema information.
Number of indices
The total number of indices in the database.
Number of defined indices
The number of indices created using an explicit CREATE INDEX statement.
Number of implied indices
The number of indices used to implement PRIMARY KEY or UNIQUE constraints
on tables.
Size of the file in bytes
The total amount of disk space used by the entire database files.
Bytes of user payload stored
The total number of bytes of user payload stored in the database. The
schema information in the SQLITE_MASTER table is not counted when
computing this number. The percentage at the right shows the payload
divided by the total file size.
Percentage of total database
The amount of the complete database file that is devoted to storing
information described by this category.
Number of entries
The total number of B-Tree key/value pairs stored under this category.
Bytes of storage consumed
The total amount of disk space required to store all B-Tree entries
under this category. This is the total number of pages used times
the page size.
Bytes of payload
The amount of payload stored under this category. Payload is the data
part of table entries and the key part of index entries. The percentage
at the right is the bytes of payload divided by the bytes of storage
consumed.
Bytes of metadata
The amount of formatting and structural information stored in the
table or index. Metadata includes the btree page header, the cell | |
a tuple of length 2 with the
rms of the noise in the horizontal and vertical plane, respectively
and an optional key: 'cutoff' which must be a float defining in how
many sigma the distribution must be truncated (default: 1). This
noise will be added to the reference orbit (gcod + bba) simulating
errors in the bpms readings (offset errors, bba precision,...).
respm - dictionary with keys 'M', 's', 'V', 'U' which are the response
matrix and its SVD decomposition (M = U * diag(s) * V). If None,
the function WILL CALCULATE the response matrix for each machine in
each step defined by sext_ramp.
OUTPUT:
gcodx : 2D numpy array with the reference orbit, excluding the bpm noise
(gcodx + bba), used in the correction.
gcody : the same as gcodx, but for the vertical plane.
"""
nr_mach = len(machine)
# making sure they are in order
bpms = sorted(bpms)
hcms = sorted(hcms)
vcms = sorted(vcms)
#### Dealing with optional input parameters:
if svs == 'all': svs = len(hcms)+len(vcms)
if not _np.allclose(sext_ramp[-1],1,atol=1e-8,rtol=1e-8): sext_ramp.append(1)
if bpm_err is not None:
cutoff = bpm_err.get('cutoff',1)
sigs = bpm_err.pop('sigma')
noisex = sigs[0]*_scystat.truncnorm(-cutoff,cutoff).rvs((nr_mach,len(bpms)))
noisey = sigs[1]*_scystat.truncnorm(-cutoff,cutoff).rvs((nr_mach,len(bpms)))
else:
noisex = _np.zeros((nr_mach,len(bpms)))
noisey = _np.zeros((nr_mach,len(bpms)))
# hcms must behave as if the magnet was segmented:
types = (list,tuple,_np.ndarray)
if not isinstance(hcms[0],types): hcms = [[ind] for ind in hcms]
if not isinstance(vcms[0],types): vcms = [[ind] for ind in vcms]
#goal orbit must be a numpy array for each machine
if gcodx is None: gcodx = _np.zeros((nr_mach,len(bpms)))
elif len(gcodx) == 1: gcodx = _np.array([gcodx]).repeat(nr_mach,axis=0)
if gcody is None: gcody = _np.zeros((nr_mach,len(bpms)))
elif len(gcody) == 1: gcody = _np.array([gcody]).repeat(nr_mach,axis=0)
####
print('correcting closed-orbit distortions')
print('sextupole ramp: {0}'.format(sext_ramp))
print('selection of singular values: {0:3d}'.format(svs))
print('maximum number of orbit correction iterations: {0:3d}'.format(nr_iter))
print('tolerance: {0:8.2e}\n'.format(tol))
print(' -----------------------------------------------------------------------------------------------')
print(' | codx [um] | cody [um] | kickx[urad] kicky[urad] | (nr_iter|nr_refactor)')
print(' | all bpm | all bpm | | [sextupole ramp]')
print(' | (max) (rms) | (max) (rms) | (max) (rms) | (max) (rms) | (max) (rms) | (max) (rms) | ',end='')
print(' '.join(['{0:7.5f}'.format(ind) for ind in sext_ramp]))
print('\n---|---------------------------------------------------------------|-------------------------------| ')
#Definition of nonlinear elements indices and ids indices:
sext_idx = [ind for ind in range(len(machine[0])) if machine[0][ind].polynom_b[2] != 0]
ids_idx = [ind for ind in range(len(machine[0]))
if machine[0][ind].pass_method.startswith('kicktable_pass')]
#Definition of Stats functions:
s = _pyaccel.lattice.find_spos(machine[0])
max_rms = lambda v: (1e6*_np.abs(v).max(), 1e6*_np.array(v).std(ddof=1))
max_rmss = lambda v: (1e6*_np.abs(v).max(), 1e6*_np.sqrt((_np.trapz(v**2,x=s)
-_np.trapz(v,x=s)**2/s[-1])/s[-1]))
for i in range(nr_mach):
sext_str = [machine[i][ind].polynom_b[2] for ind in sext_idx]
if ind_bba is not None:
gcodx[i] += _pyaccel.lattice.get_error_misalignment_x(machine[i],ind_bba)
gcody[i] += _pyaccel.lattice.get_error_misalignment_y(machine[i],ind_bba)
Tgcodx = gcodx[i] + noisex[i]
Tgcody = gcody[i] + noisey[i]
niter = _np.zeros(len(sext_ramp),dtype=int)
ntimes = _np.zeros(len(sext_ramp),dtype=int)
for ind in ids_idx: machine[i][ind].pass_method = 'drift_pass'
for j in range(len(sext_ramp)):
if j == len(sext_ramp)-1:
for ind in ids_idx: machine[i][ind].pass_method = 'kicktable_pass'
for ii,ind in enumerate(sext_idx): machine[i][ind].polynom_b[2] = sext_str[ii]*sext_ramp[j]
hkck,vkck,codx,cody,niter[j],ntimes[j] = cod_sg(machine[i],bpms,hcms,vcms,
respm=respm,nr_iters=nr_iter,svs=svs,tolerance=tol,gcodx=Tgcodx,gcody=Tgcody)
if any([_np.isnan(codx).any(),_np.isnan(cody).any()]):
print('Machine {0:03d} became unstable when sextupole strength was {1:5.3f}'.format(i,sext_ramp[j]))
for ii,ind in enumerate(sext_idx): machine[i][ind].polynom_b[2] = sext_str[ii]
break
print('{0:03d}|'.format(i)+
' {0:5.1f} {1:5.1f} |'.format(*max_rmss(codx))+
' {0:5.1f} {1:5.1f} |'.format(*max_rms(codx[bpms] - gcodx[i]))+
' {0:5.1f} {1:5.1f} |'.format(*max_rmss(cody))+
' {0:5.1f} {1:5.1f} |'.format(*max_rms(cody[bpms] - gcody[i]))+
' {0:5.1f} {1:5.1f} |'.format(*max_rms(hkck))+
' {0:5.1f} {1:5.1f} |'.format(*max_rms(vkck)), end='')
print(' '.join(['({0:02d}|{1:02d})'.format(niter[ind], ntimes[ind])
for ind in range(len(niter))]))
print('--------------------------------------------------------------------------------------------------- \n');
return gcodx, gcody
def calc_respm_cod(acc,bpm_idx,hcm_idx,vcm_idx,symmetry=1,printing=False):
"""
"""
# making sure they are in order
bpm_idx = sorted(bpm_idx)
hcm_idx = sorted(hcm_idx)
vcm_idx = sorted(vcm_idx)
nr_bpms = len(bpm_idx)
nr_hcms = len(hcm_idx)
nr_vcms = len(vcm_idx)
M = _np.zeros((2*nr_bpms,nr_vcms+nr_hcms)) # create response matrix and its components
Mxx, Mxy = M[:nr_bpms,:nr_hcms], M[:nr_bpms,nr_hcms:]
Myx, Myy = M[nr_bpms:,:nr_hcms], M[nr_bpms:,nr_hcms:]
len_bpm = nr_bpms // symmetry
len_hcm = nr_hcms // symmetry
len_vcm = nr_vcms // symmetry
if (nr_bpms % symmetry) or (nr_hcms % symmetry) or (nr_vcms % symmetry):
len_bpm = nr_bpms
len_hcm = nr_hcms
len_vcm = nr_vcms
symmetry = 1
else:
hcm_idx = hcm_idx[:len_hcm]
vcm_idx = vcm_idx[:len_vcm]
if printing:
print('bpms:{0:03d}, hcms:{1:03d}, vcms:{2:03d}'.format(
nr_bpms, nr_hcms, nr_vcms))
mxx,mxy,myx,myy = _get_response_matrix(acc, bpm_idx, hcm_idx, vcm_idx)
for i in range(symmetry):
indcs = list(range(i*len_hcm,(i+1)*len_hcm))
Mxx[:,indcs] = _np.roll(mxx,len_bpm*i,axis=0)
Myx[:,indcs] = _np.roll(myx,len_bpm*i,axis=0)
indcs = list(range(i*len_vcm,(i+1)*len_vcm))
Mxy[:,indcs] = _np.roll(mxy,len_bpm*i,axis=0) #the last bpm turns into the first
Myy[:,indcs] = _np.roll(myy,len_bpm*i,axis=0)
r = dict()
r['M'] = M
U, s, V = _np.linalg.svd(M,full_matrices=False) #M = U*np.diag(s)*V
r['U'] = U
r['V'] = V
r['s'] = s
if printing:
print('number of singular values: {0:03d}'.format(len(s)))
print('singular values: {0:.3f},{1:.3f},{2:.3f} ... {3:.3f},{4:.3f},{5:.3f}'.format(
s[0], s[1], s[2], s[-3],s[-2],s[-1]))
return r
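# Usage sketch (illustrative comment only; `acc`, `bpms`, `hcms` and `vcms` are assumed
# to be provided by the caller, as in the closed-orbit correction above):
#   respm = calc_respm_cod(acc, bpms, hcms, vcms)
#   CM = -_np.dot(respm['V'].T, _np.dot(_np.diag(1/respm['s']), respm['U'].T))
#   # CM is the pseudo-inverse of the response matrix, used by cod_sg below to
#   # convert orbit residuals into corrector kicks.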
def cod_sg(acc,bpms,hcms,vcms,respm=None, nr_iters=20,svs='all',tolerance=1e-5,
gcodx=None,gcody=None):
if gcodx is None: gcodx = _np.zeros(bpms.shape)
if gcody is None: gcody = _np.zeros(bpms.shape)
if respm is None:
respm = calc_respm_cod(acc, bpms, hcms, vcms, symmetry=1, printing=False)
s = respm['s']
U = respm['U']
V = respm['V']
#selection of singular values
if svs == 'all': svs = len(hcms)+len(vcms)
invs = 1/s
invs[svs:] = 0
iS = _np.diag(invs)
CM = - _np.dot(V.T,_np.dot(iS,U.T))
gcod = _np.zeros(2*len(bpms))
gcod[:len(bpms)] = gcodx
gcod[len(bpms):] = gcody
cod = _np.zeros(2*len(bpms))
cod[:len(bpms)], cod[len(bpms):] = _calc_cod(acc, indices=bpms)
corrs = _lnls.utils.ravel(hcms)
corrs.extend(_lnls.utils.ravel(vcms))
corrs = _np.unique(_np.array(corrs))
best_fm = (cod - gcod).std(ddof=1)
best_acc = acc[corrs]
factor, ntimes = 1, 0
for iters in range(nr_iters):
# calcs kicks
cod[:len(bpms)], cod[len(bpms):] = _calc_cod(acc, indices=bpms)
dkicks = factor * _np.dot(CM, cod - gcod)
# sets kicks
hkicks = dkicks[:len(hcms)]
vkicks = dkicks[len(hcms):]
hkicks += _get_kickangle(acc, hcms, 'x')
vkicks += _get_kickangle(acc, vcms, 'y')
_set_kickangle(acc, hcms, hkicks, 'x')
_set_kickangle(acc, vcms, vkicks, 'y')
cod[:len(bpms)], cod[len(bpms):] = _calc_cod(acc, indices=bpms)
fm = (cod - gcod).std(ddof=1)
residue = abs(best_fm-fm)/best_fm
if fm < best_fm:
best_fm = fm
best_acc = acc[corrs]
factor = 1 # reset the correction strength to 1
else:
acc[corrs] = best_acc
factor *= 0.75 # reduces the strength of the correction
ntimes += 1 # check how many times it passed here
# breaks the loop in case convergence is reached
if residue < abs(tolerance): break
hkicks = _get_kickangle(acc, hcms, 'x')
vkicks = _get_kickangle(acc, vcms, 'y')
codx, cody = _calc_cod(acc)
return hkicks, vkicks, codx, cody, iters+1, ntimes
def get_bba_ind(acc,bpms):
# Determine the indices of the quadrupoles in the lattice
quad_idx = _np.array([ind for ind in range(len(acc)) if acc[ind].angle == 0 and acc[ind].K != 0])
# find the longitudinal position of the elements in the lattice
spos = _pyaccel.lattice.find_spos(acc,indices='open')
bpm_spos = spos[bpms] # of the bpms
quad_spos = spos[quad_idx] # and of the quadrupoles
# determine which quadrupoles are closest to the bpms
In = _np.abs(bpm_spos[:,None] - quad_spos[None,:]).argmin(axis=1)
ind_bba = quad_idx[In]
return ind_bba
def correct_coupling(machine, bpms, hcms, vcms, scms, svs='all',nr_iter=20,
tol=1e-5, bpm_err=None, respm=None):
""" Correct coupling of several machines.
INPUTS:
name : name of the file to which the inputs will be saved;
machine : cell array of lattice models to symmetrize the optics.
coup : structure with fields:
bpm_idx - bpm indexes in the model;
hcm_idx - horizontal correctors indexes in the model;
vcm_idx - vertical correctors indexes in the model;
scm_idx - indexes of the skew quads which will be used to symmetrize;
svs - may be a number denoting how many singular values will be
used in the correction or the string 'all' to use all singular
values. Default: 'all';
max_nr_iter - maximum number of iteractions the correction
algortithm will perform at each call for each machine;
tolerance - if in two subsequent iteractions the relative difference
between the error function values is less than this value the
correction is considered to have converged and will terminate.
simul_bpm_corr_err - if true, the Gains field defined in the bpms and
the Gain field defined in the correctors in thelattice will be used
to simulate gain errors in these elements, changing the response
matrix calculated. Notice that the supra cited fields must exist
in the lattice models of the machine array for each bpm and corrector
in order for this this simulation to work. Otherwise an error will occur.
respm - structure with fields M, S, V, U which are the coupling response
matrix and its SVD decomposition. If NOT present, the function
WILL CALCULATE the coupling response matrix for each machine.
OUTPUT:
machine : cell array of lattice models with the orbit corrected.
"""
bpms | |
<reponame>NVlabs/torchtrainers<filename>torchtrainers/trainer.py
#
# Copyright (c) 2013-2019 <NAME>. All rights reserved.
# This file is part of torchtrainers (see unknown).
# See the LICENSE file for licensing terms (BSD-style).
#
"""Training-related part of the Keras engine.
"""
import os
import os.path
import sys
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.optim
import time
import re
import copy
def get_info():
import platform
import getpass
node = str(platform.node())
user = getpass.getuser()
now = time.ctime()
return "{} {} {}".format(now, node, user)
def loader_test(source, nbatches=10, skip=10):
for i, sample in enumerate(source):
if i >= skip-1: break
start = time.time()
count = 0
for i, sample in enumerate(source):
xs = sample[0]
count += len(xs)
if i >= nbatches-1: break
finish = time.time()
delta = finish-start
print("{:.2f} samples/s {:.2f} batches/s".format(count/delta, nbatches/delta))
for index, a in enumerate(sample):
if isinstance(a, torch.Tensor):
print(index, ":", "Tensor", a.shape, a.device, a.dtype, a.min().item(), a.max().item())
elif isinstance(a, np.ndarray):
print(index, ":", "ndarray", a.shape, a.dtype, np.amin(a), np.amax(a))
else:
print(index, ":", type(a))
def astime(s):
s = int(s+0.999)
seconds = s%60
s = s//60
minutes = s%60
s = s//60
hours = s%24
days = s//24
result = ""
if days>0: result = "{:d}d".format(days)
result += "{:02d}:{:02d}:{:02d}".format(hours, minutes, seconds)
return result
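# Minimal usage sketch (added for illustration; `_astime_example` is not part of the
# original module API):
def _astime_example():
    """Format 3700 seconds with astime()."""
    return astime(3700)  # -> "01:01:40"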
def within_jupyter():
try:
cfg = get_ipython().config
if cfg["IPKernelApp"]["parent_appname"] is not None:
return True
else:
return False
except NameError:
return False
def jupyter_plot(axis, ys, xs=None, sigma=0, xscale=None, yscale=None):
#display.clear_output(wait=True)
axis.cla()
if xscale is not None:
axis.set_xscale(xscale)
if yscale is not None:
axis.set_yscale(yscale)
from scipy.ndimage import filters
if sigma>0:
ys = filters.gaussian_filter(np.array(ys, "f"), sigma, mode="nearest")
if xs is not None:
axis.plot(xs, ys)
else:
axis.plot(ys)
#print(np.amin(ys), np.amax(ys))
class Progress(object):
def __init__(self, chunk=1000):
self.chunk = chunk
self.losses = []
self.avg_losses = []
def add(self, count, loss):
self.losses.append((count, loss))
if self.losses[-1][0]-self.losses[0][0] > self.chunk:
avg_loss = np.mean([x[1] for x in self.losses])
self.avg_losses.append((self.losses[-1][0], avg_loss))
self.losses = []
def value(self, index=1):
if len(self.avg_losses) > 0:
return self.avg_losses[-1][index]
elif len(self.losses) > 0:
return self.losses[-1][index]
else:
return -1
def plot(self, axis=None, **kw):
axis = axis or plt.gca()
if len(self.avg_losses)==0: return
xs = [p[0] for p in self.avg_losses]
ys = [p[1] for p in self.avg_losses]
axis.plot(xs, ys, **kw)
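# Minimal usage sketch (added for illustration; `_progress_example` and the numbers
# below are hypothetical, not part of the original API):
def _progress_example():
    """Accumulate (sample count, loss) pairs and read back the chunk-averaged loss."""
    prog = Progress(chunk=100)
    for step in range(0, 1000, 50):
        prog.add(step, 1.0 / (1 + step))  # (running sample count, loss)
    return prog.value()  # most recently averaged loss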
def getvalue(x):
if hasattr(x, "item"):
return x.item()
else:
return float(x)
class MovingAverage0(object):
def __init__(self, initial=1.0, range=5000):
self.range = range
self.maverage = initial
self.count = 0
self.total = 0
def add(self, x, weight=1):
self.total += x
self.count += 1
self.maverage = self.maverage * float(self.range - weight) / self.range + \
x * float(weight) / self.range
return self.maverage
def recent(self):
return self.maverage
def value(self):
return float(self.total) / self.count
class MovingAverage(object):
def __init__(self, range=50):
self.range = range
self.values = []
def add(self, x, weight=1):
self.values.append(x)
def recent(self):
if self.values==[]: return -1
return np.mean(self.values[-self.range:])
def value(self):
if self.values==[]: return -1
return np.mean(self.values)
def __len__(self):
return len(self.values)
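# Minimal usage sketch (added for illustration; `_moving_average_example` is not part
# of the original module API):
def _moving_average_example():
    """Windowed mean of recent values versus mean of all values."""
    avg = MovingAverage(range=3)
    for v in [1.0, 2.0, 3.0, 4.0]:
        avg.add(v)
    return avg.recent(), avg.value()  # (mean of last 3 -> 3.0, mean of all -> 2.5)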
class Misclassification(object):
def __init__(self):
self.counts = 0
self.errs = 0
self.moving = MovingAverage()
def name(self):
return "err"
def add(self, pred, target):
assert not torch.is_floating_point(target), target.dtype
assert target.ndimension() == 1
if target.ndimension() == pred.ndimension() - 1:
_, pred = pred.max(-1)
assert target.ndimension() == pred.ndimension(), (target.size(), pred.size())
assert target.size() == pred.size(), (target.size(), pred.size())
counts = len(target)
errs = (target != pred).float().sum()
self.counts += counts
self.errs += errs
self.moving.add(float(errs)/counts, counts)
def value(self):
return float(self.errs) / self.counts
def recent(self):
return self.moving.recent()
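# Minimal usage sketch (added for illustration; `_misclassification_example` and the
# tensors below are hypothetical):
def _misclassification_example():
    """Compute an error rate from per-class scores and integer labels."""
    metric = Misclassification()
    pred = torch.tensor([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])  # scores for 3 samples
    target = torch.tensor([0, 1, 1])                           # true classes
    metric.add(pred, target)   # argmax of pred is [0, 1, 0] -> one misclassification
    return metric.value()      # 1 error out of 3 -> 0.333...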
def apply1(f, x):
if f is None:
return x
else:
return f(x)
def jupyter_after_batch(self):
from IPython import display  # display is not imported at module level; needed below
#fig = plt.gcf()
plt.close("all")
fig = plt.figure(figsize=(12, 6))
fig.clf()
fig.add_subplot(1, 2, 1)
fig.add_subplot(1, 2, 2)
axis = fig.get_axes()[0]
self.losses.plot(axis)
axis = fig.get_axes()[1]
self.errors.plot(axis)
display.clear_output(wait=True)
display.display(axis.figure)
def compare(x, y):
if x<y: return -1
if x>y: return 1
return 0
def getmeta(meta, key, mode="training"):
if key in meta:
return meta[key]
if key=="loss":
return meta.get(mode+"_loss")
errs = meta.get(mode+"_err")
for k, v in errs:
if k==key:
return v
return None
def extract_log(metalist, key="loss", mode="training"):
xs, ys = [], []
for meta in metalist:
xs.append(getmeta(meta, "ntrain"))
ys.append(getmeta(meta, key=key, mode=mode))
return xs, ys
def compare_metas(meta1, meta2):
if meta1 is None:
return 1
if meta2 is None:
return -1
if "testing_err" in meta1:
return compare(meta1["testing_err"][0][1],
meta2["testing_err"][0][1])
elif "testing_loss" in meta1:
return compare(meta1["testing_loss"],
meta2["testing_loss"])
else:
return compare(meta1["training_loss"],
meta2["training_loss"])
class Trainer(object):
"""The `Model` class adds training & evaluation routines to a `Network`.
"""
def __init__(self,
model,
criterion=None,
optimizer=None,
optimizer_factory=torch.optim.SGD,
preprocess=None,
encode_input=None,
encode_target=None,
decode_pred=None,
target_tensors=None,
stop_if=None,
device=None,
metrics=[],
verbose=True,
#loss_weights=None,
#sample_weight_mode=None,
#weighted_metrics=None,
):
self.device = device
self.set_model(model)
self.criterion = criterion
self.criterion.to(device)
self.optimizer_factory = optimizer_factory
if optimizer is not None:
self.set_optimizer(optimizer)
self.verbose = verbose
self.preprocess = preprocess
self.encode_target = encode_target
self.decode_pred = decode_pred
self.encode_input = encode_input
self.metrics = metrics
self.reset_metrics()
self.count = 0
self.after_batch = self.default_after_batch
self.after_epoch = self.default_after_epoch
self.report_every = 1
self.record_last = True
self.last_lr = None
self.progress_prefix = "training"
self.logs = []
def set_model(self, model):
self.model = model.to(self.device)
if not hasattr(self.model, "META"):
self.model.META = dict(ntrain=0)
self.optimizer = None
def set_optimizer(self, optimizer):
self.optimizer = optimizer
self.optimizer.zero_grad()
def set_lr(self, lr, **kw):
if lr != self.last_lr:
self.optimizer = self.optimizer_factory(self.model.parameters(), lr, **kw)
sopt = re.sub(r"\s+", " ", str(self.optimizer))
if self.verbose:
print("setting optimizer:", sopt)
self.optimizer.zero_grad()
self.last_lr = lr
def reset_metrics(self, current_size=-1, current_count=0):
self.current_size = current_size
self.current_count = current_count
self.losses = MovingAverage()
self.mobjects = [m() for m in self.metrics]
self.current_start = time.time()
def report_recent_short(self):
if self.current_size == 0:
return "..."
now = time.time()
delta = now - self.current_start
total_time = float(self.current_size) / self.current_count * delta
remaining_time = total_time - delta
progress = self.progress_prefix
progress += " {:3.0f}%".format(
100.0 * float(self.current_count) / self.current_size)
if self.current_count>= self.current_size:
progress += " /{} ".format(
astime(total_time))
else:
progress += " {} ".format(
astime(remaining_time))
progress += " loss {:6.4f}".format(
self.losses.recent())
for m in self.mobjects[:1]:
progress+= " {} {:6.4f}".format(m.name()[:3], m.recent())
return progress
def report_recent(self):
if self.current_size == 0:
return "..."
now = time.time()
delta = now - self.current_start
total_time = float(self.current_size) / self.current_count * delta
remaining_time = total_time - delta
progress = self.progress_prefix
progress += "{:8d} / {:8d}".format(self.current_count, self.current_size)
progress += " time {} / {}".format(astime(remaining_time), astime(total_time))
progress += " {:3.0f}%".format(
100.0 * float(self.current_count) / self.current_size)
progress += " loss {:9.5f} [{:6d}]".format(self.losses.recent(), len(self.losses))
for m in self.mobjects:
progress+= " {} {:9.5f}".format(m.name()[:3], m.recent())
return progress
def report_metrics(self):
report = "loss {:6.4f}".format(self.losses.value())
for m in self.mobjects:
report += " {} {:6.4f}".format(m.name()[:3], m.value())
return report
def default_after_batch(self):
if self.current_count == 0: return
progress = self.report_recent()
print(progress + "\r", end="", flush=True)
def default_after_epoch(self):
print()
def train_on_batch(self, x, target, sample_weight=None, class_weight=None):
assert sample_weight is None
assert class_weight is None
self.model.train()
x, target = apply1(self.preprocess, (x, target))
if x is None: return
batch_size = len(x)
assert batch_size > 0
assert len(x) == len(target)
x = apply1(self.encode_input, x)
target = apply1(self.encode_target, target)
pred = self.model(x.to(self.device))
assert len(pred) == batch_size
target = target.to(self.device)
loss = self.criterion(pred, target)
if self.record_last:
self.last_x = x.to("cpu")
self.last_y = target.to("cpu")
self.last_pred = pred.to("cpu")
self.last_loss = getvalue(loss)
self.losses.add(getvalue(loss), batch_size)
for m in self.mobjects:
m.add(pred, target)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.count += len(x)
self.model.META["ntrain"] += len(x)
def predict_on_batch(self, x):
self.model.eval()
        pred = self.model(x.to(self.device))
if self.decode_pred:
pred = self.decode_pred(pred)
return pred
def test_on_batch(self, x, target, sample_weight=None):
assert sample_weight is None
x, target = apply1(self.preprocess, (x, target))
batch_size = len(x)
x = apply1(self.encode_input, x)
target = apply1(self.encode_target, target)
target = target.to(self.device)
self.model.eval()
pred = self.model(x.to(self.device))
loss = self.criterion(pred, target)
self.losses.add(getvalue(loss), batch_size)
for m in self.mobjects:
m.add(pred, target)
def fit_for(self,
generator,
samples_per_epoch,
epochs=1,
verbose=1,
callbacks=None,
class_weight=None,
stop_if=None):
assert callbacks is None
assert class_weight is None
self.model.eval()
self.reset_metrics(samples_per_epoch * epochs)
batch_count = 0
try:
while True:
try:
for (x, target) in generator:
self.train_on_batch(x, target)
self.current_count += len(x)
                        if self.after_batch is not None and batch_count % self.report_every == 0:
self.after_batch()
if stop_if is not None:
if stop_if(self):
return
if self.current_count >= self.current_size:
return
batch_count += 1
finally:
if self.after_epoch is not None:
self.after_epoch()
finally:
self.model.META["training_info"] = get_info()
self.model.META["training_loss"] = self.losses.value()
self.model.META["training_err"] = self.get_metrics()
self.logs.append(copy.copy(self.model.META))
def test_for(self,
generator,
epoch_size=None,
callbacks=None,
class_weight=None,
verbose=0):
assert callbacks is None
assert class_weight is None
self.model.eval()
self.reset_metrics()
count = 0
try:
for (x, target) in generator:
self.test_on_batch(x, target)
count += len(target)
if epoch_size is not None and count >= epoch_size:
break
finally:
if len(self.logs) > 0 and self.logs[-1]["ntrain"] == self.model.META["ntrain"]:
del self.logs[-1]
self.model.META["testing_info"] = get_info()
self.model.META["testing_loss"] = self.losses.value()
self.model.META["testing_err"] = self.get_metrics()
self.logs.append(copy.copy(self.model.META))
return self.report_metrics()
def get_metrics(self):
return [(m.name(), m.value()) for m in self.mobjects]
def metric_names(self):
return [m.name() for m in self.mobjects]
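# Minimal usage sketch (illustration only, not part of the original library).
# It assumes the helper utilities referenced by Trainer (apply1, MovingAverage,
# getvalue, astime, get_info) are defined earlier in this module and that
# apply1(None, x) simply returns x unchanged. All names below exist only for
# this demo.
if __name__ == "__main__":
    torch.manual_seed(0)
    xs = torch.randn(256, 8)
    ys = (xs.sum(dim=1) > 0).long()
    # eight synthetic (input, target) batches of 32 samples each
    batches = [(xs[i:i + 32], ys[i:i + 32]) for i in range(0, 256, 32)]
    net = torch.nn.Sequential(torch.nn.Linear(8, 16), torch.nn.ReLU(), torch.nn.Linear(16, 2))
    trainer = Trainer(net, criterion=torch.nn.CrossEntropyLoss(), device="cpu")
    trainer.set_lr(1e-2)  # lazily builds the SGD optimizer via optimizer_factory
    trainer.fit_for(batches, samples_per_epoch=256, epochs=2)
    print(trainer.report_metrics())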
| |
shared.system,
pseudos = get_pseudos('qmcpack',shared),
)
set_loc(loc+'vmc_inputs jastrows')
jkw = extract_keywords(inputs,jastrow_factor_keys,optional=True)
jkw.system = shared.system
j2kw = jkw.copy()
j2kw.set(J1=1,J2=1,J3=0)
j3kw = jkw.copy()
j3kw.set(J1=1,J2=1,J3=1)
task.J2_inputs = j2kw
task.J3_inputs = j3kw
set_loc(loc+'vmc_inputs vmc_methods')
task.sec_inputs = extract_keywords(inputs,vmc_sections_keys,optional=True)
return inputs
#end def process_vmc_inputs
def process_dmc_inputs(inputs,shared,task,loc):
inputs.set(
system = shared.system,
pseudos = get_pseudos('qmcpack',shared),
)
set_loc(loc+'dmc_inputs jastrows')
jkw = extract_keywords(inputs,jastrow_factor_keys,optional=True)
jkw.system = shared.system
j2kw = jkw.copy()
j2kw.set(J1=1,J2=1,J3=0)
j3kw = jkw.copy()
j3kw.set(J1=1,J2=1,J3=1)
task.J2_inputs = j2kw
task.J3_inputs = j3kw
set_loc(loc+'dmc_inputs dmc_methods')
task.sec_inputs = extract_keywords(inputs,dmc_sections_keys,optional=True)
return inputs
#end def process_dmc_inputs
sim_input_defaults = obj(
system = system_input_defaults,
vasp = vasp_input_defaults,
scf = scf_input_defaults,
nscf = nscf_input_defaults,
p2q = p2q_input_defaults,
pwf = pwf_input_defaults,
pp = pp_input_defaults,
opt = opt_input_defaults,
vmc = vmc_input_defaults,
dmc = dmc_input_defaults,
)
sim_workflow_keys = obj(
system = system_workflow_keys,
vasp = vasp_workflow_keys,
scf = scf_workflow_keys,
nscf = nscf_workflow_keys,
p2q = p2q_workflow_keys,
pwf = pwf_workflow_keys,
pp = pp_workflow_keys,
opt = opt_workflow_keys,
vmc = vmc_workflow_keys,
dmc = dmc_workflow_keys,
)
process_sim_inp = obj(
system = process_system_inputs,
vasp = process_vasp_inputs,
scf = process_scf_inputs,
nscf = process_nscf_inputs,
p2q = process_p2q_inputs,
pwf = process_pwf_inputs,
pp = process_pp_inputs,
opt = process_opt_inputs,
vmc = process_vmc_inputs,
dmc = process_dmc_inputs,
)
def process_sim_inputs(name,inputs_in,defaults,shared,task,loc):
set_loc(loc)
inputs = obj(**inputs_in)
assign_defaults(inputs,sim_input_defaults[name][defaults])
inputs = process_sim_inp[name](inputs,shared,task,loc)
workflow = extract_keywords(inputs,sim_workflow_keys[name])
task.inputs = inputs
task.workflow = workflow
#end def process_sim_inputs
qmcpack_chain_sim_names = ['system','vasp','scf','nscf','p2q','pwf','pp','opt','vmc','dmc']
def capture_qmcpack_chain_inputs(kwargs):
kw_deep = obj()
kw_shallow = obj()
inp_shallow = obj()
shallow_types = tuple([Simulation])#(Job,Simulation)
for k,v in kwargs.iteritems():
if '_inputs' not in k or isinstance(v,(tuple,list)):
if isinstance(v,shallow_types):
kw_shallow[k] = v
else:
kw_deep[k] = v
#end if
else:
shallow = obj()
deep = obj()
for kk,vv in v.iteritems():
if isinstance(vv,shallow_types):
shallow[kk]=vv
else:
deep[kk]=vv
#end if
#end for
inp_shallow[k] = shallow
kw_deep[k] = deep
#end if
#end for
# deep copy
kwcap = kw_deep.copy()
# shallow copy
kwcap.set(**kw_shallow)
for k,v in inp_shallow.iteritems():
kwcap[k].set(**v)
#end for
return kwcap
#end def capture_qmcpack_chain_inputs
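# Hedged note on the capture above: keyword values and the contents of any
# *_inputs section are deep-copied (obj.copy) before being stored, while values
# that are Simulation instances are kept as shared references so that live
# workflow objects are not duplicated. For example (illustrative values only):
#
#     kwcap = capture_qmcpack_chain_inputs(dict(basepath='runs/example',
#                                               scf_inputs=dict(ecutwfc=200)))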
def process_qmcpack_chain_kwargs(
basepath = '',
orb_source = None,
J2_source = None,
J3_source = None,
system = missing,
system_function = missing,
sim_list = missing,
pseudos = missing,
dft_pseudos = missing,
qmc_pseudos = missing,
loc = 'process_qmcpack_chain_kwargs',
valid_inputs = None,
fake = False,
sims = None,
**other_kwargs
):
if valid_inputs is None:
valid_inputs = []
#end if
valid_inputs.extend([
'basepath','orb_source','J2_source','J3_source','system',
'system_function','sim_list','pseudos','dft_pseudos',
'qmc_pseudos','sims'
])
for sname in qmcpack_chain_sim_names:
valid_inputs.append(sname+'_inputs')
#end for
if 'system_inputs' in other_kwargs:
system_inputs = other_kwargs['system_inputs']
else:
system_inputs = missing
#end if
sim_names = set(qmcpack_chain_sim_names)
# collect other inputs into tasks
# organize all sim_inputs and sim_defaults input data
invalid = []
tasks = obj()
task_defaults = obj()
for name in sim_names:
task_defaults[name] = defaults_version
#end for
for k,v in other_kwargs.iteritems():
if '_inputs' in k:
name,tmp = k.split('_',1)
if name in sim_names:
index = tmp.replace('inputs','')
if len(index)==0:
index = 1
else:
try:
index = int(index)
except:
error('inputs index must be an integer\ninput section: {0}\ninvalid index: {1}'.format(k,index),loc)
#end try
#end if
if not isinstance(v,(dict,obj)):
error('{0} must be a dict or obj\ntype received: {1}'.format(k,v.__class__.__name__),loc)
#end if
if name not in tasks:
tasks[name] = obj()
#end if
if index in tasks[name]:
error('repeated inputs index encountered\ninput section: {0}\ninvalid index: {1}'.format(k,index),loc)
#end if
if isinstance(v,SectionPlaceHolder):
tasks[name][index] = placeholder(inputs_in=obj())
else:
tasks[name][index] = obj(inputs_in = obj(**v)) # capture the inputs
#end if
else:
invalid.append(k)
#end if
elif k.endswith('_defaults') and k.split('_')[0] in sim_names:
name = k.split('_')[0]
if name in sim_names:
task_defaults[name] = v
else:
invalid.append(k)
#end if
else:
invalid.append(k)
#end if
#end for
for name in sim_names:
if name not in tasks:
tasks[name] = None
#end if
#end for
prevent_invalid_input(invalid,valid_inputs,loc)
set_loc(loc)
kw = obj()
run = obj()
for name in sim_names:
run[name] = tasks[name]!=None
#end for
kw.run = run
kw.basepath = basepath
kw.orb_source = orb_source
kw.J2_source = J2_source
kw.J3_source = J3_source
kw.pseudos = pseudos
kw.fake = fake
kw.sims = sims
kw.system_inputs_provided = not missing(system_inputs)
if not missing(system):
kw.system = system
elif not missing(system_inputs):
if missing(system_function):
system_function = generate_physical_system
#end if
system = system_function(**system_inputs)
kw.system = system
else:
assign_require(kw,'system' ,system )
#end if
assign_require(kw,'sim_list' ,sim_list )
if missing(pseudos):
if run.scf or run.nscf:
assign_require(kw,'dft_pseudos',dft_pseudos)
#end if
if run.opt or run.vmc or run.dmc:
assign_require(kw,'qmc_pseudos',qmc_pseudos)
#end if
#end if
for name in qmcpack_chain_sim_names: # order matters
if run[name]:
sim_tasks = tasks[name]
prev_index = None
for index in sorted(sim_tasks.keys()):
sim_task = sim_tasks[index]
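                # a task may set 'vary = <index>' to start from the inputs of the
                # task with that index (or of the previously processed index if
                # omitted) and override only the keys it sets explicitly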
if 'vary' in sim_task.inputs_in:
vary_index = sim_task.inputs_in.delete('vary')
else:
vary_index = prev_index
#end if
if vary_index!=None:
inputs_in = obj(**sim_tasks[vary_index].inputs_in)
inputs_in.set(**sim_task.inputs_in)
sim_task.inputs_in = inputs_in
#end if
sim_task.vary = vary_index
process_sim_inputs(
name = name,
inputs_in = sim_task.inputs_in,
defaults = task_defaults[name],
shared = kw,
task = sim_task,
loc = '{0} {1}_inputs'.format(loc,name),
)
if index==1:
sind = ''
else:
sind = str(index)
#end if
label = name+sind
sim_task.name = name
sim_task.label = label
sim_task.index = index
sim_task.workflow.set(
basepath = basepath,
path = os.path.join(basepath,label),
vary_index = vary_index,
)
prev_index = index
#end for
#end if
#end for
kw.tasks = tasks
return kw
#end def process_qmcpack_chain_kwargs
def gen_vasp_chain(ch,loc):
task = ch.task
wf = task.workflow
vasp = generate_vasp(
path = wf.path,
**task.inputs
)
return vasp
#end def gen_vasp_chain
def gen_scf_chain(ch,loc):
task = ch.task
wf = task.workflow
scf = generate_pwscf(
path = wf.path,
**task.inputs
)
return scf
#end def gen_scf_chain
def gen_nscf_chain(ch,loc):
task = ch.task
wf = task.workflow
deps = resolve_deps('nscf',[('scf','charge_density')],ch,loc)
# obtain user inputs to scf
scf_label,scf_index = resolve_label('scf',ch,loc,ind=True)
scf_inputs = obj(ch.tasks.scf[scf_index].inputs)
# pass inputs on to nscf after removing kpoint information
scf_inputs.delete_optional('kgrid')
scf_inputs.delete_optional('kshift')
task.inputs.set_optional(**scf_inputs)
# generate the nscf sim
if wf.use_scf_dir:
path = resolve_path('scf',ch,loc) # added to always run in scf dir
else:
path = wf.path
#end if
nscf = generate_pwscf(
path = path,
dependencies = deps,
**task.inputs
)
return nscf
#end def gen_nscf_chain
def gen_p2q_chain(ch,loc):
kw = ch.kw
task = ch.task
wf = task.workflow
run = kw.run
if run.nscf and wf.orb_src_type!='scf':
if wf.orb_src is None:
nscf_label,nscf_index = resolve_label('scf',ch,loc,ind=True)
else:
nscf_index = wf.orb_src
#end if
use_scf_dir = ch.tasks.nscf[nscf_index].workflow.use_scf_dir
orb_source = 'nscf'
if use_scf_dir:
path_source = 'scf'
else:
path_source = 'nscf'
#end if
else:
orb_source = 'scf'
path_source = 'scf'
#end if
path = resolve_path(path_source,ch,loc)
#path = resolve_path('scf',ch,loc) # added to always run in scf dir
deps = resolve_deps('p2q',[(orb_source,'orbitals')],ch,loc)
p2q = generate_pw2qmcpack(
path = path,
dependencies = deps,
**task.inputs
)
return p2q
#end def gen_p2q_chain
def gen_pwf_chain(ch,loc):
kw = ch.kw
task = ch.task
wf = task.workflow
run = kw.run
if run.nscf:
source = 'nscf'
else:
source = 'scf'
#end if
path = resolve_path(source,ch,loc)
deps = resolve_deps('pwf',[(source,'other')],ch,loc)
pwf = generate_projwfc(
path = path,
dependencies = deps,
**task.inputs
)
return pwf
#end def gen_pwf_chain
def gen_pp_chain(ch,loc):
kw = ch.kw
task = ch.task
wf = task.workflow
run = kw.run
source = 'scf'
path = resolve_path(source,ch,loc)
deps = resolve_deps('pp',[(source,'other')],ch,loc)
pp = generate_pp(
path = path,
dependencies = deps,
**task.inputs
)
return pp
#end def gen_pp_chain
def gen_opt_chain(ch,loc):
kw = ch.kw
task = ch.task
sims = ch.sims
wf = task.workflow
orbdep = [('p2q','orbitals')]
J2dep = orbdep + [(task.label+'_J2','jastrow')]
if wf.J2_src is None:
optJ2_dep = orbdep
else:
optJ2_dep = orbdep + [('opt_J2','jastrow')]
#end if
if wf.J3_src is None:
if wf.J2_run:
optJ3_dep = J2dep
else:
optJ3_dep = orbdep
#end if
elif wf.use_J2:
optJ3_dep = orbdep + [('opt_J2','jastrow')]
else:
optJ3_dep = orbdep + [('opt_J3','jastrow')]
#end if
if wf.J2_run:
label = task.label+'_J2'
deps = resolve_deps(label,optJ2_dep,ch,loc)
optJ2 = generate_qmcpack(
path = os.path.join(wf.basepath,label),
jastrows = jastrow_factor(**task.J2_inputs),
calculations = opt_sections(**task.sec_inputs),
dependencies = deps,
**task.inputs
)
sims[label] = optJ2
kw.def_srcs[task.name+'_J2'] = task.index
#end if
if wf.J3_run:
label = task.label+'_J3'
deps = resolve_deps(label,optJ3_dep,ch,loc)
optJ3 = generate_qmcpack(
path = os.path.join(wf.basepath,label),
jastrows = jastrow_factor(**task.J3_inputs),
calculations = opt_sections(**task.sec_inputs),
dependencies = deps,
**task.inputs
)
sims[label] = optJ3
kw.def_srcs[task.name+'_J3'] = task.index
#end if
return None
#end def gen_opt_chain
def gen_vmc(simlabel,ch,depset,J,test=0,loc=''):
task = ch.task
sims = ch.sims
wf = task.workflow
deps = resolve_deps(simlabel,depset[J],ch,loc)
if test:
qmcjob = wf.test_job
else:
qmcjob = wf.job
#end if
other_inputs = obj(task.inputs)
if 'jastrows' in other_inputs:
jastrows = other_inputs.delete('jastrows')
elif J=='J0':
jastrows = []
elif J=='J2':
jastrows = jastrow_factor(**task.J2_inputs)
elif J=='J3':
jastrows = jastrow_factor(**task.J3_inputs)
#end if
qmc = generate_qmcpack(
path = os.path.join(wf.basepath,simlabel),
job = qmcjob,
jastrows = jastrows,
calculations = vmc_sections(test=test,J0=J=='J0',**task.sec_inputs),
dependencies = deps,
**other_inputs
)
sims[simlabel] = qmc
#end def gen_vmc
def gen_vmc_chain(ch,loc):
task = ch.task
wf = task.workflow
orbdep = [('p2q','orbitals')]
if wf.prior_opt:
J2dep = orbdep + [('opt_J2','jastrow')]
J3dep = orbdep + [('opt_J3','jastrow')]
else:
J2dep = orbdep
J3dep = orbdep
| |
 is not active yet. Down - actively trying to bring up a protocol session, but negotiation didn't successfully complete (yet). Up - session came up successfully.
- Sources (number): Number of Sources
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
- StateCounts (dict(total:number,notStarted:number,down:number,up:number)): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
- Status (str(configured | error | mixed | notStarted | started | starting | stopping)): Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
Returns
-------
- self: This instance with matching pimV4Interface resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of pimV4Interface data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the pimV4Interface resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def get_device_ids(self, PortNames=None, Active=None, AutoPickNeighbor=None, BootstrapHashMaskLength=None, BootstrapInterval=None, BootstrapPriority=None, BootstrapTimeout=None, DisableTriggered=None, DiscardLearnedRpInfo=None, EnableBfdRegistration=None, EnableBootstrap=None, EnablePrune=None, ForceSemantic=None, HelloHoldTime=None, HelloInterval=None, LanPruneTbit=None, LearnSelectedRpSet=None, OverrideInterval=None, PruneDelay=None, SendBidirectional=None, SendGenerationIdOption=None, SendGenerationMode=None, SupportUnicastBsm=None, TriggeredHelloDelay=None, V4Neighbor=None):
"""Base class infrastructure that gets a list of pimV4Interface device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- Active (str): optional regex of active
- AutoPickNeighbor (str): optional regex of autoPickNeighbor
- BootstrapHashMaskLength (str): optional regex of bootstrapHashMaskLength
- BootstrapInterval (str): optional regex of bootstrapInterval
- BootstrapPriority (str): optional regex of bootstrapPriority
- BootstrapTimeout (str): optional regex of bootstrapTimeout
- DisableTriggered (str): optional regex of disableTriggered
- DiscardLearnedRpInfo (str): optional regex of discardLearnedRpInfo
- EnableBfdRegistration (str): optional regex of enableBfdRegistration
- EnableBootstrap (str): optional regex of enableBootstrap
- EnablePrune (str): optional regex of enablePrune
- ForceSemantic (str): optional regex of forceSemantic
- HelloHoldTime (str): optional regex of helloHoldTime
- HelloInterval (str): optional regex of helloInterval
- LanPruneTbit (str): optional regex of lanPruneTbit
- LearnSelectedRpSet (str): optional regex of learnSelectedRpSet
- OverrideInterval (str): optional regex of overrideInterval
- PruneDelay (str): optional regex of pruneDelay
- SendBidirectional (str): optional regex of sendBidirectional
- SendGenerationIdOption (str): optional regex of sendGenerationIdOption
- SendGenerationMode (str): optional regex of sendGenerationMode
- SupportUnicastBsm (str): optional regex of supportUnicastBsm
- TriggeredHelloDelay (str): optional regex of triggeredHelloDelay
- V4Neighbor (str): optional regex of v4Neighbor
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
def Abort(self, *args, **kwargs):
"""Executes the abort operation on the server.
Abort CPF control plane (equals to demote to kUnconfigured state).
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
abort(SessionIndices=list)
--------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
abort(SessionIndices=string)
----------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('abort', payload=payload, response_object=None)
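    # Hedged usage illustration (pim_if stands for a pimV4Interface instance
    # obtained from the IxNetwork topology tree; it is not defined in this file):
    #
    #     pim_if.Abort(SessionIndices=[1, 2, 3])   # list-of-session-numbers form
    #     pim_if.Abort(SessionIndices='1-4;6')     # session-range string form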
def ClearLearnedInfo(self, *args, **kwargs):
"""Executes the clearLearnedInfo operation on the server.
Clear Learned Info
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
clearLearnedInfo(SessionIndices=list)
-------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
clearLearnedInfo(SessionIndices=string)
---------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
clearLearnedInfo(Arg2=list)list
-------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('clearLearnedInfo', payload=payload, response_object=None)
def GetLearnedInfo(self, *args, **kwargs):
"""Executes the getLearnedInfo operation on the server.
Get Learned Info
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
getLearnedInfo(SessionIndices=list)
-----------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
getLearnedInfo(SessionIndices=string)
-------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
getLearnedInfo(Arg2=list)list
-----------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('getLearnedInfo', payload=payload, response_object=None)
def IncrementGenID(self, *args, **kwargs):
"""Executes the incrementGenID operation on the server.
Increment GenID
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
incrementGenID(SessionIndices=list)
-----------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
incrementGenID(SessionIndices=string)
-------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('incrementGenID', payload=payload, response_object=None)
def IncrementGenId(self, *args, **kwargs):
"""Executes the incrementGenId operation on the server.
Stops the protocol state machine for the given protocol session instances.
incrementGenId(Arg2=list)list
-----------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('incrementGenId', payload=payload, response_object=None)
def RestartDown(self, *args, **kwargs):
"""Executes the restartDown operation on the server.
Stop and start interfaces and sessions that are in Down state.
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
restartDown(SessionIndices=list)
--------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
restartDown(SessionIndices=string)
----------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('restartDown', payload=payload, response_object=None)
def ResumeBSM(self, *args, **kwargs):
"""Executes the resumeBSM operation on the server.
Resume Bootstrap
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
resumeBSM(SessionIndices=list)
------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
resumeBSM(SessionIndices=string)
--------------------------------
- | |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import csv
import datetime
import django
import logging
import os
import pysftp
import re
import shutil
import sys
import time
import uuid
from django.db import connections
from django.conf import settings
from djimix.core.utils import get_connection
from djimix.core.utils import xsql
from djtools.fields import TODAY
from djtools.utils.mail import send_mail
# django settings for shell environment
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djmapache.settings.shell')
django.setup()
# informix environment
os.environ['INFORMIXSERVER'] = settings.INFORMIXSERVER
os.environ['DBSERVERNAME'] = settings.DBSERVERNAME
os.environ['INFORMIXDIR'] = settings.INFORMIXDIR
os.environ['ODBCINI'] = settings.ODBCINI
os.environ['ONCONFIG'] = settings.ONCONFIG
os.environ['INFORMIXSQLHOSTS'] = settings.INFORMIXSQLHOSTS
os.environ['LD_LIBRARY_PATH'] = settings.LD_LIBRARY_PATH
os.environ['LD_RUN_PATH'] = settings.LD_RUN_PATH
# set up command-line options
desc = """
Upload Common Application data to CX
"""
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
'--test',
action='store_true',
help='Dry run?',
dest='test',
)
parser.add_argument(
'-d',
'--database',
help='database name.',
dest='database',
)
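# Example invocation (hedged; the script filename and database value below are
# placeholders, not taken from this file):
#
#     python commonapp_upload.py --database <database> --test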
# some constants
TO = settings.COMMONAPP_TO_EMAIL
FROM = settings.COMMONAPP_FROM_EMAIL
SUBJECT = '[Common Application] failed'
TEMPLATE = 'email.html'
INFORMIX_DEBUG = settings.INFORMIX_DEBUG
HEADER = '{0}\n'.format('-' * 80)
# create logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# create console handler and set level to info
phandler = logging.FileHandler('{0}commonapp.log'.format(settings.LOG_FILEPATH))
phandler.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s: %(levelname)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p',
)
phandler.setFormatter(formatter)
logger.addHandler(phandler)
# create error file handler and set level to error
phandler = logging.FileHandler('{0}commonapp_error.log'.format(
settings.LOG_FILEPATH,
))
phandler.setLevel(logging.ERROR)
formatter = logging.Formatter(
'%(asctime)s: %(levelname)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p',
)
phandler.setFormatter(formatter)
logger.addHandler(phandler)
def file_download():
"""Fetch the file from Common App file from server."""
cnopts = pysftp.CnOpts()
cnopts.hostkeys = None
# External connection information for Common Application server
xtrnl_connection = {
'host': settings.COMMONAPP_HOST,
'username': settings.COMMONAPP_USER,
        'password': settings.COMMONAPP_PASSWORD,
'cnopts': cnopts,
}
###########################################################################
    # sFTP GET downloads the Common App file from the server
    # and saves it in the local directory.
###########################################################################
with pysftp.Connection(**xtrnl_connection) as sftp:
# Remote Path is the Common App server and once logged in we fetch
# directory listing
remotepath = sftp.listdir()
# Loop through remote path directory list
for filename in remotepath:
remotefile = filename
# set local directory for which the common app file will be
# downloaded to
local_dir = ('{0}'.format(settings.COMMONAPP_CSV_OUTPUT))
localpath = local_dir + remotefile
# GET file from sFTP server and download it to localpath
sftp.get(remotefile, localpath)
#############################################################
# Delete original file %m_%d_%y_%h_%i_%s_Applications(%c).txt
# from sFTP (Common App) server
#############################################################
sftp.remove(filename)
def insert_exam(phile, aid, ctgry, cmpl_date, score1, score2, score3, score4, score5, score6):
"""Execute query for test scores (ACT, SAT)."""
if cmpl_date != '':
# creates examtmp record if there are any test scores
try:
q_exam = """
INSERT INTO
app_examtmp_rec (
id, ctgry, cmpl_date, self_rpt, site,
score1, score2, score3, score4, score5, score6
)
VALUES
(
{0}, "{1}", TO_DATE("{2}", "%Y-%m-%d"), "Y", "CART",
"{3}", "{4}", "{5}", "{6}", "{7}", "{8}"
)
""".format(
aid, ctgry, cmpl_date, score1, score2, score3, score4,
score5, score6,
)
phile.write('{0}\n'.format(q_exam))
logger.info('Inserted into app_examtmp_rec\n')
            connection = get_connection(EARL)
with connection:
xsql(q_exam, connection, key=INFORMIX_DEBUG)
except Exception:
            logger.exception('insert into app_examtmp_rec failed')
def munge(localpath):
"""Obtain the CommonApp file and munge it."""
# set start_time in order to see how long script takes to execute
start_time = time.time()
# set date and time to be added to the filename
datetimestr = time.strftime('%Y%m%d%H%M%S')
# initializing file counter
file_count = 1
# set destination path and new filename that it will be
# renamed to when archived
destination = ('{0}commonapp-{1}_{2}.txt'.format(
settings.COMMONAPP_CSV_ARCHIVED,
datetimestr,
str(file_count),
))
# renamed file name to be processed
renamedfile = ('{0}carthage_applications.txt'.format(
settings.COMMONAPP_CSV_OUTPUT,
))
# renaming file fetched from Common App server
    # The filename coming in
# %m_%d_%y_%h_%i_%s_Applications(%c).txt
# The filename renamed to carthage_applications.txt
shutil.move(localpath, renamedfile)
# set name for the sqloutput file
sqloutput = ('{0}/commonapp_output.sql'.format(os.getcwd()))
# create the sql file
with open(sqloutput, 'a') as sql_file:
try:
# establish mySQL database connection
cursor = connections['admissions'].cursor()
engine = get_engine(EARL)
# set directory and filename where to read from
filename=('{0}carthage_applications.txt'.format(
settings.COMMONAPP_CSV_OUTPUT,
))
sql_file.write(HEADER)
sql_file.write('-- CREATES APPLICATION FROM COMMON APP DATA\n')
sql_file.write(HEADER)
# open text file from CommonApp
            with open(filename, 'r', newline='') as common_apps:
reader = csv.DictReader(common_apps, delimiter='|')
                # set apptmp_no_list variable which is used to collect each apptmp_no
apptmp_no_list = []
# set variable which is used to get count of TRAD applications
app_TRAD_list = []
# set variable which is used to get count of PTSM applications
app_PTSM_list = []
for row in reader:
# prints the records information for all fields
#print([col+'='+row[col] for col in reader.fieldnames])
# create UUID
temp_uuid = (uuid.uuid4())
# checks if waiver code is used which determines the
# payment method
if row['feeWaiverCode'] == '':
paymentMethod = 'CREDIT'
waiverCode = ''
else:
paymentMethod = 'WAIVER'
waiverCode = row['feeWaiverCode']
# creates apptmp record
try:
q_create_app = """
INSERT INTO apptmp_rec (
add_date, add_tm, app_source, stat, reason_txt,
payment_method, waiver_code
)
VALUES (
TODAY, TO_CHAR(CURRENT, '%H%M'), "WEBA", "H",
"{0}", "{1}", "{2}"
);
""".format(temp_uuid, paymentMethod, waiverCode)
sql_file.write('{0}\n'.format(q_create_app))
logger.info("Inserted into apptmp_rec")
do_sql(q_create_app, key=INFORMIX_DEBUG, earl=EARL)
except Exception:
                        logger.exception('insert into apptmp_rec failed')
# fetch id from apptmp_no table
lookup_apptmp_no = """
SELECT
apptmp_no
FROM
apptmp_rec
WHERE
reason_txt = "{0}";
""".format(temp_uuid)
sqlresult = do_sql(lookup_apptmp_no, earl=EARL)
sql_file.write('{0}\n'.format(lookup_apptmp_no))
results = sqlresult.fetchone()
# sets the apptmp_no variable which is used through out
# the queries
apptmp_no = (results[0])
apptmp_no_list.append(str(apptmp_no))
sql_file.write(HEADER)
sql_file.write('{0} {1} {2} - {3}\n'.format(
'-- START INSERT NEW STUDENT APPLICATION for:',
row['firstName'],
row['lastName'],
str(apptmp_no),
))
sql_file.write(HEADER)
logger.info('Begin Student Application:')
logger.info(
str(apptmp_no),
row['firstName'],
row['lastName'],
)
# fetch id from app_voucher on mySQL dB (admissions)
# insert into app_vouchers_users on mySQL dB
if row['feeWaiverCode'] != '':
try:
q_match = """
SELECT
id
FROM
app_vouchers
WHERE
NOW() < expiration
AND
REPLACE(code,"-","") = "{0}"
""" .format(row['feeWaiverCode'].replace('-', ''))
sql_file.write('{0}\n'.format(q_match))
cursor.execute(q_match)
voucher_result = cursor.fetchone()
except Exception:
                            logger.exception('app_vouchers lookup failed')
                    # set voucher_id from the lookup result, or to zero when no waiver row matched
if voucher_result:
# if results are returned set voucher_id to result
voucher_id = (voucher_result[0])
# inserts voucher id into app_voucher_users
try:
q_update_voucher = """
INSERT INTO
app_voucher_users (
voucher_id, app_id, submitted_on
)
VALUES (
{0}, {1}, NOW()
)
""" .format(voucher_id, apptmp_no)
sql_file.write('{0}\n'.format(q_update_voucher))
logger.info("Inserted into app_voucher_users")
cursor.execute(q_update_voucher)
except Exception:
                                logger.exception('insert into app_voucher_users failed')
else:
voucher_id = 0
sql_file.write("--There were no waiver codes used ")
sql_file.write("for this application\n")
# creates application temp record
try:
q_create_id = """
INSERT INTO
app_idtmp_rec (
id, firstname, lastname, suffixname,
cc_username, cc_password, addr_line1,
addr_line2, city, st, zip, ctry, phone, ss_no,
middlename, aa, add_date, ofc_add_by,
upd_date, purge_date, prsp_no, name_sndx,
correct_addr, decsd, valid
)
VALUES (
{0}, "{1}", "{2}", "{3}", "{4}", "{0}", "{5}",
"{6}", "{7}", "{8}", "{9}", "{10}", "{11}",
"{12}", "{13}", "PERM", TODAY, "ADMS", TODAY,
TODAY + 2 UNITS YEAR, "0", "", "Y", "N", "Y"
)""".format(
apptmp_no, row['firstName'], row['lastName'],
row['suffix'], row['emailAddress'],
                                re.sub(r'\W+', ' ', row['permanentAddress1']),
                                re.sub(r'\W+', ' ', row['permanentAddress2']),
row['permanentAddressCity'],
row['permanentAddressState'],
row['permanentAddressZip'],
row['permanentAddressCountry'],
row['preferredPhoneNumber'].replace('+1.', ''),
row['ssn'], row['middleName'],
)
sql_file.write('{0}\n'.format(q_create_id))
logger.info("Inserted into app_idtmp_rec");
do_sql(q_create_id, key=INFORMIX_DEBUG, earl=EARL)
except Exception:
                        logger.exception('insert into app_idtmp_rec failed')
##################################################################################
# The Y/N value of contactConsent may seem a little backwards intuitively.
# Y = The student has opted out meaning Carthage does NOT have permission to text
# N = The student has opted in meaning Carthage does have permission to text
#################################################################################
################################################
# BEGIN - preferred phone for student
################################################
if row['preferredPhone'] == 'Mobile':
if row['contactConsent'] == 'Y':
contactConsent = 'N'
elif row['contactConsent'] == 'N':
contactConsent = 'Y'
# preferred phone is a Mobile
try:
q_insert_aa_cell = """
INSERT INTO
app_aatmp_rec (
id, aa, beg_date, phone, opt_out
)
VALUES (
{0}, "CELL", TODAY, "{1}", "{2}"
)
""".format(
apptmp_no,
row['preferredPhoneNumber'].replace('+1.', ''),
row['contactConsent'],
)
sql_file.write('{0}\n'.format(q_insert_aa_cell))
logger.info("Inserted into app_aatmp_rec")
do_sql(q_insert_aa_cell, key=INFORMIX_DEBUG, earl=EARL)
except Exception:
                        logger.exception('insert into app_aatmp_rec failed')
################################################
# BEGIN - alternate phone available for student
################################################
if row['alternatePhoneAvailable'] != '' and row['alternatePhoneAvailable'] != 'N':
altType = 'CELL'
if row['contactConsent'] == 'Y':
contactConsent = 'N'
elif row['contactConsent'] == 'N':
contactConsent = 'Y'
if row['alternatePhoneAvailable'] == 'Home':
altType = 'HOME'
# alternate phone is available
try:
q_insert_aa_cell = """
INSERT INTO
app_aatmp_rec (
id, aa, beg_date, phone, opt_out
)
VALUES (
{0}, "{1}", TODAY, "{2}", "{3}"
)
""".format(
apptmp_no,
altType,
row["alternatePhoneNumber"].replace('+1.', ''),
row["contactConsent"],
)
sql_file.write('{0}\n'.format(q_insert_aa_cell))
logger.info("Inserted into app_aatmp_rec")
do_sql(q_insert_aa_cell, key=INFORMIX_DEBUG, earl=EARL)
except Exception:
                        logger.exception('insert into app_aatmp_rec failed')
# creates application site record
try:
q_create_site = """
INSERT INTO
app_sitetmp_rec (
id, home, site, beg_date
)
VALUES (
{0}, "Y", "CART", TODAY
)
""".format(apptmp_no)
sql_file.write('{0}\n'.format(q_create_site))
logger.info("Inserted into app_sitetmp_rec")
do_sql(q_create_site, key=INFORMIX_DEBUG, earl=EARL)
except Exception:
                        logger.exception('insert into app_sitetmp_rec failed')
# determine the type of studentStatus and set studentStatus
# and Hours Enrolled
if row['studentStatus'] == 'Full Time':
studentStatus = 'TRAD'
intendHoursEnrolled = 16
app_TRAD_list.append(str(apptmp_no))
elif row['studentStatus'] == 'Part Time':
studentStatus = 'TRAD'
intendHoursEnrolled = 16
app_PTSM_list.append(str(apptmp_no))
###############################################
# Adjust the code for Common App
# to make any app that comes in through
# Common App full time
| |
"""Tests for traitlets.config.configurable"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import logging
from unittest import TestCase
from pytest import mark
from traitlets.config.application import Application
from traitlets.config.configurable import (
Configurable,
LoggingConfigurable,
SingletonConfigurable,
)
from traitlets.config.loader import Config
from traitlets.log import get_logger
from traitlets.traitlets import (
CaselessStrEnum,
Dict,
Enum,
Float,
FuzzyEnum,
Integer,
List,
Set,
Unicode,
_deprecations_shown,
validate,
)
from ...tests._warnings import expected_warnings
class MyConfigurable(Configurable):
a = Integer(1, help="The integer a.").tag(config=True)
b = Float(1.0, help="The integer b.").tag(config=True)
c = Unicode("no config")
mc_help = """MyConfigurable(Configurable) options
------------------------------------
--MyConfigurable.a=<Integer>
The integer a.
Default: 1
--MyConfigurable.b=<Float>
The integer b.
Default: 1.0"""
mc_help_inst = """MyConfigurable(Configurable) options
------------------------------------
--MyConfigurable.a=<Integer>
The integer a.
Current: 5
--MyConfigurable.b=<Float>
The integer b.
Current: 4.0"""
# On Python 3, the Integer trait is a synonym for Int
mc_help = mc_help.replace("<Integer>", "<Int>")
mc_help_inst = mc_help_inst.replace("<Integer>", "<Int>")
class Foo(Configurable):
a = Integer(0, help="The integer a.").tag(config=True)
b = Unicode("nope").tag(config=True)
flist = List([]).tag(config=True)
fdict = Dict().tag(config=True)
class Bar(Foo):
b = Unicode("gotit", help="The string b.").tag(config=False)
c = Float(help="The string c.").tag(config=True)
bset = Set([]).tag(config=True, multiplicity="+")
bset_values = Set([2, 1, 5]).tag(config=True, multiplicity="+")
bdict = Dict().tag(config=True, multiplicity="+")
bdict_values = Dict({1: "a", "0": "b", 5: "c"}).tag(config=True, multiplicity="+")
foo_help = """Foo(Configurable) options
-------------------------
--Foo.a=<Int>
The integer a.
Default: 0
--Foo.b=<Unicode>
Default: 'nope'
--Foo.fdict=<key-1>=<value-1>...
Default: {}
--Foo.flist=<list-item-1>...
Default: []"""
bar_help = """Bar(Foo) options
----------------
--Bar.a=<Int>
The integer a.
Default: 0
--Bar.bdict <key-1>=<value-1>...
Default: {}
--Bar.bdict_values <key-1>=<value-1>...
Default: {1: 'a', '0': 'b', 5: 'c'}
--Bar.bset <set-item-1>...
Default: set()
--Bar.bset_values <set-item-1>...
Default: {1, 2, 5}
--Bar.c=<Float>
The string c.
Default: 0.0
--Bar.fdict=<key-1>=<value-1>...
Default: {}
--Bar.flist=<list-item-1>...
Default: []"""
class TestConfigurable(TestCase):
def test_default(self):
c1 = Configurable()
c2 = Configurable(config=c1.config)
c3 = Configurable(config=c2.config)
self.assertEqual(c1.config, c2.config)
self.assertEqual(c2.config, c3.config)
def test_custom(self):
config = Config()
config.foo = "foo"
config.bar = "bar"
c1 = Configurable(config=config)
c2 = Configurable(config=c1.config)
c3 = Configurable(config=c2.config)
self.assertEqual(c1.config, config)
self.assertEqual(c2.config, config)
self.assertEqual(c3.config, config)
# Test that copies are not made
self.assertTrue(c1.config is config)
self.assertTrue(c2.config is config)
self.assertTrue(c3.config is config)
self.assertTrue(c1.config is c2.config)
self.assertTrue(c2.config is c3.config)
def test_inheritance(self):
config = Config()
config.MyConfigurable.a = 2
config.MyConfigurable.b = 2.0
c1 = MyConfigurable(config=config)
c2 = MyConfigurable(config=c1.config)
self.assertEqual(c1.a, config.MyConfigurable.a)
self.assertEqual(c1.b, config.MyConfigurable.b)
self.assertEqual(c2.a, config.MyConfigurable.a)
self.assertEqual(c2.b, config.MyConfigurable.b)
def test_parent(self):
config = Config()
config.Foo.a = 10
config.Foo.b = "wow"
config.Bar.b = "later"
config.Bar.c = 100.0
f = Foo(config=config)
with expected_warnings(["`b` not recognized"]):
b = Bar(config=f.config)
self.assertEqual(f.a, 10)
self.assertEqual(f.b, "wow")
self.assertEqual(b.b, "gotit")
self.assertEqual(b.c, 100.0)
def test_override1(self):
config = Config()
config.MyConfigurable.a = 2
config.MyConfigurable.b = 2.0
c = MyConfigurable(a=3, config=config)
self.assertEqual(c.a, 3)
self.assertEqual(c.b, config.MyConfigurable.b)
self.assertEqual(c.c, "no config")
def test_override2(self):
config = Config()
config.Foo.a = 1
config.Bar.b = "or" # Up above b is config=False, so this won't do it.
config.Bar.c = 10.0
with expected_warnings(["`b` not recognized"]):
c = Bar(config=config)
self.assertEqual(c.a, config.Foo.a)
self.assertEqual(c.b, "gotit")
self.assertEqual(c.c, config.Bar.c)
with expected_warnings(["`b` not recognized"]):
c = Bar(a=2, b="and", c=20.0, config=config)
self.assertEqual(c.a, 2)
self.assertEqual(c.b, "and")
self.assertEqual(c.c, 20.0)
def test_help(self):
self.assertEqual(MyConfigurable.class_get_help(), mc_help)
self.assertEqual(Foo.class_get_help(), foo_help)
self.assertEqual(Bar.class_get_help(), bar_help)
def test_help_inst(self):
inst = MyConfigurable(a=5, b=4)
self.assertEqual(MyConfigurable.class_get_help(inst), mc_help_inst)
def test_generated_config_enum_comments(self):
class MyConf(Configurable):
an_enum = Enum("Choice1 choice2".split(), help="Many choices.").tag(config=True)
help_str = "Many choices."
enum_choices_str = "Choices: any of ['Choice1', 'choice2']"
rst_choices_str = "MyConf.an_enum : any of ``'Choice1'``|``'choice2'``"
or_none_str = "or None"
cls_help = MyConf.class_get_help()
self.assertIn(help_str, cls_help)
self.assertIn(enum_choices_str, cls_help)
self.assertNotIn(or_none_str, cls_help)
cls_cfg = MyConf.class_config_section()
self.assertIn(help_str, cls_cfg)
self.assertIn(enum_choices_str, cls_cfg)
self.assertNotIn(or_none_str, cls_help)
# Check order of Help-msg <--> Choices sections
self.assertGreater(cls_cfg.index(enum_choices_str), cls_cfg.index(help_str))
rst_help = MyConf.class_config_rst_doc()
self.assertIn(help_str, rst_help)
self.assertIn(rst_choices_str, rst_help)
self.assertNotIn(or_none_str, rst_help)
class MyConf2(Configurable):
an_enum = Enum(
"Choice1 choice2".split(),
allow_none=True,
default_value="choice2",
help="Many choices.",
).tag(config=True)
defaults_str = "Default: 'choice2'"
cls2_msg = MyConf2.class_get_help()
self.assertIn(help_str, cls2_msg)
self.assertIn(enum_choices_str, cls2_msg)
self.assertIn(or_none_str, cls2_msg)
self.assertIn(defaults_str, cls2_msg)
# Check order of Default <--> Choices sections
self.assertGreater(cls2_msg.index(defaults_str), cls2_msg.index(enum_choices_str))
cls2_cfg = MyConf2.class_config_section()
self.assertIn(help_str, cls2_cfg)
self.assertIn(enum_choices_str, cls2_cfg)
self.assertIn(or_none_str, cls2_cfg)
self.assertIn(defaults_str, cls2_cfg)
# Check order of Default <--> Choices sections
self.assertGreater(cls2_cfg.index(defaults_str), cls2_cfg.index(enum_choices_str))
def test_generated_config_strenum_comments(self):
help_str = "Many choices."
defaults_str = "Default: 'choice2'"
or_none_str = "or None"
class MyConf3(Configurable):
an_enum = CaselessStrEnum(
"Choice1 choice2".split(),
allow_none=True,
default_value="choice2",
help="Many choices.",
).tag(config=True)
enum_choices_str = "Choices: any of ['Choice1', 'choice2'] (case-insensitive)"
cls3_msg = MyConf3.class_get_help()
self.assertIn(help_str, cls3_msg)
self.assertIn(enum_choices_str, cls3_msg)
self.assertIn(or_none_str, cls3_msg)
self.assertIn(defaults_str, cls3_msg)
# Check order of Default <--> Choices sections
self.assertGreater(cls3_msg.index(defaults_str), cls3_msg.index(enum_choices_str))
cls3_cfg = MyConf3.class_config_section()
self.assertIn(help_str, cls3_cfg)
self.assertIn(enum_choices_str, cls3_cfg)
self.assertIn(or_none_str, cls3_cfg)
self.assertIn(defaults_str, cls3_cfg)
# Check order of Default <--> Choices sections
self.assertGreater(cls3_cfg.index(defaults_str), cls3_cfg.index(enum_choices_str))
class MyConf4(Configurable):
an_enum = FuzzyEnum(
"Choice1 choice2".split(),
allow_none=True,
default_value="choice2",
help="Many choices.",
).tag(config=True)
enum_choices_str = "Choices: any case-insensitive prefix of ['Choice1', 'choice2']"
cls4_msg = MyConf4.class_get_help()
self.assertIn(help_str, cls4_msg)
self.assertIn(enum_choices_str, cls4_msg)
self.assertIn(or_none_str, cls4_msg)
self.assertIn(defaults_str, cls4_msg)
# Check order of Default <--> Choices sections
self.assertGreater(cls4_msg.index(defaults_str), cls4_msg.index(enum_choices_str))
cls4_cfg = MyConf4.class_config_section()
self.assertIn(help_str, cls4_cfg)
self.assertIn(enum_choices_str, cls4_cfg)
self.assertIn(or_none_str, cls4_cfg)
self.assertIn(defaults_str, cls4_cfg)
# Check order of Default <--> Choices sections
self.assertGreater(cls4_cfg.index(defaults_str), cls4_cfg.index(enum_choices_str))
class TestSingletonConfigurable(TestCase):
def test_instance(self):
class Foo(SingletonConfigurable):
pass
self.assertEqual(Foo.initialized(), False)
foo = Foo.instance()
self.assertEqual(Foo.initialized(), True)
self.assertEqual(foo, Foo.instance())
self.assertEqual(SingletonConfigurable._instance, None)
def test_inheritance(self):
class Bar(SingletonConfigurable):
pass
class Bam(Bar):
pass
self.assertEqual(Bar.initialized(), False)
self.assertEqual(Bam.initialized(), False)
bam = Bam.instance()
self.assertEqual(Bar.initialized(), True)
self.assertEqual(Bam.initialized(), True)
self.assertEqual(bam, Bam._instance)
self.assertEqual(bam, Bar._instance)
self.assertEqual(SingletonConfigurable._instance, None)
class TestLoggingConfigurable(TestCase):
def test_parent_logger(self):
class Parent(LoggingConfigurable):
pass
class Child(LoggingConfigurable):
pass
log = get_logger().getChild("TestLoggingConfigurable")
parent = Parent(log=log)
child = Child(parent=parent)
self.assertEqual(parent.log, log)
self.assertEqual(child.log, log)
parent = Parent()
child = Child(parent=parent, log=log)
self.assertEqual(parent.log, get_logger())
self.assertEqual(child.log, log)
def test_parent_not_logging_configurable(self):
class Parent(Configurable):
pass
class Child(LoggingConfigurable):
pass
parent = Parent()
child = Child(parent=parent)
self.assertEqual(child.log, get_logger())
class MyParent(Configurable):
pass
class MyParent2(MyParent):
pass
class TestParentConfigurable(TestCase):
def test_parent_config(self):
cfg = Config(
{
"MyParent": {
"MyConfigurable": {
"b": 2.0,
}
}
}
)
parent = MyParent(config=cfg)
myc = MyConfigurable(parent=parent)
self.assertEqual(myc.b, parent.config.MyParent.MyConfigurable.b)
def test_parent_inheritance(self):
cfg = Config(
{
"MyParent": {
"MyConfigurable": {
"b": 2.0,
}
}
}
)
parent = MyParent2(config=cfg)
myc = MyConfigurable(parent=parent)
self.assertEqual(myc.b, parent.config.MyParent.MyConfigurable.b)
def test_multi_parent(self):
cfg = Config(
{
"MyParent2": {
"MyParent": {
"MyConfigurable": {
"b": 2.0,
}
},
# this one shouldn't count
"MyConfigurable": {
"b": 3.0,
},
}
}
)
parent2 = MyParent2(config=cfg)
parent = MyParent(parent=parent2)
myc = MyConfigurable(parent=parent)
self.assertEqual(myc.b, parent.config.MyParent2.MyParent.MyConfigurable.b)
def test_parent_priority(self):
cfg = Config(
{
"MyConfigurable": {
"b": 2.0,
},
"MyParent": {
"MyConfigurable": {
"b": 3.0,
}
},
"MyParent2": {
"MyConfigurable": {
"b": 4.0,
}
},
}
)
parent = MyParent2(config=cfg)
myc = MyConfigurable(parent=parent)
self.assertEqual(myc.b, parent.config.MyParent2.MyConfigurable.b)
def test_multi_parent_priority(self):
cfg = Config(
{
"MyConfigurable": {
"b": 2.0,
},
"MyParent": {
"MyConfigurable": {
"b": 3.0,
},
},
"MyParent2": {
"MyConfigurable": {
"b": 4.0,
},
"MyParent": {
"MyConfigurable": {
"b": 5.0,
},
},
},
}
)
parent2 = MyParent2(config=cfg)
parent = MyParent2(parent=parent2)
myc = MyConfigurable(parent=parent)
self.assertEqual(myc.b, parent.config.MyParent2.MyParent.MyConfigurable.b)
class Containers(Configurable):
lis = List().tag(config=True)
def _lis_default(self):
return [-1]
s = Set().tag(config=True)
def _s_default(self):
return {"a"}
d = Dict().tag(config=True)
def _d_default(self):
return {"a": "b"}
class TestConfigContainers(TestCase):
def test_extend(self):
c = Config()
c.Containers.lis.extend(list(range(5)))
obj = Containers(config=c)
self.assertEqual(obj.lis, list(range(-1, 5)))
def test_insert(self):
c = Config()
c.Containers.lis.insert(0, "a")
c.Containers.lis.insert(1, "b")
obj = Containers(config=c)
self.assertEqual(obj.lis, ["a", "b", -1])
def test_prepend(self):
c = Config()
c.Containers.lis.prepend([1, 2])
c.Containers.lis.prepend([2, 3])
obj = Containers(config=c)
self.assertEqual(obj.lis, [2, 3, 1, 2, -1])
def test_prepend_extend(self):
c = Config()
c.Containers.lis.prepend([1, 2])
c.Containers.lis.extend([2, 3])
obj = Containers(config=c)
self.assertEqual(obj.lis, [1, 2, -1, 2, 3])
def test_append_extend(self):
c = Config()
c.Containers.lis.append([1, 2])
c.Containers.lis.extend([2, 3])
obj = Containers(config=c)
self.assertEqual(obj.lis, [-1, [1, 2], 2, 3])
def test_extend_append(self):
c = Config()
c.Containers.lis.extend([2, 3])
c.Containers.lis.append([1, 2])
obj = Containers(config=c)
self.assertEqual(obj.lis, [-1, 2, 3, [1, 2]])
def test_insert_extend(self):
c = Config()
c.Containers.lis.insert(0, 1)
c.Containers.lis.extend([2, 3])
obj = Containers(config=c)
self.assertEqual(obj.lis, [1, -1, 2, 3])
def test_set_update(self):
c = Config()
c.Containers.s.update({0, 1, 2})
c.Containers.s.update({3})
obj = Containers(config=c)
self.assertEqual(obj.s, {"a", 0, 1, 2, 3})
def test_dict_update(self):
c = Config()
c.Containers.d.update({"c": "d"})
c.Containers.d.update({"e": "f"})
obj = Containers(config=c)
self.assertEqual(obj.d, {"a": "b", "c": "d", "e": "f"})
def test_update_twice(self):
c = Config()
c.MyConfigurable.a = 5
m = MyConfigurable(config=c)
self.assertEqual(m.a, 5)
c2 = Config()
c2.MyConfigurable.a = 10
m.update_config(c2)
self.assertEqual(m.a, 10)
c2.MyConfigurable.a = 15
m.update_config(c2)
self.assertEqual(m.a, 15)
def test_update_self(self):
"""update_config with same config object still triggers config_changed"""
c = Config()
c.MyConfigurable.a = 5
m = MyConfigurable(config=c)
self.assertEqual(m.a, 5)
c.MyConfigurable.a = 10
m.update_config(c)
self.assertEqual(m.a, 10)
def test_config_default(self):
class SomeSingleton(SingletonConfigurable):
pass
class DefaultConfigurable(Configurable):
a = Integer().tag(config=True)
def _config_default(self):
if SomeSingleton.initialized():
return SomeSingleton.instance().config
return Config()
c = Config()
c.DefaultConfigurable.a = 5
d1 = DefaultConfigurable()
self.assertEqual(d1.a, 0)
single = SomeSingleton.instance(config=c)
d2 = DefaultConfigurable()
self.assertIs(d2.config, single.config)
self.assertEqual(d2.a, 5)
def test_config_default_deprecated(self):
"""Make sure configurables work even with the deprecations in traitlets"""
class SomeSingleton(SingletonConfigurable):
pass
# reset deprecation limiter
_deprecations_shown.clear()
with expected_warnings([]):
class DefaultConfigurable(Configurable):
a = Integer(config=True)
def _config_default(self):
if SomeSingleton.initialized():
return SomeSingleton.instance().config
return Config()
c = Config()
c.DefaultConfigurable.a = | |
group by x.a;
''', explainResponse=["SELECT A FROM NATIVE.T", "SELECT V1 FROM NATIVE.G"])
self.compareWithNativeExtended('''
SELECT GROUP_CONCAT(A ORDER BY C), GROUP_CONCAT(A ORDER BY C) FROM {v}.t;
''', explainResponse="SELECT GROUP_CONCAT(A ORDER BY C), GROUP_CONCAT(A ORDER BY C) FROM NATIVE.T")
self.compareWithNativeExtended('''
select level, level from {v}.T_CONNECT start with val = 100 connect by val = prior parent;
''', explainResponse="SELECT * FROM NATIVE.T_CONNECT")
self.compareWithNativeExtended('''
select extract(hour from a-1) || ':' || extract(minute from a-1) from {v}.T_datetime;
''', explainResponse="SELECT CONCAT(EXTRACT(HOUR FROM (A - 1)), CONCAT(':', EXTRACT(MINUTE FROM (A - 1)))) FROM NATIVE.T_DATETIME")
self.compareWithNativeExtended('''
select concat(v2, v2), concat(v2, v2) from {v}.g;
''', explainResponse="SELECT CONCAT(V2, V2), CONCAT(V2, V2) FROM NATIVE.G")
self.createFastAdapter(schemaName="ADAPTER", adapterName="FAST_ADAPTER")
self.query('DROP VIRTUAL SCHEMA IF EXISTS FAST_VS1 CASCADE')
self.query('CREATE VIRTUAL SCHEMA FAST_VS1 USING ADAPTER.FAST_ADAPTER')
rows = self.query('''
SELECT CONCAT(KEY, KEY), CONCAT(KEY,KEY) FROM FAST_VS1.DUMMY;
''')
self.assertRowsEqual([('FOOFOO', 'FOOFOO')],rows)
rows = self.query('''
SELECT PUSHDOWN_SQL FROM (EXPLAIN VIRTUAL SELECT CONCAT(KEY, KEY), CONCAT(KEY,KEY) FROM FAST_VS1.DUMMY);
''')
self.assertRowsEqual([("SELECT * FROM (VALUES ('FOO', 'BAR')) t",)],rows)
def testWithAnalytical(self):
self.compareWithNativeExtended('''
SELECT k, v1, count(*) over (PARTITION BY k ORDER BY v1) AS COUNT FROM {v}.g;
''', explainResponse="SELECT K, V1 FROM NATIVE.G")
self.compareWithNativeExtended('''
SELECT k, v1, sum(v1) over (PARTITION BY k ORDER BY v1) AS SUM FROM {v}.g;
''', explainResponse="SELECT K, V1 FROM NATIVE.G")
# and with order-by
self.compareWithNativeExtended('''
SELECT k, v1, sum(v1) over (PARTITION BY k ORDER BY v1) AS SUM FROM {v}.g order by k desc, sum;
''', explainResponse="SELECT K, V1 FROM NATIVE.G")
def testWithConnectBy(self):
self.compareWithNativeExtended('''
SELECT val,
PRIOR val PARENT_NAME,
CONNECT_BY_ROOT val ROOT_NAME,
SYS_CONNECT_BY_PATH(val, '/') "PATH"
FROM {v}.T_CONNECT
CONNECT BY PRIOR val = parent
START WITH val = 100;
''', ignoreOrder=True, explainResponse="SELECT * FROM NATIVE.T_CONNECT")
self.compareWithNativeExtended('''
WITH tmp_view(x) AS (SELECT a+1 from {v}.T) SELECT x FROM tmp_view;
''', explainResponse="SELECT (A + 1) FROM NATIVE.T")
def testWithPreferring(self):
self.compareWithNativeExtended('''
SELECT * FROM {v}.t
PREFERRING (LOW ROUND(a/1000) PLUS HIGH ROUND(c/10))
PRIOR TO (b = 'a');
''', explainResponse="SELECT * FROM NATIVE.T")
def testWithWithClause(self):
self.compareWithNativeExtended('''
WITH tmp_view(x) AS (SELECT a+1 from {v}.T) SELECT x FROM tmp_view;
''', explainResponse="SELECT (A + 1) FROM NATIVE.T")
self.compareWithNativeExtended('''
WITH tmp_view2(x) AS (SELECT a+1 from {v}.T),
tmp_view(y) AS (select a*a from {v}.T) SELECT x,y FROM tmp_view, tmp_view2 order by x, y;
''', explainResponse=["SELECT (A + 1) FROM NATIVE.T", "SELECT (A * A) FROM NATIVE.T"])
self.compareWithNativeExtended('''
With tmp_view(x) AS (SELECT a+1 from {v}.T),
tmp_view2(y) as (SELECT x*x+c FROM tmp_view, {v}.T) SELECT y FROM tmp_view2 order by y;
''', explainResponse=["SELECT (A + 1) FROM NATIVE.T", "SELECT C FROM NATIVE.T"])
self.compareWithNativeExtended('''
(With tmp_view(x) AS (SELECT a+1 from {v}.T) select * FROM tmp_view)
union (with tmp_view2(y) as (SELECT c FROM {v}.T) select * from tmp_view2);
''', explainResponse=["SELECT (A + 1) FROM NATIVE.T", "SELECT C FROM NATIVE.T"])
self.query("DROP VIEW IF EXISTS NATIVE.PUSHDOWN_VIEW5_NATIVE")
self.query("DROP VIEW IF EXISTS NATIVE.PUSHDOWN_VIEW5_VS1")
self.query('''CREATE VIEW NATIVE.PUSHDOWN_VIEW5_NATIVE as (With tmp_view(x) AS (SELECT a+1 from NATIVE.T),
tmp_view2(y) as (SELECT x*x+c FROM tmp_view, NATIVE.T)
SELECT y FROM tmp_view2 order by y);''')
self.query('''CREATE VIEW NATIVE.PUSHDOWN_VIEW5_VS1 as (With tmp_view(x) AS (SELECT a+1 from VS1.T),
tmp_view2(y) as (SELECT x*x+c FROM tmp_view, VS1.T)
SELECT y FROM tmp_view2 order by y);''')
self.compareWithNativeExtended('''
select * from NATIVE.PUSHDOWN_VIEW5_{v};
''', ignoreOrder=True, explainResponse=["SELECT (A + 1) FROM NATIVE.T", "SELECT C FROM NATIVE.T"])
self.query("DROP VIEW IF EXISTS NATIVE.PUSHDOWN_VIEW5_NATIVE")
self.query("DROP VIEW IF EXISTS NATIVE.PUSHDOWN_VIEW5_VS1")
def testWithViews(self):
self.query("DROP VIEW IF EXISTS NATIVE.PUSHDOWN_VIEW1_NATIVE")
self.query("DROP VIEW IF EXISTS NATIVE.PUSHDOWN_VIEW1_VS1")
self.query("DROP VIEW IF EXISTS NATIVE.PUSHDOWN_VIEW2_NATIVE")
self.query("DROP VIEW IF EXISTS NATIVE.PUSHDOWN_VIEW2_VS1")
self.query("DROP VIEW IF EXISTS NATIVE.PUSHDOWN_VIEW3_NATIVE")
self.query("DROP VIEW IF EXISTS NATIVE.PUSHDOWN_VIEW3_VS1")
self.query("DROP VIEW IF EXISTS NATIVE.PUSHDOWN_VIEW4_NATIVE")
self.query("DROP VIEW IF EXISTS NATIVE.PUSHDOWN_VIEW4_VS1")
self.query("CREATE VIEW NATIVE.PUSHDOWN_VIEW1_NATIVE AS SELECT A, K FROM NATIVE.T, NATIVE.G")
self.query("CREATE VIEW NATIVE.PUSHDOWN_VIEW1_VS1 AS SELECT A, K FROM VS1.T, VS1.G")
self.query("CREATE VIEW NATIVE.PUSHDOWN_VIEW2_NATIVE AS SELECT * FROM NATIVE.T")
self.query("CREATE VIEW NATIVE.PUSHDOWN_VIEW2_VS1 AS SELECT * FROM VS1.T")
self.query("CREATE VIEW NATIVE.PUSHDOWN_VIEW3_NATIVE AS SELECT SUM(A) X FROM NATIVE.T WHERE A < 4")
self.query("CREATE VIEW NATIVE.PUSHDOWN_VIEW3_VS1 AS SELECT SUM(A) X FROM VS1.T WHERE A < 4")
self.query("CREATE VIEW NATIVE.PUSHDOWN_VIEW4_NATIVE AS SELECT SUM(A) X, AVG(C) Y FROM NATIVE.PUSHDOWN_VIEW2_NATIVE")
self.query("CREATE VIEW NATIVE.PUSHDOWN_VIEW4_VS1 AS SELECT SUM(A) X, AVG(C) Y FROM NATIVE.PUSHDOWN_VIEW2_VS1")
self.compareWithNativeExtended('''
select * from NATIVE.PUSHDOWN_VIEW1_{v};
''', ignoreOrder=True, explainResponse=["SELECT A FROM NATIVE.T", "SELECT K FROM NATIVE.G"])
self.compareWithNativeExtended('''
select * from NATIVE.PUSHDOWN_VIEW2_{v};
''', ignoreOrder=True, explainResponse=["SELECT * FROM NATIVE.T"])
self.compareWithNativeExtended('''
select SUM(A) from NATIVE.PUSHDOWN_VIEW2_{v};
''', ignoreOrder=True, explainResponse=["SELECT * FROM NATIVE.T"])
self.compareWithNativeExtended('''
select * from NATIVE.PUSHDOWN_VIEW3_{v};
''', ignoreOrder=True, explainResponse=["SELECT SUM(A) FROM NATIVE.T WHERE A < 4"])
self.compareWithNativeExtended('''
select * from NATIVE.PUSHDOWN_VIEW4_{v};
''', ignoreOrder=True, explainResponse=["SELECT * FROM NATIVE.T"])
# test view materialization
self.compareWithNativeExtended('''
SELECT * FROM (SELECT a, rownum FROM NATIVE.PUSHDOWN_VIEW2_{v}) t1 join (SELECT * FROM NATIVE.PUSHDOWN_VIEW2_{v}) t2 on t1.a = t2.a;
''', ignoreOrder=True)
# test view elimination
self.compareWithNativeExtended('''
SELECT * FROM (SELECT * FROM NATIVE.PUSHDOWN_VIEW2_{v}) t1 join (SELECT * FROM NATIVE.PUSHDOWN_VIEW2_{v}) t2 on t1.a = t2.a;
''', ignoreOrder=True)
self.query("DROP VIEW IF EXISTS NATIVE.PUSHDOWN_VIEW1_NATIVE")
self.query("DROP VIEW IF EXISTS NATIVE.PUSHDOWN_VIEW1_VS1")
self.query("DROP VIEW IF EXISTS NATIVE.PUSHDOWN_VIEW2_NATIVE")
self.query("DROP VIEW IF EXISTS NATIVE.PUSHDOWN_VIEW2_VS1")
self.query("DROP VIEW IF EXISTS NATIVE.PUSHDOWN_VIEW3_NATIVE")
self.query("DROP VIEW IF EXISTS NATIVE.PUSHDOWN_VIEW3_VS1")
self.query("DROP VIEW IF EXISTS NATIVE.PUSHDOWN_VIEW4_NATIVE")
self.query("DROP VIEW IF EXISTS NATIVE.PUSHDOWN_VIEW4_VS1")
def testWithSetOperations(self):
self.compareWithNativeExtended('''
(SELECT c from {v}.T) UNION (SELECT v1 FROM {v}.G);
''', explainResponse=["SELECT C FROM NATIVE.T", "SELECT V1 FROM NATIVE.G"])
self.compareWithNativeExtended('''
(SELECT * FROM VS1.T WHERE C in (SELECT c FROM (SELECT c from VS1.T) UNION ALL (SELECT v1 FROM VS1.G)));
''', explainResponse=["SELECT * FROM NATIVE.T", "SELECT C FROM NATIVE.T", "SELECT V1 FROM NATIVE.G"])
self.compareWithNativeExtended('''
(SELECT c from {v}.T) INTERSECT (SELECT v1 FROM {v}.G);
''', explainResponse=["SELECT C FROM NATIVE.T GROUP BY C", "SELECT V1 FROM NATIVE.G"])
self.compareWithNativeExtended('''
(SELECT c from {v}.T) MINUS (SELECT v1 FROM {v}.G);
''', explainResponse=["SELECT C FROM NATIVE.T GROUP BY C", "SELECT V1 FROM NATIVE.G"])
self.compareWithNativeExtended('''
(SELECT c from {v}.T) EXCEPT (SELECT v1 FROM {v}.G);
''', explainResponse=["SELECT C FROM NATIVE.T GROUP BY C","SELECT V1 FROM NATIVE.G"])
def testWithExists(self):
self.compareWithNativeExtended('''
SELECT a FROM {v}.t WHERE EXISTS (SELECT * FROM {v}.g WHERE t.a=g.v1);
''', explainResponse=["SELECT A FROM NATIVE.T", "SELECT * FROM NATIVE.G"])
self.compareWithNativeExtended('''
SELECT a FROM {v}.t WHERE EXISTS (SELECT * FROM {v}.t);
''', explainResponse=["SELECT A FROM NATIVE.T", "SELECT * FROM NATIVE.T"])
class ExtendedFunctionTest(VSchemaTest):
setupDone = False
def setUp(self):
# TODO This is another ugly workaround for the problem that the framework doesn't offer us a query in classmethod setUpClass. Rewrite!
if self.__class__.setupDone:
self.query(''' CLOSE SCHEMA ''')
return
#self.initPool()
self.createJdbcAdapter()
self.createNative()
self.commit() # We have to commit, otherwise the adapter won't see these tables
self.createVirtualSchemaJdbc("VS1", "NATIVE", "ADAPTER.JDBC_ADAPTER", True)
self.commit()
self.query(''' CLOSE SCHEMA ''')
self.__class__.setupDone = True
# def testOperatorsPar(self):
# procs = []
# procs.append(self.executor.submit(target=self.completeFunctionTestPar, args=('b||b', 't', 'CONCAT(B, B)')))
# procs.append(self.executor.submit(target=self.completeFunctionTestPar, args=('a + a', 't', 'A + A')))
# procs.append(self.executor.submit(target=self.completeFunctionTestPar, args=('a + 2', 't_datetime', 'A + 2')))
# procs.append(self.executor.submit(target=self.completeFunctionTestPar, args=('a - a', 't', 'A - A')))
# procs.append(self.executor.submit(target=self.completeFunctionTestPar, args=('a - a', 't_datetime', 'A - A')))
# procs.append(self.executor.submit(target=self.completeFunctionTestPar, args=('a - 2', 't_datetime', 'A - 2')))
# procs.append(self.executor.submit(target=self.completeFunctionTestPar, args=('2 * a', 't', '2 * A')))
# procs.append(self.executor.submit(target=self.completeFunctionTestPar, args=('a / 2', 't', 'A / 2')))
# concurrent.futures.wait(procs, NONE, ALL_COMPLETED)
def testOperators(self):
self.completeFunctionTest('+A', 't', 'A')
self.completeFunctionTest('-A', 't', '-A')
self.completeFunctionTest('B||B', 't', 'CONCAT(B, B)')
self.completeFunctionTest('A + A', 't', 'A + A')
self.completeFunctionTest('A + A', 't_interval', 'A + A')
self.completeFunctionTest('A + 2', 't_datetime', 'A + 2')
self.completeFunctionTest("A + INTERVAL '5' MONTH", 't_interval', "A + INTERVAL '+0-05' YEAR (2) TO MONTH")
self.completeFunctionTest("A + INTERVAL '130' MONTH (3)", 't_interval', "A + INTERVAL '+10-10' YEAR (3) TO MONTH")
self.completeFunctionTest("A + INTERVAL '27' YEAR", 't_interval', "A + INTERVAL '+27-00' YEAR (2) TO MONTH")
self.completeFunctionTest("A + INTERVAL '2-1' YEAR TO MONTH", 't_interval', "A + INTERVAL '+2-01' YEAR (2) TO MONTH")
self.completeFunctionTest("A + INTERVAL '100-1' YEAR(3) TO MONTH", 't_interval', "A + INTERVAL '+100-01' YEAR (3) TO MONTH")
self.completeFunctionTest("B + INTERVAL '5' DAY", 't_interval', "B + INTERVAL '+5 00:00:00.000' DAY (2) TO SECOND (3)")
self.completeFunctionTest("B + INTERVAL '100' HOUR(3)", 't_interval', "B + INTERVAL '+4 04:00:00.000' DAY (3) TO SECOND (3)")
self.completeFunctionTest("B + INTERVAL '6' MINUTE", 't_interval', "B + INTERVAL '+0 00:06:00.000' DAY (2) TO SECOND (3)")
self.completeFunctionTest("B + INTERVAL '1.99999' SECOND(2,2)", 't_interval', "B + INTERVAL '+0 00:00:02.000' DAY (2) TO SECOND (2)")
self.completeFunctionTest("B + INTERVAL '10:20' HOUR
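# The explainResponse strings above show how interval literals are normalized before
# pushdown, e.g. INTERVAL '130' MONTH (3) becomes INTERVAL '+10-10' YEAR (3) TO MONTH
# (130 months = 10 years 10 months).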
# Copyright (c) 2003-2019 by <NAME>
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import numpy as np
import os
import coord
import time
import treecorr
from test_helper import get_from_wiki, get_script_name, do_pickle, CaptureLog
from test_helper import assert_raises, timer, assert_warns
from numpy import sin, cos, tan, arcsin, arccos, arctan, arctan2, pi
@timer
def test_direct():
# If the catalogs are small enough, we can do a direct calculation to see if it comes out right.
# This should exactly match the treecorr result if brute=True.
ngal = 200
s = 10.
rng = np.random.RandomState(8675309)
x1 = rng.normal(0,s, (ngal,) )
y1 = rng.normal(0,s, (ngal,) )
w1 = rng.random_sample(ngal)
g11 = rng.normal(0,0.2, (ngal,) )
g21 = rng.normal(0,0.2, (ngal,) )
x2 = rng.normal(0,s, (ngal,) )
y2 = rng.normal(0,s, (ngal,) )
w2 = rng.random_sample(ngal)
g12 = rng.normal(0,0.2, (ngal,) )
g22 = rng.normal(0,0.2, (ngal,) )
cat1 = treecorr.Catalog(x=x1, y=y1, w=w1, g1=g11, g2=g21)
cat2 = treecorr.Catalog(x=x2, y=y2, w=w2, g1=g12, g2=g22)
min_sep = 1.
max_sep = 50.
nbins = 50
bin_size = np.log(max_sep/min_sep) / nbins
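# The bins are logarithmic in separation: a pair at separation r falls into bin
# floor(log(r/min_sep) / bin_size), exactly as computed in the loop below.
# Illustrative check with the values used here: bin_size = log(50)/50 ~ 0.0782,
# so a pair at r = 10 lands in bin floor(log(10)/0.0782) = 29.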
gg = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, brute=True)
gg.process(cat1, cat2)
true_npairs = np.zeros(nbins, dtype=int)
true_weight = np.zeros(nbins, dtype=float)
true_xip = np.zeros(nbins, dtype=complex)
true_xim = np.zeros(nbins, dtype=complex)
for i in range(ngal):
# It's hard to do all the pairs at once with numpy operations (although maybe possible).
# But we can at least do all the pairs for each entry in cat1 at once with arrays.
rsq = (x1[i]-x2)**2 + (y1[i]-y2)**2
r = np.sqrt(rsq)
logr = np.log(r)
expmialpha = ((x1[i]-x2) - 1j*(y1[i]-y2)) / r
ww = w1[i] * w2
xip = ww * (g11[i] + 1j*g21[i]) * (g12 - 1j*g22)
xim = ww * (g11[i] + 1j*g21[i]) * (g12 + 1j*g22) * expmialpha**4
index = np.floor(np.log(r/min_sep) / bin_size).astype(int)
mask = (index >= 0) & (index < nbins)
np.add.at(true_npairs, index[mask], 1)
np.add.at(true_weight, index[mask], ww[mask])
np.add.at(true_xip, index[mask], xip[mask])
np.add.at(true_xim, index[mask], xim[mask])
true_xip /= true_weight
true_xim /= true_weight
print('true_npairs = ',true_npairs)
print('diff = ',gg.npairs - true_npairs)
np.testing.assert_array_equal(gg.npairs, true_npairs)
print('true_weight = ',true_weight)
print('diff = ',gg.weight - true_weight)
np.testing.assert_allclose(gg.weight, true_weight, rtol=1.e-5, atol=1.e-8)
print('true_xip = ',true_xip)
print('gg.xip = ',gg.xip)
print('gg.xip_im = ',gg.xip_im)
np.testing.assert_allclose(gg.xip, true_xip.real, rtol=1.e-4, atol=1.e-8)
np.testing.assert_allclose(gg.xip_im, true_xip.imag, rtol=1.e-4, atol=1.e-8)
print('true_xim = ',true_xim)
print('gg.xim = ',gg.xim)
print('gg.xim_im = ',gg.xim_im)
np.testing.assert_allclose(gg.xim, true_xim.real, rtol=1.e-4, atol=1.e-8)
np.testing.assert_allclose(gg.xim_im, true_xim.imag, rtol=1.e-4, atol=1.e-8)
try:
import fitsio
except ImportError:
print('Skipping FITS tests, since fitsio is not installed')
return
# Check that running via the corr2 script works correctly.
config = treecorr.config.read_config('configs/gg_direct.yaml')
cat1.write(config['file_name'])
cat2.write(config['file_name2'])
treecorr.corr2(config)
data = fitsio.read(config['gg_file_name'])
np.testing.assert_allclose(data['r_nom'], gg.rnom)
np.testing.assert_allclose(data['npairs'], gg.npairs)
np.testing.assert_allclose(data['weight'], gg.weight)
np.testing.assert_allclose(data['xip'], gg.xip, rtol=1.e-3)
np.testing.assert_allclose(data['xip_im'], gg.xip_im, rtol=1.e-3)
np.testing.assert_allclose(data['xim'], gg.xim, rtol=1.e-3)
np.testing.assert_allclose(data['xim_im'], gg.xim_im, rtol=1.e-3)
# Repeat with binslop = 0.
# And don't do any top-level recursion so we actually test not going to the leaves.
gg = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, bin_slop=0,
max_top=0)
gg.process(cat1, cat2)
np.testing.assert_array_equal(gg.npairs, true_npairs)
np.testing.assert_allclose(gg.weight, true_weight, rtol=1.e-5, atol=1.e-8)
np.testing.assert_allclose(gg.xip, true_xip.real, rtol=1.e-4, atol=1.e-8)
np.testing.assert_allclose(gg.xip_im, true_xip.imag, rtol=1.e-4, atol=1.e-8)
print('true_xim = ',true_xim)
print('gg.xim = ',gg.xim)
print('gg.xim_im = ',gg.xim_im)
print('diff = ',gg.xim - true_xim.real)
print('max diff = ',np.max(np.abs(gg.xim - true_xim.real)))
print('rel diff = ',(gg.xim - true_xim.real)/true_xim.real)
# This is the one that is highly affected by the approximation from averaging the shears
# before projecting, rather than averaging each shear projected to its own connecting line.
np.testing.assert_allclose(gg.xim, true_xim.real, rtol=1.e-3, atol=3.e-4)
np.testing.assert_allclose(gg.xim_im, true_xim.imag, atol=1.e-3)
# Check a few basic operations with a GGCorrelation object.
do_pickle(gg)
gg2 = gg.copy()
gg2 += gg
np.testing.assert_allclose(gg2.npairs, 2*gg.npairs)
np.testing.assert_allclose(gg2.weight, 2*gg.weight)
np.testing.assert_allclose(gg2.meanr, 2*gg.meanr)
np.testing.assert_allclose(gg2.meanlogr, 2*gg.meanlogr)
np.testing.assert_allclose(gg2.xip, 2*gg.xip)
np.testing.assert_allclose(gg2.xip_im, 2*gg.xip_im)
np.testing.assert_allclose(gg2.xim, 2*gg.xim)
np.testing.assert_allclose(gg2.xim_im, 2*gg.xim_im)
gg2.clear()
gg2 += gg
np.testing.assert_allclose(gg2.npairs, gg.npairs)
np.testing.assert_allclose(gg2.weight, gg.weight)
np.testing.assert_allclose(gg2.meanr, gg.meanr)
np.testing.assert_allclose(gg2.meanlogr, gg.meanlogr)
np.testing.assert_allclose(gg2.xip, gg.xip)
np.testing.assert_allclose(gg2.xip_im, gg.xip_im)
np.testing.assert_allclose(gg2.xim, gg.xim)
np.testing.assert_allclose(gg2.xim_im, gg.xim_im)
ascii_name = 'output/gg_ascii.txt'
gg.write(ascii_name, precision=16)
gg3 = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins)
gg3.read(ascii_name)
np.testing.assert_allclose(gg3.npairs, gg.npairs)
np.testing.assert_allclose(gg3.weight, gg.weight)
np.testing.assert_allclose(gg3.meanr, gg.meanr)
np.testing.assert_allclose(gg3.meanlogr, gg.meanlogr)
np.testing.assert_allclose(gg3.xip, gg.xip)
np.testing.assert_allclose(gg3.xip_im, gg.xip_im)
np.testing.assert_allclose(gg3.xim, gg.xim)
np.testing.assert_allclose(gg3.xim_im, gg.xim_im)
fits_name = 'output/gg_fits.fits'
gg.write(fits_name)
gg4 = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins)
gg4.read(fits_name)
np.testing.assert_allclose(gg4.npairs, gg.npairs)
np.testing.assert_allclose(gg4.weight, gg.weight)
np.testing.assert_allclose(gg4.meanr, gg.meanr)
np.testing.assert_allclose(gg4.meanlogr, gg.meanlogr)
np.testing.assert_allclose(gg4.xip, gg.xip)
np.testing.assert_allclose(gg4.xip_im, gg.xip_im)
np.testing.assert_allclose(gg4.xim, gg.xim)
np.testing.assert_allclose(gg4.xim_im, gg.xim_im)
with assert_raises(TypeError):
gg2 += config
gg4 = treecorr.GGCorrelation(min_sep=min_sep/2, max_sep=max_sep, nbins=nbins)
with assert_raises(ValueError):
gg2 += gg4
gg5 = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep*2, nbins=nbins)
with assert_raises(ValueError):
gg2 += gg5
gg6 = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins*2)
with assert_raises(ValueError):
gg2 += gg6
@timer
def test_direct_spherical():
# Repeat in spherical coords
ngal = 100
s = 10.
rng = np.random.RandomState(8675309)
x1 = rng.normal(0,s, (ngal,) )
y1 = rng.normal(0,s, (ngal,) ) + 200 # Put everything at large y, so small angle on sky
z1 = rng.normal(0,s, (ngal,) )
w1 = rng.random_sample(ngal)
g11 = rng.normal(0,0.2, (ngal,) )
g21 = rng.normal(0,0.2, (ngal,) )
x2 = rng.normal(0,s, (ngal,) )
y2 = rng.normal(0,s, (ngal,) ) + 200
z2 = rng.normal(0,s, (ngal,) )
w2 = rng.random_sample(ngal)
g12 = rng.normal(0,0.2, (ngal,) )
g22 = rng.normal(0,0.2, (ngal,) )
ra1, dec1 = coord.CelestialCoord.xyz_to_radec(x1,y1,z1)
ra2, dec2 = coord.CelestialCoord.xyz_to_radec(x2,y2,z2)
cat1 = treecorr.Catalog(ra=ra1, dec=dec1, ra_units='rad', dec_units='rad', w=w1, g1=g11, g2=g21)
cat2 = treecorr.Catalog(ra=ra2, dec=dec2, ra_units='rad', dec_units='rad', w=w2, g1=g12, g2=g22)
min_sep = 1.
max_sep = 10.
nbins = 50
bin_size = np.log(max_sep/min_sep) / nbins
gg = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
sep_units='deg', brute=True)
gg.process(cat1, cat2)
r1 = np.sqrt(x1**2 + y1**2 + z1**2)
r2 = np.sqrt(x2**2 + y2**2 + z2**2)
x1 /= r1; y1 /= r1; z1 /= r1
x2 /= r2; y2 /= r2; z2 /= r2
north_pole = coord.CelestialCoord(0*coord.radians, 90*coord.degrees)
true_npairs = np.zeros(nbins, dtype=int)
true_weight = np.zeros(nbins, dtype=float)
true_xip = np.zeros(nbins, dtype=complex)
true_xim = np.zeros(nbins, dtype=complex)
rad_min_sep = min_sep * coord.degrees / coord.radians
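# sep_units='deg' above, so min_sep is converted from degrees to radians before
# comparing against the 3D separations computed below; with min_sep = 1 deg this
# gives rad_min_sep ~ 0.01745.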
c1 = [coord.CelestialCoord(r*coord.radians, d*coord.radians) for (r,d) in zip(ra1, dec1)]
c2 = [coord.CelestialCoord(r*coord.radians, d*coord.radians) for (r,d) in zip(ra2, dec2)]
for i in range(ngal):
for j in range(ngal):
rsq = (x1[i]-x2[j])**2 + (y1[i]-y2[j])**2 + (z1[i]-z2[j])**2
r = np.sqrt(rsq)
logr = np.log(r)
index = np.floor(np.log(r/rad_min_sep) / bin_size).astype(int)
if index < 0 or index >= nbins:
continue
# Rotate shears to coordinates where line connecting is horizontal.
# Original orientation is where north is up.
theta1 = 90*coord.degrees - c1[i].angleBetween(north_pole, c2[j])
theta2 = 90*coord.degrees - c2[j].angleBetween(north_pole, c1[i])
exp2theta1 = np.cos(2*theta1) + 1j * np.sin(2*theta1)
exp2theta2 = np.cos(2*theta2) + 1j * np.sin(2*theta2)
g1 = g11[i] + 1j * g21[i]
g2 = g12[j] + 1j * g22[j]
g1 *= exp2theta1
g2 *= exp2theta2
ww = w1[i] * w2[j]
xip = ww * g1 * np.conjugate(g2)
xim = ww * g1 * g2
true_npairs[index] += 1
true_weight[index] += ww
true_xip[index] += xip
true_xim[index] += xim
true_xip /= true_weight
true_xim /= true_weight
print('true_npairs = ',true_npairs)
print('diff = ',gg.npairs - true_npairs)
np.testing.assert_array_equal(gg.npairs, true_npairs)
print('true_weight = ',true_weight)
print('diff = ',gg.weight - true_weight)
np.testing.assert_allclose(gg.weight, true_weight, rtol=1.e-5, atol=1.e-8)
print('true_xip = ',true_xip)
print('gg.xip = ',gg.xip)
print('gg.xip_im = ',gg.xip_im)
np.testing.assert_allclose(gg.xip, true_xip.real, rtol=1.e-4, atol=1.e-8)
np.testing.assert_allclose(gg.xip_im, true_xip.imag, rtol=1.e-4, atol=1.e-8)
print('true_xim = ',true_xim)
print('gg.xim = ',gg.xim)
print('gg.xim_im = ',gg.xim_im)
np.testing.assert_allclose(gg.xim, true_xim.real, rtol=1.e-4, atol=1.e-8)
np.testing.assert_allclose(gg.xim_im, true_xim.imag, rtol=1.e-4, atol=1.e-8)
try:
import fitsio
except ImportError:
print('Skipping FITS tests, since fitsio is not installed')
return
# Check that running via the corr2 script works correctly.
config = treecorr.config.read_config('configs/gg_direct_spherical.yaml')
cat1.write(config['file_name'])
cat2.write(config['file_name2'])
treecorr.corr2(config)
data = fitsio.read(config['gg_file_name'])
np.testing.assert_allclose(data['r_nom'], gg.rnom)
np.testing.assert_allclose(data['npairs'], gg.npairs)
np.testing.assert_allclose(data['weight'], gg.weight)
np.testing.assert_allclose(data['xip'], gg.xip, rtol=1.e-3)
np.testing.assert_allclose(data['xip_im'], gg.xip_im, rtol=1.e-3)
np.testing.assert_allclose(data['xim'], gg.xim, rtol=1.e-3)
np.testing.assert_allclose(data['xim_im'], gg.xim_im, rtol=1.e-3)
# Repeat with binslop = 0
# And don't do any top-level recursion so we actually test not going to the leaves.
gg = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
sep_units='deg', bin_slop=0, max_top=0)
gg.process(cat1, cat2)
np.testing.assert_array_equal(gg.npairs, true_npairs)
np.testing.assert_allclose(gg.weight, true_weight, rtol=1.e-5, atol=1.e-8)
np.testing.assert_allclose(gg.xip, true_xip.real, rtol=1.e-3, atol=1.e-6)
np.testing.assert_allclose(gg.xip_im, true_xip.imag, rtol=1.e-3, atol=1.e-6)
diff = np.abs(gg.xim - true_xim.real)
reldiff = diff / true_xim.real
np.testing.assert_allclose(gg.xim, true_xim.real, rtol=1.e-3, atol=2.e-4)
np.testing.assert_allclose(gg.xim_im, true_xim.imag, rtol=1.e-3, atol=2.e-4)
@timer
def test_pairwise():
# Test the pairwise option.
ngal = 1000
s = 10.
rng = np.random.RandomState(8675309)
x1 = rng.normal(0,s, (ngal,) )
y1 = rng.normal(0,s, (ngal,) )
w1 = rng.random_sample(ngal)
g11 = rng.normal(0,0.2, (ngal,) )
g21 = rng.normal(0,0.2, (ngal,) )
import json
from collections import OrderedDict
from logging import debug
from flask import request, render_template, Markup, Response, abort
from coocstats import CountStats
from charts import chart_types
from hocqueries import hocqueries
from examples import example_terms, example_pairs
from common import uniq, merge_dicts, pretty_dumps
from chat import pubmed_text_store, pubmed_ann_store
from chat import db_controller
from chat import app
from significance import term_significance
from hallmarks import *
from legends import get_legend
from config import DEFAULT_CHART, DEFAULT_MEASURE, DEBUG, DEVEL
from charts import POS_ONLY, POS_LEFT, POS_RIGHT
@app.errorhandler(Exception)
def catchall_exception_handler(e):
import traceback
try:
trace = traceback.format_exc()
except:
trace = '(no traceback)'
if DEVEL:
msg = '<html><pre>{}</pre></html>'.format(trace)
else:
msg = 'Application error'
return msg, 500
### Static views
@app.route('/help')
def help():
return render_template('help.html')
@app.route('/api')
def api():
args = { 'root': '/' } # TODO don't hardcode app root
return render_template('api.html', **args)
@app.route('/about')
def about():
return render_template('about.html')
@app.route('/hallmarks')
def hallmarks():
return render_template('hallmarks.html')
### Dynamic views
@app.route('/')
@app.route('/index')
def index():
terms = get_query_terms()
if not terms:
return generate_index()
else:
return generate_charts(terms, measure=getarg('measure'),
chart_type=getarg('chart_type'))
@app.route('/metadata')
def metadata():
metadata = fill_template_arguments({})
metadata['term_examples'] = example_terms
metadata['comparison_examples'] = example_pairs
metadata['full_hallmark_codes'] = hallmark_codes
metadata['top_hallmark_codes'] = top_hallmark_codes
metadata['top_hallmark_colors'] = top_hallmark_colors
metadata['full_hallmark_colors'] = full_hallmark_colors
response = app.response_class(
response=json.dumps(metadata),
status=200,
mimetype='application/json'
)
return response
@app.route('/compare')
def compare():
terms = uniq(get_query_terms())
template_args = { 'compare' : True, 'tabindex': 1 }
if not terms:
return generate_index(template_args)
elif len(terms) < 2:
messages = { 'errors': [ 'Please provide two different query terms' ] }
return generate_index(merge_dicts(template_args, messages))
else:
terms = terms[:2] # Ignore extra terms
return generate_comparison(terms, measure=getarg('measure'))
@app.route('/pubmed/<id_>')
def pubmed(id_):
from so2html import standoff_to_html
from operator import itemgetter
from itertools import groupby
try:
text = '\n'.join(pubmed_text_store.get_tiab(id_))
except KeyError:
abort(404)
try:
ann = pubmed_ann_store.get_spans(id_, minimize=True)
except KeyError:
# Assume no annotations.
ann = []
html = standoff_to_html(text, ann, legend=True, tooltips=True,
links=False, content_only=True)
template_args = { 'documents': { id_: { 'html': Markup(html), } } }
return render_template_with_globals('pubmed.html', template_args)
@app.route('/pubmed/<id_>/text')
def pubmed_text(id_):
try:
return '\n'.join(pubmed_text_store.get_tiab(id_))
except KeyError:
abort(404)
@app.route('/pubmed/<id_>/annotations')
def pubmed_annotations(id_):
return pretty_dumps(pubmed_ann_store.get_spans(id_, minimize=True))
@app.route('/search')
def search():
pmids = get_doc_pmids()
terms, hallmarks = get_query_terms(), get_hallmark_terms()
offset = getarg('offset', 0)
try:
offset = int(offset)
except:
offset = 0
results = []
#results = db_controller.searchTextAndHallmarks(terms[0], hallmarks, 100)
if len(terms) > 0:
results = db_controller.searchTextAndHallmarks(terms[0], hallmarks, 20,offset)
elif len(pmids) > 0:
results = db_controller.searchPMIDs(pmids, 10000,offset)
#pass
# Add PMID based on sentence ID. By convention, sentence IDs are
# of the form <PMID>-<SENTIDX>.
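# For example (illustrative, not real data): a result with id "12345678-3"
# gets pmid "12345678" after the split below.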
for r in results:
r['pmid'] = r['id'].split('-')[0]
# Add title text by looking up document text.
for r in results:
try:
r['title'] = pubmed_text_store.get(r['pmid']).split('\n')[0]
except KeyError:
# Document missing. TODO: better resolution?
r['title'] = ''
template_args = {
'hm': [ hallmark_codes.get(hm) for hm in hallmarks ],
'results': results
}
if len(terms) > 0:
template_args['term'] = terms[0]
elif len(pmids) > 0:
template_args['pmids'] = pmids
response = app.response_class(
response=json.dumps(template_args),
status=200,
mimetype='application/json'
)
return response
#return render_template_with_globals('searchresult.html', template_args)
@app.route('/chartdata')
def chart_data():
# TODO: eliminate duplication with generate_charts()
asjson = getarg('asjson',False)
terms = get_query_terms()
pmids = get_doc_pmids()
if not terms and not pmids:
return 'No query terms or pmids'
measure = getarg('measure')
hallmarks = getarg('hallmarks', 'top')
hallmark_filter = top_hallmark_codes if hallmarks == 'top' else None
stats = hallmark_cooccurrences(terms, hallmark_filter) if terms else \
hallmark_cooccurrences_by_pmids(pmids, hallmark_filter)
m = select_measure(stats, measure)
measure_name = selected_measure(stats, measure)
def fmt(v):
"""Format measure value as string."""
if isinstance(v, int) or float(v).is_integer():
return str(int(v))
else:
return '{:.4f}'.format(v)
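# When asjson is not set, the branch below returns plain tab-separated text:
# a "term<TAB>measure" header line per term, followed by one "hallmark<TAB>value"
# line per hallmark and a blank separator line (hallmark labels come from the stats object).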
if not asjson:
fields = []
for term in terms:
fields.append((term, measure_name))
term_total = int(stats.count(term, None))
values = [(hm, fmt(m(term, hm))) for hm in stats[term] if hm is not None]
fields.extend(values)
fields.append([])
return Response(
response='\n'.join('\t'.join(f) for f in fields),
status=200,
mimetype="text/plain", # TODO make parameter?
)
template_args = [
{ 'term': term,
'hallmarks': hallmarks,
'measure':measure_name,
'values': {hm : max(0,m(term,hm)) for hm in stats[term] if hm is not None},
'counts': { k: v for k, v in stats.getcounts().items() if k == term }[term] }
for term in (terms if terms else [','.join(pmids)])
]
if not terms:
template_args = [{key: value for key, value in a.items() \
if key != 'term'} for a in template_args]
response = app.response_class(
response=json.dumps(template_args),
status=200,
mimetype='application/json'
)
return response
def getarg(arg, default=None):
return request.args.get(arg, default)
def get_query_terms():
from config import QUERY_PARAMETER as q
return [ t for t in request.args.getlist(q) if t and not t.isspace() ]
def get_doc_pmids():
from config import PMIDS_PARAMETER as pmids
args = request.args.get(pmids, default='')
pmids = args.split(',')
print pmids
return pmids
def get_hallmark_terms():
from config import HALLMARK_PARAMETER as h
return [ t for t in request.args.getlist(h) if t and not t.isspace() ]
def hidden_parameters(terms):
"""Return hidden HTML form input fields including the given terms."""
from config import QUERY_PARAMETER as q
return Markup('\n'.join(
'<input type="hidden" name="{}" value="{}">'.format(q, t)
for t in terms
))
def selected_measure(stats, name):
return name if name in stats.measures else DEFAULT_MEASURE
def select_measure(stats, name):
return stats.measures.get(name, stats.measures[DEFAULT_MEASURE])
def select_chart(chart_type):
return chart_types.get(chart_type, chart_types[DEFAULT_CHART])
def generate_index(template_args=None):
if template_args is None:
template_args = {}
template_args['term_examples'] = example_terms
template_args['comparison_examples'] = example_pairs
return render_template_with_globals('index.html', template_args)
def filter_cooccurrence_data(data, hallmark_filter):
filtered = OrderedDict()
for term in data:
filtered[term] = OrderedDict()
for hm in data[term]:
if hm is not None and hm not in hallmark_filter:
continue
filtered[term][hm] = data[term][hm]
return filtered
def hallmark_cooccurrences(terms, hallmark_filter=None):
data = OrderedDict()
# Initialize all hallmark counts to zero. This is required because
# the API omits keys with zero occurrences from its dict.
for term in terms:
data[term] = OrderedDict()
for code, hm in hallmark_codes.items():
data[term][hm] = 0
# Get totals for hallmarks, store as (None, hm)
data[None] = {}
hallmark_total = db_controller.getHallmarksCount()
for code, count in sorted(hallmark_total.items()):
data[None][hallmark_codes[code]] = count
# Get hallmark-term cooccurrences and term totals
for term in terms:
term_total, hallmark_count = db_controller.getHallmarksForQuery(term)
data[term][None] = term_total
for code, count in hallmark_count.items():
hm = hallmark_codes[code]
data[term][hm] = count
total = db_controller.getTotalNumberOfSentences()
data[None][None] = total
if hallmark_filter is not None:
data = filter_cooccurrence_data(data, hallmark_filter.values())
debug('cooccurrence data: {}'.format(data))
return CountStats(data)
def hallmark_cooccurrences_by_pmids(pmids, hallmark_filter=None):
key = ','.join(pmids)
data = OrderedDict()
data[key] = OrderedDict()
for code, hm in hallmark_codes.items():
data[key][hm] = 0
data[None] = {}
hallmark_total = db_controller.getHallmarksCount()
for code, count in sorted(hallmark_total.items()):
data[None][hallmark_codes[code]] = count
pmids_total, hallmark_count = db_controller.getHallmarksForPMIDs(pmids)
data[key][None] = pmids_total
for code, count in hallmark_count.items():
hm = hallmark_codes[code]
data[key][hm] = count
total = db_controller.getTotalNumberOfSentences()
data[None][None] = total
print data
if hallmark_filter is not None:
data = filter_cooccurrence_data(data, hallmark_filter.values())
print "filtered data"
print data
debug('pmids cooccurrence data: {}'.format(data))
return CountStats(data)
def fill_template_arguments(args):
"""Add global arguments to template to given dictionary."""
args.update({
'measure': getarg('measure', DEFAULT_MEASURE),
'measures': CountStats().measures.keys(),
'chart_type': getarg('chart_type', DEFAULT_CHART),
'chart_types': chart_types.keys(),
'hallmarks': getarg('hallmarks', 'top'),
'hallmark_options': ['top', 'full'],
'hallmark_codes': hallmark_codes,
})
return args
def render_template_with_globals(template, args):
"""Fill in global arguments and render given template."""
args = fill_template_arguments(args)
debug('Rendering {} with args: {}'.format(template, args))
return render_template(template, **args)
def generate_comparison(terms, measure='npmi'):
# TODO eliminate duplication with generate_charts
chart_type='column'
hallmarks = getarg('hallmarks', 'top')
hallmark_filter = top_hallmark_codes if hallmarks == 'top' else None
stats = hallmark_cooccurrences(terms, hallmark_filter)
m = select_measure(stats, measure)
measure_name = selected_measure(stats, measure)
assert(len([t for t in stats if t is not None]) == 2)
# Get raw counts, excluding standalone
term_counts = { k: v for k, v in stats.getcounts().items()
if k is not None }
ssmetrics = term_significance(term_counts)
def format_pvalue(term):
p = '{:.2f}'.format(ssmetrics['pvalue'].get(term))
if p == '0.00':
p = '< 0.01'
return ' (p {})'.format(p)
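# Illustrative behaviour of format_pvalue above: a p-value of 0.003 formats as
# '0.00' and is rendered as ' (p < 0.01)', while 0.27 is rendered as ' (p 0.27)'.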
charts = []
render_large = True
for i, term in enumerate(terms):
position = POS_LEFT if i%2 == 0 else POS_RIGHT
term_total = int(stats.count(term, None))
values = [ (hm, m(term, hm)) for hm in stats[term] if hm is not None ]
# Mark statistical significance in labels
values = [
(hm + format_pvalue(hm), v)
for hm, v in values
]
label = '{} ({})'.format(term, term_total)
Chart = select_chart(chart_type)
charts.append(Chart(
label, values, term, measure=measure_name, position=position,
render_large=render_large, hallmarks=hallmarks
))
template_args = {
'includereset': True,
'hiddenparams': hidden_parameters(terms), # Previous queries
'containers': Markup('\n'.join(c.container() for c in charts)),
'chartdata': Markup('\n'.join(c.datastr() for c in charts)),
'chartinit': Markup('\n'.join(c.initstr() for c in charts)),
'DEBUG' : DEBUG,
}
return render_template_with_globals('comparison.html', template_args)
def generate_charts(terms, measure='npmi', chart_type='polar'):
hallmarks = getarg('hallmarks', 'top')
hallmark_filter = top_hallmark_codes if hallmarks == 'top' else None
stats = hallmark_cooccurrences(terms, hallmark_filter)
m = select_measure(stats, measure)
measure_name = selected_measure(stats, measure)
charts = []
render_large = True
for term in terms:
term_total = int(stats.count(term, None))
values = [ (hm, m(term, hm)) for hm in stats[term] if hm is not None ]
label = '{} ({})'.format(term, term_total)
Chart | |
from webpie import WebPieApp, WebPieHandler, Response, app_synchronized
import couchbase
from ConfigParser import ConfigParser
from threading import RLock
import os, time, json, urllib2, zlib, yaml, urllib, sys
from cStringIO import StringIO
from striped.client import CouchBaseConfig, CouchBaseBackend
from striped.common import standalone_data_key, stripe_key
from striped.common.signed_token import SignedToken, generate_secret, TokenBox
from striped.common.rfc2617 import digest_server
from striped.common.exceptions import StripedNotFoundException
def debug(msg):
open("/tmp/striped.log","a").write(
"%s: %s\n" % (time.ctime(time.time()), msg))
class StripedConfiguration:
def __init__(self, path):
self.Config = yaml.load(open(path, "r").read())
server_cfg = self.Config.get("Server", {})
self.Buckets = sorted(server_cfg["buckets"])
self.OperationTimeout = float(server_cfg.get("operation_timeout", 10.0))
self.DataTTL = server_cfg.get("DataTTL", 30*24*3600)
self.MetadataTTL = server_cfg.get("MetadataTTL", 24*3600)
self.TagsConnect = self.Config.get("Tags", {}).get("connect")
self.AAInfo = self.Config.get("Authorizations") # { user: { "password": <password>,
# "actions": [action, action...]}
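# Illustrative shape of the "Authorizations" section as getPassword() below expects it
# (user name and values are made up):
#   Authorizations:
#     alice:
#       password: secret
#       roles: ["upload_stripes", "add_data"]   # "*" allows any role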
def getPassword(self, role, user):
user_info = self.AAInfo.get(user)
#print "user_info: %s %s %s" % (action, user, user_info)
if user_info is None: return None
roles = user_info.get("roles", [])
#print "roles:", roles
if role in roles or "*" in roles:
#print "password:", user_info.get("password")
return user_info.get("password")
else:
return None
def cacheability_control(method):
def decorated(*params, **args):
resp = method(*params, **args)
if args.get("cache","yes") != "yes":
resp.headers["Cache-control"] = "no-store"
return resp
return decorated
class StripedHandler(WebPieHandler):
def __init__(self, request, app, path=None):
WebPieHandler.__init__(self, request, app, path)
self.App.init(request)
self.Config = self.App.Config
self.MetadataTTL = self.Config.MetadataTTL
self.DataTTL = self.Config.DataTTL
def hello(self, req, relpath, **args):
return Response("hello")
def index(self, req, relpath, **args):
return self.render_to_response("index.html")
def authenticate(self, req, role):
body = req.body
#print folder.Name
ok, header = digest_server(role, req.environ, self.App.authorizeUser)
if not ok:
resp = Response("Authorization required", status=401)
if header:
#print "Header: %s %s" % (type(header), header)
resp.headers['WWW-Authenticate'] = header
return False, resp
user = header
return True, user
def datasets(self, req, relpath, **args):
ds_last = {}
buckets = self.Config.Buckets
ds_info = {}
ds_to_bucket = self.App.scanDatasets()
for ds in self.App.datasets():
cb = self.App.backendForDataset(ds)
ne, ng, fileset = 0,0,set()
for rginfo in cb.RGInfos(ds):
ne += rginfo["NEvents"]
ng += 1
for s in rginfo["Segments"]:
fileset.add(s["FileName"])
ds_info[ds] = (ne, ng, fileset)
total_datasets = len(ds_info)
total_events = sum([ne for ne, _, _ in ds_info.values()])
total_groups = sum([ng for _, ng, _ in ds_info.values()])
total_files = sum([len(fs) for _, _, fs in ds_info.values()])
resp = self.render_to_response("datasets.html",
total_datasets = total_datasets, total_events = total_events, total_groups = total_groups,
total_files = total_files,
data=sorted([(ds, len(fs), ne, ng, int(float(ne)/ng+0.5) if ng > 0 else 0, ds_to_bucket[ds]) for ds, (ne, ng, fs) in ds_info.items()]
))
resp.cache_expires(self.MetadataTTL)
return resp
def rescan(self, req, relpath, **args):
self.App.scanDatasets()
def dataset_info(self, req, relpath, ds=None, **args):
self.App.scanDatasets()
backend = self.App.backendForDataset(ds)
if backend is None:
return Response("Dataset %s is not found" % (ds,), status=404)
try: schema = backend.schema(ds)
except StripedNotFoundException:
return Response("Dataset %s is not found" % (ds,), status=404)
ne = 0
ng = 0
files = set()
for rginfo in backend.RGInfos(ds):
ne += rginfo["NEvents"]
ng += 1
for s in rginfo["Segments"]:
files.add(s["FileName"])
branches = [ (bname, sorted(bdict.items())) for bname, bdict in schema["branches"].items() ]
return self.render_to_response("dataset_info.html", ds=ds, nfiles = len(files), nevents = ne, ngroups = ng,
attributes = schema["attributes"].items(),
branches = branches
)
def ____buckets(self, req, relpath, **args):
#print self.App.bucketInfoURL
data = urllib2.urlopen(self.App.bucketInfoURL).read()
data = json.loads(data)
data_lst = []
my_buckets = self.Config.Buckets
for binfo in data:
bname = binfo["name"]
if bname in my_buckets:
basic_stats = binfo["basicStats"]
memUsed = basic_stats["memUsed"]
diskUsed = basic_stats["diskUsed"]
items = basic_stats["itemCount"]
ops = basic_stats["opsPerSec"]
quotaUsed = basic_stats["quotaPercentUsed"]
datasets = [ds for ds in self.App.datasets()
if self.App.bucketForDataset(ds) == bname]
data_lst.append((bname, items, float(diskUsed)/1024/1024/1024,
float(memUsed)/1024/1024/1024, quotaUsed, ops,
sorted(datasets)))
data_lst.sort() # by bname
return self.render_to_response("buckets.html", data = data_lst)
#
# Data
#
def dataset_list(self, req, relpath, **args):
self.App.scanDatasets()
lst = sorted(self.App.datasets())
return Response(json.dumps(lst), content_type="text/json", cache_expires=self.MetadataTTL)
def dataset_schema(self, req, relpath, ds=None, **args):
backend = self.App.backendForDataset(ds)
if not backend:
return Response("Dataset %s is not found" % (ds,), status=404)
try:
schema = backend.schema(ds)
except StripedNotFoundException:
return Response("Dataset %s is not found" % (ds,), status=404)
return Response(json.dumps(schema), content_type="text/json", cache_expires=self.MetadataTTL)
@cacheability_control
def stripe(self, req, relpath, ds=None, rgid=None, column=None, compressed="no", **args):
#
# stripe?ds=<dataset>&rgid=<rgid>&column=<column>
#
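# Illustrative request (names made up):
#   GET /stripe?ds=MyDataset&rgid=12&column=pt&compressed=yes
# returns the raw stripe bytes for that (dataset, row group, column),
# zlib-compressed when compressed=yes.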
compressed = compressed == "yes"
backend = self.App.backendForDataset(ds)
key = stripe_key(ds, column, rgid)
data = backend.get_data([key]).get(key)
if data is None:
return Response("Stripe %s %s %s not found" % (ds, column, rgid),
status=400)
if compressed:
data = zlib.compress(data, 1)
return Response(data, cache_expires=self.DataTTL)
@cacheability_control
def data(self, req, relpath, ds=None, key=None, compressed="no", **args):
as_json = args.get("json","no") == "yes"
if ds is not None:
ds = urllib.unquote(ds or "")
compressed = compressed == "yes"
backend = self.App.backendForDataset(ds)
if backend is None:
return Response("Can not find bucket", status=400)
key = urllib.unquote(key)
data = backend.getStandaloneData([key], dataset_name=ds, as_json=as_json)[key]
if data is None:
return Response("Not found", status=400)
if as_json:
data = json.dumps(data)
return Response(data, cache_expires=self.DataTTL, content_type="text/json")
if compressed:
data = zlib.compress(data, 1)
return Response(data, cache_expires=self.DataTTL)
def add_data(self, req, relpath, key=None, ds=None, **args):
if req.environ.get("REQUEST_METHOD") != "POST":
return Response("Bad request", status=402)
ok, resp = self.authenticate(req, "add_data")
if not ok: return resp
backend = self.App.backendForDataset(ds)
backend.putStandaloneData({key:bytes(req.body)}, dataset_name=ds)
return Response("OK")
def token(self, req, relpath, role=None, ds=None, **args):
#
# Dataset (ds) is not use for now
#
if isinstance(role, unicode):
role = role.encode("ascii", "ignore")
if isinstance(ds, unicode):
ds = ds.encode("ascii", "ignore")
ok, resp = self.authenticate(req, role)
#print "authenticate:", ok, type(resp), resp
if not ok:
return resp
user = resp
token = self.App.tokenForUserRole(user, role)
if not token:
return Response("Unauthorized", status=403)
return Response(token, content_type="text/plain")
@cacheability_control
def stripe_meta(self, req, relpath, ds=None, rgid=None, column=None, compressed="no", **args):
#
# stripe?ds=<dataset>&rgid=<rgid>&column=<column>
#
compressed = compressed == "yes"
backend = self.App.backendForDataset(ds)
key = "%s:%s:%s.bin" % (ds, column, rgid)
data = backend.get_data([key]).get(key)
if data is None:
return Response("Stripe %s %s %s not found" % (ds, column, rgid),
status=400)
if compressed:
data = zlib.compress(data, 1)
return Response(json.dumps({"length": len(data)}), cache_expires=self.MetadataTTL, content_type="text/json")
def upload_stripes(self, req, relpath, ds=None, rgid=None, **args):
token = req.headers.get("X-Authorization-Token")
if not token:
return Response("Unauthorized", status=403)
authorized, extra = self.App.authorizeToken(token, "upload_stripes")
if not authorized:
return Response("Unauthorized", status=403)
payload = extra
username = extra["identity"]
data = req.body
header, data = data.split("\n",1)
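# Expected body layout (illustrative): a single header line such as
# "colA:100 colB:40" followed by the concatenated stripe bytes, 100 bytes for
# colA then 40 bytes for colB; the loop below slices each stripe out by size.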
l = len(data)
i = 0
stripes = {}
for w in header.split():
column, size = w.split(":")
size = int(size)
stripe = data[i:i+size]
i += size
assert i <= l # i can be == l only after last stripe
key = "%s:%s:%s.bin" % (ds, column, rgid)
stripes[key] = stripe
backend = self.App.backendForDataset(ds)
backend.put_data(stripes)
return Response("OK")
@cacheability_control
def stripes(self, req, relpath, ds=None, rgid=None, columns=None, compressed="no", **args):
compressed = compressed == "yes"
backend = self.App.backendForDataset(ds)
columns = columns.split(",")
keys = ["%s:%s:%s.bin" % (ds, column, rgid) for column in columns]
data = backend.get_data(keys)
data_out = []
for k, val in data.items():
cn = k.split(":")[1]
#print type(cn), cn
if val is not None:
if compressed:
val = zlib.compress(val, 1)
data_out.append((cn, len(val), val))
header = " ".join(("%s:%d" % (cn, n) for cn, n, v in data_out)) + "\n"
def data_iterator(header, data):
yield bytes(header)
for cn, n, v in data:
yield bytes(v)
return Response(app_iter=data_iterator(header, data_out), cache_expires=self.DataTTL)
@cacheability_control
def stripes_sizes(self, req, relpath, ds=None, rgids=None, columns=None, **args):
if req.body:
params = json.loads(req.body)
ds = params["ds"]
rgids = params["rgids"]
columns = params["columns"]
else:
rgids = rgids.split(",")
columns = columns.split(",")
backend = self.App.backendForDataset(ds)
out = {}
for cn in columns:
keys = ["%s:%s:%s.bin" % (ds, cn, rgid) for rgid in rgids]
data = backend.get_data(keys)
lst = []
for k in keys:
rgid = int(k.split(".bin",1)[0].split(":")[2])
lst.append([rgid, len(data[k])])
out[cn] = lst
out = json.dumps(out)
exp = 0 if req.body else self.DataTTL
return Response(out, cache_expires=exp, content_type="text/json")
def column_desc(self, req, relpath, ds=None, column=None, **args):
backend = self.App.backendForDataset(ds)
key = "%s:%s:@@desc.json" % (ds, column)
data = backend.get_json([key]).get(key)
if data is None:
return Response("Column %s %s not found" % (ds, column), status=400)
if not isinstance(data, (str, unicode)):
data = json.dumps(data)
return Response(data, cache_expires=self.MetadataTTL, content_type="text/json")
def column_descs(self, req, relpath, ds=None, columns=None, **args):
backend = self.App.backendForDataset(ds)
columns = columns.split(",")
keys = ["%s:%s:@@desc.json" % (ds, column) for column in columns]
data = backend.get_json(keys)
out = {}
for key, desc in data.items():
if isinstance(desc, (str, unicode)):
desc = json.loads(desc)
column = key.split(":")[1]
out[column] = desc
return Response(json.dumps(out), content_type="text/json", cache_expires=self.MetadataTTL)
def columns(self, req, | |
<filename>src/api/datamanage/pro/datamodel/dmm/calculation_atom_manager.py
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from django.utils.translation import ugettext as _
from django.db.models import Count
from django.db.models import Q
from common.log import logger
from common.transaction import auto_meta_sync
from datamanage.pro.datamodel.utils import list_diff
from datamanage.pro import exceptions as dm_pro_errors
from datamanage.pro.datamodel.models.datamodel import DmmModelInfo, DmmModelFieldStage
from datamanage.pro.datamodel.models.indicator import (
DmmModelCalculationAtom,
DmmModelCalculationAtomImage,
DmmModelCalculationAtomImageStage,
DmmCalculationFunctionConfig,
DmmModelIndicatorStage,
DmmModelIndicator,
)
from datamanage.pro.datamodel.dmm.object import (
get_object,
check_and_assign_update_content,
is_objects_existed,
models_object_to_dict,
update_data_models_objects,
)
from datamanage.pro.datamodel.models.model_dict import (
CalculationContentType,
CalculationAtomType,
AggregateFunction,
DataModelObjectType,
DataModelObjectOperationType,
)
from datamanage.pro.datamodel.handlers.verifier import validate_calculation_content, strip_comment
from datamanage.pro.datamodel.dmm.indicator_manager import IndicatorManager
from datamanage.pro.datamodel.dmm.schema import get_schema_list
from datamanage.pro.datamodel.dmm.state_machine import update_data_model_state
from datamanage.pro.datamodel.dmm.operation_log_manager import OperationLogManager
class CalculationAtomManager(object):
@staticmethod
def get_calculation_formula(content_dict, option):
"""
Build the aggregation SQL for a calculation atom.
:param content_dict: {Dict} calculation content
:param option: {String} how the calculation content was submitted
:return: calculation_formula: {String} aggregation SQL
"""
# Determine how the calculation content was submitted: form (TABLE) or SQL
if option == CalculationContentType.TABLE.value:
calculation_formula = AggregateFunction.get_calculation_formula(
content_dict['calculation_function'], content_dict['calculation_field']
)
else:
calculation_formula = content_dict['calculation_formula']
return calculation_formula
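# Illustrative mapping (mirroring the docstring examples below): TABLE content like
# {"calculation_field": "price", "calculation_function": "count_distinct"} is expanded
# by AggregateFunction into a formula such as "count(distinct price)", while the SQL
# option passes calculation_formula through unchanged.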
@classmethod
@auto_meta_sync(using='bkdata_basic')
def create_calculation_atom(cls, params, bk_username):
"""
Create a calculation atom.
:param params: {Dict} creation parameters, for example
{
"model_id": 32,
"calculation_atom_name": "item_sales_distinct_count",
"calculation_atom_alias": "item_sales_distinct_count",
"description": "item_sales_distinct_count",
"field_type": "long",
"calculation_content": {
"option": "TABLE",
"content": {
"calculation_field": "price",
"calculation_function": "count_distinct"
}
}
}
:param bk_username: {String} user name
:return: calculation_atom_dict: {Dict} calculation atom details
"""
# 1) Check that the data model identified by model_id exists
datamodel_object = get_object(DmmModelInfo, model_id=params['model_id'])
project_id = datamodel_object.project_id
# 2) Check by calculation atom name whether the atom already exists
is_calculation_atom_existed, calc_atom_queryset = is_objects_existed(
DmmModelCalculationAtom, calculation_atom_name=params['calculation_atom_name']
)
if is_calculation_atom_existed:
raise dm_pro_errors.CalculationAtomNameAlreadyExistError()
# 3) Build the aggregation SQL
calculation_formula = cls.get_calculation_formula(
params['calculation_content']['content'], params['calculation_content']['option']
)
# Check whether the same aggregation formula already exists under this model
cls.validate_calc_formula_existed(
params['model_id'], calculation_formula, DataModelObjectOperationType.CREATE.value
)
# 4) Validate the aggregation SQL
schema_list = get_schema_list(params['model_id'])
origin_fields_dict, condition_fields = validate_calculation_content(
datamodel_object.table_name,
params['calculation_atom_name'],
params['field_type'],
calculation_formula,
schema_list,
)
if params['calculation_atom_name'] not in origin_fields_dict:
raise dm_pro_errors.SqlValidateError(message_kv={'error_info': _('未解析出正确的计算来源字段origin_fields')})
origin_fields = origin_fields_dict[params['calculation_atom_name']]
# 5) Insert the new calculation atom record
calculation_atom_object = DmmModelCalculationAtom(
model_id=params['model_id'],
project_id=project_id,
calculation_atom_name=params['calculation_atom_name'],
calculation_atom_alias=params['calculation_atom_alias'],
description=params['description'],
field_type=params['field_type'],
calculation_content=params['calculation_content'],
calculation_formula=calculation_formula,
origin_fields=origin_fields,
created_by=bk_username,
updated_by=bk_username,
)
calculation_atom_object.save()
# Update the model's publish_status
step_id, publish_status = update_data_model_state(
params['model_id'], cls.create_calculation_atom.__name__, bk_username
)
# 6) Return the calculation atom details
calculation_atom_dict = models_object_to_dict(
calculation_atom_object,
step_id=step_id,
publish_status=publish_status,
calculation_formula_stripped_comment=strip_comment(calculation_atom_object.calculation_formula),
)
# 7) Write the operation log
OperationLogManager.create_operation_log(
created_by=bk_username,
model_id=params['model_id'],
object_type=DataModelObjectType.CALCULATION_ATOM.value,
object_operation=DataModelObjectOperationType.CREATE.value,
object_id=calculation_atom_dict['calculation_atom_name'],
object_name=calculation_atom_dict['calculation_atom_name'],
object_alias=calculation_atom_dict['calculation_atom_alias'],
content_after_change=calculation_atom_dict,
)
return calculation_atom_dict
@classmethod
@auto_meta_sync(using='bkdata_basic')
def update_calculation_atom(cls, calculation_atom_name, params, bk_username):
"""
Update a calculation atom.
:param calculation_atom_name: {String} calculation atom name
:param params: {Dict} update parameters, for example:
{
"model_id": 32,
"description": "item_sales_distinct_count",
"calculation_content": {
"option": "SQL",
"content": {
"calculation_formula": "count(distinct price)"
}
}
}
:param bk_username: {String} user name
:return: calculation_atom_dict: {Dict} calculation atom details
"""
# 1) Check that the data model identified by model_id exists
datamodel_object = get_object(DmmModelInfo, model_id=params['model_id'])
publish_status = datamodel_object.publish_status
# 2) Check that the calculation atom exists; quoted calculation atoms must not be modified
calculation_atom_object = get_object(
DmmModelCalculationAtom, model_id=params['model_id'], calculation_atom_name=calculation_atom_name
)
# Fetch the calculation atom details before the change
orig_calculation_atom_dict = cls.get_calculation_atom_info(calculation_atom_name)
# 3) Build the aggregation SQL and normalize the calculation content
has_cal_content = 'calculation_content' in params
if has_cal_content:
params['calculation_formula'] = cls.get_calculation_formula(
params['calculation_content']['content'], params['calculation_content']['option']
)
# Check whether the same aggregation formula already exists under this model
cls.validate_calc_formula_existed(
params['model_id'],
params['calculation_formula'],
DataModelObjectOperationType.UPDATE.value,
calculation_atom_name,
)
# 4) Update the calculation atom record
# Check whether anything actually needs to be updated
has_update = check_and_assign_update_content(calculation_atom_object, params, allow_null_fields=['description'])
# There is content to update
if has_update:
# Reject the update if the key parameters are not allowed to change
if not cls.is_calatom_deletable_param_editable(params['model_id'], [calculation_atom_name])[
calculation_atom_name
]['key_params_editable'] and not cls.validate_calc_atom_can_be_editable(
orig_calculation_atom_dict, calculation_atom_object
):
raise dm_pro_errors.CalculationAtomKeyParamsCanNotBeUpdatedError()
# The calculation content changed, so validate the aggregation SQL
if has_cal_content:
field_type = params['field_type'] if 'field_type' in params else calculation_atom_object.field_type
schema_list = get_schema_list(params['model_id'])
origin_fields_dict, condition_fields = validate_calculation_content(
datamodel_object.table_name,
calculation_atom_name,
field_type,
params['calculation_formula'],
schema_list,
)
if calculation_atom_name not in origin_fields_dict:
raise dm_pro_errors.SqlValidateError(message_kv={'error_info': _('未解析出正确的计算来源字段origin_fields')})
calculation_atom_object.origin_fields = origin_fields_dict[calculation_atom_name]
calculation_atom_object.updated_by = bk_username
calculation_atom_object.save()
# Update the model's publish_status
step_id, publish_status = update_data_model_state(
params['model_id'], cls.update_calculation_atom.__name__, bk_username
)
# 5) Return the updated calculation atom details
calculation_atom_dict = models_object_to_dict(
calculation_atom_object,
step_id=datamodel_object.step_id,
publish_status=publish_status,
calculation_formula_stripped_comment=strip_comment(calculation_atom_object.calculation_formula),
)
# 6) If the calculation atom changed, write an operation log entry
if has_update:
OperationLogManager.create_operation_log(
created_by=bk_username,
model_id=params['model_id'],
object_type=DataModelObjectType.CALCULATION_ATOM.value,
object_operation=DataModelObjectOperationType.UPDATE.value,
object_id=calculation_atom_dict['calculation_atom_name'],
object_name=calculation_atom_dict['calculation_atom_name'],
object_alias=calculation_atom_dict['calculation_atom_alias'],
content_before_change=orig_calculation_atom_dict,
content_after_change=calculation_atom_dict,
)
return calculation_atom_dict
@classmethod
def validate_calc_atom_can_be_editable(cls, orig_calc_atom_dict, new_calc_atom_dict):
"""
Check whether a calculation atom can be edited.
:param orig_calc_atom_dict: {dict} original calculation atom content; a models object is converted via model_to_dict
:param new_calc_atom_dict: {dict} new calculation atom content; a models object is converted via model_to_dict
:return: True if the atom can be edited, otherwise False
"""
if isinstance(orig_calc_atom_dict, DmmModelCalculationAtom):
orig_calc_atom_dict = models_object_to_dict(orig_calc_atom_dict)
if isinstance(new_calc_atom_dict, DmmModelCalculationAtom):
new_calc_atom_dict = models_object_to_dict(new_calc_atom_dict)
# Some key parameters must not be modified
return cls.get_calc_atom_key_params_hash(orig_calc_atom_dict) == cls.get_calc_atom_key_params_hash(
new_calc_atom_dict
)
@classmethod
def get_calc_atom_key_params_hash(cls, calc_atom_dict):
"""
Compute the hash of the calculation atom's key parameters.
:param calc_atom_dict: {dict} calculation atom details
:return:
"""
calc_atom_dict['calculation_formula'] = cls.get_calculation_formula(
calc_atom_dict['calculation_content']['content'], calc_atom_dict['calculation_content']['option']
)
return hash('{}-{}'.format(calc_atom_dict['calculation_formula'], calc_atom_dict['field_type']))
@staticmethod
def validate_calc_formula_existed(model_id, calc_formula, calc_atom_edit_type, calc_atom_name=None):
"""
Check whether the aggregation formula already exists under the model.
:param model_id: {Int} model ID
:param calc_formula: {String} aggregation formula of the calculation atom
:param calc_atom_edit_type: {String} create/update
:param calc_atom_name: {String} calculation atom name
:return:
"""
is_calculation_formula_existed, calc_atom_queryset = is_objects_existed(
DmmModelCalculationAtom, model_id=model_id, calculation_formula=calc_formula
)
if not is_calculation_formula_existed:
return
if calc_atom_edit_type == DataModelObjectOperationType.CREATE.value or (
calc_atom_edit_type == DataModelObjectOperationType.UPDATE.value
and calc_atom_queryset[0].calculation_atom_name != calc_atom_name
):
raise dm_pro_errors.CalculationFormulaAlreadyExistError()
@staticmethod
def is_calatom_deletable_param_editable(model_id, calculation_atom_names):
"""
Whether the given calculation atoms can be deleted: checks whether they are quoted by other models or applied by indicators.
:param model_id: {Int} model ID
:param calculation_atom_names: {List} list of calculation atom names
:return: calc_atom_deletable_dict: {Dict} per atom: whether it can be deleted & whether it is quoted by other models / applied by indicators
"""
# Check 1) whether the calculation atoms are applied by indicators under the current model, in draft or published state
calc_atom_deletable_dict = {}
calc_atom_names_applied_by_indicators_in_stage = list(
DmmModelIndicatorStage.objects.filter(model_id=model_id, calculation_atom_name__in=calculation_atom_names)
.values('calculation_atom_name')
.annotate(count=Count('calculation_atom_name'))
.values_list('calculation_atom_name', flat=True)
)
calc_atom_names_applied_by_indicators = list(
DmmModelIndicator.objects.filter(model_id=model_id, calculation_atom_name__in=calculation_atom_names)
.values('calculation_atom_name')
.annotate(count=Count('calculation_atom_name'))
.values_list('calculation_atom_name', flat=True)
)
applied_calc_atom_names = set(
list(calc_atom_names_applied_by_indicators_in_stage + calc_atom_names_applied_by_indicators)
)
_, _, same_applied_calculation_atom_names = list_diff(applied_calc_atom_names, calculation_atom_names)
# Check 2) whether the calculation atoms are referenced by other models
# Calculation atoms created in this model
created_calc_atom_names = [
calc_atom_dict['calculation_atom_name']
for calc_atom_dict in DmmModelCalculationAtom.objects.filter(model_id=model_id).values(
'calculation_atom_name'
)
]
quoted_calc_atom_queryset = DmmModelCalculationAtomImage.objects.filter(
calculation_atom_name__in=created_calc_atom_names
)
quoted_calc_atom_names = set(
list([calc_atom_obj.calculation_atom_name for calc_atom_obj in quoted_calc_atom_queryset])
)
_, _, same_quoted_calc_atom_names = list_diff(quoted_calc_atom_names, calculation_atom_names)
# 3) Return the result of whether each calculation atom is referenced/applied
for calc_atom_name in calculation_atom_names:
calc_atom_deletable_dict[calc_atom_name] = {
'is_applied_by_indicators': False,
'is_quoted_by_other_models': False,
'deletable': True,
'key_params_editable': True,
}
if calc_atom_name in same_applied_calculation_atom_names:
calc_atom_deletable_dict[calc_atom_name].update(
{'is_applied_by_indicators': True, 'deletable': False, 'key_params_editable': False}
)
if calc_atom_name in same_quoted_calc_atom_names:
calc_atom_deletable_dict[calc_atom_name].update(
{'is_quoted_by_other_models': True, 'deletable': False, 'key_params_editable': False}
)
return calc_atom_deletable_dict
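# Illustrative shape of the returned mapping (the atom names and flag values here are
# hypothetical; the keys follow the dict built in the loop above):
# {
#     'atom_applied': {'is_applied_by_indicators': True, 'is_quoted_by_other_models': False,
#                      'deletable': False, 'key_params_editable': False},
#     'atom_free': {'is_applied_by_indicators': False, 'is_quoted_by_other_models': False,
#                   'deletable': True, 'key_params_editable': True},
# }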
@classmethod
def set_calatom_deletable_editable(cls, model_id, calculation_atom_names, calc_atom_list):
"""
Set whether each calculation atom can be deleted / whether its key parameters can be modified, based on whether it is referenced by other models / applied by indicators
:param model_id: {int} model ID
:param calculation_atom_names: {list} list of calculation atom names
:param calc_atom_list: {list} list of calculation atoms
:return:
"""
calc_atom_deletable_dict = cls.is_calatom_deletable_param_editable(model_id, calculation_atom_names)
for calc_atom_dict in calc_atom_list:
calc_atom_dict.update(calc_atom_deletable_dict.get(calc_atom_dict['calculation_atom_name'], {}))
@classmethod
def set_calculation_atoms_editable(cls, calc_atom_list):
"""
Set whether each calculation atom can be edited: atoms created in the current model can be edited, quoted atoms cannot
:param calc_atom_list: {List} list of calculation atoms
:return:
"""
for calc_atom_dict in calc_atom_list:
calc_atom_dict['editable'] = (
True if calc_atom_dict['calculation_atom_type'] == CalculationAtomType.CREATE else False
)
@staticmethod
def get_calc_atom_object_and_type(calculation_atom_name, model_id):
"""
Get the calculation atom object (DmmModelCalculationAtom object / DmmModelCalculationAtomImage object) & the calculation atom type
:param calculation_atom_name: {String} calculation atom name
:param model_id: {Int} model ID
:return: calc_atom_obj: {Object} calculation atom object, calculation_atom_type: {String} calculation atom type: create/quote
"""
is_calculation_atom_created, calculation_atom_queryset = is_objects_existed(
DmmModelCalculationAtom, model_id=model_id, calculation_atom_name=calculation_atom_name
)
if is_calculation_atom_created:
# The calculation atom was created in the current model
calc_atom_obj = calculation_atom_queryset[0]
calculation_atom_type = CalculationAtomType.CREATE
else:
calc_atom_obj = get_object(
DmmModelCalculationAtomImageStage, model_id=model_id, calculation_atom_name=calculation_atom_name
)
calculation_atom_type = CalculationAtomType.QUOTE
return calc_atom_obj, calculation_atom_type
@classmethod
@auto_meta_sync(using='bkdata_basic')
def delete_calculation_atom(cls, calculation_atom_name, model_id, bk_username):
"""
Delete a calculation atom
:param calculation_atom_name: {String} calculation atom name
:param model_id: {Int} model ID
:param bk_username: {String} username
:return:
"""
# 1) Determine whether the calculation atom was created in or quoted by the current model
calculation_atom_object, calculation_atom_type = cls.get_calc_atom_object_and_type(
calculation_atom_name, model_id
)
# 2) Check whether the calculation atom record can be deleted
if not cls.is_calatom_deletable_param_editable(model_id, [calculation_atom_name])[calculation_atom_name][
'deletable'
]:
raise dm_pro_errors.CalculationAtomCanNotBeDeletedError(message=_('A calculation atom applied by indicators or referenced by other models cannot be deleted'))
# 3) Get the calculation atom details before the change
orig_calculation_atom_dict = cls.get_calculation_atom_info(calculation_atom_name)
# 4) Delete the calculation atom record
calculation_atom_object.delete()
# 5) Get the data model info object
datamodel_object = get_object(DmmModelInfo, model_id=model_id)
# 6) Update the model's publish_status
step_id, publish_status = update_data_model_state(
datamodel_object.model_id, cls.delete_calculation_atom.__name__, bk_username
)
# 7) Add an operation log entry
OperationLogManager.create_operation_log(
created_by=bk_username,
model_id=model_id,
object_type=DataModelObjectType.CALCULATION_ATOM.value,
object_operation=DataModelObjectOperationType.DELETE.value,
object_id=calculation_atom_name,
object_name=calculation_atom_name,
object_alias=orig_calculation_atom_dict['calculation_atom_alias'],
content_before_change=orig_calculation_atom_dict,
)
return {'step_id': step_id, 'publish_status': publish_status}
@classmethod
@auto_meta_sync(using='bkdata_basic')
def delete_all_calc_atoms_in_model(cls, model_id):
"""
Delete all calculation atoms in the model (both atoms created in the model & atoms quoted in the model)
:param model_id: {Int} model ID
:return:
"""
# Metadata sync does not support queryset-level deletion, so records can only be deleted one by one in a for loop
# 1) Delete the calculation atoms created in the model
calculation_atom_queryset = DmmModelCalculationAtom.objects.filter(model_id=model_id)
for calculation_atom_obj in calculation_atom_queryset:
calculation_atom_obj.delete()
# 2) Delete the calculation atoms quoted in the model
calculation_atom_image_queryset = DmmModelCalculationAtomImage.objects.filter(model_id=model_id)
for calculation_atom_image_obj in calculation_atom_image_queryset:
calculation_atom_image_obj.delete()
calculation_atom_image_stage_queryset = DmmModelCalculationAtomImageStage.objects.filter(model_id=model_id)
for calculation_atom_image_obj in calculation_atom_image_stage_queryset:
calculation_atom_image_obj.delete()
@staticmethod
def validate_calc_atoms_existed(calculation_atom_names):
"""
Check in batch whether the quoted calculation atoms exist; if they do, return the queryset of quoted calculation atoms
:param calculation_atom_names: {List} list of calculation atom names
:return: calculation_atom_queryset: {Queryset} calculation atom queryset
"""
calculation_atom_queryset = DmmModelCalculationAtom.objects.filter(
calculation_atom_name__in=calculation_atom_names
)
orig_calculation_atom_names = [
calculation_atom_obj.calculation_atom_name for calculation_atom_obj in calculation_atom_queryset
]
add_calculation_atom_names, _, _ = list_diff(orig_calculation_atom_names, calculation_atom_names)
if add_calculation_atom_names:
raise dm_pro_errors.CalculationAtomNotExistError()
return calculation_atom_queryset
@staticmethod
def validate_calculation_atoms_quoted(model_id, calculation_atom_names, calculation_atom_queryset):
"""
Validate whether the current model can quote the listed calculation atoms, and return the calculation atom names that need to be newly quoted in dmm_model_calculation_atom_image
:param model_id: {Int} model ID
:param calculation_atom_names: {List} list of calculation atom names to quote
:param calculation_atom_queryset: {QuerySet} queryset of the calculation atoms to be quoted
:return: add_quoted_calc_atom_names: {List} list of calculation atom names that need to be newly quoted
"""
# 1) Check whether the calculation atoms were created in this model itself (a model cannot quote its own atoms)
for calc_atom_obj in calculation_atom_queryset:
if calc_atom_obj.model_id == model_id:
raise dm_pro_errors.CalculationAtomCanNotBeQuotedError()
# 2) Check whether the calculation atoms have already been quoted; already-quoted atoms are not quoted again
# Calculation atoms that have already been quoted
quoted_calculation_atom_queryset = DmmModelCalculationAtomImageStage.objects.filter(
model_id=model_id, calculation_atom_name__in=calculation_atom_names
)
quoted_calculation_atom_names = [
calculation_atom_obj.calculation_atom_name for calculation_atom_obj in quoted_calculation_atom_queryset
]
# Calculation atoms to be newly quoted & to be removed
added_quoted_calc_atom_names, _, updated_quoted_calc_atom_names = | |
parent module contains this
submodule _or_ `None`.
"""
return self._submodule_basename_to_node.get(submodule_basename)
def remove_global_attr_if_found(self, attr_name):
"""
Record the global attribute (e.g., class, variable) with the passed
name if previously recorded as defined by the pure-Python module
corresponding to this graph node to be subsequently undefined by the
same module.
If this module is actually a package, this method instead records this
attribute to be undefined by this package's pure-Python `__init__`
submodule.
This method is intended to be called on globals previously defined by
this module that are subsequently undefined via the `del` built-in by
this module, thus "forgetting" or "undoing" these globals.
For safety, there exists no corresponding `remove_global_attr()`
method. While defining this method is trivial, doing so would invite
`KeyError` exceptions on scanning valid Python that lexically deletes a
global in a scope under this module's top level (e.g., in a function)
_before_ defining this global at this top level. Since `ModuleGraph`
cannot and should not (re)implement a full-blown Python interpreter,
ignoring out-of-order deletions is the only sane policy.
Parameters
----------
attr_name : str
Unqualified name of the attribute to be removed.
"""
if self.is_global_attr(attr_name):
self._global_attr_names.remove(attr_name)
def __cmp__(self, other):
try:
otherIdent = getattr(other, 'graphident')
except AttributeError:
return NotImplemented
return cmp(self.graphident, otherIdent) # noqa: F821
def __eq__(self, other):
try:
otherIdent = getattr(other, 'graphident')
except AttributeError:
return False
return self.graphident == otherIdent
def __ne__(self, other):
try:
otherIdent = getattr(other, 'graphident')
except AttributeError:
return True
return self.graphident != otherIdent
def __lt__(self, other):
try:
otherIdent = getattr(other, 'graphident')
except AttributeError:
return NotImplemented
return self.graphident < otherIdent
def __le__(self, other):
try:
otherIdent = getattr(other, 'graphident')
except AttributeError:
return NotImplemented
return self.graphident <= otherIdent
def __gt__(self, other):
try:
otherIdent = getattr(other, 'graphident')
except AttributeError:
return NotImplemented
return self.graphident > otherIdent
def __ge__(self, other):
try:
otherIdent = getattr(other, 'graphident')
except AttributeError:
return NotImplemented
return self.graphident >= otherIdent
def __hash__(self):
return hash(self.graphident)
def infoTuple(self):
return (self.identifier,)
def __repr__(self):
return '%s%r' % (type(self).__name__, self.infoTuple())
# TODO: This indirection is, frankly, unnecessary. The
# ModuleGraph.alias_module() should directly add the desired AliasNode instance
# to the graph rather than indirectly adding an Alias instance to the
# "lazynodes" dictionary.
class Alias(str):
"""
Placeholder aliasing an existing source module to a non-existent target
module (i.e., the desired alias).
For obscure reasons, this class subclasses `str`. Each instance of this
class is the fully-qualified name of the existing source module being
aliased. Unlike the related `AliasNode` class, instances of this class are
_not_ actual nodes and hence _not_ added to the graph; they only facilitate
communication between the `ModuleGraph.alias_module()` and
`ModuleGraph.findNode()` methods.
"""
class AliasNode(Node):
"""
Graph node representing the aliasing of an existing source module under a
non-existent target module name (i.e., the desired alias).
"""
def __init__(self, name, node):
"""
Initialize this alias.
Parameters
----------
name : str
Fully-qualified name of the non-existent target module to be
created (as an alias of the existing source module).
node : Node
Graph node of the existing source module being aliased.
"""
super(AliasNode, self).__init__(name)
#FIXME: Why only some? Why not *EVERYTHING* except "graphident", which
#must remain equal to "name" for lookup purposes? This is, after all,
#an alias. The idea is for the two nodes to effectively be the same.
# Copy some attributes from this source module into this target alias.
for attr_name in (
'identifier', 'packagepath',
'_global_attr_names', '_starimported_ignored_module_names',
'_submodule_basename_to_node'):
if hasattr(node, attr_name):
setattr(self, attr_name, getattr(node, attr_name))
def infoTuple(self):
return (self.graphident, self.identifier)
class BadModule(Node):
pass
class ExcludedModule(BadModule):
pass
class MissingModule(BadModule):
pass
class InvalidRelativeImport (BadModule):
def __init__(self, relative_path, from_name):
identifier = relative_path
if relative_path.endswith('.'):
identifier += from_name
else:
identifier += '.' + from_name
super(InvalidRelativeImport, self).__init__(identifier)
self.relative_path = relative_path
self.from_name = from_name
def infoTuple(self):
return (self.relative_path, self.from_name)
class Script(Node):
def __init__(self, filename):
super(Script, self).__init__(filename)
self.filename = filename
def infoTuple(self):
return (self.filename,)
class BaseModule(Node):
def __init__(self, name, filename=None, path=None):
super(BaseModule, self).__init__(name)
self.filename = filename
self.packagepath = path
def infoTuple(self):
return tuple(filter(None, (self.identifier, self.filename, self.packagepath)))
class BuiltinModule(BaseModule):
pass
class SourceModule(BaseModule):
pass
class InvalidSourceModule(SourceModule):
pass
class CompiledModule(BaseModule):
pass
class InvalidCompiledModule(BaseModule):
pass
class Extension(BaseModule):
pass
class Package(BaseModule):
"""
Graph node representing a non-namespace package.
"""
pass
class NamespacePackage(Package):
"""
Graph node representing a namespace package.
"""
pass
class RuntimeModule(BaseModule):
"""
Graph node representing a non-package Python module dynamically defined at
runtime.
Most modules are statically defined on-disk as standard Python files.
Some modules, however, are dynamically defined in-memory at runtime
(e.g., `gi.repository.Gst`, dynamically defined by the statically
defined `gi.repository.__init__` module).
This node represents such a runtime module. Since this is _not_ a package,
all attempts to import submodules from this module in `from`-style import
statements (e.g., the `queue` submodule in `from six.moves import queue`)
will be silently ignored.
To ensure that the parent package of this module if any is also imported
and added to the graph, this node is typically added to the graph by
calling the `ModuleGraph.add_module()` method.
"""
pass
class RuntimePackage(Package):
"""
Graph node representing a non-namespace Python package dynamically defined
at runtime.
Most packages are statically defined on-disk as standard subdirectories
containing `__init__.py` files. Some packages, however, are dynamically
defined in-memory at runtime (e.g., `six.moves`, dynamically defined by
the statically defined `six` module).
This node represents such a runtime package. All attributes imported from
this package in `from`-style import statements that are submodules of this
package (e.g., the `queue` submodule in `from six.moves import queue`) will
be imported rather than ignored.
To ensure that the parent package of this package if any is also imported
and added to the graph, this node is typically added to the graph by
calling the `ModuleGraph.add_module()` method.
"""
pass
#FIXME: Safely removable. We don't actually use this anywhere. After removing
#this class, remove the corresponding entry from "compat".
class FlatPackage(BaseModule):
def __init__(self, *args, **kwds):
warnings.warn(
"This class will be removed in a future version of modulegraph",
DeprecationWarning)
super(FlatPackage, self).__init__(*args, **kwds)
#FIXME: Safely removable. We don't actually use this anywhere. After removing
#this class, remove the corresponding entry from "compat".
class ArchiveModule(BaseModule):
def __init__(self, *args, **kwds):
warnings.warn(
"This class will be removed in a future version of modulegraph",
DeprecationWarning)
super(ArchiveModule, self).__init__(*args, **kwds)
# HTML templates for ModuleGraph generator
header = """\
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>%(TITLE)s</title>
<style>
.node { padding: 0.5em 0 0.5em; border-top: thin grey dotted; }
.moduletype { font: smaller italic }
.node a { text-decoration: none; color: #006699; }
.node a:visited { text-decoration: none; color: #2f0099; }
</style>
</head>
<body>
<h1>%(TITLE)s</h1>"""
entry = """
<div class="node">
<a name="%(NAME)s"></a>
%(CONTENT)s
</div>"""
contpl = """<tt>%(NAME)s</tt> <span class="moduletype">%(TYPE)s</span>"""
contpl_linked = """\
<a target="code" href="%(URL)s" type="text/plain"><tt>%(NAME)s</tt></a>
<span class="moduletype">%(TYPE)s</span>"""
imports = """\
<div class="import">
%(HEAD)s:
%(LINKS)s
</div>
"""
footer = """
</body>
</html>"""
def _ast_names(names):
result = []
for nm in names:
if isinstance(nm, ast.alias):
result.append(nm.name)
else:
result.append(nm)
result = [r for r in result if r != '__main__']
return result
def uniq(seq):
"""Remove duplicates from a list, preserving order"""
# Taken from https://stackoverflow.com/questions/480214
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
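# A quick usage example (not in the original source), doctest-style:
#   >>> uniq([3, 1, 3, 2, 1])
#   [3, 1, 2]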
if sys.version_info[0] == 2:
DEFAULT_IMPORT_LEVEL = -1
else:
DEFAULT_IMPORT_LEVEL = 0
class _Visitor(ast.NodeVisitor):
def __init__(self, graph, module):
self._graph = graph
self._module = module
self._level = DEFAULT_IMPORT_LEVEL
self._in_if = [False]
self._in_def = [False]
self._in_tryexcept = [False]
@property
def in_if(self):
return self._in_if[-1]
@property
def in_def(self):
return self._in_def[-1]
@property
def in_tryexcept(self):
return self._in_tryexcept[-1]
def _collect_import(self, name, fromlist, level):
if sys.version_info[0] == 2:
if name == '__future__' and 'absolute_import' in (fromlist or ()):
self._level = 0
have_star = False
if fromlist is not None:
fromlist = uniq(fromlist)
if '*' in fromlist:
fromlist.remove('*')
have_star = True
# Record this import as originating from this module for subsequent
# handling by the _process_imports() method.
self._module._deferred_imports.append(
(have_star,
(name, self._module, fromlist, level),
{'edge_attr': DependencyInfo(
conditional=self.in_if,
tryexcept=self.in_tryexcept,
function=self.in_def,
fromlist=False)}))
def visit_Import(self, node):
for nm in _ast_names(node.names):
self._collect_import(nm, None, self._level)
def visit_ImportFrom(self, node):
level = node.level if node.level != 0 else self._level
self._collect_import(node.module or '', _ast_names(node.names), level)
def visit_If(self, node):
self._in_if.append(True)
self.generic_visit(node)
self._in_if.pop()
def visit_FunctionDef(self, node):
self._in_def.append(True)
self.generic_visit(node)
self._in_def.pop()
visit_AsyncFunctionDef = visit_FunctionDef
def visit_Try(self, node):
self._in_tryexcept.append(True)
self.generic_visit(node)
self._in_tryexcept.pop()
def visit_TryExcept(self, node):
self._in_tryexcept.append(True)
self.generic_visit(node)
self._in_tryexcept.pop()
def visit_Expression(self, node):
# Expression nodes cannot contain import statements or
# other | |
<filename>sdk/videoanalyzer/azure-mgmt-videoanalyzer/azure/mgmt/videoanalyzer/models/_models.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class Resource(msrest.serialization.Model):
"""Common fields that are returned in the response for all Azure Resource Manager resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
}
def __init__(
self,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.system_data = None
class ProxyResource(Resource):
"""The resource model definition for a Azure Resource Manager proxy resource. It will not have tags and a location.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
}
def __init__(
self,
**kwargs
):
super(ProxyResource, self).__init__(**kwargs)
class AccessPolicyEntity(ProxyResource):
"""Access policies help define the authentication rules, and control access to specific video resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param role: Defines the access level granted by this policy. Possible values include:
"Reader".
:type role: str or ~video_analyzer.models.AccessPolicyRole
:param authentication: Authentication method to be used when validating client API access.
:type authentication: ~video_analyzer.models.AuthenticationBase
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'role': {'key': 'properties.role', 'type': 'str'},
'authentication': {'key': 'properties.authentication', 'type': 'AuthenticationBase'},
}
def __init__(
self,
**kwargs
):
super(AccessPolicyEntity, self).__init__(**kwargs)
self.role = kwargs.get('role', None)
self.authentication = kwargs.get('authentication', None)
class AccessPolicyEntityCollection(msrest.serialization.Model):
"""A collection of AccessPolicyEntity items.
:param value: A collection of AccessPolicyEntity items.
:type value: list[~video_analyzer.models.AccessPolicyEntity]
:param next_link: A link to the next page of the collection (when the collection contains too
many results to return in one response).
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[AccessPolicyEntity]'},
'next_link': {'key': '@nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AccessPolicyEntityCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class AccountEncryption(msrest.serialization.Model):
"""Defines how the Video Analyzer account is (optionally) encrypted.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param type: Required. The type of key used to encrypt the Account Key. Possible values
include: "SystemKey", "CustomerKey".
:type type: str or ~video_analyzer.models.AccountEncryptionKeyType
:param key_vault_properties: The properties of the key used to encrypt the account.
:type key_vault_properties: ~video_analyzer.models.KeyVaultProperties
:param identity: The Key Vault identity.
:type identity: ~video_analyzer.models.ResourceIdentity
:ivar status: The current status of the Key Vault mapping.
:vartype status: str
"""
_validation = {
'type': {'required': True},
'status': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'key_vault_properties': {'key': 'keyVaultProperties', 'type': 'KeyVaultProperties'},
'identity': {'key': 'identity', 'type': 'ResourceIdentity'},
'status': {'key': 'status', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AccountEncryption, self).__init__(**kwargs)
self.type = kwargs['type']
self.key_vault_properties = kwargs.get('key_vault_properties', None)
self.identity = kwargs.get('identity', None)
self.status = None
class AudioEncoderBase(msrest.serialization.Model):
"""Base type for all audio encoder presets, which define the recipe or instructions on how audio should be processed.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AudioEncoderAac.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param bitrate_kbps: Bitrate, in kilobits per second or Kbps, at which audio should be encoded
(2-channel stereo audio at a sampling rate of 48 kHz). Allowed values are 96, 112, 128, 160,
192, 224, and 256. If omitted, the bitrate of the input audio is used.
:type bitrate_kbps: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.AudioEncoderAac': 'AudioEncoderAac'}
}
def __init__(
self,
**kwargs
):
super(AudioEncoderBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
self.bitrate_kbps = kwargs.get('bitrate_kbps', None)
class AudioEncoderAac(AudioEncoderBase):
"""A custom preset for encoding audio with the AAC codec.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param bitrate_kbps: Bitrate, in kilobits per second or Kbps, at which audio should be encoded
(2-channel stereo audio at a sampling rate of 48 kHz). Allowed values are 96, 112, 128, 160,
192, 224, and 256. If omitted, the bitrate of the input audio is used.
:type bitrate_kbps: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AudioEncoderAac, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.AudioEncoderAac' # type: str
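# Illustrative construction (a sketch, not from the generated file; the keyword follows the
# attribute map above and "128" is one of the documented allowed bitrates):
#   encoder = AudioEncoderAac(bitrate_kbps="128")
#   encoder.type  # '#Microsoft.VideoAnalyzer.AudioEncoderAac'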
class AuthenticationBase(msrest.serialization.Model):
"""Base class for access policies authentication methods.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: JwtAuthentication.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.JwtAuthentication': 'JwtAuthentication'}
}
def __init__(
self,
**kwargs
):
super(AuthenticationBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
class CertificateSource(msrest.serialization.Model):
"""Base class for certificate sources.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: PemCertificateList.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.PemCertificateList': 'PemCertificateList'}
}
def __init__(
self,
**kwargs
):
super(CertificateSource, self).__init__(**kwargs)
self.type = None # type: Optional[str]
class CheckNameAvailabilityRequest(msrest.serialization.Model):
"""The check availability request body.
:param name: The name of the resource for which availability needs to be checked.
:type name: str
:param type: The resource type.
:type type: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CheckNameAvailabilityRequest, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.type = kwargs.get('type', None)
class CheckNameAvailabilityResponse(msrest.serialization.Model):
"""The check availability result.
:param name_available: Indicates if the resource name is available.
:type name_available: bool
:param reason: The reason why the given name is not available. Possible values include:
| |
getReturnResult([DIALECT.IONIC], [DIALECT.AEOLIC])
elif (cleanForm[-1:] == "a"):
return getReturnResult([DIALECT.AEOLIC], [DIALECT.IONIC])
elif (case == "gen"):
if (cleanForm[-2:] == "hs"):
return getReturnResult([DIALECT.IONIC], [DIALECT.AEOLIC])
elif (cleanForm[-2:] == "as"):
return getReturnResult([DIALECT.AEOLIC], [DIALECT.IONIC])
elif (case == "dat"):
if (cleanForm[-2:] == "hi" or cleanForm[-1:] == "h"):
return getReturnResult([DIALECT.IONIC], [DIALECT.AEOLIC])
elif (cleanForm[-2:] == "ai" or cleanForm[-1:] == "a"):
return getReturnResult([DIALECT.AEOLIC], [DIALECT.IONIC])
elif (case == "acc"):
if (cleanForm[-2:] == "hn"):
return getReturnResult([DIALECT.IONIC], [DIALECT.AEOLIC])
elif (cleanForm[-2:] == "an"):
return getReturnResult([DIALECT.AEOLIC], [DIALECT.IONIC])
return getReturnResult([DIALECT.ANY], [])
# Attic-ionic h for a_; a_ stems (Ionic even after e, i, r) (Buck 8, pp. 21)
def Rule_NE_1b(info):
formData = info[0]
lemmaInfo = info[1]
form = formData["form"]
cleanForm = generalUtils.removeDiacritics(form)
lemma = formData["lemma"]
pos = formData["pos"]
if (((pos == "noun" and (lemmaInfo == generalUtils.SHORT_A_STEM or lemmaInfo == generalUtils.EIR_SHORT_A_STEM)) or
isShortAPpl(formData)) or (pos == "adj" and (lemmaInfo == generalUtils.ADJ_US
or lemmaInfo == generalUtils.ADJ_EIS_ESSA)) and ("gender" in formData) and
("number" in formData)):
gender = formData["gender"]
number = formData["number"]
if (gender == "fem" and number == "sg" and ("case" in formData)):
case = formData["case"]
if (case == "nom"):
if (cleanForm[-1:] == "h"):
return getReturnResult([DIALECT.IONIC], [DIALECT.AEOLIC])
if (case == "gen"):
if (cleanForm[-2:] == "hs"):
return getReturnResult([DIALECT.IONIC], [DIALECT.AEOLIC])
elif (cleanForm[-2:] == "as"):
return getReturnResult([DIALECT.AEOLIC], [DIALECT.IONIC])
elif (case == "dat"):
if (cleanForm[-2:] == "hi" or cleanForm[-1:] == "h"):
return getReturnResult([DIALECT.IONIC], [DIALECT.AEOLIC])
elif (cleanForm[-1:] == "ai" or cleanForm[-1:] == "a"):
return getReturnResult([DIALECT.AEOLIC], [DIALECT.IONIC])
elif (case == "acc"): #technically this is a stem alteration, but it fits here
if (cleanForm[-2:] == "hn"):
return getReturnResult([DIALECT.IONIC], [DIALECT.AEOLIC])
return getReturnResult([DIALECT.ANY], [])
# Genitive singular masculine alpha stems (Buck 41.4, pp. 37; Benner 65, pp. 363)
def Rule_NE_2(info):
formData = info[0]
lemmaInfo = info[1]
form = formData["form"]
cleanForm = generalUtils.removeDiacritics(form)
lemma = formData["lemma"]
pos = formData["pos"]
if (pos == "noun" and ((lemmaInfo == generalUtils.H_A_STEM or lemmaInfo == generalUtils.EIR_A_STEM)
and ("gender" in formData) and ("number" in formData))):
gender = formData["gender"]
number = formData["number"]
if (gender == "masc" and number == "sg" and ("case" in formData)):
case = formData["case"]
if (case == "nom"):
if (cleanForm[-2:] == "hs"):
return getReturnResult([DIALECT.IONIC], [DIALECT.AEOLIC])
elif (cleanForm[-2:] == "as"):
return getReturnResult([DIALECT.AEOLIC], [DIALECT.IONIC])
elif (case == "gen"):
# cleanForm[-2:] == "ou" would be the Attic ending
if (cleanForm[-1:] == "w"): # ionic
return getReturnResult([DIALECT.IONIC], [DIALECT.AEOLIC])
if (cleanForm[-2:] == "ao"): # ionic
return getReturnResult([DIALECT.HOMERIC], [DIALECT.AEOLIC, DIALECT.IONIC])
elif (cleanForm[-1:] == "a"): # West greek, Lesbian, Thessalian
return getReturnResult([DIALECT.AEOLIC], [DIALECT.IONIC])
elif (case == "dat"):
if (cleanForm[-2:] == "hi" or cleanForm[-1:] == "h"):
return getReturnResult([DIALECT.IONIC], [DIALECT.AEOLIC])
elif (cleanForm[-2:] == "ai" or cleanForm[-1:] == "a"):
return getReturnResult([DIALECT.AEOLIC], [DIALECT.IONIC])
elif (case == "acc"):
if (cleanForm[-2:] == "hn"):
return getReturnResult([DIALECT.IONIC], [DIALECT.AEOLIC])
elif (cleanForm[-2:] == "an"):
return getReturnResult([DIALECT.AEOLIC], [DIALECT.IONIC])
return getReturnResult([DIALECT.ANY], [])
# Plural a_-stems, (Buck 41.4, pp. 37; Benner 65, pp. 363)
def Rule_NE_3(info):
formData = info[0]
lemmaInfo = info[1]
form = formData["form"]
cleanForm = generalUtils.removeDiacritics(form)
lemma = formData["lemma"]
pos = formData["pos"]
if (((pos == "noun" and (lemmaInfo == generalUtils.H_A_STEM or
lemmaInfo == generalUtils.EIR_A_STEM or
lemmaInfo == generalUtils.EIR_SHORT_A_STEM or
lemmaInfo == generalUtils.SHORT_A_STEM)) or (pos == "adj" and
(lemmaInfo == generalUtils.ADJ_3_TERMINATION or lemmaInfo == generalUtils.ADJ_US
or lemmaInfo == generalUtils.ADJ_EIS_ESSA) and ("gender" in formData)
and formData["gender"] == "fem") or (lemma == "o(/s") or
(pos == "part" and ("gender" in formData) and formData["gender"] == "fem"))
and ("case" in formData) and ("number" in formData)):
number = formData["number"]
case = formData["case"]
if (case == "gen" and number == "pl"):
if (form[-4:] == "a/wn"):
return getReturnResult([DIALECT.AEOLIC, DIALECT.HOMERIC], [DIALECT.IONIC])
elif (form[-3:] == "w=n" or cleanForm[-2:] == "wn"): # includes e/wn
return getReturnResult([DIALECT.IONIC], [DIALECT.AEOLIC, DIALECT.HOMERIC])
elif (form[-3:] == "a=n" or cleanForm[-2:] == "an"): #second cuz p. sux
return getReturnResult([DIALECT.AEOLIC, DIALECT.HOMERIC], [DIALECT.IONIC])
return getReturnResult([DIALECT.ANY], [])
# For nouns ending in -eus (Buck 43, 111, pp. 41, 92; Benner 86, pp. 366)
def Rule_NE_4(info):
formData = info[0]
lemmaInfo = info[1]
form = formData["form"]
cleanForm = generalUtils.removeDiacritics(form)
lemma = formData["lemma"]
pos = formData["pos"]
if (pos == "noun" and lemmaInfo == generalUtils.W_STEM
and ("case" in formData) and ("number" in formData)):
number = formData["number"]
case = formData["case"]
if (number == "sg"):
if (case == "gen"):
# Attic only: if (cleanForm[-3:] == "ews"):
if (cleanForm[-3:] == "eos"): # ionic, west greek
return getReturnResult([DIALECT.IONIC], [DIALECT.AEOLIC, DIALECT.HOMERIC])
elif (cleanForm[-3:] == "hos"): # aeolic, homer
return getReturnResult([DIALECT.AEOLIC, DIALECT.HOMERIC], [DIALECT.IONIC])
elif (case == "dat"):
if (cleanForm[-2:] == "ei"): # ionic, west greek
return getReturnResult([DIALECT.IONIC], [DIALECT.AEOLIC, DIALECT.HOMERIC])
elif (cleanForm[-2:] == "hi"): # aeolic, homer
return getReturnResult([DIALECT.AEOLIC, DIALECT.HOMERIC], [DIALECT.IONIC])
elif (case == "acc"):
if (cleanForm[-2:] == "ea"):
return getReturnResult([DIALECT.IONIC], [DIALECT.AEOLIC, DIALECT.HOMERIC])
elif (cleanForm[-2:] == "ha"):
return getReturnResult([DIALECT.AEOLIC, DIALECT.HOMERIC], [DIALECT.IONIC])
# -h= in doric
elif (number == "pl"):
# nom plural h=s is attic
if (case == "nom"):
if (cleanForm[-3:] == "eis"):
return getReturnResult([DIALECT.AEOLIC, DIALECT.IONIC], [DIALECT.HOMERIC])
elif (cleanForm[-3:] == "hes"):
return getReturnResult([DIALECT.HOMERIC], [DIALECT.IONIC, DIALECT.AEOLIC])
elif (case == "gen"):
if (cleanForm[-3:] == "ewn"):
return getReturnResult([DIALECT.AEOLIC, DIALECT.IONIC], [DIALECT.HOMERIC])
elif (cleanForm[-3:] == "hwn"):
return getReturnResult([DIALECT.HOMERIC], [DIALECT.IONIC, DIALECT.AEOLIC])
elif (case == "acc"):
if (cleanForm[-3:] == "eas"):
return getReturnResult([DIALECT.AEOLIC, DIALECT.IONIC], [DIALECT.HOMERIC])
elif (cleanForm[-3:] == "has"):
return getReturnResult([DIALECT.HOMERIC], [DIALECT.IONIC, DIALECT.AEOLIC])
return getReturnResult([DIALECT.ANY], [])
# Singular endings of iota-stems; (Buck 109, pp. 91; Benner 103, pp. 369)
def Rule_NE_5(info):
formData = info[0]
lemmaInfo = info[1]
form = formData["form"]
cleanForm = generalUtils.removeDiacritics(form)
lemma = formData["lemma"]
pos = formData["pos"]
if (pos == "noun" and lemmaInfo == generalUtils.I_STEM
and ("case" in formData) and ("number" in formData)):
number = formData["number"]
case = formData["case"]
if (number == "sg"):
if (case == "gen"):
if (cleanForm[-3:] == "ews"):
return getReturnResult([DIALECT.IONIC], [DIALECT.AEOLIC, DIALECT.HOMERIC])
elif (cleanForm[-3:] == "ios"):
return getReturnResult([DIALECT.AEOLIC, DIALECT.HOMERIC], [])
elif (cleanForm[-3:] == "hos"):
return getReturnResult([DIALECT.HOMERIC], [])
elif (case == "dat"):
if (cleanForm[-2:] == "ei"):
return getReturnResult([DIALECT.IONIC], [DIALECT.AEOLIC])
elif (cleanForm[-1:] == "i"):
return getReturnResult([DIALECT.AEOLIC], [])
elif (number == "pl"):
if (case == "nom" or case == "voc"):
if (cleanForm[-3:] == "eis"):
return getReturnResult([DIALECT.IONIC], [DIALECT.AEOLIC, DIALECT.HOMERIC])
elif (cleanForm[-3:] == "ies"):
return getReturnResult([DIALECT.AEOLIC, DIALECT.HOMERIC], [])
elif (cleanForm[-3:] == "hes"):
return getReturnResult([DIALECT.HOMERIC], [DIALECT.AEOLIC, DIALECT.IONIC])
elif (case == "gen"):
if (cleanForm[-3:] == "ewn"):
return getReturnResult([DIALECT.IONIC], [DIALECT.AEOLIC])
elif (cleanForm[-3:] == "iwn"):
return getReturnResult([DIALECT.AEOLIC, DIALECT.HOMERIC], [])
elif (case == "dat"):
# this has to come first because "-esi" below would catch "-iesi"
if (cleanForm[-3:] == "isi" or cleanForm[-4:] == "iesi"):
return getReturnResult([DIALECT.AEOLIC], [])
elif (cleanForm[-3:] == "esi" or cleanForm[-4:] == "esin"):
return getReturnResult([DIALECT.IONIC], [DIALECT.AEOLIC])
elif (cleanForm[-5:] == "iessi" or cleanForm[-6:] == "iessin"):
return getReturnResult([DIALECT.HOMERIC], [DIALECT.IONIC, DIALECT.AEOLIC])
#second from Pindar
elif (case == "acc"):
if (cleanForm[-3:] == "eis"):
return getReturnResult([DIALECT.IONIC], [DIALECT.AEOLIC, DIALECT.HOMERIC])
elif (cleanForm[-2:] == "is" or cleanForm[-3:] == "ias"):
return getReturnResult([DIALECT.AEOLIC, DIALECT.HOMERIC], [])
elif (cleanForm[-3:] == "has"):
return getReturnResult([DIALECT.HOMERIC], [DIALECT.AEOLIC, DIALECT.IONIC])
return getReturnResult([DIALECT.ANY], [])
# Homeric, Aeolic dative plural in -essi (Buck 107.3, pp. 89)
def Rule_NE_6(info):
formData = info[0]
lemmaInfo = info[1]
form = formData["form"]
cleanForm = generalUtils.removeDiacritics(form)
lemma = formData["lemma"]
if ("case" in formData and "number" in formData):
case = formData["case"]
number = formData["number"]
if (number == "pl" and case == "dat"):
if (cleanForm[-4:] == "essi" or cleanForm[-5:] == "essin"):
return getReturnResult([DIALECT.AEOLIC, DIALECT.HOMERIC], [DIALECT.IONIC])
return getReturnResult([DIALECT.ANY], [])
# Homeric second declension endings (Benner 73-74, pp. 364)
def Rule_NE_7(info):
formData = info[0]
lemmaInfo = info[1]
form = formData["form"]
cleanForm = generalUtils.removeDiacritics(form)
lemma = formData["lemma"]
pos = formData["pos"]
if (((pos == "noun" and lemmaInfo == generalUtils.O_STEM)
or (pos == "adj" and (lemmaInfo == generalUtils.ADJ_3_TERMINATION or lemmaInfo == generalUtils.ADJ_2ND_DECL))
or (pos == "part") or (lemma == "o(/s" or lemma == "o("))
and ("number" in formData) and ("case" in formData)):
number = formData["number"]
case = formData["case"]
if (number == "sg" and case == "gen"):
if (cleanForm[-3:] == "oio" or cleanForm[-2:] == "oo"):
return getReturnResult([DIALECT.HOMERIC], [DIALECT.AEOLIC, DIALECT.IONIC])
elif (cleanForm[-2:] == "ou"):
return getReturnResult([DIALECT.AEOLIC, DIALECT.IONIC], [DIALECT.HOMERIC])
elif (number == "dual" and (case == "gen" or case == "dat")):
if (cleanForm[-4:] == "oiin"):
return getReturnResult([DIALECT.HOMERIC], [DIALECT.AEOLIC, DIALECT.IONIC])
else:
return getReturnResult([DIALECT.AEOLIC, DIALECT.IONIC], [DIALECT.HOMERIC])
elif (number == "pl" and case == "dat"):
if (cleanForm[-4:] == "oisi" or cleanForm[-5:] == "oisin"):
return getReturnResult([DIALECT.HOMERIC], [DIALECT.AEOLIC, DIALECT.IONIC])
elif (cleanForm[-3:] == "ois"):
return getReturnResult([DIALECT.AEOLIC, DIALECT.IONIC], [DIALECT.HOMERIC])
return getReturnResult([DIALECT.ANY], [])
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Ionic mi-verbs inflected like contracts (Buck 160, pp. 125)
def Rule_VE_1(info):
formData = info[0]
form = formData["form"]
lemma = formData["lemma"]
if (lemma | |
<reponame>bjornbytes/OpenXR-SDK-Source
#!/usr/bin/env python3
# Copyright (c) 2019 Collabora, Ltd.
#
# SPDX-License-Identifier: Apache-2.0
#
# Author(s): <NAME> <<EMAIL>>
#
# Purpose: This script helps drive a per-section PDF diff.
import re
from itertools import chain, zip_longest
from pathlib import Path
from pprint import pprint
import attr
from PyPDF2 import PdfFileReader
from pdf_diff import command_line as pdf_diff
NUMBERED_TITLE_RE = re.compile(
r'(?P<section_num>([0-9]+[.])+) (?P<title_text>.*)')
@attr.s
class Bookmark:
title = attr.ib()
level = attr.ib()
page_number = attr.ib()
top = attr.ib(default=None)
bottom = attr.ib(default=None)
left = attr.ib(default=None)
right = attr.ib(default=None)
nested_title = attr.ib(default=None)
def asdict(self):
# fields = attr.fields(Section)
return attr.asdict(self)
@attr.s
class Section:
title = attr.ib()
level = attr.ib()
page_start = attr.ib()
page_end = attr.ib()
pdf = attr.ib(default=None)
page_start_top = attr.ib(default=None)
page_end_bottom = attr.ib(default=None)
@property
def page_numbers(self):
return range(self.page_start, self.page_end + 1)
@property
def section_number(self):
match = NUMBERED_TITLE_RE.match(self.title)
if match:
return match.group('section_num')
return None
@property
def title_text(self):
match = NUMBERED_TITLE_RE.match(self.title)
if match:
return match.group('title_text')
return self.title
@property
def pdf_diff_options(self):
fields = attr.fields(Section)
ret = attr.asdict(self, filter=attr.filters.include(
fields.page_start, fields.page_end))
ret['fn'] = str(self.pdf.fn)
if self.page_start_top:
ret['page_start_top'] = self.page_start_top
if self.page_end_bottom:
ret['page_end_bottom'] = self.page_end_bottom
return ret
def asdict(self):
return attr.asdict(self,
filter=attr.filters.exclude(
attr.fields(Section).pdf))
def add_nested_title(bookmarks):
title_stack = []
for bookmark in bookmarks:
# Pop back to this bookmark's parent level before appending, so the stack holds
# the chain of ancestor titles that make up the nested title.
while len(title_stack) >= bookmark.level:
title_stack.pop()
title_stack.append(bookmark.title)
bookmark.nested_title = " : ".join(title_stack)
def outline_to_bookmarks(reader, outline=None, level=1, bookmarks=None):
if outline is None:
outline = reader.getOutlines()
if bookmarks is None:
bookmarks = []
for elt in outline:
if isinstance(elt, list):
outline_to_bookmarks(reader, outline=elt, level=level+1,
bookmarks=bookmarks)
else:
page_num = reader.getDestinationPageNumber(elt)
bookmark = Bookmark(
title=elt.title,
level=level,
page_number=page_num + 1)
page = reader.getPage(page_num)
_, ul_y = page.bleedBox.upperLeft
# print(page.bleedBox.upperLeft, page.bleedBox.lowerRight)
# Coordinate system is flipped compared to pdf_diff...
if elt.top:
bookmark.top = float(ul_y) - float(elt.top)
if elt.bottom:
bookmark.bottom = float(ul_y) - float(elt.bottom)
if elt.left:
bookmark.left = float(elt.left)
if elt.right:
bookmark.right = float(elt.right)
bookmarks.append(bookmark)
return bookmarks
def add_section_to_page_map(section, page_map):
for i in section.page_numbers:
if i not in page_map:
page_map[i] = []
page_map[i].append(section)
def compute_section_page_map(sections):
page_map = {}
for sec in sections:
# +1 is for an inclusive range
for page_num in sec.page_numbers:
if page_num not in page_map:
page_map[page_num] = []
page_map[page_num].append(sec)
return page_map
class PdfSpec:
def __init__(self, fn):
self.fn = fn
self.reader = PdfFileReader(open(str(fn), 'rb'))
self.bookmark_data = outline_to_bookmarks(self.reader)
self._page_pdfs = None
self._dom = None
@property
def dom(self):
if self._dom is None:
self._dom = pdf_diff.pdf_to_dom(str(self.fn))
return self._dom
@property
def page_pdfs(self):
import pypdftk
if not self._page_pdfs:
self._page_pdfs = pypdftk.split(self.fn)
return self._page_pdfs
def pdf_for_page(self, pagenum):
if pagenum is None:
return None
return self.page_pdfs[pagenum]
def compute_sections(self,
level=None,
bookmarks=None,
bookmark_predicate=None):
if level is None:
level = 1
if bookmarks is None:
if bookmark_predicate is not None:
bookmarks = [x for x in self.bookmark_data
if bookmark_predicate(x)]
else:
bookmarks = [x for x in self.bookmark_data
if x.level == level]
sections = []
# Add a placeholder section taking up all front-matter pages
first_bookmark = bookmarks[0]
if first_bookmark.page_number != 1:
sections.append(Section(title="0. Front Matter",
level=level,
page_start=1,
page_end=first_bookmark.page_number,
pdf=self))
prev_bookmark = None
for i, bookmark in enumerate(bookmarks):
if prev_bookmark is not None:
s = prev_bookmark.page_number
e = bookmark.page_number
if not bookmark.top or bookmark.top == 0:
# If no "top", then assume the section ends on a page break.
e -= 1
sec = Section(title=prev_bookmark.title,
level=prev_bookmark.level,
page_start=s,
page_end=e,
pdf=self)
if prev_bookmark.top:
sec.page_start_top = prev_bookmark.top
if bookmark.top:
sec.page_end_bottom = bookmark.top
sections.append(sec)
prev_bookmark = bookmark
if bookmark is not None:
# TODO Deal with the last section here!
pass
# Now, populate the object fields
self.comparable_sections = sections
self.section_by_title = {sec.title: sec
for sec in self.comparable_sections}
self.section_by_title_text = {sec.title_text: sec
for sec in self.comparable_sections
if sec.title_text}
self.section_by_number = {sec.section_number: sec
for sec in self.comparable_sections
if sec.section_number}
self.page_map = compute_section_page_map(self.comparable_sections)
return sections
def find_corresponding_section(self, section):
"""Find our own section corresponding to the supplied section from another PDF."""
own_section = self.section_by_title.get(section.title)
if own_section:
# Easy - full title matches
return own_section
own_section = self.section_by_title_text.get(section.title_text)
if own_section:
# Not as easy, we had a section renumber, possible issue here!
return own_section
own_section = self.section_by_number.get(section.section_number)
if own_section:
# Only the section number matched - WARNING! might be bad match!
return own_section
# Total failure
return None
@attr.s
class MatchingSection:
title = attr.ib()
orig_range = attr.ib()
new_range = attr.ib()
changes = attr.ib(default=None)
def __str__(self):
return '{} ({}:{}-{}, {}:{}-{})'.format(
self.title,
self.orig_range['fn'],
self.orig_range['page_start'],
self.orig_range['page_end'],
self.new_range['fn'],
self.new_range['page_start'],
self.new_range['page_end'],
)
def get_section_range_pairs(orig_section, new_pdf):
"""Return MatchingSection for a section."""
other_section = new_pdf.find_corresponding_section(orig_section)
if not other_section:
print("Skipping section {} - no match in the other doc!".format(
orig_section.title))
return None
return MatchingSection(
title=orig_section.title,
orig_range=orig_section.pdf_diff_options,
new_range=other_section.pdf_diff_options)
def get_section_page_pairs(orig_section, new_pdf):
"""Return (orig_page_num, new_page_num) pairs for each page in section."""
other_section = new_pdf.find_corresponding_section(orig_section)
if not other_section:
print("Skipping section {} - no match in the other doc!".format(
orig_section.title))
return []
return zip_longest(orig_section.page_numbers, other_section.page_numbers)
def get_page_pairs_by_section(orig_pdf, new_pdf):
"""Return an iterable of lists of (orig_page_num, new_page_num) pairs.
One such list of pairs is returned for each section in orig_pdf."""
return (list(get_section_page_pairs(sec, new_pdf))
for sec in orig_pdf.comparable_sections)
def get_all_page_pairs(orig_pdf, new_pdf):
"""Get a single list of all page pairs.
This accommodates inserted pages between sections."""
# Flatten into a single list of pairs
raw_pairs = list(chain.from_iterable(
get_page_pairs_by_section(orig_pdf, new_pdf)))
# For any "full pair" (where both parts are non-None),
# we can skip any half-diffs involving either page
# in the next pass. We compute that set here.
# For example, if we have the pairs:
# (1, 1), (2, None), (2, 1), (3, 2)
# we can drop the (2, None) pair because the original page 2
# is already being compared against the new page 1 -- see the (2, 1) --
# so there's no sense in saying it only exists in the original.
unique_full_pairs = set(((orig_page, new_page)
for orig_page, new_page in raw_pairs
if orig_page is not None
and new_page is not None))
skip_orig_only = set(((orig_page, None)
for orig_page, _ in unique_full_pairs))
skip_new_only = set(((None, new_page)
for _, new_page in unique_full_pairs))
skip_half_pairs = skip_orig_only.union(skip_new_only)
# Main filter pass: deduplicate and filter out excluded half-diffs
pairs = []
included_pairs = set()
for page_pair in raw_pairs:
if page_pair in skip_half_pairs:
print("Dropping half-pair covered by other full pair", page_pair)
# Don't unnecessarily include a "page only in document X" item.
continue
if page_pair in included_pairs:
print("Dropping duplicated pair", page_pair)
# Don't let small sections result in big dupes.
continue
pairs.append(page_pair)
included_pairs.add(page_pair)
return pairs
class SequenceGapFinder:
def __init__(self):
self.prev = None
def process_and_get_gap(self, current):
"""Return the range of expected numbers skipped between the last call
to this method and the current one.
Return None if no numbers skipped."""
ret = None
if current is not None:
if self.prev is not None and current > self.prev + 1:
ret = range(self.prev + 1, current)
self.prev = current
return ret
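# A quick sketch of the gap detection (not from the original file): feeding the page
# numbers 1, 2, 5 in order reports the skipped pages 3-4 on the third call.
#   finder = SequenceGapFinder()
#   finder.process_and_get_gap(1)  # None
#   finder.process_and_get_gap(2)  # None
#   finder.process_and_get_gap(5)  # range(3, 5)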
def zip_longest_permitting_none(a, b):
"""Do zip_longest except treat None as a zero-element iterable."""
if a is None and b is None:
return ()
if a is None:
return ((None, elt) for elt in b)
if b is None:
return ((elt, None) for elt in a)
return zip_longest(a, b)
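# Behaviour sketch (hypothetical inputs, not from the original file):
#   list(zip_longest_permitting_none([1, 2], None))  # [(1, None), (2, None)]
#   list(zip_longest_permitting_none(None, None))    # []
#   list(zip_longest_permitting_none([1], [7, 8]))   # [(1, 7), (None, 8)]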
def fill_pair_gaps(pairs):
"""Add any missing pages to the list of pairs."""
orig_pages = [orig_page for orig_page, _ in pairs
if orig_page is not None]
new_pages = [new_page for _, new_page in pairs
if new_page is not None]
assert(orig_pages == sorted(orig_pages))
assert(new_pages == sorted(new_pages))
fixed_pairs = []
orig_gaps = SequenceGapFinder()
new_gaps = SequenceGapFinder()
for orig_page, new_page in pairs:
orig_gap = orig_gaps.process_and_get_gap(orig_page)
new_gap = new_gaps.process_and_get_gap(new_page)
if orig_gap is not None or new_gap is not None:
gap_pairs = list(zip_longest_permitting_none(orig_gap, new_gap))
print("Found gap pairs", gap_pairs)
fixed_pairs.extend(gap_pairs)
fixed_pairs.append((orig_page, new_page))
return fixed_pairs
class GranularPdfDiff:
def __init__(self, orig_fn, new_fn):
self.orig_pdf = PdfSpec(orig_fn)
self.new_pdf = PdfSpec(new_fn)
def generate_matching_sections(self):
"""Return a generator of MatchingSection.
At most one MatchingSection is returned for each section in orig_pdf.
"""
for sec in self.orig_pdf.comparable_sections:
matching = get_section_range_pairs(sec, self.new_pdf)
if not matching:
continue
matching.orig_range["dom"] = self.orig_pdf.dom
matching.new_range["dom"] = self.new_pdf.dom
changes = pdf_diff.compute_changes(
matching.orig_range, matching.new_range, bottom_margin=93)
if changes:
matching.changes = changes
yield matching
else:
print("No changes in", matching)
def compute_sections(self, **kwargs):
self.orig_pdf.compute_sections(**kwargs)
self.new_pdf.compute_sections(**kwargs)
def compute_page_pairs(self):
pairs = get_all_page_pairs(self.orig_pdf, self.new_pdf)
return fill_pair_gaps(pairs)
def generate_page_diff(self, orig_page_num, new_page_num):
orig_page = self.orig_pdf.pdf_for_page(orig_page_num)
new_page = self.new_pdf.pdf_for_page(new_page_num)
if orig_page and new_page:
changes = pdf_diff.compute_changes(
orig_page, new_page)
pprint(changes)
# TODO
def generate_diff_from_pairs(self, pairs):
out_docs = []
for orig_page_num, new_page_num in | |
a Keras sequential model.
Since this SSE handles preprocessing the number of features will be inferenced from the training data.
However, when using 3D or 4D data the input_shape needs to be specified in the first layer.
Note that this SSE does not support the keras input_dim argument. Please always use input_shape.
The request should contain: model name, sort order, layer type, args, kwargs
With a row for each layer, with a final row for compilation keyword arguments.
Arguments should take the form of a comma separated string with the type of the value specified.
Use the pipe | character to specify type: 'arg1=value1|str, arg2=value2|int, arg3=value3|bool'
Sample input expected from the request:
'DNN', 1, 'Dense', '12|int', 'activation=relu|str'
'DNN', 2, 'Dropout', '0.25|float', ''
'DNN', 3, 'Dense', '1|int', 'activation=sigmoid|str'
'DNN', 4, 'Compilation', '', 'loss=binary_crossentropy|str, optimizer=adam|str, metrics=accuracy|list|str'
If you want to specify parameters for the optimizer, you can add that as the second last row of the architecture:
...
'DNN', 4, 'SGD', '', 'lr=0.01|float, clipvalue=0.5|float'
'DNN', 5, 'Compilation', '', 'loss=binary_crossentropy|str, metrics=accuracy|str'
Layer wrappers can be used by including them in the layer type followed by a space:
'DNN', 1, 'TimeDistributed Dense' ...
...
For the Bidirectional wrapper the merge_mode parameter can be specified in the kwargs.
When using recurrent or convolutional layers you will need to pass a valid input_shape to reshape the data.
Additionally, the feature definitions should include an 'identifier' variable that will be used for reshaping.
E.g. The identifier is a date field and you want to use a LSTM with 10 time steps on a dataset with 20 features.
'RNN', 1, 'LSTM', '64|int', 'input_shape=10;20|tuple|int, activation=relu|str'
Note that you can pass None instead of 20 for the features, as the actual number of features will be calculated after
preprocessing the data.
"""
# Interpret the request data based on the expected row and column structure
row_template = ['strData', 'numData', 'strData', 'strData', 'strData']
col_headers = ['model_name', 'sort_order', 'layer_type', 'args', 'kwargs']
# Create a Pandas Data Frame for the request data
self.request_df = utils.request_df(self.request, row_template, col_headers)
# Create a model that can be persisted to disk
self.model = PersistentModel()
# Get the model name from the request dataframe
self.model.name = self.request_df.loc[0, 'model_name']
# Get the model from cache or disk
self._get_model()
# Debug information is printed to the terminal and logs if the parameter debug = true
if self.model.debug:
self._print_log(3)
# Set a flag which will let us know that this is a Keras model
self.model.using_keras = True
# Sort the layers, drop unnecessary columns and save the model architecture to a new data frame
architecture = self.request_df.sort_values(by=['sort_order']).reset_index(drop=True).drop(labels=['model_name', 'sort_order'], axis = 1)
# Convert args to a list and kwargs to a dictionary
architecture['args'] = architecture['args'].apply(utils.get_args_by_type)
architecture['kwargs'] = architecture['kwargs'].apply(utils.get_kwargs).apply(utils.get_kwargs_by_type)
# Add the architecture to the model
self.model.architecture = architecture
# Add the first layer's kwargs as a property for easy reference
self.model.first_layer_kwargs = self.model.architecture.iloc[0, 2]
# Debug information is printed to the terminal and logs if the parameter debug = true
if self.model.debug:
self._print_log(10)
# Persist the model to disk
self.model = self.model.save(self.model.name, self.path, overwrite=self.model.overwrite, compress=self.model.compress)
# Update the cache to keep this model in memory
self._update_cache()
# Prepare the output
message = [[self.model.name, 'Keras model architecture saved to disk',\
time.strftime('%X %x %Z', time.localtime(self.model.state_timestamp))]]
self.response = pd.DataFrame(message, columns=['model_name', 'result', 'time_stamp'])
# Send the response table description to Qlik
self._send_table_description("setup")
# Debug information is printed to the terminal and logs if the parameter debug = true
if self.model.debug:
self._print_log(4)
# Finally send the response
return self.response
def set_features(self):
"""
Add feature definitions for the model
"""
# Interpret the request data based on the expected row and column structure
row_template = ['strData', 'strData', 'strData', 'strData', 'strData', 'strData']
col_headers = ['model_name', 'name', 'variable_type', 'data_type', 'feature_strategy', 'strategy_args']
# Create a Pandas Data Frame for the request data
self.request_df = utils.request_df(self.request, row_template, col_headers)
# Initialize the persistent model
self.model = PersistentModel()
# Get the model name from the request dataframe
self.model.name = self.request_df.loc[0, 'model_name']
# Get the model from cache or disk
self._get_model()
# Debug information is printed to the terminal and logs if the parameter debug = true
if self.model.debug:
self._print_log(3)
# Add the feature definitions to the model
self.model.features_df = self.request_df
self.model.features_df.set_index("name", drop=False, inplace=True)
# Store a copy of the features_df that will remain untouched in later calls
self.model.original_features_df = self.model.features_df.copy()
# Ensure there is at most one feature with variable_type identifier
if len(self.model.features_df.loc[self.model.features_df["variable_type"] == "identifier"]) > 1:
err = "Invalid feature definitions. Detected more than one feature with variable_type set to identifier. You can only pass one unique identifier."
raise Exception(err)
# Persist the model to disk
self.model = self.model.save(self.model.name, self.path, overwrite=self.model.overwrite, compress=self.model.compress)
# Update the cache to keep this model in memory
self._update_cache()
# Prepare the output
message = [[self.model.name, 'Feature definitions successfully saved to model',\
time.strftime('%X %x %Z', time.localtime(self.model.state_timestamp))]]
self.response = pd.DataFrame(message, columns=['model_name', 'result', 'time_stamp'])
# Send the response table description to Qlik
self._send_table_description("setup")
# Debug information is printed to the terminal and logs if the parameter debug = true
if self.model.debug:
self._print_log(4)
# Finally send the response
return self.response
def get_features(self):
"""
Get feature definitions for an existing model
"""
# Get the model from cache or disk based on the model_name in request
self._get_model_by_name()
# Prepare the output
self.response = self.model.features_df
self.response["sort_order"] = pd.Series([i+1 for i in range(len(self.response.index))], index=self.response.index)
self.response = self.response[["model_name", "sort_order", "name", "variable_type", "data_type",\
"feature_strategy", "strategy_args"]]
# Send the response table description to Qlik
self._send_table_description("features")
# Debug information is printed to the terminal and logs if the parameter debug = true
if self.model.debug:
self._print_log(4)
# Finally send the response
return self.response
def fit(self):
"""
Train and test the model based on the provided dataset
"""
# Open an existing model and get the training & test dataset and targets
train_test_df, target_df = self._get_model_and_data(target=True, set_feature_def=True)
# Check that the estimator is a supervised ML algorithm
if self.model.estimator_type not in ["classifier", "regressor"]:
err = "Incorrect usage. The estimator specified is not a known classifier or regressor: {0}".format(self.model.estimator)
raise Exception(err)
# Check which validation strategy is to be used, if any
# For an explanation of cross validation in scikit-learn see: http://scikit-learn.org/stable/modules/cross_validation.html#multimetric-cross-validation
if self.model.time_series_split > 0:
self.model.validation = "timeseries"
# Set up cross validation to be performed using TimeSeriesSplit
self.model.cv = TimeSeriesSplit(n_splits=self.model.time_series_split, max_train_size=self.model.max_train_size)
elif self.model.cv > 0:
self.model.validation = "k-fold"
elif self.model.test_size > 0:
self.model.validation = "hold-out"
else:
self.model.validation = "external"
if self.model.validation == "hold-out":
# Split the data into training and testing subsets
self.X_train, self.X_test, self.y_train, self.y_test = \
train_test_split(train_test_df, target_df, test_size=self.model.test_size, random_state=self.model.random_state)
else:
self.X_train = train_test_df
self.y_train = target_df
# Add the training and test data to the model if required
if self.model.retain_data:
self.model.X_train = self.X_train
self.model.y_train = self.y_train
try:
self.model.X_test = self.X_test
self.model.y_test = self.y_test
except AttributeError:
pass
# Scale the targets and increase stationarity if required
if self.model.scale_target or self.model.make_stationary:
# Set up the target transformer
self.model.target_transformer = TargetTransformer(scale=self.model.scale_target, make_stationary=self.model.make_stationary, stationarity_lags=self.model.stationarity_lags,\
missing=self.model.missing, scaler=self.model.scaler, logfile=self.logfile, **self.model.scaler_kwargs)
# Fit the transformer to the training targets
self.model.target_transformer = self.model.target_transformer.fit(self.y_train)
# Apply the transformer to the training targets
self.y_train = self.model.target_transformer.transform(self.y_train)
# Drop samples where the target cannot be transformed due to insufficient lags
self.X_train = self.X_train.iloc[len(self.X_train)-len(self.y_train):]
# Add lag observations to the samples if required
if self.model.lags or self.model.lag_target:
# Check if the current sample will be included as an input, or whether we only use lag observations for predictions
extrapolate = 1 if self.model.current_sample_as_input else 0
# Add the lag observations
self.X_train = self._add_lags(self.X_train, self.y_train, extrapolate=extrapolate, update_features_df=True)
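# Illustrative sketch (an assumption; the real work is done by self._add_lags): lag
# observations are essentially earlier rows shifted forward and appended as new columns.
def _add_lags_example(df, lags=2):
    import pandas as pd
    shifted = [df.shift(i).add_suffix("_lag{0}".format(i)) for i in range(1, lags + 1)]
    # the first `lags` rows contain missing values and would typically be dropped
    return pd.concat([df] + shifted, axis=1).dropna()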
# Drop targets for samples which were dropped due to null values after adding lags.
if len(self.y_train) | |
chunked_keys = [keys[start: start + chunk_size] for start in range(0, len(keys), chunk_size) ]
for chunk in chunked_keys:
items = self.read_items_from_redis(chunk)
self.delete_keys(chunk)
# transform them into lines
items = [json.dumps(items[x]) + '\n' for x in items]
f.writelines(items)
log.info(f"wrote : {len(items)}")
def kgx_merge_dict(self, dict_1, dict_2):
# collect values that are same first
merged = {}
# if properties match up with value treat as one
# get dict_1 intersection dict_2 ,
merged = {x: dict_1[x] for x in dict_1 if dict_1.get(x) == dict_2.get(x)}
# get dict_1 disjoint dict 2
unique_dict_1_props = {x: dict_1[x] for x in dict_1 if x not in merged.keys()}
# get dict_2 disjoint dict 1
unique_dict_2_props = {x: dict_2[x] for x in dict_2 if x not in merged.keys()}
merged.update(self.kgx_merge_dict(unique_dict_1_props, unique_dict_2_props))
for keys in merged:
attribute = merged[keys]
# When merging arrays for bulk loading
# we have to make sure that items in lists
# don't contain single-quotes.
# Single quotes in array items break parsing of arrays on bulk loading
# downstream.
if isinstance(attribute, list):
new_attribute = []
for value in attribute:
if isinstance(value, str):
value = value.replace("'", '`')
new_attribute.append(value)
attribute = new_attribute
merged[keys] = attribute
return merged
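# Small standalone illustration of the single-quote handling above (sample values are
# hypothetical): single quotes inside list items are replaced with backticks so the
# RedisGraph bulk loader can parse the array downstream.
#   ["5'-flanking region", "promoter"] -> ["5`-flanking region", "promoter"]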
def sort_node_types(self, node_dict):
categories = node_dict.get('category')
if not categories:
return node_dict
leaf_type = self.biolink.get_leaf_class(categories)
# bring the leaf class to the top
categories = [leaf_type] + [x for x in categories if x != leaf_type]
node_dict['category'] = categories
return node_dict
def merge_node_and_edges (self, nodes, edges, current_metric , data_set_name ):
read_time = current_metric['read_kgx_file_time']
total_time = current_metric['total_processing_time']
# prefix keys for fetching back and writing to file.
nodes = {f"node-{node['id']}": self.sort_node_types(node) for node in nodes}
edges = {f"edge-{edge['subject']}-{edge['object']}-{edge['predicate']}": edge for edge in
edges}
read_from_redis_time = time.time()
# read nodes and edges scoped to current file
nodes_in_redis = self.read_items_from_redis(list(nodes.keys()))
edges_in_redis = self.read_items_from_redis(list(edges.keys()))
read_from_redis_time = time.time() - read_from_redis_time
current_metric['read_redis_time'] = read_from_redis_time
merge_time = time.time()
log.info(f"Found matching {len(nodes_in_redis)} nodes {len(edges_in_redis)} edges from redis...")
for node_id in nodes_in_redis:
nodes[node_id] = self.kgx_merge_dict(nodes[node_id], nodes_in_redis[node_id])
for edge_id in edges_in_redis:
edges[edge_id] = self.kgx_merge_dict(edges[edge_id], edges_in_redis[edge_id])
# add predicate labels to edges;
for edge_id in edges:
edges[edge_id]['predicate_label'] = self.biolink.get_label(edges[edge_id]['predicate'])
merge_time = time.time() - merge_time
current_metric['merge_time'] = merge_time
write_to_redis_time = time.time()
self.write_items_to_redis(nodes)
self.write_items_to_redis(edges)
write_to_redis_time = time.time() - write_to_redis_time
current_metric['write_to_redis_time'] = write_to_redis_time
log.debug(
"path {:>45} read_file:{:>5} read_nodes_from_redis:{:>7} merge_time:{:>3} write_nodes_to_redis: {"
":>3}".format(
Util.trunc(data_set_name, 45), read_time, read_from_redis_time, merge_time, write_to_redis_time))
total_file_processing_time = time.time() - total_time
current_metric['total_processing_time'] = total_file_processing_time
current_metric['total_nodes_in_kgx_file'] = len(nodes)
current_metric['total_edges_in_kgx_file'] = len(edges)
current_metric['nodes_found_in_redis'] = len(nodes_in_redis)
current_metric['edges_found_in_redis'] = len(edges_in_redis)
log.info(f"processing {data_set_name} took {total_file_processing_time}")
return current_metric
def merge (self):
""" Merge nodes. Would be good to have something less computationally intensive. """
data_set_version = self.config.get('kgx', {}).get('dataset_version')
metrics = {}
start = time.time()
json_format_files = Util.kgx_objects("json")
jsonl_format_files = set([
x.replace(f'nodes_{data_set_version}.jsonl', '').replace(f'edges_{data_set_version}.jsonl', '') for x in Util.kgx_objects("jsonl")
])
log.info("Deleting any redis merge keys from previous run....")
self.delete_all_keys()
for file in json_format_files:
current_metric = {}
total_time = read_time = time.time()
current_kgx_data = Util.read_object(file)
read_time = time.time() - read_time
current_metric['read_kgx_file_time'] = read_time
current_metric['total_processing_time'] = total_time
self.merge_node_and_edges(nodes=current_kgx_data['nodes'],
edges=current_kgx_data['edges'],
current_metric=current_metric,
data_set_name=file)
for file in jsonl_format_files:
current_metric = {}
total_time = read_time = time.time()
edges = Util.json_line_iter(Util.kgx_path(file + f'edges_{data_set_version}.jsonl'))
nodes = Util.json_line_iter(Util.kgx_path(file + f'nodes_{data_set_version}.jsonl'))
read_time = time.time() - read_time
current_metric['read_kgx_file_time'] = read_time
current_metric['total_processing_time'] = total_time
self.merge_node_and_edges(nodes=nodes,
edges=edges,
current_metric=current_metric,
data_set_name=file)
log.info(f"total time for dumping to redis : {time.time() - start}")
# now that all nodes and edges are merged in redis, we scan all of redis back to disk
write_merge_metric = {}
t = time.time()
log.info("getting all nodes")
start_nodes_jsonl = time.time()
nodes_file_path = Util.merge_path("nodes.jsonl")
self.write_redis_back_to_jsonl(nodes_file_path, "node-*")
log.info(f"writing nodes to took : {time.time() - start_nodes_jsonl}")
write_merge_metric['nodes_writing_time'] = time.time() - start_nodes_jsonl
start_edge_jsonl = time.time()
log.info("getting all edges")
edge_output_file_path = Util.merge_path("edges.jsonl")
self.write_redis_back_to_jsonl(edge_output_file_path, "edge-*")
write_merge_metric['edges_writing_time'] = time.time() - start_edge_jsonl
log.info(f"writing edges took: {time.time() - start_edge_jsonl}")
write_merge_metric['total_time'] = time.time() - t
metrics['write_jsonl'] = write_merge_metric
metrics['total_time'] = time.time() - start
log.info(f"total took: {time.time() - start}")
if self.enable_metrics:
path = Util.metrics_path('merge_metrics.yaml')
Util.write_object(metrics, path)
class BiolinkModel:
root_type = 'biolink:NamedThing'
def __init__(self, bl_version='1.5.0'):
self.bl_url = f'https://raw.githubusercontent.com/biolink/biolink-model/{bl_version}/biolink-model.yaml'
self.toolkit = Toolkit(self.bl_url)
def find_biolink_leaves(self, biolink_concepts):
"""
Given a list of biolink concepts, returns the leaves removing any parent concepts.
:param biolink_concepts: list of biolink concepts
:return: leaf concepts.
"""
ancestry_set = set()
all_mixins_in_tree = set()
all_concepts = set(biolink_concepts)
# Keep track of things like "MacromolecularMachine" in current datasets
# @TODO remove this and mark such nodes as errors
unknown_elements = set()
for x in all_concepts:
current_element = self.toolkit.get_element(x)
mixins = set()
if current_element:
if 'mixins' in current_element and len(current_element['mixins']):
for m in current_element['mixins']:
mixins.add(self.toolkit.get_element(m).class_uri)
else:
unknown_elements.add(x)
ancestors = set(self.toolkit.get_ancestors(x, reflexive=False, formatted=True))
ancestry_set = ancestry_set.union(ancestors)
all_mixins_in_tree = all_mixins_in_tree.union(mixins)
leaf_set = all_concepts - ancestry_set - all_mixins_in_tree - unknown_elements
return leaf_set
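# Illustrative example (assuming the standard Biolink hierarchy, where biolink:Gene is a
# descendant of biolink:NamedThing): ancestors and mixins are stripped, leaving only the
# most specific classes, e.g.
#   find_biolink_leaves(["biolink:NamedThing", "biolink:Gene"]) -> {"biolink:Gene"}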
def get_leaf_class (self, names):
""" Return the leaf classes in the provided list of names. """
leaves = list(self.find_biolink_leaves(names))
return leaves[0]
def get_label(self, class_name):
element = self.toolkit.get_element(class_name)
if element:
name = element.name
return name
return class_name.replace("biolink:", "").replace("_", " ")
class BulkLoad:
""" Tools for creating a Redisgraph bulk load dataset. """
def __init__(self, biolink, config=None):
self.biolink = biolink
if not config:
config = get_config()
self.config = config
separator = self.config.get('bulk_loader',{}).get('separator', '|')
self.separator = chr(separator) if isinstance(separator, int) else separator
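# e.g. a numeric config value of 124 yields chr(124) == '|', while a string value is used as-is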
def tables_up_to_date (self):
return Util.is_up_to_date (
source=[
Util.schema_path (f"{SchemaType.PREDICATE.value}-schema.json"),
Util.schema_path (f"{SchemaType.PREDICATE.value}-schema.json")
] + Util.merged_objects (),
targets=glob.glob (Util.bulk_path ("nodes/**.csv")) + \
glob.glob (Util.bulk_path ("edges/**.csv")))
def create_nodes_csv_file(self):
if self.tables_up_to_date ():
log.info ("up to date.")
return
# clear out previous data
bulk_path = Util.bulk_path("nodes")
if os.path.exists(bulk_path):
shutil.rmtree(bulk_path)
categories_schema = Util.read_schema (SchemaType.CATEGORY)
state = defaultdict(lambda: None)
log.info(f"processing nodes")
""" Write node data for bulk load. """
categories = defaultdict(lambda: [])
category_error_nodes = set()
merged_nodes_file = Util.merge_path("nodes.jsonl")
counter = 1
for node in Util.json_line_iter(merged_nodes_file):
if not node['category']:
category_error_nodes.add(node['id'])
node['category'] = [BiolinkModel.root_type]
index = self.biolink.get_leaf_class(node['category'])
categories[index].append(node)
if len(category_error_nodes):
log.error(f"some nodes didn't have category assigned. KGX file has errors."
f"Nodes {len(category_error_nodes)}. They will be typed {BiolinkModel.root_type}"
f"Showing first 10: {list(category_error_nodes)[:10]}.")
# flush every 100K
if counter % 100_000 == 0:
self.write_bulk(Util.bulk_path("nodes"), categories, categories_schema,
state=state, is_relation=False)
# reset variables.
category_error_nodes = set()
categories = defaultdict(lambda: [])
counter += 1
# write back if any thing left.
if len(categories):
self.write_bulk(Util.bulk_path("nodes"), categories, categories_schema,
state=state, is_relation=False)
def create_edges_csv_file(self):
""" Write predicate data for bulk load. """
if self.tables_up_to_date ():
log.info ("up to date.")
return
# Clear out previous data
bulk_path = Util.bulk_path("edges")
if os.path.exists(bulk_path):
shutil.rmtree(bulk_path)
predicates_schema = Util.read_schema(SchemaType.PREDICATE)
predicates = defaultdict(lambda: [])
edges_file = Util.merge_path('edges.jsonl')
counter = 1
state = {}
for edge in Util.json_line_iter(edges_file):
predicates[edge['predicate']].append(edge)
# write out every 100K to avoid a large predicate dict.
if counter % 100_000 == 0:
self.write_bulk(Util.bulk_path("edges"), predicates, predicates_schema, state=state, is_relation=True)
predicates = defaultdict(lambda : [])
counter += 1
# if there are some items left (if loop ended before counter reached the specified value)
if len(predicates):
self.write_bulk(Util.bulk_path("edges"), predicates, predicates_schema, state=state, is_relation=True)
@staticmethod
def create_redis_schema_header(attributes: dict, is_relation=False):
"""
Creates col headers for csv to be used by redis bulk loader by assigning redis types
:param attributes: dictionary of data labels with values as python type strings
:param is_relation: set True when creating headers for a relation (edge) CSV
:return: list of attributes where each item is attributeLabel:redisGraphDataType
"""
redis_type_conversion_map = {
'str': 'STRING',
'float': 'FLOAT', # Do we need to handle double
'int': 'INT',
'bool': 'BOOL',
'list': 'ARRAY'
}
col_headers = []
format_for_redis = lambda label, typ: f'{label}:{typ}'
for attribute, attribute_type in attributes.items():
col_headers.append(format_for_redis(attribute, redis_type_conversion_map[attribute_type]))
# Note: these two fields are only important to the bulk loader;
# they will not be members of the graph
# https://github.com/RedisGraph/redisgraph-bulk-loader/tree/master#input-schemas
if is_relation:
col_headers.append('internal_start_id:START_ID')
col_headers.append('internal_end_id:END_ID')
# replace id:STRING with id:ID
col_headers.append('id:ID')
col_headers = list(filter(lambda x: x != 'id:STRING', col_headers))
return col_headers
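# Example of the header construction above (values traced from the logic in this method):
#   create_redis_schema_header({'id': 'str', 'name': 'str', 'synonyms': 'list'})
#   -> ['name:STRING', 'synonyms:ARRAY', 'id:ID']
# With is_relation=True the START_ID / END_ID columns are appended before 'id:ID'.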
@staticmethod
def group_items_by_attributes_set(objects: list, processed_object_ids: set):
"""
Groups items into a dictionary keyed by the set of attributes that are populated on
the items grouped under that key.
Eg. { set(id,name,category): [{id:'xx0',name:'bbb', 'category':['type']}....
{id:'xx1', name:'bb2', category: ['type1']}] }
:param objects: list of nodes or edges
:param processed_object_ids: ids of objects to skip because they have already been processed.
:return: dictionary grouping based on set attributes
"""
clustered_by_set_values = {}
improper_keys = set()
value_set_test = lambda x: True if (x is not None | |
<filename>pairwise_fusion_kd_new_data/log/train_single_seq/2021-07-05_15-37-16/data/Dataset_com.py
from data.obj_util import *
from torch.utils.data import Dataset
import numpy as np
import os
import warnings
import torch
import torch.multiprocessing
from multiprocessing import Manager
from data.config_com import Config, ConfigGlobal
from matplotlib import pyplot as plt
import cv2
def delay_seq(seq, mu = 0, variance = 0):
new_seq_dirs = {}
bias_gaussian = mu + variance * np.random.rand(1)
bias = np.round(bias_gaussian)
for item in seq:
temp_item = item.split("/")[-1]
temp_id_list = temp_item.split("_")
inscene_id = int(temp_id_list[1])
if inscene_id >= 100:
inscene_id = 99
temp_id = str(int(temp_id_list[0]) * 100 + inscene_id).zfill(4)
new_seq_dirs[temp_id] = item
new_seq_list = []
for i in range(len(seq)):
temp_id = str(i).zfill(4)
temp2_item = new_seq_dirs[temp_id]
inscene_id_2 = int(temp2_item.split("/")[-1].split("_")[1])
if inscene_id_2 + bias <= 99:
temp_id2 = str(int(int(temp_id) + bias)).zfill(4)
new_seq_list.append(new_seq_dirs[temp_id2])
else:
new_seq_list.append(new_seq_dirs[temp_id])
return new_seq_list
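# Worked example of the id encoding above (assuming file names like '<scene>_<frame>_...'):
# an item ending in '3_05_sample.npy' is keyed as str(3 * 100 + 5).zfill(4) == '0305', so
# each scene contributes at most 100 frame slots and the bias shifts frames within a scene.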
class NuscenesDataset(Dataset):
def __init__(self, dataset_root=None, config=None,split=None,cache_size=10000,val=False):
"""
This dataloader loads a single sequence for a keyframe, and is not designed for computing the
spatio-temporal consistency losses. It supports train, val and test splits.
dataset_root: Data path to the preprocessed sparse nuScenes data (for training)
split: [train/val/test]
future_frame_skip: Specify to skip how many future frames
voxel_size: The lattice resolution. Should be consistent with the preprocessed data
area_extents: The area extents of the processed LiDAR data. Should be consistent with the preprocessed data
category_num: The number of object categories (including the background)
cache_size: The cache size for storing parts of data in the memory (for reducing the IO cost)
"""
if split is None:
self.split = config.split
else:
self.split = split
self.voxel_size = config.voxel_size
self.area_extents = config.area_extents
self.category_num = config.category_num
self.future_frame_skip = config.future_frame_skip
self.pred_len = config.pred_len
self.box_code_size = config.box_code_size
self.anchor_size = config.anchor_size
self.val = val
self.only_det = config.only_det
self.binary = config.binary
self.config = config
self.use_vis = config.use_vis
#dataset_root = dataset_root + '/'+split
if dataset_root is None:
raise ValueError("The {} dataset root is None. Should specify its value.".format(self.split))
self.dataset_root = dataset_root
seq_dirs = [os.path.join(self.dataset_root, d) for d in os.listdir(self.dataset_root)
if os.path.isdir(os.path.join(self.dataset_root, d))]
seq_dirs = sorted(seq_dirs)
self.seq_files = [os.path.join(seq_dir, f) for seq_dir in seq_dirs for f in os.listdir(seq_dir)
if os.path.isfile(os.path.join(seq_dir, f))]
self.num_sample_seqs = len(self.seq_files)
print("The number of {} sequences: {}".format(self.split, self.num_sample_seqs))
'''
# For training, the size of dataset should be 17065 * 2; for validation: 1623; for testing: 4309
if split == 'train' and self.num_sample_seqs != 17065 * 2:
warnings.warn(">> The size of training dataset is not 17065 * 2.\n")
elif split == 'val' and self.num_sample_seqs != 1623:
warnings.warn(">> The size of validation dataset is not 1719.\n")
elif split == 'test' and self.num_sample_seqs != 4309:
warnings.warn('>> The size of test dataset is not 4309.\n')
'''
#object information
self.anchors_map = init_anchors_no_check(self.area_extents,self.voxel_size,self.box_code_size,self.anchor_size)
self.map_dims = [int((self.area_extents[0][1]-self.area_extents[0][0])/self.voxel_size[0]),\
int((self.area_extents[1][1]-self.area_extents[1][0])/self.voxel_size[1])]
self.reg_target_shape = (self.map_dims[0],self.map_dims[1],len(self.anchor_size),self.pred_len,self.box_code_size)
self.label_shape = (self.map_dims[0],self.map_dims[1],len(self.anchor_size))
self.label_one_hot_shape = (self.map_dims[0],self.map_dims[1],len(self.anchor_size),self.category_num)
self.dims = config.map_dims
self.num_past_pcs = config.num_past_pcs
manager = Manager()
self.cache = manager.dict()
self.cache_size = cache_size if split == 'train' else 0
#self.cache_size = cache_size
def __len__(self):
return self.num_sample_seqs
def get_one_hot(self,label,category_num):
one_hot_label = np.zeros((label.shape[0],category_num))
for i in range(label.shape[0]):
one_hot_label[i][label[i]] = 1
return one_hot_label
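# Small added illustration of get_one_hot:
#   get_one_hot(np.array([0, 2, 1]), 3) ->
#     [[1., 0., 0.],
#      [0., 0., 1.],
#      [0., 1., 0.]]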
def __getitem__(self, idx):
if idx in self.cache:
gt_dict = self.cache[idx]
else:
seq_file = self.seq_files[idx]
gt_data_handle = np.load(seq_file, allow_pickle=True)
gt_dict = gt_data_handle.item()
if len(self.cache) < self.cache_size:
self.cache[idx] = gt_dict
allocation_mask = gt_dict['allocation_mask'].astype(np.bool)
reg_loss_mask = gt_dict['reg_loss_mask'].astype(np.bool)
gt_max_iou = gt_dict['gt_max_iou']
motion_one_hot = np.zeros(5)
motion_mask = np.zeros(5)
#load regression target
reg_target_sparse = gt_dict['reg_target_sparse']
#need to be modified Yiqi , only use reg_target and allocation_map
reg_target = np.zeros(self.reg_target_shape).astype(reg_target_sparse.dtype)
reg_target[allocation_mask] = reg_target_sparse
reg_target[np.bitwise_not(reg_loss_mask)] = 0
label_sparse = gt_dict['label_sparse']
one_hot_label_sparse = self.get_one_hot(label_sparse,self.category_num)
label_one_hot = np.zeros(self.label_one_hot_shape)
label_one_hot[:,:,:,0] = 1
label_one_hot[allocation_mask] = one_hot_label_sparse
if self.config.motion_state:
motion_sparse = gt_dict['motion_state']
motion_one_hot_label_sparse = self.get_one_hot(motion_sparse,3)
motion_one_hot = np.zeros(self.label_one_hot_shape[:-1]+(3,))
motion_one_hot[:,:,:,0] = 1
motion_one_hot[allocation_mask] = motion_one_hot_label_sparse
motion_mask = (motion_one_hot[:,:,:,2] == 1)
if self.only_det:
reg_target = reg_target[:,:,:,:1]
reg_loss_mask = reg_loss_mask[:,:,:,:1]
#only center for pred
elif self.config.pred_type in ['motion','center']:
reg_loss_mask = np.expand_dims(reg_loss_mask,axis=-1)
reg_loss_mask = np.repeat(reg_loss_mask,self.box_code_size,axis=-1)
reg_loss_mask[:,:,:,1:,2:]=False
if self.config.use_map:
if ('map_allocation_0' in gt_dict.keys()) or ('map_allocation' in gt_dict.keys()):
semantic_maps = []
for m_id in range(self.config.map_channel):
map_alloc = gt_dict['map_allocation_'+str(m_id)]
map_sparse = gt_dict['map_sparse_'+str(m_id)]
recover = np.zeros(tuple(self.config.map_dims[:2]))
recover[map_alloc] = map_sparse
recover = np.rot90(recover,3)
#recover_map = cv2.resize(recover,(self.config.map_dims[0],self.config.map_dims[1]))
semantic_maps.append(recover)
semantic_maps = np.asarray(semantic_maps)
else:
semantic_maps = np.zeros(0)
'''
if self.binary:
reg_target = np.concatenate([reg_target[:,:,:2],reg_target[:,:,5:]],axis=2)
reg_loss_mask = np.concatenate([reg_loss_mask[:,:,:2],reg_loss_mask[:,:,5:]],axis=2)
label_one_hot = np.concatenate([label_one_hot[:,:,:2],label_one_hot[:,:,5:]],axis=2)
'''
padded_voxel_points = list()
for i in range(self.num_past_pcs):
indices = gt_dict['voxel_indices_' + str(i)]
curr_voxels = np.zeros(self.dims, dtype=np.bool)
curr_voxels[indices[:, 0], indices[:, 1], indices[:, 2]] = 1
curr_voxels = np.rot90(curr_voxels,3)
padded_voxel_points.append(curr_voxels)
padded_voxel_points = np.stack(padded_voxel_points, 0).astype(np.float32)
anchors_map = self.anchors_map
'''
if self.binary:
anchors_map = np.concatenate([anchors_map[:,:,:2],anchors_map[:,:,5:]],axis=2)
'''
if self.config.use_vis:
vis_maps = np.zeros((self.num_past_pcs,self.config.map_dims[-1],self.config.map_dims[0],self.config.map_dims[1]))
vis_free_indices = gt_dict['vis_free_indices']
vis_occupy_indices = gt_dict['vis_occupy_indices']
vis_maps[vis_occupy_indices[0,:],vis_occupy_indices[1,:],vis_occupy_indices[2,:],vis_occupy_indices[3,:]] = math.log(0.7/(1-0.7))
vis_maps[vis_free_indices[0,:],vis_free_indices[1,:],vis_free_indices[2,:],vis_free_indices[3,:]] = math.log(0.4/(1-0.4))
vis_maps = np.swapaxes(vis_maps,2,3)
vis_maps = np.transpose(vis_maps,(0,2,3,1))
for v_id in range(vis_maps.shape[0]):
vis_maps[v_id] = np.rot90(vis_maps[v_id],3)
vis_maps = vis_maps[-1]
else:
vis_maps = np.zeros(0)
padded_voxel_points = padded_voxel_points.astype(np.float32)
label_one_hot = label_one_hot.astype(np.float32)
reg_target = reg_target.astype(np.float32)
anchors_map = anchors_map.astype(np.float32)
motion_one_hot = motion_one_hot.astype(np.float32)
semantic_maps = semantic_maps.astype(np.float32)
vis_maps = vis_maps.astype(np.float32)
if self.val:
return padded_voxel_points, label_one_hot,\
reg_target,reg_loss_mask,anchors_map,motion_one_hot,motion_mask,vis_maps,[{"gt_box":gt_max_iou}],[seq_file]
else:
return padded_voxel_points, label_one_hot,\
reg_target,reg_loss_mask,anchors_map,motion_one_hot,motion_mask,vis_maps
class CarscenesDataset(Dataset):
def __init__(self, padded_voxel_points_example, label_one_hot_example, reg_target_example, reg_loss_mask_example, anchors_map_example, \
motion_one_hot_example, motion_mask_example, vis_maps_example, dataset_root=None, config=None,config_global=None,split=None,cache_size=10000,val=False, mu = 1):
"""
This dataloader loads a single sequence for a keyframe, and is not designed for computing the
spatio-temporal consistency losses. It supports train, val and test splits.
dataset_root: Data path to the preprocessed sparse nuScenes data (for training)
split: [train/val/test]
future_frame_skip: Specify to skip how many future frames
voxel_size: The lattice resolution. Should be consistent with the preprocessed data
area_extents: The area extents of the processed LiDAR data. Should be consistent with the preprocessed data
category_num: The number of object categories (including the background)
cache_size: The cache size for storing parts of data in the memory (for reducing the IO cost)
"""
if split is None:
self.split = config.split
else:
self.split = split
self.voxel_size = config.voxel_size
self.area_extents = config.area_extents
self.category_num = config.category_num
self.future_frame_skip = config.future_frame_skip
self.pred_len = config.pred_len
self.box_code_size = config.box_code_size
self.anchor_size = config.anchor_size
self.mu = mu
self.val = val
self.only_det = config.only_det
self.binary = config.binary
self.config = config
self.use_vis = config.use_vis
self.padded_voxel_points_example = padded_voxel_points_example
self.label_one_hot_example = label_one_hot_example
self.reg_target_example = reg_target_example
self.reg_loss_mask_example = reg_loss_mask_example
self.anchors_map_example = anchors_map_example
self.motion_one_hot_example = motion_one_hot_example
self.motion_mask_example = motion_mask_example
self.vis_maps_example = vis_maps_example
#dataset_root = dataset_root + '/'+split
if dataset_root is None:
raise ValueError("The {} dataset root is None. Should specify its value.".format(self.split))
self.dataset_root = dataset_root
seq_dirs = [os.path.join(self.dataset_root, d) for d in os.listdir(self.dataset_root)
if os.path.isdir(os.path.join(self.dataset_root, d))]
seq_dirs = sorted(seq_dirs)
seq_dirs = delay_seq(seq_dirs, self.mu)
self.seq_files = [os.path.join(seq_dir, f) for seq_dir in seq_dirs for f in os.listdir(seq_dir)
if os.path.isfile(os.path.join(seq_dir, f))]
self.num_sample_seqs = len(self.seq_files)
print("The number of {} sequences: {}".format(self.split, self.num_sample_seqs))
#object information
self.anchors_map = init_anchors_no_check(self.area_extents,self.voxel_size,self.box_code_size,self.anchor_size)
self.map_dims = [int((self.area_extents[0][1]-self.area_extents[0][0])/self.voxel_size[0]),\
int((self.area_extents[1][1]-self.area_extents[1][0])/self.voxel_size[1])]
self.reg_target_shape = (self.map_dims[0],self.map_dims[1],len(self.anchor_size),self.pred_len,self.box_code_size)
self.label_shape = (self.map_dims[0],self.map_dims[1],len(self.anchor_size))
self.label_one_hot_shape = (self.map_dims[0],self.map_dims[1],len(self.anchor_size),self.category_num)
self.dims = config.map_dims
self.num_past_pcs = config.num_past_pcs
manager = Manager()
self.cache = manager.dict()
self.cache_size = cache_size if split == 'train' else 0
if self.val:
self.voxel_size_global = config_global.voxel_size
self.area_extents_global = config_global.area_extents
self.pred_len_global = config_global.pred_len
self.box_code_size_global = config_global.box_code_size
self.anchor_size_global = config_global.anchor_size
#object information
self.anchors_map_global = init_anchors_no_check(self.area_extents_global,self.voxel_size_global,self.box_code_size_global,self.anchor_size_global)
self.map_dims_global = [int((self.area_extents_global[0][1]-self.area_extents_global[0][0])/self.voxel_size_global[0]),\
int((self.area_extents_global[1][1]-self.area_extents_global[1][0])/self.voxel_size_global[1])]
self.reg_target_shape_global = (self.map_dims_global[0],self.map_dims_global[1],len(self.anchor_size_global),self.pred_len_global,self.box_code_size_global)
self.dims_global = config_global.map_dims
def __len__(self):
return self.num_sample_seqs
def get_one_hot(self,label,category_num):
one_hot_label = np.zeros((label.shape[0],category_num))
for i in range(label.shape[0]):
one_hot_label[i][label[i]] = 1
return one_hot_label
def __getitem__(self, idx):
empty_flag = False
if idx in self.cache:
gt_dict = self.cache[idx]
else:
seq_file = self.seq_files[idx]
gt_data_handle = np.load(seq_file, allow_pickle=True)
if gt_data_handle == 0:
empty_flag = True
padded_voxel_points = np.zeros(self.padded_voxel_points_example.shape).astype(np.float32)
label_one_hot = np.zeros(self.label_one_hot_example.shape).astype(np.float32)
reg_target = np.zeros(self.reg_target_example.shape).astype(np.float32)
anchors_map = np.zeros(self.anchors_map_example.shape).astype(np.float32)
motion_one_hot = np.zeros(self.motion_one_hot_example.shape).astype(np.float32)
vis_maps = np.zeros(self.vis_maps_example.shape).astype(np.float32)
reg_loss_mask = np.zeros(self.reg_loss_mask_example.shape).astype(self.reg_loss_mask_example.dtype)
motion_mask = np.zeros(self.motion_mask_example.shape).astype(self.motion_mask_example.dtype)
if self.val:
return padded_voxel_points, label_one_hot,\
reg_target, reg_loss_mask, anchors_map, vis_maps, [{"gt_box":[[0, 0, 0, 0], [0, 0, 0, 0]]}], [seq_file], \
0, 0, np.zeros((5,4,4)), 0, 0, 0, 0, 0
else:
return padded_voxel_points, padded_voxel_points, label_one_hot, reg_target, reg_loss_mask, anchors_map, vis_maps, 0, 0, np.zeros((5,4,4))
else:
gt_dict = gt_data_handle.item()
if len(self.cache) < self.cache_size:
self.cache[idx] = gt_dict
if empty_flag == False:
allocation_mask = gt_dict['allocation_mask'].astype(np.bool)
reg_loss_mask = gt_dict['reg_loss_mask'].astype(np.bool)
gt_max_iou = gt_dict['gt_max_iou']
motion_one_hot = np.zeros(5)
motion_mask = np.zeros(5)
#load regression target
reg_target_sparse = gt_dict['reg_target_sparse']
#need to be modified Yiqi , only use reg_target and allocation_map
reg_target = np.zeros(self.reg_target_shape).astype(reg_target_sparse.dtype)
reg_target[allocation_mask] = reg_target_sparse
reg_target[np.bitwise_not(reg_loss_mask)] = 0
label_sparse = gt_dict['label_sparse']
one_hot_label_sparse = self.get_one_hot(label_sparse,self.category_num)
label_one_hot = np.zeros(self.label_one_hot_shape)
label_one_hot[:,:,:,0] = 1
label_one_hot[allocation_mask] = one_hot_label_sparse
if self.only_det:
reg_target = reg_target[:,:,:,:1]
reg_loss_mask | |
+ 268965040 * uk_72
+ 822048080 * uk_73
+ 71976560 * uk_74
+ 238706473 * uk_75
+ 729567671 * uk_76
+ 63879197 * uk_77
+ 2229805417 * uk_78
+ 195236419 * uk_79
+ 19 * uk_8
+ 17094433 * uk_80
+ 250047 * uk_81
+ 265923 * uk_82
+ 75411 * uk_83
+ 317520 * uk_84
+ 281799 * uk_85
+ 861273 * uk_86
+ 75411 * uk_87
+ 282807 * uk_88
+ 80199 * uk_89
+ 2242306609 * uk_9
+ 337680 * uk_90
+ 299691 * uk_91
+ 915957 * uk_92
+ 80199 * uk_93
+ 22743 * uk_94
+ 95760 * uk_95
+ 84987 * uk_96
+ 259749 * uk_97
+ 22743 * uk_98
+ 403200 * uk_99,
uk_0
+ 47353 * uk_1
+ 2983239 * uk_10
+ 331128 * uk_100
+ 984312 * uk_101
+ 303912 * uk_102
+ 335727 * uk_103
+ 997983 * uk_104
+ 308133 * uk_105
+ 2966607 * uk_106
+ 915957 * uk_107
+ 282807 * uk_108
+ 117649 * uk_109
+ 2320297 * uk_11
+ 160867 * uk_110
+ 172872 * uk_111
+ 175273 * uk_112
+ 521017 * uk_113
+ 160867 * uk_114
+ 219961 * uk_115
+ 236376 * uk_116
+ 239659 * uk_117
+ 712411 * uk_118
+ 219961 * uk_119
+ 3172651 * uk_12
+ 254016 * uk_120
+ 257544 * uk_121
+ 765576 * uk_122
+ 236376 * uk_123
+ 261121 * uk_124
+ 776209 * uk_125
+ 239659 * uk_126
+ 2307361 * uk_127
+ 712411 * uk_128
+ 219961 * uk_129
+ 3409416 * uk_13
+ 300763 * uk_130
+ 323208 * uk_131
+ 327697 * uk_132
+ 974113 * uk_133
+ 300763 * uk_134
+ 347328 * uk_135
+ 352152 * uk_136
+ 1046808 * uk_137
+ 323208 * uk_138
+ 357043 * uk_139
+ 3456769 * uk_14
+ 1061347 * uk_140
+ 327697 * uk_141
+ 3154963 * uk_142
+ 974113 * uk_143
+ 300763 * uk_144
+ 373248 * uk_145
+ 378432 * uk_146
+ 1124928 * uk_147
+ 347328 * uk_148
+ 383688 * uk_149
+ 10275601 * uk_15
+ 1140552 * uk_150
+ 352152 * uk_151
+ 3390408 * uk_152
+ 1046808 * uk_153
+ 323208 * uk_154
+ 389017 * uk_155
+ 1156393 * uk_156
+ 357043 * uk_157
+ 3437497 * uk_158
+ 1061347 * uk_159
+ 3172651 * uk_16
+ 327697 * uk_160
+ 10218313 * uk_161
+ 3154963 * uk_162
+ 974113 * uk_163
+ 300763 * uk_164
+ 3969 * uk_17
+ 3087 * uk_18
+ 4221 * uk_19
+ 63 * uk_2
+ 4536 * uk_20
+ 4599 * uk_21
+ 13671 * uk_22
+ 4221 * uk_23
+ 2401 * uk_24
+ 3283 * uk_25
+ 3528 * uk_26
+ 3577 * uk_27
+ 10633 * uk_28
+ 3283 * uk_29
+ 49 * uk_3
+ 4489 * uk_30
+ 4824 * uk_31
+ 4891 * uk_32
+ 14539 * uk_33
+ 4489 * uk_34
+ 5184 * uk_35
+ 5256 * uk_36
+ 15624 * uk_37
+ 4824 * uk_38
+ 5329 * uk_39
+ 67 * uk_4
+ 15841 * uk_40
+ 4891 * uk_41
+ 47089 * uk_42
+ 14539 * uk_43
+ 4489 * uk_44
+ 106179944855977 * uk_45
+ 141265316367 * uk_46
+ 109873023841 * uk_47
+ 150234542803 * uk_48
+ 161446075848 * uk_49
+ 72 * uk_5
+ 163688382457 * uk_50
+ 486580534153 * uk_51
+ 150234542803 * uk_52
+ 187944057 * uk_53
+ 146178711 * uk_54
+ 199877013 * uk_55
+ 214793208 * uk_56
+ 217776447 * uk_57
+ 647362863 * uk_58
+ 199877013 * uk_59
+ 73 * uk_6
+ 113694553 * uk_60
+ 155459899 * uk_61
+ 167061384 * uk_62
+ 169381681 * uk_63
+ 503504449 * uk_64
+ 155459899 * uk_65
+ 212567617 * uk_66
+ 228430872 * uk_67
+ 231603523 * uk_68
+ 688465267 * uk_69
+ 217 * uk_7
+ 212567617 * uk_70
+ 245477952 * uk_71
+ 248887368 * uk_72
+ 739843272 * uk_73
+ 228430872 * uk_74
+ 252344137 * uk_75
+ 750118873 * uk_76
+ 231603523 * uk_77
+ 2229805417 * uk_78
+ 688465267 * uk_79
+ 67 * uk_8
+ 212567617 * uk_80
+ 250047 * uk_81
+ 194481 * uk_82
+ 265923 * uk_83
+ 285768 * uk_84
+ 289737 * uk_85
+ 861273 * uk_86
+ 265923 * uk_87
+ 151263 * uk_88
+ 206829 * uk_89
+ 2242306609 * uk_9
+ 222264 * uk_90
+ 225351 * uk_91
+ 669879 * uk_92
+ 206829 * uk_93
+ 282807 * uk_94
+ 303912 * uk_95
+ 308133 * uk_96
+ 915957 * uk_97
+ 282807 * uk_98
+ 326592 * uk_99,
uk_0
+ 47353 * uk_1
+ 2983239 * uk_10
+ 321300 * uk_100
+ 929628 * uk_101
+ 209916 * uk_102
+ 354375 * uk_103
+ 1025325 * uk_104
+ 231525 * uk_105
+ 2966607 * uk_106
+ 669879 * uk_107
+ 151263 * uk_108
+ 21952 * uk_109
+ 1325884 * uk_11
+ 38416 * uk_110
+ 53312 * uk_111
+ 58800 * uk_112
+ 170128 * uk_113
+ 38416 * uk_114
+ 67228 * uk_115
+ 93296 * uk_116
+ 102900 * uk_117
+ 297724 * uk_118
+ 67228 * uk_119
+ 2320297 * uk_12
+ 129472 * uk_120
+ 142800 * uk_121
+ 413168 * uk_122
+ 93296 * uk_123
+ 157500 * uk_124
+ 455700 * uk_125
+ 102900 * uk_126
+ 1318492 * uk_127
+ 297724 * uk_128
+ 67228 * uk_129
+ 3220004 * uk_13
+ 117649 * uk_130
+ 163268 * uk_131
+ 180075 * uk_132
+ 521017 * uk_133
+ 117649 * uk_134
+ 226576 * uk_135
+ 249900 * uk_136
+ 723044 * uk_137
+ 163268 * uk_138
+ 275625 * uk_139
+ 3551475 * uk_14
+ 797475 * uk_140
+ 180075 * uk_141
+ 2307361 * uk_142
+ 521017 * uk_143
+ 117649 * uk_144
+ 314432 * uk_145
+ 346800 * uk_146
+ 1003408 * uk_147
+ 226576 * uk_148
+ 382500 * uk_149
+ 10275601 * uk_15
+ 1106700 * uk_150
+ 249900 * uk_151
+ 3202052 * uk_152
+ 723044 * uk_153
+ 163268 * uk_154
+ 421875 * uk_155
+ 1220625 * uk_156
+ 275625 * uk_157
+ 3531675 * uk_158
+ 797475 * uk_159
+ 2320297 * uk_16
+ 180075 * uk_160
+ 10218313 * uk_161
+ 2307361 * uk_162
+ 521017 * uk_163
+ 117649 * uk_164
+ 3969 * uk_17
+ 1764 * uk_18
+ 3087 * uk_19
+ 63 * uk_2
+ 4284 * uk_20
+ 4725 * uk_21
+ 13671 * uk_22
+ 3087 * uk_23
+ 784 * uk_24
+ 1372 * uk_25
+ 1904 * uk_26
+ 2100 * uk_27
+ 6076 * uk_28
+ 1372 * uk_29
+ 28 * uk_3
+ 2401 * uk_30
+ 3332 * uk_31
+ 3675 * uk_32
+ 10633 * uk_33
+ 2401 * uk_34
+ 4624 * uk_35
+ 5100 * uk_36
+ 14756 * uk_37
+ 3332 * uk_38
+ 5625 * uk_39
+ 49 * uk_4
+ 16275 * uk_40
+ 3675 * uk_41
+ 47089 * uk_42
+ 10633 * uk_43
+ 2401 * uk_44
+ 106179944855977 * uk_45
+ 141265316367 * uk_46
+ 62784585052 * uk_47
+ 109873023841 * uk_48
+ 152476849412 * uk_49
+ 68 * uk_5
+ 168172995675 * uk_50
+ 486580534153 * uk_51
+ 109873023841 * uk_52
+ 187944057 * uk_53
+ 83530692 * uk_54
+ 146178711 * uk_55
+ 202860252 * uk_56
+ 223742925 * uk_57
+ 647362863 * uk_58
+ 146178711 * uk_59
+ 75 * uk_6
+ 37124752 * uk_60
+ 64968316 * uk_61
+ 90160112 * uk_62
+ 99441300 * uk_63
+ 287716828 | |
<filename>project/analysis/models.py
import json
import datetime
import hashlib
import logging
import os
import uuid
import io
import requests
import zipfile
import itertools
import pandas as pd
import math
import numpy
from scipy import stats, ndimage
from collections import defaultdict
from django.db import models
from django.conf import settings
from django.core.cache import cache
from django.core.mail import send_mail
from django.core.validators import MinValueValidator, MaxValueValidator
from django.core.urlresolvers import reverse
from django.core.files.base import ContentFile
from django.contrib.sites.models import Site
from django.contrib.postgres.fields import JSONField
from django.forms.models import model_to_dict
from django.utils.timezone import now
from django.utils.text import slugify
from django.template.loader import render_to_string
from utils.models import ReadOnlyFileSystemStorage, get_random_filename, DynamicFilePathField
from async_messages import messages
from . import managers, tasks
from orio.matrix import BedMatrix
from orio.matrixByMatrix import MatrixByMatrix
from orio import validators
from orio.utils import get_data_path
logger = logging.getLogger(__name__)
class Dataset(models.Model):
owner = models.ForeignKey(
settings.AUTH_USER_MODEL,
blank=True,
null=True,
related_name='%(class)s',)
name = models.CharField(
max_length=128)
slug = models.CharField(
max_length=128)
description = models.TextField(
blank=True)
uuid = models.UUIDField(
default=uuid.uuid4,
editable=False)
public = models.BooleanField(
default=False)
validated = models.BooleanField(
default=False)
validation_errors = models.TextField(
blank=True)
validation_warnings = models.TextField(
blank=True)
created = models.DateTimeField(
auto_now_add=True)
last_updated = models.DateTimeField(
auto_now=True)
class Meta:
abstract = True
def __str__(self):
return self.name
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
super().save(*args, **kwargs)
def get_form_cancel_url(self):
if self.id:
return self.get_absolute_url()
else:
return reverse('analysis:manage_data')
def user_can_view(self, user):
return self.public or self.owner == user or user.is_staff
def user_can_edit(self, user):
return self.owner == user or user.is_staff
class GenomeAssembly(models.Model):
name = models.CharField(
unique=True,
max_length=32)
chromosome_size_file = DynamicFilePathField(
unique=True,
max_length=128,
path=get_data_path,
recursive=False)
annotation_file = DynamicFilePathField(
unique=True,
max_length=128,
path=get_data_path,
recursive=False)
class Meta:
verbose_name_plural = 'genome assemblies'
def __str__(self):
return self.name
class ValidationMixin:
def validate(self):
# return tuple (is_valid: bool, validation_text: str)
raise NotImplementedError('Requires implementation')
def validate_and_save(self):
is_valid, text = self.validate()
self.validated = is_valid
self.validation_errors = self.scrub_validation_text(text)
self.send_validation_message()
self.save()
def scrub_validation_text(self, text):
# remove any path information from outputs
user_home = self.owner.path
media_path = settings.MEDIA_ROOT
notes = text\
.replace(user_home, '/***')\
.replace(media_path, '/***')
# remove extra whitespace from all lines
return '\n'.join([l.strip() for l in notes.splitlines()])
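# Example of the scrubbing above (paths are hypothetical): with the owner's home path set
# to '/home/alice/userdata', a message such as
#   'Invalid bigWig: /home/alice/userdata/sample.bw'
# becomes
#   'Invalid bigWig: /***/sample.bw'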
def send_validation_message(self):
msg = '{} {}: '.format(self._meta.verbose_name.title(), self)
if self.validated:
msg += 'validation complete!'
messages.success(self.owner, msg)
else:
msg += "<a href='{}'>validation failed (view errors)</a>"\
.format(self.get_absolute_url())
messages.warning(self.owner, msg)
class DatasetDownload(models.Model):
NOT_STARTED = 0
STARTED = 1
FINISHED_ERROR = 2
FINISHED_SUCCESS = 3
STATUS_CHOICES = (
(NOT_STARTED, 'not-started'),
(STARTED, 'started'),
(FINISHED_ERROR, 'finished with errors'),
(FINISHED_SUCCESS, 'successfully completed'),
)
CHUNK = 1024 * 1024
owner = models.ForeignKey(
settings.AUTH_USER_MODEL,
null=True,
related_name='%(class)s',)
url = models.URLField()
data = models.FileField(
blank=True,
max_length=256,
storage=ReadOnlyFileSystemStorage.create_store(settings.USERDATA_PATH))
filesize = models.FloatField(
null=True)
md5 = models.CharField(
max_length=64,
null=True)
status_code = models.PositiveSmallIntegerField(
default=NOT_STARTED,
choices=STATUS_CHOICES)
status = models.TextField(
blank=True,
null=True)
start_time = models.DateTimeField(
blank=True,
null=True)
end_time = models.DateTimeField(
blank=True,
null=True)
def get_retry_url(self, parent):
return reverse('analysis:dataset_download_retry',
args=[parent.id, self.id])
@property
def basename(self):
return os.path.basename(self.data.path)
def set_filename(self):
basename, ext = os.path.splitext(os.path.basename(self.url))
path = self.owner.path
fn = os.path.join(path, "{}{}".format(basename, ext))
i = 1
while os.path.exists(fn):
fn = os.path.join(path, "{}-{}{}".format(basename, i, ext))
i += 1
logger.info('Setting filename to {}'.format(fn))
# set filename to object
self.data.name = fn[len(settings.USERDATA_PATH) + 1:]
# write a temporary placeholder file to reserve this filename and prevent it from being reused
with open(fn, 'w') as f:
f.write('temporary')
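# Example of the uniquifying scheme above (hypothetical file names): if 'signal.bw' already
# exists in the user's directory, the download is stored as 'signal-1.bw', then 'signal-2.bw',
# and so on.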
def save(self, *args, **kwargs):
if not self.data.name:
self.set_filename()
super().save(*args, **kwargs)
@staticmethod
def check_valid_url(url):
# ensure URL is valid and doesn't raise a 400/500 error
try:
resp = requests.head(url)
except requests.exceptions.ConnectionError:
return False, '{} not found.'.format(url)
except requests.exceptions.InvalidSchema:
return False, '{} must be available via HTTP or HTTPS.'.format(url)
else:
return resp.ok, "{}: {}".format(resp.status_code, resp.reason)
def show_download_retry(self):
return self.status_code == self.FINISHED_ERROR
def download(self):
related_ds = list(self.related_datasets())
fn = self.data.path
self.reset()
try:
r = requests.get(self.url, stream=True)
with open(fn, 'wb') as f:
for chunk in r.iter_content(chunk_size=self.CHUNK):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
self.end_time = now()
self.status_code = self.FINISHED_SUCCESS
self.md5 = self.get_md5()
self.filesize = os.path.getsize(fn)
for ds in related_ds:
msg = 'Download complete (will validate next): {}'.format(self.url)
messages.success(ds.owner, msg)
except Exception as e:
self.start_time = None
self.status_code = self.FINISHED_ERROR
self.status = str(e)
for ds in related_ds:
msg = 'Download failed: {}'.format(self.url)
messages.warning(ds.owner, msg)
self.save()
for ds in related_ds:
ds.validate_and_save()
def get_md5(self):
# equivalent to "md5 -q $FN"
fn = self.data.path
hasher = hashlib.md5()
with open(fn, "rb") as f:
for block in iter(lambda: f.read(self.CHUNK), b''):
hasher.update(block)
return hasher.hexdigest()
def reset(self):
self.status_code = self.STARTED
self.status = ''
self.start_time = now()
self.end_time = None
self.filesize = None
self.md5 = ''
def delete_file(self):
if self.data and os.path.exists(self.data.path):
logger.info('Deleting {}'.format(self.data.path))
os.remove(self.data.path)
def related_datasets(self):
return itertools.chain(
self.plus.all(),
self.minus.all(),
self.ambiguous.all(),
)
class GenomicDataset(Dataset):
genome_assembly = models.ForeignKey(
GenomeAssembly)
@property
def subclass(self):
# this is an inherited model; get the subclass
if hasattr(self, 'encodedataset'):
return self.encodedataset
else:
return self.userdataset
@property
def is_stranded(self):
raise NotImplementedError('Abstract method')
def to_dict(self):
# Useful for debugging
return {
'id': self.id,
'name': self.name,
'stranded': self.is_stranded,
'bigwigs': self.get_bigwig_paths(),
}
class UserDataset(ValidationMixin, GenomicDataset):
DATA_TYPES = (
('Cage', 'Cage'),
('ChiaPet', 'ChiaPet'),
('ChipSeq', 'ChipSeq'),
('DnaseDgf', 'DnaseDgf'),
('DnaseSeq', 'DnaseSeq'),
('FaireSeq', 'FaireSeq'),
('Mapability', 'Mapability'),
('Nucleosome', 'Nucleosome'),
('Orchid', 'Orchid'),
('RepliChip', 'RepliChip'),
('RepliSeq', 'RepliSeq'),
('RipSeq', 'RipSeq'),
('RnaPet', 'RnaPet'),
('RnaSeq', 'RnaSeq'),
('SmartSeq', 'SmartSeq'),
('Other', 'Other (describe in "description" field)'),
)
data_type = models.CharField(
max_length=16,
choices=DATA_TYPES,
help_text='Experiment type')
ambiguous = models.ForeignKey(
DatasetDownload,
null=True,
related_name='ambiguous',
help_text='Coverage data for which strand is ambiguous or unknown')
plus = models.ForeignKey(
DatasetDownload,
null=True,
related_name='plus',
help_text='Coverage data for which strand is plus')
minus = models.ForeignKey(
DatasetDownload,
null=True,
related_name='minus',
help_text='Coverage data for which strand is minus')
url = models.URLField(
max_length=256,
null=True)
expiration_date = models.DateTimeField(
null=True)
@property
def is_stranded(self):
return self.plus is not None
@property
def is_downloaded(self):
success_code = DatasetDownload.FINISHED_SUCCESS
if self.is_stranded:
return self.plus.status_code == success_code and \
self.minus.status_code == success_code
else:
return self.ambiguous.status_code == success_code
@classmethod
def usable(cls, user):
# must be owned by user and validated
return cls.objects.filter(owner=user, validated=True)
def get_absolute_url(self):
return reverse('analysis:user_dataset',
args=[self.pk, self.slug])
def get_update_url(self):
return reverse('analysis:user_dataset_update',
args=[self.pk, self.slug])
def get_delete_url(self):
return reverse('analysis:user_dataset_delete',
args=[self.pk, self.slug])
def get_bigwig_paths(self):
if self.is_stranded:
return [self.plus.data.path, self.minus.data.path]
else:
return [self.ambiguous.data.path]
def validate_and_save(self):
# wait until all files are downloaded before attempting validation
if self.is_downloaded:
super().validate_and_save()
def validate(self):
size_file = self.genome_assembly.chromosome_size_file
if self.is_stranded:
validatorA = validators.BigWigValidator(
self.plus.data.path, size_file)
validatorA.validate()
validatorB = validators.BigWigValidator(
self.minus.data.path, size_file)
validatorB.validate()
is_valid = validatorA.is_valid and validatorB.is_valid
notes = '\n'.join([
validatorA.display_errors(),
validatorB.display_errors()
]).strip()
else:
validator = validators.BigWigValidator(
self.ambiguous.data.path, size_file)
validator.validate()
is_valid = validator.is_valid
notes = validator.display_errors()
return is_valid, notes
class EncodeDataset(GenomicDataset):
store = ReadOnlyFileSystemStorage.create_store(settings.ENCODE_PATH)
data_ambiguous = models.FileField(
blank=True,
max_length=256,
storage=store,
help_text='Coverage data for which strand is ambiguous or unknown')
data_plus = models.FileField(
blank=True,
max_length=256,
storage=store,
help_text='Coverage data for which strand is plus')
data_minus = models.FileField(
blank=True,
max_length=256,
storage=store,
help_text='Coverage data for which strand is minus')
data_type = models.CharField(
max_length=16,
db_index=True,
help_text='Experiment type')
cell_type = models.CharField(
max_length=32,
db_index=True,
help_text='Cell type (cell line or tissue); reported by ENCODE')
antibody = models.CharField(
max_length=32,
blank=True,
db_index=True,
help_text='Antibody used in pulldown; reported by ENCODE')
rna_extract = models.CharField(
max_length=32,
blank=True,
db_index=True,
help_text='RNA extraction protocol; reported by ENCODE')
treatment = models.CharField(
max_length=32,
blank=True,
db_index=True,
help_text='Experimental treatment of cells; reported by ENCODE')
phase = models.CharField(
max_length=32,
blank=True,
db_index=True,
help_text='Cell phase; reported by ENCODE')
localization = models.CharField(
max_length=32,
blank=True,
db_index=True,
help_text='Cellular localization; reported by ENCODE')
extra_content = JSONField(default=dict)
@property
def is_stranded(self):
return self.data_ambiguous.name == ''
@classmethod
def get_field_options(cls):
dicts = {}
fields = [
'data_type',
'cell_type',
'antibody',
'rna_extract',
'treatment',
'phase',
'localization',
]
for genome in GenomeAssembly.objects.all():
dicts[genome.id] = {}
for fld in fields:
dicts[genome.id][fld] = cls.objects\
.filter(genome_assembly=genome)\
.values_list(fld, flat=True)\
.distinct()\
.order_by(fld)
return dicts
def get_bigwig_paths(self):
if self.is_stranded:
return[self.data_plus.path, self.data_minus.path]
else:
return [self.data_ambiguous.path]
class FeatureList(ValidationMixin, Dataset):
genome_assembly = models.ForeignKey(
GenomeAssembly)
stranded = models.BooleanField(
default=True)
dataset = models.FileField(
max_length=256)
@classmethod
def usable(cls, user):
# must be owned by user or no-user (public) and validated
return cls.objects\
.filter(((models.Q(owner=user) | models.Q(owner__isnull=True))),
validated=True)\
.order_by('owner', 'name')
@classmethod
def usable_json(cls, user):
return json.dumps(list(
cls.usable(user)
.values('id', 'name', 'genome_assembly')
))
def get_absolute_url(self):
return reverse('analysis:feature_list',
args=[self.pk, self.slug])
def get_update_url(self):
return reverse('analysis:feature_list_update',
args=[self.pk, self.slug])
def get_delete_url(self):
return reverse('analysis:feature_list_delete',
args=[self.pk, self.slug])
def validate(self):
validator = validators.FeatureListValidator(
self.dataset.path,
self.genome_assembly.chromosome_size_file,
self.stranded)
validator.validate()
return validator.is_valid, validator.display_errors()
class SortVector(ValidationMixin, Dataset):
feature_list = models.ForeignKey(
FeatureList)
dataset = models.FileField(
max_length=256)
@classmethod
def usable(cls, user):
# must be owned by user or no-user (public) and validated
return cls.objects\
.filter(((models.Q(owner=user) | models.Q(owner__isnull=True))),
validated=True)\
.order_by('owner', 'name')
@classmethod
def usable_json(cls, user):
return json.dumps(list(
cls.usable(user)
.values('id', 'name', 'feature_list_id')
))
def get_absolute_url(self):
return reverse('analysis:sort_vector',
args=[self.pk, self.slug])
def get_update_url(self):
return reverse('analysis:sort_vector_update',
args=[self.pk, self.slug])
def get_delete_url(self):
return reverse('analysis:sort_vector_delete',
args=[self.pk, self.slug])
def validate(self):
validator = validators.SortVectorValidator(
self.feature_list.dataset.path,
self.dataset.path)
validator.validate()
return validator.is_valid, validator.display_errors()
class AnalysisDatasets(models.Model):
analysis = models.ForeignKey(
'Analysis')
dataset = | |
if replace_control_characters is None:
replace_control_characters = False
replace_control_characters = _execute.make_bool(replace_control_characters, "replace_control_characters")
if Tsplits is None:
Tsplits = _dtypes.int64
Tsplits = _execute.make_type(Tsplits, "Tsplits")
input = _ops.convert_to_tensor(input, _dtypes.string)
_inputs_flat = [input]
_attrs = ("input_encoding", input_encoding, "errors", errors,
"replacement_char", replacement_char, "replace_control_characters",
replace_control_characters, "Tsplits", Tsplits)
_result = _execute.execute(b"UnicodeDecode", 2, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"UnicodeDecode", _inputs_flat, _attrs, _result)
_result = _UnicodeDecodeOutput._make(_result)
return _result
_UnicodeDecodeWithOffsetsOutput = collections.namedtuple(
"UnicodeDecodeWithOffsets",
["row_splits", "char_values", "char_to_byte_starts"])
def unicode_decode_with_offsets(input, input_encoding, errors="replace", replacement_char=65533, replace_control_characters=False, Tsplits=_dtypes.int64, name=None):
r"""Decodes each string in `input` into a sequence of Unicode code points.
The character codepoints for all strings are returned using a single vector
`char_values`, with strings expanded to characters in row-major order.
Similarly, the character start byte offsets are returned using a single vector
`char_to_byte_starts`, with strings expanded in row-major order.
The `row_splits` tensor indicates where the codepoints and start offsets for
each input string begin and end within the `char_values` and
`char_to_byte_starts` tensors. In particular, the values for the `i`th
string (in row-major order) are stored in the slice
`[row_splits[i]:row_splits[i+1]]`. Thus:
* `char_values[row_splits[i]+j]` is the Unicode codepoint for the `j`th
character in the `i`th string (in row-major order).
* `char_to_bytes_starts[row_splits[i]+j]` is the start byte offset for the `j`th
character in the `i`th string (in row-major order).
* `row_splits[i+1] - row_splits[i]` is the number of characters in the `i`th
string (in row-major order).
Args:
input: A `Tensor` of type `string`.
The text to be decoded. Can have any shape. Note that the output is flattened
to a vector of char values.
input_encoding: A `string`.
Text encoding of the input strings. This is any of the encodings supported
by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`.
errors: An optional `string` from: `"strict", "replace", "ignore"`. Defaults to `"replace"`.
Error handling policy when there is invalid formatting found in the input.
The value of 'strict' will cause the operation to produce an InvalidArgument
error on any invalid input formatting. A value of 'replace' (the default) will
cause the operation to replace any invalid formatting in the input with the
`replacement_char` codepoint. A value of 'ignore' will cause the operation to
skip any invalid formatting in the input and produce no corresponding output
character.
replacement_char: An optional `int`. Defaults to `65533`.
The replacement character codepoint to be used in place of any invalid
formatting in the input when `errors='replace'`. Any valid Unicode codepoint may
be used. The default value is 65533, the Unicode replacement character (U+FFFD).
replace_control_characters: An optional `bool`. Defaults to `False`.
Whether to replace the C0 control characters (00-1F) with the
`replacement_char`. Default is false.
Tsplits: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int64`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (row_splits, char_values, char_to_byte_starts).
row_splits: A `Tensor` of type `Tsplits`.
char_values: A `Tensor` of type `int32`.
char_to_byte_starts: A `Tensor` of type `int64`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "UnicodeDecodeWithOffsets", name, input, "input_encoding",
input_encoding, "errors", errors, "replacement_char",
replacement_char, "replace_control_characters",
replace_control_characters, "Tsplits", Tsplits)
_result = _UnicodeDecodeWithOffsetsOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return unicode_decode_with_offsets_eager_fallback(
input, input_encoding=input_encoding, errors=errors,
replacement_char=replacement_char,
replace_control_characters=replace_control_characters,
Tsplits=Tsplits, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
input_encoding = _execute.make_str(input_encoding, "input_encoding")
if errors is None:
errors = "replace"
errors = _execute.make_str(errors, "errors")
if replacement_char is None:
replacement_char = 65533
replacement_char = _execute.make_int(replacement_char, "replacement_char")
if replace_control_characters is None:
replace_control_characters = False
replace_control_characters = _execute.make_bool(replace_control_characters, "replace_control_characters")
if Tsplits is None:
Tsplits = _dtypes.int64
Tsplits = _execute.make_type(Tsplits, "Tsplits")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"UnicodeDecodeWithOffsets", input=input,
input_encoding=input_encoding,
errors=errors,
replacement_char=replacement_char,
replace_control_characters=replace_control_characters,
Tsplits=Tsplits, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("input_encoding", _op.get_attr("input_encoding"), "errors",
_op.get_attr("errors"), "replacement_char",
_op._get_attr_int("replacement_char"),
"replace_control_characters",
_op._get_attr_bool("replace_control_characters"), "Tsplits",
_op._get_attr_type("Tsplits"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"UnicodeDecodeWithOffsets", _inputs_flat, _attrs, _result)
_result = _UnicodeDecodeWithOffsetsOutput._make(_result)
return _result
UnicodeDecodeWithOffsets = tf_export("raw_ops.UnicodeDecodeWithOffsets")(_ops.to_raw_op(unicode_decode_with_offsets))
def unicode_decode_with_offsets_eager_fallback(input, input_encoding, errors, replacement_char, replace_control_characters, Tsplits, name, ctx):
input_encoding = _execute.make_str(input_encoding, "input_encoding")
if errors is None:
errors = "replace"
errors = _execute.make_str(errors, "errors")
if replacement_char is None:
replacement_char = 65533
replacement_char = _execute.make_int(replacement_char, "replacement_char")
if replace_control_characters is None:
replace_control_characters = False
replace_control_characters = _execute.make_bool(replace_control_characters, "replace_control_characters")
if Tsplits is None:
Tsplits = _dtypes.int64
Tsplits = _execute.make_type(Tsplits, "Tsplits")
input = _ops.convert_to_tensor(input, _dtypes.string)
_inputs_flat = [input]
_attrs = ("input_encoding", input_encoding, "errors", errors,
"replacement_char", replacement_char, "replace_control_characters",
replace_control_characters, "Tsplits", Tsplits)
_result = _execute.execute(b"UnicodeDecodeWithOffsets", 3,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"UnicodeDecodeWithOffsets", _inputs_flat, _attrs, _result)
_result = _UnicodeDecodeWithOffsetsOutput._make(_result)
return _result
def unicode_encode(input_values, input_splits, output_encoding, errors="replace", replacement_char=65533, name=None):
r"""Encode a tensor of ints into unicode strings.
Returns a vector of strings, where `output[i]` is constructed by encoding the
Unicode codepoints in `input_values[input_splits[i]:input_splits[i+1]]`
using `output_encoding`.
---
Example:
```
input_values = [72, 101, 108, 108, 111, 87, 111, 114, 108, 100]
input_splits = [0, 5, 10]
output_encoding = 'UTF-8'
output = ['Hello', 'World']
```
Args:
input_values: A `Tensor` of type `int32`.
A 1D tensor containing the unicode codepoints that should be encoded.
input_splits: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A 1D tensor specifying how the unicode codepoints should be split into strings.
In particular, `output[i]` is constructed by encoding the codepoints in the
slice `input_values[input_splits[i]:input_splits[i+1]]`.
output_encoding: A `string` from: `"UTF-8", "UTF-16-BE", "UTF-32-BE"`.
Unicode encoding of the output strings. Valid encodings are: `"UTF-8",
"UTF-16-BE", and "UTF-32-BE"`.
errors: An optional `string` from: `"ignore", "replace", "strict"`. Defaults to `"replace"`.
Error handling policy when there is invalid formatting found in the input.
The value of 'strict' will cause the operation to produce an InvalidArgument
error on any invalid input formatting. A value of 'replace' (the default) will
cause the operation to replace any invalid formatting in the input with the
`replacement_char` codepoint. A value of 'ignore' will cause the operation to
skip any invalid formatting in the input and produce no corresponding output
character.
replacement_char: An optional `int`. Defaults to `65533`.
The replacement character codepoint to be used in place of any invalid
formatting in the input when `errors='replace'`. Any valid Unicode codepoint may
be used. The default value is 65533, the Unicode replacement character (U+FFFD).
name: A name for the operation (optional).
Returns:
A `Tensor` of type `string`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "UnicodeEncode", name, input_values, input_splits, "errors",
errors, "output_encoding", output_encoding, "replacement_char",
replacement_char)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return unicode_encode_eager_fallback(
input_values, input_splits, errors=errors,
output_encoding=output_encoding, replacement_char=replacement_char,
name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
output_encoding = _execute.make_str(output_encoding, "output_encoding")
if errors is None:
errors = "replace"
errors = _execute.make_str(errors, "errors")
if replacement_char is None:
replacement_char = 65533
replacement_char = _execute.make_int(replacement_char, "replacement_char")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"UnicodeEncode", input_values=input_values, input_splits=input_splits,
output_encoding=output_encoding, errors=errors,
replacement_char=replacement_char, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("errors", _op.get_attr("errors"), "output_encoding",
_op.get_attr("output_encoding"), "replacement_char",
_op._get_attr_int("replacement_char"), "Tsplits",
_op._get_attr_type("Tsplits"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"UnicodeEncode", _inputs_flat, _attrs, _result)
_result, = _result
return _result
UnicodeEncode = tf_export("raw_ops.UnicodeEncode")(_ops.to_raw_op(unicode_encode))
def unicode_encode_eager_fallback(input_values, input_splits, output_encoding, errors, replacement_char, name, ctx):
output_encoding = _execute.make_str(output_encoding, "output_encoding")
if errors is None:
errors = "replace"
errors = _execute.make_str(errors, "errors")
if replacement_char is None:
replacement_char = 65533
replacement_char = _execute.make_int(replacement_char, "replacement_char")
_attr_Tsplits, (input_splits,) = _execute.args_to_matching_eager([input_splits], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int64)
input_values = _ops.convert_to_tensor(input_values, _dtypes.int32)
_inputs_flat = [input_values, input_splits]
_attrs = ("errors", errors, "output_encoding", output_encoding,
"replacement_char", replacement_char, "Tsplits", _attr_Tsplits)
_result = _execute.execute(b"UnicodeEncode", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"UnicodeEncode", _inputs_flat, _attrs, _result)
_result, = _result
return _result
@_dispatch.add_dispatch_list
@tf_export('strings.unicode_script')
def unicode_script(input, name=None):
r"""Determine the script codes of a given tensor of Unicode integer code points.
This operation converts Unicode code points to script codes corresponding to
each code point. Script codes correspond to International Components for
Unicode (ICU) UScriptCode values.
See
[ICU project docs](http://icu-project.org/apiref/icu4c/uscript_8h.html)
for more details on script codes.
For an example, see the unicode strings guide on [unicode scripts]
(https://www.tensorflow.org/tutorials/load_data/unicode#representing_unicode).
| |
""" Functions that help with SKA simulations
"""
__all__ = ['plot_visibility', 'plot_visibility_pol', 'find_times_above_elevation_limit', 'plot_uvcoverage',
'plot_azel', 'plot_gaintable', 'plot_pointingtable', 'find_pb_width_null',
'create_simulation_components', 'plot_pa']
import logging
import astropy.constants as constants
import astropy.units as units
import matplotlib.pyplot as plt
import numpy
from astropy.coordinates import SkyCoord
from rascil.data_models.memory_data_models import Skycomponent, BlockVisibility
from rascil.data_models.polarisation import PolarisationFrame
from rascil.processing_components.image import create_image
from rascil.processing_components.image.operations import show_image
from rascil.processing_components.imaging.primary_beams import create_pb
from rascil.processing_components.skycomponent.base import copy_skycomponent
from rascil.processing_components.skycomponent.operations import apply_beam_to_skycomponent, \
filter_skycomponents_by_flux
from rascil.processing_components.util.coordinate_support import hadec_to_azel
from rascil.processing_components.visibility.visibility_geometry import calculate_blockvisibility_hourangles, \
calculate_blockvisibility_azel, calculate_blockvisibility_parallactic_angles
log = logging.getLogger('logger')
def find_times_above_elevation_limit(start_times, end_times, location, phasecentre, elevation_limit):
""" Find all times for which a phasecentre is above the elevation limit
:param start_times:
:param end_times:
:param location:
:param phasecentre:
:param elevation_limit:
:return:
"""
assert len(start_times) == len(end_times)
def valid_elevation(time, location, phasecentre):
ha = numpy.pi * time / 43200.0
dec = phasecentre.dec.rad
az, el = hadec_to_azel(ha, dec, location.lat.rad)
return el > elevation_limit * numpy.pi / 180.0
number_valid_times = 0
valid_start_times = []
for it, t in enumerate(start_times):
if valid_elevation(start_times[it], location, phasecentre) or \
valid_elevation(end_times[it], location, phasecentre):
valid_start_times.append(t)
number_valid_times += 1
assert number_valid_times > 0, "No data above elevation limit"
log.info("find_times_above_elevation_limit: Start times for chunks above elevation limit:")
return valid_start_times
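# Usage sketch (values are illustrative, not from this module). The times are hour
# angles expressed in seconds, so 43200 s corresponds to an hour angle of pi radians
# (12 h), and elevation_limit is in degrees:
#
#   valid_starts = find_times_above_elevation_limit(
#       start_times=[-14400.0, 0.0, 14400.0],     # HA of -4 h, 0 h, +4 h
#       end_times=[-10800.0, 3600.0, 18000.0],
#       location=location, phasecentre=phasecentre,
#       elevation_limit=15.0)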
def plot_visibility(vis_list, title='Visibility', y='amp', x='uvdist', plot_file=None, plot_zero=False, **kwargs):
""" Standard plot of visibility
:param vis_list:
:param plot_file:
:param kwargs:
:return:
"""
plt.clf()
for ivis, vis in enumerate(vis_list):
if y == 'amp':
yvalue = numpy.abs(vis.flagged_vis[..., 0, 0]).flat
else:
yvalue = numpy.angle(vis.flagged_vis[..., 0, 0]).flat
xvalue = vis.uvdist.flat
plt.plot(xvalue[yvalue > 0.0], yvalue[yvalue > 0.0], '.', color='b', markersize=0.2)
if plot_zero:
plt.plot(xvalue[yvalue == 0.0], yvalue[yvalue == 0.0], '.', color='r', markersize=0.2)
plt.xlabel(x)
plt.ylabel(y)
plt.title(title)
if plot_file is not None:
plt.savefig(plot_file)
plt.show(block=False)
def plot_visibility_pol(vis_list, title='Visibility_pol', y='amp', x='uvdist', plot_file=None, **kwargs):
""" Standard plot of visibility
:param vis_list:
:param plot_file:
:param kwargs:
:return:
"""
plt.clf()
for ivis, vis in enumerate(vis_list):
pols = vis.polarisation_frame.names
colors = ["red", "blue", "green", "purple"]
for pol in range(vis.vis.shape[-1]):
if y == 'amp':
yvalue = numpy.abs(vis.flagged_vis[..., 0, pol]).flat
else:
yvalue = numpy.angle(vis.flagged_vis[..., 0, pol]).flat
if x=="time":
xvalue = numpy.repeat(vis.time, len(yvalue))
else:
xvalue = vis.uvdist.flat
if ivis == 0:
plt.plot(xvalue[yvalue > 0.0], yvalue[yvalue > 0.0], '.', color=colors[pol],
label=pols[pol])
else:
plt.plot(xvalue[yvalue > 0.0], yvalue[yvalue > 0.0], '.', color=colors[pol])
plt.xlabel(x)
plt.ylabel(y)
plt.title(title)
plt.legend()
if plot_file is not None:
plt.savefig(plot_file)
plt.show(block=False)
def plot_uvcoverage(vis_list, ax=None, plot_file=None, title='UV coverage', **kwargs):
""" Standard plot of uv coverage
:param vis_list:
:param plot_file:
:param kwargs:
:return:
"""
for ivis, vis in enumerate(vis_list):
u = numpy.array(vis.u[...].flat)
v = numpy.array(vis.v[...].flat)
if isinstance(vis, BlockVisibility):
k = (vis.frequency / constants.c).value
u = numpy.array(numpy.outer(u, k).flat)
v = numpy.array(numpy.outer(v, k).flat)
plt.plot(u, v, '.', color='b', markersize=0.2)
plt.plot(-u, -v, '.', color='b', markersize=0.2)
else:
k = vis.frequency / constants.c
u = u * k
v = v * k
plt.plot(u.value, v.value, '.', color='b', markersize=0.2)
plt.plot(-u.value, -v.value, '.', color='b', markersize=0.2)
plt.xlabel('U (wavelengths)')
plt.ylabel('V (wavelengths)')
plt.title(title)
if plot_file is not None:
plt.savefig(plot_file)
plt.show(block=False)
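# Worked example of the metres-to-wavelengths scaling used above (illustrative numbers):
# at 1.4 GHz, k = frequency / c ~ 1.4e9 / 3.0e8 ~ 4.7 wavelengths per metre, so a
# 100 m baseline plots at roughly 470 wavelengths.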
def plot_azel(bvis_list, plot_file=None, **kwargs):
""" Standard plot of az el coverage
:param bvis_list:
:param plot_file:
:param kwargs:
:return:
"""
plt.clf()
r2d = 180.0 / numpy.pi
for ibvis, bvis in enumerate(bvis_list):
ha = calculate_blockvisibility_hourangles(bvis).value
az, el = calculate_blockvisibility_azel(bvis)
if ibvis == 0:
plt.plot(ha, az.deg, '.', color='r', label='Azimuth (deg)')
plt.plot(ha, el.deg, '.', color='b', label='Elevation (deg)')
else:
plt.plot(ha, az.deg, '.', color='r')
plt.plot(ha, el.deg, '.', color='b')
plt.xlabel('HA (hours)')
plt.ylabel('Angle')
plt.legend()
plt.title('Azimuth and elevation vs hour angle')
if plot_file is not None:
plt.savefig(plot_file)
plt.show(block=False)
def plot_pa(bvis_list, plot_file=None, **kwargs):
""" Standard plot of parallactic angle coverage
:param bvis_list:
:param plot_file:
:param kwargs:
:return:
"""
plt.clf()
for ibvis, bvis in enumerate(bvis_list):
ha = calculate_blockvisibility_hourangles(bvis).value
pa = calculate_blockvisibility_parallactic_angles(bvis)
if ibvis == 0:
plt.plot(ha, pa.deg, '.', color='r', label='PA (deg)')
else:
plt.plot(ha, pa.deg, '.', color='r')
plt.xlabel('HA (hours)')
plt.ylabel('Parallactic Angle')
plt.legend()
plt.title('Parallactic angle vs hour angle')
if plot_file is not None:
plt.savefig(plot_file)
plt.show(block=False)
def plot_gaintable(gt_list, title='', value='amp', plot_file='gaintable.png', **kwargs):
""" Standard plot of gain table
:param gt_list:
:param title:
:param plot_file:
:param kwargs:
:return:
"""
plt.clf()
for igt, gt in enumerate(gt_list):
nrec = gt[0].nrec
names = gt[0].receptor_frame.names
if nrec > 1:
recs = [0, 1]
else:
recs = [1]
colors = ['r', 'b']
for irec, rec in enumerate(recs):
amp = numpy.abs(gt[0].gain[:, 0, 0, rec, rec])
if value == 'phase':
y = numpy.angle(gt[0].gain[:, 0, 0, rec, rec])
if igt == 0:
plt.plot(gt[0].time[amp > 0.0], y[amp > 0.0], '.', color=colors[rec], label=names[rec])
else:
plt.plot(gt[0].time[amp > 0.0], y[amp > 0.0], '.', color=colors[rec])
else:
y = amp
if igt == 0:
plt.plot(gt[0].time[amp > 0.0], 1.0 / y[amp > 0.0], '.', color=colors[rec], label=names[rec])
else:
plt.plot(gt[0].time[amp > 0.0], 1.0 / y[amp > 0.0], '.', color=colors[rec])
plt.title(title)
plt.xlabel('Time (s)')
plt.legend()
if plot_file is not None:
plt.savefig(plot_file)
plt.show(block=False)
def plot_pointingtable(pt_list, plot_file, title, **kwargs):
""" Standard plot of pointing table
:param pt_list:
:param plot_file:
:param title:
:param kwargs:
:return:
"""
plt.clf()
r2a = 180.0 * 3600.0 / numpy.pi
rms_az = 0.0
rms_el = 0.0
num = 0
for pt in pt_list:
num += len(pt.pointing[:, 0, 0, 0, 0])
rms_az += numpy.sum((r2a * pt.pointing[:, 0, 0, 0, 0]) ** 2)
rms_el += numpy.sum((r2a * pt.pointing[:, 0, 0, 0, 1]) ** 2)
plt.plot(pt.time, r2a * pt.pointing[:, 0, 0, 0, 0], '.', color='r')
plt.plot(pt.time, r2a * pt.pointing[:, 0, 0, 0, 1], '.', color='b')
rms_az = numpy.sqrt(rms_az / num)
rms_el = numpy.sqrt(rms_el / num)
plt.title("%s az, el rms %.2f %.2f (arcsec)" % (title, rms_az, rms_el))
plt.xlabel('Time (s)')
plt.ylabel('Offset (arcsec)')
if plot_file is not None:
plt.savefig(plot_file)
plt.show(block=False)
def find_pb_width_null(pbtype, frequency, **kwargs):
""" Rough estimates of HWHM and null locations
:param pbtype:
:param frequency:
:param kwargs:
:return:
"""
if pbtype == 'MID':
HWHM_deg = 0.596 * 1.36e9 / frequency[0]
null_az_deg = 2.0 * HWHM_deg
null_el_deg = 2.0 * HWHM_deg
elif pbtype == 'MID_FEKO_B1':
null_az_deg = 1.0779 * 1.36e9 / frequency[0]
null_el_deg = 1.149 * 1.36e9 / frequency[0]
HWHM_deg = 0.447 * 1.36e9 / frequency[0]
elif pbtype == 'MID_FEKO_B2':
null_az_deg = 1.0779 * 1.36e9 / frequency[0]
null_el_deg = 1.149 * 1.36e9 / frequency[0]
HWHM_deg = 0.447 * 1.36e9 / frequency[0]
elif pbtype == 'MID_FEKO_Ku':
null_az_deg = 1.0779 * 1.36e9 / frequency[0]
null_el_deg = 1.149 * 1.36e9 / frequency[0]
HWHM_deg = 0.447 * 1.36e9 / frequency[0]
else:
null_az_deg = 1.145 * 1.36e9 / frequency[0]
null_el_deg = 1.145 * 1.36e9 / frequency[0]
HWHM_deg = 0.447 * 1.36e9 / frequency[0]
return HWHM_deg, null_az_deg, null_el_deg
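# Worked example (illustrative): for pbtype='MID' at frequency=[1.36e9] the expressions
# above give HWHM_deg = 0.596 deg and first nulls at 2 * HWHM = 1.192 deg in both
# azimuth and elevation; all three estimates scale as 1/frequency.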
def create_simulation_components(context, phasecentre, frequency, pbtype, offset_dir, flux_limit,
pbradius, pb_npixel, pb_cellsize, show=False, fov=10,
polarisation_frame=PolarisationFrame("stokesI"),
filter_by_primary_beam=True, flux_max=10.0):
""" Construct components for simulation
:param context: singlesource, doublesource, null or s3sky
:param phasecentre: Centre of components
:param frequency: Frequency
:param pbtype: Type of primary beam
:param offset_dir: Offset in ra, dec degrees
:param flux_limit: Lower limit flux
:param pbradius: Radius of components in radians
:param pb_npixel: Number of pixels in the primary beam model
:param pb_cellsize: Cellsize in primary beam model
:param fov: FOV in degrees (used to select catalog)
:param flux_max: Maximum flux in model before application of primary beam
:param filter_by_primary_beam: Filter components by primary beam
:param polarisation_frame:
:param show:
:return:
"""
HWHM_deg, null_az_deg, null_el_deg = find_pb_width_null(pbtype, frequency)
dec = phasecentre.dec.deg
ra = phasecentre.ra.deg
if context == 'singlesource':
log.info("create_simulation_components: Constructing single component")
offset = [HWHM_deg * offset_dir[0], HWHM_deg * offset_dir[1]]
log.info(
"create_simulation_components: Offset from pointing centre = %.3f, %.3f deg" % (
offset[0], offset[1]))
# The point source is offset to approximately the halfpower point
odirection = SkyCoord(
ra=(ra + offset[0] / numpy.cos(numpy.pi * dec / 180.0)) * units.deg,
dec=(dec + offset[1]) * units.deg,
frame='icrs', equinox='J2000')
if polarisation_frame.type == "stokesIQUV":
original_components = [
Skycomponent(flux=[[1.0, 0.0, 0.0, 0.0]], direction=odirection, frequency=frequency,
polarisation_frame=PolarisationFrame('stokesIQUV'))]
else:
original_components = [
Skycomponent(flux=[[1.0]], direction=odirection, frequency=frequency,
polarisation_frame=PolarisationFrame('stokesI'))]
offset_direction = odirection
elif context == 'doublesource':
original_components = []
log.info("create_simulation_components: Constructing double components")
for sign_offset in [(-1, 0), (1, 0)]:
offset = [HWHM_deg * sign_offset[0], HWHM_deg * sign_offset[1]]
log.info(
"create_simulation_components: Offset from pointing centre = %.3f, %.3f deg" % (
offset[0], offset[1]))
odirection = SkyCoord(
ra=(ra + offset[0] / numpy.cos(numpy.pi * dec / 180.0)) * units.deg,
dec=(dec + offset[1]) * units.deg,
frame='icrs', equinox='J2000')
if polarisation_frame.type == "stokesIQUV":
original_components.append(
Skycomponent(flux=[[1.0, 0.0, 0.0, | |
= self.Z_VAL[self.i+1]
self.r = self.gp_sin_ratio
self.zeq = float(((self.z1*(log10(self.r2)-log10(self.r))+self.z2*(log10(self.r)-log10(self.r1)))/(log10(self.r2)-log10(self.r1))))
self.energy_count = 11
self.sin_gp_perform_func2()
else:
pass
elif self.gp_sin_energy == "0.500":
for self.i in range(0, 22):
if self.R_E13[self.i+1] >= self.gp_sin_ratio and self.gp_sin_ratio >= self.R_E13[self.i]:
self.r1 = self.R_E13[self.i]
self.r2 = self.R_E13[self.i+1]
self.z1 = self.Z_VAL[self.i]
self.z2 = self.Z_VAL[self.i+1]
self.r = self.gp_sin_ratio
self.zeq = float(((self.z1*(log10(self.r2)-log10(self.r))+self.z2*(log10(self.r)-log10(self.r1)))/(log10(self.r2)-log10(self.r1))))
self.energy_count = 12
self.sin_gp_perform_func2()
else:
pass
elif self.gp_sin_energy == "0.600":
for self.i in range(0, 22):
if self.R_E14[self.i+1] >= self.gp_sin_ratio and self.gp_sin_ratio >= self.R_E14[self.i]:
self.r1 = self.R_E14[self.i]
self.r2 = self.R_E14[self.i+1]
self.z1 = self.Z_VAL[self.i]
self.z2 = self.Z_VAL[self.i+1]
self.r = self.gp_sin_ratio
self.zeq = float(((self.z1*(log10(self.r2)-log10(self.r))+self.z2*(log10(self.r)-log10(self.r1)))/(log10(self.r2)-log10(self.r1))))
self.energy_count = 13
self.sin_gp_perform_func2()
else:
pass
elif self.gp_sin_energy == "0.800":
for self.i in range(0, 22):
if self.R_E15[self.i+1] >= self.gp_sin_ratio and self.gp_sin_ratio >= self.R_E15[self.i]:
self.r1 = self.R_E15[self.i]
self.r2 = self.R_E15[self.i+1]
self.z1 = self.Z_VAL[self.i]
self.z2 = self.Z_VAL[self.i+1]
self.r = self.gp_sin_ratio
self.zeq = float(((self.z1*(log10(self.r2)-log10(self.r))+self.z2*(log10(self.r)-log10(self.r1)))/(log10(self.r2)-log10(self.r1))))
self.energy_count = 14
self.sin_gp_perform_func2()
else:
pass
elif self.gp_sin_energy == "1.000":
for self.i in range(0, 22):
if self.R_E16[self.i+1] >= self.gp_sin_ratio and self.gp_sin_ratio >= self.R_E16[self.i]:
self.r1 = self.R_E16[self.i]
self.r2 = self.R_E16[self.i+1]
self.z1 = self.Z_VAL[self.i]
self.z2 = self.Z_VAL[self.i+1]
self.r = self.gp_sin_ratio
self.zeq = float(((self.z1*(log10(self.r2)-log10(self.r))+self.z2*(log10(self.r)-log10(self.r1)))/(log10(self.r2)-log10(self.r1))))
self.energy_count = 15
self.sin_gp_perform_func2()
else:
pass
elif self.gp_sin_energy == "1.500":
for self.i in range(0, 22):
if self.R_E17[self.i+1] >= self.gp_sin_ratio and self.gp_sin_ratio >= self.R_E17[self.i]:
self.r1 = self.R_E17[self.i]
self.r2 = self.R_E17[self.i+1]
self.z1 = self.Z_VAL[self.i]
self.z2 = self.Z_VAL[self.i+1]
self.r = self.gp_sin_ratio
self.zeq = float(((self.z1*(log10(self.r2)-log10(self.r))+self.z2*(log10(self.r)-log10(self.r1)))/(log10(self.r2)-log10(self.r1))))
self.energy_count = 16
self.sin_gp_perform_func2()
else:
pass
elif self.gp_sin_energy == "2.000":
for self.i in range(0, 22):
if self.R_E18[self.i+1] >= self.gp_sin_ratio and self.gp_sin_ratio >= self.R_E18[self.i]:
self.r1 = self.R_E18[self.i]
self.r2 = self.R_E18[self.i+1]
self.z1 = self.Z_VAL[self.i]
self.z2 = self.Z_VAL[self.i+1]
self.r = self.gp_sin_ratio
self.zeq = float(((self.z1*(log10(self.r2)-log10(self.r))+self.z2*(log10(self.r)-log10(self.r1)))/(log10(self.r2)-log10(self.r1))))
self.energy_count = 17
self.sin_gp_perform_func2()
else:
pass
elif self.gp_sin_energy == "3.000":
for self.i in range(0, 22):
if self.R_E19[self.i+1] >= self.gp_sin_ratio and self.gp_sin_ratio >= self.R_E19[self.i]:
self.r1 = self.R_E19[self.i]
self.r2 = self.R_E19[self.i+1]
self.z1 = self.Z_VAL[self.i]
self.z2 = self.Z_VAL[self.i+1]
self.r = self.gp_sin_ratio
self.zeq = float(((self.z1*(log10(self.r2)-log10(self.r))+self.z2*(log10(self.r)-log10(self.r1)))/(log10(self.r2)-log10(self.r1))))
self.energy_count = 18
self.sin_gp_perform_func2()
else:
pass
elif self.gp_sin_energy == "4.000":
for self.i in range(0, 22):
if self.R_E20[self.i+1] >= self.gp_sin_ratio and self.gp_sin_ratio >= self.R_E20[self.i]:
self.r1 = self.R_E20[self.i]
self.r2 = self.R_E20[self.i+1]
self.z1 = self.Z_VAL[self.i]
self.z2 = self.Z_VAL[self.i+1]
self.r = self.gp_sin_ratio
self.zeq = float(((self.z1*(log10(self.r2)-log10(self.r))+self.z2*(log10(self.r)-log10(self.r1)))/(log10(self.r2)-log10(self.r1))))
self.energy_count = 19
self.sin_gp_perform_func2()
else:
pass
elif self.gp_sin_energy == "5.000":
for self.i in range(0, 22):
if self.R_E21[self.i+1] >= self.gp_sin_ratio and self.gp_sin_ratio >= self.R_E21[self.i]:
self.r1 = self.R_E21[self.i]
self.r2 = self.R_E21[self.i+1]
self.z1 = self.Z_VAL[self.i]
self.z2 = self.Z_VAL[self.i+1]
self.r = self.gp_sin_ratio
self.zeq = float(((self.z1*(log10(self.r2)-log10(self.r))+self.z2*(log10(self.r)-log10(self.r1)))/(log10(self.r2)-log10(self.r1))))
self.energy_count = 20
self.sin_gp_perform_func2()
else:
pass
elif self.gp_sin_energy == "6.000":
for self.i in range(0, 22):
if self.R_E22[self.i+1] >= self.gp_sin_ratio and self.gp_sin_ratio >= self.R_E22[self.i]:
self.r1 = self.R_E22[self.i]
self.r2 = self.R_E22[self.i+1]
self.z1 = self.Z_VAL[self.i]
self.z2 = self.Z_VAL[self.i+1]
self.r = self.gp_sin_ratio
self.zeq = float(((self.z1*(log10(self.r2)-log10(self.r))+self.z2*(log10(self.r)-log10(self.r1)))/(log10(self.r2)-log10(self.r1))))
self.energy_count = 21
self.sin_gp_perform_func2()
else:
pass
elif self.gp_sin_energy == "8.000":
for self.i in range(0, 22):
if self.R_E23[self.i+1] >= self.gp_sin_ratio and self.gp_sin_ratio >= self.R_E23[self.i]:
self.r1 = self.R_E23[self.i]
self.r2 = self.R_E23[self.i+1]
self.z1 = self.Z_VAL[self.i]
self.z2 = self.Z_VAL[self.i+1]
self.r = self.gp_sin_ratio
self.zeq = float(((self.z1*(log10(self.r2)-log10(self.r))+self.z2*(log10(self.r)-log10(self.r1)))/(log10(self.r2)-log10(self.r1))))
self.energy_count = 22
self.sin_gp_perform_func2()
else:
pass
elif self.gp_sin_energy == "10.00":
for self.i in range(0, 22):
if self.R_E24[self.i+1] >= self.gp_sin_ratio and self.gp_sin_ratio >= self.R_E24[self.i]:
self.r1 = self.R_E24[self.i]
self.r2 = self.R_E24[self.i+1]
self.z1 = self.Z_VAL[self.i]
self.z2 = self.Z_VAL[self.i+1]
self.r = self.gp_sin_ratio
self.zeq = float(((self.z1*(log10(self.r2)-log10(self.r))+self.z2*(log10(self.r)-log10(self.r1)))/(log10(self.r2)-log10(self.r1))))
self.energy_count = 23
self.sin_gp_perform_func2()
else:
pass
elif self.gp_sin_energy == "15.00":
for self.i in range(0, 22):
if self.R_E25[self.i+1] >= self.gp_sin_ratio and self.gp_sin_ratio >= self.R_E25[self.i]:
self.r1 = self.R_E25[self.i]
self.r2 = self.R_E25[self.i+1]
self.z1 = self.Z_VAL[self.i]
self.z2 = self.Z_VAL[self.i+1]
self.r = self.gp_sin_ratio
self.zeq = float(((self.z1*(log10(self.r2)-log10(self.r))+self.z2*(log10(self.r)-log10(self.r1)))/(log10(self.r2)-log10(self.r1))))
self.energy_count = 24
self.sin_gp_perform_func2()
else:
pass
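# Every energy branch above applies the same log10-linear interpolation to obtain the
# equivalent atomic number zeq from the two tabulated ratios bracketing the input ratio.
# A minimal sketch of that shared formula (an illustrative refactoring, not part of the
# original class):
#
#   from math import log10
#
#   def log_interp(z1, z2, r1, r2, r):
#       """Interpolate between (r1, z1) and (r2, z2), linearly in log10 of the ratio."""
#       return (z1 * (log10(r2) - log10(r)) + z2 * (log10(r) - log10(r1))) / \
#              (log10(r2) - log10(r1))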
def sin_gp_perform_func2(self):
if self.gp_sin_type == "Absorption":
if self.z1 == 5 and self.z2 == 4:
self.b1 = self.b_val_bor_aborp[self.energy_count]
self.c1 = self.c_val_bor_aborp[self.energy_count]
self.a1 = self.a_val_bor_aborp[self.energy_count]
self.xk1 = self.Xk_val_bor_aborp[self.energy_count]
self.d1 = self.d_val_bor_aborp[self.energy_count]
self.b2 = self.b_val_bery_aborp[self.energy_count]
self.c2 = self.c_val_bery_aborp[self.energy_count]
self.a2 = self.a_val_bery_aborp[self.energy_count]
self.xk2 = self.Xk_val_bery_aborp[self.energy_count]
self.d2 = self.d_val_bery_aborp[self.energy_count]
self.b = round((self.b1*(log10(self.z2)-log10(self.zeq))+self.b2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.c = round((self.c1*(log10(self.z2)-log10(self.zeq))+self.c2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.a = round((self.a1*(log10(self.z2)-log10(self.zeq))+self.a2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.xk = round((self.xk1*(log10(self.z2)-log10(self.zeq))+self.xk2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.d = round((self.d1*(log10(self.z2)-log10(self.zeq))+self.d2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
elif self.z1 == 6 and self.z2 == 5:
self.b1 = self.b_val_car_aborp[self.energy_count]
self.c1 = self.c_val_car_aborp[self.energy_count]
self.a1 = self.a_val_car_aborp[self.energy_count]
self.xk1 = self.Xk_val_car_aborp[self.energy_count]
self.d1 = self.d_val_car_aborp[self.energy_count]
self.b2 = self.b_val_bor_aborp[self.energy_count]
self.c2 = self.c_val_bor_aborp[self.energy_count]
self.a2 = self.a_val_bor_aborp[self.energy_count]
self.xk2 = self.Xk_val_bor_aborp[self.energy_count]
self.d2 = self.d_val_bor_aborp[self.energy_count]
self.b = round((self.b1*(log10(self.z2)-log10(self.zeq))+self.b2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.c = round((self.c1*(log10(self.z2)-log10(self.zeq))+self.c2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.a = round((self.a1*(log10(self.z2)-log10(self.zeq))+self.a2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.xk = round((self.xk1*(log10(self.z2)-log10(self.zeq))+self.xk2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.d = round((self.d1*(log10(self.z2)-log10(self.zeq))+self.d2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
elif self.z1 == 7 and self.z2 == 6:
self.b1 = self.b_val_nit_aborp[self.energy_count]
self.c1 = self.c_val_nit_aborp[self.energy_count]
self.a1 = self.a_val_nit_aborp[self.energy_count]
self.xk1 = self.Xk_val_nit_aborp[self.energy_count]
self.d1 = self.d_val_nit_aborp[self.energy_count]
self.b2 = self.b_val_car_aborp[self.energy_count]
self.c2 = self.c_val_car_aborp[self.energy_count]
self.a2 = self.a_val_car_aborp[self.energy_count]
self.xk2 = self.Xk_val_car_aborp[self.energy_count]
self.d2 = self.d_val_car_aborp[self.energy_count]
self.b = round((self.b1*(log10(self.z2)-log10(self.zeq))+self.b2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.c = round((self.c1*(log10(self.z2)-log10(self.zeq))+self.c2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.a = round((self.a1*(log10(self.z2)-log10(self.zeq))+self.a2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.xk = round((self.xk1*(log10(self.z2)-log10(self.zeq))+self.xk2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.d = round((self.d1*(log10(self.z2)-log10(self.zeq))+self.d2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
elif self.z1 == 8 and self.z2 == 7:
#for i in range(25):
self.b1 = self.b_val_oxy_aborp[self.energy_count]
self.c1 = self.c_val_oxy_aborp[self.energy_count]
self.a1 = self.a_val_oxy_aborp[self.energy_count]
self.xk1 = self.Xk_val_oxy_aborp[self.energy_count]
self.d1 = self.d_val_oxy_aborp[self.energy_count]
self.b2 = self.b_val_nit_aborp[self.energy_count]
self.c2 = self.c_val_nit_aborp[self.energy_count]
self.a2 = self.a_val_nit_aborp[self.energy_count]
self.xk2 = self.Xk_val_nit_aborp[self.energy_count]
self.d2 = self.d_val_nit_aborp[self.energy_count]
self.b = round((self.b1*(log10(self.z2)-log10(self.zeq))+self.b2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 10)
self.c = round((self.c1*(log10(self.z2)-log10(self.zeq))+self.c2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 10)
self.a = round((self.a1*(log10(self.z2)-log10(self.zeq))+self.a2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 10)
self.xk = round((self.xk1*(log10(self.z2)-log10(self.zeq))+self.xk2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 10)
self.d = round((self.d1*(log10(self.z2)-log10(self.zeq))+self.d2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 10)
elif self.z1 == 12 and self.z2 == 11:
self.b1 = self.b_val_mag_aborp[self.energy_count]
self.c1 = self.c_val_mag_aborp[self.energy_count]
self.a1 = self.a_val_mag_aborp[self.energy_count]
self.xk1 = self.Xk_val_mag_aborp[self.energy_count]
self.d1 = self.d_val_mag_aborp[self.energy_count]
self.b2 = self.b_val_sod_aborp[self.energy_count]
self.c2 = self.c_val_sod_aborp[self.energy_count]
self.a2 = self.a_val_sod_aborp[self.energy_count]
self.xk2 = self.Xk_val_sod_aborp[self.energy_count]
self.d2 = self.d_val_sod_aborp[self.energy_count]
self.b = round((self.b1*(log10(self.z2)-log10(self.zeq))+self.b2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 10)
self.c = round((self.c1*(log10(self.z2)-log10(self.zeq))+self.c2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 10)
self.a = round((self.a1*(log10(self.z2)-log10(self.zeq))+self.a2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 10)
self.xk = round((self.xk1*(log10(self.z2)-log10(self.zeq))+self.xk2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 10)
self.d = round((self.d1*(log10(self.z2)-log10(self.zeq))+self.d2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 10)
elif self.z1 == 13 and self.z2 == 12:
self.b1 = self.b_val_alu_aborp[self.energy_count]
self.c1 = self.c_val_alu_aborp[self.energy_count]
self.a1 = self.a_val_alu_aborp[self.energy_count]
self.xk1 = self.Xk_val_alu_aborp[self.energy_count]
self.d1 = self.d_val_alu_aborp[self.energy_count]
self.b2 = self.b_val_mag_aborp[self.energy_count]
self.c2 = self.c_val_mag_aborp[self.energy_count]
self.a2 = self.a_val_mag_aborp[self.energy_count]
self.xk2 = self.Xk_val_mag_aborp[self.energy_count]
self.d2 = self.d_val_mag_aborp[self.energy_count]
self.b = round((self.b1*(log10(self.z2)-log10(self.zeq))+self.b2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.c = round((self.c1*(log10(self.z2)-log10(self.zeq))+self.c2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.a = round((self.a1*(log10(self.z2)-log10(self.zeq))+self.a2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.xk = round((self.xk1*(log10(self.z2)-log10(self.zeq))+self.xk2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.d = round((self.d1*(log10(self.z2)-log10(self.zeq))+self.d2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
elif self.z1 == 14 and self.z2 == 13:
self.b1 = self.b_val_sil_aborp[self.energy_count]
self.c1 = self.c_val_sil_aborp[self.energy_count]
self.a1 = self.a_val_sil_aborp[self.energy_count]
self.xk1 = self.Xk_val_sil_aborp[self.energy_count]
self.d1 = self.d_val_sil_aborp[self.energy_count]
self.b2 = self.b_val_alu_aborp[self.energy_count]
self.c2 = self.c_val_alu_aborp[self.energy_count]
self.a2 = self.a_val_alu_aborp[self.energy_count]
self.xk2 = self.Xk_val_alu_aborp[self.energy_count]
self.d2 = self.d_val_alu_aborp[self.energy_count]
self.b = round((self.b1*(log10(self.z2)-log10(self.zeq))+self.b2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.c = round((self.c1*(log10(self.z2)-log10(self.zeq))+self.c2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.a = round((self.a1*(log10(self.z2)-log10(self.zeq))+self.a2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.xk = round((self.xk1*(log10(self.z2)-log10(self.zeq))+self.xk2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.d = round((self.d1*(log10(self.z2)-log10(self.zeq))+self.d2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
elif self.z1 == 15 and self.z2 == 14:
self.b1 = self.b_val_pho_aborp[self.energy_count]
self.c1 = self.c_val_pho_aborp[self.energy_count]
self.a1 = self.a_val_pho_aborp[self.energy_count]
self.xk1 = self.Xk_val_pho_aborp[self.energy_count]
self.d1 = self.d_val_pho_aborp[self.energy_count]
self.b2 = self.b_val_sil_aborp[self.energy_count]
self.c2 = self.c_val_sil_aborp[self.energy_count]
self.a2 = self.a_val_sil_aborp[self.energy_count]
self.xk2 = self.Xk_val_sil_aborp[self.energy_count]
self.d2 = self.d_val_sil_aborp[self.energy_count]
self.b = round((self.b1*(log10(self.z2)-log10(self.zeq))+self.b2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.c = round((self.c1*(log10(self.z2)-log10(self.zeq))+self.c2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.a = round((self.a1*(log10(self.z2)-log10(self.zeq))+self.a2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.xk = round((self.xk1*(log10(self.z2)-log10(self.zeq))+self.xk2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.d = round((self.d1*(log10(self.z2)-log10(self.zeq))+self.d2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
elif self.z1 == 16 and self.z2 == 15:
self.b1 = self.b_val_sul_aborp[self.energy_count]
self.c1 = self.c_val_sul_aborp[self.energy_count]
self.a1 = self.a_val_sul_aborp[self.energy_count]
self.xk1 = self.Xk_val_sul_aborp[self.energy_count]
self.d1 = self.d_val_sur_aborp[self.energy_count]
self.b2 = self.b_val_pho_aborp[self.energy_count]
self.c2 = self.c_val_pho_aborp[self.energy_count]
self.a2 = self.a_val_pho_aborp[self.energy_count]
self.xk2 = self.Xk_val_pho_aborp[self.energy_count]
self.d2 = self.d_val_pho_aborp[self.energy_count]
self.b = round((self.b1*(log10(self.z2)-log10(self.zeq))+self.b2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.c = round((self.c1*(log10(self.z2)-log10(self.zeq))+self.c2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.a = round((self.a1*(log10(self.z2)-log10(self.zeq))+self.a2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.xk = round((self.xk1*(log10(self.z2)-log10(self.zeq))+self.xk2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.d = round((self.d1*(log10(self.z2)-log10(self.zeq))+self.d2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
elif self.z1 == 18 and self.z2 == 16:
self.b1 = self.b_val_arg_aborp[self.energy_count]
self.c1 = self.c_val_arg_aborp[self.energy_count]
self.a1 = self.a_val_arg_aborp[self.energy_count]
self.xk1 = self.Xk_val_arg_aborp[self.energy_count]
self.d1 = self.d_val_arg_aborp[self.energy_count]
self.b2 = self.b_val_sul_aborp[self.energy_count]
self.c2 = self.c_val_sul_aborp[self.energy_count]
self.a2 = self.a_val_sul_aborp[self.energy_count]
self.xk2 = self.Xk_val_sul_aborp[self.energy_count]
self.d2 = self.d_val_sul_aborp[self.energy_count]
self.b = round((self.b1*(log10(self.z2)-log10(self.zeq))+self.b2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.c = round((self.c1*(log10(self.z2)-log10(self.zeq))+self.c2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.a = round((self.a1*(log10(self.z2)-log10(self.zeq))+self.a2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.xk = round((self.xk1*(log10(self.z2)-log10(self.zeq))+self.xk2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.d = round((self.d1*(log10(self.z2)-log10(self.zeq))+self.d2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
elif self.z1 == 19 and self.z2 == 18:
self.b1 = self.b_val_pot_aborp[self.energy_count]
self.c1 = self.c_val_pot_aborp[self.energy_count]
self.a1 = self.a_val_pot_aborp[self.energy_count]
self.xk1 = self.Xk_val_pot_aborp[self.energy_count]
self.d1 = self.d_val_pot_aborp[self.energy_count]
self.b2 = self.b_val_arg_aborp[self.energy_count]
self.c2 = self.c_val_arg_aborp[self.energy_count]
self.a2 = self.a_val_arg_aborp[self.energy_count]
self.xk2 = self.Xk_val_arg_aborp[self.energy_count]
self.d2 = self.d_val_arg_aborp[self.energy_count]
self.b = round((self.b1*(log10(self.z2)-log10(self.zeq))+self.b2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.c = round((self.c1*(log10(self.z2)-log10(self.zeq))+self.c2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.a = round((self.a1*(log10(self.z2)-log10(self.zeq))+self.a2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.xk = round((self.xk1*(log10(self.z2)-log10(self.zeq))+self.xk2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.d = round((self.d1*(log10(self.z2)-log10(self.zeq))+self.d2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
elif self.z1 == 20 and self.z2 == 19:
self.b1 = self.b_val_cal_aborp[self.energy_count]
self.c1 = self.c_val_cal_aborp[self.energy_count]
self.a1 = self.a_val_cal_aborp[self.energy_count]
self.xk1 = self.Xk_val_cal_aborp[self.energy_count]
self.d1 = self.d_val_cal_aborp[self.energy_count]
self.b2 = self.b_val_pot_aborp[self.energy_count]
self.c2 = self.c_val_pot_aborp[self.energy_count]
self.a2 = self.a_val_pot_aborp[self.energy_count]
self.xk2 = self.Xk_val_pot_aborp[self.energy_count]
self.d2 = self.d_val_pot_aborp[self.energy_count]
self.b = round((self.b1*(log10(self.z2)-log10(self.zeq))+self.b2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.c = round((self.c1*(log10(self.z2)-log10(self.zeq))+self.c2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.a = round((self.a1*(log10(self.z2)-log10(self.zeq))+self.a2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.xk = round((self.xk1*(log10(self.z2)-log10(self.zeq))+self.xk2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
self.d = round((self.d1*(log10(self.z2)-log10(self.zeq))+self.d2*(log10(self.zeq)-log10(self.z1)))/(log10(self.z2)-log10(self.z1)), 6)
elif self.z1 == 26 and self.z2 == 20:
self.b1 = self.b_val_iron_aborp[self.energy_count]
self.c1 = self.c_val_iron_aborp[self.energy_count]
self.a1 = self.a_val_iron_aborp[self.energy_count]
self.xk1 = self.Xk_val_iron_aborp[self.energy_count]
self.d1 = self.d_val_iron_aborp[self.energy_count]
self.b2 = self.b_val_cal_aborp[self.energy_count]
self.c2 = self.c_val_cal_aborp[self.energy_count]
self.a2 = | |
representation, padded to 8 characters. """
# return '{0:0{1}X}'.format( struct.unpack('<I', struct.pack( '<f', floatValue ))[0], 8 )
def toInt( input ): # Converts a 1, 2, or 4 bytes object or bytearray to an integer.
try:
byteLength = len( input )
if ( byteLength == 1 ): return struct.unpack( '>B', input )[0] # big-endian unsigned char (1 byte)
elif ( byteLength == 2 ): return struct.unpack( '>H', input )[0] # big-endian unsigned short (2 bytes)
else: return struct.unpack( '>I', input )[0] # big-endian unsigned int (4 bytes)
except:
raise Exception( '\ntoInt was not able to convert the ' + str(type(input))+' type' )
def toBytes( input, byteLength=4, cType='' ): # Converts an int to a bytes object
if not cType: # Assume a big-endian unsigned value of some byte length
if byteLength == 1: cType = '>B' # big-endian unsigned char (1 byte)
elif byteLength == 2: cType = '>H' # big-endian unsigned short (2 bytes)
elif byteLength == 4: cType = '>I' # big-endian unsigned int (4 bytes)
else:
raise Exception( '\ntoBytes was not able to convert the ' + str(type(input))+' type' )
return struct.pack( cType, input )
# Conversion solutions:
# int -> bytes objects struct.pack( )
# byte string -> int: struct.unpack( )
# byte string -> hex string .encode( 'hex' )
# bytearray -> hex string: hexlify( input )
# hex string -> bytearray: bytearray.fromhex( input )
# text string -> bytearray init bytearray, then use .extend( string ) method on it
# Note that a file object's .read() method returns a byte string of unknown encoding, which will be
# interpreted however it happens to be displayed. It should be decoded to a known encoding before being operated on.
#
# Note 2: In python 2, bytes objects are an alias for str objects; they are not like bytearrays.
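# Illustrative round-trips for the table above (example values only):
#   struct.pack( '>I', 0xDEADBEEF ) -> '\xde\xad\xbe\xef'
#   struct.unpack( '>I', '\xde\xad\xbe\xef' )[0] -> 3735928559 (0xDEADBEEF)
#   hexlify( bytearray(b'\xde\xad') ) -> 'dead'
#   bytearray.fromhex( 'dead' ) -> bytearray(b'\xde\xad')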
def validOffset( offset ): # Accepts a string.
offset = offset.replace( '0x', '' )
if offset == '': return False
return all(char in hexdigits for char in offset) # Returns Boolean
def grammarfyList( theList ): # For example, the list [apple, pear, banana, melon] becomes the string 'apple, pear, banana, and melon'.
if len(theList) == 1: return str(theList[0])
elif len(theList) == 2: return str(theList[0]) + ' and ' + str(theList[1])
else:
string = ', '.join( theList )
indexOfLastComma = string.rfind(',')
return string[:indexOfLastComma] + ', and ' + string[indexOfLastComma + 2:]
def msg( *args ):
if len(args) > 1: tkMessageBox.showinfo( message=args[0], title=args[-1] )
else: tkMessageBox.showinfo( message=args[0] )
def copyToClipboard( text ):
Gui.root.clipboard_clear()
Gui.root.clipboard_append( text )
def humansize(nbytes): # Converts a file size in bytes to a human-readable string.
suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
if nbytes == 0: return '0 B'
i = 0
while nbytes >= 1024 and i < len(suffixes)-1:
nbytes /= 1024.
i += 1
f = ('%.2f' % nbytes).rstrip('0').rstrip('.')
return '%s %s' % (f, suffixes[i])
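# Examples (illustrative): humansize( 0 ) -> '0 B'; humansize( 1536 ) -> '1.5 KB';
# humansize( 1048576 ) -> '1 MB'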
def createFolders( folderPath ):
try:
os.makedirs( folderPath )
# Primitive failsafe to prevent race condition
attempt = 0
while not os.path.exists( folderPath ):
time.sleep( .3 )
if attempt > 10:
raise Exception( 'Unable to create folder: ' + folderPath )
attempt += 1
except OSError as error: # Python >2.5
if error.errno == errno.EEXIST and os.path.isdir( folderPath ):
pass
else: raise
def validHex( offset ): # Accepts a string.
offset = offset.replace( '0x', '' )
if offset == '': return False
return all( char in hexdigits for char in offset ) # Returns Boolean
def rgb2hex( color ): # Input can be RGB or RGBA, but output will still be RGB
return '#{:02x}{:02x}{:02x}'.format( color[0], color[1], color[2])
def rgb2hsv( color ):
r, g, b, _ = color
r, g, b = r/255.0, g/255.0, b/255.0
mx = max(r, g, b)
mn = min(r, g, b)
df = mx-mn
if mx == mn: h = 0
elif mx == r: h = (60 * ((g-b)/df) + 360) % 360
elif mx == g: h = (60 * ((b-r)/df) + 120) % 360
elif mx == b: h = (60 * ((r-g)/df) + 240) % 360
if mx == 0: s = 0
else: s = df/mx
v = mx
return ( h, s, v )
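# Example (illustrative): rgb2hsv( (255, 0, 0, 255) ) -> (0.0, 1.0, 1.0). The input is
# expected to be RGBA; the alpha channel is ignored and hue is returned in degrees.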
def hex2rgb( inputStr ): # Expects RRGGBBAA
""" Returns a 4 color channel iterable of (r,g,b,a) """
inputStr = inputStr.replace( '#', '' )
channelsList = []
parsingError = False
if len( inputStr ) % 2 != 0: # Checks whether the string is an odd number of characters
parsingError = True
else:
for i in xrange( 0, len(inputStr), 2 ): # Iterate by 2 over the length of the input string
try:
byte = inputStr[i:i+2]
newInt = int( byte, 16 )
if newInt > -1 and newInt < 256: channelsList.append( newInt )
except:
parsingError = True
break
else: # Got through the above loop with no break. Still got one more check.
#if len( channelsList ) == 3: channelsList.append( 255 ) # assume the entries are RGB, and add alpha
if len( channelsList ) != 4: parsingError = True
return ( tuple(channelsList), parsingError )
def getLuminance( hexColor ):
r, g, b, a = hex2rgb( hexColor )[0]
return ( r*0.299 + g*0.587 + b*0.114 ) * a/255
#return ( r+r + g+g+g + b )/6 * a/255 # a quicker but less accurate calculation
#return math.sqrt( .299 * r**2 + .587 * g**2 + .114 * b**2 ) *a/255 / 255
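# Example (illustrative): getLuminance( 'FF000080' ) -> ~38.3
# (pure red at ~50% alpha: 255 * 0.299, scaled by 128/255)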
def findBytes( bytesRange, target ): # Searches a bytearray for a given (target) set of bytes, and returns the location (index)
targetLength = len( target )
for index, _ in enumerate( bytesRange ):
if bytesRange[index:index+targetLength] == target: return index
else: return -1
def cmdChannel( command, standardInput=None, shell=True ):
""" IPC (Inter-Process Communication) to command line.
shell=True gives access to all shell features.
creationFlags=0x08000000 prevents creation of a console for the process. """
process = subprocess.Popen( command, shell=shell, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, creationflags=0x08000000 )
stdoutData, stderrData = process.communicate( input=standardInput )
if process.returncode == 0:
return ( process.returncode, stdoutData )
else:
print 'IPC error (exit code {}):'.format( process.returncode )
print stderrData
return ( process.returncode, stderrData )
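# Usage sketch (the command string is an example, not from this program):
#   exitCode, output = cmdChannel( 'ver' )
# On Windows, creationflags=0x08000000 is CREATE_NO_WINDOW, which is what suppresses the console.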
def replaceHex( hexData, offset, newHex ): # Input takes a string, int, and string, respectively. todo: finish deprecating this
offset = offset * 2 # Doubled to count by nibbles rather than bytes, since the data is just a string.
codeEndPoint = offset + len(newHex)
return hexData[:offset] + newHex + hexData[codeEndPoint:]
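# Example (illustrative): replaceHex( '00112233', 1, 'FF' ) -> '00FF2233'
# (offset is a byte offset, doubled internally to index the nibble string)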
#=============================#
# ~ ~ Opening & Importing ~ ~ #
#=============================#
def openFolder( folderPath ):
normedPath = os.path.abspath( folderPath ) # Turns relative to absolute paths, and normalizes them (switches / for \, etc.)
if os.path.exists( normedPath ):
os.startfile( normedPath )
else:
msg( 'Could not find this folder: \n\n' + normedPath )
def loadSettings():
# Check for user defined settings / persistent memory.
if os.path.exists( settingsFile ): settings.read( settingsFile )
# Create the individual sections if they don't already exist.
if not settings.has_section('General Settings'): settings.add_section('General Settings')
if not settings.has_section('Texture Search Filters'): settings.add_section('Texture Search Filters')
# Set default settings if they were not loaded from the settings file, and validate the rest
for settingName in generalSettingsDefaults:
# If a default setting was not found in the settings file, set it to its default value
if not settings.has_option( 'General Settings', settingName ):
settings.set( 'General Settings', settingName, generalSettingsDefaults[settingName] )
# If the setting is present and should be a number, validate it.
elif settingName == 'maxFilesToRemember' or settingName == 'globalFontSize' or settingName == 'paddingBetweenFiles':
value = settings.get( 'General Settings', settingName )
if settingName == 'maxFilesToRemember' or settingName == 'globalFontSize':
if isNaN( value ):
msg( 'The value for the saved setting "' + settingName + '" does not appear to be a number. '
'The default value of ' + generalSettingsDefaults[settingName] + ' will be used instead.',
'Error Loading Settings' )
settings.set( 'General Settings', settingName, generalSettingsDefaults[settingName] )
elif settingName == 'paddingBetweenFiles' and value.lower() != 'auto':
try: int( value, 16 )
except:
msg( 'The value for the saved setting "paddingBetweenFiles" is invalid. '
'The value should be a hexadecimal number, or "auto". The default value '
'of ' + generalSettingsDefaults[settingName] + ' will be used instead.',
'Error Loading Settings' )
settings.set( 'General Settings', settingName, generalSettingsDefaults[settingName] )
# Convert the filter string to its numeral representation.
if settingName == 'downscalingFilter':
validFilters = ( 'nearest', 'lanczos', 'bilinear', 'bicubic' )
currentFilter = settings.get( 'General Settings', 'downscalingFilter' )
if not currentFilter in validFilters: # Filter string unrecognized
msg( "The given downscaling filter is invalid; valid options are 'nearest', 'lanczos', 'bilinear', or 'bicubic'. "
'The default value of ' + generalSettingsDefaults[settingName] + ' will be used instead.', 'Error Loading Settings' )
settings.set( 'General Settings', settingName, generalSettingsDefaults[settingName] )
for settingName in imageFiltersDefaults:
if not settings.has_option('Texture Search Filters', settingName):
settings.set( 'Texture Search Filters', settingName, imageFiltersDefaults[settingName] )
else:
# Perform some validation on the setting's value (by making sure there is a comparator and separator)
value = settings.get( 'Texture Search Filters', settingName )
if '|' not | |
<filename>src/Components/misc/obs_aod/ABC/abc_c6_aux.py
"""
This module contains auxiliary functions used by the MODIS Collection 6 Neural Net Retrieval.
<NAME>, 2016.
"""
import os, sys
from matplotlib.pyplot import cm, imshow, plot, figure
from matplotlib.pyplot import xlabel, ylabel, title, grid, savefig, legend
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
import matplotlib.patches as mpatches
from numpy import c_ as cat
from numpy import random, sort, pi, load, cos, log, std, exp
from numpy import reshape, arange, ones, zeros, interp, sqrt
from numpy import meshgrid, concatenate, squeeze
import numpy as np
import itertools
from sklearn.linear_model import LinearRegression
from glob import glob
from scipy import stats
from nn import _plotKDE
#---------------------------------------------------------------------
def boxplot_imshow(data,plottype,blocks,masterlist,title,filename,
vseps=None, yrange=None,ylabel=None):
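""" Plot per-combination statistics (top panel) above an imshow of the variable
membership matrix (bottom panel).
data:       (nsamples, ncomb) array; each column is summarized as a box, point or error bar
plottype:   'box', 'scatter' or 'errorbar'
blocks:     (nvars, ncomb) matrix marking which variables enter each combination
masterlist: variable names used as y tick labels of the blocks panel
title, filename: figure title and output PNG path
vseps:      optional x positions of vertical separators between groups of combinations
yrange:     optional y limits for the top panel
ylabel:     optional y axis label for the top panel
"""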
nvars,ncomb = blocks.shape
fig = plt.figure()
ax = plt.subplot(211)
params = {'mathtext.default': 'regular' }
plt.rcParams.update(params)
if plottype == 'box':
bp = plt.boxplot(data,showfliers=False,showbox=True,whis='range',
whiskerprops={'linestyle':'-'})
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.plot([0,ncomb+0.5],[np.median(data[:,0]),np.median(data[:,0])],color='b',ls='--',zorder=5)
elif plottype == 'scatter':
scat = np.mean(data,axis=0)
plt.plot(np.arange(ncomb)+1,scat,'rD')
plt.plot([0,ncomb+0.5],[np.mean(data[:,0]),np.mean(data[:,0])],color='b',ls='--',zorder=5,markersize=5)
elif plottype == 'errorbar':
scat = np.mean(data,axis=0)
yerr_max = np.abs(np.max(data,axis=0)-scat)
yerr_min = np.abs(np.min(data,axis=0)-scat)
plt.errorbar(np.arange(ncomb)+1,scat,yerr=[yerr_min,yerr_max], ls='none',marker='D',color='r',ecolor='k',markersize=5)
plt.plot([0,ncomb+0.5],[np.mean(data[:,0]),np.mean(data[:,0])],color='b',ls='--',zorder=5)
ax.set_xlim(0.5,ncomb+0.5)
ax.set_xticks(np.arange(ncomb)+0.5)
ax.set_xticklabels([])
if yrange is not None:
ax.set_ylim(yrange)
yticks = ax.yaxis.get_major_ticks()
yticks[0].set_visible(False)
# Minor Y Tick Marks
# ylim = ax.get_ylim()
# dy = (ylim[1] - ylim[0])/(len(yticks)-1)
# minorLocator = MultipleLocator(0.5*dy)
# ax.yaxis.set_minor_locator(minorLocator)
if ylabel is not None:
ax.set_ylabel(ylabel,fontsize=14)
# Make vertical lines to separate bins of number of inputvars
if vseps is not None:
for v in vseps:
ax.plot([v,v],np.array(ax.get_ylim()),'k-')
plt.title(title)
#ax.minorticks_on()
plt.grid(True,axis='y',which='both',color='0.5',linestyle='-')
ax.set_axisbelow(True) #Grid lines go to the back.
plt.tick_params(
axis='y', # changes apply to the y-axis
which='major', # major ticks are affected
direction='out',
right='off')
axblocks = plt.subplot(212)
plt.imshow(blocks,interpolation='none',aspect='auto')
axblocks.set_yticks(np.arange(nvars))
axblocks.set_yticklabels(masterlist)
axblocks.set_yticks(np.arange(nvars)+0.5,minor=True)
axblocks.set_xticks(np.arange(ncomb)+0.5,minor=True)
# plt.draw() # this is needed because get_window_extent needs a renderer to work
# yax = axblocks.get_yaxis()
# # find the maximum width of the label on the major ticks
# pad = max(T.label.get_window_extent().width for T in yax.majorTicks)
# yax.set_tick_params(pad=pad)
plt.tick_params(
axis='both', # changes apply to both
which='major', # major ticks are affected
bottom='off', # ticks along the bottom edge are off
top='off', # ticks along the top edge are off
left='off',
right='off',
labelbottom='off') # labels along the bottom edge are off
plt.grid(True,which='minor',color='0.5',linestyle='-')
axblocks.set_axisbelow(True) #Grid lines go to the back.
xlim = axblocks.get_xlim()
if vseps is not None:
for v in vseps:
axblocks.plot([v-1,v-1],np.array(axblocks.get_ylim()),'k-')
axblocks.set_xlim(xlim)
plt.tight_layout()
plt.subplots_adjust(right=0.99,hspace=0.001)
fig.set_size_inches(10.5, 5.5)
    plt.savefig(filename, transparent=True, dpi=300)
#plt.show()
plt.close(fig)
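# A minimal usage sketch for boxplot_imshow (hypothetical toy data, not from the
# actual NNR experiments): `data` holds one statistic per repetition and per input
# combination, while `blocks` marks which candidate inputs enter each combination.
#
#   nvars, ncomb, nrep = 4, 6, 10
#   data = np.random.randn(nrep, ncomb)
#   blocks = np.random.randint(0, 2, size=(nvars, ncomb))
#   names = np.array(['var%d' % i for i in range(nvars)])
#   boxplot_imshow(data, 'box', blocks, names, 'Toy demo', '/tmp/demo.png',
#                  yrange=[-3, 3], ylabel='statistic')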
# ---
def SummarizeCombinations(mxd,Input_nnr,yrange=None,sortname='rmse'):
"""
    Create summary plots (slope, R, intercept, RMSE, mean bias, and mean absolute
    error) comparing every tested combination of neural-net input variables against
    the original MODIS retrieval.
"""
outdir = mxd.plotdir
if not os.path.exists(outdir):
os.makedirs(outdir)
# Flatten Input_nnr into one list
# -------------------------------
Input = ()
for i in Input_nnr:
if type(i) is list:
Input = Input + tuple(i)
else:
Input = Input + (i,)
input_nnr = list(Input)
expid = '.'.join(input_nnr)
retrieval = mxd.retrieval
ident = mxd.ident
nvars = len(input_nnr)
ngroups = len(Input_nnr)
ncomb = len(mxd.comblist)
blocks = np.zeros([nvars,ncomb])
masterlist = np.array(tuple(input_nnr))
for c,comb in enumerate(mxd.comblist):
blocks[:,c] = np.array([v == masterlist for v in comb]).sum(axis=0)
blocks = np.insert(blocks,0,np.zeros(nvars),axis=1) #for the original MODIS data
newlist = []
for var in masterlist:
if mxd.sat == 'Terra':
try:
newlist.append(dict(MODVARNAMES,**VARNAMES)[var])
except:
newlist.append(var)
else:
try:
newlist.append(dict(MYDVARNAMES,**VARNAMES)[var])
except:
newlist.append(var)
masterlist = np.array(newlist)
#nblocks = blocks.sum(axis=0)
nblocks = [len(group) for group in mxd.combgroups]
nblocks.insert(0,0)
print "MASTERLIST", masterlist
#--------------
# Default sort by mean RMSE
#------------------
sortvalue = mxd.nnr.__dict__[sortname]
sortvalue = np.insert(sortvalue,0,np.array(mxd.orig.__dict__[sortname][:,0]),axis=1)
sortmetric = np.median(sortvalue, axis=0)
isort = np.empty(0,dtype='int')
vseps = np.empty(0,dtype='int')
for i in np.sort(np.unique(nblocks)):
istart = np.where(nblocks == i)[0].min()
vseps = np.append(vseps,istart+0.5)
isort = np.append(isort,istart + np.argsort(sortmetric[nblocks == i]))
blocks = blocks[:,isort]
for i in np.arange(nvars):
blocks[i,blocks[i,:] == 1] = i+1
blocks = np.ma.masked_array(blocks)
    blocks.mask = (blocks == 0)
def getplotdata(mxd,varname):
vardata = mxd.nnr.__dict__[varname]
vardata = np.insert(vardata,0,np.array(mxd.orig.__dict__[varname][:,0]),axis=1)
vardata = vardata[:,isort]
if vardata.shape[0] == 1:
vardata = np.append(vardata,vardata,axis=0)
return vardata
if mxd.nnr.slope.shape[0] == 1:
plottype = 'scatter'
elif mxd.nnr.slope.shape[0] >= 5:
plottype = 'box'
else:
plottype = 'errorbar'
boxplot_imshow(getplotdata(mxd,'slope'),plottype,
blocks,masterlist,
'Slope',
'{}/Slope.{}.{}.png'.format(outdir,retrieval,ident),
vseps = vseps,
yrange=[0,1])
boxplot_imshow(getplotdata(mxd,'R'),plottype,
blocks,masterlist,
'R',
'{}/R.{}.{}.png'.format(outdir,retrieval,ident),
vseps = vseps,
yrange=[0,1])
boxplot_imshow(getplotdata(mxd,'intercept'),plottype,
blocks,masterlist,
'Intercept',
'{}/Intercept.{}.{}.png'.format(outdir,retrieval,ident),
vseps = vseps,
yrange=yrange)
boxplot_imshow(getplotdata(mxd,'rmse'),plottype,
blocks,masterlist,
'RMSE',
'{}/RMSE.{}.{}.png'.format(outdir,retrieval,ident),
vseps = vseps,
yrange=yrange)
boxplot_imshow(getplotdata(mxd,'me'),plottype,
blocks,masterlist,
'Mean Bias',
'{}/ME.{}.{}.png'.format(outdir,retrieval,ident),
vseps = vseps,
yrange=yrange)
boxplot_imshow(getplotdata(mxd,'mae'),plottype,
blocks,masterlist,
'Mean Absolute Error',
'{}/MAE.{}.{}.png'.format(outdir,retrieval,ident),
vseps = vseps,
yrange=yrange)
#--------------------------------------------------------------------------------------
def get_Iquartiles(mxd,I=None,var=None):
if I is None:
Irange = arange(mxd.nobs)
I = [Irange[mxd.iValid]]
I1 = []
I2 = []
I3 = []
I4 = []
for iTest in I:
if var is None:
targets = mxd.getTargets(iTest)
else:
targets = mxd.__dict__[var][iTest].squeeze()
if len(targets.shape) > 1:
targets = targets[:,0]
p25 = np.percentile(targets,25)
p50 = np.percentile(targets,50)
p75 = np.percentile(targets,75)
p100 = targets.max()
I1.append(iTest[targets <= p25])
I2.append(iTest[(targets>p25) & (targets<=p50)])
I3.append(iTest[(targets>p50) & (targets<=p75)])
I4.append(iTest[(targets>p75)])
return I1, I2, I3, I4
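# Hypothetical usage sketch (variable names are illustrative): split the validation
# indices into target quartiles and make diagnostic plots for each subset.
#
#   I1, I2, I3, I4 = get_Iquartiles(mxd)
#   for tag, Iq in zip(('q1', 'q2', 'q3', 'q4'), (I1, I2, I3, I4)):
#       make_plots(mxd, expid, ident + '_' + tag, I=Iq[0])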
#---------------------------------------------------------------------
def get_Ispecies(mxd,I=None):
if I is None:
Irange = arange(mxd.nobs)
I = [Irange[mxd.iValid]]
Ifdu = []
Ifss = []
Ifcc = []
Ifsu = []
for iTest in I:
fdu = mxd.fdu[iTest].squeeze()
fss = mxd.fss[iTest].squeeze()
fcc = mxd.fcc[iTest].squeeze()
fsu = mxd.fsu[iTest].squeeze()
Ifdu.append(iTest[fdu > 0.5])
Ifss.append(iTest[fss > 0.5])
Ifcc.append(iTest[fcc > 0.5])
Ifsu.append(iTest[fsu > 0.5])
return Ifdu, Ifss, Ifcc, Ifsu
#---------------------------------------------------------------------
def get_ImRef(mxd,refName,refmin,refmax,I=None):
if I is None:
Irange = arange(mxd.nobs)
I = [Irange[mxd.iValid]]
ImRef = []
for iTest in I:
mRef = mxd.__dict__[refName][iTest].squeeze()
if refmax is None:
ImRef.append(iTest[(mRef >= refmin)])
else:
ImRef.append(iTest[(mRef >= refmin) & (mRef < refmax)])
return ImRef
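# Hypothetical usage sketch: restrict the validation sample to a range of any stored
# quantity, e.g. dark scenes, assuming the object carries an `mRef470` attribute:
#
#   ImRef = get_ImRef(mxd, 'mRef470', 0.0, 0.1)
#   make_plots(mxd, expid, ident + '_dark', I=ImRef[0])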
#---------------------------------------------------------------------
def make_plots(mxd,expid,ident,I=None):
outdir = mxd.plotdir
if not os.path.exists(outdir):
os.makedirs(outdir)
if I is None:
I = ones(mxd.lon.shape).astype(bool)
# Plot KDE of corrected AOD
# -------------------------
# mxd.plotKDE(I=I,figfile=expid+"."+ident+"_kde-"+mxd.Target[0][1:]+"-corrected.png")
targets = mxd.getTargets(I).squeeze()
results = mxd.eval(I).squeeze()
_plotKDE(targets,results,y_label='NNR')
title("Log("+mxd.Target[0][1:]+"+0.01)- "+ident)
savefig(outdir+"/"+expid+"."+ident+"_kde-"+mxd.Target[0][1:]+'-corrected.png')
# Plot KDE of uncorrected AOD
# ---------------------------
original = log(mxd.mTau550[I]+0.01)
_plotKDE(targets,original,y_label='Original MODIS')
title("Log("+mxd.Target[0][1:]+"+0.01)- "+ident)
savefig(outdir+"/"+expid+"."+ident+"_kde-"+mxd.Target[0][1:]+'.png')
# Scatter diagram for testing
# ---------------------------
mxd.plotScat(I=I,figfile=outdir+"/"+expid+"."+ident+"_scat-"+mxd.Target[0][1:]+'.png')
#---------------------------------------------------------------------
def make_error_pdfs(mxd,Input,expid,ident,K=None,I=None,Title=None,netfileRoot=None,
emin=-1.5,emax=2.5):
outdir = mxd.plotdir + '/{}/'.format('.'.join(Input))
if not os.path.exists(outdir):
os.makedirs(outdir)
if I is None:
I = [mxd.iValid]
# Plot PDF of Error
# -------------------------
if K is None:
targets = [mxd.getTargets(I[0])]
if len(targets[0].shape) > 1:
targets[0] = targets[0][:,0]
# results = [mxd.eval(I)[:,0]]
inputs = mxd.getInputs(I[0],Input=Input)
knet = mxd.loadnet(netfileRoot)
out = knet(inputs)[:,0]
results = [out]
original = [mxd.mTau550[I[0]]]
if mxd.laod:
original[0] = log(original[0] + 0.01)
mod04RMSE = rmse(original[0],targets[0])
nnrRMSE = rmse(results[0],targets[0])
else:
targets = []
original = []
results = []
mod04RMSE = []
nnrRMSE = []
for k,iTest in enumerate(I):
# Irange = arange(mxd.nobs)
# iValid = Irange[mxd.iValid]
# mxd.iTest = iValid[iTest]
mxd.iTest = iTest
targets.append(mxd.getTargets(mxd.iTest))
if len(targets[k].shape) > 1:
targets[k] = targets[k][:,0]
original.append(mxd.mTau550[mxd.iTest])
inputs = mxd.getInputs(mxd.iTest,Input=Input)
knet = mxd.loadnet(netfileRoot+'.k={}_Tau.net'.format(str(k+1)))
out = knet(inputs)[:,0]
results.append(out)
if mxd.laod:
original[k] = log(original[k] + 0.01)
mod04RMSE.append(rmse(original[k],targets[k]))
nnrRMSE.append(rmse(results[k],targets[k]))
print 'mod04RMSE',mod04RMSE
print 'nnrRMSE',nnrRMSE
mod04RMSE = np.mean(mod04RMSE)
nnrRMSE = np.mean(nnrRMSE)
eorig = []
ecorr = []
for o,t,r in zip(original,targets,results):
eorig.append(o - t)
ecorr.append(r - t)
if emax is None:
emax = np.array([[e.max() for e in eorig],[e.max() for e in ecorr]]).max()
if emin is None:
emin = np.array([[e.min() for e in eorig],[e.min() for e in ecorr]]).min()
nbins = 100
corrected = []
orig = []
x = np.linspace(emin,emax,nbins+1)
xcen = x[:-1] + 0.5*(x[1:] - x[:-1])
if K is None:
# cc, x = np.histogram(ecorr[0],bins=np.linspace(emin,emax,nbins+1),density=True)
        # oo, x = np.histogram(eorig[0],bins=np.linspace(emin,emax,nbins+1),density=True)
kernel = stats.gaussian_kde(ecorr[0])
cc = kernel(xcen)
kernel = stats.gaussian_kde(eorig[0])
oo = kernel(xcen)
corrected.append(cc)
orig.append(oo)
else:
for k,iTest in enumerate(I):
# cc, x = np.histogram(ecorr[k],bins=np.linspace(emin,emax,nbins+1),density=True)
# oo, x = np.histogram(eorig[k],bins=np.linspace(emin,emax,nbins+1),density=True)
kernel = stats.gaussian_kde(ecorr[k])
cc = kernel(xcen)
kernel = stats.gaussian_kde(eorig[k])
oo = kernel(xcen)
corrected.append(cc)
orig.append(oo)
corrected = np.array(corrected).T
orig = np.array(orig).T
if K is not None:
xcen = np.tile(xcen,(K,1)).T
fig = plt.figure()
ax = plt.subplot(111)
ax.plot(xcen,orig,color='k')
ax.plot(xcen,corrected,color='r')
ax.set_xlim(emin,emax)
if mxd.surface == 'ocean':
ax.set_ylim(0,3.5)
else:
ax.set_ylim(0,2.0)
orig_patch = mpatches.Patch(color='k', label='MOD04 RMSE={:1.2F}'.format(mod04RMSE))
corrected_patch = mpatches.Patch(color='r', label='NNR RMSE={:1.2F}'.format(nnrRMSE) )
ax.legend(handles=[orig_patch,corrected_patch])
plt.grid(True, which='major',axis='both',color='0.50',linestyle='-')
if Title is None:
title("Error Log("+mxd.Target[0][1:]+"+0.01)")
else:
title(Title)
savefig(outdir+"/error_pdf-"+expid+"."+ident+"-"+mxd.Target[0][1:]+'.png')
plt.close(fig)
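# The error PDFs above are estimated with a Gaussian kernel density estimate rather
# than a histogram. A minimal standalone sketch of that step (placeholder arrays,
# not the NNR retrievals):
#
#   err = nnr_values - aeronet_values        # 1-d error array in log(AOD + 0.01) space
#   kernel = stats.gaussian_kde(err)
#   x = np.linspace(err.min(), err.max(), 101)
#   plt.plot(x, kernel(x), 'r-')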
#---------------------------------------------------------------------
#---------------------------------------------------------------------
def make_error_pdfs_int(mxd,Input,expid,ident,K=None,I=None,Title=None,netfileRoot=None,
emin=-1.5,emax=2.5):
outdir = mxd.plotdir + '/{}/'.format('.'.join(Input))
if not os.path.exists(outdir):
os.makedirs(outdir)
if I is None:
I = [mxd.iValid]
# Plot PDF of Error
# -------------------------
if K is None:
targets = [mxd.getTargets(I[0])]
if len(targets[0].shape) > 1:
targets[0] = targets[0][:,0]
# results = [mxd.eval(I)[:,0]]
inputs = mxd.getInputs(I[0],Input=Input)
knet = mxd.loadnet(netfileRoot)
out = knet(inputs)[:,0]
results = [out]
original = [mxd.mTau550[I[0]]]
dboriginal = [mxd.dbmTau550[I[0]]]
if mxd.laod:
original[0] = log(original[0] + 0.01)
dboriginal[0] = log(dboriginal[0] + 0.01)
mod04RMSE = rmse(original[0],targets[0])
dbmod04RMSE = rmse(dboriginal[0],targets[0])
nnrRMSE = rmse(results[0],targets[0])
else:
| |
# -*- coding: utf-8 -*-
#
# Author: <NAME> <<EMAIL>>
#
# Array utilities
from sklearn.utils import validation as skval
import numpy as np
import pandas as pd
from ..compat import DTYPE
from ._array import C_intgrt_vec
__all__ = [
'as_series',
'c',
'check_endog',
'check_exog',
'diff',
'diff_inv',
'is_iterable'
]
def as_series(x, **kwargs):
"""Cast as pandas Series.
Cast an iterable to a Pandas Series object. Note that the index
will simply be a positional ``arange`` and cannot be set in this
function.
Parameters
----------
x : array-like, shape=(n_samples,)
        The 1d array to cast to a pandas ``Series``.
Examples
--------
>>> as_series([1, 2, 3])
0 1
1 2
2 3
dtype: int64
>>> as_series(as_series((1, 2, 3)))
0 1
1 2
2 3
dtype: int64
>>> import pandas as pd
>>> as_series(pd.Series([4, 5, 6], index=['a', 'b', 'c']))
a 4
b 5
c 6
dtype: int64
Returns
-------
s : pd.Series
A pandas Series object.
"""
if isinstance(x, pd.Series):
return x
return pd.Series(skval.column_or_1d(x), **kwargs)
def c(*args):
r"""Imitates the ``c`` function from R.
Since this whole library is aimed at re-creating in
Python what R has already done so well, the ``c`` function was created to
wrap ``numpy.concatenate`` and mimic the R functionality. Similar to R,
this works with scalars, iterables, and any mix therein.
Note that using the ``c`` function on multi-nested lists or iterables
will fail!
Examples
--------
Using ``c`` with varargs will yield a single array:
>>> c(1, 2, 3, 4)
array([1, 2, 3, 4])
Using ``c`` with nested lists and scalars will also yield a single array:
>>> c([1, 2], 4, c(5, 4))
array([1, 2, 4, 5, 4])
However, using ``c`` with multi-level lists will fail!
>>> c([1, 2, 3], [[1, 2]]) # doctest: +SKIP
ValueError: all the input arrays must have same number of dimensions
References
----------
.. [1] https://stat.ethz.ch/R-manual/R-devel/library/base/html/c.html
"""
# R returns NULL for this
if not args:
return None
# just an array of len 1
if len(args) == 1:
element = args[0]
# if it's iterable, make it an array
if is_iterable(element):
return np.asarray(element)
# otherwise it's not iterable, put it in an array
return np.asarray([element])
# np.concat all. This can be slow, as noted by numerous threads on
# numpy concat efficiency, however an alternative using recursive
# yields was tested and performed far worse:
#
# >>> def timeit(func, ntimes, *args):
# ... times = []
# ... for i in range(ntimes):
# ... start = time.time()
# ... func(*args)
# ... times.append(time.time() - start)
# ... arr = np.asarray(times)
# ... print("%s (%i times) - Mean: %.5f sec, "
# ... "Min: %.5f sec, Max: %.5f" % (func.__name__, ntimes,
# ... arr.mean(), arr.min(),
# ... arr.max()))
# >>> y = [np.arange(10000), range(500), (1000,), 100, np.arange(50000)]
# >>> timeit(c1, 100, *y)
# c1 (100 times) - Mean: 0.00009 sec, Min: 0.00006 sec, Max: 0.00065
# >>> timeit(c2, 100, *y)
# c2 (100 times) - Mean: 0.08708 sec, Min: 0.08273 sec, Max: 0.10115
#
# So we stick with c1, which is this variant.
return np.concatenate([a if is_iterable(a) else [a] for a in args])
def check_endog(
y,
dtype=DTYPE,
copy=True,
force_all_finite=False,
preserve_series=True,
):
"""Wrapper for ``check_array`` and ``column_or_1d`` from sklearn
Parameters
----------
y : array-like, shape=(n_samples,)
The 1d endogenous array.
dtype : string, type or None (default=np.float64)
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
    copy : bool, optional (default=True)
Whether a forced copy will be triggered. If copy=False, a copy might
still be triggered by a conversion.
force_all_finite : bool, optional (default=False)
Whether to raise an error on np.inf and np.nan in an array. The
possibilities are:
- True: Force all values of array to be finite.
- False: accept both np.inf and np.nan in array.
preserve_series : bool, optional
Whether to preserve a ``pd.Series`` object. Will also attempt to
squeeze a dataframe into a ``pd.Series``.
Returns
-------
y : np.ndarray or pd.Series, shape=(n_samples,)
        A 1d numpy ndarray, or a pandas Series when ``preserve_series`` is True
        and the input was a Series or DataFrame.
"""
endog = skval.check_array(
y,
ensure_2d=False,
force_all_finite=force_all_finite,
copy=copy,
dtype=dtype,
)
endog = skval.column_or_1d(endog)
if not preserve_series:
return endog
# possibly restore index information, if it was present, assigning
# checked/casted values back into the series
if isinstance(y, pd.DataFrame):
y = y.squeeze() # dtype: pd.Series
if isinstance(y, pd.Series):
endog = pd.Series(endog, index=y.index)
return endog
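# A minimal usage sketch for ``check_endog`` (values are illustrative):
#
#   >>> import pandas as pd
#   >>> check_endog(pd.Series([1.0, 2.0, 3.0], index=['a', 'b', 'c']))
#   a    1.0
#   b    2.0
#   c    3.0
#   dtype: float64
#
# With ``preserve_series=False`` the same input comes back as a plain 1-d
# ``np.ndarray`` of the requested dtype.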
def check_exog(X, dtype=DTYPE, copy=True, force_all_finite=True):
"""A wrapper for ``check_array`` for 2D arrays
Parameters
----------
X : array-like, shape=(n_samples, n_features)
The exogenous array. If a Pandas frame, a Pandas frame will be returned
as well. Otherwise, a numpy array will be returned.
dtype : string, type or None (default=np.float64)
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
copy : bool, optional (default=True)
Whether a forced copy will be triggered. If copy=False, a copy might
still be triggered by a conversion.
force_all_finite : bool, optional (default=True)
Whether to raise an error on np.inf and np.nan in an array. The
possibilities are:
- True: Force all values of array to be finite.
- False: accept both np.inf and np.nan in array.
Returns
-------
X : pd.DataFrame or np.ndarray, shape=(n_samples, n_features)
Either a 2-d numpy array or pd.DataFrame
"""
if hasattr(X, 'ndim') and X.ndim != 2:
raise ValueError("Must be a 2-d array or dataframe")
if isinstance(X, pd.DataFrame):
# if not copy, go straight to asserting finite
if copy and dtype is not None:
X = X.astype(dtype) # tantamount to copy
if force_all_finite and (~X.apply(np.isfinite)).any().any():
raise ValueError("Found non-finite values in dataframe")
return X
# otherwise just a pass-through to the scikit-learn method
return skval.check_array(
X,
ensure_2d=True,
        dtype=dtype,
copy=copy,
force_all_finite=force_all_finite,
)
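# A minimal usage sketch for ``check_exog`` (illustrative only): a DataFrame is
# validated and returned as a DataFrame, while any other 2-d input comes back as
# a numpy array.
#
#   >>> import pandas as pd
#   >>> X = pd.DataFrame({'a': [1.0, 2.0], 'b': [3.0, 4.0]})
#   >>> check_exog(X).shape
#   (2, 2)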
def _diff_vector(x, lag):
# compute the lag for a vector (not a matrix)
n = x.shape[0]
lag = min(n, lag) # if lag > n, then we just want an empty array back
return x[lag: n] - x[: n-lag] # noqa: E226
def _diff_matrix(x, lag):
# compute the lag for a matrix (not a vector)
m, _ = x.shape
lag = min(m, lag) # if lag > n, then we just want an empty array back
return x[lag: m, :] - x[: m-lag, :] # noqa: E226
def diff(x, lag=1, differences=1):
"""Difference an array.
A python implementation of the R ``diff`` function [1]. This computes lag
differences from an array given a ``lag`` and ``differencing`` term.
If ``x`` is a vector of length :math:`n`, ``lag=1`` and ``differences=1``,
then the computed result is equal to the successive differences
``x[lag:n] - x[:n-lag]``.
Examples
--------
Where ``lag=1`` and ``differences=1``:
>>> x = c(10, 4, 2, 9, 34)
>>> diff(x, 1, 1)
array([ -6., -2., 7., 25.], dtype=float32)
Where ``lag=1`` and ``differences=2``:
>>> x = c(10, 4, 2, 9, 34)
>>> diff(x, 1, 2)
array([ 4., 9., 18.], dtype=float32)
Where ``lag=3`` and ``differences=1``:
>>> x = c(10, 4, 2, 9, 34)
>>> diff(x, 3, 1)
array([ -1., 30.], dtype=float32)
Where ``lag=6`` (larger than the array is) and ``differences=1``:
>>> x = c(10, 4, 2, 9, 34)
>>> diff(x, 6, 1)
array([], dtype=float32)
For a 2d array with ``lag=1`` and ``differences=1``:
>>> import numpy as np
>>>
>>> x = np.arange(1, 10).reshape((3, 3)).T
>>> diff(x, 1, 1)
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
Parameters
----------
x : array-like, shape=(n_samples, [n_features])
The array to difference.
lag : int, optional (default=1)
An integer > 0 indicating which lag to use.
differences : int, optional (default=1)
An integer > 0 indicating the order of the difference.
Returns
-------
res : np.ndarray, shape=(n_samples, [n_features])
The result of the differenced arrays.
References
----------
.. [1] https://stat.ethz.ch/R-manual/R-devel/library/base/html/diff.html
"""
if any(v < 1 for v in (lag, differences)):
raise ValueError('lag and differences must be positive (> 0) integers')
x = skval.check_array(x, ensure_2d=False, dtype=DTYPE, copy=False)
fun = _diff_vector if x.ndim == 1 else _diff_matrix
res = x
# "recurse" over range of differences
for i in range(differences):
        res = fun(res, lag)
    return res
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
JPEG codestream-parser (All-JPEG Codestream/File Format Parser Tools)
See LICENCE.txt for copyright and licensing conditions.
"""
from __future__ import print_function, division
import sys
from jp2utils import ordb, ordw, ordl, ordq, ieee_float_to_float, ieee_double_to_float,\
JP2Error, InvalidMarker, InvalidMarkerField, InvalidSizedMarker, RequiredMarkerMissing, UnexpectedEOC,\
MisplacedData, BaseCodestream
class JP2Codestream(BaseCodestream):
"""
JP2 Codestream class.
"""
def __init__(self, indent=0):
super(JP2Codestream, self).__init__(indent=indent)
self.datacount = 0
self.bytecount = 0
self.offset = 0
self.buffer = None
self.marker = None
self.pos = 0
self.size = None
self.csiz = 0
def print_header(self, header, content):
self._headers.append((header, content))
def print_data(self, count):
if count > 0:
self.datacount += count
self.bytecount += count
self._print_indent("Data : %d bytes" % count)
print("")
def parse(self, buf, startpos):
self.buffer = buf
self.pos = 0
self.datacount = 0
self.offset = startpos
# Read SOC Marker
if (len(self.buffer) - self.pos < 2) or ordw(self.buffer[self.pos + 0:self.pos + 2]) != 0xff4f:
raise RequiredMarkerMissing("SOC")
self.pos += 2
self.read_SOC()
# Read SIZ Marker
if (len(self.buffer) - self.pos < 2) or ordw(self.buffer[self.pos + 0:self.pos + 2]) != 0xff51:
raise RequiredMarkerMissing("SIZ")
self.pos += 2
self.read_SIZ()
# Read other header markers
while len(self.buffer) - self.pos >= 2 and \
                ordb(self.buffer[self.pos + 1]) != 0x90:
            if ordb(self.buffer[self.pos + 0]) != 0xff:
raise MisplacedData()
if len(self.buffer) - self.pos < 4:
raise UnexpectedEOC()
self.read_header_marker()
# Read Tile Parts
while len(self.buffer) - self.pos >= 2 and \
ordb(self.buffer[self.pos + 0]) == 0xff and \
ordb(self.buffer[self.pos + 1]) == 0x90:
self.pos += 2
self.read_SOT()
# Read Next Marker
while len(self.buffer) - self.pos >= 2 and \
ordb(self.buffer[self.pos + 1]) != 0x93: # SOD
if ordb(self.buffer[self.pos + 0]) != 0xff:
raise MisplacedData()
if len(self.buffer) - self.pos < 4:
raise UnexpectedEOC()
self.read_header_marker()
if len(self.buffer) - self.pos < 2:
raise UnexpectedEOC()
self.pos += 2
self._new_marker("SOD", "Start of data")
self._end_marker()
self.parse_data()
if len(self.buffer) - self.pos > 0:
raise MisplacedData()
l = len(self.buffer)
oh = l - self.datacount
self._print_indent("Size : %d bytes" % l)
self._print_indent("Data Size : %d bytes" % self.datacount)
self._print_indent("Overhead : %d bytes (%d%%)" % (oh, 100 * oh / l))
def load_marker(self, file, marker):
mrk = ordw(marker[0:2])
if 0xff30 <= mrk <= 0xff3f:
self.buffer = marker
elif mrk in [0xff93, 0xff4f, 0xffd9, 0xff92]:
self.buffer = marker
elif 0xff4f <= mrk <= 0xff93:
size = file.read(2)
ln = ((ordb(size[0]) << 8) +
(ordb(size[1]) << 0))
if ln < 2:
raise InvalidSizedMarker("Marker too short")
self.buffer = marker + size + file.read(ln - 2)
if len(self.buffer) != ln + 2:
raise UnexpectedEOC()
else:
raise MisplacedData()
self.bytecount += len(self.buffer)
self.pos = 0
def load_buffer(self, file):
marker = file.read(2)
if len(marker) == 0:
self.buffer = []
else:
if len(marker) < 2:
raise UnexpectedEOC()
self.load_marker(file, marker)
def stream_parse(self, file, startpos):
self.pos = 0
self.datacount = 0
self.bytecount = 0
self.offset = startpos
# Read SOC Marker
self.load_buffer(file)
if ordw(self.buffer[self.pos + 0:self.pos + 2]) != 0xff4f:
raise RequiredMarkerMissing("SOC")
self.pos += 2
self.read_SOC()
self.offset += len(self.buffer)
# Read SIZ Marker
self.load_buffer(file)
if ordw(self.buffer[self.pos + 0:self.pos + 2]) != 0xff51:
raise RequiredMarkerMissing("SIZ")
self.pos += 2
self.read_SIZ()
self.offset += len(self.buffer)
# Read other header markers
self.load_buffer(file)
while len(self.buffer) - self.pos >= 2 and \
ordb(self.buffer[self.pos + 1]) != 0x90:
if ordb(self.buffer[self.pos + 0]) != 0xff:
raise MisplacedData()
if len(self.buffer) - self.pos < 4:
raise UnexpectedEOC()
self.read_header_marker()
self.offset += len(self.buffer)
self.load_buffer(file)
# Read Tile Parts
while len(self.buffer) >= 2 and ordw(self.buffer[0:2]) == 0xff90:
self.pos += 2
self.read_SOT()
self.offset += len(self.buffer)
self.load_buffer(file)
# Read Next Marker
while len(self.buffer) >= 2 and \
ordb(self.buffer[self.pos + 1]) != 0x93: # SOD
if ordb(self.buffer[self.pos + 0]) != 0xff:
raise MisplacedData()
if len(self.buffer) - self.pos < 4:
raise UnexpectedEOC()
self.read_header_marker()
self.offset += len(self.buffer)
self.load_buffer(file)
self.offset += len(self.buffer)
self._new_marker("SOD", "Start of data")
self._end_marker()
self.stream_data(file)
if len(self.buffer) - self.pos > 0:
raise MisplacedData()
oh = self.bytecount - self.datacount
self._print_indent("Size : %d bytes" % self.bytecount)
self._print_indent("Data Size : %d bytes" % self.datacount)
self._print_indent("Overhead : %d bytes (%d%%)" % (oh, 100 * oh // self.bytecount))
def stream_data(self, file):
count = 0
while True:
byte = file.read(1)
if len(byte) != 1:
raise UnexpectedEOC()
count += 1
if ordb(byte[0]) == 0xff:
marker = file.read(1)
if len(marker) == 1:
count += 1
if ordb(marker[0]) >= 0x90:
self.offset += count - 2
self.print_data(count - 2)
self.load_marker(file, byte + marker)
if self.read_data_marker():
break
self.offset += len(self.buffer)
count = 0
def parse_data(self, buf=None):
if buf:
self.buffer = buf
self.pos = 0
while True:
count = 0
while len(self.buffer) - self.pos >= 2 and \
(ordb(self.buffer[self.pos + 0]) != 0xff or ordb(self.buffer[self.pos + 1]) < 0x90):
self.pos += 1
count += 1
if len(self.buffer) - self.pos == 1:
self.pos += 1
count += 1
self.print_data(count)
if len(self.buffer) - self.pos == 0:
return
if len(self.buffer) - self.pos < 2:
raise UnexpectedEOC()
# Read Marker
if self.read_data_marker():
break
def read_SGco(self):
if len(self.buffer) - self.pos < 4:
raise InvalidSizedMarker("SGco")
self.print_header("Progression Order",
self.progression_order(ordb(self.buffer[self.pos + 0])))
self.print_header("Layers", str(ordw(self.buffer[self.pos + 1:self.pos + 3])))
trafo = ordb(self.buffer[self.pos + 3])
if trafo == 0:
s = "none"
elif trafo == 1:
s = "components 0,1,2"
elif trafo == 2:
s = "generic array based transform"
elif trafo == 4:
s = "wavelet based transform"
elif trafo == 6:
s = "array and wavelet based transform"
else:
s = str(ordb(self.buffer[self.pos + 3]))
self.print_header("Multiple Component Transformation", s)
self.pos += 4
def read_SPco(self, precincts):
if len(self.buffer) - self.pos < 5 + precincts:
raise InvalidSizedMarker("SPco")
levels = ordb(self.buffer[self.pos + 0])
if levels <= 32:
self.print_header("Decomposition Levels", str(levels))
else:
self.print_header("Downsampling factor style", str(levels))
self.print_header("Code-block size", "%dx%d" % \
(1 << (ordb(self.buffer[self.pos + 1]) + 2),
1 << (ordb(self.buffer[self.pos + 2]) + 2)))
x = ordb(self.buffer[self.pos + 3])
self.print_header("Selective Arithmetic Coding Bypass", "yes" if x & 0x01 else "no")
self.print_header("Reset Context Probabilities", "yes" if x & 0x02 else "no")
self.print_header("Termination on Each Coding Pass", "yes" if x & 0x04 else "no")
self.print_header("Vertically Causal Context", "yes" if x & 0x08 else "no")
self.print_header("Predictable Termination", "yes" if x & 0x10 else "no")
self.print_header("Segmentation Symbols", "yes" if x & 0x20 else "no")
self.print_header("Entropy Coding", "FBCOT (Part 15)" if x & 0x40 else "EBCOT")
if x & 0x40:
self.print_header("Mixing of FBCOT and EBCOT", "yes" if x & 0x80 else "no")
if ordb(self.buffer[self.pos + 4]) == 0x00:
s = "9-7 irreversible"
elif ordb(self.buffer[self.pos + 4]) == 0x01:
s = "5-3 reversible"
else:
s = "arbitrary ATK specified transform"
self.print_header("Wavelet Transformation", s)
for i in range(precincts):
x = ordb(self.buffer[self.pos + i + 5])
self.print_header("Precinct #%d Size Exponents" % i,
"%dx%d" % (x & 0x0f, x >> 4))
self.pos += 5 + precincts
def read_SOC(self):
self._new_marker("SOC", "Start of codestream")
self._end_marker()
def read_NSI(self):
self._new_marker("NSI", "Additional Dimension Image and Tile Size")
size = ordw(self.buffer[self.pos + 0:self.pos + 2])
if size < 20 or size > 16403:
raise InvalidSizedMarker("NSI")
ndim = ordb(self.buffer[self.pos + 2])
zsiz = ordl(self.buffer[self.pos + 3:self.pos + 7])
osiz = ordl(self.buffer[self.pos + 7:self.pos + 11])
tsiz = ordl(self.buffer[self.pos + 11:self.pos + 15])
tosz = ordl(self.buffer[self.pos + 15:self.pos + 19])
self.print_header("Dimensionality", "%d" % ndim)
self.print_header("Image Depth", "%d" % zsiz)
self.print_header("Image Depth Offset", "%d" % osiz)
self.print_header("Tile Depth", "%d" % tsiz)
self.print_header("Tile Depth Offset", "%d" % tosz)
for i in range(size - 19):
self.print_header("Z Sample Separation for component %d" % i, "%d" % ordb(self.buffer[self.pos + 19 + i]))
self.pos += size
self._end_marker()
def read_SIZ(self):
self._new_marker("SIZ", "Image and tile size")
size = ordw(self.buffer[self.pos + 0:self.pos + 2])
if size < 41:
raise InvalidSizedMarker("SIZ")
if (size - 38) % 3 != 0:
raise InvalidSizedMarker("SIZ")
# Read Csiz
components = (size - 38) // 3
self.csiz = ordw(self.buffer[self.pos + 36:self.pos + 38])
if self.csiz != components:
raise InvalidSizedMarker("SIZ")
# Read Rsiz
rsiz = ordw(self.buffer[self.pos + 2:self.pos + 4])
if rsiz == 0:
s = "JPEG2000 full standard"
elif rsiz == 1:
s = "JPEG2000 profile 0"
elif rsiz == 2:
s = "JPEG2000 profile 1"
elif rsiz == 3:
s = "DCI 2K profile"
elif rsiz == 4:
s = "DCI 4K profile"
elif rsiz == 5:
s = "DCI long term | |
is `Available`, the CIDR block is available.
"""
pulumi.set(__self__, "create_time", create_time)
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "is_default", is_default)
pulumi.set(__self__, "nat_gateway_id", nat_gateway_id)
pulumi.set(__self__, "nat_ip_cidr", nat_ip_cidr)
pulumi.set(__self__, "nat_ip_cidr_description", nat_ip_cidr_description)
pulumi.set(__self__, "nat_ip_cidr_id", nat_ip_cidr_id)
pulumi.set(__self__, "nat_ip_cidr_name", nat_ip_cidr_name)
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="createTime")
def create_time(self) -> str:
"""
The time when the CIDR block was created.
"""
return pulumi.get(self, "create_time")
@property
@pulumi.getter
def id(self) -> str:
"""
The ID of the Nat Ip Cidr.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="isDefault")
def is_default(self) -> bool:
"""
        Whether it is the default NAT IP address. Valid values: `true` or `false`. `true`: it is the default NAT IP address. `false`: it is not the default NAT IP address.
"""
return pulumi.get(self, "is_default")
@property
@pulumi.getter(name="natGatewayId")
def nat_gateway_id(self) -> str:
"""
The ID of the VPC NAT gateway.
"""
return pulumi.get(self, "nat_gateway_id")
@property
@pulumi.getter(name="natIpCidr")
def nat_ip_cidr(self) -> str:
"""
The NAT CIDR block to be created. The CIDR block must meet the following conditions: It must be `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`, or one of their subnets. The subnet mask must be `16` to `32` bits in lengths. To use a public CIDR block as the NAT CIDR block, the VPC to which the VPC NAT gateway belongs must be authorized to use public CIDR blocks. For more information, see [Create a VPC NAT gateway](https://www.alibabacloud.com/help/doc-detail/268230.htm).
"""
return pulumi.get(self, "nat_ip_cidr")
@property
@pulumi.getter(name="natIpCidrDescription")
def nat_ip_cidr_description(self) -> str:
"""
        The description of the NAT CIDR block. It must be `2` to `256` characters in length and start with a letter or a Chinese character, and it cannot start with `http://` or `https://`.
"""
return pulumi.get(self, "nat_ip_cidr_description")
@property
@pulumi.getter(name="natIpCidrId")
def nat_ip_cidr_id(self) -> str:
"""
NAT IP ADDRESS instance ID.
"""
return pulumi.get(self, "nat_ip_cidr_id")
@property
@pulumi.getter(name="natIpCidrName")
def nat_ip_cidr_name(self) -> str:
"""
        The name of the NAT CIDR block. It must be `2` to `128` characters in length and start with a letter or a Chinese character. It can contain digits, periods (.), underscores (_), and hyphens (-), and it cannot start with `http://` or `https://`.
"""
return pulumi.get(self, "nat_ip_cidr_name")
@property
@pulumi.getter
def status(self) -> str:
"""
The status of the CIDR block of the NAT gateway. If the value is `Available`, the CIDR block is available.
"""
return pulumi.get(self, "status")
@pulumi.output_type
class GetNatIpsIpResult(dict):
def __init__(__self__, *,
id: str,
is_default: bool,
nat_gateway_id: str,
nat_ip: str,
nat_ip_cidr: str,
nat_ip_description: str,
nat_ip_id: str,
nat_ip_name: str,
status: str):
"""
:param str id: The ID of the Nat Ip.
        :param bool is_default: Indicates whether the NAT IP address is the default NAT IP address. Valid values: `true`: it is the default NAT IP address. `false`: it is not the default NAT IP address.
:param str nat_gateway_id: The ID of the Virtual Private Cloud (VPC) NAT gateway to which the NAT IP address belongs.
:param str nat_ip: The NAT IP address that is queried.
:param str nat_ip_cidr: The CIDR block to which the NAT IP address belongs.
:param str nat_ip_description: The description of the NAT IP address.
:param str nat_ip_id: The ID of the NAT IP address.
:param str nat_ip_name: The name of the NAT IP address.
:param str status: The status of the NAT IP address. Valid values: `Available`, `Deleting` and `Creating`.
"""
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "is_default", is_default)
pulumi.set(__self__, "nat_gateway_id", nat_gateway_id)
pulumi.set(__self__, "nat_ip", nat_ip)
pulumi.set(__self__, "nat_ip_cidr", nat_ip_cidr)
pulumi.set(__self__, "nat_ip_description", nat_ip_description)
pulumi.set(__self__, "nat_ip_id", nat_ip_id)
pulumi.set(__self__, "nat_ip_name", nat_ip_name)
pulumi.set(__self__, "status", status)
@property
@pulumi.getter
def id(self) -> str:
"""
The ID of the Nat Ip.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="isDefault")
def is_default(self) -> bool:
"""
        Indicates whether the NAT IP address is the default NAT IP address. Valid values: `true`: it is the default NAT IP address. `false`: it is not the default NAT IP address.
"""
return pulumi.get(self, "is_default")
@property
@pulumi.getter(name="natGatewayId")
def nat_gateway_id(self) -> str:
"""
The ID of the Virtual Private Cloud (VPC) NAT gateway to which the NAT IP address belongs.
"""
return pulumi.get(self, "nat_gateway_id")
@property
@pulumi.getter(name="natIp")
def nat_ip(self) -> str:
"""
The NAT IP address that is queried.
"""
return pulumi.get(self, "nat_ip")
@property
@pulumi.getter(name="natIpCidr")
def nat_ip_cidr(self) -> str:
"""
The CIDR block to which the NAT IP address belongs.
"""
return pulumi.get(self, "nat_ip_cidr")
@property
@pulumi.getter(name="natIpDescription")
def nat_ip_description(self) -> str:
"""
The description of the NAT IP address.
"""
return pulumi.get(self, "nat_ip_description")
@property
@pulumi.getter(name="natIpId")
def nat_ip_id(self) -> str:
"""
The ID of the NAT IP address.
"""
return pulumi.get(self, "nat_ip_id")
@property
@pulumi.getter(name="natIpName")
def nat_ip_name(self) -> str:
"""
The name of the NAT IP address.
"""
return pulumi.get(self, "nat_ip_name")
@property
@pulumi.getter
def status(self) -> str:
"""
The status of the NAT IP address. Valid values: `Available`, `Deleting` and `Creating`.
"""
return pulumi.get(self, "status")
@pulumi.output_type
class GetNetworkAclsAclResult(dict):
def __init__(__self__, *,
description: str,
egress_acl_entries: Sequence['outputs.GetNetworkAclsAclEgressAclEntryResult'],
id: str,
ingress_acl_entries: Sequence['outputs.GetNetworkAclsAclIngressAclEntryResult'],
network_acl_id: str,
network_acl_name: str,
resources: Sequence['outputs.GetNetworkAclsAclResourceResult'],
status: str,
vpc_id: str):
"""
:param str description: Description of the entry direction rule.
:param Sequence['GetNetworkAclsAclEgressAclEntryArgs'] egress_acl_entries: Output direction rule information.
:param str id: The ID of the Network Acl.
:param Sequence['GetNetworkAclsAclIngressAclEntryArgs'] ingress_acl_entries: Entry direction rule information.
:param str network_acl_id: The first ID of the resource.
:param str network_acl_name: The name of the network ACL.
:param Sequence['GetNetworkAclsAclResourceArgs'] resources: The associated resource.
:param str status: The state of the network ACL.
:param str vpc_id: The ID of the associated VPC.
"""
pulumi.set(__self__, "description", description)
pulumi.set(__self__, "egress_acl_entries", egress_acl_entries)
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "ingress_acl_entries", ingress_acl_entries)
pulumi.set(__self__, "network_acl_id", network_acl_id)
pulumi.set(__self__, "network_acl_name", network_acl_name)
pulumi.set(__self__, "resources", resources)
pulumi.set(__self__, "status", status)
pulumi.set(__self__, "vpc_id", vpc_id)
@property
@pulumi.getter
def description(self) -> str:
"""
Description of the entry direction rule.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="egressAclEntries")
def egress_acl_entries(self) -> Sequence['outputs.GetNetworkAclsAclEgressAclEntryResult']:
"""
Output direction rule information.
"""
return pulumi.get(self, "egress_acl_entries")
@property
@pulumi.getter
def id(self) -> str:
"""
The ID of the Network Acl.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="ingressAclEntries")
def ingress_acl_entries(self) -> Sequence['outputs.GetNetworkAclsAclIngressAclEntryResult']:
"""
Entry direction rule information.
"""
return pulumi.get(self, "ingress_acl_entries")
@property
@pulumi.getter(name="networkAclId")
def network_acl_id(self) -> str:
"""
The first ID of the resource.
"""
return pulumi.get(self, "network_acl_id")
@property
@pulumi.getter(name="networkAclName")
def network_acl_name(self) -> str:
"""
The name of the network ACL.
"""
return pulumi.get(self, "network_acl_name")
@property
@pulumi.getter
def resources(self) -> Sequence['outputs.GetNetworkAclsAclResourceResult']:
"""
The associated resource.
"""
return pulumi.get(self, "resources")
@property
@pulumi.getter
def status(self) -> str:
"""
The state of the network ACL.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter(name="vpcId")
def vpc_id(self) -> str:
"""
The ID of the associated VPC.
"""
return pulumi.get(self, "vpc_id")
@pulumi.output_type
class GetNetworkAclsAclEgressAclEntryResult(dict):
def __init__(__self__, *,
description: str,
destination_cidr_ip: str,
network_acl_entry_name: str,
policy: str,
port: str,
protocol: str):
"""
:param str description: Description of the entry direction rule.
:param str destination_cidr_ip: The destination address segment.
:param str network_acl_entry_name: The name of the entry direction rule entry.
:param str policy: The authorization policy.
:param str port: Source port range.
:param str protocol: Transport layer protocol.
"""
pulumi.set(__self__, "description", description)
pulumi.set(__self__, "destination_cidr_ip", destination_cidr_ip)
pulumi.set(__self__, "network_acl_entry_name", network_acl_entry_name)
pulumi.set(__self__, "policy", policy)
pulumi.set(__self__, "port", port)
pulumi.set(__self__, "protocol", protocol)
@property
@pulumi.getter
def description(self) -> str:
"""
Description of the entry direction rule.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="destinationCidrIp")
def destination_cidr_ip(self) -> str:
"""
The destination address segment.
"""
return pulumi.get(self, "destination_cidr_ip")
@property
@pulumi.getter(name="networkAclEntryName")
def network_acl_entry_name(self) -> str:
"""
The name of the entry direction rule entry.
"""
return pulumi.get(self, "network_acl_entry_name")
@property
@pulumi.getter
def policy(self) -> str:
"""
The authorization policy.
"""
return pulumi.get(self, "policy")
@property
@pulumi.getter
def port(self) -> str:
"""
Source port range.
"""
return pulumi.get(self, "port")
@property
@pulumi.getter
def protocol(self) -> str:
"""
Transport layer protocol.
"""
return pulumi.get(self, "protocol")
@pulumi.output_type
class GetNetworkAclsAclIngressAclEntryResult(dict):
def __init__(__self__, *,
description: str,
network_acl_entry_name: str,
policy: str,
port: str,
protocol: str,
source_cidr_ip: str):
"""
:param str description: Description of the entry direction rule.
:param str network_acl_entry_name: The name of the entry direction rule entry.
:param str policy: The authorization policy.
:param str port: Source port range.
:param str protocol: Transport layer protocol.
:param str source_cidr_ip: The source address field.
"""
pulumi.set(__self__, "description", description)
pulumi.set(__self__, "network_acl_entry_name", network_acl_entry_name)
pulumi.set(__self__, "policy", policy)
pulumi.set(__self__, "port", port)
pulumi.set(__self__, "protocol", protocol)
pulumi.set(__self__, "source_cidr_ip", source_cidr_ip)
@property
@pulumi.getter
def | |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup
from google.cloud.spanner_admin_database_v1.types import common
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.spanner.admin.database.v1',
manifest={
'RestoreSourceType',
'RestoreInfo',
'Database',
'ListDatabasesRequest',
'ListDatabasesResponse',
'CreateDatabaseRequest',
'CreateDatabaseMetadata',
'GetDatabaseRequest',
'UpdateDatabaseDdlRequest',
'UpdateDatabaseDdlMetadata',
'DropDatabaseRequest',
'GetDatabaseDdlRequest',
'GetDatabaseDdlResponse',
'ListDatabaseOperationsRequest',
'ListDatabaseOperationsResponse',
'RestoreDatabaseRequest',
'RestoreDatabaseEncryptionConfig',
'RestoreDatabaseMetadata',
'OptimizeRestoredDatabaseMetadata',
},
)
class RestoreSourceType(proto.Enum):
r"""Indicates the type of the restore source."""
TYPE_UNSPECIFIED = 0
BACKUP = 1
class RestoreInfo(proto.Message):
r"""Information about the database restore.
Attributes:
source_type (google.cloud.spanner_admin_database_v1.types.RestoreSourceType):
The type of the restore source.
backup_info (google.cloud.spanner_admin_database_v1.types.BackupInfo):
Information about the backup used to restore
the database. The backup may no longer exist.
"""
source_type = proto.Field(
proto.ENUM,
number=1,
enum='RestoreSourceType',
)
backup_info = proto.Field(
proto.MESSAGE,
number=2,
oneof='source_info',
message=gsad_backup.BackupInfo,
)
class Database(proto.Message):
r"""A Cloud Spanner database.
Attributes:
name (str):
Required. The name of the database. Values are of the form
``projects/<project>/instances/<instance>/databases/<database>``,
where ``<database>`` is as specified in the
``CREATE DATABASE`` statement. This name can be passed to
other API methods to identify the database.
state (google.cloud.spanner_admin_database_v1.types.Database.State):
Output only. The current database state.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. If exists, the time at which the
database creation started.
restore_info (google.cloud.spanner_admin_database_v1.types.RestoreInfo):
Output only. Applicable only for restored
databases. Contains information about the
restore source.
encryption_config (google.cloud.spanner_admin_database_v1.types.EncryptionConfig):
Output only. For databases that are using
customer managed encryption, this field contains
the encryption configuration for the database.
For databases that are using Google default or
other types of encryption, this field is empty.
encryption_info (Sequence[google.cloud.spanner_admin_database_v1.types.EncryptionInfo]):
Output only. For databases that are using
customer managed encryption, this field contains
the encryption information for the database,
such as encryption state and the Cloud KMS key
versions that are in use.
For databases that are using Google default or
other types of encryption, this field is empty.
This field is propagated lazily from the
backend. There might be a delay from when a key
version is being used and when it appears in
this field.
version_retention_period (str):
Output only. The period in which Cloud Spanner retains all
versions of data for the database. This is the same as the
value of version_retention_period database option set using
[UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
Defaults to 1 hour, if not set.
earliest_version_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Earliest timestamp at which
older versions of the data can be read. This
value is continuously updated by Cloud Spanner
and becomes stale the moment it is queried. If
you are using this value to recover data, make
sure to account for the time from the moment
when the value is queried to the moment when you
initiate the recovery.
default_leader (str):
Output only. The read-write region which contains the
database's leader replicas.
This is the same as the value of default_leader database
option set using DatabaseAdmin.CreateDatabase or
DatabaseAdmin.UpdateDatabaseDdl. If not explicitly set, this
is empty.
"""
class State(proto.Enum):
r"""Indicates the current state of the database."""
STATE_UNSPECIFIED = 0
CREATING = 1
READY = 2
READY_OPTIMIZING = 3
name = proto.Field(
proto.STRING,
number=1,
)
state = proto.Field(
proto.ENUM,
number=2,
enum=State,
)
create_time = proto.Field(
proto.MESSAGE,
number=3,
message=timestamp_pb2.Timestamp,
)
restore_info = proto.Field(
proto.MESSAGE,
number=4,
message='RestoreInfo',
)
encryption_config = proto.Field(
proto.MESSAGE,
number=5,
message=common.EncryptionConfig,
)
encryption_info = proto.RepeatedField(
proto.MESSAGE,
number=8,
message=common.EncryptionInfo,
)
version_retention_period = proto.Field(
proto.STRING,
number=6,
)
earliest_version_time = proto.Field(
proto.MESSAGE,
number=7,
message=timestamp_pb2.Timestamp,
)
default_leader = proto.Field(
proto.STRING,
number=9,
)
class ListDatabasesRequest(proto.Message):
r"""The request for
[ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
Attributes:
parent (str):
Required. The instance whose databases should be listed.
Values are of the form
``projects/<project>/instances/<instance>``.
page_size (int):
Number of databases to be returned in the
response. If 0 or less, defaults to the server's
maximum allowed page size.
page_token (str):
If non-empty, ``page_token`` should contain a
[next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token]
from a previous
[ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
"""
parent = proto.Field(
proto.STRING,
number=1,
)
page_size = proto.Field(
proto.INT32,
number=3,
)
page_token = proto.Field(
proto.STRING,
number=4,
)
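# A minimal sketch (hypothetical project and instance names) of constructing this
# request as a proto-plus message:
#
#   request = ListDatabasesRequest(
#       parent="projects/my-project/instances/my-instance",
#       page_size=100,
#   )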
class ListDatabasesResponse(proto.Message):
r"""The response for
[ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
Attributes:
databases (Sequence[google.cloud.spanner_admin_database_v1.types.Database]):
Databases that matched the request.
next_page_token (str):
``next_page_token`` can be sent in a subsequent
[ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]
call to fetch more of the matching databases.
"""
@property
def raw_page(self):
return self
databases = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='Database',
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class CreateDatabaseRequest(proto.Message):
r"""The request for
[CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase].
Attributes:
parent (str):
Required. The name of the instance that will serve the new
database. Values are of the form
``projects/<project>/instances/<instance>``.
create_statement (str):
Required. A ``CREATE DATABASE`` statement, which specifies
the ID of the new database. The database ID must conform to
the regular expression ``[a-z][a-z0-9_\-]*[a-z0-9]`` and be
between 2 and 30 characters in length. If the database ID is
a reserved word or if it contains a hyphen, the database ID
must be enclosed in backticks (:literal:`\``).
extra_statements (Sequence[str]):
Optional. A list of DDL statements to run
inside the newly created database. Statements
can create tables, indexes, etc. These
statements execute atomically with the creation
of the database: if there is an error in any
statement, the database is not created.
encryption_config (google.cloud.spanner_admin_database_v1.types.EncryptionConfig):
Optional. The encryption configuration for
the database. If this field is not specified,
Cloud Spanner will encrypt/decrypt all data at
rest using Google default encryption.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
create_statement = proto.Field(
proto.STRING,
number=2,
)
extra_statements = proto.RepeatedField(
proto.STRING,
number=3,
)
encryption_config = proto.Field(
proto.MESSAGE,
number=4,
message=common.EncryptionConfig,
)
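# A minimal sketch (hypothetical names) of a CreateDatabaseRequest that also runs
# extra DDL atomically with the database creation; note the backticks required
# because the database ID contains a hyphen:
#
#   request = CreateDatabaseRequest(
#       parent="projects/my-project/instances/my-instance",
#       create_statement="CREATE DATABASE `my-db`",
#       extra_statements=[
#           "CREATE TABLE Singers (SingerId INT64 NOT NULL) PRIMARY KEY (SingerId)",
#       ],
#   )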
class CreateDatabaseMetadata(proto.Message):
r"""Metadata type for the operation returned by
[CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase].
Attributes:
database (str):
The database being created.
"""
database = proto.Field(
proto.STRING,
number=1,
)
class GetDatabaseRequest(proto.Message):
r"""The request for
[GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase].
Attributes:
name (str):
Required. The name of the requested database. Values are of
the form
``projects/<project>/instances/<instance>/databases/<database>``.
"""
name = proto.Field(
proto.STRING,
number=1,
)
class UpdateDatabaseDdlRequest(proto.Message):
r"""Enqueues the given DDL statements to be applied, in order but not
necessarily all at once, to the database schema at some point (or
points) in the future. The server checks that the statements are
executable (syntactically valid, name tables that exist, etc.)
before enqueueing them, but they may still fail upon later execution
(e.g., if a statement from another batch of statements is applied
first and it conflicts in some way, or if there is some data-related
problem like a ``NULL`` value in a column to which ``NOT NULL``
would be added). If a statement fails, all subsequent statements in
the batch are automatically cancelled.
Each batch of statements is assigned a name which can be used with
the [Operations][google.longrunning.Operations] API to monitor
progress. See the
[operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id]
field for more details.
Attributes:
database (str):
Required. The database to update.
statements (Sequence[str]):
Required. DDL statements to be applied to the
database.
operation_id (str):
If empty, the new update request is assigned an
automatically-generated operation ID. Otherwise,
``operation_id`` is used to construct the name of the
resulting [Operation][google.longrunning.Operation].
Specifying an explicit operation ID simplifies determining
whether the statements were executed in the event that the
[UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
call is replayed, or the return value is otherwise lost: the
[database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database]
and ``operation_id`` fields can be combined to form the
[name][google.longrunning.Operation.name] of the resulting
[longrunning.Operation][google.longrunning.Operation]:
``<database>/operations/<operation_id>``.
``operation_id`` should be unique within the database, and
must be a valid identifier: ``[a-z][a-z0-9_]*``. Note that
automatically-generated operation IDs always begin with an
underscore. If the named operation already exists,
[UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
returns ``ALREADY_EXISTS``.
"""
database = proto.Field(
proto.STRING,
number=1,
)
statements = proto.RepeatedField(
proto.STRING,
number=2,
)
operation_id = proto.Field(
proto.STRING,
number=3,
)
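# A minimal sketch (hypothetical names) showing an explicit ``operation_id`` so a
# replayed UpdateDatabaseDdl call can be identified later; the ID must match
# ``[a-z][a-z0-9_]*``:
#
#   request = UpdateDatabaseDdlRequest(
#       database="projects/my-project/instances/my-instance/databases/my-db",
#       statements=["ALTER TABLE Singers ADD COLUMN BirthDate DATE"],
#       operation_id="add_birthdate_column",
#   )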
class UpdateDatabaseDdlMetadata(proto.Message):
r"""Metadata type for the operation returned by
[UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
Attributes:
database (str):
The database being modified.
statements (Sequence[str]):
For an update this list contains all the
statements. For an individual statement, this
list | |
from winney import Winney, Address, retry
from typing import Dict, List
from .errors import RequestError, ParamError
from .entities import (Entity, ServiceEntity, ServiceListEntity,
InstanceEntity, HostEntity, InstanceListEntity,
ServerEntity, LeaderEntity, MetricsEntity,
LoginResponse, RoleListResponse, PermissionListResponse)
from .params import (Param, ServiceRegisterParam, ServiceRemoveParam,
ServiceUpdateParam, ServiceGetParam, ServiceListParam,
SwitchUpdateParam, InstanceRegisterParam,
InstanceRemoveParam, InstanceUpdateParam,
InstanceListParam, InstanceGetParam, BeatInfo,
InstanceBeatParam, HealthUpdateParam, ServerListParam,
ConfigGetParam, ConfigListenParam, ConfigPublishParam,
ConfigRemoveParam)
class Nacos(object):
def __init__(self, addresses, namespace_id):
self.addrs = []
for addr in addresses.split(","):
addr = addr.split(":")
host = addr[0]
            port = int(addr[1]) if len(addr) > 1 else 8848
self.addrs.append(Address(host, port))
self.winney = Winney(
host=self.addrs[0].host,
port=self.addrs[0].port,
addrs=self.addrs,
)
self.namespace_id = namespace_id
self.init_functions()
def init_functions(self):
        # User management
self.winney.register(method="post",
name="user_create",
uri="/nacos/v1/auth/users")
self.winney.register(method="put",
name="user_update",
uri="/nacos/v1/auth/users")
self.winney.register(method="delete",
name="user_delete",
uri="/nacos/v1/auth/users")
self.winney.register(method="post",
name="login",
uri="/nacos/v1/auth/users/login")
        # Role management
self.winney.register(method="post",
name="role_create",
uri="/nacos/v1/auth/roles")
self.winney.register(method="get",
name="role_list",
uri="/nacos/v1/auth/roles")
self.winney.register(method="delete",
name="role_delete",
uri="/nacos/v1/auth/roles")
        # Permission management
self.winney.register(method="post",
name="permission_add",
uri="/nacos/v1/auth/permissions")
self.winney.register(method="get",
name="permission_list",
uri="/nacos/v1/auth/permissions")
self.winney.register(method="delete",
name="permission_delete",
uri="/nacos/v1/auth/permissions")
        # Configuration
self.winney.register(method="get",
name="config_get",
uri="/nacos/v1/cs/configs")
self.winney.register(method="post",
name="config_listen",
uri="/nacos/v1/cs/configs/listener")
self.winney.register(method="post",
name="config_publish",
uri="/nacos/v1/cs/configs")
self.winney.register(method="delete",
name="config_remove",
uri="/nacos/v1/cs/configs")
        # Services
self.winney.register(method="post",
name="service_register",
uri="/nacos/v1/ns/service")
self.winney.register(method="delete",
name="service_remove",
uri="/nacos/v1/ns/service")
self.winney.register(method="put",
name="service_update",
uri="/nacos/v1/ns/service")
self.winney.register(method="get",
name="service_get",
uri="/nacos/v1/ns/service")
self.winney.register(method="get",
name="service_list",
uri="/nacos/v1/ns/service/list")
        # Switches
self.winney.register(method="get",
name="switch_get",
uri="/nacos/v1/ns/operator/switches")
self.winney.register(method="put",
name="switch_update",
uri="/nacos/v1/ns/operator/switches")
        # Instances
self.winney.register(method="post",
name="instance_register",
uri="/nacos/v1/ns/instance")
self.winney.register(method="delete",
name="instance_remove",
uri="/nacos/v1/ns/instance")
self.winney.register(method="put",
name="instance_update",
uri="/nacos/v1/ns/instance")
self.winney.register(method="get",
name="instance_list",
uri="/nacos/v1/ns/instance/list")
self.winney.register(method="get",
name="instance_get",
uri="/nacos/v1/ns/instance")
self.winney.register(method="put",
name="instance_beat",
uri="/nacos/v1/ns/instance/beat")
        # Miscellaneous
self.winney.register(method="get",
name="metrics_get",
uri="/nacos/v1/ns/operator/metrics")
self.winney.register(method="get",
name="server_list",
uri="/nacos/v1/ns/operator/servers")
self.winney.register(method="get",
name="leader_get",
uri="/nacos/v1/ns/raft/leader")
self.winney.register(method="put",
name="health_update",
uri="/nacos/v1/ns/health/instance")
    # Build request parameters
def get_params(self, param: Param) -> Dict:
if not isinstance(param, Param):
raise ParamError(
"param should be type of Param, but {} found".format(
type(param)))
params = param.json()
params["namespaceId"] = self.namespace_id
return params
    # Log in
@retry
def login(self, username, password) -> LoginResponse:
r = self.winney.login(data={
"username": username,
"password": password
})
if not r.ok():
raise RequestError("failed to login, http status = {}".format(
r.status_code))
return LoginResponse(**r.json())
# Create a user
@retry
def user_create(self, username, password) -> bool:
r = self.winney.user_create(data={
"username": username,
"password": password
})
if not r.ok():
raise RequestError(
"failed to create user, http status = {}".format(
r.status_code))
return True
# Update user information
@retry
def user_update(self, username, old_password, new_password) -> bool:
r = self.winney.user_update(
data={
"username": username,
"oldPassword": <PASSWORD>,
"newPassword": <PASSWORD>
})
if not r.ok():
raise RequestError(
"failed to update user, http status = {}".format(
r.status_code))
return True
# Delete a user
@retry
def user_delete(self, username, password) -> bool:
r = self.winney.user_delete(data={
"username": username,
"password": password
})
if not r.ok():
raise RequestError(
"failed to delete user, http status = {}".format(
r.status_code))
return True
# Create a role
@retry
def role_create(self, username, role) -> bool:
r = self.winney.role_create(data={"username": username, "role": role})
if not r.ok():
raise RequestError(
"failed to create role, http status = {}".format(
r.status_code))
return True
# List all roles of a user
@retry
def role_list(self, username, page=1, page_size=100) -> RoleListResponse:
r = self.winney.role_list(data={
"username": username,
"pageNo": page,
"pageSize": page_size
})
if not r.ok():
raise RequestError("failed to list role, http status = {}".format(
r.status_code))
return RoleListResponse(**r.json())
# Delete a role from a user
@retry
def role_delete(self, username, role) -> bool:
r = self.winney.role_delete(data={"username": username, "role": role})
if not r.ok():
raise RequestError(
"failed to delete role, http status = {}".format(
r.status_code))
return True
# Add a permission to a role
@retry
def permission_add(self, role, resource, action) -> bool:
r = self.winney.permission_add(data={
"role": role,
"resource": resource,
"action": action
})
if not r.ok():
raise RequestError(
"failed to add permission, http status = {}".format(
r.status_code))
return True
# Remove a permission from a role
@retry
def permission_delete(self, role, resource, action) -> bool:
r = self.winney.permission_delete(data={
"role": role,
"resource": resource,
"action": action
})
if not r.ok():
raise RequestError(
"failed to delete permission, http status = {}".format(
r.status_code))
return True
# List the permissions of a role
@retry
def permission_list(self,
role,
page=1,
page_size=100) -> PermissionListResponse:
r = self.winney.permission_list(data={
"role": role,
"pageNo": page,
"pageSize": page_size
})
if not r.ok():
raise RequestError(
"failed to list permission, http status = {}".format(
r.status_code))
return PermissionListResponse(**r.json())
# Register a service
@retry
def service_register(self, param: ServiceRegisterParam) -> bool:
if not isinstance(param, ServiceRegisterParam):
raise ParamError(
"param should be type of ServiceRegisterParam, but {} found".
format(type(param)))
params = self.get_params(param)
r = self.winney.service_register(data=params)
if not r.ok():
raise RequestError(
"failed to register service, http status = {}".format(
r.status_code))
return True
# Remove a service
@retry
def service_remove(self, param: ServiceRemoveParam) -> bool:
if not isinstance(param, ServiceRemoveParam):
raise ParamError(
"param should be type of ServiceRemoveParam, but {} found".
format(type(param)))
params = self.get_params(param)
r = self.winney.service_remove(data=params)
if not r.ok():
raise RequestError(
"failed to remove service, http status = {}".format(
r.status_code))
return True
# Update a service
@retry
def service_update(self, param: ServiceUpdateParam) -> bool:
if not isinstance(param, ServiceUpdateParam):
raise ParamError(
"param should be type of ServiceUpdateParam, but {} found".
format(type(param)))
params = self.get_params(param)
r = self.winney.service_update(data=params)
if not r.ok():
raise RequestError(
"failed to update service, http status = {}".format(
r.status_code))
return True
# Get service details
@retry
def service_get(self, param: ServiceGetParam) -> ServiceEntity:
if not isinstance(param, ServiceGetParam):
raise ParamError(
"param should be type of ServiceGetParam, but {} found".format(
type(param)))
params = self.get_params(param)
r = self.winney.service_get(data=params)
if not r.ok():
raise RequestError(
"failed to get service, http status = {}".format(
r.status_code))
return ServiceEntity.loads(**r.json())
# List services
@retry
def service_list(self, param: ServiceListParam) -> ServiceListEntity:
if not isinstance(param, ServiceListParam):
raise ParamError(
"param should be type of ServiceListParam, but {} found".
format(type(param)))
params = self.get_params(param)
r = self.winney.service_list(data=params)
if not r.ok():
raise RequestError(
"failed to list service, http status = {}".format(
r.status_code))
return ServiceListEntity.loads(**r.json())
# Get system switches
@retry
def switch_get(self) -> Dict:
r = self.winney.switch_get()
if not r.ok():
raise RequestError("failed to get switch, http status = {}".format(
r.status_code))
return r.json()
# Update system switches
@retry
def switch_update(self, param: SwitchUpdateParam) -> bool:
if not isinstance(param, SwitchUpdateParam):
raise ParamError(
"param should be type of SwitchUpdateParam, but {} found".
format(type(param)))
params = self.get_params(param)
r = self.winney.switch_update(data=params)
if not r.ok():
raise RequestError(
"failed to update switch, http status = {}".format(
r.status_code))
return True
# Register an instance
@retry
def instance_register(self, param: InstanceRegisterParam) -> bool:
if not isinstance(param, InstanceRegisterParam):
raise ParamError(
"param should be type of InstanceRegisterParam, but {} found".
format(type(param)))
params = self.get_params(param)
r = self.winney.instance_register(data=params)
if not r.ok():
raise RequestError(
"failed to register instance, http status = {}".format(
r.status_code))
return True
# Remove an instance
@retry
def instance_remove(self, param: InstanceRemoveParam) -> bool:
if not isinstance(param, InstanceRemoveParam):
raise ParamError(
"param should be type of InstanceRemoveParam, but {} found".
format(type(param)))
params = self.get_params(param)
r = self.winney.instance_remove(data=params)
if not r.ok():
raise RequestError(
"failed to remove instance, http status = {}".format(
r.status_code))
return True
# Update an instance
@retry
def instance_update(self, param: InstanceUpdateParam) -> bool:
if not isinstance(param, InstanceUpdateParam):
raise ParamError(
"param should be type of InstanceUpdateParam, but {} found".
format(type(param)))
params = self.get_params(param)
r = self.winney.instance_update(data=params)
if not r.ok():
raise RequestError(
"failed to update instance, http status = {}".format(
r.status_code))
return True
# List instances of a service
@retry
def instance_list(self, param: InstanceListParam) -> InstanceListEntity:
if not isinstance(param, InstanceListParam):
raise ParamError(
"param should be type of InstanceListParam, but {} found".
format(type(param)))
params = self.get_params(param)
r = self.winney.instance_list(data=params)
if not r.ok():
raise RequestError(
"failed to list instance, http status = {}".format(
r.status_code))
return InstanceListEntity.loads(**r.json())
# Get instance details
@retry
def instance_get(self, param: InstanceGetParam) -> InstanceEntity:
if not isinstance(param, InstanceGetParam):
raise ParamError(
"param should be type of InstanceGetParam, but {} found".
format(type(param)))
params = self.get_params(param)
r = self.winney.instance_get(data=params)
if not r.ok():
raise RequestError(
"failed to get instance, http status = {}".format(
r.status_code))
return InstanceEntity.loads(**r.json())
# Send an instance heartbeat
@retry
def instance_beat(self, beat_info: BeatInfo) -> bool:
if not isinstance(beat_info, BeatInfo):
raise ParamError(
"beat_info should be type of BeatInfo, but {} found".format(
type(beat_info)))
param = InstanceBeatParam(beat_info)
params = self.get_params(param)
r = self.winney.instance_beat(data=params)
if not r.ok():
raise RequestError(
"failed to beat instance, http status = {}".format(
r.status_code))
return True
# View current system metrics
@retry
def metrics_get(self) -> MetricsEntity:
r = self.winney.metrics_get()
if not r.ok():
raise RequestError(
"failed to get metrics, http status = {}".format(
r.status_code))
return MetricsEntity.loads(**r.json())
# List cluster servers
@retry
def server_list(self, param: ServerListParam) -> List[ServerEntity]:
if not isinstance(param, ServerListParam):
raise ParamError(
"param should be type of ServerListParam, but {} found".format(
type(param)))
params = self.get_params(param)
r = self.winney.server_list(data=params)
if not r.ok():
raise RequestError(
"failed to list server, http status = {}".format(
r.status_code))
servers = r.json()["servers"]
servers = [ServerEntity.loads(**server)
for server in servers] if servers else []
return servers
# Get the current cluster leader
@retry
def leader_get(self) -> LeaderEntity:
r = self.winney.leader_get()
if not r.ok():
raise RequestError("failed to get leader, http status = {}".format(
r.status_code))
return LeaderEntity.loads(**r.json())
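# --- Usage sketch (illustrative, not part of the original client) ---
# A minimal example of how this client might be used. The address, the
# default "nacos"/"nacos" credentials and the empty namespace are assumptions
# for illustration only; adjust them for a real deployment.
if __name__ == "__main__":
    client = Nacos("127.0.0.1:8848", namespace_id="")
    print(client.login("nacos", "nacos"))        # -> LoginResponse
    client.user_create("demo_user", "demo_pwd")  # True on success
    print(client.switch_get())                   # system switches as a dict
    print(client.metrics_get())                  # -> MetricsEntity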
# | |
<filename>cs15211/CourseScheduleIII.py
__source__ = 'https://leetcode.com/problems/course-schedule-iii/'
# Time: O(nlog(n))
# Space: O(n)
#
# Description: Leetcode # 630. Course Schedule III
#
# There are n different online courses numbered from 1 to n.
# Each course has some duration(course length) t and closed on dth day.
# A course should be taken continuously for t days and must be finished before or on the dth day.
# You will start at the 1st day.
#
# Given n online courses represented by pairs (t,d), your task is to find the maximal number of courses that can be taken.
#
# Example:
# Input: [[100, 200], [200, 1300], [1000, 1250], [2000, 3200]]
# Output: 3
# Explanation:
# There're totally 4 courses, but you can take 3 courses at most:
# First, take the 1st course, it costs 100 days so you will finish it on the 100th day,
# and ready to take the next course on the 101st day.
# Second, take the 3rd course, it costs 1000 days so you will finish it on the 1100th day,
# and ready to take the next course on the 1101st day.
# Third, take the 2nd course, it costs 200 days so you will finish it on the 1300th day.
# The 4th course cannot be taken now, since you will finish it on the 3300th day, which exceeds the closed date.
# Note:
# The integer 1 <= d, t, n <= 10,000.
# You can't take two courses simultaneously.
#
# Companies
# WAP
# Related Topics
# Greedy
# Similar Questions
# Course Schedule Course Schedule II
import unittest
import heapq
# 400ms 57.69%
class Solution(object):
def scheduleCourse(self, courses):
"""
:type courses: List[List[int]]
:rtype: int
"""
pq = []
start = 0
for t, end in sorted(courses, key=lambda c: c[1]):
start += t
heapq.heappush(pq, -t)
while start > end:
start += heapq.heappop(pq)
return len(pq)
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
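# Also exercise the example from the problem description above:
# [[100, 200], [200, 1300], [1000, 1250], [2000, 3200]] -> 3 courses can be taken.
self.assertEqual(3, Solution().scheduleCourse([[100, 200], [200, 1300], [1000, 1250], [2000, 3200]]))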
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/course-schedule-iii/solution/
# Approach #1 Brute Force [Time Limit Exceeded]
# Algorithm
# The most naive solution will be to consider every possible permutation of the given courses
# Complexity Analysis
# Time complexity : O((n+1)!). A total of n! permutations are possible for the courses array of length n.
# For every permutation, we scan over the n elements of the permutation
# to find the number of courses that can be taken in each case.
# Space complexity : O(n). Each permutation needs n space.
Approach #2 Using Recursion with memoization [Time Limit Exceeded]
Complexity Analysis
Time complexity : O(n * d). The memo array of size n x d is filled once.
Here, n refers to the number of courses in the given course array
and d refers to the maximum value of the end day from all the end days in the courses array.
Space complexity : O(n * d). The memo array of size n x d is used.
# MLE
class Solution {
public int scheduleCourse(int[][] courses) {
Arrays.sort(courses, (a, b) -> a[1] - b[1]);
Integer[][] memo = new Integer[courses.length][courses[courses.length - 1][1] + 1];
return schedule(courses, 0, 0, memo);
}
public int schedule(int[][] courses, int i, int time, Integer[][] memo) {
if (i == courses.length) return 0;
if (memo[i][time] != null) return memo[i][time];
int taken = 0;
if (time + courses[i][0] <= courses[i][1]) {
taken = 1 + schedule(courses, i + 1, time + courses[i][0], memo);
}
int not_taken = schedule(courses, i + 1, time, memo);
memo[i][time] = Math.max(taken, not_taken);
return memo[i][time];
}
}
Approach #3 Iterative Solution [Time Limit Exceeded]
Complexity Analysis
Time complexity : O(n^2). We iterate over the count array of size n once.
For every element currently considered, we could scan backwards till the first element, giving O(n^2) complexity.
Sorting the count array takes O(nlog(n)) time.
Space complexity : O(1). Constant extra space is used.
# 1349ms 0%
class Solution {
public int scheduleCourse(int[][] courses) {
Arrays.sort(courses, (a, b) -> a[1] - b[1]);
int time = 0, count = 0;
for (int i = 0; i < courses.length; i++) {
if (time + courses[i][0] <= courses[i][1]) {
time += courses[i][0];
count++;
} else {
int max_i = i;
for (int j = 0; j < i; j++) {
if (courses[j][0] > courses[max_i][0]) max_i = j;
}
if (courses[max_i][0] > courses[i][0]) {
time += courses[i][0] - courses[max_i][0];
}
courses[max_i][0] = -1;
}
}
return count;
}
}
Approach #4 Optimized Iterative [Accepted]
Complexity Analysis
Time complexity : O(n * count). We iterate over a total of n elements of the courses array.
For every element, we can traverse backwards over at most count (final value) elements.
Space complexity : O(1). Constant extra space is used.
# 401ms 4%
class Solution {
public int scheduleCourse(int[][] courses) {
Arrays.sort(courses, (a, b) -> a[1] - b[1]);
int time = 0, count = 0;
for (int i = 0; i < courses.length; i++) {
if (time + courses[i][0] <= courses[i][1]) {
time += courses[i][0];
courses[count++] = courses[i];
} else {
int max_i = i;
for (int j = 0; j < count; j++) {
if (courses[j][0] > courses[max_i][0]) max_i = j;
}
if (courses[max_i][0] > courses[i][0]) {
time += courses[i][0] - courses[max_i][0];
courses[max_i] = courses[i];
}
}
}
return count;
}
}
Approach #5 Using Extra List [Accepted]
Complexity Analysis
Time complexity : O(n * m). We iterate over a total of n elements of the courses array.
For every element, we can traverse over at most m elements.
Here, m refers to the final length of the valid_list.
Space complexity : O(n). The valid_list can contain at most n courses.
# 639ms 1%
class Solution {
public int scheduleCourse(int[][] courses) {
Arrays.sort(courses, (a, b) -> a[1] - b[1]);
List< Integer > valid_list = new ArrayList < > ();
int time = 0;
for (int[] c: courses) {
if (time + c[0] <= c[1]) {
valid_list.add(c[0]);
time += c[0];
} else {
int max_i=0;
for(int i=1; i < valid_list.size(); i++) {
if(valid_list.get(i) > valid_list.get(max_i))
max_i = i;
}
if (valid_list.size() > max_i && valid_list.get(max_i) > c[0]) {
time += c[0] - valid_list.get(max_i);
valid_list.set(max_i, c[0]);
}
}
}
return valid_list.size();
}
}
Approach #6 Using Priority Queue [Accepted]
Complexity Analysis
Time complexity : O(nlog(n)). At most n elements are added to the queue.
Adding each element is followed by heapification, which takes O(log(n)) time.
Space complexity : O(n). The queue containing the durations of the courses taken can have at most n elements.
# 159ms 59%
class Solution {
public int scheduleCourse(int[][] courses) {
Arrays.sort(courses, (a, b) -> a[1] - b[1]);
PriorityQueue<Integer> pq = new PriorityQueue<>((a, b) -> (b - a));
int time = 0;
for (int[] c: courses) {
if (time + c[0] <= c[1]) {
pq.offer(c[0]);
time += c[0];
} else if (!pq.isEmpty() && pq.peek() > c[0]){
time += c[0] - pq.poll();
pq.offer(c[0]);
}
}
return pq.size();
}
}
#72.98% 157ms
class Solution {
public int scheduleCourse(int[][] courses) {
Arrays.sort(courses, (a, b) -> a[1] - b[1]);
int max = 0;
int time = 0;
int count = 0;
PriorityQueue<int[]> pq = new PriorityQueue<int[]>((a, b) -> b[0] - a[0]);
for(int i = 0; i < courses.length; i++){
if(courses[i][0] + time <= courses[i][1]){
time += courses[i][0];
pq.add(courses[i]);
count++;
} else{
if(pq.peek()[0] > courses[i][0]){
time -= pq.remove()[0];
time += courses[i][0];
pq.add(courses[i]);
}
}
}
return count;
}
}
#98.49% 88ms // without using lambda
class Solution {
public int scheduleCourse(int[][] courses) {
Arrays.sort(courses, new Comparator<int[]>(){
public int compare(int[] a, int[] b){
return a[1] - b[1];
}
});
int max = 0;
int time = 0;
int count = 0;
PriorityQueue<int[]> pq = new PriorityQueue<int[]>(new Comparator<int[]>(){
public int compare(int[] a, int[] b){
return b[0] - a[0];
}
});
for(int i = 0; i < courses.length; i++){
if(courses[i][0]+time <= courses[i][1]){
time+=courses[i][0];
pq.add(courses[i]);
count++;
}
else{
if(pq.peek()[0] > courses[i][0]){
time -= pq.remove()[0];
time += courses[i][0];
pq.add(courses[i]);
}
}
}
return count;
}
}
# 110ms 62.62%
class Solution {
public int scheduleCourse(int[][] courses) {
//Sort the courses by their deadlines (Greedy! We have to deal with courses with early deadlines first)
Arrays.sort(courses, (a, b) -> a[1] - b[1]);
//desc, start with course required the most hours to complete
PriorityQueue<Integer> pq = | |
'fifo_cpu' : 0b010,
'fast_serial' : 0b100},
value = 'uart')})
elif PRODUCT_IDS[self.owner.device.idProduct] in ['FT4232H']:
self.registers['port_cfg'] = hwio.register.Register(description = 'Port configuration',
address = bitstring.Bits('0x00'),
length = 16,
bitfields = {'port_d_driver' : hwio.register.Bitfield(bits = '[15]',
description = 'Port D driver',
fmt = 'uint',
values = {'d2xx' : 0b0,
'vcp' : 0b1},
value = 'vcp'),
'port_d_type' : hwio.register.Bitfield(bits = '[14:12]',
description = 'Port D type',
fmt = 'uint',
values = {'uart' : 0b000},
value = 'uart'),
'port_b_driver' : hwio.register.Bitfield(bits = '[11]',
description = 'Port B driver',
fmt = 'uint',
values = {'d2xx' : 0b0,
'vcp' : 0b1},
value = 'vcp'),
'port_b_type' : hwio.register.Bitfield(bits = '[10:8]',
description = 'Port B type',
fmt = 'uint',
values = {'uart' : 0b000},
value = 'uart'),
'port_c_driver' : hwio.register.Bitfield(bits = '[7]',
description = 'Port C driver',
fmt = 'uint',
values = {'d2xx' : 0b0,
'vcp' : 0b1},
value = 'vcp'),
'port_c_type' : hwio.register.Bitfield(bits = '[6:4]',
description = 'Port C type',
fmt = 'uint',
values = {'uart' : 0b000},
value = 'uart'),
'port_a_driver' : hwio.register.Bitfield(bits = '[3]',
description = 'Port A driver',
fmt = 'uint',
values = {'d2xx' : 0b0,
'vcp' : 0b1},
value = 'vcp'),
'port_a_type' : hwio.register.Bitfield(bits = '[2:0]',
description = 'Port A type',
fmt = 'uint',
values = {'uart' : 0b000},
value = 'uart')})
if PRODUCT_IDS[self.owner.device.idProduct] in ['FT232H', 'FT2232H', 'FT4232H']:
self.registers['usb_vid'] = hwio.register.Register(description = 'USB vendor ID (idVendor)',
address = bitstring.Bits('0x01'),
length = 16,
bitfields = {'usb_vid' : hwio.register.Bitfield(bits = '[15:0]',
description = 'USB vendor ID (idVendor)',
fmt = 'uint',
values = {'FTDI' : 0x0403},
value = 'FTDI')})
self.registers['usb_pid'] = hwio.register.Register(description = 'USB product ID (idProduct)',
address = bitstring.Bits('0x02'),
length = 16,
bitfields = {'usb_pid' : hwio.register.Bitfield(bits = '[15:0]',
description = 'USB product ID (idProduct)',
fmt = 'uint',
values = {'FT232H' : 0x6014,
'FT2232H' : 0x6010,
'FT4232H' : 0x6011},
value = PRODUCT_IDS[self.owner.device.idProduct])})
self.registers['usb_dev_rel_no'] = hwio.register.Register(description = 'USB device release number (bcdDevice)',
address = bitstring.Bits('0x03'),
length = 16,
bitfields = {'usb_dev_rel_no' : hwio.register.Bitfield(bits = '[15:0]',
description = 'USB device release number (bcdDevice)',
fmt = 'uint',
values = {'FT232H' : 0x0900,
'FT2232H' : 0x0700,
'FT4232H' : 0x0800},
value = PRODUCT_IDS[self.owner.device.idProduct])})
self.registers['usb_cfg_desc'] = hwio.register.Register(description = 'USB configuration descriptor (bmAttributes, bMaxPower)',
address = bitstring.Bits('0x04'),
length = 16,
bitfields = {'max_power' : hwio.register.Bitfield(bits = '[15:8]',
description = 'Maximum current consumption in 2 mA steps (bMaxPower)',
fmt = 'uint',
value = 50), # 50 * 2 mA = 100 mA
'reserved_1' : hwio.register.Bitfield(bits = '[7]',
description = 'reserved (bmAttributes)',
fmt = 'uint',
value = 0b1),
'power_source' : hwio.register.Bitfield(bits = '[6]',
description = 'Power source (bmAttributes)',
fmt = 'uint',
values = {'bus' : 0b0,
'self' : 0b1},
value = 'bus'),
'remote_wakeup' : hwio.register.Bitfield(bits = '[5]',
description = 'Remote wakeup (bmAttributes)',
fmt = 'uint',
values = {'no' : 0b0,
'yes' : 0b1},
value = 'no'),
'reserved_0' : hwio.register.Bitfield(bits = '[4:0]',
description = 'reserved (bmAttributes)',
fmt = 'uint',
value = 0b00000)})
if PRODUCT_IDS[self.owner.device.idProduct] in ['FT232H', 'FT2232H']:
self.registers['chip_cfg'] = hwio.register.Register(description = 'Chip configuration',
address = bitstring.Bits('0x05'),
length = 16,
bitfields = {'reserved_1' : hwio.register.Bitfield(bits = '[15:4]',
description = 'reserved',
fmt = 'uint',
value = 0b000000000000),
'use_ser_no' : hwio.register.Bitfield(bits = '[3]',
description = 'Use serial number',
fmt = 'uint',
values = {'no' : 0b0,
'yes' : 0b1},
value = 'no' if serial_number is None else 'yes'),
'suspend_pd' : hwio.register.Bitfield(bits = '[2]',
description = 'Enable internal pull-downs on pins in suspend mode',
fmt = 'uint',
values = {'no' : 0b0,
'yes' : 0b1},
value = 'no'),
'reserved_0' : hwio.register.Bitfield(bits = '[1:0]',
description = 'reserved',
fmt = 'uint',
value = 0b00)})
elif PRODUCT_IDS[self.owner.device.idProduct] in ['FT4232H']:
self.registers['chip_cfg'] = hwio.register.Register(description = 'Chip configuration',
address = bitstring.Bits('0x05'),
length = 16,
bitfields = {'dd7_cfg' : hwio.register.Bitfield(bits = '[15]',
description = 'Pin DD7 configuration',
fmt = 'uint',
values = {'ri#' : 0b0,
'txden' : 0b1},
value = 'ri#'),
'cd7_cfg' : hwio.register.Bitfield(bits = '[14]',
description = 'Pin CD7 configuration',
fmt = 'uint',
values = {'ri#' : 0b0,
'txden' : 0b1},
value = 'ri#'),
'bd7_cfg' : hwio.register.Bitfield(bits = '[13]',
description = 'Pin BD7 configuration',
fmt = 'uint',
values = {'ri#' : 0b0,
'txden' : 0b1},
value = 'ri#'),
'ad7_cfg' : hwio.register.Bitfield(bits = '[12]',
description = 'Pin AD7 configuration',
fmt = 'uint',
values = {'ri#' : 0b0,
'txden' : 0b1},
value = 'ri#'),
'reserved_1' : hwio.register.Bitfield(bits = '[11:4]',
description = 'reserved',
fmt = 'uint',
value = 0b00000000),
'use_ser_no' : hwio.register.Bitfield(bits = '[3]',
description = 'Use serial number',
fmt = 'uint',
values = {'no' : 0b0,
'yes' : 0b1},
value = 'no' if serial_number is None else 'yes'),
'suspend_pd' : hwio.register.Bitfield(bits = '[2]',
description = 'Enable internal pull-downs on pins in suspend mode',
fmt = 'uint',
values = {'no' : 0b0,
'yes' : 0b1},
value = 'no'),
'reserved_0' : hwio.register.Bitfield(bits = '[1:0]',
description = 'reserved',
fmt = 'uint',
value = 0b00)})
if PRODUCT_IDS[self.owner.device.idProduct] in ['FT232H']:
self.registers['pin_cfg'] = hwio.register.Register(description = 'Pin configuration',
address = bitstring.Bits('0x06'),
length = 16,
bitfields = {'reserved' : hwio.register.Bitfield(bits = '[15:8]',
description = 'reserved',
fmt = 'uint',
value = 0b00000000),
'port_ac_hyst' : hwio.register.Bitfield(bits = '[7]',
description = 'Port AC input hysteresis',
fmt = 'uint',
values = {'no' : 0b0,
'yes' : 0b1},
value = hysteresis),
'port_ac_slew' : hwio.register.Bitfield(bits = '[6]',
description = 'Port AC output slew',
fmt = 'uint',
values = {'fast' : 0b0,
'slow' : 0b1},
value = slew),
'port_ac_drive' : hwio.register.Bitfield(bits = '[5:4]',
description = 'Port AC output drive',
fmt = 'uint',
values = {'4 mA' : 0b00,
'8 mA' : 0b01,
'12 mA' : 0b10,
'16 mA' : 0b11},
value = drive),
'port_ad_hyst' : hwio.register.Bitfield(bits = '[3]',
description = 'Port AD input hysteresis',
fmt = 'uint',
values = {'no' : 0b0,
'yes' : 0b1},
value = hysteresis),
'port_ad_slew' : hwio.register.Bitfield(bits = '[2]',
description = 'Port AD output slew',
fmt = 'uint',
values = {'fast' : 0b0,
'slow' : 0b1},
value = slew),
'port_ad_drive' : hwio.register.Bitfield(bits = '[1:0]',
description = 'Port AD output drive',
fmt = 'uint',
values = {'4 mA' : 0b00,
'8 mA' : 0b01,
'12 mA' : 0b10,
'16 mA' : 0b11},
value = drive)})
elif PRODUCT_IDS[self.owner.device.idProduct] in ['FT2232H']:
self.registers['pin_cfg'] = hwio.register.Register(description = 'Pin configuration',
address = bitstring.Bits('0x06'),
length = 16,
bitfields = {'port_bc_hyst' : hwio.register.Bitfield(bits = '[15]',
description = 'Port BC input hysteresis',
fmt = 'uint',
values = {'no' : 0b0,
'yes' : 0b1},
value = hysteresis),
'port_bc_slew' : hwio.register.Bitfield(bits = '[14]',
description = 'Port BC output slew',
fmt = 'uint',
values = {'fast' : 0b0,
'slow' : 0b1},
value = slew),
'port_bc_drive' : hwio.register.Bitfield(bits = '[13:12]',
description = 'Port BC output drive',
fmt = 'uint',
values = {'4 mA' : 0b00,
'8 mA' : 0b01,
'12 mA' : 0b10,
'16 mA' : 0b11},
value = drive),
'port_bd_hyst' : hwio.register.Bitfield(bits = '[11]',
description = 'Port BD input hysteresis',
fmt = 'uint',
values = {'no' : 0b0,
'yes' : 0b1},
value = hysteresis),
'port_bd_slew' : hwio.register.Bitfield(bits = '[10]',
description = 'Port BD output slew',
fmt = 'uint',
values = {'fast' : 0b0,
'slow' : 0b1},
value = slew),
'port_bd_drive' : hwio.register.Bitfield(bits = '[9:8]',
description = 'Port BD output drive',
fmt = 'uint',
values = {'4 mA' : 0b00,
'8 mA' : 0b01,
'12 mA' : 0b10,
'16 mA' : 0b11},
value = drive),
'port_ac_hyst' : hwio.register.Bitfield(bits = '[7]',
description = 'Port AC input hysteresis',
fmt = 'uint',
values = {'no' : 0b0,
'yes' : 0b1},
value = hysteresis),
'port_ac_slew' : hwio.register.Bitfield(bits = '[6]',
description = 'Port AC output slew',
fmt = 'uint',
values = {'fast' : 0b0,
'slow' : 0b1},
value = slew),
'port_ac_drive' : hwio.register.Bitfield(bits = '[5:4]',
description = 'Port AC output drive',
fmt = 'uint',
values = {'4 mA' : 0b00,
'8 mA' : 0b01,
'12 mA' : 0b10,
'16 mA' : 0b11},
value = drive),
'port_ad_hyst' : hwio.register.Bitfield(bits = '[3]',
description = 'Port AD input hysteresis',
fmt = 'uint',
values = {'no' : 0b0,
'yes' : 0b1},
value = hysteresis),
'port_ad_slew' : hwio.register.Bitfield(bits = '[2]',
description = 'Port | |
<gh_stars>0
import ddh1 as ddh
import ddh1.taxonomy as taxonomy
import requests
import copy
import datetime
import time
import json
import os
import yaml
import re
# NB: discussion of new API format is here: http://jira.worldbank.org/jira/browse/DDH2-170
def search(fields=[], filter={}, obj_type='dataset'):
'''Query the search API
Parameters:
fields: an array of fields to return (in addition to 'title')
filter: a dict of field names and values. If the first (unnamed) argument
is a dict then it is assumed to be a filter
obj_type: type of object to search: 'dataset' or 'resource'
Returns:
a generator object for iterating over search results
Examples:
for nid,dataset in ddh.dataset.search({'field_wbddh_data_type': 'Time Series'}):
print dataset['title']
for nid,dataset in ddh.dataset.search(['created'], {'field_wbddh_data_type': 'geospatial'}):
print nid, dataset['title'], dataset['created']
'''
# if 1st argument is a dict then it's the filter, not fields
if type(fields) is dict:
filter = fields
fields = []
query = copy.copy(filter) # shallow copy should suffice
taxonomy.update(query, filter)
query['type'] = obj_type
for k,v in query.items():
if v == None:
raise ddh.TaxonomyError(k, filter[k])
query = {'filter['+k+']':v for k,v in iter(query.items())}
_fields = set(fields)
_fields.update(['title'])
# NB: nid must be the first element always
query['fields'] = '[nid,' + ','.join(_fields) + ',]'
totalRecords = None
recordsRead = 0
query['limit'] = str(250)
while totalRecords is None or recordsRead < totalRecords:
query['offset'] = str(recordsRead)
# crude urlencode so as not to escape the brackets
query_string = '&'.join([k + '=' + str(v) for k,v in query.items()])
url = '{}://{}/search-service/search_api/datasets?{}'.format(ddh.protocol, ddh.host, query_string)
ddh.debug_report('Search - {}'.format(url))
response = get(url)
totalRecords = response['count']
if type(response['result']) is dict:
recordsRead += len(response['result'])
for k,v in response['result'].items():
yield k,v
def get(url, obj_type='node'):
url = str(url)
if re.match(r'^\d+$', url):
url = '{}://{}/api/dataset/{}/{}'.format(ddh.protocol, ddh.host, obj_type, url)
response = requests.get(url, verify=ddh.session_verify, cookies={ddh.session_key: ddh.session_value})
try:
result = response.json()
if type(result) is not dict:
return None
return result
except:
raise ddh.APIError('get', url, response.text)
def field(rec, field):
'''Attempts to return a simple field value from the given dataset record
'''
if field in rec and (rec[field] or type(rec[field]) in [int, float]):
if type(rec[field]) is dict and rec[field].get('und') and rec[field]['und'][0].get('tid'):
return taxonomy.term(field, rec[field]['und'][0]['tid'])
elif type(rec[field]) is dict and rec[field].get('und') and rec[field]['und'][0].get('timezone'):
value = rec[field]['und'][0].get('value') or None
if value:
value = datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S')
return value
elif type(rec[field]) is dict and rec[field].get('und') and 'value' in rec[field]['und'][0]:
value = rec[field]['und'][0].get('value') or None
return value
elif field in ['created', 'changed']:
# this could be either formatted or POSIX depending on where the record came from
try:
return datetime.datetime.fromtimestamp(int(rec[field]))
except:
return datetime.datetime.strptime(rec[field], '%Y-%m-%d %H:%M:%S')
elif field in ['field_dataset_ref']:
return rec[field]['und'][0]['target_id']
elif False and field in ['field_wbddh_dsttl_upi', 'field_wbddh_collaborator_upi']:
# disabled for now
target = rec[field]['und'][0]['target_id']
if target and target not in users:
user = get(target, 'user')
if user:
users[target] = user['field_wbddh_first_name']['und'][0]['value'] + ' ' + user['field_wbddh_last_name']['und'][0]['value']
return users.get(target,'')
else:
# hopefully by now the value is a scalar
return rec[field]
return None
def ds_template():
template = {
'title': None,
'body': None,
'type': 'dataset',
'status': '1',
'moderation_next_state': 'published',
'field_wbddh_data_type': None,
'field_license_wbddh': None,
'field_exception_s_': None,
'field_wbddh_dsttl_upi': None,
'field_wbddh_responsible': 'No',
'resources': [],
}
# NB: 'frequency' is actually periodicity
tax_fields = ['wbddh_data_class', 'frequency', 'topic', 'granularity_list',
'wbddh_country', 'wbddh_economy_coverage', 'wbddh_languages_supported']
for elem in tax_fields:
template['field_'+elem] = taxonomy.get('field_'+elem, None, default=True)
return template
def rs_template():
template = {
'title': None,
'type': 'resource',
'status': '1',
'moderation_next_state': 'published',
}
tax_fields = ['wbddh_resource_type', 'wbddh_data_class']
for elem in tax_fields:
template['field_'+elem] = taxonomy.get('field_'+elem, None, default=True)
return template
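# --- Illustrative sketch (not part of the original module) ---
# How the two templates above might be combined with new_dataset() (defined
# further below). The titles, description and file path are placeholders, and
# a valid DDH session (ddh.host, ddh.session_* and ddh.token) is assumed.
def _example_publish():
    ds = ds_template()
    ds['title'] = 'Example dataset'
    ds['body'] = 'Short description of the example dataset'
    rs = rs_template()
    rs['title'] = 'Example resource'
    rs['upload'] = '/tmp/example.csv'  # hypothetical local file to attach
    ds['resources'].append(rs)
    return new_dataset(ds)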
def _set_values(d, elem):
for k,v in elem.items():
if v is None:
continue
if k in ['_field_tags']:
# freetagging fields have a different format than other taxonomy fields
if type(v) is list:
tags = v
else:
tags = [v]
d[k] = {'und': { 'value_field': ' '.join(['"" {} ""'.format(i) for i in tags]) }}
elif k != 'moderation_next_state' and (k == 'field_tags' or taxonomy.is_tax(k)):
if type(v) is not list:
v = [v]
d[k] = {'und': [{'tid': x} for x in v]}
elif k in ['body', 'field_wbddh_copyright', 'field_wbddh_type_of_license', 'field_wbddh_source', 'field_wbddh_publisher_name', 'field_wbddh_search_tags', 'field_ddh_external_contact_email', 'field_wbddh_depositor_notes', 'field_ddh_harvest_sys_id', 'field_wbddh_reference_system', 'field_related_links_and_publicat', 'field_external_metadata', 'field_wbddh_responsible']:
if type(v) is str:
d[k] = {'und': [{'value': v}]}
else:
d[k] = {'und': [v]}
elif k in ['field_link_api']:
if type(v) is str:
d[k] = {'und': [{'url': v}]}
else:
d[k] = {'und': [v]}
elif k in ['field_wbddh_dsttl_upi', 'field_wbddh_collaborator_upi']:
if type(v) is not list:
v = [v]
d[k] = {'und': [{'target_id': x} for x in v]}
elif k in ['og_group_ref']:
d[k] = {'und': [{'target_id': v, 'field_mode': 'collections'}]}
elif k in ['field_wbddh_release_date', 'field_wbddh_modified_date', 'field_wbddh_start_date', 'field_wbddh_end_date']:
d[k] = {'und': [{
'value': v.strftime('%Y-%m-%d %H:%M:%S'),
'timezone': 'America/New_York',
'timezone_db': 'America/New_York',
'date_type': 'datetime',
}]}
elif k in ['field_wbddh_start_date', 'field_wbddh_end_date']:
# NB: unreachable leftover -- these two fields are already handled by the
# branch above; only the last of the repeated assignments below would apply.
d[k] = {'und': [{ 'value': {
'day': 1,
'month': 1,
'year': 2001
}}]}
d[k] = {'und': [{ 'value': v.strftime('%Y-%m-%d %H:%M:%S') }]}
d[k] = {'und': [{ 'value': str(int(time.mktime(v.timetuple()))) }]}
d[k] = {'und': [{ 'value': 20010101 }]}
elif k in ['field_wbddh_time_periods']:
# note: this currently supports only 1 value in the field, but this could be easily improved
d[k] = {'und': [{ 'value': v.strftime('%Y-%m-%d %H:%M:%S'), 'value2': v.strftime('%Y-%m-%d %H:%M:%S') }]}
d[k]['und'][0]['show_todate'] = 0
else:
d[k] = v
def new_object(ds):
obj = {}
_set_values(obj, ds)
return obj
def update_dataset(nid, ds):
'''Updates an existing dataset
Parameters:
nid: the node ID of a dataset
ds: a (partial) dataset object
Returns:
the node ID of the modified dataset, if successful
'''
obj = new_object(ds)
# workflow status defaults to published if undefined
if not 'moderation_next_state' in obj:
obj['moderation_next_state'] = 'published'
url = '{}://{}/api/dataset/node/{}'.format(ddh.protocol, ddh.host, nid)
debug_report('Update dataset - {}'.format(url), obj)
response = requests.put(url, cookies={ddh.session_key: ddh.session_value}, headers={'X-CSRF-Token': ddh.token}, json=obj)
try:
data = safe_json(response)
return data['nid']
except:
raise APIError('put', nid, response.text)
def append_resource(nid, rsrc, weight=None):
'''Appends a new resource to an existing dataset
Parameters:
nid: the node ID of a dataset
rsrc: a resource object (returned by rs_template and modified)
Returns:
the node ID of the new resource, if successful
'''
if type(nid) is dict:
id = nid['id']
nid = nid['nid']
else:
id = nid
e = copy.deepcopy(rsrc)
post_info = None
if e.get('upload'):
post_info = {'files[1]': open(e['upload'],'rb'), 'field_name': (None,'field_upload'), 'attach': (None,'1')}
del e['upload']
obj = new_object(e)
obj['field_dataset_ref'] = {'und': [{'target_id': nid}]}
if weight is not None:
obj['field_resource_weight'] = {'und': [{'value': weight}]}
url = '{}://{}/api/dataset/node'.format(ddh.protocol, ddh.host)
debug_report('Resource Create - {}'.format(url), obj)
response = requests.post(url, cookies={ddh.session_key: ddh.session_value}, headers={'X-CSRF-Token': ddh.token}, json=obj)
try:
data = safe_json(response)
rsrc_nid = data['nid']
except:
raise APIError('resource', id, response.text)
# attach files
if post_info is not None:
url = '{}://{}/api/dataset/node/{}/attach_file'.format(ddh.protocol, ddh.host, rsrc_nid)
response = requests.post(url, cookies={ddh.session_key: ddh.session_value}, headers={'X-CSRF-Token': ddh.token}, files=post_info)
try:
data = safe_json(response)
fid = data[0]['fid']
except:
raise APIError('upload to {}'.format(rsrc_nid), id, response.text)
return rsrc_nid
def new_dataset(ds, id=None):
'''adds a new dataset and child resources
Parameters:
ds: a dataset object (returned by ds_template and modified)
id: an optional record identifier, included when exceptions are raised
Returns:
an object describing the new dataset, if successful
'''
if id is None:
id = ds.get('field_ddh_harvest_sys_id', None)
new_ds = {'nid': None, 'id': id, 'resources': None}
# this variable determines how the module tries to attach child resources to the dataset
# 'concurrent' - resource references are included with the initial dataset POST (most efficient)
# 'posthoc' - resource references are attached to dataset in a subsequent PUT, as one object
# 'posthoc2' - like posthoc, but PUT request is repeated multiple times until all resources are attached (kludgy but works)
# 'posthoc-single' - attached to data in multiple subsequent PUTs, one per resource (least efficient)
#
# Currently the API only works in 'posthoc2' mode
# 'dataset_first' - datasets are created first. resources are appended by including the field_dataset_ref element
# which appends them to the dataset
rsrc_approach = 'dataset_first'
workflow_state = ds.get('moderation_next_state', 'draft')
# step B-1: create dataset with resources attached
e = copy.deepcopy(ds)
del e['resources']
obj = new_object(e)
url = '{}://{}/api/dataset/node'.format(ddh.protocol, ddh.host)
debug_report('Dataset Create - {}'.format(url), obj)
response = requests.post(url, cookies={ddh.session_key: ddh.session_value}, headers={'X-CSRF-Token': ddh.token}, json=obj)
try:
data = safe_json(response)
new_ds['nid'] = data['nid']
except:
raise APIError(e['type'], id, response.text)
# step B-2: create resources
resource_references = []
for elem in ds['resources']:
rsrc_id = append_resource(new_ds, elem, len(resource_references))
resource_references.append({'nid': rsrc_id, 'title': elem['title']})
new_ds['resources'] = resource_references
# NB: | |
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.utils import timezone
from django.http import Http404
from django.core.exceptions import MultipleObjectsReturned
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.permissions import IsAuthenticated
from rest_framework.schemas import ManualSchema
import coreapi
import coreschema
from restui.models.ensembl import EnsemblTranscript
from restui.models.uniprot import UniprotEntry
from restui.models.mappings import MappingView
from restui.models.mappings import ReleaseMappingHistory
from restui.models.annotations import CvUeStatus, CvUeLabel
from restui.models.annotations import UeUnmappedEntryComment
from restui.models.annotations import UeUnmappedEntryLabel
from restui.models.annotations import UeUnmappedEntryStatus
from restui.serializers.unmapped import UnmappedEntrySerializer
from restui.serializers.unmapped import UnmappedSwissprotEntrySerializer
from restui.serializers.unmapped import UnmappedEnsemblEntrySerializer
from restui.serializers.unmapped import CommentSerializer
from restui.serializers.unmapped import UnmappedEntryCommentsSerializer
from restui.serializers.annotations import LabelsSerializer
from restui.serializers.annotations import UnmappedEntryLabelSerializer
from restui.serializers.annotations import UnmappedEntryCommentSerializer
from restui.serializers.annotations import UnmappedEntryStatusSerializer
from restui.pagination import UnmappedEnsemblEntryPagination
from restui.lib.mail import GiftsEmail
from django.conf import settings
def get_uniprot_entry(mapping_view_id):
try:
mapping_view = MappingView.objects.get(pk=mapping_view_id)
uniprot_entry = UniprotEntry.objects.get(pk=mapping_view.uniprot_id)
except (MappingView.DoesNotExist, UniprotEntry.DoesNotExist):
raise Http404
return uniprot_entry
class UnmappedDetailed(APIView):
"""
Retrieve a single "unmapped" entry, includes related entries.
"""
schema = ManualSchema(
description="Retrieve a single unmapped entry, includes related entries.",
fields=[
coreapi.Field(
name="id",
required=True,
location="path",
schema=coreschema.Integer(),
description="A unique integer value identifying the mapping view id"
),
]
)
def get(self, request, mapping_view_id):
try:
mapping_view = MappingView.objects.get(pk=mapping_view_id)
except MappingView.DoesNotExist:
raise Http404
# this is supposed to be called for an unmapped entry
if (
mapping_view.uniprot_mapping_status == 'mapped' and
mapping_view.mapping_id is not None
):
return Response(
{"error":"Entry is mapped with id {}".format(mapping_view.mapping_id)},
status=status.HTTP_400_BAD_REQUEST
)
email_recipients_list = {
recipient_id: recipient_details.get('name')
for (recipient_id, recipient_details) in settings.EMAIL_RECIPIENT_LIST.items()
}
data = {
'entry': mapping_view,
'relatedEntries': list(
MappingView.objects.filter(
grouping_id=mapping_view.grouping_id
).exclude(pk=mapping_view_id)
),
'emailRecipientsList': email_recipients_list
}
serializer = UnmappedEntrySerializer(data)
serializer.data['entry']['status'] = MappingView.status_description(serializer.data['entry']['status'])
return Response(serializer.data)
class UnmappedEntries(APIView):
"""
Present the details for Swissprot/Ensembl unmapped entities for the latest
release for a given species
"""
# pagination_class = PageNumberPagination # settings.DEFAULT_PAGINATION_CLASS
schema = ManualSchema(
description=(
"Present the details for Swissprot/Ensembl unmapped entities for the "
"latest release for a given species"
),
fields=[
coreapi.Field(
name="taxid",
required=True,
location="path",
schema=coreschema.Integer(),
description="Taxonomy id"
),
coreapi.Field(
name="source",
required=True,
location="path",
schema=coreschema.String(),
description="Source, either 'swissprot' or 'ensembl'"
),
]
)
def get(self, request, taxid, source):
if source == 'swissprot':
# the Swissprot entries:
# uniprot_entry_history for release X minus mapping_history for release X
# = all unmapped uniprot entries for release X
# that all can be filtered by entry type
# find the latest uniprot release corresponding to the species
release_mapping_history = ReleaseMappingHistory.objects.filter(
uniprot_taxid=taxid
).latest(
'release_mapping_history_id'
)
# get the Uniprot entries corresponding to that species and uniprot release
release_uniprot_entries = UniprotEntry.objects.select_related(
'entry_type'
).filter(
uniprot_tax_id=taxid,
uniprotentryhistory__release_version=release_mapping_history.uniprot_release,
entry_type__description__icontains='swiss'
)
# find the mapped uniprot entries for the release and species
release_mapped_uniprot_entries = UniprotEntry.objects.select_related(
'entry_type'
).filter(
mapping__mapping_history__release_mapping_history=release_mapping_history,
entry_type__description__icontains='swiss'
).distinct()
# the unmapped swiss-prot entries
# NOTE: using select_related('entry_type') to speed up query
# generates 'Index out of range' error, using it in the two sets
# above works
release_unmapped_sp_entries = release_uniprot_entries.difference(
release_mapped_uniprot_entries
)
data = list(
map(
lambda ue: {
'uniprotAccession': ue.uniprot_acc,
"entryType": ue.entry_type.description,
"isCanonical": not ue.canonical_uniprot_id,
"alias": ue.alias if ue.alias else None,
"gene_symbol": ue.gene_symbol,
"gene_accession": ue.chromosome_line,
"length": ue.length,
"protein_existence_id": ue.protein_existence_id
},
release_unmapped_sp_entries.order_by('uniprot_acc')
)
)
page = self.paginate_queryset(data)
if page is not None:
serializer = UnmappedSwissprotEntrySerializer(page, many=True)
return self.get_paginated_response(serializer.data)
# if pagination is not defined
serializer = UnmappedSwissprotEntrySerializer(data, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
if source == 'ensembl':
release_mapping_history = ReleaseMappingHistory.objects.filter(
ensembl_species_history__ensembl_tax_id=taxid
).latest(
'release_mapping_history_id'
)
release_mapped_transcripts = EnsemblTranscript.objects.select_related(
'gene'
).filter(
mapping__mapping_history__release_mapping_history=release_mapping_history
).distinct()
"""
certain SQL operations, e.g. values(), count(), order_by, don't work
on union/intersection/difference
see https://docs.djangoproject.com/en/1.11/ref/models/querysets/#django.db.models.query.QuerySet.union
"""
release_unmapped_transcripts = EnsemblTranscript.objects.exclude(
transcript_id__in=release_mapped_transcripts.values_list(
'transcript_id', flat=True
)
)
page = self.paginate_queryset(release_unmapped_transcripts)
if page is not None:
serializer = UnmappedEnsemblEntrySerializer(page, many=True)
return self.get_paginated_response(serializer.data)
# if pagination is not defined
serializer = UnmappedEnsemblEntrySerializer(release_unmapped_transcripts, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
raise Http404('Unknown source')
@property
def paginator(self):
"""
The paginator instance associated with the view, or `None`.
"""
if not hasattr(self, '_paginator'):
source = self.kwargs['source']
if source == 'ensembl':
self._paginator = UnmappedEnsemblEntryPagination() # pylint: disable=attribute-defined-outside-init
elif source == 'swissprot':
self._paginator = LimitOffsetPagination() # pylint: disable=attribute-defined-outside-init
else:
raise Exception('Unknown source')
return self._paginator
def paginate_queryset(self, queryset):
"""
Return a single page of results, or `None` if pagination is disabled.
"""
if self.paginator is None:
return None
return self.paginator.paginate_queryset(
queryset,
self.request,
view=self
)
def get_paginated_response(self, data):
"""
Return a paginated style `Response` object for the given output data.
"""
assert self.paginator is not None
return self.paginator.get_paginated_response(data)
class AddDeleteLabel(APIView):
"""
Add or delete a label associated to the given unmapped entry
"""
permission_classes = (IsAuthenticated,)
schema = ManualSchema(
description="Add or delete label a associated to the given unmapped entry",
fields=[
coreapi.Field(
name="id",
required=True,
location="path",
schema=coreschema.Integer(),
description="The mapping view id associated to the unmapped entry"
),
coreapi.Field(
name="label_id",
required=True,
location="path",
schema=coreschema.Integer(),
description="A unique integer value identifying the label"
),
]
)
def post(self, request, mapping_view_id, label_id):
uniprot_entry = get_uniprot_entry(mapping_view_id)
entry_labels = UeUnmappedEntryLabel.objects.filter(
uniprot=uniprot_entry,
label=label_id
)
if entry_labels:
# label already attached, ignore
return Response(status=status.HTTP_204_NO_CONTENT)
try:
serializer = UnmappedEntryLabelSerializer(
data={
'time_stamp': timezone.now(),
'user_stamp': request.user,
'label': label_id,
'uniprot': uniprot_entry.uniprot_id
}
)
except KeyError:
raise Http404("Must provide valid label")
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, mapping_view_id, label_id):
uniprot_entry = get_uniprot_entry(mapping_view_id)
# delete all labels with the given description attached to the mapping
# TODO: only those associated to the given user
entry_labels = UeUnmappedEntryLabel.objects.filter(
uniprot=uniprot_entry,
label=label_id
)
if entry_labels:
entry_labels.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
raise Http404
class GetLabels(APIView):
"""
Retrieve all labels for a given unmapped entry
"""
schema = ManualSchema(
description="Retrieve all labels for a given unmapped entry",
fields=[
coreapi.Field(
name="id",
required=True,
location="path",
schema=coreschema.Integer(),
description="The mapping view id associated to the given unmapped entry"
),
]
)
def get(self, request, mapping_view_id):
uniprot_entry = get_uniprot_entry(mapping_view_id)
all_labels = CvUeLabel.objects.all()
entry_labels = uniprot_entry.labels.values_list('label', flat=True)
label_map = []
for label in all_labels:
label_map.append({
'label': label.description,
'id': label.id,
'status': label.id in entry_labels
})
data = {'labels': label_map}
serializer = LabelsSerializer(data)
return Response(serializer.data)
class AddGetComments(APIView):
"""
Add a comment/Retrieve all comments relative to a given unmapped entry
"""
permission_classes = (IsAuthenticated,)
schema = ManualSchema(
description="Add a comment/Retrieve all comments relative to a given unmapped entry.",
fields=[
coreapi.Field(
name="id",
required=True,
location="path",
schema=coreschema.Integer(),
description="The mapping view id associated to the unmapped entry"
),
]
)
def get(self, request, mapping_view_id):
uniprot_entry = get_uniprot_entry(mapping_view_id)
# fetch mapping comment history, exclude deleted comments
entry_comments = uniprot_entry.comments.filter(deleted=False).order_by('-time_stamp')
# comments are editable if they belong to the requesting user
comments = map(
lambda c: {
'commentId': c.id,
'text': c.comment,
'timeAdded': c.time_stamp,
'user': c.user_stamp.full_name,
'editable': request.user and request.user == c.user_stamp
},
entry_comments
)
data = {
'mapping_view_id': mapping_view_id,
'comments': list(comments)
}
serializer = UnmappedEntryCommentsSerializer(data)
return Response(serializer.data)
def post(self, request, mapping_view_id):
uniprot_entry = get_uniprot_entry(mapping_view_id)
try:
serializer = UnmappedEntryCommentSerializer(
data={
'time_stamp': timezone.now(),
'user_stamp': request.user,
'comment': request.data['text'],
'uniprot': uniprot_entry.uniprot_id,
'deleted': False
}
)
except KeyError:
raise Http404("Must provide comment")
if serializer.is_valid():
serializer.save()
email = GiftsEmail(request)
build_comments_email = email.build_unmapped_comments_email(mapping_view_id)
if build_comments_email:
email.send()
return Response(
serializer.data,
status=status.HTTP_201_CREATED
)
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
class EditDeleteComment(APIView):
"""
Edit (PUT) and delete (DELETE) a comment for a given unmapped entry
"""
permission_classes = (IsAuthenticated,)
schema = ManualSchema(
description="Edit or delete a comment for a given unmapped entry.",
fields=[
coreapi.Field(
name="id",
required=True,
location="path",
schema=coreschema.Integer(),
description="The mapping_view id associated to the unmapped entry"
),
coreapi.Field(
name="comment_id",
required=True,
location="path",
schema=coreschema.Integer(),
description="A unique integer value identifying the comment"
),
]
)
def put(self, request, mapping_view_id, comment_id):
uniprot_entry = get_uniprot_entry(mapping_view_id)
if 'text' not in request.data:
return Response(
{'error': 'Text not specified'},
status=status.HTTP_400_BAD_REQUEST
)
try:
comment = uniprot_entry.comments.get(id=comment_id)
except UeUnmappedEntryComment.DoesNotExist:
return Response(
{'error': "Invalid comment ID: {}".format(comment_id)},
status=status.HTTP_400_BAD_REQUEST
)
else:
if comment.deleted:
return Response(
{'error': 'Cannot edit deleted comment'},
status=status.HTTP_400_BAD_REQUEST
)
comment.comment = request.data['text']
comment.time_stamp = timezone.now()
comment.save()
serializer = CommentSerializer({
'commentId': comment.id,
'text': comment.comment,
'timeAdded': comment.time_stamp,
'user': comment.user_stamp.full_name,
'editable': request.user and request.user == | |
<filename>sushi.py
#!/usr/bin/env python2
import logging
import sys
import operator
import argparse
import os
import bisect
import collections
from itertools import takewhile, izip, chain
import time
import numpy as np
import chapters
from common import SushiError, get_extension, format_time, ensure_static_collection
from demux import Timecodes, Demuxer
import keyframes
from subs import AssScript, SrtScript
from wav import WavStream
try:
import matplotlib.pyplot as plt
plot_enabled = True
except ImportError:
plot_enabled = False
if sys.platform == 'win32':
try:
import colorama
colorama.init()
console_colors_supported = True
except ImportError:
console_colors_supported = False
else:
console_colors_supported = True
ALLOWED_ERROR = 0.01
MAX_GROUP_STD = 0.025
VERSION = '0.5.1'
class ColoredLogFormatter(logging.Formatter):
bold_code = "\033[1m"
reset_code = "\033[0m"
grey_code = "\033[30m\033[1m"
error_format = "{bold}ERROR: %(message)s{reset}".format(bold=bold_code, reset=reset_code)
warn_format = "{bold}WARNING: %(message)s{reset}".format(bold=bold_code, reset=reset_code)
debug_format = "{grey}%(message)s{reset}".format(grey=grey_code, reset=reset_code)
default_format = "%(message)s"
def format(self, record):
if record.levelno == logging.DEBUG:
self._fmt = self.debug_format
elif record.levelno == logging.WARN:
self._fmt = self.warn_format
elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL:
self._fmt = self.error_format
else:
self._fmt = self.default_format
return super(ColoredLogFormatter, self).format(record)
def abs_diff(a, b):
return abs(a - b)
def interpolate_nones(data, points):
data = ensure_static_collection(data)
values_lookup = {p: v for p, v in izip(points, data) if v is not None}
if not values_lookup:
return []
zero_points = {p for p, v in izip(points, data) if v is None}
if not zero_points:
return data
data_list = sorted(values_lookup.iteritems())
zero_points = sorted(x for x in zero_points if x not in values_lookup)
out = np.interp(x=zero_points,
xp=map(operator.itemgetter(0), data_list),
fp=map(operator.itemgetter(1), data_list))
values_lookup.update(izip(zero_points, out))
return [
values_lookup[point] if value is None else value
for point, value in izip(points, data)
]
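# Example (illustrative): interpolate_nones([1.0, None, 3.0], [0, 1, 2])
# returns [1.0, 2.0, 3.0] -- the missing value is linearly interpolated
# between its known neighbours using np.interp.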
# todo: implement this as a running median
def running_median(values, window_size):
if window_size % 2 != 1:
raise SushiError('Median window size should be odd')
half_window = window_size // 2
medians = []
items_count = len(values)
for idx in xrange(items_count):
radius = min(half_window, idx, items_count-idx-1)
med = np.median(values[idx-radius:idx+radius+1])
medians.append(med)
return medians
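# Example (illustrative): running_median([1, 5, 2, 8, 3], 3)
# returns [1.0, 2.0, 5.0, 3.0, 3.0] -- each output is the median of a window
# of up to 3 samples centred on the input, shrinking near the borders.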
def smooth_events(events, radius):
if not radius:
return
window_size = radius*2+1
shifts = [e.shift for e in events]
smoothed = running_median(shifts, window_size)
for event, new_shift in izip(events, smoothed):
event.set_shift(new_shift, event.diff)
def detect_groups(events_iter):
events_iter = iter(events_iter)
groups_list = [[next(events_iter)]]
for event in events_iter:
if abs_diff(event.shift, groups_list[-1][-1].shift) > ALLOWED_ERROR:
groups_list.append([])
groups_list[-1].append(event)
return groups_list
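# Example (illustrative): four events with shifts [0.000, 0.001, 0.500, 0.501]
# are split into two groups, because consecutive events stay in the same group
# only while their shifts differ by less than ALLOWED_ERROR (0.01 s).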
def groups_from_chapters(events, times):
logging.info(u'Chapter start points: {0}'.format([format_time(t) for t in times]))
groups = [[]]
chapter_times = iter(times[1:] + [36000000000]) # sentinel: a very large time at the end
current_chapter = next(chapter_times)
for event in events:
if event.end > current_chapter:
groups.append([])
while event.end > current_chapter:
current_chapter = next(chapter_times)
groups[-1].append(event)
groups = filter(None, groups) # non-empty groups
# check if we have any groups where every event is linked
# for example a chapter with only comments inside
broken_groups = [group for group in groups if not any(e for e in group if not e.linked)]
if broken_groups:
for group in broken_groups:
for event in group:
parent = event.get_link_chain_end()
parent_group = next(group for group in groups if parent in group)
parent_group.append(event)
del group[:]
groups = filter(None, groups)
# re-sort the groups again since we might break the order when inserting linked events
# sorting everything again is far from optimal but python sorting is very fast for sorted arrays anyway
for group in groups:
group.sort(key=lambda event: event.start)
return groups
def split_broken_groups(groups):
correct_groups = []
broken_found = False
for g in groups:
std = np.std([e.shift for e in g])
if std > MAX_GROUP_STD:
logging.warn(u'Shift is not consistent between {0} and {1}, most likely chapters are wrong (std: {2}). '
u'Switching to automatic grouping.'.format(format_time(g[0].start), format_time(g[-1].end),
std))
correct_groups.extend(detect_groups(g))
broken_found = True
else:
correct_groups.append(g)
if broken_found:
groups_iter = iter(correct_groups)
correct_groups = [list(next(groups_iter))]
for group in groups_iter:
if abs_diff(correct_groups[-1][-1].shift, group[0].shift) >= ALLOWED_ERROR \
or np.std([e.shift for e in group + correct_groups[-1]]) >= MAX_GROUP_STD:
correct_groups.append([])
correct_groups[-1].extend(group)
return correct_groups
def fix_near_borders(events):
"""
We assume that all lines with diff greater than 5 * (median diff across all events) are broken
"""
def fix_border(event_list, median_diff):
last_ten_diff = np.median([x.diff for x in event_list[:10]], overwrite_input=True)
diff_limit = min(last_ten_diff, median_diff)
broken = []
for event in event_list:
if not 0.2 < (event.diff / diff_limit) < 5:
broken.append(event)
else:
for x in broken:
x.link_event(event)
return len(broken)
return 0
median_diff = np.median([x.diff for x in events], overwrite_input=True)
fixed_count = fix_border(events, median_diff)
if fixed_count:
logging.info('Fixing {0} border events right after {1}'.format(fixed_count, format_time(events[0].start)))
fixed_count = fix_border(list(reversed(events)), median_diff)
if fixed_count:
logging.info('Fixing {0} border events right before {1}'.format(fixed_count, format_time(events[-1].end)))
def get_distance_to_closest_kf(timestamp, keyframes):
idx = bisect.bisect_left(keyframes, timestamp)
if idx == 0:
kf = keyframes[0]
elif idx == len(keyframes):
kf = keyframes[-1]
else:
before = keyframes[idx - 1]
after = keyframes[idx]
kf = after if after - timestamp < timestamp - before else before
return kf - timestamp
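# Illustrative sketch, not from the original source: the distance is signed and is
# negative when the closest keyframe precedes the timestamp, e.g.
#   get_distance_to_closest_kf(10, [0, 25, 100])  ->  -10
# because the keyframe at 0 is closer to 10 than the one at 25.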
def find_keyframe_shift(group, src_keytimes, dst_keytimes, src_timecodes, dst_timecodes, max_kf_distance):
def get_distance(src_distance, dst_distance, limit):
if abs(dst_distance) > limit:
return None
shift = dst_distance - src_distance
return shift if abs(shift) < limit else None
src_start = get_distance_to_closest_kf(group[0].start, src_keytimes)
src_end = get_distance_to_closest_kf(group[-1].end + src_timecodes.get_frame_size(group[-1].end), src_keytimes)
dst_start = get_distance_to_closest_kf(group[0].shifted_start, dst_keytimes)
dst_end = get_distance_to_closest_kf(group[-1].shifted_end + dst_timecodes.get_frame_size(group[-1].end), dst_keytimes)
snapping_limit_start = src_timecodes.get_frame_size(group[0].start) * max_kf_distance
snapping_limit_end = src_timecodes.get_frame_size(group[0].end) * max_kf_distance
return (get_distance(src_start, dst_start, snapping_limit_start),
get_distance(src_end, dst_end, snapping_limit_end))
def find_keyframes_distances(event, src_keytimes, dst_keytimes, timecodes, max_kf_distance):
def find_keyframe_distance(src_time, dst_time):
src = get_distance_to_closest_kf(src_time, src_keytimes)
dst = get_distance_to_closest_kf(dst_time, dst_keytimes)
snapping_limit = timecodes.get_frame_size(src_time) * max_kf_distance
if abs(src) < snapping_limit and abs(dst) < snapping_limit and abs(src-dst) < snapping_limit:
return dst - src
return 0
ds = find_keyframe_distance(event.start, event.shifted_start)
de = find_keyframe_distance(event.end, event.shifted_end)
return ds, de
def snap_groups_to_keyframes(events, chapter_times, max_ts_duration, max_ts_distance, src_keytimes, dst_keytimes,
src_timecodes, dst_timecodes, max_kf_distance, kf_mode):
if not max_kf_distance:
return
groups = merge_short_lines_into_groups(events, chapter_times, max_ts_duration, max_ts_distance)
if kf_mode == 'all' or kf_mode == 'shift':
# step 1: snap events without changing their duration. Useful for some slight audio imprecision correction
shifts = []
times = []
for group in groups:
shifts.extend(find_keyframe_shift(group, src_keytimes, dst_keytimes, src_timecodes, dst_timecodes, max_kf_distance))
times.extend((group[0].shifted_start, group[-1].shifted_end))
shifts = interpolate_nones(shifts, times)
if shifts:
mean_shift = np.mean(shifts)
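# pair up consecutive values: [s0, e0, s1, e1, ...] -> [(s0, e0), (s1, e1), ...],
# giving one (start_shift, end_shift) tuple per group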
shifts = zip(*(iter(shifts), ) * 2)
logging.info('Group {0}-{1} corrected by {2}'.format(format_time(events[0].start), format_time(events[-1].end), mean_shift))
for group, (start_shift, end_shift) in izip(groups, shifts):
if abs(start_shift-end_shift) > 0.001 and len(group) > 1:
actual_shift = min(start_shift, end_shift, key=lambda x: abs(x - mean_shift))
logging.warning("Typesetting group at {0} had different shift at start/end points ({1} and {2}). Shifting by {3}."
.format(format_time(group[0].start), start_shift, end_shift, actual_shift))
for e in group:
e.adjust_shift(actual_shift)
else:
for e in group:
e.adjust_additional_shifts(start_shift, end_shift)
if kf_mode == 'all' or kf_mode == 'snap':
# step 2: snap start/end times separately
for group in groups:
if len(group) > 1:
continue # we don't snap typesetting
start_shift, end_shift = find_keyframes_distances(group[0], src_keytimes, dst_keytimes, src_timecodes, max_kf_distance)
if abs(start_shift) > 0.01 or abs(end_shift) > 0.01:
logging.info('Snapping {0} to keyframes, start time by {1}, end: {2}'.format(format_time(group[0].start), start_shift, end_shift))
group[0].adjust_additional_shifts(start_shift, end_shift)
def average_shifts(events):
events = [e for e in events if not e.linked]
shifts = [x.shift for x in events]
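# weight events by match quality: a smaller diff (a closer audio match) yields a
# larger weight in the weighted average below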
weights = [1 - x.diff for x in events]
avg = np.average(shifts, weights=weights)
for e in events:
e.set_shift(avg, e.diff)
return avg
def merge_short_lines_into_groups(events, chapter_times, max_ts_duration, max_ts_distance):
search_groups = []
chapter_times = iter(chapter_times[1:] + [100000000])
next_chapter = next(chapter_times)
events = ensure_static_collection(events)
processed = set()
for idx, event in enumerate(events):
if idx in processed:
continue
while event.end > next_chapter:
next_chapter = next(chapter_times)
if event.duration > max_ts_duration:
search_groups.append([event])
processed.add(idx)
else:
group = [event]
group_end = event.end
i = idx+1
while i < len(events) and abs(group_end - events[i].start) < max_ts_distance:
if events[i].end < next_chapter and events[i].duration <= max_ts_duration:
processed.add(i)
group.append(events[i])
group_end = max(group_end, events[i].end)
i += 1
search_groups.append(group)
return search_groups
def prepare_search_groups(events, source_duration, chapter_times, max_ts_duration, max_ts_distance):
last_unlinked = None
for idx, event in enumerate(events):
if event.is_comment:
try:
event.link_event(events[idx+1])
except IndexError:
event.link_event(last_unlinked)
continue
if (event.start + event.duration / 2.0) > source_duration:
logging.info('Event time outside of audio range, ignoring: %s' % unicode(event))
event.link_event(last_unlinked)
continue
elif event.end == event.start:
logging.info('{0}: skipped because zero duration'.format(format_time(event.start)))
try:
event.link_event(events[idx + 1])
except IndexError:
event.link_event(last_unlinked)
continue
# link lines with start and end times identical to some other event
# assuming scripts are sorted by start time so we don't search the entire collection
same_start = lambda x: event.start == x.start
processed = next((x for x in takewhile(same_start, reversed(events[:idx])) if not x.linked and x.end == event.end), None)
if processed:
event.link_event(processed)
else:
last_unlinked = event
events = (e for e in events if not e.linked)
search_groups = merge_short_lines_into_groups(events, chapter_times, max_ts_duration, max_ts_distance)
# link | |
<filename>tests/unit_test/api/api_processor_test.py
import asyncio
import datetime
import os
from urllib.parse import urljoin
import jwt
import responses
from fastapi import HTTPException
from fastapi_sso.sso.base import OpenID
from mongoengine import connect
from mongoengine.errors import ValidationError, DoesNotExist
import pytest
from mongomock.object_id import ObjectId
from pydantic import SecretStr
from pytest_httpx import HTTPXMock
from starlette.datastructures import Headers, URL
from starlette.requests import Request
from starlette.responses import RedirectResponse
from kairon.shared.auth import Authentication, LoginSSOFactory
from kairon.shared.account.data_objects import Feedback, BotAccess, User
from kairon.shared.account.processor import AccountProcessor
from kairon.shared.authorization.processor import IntegrationProcessor
from kairon.shared.data.constant import ACTIVITY_STATUS, ACCESS_ROLES, TOKEN_TYPE, INTEGRATION_STATUS
from kairon.shared.data.data_objects import Configs, Rules, Responses
from kairon.shared.sso.clients.facebook import FacebookSSO
from kairon.shared.sso.clients.google import GoogleSSO
from kairon.shared.utils import Utility
from kairon.exceptions import AppException
from stress_test.data_objects import Bot
os.environ["system_file"] = "./tests/testing_data/system.yaml"
def pytest_configure():
return {'bot': None, 'account': None}
class TestAccountProcessor:
@pytest.fixture(autouse=True, scope='class')
def init_connection(self):
Utility.load_environment()
connect(**Utility.mongoengine_connection(Utility.environment['database']["url"]))
AccountProcessor.load_system_properties()
def test_add_account(self):
account_response = AccountProcessor.add_account("paypal", "testAdmin")
account = AccountProcessor.get_account(account_response["_id"])
assert account_response
pytest.account = account_response["_id"]
assert account_response["_id"] == account["_id"]
assert account_response["name"] == account["name"]
account_response = AccountProcessor.add_account("ebay", "testAdmin")
account = AccountProcessor.get_account(account_response["_id"])
assert account_response
assert account_response["_id"] == account["_id"]
assert account_response["name"] == account["name"]
def test_add_duplicate_account(self):
with pytest.raises(Exception):
AccountProcessor.add_account("paypal", "testAdmin")
def test_add_duplicate_account_case_insensitive(self):
with pytest.raises(Exception):
AccountProcessor.add_account("PayPal", "testAdmin")
def test_add_blank_account(self):
with pytest.raises(AppException):
AccountProcessor.add_account("", "testAdmin")
def test_add_empty_account(self):
with pytest.raises(AppException):
AccountProcessor.add_account(" ", "testAdmin")
def test_add_none_account(self):
with pytest.raises(AppException):
AccountProcessor.add_account(None, "testAdmin")
def test_list_bots_none(self):
assert not list(AccountProcessor.list_bots(1000))
def test_add_bot(self):
bot_response = AccountProcessor.add_bot("test", pytest.account, "<EMAIL>", True)
bot = Bot.objects(name="test").get().to_mongo().to_dict()
assert bot['_id'].__str__() == bot_response['_id'].__str__()
config = Configs.objects(bot=bot['_id'].__str__()).get().to_mongo().to_dict()
assert config['language']
assert config['pipeline'][5]['name'] == 'FallbackClassifier'
assert config['pipeline'][5]['threshold'] == 0.7
assert config['policies'][2]['name'] == 'RulePolicy'
assert config['policies'][2]['core_fallback_action_name'] == "action_default_fallback"
assert config['policies'][2]['core_fallback_threshold'] == 0.3
assert Rules.objects(bot=bot['_id'].__str__()).get()
assert Responses.objects(name__iexact='utter_please_rephrase', bot=bot['_id'].__str__(), status=True).get()
assert Responses.objects(name='utter_default', bot=bot['_id'].__str__(), status=True).get()
pytest.bot = bot_response['_id'].__str__()
def test_list_bots(self):
bot = list(AccountProcessor.list_bots(pytest.account))
assert bot[0]['name'] == 'test'
assert bot[0]['_id']
def test_get_bot(self):
bot_response = AccountProcessor.get_bot(pytest.bot)
assert bot_response
assert bot_response["account"] == pytest.account
def test_add_duplicate_bot(self):
with pytest.raises(Exception):
AccountProcessor.add_bot("test", pytest.account, "testAdmin")
def test_add_duplicate_bot_case_insensitive(self):
with pytest.raises(Exception):
AccountProcessor.add_bot("TEST", pytest.account, "testAdmin")
def test_add_blank_bot(self):
with pytest.raises(AppException):
AccountProcessor.add_bot(" ", pytest.account, "testAdmin")
def test_add_empty_bot(self):
with pytest.raises(AppException):
AccountProcessor.add_bot("", pytest.account, "testAdmin")
def test_add_none_bot(self):
with pytest.raises(AppException):
AccountProcessor.add_bot(None, pytest.account, "testAdmin")
def test_add_none_user(self):
with pytest.raises(AppException):
AccountProcessor.add_bot('test', pytest.account, None)
def test_add_user(self):
user = AccountProcessor.add_user(
email="<EMAIL>",
first_name="<NAME>",
last_name="Shaikh",
password="<PASSWORD>",
account=pytest.account,
user="testAdmin",
)
assert user
assert user["password"] != "<PASSWORD>"
assert user["status"]
def test_add_bot_for_existing_user(self):
bot_response = AccountProcessor.add_bot("test_version_2", pytest.account, "<EMAIL>", False)
bot = Bot.objects(name="test_version_2").get().to_mongo().to_dict()
assert bot['_id'].__str__() == bot_response['_id'].__str__()
assert len(AccountProcessor.get_accessible_bot_details(pytest.account, "<EMAIL>")['account_owned']) == 2
config = Configs.objects(bot=bot['_id'].__str__()).get().to_mongo().to_dict()
assert config['language']
assert config['pipeline'][5]['name'] == 'FallbackClassifier'
assert config['pipeline'][5]['threshold'] == 0.7
assert config['policies'][2]['name'] == 'RulePolicy'
assert config['policies'][2]['core_fallback_action_name'] == "action_default_fallback"
assert config['policies'][2]['core_fallback_threshold'] == 0.3
assert Rules.objects(bot=bot['_id'].__str__()).get()
assert Responses.objects(name='utter_default', bot=bot['_id'].__str__(), status=True).get()
def test_add_member_already_exists(self):
bot_id = AccountProcessor.get_accessible_bot_details(pytest.account, "<EMAIL>")['account_owned'][1]['_id']
with pytest.raises(AppException, match='User is already a collaborator'):
AccountProcessor.allow_bot_and_generate_invite_url(bot_id, "<EMAIL>", 'testAdmin',
pytest.account, ACCESS_ROLES.DESIGNER.value)
def test_add_member_bot_not_exists(self):
with pytest.raises(DoesNotExist, match='Bot does not exists!'):
AccountProcessor.allow_bot_and_generate_invite_url('bot_not_exists', "<EMAIL>", 'testAdmin', pytest.account)
def test_list_bot_accessors_1(self):
bot_id = AccountProcessor.get_accessible_bot_details(pytest.account, "<EMAIL>")['account_owned'][1]['_id']
accessors = list(AccountProcessor.list_bot_accessors(bot_id))
assert len(accessors) == 1
assert accessors[0]['accessor_email'] == '<EMAIL>'
assert accessors[0]['role'] == 'owner'
assert accessors[0]['bot']
assert accessors[0]['bot_account'] == pytest.account
assert accessors[0]['user'] == "<EMAIL>"
assert accessors[0]['timestamp']
def test_update_bot_access_modify_bot_owner_access(self):
bot_id = AccountProcessor.get_accessible_bot_details(pytest.account, "<EMAIL>")['account_owned'][1]['_id']
with pytest.raises(AppException, match='Ownership modification denied'):
AccountProcessor.update_bot_access(bot_id, "<EMAIL>", 'testAdmin',
ACCESS_ROLES.OWNER.value, ACTIVITY_STATUS.INACTIVE.value)
with pytest.raises(AppException, match='Ownership modification denied'):
AccountProcessor.update_bot_access(bot_id, "<EMAIL>", 'testAdmin',
ACCESS_ROLES.ADMIN.value, ACTIVITY_STATUS.ACTIVE.value)
def test_update_bot_access_user_not_exists(self):
bot_id = AccountProcessor.get_accessible_bot_details(pytest.account, "<EMAIL>")['account_owned'][1]['_id']
BotAccess(bot=bot_id, accessor_email="<EMAIL>", user='test',
role='designer', status='invite_not_accepted', bot_account=pytest.account).save()
with pytest.raises(DoesNotExist, match='User does not exist!'):
AccountProcessor.update_bot_access(bot_id, "<EMAIL>",
ACCESS_ROLES.ADMIN.value, ACTIVITY_STATUS.INACTIVE.value)
def test_update_bot_access_invite_not_accepted(self, monkeypatch):
monkeypatch.setitem(Utility.email_conf["email"], "enable", True)
bot_id = AccountProcessor.get_accessible_bot_details(pytest.account, "<EMAIL>")['account_owned'][1]['_id']
User(email='<EMAIL>', first_name='udit', last_name='pandey', password='<PASSWORD>', account=10,
user='<EMAIL>').save()
with pytest.raises(AppException, match='User is yet to accept the invite'):
AccountProcessor.update_bot_access(bot_id, "<EMAIL>",
ACCESS_ROLES.ADMIN.value, ACTIVITY_STATUS.INACTIVE.value)
assert BotAccess.objects(bot=bot_id, accessor_email="<EMAIL>", user='test',
role='designer', status='invite_not_accepted', bot_account=pytest.account).get()
def test_list_active_invites(self):
invite = list(AccountProcessor.list_active_invites("<EMAIL>"))
assert invite[0]['accessor_email'] == '<EMAIL>'
assert invite[0]['role'] == 'designer'
assert invite[0]['bot_name'] == 'test_version_2'
def test_accept_bot_access_invite_user_not_exists(self):
bot_id = AccountProcessor.get_accessible_bot_details(pytest.account, "<EMAIL>")['account_owned'][1]['_id']
token = Utility.generate_token("<EMAIL>")
with pytest.raises(DoesNotExist, match='User does not exist!'):
AccountProcessor.validate_request_and_accept_bot_access_invite(token, bot_id)
def test_update_bot_access_user_not_allowed(self):
AccountProcessor.add_account('<EMAIL>', '<EMAIL>')
User(email='<EMAIL>', first_name='udit', last_name='pandey', password='<PASSWORD>', account=10,
user='<EMAIL>').save()
bot_id = AccountProcessor.get_accessible_bot_details(pytest.account, "<EMAIL>")['account_owned'][1]['_id']
with pytest.raises(AppException, match='User not yet invited to collaborate'):
AccountProcessor.update_bot_access(bot_id, "<EMAIL>",
ACCESS_ROLES.ADMIN.value, ACTIVITY_STATUS.INACTIVE.value)
def test_accept_bot_access_invite(self, monkeypatch):
def _mock_get_user(*args, **kwargs):
return None
monkeypatch.setattr(AccountProcessor, 'get_user_details', _mock_get_user)
bot_id = AccountProcessor.get_accessible_bot_details(pytest.account, "<EMAIL>")['account_owned'][1]['_id']
token = Utility.generate_token("<EMAIL>")
AccountProcessor.validate_request_and_accept_bot_access_invite(token, bot_id)
assert BotAccess.objects(bot=bot_id, accessor_email="<EMAIL>", user='test',
role='designer', status='active', bot_account=pytest.account).get()
def test_list_active_invites_none(self):
invite = list(AccountProcessor.list_active_invites("<EMAIL>"))
assert invite == []
def test_update_bot_access(self):
account_bot_info = AccountProcessor.get_accessible_bot_details(pytest.account, "<EMAIL>")['account_owned'][1]
assert account_bot_info['role'] == 'owner'
bot_id = account_bot_info['_id']
assert ('test_version_2', '<EMAIL>') == AccountProcessor.update_bot_access(
bot_id, "<EMAIL>", 'testAdmin', ACCESS_ROLES.ADMIN.value, ACTIVITY_STATUS.ACTIVE.value
)
bot_access = BotAccess.objects(bot=bot_id, accessor_email="<EMAIL>").get()
assert bot_access.role == ACCESS_ROLES.ADMIN.value
assert bot_access.status == ACTIVITY_STATUS.ACTIVE.value
shared_bot_info = AccountProcessor.get_accessible_bot_details(4, "<EMAIL>")['shared'][0]
assert shared_bot_info['role'] == 'admin'
assert shared_bot_info['_id'] == bot_id
with pytest.raises(AppException, match='Ownership modification denied'):
AccountProcessor.update_bot_access(bot_id, "<EMAIL>", 'testAdmin',
ACCESS_ROLES.OWNER.value, ACTIVITY_STATUS.ACTIVE.value)
bot_access = BotAccess.objects(bot=bot_id, accessor_email="<EMAIL>").get()
assert bot_access.role == ACCESS_ROLES.ADMIN.value
assert bot_access.status == ACTIVITY_STATUS.ACTIVE.value
def test_accept_bot_access_invite_user_not_allowed(self, monkeypatch):
def _mock_get_user(*args, **kwargs):
return None
monkeypatch.setattr(AccountProcessor, 'get_user_details', _mock_get_user)
bot_id = AccountProcessor.get_accessible_bot_details(pytest.account, "<EMAIL>")['account_owned'][1]['_id']
token = Utility.generate_token("<EMAIL>")
with pytest.raises(AppException, match='No pending invite found for this bot and user'):
AccountProcessor.validate_request_and_accept_bot_access_invite(token, bot_id)
def test_accept_bot_access_invite_token_expired(self):
bot_id = AccountProcessor.get_accessible_bot_details(pytest.account, "<EMAIL>")['account_owned'][1]['_id']
token = '<KEY>'
with pytest.raises(AppException, match='Invalid token'):
AccountProcessor.validate_request_and_accept_bot_access_invite(token, bot_id)
def test_accept_bot_access_invite_invalid_bot(self):
token = Utility.generate_token("<EMAIL>")
with pytest.raises(DoesNotExist, match='Bot does not exists!'):
AccountProcessor.validate_request_and_accept_bot_access_invite(token, '<PASSWORD>')
def test_list_bot_accessors_2(self):
bot_id = AccountProcessor.get_accessible_bot_details(pytest.account, "<EMAIL>")['account_owned'][1]['_id']
accessors = list(AccountProcessor.list_bot_accessors(bot_id))
assert accessors[0]['accessor_email'] == '<EMAIL>'
assert accessors[0]['role'] == 'owner'
assert accessors[0]['bot']
assert accessors[0]['bot_account'] == pytest.account
assert accessors[0]['user'] == "<EMAIL>"
assert accessors[0]['timestamp']
assert accessors[1]['accessor_email'] == '<EMAIL>'
assert accessors[1]['role'] == 'admin'
assert accessors[1]['bot']
assert accessors[1]['bot_account'] == pytest.account
assert accessors[1]['user'] == 'testAdmin'
assert accessors[1]['accept_timestamp']
assert accessors[1]['timestamp']
def test_invite_user_as_owner(self):
with pytest.raises(AppException, match='There can be only 1 owner per bot'):
AccountProcessor.allow_bot_and_generate_invite_url('test', '<EMAIL>', '<EMAIL>', 2, ACCESS_ROLES.OWNER.value)
def test_transfer_ownership(self):
bot_id = AccountProcessor.get_accessible_bot_details(pytest.account, "<EMAIL>")['account_owned'][1]['_id']
AccountProcessor.transfer_ownership(pytest.account, bot_id, "<EMAIL>", '<EMAIL>')
accessors = list(AccountProcessor.list_bot_accessors(bot_id))
assert accessors[0]['accessor_email'] == '<EMAIL>'
assert accessors[0]['role'] == 'admin'
assert accessors[0]['bot']
assert accessors[0]['bot_account'] == 10
assert accessors[0]['user'] == "<EMAIL>"
assert accessors[1]['accessor_email'] == '<EMAIL>'
assert accessors[1]['role'] == 'owner'
assert accessors[1]['bot']
assert accessors[1]['bot_account'] == 10
assert accessors[1]['user'] == "<EMAIL>"
assert AccountProcessor.get_bot_and_validate_status(bot_id)['account'] == 10
AccountProcessor.transfer_ownership(pytest.account, bot_id, '<EMAIL>', "<EMAIL>")
accessors = list(AccountProcessor.list_bot_accessors(bot_id))
assert accessors[0]['accessor_email'] == '<EMAIL>'
assert accessors[0]['role'] == 'owner'
assert accessors[0]['bot']
assert accessors[0]['bot_account'] == pytest.account
assert accessors[0]['user'] == '<EMAIL>'
assert accessors[1]['accessor_email'] == '<EMAIL>'
assert accessors[1]['role'] == 'admin'
assert accessors[1]['bot']
assert accessors[1]['bot_account'] == pytest.account
assert accessors[1]['user'] == '<EMAIL>'
assert AccountProcessor.get_bot_and_validate_status(bot_id)['account'] == pytest.account
def test_transfer_ownership_to_non_member(self):
bot_id = AccountProcessor.get_accessible_bot_details(pytest.account, "<EMAIL>")['account_owned'][1]['_id']
User(email='<EMAIL>', first_name='udit', last_name='pandey', password='<PASSWORD>', account=10,
user='<EMAIL>').save()
with pytest.raises(AppException, match='User not yet invited to collaborate'):
AccountProcessor.transfer_ownership(pytest.account, bot_id, "<EMAIL>", '<EMAIL>')
def test_remove_bot_access_not_a_member(self):
bot_id = AccountProcessor.get_accessible_bot_details(pytest.account, "<EMAIL>")['account_owned'][1]['_id']
with pytest.raises(AppException, match='User not a collaborator to this bot'):
AccountProcessor.remove_bot_access(bot_id, accessor_email='<EMAIL>')
def test_remove_bot_access(self):
bot_id = AccountProcessor.get_accessible_bot_details(pytest.account, "<EMAIL>")['account_owned'][1]['_id']
assert not AccountProcessor.remove_bot_access(bot_id, accessor_email='<EMAIL>')
assert len(list(AccountProcessor.list_bot_accessors(bot_id))) == 1
def test_remove_bot_from_all_accessors(self):
bot_id = str(ObjectId())
BotAccess(bot=bot_id, accessor_email="<EMAIL>.<EMAIL>", user='test',
role='designer', status='active', bot_account=10).save()
BotAccess(bot=bot_id, accessor_email="<EMAIL>", user='test',
role='designer', status='invite_not_accepted', bot_account=10).save()
BotAccess(bot=bot_id, accessor_email="<EMAIL>", user='test',
role='designer', status='inactive', bot_account=10).save()
BotAccess(bot=bot_id, accessor_email="<EMAIL>", user='test',
role='designer', status='deleted', bot_account=10).save()
assert len(list(AccountProcessor.list_bot_accessors(bot_id))) == 3
AccountProcessor.remove_bot_access(bot_id)
assert len(list(AccountProcessor.list_bot_accessors(bot_id))) == 0
def test_list_bots_2(self):
bot = list(AccountProcessor.list_bots(pytest.account))
assert bot[0]['name'] == 'test'
assert bot[0]['_id']
assert bot[1]['name'] == 'test_version_2'
assert bot[1]['_id']
def test_update_bot_name(self):
AccountProcessor.update_bot('test_bot', pytest.bot)
bot = list(AccountProcessor.list_bots(pytest.account))
assert bot[0]['name'] == 'test_bot'
assert bot[0]['_id']
def test_update_bot_not_exists(self):
with pytest.raises(AppException):
AccountProcessor.update_bot('test_bot', '5f256412f98b97335c168ef0')
def test_update_bot_empty_name(self):
with pytest.raises(AppException):
AccountProcessor.update_bot(' ', '5f256412f98b97335c168ef0')
def test_delete_bot(self):
bot = list(AccountProcessor.list_bots(pytest.account))
pytest.deleted_bot = bot[1]['_id']
AccountProcessor.delete_bot(pytest.deleted_bot)
with pytest.raises(DoesNotExist):
Bot.objects(id=pytest.deleted_bot, status=True).get()
bots = AccountProcessor.get_accessible_bot_details(pytest.account, "<EMAIL>")
assert len(bots['account_owned']) == 1
assert pytest.deleted_bot not in [bot['_id'] for bot in bots['account_owned']]
assert pytest.deleted_bot not in [bot['_id'] for bot in bots['shared']]
def test_delete_bot_not_exists(self):
with pytest.raises(AppException):
AccountProcessor.delete_bot(pytest.deleted_bot)
def test_delete_account_for_account_bots(self):
account = {
"account": "Test_Delete_Account",
"email": "<EMAIL>",
"first_name": "Test_Delete_First",
"last_name": "Test_Delete_Last",
"password": SecretStr("<PASSWORD>"),
}
loop = asyncio.new_event_loop()
user_detail, mail, link = loop.run_until_complete(AccountProcessor.account_setup(account_setup=account))
pytest.deleted_account = user_detail['account'].__str__()
AccountProcessor.add_bot("delete_account_bot_1", pytest.deleted_account, "<EMAIL>", False)
AccountProcessor.add_bot("delete_account_bot_2", pytest.deleted_account, "<EMAIL>", False)
account_bots_before_delete = list(AccountProcessor.list_bots(pytest.deleted_account))
assert len(account_bots_before_delete) == 3
AccountProcessor.delete_account(pytest.deleted_account)
for bot in account_bots_before_delete:
with pytest.raises(DoesNotExist):
Bot.objects(id=bot['_id'], account=pytest.deleted_account, status=True).get()
def test_delete_account_for_shared_bot(self):
account = {
"account": "Test_Delete_Account",
"email": "<EMAIL>",
"first_name": "Test_Delete_First",
"last_name": "Test_Delete_Last",
"password": SecretStr("<PASSWORD>"),
}
loop = asyncio.new_event_loop()
user_detail, mail, link = loop.run_until_complete(
AccountProcessor.account_setup(account_setup=account))
# Add shared bot
bot_response = AccountProcessor.add_bot("delete_account_shared_bot", 30, "<EMAIL>", False)
bot_id = bot_response['_id'].__str__()
BotAccess(bot=bot_id, accessor_email="<EMAIL>", user='testAdmin',
role='designer', status='active', bot_account=30).save()
pytest.deleted_account = user_detail['account'].__str__()
accessors_before_delete = list(AccountProcessor.list_bot_accessors(bot_id))
assert len(accessors_before_delete) == 2
assert accessors_before_delete[0]['accessor_email'] == '<EMAIL>'
assert accessors_before_delete[1]['accessor_email'] == '<EMAIL>'
AccountProcessor.delete_account(pytest.deleted_account)
accessors_after_delete = list(AccountProcessor.list_bot_accessors(bot_id))
assert len(accessors_after_delete) == 1
assert accessors_after_delete[0]['accessor_email'] == '<EMAIL>'
assert accessors_after_delete[0]['bot_account'] == 30
assert Bot.objects(id=bot_id, account=30, status=True).get()
def test_delete_account_for_account(self):
account = {
"account": "Test_Delete_Account",
"email": "<EMAIL>",
"first_name": "Test_Delete_First",
"last_name": "Test_Delete_Last",
"password": SecretStr("<PASSWORD>")
}
loop = asyncio.new_event_loop()
user_detail, mail, link = loop.run_until_complete(
AccountProcessor.account_setup(account_setup=account))
pytest.deleted_account = user_detail['account'].__str__()
AccountProcessor.delete_account(pytest.deleted_account)
assert AccountProcessor.get_account(pytest.deleted_account)
assert not AccountProcessor.get_account(pytest.deleted_account).get('status')
with pytest.raises(AppException, match="Account does not exist!"):
AccountProcessor.delete_account(pytest.deleted_account)
def test_delete_account_for_user(self):
account = {
"account": "Test_Delete_Account",
"email": "<EMAIL>",
"first_name": "Test_Delete_First",
"last_name": "Test_Delete_Last",
"password": SecretStr("<PASSWORD>")
}
loop = asyncio.new_event_loop()
user_detail, mail, link = | |
<filename>webfront/tests/tests_3_endpoints_using_searcher.py
import time
from tqdm import tqdm
from interpro import settings
from rest_framework import status
from webfront.tests.InterproRESTTestCase import InterproRESTTestCase
from webfront.searcher.elastic_controller import ElasticsearchController
from webfront.tests.actions_on_test_dataset import *
api_test_map = {
"entry": {
"interpro": ["IPR003165", "IPR001165"],
"pfam": ["PF02171", "PF17180", "PF17176"],
"smart": ["SM00950", "SM00002"],
"profile": ["PS50822", "PS01031"],
},
"protein": {
"uniprot": ["A1CUJ5", "M5ADK6", "A0A0A2L2G2", "P16582"],
"reviewed": ["A1CUJ5", "M5ADK6"],
"unreviewed": ["A0A0A2L2G2", "P16582"],
},
"structure": {"pdb": ["1JM7", "1T2V", "2BKM", "1JZ8"]},
"taxonomy": {"uniprot": ["1", "2", "2579", "40296", "344612", "1001583"]},
"proteome": {"uniprot": ["UP000006701", "UP000012042", "UP000030104"]},
"set": {"pfam": ["CL0001", "CL0002"]},
}
chains = {
"1JM7": ["a", "b"],
"1T2V": ["a", "b", "c", "d", "e"],
"2BKM": ["a", "b"],
"1JZ8": ["a", "b"],
}
class ActionsOnTestDocumentTest(InterproRESTTestCase):
@classmethod
def setUpClass(cls):
super(ActionsOnTestDocumentTest, cls).setUpClass()
search = ElasticsearchController()
obj = search.execute_query("*:*", rows=50)
cls.all_docs = [o["_source"] for o in obj["hits"]["hits"]]
def tests_all_docs_are_loaded(self):
self.assertNotEqual(None, self.all_docs)
self.assertGreater(len(self.all_docs), 0)
def tests_can_filter_by_entry_acc(self):
empty = filter_by_value(self.all_docs, "entry_acc", "Unicorn")
self.assertEqual(len(empty), 0)
not_entry_acc = filter_by_value(self.all_docs, "entry_acc", None)
self.assertLess(len(not_entry_acc), len(self.all_docs))
with_entry_acc = filter_by_value(self.all_docs, "entry_acc", "*")
self.assertLess(len(with_entry_acc), len(self.all_docs))
self.assertEqual(len(with_entry_acc) + len(not_entry_acc), len(self.all_docs))
filtered = filter_by_value(self.all_docs, "entry_acc", "ipr003165")
self.assertGreater(len(filtered), 1)
def tests_can_select(self):
with_entry_acc = filter_by_value(self.all_docs, "entry_acc", "*")
entry_acc_db = select_fields(with_entry_acc, ["entry_acc", "entry_db"])
for doc in entry_acc_db:
self.assertIn("entry_acc", doc)
def tests_can_get_unique_set(self):
with_entry_db = filter_by_value(self.all_docs, "entry_db", "*")
entry_dbs = select_fields(with_entry_db, ["entry_db"])
entry_dbs = unique(entry_dbs)
entry_dbs = [d["entry_db"] for d in entry_dbs]
self.assertEqual(len(entry_dbs), len(set(entry_dbs)))
def tests_group_and_count(self):
db_counts = group_by_field_and_count(self.all_docs, "entry_db", "entry_acc")
for db in db_counts:
c = 0
for doc in self.all_docs:
if "entry_db" in doc and doc["entry_db"] == db:
c += 1
self.assertEqual(db_counts[db], c)
def tests_group_and_count_unique(self):
db_counts_unique = group_by_field_and_count(
self.all_docs, "entry_db", "entry_acc", True
)
for db in db_counts_unique:
uni = set()
for doc in self.all_docs:
if "entry_db" in doc and doc["entry_db"] == db:
uni.add(doc["entry_acc"])
self.assertEqual(db_counts_unique[db], len(uni))
def tests_contains_filter(self):
with_root = filter_by_contain_value(self.all_docs, "tax_lineage", "1")
self.assertEqual(len(self.all_docs), len(with_root))
with_bacteria = filter_by_contain_value(self.all_docs, "tax_lineage", "2")
self.assertGreater(len(self.all_docs), len(with_bacteria))
with_tax_id = filter_by_value(self.all_docs, "tax_id", 344612)
with_parent = filter_by_contain_value(with_tax_id, "tax_lineage", "2579")
self.assertEqual(len(with_parent), len(with_tax_id))
PAYLOAD_TYPE_COUNTER = 1
PAYLOAD_TYPE_LIST = 2
PAYLOAD_TYPE_ENTITY = 3
def get_url(endpoints, dbs=None, accs=None):
url = "/api"
for i in range(len(endpoints)):
url += "/" + endpoints[i]
if dbs is not None and dbs[i] is not None:
url += "/" + dbs[i]
if accs is not None and accs[i] is not None:
url += "/" + accs[i]
return url
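# Illustrative sketch, not part of the original tests: get_url assembles nested endpoint
# URLs and skips missing db/accession parts, e.g.
#   get_url(["entry", "protein"], ["pfam", None], ["PF02171", None])
#   ->  "/api/entry/pfam/PF02171/protein"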
class ThreeEndpointsTableTest(InterproRESTTestCase):
@classmethod
def setUpClass(cls):
super(ThreeEndpointsTableTest, cls).setUpClass()
search = ElasticsearchController()
obj = search.execute_query("*:*", rows=50)
cls.all_docs = [o["_source"] for o in obj["hits"]["hits"]]
def assert_response_equal_to_expected(
self, url, data, payload_type, endpoints, dbs, accs
):
response = self._get_in_debug_mode(url)
if len(data) == 0:
self.assertEqual(
response.status_code,
status.HTTP_204_NO_CONTENT,
"It should be an empty response for URL {}".format(url),
)
else:
self.assertEqual(
response.status_code,
status.HTTP_200_OK,
"It should be an OK response for URL {}".format(url),
)
if payload_type == PAYLOAD_TYPE_COUNTER:
expected = get_counter_payload(data, endpoints, dbs, accs)
self.assertEqual(
response.data,
expected,
"The URL {} wasn't equal to the expected response.\nRESPONSE: {}\nEXPECTED: {}".format(
url, response.data, expected
),
)
elif payload_type == PAYLOAD_TYPE_LIST:
expected = get_db_payload(data, endpoints, dbs, accs)
self.assert_db_response_is_as_expected(
response, expected, endpoints[0], url
)
elif payload_type == PAYLOAD_TYPE_ENTITY:
expected = get_acc_payload(data, endpoints, dbs, accs)
self.assert_obj_response_is_as_expected(
response.data, expected, endpoints[0], url
)
def assert_chain_urls(self, data, endpoints, dbs, accs):
payload_type = PAYLOAD_TYPE_COUNTER
if dbs[0] is not None:
payload_type = PAYLOAD_TYPE_LIST
if accs is not None and accs[0] is not None:
payload_type = PAYLOAD_TYPE_ENTITY
for i in range(len(accs)):
if accs[i] is not None and endpoints[i] == "structure":
for chain in chains[accs[i]]:
accs_copy = accs.copy()
accs_copy[i] = accs_copy[i] + "/" + chain
url = get_url(endpoints, dbs, accs_copy)
with_chain = filter_by_value(data, "structure_chain_acc", chain)
self.assert_response_equal_to_expected(
url, with_chain, payload_type, endpoints, dbs, accs
)
def assert_db_integration_urls(self, data, endpoints, dbs, accs=None):
payload_type = PAYLOAD_TYPE_COUNTER
if dbs[0] is not None:
payload_type = PAYLOAD_TYPE_LIST
if accs is not None and accs[0] is not None:
payload_type = PAYLOAD_TYPE_ENTITY
for i in range(len(dbs)):
if dbs[i] is not None and endpoints[i] == "entry" and dbs[i] != "interpro":
dbs_copy = dbs.copy()
dbs_copy[i] = "unintegrated/" + dbs_copy[i]
url = get_url(endpoints, dbs_copy, accs)
unintegrated = filter_by_value(data, "entry_integrated", None)
unintegrated = exclude_by_value(unintegrated, "entry_db", "interpro")
self.assert_response_equal_to_expected(
url, unintegrated, payload_type, endpoints, dbs, accs
)
dbs_copy = dbs.copy()
dbs_copy[i] = "integrated/" + dbs_copy[i]
url = get_url(endpoints, dbs_copy, accs)
integrated = filter_by_value(data, "entry_integrated", "*")
self.assert_response_equal_to_expected(
url, integrated, payload_type, endpoints, dbs, accs
)
def test_endpoint_endpoint_endpoint(self):
for endpoint1 in api_test_map:
for endpoint2 in api_test_map:
if endpoint1 == endpoint2:
continue
for endpoint3 in api_test_map:
if endpoint1 == endpoint3 or endpoint2 == endpoint3:
continue
url = "/api/{}/{}/{}".format(endpoint1, endpoint2, endpoint3)
response = self.client.get(url)
self.assertEqual(
response.status_code,
status.HTTP_200_OK,
"URL: [{}]".format(url),
)
data = filter_by_endpoint(self.all_docs, endpoint1)
data = filter_by_endpoint(data, endpoint2)
data = filter_by_endpoint(data, endpoint3)
expected = get_counter_payload(
data, [endpoint1, endpoint2, endpoint3]
)
self.assertEqual(
response.data,
expected,
"The URL {} wasn't equal to the expected response.\nRESPONSE: {}\nEXPECTED: {}".format(
url, response.data, expected
),
)
def test_endpoint_endpoint_db(self):
for endpoint1 in api_test_map:
for endpoint2 in api_test_map:
if endpoint1 == endpoint2:
continue
for endpoint3 in api_test_map:
if endpoint1 == endpoint3 or endpoint2 == endpoint3:
continue
for db3 in api_test_map[endpoint3]:
url = "/api/{}/{}/{}/{}".format(
endpoint1, endpoint2, endpoint3, db3
)
response = self._get_in_debug_mode(url)
data = filter_by_endpoint(self.all_docs, endpoint1)
data = filter_by_endpoint(data, endpoint2)
data = filter_by_endpoint(data, endpoint3, db3)
if len(data) == 0:
self.assertEqual(
response.status_code,
status.HTTP_204_NO_CONTENT,
"It should be an empty response for URL {}".format(url),
)
else:
self.assertEqual(
response.status_code,
status.HTTP_200_OK,
"It should be an OK response for URL {}".format(url),
)
expected = get_counter_payload(
data,
[endpoint1, endpoint2, endpoint3],
[None, None, db3],
)
self.assertEqual(
response.data,
expected,
"The URL {} wasn't equal to the expected response.\nRESPONSE: {}\nEXPECTED: {}".format(
url, response.data, expected
),
)
# test_endpoint_db_endpoint
url = "/api/{}/{}/{}/{}".format(
endpoint1, endpoint3, db3, endpoint2
)
response2 = self.client.get(url)
self.assertEqual(
response2.status_code,
status.HTTP_200_OK,
"URL: [{}]".format(url),
)
self.assertEqual(
response2.data,
expected,
"The URL {} wasn't equal to the expected response.\nRESPONSE: {}\nEXPECTED: {}".format(
url, response.data, expected
),
)
self.assert_db_integration_urls(
data,
[endpoint1, endpoint2, endpoint3],
[None, None, db3],
)
def test_endpoint_endpoint_acc(self):
for endpoint1 in api_test_map:
for endpoint2 in api_test_map:
if endpoint1 == endpoint2:
continue
for endpoint3 in api_test_map:
if endpoint1 == endpoint3 or endpoint2 == endpoint3:
continue
for db3 in api_test_map[endpoint3]:
for acc3 in api_test_map[endpoint3][db3]:
url = "/api/{}/{}/{}/{}/{}".format(
endpoint1, endpoint2, endpoint3, db3, acc3
)
response = self._get_in_debug_mode(url)
data = filter_by_endpoint(self.all_docs, endpoint1)
data = filter_by_endpoint(data, endpoint2)
data = filter_by_endpoint(data, endpoint3, db3, acc3)
if len(data) == 0:
self.assertEqual(
response.status_code,
status.HTTP_204_NO_CONTENT,
"It should be an empty response for URL {}".format(
url
),
)
else:
self.assertEqual(
response.status_code,
status.HTTP_200_OK,
"It should be an OK response for URL {}".format(
url
),
)
eps = [endpoint1, endpoint2, endpoint3]
dbs = [None, None, db3]
accs = [None, None, acc3]
expected = get_counter_payload(data, eps, dbs, accs)
self.assertEqual(
len(response.data),
len(expected),
"The URL {} wasn't equal to the expected response.\nRESPONSE: {}\nEXPECTED: {}".format(
url, response.data, expected
),
)
for key in response.data:
self.assertEqual(
response.data[key],
expected[key],
"The URL {} wasn't equal. Response on key {}.\nRESPONSE: {}\nEXPECTED: {}".format(
url, key, response.data, expected
),
)
# test_endpoint_acc_endpoint
url = "/api/{}/{}/{}/{}/{}".format(
endpoint1, endpoint3, db3, acc3, endpoint2
)
response2 = self.client.get(url)
self.assertEqual(
response2.status_code,
status.HTTP_200_OK,
"URL: [{}]".format(url),
)
self.assertEqual(
len(response.data),
len(expected),
"The URL {} wasn't equal to the expected response.\nRESPONSE: {}\nEXPECTED: {}".format(
url, response.data, expected
),
)
for key in response.data:
self.assertEqual(
response.data[key],
expected[key],
"The URL {} wasn't equal. Response on key {}.\nRESPONSE: {}\nEXPECTED: {}".format(
url, key, response.data, expected
),
)
self.assert_db_integration_urls(data, eps, dbs, accs)
self.assert_chain_urls(data, eps, dbs, accs)
def test_endpoint_db_db(self):
for endpoint1 in api_test_map:
for endpoint2 in api_test_map:
if endpoint1 == endpoint2:
continue
for db2 in api_test_map[endpoint2]:
for endpoint3 in api_test_map:
if endpoint1 == endpoint3 or endpoint2 == endpoint3:
continue
for db3 in api_test_map[endpoint3]:
if not settings.DEBUG:
time.sleep(0.1)
url = "/api/{}/{}/{}/{}/{}".format(
endpoint1, endpoint2, db2, endpoint3, db3
)
response = self._get_in_debug_mode(url)
data = filter_by_endpoint(self.all_docs, endpoint1)
data = filter_by_endpoint(data, endpoint2, db2)
data = filter_by_endpoint(data, endpoint3, db3)
if len(data) == 0:
self.assertEqual(
response.status_code,
status.HTTP_204_NO_CONTENT,
"It should be an empty response for URL {}".format(
url
),
)
else:
self.assertEqual(
response.status_code,
status.HTTP_200_OK,
"It should be an OK response for URL {}".format(
url
),
)
expected = get_counter_payload(
data,
[endpoint1, endpoint2, endpoint3],
[None, db2, db3],
)
self.assertEqual(response.data, expected)
url = "/api/{}/{}/{}/{}/{}".format(
endpoint1, endpoint3, db3, endpoint2, db2
)
response = self.client.get(url)
self.assertEqual(
response.status_code, status.HTTP_200_OK
)
self.assertEqual(response.data, expected)
self.assert_db_integration_urls(
data,
[endpoint1, endpoint2, endpoint3],
[None, db2, db3],
)
def test_endpoint_db_acc(self):
for endpoint1 in api_test_map:
for endpoint2 in api_test_map:
if endpoint1 == endpoint2:
continue
for db2 in api_test_map[endpoint2]:
for endpoint3 | |
<reponame>tschwinge/borg
# -*- encoding: utf-8 -*-
import os
import io
import re
import sys
from collections import OrderedDict
from datetime import datetime
from glob import glob
from distutils.command.build import build
from distutils.core import Command
import textwrap
import setup_lz4
import setup_zstd
import setup_b2
# True: use the shared liblz4 (>= 1.7.0 / r129) from the system, False: use the bundled lz4 code
prefer_system_liblz4 = True
# True: use the shared libzstd (>= 1.3.0) from the system, False: use the bundled zstd code
prefer_system_libzstd = True
# True: use the shared libb2 from the system, False: use the bundled blake2 code
prefer_system_libb2 = True
min_python = (3, 5)
my_python = sys.version_info
if my_python < min_python:
print("Borg requires Python %d.%d or later" % min_python)
sys.exit(1)
# Are we building on ReadTheDocs?
on_rtd = os.environ.get('READTHEDOCS')
install_requires = [
# msgpack pure python data corruption was fixed in 0.4.6.
# msgpack 0.5.0 was a bit of a troublemaker.
'msgpack-python>=0.4.6,!=0.5.0,<0.6.0',
'pyzmq',
]
# note for package maintainers: if you package borgbackup for distribution,
# please add llfuse as a *requirement* on all platforms that have a working
# llfuse package. "borg mount" needs llfuse to work.
# if you do not have llfuse, do not require it, most of borgbackup will work.
extras_require = {
# llfuse 0.40 (tested, proven, ok), needs FUSE version >= 2.8.0
# llfuse 0.41 (tested shortly, looks ok), needs FUSE version >= 2.8.0
# llfuse 0.41.1 (tested shortly, looks ok), needs FUSE version >= 2.8.0
# llfuse 0.42 (tested shortly, looks ok), needs FUSE version >= 2.8.0
# llfuse 1.0 (tested shortly, looks ok), needs FUSE version >= 2.8.0
# llfuse 1.1.1 (tested shortly, looks ok), needs FUSE version >= 2.8.0
# llfuse 1.2 (tested shortly, looks ok), needs FUSE version >= 2.8.0
# llfuse 1.3 (tested shortly, looks ok), needs FUSE version >= 2.8.0
# llfuse 2.0 will break API
'fuse': ['llfuse<2.0', ],
}
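# Illustrative note (assumption, not stated in this file): with setuptools extras the FUSE
# support declared above is selected at install time, e.g. `pip install borgbackup[fuse]`,
# which pulls in the llfuse pin from extras_require.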
if sys.platform.startswith('freebsd'):
# llfuse was frequently broken / did not build on freebsd
# llfuse 0.41.1, 1.1 are ok
extras_require['fuse'] = ['llfuse <2.0, !=0.42.*, !=0.43, !=1.0', ]
from setuptools import setup, find_packages, Extension
from setuptools.command.sdist import sdist
from distutils.command.clean import clean
compress_source = 'src/borg/compress.pyx'
crypto_ll_source = 'src/borg/crypto/low_level.pyx'
crypto_helpers = 'src/borg/crypto/_crypto_helpers.c'
chunker_source = 'src/borg/chunker.pyx'
hashindex_source = 'src/borg/hashindex.pyx'
item_source = 'src/borg/item.pyx'
checksums_source = 'src/borg/algorithms/checksums.pyx'
platform_posix_source = 'src/borg/platform/posix.pyx'
platform_linux_source = 'src/borg/platform/linux.pyx'
platform_darwin_source = 'src/borg/platform/darwin.pyx'
platform_freebsd_source = 'src/borg/platform/freebsd.pyx'
cython_sources = [
compress_source,
crypto_ll_source,
chunker_source,
hashindex_source,
item_source,
checksums_source,
platform_posix_source,
platform_linux_source,
platform_freebsd_source,
platform_darwin_source,
]
try:
from Cython.Distutils import build_ext
import Cython.Compiler.Main as cython_compiler
class Sdist(sdist):
def __init__(self, *args, **kwargs):
for src in cython_sources:
cython_compiler.compile(src, cython_compiler.default_options)
super().__init__(*args, **kwargs)
def make_distribution(self):
self.filelist.extend([
'src/borg/compress.c',
'src/borg/crypto/low_level.c',
'src/borg/chunker.c', 'src/borg/_chunker.c',
'src/borg/hashindex.c', 'src/borg/_hashindex.c',
'src/borg/cache_sync/cache_sync.c', 'src/borg/cache_sync/sysdep.h', 'src/borg/cache_sync/unpack.h',
'src/borg/cache_sync/unpack_define.h', 'src/borg/cache_sync/unpack_template.h',
'src/borg/item.c',
'src/borg/algorithms/checksums.c',
'src/borg/algorithms/crc32_dispatch.c', 'src/borg/algorithms/crc32_clmul.c', 'src/borg/algorithms/crc32_slice_by_8.c',
'src/borg/algorithms/xxh64/xxhash.h', 'src/borg/algorithms/xxh64/xxhash.c',
'src/borg/platform/posix.c',
'src/borg/platform/linux.c',
'src/borg/platform/freebsd.c',
'src/borg/platform/darwin.c',
])
super().make_distribution()
except ImportError:
class Sdist(sdist):
def __init__(self, *args, **kwargs):
raise Exception('Cython is required to run sdist')
compress_source = compress_source.replace('.pyx', '.c')
crypto_ll_source = crypto_ll_source.replace('.pyx', '.c')
chunker_source = chunker_source.replace('.pyx', '.c')
hashindex_source = hashindex_source.replace('.pyx', '.c')
item_source = item_source.replace('.pyx', '.c')
checksums_source = checksums_source.replace('.pyx', '.c')
platform_posix_source = platform_posix_source.replace('.pyx', '.c')
platform_linux_source = platform_linux_source.replace('.pyx', '.c')
platform_freebsd_source = platform_freebsd_source.replace('.pyx', '.c')
platform_darwin_source = platform_darwin_source.replace('.pyx', '.c')
from distutils.command.build_ext import build_ext
if not on_rtd and not all(os.path.exists(path) for path in [
compress_source, crypto_ll_source, chunker_source, hashindex_source, item_source, checksums_source,
platform_posix_source, platform_linux_source, platform_freebsd_source, platform_darwin_source]):
raise ImportError('The GIT version of Borg needs Cython. Install Cython or use a released version.')
def detect_openssl(prefixes):
for prefix in prefixes:
filename = os.path.join(prefix, 'include', 'openssl', 'evp.h')
if os.path.exists(filename):
with open(filename, 'rb') as fd:
if b'PKCS5_PBKDF2_HMAC(' in fd.read():
return prefix
include_dirs = []
library_dirs = []
define_macros = []
possible_openssl_prefixes = ['/usr', '/usr/local', '/usr/local/opt/openssl', '/usr/local/ssl', '/usr/local/openssl',
'/usr/local/borg', '/opt/local', '/opt/pkg', ]
if os.environ.get('BORG_OPENSSL_PREFIX'):
possible_openssl_prefixes.insert(0, os.environ.get('BORG_OPENSSL_PREFIX'))
ssl_prefix = detect_openssl(possible_openssl_prefixes)
if not ssl_prefix:
raise Exception('Unable to find OpenSSL >= 1.0 headers. (Looked here: {})'.format(', '.join(possible_openssl_prefixes)))
include_dirs.append(os.path.join(ssl_prefix, 'include'))
library_dirs.append(os.path.join(ssl_prefix, 'lib'))
possible_liblz4_prefixes = ['/usr', '/usr/local', '/usr/local/opt/lz4', '/usr/local/lz4',
'/usr/local/borg', '/opt/local', '/opt/pkg', ]
if os.environ.get('BORG_LIBLZ4_PREFIX'):
possible_liblz4_prefixes.insert(0, os.environ.get('BORG_LIBLZ4_PREFIX'))
liblz4_prefix = setup_lz4.lz4_system_prefix(possible_liblz4_prefixes)
if prefer_system_liblz4 and liblz4_prefix:
print('Detected and preferring liblz4 over bundled LZ4')
define_macros.append(('BORG_USE_LIBLZ4', 'YES'))
liblz4_system = True
else:
liblz4_system = False
possible_libb2_prefixes = ['/usr', '/usr/local', '/usr/local/opt/libb2', '/usr/local/libb2',
'/usr/local/borg', '/opt/local', '/opt/pkg', ]
if os.environ.get('BORG_LIBB2_PREFIX'):
possible_libb2_prefixes.insert(0, os.environ.get('BORG_LIBB2_PREFIX'))
libb2_prefix = setup_b2.b2_system_prefix(possible_libb2_prefixes)
if prefer_system_libb2 and libb2_prefix:
print('Detected and preferring libb2 over bundled BLAKE2')
define_macros.append(('BORG_USE_LIBB2', 'YES'))
libb2_system = True
else:
libb2_system = False
possible_libzstd_prefixes = ['/usr', '/usr/local', '/usr/local/opt/libzstd', '/usr/local/libzstd',
'/usr/local/borg', '/opt/local', '/opt/pkg', ]
if os.environ.get('BORG_LIBZSTD_PREFIX'):
possible_libzstd_prefixes.insert(0, os.environ.get('BORG_LIBZSTD_PREFIX'))
libzstd_prefix = setup_zstd.zstd_system_prefix(possible_libzstd_prefixes)
if prefer_system_libzstd and libzstd_prefix:
print('Detected and preferring libzstd over bundled ZSTD')
define_macros.append(('BORG_USE_LIBZSTD', 'YES'))
libzstd_system = True
else:
libzstd_system = False
with open('README.rst', 'r') as fd:
long_description = fd.read()
# remove badges
long_description = re.compile(r'^\.\. start-badges.*^\.\. end-badges', re.M | re.S).sub('', long_description)
# remove |substitutions|
long_description = re.compile(r'\|screencast\|').sub('', long_description)
# remove unknown directives
long_description = re.compile(r'^\.\. highlight:: \w+$', re.M).sub('', long_description)
def format_metavar(option):
if option.nargs in ('*', '...'):
return '[%s...]' % option.metavar
elif option.nargs == '?':
return '[%s]' % option.metavar
elif option.nargs is None:
return option.metavar
else:
raise ValueError('Can\'t format metavar %s, unknown nargs %s!' % (option.metavar, option.nargs))
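# Illustrative sketch with a hypothetical option: for an argparse option whose metavar is
# "PATH", nargs "*" or "..." formats as "[PATH...]", nargs "?" as "[PATH]", and nargs None
# simply as "PATH"; any other nargs raises ValueError.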
class build_usage(Command):
description = "generate usage for each command"
user_options = [
('output=', 'O', 'output directory'),
]
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
print('generating usage docs')
import borg
borg.doc_mode = 'build_man'
if not os.path.exists('docs/usage'):
os.mkdir('docs/usage')
# allows us to build docs without the C modules fully loaded during help generation
from borg.archiver import Archiver
parser = Archiver(prog='borg').build_parser()
# borgfs has a separate man page to satisfy debian's "every program from a package
# must have a man page" requirement, but it doesn't need a separate HTML docs page
#borgfs_parser = Archiver(prog='borgfs').build_parser()
self.generate_level("", parser, Archiver)
def generate_level(self, prefix, parser, Archiver, extra_choices=None):
is_subcommand = False
choices = {}
for action in parser._actions:
if action.choices is not None and 'SubParsersAction' in str(action.__class__):
is_subcommand = True
for cmd, parser in action.choices.items():
choices[prefix + cmd] = parser
if extra_choices is not None:
choices.update(extra_choices)
if prefix and not choices:
return
print('found commands: %s' % list(choices.keys()))
for command, parser in sorted(choices.items()):
if command.startswith('debug'):
print('skipping', command)
continue
print('generating help for %s' % command)
if self.generate_level(command + " ", parser, Archiver):
continue
with open('docs/usage/%s.rst.inc' % command.replace(" ", "_"), 'w') as doc:
doc.write(".. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit!\n\n")
if command == 'help':
for topic in Archiver.helptext:
params = {"topic": topic,
"underline": '~' * len('borg help ' + topic)}
doc.write(".. _borg_{topic}:\n\n".format(**params))
doc.write("borg help {topic}\n{underline}\n\n".format(**params))
doc.write(Archiver.helptext[topic])
else:
params = {"command": command,
"command_": command.replace(' ', '_'),
"underline": '-' * len('borg ' + command)}
doc.write(".. _borg_{command_}:\n\n".format(**params))
doc.write("borg {command}\n{underline}\n.. code-block:: none\n\n borg [common options] {command}".format(**params))
self.write_usage(parser, doc)
epilog = parser.epilog
parser.epilog = None
self.write_options(parser, doc)
doc.write("\n\nDescription\n~~~~~~~~~~~\n")
doc.write(epilog)
if 'create' in choices:
common_options = [group for group in choices['create']._action_groups if group.title == 'Common options'][0]
with open('docs/usage/common-options.rst.inc', 'w') as doc:
self.write_options_group(common_options, doc, False, base_indent=0)
return is_subcommand
def write_usage(self, parser, fp):
if any(len(o.option_strings) for o in parser._actions):
fp.write(' [options]')
for option in parser._actions:
if option.option_strings:
continue
fp.write(' ' + format_metavar(option))
fp.write('\n\n')
def write_options(self, parser, fp):
def is_positional_group(group):
return any(not o.option_strings for o in group._group_actions)
# HTML output:
# A table using some column-spans
def html_write(s):
for line in s.splitlines():
fp.write(' ' + line + '\n')
rows = []
for group in parser._action_groups:
if group.title == 'Common options':
# (no of columns used, columns, ...)
rows.append((1, '.. class:: borg-common-opt-ref\n\n:ref:`common_options`'))
else:
if not group._group_actions:
continue
group_header = '**%s**' % group.title
if group.description:
group_header += ' — ' + group.description
rows.append((1, group_header))
if is_positional_group(group):
for option in group._group_actions:
rows.append((3, '', '``%s``' % option.metavar, option.help or ''))
else:
for option in group._group_actions:
if option.metavar:
option_fmt = '``%s ' + option.metavar + '``'
else:
option_fmt = '``%s``'
option_str = ', '.join(option_fmt % s for s in option.option_strings)
option_desc = textwrap.dedent((option.help or '') % option.__dict__)
rows.append((3, '', option_str, option_desc))
fp.write('.. only:: html\n\n')
table = io.StringIO()
table.write('.. class:: borg-options-table\n\n')
self.rows_to_table(rows, table.write)
fp.write(textwrap.indent(table.getvalue(), ' ' * 4))
# LaTeX output:
# Regular rST option lists (irregular column widths)
latex_options = io.StringIO()
for group in parser._action_groups:
if group.title == 'Common options':
latex_options.write('\n\n:ref:`common_options`\n')
latex_options.write(' |')
else:
self.write_options_group(group, latex_options)
fp.write('\n.. only:: latex\n\n')
fp.write(textwrap.indent(latex_options.getvalue(), ' ' * 4))
def rows_to_table(self, rows, write):
def write_row_separator():
write('+')
for column_width in column_widths:
write('-' * (column_width + 1))
write('+')
write('\n')
# Find column count and width
column_count = max(columns for columns, *_ in rows)
column_widths = [0] * column_count
for columns, *cells in rows:
for i in range(columns):
# "+ 1" because we want a space between the cell contents and the | |
<filename>TrainingExtensions/torch/test/python/test_quantizer.py
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2017-2019, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
import json as json
import os
import unittest.mock
from collections import namedtuple
from pprint import pprint
from typing import Dict
from packaging import version
import libpymo
import numpy as np
import onnx
import pytest
import torch
import torch.nn as nn
import yaml
from torchvision import models
from aimet_common.defs import QuantScheme, QuantizationDataType, MAP_ROUND_MODE_TO_PYMO
from aimet_common.utils import AimetLogger
from aimet_torch import transformer_utils, onnx_utils
from aimet_torch import utils, elementwise_ops
from aimet_torch.elementwise_ops import Multiply
from aimet_torch.examples.test_models import TwoLayerBidirectionalLSTMModel, SingleLayerRNNModel, \
ModelWithTwoInputs, SimpleConditional
from aimet_torch.meta.connectedgraph import ConnectedGraph
from aimet_torch.onnx_utils import OnnxExportApiArgs
from aimet_torch.qc_quantize_op import QcQuantizeWrapper, QcQuantizeStandalone, \
StaticGridQuantWrapper, QcQuantizeOpMode, LearnedGridQuantWrapper
from aimet_torch.qc_quantize_recurrent import QcQuantizeRecurrent
from aimet_torch.quantsim import QuantizationSimModel, check_accumulator_overflow
from aimet_torch.quantsim_straight_through_grad import compute_dloss_by_dx
logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Test)
def evaluate(model: torch.nn.Module, dummy_input: torch.Tensor):
"""
Helper function to evaluate model given dummy input
:param model: torch model
:param dummy_input: dummy input to model
"""
model.eval()
if isinstance(dummy_input, torch.Tensor):
dummy_input = [dummy_input]
with torch.no_grad():
model(*dummy_input)
def dummy_forward_pass(model, args):
"""
Dummy forward-pass callback (model, args signature): runs the model on random
28x28 single-channel input and returns the output; args is unused.
"""
model.eval()
with torch.no_grad():
output = model(torch.randn((32, 1, 28, 28)))
return output
class InputOutputDictModel(nn.Module):
def __init__(self):
super(InputOutputDictModel, self).__init__()
self.mul1 = Multiply()
self.mul2 = Multiply()
self.mul3 = Multiply()
def forward(self, inputs: Dict[str, torch.Tensor]):
ab = self.mul1(inputs['a'], inputs['b'])
bc = self.mul2(inputs['b'], inputs['c'])
ca = self.mul3(inputs['c'], inputs['a'])
output_def = namedtuple('output_def', ['ab', 'bc', 'ca'])
return output_def(ab, bc, ca)
class SmallMnistNoDropoutWithPassThrough(nn.Module):
def __init__(self):
super(SmallMnistNoDropoutWithPassThrough, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.pt1 = torch.nn.Identity()
self.relu1 = nn.ReLU()
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.pt2 = torch.nn.Identity()
self.relu2 = nn.ReLU()
self.fc1 = nn.Linear(320, 50)
self.relu3 = nn.ReLU()
self.fc2 = nn.Linear(50, 10)
self.log_softmax = nn.LogSoftmax(dim=1)
def forward(self, x):
x = self.relu1(self.pt1(self.conv1(x)))
x = self.conv2(x)
x = self.relu2(self.pt2(x))
x = x.view(-1, 320)
x = self.relu3(self.fc1(x))
x = self.fc2(x)
return self.log_softmax(x)
class SmallMnist(nn.Module):
def __init__(self):
super(SmallMnist, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.relu1 = nn.ReLU()
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.relu2 = nn.ReLU()
self.fc1 = nn.Linear(320, 50)
self.relu3 = nn.ReLU()
self.dropout = nn.Dropout()
self.fc2 = nn.Linear(50, 10)
self.log_softmax = nn.LogSoftmax(dim=1)
def forward(self, x):
x = self.relu1(self.conv1(x))
x = self.conv2(x)
x = self.relu2(self.conv2_drop(x))
x = x.view(-1, 320)
x = self.relu3(self.fc1(x))
x = self.dropout(x)
x = self.fc2(x)
return self.log_softmax(x)
class SmallMnistNoDropout(nn.Module):
def __init__(self):
super(SmallMnistNoDropout, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.relu1 = nn.ReLU()
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.relu2 = nn.ReLU()
self.fc1 = nn.Linear(320, 50)
self.relu3 = nn.ReLU()
self.fc2 = nn.Linear(50, 10)
self.log_softmax = nn.LogSoftmax(dim=1)
def forward(self, x):
x = self.relu1(self.conv1(x))
x = self.relu2(self.conv2(x))
x = x.view(-1, 320)
x = self.relu3(self.fc1(x))
x = self.fc2(x)
return self.log_softmax(x)
class SoftMaxAvgPoolModel(torch.nn.Module):
def __init__(self):
super(SoftMaxAvgPoolModel, self).__init__()
self.sfmax = torch.nn.Softmax(dim=1)
self.avgpool = torch.nn.AvgPool2d(3)
def forward(self, inp):
x = self.sfmax(inp)
return self.avgpool(x)
class ModelWithStandaloneOps(nn.Module):
def __init__(self):
super(ModelWithStandaloneOps, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.maxpool1 = nn.MaxPool2d(2)
self.relu1 = nn.ReLU()
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.myquant = QcQuantizeStandalone(activation_bw=8, round_mode=MAP_ROUND_MODE_TO_PYMO['nearest'],
quant_scheme=QuantScheme.post_training_tf_enhanced,
is_symmetric=False, data_type=QuantizationDataType.int)
self.conv2_drop = nn.Dropout2d()
self.maxpool2 = nn.MaxPool2d(2)
self.relu2 = nn.ReLU()
self.fc1 = nn.Linear(320, 50)
self.relu3 = nn.ReLU()
self.dropout = nn.Dropout2d()
self.fc2 = nn.Linear(50, 10)
self.log_softmax = nn.LogSoftmax(dim=1)
def forward(self, *inputs):
x = self.relu1(self.maxpool1(self.conv1(inputs[0])))
x = self.conv2(x)
x = self.myquant(x)
x = self.relu2(self.maxpool2(self.conv2_drop(x)))
x = x.view(-1, 320)
x = self.relu3(self.fc1(x))
x = self.dropout(x)
x = self.fc2(x)
return self.log_softmax(x)
class ModelWithTwoInputsOneToAdd(nn.Module):
def __init__(self):
super(ModelWithTwoInputsOneToAdd, self).__init__()
self.conv1_a = nn.Conv2d(1, 10, kernel_size=5)
self.maxpool1_a = nn.MaxPool2d(2)
self.relu1_a = nn.ReLU()
self.conv1_b = nn.Conv2d(10, 10, kernel_size=5)
self.maxpool1_b = nn.MaxPool2d(2)
self.relu1_b = nn.ReLU()
self.add = elementwise_ops.Add()
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.maxpool2 = nn.MaxPool2d(2)
self.relu2 = nn.ReLU()
self.fc1 = nn.Linear(320, 50)
self.relu3 = nn.ReLU()
self.dropout = nn.Dropout()
self.fc2 = nn.Linear(50, 10)
self.softmax = nn.LogSoftmax(dim=1)
def forward(self, x1, x2):
x1 = self.relu1_a(self.maxpool1_a(self.conv1_a(x1)))
x1 = self.relu1_b(self.maxpool1_b(self.conv1_b(x1)))
x = self.add(x1, x2)
x = self.relu2(self.maxpool2(self.conv2(x)))
x = x.view(-1, 320)
x = self.relu3(self.fc1(x))
x = self.dropout(x)
x = self.fc2(x)
return self.softmax(x)
class PreluModel(nn.Module):
def __init__(self):
super(PreluModel, self).__init__()
self.prelu = nn.PReLU()
def forward(self, x):
return self.prelu(x)
class PixelShuffleModel(nn.Module):
def __init__(self):
super(PixelShuffleModel, self).__init__()
self.ps = nn.PixelShuffle(2)
def forward(self, x):
return self.ps(x)
class FakeMultiOutputOp(torch.autograd.Function):
"""
This function helps create a custom onnx op to simulate a 5 output tensor onnx node
Note: the forward pass has some tensor computation to prevent torch onnx export from removing onnx node.
"""
@staticmethod
def symbolic(g, inp):
"""
Magic method that helps with exporting a custom ONNX node
"""
return g.op('FakeMultiOutputOp', inp, outputs=5)
@staticmethod
def forward(ctx, x): # pylint: disable=arguments-differ
return x*2, x*4, x*8, x*16, x*32
@staticmethod
def backward(ctx, _grad): # pylint: disable=arguments-differ
raise NotImplementedError()
class ModuleWith5Output(torch.nn.Module):
def forward(self, x):
return FakeMultiOutputOp.apply(x)
class ModelWith5Output(torch.nn.Module):
def __init__(self):
super(ModelWith5Output, self).__init__()
self.cust = ModuleWith5Output()
def forward(self, x):
return self.cust(x)
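# Illustrative sketch (not part of the original test file): exporting ModelWith5Output
# through torch.onnx.export is what exercises FakeMultiOutputOp.symbolic() above and
# yields a single custom 'FakeMultiOutputOp' ONNX node with five outputs. The input
# shape and output path below are assumptions chosen only for this example.
def _export_model_with_5_output_sketch(onnx_path='./model_with_5_output.onnx'):
    model = ModelWith5Output()
    dummy_input = torch.randn(1, 3, 8, 8)
    torch.onnx.export(model, dummy_input, onnx_path)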
class TestQuantizationSimStaticGrad:
def test_is_leaf_module_positive(self):
"""With an actual leaf module"""
conv1 = nn.Conv2d(1, 10, 5)
assert utils.is_leaf_module(conv1)
# -------------------------------------------
def test_is_leaf_module_negative(self):
"""With a non-leaf module"""
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, 5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
net = Net()
model = net.to(torch.device('cpu'))
assert not utils.is_leaf_module(model)
# -------------------------------------------------------------
def test_is_quantizable_module_positive(self):
"""With a quantizable module"""
conv1 = nn.Conv2d(1, 10, 5)
assert QuantizationSimModel._is_quantizable_module(conv1)
# -------------------------------------------------------------
def test_is_quantizable_module_negative(self):
"""With a non-quantizable module"""
conv1 = StaticGridQuantWrapper(nn.Conv2d(1, 10, 5), weight_bw=8, activation_bw=8, round_mode='nearest',
quant_scheme=QuantScheme.post_training_tf_enhanced, data_type=QuantizationDataType.int)
assert not QuantizationSimModel._is_quantizable_module(conv1)
# ------------------------------------------------------------
def verify_quantization_wrappers(self, original_model, quantized_model, quant_scheme=QuantScheme.post_training_tf_enhanced):
"""Test utility to determine if quantization wrappers were added correctly"""
# All leaf modules in the original model
orig_modules = [(name, module) for name, module in original_model.named_modules()
if len(list(module.modules())) == 1]
# All QcQuantized modules in the quantized model
quant_modules = [(name, module) for name, module in quantized_model.named_modules()
if isinstance(module, QcQuantizeWrapper)]
for i, orig_mod_tuple in enumerate(orig_modules):
quant_mod_tuple = quant_modules[i]
# If the original model has any QcQuantizeWrapper node, skip the checks
if '_module_to_wrap' in orig_mod_tuple[0]:
continue
# Checks --------
# Modules should be in the same order as before
assert orig_mod_tuple[0] == quant_mod_tuple[0], "Quantized model has an incorrectly named module"
if quant_scheme in [QuantScheme.post_training_tf, QuantScheme.post_training_tf_enhanced]:
# For every leaf module in the first list, there is a corresponding QcQuantized model in the second list
assert str(type(quant_mod_tuple[1]).__name__) == 'StaticGridQuantWrapper'
# Each QcQuantized model has 1 child, that is the same type as the corresponding module in the original list
assert len(list(quant_mod_tuple[1].modules())) == 2
child = list(quant_mod_tuple[1].modules())[1]
logger.debug("{} -> {}".format(type(child), type(orig_mod_tuple[1])))
assert type(child) == type(orig_mod_tuple[1])
# --------------------------------------------------------
def test_add_quantization_wrappers_one_deep(self):
"""With a one-deep model"""
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, 5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, *inputs):
x = self.conv1(inputs[0])
x = self.conv2(x)
x = self.conv2_drop(x)
x = x.view(x.size(0), -1)
x = self.fc1(x)
x = self.fc2(x)
return x
model = Net()
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 12, 12))
self.verify_quantization_wrappers(model, sim.model)
# ------------------------------------------------------
def test_add_quantization_wrappers_with_preexisting_quantization_layers(self):
"""With a one-deep model"""
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = StaticGridQuantWrapper(nn.Conv2d(1, 10, 5), weight_bw=8, activation_bw=8,
round_mode='stochastic',
quant_scheme=QuantScheme.post_training_tf_enhanced,
data_type=QuantizationDataType.int)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def
<filename>lib/modeling/rel_heads.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch.autograd import Variable
from core.config import cfg
import nn as mynn
import utils.net as net_utils
import numpy as np
import modeling.geo_feat as geo_feat
class rel_infer(nn.Module):
def __init__(self, rel_out):
super().__init__()
self.rel_out = rel_out
self.relationship_mat = None
self.num_batches_tracked = 0
self.avg_rel_tp = 0
self.avg_rel_tn = 0
def detectron_weight_mapping(self):
return {}, []
def init_rel_mat(self):
self.relationship_dict = self.rel_out.relationship_dict
coo = []
for k,v in self.relationship_dict.items():
for _ in v:
coo.append([k[0], k[1], _])
coo = torch.tensor(coo)
self.relationship_mat = torch.sparse.FloatTensor(coo.t(), torch.ones(coo.shape[0]),
torch.Size([cfg.MODEL.NUM_CLASSES,cfg.MODEL.NUM_CLASSES,cfg.MODEL.NUM_RELATIONS])).to_dense()
if cfg.REL_INFER.CONSIDER_REVERSE_REL:
self.relationship_mat = torch.cat([self.relationship_mat, self.relationship_mat.transpose(1,0)[...,1:]], 2)
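# Illustrative note (class/relation ids are assumed): if relationship_dict maps the
# (subject_class, object_class) pair (3, 7) to relation ids [2, 5], then after
# init_rel_mat() we have relationship_mat[3, 7, 2] == relationship_mat[3, 7, 5] == 1
# and the rest of that fibre stays 0. With CONSIDER_REVERSE_REL the transposed matrix
# (minus relation 0) is appended along the relation axis to also cover (object, subject).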
def forward(self, roi_scores, rel_scores, roidb):
if roi_scores.shape[0] == 1 and roidb is not None and len(roidb[0]['boxes']) == 0:
# These are the placeholder all-zero rois; just return the scores as they are.
return roi_scores
if roi_scores.shape[0] == 1:
# Only one image, no need for propagation
return roi_scores
# Turn logprob to prob
if not cfg.REL_INFER.TRAIN or cfg.REL_INFER.MODE == 2:
rel_scores = rel_scores.exp()
# Replace rel_scores with ground truth relationship if USE_GT_REL is True
if cfg.TEST.USE_GT_REL or cfg.TEST.EVALUATE_REL_ACC:
_rel_scores = torch.zeros_like(rel_scores).reshape(roi_scores.shape[0], roi_scores.shape[0], -1)
for i in range(roi_scores.shape[0]):
for j in range(roi_scores.shape[0]):
rel_ids = roidb[0]['gt_relationships'].get((i, j), [])
if cfg.MODEL.RELATION_COOCCUR and len(rel_ids) > 0:
rel_ids = [1]
if len(rel_ids) > 0:
_rel_scores[i][j][rel_ids] = 1 / len(rel_ids)
else:
_rel_scores[i][j][0] = 1
_rel_scores = _rel_scores.view_as(rel_scores)
if cfg.TEST.EVALUATE_REL_ACC:
filt = (_rel_scores > 0).float().sum(1) <= 1
filt = (torch.eye(roi_scores.shape[0]).type_as(filt) != 1).view(-1) * filt
if not hasattr(self, 'rel_scores_collect'):
self.rel_scores_collect = []
self.rel_gt_collect = []
self.rel_scores_collect.append(rel_scores[filt].cpu().data.numpy())
self.rel_gt_collect.append(_rel_scores[filt].max(1)[1].cpu().data.numpy())
tmp_tp = (((rel_scores.max(1)[1] == _rel_scores.max(1)[1]) * (_rel_scores[:,1:] == 1).any(1)).float().sum() / ((_rel_scores[:, 1:] == 1).any(1).float().sum()+1e-7)).item()
tmp_tn = (rel_scores.max(1)[1] == 0)[(_rel_scores[:,1:] == 0).all(1)].float().mean().item()
print('Relation true positive acc', tmp_tp)
print('Relation true negative acc', tmp_tn)
self.avg_rel_tp = (self.avg_rel_tp * self.num_batches_tracked + tmp_tp) / (self.num_batches_tracked + 1)
self.avg_rel_tn = (self.avg_rel_tn * self.num_batches_tracked + tmp_tn) / (self.num_batches_tracked + 1)
self.num_batches_tracked += 1
print('Relation avg tp acc', self.avg_rel_tp)
print('Relation avg tn acc', self.avg_rel_tn)
if cfg.TEST.USE_GT_REL:
rel_scores = _rel_scores
if self.relationship_mat is None:
self.init_rel_mat()
self.relationship_mat = self.relationship_mat.type_as(roi_scores)
self.relationship_mat[:,:,0].fill_(1)
assert roi_scores.shape[0] ** 2 == rel_scores.shape[0]
rel_scores = rel_scores.reshape(roi_scores.shape[0], roi_scores.shape[0], -1)
# Take topk to reduce search space. k = 5, so we can preserve recall@5
topk_scores, topk_labels = roi_scores.topk(cfg.TEST.REL_INFER_PROPOSAL, 1)
# Approximate Q
Q = torch.ones_like(topk_scores)
Q = F.softmax(Q) # normalize
# Get binary terms, log energy.
tmp_index = topk_labels.view(-1)
tmp_index = torch.cat([tmp_index.view(-1, 1).repeat(1, tmp_index.shape[0]).view(1, -1),
tmp_index.view(1, -1).repeat(tmp_index.shape[0], 1).view(1, -1)], 0)
binary_theta = self.relationship_mat[tmp_index[0], tmp_index[1]].view(*(topk_scores.shape[:2]+topk_scores.shape[:2]+(-1,)))
if not cfg.REL_INFER.NO_REL_SCORE:
binary_theta *= rel_scores.unsqueeze(1).unsqueeze(3)
if cfg.REL_INFER.MODE == 3:
binary_theta = binary_theta[..., 0] + cfg.REL_INFER.NONE_BGREL_WEIGHT * binary_theta[..., 1:].max(-1)[0] # KxNxN
else:
binary_theta = binary_theta[..., 0] + cfg.REL_INFER.NONE_BGREL_WEIGHT * binary_theta[..., 1:].sum(-1) # KxNxN
# make binary_theta symmetric
binary_theta = (binary_theta + binary_theta.permute(2,3,0,1))/2
# Take log
if not cfg.REL_INFER.TRAIN or cfg.REL_INFER.MODE == 2:
binary_theta = binary_theta.log()
# Make diagonal zero, because messages pass only from neighbours
torch.diagonal(binary_theta, dim1=0, dim2=2).fill_(0)
if cfg.REL_INFER.BINARY_NORMALIZE:
weight = rel_scores[:,:,1:].sum(-1)
weight = (weight + weight.t()) / 2
torch.diagonal(weight).fill_(0)
weight = weight / weight.sum(-1, keepdim=True)
binary_theta = binary_theta * weight.unsqueeze(-1).unsqueeze(1)
# Get unary_theta
unary_theta = topk_scores.log()
for r in range(cfg.TEST.REL_INFER_ROUND):
new_Q = cfg.TEST.REL_INFER_BINARY_WEIGHT * binary_theta.view(Q.numel(), -1).matmul(Q.view(-1, 1)).view_as(Q)
new_Q = new_Q + unary_theta
new_Q = F.softmax(new_Q)
Q = new_Q
if torch.isnan(Q).any():
import pdb;pdb.set_trace()
new_roi_scores = torch.zeros_like(roi_scores)
new_roi_scores.scatter_(1, topk_labels, Q)
roi_scores = (roi_scores + 10000 * new_roi_scores) / (1+10000)
return roi_scores
class rel_infer_train(nn.Module):
def __init__(self, rel_out):
super().__init__()
self.rel_out = rel_out
self.relationship_mat = None
self.num_batches_tracked = 0
self.avg_rel_tp = 0
self.avg_rel_tn = 0
def detectron_weight_mapping(self):
return {}, []
def init_rel_mat(self):
self.relationship_dict = self.rel_out.relationship_dict
coo = []
for k,v in self.relationship_dict.items():
for _ in v:
coo.append([k[0], k[1], _])
coo = torch.tensor(coo)
self.relationship_mat = torch.sparse.FloatTensor(coo.t(), torch.ones(coo.shape[0]),
torch.Size([cfg.MODEL.NUM_CLASSES,cfg.MODEL.NUM_CLASSES,cfg.MODEL.NUM_RELATIONS])).to_dense()
if cfg.REL_INFER.CONSIDER_REVERSE_REL:
self.relationship_mat = torch.cat([self.relationship_mat, self.relationship_mat.transpose(1,0)[...,1:]], 2)
def forward(self, rois, roi_labels, roi_scores, rel_scores, roidb):
# Remove roi_labels being zero
if roi_labels is not None:
roi_scores = roi_scores[torch.from_numpy(roi_labels) > 0]
rois = rois[roi_labels > 0]
roi_labels = roi_labels[roi_labels > 0]
proposal_num = get_proposal_num(rois) # Get the numbers of each image
roi_labels = torch.from_numpy(roi_labels.astype('int64')).to(roi_scores.device)
if self.relationship_mat is None:
self.init_rel_mat()
self.relationship_mat = self.relationship_mat.type_as(roi_scores)
self.relationship_mat[:,:,0].fill_(1)
roi_scores = F.log_softmax(roi_scores, dim=1)
# Turn logprob to prob
if not cfg.REL_INFER.TRAIN or cfg.REL_INFER.MODE == 2:
rel_scores = rel_scores.exp()
assert(sum([_**2 for _ in proposal_num]) == rel_scores.shape[0])
head_rois = 0
head_out = 0
loss = []
for i in range(len(proposal_num)):
# Generate sub_obj features
if proposal_num[i] == 0:
continue
lc_roi_labels = roi_labels[head_rois:head_rois+proposal_num[i]] # N
lc_roi_scores = roi_scores[head_rois:head_rois+proposal_num[i]] # NxC
lc_rel_scores = rel_scores[head_out:head_out+proposal_num[i]**2].reshape(proposal_num[i], proposal_num[i], -1)
# import pdb;pdb.set_trace()
binary_theta = self.relationship_mat[lc_roi_labels].unsqueeze(1).repeat(1, proposal_num[i],1,1)
binary_theta = binary_theta * lc_rel_scores.unsqueeze(-2) # NxNx600xR
if cfg.REL_INFER.MODE == 3:
binary_theta = binary_theta[..., 0] + cfg.REL_INFER.NONE_BGREL_WEIGHT * binary_theta[..., 1:].max(-1)[0] # KxNxN
else:
binary_theta = binary_theta[..., 0] + cfg.REL_INFER.NONE_BGREL_WEIGHT * binary_theta[..., 1:].sum(-1) # KxNxN
# t_binary the same as above
t_binary_theta = self.relationship_mat[:, lc_roi_labels] # 600xNxR
t_binary_theta = self.relationship_mat[:, lc_roi_labels].transpose(1,0).unsqueeze(1).repeat(1, proposal_num[i],1,1)
t_binary_theta = t_binary_theta * lc_rel_scores.transpose(0,1).unsqueeze(-2) # NxNxKxR
t_binary_theta = t_binary_theta[..., 0] + cfg.REL_INFER.NONE_BGREL_WEIGHT * t_binary_theta[..., 1:].sum(-1) # KxNxN
# make binary_theta symmetric
binary_theta = (binary_theta + t_binary_theta)/2 # NxNxK
# remove diagonal
tmp_diag = 1-torch.eye(proposal_num[i]).type_as(binary_theta).unsqueeze(-1)
binary_theta = binary_theta * tmp_diag
binary_theta = binary_theta.sum(0) # Nxk
# Turn logprob to prob
if not cfg.REL_INFER.TRAIN or cfg.REL_INFER.MODE == 2:
binary_theta = binary_theta.log()
unary_theta = lc_roi_scores
# _loss = -F.log_softmax(unary_theta + binary_theta).gather(1, lc_roi_labels.unsqueeze(1)).squeeze(1)
_loss = -F.log_softmax(binary_theta).gather(1, lc_roi_labels.unsqueeze(1)).squeeze(1)
loss.append(_loss)
head_out += proposal_num[i]**2
head_rois += proposal_num[i]
# if torch.isnan(loss[-1]):
# import pdb;pdb.set_trace()
if len(loss) > 0:
return torch.cat(loss, 0).mean()
else:
return roi_scores.new_tensor(0)
class rel_outputs(nn.Module):
def __init__(self, dim_in):
super().__init__()
if cfg.REL_INFER.PRETRAINED_WE:
out_rel_dim = 300
elif not cfg.REL_INFER.CONSIDER_REVERSE_REL:
out_rel_dim = cfg.MODEL.NUM_RELATIONS
else:
out_rel_dim = cfg.MODEL.NUM_RELATIONS * 2 -1
if not cfg.MODEL.RELATION_NET_INPUT.startswith('GEO'):
self.bilinear_sub = nn.Sequential(
nn.Linear(dim_in, 32),
nn.ReLU(inplace=True)
)
self.bilinear_obj = nn.Sequential(
nn.Linear(dim_in, 32),
nn.ReLU(inplace=True)
)
if cfg.MODEL.RELATION_NET_INPUT.startswith('NEW+GEO'):
self.embed_geo = nn.Sequential(
nn.Linear(18 if 'GEO2' in cfg.MODEL.RELATION_NET_INPUT else 64, 64),
nn.ReLU(inplace=True),
nn.Linear(64, 64),
nn.ReLU(inplace=True),
nn.Linear(64, out_rel_dim)
)
self.embed_appr = nn.Sequential(
nn.Linear(64, 64),
nn.ReLU(inplace=True),
nn.Linear(64, out_rel_dim)
)
return
if 'GEO2' in cfg.MODEL.RELATION_NET_INPUT:
self.bilinear_geo = nn.Sequential(
nn.Linear(18, 32),
nn.ReLU(inplace=True)
)
elif 'GEO' in cfg.MODEL.RELATION_NET_INPUT:
self.bilinear_geo = nn.Sequential(
nn.Linear(64, 32),
nn.ReLU(inplace=True)
)
if cfg.MODEL.RELATION_NET_INPUT.startswith('GEO'):
self.rel_dim_in = 32
elif cfg.MODEL.RELATION_NET_INPUT.startswith('+GEO'):
self.rel_dim_in = 64 + 32
else:
self.rel_dim_in = 64
self.relation_net = nn.Sequential(
nn.Linear(self.rel_dim_in, 64),
nn.ReLU(),
nn.Linear(64, out_rel_dim)
)
def set_word_embedding(self, embedding):
if embedding.shape[0] != cfg.MODEL.NUM_RELATIONS - 1:
print('WARNING: Word embedding size not matched')
return
self.word_embedding[0].copy_(embedding.mean(0))
self.word_embedding[1:].copy_(embedding)
self.word_embedding.copy_(F.normalize(self.cls_score.word_embedding, dim=1))
def detectron_weight_mapping(self):
return {}, []
def forward(self, *args, **kwargs):
if cfg.MODEL.RELATION_NET_INPUT.startswith('NEW+GEO'):
return self._forward_new(*args, **kwargs)
else:
return self._forward(*args, **kwargs)
def get_geo_feat(self, rois, roi_feats):
if 'GEO2' in cfg.MODEL.RELATION_NET_INPUT:
tmp_geo_feat = geo_feat.get_proposal_feat(torch.from_numpy(rois).to(roi_feats))
elif 'GEO' in cfg.MODEL.RELATION_NET_INPUT:
tmp_geo_feat = geo_feat.extract_multi_position_matrix_nd(torch.from_numpy(rois).to(roi_feats))
tmp_geo_feat = geo_feat.extract_pairwise_multi_position_embedding_nd(tmp_geo_feat)
return tmp_geo_feat
def _forward(self, rois, roi_feats, roi_labels = None, roidb=None):
"""
input:
rois, rois_feats
"""
if roi_feats.dim() == 4:
roi_feats = roi_feats.squeeze(3).squeeze(2)
if cfg.MODEL.RELATION_NET_FEAT_STOP_GRAG:
roi_feats = roi_feats.detach()
# Remove roi_labels being zero
if roi_labels is not None:
old_rois = rois
old_roi_labels = roi_labels
roi_feats = roi_feats[torch.from_numpy(roi_labels) > 0]
rois = rois[roi_labels > 0]
roi_labels = roi_labels[roi_labels > 0]
proposal_num = get_proposal_num(rois) # Get the numbers of each image
if not cfg.MODEL.RELATION_NET_INPUT.startswith('GEO'):
sub_feats = self.bilinear_sub(roi_feats)
obj_feats = self.bilinear_obj(roi_feats)
total_pairs = sum([_**2 for _ in proposal_num])
pairs = np.zeros((total_pairs, 2))
rel_feats = roi_feats.new_zeros((total_pairs, self.rel_dim_in))
head_rois = 0
head_out = 0
for i in range(len(proposal_num)):
# Generate sub_obj features
if proposal_num[i] == 0:
continue
if not cfg.MODEL.RELATION_NET_INPUT.startswith('GEO'):
tmp_sub = sub_feats[head_rois:head_rois+proposal_num[i]]
tmp_obj = obj_feats[head_rois:head_rois+proposal_num[i]]
tmp_sub = tmp_sub.unsqueeze(1).expand(proposal_num[i], proposal_num[i], -1)
tmp_obj = tmp_obj.unsqueeze(0).expand(proposal_num[i], proposal_num[i], -1)
# tmp appearance feature
tmp_appr_feat = torch.cat([tmp_sub, tmp_obj], 2).reshape(proposal_num[i]**2, -1)
if 'GEO' in cfg.MODEL.RELATION_NET_INPUT:
tmp_geo_feat = self.get_geo_feat(rois[head_rois:head_rois+proposal_num[i], 1:], roi_feats)
tmp_geo_feat = self.bilinear_geo(tmp_geo_feat)
tmp_geo_feat = tmp_geo_feat.view(-1, tmp_geo_feat.shape[-1])
if cfg.MODEL.RELATION_NET_INPUT.startswith('GEO'):
rel_feats[head_out:head_out+proposal_num[i]**2] = tmp_geo_feat
elif cfg.MODEL.RELATION_NET_INPUT.startswith('+GEO'):
rel_feats[head_out:head_out+proposal_num[i]**2,:-32] = tmp_appr_feat
rel_feats[head_out:head_out+proposal_num[i]**2,-32:] = tmp_geo_feat
else:
rel_feats[head_out:head_out+proposal_num[i]**2] = tmp_appr_feat
if roi_labels is not None:
# Generate sub obj pairs
tmp_lbl = torch.from_numpy(roi_labels[head_rois:head_rois+proposal_num[i]]).unsqueeze(1)
tmp_pairs = torch.cat([
tmp_lbl.unsqueeze(1).expand(proposal_num[i], proposal_num[i], -1),\
tmp_lbl.unsqueeze(0).expand(proposal_num[i], proposal_num[i], -1)\
], 2).numpy()
# Manually set diagonal pairs to (-1, -1)
np.fill_diagonal(tmp_pairs[:,:,0], -1)
np.fill_diagonal(tmp_pairs[:,:,1], -1)
pairs[head_out:head_out+proposal_num[i]**2] = np.reshape(tmp_pairs, (proposal_num[i]**2, -1))
head_out += proposal_num[i]**2
head_rois += proposal_num[i]
logit_rel = self.relation_net(rel_feats)
def saveit(logit):
import glob
import os
sfiles = glob.glob('rel/*.pth')
sfiles.sort(key=os.path.getmtime)
if len(sfiles) ==
#!/usr/bin/env python3
#
# This script checks for updates to zcashd's dependencies.
#
# The SOURCE_ROOT constant specifies the location of the zcashd codebase to
# check, and the GITHUB_API_* constants specify a personal access token for the
# GitHub API, which need not have any special privileges.
#
# All dependencies must be specified inside the get_dependency_list() function
# below. A dependency is specified by:
#
# (a) A way to fetch a list of current releases.
#
# This is usually regular-expression-based parsing of GitHub tags, but
# might otherwise parse version numbers out of the project's webpage.
#
# GitHub tag regexps can be tested by specifying test cases in the third
# argument to GithubTagReleaseLister's constructor.
#
# (b) A way to fetch the currently-used version out of the source tree.
#
# This is typically parsed out of the depends/packages/*.mk files.
#
# If any dependency is found to be out-of-date, or there are un-accounted-for
# .mk files in depends/packages, this script will exit with
# a nonzero status. The latter case would suggest someone added a new dependency
# without adding a corresponding entry to get_dependency_list() below.
#
# To test the script itself, run it with --functionality-test as the only
# argument. This will exercise the full functionality of the script, but will
# only return a non-zero exit status when there's something wrong with the
# script itself, for example if a new file was added to depends/packages/ but
# wasn't added to this script.
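#
# As an illustration only (this is not an actual entry below): a hypothetical dependency
# "foo", hosted on GitHub under the org "bar", tagged like "v1.2.3", and built from a
# depends/packages/foo.mk file, would be declared inside get_dependency_list() as:
#
#   Dependency("foo",
#              GithubTagReleaseLister("bar", "foo", "^v(\d+)\.(\d+)\.(\d+)$",
#                                     { "v1.2.3": (1, 2, 3), "v1.2.3-rc1": None }),
#              DependsVersionGetter("foo"))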
import requests
import os
import re
import sys
import datetime
SOURCE_ROOT = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..")
# The email for this account is <EMAIL> and the token does not
# have any privileges.
GITHUB_API_BASIC_AUTH_USER = "taylor-ecc"
GITHUB_API_BASIC_AUTH_PASSWORD = "<PASSWORD>e8fa6618"
def get_dependency_list():
dependencies = [
Dependency("bdb",
BerkeleyDbReleaseLister(),
DependsVersionGetter("bdb")),
Dependency("boost",
GithubTagReleaseLister("boostorg", "boost", "^boost-(\d+)\.(\d+)\.(\d+)$",
{ "boost-1.69.0": (1, 69, 0), "boost-1.69.0-beta1": None }),
DependsVersionGetter("boost")),
Dependency("googletest",
GithubTagReleaseLister("google", "googletest", "^release-(\d+)\.(\d+)\.(\d+)$",
{ "release-1.8.1": (1, 8, 1) }),
DependsVersionGetter("googletest")),
# libc++ matches the Clang version
Dependency("libcxx",
GithubTagReleaseLister("llvm", "llvm-project", "^llvmorg-(\d+)\.(\d+).(\d+)$",
{ "llvmorg-11.0.0": (11, 0, 0), "llvmorg-9.0.1-rc3": None}),
DependsVersionGetter("native_clang")),
Dependency("libevent",
GithubTagReleaseLister("libevent", "libevent", "^release-(\d+)\.(\d+)\.(\d+)-stable$",
{ "release-2.0.22-stable": (2, 0, 22), "release-2.1.9-beta": None }),
DependsVersionGetter("libevent")),
Dependency("libsodium",
GithubTagReleaseLister("jedisct1", "libsodium", "^(\d+)\.(\d+)\.(\d+)$",
{ "1.0.17": (1, 0, 17) }),
DependsVersionGetter("libsodium")),
Dependency("native_ccache",
GithubTagReleaseLister("ccache", "ccache", "^v?(\d+)\.(\d+)(?:\.(\d+))?$",
{ "v3.5.1": (3, 5, 1), "v3.6": (3, 6)}),
DependsVersionGetter("native_ccache")),
Dependency("native_clang",
GithubTagReleaseLister("llvm", "llvm-project", "^llvmorg-(\d+)\.(\d+).(\d+)$",
{ "llvmorg-11.0.0": (11, 0, 0), "llvmorg-9.0.1-rc3": None}),
DependsVersionGetter("native_clang")),
Dependency("native_rust",
GithubTagReleaseLister("rust-lang", "rust", "^(\d+)\.(\d+)(?:\.(\d+))?$",
{ "1.33.0": (1, 33, 0), "0.9": (0, 9) }),
DependsVersionGetter("native_rust")),
Dependency("zeromq",
GithubTagReleaseLister("zeromq", "libzmq", "^v(\d+)\.(\d+)(?:\.(\d+))?$",
{ "v4.3.1": (4, 3, 1), "v4.2.0-rc1": None }),
DependsVersionGetter("zeromq")),
Dependency("leveldb",
GithubTagReleaseLister("google", "leveldb", "^v(\d+)\.(\d+)$",
{ "v1.13": (1, 13) }),
LevelDbVersionGetter()),
Dependency("univalue",
GithubTagReleaseLister("jgarzik", "univalue", "^v(\d+)\.(\d+)\.(\d+)$",
{ "v1.1.1": (1, 1, 1) }),
UnivalueVersionGetter()),
Dependency("utfcpp",
GithubTagReleaseLister("nemtrif", "utfcpp", "^v(\d+)\.(\d+)(?:\.(\d+))?$",
{ "v3.1": (3, 1), "v3.0.3": (3, 0, 3) }),
DependsVersionGetter("utfcpp"))
]
# Rust crates (filename portion: depends/packages/crate_<NAME>.mk).
crates = [
"addchain", "aes", "aesni", "aes_soft", "ansi_term",
"arrayvec", "arrayref", "autocfg",
"base64", "bellman", "bigint", "bit_vec", "blake2b_simd", "blake2s_simd",
"block_buffer", "block_cipher", "block_modes", "block_padding",
"bls12_381", "byteorder",
"cfg_if", "chrono", "constant_time_eq", "cpuid_bool",
"crossbeam_channel", "crossbeam_deque", "crossbeam_epoch",
"crossbeam_utils", "crossbeam_queue", "crossbeam",
"crunchy", "crypto_api", "crypto_api_chachapoly", "curve25519_dalek",
"directories", "dirs_sys", "digest",
"ed25519_zebra", "equihash",
"ff", "ff_derive", "fpe", "futures_cpupool", "futures",
"generic_array", "getrandom", "group",
"hermit_abi", "hex", "jubjub", "log",
"lazy_static", "libc", "matchers", "maybe_uninit", "memoffset",
"num_bigint", "num_cpus", "num_integer", "num_traits",
"ppv_lite86", "proc_macro2", "quote",
"opaque_debug", "pairing", "rand", "typenum",
"rand_chacha", "rand_core", "rand_hc", "rand_xorshift",
"redox_syscall", "redox_users",
"regex", "regex_automata", "regex_syntax", "rust_argon2",
"scopeguard", "serde",
"serde_derive", "sha2", "sharded_slab", "subtle", "syn", "thiserror",
"thiserror_impl", "thread_local", "time", "tracing", "tracing_appender",
"tracing_attributes", "tracing_core", "tracing_subscriber",
"unicode_xid", "version_check", "wasi",
"winapi_i686_pc_windows_gnu", "winapi",
"winapi_x86_64_pc_windows_gnu", "zcash_history", "zcash_primitives",
"zcash_proofs", "zeroize"
]
# Sometimes we need multiple versions of a crate, in which case there can't
# be a direct mapping between the filename portion and the crate name.
crate_name_exceptions = {
}
for crate in crates:
if crate in crate_name_exceptions.keys():
crate_name = crate_name_exceptions[crate]
else:
crate_name = crate
# Rust dependency checks are temporarily disabled:
# https://github.com/zcash/zcash/issues/4726
# No-op statement to keep pyflakes happy:
crate_name = crate_name
# dependencies.append(
# Dependency("crate_" + crate,
# RustCrateReleaseLister(crate_name),
# DependsVersionGetter("crate_" + crate)
# )
# )
return dependencies
class Version(list):
def __init__(self, version_tuple):
for part in version_tuple:
if part: # skip None's which can come from optional regexp groups
if str(part).isdigit():
self.append(int(part))
else:
self.append(part)
def __str__(self):
return '.'.join(map(str, self))
def __hash__(self):
return hash(tuple(self))
class Dependency:
def __init__(self, name, release_lister, current_getter):
self.name = name
self.release_lister = release_lister
self.current_getter = current_getter
self.cached_known_releases = None
def current_version(self):
return self.current_getter.current_version()
def known_releases(self):
if self.cached_known_releases is None:
self.cached_known_releases = sorted(self.release_lister.known_releases())
return self.cached_known_releases
def released_versions_after_current_version(self):
current_version = self.current_version()
releases_after_current = []
for release in self.known_releases():
if release > current_version:
releases_after_current.append(release)
return releases_after_current
def is_up_to_date(self):
return len(self.released_versions_after_current_version()) == 0
class GithubTagReleaseLister:
def __init__(self, org, repo, regex, testcases={}):
self.org = org
self.repo = repo
self.regex = regex
self.testcases = testcases
for tag, expected in testcases.items():
match = re.match(self.regex, tag)
if (expected and not match) or (match and not expected) or (match and Version(match.groups()) != list(expected)):
groups = str(match.groups())
raise RuntimeError("GitHub tag regex test case [" + tag + "] failed, got [" + groups + "].")
def known_releases(self):
release_versions = []
all_tags = self.all_tag_names()
# sanity check against the test cases
for tag, expected in self.testcases.items():
if tag not in all_tags:
raise RuntimeError("Didn't find expected tag [" + tag + "].")
for tag_name in all_tags:
match = re.match(self.regex, tag_name)
if match:
release_versions.append(Version(match.groups()))
return release_versions
def all_tag_names(self):
url = "https://api.github.com/repos/" + safe(self.org) + "/" + safe(self.repo) + "/git/refs/tags"
r = requests.get(url, auth=requests.auth.HTTPBasicAuth(GITHUB_API_BASIC_AUTH_USER, GITHUB_API_BASIC_AUTH_PASSWORD))
if r.status_code != 200:
raise RuntimeError("Request to GitHub tag API failed.")
json = r.json()
return list(map(lambda t: t["ref"].split("/")[-1], json))
class RustCrateReleaseLister:
def __init__(self, crate):
self.crate = crate
def known_releases(self):
url = "https://crates.io/api/v1/crates/" + safe(self.crate) + "/versions"
r = requests.get(url)
if r.status_code != 200:
raise RuntimeError("Request to crates.io versions API failed.")
json = r.json()
version_numbers = list(map(lambda t: t["num"], json["versions"]))
release_versions = []
for num in version_numbers:
match = re.match("^(\d+)\.(\d+)\.(\d+)$", num)
if match:
release_versions.append(Version(match.groups()))
if len(release_versions) == 0:
raise RuntimeError("Failed to list release versions from crates.io.")
return release_versions
class LibGmpReleaseLister:
def known_releases(self):
url = "https://gmplib.org/download/gmp/"
r = requests.get(url)
if r.status_code != 200:
raise RuntimeError("Request to libgmp download directory failed.")
page = r.text
# We use a set because the search will result in duplicates.
release_versions = set()
for match in re.findall("gmp-(\d+)\.(\d+)\.(\d+)\.tar.bz2", page):
release_versions.add(Version(match))
if Version((6, 1, 2)) not in release_versions:
raise RuntimeError("Missing expected version from libgmp download directory.")
return list(release_versions)
class BerkeleyDbReleaseLister:
def known_releases(self):
url = "https://www.oracle.com/technetwork/products/berkeleydb/downloads/index-082944.html"
r = requests.get(url)
if r.status_code != 200:
raise RuntimeError("Request to Berkeley DB download directory failed.")
page = r.text
# We use a set because the search will result in duplicates.
release_versions = set()
for match in re.findall("Berkeley DB (\d+)\.(\d+)\.(\d+)\.tar.gz", page):
release_versions.add(Version(match))
if len(release_versions) == 0:
raise RuntimeError("Missing expected version from Oracle web page.")
return list(release_versions)
class DependsVersionGetter:
def __init__(self, name):
self.name = name
def current_version(self):
mk_file_path = os.path.join(SOURCE_ROOT, "depends", "packages", safe_depends(self.name) + ".mk")
mk_file = open(mk_file_path, 'r', encoding='utf8').read()
regexp_whitelist = [
"package\)_version=(\d+)\.(\d+)\.(\d+)$",
"package\)_version=(\d+)\.(\d+)$",
"package\)_version=(\d+)_(\d+)_(\d+)$",
"package\)_version=(\d+)\.(\d+)\.(\d+)([a-z])$",
# Workaround for wasi 0.9.0 preview
"package\)_version=(\d+)\.(\d+)\.(\d+)\+wasi-snapshot-preview1$",
]
current_version = None
for regexp in regexp_whitelist:
match = re.search(regexp, mk_file, re.MULTILINE)
if match:
current_version = Version(match.groups())
if not current_version:
raise RuntimeError("Couldn't parse version number from depends .mk file.")
return current_version
class LevelDbVersionGetter:
def current_version(self):
header_path = os.path.join(SOURCE_ROOT, "src", "leveldb", "include", "leveldb", "db.h")
header_contents = open(header_path, 'r', encoding='utf8').read()
match = re.search("kMajorVersion\s*=\s*(\d+);\s*.*kMinorVersion\s*=\s*(\d+);\s*$", header_contents, re.MULTILINE)
if match:
return Version(match.groups())
else:
raise RuntimeError("Couldn't parse LevelDB's version from db.h")
class UnivalueVersionGetter:
def current_version(self):
configure_path = os.path.join(SOURCE_ROOT, "src", "univalue", "configure.ac")
configure_contents = open(configure_path, 'r', encoding='utf8').read()
match = re.search("AC_INIT.*univalue.*\[(\d+)\.(\d+)\.(\d+)\]", configure_contents)
if match:
return Version(match.groups())
else:
raise RuntimeError("Couldn't parse univalue's version from its configure.ac")
class PostponedUpdates():
def __init__(self):
self.postponedlist = dict()
postponedlist_path = os.path.join(
os.path.dirname(__file__),
"postponed-updates.txt"
)
file = open(postponedlist_path, 'r', encoding='utf8')
for line in file.readlines():
stripped = re.sub('#.*$', '', line).strip()
if stripped != "":
match = re.match('^(\S+)\s+(\S+)\s+(\S+)$', stripped)
if match:
postponed_name = match.groups()[0]
postponed_version = Version(match.groups()[1].split("."))
postpone_expiration = datetime.datetime.strptime(match.groups()[2], '%Y-%m-%d')
if datetime.datetime.utcnow() < postpone_expiration:
self.postponedlist[(postponed_name, str(postponed_version))] = True
else:
raise RuntimeError("Could not parse line in postponed-updates.txt:" + line)
def is_postponed(self, name, version):
return (name, str(version)) in self.postponedlist
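# Illustrative postponed-updates.txt line (values are assumed):
#   boost 1.75.0 2021-06-01
# would make is_postponed("boost", Version((1, 75, 0))) return True until 2021-06-01,
# compared against the current UTC time when the file is parsed.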
def safe(string):
if re.match('^[a-zA-Z0-9_-]*$', string):
return string
else:
raise RuntimeError("Potentially-dangerous string encountered.")
def safe_depends(string):
if re.match('^[a-zA-Z0-9._-]*$', string):
return string
else:
raise RuntimeError("Potentially-dangerous string encountered.")
def print_row(name, status, current_version, known_versions):
COL_FMT_LARGE = "{:<35}"
COL_FMT_SMALL = "{:<18}"
print(COL_FMT_LARGE.format(name) +
COL_FMT_SMALL.format(status) +
COL_FMT_SMALL.format(current_version) +
COL_FMT_SMALL.format(known_versions))
def main():
#
<reponame>adunmore/triage
import pandas
from triage.component.audition.selection_rules import (
best_current_value,
best_average_value,
most_frequent_best_dist,
best_average_two_metrics,
best_avg_var_penalized,
best_avg_recency_weight,
lowest_metric_variance,
)
def test_best_current_value_greater_is_better():
df = pandas.DataFrame.from_dict(
{
"model_group_id": ["1", "2", "4", "1", "2", "3"],
"model_id": ["1", "2", "3", "4", "5", "6"],
"train_end_time": [
"2011-01-01",
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2012-01-01",
],
"metric": [
"precision@",
"precision@",
"precision@",
"precision@",
"precision@",
"precision@",
],
"parameter": [
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
],
"raw_value": [0.5, 0.4, 0.4, 0.6, 0.8, 0.7],
"dist_from_best_case": [0.0, 0.1, 0.1, 0.1, 0.0, 0.0],
}
)
assert best_current_value(df, "2012-01-01", "precision@", "100_abs", n=2) == [
"2",
"3",
]
assert best_current_value(df, "2011-01-01", "precision@", "100_abs", n=2) == ["1"]
assert best_current_value(df, "2011-01-01", "precision@", "100_abs", n=1) == ["1"]
assert best_current_value(df, "2012-01-01", "precision@", "100_abs") == ["2"]
def test_best_current_value_lesser_is_better():
df = pandas.DataFrame.from_dict(
{
"model_group_id": ["1", "2", "3", "1", "2"],
"model_id": ["1", "2", "3", "4", "5"],
"train_end_time": [
"2011-01-01",
"2011-01-01",
"2011-01-01",
"2012-01-01",
"2012-01-01",
],
"metric": [
"false positives@",
"false positives@",
"false positives@",
"false positives@",
"false positives@",
],
"parameter": ["100_abs", "100_abs", "100_abs", "100_abs", "100_abs"],
"raw_value": [40, 50, 55, 60, 70],
"dist_from_best_case": [0, 10, 5, 0, 10],
}
)
assert best_current_value(df, "2011-01-01", "false positives@", "100_abs", n=2) == [
"1",
"2",
]
assert best_current_value(df, "2012-01-01", "false positives@", "100_abs", n=1) == [
"1"
]
def test_best_average_value_greater_is_better():
df = pandas.DataFrame.from_dict(
{
"model_group_id": ["1", "2", "1", "2", "1", "2"],
"model_id": ["1", "2", "3", "4", "5", "6"],
"train_end_time": [
"2011-01-01",
"2011-01-01",
"2012-01-01",
"2012-01-01",
"2013-01-01",
"2013-01-01",
],
"metric": [
"precision@",
"precision@",
"precision@",
"precision@",
"precision@",
"precision@",
],
"parameter": [
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
],
"raw_value": [0.5, 0.4, 0.6, 0.69, 0.62, 0.62],
"dist_from_best_case": [0.0, 0.1, 0.1, 0.0, 0.0, 0.0],
}
)
assert best_average_value(df, "2013-01-01", "precision@", "100_abs", n=2) == [
"1",
"2",
]
assert best_average_value(df, "2012-01-01", "precision@", "100_abs", n=1) == ["1"]
def test_best_average_value_lesser_is_better():
df = pandas.DataFrame.from_dict(
{
"model_group_id": ["1", "2", "3", "1", "2", "3", "1", "2", "3"],
"model_id": ["1", "2", "3", "4", "5", "6", "7", "8", "9"],
"train_end_time": [
"2011-01-01",
"2011-01-01",
"2011-01-01",
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2013-01-01",
"2013-01-01",
"2013-01-01",
],
"metric": [
"false positives@",
"false positives@",
"false positives@",
"false positives@",
"false positives@",
"false positives@",
"false positives@",
"false positives@",
"false positives@",
],
"parameter": [
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
],
"raw_value": [20, 30, 10, 30, 30, 10, 10, 5, 10],
"dist_from_best_case": [10, 20, 0, 20, 20, 0, 5, 0, 5],
}
)
assert best_average_value(df, "2012-01-01", "false positives@", "100_abs", n=2) == [
"3",
"1",
]
assert best_average_value(df, "2013-01-01", "false positives@", "100_abs") == ["3"]
def test_most_frequent_best_dist():
df = pandas.DataFrame.from_dict(
{
"model_group_id": ["1", "2", "3", "1", "2", "3", "1", "2", "3"],
"model_id": ["1", "2", "3", "4", "5", "6", "7", "8", "9"],
"train_end_time": [
"2011-01-01",
"2011-01-01",
"2011-01-01",
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2013-01-01",
"2013-01-01",
"2013-01-01",
],
"metric": [
"precision@",
"precision@",
"precision@",
"precision@",
"precision@",
"precision@",
"precision@",
"precision@",
"precision@",
],
"parameter": [
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
],
"raw_value": [0.5, 0.4, 0.4, 0.6, 0.69, 0.6, 0.6, 0.62, 0.57],
"dist_from_best_case": [0.0, 0.1, 0.1, 0.09, 0.0, 0.09, 0.02, 0.0, 0.05],
}
)
assert most_frequent_best_dist(df, "2013-01-01", "precision@", "100_abs", 0.01) == [
"2"
]
assert most_frequent_best_dist(
df, "2013-01-01", "precision@", "100_abs", 0.01, n=2
) == ["2", "1"]
def test_best_average_two_metrics_greater_is_better():
df = pandas.DataFrame.from_dict(
{
"model_group_id": ["1", "1", "2", "2", "1", "1", "2", "2"],
"model_id": ["1", "1", "2", "2", "3", "3", "4", "4"],
"train_end_time": [
"2011-01-01",
"2011-01-01",
"2011-01-01",
"2011-01-01",
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2012-01-01",
],
"metric": [
"precision@",
"recall@",
"precision@",
"recall@",
"precision@",
"recall@",
"precision@",
"recall@",
],
"parameter": [
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
],
"raw_value": [0.6, 0.4, 0.4, 0.6, 0.5, 0.5, 0.4, 0.5],
"dist_from_best_case": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
}
)
assert best_average_two_metrics(
df, "2013-01-01", "precision@", "100_abs", "recall@", "100_abs", 0.5
) == ["1"]
assert best_average_two_metrics(
df, "2013-01-01", "precision@", "100_abs", "recall@", "100_abs", 0.5, n=2
) == ["1", "2"]
assert best_average_two_metrics(
df, "2013-01-01", "precision@", "100_abs", "recall@", "100_abs", 0.1, n=2
) == ["2", "1"]
def test_best_average_two_metrics_lesser_is_better():
df = pandas.DataFrame.from_dict(
{
"model_group_id": ["1", "1", "2", "2", "1", "1", "2", "2"],
"model_id": ["1", "1", "2", "2", "3", "3", "4", "4"],
"train_end_time": [
"2011-01-01",
"2011-01-01",
"2011-01-01",
"2011-01-01",
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2012-01-01",
],
"metric": [
"false positives@",
"false negatives@",
"false positives@",
"false negatives@",
"false positives@",
"false negatives@",
"false positives@",
"false negatives@",
],
"parameter": [
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
],
"raw_value": [20, 30, 40, 20, 20, 30, 40, 20],
"dist_from_best_case": [0, 10, 20, 0, 0, 10, 20, 0],
}
)
assert best_average_two_metrics(
df,
"2013-01-01",
"false positives@",
"100_abs",
"false negatives@",
"100_abs",
0.5,
) == ["1"]
assert best_average_two_metrics(
df,
"2013-01-01",
"false positives@",
"100_abs",
"false negatives@",
"100_abs",
0.5,
n=2,
) == ["1", "2"]
assert best_average_two_metrics(
df,
"2013-01-01",
"false positives@",
"100_abs",
"false negatives@",
"100_abs",
0.1,
n=2,
) == ["2", "1"]
def test_lowest_metric_variance():
df = pandas.DataFrame.from_dict(
{
"model_group_id": ["1", "2", "3", "1", "2", "3", "1", "2", "3"],
"model_id": ["1", "2", "3", "4", "5", "6", "7", "8", "9"],
"train_end_time": [
"2011-01-01",
"2011-01-01",
"2011-01-01",
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2013-01-01",
"2013-01-01",
"2013-01-01",
],
"metric": [
"precision@",
"precision@",
"precision@",
"precision@",
"precision@",
"precision@",
"precision@",
"precision@",
"precision@",
],
"parameter": [
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
],
"raw_value": [0.5, 0.5, 0.5, 0.6, 0.9, 0.5, 0.4, 0.2, 0.5],
"dist_from_best_case": [0.0, 0.0, 0.0, 0.3, 0.0, 0.4, 0.1, 0.3, 0.0],
}
)
assert lowest_metric_variance(df, "2013-01-01", "precision@", "100_abs") == ["3"]
assert lowest_metric_variance(df, "2013-01-01", "precision@", "100_abs", n=2) == [
"3",
"1",
]
def test_best_avg_var_penalized_greater_is_better():
df = pandas.DataFrame.from_dict(
{
"model_group_id": ["1", "2", "1", "2", "1", "2"],
"model_id": ["1", "2", "3", "4", "5", "6"],
"train_end_time": [
"2011-01-01",
"2011-01-01",
"2012-01-01",
"2012-01-01",
"2013-01-01",
"2013-01-01",
],
"metric": [
"precision@",
"precision@",
"precision@",
"precision@",
"precision@",
"precision@",
],
"parameter": [
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
],
"raw_value": [0.5, 0.4, 0.8, 0.3, 0.2, 0.5],
"dist_from_best_case": [0.0, 0.1, 0.0, 0.5, 0.3, 0.0],
}
)
assert best_avg_var_penalized(df, "2013-01-01", "precision@", "100_abs", 0.5) == [
"1"
]
assert best_avg_var_penalized(
df, "2013-01-01", "precision@", "100_abs", 0.5, n=2
) == ["1", "2"]
assert best_avg_var_penalized(
df, "2013-01-01", "precision@", "100_abs", 1.0, n=2
) == ["2", "1"]
def test_best_avg_var_penalized_lesser_is_better():
df = pandas.DataFrame.from_dict(
{
"model_group_id": ["1", "2", "1", "2", "1", "2"],
"model_id": ["1", "2", "3", "4", "5", "6"],
"train_end_time": [
"2011-01-01",
"2011-01-01",
"2012-01-01",
"2012-01-01",
"2013-01-01",
"2013-01-01",
],
"metric": [
"false positives@",
"false positives@",
"false positives@",
"false positives@",
"false positives@",
"false positives@",
],
"parameter": [
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
],
"raw_value": [40, 50, 10, 60, 70, 40],
"dist_from_best_case": [0, 10, 0, 50, 30, 0],
}
)
assert best_avg_var_penalized(
df, "2013-01-01", "false positives@", "100_abs", 0.2
) == ["1"]
assert best_avg_var_penalized(
df, "2013-01-01", "false positives@", "100_abs", 0.2, n=2
) == ["1", "2"]
assert best_avg_var_penalized(
df, "2013-01-01", "false positives@", "100_abs", 0.7, n=2
) == ["2", "1"]
def test_best_avg_recency_weight_greater_is_better():
df = pandas.DataFrame.from_dict(
{
"model_group_id": ["1", "2", "3", "1", "2", "3", "1", "2", "3"],
"model_id": ["1", "2", "3", "4", "5", "6", "7", "8", "9"],
"train_end_time": [
"2011-01-01",
"2011-01-01",
"2011-01-01",
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2013-01-01",
"2013-01-01",
"2013-01-01",
],
"metric": [
"precision@",
"precision@",
"precision@",
"precision@",
"precision@",
"precision@",
"precision@",
"precision@",
"precision@",
],
"parameter": [
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
],
"raw_value": [0.8, 0.2, 0.4, 0.5, 0.5, 0.5, 0.2, 0.7, 0.5],
"dist_from_best_case": [0.0, 0.4, 0.2, 0.0, 0.0, 0.0, 0.5, 0.0, 0.2],
}
)
df["train_end_time"] = pandas.to_datetime(df["train_end_time"])
assert best_avg_recency_weight(
df, "2013-01-01", "precision@", "100_abs", 1.00, "linear"
) == ["1"]
assert best_avg_recency_weight(
df, "2013-01-01", "precision@", "100_abs", 1.00, "linear", n=2
) == ["1", "2"]
assert best_avg_recency_weight(
df, "2013-01-01", "precision@", "100_abs", 1.15, "linear"
) == ["1"]
assert best_avg_recency_weight(
df, "2013-01-01", "precision@", "100_abs", 1.50, "linear"
) == ["2"]
assert best_avg_recency_weight(
df, "2013-01-01", "precision@", "100_abs", 1.50, "linear", n=2
) == ["2", "3"]
def test_best_avg_recency_weight_lesser_is_better():
df = pandas.DataFrame.from_dict(
{
"model_group_id": ["1", "2", "3", "1", "2", "3", "1", "2", "3"],
"model_id": ["1", "2", "3", "4", "5", "6", "7", "8", "9"],
"train_end_time": [
"2011-01-01",
"2011-01-01",
"2011-01-01",
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2013-01-01",
"2013-01-01",
"2013-01-01",
],
"metric": [
"false positives@",
"false positives@",
"false positives@",
"false positives@",
"false positives@",
"false positives@",
"false positives@",
"false positives@",
"false positives@",
],
"parameter": [
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
"100_abs",
],
"raw_value": [20, 90, 40, 50, 50, 50, 80, 20, 50],
"dist_from_best_case": [70, 0, 50, 0, 0, 0, 0, 60, 30],
}
)
df["train_end_time"] = pandas.to_datetime(df["train_end_time"])
assert best_avg_recency_weight(
df, "2013-01-01", "false | |
Radiometric relation between scene radiance L and the light Irradiance E reaching the pixel sensor
:param L: Scene Radiance
:param N: Lens Aperture
:param alfa: Off-Axis Angle
:return: Irradiance reaching the pixel sensor
"""
E = L*self.lens_aperture_attenuation(N)*self.natural_vignetting(alfa)
return E
@staticmethod
def lens_aperture_attenuation(N):
return (np.pi/4)*((1/N)**2)
@staticmethod
def natural_vignetting(alfa):
return np.cos(alfa)**4
def get_aperture_diameter(self, N):
"""
Compute the lens aperture diameter
:param N: Lens aperture number
:return: Diameter of lens aperture in mm
"""
d = self.focal_length/N
return d
def init_generic_lens(self, f, t):
"""
Initialize a generic lens based on the given focal length and constant transmittance value
:param f: Focal length in mm
:param t: Transmittance as a fraction
:return: True if successful, False otherwise
"""
self.focal_length = f
if t > 1:
logger.error("Transmittance value of %f bigger then 1", t)
return False
self.transmittance_wav = range(400, 1001, 50)
self.transmittance = [t] * len(self.transmittance_wav)
self.initialized = True
return True
def reset(self):
"""
Reset the class to original state
:return: None
"""
self.__init__()
class Camera:
def __init__(self):
self.sensor = Sensor()
self.lens = Lens()
self.housing = 'flat'
self.port_refraction_idx = 1.5
self.dome_radius = 0.1
self.dome_thickness = 0.01
self.vectorized_dof = np.vectorize(self.get_depth_of_field_width, excluded='self')
self.vectorized_framerate = np.vectorize(self.compute_framerate, excluded=['self', 'axis', 'overlap'])
self.vectorized_exposure = np.vectorize(self.max_blur_shutter_time, excluded=['self', 'axis', 'blur'])
@property
def effective_focal_length(self):
"""
Effect of underwater housings on focal length, from Lavest et al "Underwater Camera Calibration"
:return:
"""
if self.housing == 'flat':
return self.lens.focal_length*1.33
elif self.housing == 'domed':
return self.lens.focal_length
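# Worked example (illustrative numbers): a 12 mm lens behind a flat port behaves like a
# 12 * 1.33 ~= 16 mm lens underwater, while behind a domed port it keeps its in-air
# focal length.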
def set_housing(self, housing_type):
"""
Set the type of housing the camera is located in. Flat viewports affect the focal length
:param housing_type: Either 'flat' or 'domed'
:return: None
"""
if housing_type == 'flat':
self.housing = housing_type
elif housing_type == 'domed':
self.housing = housing_type
else:
logger.error("Undefined housing type")
def initialized(self):
"""
Test if both sensor and lens have been initialized
:return: True or False
"""
return self.sensor.initialized and self.lens.initialized
def get_angular_fov(self, axis):
"""
Get the angular field of view in radians on the specified axis
:param axis: Either x or y
:return: FOV in radians
"""
fov = 2. * np.arctan2(self.sensor.get_sensor_size(axis)/1000., 2.*self.effective_focal_length)
logger.debug("Angular FOV: %f", fov)
return fov
def get_fov(self, axis, working_distance):
"""
Get the size of the area covered by the image
:param axis: x or y
:param working_distance: Distance between the camera and the subject
:return: size of the area covered by the image along the given axis in the same units as working distance
"""
if self.initialized():
fov = 2. * working_distance * np.tan(self.get_angular_fov(axis)/2.)
return fov
else:
logger.error("Tried to cpmpute camera FOV without initializing sensor or lens")
return -1
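# Worked example (illustrative numbers): with a 60 degree angular FOV on an axis and a
# working distance of 3 m, the swath covered along that axis is
# 2 * 3 * tan(60deg / 2) = 2 * 3 * 0.577 ~= 3.46 m.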
def max_blur_shutter_time(self, axis, working_distance, camera_speed, blur):
"""
Compute the maximum exposure time to ensure motion smaller than the specified value
:param axis: Axis (x or y) along which the camera is moving
:param working_distance: Distance between the camera and the subject, in m
:param camera_speed: Speed of the camera movement along the specified axis in m/s
:param blur: Maximum allowable motion blur in pixels
:return:
"""
if camera_speed <= 0:
logger.error("Vehicle speed has to be positive for exposure calculation")
return 0
pixel_res = self.sensor.get_resolution(axis)/self.get_fov(axis, working_distance)
pixel_speed = camera_speed * pixel_res
max_shutter_time = blur/pixel_speed
return max_shutter_time
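# Worked example (illustrative numbers): a 2000-pixel axis imaging a 2 m swath gives a
# resolution of 1000 px/m. At a camera speed of 0.5 m/s the scene sweeps 500 px/s across
# the sensor, so a 1 px blur budget allows at most 1/500 s = 2 ms of exposure.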
def get_depth_of_field(self, lens_aperture, working_distance):
if self.housing == 'flat':
return self.compute_depth_of_field(lens_aperture, working_distance)
else: # self.housing == 'domed':
return self.dome_compute_depth_of_field(lens_aperture, working_distance)
def get_depth_of_field_width(self, lens_aperture, working_distance):
(n, f) = self.get_depth_of_field(lens_aperture, working_distance)
return f-n
def compute_depth_of_field(self, lens_aperture, working_distance):
f = self.effective_focal_length
c = self.sensor.get_coc()
S = working_distance * 1000.
m = f/S
N = lens_aperture
# Resource: http://www.dofmaster.com/equations.html
H = f**2/(N*c) + f # Hyperfocal distance: H = f^2/(N*c) + f, all terms in mm
dn = S*(H-f)/(H+S-2*f) # Near distance of acceptable sharpness
if (H-S)/(S*(H-f)) < 1.E-6: # Check inverse first for infinity
df = 1.E6
else:
df = S*(H-f)/(H-S) # Far distance of acceptable sharpness
# dof = (2.*lens_aperture*c*working_distance**2) / (f**2)
# dof = (2.*N*c*(m+1)) / (m**2 - (N*c/f)**2)
return (dn/1000., df/1000.) # Return in m
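# Worked example (illustrative numbers, using the dofmaster near/far formulas above):
# with f = 50 mm, N = 8, c = 0.03 mm and S = 2000 mm, H ~= 50**2/(8*0.03) + 50 ~= 10467 mm,
# giving a near limit of about 1.68 m and a far limit of about 2.46 m, i.e. roughly
# 0.78 m of depth of field around a 2 m working distance.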
def dome_compute_depth_of_field(self, lens_aperture, working_distance):
# compute camera focus distance for virtual image
focus_distance = self.dome_world_to_virtual_dist(working_distance)
(dn_v, df_v) = self.compute_depth_of_field(lens_aperture, focus_distance)
dn = self.dome_virtual_to_world_dist(dn_v)
df = self.dome_virtual_to_world_dist(df_v)
return (dn, df)
def dome_world_to_virtual_dist(self, dist):
# nd = index of refraction of dome
nd = self.port_refraction_idx #index of refraction of port
d = self.dome_thickness #assume concentric dome
r1 = self.dome_radius #external dome radius
r2 = r1 - d #internal dome radius
na = 1.0 #index of refraction of air
nw = 1.33 #index of refraction of water
p = dist #p=object distance relative to external vertex
f1 = nw*(r1/(nd-nw)) #primary focal length of dome's external surface
f1p = f1*nd/nw #secondary focal length of dome's external surface
f2p = nd*(r2/(na-nd)) #primary focal length of dome's internal surface
f2pp = f2p*na/nd #secondary focal length of dome's internal surface
enovrf = nd/f1p + na/f2pp - (d/f1p)*(na/f2pp)
f = nw/enovrf #primary focal length of the dome
fpp = f*na/nw #secondary focal length of the dome
a1f = -f*(1.-d/f2p) #position of the dome's primary focal plane relative
#to the dome's external vertex (on the dome axis)
a2fpp = fpp*(1.-d/f1p) #position of the dome's secondary focal plane relative
#to the dome's internal vertex (on the dome axis)
a1h = f*d/f2p #position of the dome's primary principal plane relative
#to the dome's external vertex (on the dome axis)
g = r1 - a1h #g=distance from dome principal plane to lens primary
#principal plane. We assume lens primary principal plane
#at center of dome curvature
a2hpp = -fpp*d/f1p #position of the dome's secondary principal plane relative
#to the dome's internal vertex (on the dome axis)
s = p+a1h #s=object distance relative to primary principal plane
spp = na/(nw/f - nw/s) #image distance relative to secondary principal plane
ppp = spp+a2hpp+d #image distance relative to external vertex
# m = -(nw/na)*(spp/s) #size of image relative to size of object
# view = -(spp-g)/(s+g)/m #view=tangent of lens' effective half-angle of view
#divided by tangent of lens' in-air half-angle
# of view
return -ppp
def dome_virtual_to_world_dist(self, dist):
# nd = index of refraction of dome
nd = self.port_refraction_idx #index of refraction of port
d = self.dome_thickness #assume concentric dome
r1 = self.dome_radius #external dome radius
r2 = r1 - d #internal dome radius
na = 1.0 #index of refraction of air
nw = 1.33 #index of refraction of water
ppp = -dist #ppp=virtual distance relative to external vertex
f1 = nw*(r1/(nd-nw)) #primary focal length of dome's external surface
f1p = f1*nd/nw #secondary focal length of dome's external surface
f2p = nd*(r2/(na-nd)) #primary focal length of dome's internal surface
f2pp = f2p*na/nd #secondary focal length of dome's internal surface
enovrf = nd/f1p + na/f2pp - (d/f1p)*(na/f2pp)
f = nw/enovrf #primary focal length of the dome
fpp = f*na/nw #secondary focal length of the dome
a1f = -f*(1.-d/f2p) #position of the dome's primary focal plane relative
#to the dome's external vertex (on the dome axis)
a2fpp = fpp*(1.-d/f1p) #position of the dome's secondary focal plane relative
#to the dome's internal vertex (on the dome axis)
a1h = f*d/f2p #position of the dome's primary principal plane relative
#to the dome's external vertex (on the dome axis)
g = r1 - a1h #g=distance from dome principal plane to lens primary
#principal plane. We assume lens primary principal plane
#at center of dome curvature
a2hpp = -fpp*d/f1p #position of the dome's secondary principal plane relative
#to the dome's internal vertex (on the dome axis)
spp = ppp-a2hpp-d #image distance relative to secondary principal plane
if ((1/f - na/(spp*nw)) < 1E-3):
return 1000.
s = 1/(1/f - na/(spp*nw)) #object distance relative to primary principal plane
p = s-a1h #object distance relative to dome's external vertex
return p
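# Note: dome_world_to_virtual_dist and dome_virtual_to_world_dist are algebraic inverses of
# each other (away from the clamped branch that returns 1000.), so an illustrative round trip
# with a hypothetical `cam` instance would look like:
#   d_virtual = cam.dome_world_to_virtual_dist(2.0)
#   d_world   = cam.dome_virtual_to_world_dist(d_virtual)   # ~2.0 again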
def compute_aperture(self, dof, working_distance):
"""
Equation from https://en.wikipedia.org/wiki/Depth_of_field
:param coc: Circle of confusion: specs the allowable blur, in mm
:param dof: desired depth of field, in m
:param working_distance: Distance at which the camera is focused
:return:
"""
logger.debug("Computing aperture")
coc = self.sensor.get_coc()
df = (working_distance + dof/2.) * 1000. | |
+= tf.einsum("kd,ibk->ibd", cur_y, perms[i])
tables.append(lookup_table)
projs.append(proj_W)
if perms is None:
cat_lookup = tf.concat(cat_lookup, 0)
y = embedding_lookup(cat_lookup, x)
else:
y = cat_lookup
ret_params = [tables, projs]
y *= emb_scale
return y, ret_params
def mask_adaptive_logsoftmax(
hidden,
target,
n_token,
d_embed,
d_proj,
cutoffs,
params,
tie_projs,
initializer=None,
proj_initializer=None,
div_val=1,
scope="adaptive_softmax",
proj_same_dim=True,
return_mean=True,
**kwargs,
):
def _logit(x, W, b, proj):
y = x
if proj is not None:
y = tf.einsum("ibd,ed->ibe", y, proj)
return tf.einsum("ibd,nd->ibn", y, W) + b
params_W, params_projs = params[0], params[1]
def _gather_logprob(logprob, target):
lp_size = tf.shape(logprob)
r = tf.range(lp_size[0])
idx = tf.stack([r, target], 1)
return tf.gather_nd(logprob, idx)
with tf.variable_scope(scope):
if len(cutoffs) == 0:
softmax_b = tf.get_variable("bias", [n_token], initializer=tf.zeros_initializer())
output = _logit(hidden, params_W, softmax_b, params_projs)
nll = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
else:
cutoff_ends = [0] + cutoffs + [n_token]
nll = tf.zeros_like(target, dtype=tf.float32)
for i in range(len(cutoff_ends) - 1):
with tf.variable_scope("cutoff_{}".format(i)):
l_idx, r_idx = cutoff_ends[i], cutoff_ends[i + 1]
mask = (target >= l_idx) & (target < r_idx)
mask_idx = tf.where(mask)
cur_target = tf.boolean_mask(target, mask) - l_idx
cur_d_embed = d_embed // (div_val ** i)
if div_val == 1:
cur_W = params_W[l_idx:r_idx]
else:
cur_W = params_W[i]
cur_b = tf.get_variable(
"b", [r_idx - l_idx], initializer=tf.zeros_initializer()
)
if tie_projs[i]:
if div_val == 1:
cur_proj = params_projs
else:
cur_proj = params_projs[i]
else:
if (div_val == 1 or not proj_same_dim) and d_proj == cur_d_embed:
cur_proj = None
else:
cur_proj = tf.get_variable(
"proj", [cur_d_embed, d_proj], initializer=proj_initializer
)
if i == 0:
cluster_W = tf.get_variable(
"cluster_W", [len(cutoffs), d_embed], initializer=tf.zeros_initializer()
)
cluster_b = tf.get_variable(
"cluster_b", [len(cutoffs)], initializer=tf.zeros_initializer()
)
cur_W = tf.concat([cur_W, cluster_W], 0)
cur_b = tf.concat([cur_b, cluster_b], 0)
head_logit = _logit(hidden, cur_W, cur_b, cur_proj)
head_logprob = tf.nn.log_softmax(head_logit)
cur_head_logprob = tf.boolean_mask(head_logprob, mask)
cur_logprob = _gather_logprob(cur_head_logprob, cur_target)
else:
cur_head_logprob = tf.boolean_mask(head_logprob, mask)
cur_hidden = tf.boolean_mask(hidden, mask)
tail_logit = tf.squeeze(_logit(cur_hidden[None], cur_W, cur_b, cur_proj), 0)
tail_logprob = tf.nn.log_softmax(tail_logit)
cur_logprob = cur_head_logprob[:, cutoff_ends[1] + i - 1] + _gather_logprob(
tail_logprob, cur_target
)
nll += tf.scatter_nd(mask_idx, -cur_logprob, tf.to_int64(tf.shape(nll)))
if return_mean:
nll = tf.reduce_mean(nll)
return nll
def mul_adaptive_logsoftmax(
hidden,
target,
n_token,
d_embed,
d_proj,
cutoffs,
params,
tie_projs,
initializer=None,
proj_initializer=None,
div_val=1,
perms=None,
proj_same_dim=True,
scope="adaptive_softmax",
**kwargs,
):
def _logit(x, W, b, proj):
y = x
if x.shape.ndims == 3:
if proj is not None:
y = tf.einsum("ibd,ed->ibe", y, proj)
return tf.einsum("ibd,nd->ibn", y, W) + b
else:
if proj is not None:
y = tf.einsum("id,ed->ie", y, proj)
return tf.einsum("id,nd->in", y, W) + b
params_W, params_projs = params[0], params[1]
with tf.variable_scope(scope):
if len(cutoffs) == 0:
softmax_b = tf.get_variable("bias", [n_token], initializer=tf.zeros_initializer())
output = _logit(hidden, params_W, softmax_b, params_projs)
nll = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
nll = tf.reduce_mean(nll)
else:
total_loss, total_cnt = 0, 0
cutoff_ends = [0] + cutoffs + [n_token]
for i in range(len(cutoff_ends) - 1):
with tf.variable_scope("cutoff_{}".format(i)):
l_idx, r_idx = cutoff_ends[i], cutoff_ends[i + 1]
cur_d_embed = d_embed // (div_val ** i)
if div_val == 1:
cur_W = params_W[l_idx:r_idx]
else:
cur_W = params_W[i]
cur_b = tf.get_variable(
"b", [r_idx - l_idx], initializer=tf.zeros_initializer()
)
if tie_projs[i]:
if div_val == 1:
cur_proj = params_projs
else:
cur_proj = params_projs[i]
else:
if (div_val == 1 or not proj_same_dim) and d_proj == cur_d_embed:
cur_proj = None
else:
cur_proj = tf.get_variable(
"proj", [cur_d_embed, d_proj], initializer=proj_initializer
)
if i == 0:
cluster_W = tf.get_variable(
"cluster_W", [len(cutoffs), d_embed], initializer=tf.zeros_initializer()
)
cluster_b = tf.get_variable(
"cluster_b", [len(cutoffs)], initializer=tf.zeros_initializer()
)
cur_W = tf.concat([cur_W, cluster_W], 0)
cur_b = tf.concat([cur_b, cluster_b], 0)
head_logit = _logit(hidden, cur_W, cur_b, cur_proj)
head_target = kwargs.get("head_target")
head_nll = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=head_target, logits=head_logit
)
masked_loss = head_nll * perms[i]
total_loss += tf.reduce_sum(masked_loss)
total_cnt += tf.reduce_sum(perms[i])
# head_logprob = tf.nn.log_softmax(head_logit)
# final_logprob = head_logprob * perms[i][:, :, None]
# final_target = tf.one_hot(target, tf.shape(head_logprob)[2])
# total_loss -= tf.einsum('ibn,ibn->', final_logprob, final_target)
# total_cnt += tf.reduce_sum(perms[i])
else:
cur_head_nll = tf.einsum("ib,ibk->k", head_nll, perms[i])
cur_hidden = tf.einsum("ibd,ibk->kd", hidden, perms[i])
tail_logit = _logit(cur_hidden, cur_W, cur_b, cur_proj)
tail_target = tf.einsum("ib,ibk->k", tf.to_float(target - l_idx), perms[i])
tail_nll = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=tf.to_int32(tail_target), logits=tail_logit
)
sum_nll = cur_head_nll + tail_nll
mask = tf.reduce_sum(perms[i], [0, 1])
masked_loss = sum_nll * mask
total_loss += tf.reduce_sum(masked_loss)
total_cnt += tf.reduce_sum(mask)
nll = total_loss / total_cnt
return nll
def _create_mask(qlen, mlen, same_length=False):
attn_mask = tf.ones([qlen, qlen])
mask_u = tf.matrix_band_part(attn_mask, 0, -1)
mask_dia = tf.matrix_band_part(attn_mask, 0, 0)
attn_mask_pad = tf.zeros([qlen, mlen])
ret = tf.concat([attn_mask_pad, mask_u - mask_dia], 1)
if same_length:
mask_l = tf.matrix_band_part(attn_mask, -1, 0)
ret = tf.concat([ret[:, :qlen] + mask_l - mask_dia, ret[:, qlen:]], 1)
return ret
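# Illustrative example: for qlen=3 and mlen=2 (same_length=False) the returned mask is
#   [[0. 0. 0. 1. 1.]
#    [0. 0. 0. 0. 1.]
#    [0. 0. 0. 0. 0.]]
# i.e. each query position can attend to all mlen memory slots and to query positions up to
# and including itself; entries equal to 1 mark the (future) positions that are masked out.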
def _cache_mem(curr_out, prev_mem, mem_len=None):
if mem_len is None or prev_mem is None:
new_mem = curr_out
elif mem_len == 0:
return prev_mem
else:
new_mem = tf.concat([prev_mem, curr_out], 0)[-mem_len:]
return tf.stop_gradient(new_mem)
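# Illustrative example: with mem_len=4, a previous memory of length 4 and a new segment of
# length 3, the concatenation has length 7 and the [-mem_len:] slice keeps only the 4 most
# recent positions; tf.stop_gradient makes the cached memory a constant for backpropagation.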
def transformer(
dec_inp,
mems,
n_token,
n_layer,
d_model,
d_embed,
n_head,
d_head,
d_inner,
initializer,
proj_initializer=None,
mem_len=None,
cutoffs=[],
div_val=1,
tie_projs=[],
same_length=False,
clamp_len=-1,
untie_r=False,
proj_same_dim=True,
scope="transformer",
reuse=tf.AUTO_REUSE,
):
"""
cutoffs: a list of python int. Cutoffs for adaptive softmax.
tie_projs: a list of python bools. Whether to tie the projections.
perms: a list of tensors. Each tensor should of size [len, bsz, bin_size].
Only used in the adaptive setting.
"""
new_mems = []
with tf.variable_scope(scope, reuse=reuse):
if untie_r:
r_w_bias = tf.get_variable(
"r_w_bias", [n_layer, n_head, d_head], initializer=initializer
)
r_r_bias = tf.get_variable(
"r_r_bias", [n_layer, n_head, d_head], initializer=initializer
)
else:
r_w_bias = tf.get_variable("r_w_bias", [n_head, d_head], initializer=initializer)
r_r_bias = tf.get_variable("r_r_bias", [n_head, d_head], initializer=initializer)
qlen = tf.shape(dec_inp)[0]
mlen = tf.shape(mems[0])[0] if mems is not None else 0
klen = mlen + qlen
if proj_initializer is None:
proj_initializer = initializer
lookup_fn = mask_adaptive_embedding_lookup
embeddings, shared_params = lookup_fn(
x=dec_inp,
n_token=n_token,
d_embed=d_embed,
d_proj=d_model,
cutoffs=cutoffs,
initializer=initializer,
proj_initializer=proj_initializer,
div_val=div_val,
proj_same_dim=proj_same_dim,
)
attn_mask = _create_mask(qlen, mlen, same_length)
pos_seq = tf.range(klen - 1, -1, -1.0)
if clamp_len > 0:
pos_seq = tf.minimum(pos_seq, clamp_len)
inv_freq = 1 / (10000 ** (tf.range(0, d_model, 2.0) / d_model))
pos_emb = positional_embedding(pos_seq, inv_freq)
if mems is None:
mems = [None] * n_layer
output = embeddings
for i in range(n_layer):
# cache new mems
new_mems.append(_cache_mem(output, mems[i], mem_len))
with tf.variable_scope("layer_{}".format(i)):
output = rel_multihead_attn(
w=output,
r=pos_emb,
r_w_bias=r_w_bias if not untie_r else r_w_bias[i],
r_r_bias=r_r_bias if not untie_r else r_r_bias[i],
attn_mask=attn_mask,
mems=mems[i],
d_model=d_model,
n_head=n_head,
d_head=d_head,
kernel_initializer=initializer,
)
output = positionwise_FF(
inp=output, d_model=d_model, d_inner=d_inner, kernel_initializer=initializer
)
return output, new_mems
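# Hedged usage sketch; the hyperparameter values below are assumptions, not taken from this
# code. dec_inp is an int32 tensor of shape [qlen, bsz]; mems is either None or a list of
# n_layer tensors of shape [mem_len, bsz, d_model]; the call returns the final hidden states
# of shape [qlen, bsz, d_model] plus the updated memories.
#   output, new_mems = transformer(
#       dec_inp=inp, mems=None, n_token=32000, n_layer=6, d_model=512, d_embed=512,
#       n_head=8, d_head=64, d_inner=2048,
#       initializer=tf.initializers.random_normal(stddev=0.02),
#       mem_len=128, scope='transformer')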
# In[13]:
class Chatbot:
def __init__(self):
self.X = tf.placeholder(tf.int32, [None, None])
self.Y = tf.placeholder(tf.int32, [None, None])
self.X_seq_len = tf.count_nonzero(self.X, 1, dtype=tf.int32)
self.Y_seq_len = tf.count_nonzero(self.Y, 1, dtype=tf.int32)
batch_size = tf.shape(self.X)[0]
main = tf.strided_slice(self.Y, [0, 0], [batch_size, -1], [1, 1])
decoder_input = tf.concat([tf.fill([batch_size, 1], GO), main], 1)
initializer = tf.initializers.random_normal(stddev=0.1)
def forward(x, y, reuse=tf.AUTO_REUSE):
memory = tf.fill([n_layer, tf.shape(x)[0], tf.shape(x)[1], d_model], PAD)
memory = tf.cast(memory, tf.float32)
logits, next_memory = transformer(
x,
memory,
len(dictionary_from),
n_layer,
d_model,
d_embed,
n_head,
d_head,
d_inner,
initializer,
scope="encoder",
reuse=reuse,
)
logits, next_memory = transformer(
x,
next_memory,
len(dictionary_to),
n_layer,
d_model,
d_embed,
n_head,
d_head,
d_inner,
initializer,
scope="decoder",
reuse=reuse,
)
logits = transformer(
y,
next_memory,
len(dictionary_to),
n_layer,
d_model,
d_embed,
n_head,
d_head,
d_inner,
initializer,
scope="decoder_1",
reuse=reuse,
)[0]
return tf.layers.dense(logits, len(dictionary_from), reuse=tf.AUTO_REUSE)
self.training_logits = forward(self.X, decoder_input)
def cond(i, y, temp):
return i < tf.reduce_max(tf.shape(self.X)[1])
def body(i, y, temp):
logits = forward(self.X, y, reuse=True)
ids = tf.argmax(logits, -1)[:, i]
ids = tf.expand_dims(ids, -1)
temp = tf.concat([temp[:, 1:], ids], -1)
y = tf.concat([temp[:, -(i + 1) :], temp[:, : -(i + 1)]], -1)
y = tf.reshape(y, [tf.shape(temp)[0], tf.shape(self.X)[1]])
i += 1
return i, y, temp
target = tf.fill([batch_size, tf.shape(self.X)[1]], GO)
target = tf.cast(target, tf.int64)
self.target = target
_, self.predicting_ids, _ = tf.while_loop(cond, body, [tf.constant(0), target, target])
masks = tf.sequence_mask(self.Y_seq_len, maxlen_answer, dtype=tf.float32)
self.cost = tf.contrib.seq2seq.sequence_loss(
logits=self.training_logits, targets=self.Y, weights=masks
)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost)
y_t = tf.argmax(self.training_logits, axis=2)
y_t = tf.cast(y_t, tf.int32)
self.prediction = tf.boolean_mask(y_t, masks)
mask_label = tf.boolean_mask(self.Y, masks)
correct_pred = tf.equal(self.prediction, mask_label)
correct_index = tf.cast(correct_pred, tf.float32)
self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# In[14]:
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Chatbot()
sess.run(tf.global_variables_initializer())
# In[15]:
for i in range(epoch):
total_loss, total_accuracy = 0, 0
for k in range(0, len(short_questions), batch_size):
index = min(k + batch_size, len(short_questions))
batch_x, seq_x = pad_sentence_batch(X[k:index], PAD, maxlen_answer)
batch_y, seq_y = pad_sentence_batch(Y[k:index], PAD, maxlen_answer)
predicted, accuracy, loss, _ = sess.run(
[model.predicting_ids, model.accuracy, model.cost, model.optimizer],
feed_dict={model.X: batch_x, model.Y: batch_y},
)
total_loss += loss
total_accuracy += accuracy
total_loss /= len(short_questions) / batch_size
total_accuracy /= len(short_questions) / batch_size
print(predicted)
print("epoch: %d, avg loss: %f, avg accuracy: %f\n" % (i + 1, total_loss, total_accuracy))
# In[16]:
for i in range(len(batch_x)):
| |
"""
Evaluates the requested baseline method and returns the evaluation output as a Results object. For Katz,
neighbourhood=`in` and neighbourhood=`out` return identical results (those of neighbourhood=`in`).
Execution time is contained in the results object. If the train/test split object used to initialize the
evaluator does not contain test edges, the results object will only contain train results.
Parameters
----------
method : string
A string indicating the name of any baseline from evalne.methods to evaluate.
neighbourhood : string, optional
A string indicating the 'in' or 'out' neighbourhood to be used for directed graphs. Default is 'in'.
timeout : float or None, optional
A float indicating the maximum amount of time (in seconds) the evaluation can run for. If None, the
evaluation is allowed to continue until completion. Default is None.
Returns
-------
results : Results
The evaluation results as a Results object.
Raises
------
TimeoutExpired
If the execution does not finish within the allocated time.
TypeError
If the Katz method call is incorrect.
ValueError
If the heuristic selected does not exist.
See Also
--------
evalne.utils.util.run_function : The low level function used to run a baseline with given timeout.
Examples
--------
Evaluating the common neighbours heuristic with default parameters. We assume an evaluator (nee) has already
been instantiated (see class examples):
>>> result = nee.evaluate_baseline(method='common_neighbours')
>>> # Print the results
>>> result.pretty_print()
Method: common_neighbours
Parameters:
[('split_id', 0), ('dim', 128), ('eval_time', 0.06909489631652832), ('neighbourhood', 'in'),
('split_alg', 'spanning_tree'), ('fe_ratio', 1.0), ('owa', True), ('nw_name', 'test'),
('train_frac', 0.510061919504644)]
Test scores:
tn = 1124
[...]
Evaluating katz with beta=0.05 and timeout 60 seconds. We assume an evaluator (nee) has already
been instantiated (see class examples):
>>> result = nee.evaluate_baseline(method='katz 0.05', timeout=60)
>>> # Print the results
>>> result.pretty_print()
Method: katz 0.05
Parameters:
[('split_id', 0), ('dim', 128), ('eval_time', 0.11670708656311035), ('neighbourhood', 'in'),
('split_alg', 'spanning_tree'), ('fe_ratio', 1.0), ('owa', True), ('nw_name', 'test'),
('train_frac', 0.510061919504644)]
Test scores:
tn = 1266
[...]
"""
# Measure execution time
start = time.time()
if 'katz' in method.lower():
try:
train_pred, test_pred = util.run_function(timeout, _eval_katz, *[method, self.traintest_split])
except TypeError:
raise TypeError('Call to katz method is incorrect. Check method parameters.')
except util.TimeoutExpired as e:
raise util.TimeoutExpired('Method `{}` timed out after {} seconds'.format(method, time.time()-start))
else:
try:
train_pred, test_pred = _eval_sim_no_q(method,self.traintest_split,neighbourhood)
#util.run_function(timeout, _eval_sim,
#*[method, self.traintest_split, neighbourhood])
except AttributeError:
raise AttributeError('Method `{}` is not one of the available baselines!'.format(method))
except util.TimeoutExpired as e:
raise util.TimeoutExpired('Method `{}` timed out after {} seconds'.format(method, time.time()-start))
# Make predictions column vectors
train_pred = np.array(train_pred)
if test_pred is not None:
test_pred = np.array(test_pred)
# End of exec time measurement
end = time.time() - start
# Set some parameters for the results object
params = {'neighbourhood': neighbourhood, 'eval_time': end}
self.edge_embed_method = None
if 'all_baselines' in method:
# This method returns node-pair embeddings so we need to compute the predictions
train_pred, test_pred = self.compute_pred(data_split=self.traintest_split, tr_edge_embeds=train_pred,
te_edge_embeds=test_pred)
# Compute the scores
if nx.is_directed(self.traintest_split.TG):
results = self.compute_results(data_split=self.traintest_split, method_name=method + '-' + neighbourhood,
train_pred=train_pred, test_pred=test_pred, params=params)
else:
results = self.compute_results(data_split=self.traintest_split, method_name=method,
train_pred=train_pred, test_pred=test_pred, params=params)
return results
def evaluate_cmd(self, method_name, method_type, command, edge_embedding_methods, input_delim, output_delim,
tune_params=None, maximize='auroc', write_weights=False, write_dir=False, timeout=None,
verbose=True):
"""
Evaluates an embedding method and tunes its parameters from the method's command line call string. This
function can evaluate node embedding, node-pair embedding or end to end predictors.
Parameters
----------
method_name : string
A string indicating the name of the method to be evaluated.
method_type : string
A string indicating the type of embedding method (i.e. ne, ee, e2e).
NE methods are expected to return embeddings, one per graph node, as either dict or matrix sorted by nodeID.
EE methods are expected to return node-pair emb. as [num_edges x embed_dim] matrix in same order as input.
E2E methods are expected to return predictions as a vector in the same order as the input edgelist.
command : string
A string containing the call to the method as it would be written in the command line.
For 'ne' methods placeholders (i.e. {}) need to be provided for the parameters: input network file,
output file and embedding dimensionality, precisely IN THIS ORDER.
For 'ee' methods with parameters: input network file, input train edgelist, input test edgelist, output
train embeddings, output test embeddings and embedding dimensionality, 6 placeholders (i.e. {}) need to
be provided, precisely IN THIS ORDER.
For methods with parameters: input network file, input edgelist, output embeddings, and embedding
dimensionality, 4 placeholders (i.e. {}) need to be provided, precisely IN THIS ORDER.
For 'e2e' methods with parameters: input network file, input train edgelist, input test edgelist, output
train predictions, output test predictions and embedding dimensionality, 6 placeholders (i.e. {}) need
to be provided, precisely IN THIS ORDER.
For methods with parameters: input network file, input edgelist, output predictions, and embedding
dimensionality, 4 placeholders (i.e. {}) need to be provided, precisely IN THIS ORDER.
edge_embedding_methods : array-like
A list of methods used to compute node-pair embeddings from the node embeddings output by NE models.
The accepted values are the function names in evalne.evaluation.edge_embeddings.
When evaluating 'ee' or 'e2e' methods, this parameter is ignored.
input_delim : string
The delimiter expected by the method as input (edgelist).
output_delim : string
The delimiter provided by the method in the output.
tune_params : string, optional
A string containing all the parameters to be tuned and their values. Default is None.
maximize : string, optional
The score to maximize while performing parameter tuning. Default is 'auroc'.
write_weights : bool, optional
If True the train graph passed to the embedding methods will be stored as weighted edgelist
(e.g. triplets src, dst, weight) otherwise as normal edgelist. If the graph edges have no weight attribute
and this parameter is set to True, a weight of 1 will be assigned to each edge. Default is False.
write_dir : bool, optional
This option is only relevant for undirected graphs. If False, the train graph will be stored with a single
direction of the edges. If True, both directions of edges will be stored. Default is False.
timeout : float or None, optional
A float indicating the maximum amount of time (in seconds) the evaluation can run for. If None, the
evaluation is allowed to continue until completion. Default is None.
verbose : bool, optional
A parameter to control the amount of screen output. Default is True.
Returns
-------
results : Results
Returns the evaluation results as a Results object.
Raises
------
TimeoutExpired
If the execution does not finish within the allocated time.
IOError
If the method call does not succeed.
ValueError
If the method type is unknown.
If for a method all parameter combinations fail to provide results.
See Also
--------
evalne.utils.util.run : The low level function used to run a cmd call with given timeout.
Examples
--------
Evaluating the OpenNE implementation of node2vec without parameter tuning and with 'average' and 'hadamard' as
node-pair embedding operators. We assume the method is installed in a virtual environment and that an evaluator
(nee) has already been instantiated (see class examples):
>>> # Prepare the cmd command for running the method. If running on a python console full paths are required
>>> cmd = '../OpenNE-master/venv/bin/python -m openne --method node2vec '\
... '--graph-format edgelist --input {} --output {} --representation-size {}'
>>> # Call the evaluation
>>> result = nee.evaluate_cmd(method_name='Node2vec', method_type='ne', command=cmd,
... edge_embedding_methods=['average', 'hadamard'], input_delim=' ', output_delim=' ')
Running command...
[...]
>>> # Print the results
>>> result.pretty_print()
Method: Node2vec
Parameters:
[('split_id', 0), ('dim', 128), ('owa', True), ('nw_name', 'test'), ('train_frac', 0.51),
('split_alg', 'spanning_tree'), ('eval_time', 24.329686164855957), ('edge_embed_method', 'average'),
('fe_ratio', 1.0)]
Test scores:
tn = 913
[...]
Evaluating the metapath2vec c++ implementation with parameter tuning and with 'average' node-pair embedding
operator. We assume the method is installed and that an evaluator (nee) has already been instantiated
(see class examples):
>>> # Prepare the cmd command for running the method. If running on a python console full paths are required
>>> | |
= cnbtc_tuple[0]
amount = cnbtc_tuple[2]
remain = amount
price = cnbtc_tuple[3]
if amount==0:
cnbtcTradeQue2.put((0.0,0.0))
cnbtcTradeQue1.task_done()
continue
buy = True
if cnbtc_tuple[1] == "sell":
buy = False
times = 10
while True:
if buy:
order = cnbtc.buy(volume = amount,price=price+slippage)
else:
order = cnbtc.sell(volume=amount,price=price-slippage)
if order!= None:
if order.has_key("code") and order["code"] != 1000:
time.sleep(delay_time)
print "cnbtc",order
continue
id = order["id"]
wait_times = 5
while wait_times>0:
wait_times-=1
time.sleep(1)
while True:
order = cnbtc.getOrder(id)
if order!=None:
break
print "cnbtc",order
####2 is done
####
if order["status"] == 2:
break
if order["status"] == 2:
if buy:
print "cnbtc remain buy ",0.0
money-=amount*(price+slippage)
cnbtcTradeQue2.put((0.0,money))
else:
print "cnbtc remain sell 0.0"
money+=amount*(price-slippage)
cnbtcTradeQue2.put((0.0,money))
break
elif order["status"] == 0 or order["status"] == 3:
while True:
order = cnbtc.deleteOrder(id)
if order!=None:
if order.has_key("code") and order["code"] != 1000:
print json.dumps(order,ensure_ascii=False)
if order["code"] == 3001:
break
time.sleep(delay_time)
continue
break
while True:
order = cnbtc.getOrder(id)
if order!=None:
# print order
if order.has_key("code") and order["code"] != 1000:
print "cnbtc",order
time.sleep(delay_time)
continue
#todo judge whether is deleted
if order["status"]==1 or order["status"] == 2:
break
else:
time.sleep(delay_time)
print "cnbtc",order
if buy:
money-=float(order["trade_amount"])*(price+slippage)
remain = float(order["total_amount"]) - float(order["trade_amount"])
print "cnbtc remain buy %f/%f"%(remain,float(order["total_amount"]))
else:
money+=float(order["trade_amount"])*(price-slippage)
remain = float(order["total_amount"]) - float(order["trade_amount"])
print "cnbtc remain sell %f/%f"%(remain,float(order["total_amount"]))
if remain<=0:
cnbtcTradeQue2.put((0.0,money))
break
else:
if buy:
money-=float(order["trade_amount"])*(price+slippage)
remain = float(order["total_amount"]) - float(order["trade_amount"])
print "cnbtc remain buy %f/%f"%(remain,float(order["total_amount"]))
else:
money+=float(order["trade_amount"])*(price-slippage)
remain = float(order["total_amount"]) - float(order["trade_amount"])
print "cnbtc remain sell %f/%f"%(remain,float(order["total_amount"]))
if remain<=0:
cnbtcTradeQue2.put((0.0,money))
break
print "get_depth"
while True:
depth = cnbtc.getDepth()
if depth:
depth["asks"].reverse()
break
if buy:
price_now = cnbtcThresCoin(remain*trade_ratio,offset_coin,offset_player,depth["asks"])[1]
print "prince_now cnbtc",price_now
if price_now>cnbtc_tuple[4]:
cnbtcTradeQue2.put((remain,money))
break
else:
price_now = cnbtcThresCoin(remain*trade_ratio,offset_coin,offset_player,depth["bids"])[1]
print "prince_now cnbtc",price_now
if price_now<cnbtc_tuple[4]:
cnbtcTradeQue2.put((remain,money))
break
price = price_now
amount = remain
times-=1
cnbtcTradeQue1.task_done()
def ybAccountRun():
while True:
yb = ybAccountQue1.get()
yb_cny = 0
yb_eth = 0
while True:
yb_acc = yb.get_account()
if yb_acc!= None:
if yb_acc.has_key("error"):
time.sleep(delay_time)
print yb_acc
continue
break
for acc in yb_acc["accounts"]:
if acc["currency"] == "cny":
yb_cny=float(acc["balance"])
elif acc["currency"] == "eth":
yb_eth= float(acc["balance"])
ybAccountQue1.task_done()
ybAccountQue2.put((yb_cny,yb_eth))
def cnbtcAccountRun():
while True:
cnbtc = cnbtcAccountQue1.get()
cnbtc_cny = 0
cnbtc_eth = 0
while True:
cnbtc_acc = cnbtc.get_account()
if cnbtc_acc!= None:
if cnbtc_acc.has_key("code") and cnbtc_acc["code"] != 1000:
time.sleep(delay_time)
print cnbtc_acc
continue
break
cnbtc_eth=cnbtc_acc["result"]["balance"]["ETH"]["amount"]
cnbtc_cny+=cnbtc_acc["result"]["balance"]["CNY"]["amount"]
cnbtcAccountQue1.task_done()
cnbtcAccountQue2.put((cnbtc_cny,cnbtc_eth))
def okcAccountRun():
while True:
time.sleep(delay_time)
okc = okcAccountQue1.get()
okc_cny = 0
okc_eth = 0
while True:
okc_acc = okc.get_account()
if okc_acc!= None:
if okc_acc["result"]!=True:
time.sleep(delay_time)
print "okc",okc_acc
continue
break
okc_eth = float(okc_acc["info"]["funds"]["free"]["eth"])
okc_cny = float(okc_acc["info"]["funds"]["free"]["cny"])
# print okc_acc
okcAccountQue1.task_done()
okcAccountQue2.put((okc_cny,okc_eth))
def hbAccountRun():
while True:
hb = hbAccountQue1.get()
hb_cny = 0
hb_eth = 0
while True:
hb_acc = hb.get_account()
if hb_acc!= None:
if hb_acc["status"]!="ok":
print hb_acc
continue
break
for mon in hb_acc["data"]["list"]:
if mon["currency"]=="cny" and mon["type"] == "trade":
hb_cny = float(mon["balance"])
if mon["currency"] == "eth" and mon["type"] == "trade":
hb_eth = float(mon["balance"])
hbAccountQue1.task_done()
hbAccountQue2.put((hb_cny,hb_eth))
import sys
import numpy.matlib
def setThreshold(cny_list,eth_list,brokerage_fee,cash_fee,thres_list_now,thres_list_origin,number,price,tick_coin,name_list):
trade_multiplier = numpy.ones([number,number])
thres_list = thres_list_origin.copy()
sell_times = eth_list/tick_coin
buy_times = cny_list/price/tick_coin
trade_broker = numpy.add.outer(brokerage_fee,brokerage_fee)*price*1.1
trade_cash = numpy.add.outer(cash_fee,numpy.zeros(cash_fee.shape[0]))*price*1.05
length = cny_list.shape[0]
print "buy_times",buy_times
print "sell_times",sell_times
tmp = buy_times.copy()
tmp[tmp>thres_money] = thres_money
tmp = (-tmp+thres_money)*slope
tmp[tmp>max_thres_limitation] = max_thres_limitation
offset = numpy.matlib.repmat(tmp,length,1)
tmp = buy_times.copy()
tmp[tmp>thres_money] = thres_money
tmp = (-tmp+thres_money)*5/thres_money
tmp[tmp>1] = 1
max_diff_thres_tmp = max(0,max_diff_thres)
tmp_mul = numpy.matlib.repmat(tmp.reshape(length,1),1,length)
trade_multiplier+=tmp_mul*trade_multiplier_ratio
tmp = numpy.matlib.repmat(tmp.reshape(length,1),1,length)
# print 123
offset_cash = -numpy.multiply(tmp,numpy.add.outer(cash_fee,numpy.zeros(cash_fee.shape[0]))*price*1.05)
# print tmp
# tmp = numpy.matlib.repmat(tmp.reshape(length,1),1,length)
# print tmp
tmp = sell_times.copy()
tmp[tmp>thres_coin] = thres_coin
tmp = (-tmp+thres_coin)*slope
tmp[tmp>max_thres_limitation] = max_thres_limitation
offset += numpy.matlib.repmat(tmp.reshape(length,1),1,length)
tmp = sell_times.copy()
tmp[tmp>thres_coin] = thres_coin
tmp = (-tmp+thres_coin)*5/thres_coin
tmp[tmp>1] = 1
tmp_mul = numpy.matlib.repmat(tmp,length,1)
trade_multiplier+=tmp_mul*trade_multiplier_ratio
tmp = numpy.matlib.repmat(tmp,length,1)
# print 123
offset_cash -= numpy.multiply(tmp,numpy.add.outer(cash_fee,numpy.zeros(cash_fee.shape[0]))*price*1.05)
# print offset
# buy_times<100
alertQue.put((buy_times,sell_times,number))
# offset[offset<max_diff_thres_tmp] = max_diff_thres_tmp
offset[offset>max_thres_limitation] = max_thres_limitation
print offset
# print offset
# print trade_broker,trade_cash,offset_cash
thres_list = trade_broker+trade_cash+offset_cash+max_diff_thres_tmp+offset+thres_list_origin
# print thres_list
thres_list[:,buy_times<=8] = 999999
thres_list[sell_times<=8,:] = 999999
buy_tmp = (thres_money-buy_times.copy())*slope
buy_tmp[buy_tmp<0] = 0
buy_tmp[buy_tmp>max_diff_thres_tmp] = max_diff_thres_tmp
buy_tmp_n_n = numpy.matlib.repmat(buy_tmp.reshape(length, 1), 1, length)
sell_tmp = (thres_coin-sell_times.copy())*slope
sell_tmp[sell_tmp<0] = 0
sell_tmp[sell_tmp>max_diff_thres_tmp] = max_diff_thres_tmp
sell_tmp_n_n = numpy.matlib.repmat(sell_tmp,length,1)
tmp_n_n = numpy.maximum(sell_tmp_n_n,buy_tmp_n_n)
# print thres_list
# print tmp_n_n
thres_list -= tmp_n_n
# thres_list -= sell_tmp
numpy.fill_diagonal(thres_list,999999)
numpy.fill_diagonal(trade_multiplier,0)
trade_multiplier[trade_multiplier>2] = 2
# print trade_multiplier
# print thres_list
# thres_list = numpy.maximum.reduce([thres_list,(trade_broker+trade_cash)])
# print buy_times<=1
# print thres_list
# result = thres_list_origin.copy()
# result[:number,:number] = thres_list
# thres_list[2,0] = 0
# thres_list[2,1] = 0
# thres_list[1,2] = 0
# thres_list[0,2] = 0
# print thres_list
return thres_list,trade_multiplier
def alert():
while True:
alertTuple = alertQue.get()
buy_times = alertTuple[0]
sell_times = alertTuple[1]
number = alertTuple[2]
for i in range(number):
if open_platform[i]:
if buy_times[i] <= 8:
if money_status[i] == 0 or money_status[i] == 1:
for tel in tel_list:
res = message.send_sms("Alert: %s account has completely run out of money" % name_list[i], tel)
print res
money_status[i] = 2
print >> sys.stderr, "%s has no money!!!!!!!!!!!!!!!!!!!!!" % name_list[i]
elif buy_times[i] < alert_thres_money:
if money_status[i] == 0:
for tel in tel_list:
message.send_sms("Alert: %s is running low on money, only %f buys left" % (name_list[i],buy_times[i]), tel)
money_status[i] = 1
print >> sys.stderr, "%s is low money!!!!!!!!!!!!!!!!!!!!!!" % name_list[i]
else:
money_status[i] = 0
if sell_times[i] <= 8:
if coin_status[i] == 0 or coin_status[i] == 1:
for tel in tel_list:
message.send_sms("Alert: %s account has completely run out of coins" % name_list[i], tel)
coin_status[i] = 2
print >> sys.stderr, "%s has no coin!!!!!!!!!!!!!!!!!!!!!!" % name_list[i]
elif sell_times[i] < alert_thres_coin:
if coin_status[i] == 0:
for tel in tel_list:
message.send_sms("Alert: %s is running low on coins, only %f sells left" % (name_list[i],sell_times[i]), tel)
coin_status[i] = 1
print >> sys.stderr, "%s is low coin!!!!!!!!!!!!!!!!!!!!!!" % name_list[i]
else:
coin_status[i] = 0
alertQue.task_done()
import HuoBi
import OKCoin
open_okc = open_platform[3]
open_yb = open_platform[1]
open_cnbtc = open_platform[0]
open_hb = open_platform[2]
if open_yb:
yb = YunBi.Yunbi(config,"LiChen")
print yb.get_account()
else:
yb = None
# import gzip
# from StringIO import StringIO
#
# buf = StringIO(acc["name"])
# f = gzip.GzipFile(fileobj=buf)
# print f.read()
# sss = acc["name"].encode("raw_unicode_escape").decode()
# print ss
# logging.info("YB Account "+json.dumps(yb.get_account(),ensure_ascii=False))
if open_cnbtc:
cnbtc = CNBTC.CNBTC(config)
print("cnbtc Account "+str(cnbtc.get_account()))
else:
cnbtc = None
if open_hb:
hb = HuoBi.HuoBi(config)
print("HB Account "+str(hb.get_account()))
else:
hb = None
if open_okc:
okc = OKCoin.OKCoin(config)
print("OKCoin Account "+str(okc.get_account()))
okc_thread = threading.Thread(target=okcRun)
okc_thread.setDaemon(True)
okc_thread.start()
else:
okc = None
if open_yb:
yb_thread = threading.Thread(target=ybRun)
yb_thread.setDaemon(True)
yb_thread.start()
if open_cnbtc:
cnbtc_thread = threading.Thread(target=cnbtcRun)
cnbtc_thread.setDaemon(True)
cnbtc_thread.start()
if open_hb:
hb_thread = threading.Thread(target=hbRun)
hb_thread.setDaemon(True)
hb_thread.start()
if open_okc:
okc_trade_thread = threading.Thread(target=okcTradeRun)
okc_trade_thread.setDaemon(True)
okc_trade_thread.start()
if open_yb:
yb_trade_thread = threading.Thread(target=ybTradeRun)
yb_trade_thread.setDaemon(True)
yb_trade_thread.start()
if open_cnbtc:
cnbtc_trade_thread = threading.Thread(target = cnbtcTradeRun)
cnbtc_trade_thread.setDaemon(True)
cnbtc_trade_thread.start()
if open_hb:
hb_trade_thread = threading.Thread(target=hbTradeRun)
hb_trade_thread.setDaemon(True)
hb_trade_thread.start()
if open_okc:
okc_account_thread = threading.Thread(target=okcAccountRun)
okc_account_thread.setDaemon(True)
okc_account_thread.start()
if open_yb:
yb_account_thread = threading.Thread(target=ybAccountRun)
yb_account_thread.setDaemon(True)
yb_account_thread.start()
if open_cnbtc:
cnbtc_account_thread = threading.Thread(target = cnbtcAccountRun)
cnbtc_account_thread.setDaemon(True)
cnbtc_account_thread.start()
if open_hb:
hb_account_thread = threading.Thread(target=hbAccountRun)
hb_account_thread.setDaemon(True)
hb_account_thread.start()
alertThread = threading.Thread(target=alert)
alertThread.setDaemon(True)
alertThread.start()
total_coin = 0
total_money = 0
tick = 0
last_total_eth = 0
last_total_cny = 0
first_total_eth = 0
first_total_cny = 0
first = True
platform_number = 4
name_list = ["CNBTC","YunBi","HuoBi","OKCoin"]
obj_list = [cnbtc,yb,hb,okc]
que1_list = [cnbtcQue1,ybQue1,hbQue1,okcQue1]
que2_list = [cnbtcQue2,ybQue2,hbQue2,okcQue2]
trade_que1_list = [cnbtcTradeQue1,ybTradeQue1,hbTradeQue1,okcTradeQue1]
trade_que2_list = [cnbtcTradeQue2,ybTradeQue2,hbTradeQue2,okcTradeQue2]
thres_list = numpy.array([[999999,spread_threshold_yb_buy_cnbtc_sell,spread_threshold_hb_buy_cnbtc_sell,spread_threshold_okc_buy_cnbtc_sell],
[spread_threshold_yb_sell_cnbtc_buy,999999,spread_threshold_yb_sell_hb_buy,spread_threshold_yb_sell_okc_buy],
[spread_threshold_hb_sell_cnbtc_buy,spread_threshold_yb_buy_hb_sell,999999,spread_threshold_okc_buy_hb_sell],
[spread_threshold_okc_sell_cnbtc_buy,spread_threshold_yb_buy_okc_sell,spread_threshold_okc_sell_hb_buy,999999]])
thres_list_origin = thres_list.copy()
has_ts = [True,True,True,False]
platform_list = []
for i in range(platform_number):
platform_list.append(
{
"name":name_list[i],
"obj":obj_list[i],
"que1":que1_list[i],
"que2":que2_list[i],
"trade_que1":trade_que1_list[i],
"trade_que2":trade_que2_list[i],
"depth_buy":None,
"depth_sell":None,
"has_ts":has_ts[i]
}
)
brokerage_fee = numpy.asarray([0.0004,0.001,0.002,0.001])
cash_fee = numpy.asarray([0.001,0.001,0.002,0.002])
while True:
print 'tick',tick
for platform in platform_list:
if platform["obj"]!=None:
platform["que1"].put(platform["obj"])
if open_yb:
ybAccountQue1.put(yb)
if open_okc:
okcAccountQue1.put(okc)
if open_cnbtc:
cnbtcAccountQue1.put(cnbtc)
if open_hb:
hbAccountQue1.put(hb)
for platform in platform_list:
if platform["obj"]!=None:
platform["depth_sell"] = platform["que2"].get()
platform["depth_buy"] = platform["que2"].get()
###depth[0] is amount
###depth[1] is price
###depth[2] is list
max_diff = -1000
trade_info = dict()
average_price = 0
open_num = 0
for i in range(platform_number):
if platform_list[i]["obj"]!=None:
open_num+=1
average_price+=platform_list[i]["depth_buy"][0][1]+platform_list[i]["depth_sell"][0][1]
average_price /= open_num*2.0/1.01
print 'average_price %f'%average_price
brokerage_trade = numpy.add.outer(brokerage_fee,brokerage_fee)*average_price
cash_trade = numpy.add.outer(cash_fee,numpy.zeros(cash_fee.shape[0]))*average_price
tick+=1
if tick % 1 == 0:
total_cny = 0
total_eth = 0
yb_cny = 0
yb_eth = 0
cnbtc_cny = 0
cnbtc_eth = 0
hb_cny = 0
hb_eth = 0
okc_cny = 0
okc_eth = 0
if open_yb:
yb_cny,yb_eth = ybAccountQue2.get()
print "yb_balance:%f %f"%(yb_eth,yb_cny)
if open_okc:
okc_cny,okc_eth = okcAccountQue2.get()
print "okc_balance:%f %f"%(okc_eth,okc_cny)
if open_hb:
hb_cny,hb_eth = hbAccountQue2.get()
print "hb balance:%f %f"%(hb_eth,hb_cny)
if open_cnbtc:
cnbtc_cny,cnbtc_eth = cnbtcAccountQue2.get()
print "cnbtc balance:%f %f"%(cnbtc_eth,cnbtc_cny)
total_cny = yb_cny+hb_cny+cnbtc_cny+okc_cny
total_eth = yb_eth+hb_eth+cnbtc_eth+okc_eth
balance.write("%s %f %f %f %f %f %f %f %f %f %f\n"%(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())),
cnbtc_eth,cnbtc_cny,yb_eth,yb_cny,hb_eth,hb_cny,okc_eth,okc_cny,total_eth,total_cny))
history.write("%s "%time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())))
for i in range(platform_number):
if platform_list[i]["obj"]!=None:
history.write("%f %f "%(platform_list[i]["depth_buy"][0][1],platform_list[i]["depth_sell"][0][1]))
else:
history.write('0 0 ')
history.write('\n')
cny_list = numpy.asarray([cnbtc_cny,yb_cny,hb_cny,okc_cny])
eth_list = numpy.asarray([cnbtc_eth,yb_eth,hb_eth,okc_eth])
last_total_eth = total_eth
last_total_cny = total_cny
if first:
first_total_cny = total_cny
first_total_eth = total_eth
first = False
# history.write("%s %f %f %f %f %f %f\n" % (time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())),
# yb_depth[0][1], cnbtc_depth[0][1], yb_depth[0][1] - cnbtc_depth[0][1],
| |
<gh_stars>0
'''
Created on Feb 27, 2015
@author: root
'''
import time
from paxes_nova.virt.ibmpowervm.ivm import blockdev
from paxes_nova.virt.ibmpowervm.ivm import command
from paxes_nova.virt.ibmpowervm.ivm.blockdev import ISCSIDiskAdapter
from paxes_nova.virt.ibmpowervm.common.volume_utils \
import PowerVCNovaVirtDriverVolumeAPI
from paxes_nova.virt.ibmpowervm.common.volume_utils \
import PowerVCNovaISCSIVirtDriverVolumeAPI
from paxes_nova.virt.ibmpowervm.common.volume_utils \
import PowerVCNovaLOCALVirtDriverVolumeAPI
from paxes_nova.virt.ibmpowervm.ivm import exception
from nova.openstack.common import excutils
from paxes_nova.virt.ibmpowervm.ivm import common
from nova.openstack.common.lockutils import synchronized
from paxes_nova import logcall
from nova.openstack.common import log as logging
import os
import hashlib
LOG = logging.getLogger(__name__)
# class PowerVMDiskLocalAdapter(blockdev.PowerVMDiskAdapter):
# pass
class SANDiskLocalAdapter(blockdev.SANDiskAdapter):
def __init__(self, pvm_operator):
self.volume_api = PowerVCNovaVirtDriverVolumeAPI()
self.pvm_operator = pvm_operator
self._initiator = None
self._wwpns = None
self._connection = None
def _md5sum_remote_file(self, remote_path):
# AIX6/VIOS cannot md5sum files with sizes greater than ~2GB
cmd = ("perl -MDigest::MD5 -e 'my $file = \"%s\"; open(FILE, $file); "
"binmode(FILE); "
"print Digest::MD5->new->addfile(*FILE)->hexdigest, "
"\" $file\n\";'" % remote_path)
# output = os.popen(cmd)
output, err = self.pvm_operator.run_vios_command_as_root_with_shell(cmd)
return output
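# Note: the perl one-liner prints "<md5-hexdigest> <path>", e.g.
#   d41d8cd98f00b204e9800998ecf8427e /home/padmin/images/rhel.img   (path is illustrative)
# which is why callers take output.split(' ')[0] as the checksum.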
def copy_image_file(self, source_path, remote_path, decompress=False):
"""Copy file to VIOS, decompress it, and return its new size and name.
:param source_path: source file path
:param remote_path: remote file path
:param decompress: if True, de-compresses the file after copying;
if False (default), just copies the file
"""
# Calculate source image checksum
source_cksum = self._checksum_local_file(source_path)
comp_path = os.path.join(remote_path, os.path.basename(source_path))
if comp_path.endswith(".gz"):
uncomp_path = os.path.splitext(comp_path)[0]
else:
uncomp_path = comp_path
if not decompress:
final_path = comp_path
else:
final_path = uncomp_path
# Check whether the image is already on IVM
output = self.run_vios_command("ls %s" % final_path,
check_exit_code=False)
# If the image does not exist already
if not output:
try:
# Copy file to IVM
# common.scp_command(self.connection_data,
# source_path, remote_path, 'put')
cp_cmd = 'cp' + ' -f ' + source_path + ' ' + remote_path
self.run_vios_command(cp_cmd)
except exception.IBMPowerVMFileTransferFailed:
with excutils.save_and_reraise_exception():
cmd = "/usr/bin/rm -f %s" % final_path
self.run_vios_command_as_root(cmd)
# Verify image file checksums match
output = self._md5sum_remote_file(final_path)
if not output:
LOG.error(_("Unable to get checksum"))
# Cleanup inconsistent remote file
cmd = "/usr/bin/rm -f %s" % final_path
self.run_vios_command_as_root(cmd)
raise exception.\
IBMPowerVMFileTransferFailed(file_path=final_path)
if source_cksum != output.split(' ')[0]:
LOG.error(_("Image checksums do not match"))
# Cleanup inconsistent remote file
cmd = "/usr/bin/rm -f %s" % final_path
self.run_vios_command_as_root(cmd)
raise exception.\
IBMPowerVMFileTransferFailed(file_path=final_path)
if decompress:
# Unzip the image
cmd = "/usr/bin/gunzip %s" % comp_path
output = self.run_vios_command_as_root(cmd)
# Remove existing image file
cmd = "/usr/bin/rm -f %s.*" % uncomp_path
output = self.run_vios_command_as_root(cmd)
# Rename unzipped image
cmd = "/usr/bin/mv %s %s" % (uncomp_path, final_path)
output = self.run_vios_command_as_root(cmd)
# Remove compressed image file
cmd = "/usr/bin/rm -f %s" % comp_path
output = self.run_vios_command_as_root(cmd)
else:
LOG.debug("Image found on host at '%s'" % final_path)
# Calculate file size in multiples of 512 bytes |awk '{print $4}'
output = self.run_vios_command("ls -o %s" % final_path, check_exit_code=False)
if output:
out = output[0].split()
size = int(out[3])
else:
LOG.error(_("Uncompressed image file not found"))
msg_args = {'file_path': final_path}
raise exception.IBMPowerVMFileTransferFailed(**msg_args)
if (size % 512 != 0):
size = (int(size / 512) + 1) * 512
return final_path, size
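# Hedged usage sketch (paths and the `adapter` instance are illustrative): copy a compressed
# image to the VIOS staging area and get back the final path and its 512-byte-aligned size.
#   final_path, size = adapter.copy_image_file('/tmp/rhel.img.gz', '/home/padmin/img_cache',
#                                               decompress=True)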
class PowerVMLocalVolumeLocalAdapter(blockdev.PowerVMLocalVolumeAdapter):
def __init__(self, ld_ivm_operator):
self.command = command.IVMCommand()
self.pvm_operator = ld_ivm_operator
self._connection = None
self.volume_api = PowerVCNovaLOCALVirtDriverVolumeAPI()
def _md5sum_remote_file(self, remote_path):
# AIX6/VIOS cannot md5sum files with sizes greater than ~2GB
cmd = ("perl -MDigest::MD5 -e 'my $file = \"%s\"; open(FILE, $file); "
"binmode(FILE); "
"print Digest::MD5->new->addfile(*FILE)->hexdigest, "
"\" $file\n\";'" % remote_path)
# output = os.popen(cmd)
output, err = self.pvm_operator.run_vios_command_as_root_with_shell(cmd)
return output
def copy_image_file(self, source_path, remote_path, decompress=False):
"""Copy file to VIOS, decompress it, and return its new size and name.
:param source_path: source file path
:param remote_path remote file path
:param decompress: if True, de-compresses the file after copying;
if False (default), just copies the file
"""
# Calculate source image checksum
source_cksum = self._checksum_local_file(source_path)
comp_path = os.path.join(remote_path, os.path.basename(source_path))
if comp_path.endswith(".gz"):
uncomp_path = os.path.splitext(comp_path)[0]
else:
uncomp_path = comp_path
if not decompress:
final_path = comp_path
else:
final_path = uncomp_path
# Check whether the image is already on IVM
output = self.run_vios_command("ls %s" % final_path,
check_exit_code=False)
# If the image does not exist already
if not output:
try:
# Copy file to IVM
# common.scp_command(self.connection_data,
# source_path, remote_path, 'put')
cp_cmd = 'cp' + ' -f ' + source_path + ' ' + remote_path
self.run_vios_command(cp_cmd)
except exception.IBMPowerVMFileTransferFailed:
with excutils.save_and_reraise_exception():
cmd = "/usr/bin/rm -f %s" % final_path
self.run_vios_command_as_root(cmd)
# Verify image file checksums match
output = self._md5sum_remote_file(final_path)
if not output:
LOG.error(_("Unable to get checksum"))
# Cleanup inconsistent remote file
cmd = "/usr/bin/rm -f %s" % final_path
self.run_vios_command_as_root(cmd)
raise exception.\
IBMPowerVMFileTransferFailed(file_path=final_path)
if source_cksum != output.split(' ')[0]:
LOG.error(_("Image checksums do not match"))
# Cleanup inconsistent remote file
cmd = "/usr/bin/rm -f %s" % final_path
self.run_vios_command_as_root(cmd)
raise exception.\
IBMPowerVMFileTransferFailed(file_path=final_path)
if decompress:
# Unzip the image
cmd = "/usr/bin/gunzip %s" % comp_path
output = self.run_vios_command_as_root(cmd)
# Remove existing image file
cmd = "/usr/bin/rm -f %s.*" % uncomp_path
output = self.run_vios_command_as_root(cmd)
# Rename unzipped image
cmd = "/usr/bin/mv %s %s" % (uncomp_path, final_path)
output = self.run_vios_command_as_root(cmd)
# Remove compressed image file
cmd = "/usr/bin/rm -f %s" % comp_path
output = self.run_vios_command_as_root(cmd)
else:
LOG.debug("Image found on host at '%s'" % final_path)
# Calculate file size in multiples of 512 bytes, |awk '{print $4}'
output = self.run_vios_command("ls -o %s" % final_path, check_exit_code=False)
if output:
out = output[0].split()
size = int(out[3])
else:
LOG.error(_("Uncompressed image file not found"))
msg_args = {'file_path': final_path}
raise exception.IBMPowerVMFileTransferFailed(**msg_args)
if (size % 512 != 0):
size = (int(size / 512) + 1) * 512
return final_path, size
@logcall
@synchronized('image-files-manipulation', 'localdisk-')
def _copy_image_file(self, source_path, remote_path, context, image_meta,
decompress=False):
"""Copy file to VIOS, decompress it, and return its new size and name.
:param source_path: source file path
:param remote_path: remote file path
:param decompress: if True, decompresses the file after copying;
if False (default), just copies the file
"""
# Calculate source image checksum
hasher = hashlib.md5()
block_size = 0x10000
img_file = file(source_path, 'r')
buf = img_file.read(block_size)
while len(buf) > 0:
hasher.update(buf)
buf = img_file.read(block_size)
source_cksum = hasher.hexdigest()
comp_path = os.path.join(remote_path, os.path.basename(source_path))
if comp_path.endswith(".gz"):
uncomp_path = os.path.splitext(comp_path)[0]
else:
uncomp_path = comp_path
if not decompress:
final_path = comp_path
else:
final_path = uncomp_path
# Check whether the image is already on IVM
output = self.run_vios_command("ls %s" % final_path,
check_exit_code=False)
# If the image does not exist already
if not output:
LOG.debug("Image %s not found on host" % final_path)
# calculate required size
img_size_mb = image_meta['size'] / (1024 * 1024)
# Have a check if we have space on IVM
ivm_free_space_kb = self.\
pvm_operator.get_staging_free_space()
ivm_free_space_mb = ivm_free_space_kb / 1024
if ivm_free_space_mb < img_size_mb:
# need to free-up cache area on ivm
if not self.pvm_operator.\
attempt_to_delete_image_cache(context, 1,
img_size_mb):
# can not proceed with ftp transfer.
# TODO: raise a right message, with req memory details.
raise exception.IBMPowerVMStagingAreaOutOfSpace(
dir=remote_path, size=img_size_mb,
free=ivm_free_space_mb)
LOG.debug(
"Initiating ftp, from %(source_path)s to %(remote_path)s"
% locals())
# Copy file to IVM
# common.scp_command(self.connection_data,
# source_path, remote_path, 'put')
cp_cmd = 'cp' + ' -f ' + source_path + ' ' + remote_path
self.run_vios_command(cp_cmd)
LOG.debug(
"ftp from %(source_path)s to %(remote_path)s done" % locals())
# Verify image file checksums match
output = self._md5sum_remote_file(final_path)
if not output:
LOG.error(_("Unable to get checksum"))
msg_args = {'file_path': final_path}
raise exception.IBMPowerVMFileTransferFailed(**msg_args)
target_cksum = output.split(' ')[0]
if source_cksum != target_cksum:
LOG.error(_("Image checksums do not match"
"source cksum %(source_cksum)s"
"target cksum %(target_cksum)s") % locals())
msg_args = {'file_path': final_path}
raise exception.IBMPowerVMFileTransferFailed(**msg_args)
if decompress:
# Unzip the image
cmd = "/usr/bin/gunzip %s" % comp_path
output = self.run_vios_command_as_root(cmd)
# Remove existing image file
cmd = "/usr/bin/rm -f %s.*" % uncomp_path
output = self.run_vios_command_as_root(cmd)
# Rename unzipped image
cmd = "/usr/bin/mv %s %s" % (uncomp_path, final_path)
output = self.run_vios_command_as_root(cmd)
# Remove compressed image file
cmd = "/usr/bin/rm -f %s" % comp_path
output = self.run_vios_command_as_root(cmd)
else:
LOG.debug("Image found on host at '%s'" % final_path)
# touch the file to indicate usage.
self._touch_file(final_path)
# Calculate file size in multiples of 512 bytes |awk '{print $4}'
output = self.run_vios_command("ls -o %s" % final_path, check_exit_code=False)
if output:
out = output[0].split()
size = int(out[3])
else:
LOG.error(_("Uncompressed image file not found"))
msg_args = {'file_path': final_path}
raise exception.IBMPowerVMFileTransferFailed(**msg_args)
if (size % 512 != 0):
size = (int(size / 512) + 1) * 512
return final_path, size
def _copy_compressed_image_file_from_host(self, device_name,
remote_source_path,
local_dest_dir, compress=False):
"""
Copy a file from IVM to | |
will be final result)
msg = ''
# For each word in the last entry of H
for w in H[-1]:
# Add word in hex to $msg string variable
msg += hex(w)[2:].zfill(8)
return msg
@staticmethod
def sha512(message, message_format=0):
# Set initial message
inital_message = ppp.from_str(message)
# Convert message if necessary
if message_format == 0:
inital_message = ppp.from_str(message)
elif message_format == 1:
inital_message = ppp.from_int(message)
elif message_format == 2:
inital_message = ppp.from_hex(message)
elif message_format == 3:
inital_message = ppp.from_bin(message)
elif message_format == 4:
inital_message = ppp.from_oct(message)
elif message_format == 5:
inital_message = ppp.from_file(message)
# Preprocess (converted) message (padding & parsing)
preproccessed_message = prep_sha384.prep(inital_message)
# Set H variable with initial hash value
H = [hash.get_H_sha512()]
# FIPS-180-4 6.2.2
# For each parsed block, create the message schedule, hash it, then append the hash values to $H
for i in range(1, len(preproccessed_message) + 1):
schedule = sched.create_schedule_sha512(preproccessed_message[i-1])
message_hashed = hash.hash_sha512(schedule, H, i)
H.append(message_hashed)
# Create msg variable (This will be final result)
msg = ''
# For each word in the last entry of H
for w in H[-1]:
# Add word in hex to $msg string variable (64-bit words -> 16 hex digits)
msg += hex(w)[2:].zfill(16)
return msg
@staticmethod
def sha512_for_t(message, h, trunc=512, message_format=0):
# Set initial message
inital_message = ppp.from_str(message)
# Convert message if necessary
if message_format == 0:
inital_message = ppp.from_str(message)
elif message_format == 1:
inital_message = ppp.from_int(message)
elif message_format == 2:
inital_message = ppp.from_hex(message)
elif message_format == 3:
inital_message = ppp.from_bin(message)
elif message_format == 4:
inital_message = ppp.from_oct(message)
elif message_format == 5:
inital_message = ppp.from_file(message)
# Preprocess (converted) message (padding & parsing)
preproccessed_message = prep_sha384.prep(inital_message)
# Set H variable with initial hash value
H = [h]
# FIPS-180-4 6.2.2
# For each parsed block, create the message schedule, hash it, then append the hash values to $H
for i in range(1, len(preproccessed_message) + 1):
schedule = sched.create_schedule_sha512(preproccessed_message[i-1])
message_hashed = hash.hash_sha512(schedule, H, i)
H.append(message_hashed)
# Create msg variable (This will be final result)
msg = ''
# For each word in the last entry of H
for w in H[-1]:
# Add word in binary to $msg string variable
msg += bin(w)[2:].zfill(64)
msg = hex(int(msg[:trunc] ,2))[2:]
return msg
@staticmethod
def sha512_t(message, t):
h = hash.sha512_for_t(message, IV.IV(t), t)[:]
return h
# FIPS-180-4 2.2.2
# Rotate bits to the right
@staticmethod
def ROTR(x, n, w=32):
return ((x >> n) | (x << w - n)) & ((1 << w) - 1)
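# Example: ROTR(0x00000001, 1) == 0x80000000 and ROTR(0xA0000000, 4) == 0x0A000000
# for the default 32-bit word size.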
# FIPS-180-4 3.2
# Rotate bits to the left
@staticmethod
def ROTL(x, n, w=32):
return hash.ROTR(x, w - n, w)
# FIPS-180-4 4.1.2
@staticmethod
def SIGMA0(x):
return hash.ROTR(x, 2) ^ hash.ROTR(x, 13) ^ hash.ROTR(x, 22)
# FIPS-180-4 4.1.2
@staticmethod
def SIGMA1(x):
return hash.ROTR(x, 6) ^ hash.ROTR(x, 11) ^ hash.ROTR(x, 25)
# FIPS-180-4 4.1.2
@staticmethod
def SIGMA0_sha384(x):
return hash.ROTR(x, 28, 64) ^ hash.ROTR(x, 34, 64) ^ hash.ROTR(x, 39, 64)
# FIPS-180-4 4.1.2
@staticmethod
def SIGMA1_sha384(x):
return hash.ROTR(x, 14, 64) ^ hash.ROTR(x, 18, 64) ^ hash.ROTR(x, 41, 64)
# FIPS-180-4 4.1.1
@staticmethod
def Ch(x, y, z):
return (x & y) ^ (~x & z)
# FIPS-180-4 4.1.1
@staticmethod
def Maj(x, y, z):
return (x & y) ^ (x & z) ^ (y & z)
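# Example: Ch selects bits from y where x is 1 and from z where x is 0, while Maj returns the
# per-bit majority, e.g. with 4-bit values:
#   Ch (0b1100, 0b1010, 0b0110) == 0b1010
#   Maj(0b1100, 0b1010, 0b0110) == 0b1110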
# FIPS-180-4 4.1.1
@staticmethod
def Parity(x, y, z):
return x ^ y ^ z
# FIPS-180-4 4.1.1
@staticmethod
def f_sha160(x, y, z, t):
if 0 <= t and t <= 19:
return hash.Ch(x, y, z)
if 20 <= t and t <= 39:
return hash.Parity(x, y, z)
if 40 <= t and t <= 59:
return hash.Maj(x, y, z)
if 60 <= t and t <= 79:
return hash.Parity(x, y, z)
# FIPS-180-4 5.3.2
# Get the (constant) inital hash values
@staticmethod
def get_H_sha160():
return hash.H_sha160
@staticmethod
def get_H_sha224():
return hash.H_sha224
@staticmethod
def get_H_sha256():
return hash.H_sha256
@staticmethod
def get_H_sha384():
return hash.H_sha384
@staticmethod
def get_H_sha512():
return hash.H_sha512
# FIPS-180-4 6.2.2
@staticmethod
def hash_sha160(W, H_sha160, i):
# Set initial hash values from previous (final) hash values
a = H_sha160[i-1][0]
b = H_sha160[i-1][1]
c = H_sha160[i-1][2]
d = H_sha160[i-1][3]
e = H_sha160[i-1][4]
# Iterate 80 times
for t in range(80):
if 0 <= t <= 19:
hash.K_sha160.append(int('5A827999', 16))
elif 20 <= t <= 39:
hash.K_sha160.append(int('6ED9EBA1', 16))
elif 40 <= t <= 59:
hash.K_sha160.append(int('8F1BBCDC', 16))
elif 60 <= t <= 79:
hash.K_sha160.append(int('CA62C1D6', 16))
# Calculate temporary value
T = int((hash.ROTL(a, 5) + hash.f_sha160(b, c, d, t) + e + hash.K_sha160[t] + W[t]) % 2 ** 32)
e = d
d = c
c = hash.ROTL(b, 30)
b = a
a = T
# Calculate final hash values
H0 = (H_sha160[i-1][0] + a) % 2 ** 32
H1 = (H_sha160[i-1][1] + b) % 2 ** 32
H2 = (H_sha160[i-1][2] + c) % 2 ** 32
H3 = (H_sha160[i-1][3] + d) % 2 ** 32
H4 = (H_sha160[i-1][4] + e) % 2 ** 32
# Return final hash values
return [H0, H1, H2, H3, H4]
@staticmethod
def hash_sha224(W, H, i):
# Set initial hash values from previous (final) hash values
a = H[i-1][0]
b = H[i-1][1]
c = H[i-1][2]
d = H[i-1][3]
e = H[i-1][4]
f = H[i-1][5]
g = H[i-1][6]
h = H[i-1][7]
# Iterate 64 times
for t in range(0, 64):
# Calculate temporary value 1
T1 = int((h + hash.SIGMA1(e) + hash.Ch(e, f, g) + hash.K_sha224[t] + W[t]) % 2 ** 32)
# Calculate temporary value 2
T2 = int((hash.SIGMA0(a) + hash.Maj(a, b, c)) % 2 ** 32)
h = g
g = f
f = e
e = int((d + T1) % 2 ** 32)
d = c
c = b
b = a
a = (T1 + T2) % 2 ** 32
# Calculate final hash values
H0 = (H[i-1][0] + a) % 2 ** 32
H1 = (H[i-1][1] + b) % 2 ** 32
H2 = (H[i-1][2] + c) % 2 ** 32
H3 = (H[i-1][3] + d) % 2 ** 32
H4 = (H[i-1][4] + e) % 2 ** 32
H5 = (H[i-1][5] + f) % 2 ** 32
H6 = (H[i-1][6] + g) % 2 ** 32
H7 = (H[i-1][7] + h) % 2 ** 32
# Return final hash values
return [H0, H1, H2, H3, H4, H5, H6, H7]
@staticmethod
def hash_sha384(W, H, i):
# Set initial hash values from the previous (final) hash values
a = H[i-1][0]
b = H[i-1][1]
c = H[i-1][2]
d = H[i-1][3]
e = H[i-1][4]
f = H[i-1][5]
g = H[i-1][6]
h = H[i-1][7]
# Iterate 80 times
for t in range(0, 80):
# Calculate temporary value 1
T1 = int((h + hash.SIGMA1_sha384(e) + hash.Ch(e, f, g) + hash.K_sha384[t] + W[t]) % 2 ** 64)
# Calculate temporary value 2
T2 = int((hash.SIGMA0_sha384(a) + hash.Maj(a, b, c)) % 2 ** 64)
h = g
g = f
f = e
e = int((d + T1) % 2 ** 64)
d = c
c = b
b = a
a = (T1 + T2) % 2 ** 64
# Calculate final hash values
H0 = (H[i-1][0] + a) % 2 ** 64
H1 = (H[i-1][1] + b) % 2 ** 64
H2 = (H[i-1][2] + c) % 2 ** 64
H3 = (H[i-1][3] + d) % 2 ** 64
H4 = (H[i-1][4] + e) % 2 ** 64
H5 = (H[i-1][5] + f) % 2 ** 64
# Return final hash values
return [H0, H1, H2, H3, H4, H5]
@staticmethod
def hash_sha512(W, H, i):
# Set initial hash values from the previous (final) hash values
a = H[i-1][0]
b = H[i-1][1]
c = H[i-1][2]
d = H[i-1][3]
e = H[i-1][4]
f = H[i-1][5]
g = H[i-1][6]
h = H[i-1][7]
# Iterate 80 times
for t in range(0, 80):
# Calculate temporary value 1
T1 = int((h + hash.SIGMA1_sha384(e) + hash.Ch(e, f, g) + hash.K_sha512[t] + W[t]) % 2 ** 64)
# Calculate temporary value 2
T2 = int((hash.SIGMA0_sha384(a) + hash.Maj(a, b, c)) % 2 ** 64)
h = g
g = f
f = e
e = int((d + T1) % 2 ** 64)
d = c
c = b
b = a
a = (T1 + T2) % 2 ** 64
# Calculate final hash values
H0 = (H[i-1][0] + a) % 2 ** 64
H1 = (H[i-1][1] + b) % 2 ** 64
H2 = (H[i-1][2] + c) % 2 ** 64
H3 = (H[i-1][3] + d) % 2 ** 64
H4 = (H[i-1][4] + e) % 2 ** 64
H5 = (H[i-1][5] + f) % 2 ** 64
H6 = (H[i-1][6] + g) % 2 ** 64
H7 = (H[i-1][7] + h) % 2 ** 64
# Return final hash values
return [H0, H1, H2, H3, H4, H5, H6, H7]
<reponame>jacoblb64/pico_rgb_keypad_hid
# SPDX-FileCopyrightText: 2020 <NAME> for Adafruit Industries
# SPDX-FileCopyrightText: 2020 <NAME> for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
`iothub_device`
=====================
Connectivity to Azure IoT Hub
* Author(s): <NAME>, <NAME>
"""
import json
import adafruit_logging as logging
from .iot_error import IoTError
from .iot_mqtt import IoTMQTT, IoTMQTTCallback, IoTResponse
def _validate_keys(connection_string_parts):
"""Raise ValueError if incorrect combination of keys"""
host_name = connection_string_parts.get(HOST_NAME)
shared_access_key_name = connection_string_parts.get(SHARED_ACCESS_KEY_NAME)
shared_access_key = connection_string_parts.get(SHARED_ACCESS_KEY)
device_id = connection_string_parts.get(DEVICE_ID)
if host_name and device_id and shared_access_key:
pass
elif host_name and shared_access_key and shared_access_key_name:
pass
else:
raise ValueError("Invalid Connection String - Incomplete")
DELIMITER = ";"
VALUE_SEPARATOR = "="
HOST_NAME = "HostName"
SHARED_ACCESS_KEY_NAME = "SharedAccessKeyName"
SHARED_ACCESS_KEY = "SharedAccessKey"
SHARED_ACCESS_SIGNATURE = "SharedAccessSignature"
DEVICE_ID = "DeviceId"
MODULE_ID = "ModuleId"
GATEWAY_HOST_NAME = "GatewayHostName"
VALID_KEYS = [
HOST_NAME,
SHARED_ACCESS_KEY_NAME,
SHARED_ACCESS_KEY,
SHARED_ACCESS_SIGNATURE,
DEVICE_ID,
MODULE_ID,
GATEWAY_HOST_NAME,
]
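# Illustrative note (not part of the original source): how a device connection
# string is split into the parts validated by _validate_keys. The values below
# are placeholders.
#   cs = "HostName=example.azure-devices.net;DeviceId=my-device;SharedAccessKey=c2VjcmV0"
#   parts = dict(arg.split(VALUE_SEPARATOR, 1) for arg in cs.split(DELIMITER))
#   # parts == {"HostName": "example.azure-devices.net",
#   #           "DeviceId": "my-device",
#   #           "SharedAccessKey": "c2VjcmV0"}
#   _validate_keys(parts)  # passes: HostName + DeviceId + SharedAccessKey is a valid combination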
class IoTHubDevice(IoTMQTTCallback):
"""A device client for the Azure IoT Hub service"""
def connection_status_change(self, connected: bool) -> None:
"""Called when the connection status changes
:param bool connected: True if the device is connected, otherwise false
"""
if self._on_connection_status_changed is not None:
# pylint: disable=E1102
self._on_connection_status_changed(connected)
# pylint: disable=W0613, R0201
def direct_method_invoked(self, method_name: str, payload) -> IoTResponse:
"""Called when a direct method is invoked
:param str method_name: The name of the method that was invoked
:param str payload: The payload with the message
:returns: A response with a code and status to show if the method was correctly handled
:rtype: IoTResponse
"""
if self._on_direct_method_invoked is not None:
# pylint: disable=E1102
return self._on_direct_method_invoked(method_name, payload)
raise IoTError("on_direct_method_invoked not set")
# pylint: disable=C0103
def cloud_to_device_message_received(self, body: str, properties: dict) -> None:
"""Called when a cloud to device message is received
:param str body: The body of the message
:param dict properties: The properties sent with the message
"""
if self._on_cloud_to_device_message_received is not None:
# pylint: disable=E1102
self._on_cloud_to_device_message_received(body, properties)
def device_twin_desired_updated(
self, desired_property_name: str, desired_property_value, desired_version: int
) -> None:
"""Called when the device twin desired properties are updated
:param str desired_property_name: The name of the desired property that was updated
:param desired_property_value: The value of the desired property that was updated
:param int desired_version: The version of the desired property that was updated
"""
if self._on_device_twin_desired_updated is not None:
# pylint: disable=E1102
self._on_device_twin_desired_updated(
desired_property_name, desired_property_value, desired_version
)
def device_twin_reported_updated(
self,
reported_property_name: str,
reported_property_value,
reported_version: int,
) -> None:
"""Called when the device twin reported values are updated
:param str reported_property_name: The name of the reported property that was updated
:param reported_property_value: The value of the reported property that was updated
:param int reported_version: The version of the reported property that was updated
"""
if self._on_device_twin_reported_updated is not None:
# pylint: disable=E1102
self._on_device_twin_reported_updated(
reported_property_name, reported_property_value, reported_version
)
def __init__(
self,
socket,
iface,
device_connection_string: str,
token_expires: int = 21600,
logger: logging = None,
):
"""Create the Azure IoT Central device client
:param socket: The network socket
:param iface: The network interface
:param str device_connection_string: The Iot Hub device connection string
:param int token_expires: The number of seconds till the token expires, defaults to 6 hours
:param adafruit_logging logger: The logger
"""
self._socket = socket
self._iface = iface
self._token_expires = token_expires
self._logger = logger if logger is not None else logging.getLogger("log")
connection_string_values = {}
try:
cs_args = device_connection_string.split(DELIMITER)
connection_string_values = dict(
arg.split(VALUE_SEPARATOR, 1) for arg in cs_args
)
except (ValueError, AttributeError) as e:
raise ValueError(
"Connection string is required and should not be empty or blank and must be supplied as a string"
) from e
if len(cs_args) != len(connection_string_values):
raise ValueError("Invalid Connection String - Unable to parse")
_validate_keys(connection_string_values)
self._hostname = connection_string_values[HOST_NAME]
self._device_id = connection_string_values[DEVICE_ID]
self._shared_access_key = connection_string_values[SHARED_ACCESS_KEY]
self._logger.debug("Hostname: " + self._hostname)
self._logger.debug("Device Id: " + self._device_id)
self._logger.debug("Shared Access Key: " + self._shared_access_key)
self._on_connection_status_changed = None
self._on_direct_method_invoked = None
self._on_cloud_to_device_message_received = None
self._on_device_twin_desired_updated = None
self._on_device_twin_reported_updated = None
self._mqtt = None
@property
def on_connection_status_changed(self):
"""A callback method that is called when the connection status is changed. This method should have the following signature:
def connection_status_changed(connected: bool) -> None
"""
return self._on_connection_status_changed
@on_connection_status_changed.setter
def on_connection_status_changed(self, new_on_connection_status_changed):
"""A callback method that is called when the connection status is changed. This method should have the following signature:
def connection_status_changed(connected: bool) -> None
"""
self._on_connection_status_changed = new_on_connection_status_changed
@property
def on_direct_method_invoked(self):
"""A callback method that is called when a direct method is invoked. This method should have the following signature:
def direct_method_invoked(method_name: str, payload: str) -> IoTResponse:
This method returns an IoTResponse containing a status code and message from the method invocation. Set this appropriately
depending on if the method was successfully handled or not. For example, if the method was handled successfully, set
the code to 200 and message to "OK":
return IoTResponse(200, "OK")
"""
return self._on_direct_method_invoked
@on_direct_method_invoked.setter
def on_direct_method_invoked(self, new_on_direct_method_invoked):
"""A callback method that is called when a direct method is invoked. This method should have the following signature:
def direct_method_invoked(method_name: str, payload: str) -> IoTResponse:
This method returns an IoTResponse containing a status code and message from the method invocation. Set this appropriately
depending on if the method was successfully handled or not. For example, if the method was handled successfully, set
the code to 200 and message to "OK":
return IoTResponse(200, "OK")
"""
self._on_direct_method_invoked = new_on_direct_method_invoked
@property
def on_cloud_to_device_message_received(self):
"""A callback method that is called when a cloud to device message is received. This method should have the following signature:
def cloud_to_device_message_received(body: str, properties: dict) -> None:
"""
return self._on_cloud_to_device_message_received
@on_cloud_to_device_message_received.setter
def on_cloud_to_device_message_received(
self, new_on_cloud_to_device_message_received
):
"""A callback method that is called when a cloud to device message is received. This method should have the following signature:
def cloud_to_device_message_received(body: str, properties: dict) -> None:
"""
self._on_cloud_to_device_message_received = (
new_on_cloud_to_device_message_received
)
@property
def on_device_twin_desired_updated(self):
"""A callback method that is called when the desired properties of the devices device twin are updated.
This method should have the following signature:
def device_twin_desired_updated(desired_property_name: str, desired_property_value, desired_version: int) -> None:
"""
return self._on_device_twin_desired_updated
@on_device_twin_desired_updated.setter
def on_device_twin_desired_updated(self, new_on_device_twin_desired_updated):
"""A callback method that is called when the desired properties of the devices device twin are updated.
This method should have the following signature:
def device_twin_desired_updated(desired_property_name: str, desired_property_value, desired_version: int) -> None:
"""
self._on_device_twin_desired_updated = new_on_device_twin_desired_updated
if self._mqtt is not None:
self._mqtt.subscribe_to_twins()
@property
def on_device_twin_reported_updated(self):
"""A callback method that is called when the reported properties of the devices device twin are updated.
This method should have the following signature:
def device_twin_reported_updated(reported_property_name: str, reported_property_value, reported_version: int) -> None:
"""
return self._on_device_twin_reported_updated
@on_device_twin_reported_updated.setter
def on_device_twin_reported_updated(self, new_on_device_twin_reported_updated):
"""A callback method that is called when the reported properties of the devices device twin are updated.
This method should have the following signature:
def device_twin_reported_updated(reported_property_name: str, reported_property_value, reported_version: int) -> None:
"""
self._on_device_twin_reported_updated = new_on_device_twin_reported_updated
if self._mqtt is not None:
self._mqtt.subscribe_to_twins()
def connect(self) -> None:
"""Connects to Azure IoT Hub
:raises RuntimeError: if the internet connection is not responding or is unable to connect
"""
self._mqtt = IoTMQTT(
self,
self._socket,
self._iface,
self._hostname,
self._device_id,
self._shared_access_key,
self._token_expires,
self._logger,
)
self._mqtt.connect()
if (
self._on_device_twin_desired_updated is not None
or self._on_device_twin_reported_updated is not None
):
self._mqtt.subscribe_to_twins()
def disconnect(self) -> None:
"""Disconnects from the MQTT broker
:raises IoTError: if there is no open connection to the MQTT broker
"""
if self._mqtt is None:
raise IoTError("You are not connected to IoT Central")
self._mqtt.disconnect()
def reconnect(self) -> None:
"""Reconnects to the MQTT broker"""
if self._mqtt is None:
raise IoTError("You are not connected to IoT Central")
self._mqtt.reconnect()
def is_connected(self) -> bool:
"""Gets if there is an open connection to the MQTT broker
:returns: True if there is an open connection, False if not
:rtype: bool
"""
if self._mqtt is not None:
return self._mqtt.is_connected()
return False
def loop(self) -> None:
"""Listens for MQTT messages
:raises IoTError: if there is no open connection to the MQTT broker
"""
if self._mqtt is None:
raise IoTError("You are not connected to IoT Central")
self._mqtt.loop()
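# Illustrative note (not part of the original source): a typical lifecycle for
# this client. `socket` and `iface` come from the board's network stack and the
# connection string is a placeholder.
#   device = IoTHubDevice(socket, iface, "HostName=...;DeviceId=...;SharedAccessKey=...")
#   device.connect()
#   while device.is_connected():
#       device.loop()  # processes cloud-to-device messages, twin updates, etc.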
def send_device_to_cloud_message(self, message, system_properties=None) -> None:
"""Send a device to cloud message from this device to Azure IoT Hub
:param message: The message data as a JSON string or a dictionary
:param system_properties: System properties to send with the message
:raises: ValueError if the message is not a string or dictionary
| |
# perpendicular direction line
fn_line = _get_line_sample_func(C_vv, theta + pi / 2.0)
ax.plot(*fn_line(mu=mu_l)[0], linestyle="--", color=line.get_color())
try:
v1.zt
z_var = "zt"
except AttributeError:
z_var = "zm"
try:
t_units = "s" if "seconds" in v1.time.units else v1.time.units
ax.set_title(
"""Covariance length-scale for\n{C_vv}
t={t}{t_units} z={z}{z_units}
""".format(
C_vv=fix_cumulant_name(C_vv.name),
t=float(v1.time),
t_units=t_units,
z=float(v1[z_var]),
z_units=v1[z_var].units,
)
)
except AttributeError:
pass
return ax
def _get_line_sample_func(data, theta):
x = data.coords["x"]
y = data.coords["y"]
# the interpolation method works best for data in the -1:1 range
d_scale = max(data.values.max(), -data.values.min())
if d_scale == 0.0:
d_scale = 1.0
maps = [x, y]
if data.dims == ("x", "y"):
lo = np.array([x[0], y[0]])
hi = np.array([x[-1], y[-1]])
else:
lo = np.array([y[0], x[0]])
hi = np.array([y[-1], x[-1]])
interp_f = intergrid.Intergrid(
data.values / d_scale, lo=lo, hi=hi, maps=maps, verbose=0
)
def sample(mu):
"""
mu is the distance along the rotated coordinate
"""
x_ = np.cos(float(theta)) * mu
y_ = np.sin(float(theta)) * mu
if data.dims == ("x", "y"):
p = np.array([x_, y_]).T
else:
p = np.array([y_, x_]).T
return (x_, y_), interp_f(p) * d_scale
return sample
def _line_sample(data, theta, max_dist):
"""
Sample 2D dataset along a line through the origin at angle `theta`
"""
sample = _get_line_sample_func(data, theta)
x = data.coords["x"]
mu_l = x[np.abs(x) < max_dist]
return mu_l, sample(mu_l)[1]
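# Illustrative note (not part of the original source): sampling a 2D cumulant
# field along a given direction. `C_vv` is assumed to be an xr.DataArray with
# "x"/"y" coordinates in metres and `theta` an angle in radians.
#   mu, values = _line_sample(C_vv, theta, max_dist=2000.0)
#   # `mu` are distances along the rotated axis, `values` the interpolated field there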
class WidthEstimationMethod(Enum):
MASS_WEIGHTED = 0
CUTOFF = 1
def __str__(self):
return self.name
def _find_width_through_mass_weighting(data, theta, max_width=5000.0, center_only=True):
"""
Integrates the central cumulant over distance to calculate a weighted
length-scale. If `center_only` is true only the part that has the same sign
as correlation at the origin (0,0) is included.
"""
assert data.x.units == "m"
sample_fn = _get_line_sample_func(data, theta)
if center_only:
data = _extract_cumulant_center(data)
def mass_weighted_edge(dir):
# we only integrate positive "mass" contributions, including negative
# contributions didn't work...
# if the correlation is negative we still want to be able to calculate
# a width, the distance over which the correlation is negative. And so
# we multiply by the sign at the origin to flip the function
s = np.sign(sample_fn(0.0)[1])
def fn(mu):
val = sample_fn(mu)[1]
return np.maximum(0, s * val)
fn_inertia = lambda mu: fn(mu) * np.abs(mu) # noqa
fn_mass = lambda mu: fn(mu) # noqa
if dir == 1:
kw = dict(a=0, b=max_width / 2.0)
else:
kw = dict(a=-max_width / 2.0, b=0)
# try to work out if function has a local minimum, i.e. whether we get
# a flip in correlation, if so we only want to integrate up to this
# limit so that we avoid getting contributions if the correlation
# becomes positive again
x_ = np.linspace(kw["a"], kw["b"], 100)
vals_ = s * sample_fn(x_)[1]
if np.all(np.isnan(vals_)):
raise Exception("No valid datapoints found")
elif np.min(vals_) < 0.0:
max_width_local = x_[np.argmin(vals_)]
if dir == 1:
kw = dict(a=0, b=max_width_local)
else:
kw = dict(a=-max_width_local, b=0)
# import matplotlib.pyplot as plt
# if dif == 1:
# plt.figure()
# plt.plot(x_, fn(x_))
# else:
# plt.plot(x_, fn(x_))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
inertia, _ = scipy.integrate.quad(fn_inertia, **kw)
mass, _ = scipy.integrate.quad(fn_mass, **kw)
dist = inertia / mass
return dir * dist
try:
width = mass_weighted_edge(1.0) - mass_weighted_edge(-1.0)
except ZeroDivisionError:
width = np.nan
return xr.DataArray(width, coords=dict(zt=data.zt), attrs=dict(units="m"))
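# In other words, for each side of the origin the routine computes a
# mass-weighted mean distance (first moment divided by the total "mass"),
#   mu_side = integral(f(mu) * |mu| dmu) / integral(f(mu) dmu),
# and the returned width is the sum of the two one-sided distances,
# width = mu_plus + mu_minus.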
def _find_width_through_cutoff(data, theta, width_peak_fraction=0.5, max_width=5000.0):
x = data.coords["x"]
assert x.units == "m"
sample_fn = _get_line_sample_func(data, theta)
d_max = data.values.max()
# d_at_inf = sample_fn(x.max())[1]
# use corner of domain as reference
# d_at_inf = data[0,0].values
d_at_inf = 0.0
def sample_fn_normed(mu):
"""
Normalize with value at infinity so that root finding method can find
halfway value if there is net positive correlation of the entire domain
"""
pts_xy, d_val = sample_fn(mu)
return pts_xy, (d_val - d_at_inf) / (d_max - d_at_inf)
def root_fn(mu):
return sample_fn_normed(mu)[1] - width_peak_fraction
def find_edge(dir):
# first find when data drops below zero away from x=0, this will set limit
# range for root finding
mu_coarse = np.linspace(0.0, dir * max_width, 100)
_, d_coarse = sample_fn_normed(mu=mu_coarse)
try:
i_isneg = np.min(np.argwhere(d_coarse < 0.0))
mu_lim = mu_coarse[i_isneg]
except ValueError:
mu_lim = dir * max_width
try:
x_hwhm = scipy.optimize.brentq(f=root_fn, a=0.0, b=mu_lim)
except ValueError:
warnings.warn(
"Couldn't find width smaller than `{}` assuming"
" that the cumulant spreads to infinity".format(max_width)
)
x_hwhm = np.inf
return x_hwhm
width = find_edge(1.0) - find_edge(-1.0)
return xr.DataArray(width, coords=dict(zt=data.zt), attrs=dict(units="m"))
def add_width_indicator_legend(ax):
"""
Render a cumulant width indicator below the bottom right corner of the
axes `ax`
"""
# set the width and height of the axes we'll draw into in inches
# so that they're always the same size
fig = ax.figure
fw, fh = (fig.dpi_scale_trans - fig.transFigure).transform([1.6, 0.5])
# and we want to attach to the bottom corner of the provided axes
fx, fy = (ax.transAxes - fig.transFigure).transform([1.0, 0.0])
ax_inset = fig.add_axes([fx - fw, fy - 2 * fh, fw, fh], transform=fig.transFigure)
labels = [r"$L^p$", r"$L^{\bot}$"]
linestyles = ["-", "--"]
ax_inset.axis("off")
for n, (label, ls) in enumerate(zip(labels, linestyles)):
w = 0.2
eb = ax_inset.errorbar(x=n, y=0.0, xerr=w, color="black", capsize=5.0)
eb[-1][0].set_linestyle(ls)
ax_inset.text(x=n - 2.5 * w, y=0.0, s=label, va="center")
def covariance_direction_plot(
v1,
v2,
s_N=200,
theta_win_N=100,
width_peak_fraction=0.5,
mask=None,
max_dist=2000.0,
with_45deg_sample=False,
sample_angle=None,
ax=None,
width_est_method=WidthEstimationMethod.MASS_WEIGHTED,
line_color="green",
width_indicator="full",
):
"""
Compute 2nd-order cumulant between v1 and v2 and sample it along and perpendicular
to the principle axis. `s_N` sets the plot window
width_indicator:
`full`: draw width indicator with label
`floating_legend`: width indicator with label in legend below axes
`no_label`: draw indicator without label
"""
import matplotlib.pyplot as plt
if ax is None:
ax = plt.gca()
# add grid lines
ax.axhline(0.0, color="grey", linestyle=":", alpha=0.3)
ax.axvline(0.0, color="grey", linestyle=":", alpha=0.3)
assert v1.shape == v2.shape
if v1.dims == ("x", "y"):
Nx, Ny = v1.shape
elif v1.dims == ("y", "x"):
Ny, Nx = v1.shape
else:
raise NotImplementedError
assert np.all(v1.coords["x"] == v2.coords["x"])
assert np.all(v1.coords["y"] == v2.coords["y"])
if width_est_method == WidthEstimationMethod.CUTOFF:
width_func = _find_width_through_cutoff
elif width_est_method == WidthEstimationMethod.MASS_WEIGHTED:
width_func = _find_width_through_mass_weighting
else:
raise NotImplementedError(width_est_method)
# TODO: don't actually need 2D coords here, but would have to fix indexing
# below
if v1.dims == ("x", "y"):
x, y = np.meshgrid(v1.coords["x"], v1.coords["y"], indexing="ij")
else:
x, y = np.meshgrid(v1.coords["x"], v1.coords["y"])
C_vv = calc_2nd_cumulant(v1, v2, mask=mask)
if sample_angle is not None:
theta = xr.DataArray(
sample_angle * pi / 180.0,
attrs=dict(units="radians"),
coords=dict(zt=v1.zt),
)
else:
theta = identify_principle_axis(C_vv, sI_N=theta_win_N)
# principle direction line
mu_l, C_vv_l = _line_sample(data=C_vv, theta=theta, max_dist=max_dist)
(line_1,) = ax.plot(
mu_l,
C_vv_l,
label=r"$\theta^{{p}}={:.1f}^{{\circ}}$" "".format(theta.values * 180.0 / pi),
color=line_color,
)
width = width_func(C_vv, theta)
def _make_width_indicator(label, y0, width, color, linestyle="-"):
eb = ax.errorbar(
x=0.0, xerr=width / 2.0, y=y0, color=color, capsize=4.0, linestyle=linestyle
)
eb[-1][0].set_linestyle(linestyle)
if width_indicator == "full":
ax.text(
s=label,
x=0.0,
y=y0,
verticalalignment="bottom",
horizontalalignment="center",
)
if width_indicator is not None:
# we'll place the width indicator based on the y-range of the plot using
# the side of the y=0 line where there is the most space
ylim = np.array(ax.get_ylim())
i_max = np.argmax(np.abs(ylim))
y_max = ylim[i_max]
if abs(ylim[[1, 0][i_max]] / ylim[i_max]) > 0.3:
y_max = 1.3 * y_max
y_ref = sorted([0.4 * y_max, 0.2 * y_max])
_make_width_indicator(
label="$L^p$",
width=width,
y0=y_ref[0],
color=line_1.get_color(),
linestyle="-",
)
# perpendicular direction line
mu_l, C_vv_l = _line_sample(data=C_vv, theta=theta + pi / 2.0, max_dist=max_dist)
(line_2,) = ax.plot(
mu_l,
C_vv_l,
label=r"$\theta^\bot=\theta^p + 90^{\circ}$",
linestyle="--",
color=line_1.get_color(),
)
if width_indicator is not None:
width = width_func(C_vv, theta + pi / 2.0)
_make_width_indicator(
label=r"$L^{\bot}$",
width=width,
y0=y_ref[1],
color=line_1.get_color(),
linestyle="--",
)
if width_indicator == "floating_legend":
add_width_indicator_legend(ax=ax)
if with_45deg_sample:
mu_l, C_vv_l = _line_sample(
data=C_vv, theta=theta + pi / 4.0, max_dist=max_dist
)
(line_2,) = ax.plot(mu_l, C_vv_l, label=r"$\theta=\theta^{p} + 45^{\circ}$")
width = width_func(C_vv, theta + pi / 2.0, width_peak_fraction)
ax.axvline(-0.5 * width, linestyle="--", color=line_2.get_color())
ax.axvline(0.5 * width, linestyle="--", color=line_2.get_color())
lines = [line_1, line_2]
ax.legend(lines, [l.get_label() for l in lines], loc="upper right")
ax.set_xlabel("distance [m]")
ax.set_ylabel("covariance [{}]".format(C_vv.units))
# ax.text(0.05, 0.8, r"$\theta_{{p}}={:.1f}^{{\circ}}$".format(theta.values*180./pi),
# transform=ax.transAxes
# )
ax.ticklabel_format(style="sci", axis="y", scilimits=(0, 0), useMathText=True)
try:
v1.zt
z_var = "zt"
except AttributeError:
z_var = "zm"
ax.set_title(
"{} sampled along and\n perpendicular to principle axis "
"at z={z}{z_units}\n".format(
C_vv.long_name, z=float(v1[z_var]), z_units=v1[z_var].units
)
)
return [line_1, line_2]
def charactistic_scales(
v1,
v2=None,
l_theta_win=1000.0,
mask=None,
sample_angle=None,
width_est_method=WidthEstimationMethod.MASS_WEIGHTED,
):
"""
From 2nd-order cumulant of v1 and v2 compute principle axis angle,
characteristic length-scales along and perpendicular to principle axis (as
full width at half maximum)
"""
if | |
scope for all raw_dbs and datasets
scope=self.generate_scope(acl_type, self.all_scope_ctx),
)
}
)
for acl_type in acl_default_types
]
# root level like cdf:root
elif root_account: # no parameters
# all (no limits)
group_name_full_qualified = f"{BootstrapCore.GROUP_NAME_PREFIX}{root_account}"
# all default ACLs
[
capabilities.append( # type: ignore
{
f"{acl_type}Acl": self.acl_template(
# check for acl specific owner actions, else default
actions=self.generate_default_action("owner", acl_type),
scope={"all": {}},
)
}
)
# skipping admin types from default types to avoid duplicates
for acl_type in (set(acl_default_types) - set(acl_admin_types))
]
# plus admin ACLs
[
capabilities.append( # type: ignore
{
f"{acl_admin_type}Acl": self.acl_template(
# check for acl specific owner actions, else default
actions=self.generate_admin_action(acl_admin_type),
scope={"all": {}},
)
}
)
for acl_admin_type in acl_admin_types
]
return group_name_full_qualified, capabilities
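# Illustrative note (not part of the original source): the shape of one entry
# in the returned `capabilities` list (the ACL name and actions below are
# examples only; the real values come from acl_template and the action lookups):
#   {
#       "datasetsAcl": {
#           "actions": ["READ", "OWNER"],
#           "scope": {"all": {}},
#       }
#   }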
def get_group_ids_by_name(self, group_name: str) -> List[int]:
"""Lookup if CDF Group name exists (could be more than one!)
and return list of all CDF Group IDs
Args:
group_name (str): CDF Group name to check
Returns:
List[int]: of CDF Group IDs
"""
return self.deployed["groups"].query("name == @group_name")["id"].tolist()
# return self.deployed["groups"].query("name == @group_payload['name']")["id"].tolist()
# TODO 220203 pa: explicit providing 'group_name'
# to bypass a strange bug under Docker which throws a
# pandas.core.computation.ops.UndefinedVariableError:
# name 'str_0_0x900xd80x90xec0x870x7f0x00x0' is not defined
def create_group(
self,
group_name: str,
group_capabilities: List[Dict[str, Any]] = None,
idp_mapping: Tuple[str, str] = None,
) -> Group:
"""Creating a CDF Group
- with upsert support the same way Fusion updates CDF Groups
if a group with the same name exists:
1. a new group with the same name will be created
2. then the old group will be deleted (by its 'id')
- with support of explicit given aad-mapping or internal lookup from config
Args:
group_name (str): name of the CDF Group (always prefixed with GROUP_NAME_PREFIX)
group_capabilities (List[Dict[str, Any]], optional): Defining the CDF Group capabilities.
idp_mapping (Tuple[str, str], optional):
Tuple of ({AAD SourceID}, {AAD SourceName})
to link the CDF Group to
Returns:
Group: the new created CDF Group
"""
idp_source_id, idp_source_name = None, None
if idp_mapping:
# explicit given
# TODO: change from tuple to dataclass
if len(idp_mapping) != 2:
raise ValueError(f"Expected a tuple of length 2, got {idp_mapping=} instead")
idp_source_id, idp_source_name = idp_mapping
else:
# check lookup from provided config
mapping = self.bootstrap_config.get_idp_cdf_mapping_for_group(
cdf_project=self.cdf_project, cdf_group=group_name
)
# unpack
idp_source_id, idp_source_name = mapping.idp_source_id, mapping.idp_source_name
# check if group already exists, if yes it will be deleted after a new one is created
old_group_ids = self.get_group_ids_by_name(group_name)
new_group = Group(name=group_name, capabilities=group_capabilities)
if idp_source_id:
# inject (both will be pushed through the API call!)
new_group.source_id = idp_source_id # 'S-314159-1234'
new_group.source = idp_source_name # type: ignore
# print(f"group_create_object:<{group_create_object}>")
# overwrite new_group as it now contains id too
if self.is_dry_run:
_logger.info(f"Dry run - Creating group with name: <{new_group.name}>")
_logger.debug(f"Dry run - Creating group details: <{new_group}>")
else:
new_group = self.client.iam.groups.create(new_group)
# if the group name existed before, delete those groups now
# same upsert approach Fusion is using to update a CDF Group: create new with changes => then delete old one
if old_group_ids:
if self.is_dry_run:
_logger.info(f"Dry run - Deleting groups with ids: <{old_group_ids}>")
else:
self.client.iam.groups.delete(old_group_ids)
return new_group
def process_group(
self, action: str = None, ns_name: str = None, node_name: str = None, root_account: str = None
) -> Group:
# to avoid complex upsert logic, all groups will be recreated and then the old ones deleted
# to be merged with existing code
# print(f"=== START: action<{action}> | ns_name<{ns_name}> | node_name<{node_name}> ===")
group_name, group_capabilities = self.generate_group_name_and_capabilities(
action, ns_name, node_name, root_account
)
group: Group = self.create_group(group_name, group_capabilities)
return group
def generate_target_datasets(self) -> Dict[str, Any]:
# list of all targets: autogenerated dataset names
target_datasets = {
# dictionary generator
# dataset_name : {Optional[dataset_description], Optional[dataset_metadata], ..}
# key:
(fq_ns_name := self.get_dataset_name_template().format(node_name=ns_node.node_name)):
# value
{
"description": ns_node.description,
"metadata": ns_node.metadata,
# if not explicit provided, same template as name
"external_id": ns_node.external_id or fq_ns_name,
}
for ns in self.bootstrap_config.namespaces
for ns_node in ns.ns_nodes
}
# update target datasets to include 'allproject' and '{ns_name}:{BootstrapCore.AGGREGATED_GROUP_NAME}' datasets
target_datasets.update(
{ # dictionary generator
# key:
self.get_dataset_name_template().format(
node_name=f"{ns_name}:{BootstrapCore.AGGREGATED_LEVEL_NAME}"
if ns_name
else BootstrapCore.AGGREGATED_LEVEL_NAME
):
# value
{
"description": f"Dataset for '{BootstrapCore.AGGREGATED_LEVEL_NAME}' Owner Groups",
# "metadata": "",
"external_id": f"{ns_name}:{BootstrapCore.AGGREGATED_LEVEL_NAME}"
if ns_name
else BootstrapCore.AGGREGATED_LEVEL_NAME,
}
# creating 'all' at group type level + top-level
for ns_name in list([ns.ns_name for ns in self.bootstrap_config.namespaces]) + [""]
}
)
return target_datasets
def generate_missing_datasets(self) -> Tuple[List[str], List[str]]:
target_datasets = self.generate_target_datasets()
# TODO: SDK should do this fine, that was an older me still learning :)
def chunks(data, SIZE=10000):
it = iter(data)
for i in range(0, len(data), SIZE):
yield {k: data[k] for k in islice(it, SIZE)}
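# Illustrative note (not part of the original source): `chunks` slices a dict
# into fixed-size dicts, e.g.
#   list(chunks({"a": 1, "b": 2, "c": 3}, SIZE=2))
#   # -> [{"a": 1, "b": 2}, {"c": 3}]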
# which targets are not already deployed?
missing_datasets = {
name: payload
for name, payload in target_datasets.items()
if name not in self.deployed["datasets"]["name"].tolist()
}
if missing_datasets:
# create all datasets which are not already deployed
# https://docs.cognite.com/api/v1/#operation/createDataSets
for chunked_missing_datasets in chunks(missing_datasets, 10):
datasets_to_be_created = [
DataSet(
name=name,
description=payload.get("description"),
external_id=payload.get("external_id"),
metadata=payload.get("metadata"),
write_protected=True,
)
for name, payload in chunked_missing_datasets.items()
]
if self.is_dry_run:
for data_set_to_be_created in datasets_to_be_created:
_logger.info(f"Dry run - Creating dataset with name: <{data_set_to_be_created.name}>")
_logger.debug(f"Dry run - Creating dataset: <{data_set_to_be_created}>")
else:
self.client.data_sets.create(datasets_to_be_created)
# which targets are already deployed?
existing_datasets = {
# dictionary generator
# key:
dataset_columns["name"]:
# value
# Merge dataset 'id' from CDF with dataset arguments from config.yml
dict(id=dataset_columns["id"], **target_datasets[dataset_columns["name"]])
for row_id, dataset_columns in self.deployed["datasets"].iterrows() # iterating pd dataframe
if dataset_columns["name"] in target_datasets.keys()
}
if existing_datasets:
# update datasets which are already deployed
# https://docs.cognite.com/api/v1/#operation/createDataSets
# TODO: description, metadata, externalId
for chunked_existing_datasets in chunks(existing_datasets, 10):
datasets_to_be_updated = [
DataSetUpdate(id=dataset["id"])
.name.set(name)
.description.set(dataset.get("description"))
.external_id.set(dataset.get("external_id"))
.metadata.set(dataset.get("metadata"))
for name, dataset in chunked_existing_datasets.items()
]
if self.is_dry_run:
for data_set_to_be_updated in datasets_to_be_updated:
_logger.info(f"Dry run - Updating dataset with name: <{data_set_to_be_updated.name}>")
_logger.debug(f"Dry run - Updating dataset: <{data_set_to_be_updated}>")
# _logger.info(f"Dry run - Updating dataset: <{data_set_to_be_updated}>")
else:
self.client.data_sets.update(datasets_to_be_updated)
return list(target_datasets.keys()), list(missing_datasets.keys())
def generate_target_raw_dbs(self) -> Set[str]:
# list of all targets: autogenerated raw_db names
target_raw_db_names = set(
[
self.get_raw_dbs_name_template().format(node_name=ns_node.node_name, raw_variant=raw_variant)
for ns in self.bootstrap_config.namespaces
for ns_node in ns.ns_nodes
for raw_variant in BootstrapCore.RAW_VARIANTS
]
)
target_raw_db_names.update(
# add RAW DBs for 'all' users
[
self.get_raw_dbs_name_template().format(
node_name=f"{ns_name}:{BootstrapCore.AGGREGATED_LEVEL_NAME}"
if ns_name
else BootstrapCore.AGGREGATED_LEVEL_NAME,
raw_variant=raw_variant,
)
# creating allprojects at group type level + top-level
for ns_name in list([ns.ns_name for ns in self.bootstrap_config.namespaces]) + [""]
for raw_variant in BootstrapCore.RAW_VARIANTS
]
)
return target_raw_db_names
def generate_missing_raw_dbs(self) -> Tuple[List[str], List[str]]:
target_raw_db_names = self.generate_target_raw_dbs()
try:
# which targets are not already deployed?
missing_rawdbs = target_raw_db_names - set(self.deployed["raw_dbs"]["name"])
except Exception as exc:
_logger.info(f"Raw databases do not exist in CDF:\n{exc}")
missing_rawdbs = target_raw_db_names
if missing_rawdbs:
# create all raw_dbs which are not already deployed
if self.is_dry_run:
for raw_db in list(missing_rawdbs):
_logger.info(f"Dry run - Creating rawdb: <{raw_db}>")
else:
self.client.raw.databases.create(list(missing_rawdbs))
return target_raw_db_names, missing_rawdbs
"""
"Special CDF Groups" are groups which don't have capabilities but have an effect by their name only.
1. 'transformations' group: grants access to "Fusion > Integrate > Transformations"
2. 'extractors' group: grants access to "Fusion > Integrate > Extract Data" which allows download of extractors
Both of them are expected to be deprecated in the near future (time of writing: Q4 '21).
- 'transformations' can already be replaced with dedicated 'transformationsAcl' capabilities
- 'extractors' is only used to grant access to the extractor-download page
"""
def generate_special_groups(self):
special_group_names = ["extractors", "transformations"]
_logger.info(f"Generating special groups:\n{special_group_names}")
for special_group_name in special_group_names:
self.create_group(group_name=special_group_name)
# generate all groups - iterating through the 3-level hierarchy
def generate_groups(self):
# permutate the combinations
for action in ["read", "owner"]: # action_dimensions w/o 'admin'
for ns in self.bootstrap_config.namespaces:
for ns_node in ns.ns_nodes:
# group for each dedicated group-type id
self.process_group(action, ns.ns_name, ns_node.node_name)
# 'all' groups on group-type level
# (access to all datasets/ raw-dbs which belong to this group-type)
self.process_group(action, ns.ns_name)
# 'all' groups on action level (no limits to datasets or raw-dbs)
self.process_group(action)
# creating CDF Group for root_account (highest admin-level)
for root_account in ["root"]:
self.process_group(root_account=root_account)
def load_deployed_config_from_cdf(self, groups_only=False) -> None:
"""Load CDF Groups, Datasets and RAW DBs as pd.DataFrames
and store them in 'self.deployed' dictionary.
Args:
groups_only (bool, optional): Limit to CDF Groups only (used by 'prepare' command). Defaults to False.
| |
<reponame>AgazW/Seq-Pip
from PyQt4 import QtGui,QtCore
from PyQt4.QtCore import pyqtSlot,SIGNAL,SLOT
import sys
from os import listdir
from os.path import isfile, join
import mainwindow
import subprocess
import bowt
import tophat
from subprocess import Popen, PIPE
import cuff
import cuffquant
import cuffdiff
import cuffcompare
import cuffnorm
import cuffmerge
import pickle
from subprocess import call
import os
class Mainp(QtGui.QMainWindow,mainwindow.Ui_MainWindow):
def __init__(self,out="", parent=None ):
super(Mainp, self).__init__(parent)
self.setupUi(self) # code for initializing all the necessary variables.
self.option = " "
self.out = out
self.align =" "
self.file=" "
self.cuff = " "
self.txt = " "
self.txt1=" "
self.frame.hide()
self.algnoption.hide()
self.builselcom.hide()
self.selfilebuil.hide()
self.frame_2.hide()
self.connect(self.createind, SIGNAL('activated(QString)'), self.runindex1) # link the runindex1
# self.connect(self.selfilcom, SIGNAL('activated(QString)'), self.runindex)
self.selfile.clicked.connect(self.selectFile)
self.mulfile.clicked.connect(self.selectFile1)
self.selfilebuil.clicked.connect(self.selectFile2)
self.bowtie.clicked.connect(self.onIndexChange1) # link onIndexChange1 function
self.tophat.clicked.connect(self.onIndexChange2)
self.cuffyes.clicked.connect(self.onIndexChange3)
self.cuffno.clicked.connect(self.onIndexChange4)
self.genind.clicked.connect(self.genindop)
self.allignment.clicked.connect(self.allignmentop) # link allignmentop function
self.cufflinks.clicked.connect(self.cufflinksop)
self.CuffCompare.clicked.connect(self.CuffCompareop)
self.cuffdif.clicked.connect(self.Cuffdiffop)
self.cuffquant.clicked.connect(self.Cuffquantop)
self.Cuffnorm.clicked.connect(self.Cuffnormop)
self.Cuffmerge.clicked.connect(self.Cuffmergeop)
# self.C.clicked.connect(self.onIndexChange4)
def selectFile(self): #code for selecting reference genome file
self.selfilcom.addItem(QtGui.QFileDialog.getOpenFileName())
self.file = self.selfilcom.currentText()
if not self.file:
quit_msg = "Error! Please select and load the file first"
reply = QtGui.QMessageBox.warning(self, 'Error',
quit_msg, QtGui.QMessageBox.Ok)
else:
st = ".fa"
st1 = ".fasta" # check whether tha submitted file will be in this format or not.
st2 =".mfa"
st3 = ".fna"
st4=".txt"
st5 =".zip"
if st in self.file or st1 in self.file or st2 in self.file or st3 in self.file or st4 in self.file or st5 in self.file:
quit_msg = "if you want to Provide the File Name? (Press Yes if you want)"
reply = QtGui.QMessageBox.question(self, 'Message',
quit_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
text = QtGui.QInputDialog.getText(self, self.tr("QInputDialog"), self.tr("File Name:"),
QtGui.QLineEdit.Normal)
fil = text[0]
self.out = fil
file = self.selfilcom.currentText()
# imag = QtGui.QPixmap("")
build = "bowtie2-build" + " " +"-f"+" "+ file + " " + fil # command for execution to create index
fpt = open("build.pkl", "wb") # create file and store the build variable value in this file.
pickle.dump(str(build), fpt, protocol=2)
fpt.close()
# proc = subprocess.Popen(["bowtie2-build" + " "+ file + " "+ fil, ""],stdout=subprocess.PIPE, shell=True)
# (out, err) = proc.communicate()
print(self.out)
outfile = self.out
fp = open("shared.pkl", "wb") # containe the output file name this file will be used futher modules
pickle.dump(str(outfile), fp, protocol=2)
fp.close()
quit_msg = "Do you want to Continue? (Press Yes if you want)"
reply = QtGui.QMessageBox.question(self, 'Message',
quit_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
self.align = "No"
self.algnoption.show()
else:
quit_msg = "Processing Please wait"
reply = QtGui.QMessageBox.information(self, 'Message',
quit_msg, QtGui.QMessageBox.Ok)
proc = subprocess.Popen([build, ""], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
f = open("error", "w") # file that writes the error message
f.write(str(err))
f.close()
f = open("error", "r")
self.txt = f.read(5)
self.txt1 = f.read(10) # contains the error message up to that position
f.close()
if not self.txt1: # if no error occurred
quit_msg = "Successfully created"
reply = QtGui.QMessageBox.information(self, 'Message',
quit_msg, QtGui.QMessageBox.Ok)
self.close()
elif self.txt1: # if an error occurred
quit_msg = "is not a valid reference file"
reply = QtGui.QMessageBox.warning(self, 'Message',
quit_msg, QtGui.QMessageBox.Ok)
self.txt = " "
self.txt1 = " "
self.selfilcom.clear()
else:
quit_msg = "Please select Fasta format files only"
reply = QtGui.QMessageBox.warning(self, 'Message',
quit_msg, QtGui.QMessageBox.Ok)
self.selfilcom.clear()
def selectFile1(self): # code for selecting multiple files
#self.selfilcom.addItem(QtGui.QFileDialog.getOpenFileName())
self.file3 = (QtGui.QFileDialog.getOpenFileName())
num = self.selfilcom.count()
self.file = self.selfilcom.itemText(0)
self.file1 = self.selfilcom.itemText(num)
for i in range(num-1):
self.file2 = self.selfilcom.itemText(i+1)
self.file += ","+self.file2
self.file += self.file1
if not self.file3:
quit_msg = "Error! Please select and load the file first"
reply = QtGui.QMessageBox.warning(self, 'Error',
quit_msg, QtGui.QMessageBox.Ok)
else:
st = ".fa"
st1 = ".fasta"
st2 = ".mfa"
st3 = ".fna"
st4 = ".txt"
if st in self.file3 or st1 in self.file3 or st2 in self.file3 or st3 in self.file3 or st4 in self.file3:
quit_msg = "Do you want to select any other file(press yes if you want)"
reply = QtGui.QMessageBox.question(self, 'Message',
quit_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
self.selfilcom.addItem(self.file3)
self.selectFile1()
elif reply == QtGui.QMessageBox.No:
quit_msg = "if you want to Provide the File Name? (Press Yes if you want)"
reply = QtGui.QMessageBox.question(self, 'Message',
quit_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
text = QtGui.QInputDialog.getText(self, self.tr("QInputDialog"), self.tr("File Name:"),
QtGui.QLineEdit.Normal)
fil = text[0]
self.out = fil
#file = self.selfilcom.currentText()
# imag = QtGui.QPixmap("")
build = "bowtie2-build" + " " + self.file + " " + fil
fpt = open("build.pkl", "wb")
pickle.dump(str(build), fpt, protocol=2)
fpt.close()
# proc = subprocess.Popen(["bowtie2-build" + " "+ file + " "+ fil, ""],stdout=subprocess.PIPE, shell=True)
# (out, err) = proc.communicate()
# print(self.out)
outfile = self.out
fp = open("shared.pkl", "wb")
pickle.dump(str(outfile), fp, protocol=2)
fp.close()
quit_msg = "Do you want to Continue? (Press Yes if you want)"
reply = QtGui.QMessageBox.question(self, 'Message',
quit_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
self.align = "No"
self.algnoption.show()
else:
quit_msg = "Processing Please wait"
reply = QtGui.QMessageBox.information(self, 'Message',
quit_msg, QtGui.QMessageBox.Ok)
proc = subprocess.Popen([build, ""], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
f = open("error", "w")
f.write(str(err))
f.close()
f = open("error", "r")
self.txt = f.read(5)
self.txt1 = f.read(10)
f.close()
print(proc)
if not self.txt1:
quit_msg = "Successfully created"
reply = QtGui.QMessageBox.information(self, 'Message',
quit_msg, QtGui.QMessageBox.Ok)
self.close()
elif self.txt1:
quit_msg = "is not a valid reference file"
reply = QtGui.QMessageBox.warning(self, 'Message',
quit_msg, QtGui.QMessageBox.Ok)
self.txt = " "
self.txt1 = " "
self.selfilcom.clear()
self.close()
else:
quit_msg = "Please select Fasta format files only"
reply = QtGui.QMessageBox.warning(self, 'Message',
quit_msg, QtGui.QMessageBox.Ok)
self.selectFile1()
def selectFile2(self): # code for adding built-in files; implementation is below
self.builselcom.addItem(QtGui.QFileDialog.getOpenFileName())
def CuffCompareop(self):# for showing cuffcompare module
top = cuffcompare.Cuffcomp(self)
top.closed.connect(self.show)
top.show()
def Cuffnormop(self):# for showing cuffnorm module
top = cuffnorm.Cuffnorm(self)
top.closed.connect(self.show)
top.show()
def Cuffdiffop(self):# for showing cuffdiff module
top = cuffdiff.Cuffdiff(self)
top.closed.connect(self.show)
top.show()
def Cuffquantop(self):# for showing cuffquant module
top = cuffquant.Cuffq(self)
top.closed.connect(self.show)
top.show()
def Cuffmergeop(self):# for showing cuffmerge module
top = cuffmerge.Cuffm(self)
top.closed.connect(self.show)
top.show()
def cufflinksop(self): # code for cufflinks options
quit_msg = "Please Select the already alligned SAM / BAM sorted file? (Press Yes if you want)"
reply = QtGui.QMessageBox.question(self, 'Message',
quit_msg, QtGui.QMessageBox.Ok) # for selecting sam files
if reply == QtGui.QMessageBox.Ok:
read = QtGui.QFileDialog.getOpenFileName(self,"pick a file")
fp2 = open("sharedcufffile.pkl","wb") # file for storing SAM filename
pickle.dump(str(read), fp2,protocol=2)
fp2.close()
filg = "Yes"
fp1w = open("filegen.pkl","wb") # file that used for directly start from cufflink option only(it doesnot run Bowtie 2 module)
pickle.dump(str(filg), fp1w,protocol=2)
fp1w.close()
top1 = cuff.Cuffl(self)
top1.closed.connect(self.show)
top1.show()# to show cufflink module
#self.close()
def allignmentop(self): # to show alignment options (it will show only the alignment frame)
self.scrollArea.hide()
self.scrollArea_2.hide()
#self.frame_2.show()
self.frame.show()
self.selfile.hide()
self.mulfile.hide()
self.createind.hide()
self.selfilcom.hide()
self.builselcom.hide()
self.label_7.hide()
self.label_5.hide()
self.align="Yes"
self.algnoption.show()
def genindop(self): # code for generate index option
self.scrollArea.hide()
self.scrollArea_2.hide()
self.frame.show()
def onIndexChange1(self):# code for the alignment option (start directly from the alignment option)
if self.align == "Yes":
quit_msg = "Please Select the already created index File? (Press Yes if you want)"
reply = QtGui.QMessageBox.question(self, 'Message',
quit_msg, QtGui.QMessageBox.Ok)
if reply == QtGui.QMessageBox.Ok:
read = str(QtGui.QFileDialog.getOpenFileName(self,"pick a file"))
self.pfl = read
str6= ".1.bt2"
str7 =".2.bt2"
str4 = ".3.bt2"
str5 = ".4.bt2"
str3 =".rev.1.bt2"
str2 =".rev.2.bt2"
if str2 in read: # check whether the file is in one of these formats
str1 =read.index(str2)
file = read[:str1]
elif str3 in read:
str1 = read.index(str3)
file = read[:str1]
elif str4 in read:
str1 = read.index(str4)
file = read[:str1]
elif str5 in read:
str1 = read.index(str5)
file = read[:str1]
elif str6 in read:
str1 = read.index(str6)
file = read[:str1]
elif str7 in read:
str1 = read.index(str7)
file = read[:str1]
fp = open("shared.pkl","wb") # store the file name in thsi file
pickle.dump(str(file), fp, protocol=2)
fp.close()
self.option= "bowtie"
option = "bowtie"
fp1w = open("file.pkl","wb") # file that indicate this is from bowtie module
pickle.dump(str(option), fp1w,protocol=2)
fp1w.close()
quit_msg1 = "if you want to See the Output of Bowtie? (Press Yes if you want)"
reply3 = QtGui.QMessageBox.question(self, 'Message',
quit_msg1, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply3 == QtGui.QMessageBox.Yes:
cfile="yes"
fp1 = open("botout.pkl","wb") # file that containe yes for seeing bowtie output
pickle.dump(str(cfile), fp1,protocol=2)
fp1.close()
elif reply3 == QtGui.QMessageBox.No:
cfile="no"
fp1 = open("botout.pkl","wb")# file that containe no for seeing bowtie output
pickle.dump(str(cfile), fp1,protocol=2)
fp1.close()
quit_msg = "Do you want to Continue to use cufflinks? (Press Yes if you want)"
reply = QtGui.QMessageBox.question(self, 'Message',
quit_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
option1 = "Yes"
fp1w1 = open("nocont.pkl","wb")# file that containe yes for continue to | |
<reponame>semuconsulting/pyrtcm
"""
RTCM Protocol payload definitions
Created on 14 Feb 2022
Information sourced from RTCM STANDARD 10403.3 © 2016 RTCM
:author: semuadmin
"""
# pylint: disable=too-many-lines, line-too-long
# attribute names holding size of MSM repeating groups
NSAT = "NSat"
NSIG = "NSig"
NCELL = "_NCell"
NBIAS = "_NBias"
# *************************************************************
# MSM MESSAGE SUB-SECTION DEFINITIONS
# *************************************************************
MSM_HDR = {
"DF002": "Message number",
"DF003": "Reference station ID",
"GNSSEpoch": "GNSS Epoch Time",
"DF393": "Multiple Message Bit",
"DF409": "IODS - Issue of Data Station",
"DF001_7": "Reserved",
"DF411": "Clock Steering Indicator",
"DF412": "External Clock Indicator",
"DF417": "GNSS Divergence-free Smoothing Indicator",
"DF418": "GNSS Smoothing Interval",
"DF394": "GNSS Satellite Mask", # NSAT = num of set bits
"DF395": "GNSS Signal Mask", # NSIG = num of set bits
"DF396": "GNSS Cell Mask", # size = NCELL = NSAT * NSIG
}
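# Illustrative note (not part of the original source): the repeating-group
# sizes are derived from the masks above by counting set bits, e.g. for a
# hypothetical satellite mask with 3 bits set and a signal mask with 2 bits set:
#   NSat = bin(0b101000100).count("1")   # -> 3 satellites
#   NSig = bin(0b1100).count("1")        # -> 2 signals
#   # the cell mask (DF396) is then NSat * NSig = 6 bits long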
MSM_SAT_123 = {
"group": (
NSAT,
{
"DF398": "GNSS Satellite rough ranges modulo 1 millisecond",
},
),
}
MSM_SAT_46 = {
"group1": (
NSAT,
{
"DF397": "Number of int millisecs in GNSS Satellite roughranges",
},
),
"group2": (
NSAT,
{
"DF398": "GNSS Satellite rough ranges modulo 1 millisecond",
},
),
}
MSM_SAT_57 = {
"group1": (
NSAT,
{
"DF397": "The number of integer milliseconds in GNSS Satellite rough ranges",
},
),
"group2": (
NSAT,
{
"GNSSSpecific": "Extended Satellite Information",
},
),
"group3": (
NSAT,
{
"DF398": "GNSS Satellite rough ranges modulo 1 millisecond",
},
),
"group4": (
NSAT,
{
"DF399": "GNSS Satellite rough PhaseRangeRates",
},
),
}
MSM_SIG_1 = {
"group1": (
NCELL,
{
"DF400": "GNSS signal fine Pseudoranges",
},
),
}
MSM_SIG_2 = {
"group1": (
NCELL,
{
"DF401": "GNSS signal fine PhaseRange data",
},
),
"group2": (
NCELL,
{
"DF402": "GNSS PhaseRange Lock",
},
),
"group3": (
NCELL,
{
"DF420",
"Half-cycle ambiguity indicator",
},
),
}
MSM_SIG_3 = {
"group1": (
NCELL,
{
"DF400": "GNSS signal fine Pseudoranges",
},
),
"group2": (
NCELL,
{
"DF401": "GNSS signal fine PhaseRange data",
},
),
"group3": (
NCELL,
{
"DF402": "GNSS PhaseRange Lock",
},
),
"group4": (
NCELL,
{
"DF420": "Half-cycle ambiguity indicator",
},
),
}
MSM_SIG_4 = {
"group1": (
NCELL,
{
"DF400": "GNSS signal fine Pseudoranges",
},
),
"group2": (
NCELL,
{
"DF401": "GNSS signal fine PhaseRange data",
},
),
"group3": (
NCELL,
{
"DF402": "GNSS PhaseRange Lock",
},
),
"group4": (
NCELL,
{
"DF420": "Half-cycle ambiguity indicator",
},
),
"group5": (
NCELL,
{
"DF403": "GNSS signal CNRs",
},
),
}
MSM_SIG_5 = {
"group1": (
NCELL,
{
"DF400": "GNSS signal fine Pseudoranges",
},
),
"group2": (
NCELL,
{
"DF401": "GNSS signal fine PhaseRange data",
},
),
"group3": (
NCELL,
{
"DF402": "GNSS PhaseRange Lock",
},
),
"group4": (
NCELL,
{
"DF420": "Half-cycle ambiguity indicator",
},
),
"group5": (
NCELL,
{
"DF403": "GNSS signal CNRs",
},
),
"group6": (
NCELL,
{
"DF404": "GNSS signal fine PhaseRangeRates",
},
),
}
MSM_SIG_6 = {
"group1": (
NCELL,
{
"DF405": "GNSS signal fine",
},
),
"group2": (
NCELL,
{
"DF406": "GNSS signal fine PhaseRange data with extended resolution",
},
),
"group3": (
NCELL,
{
"DF407": "GNSS PhaseRange Lock",
},
),
"group4": (
NCELL,
{
"DF420": "Half-cycle ambiguity indicator",
},
),
"group5": (
NCELL,
{
"DF408": "GNSS signal CNRs with extended resolution",
},
),
}
MSM_SIG_7 = {
"group1": (
NCELL,
{
"DF405": "GNSS signal fine",
},
),
"group2": (
NCELL,
{
"DF406": "GNSS signal fine PhaseRange data with extended resolution",
},
),
"group3": (
NCELL,
{
"DF407": "GNSS PhaseRange Lock",
},
),
"group4": (
NCELL,
{
"DF420": "Half-cycle ambiguity indicator",
},
),
"group5": (
NCELL,
{
"DF408": "GNSS signal CNRs with extended resolution",
},
),
"group6": (
NCELL,
{
"DF404": "GNSS signal fine PhaseRangeRates",
},
),
}
# concatenate MSM sections in to a single dict
# NB: Python >=3.9 supports the more intuitive | (union)
# operation for this, but earlier versions don't.
MSM1 = {**MSM_HDR, **MSM_SAT_123, **MSM_SIG_1}
MSM2 = {**MSM_HDR, **MSM_SAT_123, **MSM_SIG_2}
MSM3 = {**MSM_HDR, **MSM_SAT_123, **MSM_SIG_3}
MSM4 = {**MSM_HDR, **MSM_SAT_46, **MSM_SIG_4}
MSM5 = {**MSM_HDR, **MSM_SAT_57, **MSM_SIG_5}
MSM6 = {**MSM_HDR, **MSM_SAT_46, **MSM_SIG_6}
MSM7 = {**MSM_HDR, **MSM_SAT_57, **MSM_SIG_7}
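# Illustrative note (not part of the original source): on Python >= 3.9 the
# same merge could be written with the dict union operator, e.g.
#   MSM4 = MSM_HDR | MSM_SAT_46 | MSM_SIG_4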
# *************************************************************
# *************************************************************
# RTCM3 MESSAGE GROUP HEADER DEFINITIONS
# *************************************************************
HDR_1001_1004 = {
"DF002": "Message Number",
"DF003": "Reference Station ID",
"DF004": "GPS Epoch Time (TOW)",
"DF005": "Synchronous GNSS Flag",
"DF006": "No. of GPS Satellite Signals Processed",
"DF007": "GPS Divergence-free Smoothing Indicator",
"DF008": "GPS Smoothing Interval",
}
HDR_1005_1006 = {
"DF002": "Message Number",
"DF003": "Reference Station ID",
"DF021": "Reserved for ITRF Realization Year",
"DF022": "GPS Indicator",
"DF023": "GLONASS Indicator",
"DF024": "Reserved for Galileo Indicator",
"DF141": "Reference-Station Indicator",
"DF025": "Antenna Reference Point ECEF-X",
"DF142": "Single Receiver Oscillator Indicator",
"DF001_1": "Reserved",
"DF026": "Antenna Reference Point ECEF-Y",
"DF364": "Quarter Cycle Indicator",
"DF027": "Antenna Reference Point ECEF-Z",
}
HDR_1009_1012 = {
"DF002": "Message Number",
"DF003": "Reference Station ID",
"DF034": "GLONASS Epoch Time (tk)",
"DF005": "Synchronous GNSS Flag",
"DF035": "No. of GLONASS Satellite Signals Processed",
"DF036": "GLONASS Divergence-free Smoothing Indicator",
"DF037": "GLONASS Smoothing Interval",
}
HDR_1015_1017 = {
"DF002": "Message Number",
"DF059": "Network ID",
"DF072": "Subnetwork ID",
"DF065": "GPS Epoch Time (GPS TOW)",
"DF066": "GPS Multiple Message Indicator",
"DF060": "Master Reference Station ID",
"DF061": "Auxiliary Reference Station ID",
"DF067": "# of GPS Sats",
}
HDR_1037_1039 = {
"DF002": "Message Number",
"DF059": "Network ID",
"DF072": "Subnetwork ID",
"DF233": "GLONASS Network Epoch Time",
"DF066": "Multiple Message Indicator",
"DF060": "Master Reference Station ID",
"DF061": "Auxiliary Reference Station ID",
"DF234": "# of GLONASS Data Entries",
}
# *************************************************************
# RTCM3 MESSAGE PAYLOAD DEFINITIONS
# *************************************************************
RTCM_PAYLOADS_GET = {
"1001": {
**HDR_1001_1004,
"group": (
"DF006",
{
"DF009": "GPS Satellite ID",
"DF010": "GPS L1 Code Indicator",
"DF011": "GPS L1 Pseudorange",
"DF012": "GPS L1 PhaseRange - L1 Pseudorange",
"DF013": "GPS L1 Lock time Indicator",
},
),
},
"1002": {
**HDR_1001_1004,
"group": (
"DF006",
{
"DF009": "GPS Satellite ID",
"DF010": "GPS L1 Code Indicator",
"DF011": "GPS L1 Pseudorange",
"DF012": "GPS L1 PhaseRange - L1 Pseudorange",
"DF013": "GPS L1 Lock time Indicator",
"DF014": "GPS Integer L1 Pseudorange Modulus Ambiguity",
"DF015": "GPS L1 CNR",
},
),
},
"1003": {
**HDR_1001_1004,
"group": (
"DF006",
{
"DF009": "GPS Satellite ID",
"DF010": "GPS L1 Code Indicator",
"DF011": "GPS L1 Pseudorange",
"DF012": "GPS L1 PhaseRange - L1 Pseudorange",
"DF013": "GPS L1 Lock time Indicator",
"DF016": "GPS L2 Code Indicator",
"DF017": "GPS L2-L1 Pseudorange Difference",
"DF018": "GPS L2 PhaseRange - L1 Pseudorange",
"DF019": "GPS L2 Lock time Indicator",
},
),
},
"1004": {
**HDR_1001_1004,
"group": (
"DF006",
{
"DF009": "GPS Satellite ID",
"DF010": "GPS L1 Code Indicator",
"DF011": "GPS L1 Pseudorange",
"DF012": "GPS L1 PhaseRange - L1 Pseudorange",
"DF013": "GPS L1 Lock time Indicator",
"DF014": "GPS Integer L1 Pseudorange Modulus Ambiguity",
"DF015": "GPS L1 CNR",
"DF016": "GPS L2 Code Indicator",
"DF017": "GPS L2-L1 Pseudorange Difference",
"DF018": "GPS L2 PhaseRange - L1 Pseudorange",
"DF019": "GPS L2 Lock time Indicator",
"DF020": "GPS L2 CNR",
},
),
},
"1005": HDR_1005_1006,
"1006": {**HDR_1005_1006, "DF028": "Antenna Height"},
"1007": {
"DF002": "Message Number",
"DF003": "Reference Station ID",
"DF029": "Descriptor Counter N",
"group": (
"DF029",
{
"DF030": "Antenna Descriptor",
},
),
"DF031": "Antenna Setup ID",
},
"1008": {
"DF002": "Message Number",
"DF003": "Reference Station ID",
"DF029": "Descriptor Counter N",
"group1": (
"DF029",
{
"DF030": "Antenna Descriptor",
},
),
"DF031": "Antenna Setup ID",
"DF032": "Serial Number Counter M",
"group2": (
"DF032",
{
"DF033": "Antenna Serial Number",
},
),
},
"1009": {
**HDR_1009_1012,
"group": (
"DF035",
{
"DF038": "GLONASS Satellite ID (Satellite Slot Number)",
"DF039": "GLONASS Code Indicator",
"DF040": "GLONASS Satellite Frequency Channel Number",
"DF041": "GLONASS L1 Pseudorange",
"DF042": "GLONASS L1 PhaseRange - L1 Pseudorange",
"DF043": "GLONASS L1 Lock time Indicator",
},
),
},
"1010": {
**HDR_1009_1012,
"group": (
"DF035",
{
"DF038": "GLONASS Satellite ID (Satellite Slot Number)",
"DF039": "GLONASS L1 Code Indicator",
"DF040": "GLONASS Satellite Frequency Channel Number",
"DF041": "GLONASS L1 Pseudorange",
"DF042": "GLONASS L1 PhaseRange - L1 Pseudorange",
"DF043": "GLONASS L1 Lock time Indicator",
"DF044": "GLONASS Integer L1 Pseudorange Modulus Ambiguity",
"DF045": "GLONASS L1 CNR",
},
),
},
"1011": {
**HDR_1009_1012,
"group": (
"DF035",
{
"DF038": "GLONASS Satellite ID (Satellite Slot Number)",
"DF039": "GLONASS L1 Code Indicator",
"DF040": "GLONASS Satellite Frequency Channel Number",
"DF041": "GLONASS L1 Pseudorange",
"DF042": "GLONASS L1 PhaseRange - L1 Pseudorange",
"DF043": "GLONASS L1 Lock time Indicator",
"DF046": "GLONASS L2 Code Indicator",
"DF047": "GLONASS L2-L1 Pseudorange Difference",
"DF048": "GLONASS L2 PhaseRange - L1 Pseudorange",
"DF049": | |
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: SearchResult
"""
kwargs['_return_http_data_only'] = True
return self.execute_entity_type_collection_search_with_http_info(search_term, **kwargs) # noqa: E501
def execute_entity_type_collection_search_with_http_info(self, search_term, **kwargs): # noqa: E501
"""Executes a search over all EntityTypeCollections to find ones that match the given search term # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.execute_entity_type_collection_search_with_http_info(search_term, async_req=True)
>>> result = thread.get()
:param search_term: (required)
:type search_term: SearchTerm
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: return the response data only, without
                               the HTTP status code and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
                         be returned without reading/decoding response
                         data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If a single
                         number is provided, it will be the total request
                         timeout. It can also be a pair (tuple) of
                         (connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
                      request; this effectively ignores the authentication
                      in the spec for that request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(SearchResult, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'search_term'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method execute_entity_type_collection_search" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'search_term' is set
if self.api_client.client_side_validation and ('search_term' not in local_var_params or # noqa: E501
local_var_params['search_term'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `search_term` when calling `execute_entity_type_collection_search`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'search_term' in local_var_params:
body_params = local_var_params['search_term']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['http_auth', 'openlattice_auth'] # noqa: E501
return self.api_client.call_api(
'/datastore/search/entity_types/collections', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SearchResult', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def execute_entity_type_search(self, search_term, **kwargs): # noqa: E501
"""Executes a search over all entity types to find ones that match the given search term # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.execute_entity_type_search(search_term, async_req=True)
>>> result = thread.get()
:param search_term: (required)
:type search_term: SearchTerm
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If a single
                         number is provided, it will be the total request
                         timeout. It can also be a pair (tuple) of
                         (connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: SearchResult
"""
kwargs['_return_http_data_only'] = True
return self.execute_entity_type_search_with_http_info(search_term, **kwargs) # noqa: E501
def execute_entity_type_search_with_http_info(self, search_term, **kwargs): # noqa: E501
"""Executes a search over all entity types to find ones that match the given search term # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.execute_entity_type_search_with_http_info(search_term, async_req=True)
>>> result = thread.get()
:param search_term: (required)
:type search_term: SearchTerm
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: return the response data only, without
                               the HTTP status code and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
                         be returned without reading/decoding response
                         data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If a single
                         number is provided, it will be the total request
                         timeout. It can also be a pair (tuple) of
                         (connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
                      request; this effectively ignores the authentication
                      in the spec for that request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(SearchResult, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'search_term'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method execute_entity_type_search" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'search_term' is set
if self.api_client.client_side_validation and ('search_term' not in local_var_params or # noqa: E501
local_var_params['search_term'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `search_term` when calling `execute_entity_type_search`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'search_term' in local_var_params:
body_params = local_var_params['search_term']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['http_auth', 'openlattice_auth'] # noqa: E501
return self.api_client.call_api(
'/datastore/search/entity_types', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SearchResult', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
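# A possible usage pattern for the two methods above (the SearchApi name and
# api_client setup are assumptions; populate SearchTerm per that model):
#
#   api = SearchApi(api_client)
#   term = SearchTerm(...)
#   result = api.execute_entity_type_search(term)            # synchronous
#   thread = api.execute_entity_type_search(term, async_req=True)
#   result = thread.get()                                     # asynchronous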
def execute_filtered_entity_neighbor_id_search(self, entity_set_id, neighbor_search_filter, **kwargs): # noqa: E501
"""Executes a search for all neighbors of multiple entities of the same entity set that are connected by an association and returns a simple version of the neighborDetails # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.execute_filtered_entity_neighbor_id_search(entity_set_id, neighbor_search_filter, async_req=True)
>>> result = thread.get()
:param entity_set_id: (required)
:type entity_set_id: str
:param neighbor_search_filter: (required)
:type neighbor_search_filter: NeighborSearchFilter
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If a single
                         number is provided, it will be the total request
                         timeout. It can also be a pair (tuple) of
                         (connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: dict(str, dict(str, dict(str, list[NeighborEntityIds])))
"""
kwargs['_return_http_data_only'] = True
return self.execute_filtered_entity_neighbor_id_search_with_http_info(entity_set_id, neighbor_search_filter, **kwargs) # noqa: E501
def execute_filtered_entity_neighbor_id_search_with_http_info(self, entity_set_id, neighbor_search_filter, **kwargs): # noqa: E501
"""Executes a search for all neighbors of multiple entities of the same entity set that are connected by an association and returns a simple version of the neighborDetails # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.execute_filtered_entity_neighbor_id_search_with_http_info(entity_set_id, neighbor_search_filter, async_req=True)
>>> result = thread.get()
:param entity_set_id: (required)
:type entity_set_id: str
:param neighbor_search_filter: (required)
:type neighbor_search_filter: NeighborSearchFilter
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: return the response data only, without
                               the HTTP status code and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
                         be returned without reading/decoding response
                         data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If a single
                         number is provided, it will be the total request
                         timeout. It can also be a pair (tuple) of
                         (connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
                      request; this effectively ignores the authentication
                      in the spec for that request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(dict(str, dict(str, dict(str, list[NeighborEntityIds]))), status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'entity_set_id',
'neighbor_search_filter'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
<filename>library_old/iworkflow_service_template.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2017 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'
}
DOCUMENTATION = '''
---
module: iworkflow_service_template
short_description: Manages Service Templates on iWorkflow.
description:
- Manages Service Templates on iWorkflow. Service templates are created
by the iWorkflow administrator and are consumed by iWorkflow tenants
in the form of L4/L7 services. The Service Template can be configured
to allow tenants to change certain values of the template such as
the IP address of a VIP, or the port that a Virtual Server listens on.
version_added: "2.4"
options:
name:
description:
- Name of the service template.
required: True
parameters:
description:
- A dictionary containing the values of input parameters that the
Service Template contains. You will see these in iWorkflow's UI
labeled as "Application Tier Information" and "Sections". This
is the way by which you customize the Service Template and specify
which values are tenant editable. Since this value can be particularly
large, the recommended practice is to put it in an external file
and include it with the Ansible C(file) or C(template) lookup plugins.
This option is required when C(state) is C(present).
required: False
default: None
connector:
description:
- The cloud connector associated with this Service Template. If you want
to have this Service Template associated with all clouds, then specify
a C(connector) of C(all). When creating a new Service Template, if no
connector is specified, then C(all) clouds will be the default.
required: False
default: None
base_template:
description:
- The iApp template that you want to base this Service Template off
of. Note that, while iWorkflow's UI also allows you to specify another
Service Template for the C(base_template), this module does not yet
let you do that. This option is required when C(state) is C(present).
required: False
default: None
notes:
- Requires the f5-sdk Python package on the remote host. This is as easy as
pip install f5-sdk
- Requires the deepdiff Python package on the Ansible controller host. This
is as easy as pip install deepdiff.
requirements:
- f5-sdk >= 2.3.0
- iWorkflow >= 2.1.0
author:
- <NAME> (@caphrim007)
'''
EXAMPLES = '''
'''
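# A hypothetical task showing how the options documented above might be used
# (all values are placeholders; connection arguments are omitted):
#
#   - name: Create a service template from an iApp template
#     iworkflow_service_template:
#       name: web_tier_template
#       base_template: f5.http
#       connector: all
#       parameters: "{{ lookup('file', 'service_template_params.json') | from_json }}"
#       state: present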
RETURN = '''
'''
from ansible.module_utils.f5_utils import *
from deepdiff import DeepDiff
class Parameters(AnsibleF5Parameters):
api_map = {
'templateName': 'name',
'properties': 'connector',
'overrides': 'parameters'
}
returnables = ['vars']
api_attributes = [
'overrides', 'templateName', 'parentReference', 'properties'
]
updatables = ['tables', 'vars']
def __init__(self, params=None):
self._values = defaultdict(lambda: None)
if params:
self.update(params=params)
def update(self, params=None):
if params:
for k,v in iteritems(params):
if self.api_map is not None and k in self.api_map:
map_key = self.api_map[k]
else:
map_key = k
# Handle weird API parameters like `dns.proxy.__iter__` by
# using a map provided by the module developer
class_attr = getattr(type(self), map_key, None)
if isinstance(class_attr, property):
# There is a mapped value for the api_map key
if class_attr.fset is None:
# If the mapped value does not have an associated setter
self._values[map_key] = v
else:
# The mapped value has a setter
setattr(self, map_key, v)
else:
# If the mapped value is not a @property
self._values[map_key] = v
def _get_connector_collection(self):
return self.client.api.cm.cloud.connectors.locals.get_collection()
def _get_connector_selflink(self, connector, collection):
for resource in collection:
if str(resource.displayName) != "BIG-IP":
continue
if str(resource.name) != connector:
continue
return str(resource.selfLink)
return None
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
def api_params(self):
result = {}
for api_attribute in self.api_attributes:
if self.api_map is not None and api_attribute in self.api_map:
result[api_attribute] = getattr(self, self.api_map[api_attribute])
else:
result[api_attribute] = getattr(self, api_attribute)
result = self._filter_params(result)
return result
@property
def tables(self):
result = []
if not self._values['tables']:
return None
tables = self._values['tables']
for table in tables:
tmp = dict()
name = table.get('name', None)
if name is None:
raise F5ModuleError(
"One of the provided tables does not have a name"
)
tmp['name'] = str(name)
columns = table.get('columns', None)
if columns:
tmp['columns'] = []
for column in columns:
tmp['columns'].append(
dict((str(k),str(v)) for k,v in iteritems(column))
)
# You cannot have rows without columns
rows = table.get('rows', None)
if rows:
tmp['rows'] = []
for row in rows:
tmp['rows'].append([str(x) for x in row])
description = table.get('description', None)
tmp['description'] = str(description)
result.append(tmp)
result = sorted(result, key=lambda k: k['name'])
return result
@tables.setter
def tables(self, value):
self._values['tables'] = value
@property
def vars(self):
result = []
if not self._values['vars']:
return None
variables = self._values['vars']
for variable in variables:
tmp = dict((str(k), v) for k, v in iteritems(variable))
result.append(tmp)
result = sorted(result, key=lambda k: k['name'])
return result
@vars.setter
def vars(self, value):
self._values['vars'] = value
@property
def parameters(self):
return dict(
tables=self.tables,
vars=self.vars
)
@parameters.setter
def parameters(self, value):
if value is None:
return
if 'tables' in value:
self.tables = value['tables']
if 'vars' in value:
self.vars = value['vars']
@property
def connector(self):
connector = None
if self._values['connector'] is None:
return self._values['connector']
elif self._values['connector'] == 'all':
connector = 'all'
elif isinstance(self._values['connector'], basestring):
collection = self._get_connector_collection()
result = self._get_connector_selflink(str(self._values['connector']), collection)
connector = result
elif 'provider' in self._values['connector'][0]:
# Case for the REST API
item = self._values['connector'][0]['provider']
connector = str(item)
if connector is None:
raise F5ModuleError(
"The specified connector was not found"
)
elif connector == 'all':
result = [
dict(
id="cloudConnectorReference",
isRequired=True,
defaultValue=""
)
]
return result
else:
result = [
dict(
id="cloudConnectorReference",
isRequired=True,
provider=connector
)
]
return result
@property
def parentReference(self):
return dict(
link="https://localhost/mgmt/cm/cloud/templates/iapp/{0}".format(
self._values['base_template']
)
)
@parentReference.setter
def parentReference(self, value):
self._values['base_template'] = value['link']
class ModuleManager(object):
def __init__(self, client):
self.client = client
self.have = None
self.want = Parameters()
self.want.client = self.client
self.want.update(self.client.module.params)
self.changes = Parameters()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = Parameters()
self.changes.client = self.client
self.changes.update(changed)
def _update_changed_options(self):
changed = {}
for key in Parameters.updatables:
if getattr(self.want, key) is not None:
attr1 = getattr(self.want, key)
attr2 = getattr(self.have, key)
if attr1 != attr2:
changed[key] = str(DeepDiff(attr1,attr2))
if changed:
self.changes = Parameters()
self.changes.client = self.client
self.changes.update(changed)
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
return result
def exists(self):
result = self.client.api.cm.cloud.provider.templates.iapps.iapp.exists(
name=self.want.name
)
return result
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def create(self):
self._set_changed_options()
if self.want.connector is None:
self.want.update({'connector': 'all'})
if self.client.check_mode:
return True
if self.want.base_template is None:
raise F5ModuleError(
"A 'base_template' is required when creating a new Service Template"
)
self.create_on_device()
return True
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.client.check_mode:
return True
self.update_on_device()
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update_on_device(self):
params = self.want.api_params()
resource = self.client.api.cm.cloud.provider.templates.iapps.iapp.load(
name=self.want.name
)
resource.update(**params)
def read_current_from_device(self):
resource = self.client.api.cm.cloud.provider.templates.iapps.iapp.load(
name=self.want.name,
)
result = resource.attrs
result['parameters'] = result.pop('overrides', None)
params = Parameters()
params.client = self.client
params.update(result)
return params
def create_on_device(self):
params = self.want.api_params()
self.client.api.cm.cloud.provider.templates.iapps.iapp.create(
isF5Example=False,
**params
)
def absent(self):
if self.exists():
return self.remove()
return False
def remove(self):
if self.client.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the iApp service")
return True
def remove_from_device(self):
resource = self.client.api.cm.cloud.provider.templates.iapps.iapp.load(
name=self.want.name,
)
if resource:
resource.delete()
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
self.argument_spec = dict(
name=dict(required=True),
base_template=dict(
required=False,
default=None
),
parameters=dict(
required=False,
default=None,
type='dict'
),
connector=dict(
required=False,
default=None
),
state=dict(
required=False,
default='present',
choices=['absent', 'present']
)
)
self.f5_product_name = 'iworkflow'
def main():
if not HAS_F5SDK:
        raise F5ModuleError("The python f5-sdk module is required")
"""
plugins.modules
===============
"""
import os
import shutil
import sys
import tempfile
from pathlib import Path
from typing import Any, Dict, List, Optional
from object_colors import Color
import pyaud
DOCS = Path("docs")
README = Path("README.rst")
colors = Color()
colors.populate_colors()
class LineSwitch:
"""Take the ``path`` and ``replace`` argument from the commandline.
Reformat the README whilst returning the original title to the
parent process.
:param path: File to manipulate.
:param obj: Dictionary of line number's as key and replacement
strings as values.
"""
def __init__(self, path: Path, obj: Dict[int, str]) -> None:
self._path = path
self._obj = obj
with open(path, encoding="utf-8") as fin:
self.read = fin.read()
def __enter__(self) -> None:
with open(self._path, "w", encoding="utf-8") as file:
for count, line in enumerate(self.read.splitlines()):
if count in self._obj:
line = self._obj[count]
file.write(f"{line}\n")
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
with open(self._path, "w", encoding="utf-8") as file:
file.write(self.read)
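# Example usage (the path and replacement strings mirror how Docs.action
# below drives this class): swap the first two lines of the README for the
# duration of the ``with`` block, restoring the original content on exit.
#
#   with LineSwitch(Path.cwd() / README, {0: "README", 1: "======"}):
#       ...  # run a build step that expects the substituted title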
@pyaud.plugins.register(name="clean")
class Clean(pyaud.plugins.Action): # pylint: disable=too-few-public-methods
"""Remove all unversioned package files recursively."""
def action(self, *args: Any, **kwargs: bool) -> int:
exclude = pyaud.config.toml["clean"]["exclude"]
return pyaud.git.clean( # type: ignore
"-fdx", *[f"--exclude={e}" for e in exclude], **kwargs
)
@pyaud.plugins.register(name="coverage")
class Coverage(pyaud.plugins.Action): # pylint: disable=too-few-public-methods
"""Run package unit-tests with ``pytest`` and ``coverage``."""
coverage = "coverage"
@property
def exe(self) -> List[str]:
return [self.coverage]
def action(self, *args: Any, **kwargs: bool) -> Any:
returncode = pyaud.plugins.get("tests")(
*[f"--cov={e}" for e in pyaud.files.reduce()], *args, **kwargs
)
if not returncode:
kwargs["suppress"] = True
self.subprocess[self.coverage].call("xml", *args, **kwargs)
else:
print("No coverage to report")
@pyaud.plugins.register(name="deploy")
class Deploy( # pylint: disable=too-few-public-methods
pyaud.plugins.Parametrize
):
"""Deploy package documentation and test coverage."""
def plugins(self) -> List[str]:
return ["deploy-docs", "deploy-cov"]
@pyaud.plugins.register(name="deploy-cov")
class DeployCov( # pylint: disable=too-few-public-methods
pyaud.plugins.Action
):
"""Upload coverage data to ``Codecov``.
    If no coverage file exists, announce that no report has been created
    yet. If no ``CODECOV_TOKEN`` environment variable has been exported
    or defined in ``.env``, announce that no authorization token has been
    created yet.
"""
codecov = "codecov"
@property
def exe(self) -> List[str]:
return [self.codecov]
def action(self, *args: Any, **kwargs: bool) -> Any:
coverage_xml = Path.cwd() / os.environ["PYAUD_COVERAGE_XML"]
if coverage_xml.is_file():
if os.environ["CODECOV_TOKEN"] != "":
self.subprocess[self.codecov].call(
"--file", Path.cwd() / coverage_xml, **kwargs
)
else:
print("CODECOV_TOKEN not set")
else:
print("No coverage report found")
@pyaud.plugins.register(name="deploy-docs")
class DeployDocs(
pyaud.plugins.Action
): # pylint: disable=too-few-public-methods
"""Deploy package documentation to ``gh-pages``.
    Check that the branch being pushed is master (or another branch
    for tests). If the correct branch is in use, deploy ``gh-pages``
    to the orphaned branch; otherwise do nothing and announce that
    pushing was skipped.
"""
_pushing_skipped = "Pushing skipped"
def deploy_docs(self) -> None:
"""Series of functions for deploying docs."""
gh_remote = os.environ["PYAUD_GH_REMOTE"]
root_html = Path.cwd() / "html"
pyaud.git.add(".") # type: ignore
pyaud.git.diff_index("--cached", "HEAD", capture=True) # type: ignore
stashed = False
if pyaud.git.stdout():
pyaud.git.stash(devnull=True) # type: ignore
stashed = True
shutil.move(
str(Path.cwd() / os.environ["BUILDDIR"] / "html"), root_html
)
shutil.copy(Path.cwd() / README, root_html / README)
pyaud.git.rev_list( # type: ignore
"--max-parents=0", "HEAD", capture=True
)
stdout = pyaud.git.stdout()
if stdout:
pyaud.git.checkout(stdout[-1]) # type: ignore
pyaud.git.checkout("--orphan", "gh-pages") # type: ignore
pyaud.git.config( # type: ignore
"--global", "user.name", os.environ["PYAUD_GH_NAME"]
)
pyaud.git.config( # type: ignore
"--global", "user.email", os.environ["PYAUD_GH_EMAIL"]
)
shutil.rmtree(Path.cwd() / DOCS)
pyaud.git.rm("-rf", Path.cwd(), devnull=True) # type: ignore
pyaud.git.clean("-fdx", "--exclude=html", devnull=True) # type: ignore
for file in root_html.rglob("*"):
shutil.move(str(file), Path.cwd() / file.name)
shutil.rmtree(root_html)
pyaud.git.add(".") # type: ignore
pyaud.git.commit( # type: ignore
"-m", '"[ci skip] Publishes updated documentation"', devnull=True
)
pyaud.git.remote("rm", "origin") # type: ignore
pyaud.git.remote("add", "origin", gh_remote) # type: ignore
pyaud.git.fetch() # type: ignore
pyaud.git.stdout()
pyaud.git.ls_remote( # type: ignore
"--heads", gh_remote, "gh-pages", capture=True
)
result = pyaud.git.stdout()
remote_exists = None if not result else result[-1]
pyaud.git.diff( # type: ignore
"gh-pages", "origin/gh-pages", suppress=True, capture=True
)
result = pyaud.git.stdout()
remote_diff = None if not result else result[-1]
if remote_exists is not None and remote_diff is None:
colors.green.print("No difference between local branch and remote")
print(self._pushing_skipped)
else:
colors.green.print("Pushing updated documentation")
pyaud.git.push("origin", "gh-pages", "-f") # type: ignore
print("Documentation Successfully deployed")
pyaud.git.checkout("master", devnull=True) # type: ignore
if stashed:
pyaud.git.stash("pop", devnull=True) # type: ignore
pyaud.git.branch("-D", "gh-pages", devnull=True) # type: ignore
def action(self, *args: Any, **kwargs: bool) -> Any:
if pyaud.branch() == "master":
git_credentials = [
"PYAUD_GH_NAME",
"PYAUD_GH_EMAIL",
"PYAUD_GH_TOKEN",
]
null_vals = [k for k in git_credentials if os.environ[k] == ""]
if not null_vals:
if not Path(
Path.cwd() / os.environ["BUILDDIR"] / "html"
).is_dir():
pyaud.plugins.get("docs")(**kwargs)
self.deploy_docs()
else:
print("The following is not set:")
for null_val in null_vals:
print(f"- {null_val}")
print()
print(self._pushing_skipped)
else:
colors.green.print("Documentation not for master")
print(self._pushing_skipped)
@pyaud.plugins.register(name="docs")
class Docs(pyaud.plugins.Action): # pylint: disable=too-few-public-methods
"""Compile package documentation with ``Sphinx``.
    Temporarily retitle the README so its hyperlink isn't exactly the
    same as the package documentation, build the ``Sphinx`` html
    documentation, then return the README's title to what it
    originally was.
"""
sphinx_build = "sphinx-build"
@property
def exe(self) -> List[str]:
return [self.sphinx_build]
def action(self, *args: Any, **kwargs: bool) -> Any:
pyaud.plugins.get("toc")(*args, **kwargs)
readme_rst = "README"
underline = len(readme_rst) * "="
build_dir = Path.cwd() / os.environ["BUILDDIR"]
if build_dir.is_dir():
shutil.rmtree(build_dir)
if (
Path(Path.cwd() / DOCS / "conf.py").is_file()
and Path(Path.cwd(), README).is_file()
):
with LineSwitch(
Path.cwd() / README, {0: readme_rst, 1: underline}
):
command = ["-M", "html", Path.cwd() / DOCS, build_dir, "-W"]
self.subprocess[self.sphinx_build].call(*command, **kwargs)
colors.green.bold.print("Build successful")
else:
print("No docs found")
@pyaud.plugins.register(name="files")
class Files(
pyaud.plugins.Parametrize
): # pylint: disable=too-few-public-methods
"""Audit project data files.
Make ``docs/<APPNAME>.rst``, ``whitelist.py``, and
``requirements.txt`` if none already exist, update them if they do
and changes are needed or pass if nothing needs to be done.
"""
def plugins(self) -> List[str]:
return ["requirements", "toc", "whitelist"]
@pyaud.plugins.register(name="format")
class Format(pyaud.plugins.Fix):
"""Audit code with `Black`."""
black = "black"
@property
def exe(self) -> List[str]:
return [self.black]
def audit(self, *args: Any, **kwargs: bool) -> Any:
return self.subprocess[self.black].call(
"--check", *pyaud.files.args(), *args, **kwargs
)
def fix(self, *args: Any, **kwargs: bool) -> Any:
return self.subprocess[self.black].call(
*args, *pyaud.files.args(), **kwargs
)
@pyaud.plugins.register(name="lint")
class Lint(pyaud.plugins.Audit):
"""Lint code with ``pylint``."""
pylint = "pylint"
@property
def exe(self) -> List[str]:
return [self.pylint]
@property
def env(self) -> Dict[str, str]:
return {"PYCHARM_HOSTED": "True"}
def audit(self, *args: Any, **kwargs: bool) -> Any:
return self.subprocess[self.pylint].call(
"--output-format=colorized", *args, *pyaud.files.args(), **kwargs
)
@pyaud.plugins.register(name="requirements")
class Requirements(pyaud.plugins.Write):
"""Audit requirements.txt with Pipfile.lock."""
p2req = "pipfile2req"
@property
def exe(self) -> List[str]:
return [self.p2req]
@property
def path(self) -> Path:
return Path.cwd() / os.environ["PYAUD_REQUIREMENTS"]
def required(self) -> Path:
return Path.cwd() / "Pipfile.lock"
def write(self, *args: Any, **kwargs: bool) -> Any:
        # get the stdout for both production and development packages
self.subprocess[self.p2req].call(
self.required(), *args, capture=True, **kwargs
)
self.subprocess[self.p2req].call(
self.required(), "--dev", *args, capture=True, **kwargs
)
        # write to file, stripping the environment-marker information that
        # follows the semicolon on each line
stdout = list(
set("\n".join(self.subprocess[self.p2req].stdout()).splitlines())
)
stdout.sort()
with open(self.path, "w", encoding="utf-8") as fout:
for content in stdout:
fout.write(f"{content.split(';')[0]}\n")
@pyaud.plugins.register(name="tests")
class Tests(pyaud.plugins.Action): # pylint: disable=too-few-public-methods
"""Run the package unit-tests with ``pytest``."""
pytest = "pytest"
@property
def exe(self) -> List[str]:
return [self.pytest]
def action(self, *args: Any, **kwargs: bool) -> Any:
tests = Path.cwd() / "tests"
patterns = ("test_*.py", "*_test.py")
rglob = [
f
for f in pyaud.files
for p in patterns
if f.match(p) and str(tests) in str(f)
]
if rglob:
return self.subprocess[self.pytest].call(*args, **kwargs)
print("No tests found")
return 1
@pyaud.plugins.register(name="toc")
class Toc(pyaud.plugins.Write):
"""Audit docs/<NAME>.rst toc-file."""
sphinx_apidoc = "sphinx-apidoc"
@property
def exe(self) -> List[str]:
return [self.sphinx_apidoc]
@property
def path(self) -> Path:
return Path.cwd() / DOCS / f"{pyaud.package()}.rst"
def required(self) -> Optional[Path]:
return Path.cwd() / DOCS / "conf.py"
@staticmethod
def _populate(path: Path, contents: List[str]) -> None:
if path.is_file():
with open(path, encoding="utf-8") as fin:
contents.extend(fin.read().splitlines())
def write(self, *args: Any, **kwargs: bool) -> Any:
toc_attrs = " :members:\n :undoc-members:\n :show-inheritance:"
package = pyaud.package()
docspath = Path.cwd() / DOCS
self.subprocess[self.sphinx_apidoc].call(
"-o",
docspath,
Path.cwd() / package,
"-f",
*args,
devnull=True,
**kwargs,
)
# dynamically populate a list of unwanted, overly nested files
# nesting the file in the docs/<NAME>.rst file is preferred
nested = [
docspath / f
for f in docspath.iterdir()
if len(f.name.split(".")) > 2
]
contents: List[str] = []
self._populate(self.path, contents)
for file in nested:
# extract the data from the nested toc
self._populate(file, contents)
contents = sorted(
[i for i in contents if i.startswith(".. automodule::")]
)
with open(self.path, "w", encoding="utf-8") as fout:
fout.write(f"{package}\n{len(package) * '='}\n\n")
for content in contents:
fout.write(f"{content}\n{toc_attrs}\n")
# files that we do not want included in docs
# modules creates an extra layer that is not desired for this
# module
blacklist = [docspath / "modules.rst", *nested]
row = db(db.fornitori.id == id_fornitore).select().first()
error = False
if row.citta is None:
response.flash="Il fornitore non ha la città in anagrafica\nAggiornare l'anagrafica per poter emettere il DDT"
error=True
luoghi = []
try:
        if row.luogo_consegna_1:
            luoghi.append(row.luogo_consegna_1)
        if row.luogo_consegna_2:
            luoghi.append(row.luogo_consegna_2)
except:
luoghi.append("Indirizzo fornitore,,,,,")
trasporto_a_mezzo = Set()
trasporto_a_mezzo.add("Mittente")
trasporto_a_mezzo.add("Destinatario")
trasporto_a_mezzo.add("Vettore")
aspetto_esteriore_dei_beni = Set()
rows = db(db.aspetto_esteriore_dei_beni).select()
for row in rows:
aspetto_esteriore_dei_beni.add(row.nome)
causali = Set()
rows = db(db.causali).select()
for row in rows:
causali.add(row.nome)
porto = Set()
rows = db(db.porto).select()
for row in rows:
porto.add(row.nome)
ddt_id2 = db(db.ddt_cliente.id == id_ddt).select()
links=[lambda row: BUTTON("Aggiungi righe",_onclick=XML('aggiungiRigheFornitore('+str(row.id)+')'),_class='button btn btn-default')]
fields=[db.ordine_fornitore.ultimo_codice_ordine,db.ordine_fornitore.riferimento_ordine_cliente,db.ordine_fornitore.data_ordine_fornitore]
query=((db.ordine_fornitore.id_fornitore== id_fornitore) & (db.ordine_fornitore.ddt_completato =='F'))
# query=(db.ordine_cliente.ddt_completato == '0')
righe_in_ordine_fornitore_form = SQLFORM.grid(query=query,formname='ordini_fornitorii_ddt',maxtextlength=100,create=False,editable=True, deletable=False,searchable=True,sortable=True,paginate=5, formstyle = 'table3cols',csv=False,links=links,user_signature=True,args=request.args[:1],fields=fields)
return locals()
def crea_riba():
current_month = 1
return locals()
@service.jsonrpc
@service.jsonrpc2
def ristampa_fattura_da_id(args):
id_fattura=args['0']
dati_fattura = db(db.fatture_salvate.id == id_fattura).select().first()
# print dati_fattura
id_cliente = dati_fattura.id_cliente
ddts_id = dati_fattura.id_ddt
# response.flash = ddts_id
numero_fattura_da_salvare = dati_fattura.numero_fattura
"""
    Customer data
"""
dati_cliente = db(db.clienti.id == id_cliente).select().first()
nome_cliente=dati_cliente.nome
citta_cliente = dati_cliente.citta
indirizzo_cliente = dati_cliente.indirizzo
cap_cliente = dati_cliente.cap
provincia_cliente = dati_cliente.provincia
cf_cliente = dati_cliente.codice_fiscale
pi_cliente = dati_cliente.partita_iva
nazione_cliente = dati_cliente.nazione
codice_banca = dati_cliente.codice_banca
dettagli_banca = db(db.anagrafica_banche.descrizione == codice_banca).select().first()
fattura = FATTURA("FATTURA DIFFERITA",datetime.datetime.now().date().strftime("%d/%m/%Y"),numero_fattura_da_salvare)
fattura.intestazione(nome_cliente,citta_cliente,indirizzo_cliente,cap_cliente,provincia_cliente,nazione_cliente,cf_cliente,pi_cliente)
    fattura.dettaglio(str(id_cliente),dettagli_banca.descrizione,str(dettagli_banca.iban),"PAGAMENTO","SCADENZA")
ddts_id = eval(ddts_id)
fattura.rows=[]
lista_codici_iva = {}
importo_totale = 0
imposta_totale = 0
lista_ddt = []
for ddt_id in ddts_id:
lista_ddt.append(ddt_id)
rows = db(db.saved_righe_in_ddt_cliente.saved_ddt_id == ddt_id).select()
# print "DDT ID : ",ddt_id
for row in rows:
id_ordine = row.id_ordine
try:
pagamento = db(db.ordine_cliente.id == id_ordine).select().first()["pagamento"]
# print "pagamento = ",pagamento
if pagamento is None:
pagamento = db(db.clienti.id == id_cliente).select().first()["pagamento"]
giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
scadenza = datetime.datetime.now().date() + datetime.timedelta(days = int(giorni_da_aggiungere))
scadenza = scadenza.strftime("%d/%m/%Y")
fattura.dettaglio(str(id_cliente),dettagli_banca.descrizione,str(dettagli_banca.iban),pagamento,str(scadenza))
except:
response.flash="Controllare il tipo di pagamento in anagrafica"
return locals()
# print "Aggiunta rig"
sconti = row.sconti
if row.sconti is None:
sconti=""
importo = saved_importo = float(row.quantita) * float(row.prezzo)
importo = Money(str(importo),"EUR")
importo = importo.format("it_IT").encode('ascii', 'ignore').decode('ascii')
prezzo = str(row.prezzo).replace(".",",")
codice_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["codice_iva"]
percentuale_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["percentuale_iva"]
importo_totale +=saved_importo
imposta_totale += return_imposta(saved_importo,int(percentuale_iva))
if not codice_iva in lista_codici_iva:
lista_codici_iva[codice_iva] = saved_importo
else:
lista_codici_iva[codice_iva] += saved_importo
fattura.add_row(row.codice_articolo,row.descrizione,row.riferimento_ordine,row.u_m,row.quantita,prezzo,sconti,importo,codice_iva)
# print lista_codici_iva
bollo_presente = False
bollo = 0
for k,v in lista_codici_iva.iteritems():
codice_iva = k
importo_netto = v
# print "LISTA CODICI : ",codice_iva,importo_netto
dettaglio_iva = db(db.anagrafica_codici_iva.codice_iva == codice_iva).select().first()
percentuale_iva = dettaglio_iva.percentuale_iva
descrizione_iva = dettaglio_iva.descrizione_codice_iva
imposta_iva = return_imposta(v,percentuale_iva)
if dettaglio_iva.bollo_su_importi_esenti is True:
if not bollo_presente:
bollo = db(db.bolli.descrizione=="Fattura").select().first()["valore"]
bollo_presente = True
fattura.footer_2(codice_iva,"",return_currency(importo_netto),descrizione_iva,return_currency(imposta_iva),return_currency(bollo))
bollo = 0
if bollo_presente:
bollo = db(db.bolli.descrizione=="Fattura").select().first()["valore"]
importo_totale += float(bollo)
importo_totale_da_salvare = importo_totale +imposta_iva
importo_totale = Money(str(importo_totale),"EUR")
importo_totale = importo_totale.format("it_IT").encode('ascii', 'ignore').decode('ascii')
fattura.footer(str(importo_totale)," "," "," "," ",str(importo_totale),str(return_currency(imposta_totale)))
fattura.totale(str(importo_totale_da_salvare))
# db.fatture_salvate.insert(nome_cliente=nome_cliente,data_fattura = datetime.datetime.now().strftime("%d/%m/%Y"),numero_fattura = numero_fattura_da_salvare,id_cliente=id_cliente,id_ddt = lista_ddt,totale = importo_totale_da_salvare)
fattura.insert_rows()
fattura.create_pdf()
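# Rough call sketch: the @service.jsonrpc decorators above expose this action
# through web2py's rpc endpoint, so any JSON-RPC client pointed at
# https://<host>/<app>/<controller>/call/jsonrpc could invoke it roughly as
# below; the host, app name and invoice id are placeholders.
#
#   proxy.ristampa_fattura_da_id({'0': 42})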
def ristampa_fattura():
links=[lambda row: BUTTON("Ristampa",_onclick=XML('ristampaFattura('+str(row.id)+')'),_class='button btn btn-default')]
fields=[db.fatture_salvate.data_fattura,db.fatture_salvate.numero_fattura,db.fatture_salvate.nome_cliente,db.fatture_salvate.totale]
fatture_da_ristampare = SQLFORM.grid(db.fatture_salvate,formname='fatture_salvate',maxtextlength=100,create=False,editable=False, deletable=False,searchable=True,sortable=True,paginate=5, formstyle = 'table3cols',csv=False,links=links,fields=fields)
return locals()
def fatturazione_differita_2():
id_fattura = request.args[0]
fattura = db(db.fattura_cliente.id == id_fattura).select().first()
id_cliente = fattura.id_cliente
dal = fattura.dal
al_fixed = fattura.al
al = fattura.al + datetime.timedelta(days=2)
nome_cliente = fattura.nome_cliente
"""
"""
# print "ID CLIENTE IN FATTURA DIFFERITA = ",id_cliente
"""
Select all ddts of the selected client.
"""
# print fattura.dal,al
ddts_id = ((db.ddt_cliente.id_cliente == id_cliente) & (db.ddt_cliente.data_richiesta >= fattura.dal) & (db.ddt_cliente.data_richiesta <= al) & (db.ddt_cliente.fattura_emessa == 'F') & (db.ddt_cliente.numero_ddt != 'None'))
# ddts_id = ((db.ddt_cliente.id_cliente == id_cliente) & (db.ddt_cliente.data_richiesta >= fattura.dal) & (db.ddt_cliente.data_richiesta <= al) & (db.ddt_cliente.numero_ddt != 'None'))
links=[lambda row: BUTTON("Aggiungi DDT",_onclick=XML('aggiungiDDT('+str(row.id)+')'),_class='button btn btn-default')]
db.ddt_cliente.totale = Field.Virtual("Totale", lambda row: calcola_totale_iva_inclusa_da_ddt(row.ddt_cliente.id))
# db.ddt_cliente.totale = Field.Virtual("Totale", lambda row: 0)
fields=[db.ddt_cliente.data_richiesta,db.ddt_cliente.numero_ddt,db.ddt_cliente.totale]
# query=((db.ordine_cliente.id_cliente== id_cliente) & (db.ordine_cliente.ddt_completato =='F'))
# query=(db.ordine_cliente.ddt_completato == '0')
print "---------------"
ddt_da_fatturare = SQLFORM.grid(query=ddts_id,formname='ordini_clienti_ddt',maxtextlength=100,create=False,editable=False, deletable=True,searchable=True,sortable=True,paginate=5, formstyle = 'table3cols',csv=False,links=links,user_signature=True,args=request.args[:1],fields=fields)
return locals()
def calcola_totale_per_mese_da_ddt_cliente():
current_month = 1
return locals()
def calcola_totale_per_anno():
current_year=2018
return locals()
def calcola_totale_per_anno_data():
lista=[]
riga=[]
riga.append("Cliente")
riga.append("Totale")
lista.append(riga)
try:
year = int(request.vars['y'])
except:
year = datetime.datetime.now().year
# day_start,day_end = monthrange(datetime.datetime.now().year, month)
# day_start = 1
# st = str(day_start)+"/"+str(month)+"/"+str(datetime.datetime.now().year)
start_date = datetime.datetime(year,1,1)
end_date = datetime.datetime(year,12,31).date()
# print start_date,end_date
rows1= db(db.clienti).select()
db(db.totali_ddt_mese_).delete()
db.totali_ddt_mese_.id.readable=False;
totalissimo=0
nome_cliente=""
for r1 in rows1:
try:
riga=[]
totale = 0
ddts = db((db.ddt_cliente.id_cliente == r1.id) & (db.ddt_cliente.data_richiesta >= start_date) & (db.ddt_cliente.data_richiesta <= end_date) & (db.ddt_cliente.numero_ddt != 'None')).select()
for ddt in ddts:
nome_cliente = ddt.nome_cliente
# print "NOME CLIENTE = ",nome_cliente,ddt.id
totale += ritorna_int_calcola_totale_iva_esclusa_da_ddt(ddt.id)
if totale > 0:
riga.append(nome_cliente)
riga.append(totale)
lista.append(riga)
db.totali_ddt_mese_.insert(cliente=nome_cliente,totale=ritorna_prezzo_europeo(totale))
totalissimo +=totale
except Exception,e:
# print "ECCEZZIONE ",e
pass
# print lista
form = SQLFORM.grid(db.totali_ddt_mese_,deletable=False,searchable=True,sortable=True,paginate=5, formstyle = 'table3cols',csv=True,user_signature=True,args=request.args[:1])
return dict(lista=json.dumps(lista),form=form,totalissimo = ritorna_prezzo_europeo(totalissimo))
return locals()
def calcola_totale_per_anno_leonardo():
current_year=1
return locals()
def calcola_totale_per_anno_leonardo_data():
lista=[]
riga=[]
riga.append("Cliente")
riga.append("Totale")
lista.append(riga)
form=""
totalissimo=1000
try:
year = int(request.vars['y'])
except:
year = datetime.datetime.now().year
# day_start,day_end = monthrange(datetime.datetime.now().year, month)
day_start = 1
st = str(day_start)+"/"+str(1)+"/"+str(year)
start_date = datetime.datetime(year,1,day_start)
end_date = datetime.datetime(year,12,31).date() + timedelta(days=1)
# print start_date,end_date
# return dict(lista=json.dumps(lista),form=form,totalissimo = ritorna_prezzo_europeo(totalissimo))
rows1= db(db.clienti.id==41).select()
db(db.totali_ddt_anno_).delete()
db.totali_ddt_anno_.id.readable=False;
totalissimo=0
nome_cliente=""
for r1 in rows1:
try:
riga=[]
totale = 0
dest1 = db((db.ddt_cliente.id_cliente == r1.id) & (db.ddt_cliente.data_richiesta >= start_date) & (db.ddt_cliente.data_richiesta <= end_date) & (db.ddt_cliente.numero_ddt != 'None') & (db.ddt_cliente.consegna.contains('CHIETI'))).select()
dest2 = db((db.ddt_cliente.id_cliente == r1.id) & (db.ddt_cliente.data_richiesta >= start_date) & (db.ddt_cliente.data_richiesta <= end_date) & (db.ddt_cliente.numero_ddt != 'None') & (db.ddt_cliente.consegna.contains('BISENZIO'))).select()
dest3 = db((db.ddt_cliente.id_cliente == r1.id) & (db.ddt_cliente.data_richiesta >= start_date) & (db.ddt_cliente.data_richiesta <= end_date) & (db.ddt_cliente.numero_ddt != 'None') & (db.ddt_cliente.consegna.contains('BAINSIZZA'))).select()
dest4 = db((db.ddt_cliente.id_cliente == r1.id) & (db.ddt_cliente.data_richiesta >= start_date) & (db.ddt_cliente.data_richiesta <= end_date) & (db.ddt_cliente.numero_ddt != 'None') & (db.ddt_cliente.consegna.contains('NERVIANO'))).select()
dest5 = db((db.ddt_cliente.id_cliente == r1.id) & (db.ddt_cliente.data_richiesta >= start_date) & (db.ddt_cliente.data_richiesta <= end_date) & (db.ddt_cliente.numero_ddt != 'None') & (db.ddt_cliente.consegna.contains('ADRIATICA'))).select()
riga=[]
totale=0
for ddt in dest1:
totale += ritorna_int_calcola_totale_iva_esclusa_da_ddt(ddt.id)
if totale > 0:
consegna='CHIETI'
riga.append(consegna)
riga.append(totale)
lista.append(riga)
db.totali_ddt_anno_.insert(destinazione=consegna,totale=ritorna_prezzo_europeo(totale))
totalissimo +=totale
riga=[]
totale=0
for ddt in dest2:
totale += ritorna_int_calcola_totale_iva_esclusa_da_ddt(ddt.id)
if totale > 0:
consegna='<NAME>'
riga.append(consegna)
riga.append(totale)
lista.append(riga)
db.totali_ddt_anno_.insert(destinazione=consegna,totale=ritorna_prezzo_europeo(totale))
totalissimo +=totale
riga=[]
totale=0
for ddt in dest3:
totale += ritorna_int_calcola_totale_iva_esclusa_da_ddt(ddt.id)
if totale > 0:
consegna='<NAME>'
riga.append(consegna)
riga.append(totale)
lista.append(riga)
db.totali_ddt_anno_.insert(destinazione=consegna,totale=ritorna_prezzo_europeo(totale))
totalissimo +=totale
riga=[]
totale=0
for ddt in dest4:
totale += ritorna_int_calcola_totale_iva_esclusa_da_ddt(ddt.id)
if totale > 0:
consegna='NERVIANO'
riga.append(consegna)
riga.append(totale)
lista.append(riga)
db.totali_ddt_anno_.insert(destinazione=consegna,totale=ritorna_prezzo_europeo(totale))
totalissimo +=totale
riga=[]
totale=0
for ddt in dest5:
totale += ritorna_int_calcola_totale_iva_esclusa_da_ddt(ddt.id)
if totale > 0:
consegna='FOCACCIA GROUP SRL'
riga.append(consegna)
riga.append(totale)
lista.append(riga)
db.totali_ddt_anno_.insert(destinazione=consegna,totale=ritorna_prezzo_europeo(totale))
totalissimo +=totale
except Exception,e:
# print "ECCEZZIONE ",e
pass
# print lista
form = SQLFORM.grid(db.totali_ddt_anno_,deletable=False,searchable=True,sortable=True,paginate=5, formstyle = 'table3cols',csv=True,user_signature=True,args=request.args[:1])
return dict(lista=json.dumps(lista),form=form,totalissimo = ritorna_prezzo_europeo(totalissimo))
def calcola_totale_per_mese_da_ddt_cliente_data():
lista=[]
riga=[]
riga.append("Cliente")
riga.append("Totale")
lista.append(riga)
try:
month = int(request.vars['m'])
except:
month = datetime.datetime.now().month
day_start,day_end = monthrange(datetime.datetime.now().year, month)
day_start = 1
st = str(day_start)+"/"+str(month)+"/"+str(datetime.datetime.now().year)
start_date = datetime.datetime(datetime.datetime.now().year,month,day_start)
end_date = datetime.datetime(datetime.datetime.now().year,month,day_end).date() + timedelta(days=1)
# print start_date,end_date
rows1= db(db.clienti).select()
db(db.totali_ddt_mese_).delete()
db.totali_ddt_mese_.id.readable=False;
totalissimo=0
nome_cliente=""
for r1 in rows1:
try:
riga=[]
totale = 0
ddts = db((db.ddt_cliente.id_cliente == r1.id) & (db.ddt_cliente.data_richiesta >= start_date) & (db.ddt_cliente.data_richiesta <= end_date) & (db.ddt_cliente.numero_ddt != 'None')).select()
for ddt in ddts:
nome_cliente = ddt.nome_cliente
# print "NOME CLIENTE = ",nome_cliente,ddt.id
totale += ritorna_int_calcola_totale_iva_esclusa_da_ddt(ddt.id)
if totale > 0:
riga.append(nome_cliente)
riga.append(totale)
lista.append(riga)
db.totali_ddt_mese_.insert(cliente=nome_cliente,totale=ritorna_prezzo_europeo(totale))
totalissimo +=totale
except Exception,e:
# print "ECCEZZIONE ",e
pass
# print lista
form = SQLFORM.grid(db.totali_ddt_mese_,deletable=False,searchable=True,sortable=True,paginate=5, formstyle = 'table3cols',csv=True,user_signature=True,args=request.args[:1])
return dict(lista=json.dumps(lista),form=form,totalissimo = ritorna_prezzo_europeo(totalissimo))
def ritorna_prezzo_europeo(importo):
importo = Money(str(importo),"EUR")
importo = importo.format("it_IT").encode('ascii', 'ignore').decode('ascii')
return importo
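# For instance, ritorna_prezzo_europeo(1234.5) yields an Italian-style figure
# along the lines of "1.234,50"; the exact string depends on the ``money``
# library's it_IT formatting, and the ascii round-trip above drops the euro
# symbol.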
def ritorna_int_calcola_totale_iva_esclusa_da_ddt(id_ddt):
rows = db(db.saved_righe_in_ddt_cliente.saved_ddt_id == id_ddt).select()
# print "DDT ID : ",id_ddt
totale = 0
importo_totale = 0
imposta_totale = 0
for row in rows:
if not "commento" in row.codice_articolo:
id_ordine = row.id_ordine
try:
importo = saved_importo = float(row.quantita) * float(row.prezzo)
importo = Money(str(importo),"EUR")
importo = importo.format("it_IT").encode('ascii', 'ignore').decode('ascii')
prezzo = str(row.prezzo).replace(".",",")
# codice_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["codice_iva"]
# percentuale_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["percentuale_iva"]
importo_totale += saved_importo
# imposta_totale += return_imposta(saved_importo,int(percentuale_iva))
except:
pass
        ## back off to previous method using HTS labels:
remover = SilenceRemover(n_cmp = cfg.cmp_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type, remove_frame_features = cfg.add_frame_features, subphone_feats = cfg.subphone_feats)
remover.remove_silence(nn_cmp_file_list, in_label_align_file_list, nn_cmp_file_list) # save to itself
### save acoustic normalisation information for normalising the features back
var_dir = file_paths.var_dir
var_file_dict = file_paths.get_var_dic()
### normalise output acoustic data
if cfg.NORMCMP:
logger.info('normalising acoustic (output) features using method %s' % cfg.output_feature_normalisation)
cmp_norm_info = None
if cfg.output_feature_normalisation == 'MVN':
normaliser = MeanVarianceNorm(feature_dimension=cfg.cmp_dim)
if cfg.GenTestList:
# load mean std values
global_mean_vector, global_std_vector = normaliser.load_mean_std_values(norm_info_file)
else:
###calculate mean and std vectors on the training data, and apply on the whole dataset
global_mean_vector = normaliser.compute_mean(nn_cmp_file_list[0:cfg.train_file_number], 0, cfg.cmp_dim)
global_std_vector = normaliser.compute_std(nn_cmp_file_list[0:cfg.train_file_number], global_mean_vector, 0, cfg.cmp_dim)
# for hmpd vocoder we don't need to normalize the
# pdd values
if cfg.vocoder_type == 'hmpd':
stream_start_index = {}
dimension_index = 0
recorded_vuv = False
vuv_dimension = None
for feature_name in cfg.out_dimension_dict.keys():
if feature_name != 'vuv':
stream_start_index[feature_name] = dimension_index
else:
vuv_dimension = dimension_index
recorded_vuv = True
dimension_index += cfg.out_dimension_dict[feature_name]
logger.info('hmpd pdd values are not normalized since they are in 0 to 1')
global_mean_vector[:,stream_start_index['pdd']: stream_start_index['pdd'] + cfg.out_dimension_dict['pdd']] = 0
global_std_vector[:,stream_start_index['pdd']: stream_start_index['pdd'] + cfg.out_dimension_dict['pdd']] = 1
normaliser.feature_normalisation(nn_cmp_file_list, nn_cmp_norm_file_list)
cmp_norm_info = numpy.concatenate((global_mean_vector, global_std_vector), axis=0)
elif cfg.output_feature_normalisation == 'MINMAX':
min_max_normaliser = MinMaxNormalisation(feature_dimension = cfg.cmp_dim, min_value = 0.01, max_value = 0.99)
if cfg.GenTestList:
min_max_normaliser.load_min_max_values(norm_info_file)
else:
min_max_normaliser.find_min_max_values(nn_cmp_file_list[0:cfg.train_file_number])
min_max_normaliser.normalise_data(nn_cmp_file_list, nn_cmp_norm_file_list)
cmp_min_vector = min_max_normaliser.min_vector
cmp_max_vector = min_max_normaliser.max_vector
cmp_norm_info = numpy.concatenate((cmp_min_vector, cmp_max_vector), axis=0)
else:
logger.critical('Normalisation type %s is not supported!\n' %(cfg.output_feature_normalisation))
raise
if not cfg.GenTestList:
cmp_norm_info = numpy.array(cmp_norm_info, 'float32')
fid = open(norm_info_file, 'wb')
cmp_norm_info.tofile(fid)
fid.close()
logger.info('saved %s vectors to %s' %(cfg.output_feature_normalisation, norm_info_file))
feature_index = 0
for feature_name in list(cfg.out_dimension_dict.keys()):
feature_std_vector = numpy.array(global_std_vector[:,feature_index:feature_index+cfg.out_dimension_dict[feature_name]], 'float32')
fid = open(var_file_dict[feature_name], 'w')
feature_var_vector = feature_std_vector**2
feature_var_vector.tofile(fid)
fid.close()
logger.info('saved %s variance vector to %s' %(feature_name, var_file_dict[feature_name]))
feature_index += cfg.out_dimension_dict[feature_name]
train_x_file_list, train_y_file_list = file_paths.get_train_list_x_y()
valid_x_file_list, valid_y_file_list = file_paths.get_valid_list_x_y()
test_x_file_list, test_y_file_list = file_paths.get_test_list_x_y()
# we need to know the label dimension before training the DNN
# computing that requires us to look at the labels
#
label_normaliser = HTSLabelNormalisation(question_file_name=cfg.question_file_name, add_frame_features=cfg.add_frame_features, subphone_feats=cfg.subphone_feats)
add_feat_dim = sum(cfg.additional_features.values())
lab_dim = label_normaliser.dimension + add_feat_dim + cfg.appended_input_dim
if cfg.VoiceConversion:
lab_dim = cfg.cmp_dim
logger.info('label dimension is %d' % lab_dim)
combined_model_arch = str(len(hidden_layer_size))
for hid_size in hidden_layer_size:
combined_model_arch += '_' + str(hid_size)
nnets_file_name = file_paths.get_nnets_file_name()
temp_dir_name = file_paths.get_temp_nn_dir_name()
gen_dir = os.path.join(gen_dir, temp_dir_name)
if cfg.switch_to_keras or cfg.switch_to_tensorflow:
### set configuration variables ###
cfg.inp_dim = lab_dim
cfg.out_dim = cfg.cmp_dim
cfg.inp_feat_dir = nn_label_norm_dir
cfg.out_feat_dir = nn_cmp_norm_dir
cfg.pred_feat_dir = gen_dir
if cfg.GenTestList and cfg.test_synth_dir!="None":
cfg.inp_feat_dir = cfg.test_synth_dir
cfg.pred_feat_dir = cfg.test_synth_dir
if cfg.switch_to_keras:
### call kerasclass and use an instance ###
from run_keras_with_merlin_io import KerasClass
keras_instance = KerasClass(cfg)
elif cfg.switch_to_tensorflow:
### call Tensorflowclass and use an instance ###
from run_tensorflow_with_merlin_io import TensorflowClass
tf_instance = TensorflowClass(cfg)
### DNN model training
if cfg.TRAINDNN:
var_dict = load_covariance(var_file_dict, cfg.out_dimension_dict)
logger.info('training DNN')
fid = open(norm_info_file, 'rb')
cmp_min_max = numpy.fromfile(fid, dtype=numpy.float32)
fid.close()
cmp_min_max = cmp_min_max.reshape((2, -1))
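# row 0 holds the per-dimension mean (MVN) or min (MINMAX) vector and row 1 the std or max vector,
# matching the order in which numpy.concatenate wrote them to norm_info_file above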
cmp_mean_vector = cmp_min_max[0, ]
cmp_std_vector = cmp_min_max[1, ]
try:
os.makedirs(model_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# not an error - just means directory already exists
pass
else:
logger.critical('Failed to create model directory %s' % model_dir)
logger.critical(' OS error was: %s' % e.strerror)
raise
try:
if cfg.switch_to_keras:
keras_instance.train_keras_model()
elif cfg.switch_to_tensorflow:
tf_instance.train_tensorflow_model()
else:
train_DNN(train_xy_file_list = (train_x_file_list, train_y_file_list), \
valid_xy_file_list = (valid_x_file_list, valid_y_file_list), \
nnets_file_name = nnets_file_name, \
n_ins = lab_dim, n_outs = cfg.cmp_dim, ms_outs = cfg.multistream_outs, \
hyper_params = cfg.hyper_params, buffer_size = cfg.buffer_size, plot = cfg.plot, var_dict = var_dict,
cmp_mean_vector = cmp_mean_vector, cmp_std_vector = cmp_std_vector,init_dnn_model_file=cfg.start_from_trained_model)
except KeyboardInterrupt:
logger.critical('train_DNN interrupted via keyboard')
# Could 'raise' the exception further, but that causes a deep traceback to be printed
# which we don't care about for a keyboard interrupt. So, just bail out immediately
sys.exit(1)
except:
logger.critical('train_DNN threw an exception')
raise
if cfg.GENBNFEA:
# Please only turn on this step when you want to generate bottleneck features from the DNN
gen_dir = file_paths.bottleneck_features
bottleneck_size = min(hidden_layer_size)
bottleneck_index = 0
for i in range(len(hidden_layer_size)):
if hidden_layer_size[i] == bottleneck_size:
bottleneck_index = i
logger.info('generating bottleneck features from DNN')
try:
os.makedirs(gen_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# not an error - just means directory already exists
pass
else:
logger.critical('Failed to create generation directory %s' % gen_dir)
logger.critical(' OS error was: %s' % e.strerror)
raise
gen_file_id_list = file_id_list[0:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
test_x_file_list = nn_label_norm_file_list[0:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
gen_file_list = prepare_file_path_list(gen_file_id_list, gen_dir, cfg.cmp_ext)
dnn_hidden_generation(test_x_file_list, nnets_file_name, lab_dim, cfg.cmp_dim, gen_file_list, bottleneck_index)
### generate parameters from DNN
gen_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
test_x_file_list = nn_label_norm_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
if cfg.GenTestList:
gen_file_id_list = test_id_list
test_x_file_list = nn_label_norm_file_list
if cfg.test_synth_dir!="None":
gen_dir = cfg.test_synth_dir
if cfg.DNNGEN:
logger.info('generating from DNN')
try:
os.makedirs(gen_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# not an error - just means directory already exists
pass
else:
logger.critical('Failed to create generation directory %s' % gen_dir)
logger.critical(' OS error was: %s' % e.strerror)
raise
gen_file_list = prepare_file_path_list(gen_file_id_list, gen_dir, cfg.cmp_ext)
if cfg.switch_to_keras:
keras_instance.test_keras_model()
elif cfg.switch_to_tensorflow:
tf_instance.test_tensorflow_model()
else:
reshape_io = True if cfg.rnn_batch_training else False
dnn_generation(test_x_file_list, nnets_file_name, lab_dim, cfg.cmp_dim, gen_file_list, reshape_io)
logger.debug('denormalising generated output using method %s' % cfg.output_feature_normalisation)
fid = open(norm_info_file, 'rb')
cmp_min_max = numpy.fromfile(fid, dtype=numpy.float32)
fid.close()
cmp_min_max = cmp_min_max.reshape((2, -1))
cmp_min_vector = cmp_min_max[0, ]
cmp_max_vector = cmp_min_max[1, ]
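# note: for the MVN branch below these two rows actually hold the mean and std vectors;
# the min/max variable names are simply reused for both normalisation schemes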
if cfg.output_feature_normalisation == 'MVN':
denormaliser = MeanVarianceNorm(feature_dimension = cfg.cmp_dim)
denormaliser.feature_denormalisation(gen_file_list, gen_file_list, cmp_min_vector, cmp_max_vector)
elif cfg.output_feature_normalisation == 'MINMAX':
denormaliser = MinMaxNormalisation(cfg.cmp_dim, min_value = 0.01, max_value = 0.99, min_vector = cmp_min_vector, max_vector = cmp_max_vector)
denormaliser.denormalise_data(gen_file_list, gen_file_list)
else:
logger.critical('denormalising method %s is not supported!\n' %(cfg.output_feature_normalisation))
raise
if cfg.AcousticModel:
##perform MLPG to smooth parameter trajectory
## lf0 is included, so the output features must have vuv.
generator = ParameterGeneration(gen_wav_features = cfg.gen_wav_features, enforce_silence = cfg.enforce_silence)
if cfg.singing:
meta = pickle.load(open(os.path.join(cfg.singing_inter_data_dir, 'meta'), 'rb'))
generator.acoustic_decomposition(gen_file_list, cfg.cmp_dim, cfg.out_dimension_dict, cfg.file_extension_dict, var_file_dict, do_MLPG=cfg.do_MLPG, cfg=cfg, meta=meta)
else:
generator.acoustic_decomposition(gen_file_list, cfg.cmp_dim, cfg.out_dimension_dict, cfg.file_extension_dict, var_file_dict, do_MLPG=cfg.do_MLPG, cfg=cfg)
if cfg.DurationModel:
### Perform duration normalisation (min. state duration set to 1) ###
gen_dur_list = prepare_file_path_list(gen_file_id_list, gen_dir, cfg.dur_ext)
gen_label_list = prepare_file_path_list(gen_file_id_list, gen_dir, cfg.lab_ext)
in_gen_label_align_file_list = prepare_file_path_list(gen_file_id_list, cfg.in_label_align_dir, cfg.lab_ext, False)
generator = ParameterGeneration(gen_wav_features = cfg.gen_wav_features)
if cfg.singing:
meta = pickle.load(open(os.path.join(cfg.singing_inter_data_dir, 'meta'), 'rb'))
generator.duration_decomposition(gen_file_list, cfg.cmp_dim, cfg.out_dimension_dict, cfg.file_extension_dict, meta)
else:
generator.duration_decomposition(gen_file_list, cfg.cmp_dim, cfg.out_dimension_dict, cfg.file_extension_dict)
label_modifier = HTSLabelModification(silence_pattern = cfg.silence_pattern, label_type = cfg.label_type)
label_modifier.modify_duration_labels(in_gen_label_align_file_list, gen_dur_list, gen_label_list)
### generate wav
if cfg.GENWAV:
logger.info('reconstructing waveform(s)')
generate_wav(gen_dir, gen_file_id_list, cfg) # generated speech
# generate_wav(nn_cmp_dir, gen_file_id_list, cfg) # reference copy synthesis speech
### setting back to original conditions before calculating objective scores ###
if cfg.GenTestList:
in_label_align_file_list = prepare_file_path_list(file_id_list, cfg.in_label_align_dir, cfg.lab_ext, False)
binary_label_file_list = prepare_file_path_list(file_id_list, binary_label_dir, cfg.lab_ext)
gen_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
### evaluation: RMSE and CORR for duration
if cfg.CALMCD and cfg.DurationModel:
logger.info('calculating MCD')
ref_data_dir = os.path.join(inter_data_dir, 'ref_data')
ref_dur_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.dur_ext)
in_gen_label_align_file_list = in_label_align_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
calculator = IndividualDistortionComp()
valid_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number]
test_file_id_list = file_id_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
if cfg.remove_silence_using_binary_labels:
untrimmed_reference_data = in_file_list_dict['dur'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
trim_silence(untrimmed_reference_data, ref_dur_list, cfg.dur_dim, \
untrimmed_test_labels, lab_dim, silence_feature)
else:
remover = SilenceRemover(n_cmp = cfg.dur_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type, remove_frame_features = cfg.add_frame_features)
remover.remove_silence(in_file_list_dict['dur'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_dur_list)
valid_dur_rmse, valid_dur_corr = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.dur_ext, cfg.dur_dim)
test_dur_rmse, test_dur_corr = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.dur_ext, cfg.dur_dim)
logger.info('Develop: DNN -- RMSE: %.3f frames/phoneme; CORR: %.3f; ' \
%(valid_dur_rmse, valid_dur_corr))
logger.info('Test: DNN -- RMSE: %.3f frames/phoneme; CORR: %.3f; ' \
%(test_dur_rmse, test_dur_corr))
### evaluation: calculate distortion
if cfg.CALMCD and cfg.AcousticModel:
logger.info('calculating MCD')
ref_data_dir = os.path.join(inter_data_dir, 'ref_data')
ref_lf0_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.lf0_ext)
# for straight or world vocoders
ref_mgc_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.mgc_ext)
ref_bap_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.bap_ext)
# for magphase vocoder
ref_mag_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.mag_ext)
ref_real_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.real_ext)
ref_imag_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.imag_ext)
# for GlottDNN vocoder
ref_lsf_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.lsf_ext)
ref_slsf_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.slsf_ext)
ref_gain_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.gain_ext)
ref_hnr_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.hnr_ext)
# for pulsemodel vocoder
ref_pdd_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.pdd_ext)
in_gen_label_align_file_list = in_label_align_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
calculator = IndividualDistortionComp()
spectral_distortion = 0.0
bap_mse = 0.0
f0_mse = 0.0
vuv_error = 0.0
valid_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number]
test_file_id_list = file_id_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
if cfg.remove_silence_using_binary_labels:
## get lab_dim:
label_composer = LabelComposer()
label_composer.load_label_configuration(cfg.label_config_file)
lab_dim=label_composer.compute_label_dimension()
## use first feature in label -- hardcoded for now
silence_feature = 0
## Use these to trim silence:
untrimmed_test_labels = binary_label_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
if 'mgc' in cfg.in_dimension_dict:
if cfg.remove_silence_using_binary_labels:
untrimmed_reference_data = in_file_list_dict['mgc'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
trim_silence(untrimmed_reference_data, ref_mgc_list, cfg.mgc_dim, \
untrimmed_test_labels, lab_dim, | |
dut: string
:param nvo_name: evpn instance name to be created
:type nvo_name: string
:param vtep_name: vtep name to be bound to evpn instance
:type vtep_name: string
:param config: it takes value as 'yes' or 'no' to configure or remove evpn instance respectively
:type config: string
:param : cli_type
:param : skip_error
:return: None
usage:
create_evpn_instance(dut1, "dut1EVPN", "dut1VTEP", cli_type='click')
create_evpn_instance(dut1, "dut1EVPN", "dut1VTEP", config='no', cli_type='klish')
Created by: Julius <<EMAIL>>
"""
cli_type = st.get_ui_type(dut,cli_type=cli_type)
if config == 'yes':
action = 'add'
else:
vtep_name = ''
action = 'del'
if cli_type == 'click':
command = "config vxlan evpn_nvo {} {} {}".format(action, nvo_name, vtep_name)
st.debug(command)
return st.config(dut, command, skip_error_check=skip_error, type=cli_type)
elif cli_type == 'klish':
st.error("NVO command is not supported in klish")
return False
elif cli_type in ['rest-put','rest-patch']:
st.error("NVO config through OCYANG URI not supported")
return False
else:
st.error("Invalid CLI type - {}".format(cli_type))
return False
def map_vlan_vni(dut, vtep_name, vlan_id, vni_id, range_val='1', config='yes', skip_error=False, cli_type=''):
"""
purpose:
This definition is used to create VLAN to VNI mapping under EVPN instance
Arguments:
:param dut: device to be configured
:type dut: string
:param vtep_name: VTEP name where VLAN to VNI mapping needs to be done
:type vtep_name: string
:param vlan_id: vlan id to be mapped to VNI
:type vlan_id: string
:param vni_id: VNI id where vlan to be mapped
:type vni_id: string
:param range_val: range of vlans to be mapped to VNI
:type range_val: string
:param config: it takes value as 'yes' or 'no' to configure or remove evpn instance respectively
:type config: string
:param : cli_type
:param : skip_error
:return: None
usage:
map_vlan_vni(dut1, "dut1VTEP", "100", "100", cli_type='click')
map_vlan_vni(dut1, "dut1VTEP", "100", "100", config="no", cli_type='click')
map_vlan_vni(dut1, "dut1VTEP", "100", "100", range="10")
map_vlan_vni(dut1, "dut1VTEP", "100", "100", range="10", config="no")
Created by: Julius <<EMAIL>>
"""
cli_type = st.get_ui_type(dut,cli_type=cli_type)
range_val = int(range_val)
if config == 'yes':
conf_str = ''
action = 'add'
else:
conf_str = 'no'
action = 'del'
if cli_type == 'click':
if range_val > 1:
vlan_end = int(vlan_id) + range_val - 1
command = "config vxlan map_range {} {} {} {} {}".format(action, vtep_name, vlan_id, vlan_end, vni_id)
elif range_val == 1:
command = "config vxlan map {} {} {} {}".format(action, vtep_name, vlan_id, vni_id)
elif cli_type == 'klish':
command = []
command.append('interface vxlan {}'.format(vtep_name))
if range_val == 1:
command.append('{} map vni {} vlan {}'.format(conf_str, vni_id, vlan_id))
elif range_val > 1:
command.append('{} map vni {} vlan {} count {}'.format(conf_str, vni_id, vlan_id, range_val))
command.append('exit')
elif cli_type in ['rest-put','rest-patch']:
if range_val == 1:
rest_urls = st.get_datastore(dut, "rest_urls")
vlan_data = str(vlan_id) if type(vlan_id) is not str else vlan_id
vlan_str = 'Vlan'+vlan_data
vni_id = int(vni_id) if type(vni_id) is not int else vni_id
if config == 'yes':
url = rest_urls['config_vlan_vni_mapping'].format(vlan_str)
payload = { "openconfig-vxlan:vni-instance":
[{"vni-id": vni_id,
"source-nve": vtep_name,
"config": {"vni-id": vni_id, "source-nve": vtep_name}
}]
}
response = config_rest(dut, http_method=cli_type, rest_url=url, json_data=payload,get_response=True)
error_list = response['output'].get('ietf-restconf:errors', {}).get('error', [])
if error_list:
err_msg = error_list[0].get('error-message', '')
st.banner('FAIL-OCYANG: vlan-vni map failed')
return err_msg
else:
url = rest_urls['delete_vlan_vni_mapping'].format(vlan_str,vni_id,vtep_name)
if not delete_rest(dut, rest_url=url):
st.banner('FAIL-OCYANG')
return False
return
elif range_val > 1:
### In case of a range, the above URI would need to be called multiple times, so fall back to klish instead
cli_type = 'klish'
command = []
command.append('interface vxlan {}'.format(vtep_name))
command.append('{} map vni {} vlan {} count {}'.format(conf_str, vni_id, vlan_id, range_val))
command.append('exit')
else:
st.error("Invalid CLI type - {}".format(cli_type))
return False
st.debug(command)
return st.config(dut, command, skip_error_check=skip_error, type=cli_type)
def parse_rest_output_vxlan_tunnel(response):
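# Illustrative sketch only (the field names below are assumed from the parsing logic in this function):
# a response like
#   {'output': {'openconfig-vxlan:vxlan-tunnel-infos': {'vxlan-tunnel-info': [
#       {'state': {'source-ip': '1.1.1.1', 'peer-ip': '2.2.2.2', 'status': 'UP'}}]}}
# would be parsed into
#   [{'total_count': 1, 'src_vtep': '1.1.1.1', 'rem_vtep': '2.2.2.2', 'tun_status': 'oper_up'}]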
tunnel_list = response['output'].get('openconfig-vxlan:vxlan-tunnel-infos',{}).get('vxlan-tunnel-info',[])
tunnel_count = len(tunnel_list)
result = []
for tunnel in tunnel_list:
dict = {}
dict['total_count'] = tunnel_count
dict['src_vtep'] = tunnel.get('state',{}).get('source-ip',"")
dict['rem_vtep'] = tunnel.get('state',{}).get('peer-ip',"")
tunnel_status = tunnel.get('state',{}).get('status',"")
if tunnel_status == 'UP':
dict['tun_status'] = 'oper_up'
elif tunnel_status == 'DOWN':
dict['tun_status'] = 'oper_down'
else:
## To handle later for any other type
dict['tun_status'] = tunnel['state']['status']
result.append(dict)
return result
def verify_vxlan_tunnel_status(dut, src_vtep, rem_vtep_list, exp_status_list, cli_type=''):
'''
purpose:
This definition is used to verify operational status of VxLAN tunnel
Arguments:
:param dut: Device name where the command to be executed
:type dut: string
:param src_vtep: ip address of local VTEP
:type src_vtep: string
:param rem_vtep_list: list of remote VTEP ip address
:type rem_vtep_list: string
:param exp_status_list: list of expected operational status of VTEP's; example ['oper_down','oper_up']
:type exp_status_list: list
:return: True/False True - success case; False - Failure case
usage: verify_vxlan_tunnel_status(dut1,'1.1.1.1',['2.2.2.2','3.3.3.3'],['oper_up','oper_up'])
verify_vxlan_tunnel_status(dut1,'1.1.1.1',['2.2.2.2','3.3.3.3'],['oper_down','oper_up'])
Created by: Julius <<EMAIL>>
'''
cli_type = st.get_ui_type(dut,cli_type=cli_type)
success = True
if cli_type in ['click','klish']:
cli_out = st.show(dut, 'show vxlan tunnel', type=cli_type)
elif cli_type in ['rest-put','rest-patch']:
rest_urls = st.get_datastore(dut, "rest_urls")
url = rest_urls['vxlan_tunnel_info']
response = get_rest(dut, rest_url=url)
if response['output']:
cli_out = parse_rest_output_vxlan_tunnel(response)
else:
st.error("OCYANG-FAIL: verify vxlan tunnel - Get Response is empty")
return False
else:
st.error("Invalid CLI type - {}".format(cli_type))
return False
for rem_vtep,status in zip(rem_vtep_list,exp_status_list):
fil_out = filter_and_select(cli_out, ["tun_status"], {"src_vtep" : src_vtep,
"rem_vtep" : rem_vtep})
if not fil_out:
st.error('No entry found for source VTEP: {} and remote VTEP: {} in '
'output: {}'.format(src_vtep,rem_vtep,cli_out))
success = False
continue
else:
fil_out = fil_out[0]
if fil_out["tun_status"] == status:
st.log('Match found; remote VTEP {} status {}; expected '
'{}'.format(rem_vtep,fil_out["tun_status"],status))
else:
st.error('Match NOT found; expected status for remote VTEP: {} is : {} '
'but found: {}'.format(rem_vtep,status,fil_out["tun_status"]))
success = False
return True if success else False
def verify_bgp_l2vpn_evpn_route_type_macip(dut,**kwargs):
"""
Author: <NAME> (<EMAIL>)
verify_bgp_l2vpn_evpn_route_type_macip(dut=data.dut1,evpn_type_2_prefix="[2]:[0]:[48]:[00:21:ee:00:10:16]",rd="1.1.1.1:2",status_code="*>",metric="",next_hop="172.16.58.3",weight="32768",path="",origin_code="i")
verify_bgp_l2vpn_evpn_route_type_macip(dut=data.dut1,evpn_type_2_prefix="[2]:[0]:[48]:[00:21:ee:00:10:16]:[32]:[5172.16.31.10]",rd="1.1.1.1:2",status_code="*>",metric="",next_hop="172.16.58.3",weight="32768",path="",origin_code="i")
To verify bgp l2vpn evpn route type macip
:param dut:
:param evpn_type_2_prefix:
:param rd:
:param path:
:param status_code:
:param weight:
:param metric:
:param next_hop:
:param origin_code:
:param displayed_prefixes:
:param no_of_paths:
:return: True/False
"""
cli_type = kwargs.pop('cli_type', st.get_ui_type(dut,**kwargs))
cli_type = "vtysh" if cli_type == 'click' else cli_type
if cli_type in ["rest-put", "rest-patch"]:
ret_val=True
rest_urls = st.get_datastore(dut, "rest_urls")
url = rest_urls["get_evpn_routes"]
rest_out = get_rest(dut, rest_url=url, timeout=30)
if rest_out["status"] == 200:
out_dict = {}
rest_out = rest_out["output"]["openconfig-bgp-evpn-ext:routes"]["route"]
match = False
for i in rest_out:
try:
prefix = i["prefix"]
if prefix == kwargs["evpn_type_2_prefix"] and i["state"]['valid-route']:
out_dict["next_hop"]=i["attr-sets"]["next-hop"]
if 'rd' in kwargs:
out_dict["rd"]=i["route-distinguisher"]
if 'origin_code' in kwargs and i['attr-sets']['origin'] == "IGP":
out_dict["origin_code"]= "i"
if 'origin_code' in kwargs and i['attr-sets']['origin'] == "EGP":
out_dict["origin_code"]= "e"
if 'origin_code' in kwargs and i['attr-sets']['origin'] == "incomplete":
out_dict["origin_code"]= "?"
if 'path' in kwargs:
as_path = ""
for as1 in i['attr-sets']['as-path']['as-segment'][0]['state']['member']:
as_path = as_path + str(as1) + " "
as_path = as_path.strip()
out_dict["path"]= as_path
for key in out_dict.keys():
if key in kwargs:
if out_dict[key] == kwargs[key]:
st.log("Expected value {} found for key: {} for route {}".format(out_dict[key], key,prefix))
match = True
else:
st.log("Match NOT found; expected value {} but got"
" {}".format(kwargs[key], out_dict[key]))
ret_val = False
if match:
break
except Exception:
continue
if not match:
st.log("MAC IP Route {} was not found in the rest output".format(kwargs["evpn_type_2_prefix"]))
return False
elif not ret_val:
return False
else:
return True
else:
st.log("REST command execution failed")
ret_val = False
else:
output = st.show(dut,"show bgp l2vpn evpn route type macip",type=cli_type)
if len(output) == 0:
st.error("Output is Empty")
return False
count = 0
no_common_key = 0
ret_val1 = False
dict1 = {}
common_key_list = ['bgp_version','router_id','displayed_prefixes','no_of_paths']
for a in output:
for key in a:
output[output.index(a)][key]=output[output.index(a)][key].lstrip()
output[output.index(a)][key]=output[output.index(a)][key].rstrip()
for key in kwargs:
if key in common_key_list:
no_common_key = no_common_key + 1
if no_common_key > 0:
rlist = output[0]
count = 0
for key in kwargs:
if rlist[key] == kwargs[key] and key in common_key_list:
count = count + 1
if no_common_key == count:
ret_val1 = True
for key in kwargs:
if key in common_key_list:
st.log("Match: Match key {} found => {} : {}".format(key,kwargs[key],rlist[key]))
else:
for key in kwargs:
if key in common_key_list:
if rlist[key] == kwargs[key]:
st.log("Match: Match key {} found => {} : {}".format(key,kwargs[key],rlist[key]))
else:
st.log("No-Match: Match key {} NOT found => {} : {}".format(key,kwargs[key],rlist[key]))
st.log("\n")
for key in common_key_list:
if key in kwargs:
dict1[key] = kwargs[key]
del kwargs[key]
if no_common_key > 0 and ret_val1 is False:
st.error("DUT {} -> Match Not Found {}".format(dut,dict1))
return ret_val1
ret_val = "True"
#Converting all kwargs to list type to handle single or list of instances
for key in kwargs:
if type(kwargs[key]) is list:
kwargs[key] = list(kwargs[key])
else:
kwargs[key] = [kwargs[key]]
#convert kwargs into list of dictionary
input_dict_list =[]
for i in range(len(kwargs[kwargs.keys()[0]])):
temp_dict = {}
for key in kwargs.keys():
temp_dict[key] = kwargs[key][i]
input_dict_list.append(temp_dict)
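# e.g. (illustration only): kwargs = {'next_hop': ['1.1.1.1', '2.2.2.2'], 'rd': ['1:1', '2:2']}
# becomes input_dict_list = [{'next_hop': '1.1.1.1', 'rd': '1:1'}, {'next_hop': '2.2.2.2', 'rd': '2:2'}]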
for input_dict in input_dict_list:
| |
deploy custom function which takes as input the logarithm of the total memory usage)
EXAMPLES::
>>> from .estimator import both_may_depth_2_complexity
>>> both_may_depth_2_complexity(n=100,k=50,w=10) # doctest: +SKIP
"""
solutions = max(0, log2(binom(n, w)) - (n - k))
time = inf
memory = 0
r = _optimize_m4ri(n, k, mem)
i_val = [20, 160, 5, 4, 15]
i_val_inc = [10, 10, 10, 6, 6]
params = [-1 for _ in range(5)]
while True:
stop = True
for p in range(max(params[0] - i_val_inc[0] // 2, 0), min(w // 2, i_val[0]), 2):
for l in range(max(params[1] - i_val_inc[1] // 2, 0), min(n - k - (w - 2 * p), i_val[1])):
for w1 in range(max(params[2] - i_val_inc[2] // 2, 0), min(w, l + 1, i_val[2])):
for w2 in range(max(params[3] - i_val_inc[3] // 2, 0), min(w - 2 * p, l + 1, i_val[3], 2 * w1), 2):
for p1 in range(max(params[4] - i_val_inc[4] // 2, (p + 1) // 2), min(w, i_val[4])):
k1 = (k) // 2
reps = (binom(p, p / 2) * binom(k1 - p, p1 - p / 2)) ** 2 * binom(w2, w2 / 2) * binom(
l - w2,
w1 - w2 / 2)
reps = 1 if reps == 0 else reps
L1 = binom(k1, p1)
if log2(L1) > time:
continue
L12 = max(1, L1 ** 2 * binom(l, w1) // 2 ** l)
tmp_mem = log2((2 * L1 + L12) + _mem_matrix(n, k, r))
if tmp_mem > mem:
continue
Tp = max(log2(binom(n, w)) - log2(binom(n - k - l, w - w2 - 2 * p)) - 2 * log2(
binom(k1, p)) - log2(binom(l, w2)) - solutions, 0)
Tg = _gaussian_elimination_complexity(n, k, r)
first_level_nn = _indyk_motwani_complexity(L1, l, w1, hmap)
second_level_nn = _indyk_motwani_complexity(L12, n - k - l, w - 2 * p - w2, hmap)
T_tree = 2 * first_level_nn + second_level_nn
T_rep = int(ceil(2 ** max(0, l - log2(reps))))
tmp = Tp + log2(Tg + T_rep * T_tree)
tmp += __memory_access_cost(tmp_mem, memory_access)
time = min(tmp, time)
if tmp == time:
memory = tmp_mem
params = [p, l, w1, w2, p1, log2(L1), log2(L12)]
for i in range(len(i_val)):
if params[i] >= i_val[i] - i_val_inc[i] / 2:
i_val[i] += i_val_inc[i]
stop = False
if stop:
break
par = {"l": params[1], "p": params[0], "p1": params[4], "w1": params[2], "w2": params[3], "depth": 2}
res = {"time": time, "memory": memory, "parameters": par}
return res
def may_ozerov_complexity(n, k, w, mem=inf, hmap=1, only_depth_two=0, memory_access=0):
"""
Complexity estimate of May-Ozerov algorithm using Indyk-Motwani for NN search
[MayOze15] <NAME>. and <NAME>.: On computing nearest neighbors with applications to decoding of binary linear codes.
In: Annual International Conference on the Theory and Applications of Cryptographic Techniques. pp. 203--228. Springer (2015)
expected weight distribution::
+-------------------------+---------------------+---------------------+
| <-----+ n - k - l+----->|<--+ (k + l) / 2 +-->|<--+ (k + l) / 2 +-->|
| w - 2p | p | p |
+-------------------------+---------------------+---------------------+
INPUT:
- ``n`` -- length of the code
- ``k`` -- dimension of the code
- ``w`` -- Hamming weight of error vector
- ``mem`` -- upper bound on the available memory (as log2), default unlimited
- ``hmap`` -- indicates if hashmap is being used (default: true)
- ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)
EXAMPLES::
>>> from .estimator import may_ozerov_complexity
>>> may_ozerov_complexity(n=100,k=50,w=10) # doctest: +SKIP
"""
d2 = may_ozerov_depth_2_complexity(n, k, w, mem, hmap, memory_access)
d3 = may_ozerov_depth_3_complexity(n, k, w, mem, hmap, memory_access)
return d2 if d2["time"] < d3["time"] or only_depth_two else d3
def may_ozerov_depth_2_complexity(n, k, w, mem=inf, hmap=1, memory_access=0):
"""
Complexity estimate of May-Ozerov algorithm in depth 2 using Indyk-Motwani for NN search
[MayOze15] <NAME>. and <NAME>.: On computing nearest neighbors with applications to decoding of binary linear codes.
In: Annual International Conference on the Theory and Applications of Cryptographic Techniques. pp. 203--228. Springer (2015)
expected weight distribution::
+-------------------------+---------------------+---------------------+
| <-----+ n - k - l+----->|<--+ (k + l) / 2 +-->|<--+ (k + l) / 2 +-->|
| w - 2p | p | p |
+-------------------------+---------------------+---------------------+
INPUT:
- ``n`` -- length of the code
- ``k`` -- dimension of the code
- ``w`` -- Hamming weight of error vector
- ``mem`` -- upper bound on the available memory (as log2), default unlimited
- ``hmap`` -- indicates if hashmap is being used (default: true)
- ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)
EXAMPLES::
>>> from .estimator import may_ozerov_depth_2_complexity
>>> may_ozerov_depth_2_complexity(n=100,k=50,w=10) # doctest: +SKIP
"""
solutions = max(0, log2(binom(n, w)) - (n - k))
time = inf
memory = 0
r = _optimize_m4ri(n, k, mem)
i_val = [30, 300, 25]
i_val_inc = [10, 10, 10]
params = [-1 for _ in range(3)]
while True:
stop = True
for p in range(max(params[0] - i_val_inc[0] // 2, 0), min(w // 2, i_val[0]), 2):
for l in range(max(params[1] - i_val_inc[1] // 2, 0), min(n - k - (w - 2 * p), i_val[1])):
for p1 in range(max(params[2] - i_val_inc[2] // 2, (p + 1) // 2), min(w, i_val[2])):
k1 = (k + l) // 2
reps = (binom(p, p // 2) * binom(k1 - p, p1 - p // 2)) ** 2
L1 = binom(k1, p1)
if log2(L1) > time:
continue
L12 = L1 ** 2 // 2 ** l
L12 = max(L12, 1)
tmp_mem = log2((2 * L1 + L12) + _mem_matrix(n, k, r))
if tmp_mem > mem:
continue
Tp = max(
log2(binom(n, w)) - log2(binom(n - k - l, w - 2 * p)) - 2 * log2(binom(k1, p)) - solutions, 0)
Tg = _gaussian_elimination_complexity(n, k, r)
T_tree = 2 * _list_merge_complexity(L1, l, hmap) + _indyk_motwani_complexity(L12,
n - k - l,
w - 2 * p,
hmap)
T_rep = int(ceil(2 ** max(l - log2(reps), 0)))
tmp = Tp + log2(Tg + T_rep * T_tree)
tmp += __memory_access_cost(tmp_mem, memory_access)
time = min(tmp, time)
if tmp == time:
memory = tmp_mem
params = [p, l, p1]
for i in range(len(i_val)):
if params[i] >= i_val[i] - i_val_inc[i] / 2:
i_val[i] += i_val_inc[i]
stop = False
if stop:
break
break
par = {"l": params[1], "p": params[0], "p1": params[2], "depth": 2}
res = {"time": time, "memory": memory, "parameters": par}
return res
def may_ozerov_depth_3_complexity(n, k, w, mem=inf, hmap=1, memory_access=0):
"""
Complexity estimate of May-Ozerov algorithm in depth 3 using Indyk-Motwani for NN search
[MayOze15] <NAME>. and <NAME>.: On computing nearest neighbors with applications to decoding of binary linear codes.
In: Annual International Conference on the Theory and Applications of Cryptographic Techniques. pp. 203--228. Springer (2015)
expected weight distribution::
+-------------------------+---------------------+---------------------+
| <-----+ n - k - l+----->|<--+ (k + l) / 2 +-->|<--+ (k + l) / 2 +-->|
| w - 2p | p | p |
+-------------------------+---------------------+---------------------+
INPUT:
- ``n`` -- length of the code
- ``k`` -- dimension of the code
- ``w`` -- Hamming weight of error vector
- ``mem`` -- upper bound on the available memory (as log2), default unlimited
- ``hmap`` -- indicates if hashmap is being used (default: true)
- ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)
EXAMPLES::
>>> from .estimator import may_ozerov_depth_3_complexity
>>> may_ozerov_depth_3_complexity(n=100,k=50,w=10) # doctest: +SKIP
"""
solutions = max(0, log2(binom(n, w)) - (n - k))
time = inf
memory = 0
r = _optimize_m4ri(n, k, mem)
i_val = [20, 200, 20, 10]
i_val_inc = [10, 10, 10, 10]
params = [-1 for _ in | |
(self->argc > 0) ? self->args[0] : "");
this->args = strjoin(this->args, (self->argc > 0) ? "\\"" : "");
this->args = strjoin(this->args, (self->argc > 1) ? ", \\"" : "");
this->args = strjoin(this->args, (self->argc > 1) ? self->args[1] : "");
this->args = strjoin(this->args, (self->argc > 1) ? "\\"" : "");
this->args = strjoin(this->args, (self->argc > 2) ? ", \\"" : "");
this->args = strjoin(this->args, (self->argc > 2) ? self->args[2] : "");
this->args = strjoin(this->args, (self->argc > 2) ? "\\"" : "");
this->args = strjoin(this->args, (self->argc > 3) ? ", \\"" : "");
this->args = strjoin(this->args, (self->argc > 3) ? self->args[3] : "");
this->args = strjoin(this->args, (self->argc > 3) ? "\\"" : "");
this->args = strjoin(this->args, (self->argc > 4) ? ", \\"" : "");
this->args = strjoin(this->args, (self->argc > 4) ? self->args[4] : "");
this->args = strjoin(this->args, (self->argc > 4) ? "\\"" : "");
this->args = strjoin(this->args, (self->argc > 5) ? ", \\"" : "");
this->args = strjoin(this->args, (self->argc > 5) ? self->args[5] : "");
this->args = strjoin(this->args, (self->argc > 5) ? "\\"" : "");
this->args = strjoin(this->args, (self->argc > 6) ? ", \\"" : "");
this->args = strjoin(this->args, (self->argc > 6) ? self->args[6] : "");
this->args = strjoin(this->args, (self->argc > 6) ? "\\"" : "");
this->args = strjoin(this->args, (self->argc > 7) ? ", \\"" : "");
this->args = strjoin(this->args, (self->argc > 7) ? self->args[7] : "");
this->args = strjoin(this->args, (self->argc > 7) ? "\\"" : "");
this->args = strjoin(this->args, (self->argc > 8) ? ", \\"" : "");
this->args = strjoin(this->args, (self->argc > 8) ? self->args[8] : "");
this->args = strjoin(this->args, (self->argc > 8) ? "\\"" : "");
this->args = strjoin(this->args, (self->argc > 9) ? ", \\"" : "");
this->args = strjoin(this->args, (self->argc > 9) ? self->args[9]: "");
this->args = strjoin(this->args, (self->argc > 9) ? "\\"" : "");
/* Prints self->argc to permit verifying the internal consistency since
* this code is quite fishy. */
printf("%d %d:%d %s(\\"%s\\", [%d%s]) = %d\\n",
logindex, ppid, pid, probefunc,
self->e_arg0,
self->argc,
this->args,
errno);
logindex++;
/* TODO(maruel): Clean up memory
self->e_arg0 = 0;
self->argc = 0;
self->args[0] = 0;
self->args[1] = 0;
self->args[2] = 0;
self->args[3] = 0;
self->args[4] = 0;
self->args[5] = 0;
self->args[6] = 0;
self->args[7] = 0;
self->args[8] = 0;
self->args[9] = 0;
*/
}
syscall::open*:entry /trackedpid[pid]/ {
self->arg0 = arg0;
self->arg1 = arg1;
self->arg2 = arg2;
}
syscall::open*:return /trackedpid[pid] && errno == 0/ {
printf("%d %d:%d %s(\\"%s\\", %d, %d) = %d\\n",
logindex, ppid, pid, probefunc, copyinstr(self->arg0),
self->arg1, self->arg2, errno);
logindex++;
self->arg0 = 0;
self->arg1 = 0;
self->arg2 = 0;
}
syscall::rename:entry /trackedpid[pid]/ {
self->arg0 = arg0;
self->arg1 = arg1;
}
syscall::rename:return /trackedpid[pid]/ {
printf("%d %d:%d %s(\\"%s\\", \\"%s\\") = %d\\n",
logindex, ppid, pid, probefunc, copyinstr(self->arg0),
copyinstr(self->arg1), errno);
logindex++;
self->arg0 = 0;
self->arg1 = 0;
}
/* Track chdir; it's painful because it only receives relative paths */
syscall::chdir:entry /trackedpid[pid]/ {
self->arg0 = arg0;
}
syscall::chdir:return /trackedpid[pid] && errno == 0/ {
printf("%d %d:%d %s(\\"%s\\") = %d\\n",
logindex, ppid, pid, probefunc, copyinstr(self->arg0), errno);
logindex++;
self->arg0 = 0;
}
/* TODO(maruel): *stat* functions and friends
syscall::access:return,
syscall::chdir:return,
syscall::chflags:return,
syscall::chown:return,
syscall::chroot:return,
syscall::getattrlist:return,
syscall::getxattr:return,
syscall::lchown:return,
syscall::lstat64:return,
syscall::lstat:return,
syscall::mkdir:return,
syscall::pathconf:return,
syscall::readlink:return,
syscall::removexattr:return,
syscall::setxattr:return,
syscall::stat64:return,
syscall::stat:return,
syscall::truncate:return,
syscall::unlink:return,
syscall::utimes:return,
*/
"""
@classmethod
def code(cls, pid, cwd):
"""Setups the D code to implement child process tracking.
Injects the pid and the initial cwd into the trace header for context.
The reason is that the child process is already running at that point so:
- no proc_start() is logged for it.
- there is no way to figure out the absolute path of cwd in kernel on OSX
Since the child process is already started, initialize current_processes to
1.
"""
pid = str(pid)
cwd = os.path.realpath(cwd).replace('\\', '\\\\').replace('%', '%%')
return (
'dtrace:::BEGIN {\n'
' current_processes = 1;\n'
' logindex = 0;\n'
' trackedpid[' + pid + '] = 1;\n'
' printf("%d %d:%d %s_%s(\\"' + cwd + '\\") = 0\\n",\n'
' logindex, ppid, ' + pid + ', probeprov, probename);\n'
' logindex++;\n'
'}\n') + cls.D_CODE
class Context(ApiBase.Context):
# This is the most common format: index ppid:pid function(args) = result
RE_HEADER = re.compile(r'^\d+ (\d+):(\d+) ([a-zA-Z_\-]+)\((.*?)\) = (.+)$')
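# e.g. a line produced by the printf calls in the D code above (values are illustrative only):
#   12 345:678 open("/etc/passwd", 0, 0) = 3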
# Arguments parsing.
RE_DTRACE_BEGIN = re.compile(r'^\"(.+?)\"$')
RE_CHDIR = re.compile(r'^\"(.+?)\"$')
RE_EXECVE = re.compile(r'^\"(.+?)\", \[(\d+), (.+)\]$')
RE_OPEN = re.compile(r'^\"(.+?)\", (\d+), (-?\d+)$')
RE_RENAME = re.compile(r'^\"(.+?)\", \"(.+?)\"$')
O_DIRECTORY = 0x100000
class Process(ApiBase.Context.Process):
pass
def __init__(self, blacklist):
super(Dtrace.Context, self).__init__(blacklist)
# Process ID of the trace_child_process.py wrapper script instance.
self._tracer_pid = None
# First process to be started by self._tracer_pid.
self._initial_pid = None
self._initial_cwd = None
def on_line(self, line):
match = self.RE_HEADER.match(line)
assert match, line
fn = getattr(
self,
'handle_%s' % match.group(3).replace('-', '_'),
self._handle_ignored)
return fn(
int(match.group(1)),
int(match.group(2)),
match.group(3),
match.group(4),
match.group(5))
def to_results(self):
"""Uses self._initial_pid to determine the initial process."""
process = self.processes[self._initial_pid].to_results_process()
assert sorted(self.processes) == sorted(p.pid for p in process.all), (
sorted(self.processes), sorted(p.pid for p in process.all))
return Results(process)
def handle_dtrace_BEGIN(self, _ppid, pid, _function, args, _result):
assert not self._tracer_pid and not self._initial_pid
self._tracer_pid = pid
self._initial_cwd = self.RE_DTRACE_BEGIN.match(args).group(1)
def handle_proc_start(self, ppid, pid, _function, _args, result):
"""Transfers cwd.
The dtrace script already takes care of only tracing the processes that
are child of the traced processes so there is no need to verify the
process hierarchy.
"""
assert result == '0'
assert pid not in self.processes
assert (ppid == self._tracer_pid) != bool(self._initial_pid)
if not self._initial_pid:
self._initial_pid = pid
ppid = None
cwd = self._initial_cwd
else:
parent = self.processes[ppid]
cwd = parent.cwd
proc = self.processes[pid] = self.Process(self, pid, cwd, ppid)
proc.cwd = cwd
logging.debug('New child: %s -> %d' % (ppid, pid))
def handle_proc_exit(self, _ppid, pid, _function, _args, _result):
"""Removes cwd."""
if pid != self._tracer_pid:
# self._tracer_pid is not traced itself.
self.processes[pid].cwd = None
def handle_execve(self, _ppid, pid, _function, args, _result):
"""Sets the process' executable.
TODO(maruel): Read command line arguments. See
https://discussions.apple.com/thread/1980539 for an example.
https://gist.github.com/1242279
Will have to put the answer at http://stackoverflow.com/questions/7556249.
:)
"""
match = self.RE_EXECVE.match(args)
assert match, args
proc = self.processes[pid]
proc.executable = match.group(1)
proc.command = process_quoted_arguments(match.group(3))
assert int(match.group(2)) == len(proc.command), args
def handle_chdir(self, _ppid, pid, _function, args, result):
"""Updates cwd."""
assert self._tracer_pid
assert pid in self.processes
if result.startswith('0'):
cwd = self.RE_CHDIR.match(args).group(1)
if not cwd.startswith('/'):
cwd2 = os.path.join(self.processes[pid].cwd, cwd)
logging.debug('handle_chdir(%d, %s) -> %s' % (pid, cwd, cwd2))
else:
logging.debug('handle_chdir(%d, %s)' % (pid, cwd))
cwd2 = cwd
self.processes[pid].cwd = cwd2
else:
assert False, 'Unexpected fail: %s' % result
def handle_open_nocancel(self, ppid, pid, function, args, result):
return self.handle_open(ppid, pid, function, args, result)
def handle_open(self, _ppid, pid, function, args, result):
match = self.RE_OPEN.match(args)
assert match, (pid, function, args, result)
args = match.groups()
flag = int(args[1])
if self.O_DIRECTORY & flag == self.O_DIRECTORY:
# Ignore directories.
return
self._handle_file(pid, args[0], result)
def handle_rename(self, _ppid, pid, _function, args, result):
args = self.RE_RENAME.match(args).groups()
self._handle_file(pid, args[0], result)
self._handle_file(pid, args[1], result)
def _handle_file(self, pid, filepath, result):
if result.startswith(('-1', '2')):
return
if not filepath.startswith('/'):
filepath = os.path.join(self.processes[pid].cwd, filepath)
# We can get '..' in the path.
filepath = os.path.normpath(filepath)
# Sadly, still need to filter out directories here;
# saw open_nocancel(".", 0, 0) = 0 lines.
if os.path.isdir(filepath):
return
self.processes[pid].add_file(filepath)
@staticmethod
def _handle_ignored(_ppid, pid, function, args, result):
logging.debug('%d %s(%s) = %s' % (pid, function, args, result))
@staticmethod
def clean_trace(logname):
if os.path.isfile(logname):
os.remove(logname)
@classmethod
def gen_trace(cls, cmd, cwd, logname, output):
"""Runs dtrace on an executable.
This dtruss is broken when it starts the process itself or when tracing
child processes, so this code starts a wrapper process, trace_child_process.py,
which waits for dtrace to start and then launches the executable to trace.
"""
logging.info('gen_trace(%s, %s, %s, %s)' % (cmd, cwd, logname, output))
logging.info('Running: %s' % cmd)
signal = 'Go!'
logging.debug('Our pid: %d' % os.getpid())
# Part 1: start the child process.
stdout = stderr = None
if output:
stdout = subprocess.PIPE
stderr = subprocess.STDOUT
child_cmd = [
sys.executable, os.path.join(BASE_DIR, 'trace_child_process.py'),
]
child = subprocess.Popen(
child_cmd + cmd,
stdin=subprocess.PIPE,
stdout=stdout,
stderr=stderr,
cwd=cwd)
logging.debug('Started child pid: %d' % child.pid)
# Part 2: start dtrace process.
# Note: do | |
<reponame>rokroskar/GPy
import numpy as np
import sympy as sp
from sympy.utilities.codegen import codegen
from sympy.core.cache import clear_cache
from scipy import weave
import re
import os
import sys
current_dir = os.path.dirname(os.path.abspath(__file__))
import tempfile
import pdb
import ast
from kernpart import Kernpart
from ...util.config import config
class spkern(Kernpart):
"""
A kernel object, where all the hard work is done by sympy.
:param k: the covariance function
:type k: a positive definite sympy function of x_0, z_0, x_1, z_1, x_2, z_2...
To construct a new sympy kernel, you'll need to define:
- a kernel function using a sympy object. Ensure that the kernel is of the form k(x,z).
- that's it! we'll extract the variables from the function k.
Note:
- to handle multiple inputs, call them x_1, z_1, etc
- to handle multiple correlated outputs, you'll need to add parameters with an index, such as lengthscale_i and lengthscale_j.
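A minimal construction sketch (assumed usage, for illustration only):
import sympy as sp
x_0, z_0, lengthscale, variance = sp.symbols('x_0 z_0 lengthscale variance', positive=True)
k = variance * sp.exp(-(x_0 - z_0)**2 / (2 * lengthscale**2))
kern = spkern(input_dim=1, k=k)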
"""
def __init__(self, input_dim, k=None, output_dim=1, name=None, param=None):
if name is None:
self.name='sympykern'
else:
self.name = name
if k is None:
raise ValueError, "You must provide an argument for the covariance function."
self._sp_k = k
sp_vars = [e for e in k.atoms() if e.is_Symbol]
self._sp_x= sorted([e for e in sp_vars if e.name[0:2]=='x_'],key=lambda x:int(x.name[2:]))
self._sp_z= sorted([e for e in sp_vars if e.name[0:2]=='z_'],key=lambda z:int(z.name[2:]))
# Check that variable names make sense.
assert all([x.name=='x_%i'%i for i,x in enumerate(self._sp_x)])
assert all([z.name=='z_%i'%i for i,z in enumerate(self._sp_z)])
assert len(self._sp_x)==len(self._sp_z)
self.input_dim = len(self._sp_x)
self._real_input_dim = self.input_dim
if output_dim > 1:
self.input_dim += 1
assert self.input_dim == input_dim
self.output_dim = output_dim
# extract parameter names
thetas = sorted([e for e in sp_vars if not (e.name[0:2]=='x_' or e.name[0:2]=='z_')],key=lambda e:e.name)
# Look for parameters with index.
if self.output_dim>1:
self._sp_theta_i = sorted([e for e in thetas if (e.name[-2:]=='_i')], key=lambda e:e.name)
self._sp_theta_j = sorted([e for e in thetas if (e.name[-2:]=='_j')], key=lambda e:e.name)
# Make sure parameter appears with both indices!
assert len(self._sp_theta_i)==len(self._sp_theta_j)
assert all([theta_i.name[:-2]==theta_j.name[:-2] for theta_i, theta_j in zip(self._sp_theta_i, self._sp_theta_j)])
# Extract names of shared parameters
self._sp_theta = [theta for theta in thetas if theta not in self._sp_theta_i and theta not in self._sp_theta_j]
self.num_split_params = len(self._sp_theta_i)
self._split_theta_names = ["%s"%theta.name[:-2] for theta in self._sp_theta_i]
for theta in self._split_theta_names:
setattr(self, theta, np.ones(self.output_dim))
self.num_shared_params = len(self._sp_theta)
self.num_params = self.num_shared_params+self.num_split_params*self.output_dim
else:
self.num_split_params = 0
self._split_theta_names = []
self._sp_theta = thetas
self.num_shared_params = len(self._sp_theta)
self.num_params = self.num_shared_params
for theta in self._sp_theta:
val = 1.0
if param is not None:
if param.has_key(theta):
val = param[theta]
setattr(self, theta.name, val)
#deal with param
self._set_params(self._get_params())
#Differentiate!
self._sp_dk_dtheta = [sp.diff(k,theta).simplify() for theta in self._sp_theta]
if self.output_dim > 1:
self._sp_dk_dtheta_i = [sp.diff(k,theta).simplify() for theta in self._sp_theta_i]
self._sp_dk_dx = [sp.diff(k,xi).simplify() for xi in self._sp_x]
if False:
self.compute_psi_stats()
self._gen_code()
if False:
extra_compile_args = ['-ftree-vectorize', '-mssse3', '-ftree-vectorizer-verbose=5']
else:
extra_compile_args = []
self.weave_kwargs = {
'support_code':self._function_code,
'include_dirs':[tempfile.gettempdir(), current_dir],
'headers':['"sympy_helpers.h"'],
'sources':[os.path.join(current_dir,"sympy_helpers.cpp")],
'extra_compile_args':extra_compile_args,
'extra_link_args':[],
'verbose':True}
if config.getboolean('parallel', 'openmp'): self.weave_kwargs['extra_link_args'].append('-lgomp')
def __add__(self,other):
return spkern(self._sp_k+other._sp_k)
def _gen_code(self):
"""Generates the C functions necessary for computing the covariance function using the sympy objects as input."""
#TODO: maybe generate one C function only to save compile time? Also easier to take that as a basis and hand craft other covariances??
#generate c functions from sympy objects
argument_sequence = self._sp_x+self._sp_z+self._sp_theta
code_list = [('k',self._sp_k)]
# gradients with respect to covariance input
code_list += [('dk_d%s'%x.name,dx) for x,dx in zip(self._sp_x,self._sp_dk_dx)]
# gradient with respect to parameters
code_list += [('dk_d%s'%theta.name,dtheta) for theta,dtheta in zip(self._sp_theta,self._sp_dk_dtheta)]
# gradient with respect to multiple output parameters
if self.output_dim > 1:
argument_sequence += self._sp_theta_i + self._sp_theta_j
code_list += [('dk_d%s'%theta.name,dtheta) for theta,dtheta in zip(self._sp_theta_i,self._sp_dk_dtheta_i)]
(foo_c,self._function_code), (foo_h,self._function_header) = \
codegen(code_list, "C",'foobar',argument_sequence=argument_sequence)
#put the header file where we can find it
f = file(os.path.join(tempfile.gettempdir(),'foobar.h'),'w')
f.write(self._function_header)
f.close()
# Substitute any known derivatives which sympy doesn't compute
self._function_code = re.sub('DiracDelta\(.+?,.+?\)','0.0',self._function_code)
############################################################
# This is the basic argument construction for the C code. #
############################################################
arg_list = (["X2(i, %s)"%x.name[2:] for x in self._sp_x]
+ ["Z2(j, %s)"%z.name[2:] for z in self._sp_z])
# for multiple outputs need to also provide these arguments reversed.
if self.output_dim>1:
reverse_arg_list = list(arg_list)
reverse_arg_list.reverse()
# Add in any 'shared' parameters to the list.
param_arg_list = [shared_params.name for shared_params in self._sp_theta]
arg_list += param_arg_list
precompute_list=[]
if self.output_dim > 1:
reverse_arg_list+=list(param_arg_list)
split_param_arg_list = ["%s1(%s)"%(theta.name[:-2].upper(),index) for index in ['ii', 'jj'] for theta in self._sp_theta_i]
split_param_reverse_arg_list = ["%s1(%s)"%(theta.name[:-2].upper(),index) for index in ['jj', 'ii'] for theta in self._sp_theta_i]
arg_list += split_param_arg_list
reverse_arg_list += split_param_reverse_arg_list
# Extract the right output indices from the inputs.
c_define_output_indices = [' '*16 + "int %s=(int)%s(%s, %i);"%(index, var, index2, self.input_dim-1) for index, var, index2 in zip(['ii', 'jj'], ['X2', 'Z2'], ['i', 'j'])]
precompute_list += c_define_output_indices
reverse_arg_string = ", ".join(reverse_arg_list)
arg_string = ", ".join(arg_list)
precompute_string = "\n".join(precompute_list)
# Code to compute arguments string needed when only X is provided.
X_arg_string = re.sub('Z','X',arg_string)
# Code to compute argument string when only diagonal is required.
diag_arg_string = re.sub('int jj','//int jj',X_arg_string)
diag_arg_string = re.sub('j','i',diag_arg_string)
if precompute_string == '':
# if it's not multioutput, the precompute strings are set to zero
diag_precompute_string = ''
diag_precompute_replace = ''
else:
# for multioutput we need to extract the index of the output form the input.
diag_precompute_string = precompute_list[0]
diag_precompute_replace = precompute_list[1]
# Here's the code to do the looping for K
self._K_code =\
"""
// _K_code
// Code for computing the covariance function.
int i;
int j;
int N = target_array->dimensions[0];
int num_inducing = target_array->dimensions[1];
int input_dim = X_array->dimensions[1];
//#pragma omp parallel for private(j)
for (i=0;i<N;i++){
for (j=0;j<num_inducing;j++){
%s
//target[i*num_inducing+j] =
TARGET2(i, j) += k(%s);
}
}
%s
"""%(precompute_string,arg_string,"/*"+str(self._sp_k)+"*/") #adding a string representation forces recompile when needed
self._K_code_X = """
// _K_code_X
// Code for computing the covariance function.
int i;
int j;
int N = target_array->dimensions[0];
int num_inducing = target_array->dimensions[1];
int input_dim = X_array->dimensions[1];
//#pragma omp parallel for private(j)
for (i=0;i<N;i++){
%s // int ii=(int)X2(i, 1);
TARGET2(i, i) += k(%s);
for (j=0;j<i;j++){
%s //int jj=(int)X2(j, 1);
double kval = k(%s); //double kval = k(X2(i, 0), shared_lengthscale, LENGTHSCALE1(ii), SCALE1(ii));
TARGET2(i, j) += kval;
TARGET2(j, i) += kval;
}
}
/*%s*/
"""%(diag_precompute_string, diag_arg_string, re.sub('Z2', 'X2', diag_precompute_replace), X_arg_string,str(self._sp_k)) #adding a string representation forces recompile when needed
# Code to do the looping for Kdiag
self._Kdiag_code =\
"""
// _Kdiag_code
// Code for computing diagonal of covariance function.
int i;
int N = target_array->dimensions[0];
int input_dim = X_array->dimensions[1];
//#pragma omp parallel for
for (i=0;i<N;i++){
%s
//target[i] =
TARGET1(i)=k(%s);
}
%s
"""%(diag_precompute_string,diag_arg_string,"/*"+str(self._sp_k)+"*/") #adding a string representation forces recompile when needed
# Code to compute gradients
grad_func_list = []
if self.output_dim>1:
grad_func_list += c_define_output_indices
grad_func_list += [' '*16 + 'TARGET1(%i+ii) += PARTIAL2(i, j)*dk_d%s(%s);'%(self.num_shared_params+i*self.output_dim, theta.name, arg_string) for i, theta in enumerate(self._sp_theta_i)]
grad_func_list += [' '*16 + 'TARGET1(%i+jj) += PARTIAL2(i, j)*dk_d%s(%s);'%(self.num_shared_params+i*self.output_dim, theta.name, reverse_arg_string) for i, theta in enumerate(self._sp_theta_i)]
grad_func_list += ([' '*16 + 'TARGET1(%i) += PARTIAL2(i, j)*dk_d%s(%s);'%(i,theta.name,arg_string) for i,theta in enumerate(self._sp_theta)])
grad_func_string = '\n'.join(grad_func_list)
self._dK_dtheta_code =\
"""
// _dK_dtheta_code
// Code for computing gradient of covariance with respect to parameters.
int i;
int j;
int N = partial_array->dimensions[0];
int num_inducing = partial_array->dimensions[1];
int input_dim = X_array->dimensions[1];
//#pragma omp parallel for private(j)
for (i=0;i<N;i++){
for (j=0;j<num_inducing;j++){
%s
}
}
%s
"""%(grad_func_string,"/*"+str(self._sp_k)+"*/") # adding a string representation forces recompile when needed
# Code to compute gradients for Kdiag TODO: needs clean up
diag_grad_func_string = re.sub('Z','X',grad_func_string,count=0)
diag_grad_func_string = re.sub('int jj','//int jj',diag_grad_func_string)
diag_grad_func_string = re.sub('j','i',diag_grad_func_string)
diag_grad_func_string = re.sub('PARTIAL2\(i, i\)','PARTIAL1(i)',diag_grad_func_string)
self._dKdiag_dtheta_code =\
"""
// _dKdiag_dtheta_code
// Code for computing gradient of diagonal with respect to parameters.
int i;
int N = partial_array->dimensions[0];
int input_dim = X_array->dimensions[1];
for (i=0;i<N;i++){
%s
}
%s
"""%(diag_grad_func_string,"/*"+str(self._sp_k)+"*/") #adding a string representation forces recompile when needed
# Code for gradients wrt X, TODO: may need to deal with special case where one input is actually an output.
gradX_func_list = []
if self.output_dim>1:
gradX_func_list += c_define_output_indices
gradX_func_list += ["TARGET2(i, %i) += PARTIAL2(i, j)*dk_dx_%i(%s);"%(q,q,arg_string) for q in range(self._real_input_dim)]
gradX_func_string = "\n".join(gradX_func_list)
self._dK_dX_code = \
"""
// _dK_dX_code
// Code for computing gradient of covariance with respect to inputs.
int i;
int j;
int N = partial_array->dimensions[0];
int num_inducing = partial_array->dimensions[1];
int input_dim = X_array->dimensions[1];
//#pragma | |
+ ["R_Li[" + str(i + 1) + "]" for i in range(8)]
pars_of_interest = pars_of_interest + [
"R_I",
"R_L",
"theta_md",
"theta_masks",
"sig",
"voc_effect_alpha",
"voc_effect_delta",
"voc_effect_omicron",
]
pars_of_interest = pars_of_interest + [
col for col in df_fit if "phi" in col and "simplex" not in col
]
# save a summary for ease of viewing
# output a set of diagnostics
filename = (
figs_dir
+ "fit_summary_main_parameters"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
fit_summary.loc[pars_of_interest].to_csv(filename)
return None
def plot_and_save_posterior_samples(data_date):
"""
Runs the full suite of plotting.
"""
data_date = pd.to_datetime(data_date) # Define data date
figs_dir = (
"figs/stan_fit/stan_fit_"
+ data_date.strftime("%Y-%m-%d")
+ "/"
)
# read in the posterior sample
samples_mov_gamma = pd.read_csv(
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "/posterior_sample_"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
# * Note: 2020-09-09 won't work (for some reason)
######### Read in microdistancing (md) surveys #########
surveys = pd.DataFrame()
path = "data/md/Barometer wave*.csv"
for file in glob.glob(path):
surveys = surveys.append(pd.read_csv(file, parse_dates=["date"]))
surveys = surveys.sort_values(by="date")
print("Latest Microdistancing survey is {}".format(surveys.date.values[-1]))
surveys["state"] = surveys["state"].map(states_initials).fillna(surveys["state"])
surveys["proportion"] = surveys["count"] / surveys.respondents
surveys.date = pd.to_datetime(surveys.date)
always = surveys.loc[surveys.response == "Always"].set_index(["state", "date"])
always = always.unstack(["state"])
# If you get an error here saying 'cannot create a new series when the index is not unique',
# then you have a duplicated md file.
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
always = always.reindex(idx, fill_value=np.nan)
always.index.name = "date"
# fill back to earlier and between weeks.
# Assume survey on day x applies for all days up to x - 6
always = always.fillna(method="bfill")
# assume values continue forward if survey hasn't completed
always = always.fillna(method="ffill")
always = always.stack(["state"])
# Zero out before first survey 20th March
always = always.reset_index().set_index("date")
always.loc[:"2020-03-20", "count"] = 0
always.loc[:"2020-03-20", "respondents"] = 0
always.loc[:"2020-03-20", "proportion"] = 0
always = always.reset_index().set_index(["state", "date"])
survey_X = pd.pivot_table(
data=always, index="date", columns="state", values="proportion"
)
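# survey_X is now a date-by-state matrix of 'Always' proportions: one column per reported
# state/region and a daily index from 2020-03-01 onwards (illustrative description only)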
survey_counts_base = (
pd.pivot_table(data=always, index="date", columns="state", values="count")
.drop(["Australia", "Other"], axis=1)
.astype(int)
)
survey_respond_base = (
pd.pivot_table(data=always, index="date", columns="state", values="respondents")
.drop(["Australia", "Other"], axis=1)
.astype(int)
)
## read in and process mask wearing data
mask_wearing = pd.DataFrame()
path = "data/face_coverings/face_covering_*_.csv"
for file in glob.glob(path):
mask_wearing = mask_wearing.append(pd.read_csv(file, parse_dates=["date"]))
mask_wearing = mask_wearing.sort_values(by="date")
print("Latest Mask wearing survey is {}".format(mask_wearing.date.values[-1]))
mask_wearing["state"] = (
mask_wearing["state"].map(states_initials).fillna(mask_wearing["state"])
)
mask_wearing["proportion"] = mask_wearing["count"] / mask_wearing.respondents
mask_wearing.date = pd.to_datetime(mask_wearing.date)
mask_wearing_always = mask_wearing.loc[
mask_wearing.face_covering == "Always"
].set_index(["state", "date"])
mask_wearing_always = mask_wearing_always.unstack(["state"])
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
mask_wearing_always = mask_wearing_always.reindex(idx, fill_value=np.nan)
mask_wearing_always.index.name = "date"
# fill back to earlier and between weeks.
# Assume survey on day x applies for all days up to x - 6
mask_wearing_always = mask_wearing_always.fillna(method="bfill")
# assume values continue forward if survey hasn't completed
mask_wearing_always = mask_wearing_always.fillna(method="ffill")
mask_wearing_always = mask_wearing_always.stack(["state"])
# Zero out before first survey 20th March
mask_wearing_always = mask_wearing_always.reset_index().set_index("date")
mask_wearing_always.loc[:"2020-03-20", "count"] = 0
mask_wearing_always.loc[:"2020-03-20", "respondents"] = 0
mask_wearing_always.loc[:"2020-03-20", "proportion"] = 0
mask_wearing_X = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="proportion"
)
mask_wearing_counts_base = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="count"
).astype(int)
mask_wearing_respond_base = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="respondents"
).astype(int)
df_Reff = pd.read_csv(
"results/EpyReff/Reff_delta" + data_date.strftime("%Y-%m-%d") + "tau_4.csv",
parse_dates=["INFECTION_DATES"],
)
df_Reff["date"] = df_Reff.INFECTION_DATES
df_Reff["state"] = df_Reff.STATE
df_Reff_omicron = pd.read_csv(
"results/EpyReff/Reff_omicron" + data_date.strftime("%Y-%m-%d") + "tau_4.csv",
parse_dates=["INFECTION_DATES"],
)
df_Reff_omicron["date"] = df_Reff_omicron.INFECTION_DATES
df_Reff_omicron["state"] = df_Reff_omicron.STATE
# relabel some of the columns to avoid replication in the merged dataframe
col_names_replace = {
"mean": "mean_omicron",
"lower": "lower_omicron",
"upper": "upper_omicron",
"top": "top_omicron",
"bottom": "bottom_omicron",
"std": "std_omicron",
}
df_Reff_omicron.rename(col_names_replace, axis=1, inplace=True)
# read in NNDSS/linelist data
# If this errors it may be missing a leading zero on the date.
df_state = read_in_cases(
case_file_date=data_date.strftime("%d%b%Y"),
apply_delay_at_read=True,
apply_inc_at_read=True,
)
df_Reff = df_Reff.merge(
df_state,
how="left",
left_on=["state", "date"],
right_on=["STATE", "date_inferred"],
) # how="left" keeps all Reff dates; NNDSS is missing some dates
# merge in the omicron stuff
df_Reff = df_Reff.merge(
df_Reff_omicron,
how="left",
left_on=["state", "date"],
right_on=["state", "date"],
)
df_Reff["rho_moving"] = df_Reff.groupby(["state"])["rho"].transform(
lambda x: x.rolling(7, 1).mean()
) # trailing 7-day mean with a minimum of 1 observation (min_periods=1)
# some days have no cases, so need to fillna
df_Reff["rho_moving"] = df_Reff.rho_moving.fillna(method="bfill")
# counts are already aligned with infection date by subtracting a random incubation period
df_Reff["local"] = df_Reff.local.fillna(0)
df_Reff["imported"] = df_Reff.imported.fillna(0)
######### Read in Google mobility results #########
sys.path.insert(0, "../")
df_google = read_in_google(moving=True)
df = df_google.merge(
df_Reff[
[
"date",
"state",
"mean",
"lower",
"upper",
"top",
"bottom",
"std",
"mean_omicron",
"lower_omicron",
"upper_omicron",
"top_omicron",
"bottom_omicron",
"std_omicron",
"rho",
"rho_moving",
"local",
"imported",
]
],
on=["date", "state"],
how="inner",
)
# ACT and NT are not in the original estimates and need to be extrapolated;
# sorting keeps the state order consistent with the sort in data_by_state
# Note that as we now consider the third wave for ACT, we include it in the third
# wave fitting only!
states_to_fit_all_waves = sorted(
["NSW", "VIC", "QLD", "SA", "WA", "TAS", "ACT", "NT"]
)
first_states = sorted(["NSW", "VIC", "QLD", "SA", "WA", "TAS"])
fit_post_March = True
ban = "2020-03-20"
first_end_date = "2020-03-31"
# data for the first wave
first_date_range = {
"NSW": pd.date_range(start="2020-03-01", end=first_end_date).values,
"QLD": pd.date_range(start="2020-03-01", end=first_end_date).values,
"SA": pd.date_range(start="2020-03-01", end=first_end_date).values,
"TAS": pd.date_range(start="2020-03-01", end=first_end_date).values,
"VIC": pd.date_range(start="2020-03-01", end=first_end_date).values,
"WA": pd.date_range(start="2020-03-01", end=first_end_date).values,
}
# Second wave inputs
sec_states = sorted([
'NSW',
# 'VIC',
])
sec_start_date = "2020-06-01"
sec_end_date = "2021-01-19"
# choose dates for each state for sec wave
sec_date_range = {
"NSW": pd.date_range(start="2020-06-01", end="2021-01-19").values,
# "VIC": pd.date_range(start="2020-06-01", end="2020-10-28").values,
}
# Third wave inputs
third_states = sorted([
"NSW",
"VIC",
"ACT",
"QLD",
"SA",
"TAS",
# "NT",
"WA",
])
# Subtract the truncation days to avoid right truncation as we consider infection dates
# and not symptom onset dates
third_end_date = data_date - pd.Timedelta(days=truncation_days)
# choose dates for each state for third wave
# Note that as we now consider the third wave for ACT, we include it in
# the third wave fitting only!
third_date_range = {
"ACT": pd.date_range(start="2021-08-15", end=third_end_date).values,
"NSW": pd.date_range(start="2021-06-25", end=third_end_date).values,
# "NT": pd.date_range(start="2021-12-20", end=third_end_date).values,
"QLD": pd.date_range(start="2021-07-30", end=third_end_date).values,
"SA": pd.date_range(start="2021-12-10", end=third_end_date).values,
"TAS": pd.date_range(start="2021-12-20", end=third_end_date).values,
"VIC": pd.date_range(start="2021-07-10", end=third_end_date).values,
"WA": pd.date_range(start="2022-01-01", end=third_end_date).values,
}
fit_mask = df.state.isin(first_states)
if fit_post_March:
fit_mask = (fit_mask) & (df.date >= start_date)
fit_mask = (fit_mask) & (df.date <= first_end_date)
second_wave_mask = df.state.isin(sec_states)
second_wave_mask = (second_wave_mask) & (df.date >= sec_start_date)
second_wave_mask = (second_wave_mask) & (df.date <= sec_end_date)
# Add third wave stuff here
third_wave_mask = df.state.isin(third_states)
third_wave_mask = (third_wave_mask) & (df.date >= third_start_date)
third_wave_mask = (third_wave_mask) & (df.date <= third_end_date)
predictors = mov_values.copy()
# predictors.extend(['driving_7days','transit_7days','walking_7days','pc'])
# remove residential to see if it improves fit
# predictors.remove("residential_7days")
df["post_policy"] = (df.date >= ban).astype(int)
dfX = df.loc[fit_mask].sort_values("date")
df2X = df.loc[second_wave_mask].sort_values("date")
df3X = df.loc[third_wave_mask].sort_values("date")
dfX["is_first_wave"] = 0
for state in first_states:
dfX.loc[dfX.state == state, "is_first_wave"] = (
dfX.loc[dfX.state == state]
.date.isin(first_date_range[state])
.astype(int)
.values
)
df2X["is_sec_wave"] = 0
for state in sec_states:
df2X.loc[df2X.state == state, "is_sec_wave"] = (
df2X.loc[df2X.state == state]
.date.isin(sec_date_range[state])
.astype(int)
.values
)
# used to index what dates are also featured in omicron
omicron_date_range = pd.date_range(start=omicron_start_date, end=third_end_date)
df3X["is_third_wave"] = 0
for state in third_states:
df3X.loc[df3X.state == state, "is_third_wave"] = (
df3X.loc[df3X.state == state]
.date.isin(third_date_range[state])
.astype(int)
.values
)
# condition on being in third wave AND omicron
df3X.loc[df3X.state == state, "is_omicron_wave"] = (
(
df3X.loc[df3X.state == state].date.isin(omicron_date_range)
* df3X.loc[df3X.state == state].date.isin(third_date_range[state])
)
.astype(int)
.values
)
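# (Illustrative note, not part of the original script.) Multiplying the two
# boolean isin() masks above acts as an elementwise logical AND: a row is flagged
# as omicron only when its date is in both omicron_date_range and that state's
# third_date_range (True * True -> 1, True * False -> 0 after .astype(int)).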
data_by_state = {}
sec_data_by_state = {}
third_data_by_state = {}
for value in ["mean", "std", "local", "imported"]:
data_by_state[value] = pd.pivot(
dfX[["state", value, "date"]], index="date", columns="state", values=value
).sort_index(axis="columns")
# account for dates pre second wave
if df2X.loc[df2X.state == sec_states[0]].shape[0] == 0:
print("making empty")
sec_data_by_state[value] = pd.DataFrame(columns=sec_states).astype(float)
else:
sec_data_by_state[value] = pd.pivot(
df2X[["state", value, "date"]],
index="date",
columns="state",
values=value,
).sort_index(axis="columns")
# account for dates pre third wave
if df3X.loc[df3X.state == third_states[0]].shape[0] == 0:
print("making empty")
third_data_by_state[value] = pd.DataFrame(columns=third_states).astype(
float
)
else:
third_data_by_state[value] = pd.pivot(
df3X[["state", value, "date"]],
index="date",
columns="state",
values=value,
).sort_index(axis="columns")
# now add in the summary stats for Omicron Reff
for value in ["mean_omicron", "std_omicron"]:
if df3X.loc[df3X.state == third_states[0]].shape[0] == 0:
print("making empty")
third_data_by_state[value] = pd.DataFrame(columns=third_states).astype(
float
)
else:
third_data_by_state[value] = pd.pivot(
df3X[["state", value, "date"]],
index="date",
columns="state",
values=value,
).sort_index(axis="columns")
# FIRST PHASE
mobility_by_state = []
mobility_std_by_state = []
' (date month)'),
("%y %b", dt.datetime.strftime(now, "%b %y") + ' (month year)'),
("%y %b", dt.datetime.strftime(now, "%y %b") + ' (year month)'),
("%b %d %Y", dt.datetime.strftime(now, "%b %d %Y") + ' (full date)'),
("%Y %b %d", dt.datetime.strftime(now, "%Y %b %d") + ' (full date)')]
return axis_list_menu
# =============================================================================
def getBatteryDeviceList(self, fltr="", values_dict=None, type_id="", target_id=0):
"""
Create a list of battery-powered devices
Creates a list of tuples that contains the device ID and device name of all
Indigo devices that report a batteryLevel device property that is not None.
If no devices meet the criteria, a single tuple is returned as a place-
holder.
-----
:param unicode fltr:
:param class 'indigo.Dict' values_dict:
:param unicode type_id:
:param int target_id:
"""
batt_list = [(dev.id, dev.name) for dev in indigo.devices.iter() if dev.batteryLevel is not None]
if len(batt_list) == 0:
batt_list = [(-1, 'No battery devices detected.'), ]
return batt_list
# =============================================================================
def getFileList(self, fltr="", values_dict=None, type_id="", target_id=0):
"""
Get list of CSV files for various dropdown menus.
Generates a list of CSV source files that are located in the folder specified
within the plugin configuration dialog. If the method is unable to find any CSV
files, an empty list is returned.
-----
:param unicode fltr:
:param class 'indigo.Dict' values_dict:
:param unicode type_id:
:param int target_id:
"""
file_name_list_menu = []
default_path = '{path}/com.fogbert.indigoplugin.matplotlib/'.format(path=indigo.server.getLogsFolderPath())
source_path = self.pluginPrefs.get('dataPath', default_path)
try:
import glob
import os
for file_name in glob.glob(u"{path}{fn}".format(path=source_path, fn='*.csv')):
final_filename = os.path.basename(file_name)
file_name_list_menu.append((final_filename, final_filename[:-4]))
# Sort the file list
file_name_list_menu = sorted(file_name_list_menu, key=lambda s: s[0].lower()) # Case insensitive sort
# Add a separator and a 'None' option to the end of the list
file_name_list_menu = file_name_list_menu + [(u"-5", u"%%separator%%"), (u"None", u"None")]
except IOError as sub_error:
self.plugin_error_handler(sub_error=traceback.format_exc())
self.logger.error(u"Error generating file list: {s}. See plugin log for more "
u"information.".format(s=sub_error))
# return sorted(file_name_list_menu, key=lambda s: s[0].lower()) # Case insensitive sort
return file_name_list_menu
# =============================================================================
def getFontList(self, fltr="", values_dict=None, type_id="", target_id=0):
"""
Provide a list of font names for various dropdown menus.
Note that these are the fonts that Matplotlib can see, not necessarily all of
the fonts installed on the system. If matplotlib can't find any fonts, a
default list of fonts that matplotlib supports natively is provided.
-----
:param unicode fltr:
:param class 'indigo.Dict' values_dict:
:param unicode type_id:
:param int target_id:
"""
font_menu = []
try:
from os import path
for font in mfont.findSystemFonts(fontpaths=None, fontext='ttf'):
font_name = path.splitext(path.basename(font))[0]
if font_name not in font_menu:
font_menu.append(font_name)
except Exception as sub_error:
self.plugin_error_handler(sub_error=traceback.format_exc())
self.logger.error(u"Error building font list. Returning generic list. {s}. See plugin log for more "
u"information.".format(s=sub_error))
font_menu = FONT_MENU
return sorted(font_menu)
# =============================================================================
def getForecastSource(self, fltr="", values_dict=None, type_id="", target_id=0):
"""
Return a list of WUnderground devices for forecast chart devices
Generates and returns a list of potential forecast source devices for forecast
chart devices. The plugin currently recognises WUnderground (legacy) and
Fantastic Weather devices, with the intention of expanding the list of
compatible devices going forward.
-----
:param unicode fltr:
:param class 'indigo.Dict' values_dict:
:param unicode type_id:
:param int target_id:
"""
forecast_source_menu = []
# We accept both WUnderground (legacy) and Fantastic Weather devices. We have to
# construct these one at a time. Note the typo in the bundle identifier is correct.
try:
for dev in indigo.devices.itervalues("com.fogbert.indigoplugin.fantasticwWeather"):
if dev.deviceTypeId in ('Daily', 'Hourly'):
forecast_source_menu.append((dev.id, dev.name))
for dev in indigo.devices.itervalues("com.fogbert.indigoplugin.wunderground"):
if dev.deviceTypeId in ('wundergroundTenDay', 'wundergroundHourly'):
forecast_source_menu.append((dev.id, dev.name))
except Exception as sub_error:
self.plugin_error_handler(sub_error=traceback.format_exc())
self.logger.error(u"Error getting list of forecast devices: {s}. See plugin log for more "
u"information.".format(s=sub_error))
self.logger.threaddebug(u"Forecast device list generated successfully: {fsm}".format(fsm=forecast_source_menu))
self.logger.threaddebug(u"forecast_source_menu: {fsm}".format(fsm=forecast_source_menu))
return sorted(forecast_source_menu, key=lambda s: s[1].lower())
# =============================================================================
def plotActionApi(self, plugin_action, dev=None, caller_waiting_for_result=False):
"""
Plugin API handler
A container for simple API calls to the matplotlib plugin. All payload elements
are required, although kwargs can be an empty dict if no kwargs are desired. If
the caller is waiting for a result (recommended), a dict is returned.
Receives::
payload = {'x_values': [1, 2, 3],
'y_values': [2, 4, 6],
'kwargs': {'linestyle': 'dashed',
'color': 'b',
'marker': 's',
'markerfacecolor': 'b'},
'path': '/full/path/name/',
'filename': 'chart_filename.png'}
-----
:param class 'indigo.PluginAction' plugin_action:
:param class 'indigo.Device' dev:
:param bool caller_waiting_for_result:
"""
self.logger.info(u"Scripting payload: {pyld}".format(pyld=dict(plugin_action.props)))
# TODO: take a look at broadcast messages and whether this is something else that can be made to
# work in this neighborhood.
# TODO: this worked well up until where the "real" device would get its data.
# Need to make the API shim device work with existing get data steps.
# # Instantiate an instance of an ApiDevice for data from the API call.
# my_device = ApiDevice()
#
# # ============================= Unpack Payload ==============================
# # Take payload data from action and parse it into API device attributes.
# my_device.apiXvalues = plugin_action.props['x_values']
# my_device.apiYvalues = plugin_action.props['y_values']
# my_device.apiKwargs = plugin_action.props['kwargs']
# my_device.path_name = plugin_action.props['path']
# my_device.apiFileName = plugin_action.props['filename']
# my_device.deviceTypeId = plugin_action.props['deviceTypeId']
#
# # ====================== Obtain All Plugin Preferences ======================
# # Take plugin prefs and parse them into API device attributes.
# my_device.globalProps = self.pluginPrefs
#
# # ================== Obtain All Properties for Device Type ==================
# # Take device type properties and parse them into API device attributes.
#
# # Get the config XML for the chart device type
# props = self.devicesTypeDict[my_device.deviceTypeId]["ConfigUIRawXml"]
# root = eTree.ElementTree(eTree.fromstring(props))
#
# # Extract all field ids and defaultValues from XML
# for type_tag in root.findall('Field'):
# field_id = type_tag.get('id')
# default_value = type_tag.get('defaultValue')
# if field_id not in my_device.globalProps.keys():
# my_device.globalProps[field_id] = default_value
#
# # Call for chart to be produced.
# self.charts_refresh(dev_list=[my_device])
dpi = int(self.pluginPrefs.get('chartResolution', 100))
height = float(self.pluginPrefs.get('rectChartHeight', 250))
width = float(self.pluginPrefs.get('rectChartWidth', 600))
face_color = self.pluginPrefs.get('faceColor', '#000000')
bk_color = self.pluginPrefs.get('backgroundColor', '#000000')
# ============================= Unpack Payload ==============================
x_values = plugin_action.props['x_values']
y_values = plugin_action.props['y_values']
kwargs = plugin_action.props['kwargs']
path_name = plugin_action.props['path']
file_name = plugin_action.props['filename']
try:
fig = plt.figure(1, figsize=(width / dpi, height / dpi))
ax = fig.add_subplot(111)
ax.patch.set_facecolor(face_color)
ax.plot(x_values, y_values, **kwargs)
plt.savefig(u"{path}{fn}".format(path=path_name, fn=file_name),
facecolor=bk_color,
dpi=dpi)
plt.clf()
plt.close('all')
except Exception as sub_error:
if caller_waiting_for_result:
self.plugin_error_handler(sub_error=traceback.format_exc())
self.logger.error(u"[{name}] Error: {fn}. See plugin log for more "
u"information.".format(name=dev.name, fn=sub_error))
return {'success': False, 'message': u"{s}".format(s=sub_error)}
if caller_waiting_for_result:
# Note: return values from actions invoked via indigo.executeAction() can't be
# bools or indigo.Dicts (and likely some other types). Strings and the plain
# dict below will work.
return {'success': True, 'message': u"Success"}
# =============================================================================
def pluginEnvironmentLogger(self):
"""
Log information about the plugin resource environment.
Write select information about the environment that the plugin is running in.
This method is only called once, when the plugin is first loaded (or reloaded).
-----
:var int chart_devices:
:var int csv_engines:
"""
chart_devices = 0
csv_engines = 0
# ========================== Get Plugin Device Load ===========================
for dev in indigo.devices.iter('self'):
if dev.pluginProps.get('isChart', False):
chart_devices += 1
elif dev.deviceTypeId == 'csvEngine':
csv_engines += 1
self.logger.info(u"")
self.logger.info(u"{0:{1}^135}".format(" Matplotlib Environment ", "="))
self.logger.info(u"{0:<31} {1}".format("Matplotlib version:", plt.matplotlib.__version__))
self.logger.info(u"{0:<31} {1}".format("Numpy version:", np.__version__))
self.logger.info(u"{0:<31} {1}".format("Matplotlib RC Path:", plt.matplotlib.matplotlib_fname()))
log_path = indigo.server.getLogsFolderPath(pluginId='com.fogbert.indigoplugin.matplotlib')
self.logger.info(u"{0:<31} {1}".format("Matplotlib Plugin log location:", log_path))
self.logger.info(u"{0:<31} {1}".format("Number of Chart Devices:", chart_devices))
self.logger.info(u"{0:<31} {1}".format("Number of CSV Engine Devices:", csv_engines))
# rcParams is a dict containing all of the initial matplotlibrc settings
self.logger.threaddebug(u"{0:<31} {1}".format("Matplotlib base rcParams:", dict(rcParams)))
self.logger.threaddebug(u"{0:<31} {1}".format('Initial Plugin Prefs:', dict(self.pluginPrefs)))
self.logger.info(u"{0:{1}^135}".format("", "="))
# =============================================================================
def plugin_error_handler(self, sub_error):
"""
Centralized handling of traceback messages
Centralized handling of traceback messages formatted for pretty display in the
plugin log file. If sent here, they will not be displayed in the Indigo Events
log. Use the following syntax to send exceptions here::
self.plugin_error_handler(sub_error=traceback.format_exc())
-----
:param traceback object sub_error:
"""
sub_error = sub_error.splitlines()
self.logger.critical(u"{0:!^80}".format(" TRACEBACK "))
for line in sub_error:
self.logger.critical(u"!!! {ln}".format(ln=line))
self.logger.critical(u"!" * 80)
# =============================================================================
def process_plotting_log(self, device, replies, errors):
"""
Process output of multiprocessing queue messages
This method accepts a multiprocessing queue that contains log messages and
parses them out across the various self.logger calls.
-----
:param indigo.Device device:
:param str replies:
:param unicode errors:
"""
# ======================= Process Output Queue ========================
try:
replies = pickle.loads(replies)
<reponame>ouyang-w-19/decogo
# MINLP written by GAMS Convert at 04/21/18 13:55:14
#
# Equation counts
# Total E G L N X C B
# 327 211 58 58 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 560 154 406 0 0 0 0 0
# FX 1 1 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 1856 1624 232 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
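# (Illustrative note, not part of the original model file.) A model built this way
# is normally handed to a MINLP-capable solver; a minimal sketch, assuming such a
# solver (e.g. SCIP or BONMIN) is installed and visible to Pyomo:
#   from pyomo.environ import SolverFactory
#   results = SolverFactory('scip').solve(m, tee=True)
#   print(results.solver.termination_condition)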
m.x1 = Var(within=Reals,bounds=(105.15,121),initialize=105.15)
m.x2 = Var(within=Reals,bounds=(104.4,121),initialize=104.4)
m.x3 = Var(within=Reals,bounds=(103.35,121),initialize=103.35)
m.x4 = Var(within=Reals,bounds=(102.5,121),initialize=102.5)
m.x5 = Var(within=Reals,bounds=(101.24,121),initialize=101.24)
m.x6 = Var(within=Reals,bounds=(105.4,121),initialize=105.4)
m.x7 = Var(within=Reals,bounds=(107.9,121),initialize=107.9)
m.x8 = Var(within=Reals,bounds=(106.5,121),initialize=106.5)
m.x9 = Var(within=Reals,bounds=(106,121),initialize=106)
m.x10 = Var(within=Reals,bounds=(104.17,121),initialize=104.17)
m.x11 = Var(within=Reals,bounds=(103.7,121),initialize=103.7)
m.x12 = Var(within=Reals,bounds=(102.64,121),initialize=102.64)
m.x13 = Var(within=Reals,bounds=(101.9,121),initialize=101.9)
m.x14 = Var(within=Reals,bounds=(102.6,121),initialize=102.6)
m.x15 = Var(within=Reals,bounds=(103.5,121),initialize=103.5)
m.x16 = Var(within=Reals,bounds=(104.3,121),initialize=104.3)
m.x17 = Var(within=Reals,bounds=(105.5,121),initialize=105.5)
m.x18 = Var(within=Reals,bounds=(104.1,121),initialize=104.1)
m.x19 = Var(within=Reals,bounds=(102.9,121),initialize=102.9)
m.x20 = Var(within=Reals,bounds=(102.83,121),initialize=102.83)
m.x21 = Var(within=Reals,bounds=(102.8,121),initialize=102.8)
m.x22 = Var(within=Reals,bounds=(103.9,121),initialize=103.9)
m.x23 = Var(within=Reals,bounds=(104.2,121),initialize=104.2)
m.x24 = Var(within=Reals,bounds=(107.5,121),initialize=107.5)
m.x25 = Var(within=Reals,bounds=(104.4,121),initialize=104.4)
m.x26 = Var(within=Reals,bounds=(103.4,121),initialize=103.4)
m.x27 = Var(within=Reals,bounds=(103.9,121),initialize=103.9)
m.x28 = Var(within=Reals,bounds=(105.65,121),initialize=105.65)
m.x29 = Var(within=Reals,bounds=(104.5,121),initialize=104.5)
m.x30 = Var(within=Reals,bounds=(104.1,121),initialize=104.1)
m.x31 = Var(within=Reals,bounds=(104.4,121),initialize=104.4)
m.x32 = Var(within=Reals,bounds=(104.2,121),initialize=104.2)
m.x33 = Var(within=Reals,bounds=(104.6,121),initialize=104.6)
m.x34 = Var(within=Reals,bounds=(104.7,121),initialize=104.7)
m.x35 = Var(within=Reals,bounds=(105.43,121),initialize=105.43)
m.x36 = Var(within=Reals,bounds=(105.9,121),initialize=105.9)
m.x37 = Var(within=Reals,bounds=(121,121),initialize=121)
m.x38 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x39 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x40 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x41 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x42 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x43 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x44 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x45 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x46 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x47 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x48 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x49 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x50 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x51 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x52 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x53 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x54 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x55 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x56 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x57 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x58 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x59 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x60 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x61 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x62 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x63 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x64 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x65 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x66 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x67 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x68 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x69 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x70 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x71 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x72 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x73 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x74 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x75 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x76 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x77 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x78 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x79 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x80 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x81 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x82 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x83 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x84 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x85 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x86 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x87 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x88 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x89 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x90 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x91 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x92 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x93 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x94 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x95 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x96 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x97 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x98 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x99 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x100 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x101 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x102 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x103 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x104 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x105 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x106 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x107 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x108 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x109 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x110 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x111 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x112 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x113 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x114 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x115 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x116 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x117 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x118 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x119 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x120 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x121 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x122 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x123 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x124 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x125 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x126 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x127 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x128 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x129 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x130 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x131 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x132 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x133 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x134 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x135 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x136 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x137 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x138 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x139 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x140 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x141 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x142 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x143 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x144 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x145 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x146 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x147 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x148 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x149 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x150 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x151 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x152 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.x153 = Var(within=Reals,bounds=(0.00282743338823081,0.0706858347057703),initialize=0.00282743338823081)
m.b154 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b155 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b156 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b157 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b158 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b159 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b160 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b161 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b162 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b163 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b164 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b165 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b166 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b167 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b168 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b169 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b170 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b171 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b172 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b173 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b174 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b175 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b176 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b177 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b178 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b179 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b180 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b181 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b182 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b183 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b184 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b185 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b186 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b187 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b188 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b189 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b190 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b191 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b192 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b193 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b194 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b195 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b196 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b197 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b198 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b199 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b200 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b201 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b202 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b203 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b204 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b205 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b206 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b207 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b208 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b209 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b210 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b211 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b212 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b213 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b214 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b215 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b216 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b217 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b218 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b219 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b220 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b221 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b222 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b223 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b224 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b225 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b226 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b227 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b228 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b229 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b230 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b231 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b232 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b233 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b234 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b235 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b236 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b237 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b238 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b239 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b240 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b241 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b242 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b243 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b244 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b245 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b246 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b247 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b248 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b249 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b250 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b251 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b252 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b253 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b254 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b255 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b256 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b257 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b258 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b259 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b260 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b261 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b262 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b263 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b264 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b265 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b266 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b267 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b268 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b269 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b270 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b271 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b272 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b273 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b274 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b275 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b276 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b277 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b278 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b279 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b280 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b281 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b282 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b283 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b284 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b285 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b286 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b287 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b288 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b289 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b290 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b291 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b292 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b293 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b294 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b295 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b296 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b297 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b298 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b299 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b300 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b301 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b302 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b303 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b304 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b305 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b306 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b307 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b308 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b309 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b310 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b311 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b312 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b313 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b314 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b315 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b316 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b317 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b318 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b319 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b320 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b321 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b322 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b323 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b324 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b325 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b326 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b327 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b328 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b329 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b330 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b331 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b332 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b333 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b334 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b335 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b336 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b337 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b338 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b339 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b340 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b341 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b342 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b343 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b344 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b345 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b346 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b347 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b348 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b349 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b350 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b351 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b352 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b353 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b354 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b355 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b356 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b357 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b358 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b359 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b360 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b361 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b362 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b363 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b364 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b365 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b366 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b367 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b368 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b369 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b370 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b371 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b372 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b373 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b374 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b375 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b376 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b377 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b378 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b379 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b380 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b381 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b382 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b383 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b384 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b385 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b386 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b387 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b388 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b389 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b390 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b391 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b392 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b393 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b394 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b395 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b396 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b397 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b398 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b399 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b400 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b401 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b402 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b403 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b404 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b405 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b406 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b407 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b408 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b409 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b410 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b411 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b412 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b413 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b414 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b415 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b416 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b417 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b418 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b419 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b420 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b421 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b422 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b423 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b424 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b425 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b426 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b427 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b428 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b429 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b430 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b431 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b432 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b433 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b434 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b435 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b436 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b437 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b438 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b439 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b440 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b441 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b442 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b443 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b444 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b445 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b446 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b447 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b448 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b449 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b450 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b451 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b452 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b453 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b454 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b455 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b456 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b457 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b458 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b459 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b460 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b461 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b462 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b463 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b464 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b465 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b466 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b467 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b468 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b469 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b470 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b471 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b472 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b473 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b474 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b475 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b476 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b477 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b478 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b479 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b480 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b481 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b482 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b483 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b484 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b485 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b486 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b487 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b488 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b489 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b490 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b491 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b492 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b493 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b494 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b495 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b496 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b497 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b498 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b499 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b500 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b501 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b502 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b503 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b504 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b505 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b506 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b507 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b508 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b509 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b510 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b511 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b512 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b513 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b514 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b515 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b516 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b517 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b518 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b519 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b520 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b521 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b522 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b523 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b524 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b525 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b526 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b527 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b528 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b529 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b530 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b531 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b532 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b533 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b534 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b535 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b536 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b537 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b538 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b539 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b540 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b541 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b542 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b543 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b544 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b545 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b546 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b547 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b548 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b549 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b550 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b551 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b552 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b553 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b554 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b555 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b556 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b557 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b558 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b559 = Var(within=Binary,bounds=(0,1),initialize=0)
m.obj = Objective(expr= 914716.4*m.b154 + 1324944.8*m.b155 + 2550319.6*m.b156 + 3996076*m.b157 + 8070480.4*m.b158
+ 9265320.4*m.b159 + 14703170*m.b160 + 519574.9*m.b161 + 752591.8*m.b162 + 1448626.1*m.b163
+ 2269841*m.b164 + 4584173.9*m.b165 + 5262863.9*m.b166 + 8351657.5*m.b167 + 825008.6*m.b168
+ 1195005.2*m.b169 + 2300205.4*m.b170 + 3604174*m.b171 + 7278994.6*m.b172 + 8356654.6*m.b173
+ 13261205*m.b174 + 1122863.3*m.b175 + 1626440.6*m.b176 + 3130653.7*m.b177 + 4905397*m.b178
+ 9906946.3*m.b179 + 11373676.3*m.b180 + 18048927.5*m.b181 + 2154640.8*m.b182 + 3120945.6*m.b183
+ 6007351.2*m.b184 + 9412872*m.b185 + 19010248.8*m.b186 + 21824728.8*m.b187 + 34633740*m.b188
+
Ind. Co.,Ltd.",
"844F03": "Ablelink Electronics Ltd",
"845181": "Samsung Electronics Co.,Ltd",
"84569C": "Coho Data, Inc.,",
"845787": "DVR C&C Co., Ltd.",
"845C93": "Chabrier Services",
"845DD7": "Shenzhen Netcom Electronics Co.,Ltd",
"846223": "Shenzhen Coship Electronics Co., Ltd.",
"8462A6": "EuroCB (Phils), Inc.",
"846AED": "Wireless Tsukamoto.,co.LTD",
"846EB1": "Park Assist LLC",
"847207": "I&C Technology",
"84742A": "zte corporation",
"847616": "Addat S.r.o.",
"8478AC": "Cisco",
"847A88": "HTC Corporation",
"847E40": "Texas Instruments",
"8482F4": "Beijing Huasun Unicreate Technology Co., Ltd",
"848336": "Newrun",
"848371": "Avaya, Inc",
"848433": "Paradox Engineering SA",
"848506": "Apple",
"848D84": "Rajant Corporation",
"848DC7": "Cisco SPVTG",
"848E0C": "Apple",
"848E96": "Embertec Pty Ltd",
"848F69": "Dell Inc.",
"849000": "<NAME>",
"84948C": "Hitron Technologies. Inc",
"8496D8": "Pace plc",
"8497B8": "Memjet Inc.",
"849CA6": "Arcadyan Technology Corporation",
"849DC5": "Centera Photonics Inc.",
"84A6C8": "Intel Corporate",
"84A783": "Alcatel Lucent",
"84A8E4": "Shenzhen Huawei Communication Technologies Co., Ltd",
"84A991": "Cyber Trans Japan Co.,Ltd.",
"84ACA4": "Beijing Novel Super Digital TV Technology Co., Ltd",
"84AF1F": "Beat System Service Co,. Ltd.",
"84B153": "Apple",
"84B59C": "Juniper networks",
"84C2E4": "Jiangsu Qinheng Co., Ltd.",
"84C727": "Gnodal Ltd",
"84C7A9": "C3PO S.A.",
"84C8B1": "Incognito Software Inc.",
"84C9B2": "D-Link International",
"84D32A": "IEEE 1905.1",
"84D9C8": "Unipattern Co.,",
"84DB2F": "Sierra Wireless Inc",
"84DD20": "Texas Instruments",
"84DE3D": "Crystal Vision Ltd",
"84DF0C": "NET2GRID BV",
"84E4D9": "Shenzhen NEED technology Ltd.",
"84E629": "Bluwan SA",
"84E714": "Liang Herng Enterprise,Co.Ltd.",
"84EA99": "Vieworks",
"84ED33": "BBMC Co.,Ltd",
"84F493": "OMS spol. s.r.o.",
"84F64C": "Cross Point BV",
"84FCFE": "Apple",
"84FE9E": "RTC Industries, Inc.",
"880355": "Arcadyan Technology Corp.",
"880905": "MTMCommunications",
"880F10": "Huami Information Technology Co.,Ltd.",
"880FB6": "Jabil Circuits India Pvt Ltd,-EHTP unit",
"881036": "Panodic(ShenZhen) Electronics Limted",
"88124E": "Qualcomm Atheros",
"88142B": "Protonic Holland",
"881544": "Meraki, Inc.",
"8818AE": "Tamron Co., Ltd",
"881FA1": "Apple",
"882012": "LMI Technologies",
"8821E3": "Nebusens, S.L.",
"882364": "Watchnet DVR Inc",
"8823FE": "TTTech Computertechnik AG",
"88252C": "Arcadyan Technology Corporation",
"882E5A": "storONE",
"88308A": "Murata Manufactuaring Co.,Ltd.",
"88329B": "Samsung Electro Mechanics co.,LTD.",
"883314": "Texas Instruments",
"88354C": "Transics",
"883612": "SRC Computers, LLC",
"8841C1": "ORBISAT DA AMAZONIA IND E AEROL SA",
"8841FC": "AirTies Wireless Netowrks",
"8843E1": "CISCO SYSTEMS, INC.",
"8844F6": "Nokia Corporation",
"88462A": "Telechips Inc.",
"884B39": "Siemens AG, Healthcare Sector",
"8851FB": "Hewlett Packard",
"88532E": "Intel Corporate",
"885395": "Apple",
"8853D4": "Huawei Technologies Co., Ltd",
"88576D": "XTA Electronics Ltd",
"885A92": "Cisco",
"885BDD": "Aerohive Networks Inc.",
"885C47": "Alcatel Lucent",
"88615A": "Siano Mobile Silicon Ltd.",
"8863DF": "Apple",
"88685C": "Shenzhen ChuangDao & Perpetual Eternal Technology Co.,Ltd",
"886B76": "CHINA HOPEFUL GROUP HOPEFUL ELECTRIC CO.,LTD",
"887398": "K2E Tekpoint",
"887556": "Cisco",
"88789C": "Game Technologies SA",
"888603": "HUAWEI TECHNOLOGIES CO.,LTD",
"8886A0": "Simton Technologies, Ltd.",
"888717": "CANON INC.",
"8887DD": "DarbeeVision Inc.",
"888914": "All Components Incorporated",
"888964": "GSI Electronics Inc.",
"888B5D": "Storage Appliance Corporation",
"888C19": "Brady Corp Asia Pacific Ltd",
"889166": "Viewcooper Corp.",
"8891DD": "Racktivity",
"889471": "Brocade Communications Systems, Inc.",
"8894F9": "Gemicom Technology, Inc.",
"8895B9": "Unified Packet Systems Crop",
"889676": "TTC MARCONI s.r.o.",
"8897DF": "Entrypass Corporation Sdn. Bhd.",
"889821": "TERAON",
"889B39": "Samsung Electronics Co.,Ltd",
"889CA6": "BTB Korea INC",
"889FFA": "Hon Hai Precision Ind. Co.,Ltd.",
"88A3CC": "Amatis Controls",
"88A5BD": "QPCOM INC.",
"88A73C": "Ragentek Technology Group",
"88ACC1": "Generiton Co., Ltd.",
"88AE1D": "COMPAL INFORMATION(KUNSHAN)CO.,LTD",
"88B168": "Delta Control GmbH",
"88B1E1": "AirTight Networks, Inc.",
"88B627": "Gembird Europe BV",
"88BA7F": "Qfiednet Co., Ltd.",
"88BFD5": "Simple Audio Ltd",
"88C36E": "Beijing Ereneben lnformation Technology Limited",
"88C626": "Logitech - Ultimate Ears",
"88C663": "Apple",
"88CB87": "Apple",
"88D7BC": "DEP Company",
"88D962": "Canopus Systems US LLC",
"88DC96": "SENAO Networks, Inc.",
"88DD79": "Voltaire",
"88E0A0": "Shenzhen VisionSTOR Technologies Co., Ltd",
"88E0F3": "Juniper Networks",
"88E3AB": "Huawei Technologies Co., Ltd",
"88E712": "Whirlpool Corporation",
"88E7A6": "iKnowledge Integration Corp.",
"88E8F8": "YONG TAI ELECTRONIC (DONGGUAN) LTD.",
"88E917": "Tamaggo",
"88ED1C": "Cudo Communication Co., Ltd.",
"88F031": "Cisco",
"88F077": "CISCO SYSTEMS, INC.",
"88F488": "cellon communications technology(shenzhen)Co.,Ltd.",
"88F490": "Jetmobile Pte Ltd",
"88F7C7": "Technicolor USA Inc.",
"88FD15": "LINEEYE CO., LTD",
"88FED6": "ShangHai WangYong Software Co., Ltd.",
"8C006D": "Apple",
"8C04FF": "Technicolor USA Inc.",
"8C078C": "FLOW DATA INC",
"8C088B": "Remote Solution",
"8C09F4": "ARRIS Group, Inc.",
"8C0C90": "Ruckus Wireless",
"8C0CA3": "Amper",
"8C0EE3": "GUANGDONG OPPO MOBILE TELECOMMUNICATIONS CORP.,LTD.",
"8C11CB": "ABUS Security-Center GmbH & Co. KG",
"8C1F94": "RF Surgical System Inc.",
"8C210A": "TP-LINK TECHNOLOGIES CO., LTD.",
"8C271D": "QuantHouse",
"8C278A": "Vocollect Inc",
"8C2937": "Apple",
"8C2DAA": "Apple",
"8C2F39": "IBA Dosimetry GmbH",
"8C3330": "EmFirst Co., Ltd.",
"8C3AE3": "LG Electronics",
"8C3C07": "Skiva Technologies, Inc.",
"8C3C4A": "NAKAYO TELECOMMUNICATIONS,INC.",
"8C41F2": "RDA Technologies Ltd.",
"8C4435": "Shanghai BroadMobi Communication Technology Co., Ltd.",
"8C4AEE": "GIGA TMS INC",
"8C4B59": "3D Imaging & Simulations Corp",
"8C4CDC": "PLANEX COMMUNICATIONS INC.",
"8C4DB9": "Unmonday Ltd",
"8C4DEA": "Cerio Corporation",
"8C5105": "Shenzhen ireadygo Information Technology CO.,LTD.",
"8C53F7": "A&D ENGINEERING CO., LTD.",
"8C541D": "LGE",
"8C569D": "Imaging Solutions Group",
"8C56C5": "Nintendo Co., Ltd.",
"8C57FD": "LVX Western",
"8C5877": "Apple",
"8C598B": "C Technologies AB",
"8C5AF0": "Exeltech Solar Products",
"8C5CA1": "d-broad,INC",
"8C5FDF": "Beijing Railway Signal Factory",
"8C604F": "CISCO SYSTEMS, INC.",
"8C640B": "Beyond Devices d.o.o.",
"8C6422": "Sony Ericsson Mobile Communications AB",
"8C6878": "Nortek-AS",
"8C6AE4": "Viogem Limited",
"8C705A": "Intel Corporate",
"8C71F8": "Samsung Electronics Co.,Ltd",
"8C736E": "Fujitsu Limited",
"8C76C1": "Goden Tech Limited",
"8C7712": "Samsung Electronics Co.,Ltd",
"8C7716": "LONGCHEER TELECOMMUNICATION LIMITED",
"8C7B9D": "Apple",
"8C7C92": "Apple",
"8C7CB5": "Hon Hai Precision Ind. Co.,Ltd.",
"8C7CFF": "Brocade Communications Systems, Inc.",
"8C7EB3": "Lytro, Inc.",
"8C82A8": "Insigma Technology Co.,Ltd",
"8C8401": "PRIVATE",
"8C89A5": "Micro-Star INT'L CO., LTD",
"8C8A6E": "ESTUN AUTOMATION TECHNOLOY CO., LTD",
"8C8E76": "taskit GmbH",
"8C90D3": "Alcatel Lucent",
"8C9236": "Aus.Linx Technology Co., Ltd.",
"8C94CF": "Encell Technology, Inc.",
"8CA048": "Beijing NeTopChip Technology Co.,LTD",
"8CA982": "Intel Corporate",
"8CAE4C": "Plugable Technologies",
"8CAE89": "Y-cam Solutions Ltd",
"8CB64F": "CISCO SYSTEMS, INC.",
"8CB7F7": "Shenzhen UniStrong Science & Technology Co., Ltd",
"8CB82C": "IPitomy Communications",
"8CB864": "AcSiP Technology Corp.",
"8CBEBE": "Xiaomi Technology Co.,Ltd",
"8CBF9D": "Shanghai Xinyou Information Technology Ltd. Co.",
"8CC121": "Panasonic Corporation AVC Networks Company",
"8CC5E1": "<NAME>ka Telecommunication Technology Co.,Ltd",
"8CC7AA": "Radinet Communications Inc.",
"8CC7D0": "zhejiang ebang communication co.,ltd",
"8CC8CD": "Samsung Electronics Co., LTD",
"8CCDA2": "ACTP, Inc.",
"8CCDE8": "Nintendo Co., Ltd.",
"8CCF5C": "BEFEGA GmbH",
"8CD17B": "CG Mobile",
"8CD3A2": "VisSim AS",
"8CD628": "Ikor Metering",
"8CDB25": "ESG Solutions",
"8CDD8D": "Wifly-City System Inc.",
"8CDE52": "ISSC Technologies Corp.",
"8CDE99": "Comlab Inc.",
"8CE081": "zte corporation",
"8CE748": "PRIVATE",
"8CE7B3": "Sonardyne International Ltd",
"8CEEC6": "Precepscion Pty. Ltd.",
"8CF945": "Power Automation pte Ltd",
"8CF9C9": "MESADA Technology Co.,Ltd.",
"8CFABA": "Apple",
"8CFDF0": "QUALCOMM Incorporated",
"90004E": "Hon Hai Precision Ind. Co.,Ltd.",
"90013B": "SAGEMCOM",
"90028A": "Shenzhen Shidean Legrand Electronic Products Co.,Ltd",
"9002A9": "ZHEJIANG DAHUA TECHNOLOGY CO.,LTD",
"9003B7": "PARROT",
"900917": "Far-sighted mobile",
"900A3A": "PSG Plastic Service GmbH",
"900D66": "Digimore Electronics Co., Ltd",
"900DCB": "ARRIS Group, Inc.",
"90185E": "Apex Tool Group GmbH & Co OHG",
"90187C": "Samsung Electro Mechanics co., LTD.",
"9018AE": "Shanghai Meridian Technologies, Co. Ltd.",
"901900": "SCS SA",
"901ACA": "ARRIS Group, Inc.",
"901B0E": | |
# original code: https://github.com/dyhan0920/PyramidNet-PyTorch/blob/master/train.py
import argparse
import os
import shutil
import time
import cv2
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import resnet as RN
import pyramidnet as PYRM
import utils
import numpy as np
import warnings
from PIL import Image
from torch.utils.data import DataLoader, Dataset
from models.experimental import attempt_load
from models.yolo import Model
from utils.general import (LOGGER, NCOLS, check_dataset, check_file, check_git_status, check_img_size,
check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, increment_path,
init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods,
one_cycle, print_args, print_mutation, strip_optimizer)
warnings.filterwarnings("ignore")
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='Cutmix PyTorch CIFAR-10, CIFAR-100 and ImageNet-1k Training')
parser.add_argument('--net_type', default='yolov5n', type=str,
                    help='network type: resnet, pyramidnet, or yolov5n')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('-b', '--batch_size', default=600, type=int,
                    metavar='N', help='mini-batch size (default: 600)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=1, type=int,
                    metavar='N', help='print frequency (default: 1)')
parser.add_argument('--depth', default=18, type=int,
                    help='depth of the network (default: 18)')
parser.add_argument('--no-bottleneck', dest='bottleneck', action='store_false',
help='to use basicblock for CIFAR datasets (default: bottleneck)')
parser.add_argument('--dataset', dest='dataset', default='LSD', type=str,
                    help='dataset (options: cifar10, cifar100, imagenet, and LSD)')
parser.add_argument('--no-verbose', dest='verbose', action='store_false',
help='to print the status at every iteration')
parser.add_argument('--alpha', default=300, type=float,
help='number of new channel increases per depth (default: 300)')
parser.add_argument('--expname', default='TEST', type=str,
help='name of experiment')
parser.add_argument('--beta', default=0.3, type=float,
help='hyperparameter beta')
parser.add_argument('--cutmix_prob', default=0.5, type=float,
help='cutmix probability')
parser.set_defaults(bottleneck=True)
parser.set_defaults(verbose=True)
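# --- Illustrative usage note (not in the original script) -----------------------------
# Assuming the file is saved as train.py (as in the repository referenced at the top of
# this script), a typical invocation with the defaults above would be:
#
#   python train.py --net_type yolov5n --dataset LSD --epochs 90 --batch_size 256 \
#                   --lr 0.1 --beta 0.3 --cutmix_prob 0.5 --expname TEST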
best_err1 = 100
best_err5 = 100
class MyDataset(Dataset):
def __init__(self, root, datatxt, transform=None):
super(MyDataset, self).__init__()
fh = open(root+datatxt,'r')
imgs = []
for line in fh:
line = line.rstrip()
words = line.split('\t')
imgs.append((words[0], int(words[1])))
self.imgs = imgs
self.transform = transform
def __getitem__(self ,index):
fn,label = self.imgs[index]
#img = Image.open(root+fn)
img = cv2.imread(fn)
#print(fn)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
PIL_IMG=Image.fromarray(img)
if self.transform is not None:
img = self.transform(PIL_IMG)
#img = img.transpose(2, 0, 1)
return img,label
def __len__(self):
return len(self.imgs)
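# --- Illustrative usage sketch (not in the original script) ---------------------------
# MyDataset expects an index file of "<image-path>\t<label>" lines; each image is read
# with OpenCV, converted to grayscale, wrapped as a PIL image, and passed through the
# torchvision transform.  The paths below are placeholders matching main() further down:
#
#   demo_tf = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()])
#   demo_set = MyDataset(root='G:/DataSet/LSD/', datatxt='train.txt', transform=demo_tf)
#   demo_loader = DataLoader(demo_set, batch_size=32, shuffle=True)
#   images, labels = next(iter(demo_loader))  # images: (32, 1, 224, 224) grayscale tensors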
def main():
global args, best_err1, best_err5
args = parser.parse_args()
if args.dataset.startswith('cifar'):
normalize = transforms.Normalize(mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
std=[x / 255.0 for x in [63.0, 62.1, 66.7]])
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
transform_test = transforms.Compose([
transforms.ToTensor(),
normalize
])
if args.dataset == 'cifar100':
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR100('../data', train=True, download=True, transform=transform_train),
batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.CIFAR100('../data', train=False, transform=transform_test),
batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True)
numberofclass = 100
elif args.dataset == 'cifar10':
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10('../data', train=True, download=True, transform=transform_train),
batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.CIFAR10('../data', train=False, transform=transform_test),
batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True)
numberofclass = 10
else:
raise Exception('unknown dataset: {}'.format(args.dataset))
elif args.dataset == 'imagenet':
traindir = os.path.join('/home/data/ILSVRC/train')
valdir = os.path.join('/home/data/ILSVRC/val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
jittering = utils.ColorJitter(brightness=0.4, contrast=0.4,
saturation=0.4)
lighting = utils.Lighting(alphastd=0.1,
eigval=[0.2175, 0.0188, 0.0045],
eigvec=[[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203]])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
jittering,
lighting,
normalize,
]))
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
numberofclass = 1000
elif args.dataset == 'LSD':
transform_train = transforms.Compose([
transforms.Resize((256,256)),
transforms.RandomHorizontalFlip(0.5),
transforms.RandomVerticalFlip(0.5),
transforms.RandomCrop((224,224)),
transforms.ToTensor(),
#transforms.Normalize(mean=[0.0],
# std=[0.3922])
])
transform_test = transforms.Compose([
transforms.Resize((224,224)),
            transforms.ToTensor(),  # converts (H, W, C) to (C, H, W), values scaled to [0, 1]
#transforms.Normalize(mean=[0.0],
# std=[0.3922])
])
trainset = MyDataset(root='G:/DataSet/LSD/', datatxt='train.txt',transform=transform_train)
train_loader = DataLoader(trainset, batch_size=256, shuffle=True,num_workers=args.workers, pin_memory=True)
testset = MyDataset(root='G:/DataSet/LSD/', datatxt='val.txt', transform=transform_test)
val_loader = DataLoader(testset, batch_size=256, shuffle=True,num_workers=args.workers, pin_memory=True)
else:
raise Exception('unknown dataset: {}'.format(args.dataset))
print("=> creating model '{}'".format(args.net_type))
if args.net_type == 'resnet':
model = RN.ResNet('imagenet', 18, 2, args.bottleneck) # for ResNet
elif args.net_type == 'pyramidnet':
model = PYRM.PyramidNet(args.dataset, args.depth, args.alpha, numberofclass,
args.bottleneck)
elif args.net_type == 'yolov5n':
cfg = 'E:/DLPROJTCT/CutMix-PyTorch-master/models/yolov5n_class.yaml'
weights='E:/DLPROJTCT/CutMix-PyTorch-master/yolov5n.pt'
model = Model(cfg, ch=1, nc=2)
ckpt = torch.load(weights) # load checkpoint
exclude = ['anchor']
csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32
csd = intersect_dicts(csd, model.state_dict(), exclude=exclude) # intersect
model.load_state_dict(csd, strict=False) # load
else:
raise Exception('unknown network architecture: {}'.format(args.net_type))
model = torch.nn.DataParallel(model).cuda()
print(model)
print('the number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay, nesterov=True)
cudnn.benchmark = True
for epoch in range(0, args.epochs):
adjust_learning_rate(optimizer, epoch)
# train for one epoch
train_loss = train(train_loader, model, criterion, optimizer, epoch)
# evaluate on validation set
err1, err5, val_loss = validate(val_loader, model, criterion, epoch)
# remember best prec@1 and save checkpoint
is_best = err1 <= best_err1
best_err1 = min(err1, best_err1)
if is_best:
best_err5 = err5
print('Current best accuracy (top-1 and 5 error):', best_err1, best_err5)
save_checkpoint({
'epoch': epoch,
'arch': args.net_type,
'state_dict': model.state_dict(),
'best_err1': best_err1,
'best_err5': best_err5,
'optimizer': optimizer.state_dict(),
}, is_best)
print('Best accuracy (top-1 and 5 error):', best_err1, best_err5)
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
current_LR = get_learning_rate(optimizer)[0]
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
input = input.cuda()
target = target.cuda()
r = np.random.rand(1)
if args.beta > 0 and r < args.cutmix_prob:
# generate mixed sample
lam = np.random.beta(args.beta, args.beta)
rand_index = torch.randperm(input.size()[0]).cuda()
target_a = target
target_b = target[rand_index]
bbx1, bby1, bbx2, bby2 = rand_bbox(input.size(), lam)
input[:, :, bbx1:bbx2, bby1:bby2] = input[rand_index, :, bbx1:bbx2, bby1:bby2]
# adjust lambda to exactly match pixel ratio
lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (input.size()[-1] * input.size()[-2]))
# compute output
output = model(input)
loss = criterion(output, target_a) * lam + criterion(output, target_b) * (1. - lam)
else:
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
err1, err5 = accuracy(output.data, target, topk=(1, 1))
losses.update(loss.item(), input.size(0))
top1.update(err1.item(), input.size(0))
top5.update(err5.item(), input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0 and args.verbose == True:
print('Epoch: [{0}/{1}][{2}/{3}]\t'
'LR: {LR:.6f}\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Top 1-err {top1.val:.4f} ({top1.avg:.4f})\t'
'Top 5-err {top5.val:.4f} ({top5.avg:.4f})'.format(
epoch, args.epochs, i, len(train_loader), LR=current_LR, batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
print('* Epoch: [{0}/{1}]\t Top 1-err {top1.avg:.3f} Top 5-err {top5.avg:.3f}\t Train Loss {loss.avg:.3f}'.format(
epoch, args.epochs, top1=top1, top5=top5, loss=losses))
return losses.avg
def rand_bbox(size, lam):
W = size[2]
H = size[3]
cut_rat = np.sqrt(1. - lam)
    cut_w = int(W * cut_rat)  # builtin int: np.int was removed in recent NumPy releases
    cut_h = int(H * cut_rat)
# uniform
cx = np.random.randint(W)
cy = np.random.randint(H)
bbx1 = np.clip(cx - cut_w // 2, 0, W)
bby1 = np.clip(cy - cut_h // 2, 0, H)
bbx2 = np.clip(cx + cut_w // 2, 0, W)
bby2 = np.clip(cy + cut_h // 2, 0, H)
return bbx1, bby1, bbx2, bby2
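# --- Illustrative note on the CutMix box sampling (not in the original script) --------
# rand_bbox() samples a patch whose area is roughly (1 - lam) of the image; because the
# box is clipped at the borders, the training loop above recomputes lam from the actual
# box so that the two loss terms are weighted by the true pixel ratio, e.g.:
#
#   dummy = torch.zeros(2, 3, 224, 224)
#   lam = np.random.beta(0.3, 0.3)                       # args.beta defaults to 0.3
#   bbx1, bby1, bbx2, bby2 = rand_bbox(dummy.size(), lam)
#   lam_adj = 1 - (bbx2 - bbx1) * (bby2 - bby1) / (224 * 224)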
def validate(val_loader, model, criterion, epoch):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
target = target.cuda()
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
err1, err5 = accuracy(output.data, target, topk=(1, 1))
losses.update(loss.item(), input.size(0))
top1.update(err1.item(), input.size(0))
top5.update(err5.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0 and args.verbose == True:
print('Test (on val set): [{0}/{1}][{2}/{3}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Top 1-err {top1.val:.4f} ({top1.avg:.4f})\t'
'Top 5-err {top5.val:.4f} ({top5.avg:.4f})'.format(
epoch, args.epochs, i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
print('* Epoch: [{0}/{1}]\t Top 1-err {top1.avg:.3f} Top 5-err {top5.avg:.3f}\t Test Loss {loss.avg:.3f}'.format(
epoch, args.epochs, top1=top1, top5=top5, loss=losses))
return top1.avg, top5.avg, losses.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
directory = "runs/%s/" % (args.expname)
if not os.path.exists(directory):
os.makedirs(directory)
filename = directory + filename
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'runs/%s/' % (args.expname) + 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
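# --- Illustrative usage note (not in the original script) -----------------------------
# AverageMeter keeps a running sum and count so each metric can be reported both as the
# most recent batch value (.val) and as the running mean (.avg), e.g.:
#
#   meter = AverageMeter()
#   meter.update(0.8, n=256)   # batch loss 0.8 averaged over 256 samples
#   meter.update(0.6, n=256)
#   meter.avg                  # -> 0.7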
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
if args.dataset.startswith('cifar'):
        lr = args.lr * (0.1 ** (epoch // (args.epochs * 0.5))) * (0.1 ** (epoch // (args.epochs *
# Copyright (C) 2013 Ion Torrent Systems, Inc. All Rights Reserved
from django.utils.translation import ugettext as _, ugettext_lazy
from iondb.rundb.models import (
Chip,
LibraryKey,
RunType,
KitInfo,
common_CV,
ApplicationGroup,
SampleGroupType_CV,
dnaBarcode,
ReferenceGenome,
ApplProduct,
PlannedExperiment,
SampleAnnotation_CV,
FlowOrder,
Content,
Plugin,
Sample,
)
from iondb.utils import validation
from iondb.rundb.labels import Experiment as _Experiment
from iondb.rundb.plan.views_helper import dict_bed_hotspot
import os
import re
import logging
from django.db.models import Q
from iondb.rundb.labels import PlanTemplate, ScientificApplication
logger = logging.getLogger(__name__)
MAX_LENGTH_PLAN_NAME = 60
MAX_LENGTH_SAMPLE_NAME = 127
MAX_LENGTH_SAMPLE_ID = 127
MAX_LENGTH_PROJECT_NAME = 64
MAX_LENGTH_NOTES = 1024
MAX_LENGTH_SAMPLE_TUBE_LABEL = 512
MIN_FLOWS = 1
MAX_FLOWS = 2000
MIN_QC_INT = 1
MAX_QC_INT = 100
PROJECT_NAME_LENGTH = 64
MIN_LIBRARY_READ_LENGTH = 0
MAX_LIBRARY_READ_LENGTH = 1000
# VALID_TEMPLATING_SIZES = ["200", "400"]
def validate_plan_name(value, field_label):
errors = []
if not validation.has_value(value):
errors.append(validation.required_error(field_label))
if not validation.is_valid_chars(value):
errors.append(validation.invalid_chars_error(field_label))
if not validation.is_valid_length(value, MAX_LENGTH_PLAN_NAME):
errors.append(
validation.invalid_length_error(field_label, MAX_LENGTH_PLAN_NAME, value)
)
return errors
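# --- Illustrative usage sketch (not in the original module) ---------------------------
# Each validator returns a (possibly empty) list of translated error messages instead of
# raising, so callers can aggregate problems across several fields, e.g.:
#
#   errors = []
#   errors += validate_plan_name(planDisplayedName, field_label="Plan Name")
#   errors += validate_notes(notes)
#   if errors:
#       ...  # report every message back to the UI / API client at once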
def validate_notes(value, field_label=_Experiment.notes.verbose_name):
errors = []
if value:
if not validation.is_valid_chars(value):
errors.append(validation.invalid_chars_error(field_label))
if not validation.is_valid_length(value, MAX_LENGTH_NOTES):
errors.append(
validation.invalid_length_error(field_label, MAX_LENGTH_NOTES, value)
)
return errors
def validate_sample_name(
value,
field_label,
isTemplate=None,
isTemplate_label=PlanTemplate.verbose_name,
barcodeId=None,
barcodeId_label=ugettext_lazy("workflow.step.kits.fields.barcodeId.label"),
): # TODO: i18n
errors = []
if not value:
if not isTemplate:
errors.append(validation.required_error(field_label))
else:
if isTemplate:
errors.append(
validation.format(
ugettext_lazy("template.messages.validation.invalidsamples"),
{"name": isTemplate_label},
)
) # "Invalid input. Sample information cannot be saved in the %(name)s"
if barcodeId:
errors.append(
validation.format(
ugettext_lazy(
"plannedexperiment.messages.validation.nonbarcoded.barcodesetnotrequired"
),
{"barcodeSetName": barcodeId_label, "barcodeSetValue": barcodeId},
)
) # "Invalid input. %(barcodeSetName)s (%(barcodeSetValue)s) should not be provided for non barcoded plan"
if not validation.is_valid_chars(value):
errors.append(validation.invalid_chars_error(field_label))
if not validation.is_valid_leading_chars(value):
errors.append(validation.invalid_leading_chars(field_label))
if not validation.is_valid_length(value, MAX_LENGTH_SAMPLE_NAME):
errors.append(
validation.invalid_length_error(
field_label, MAX_LENGTH_SAMPLE_NAME, value
)
)
return errors
def validate_barcoded_sample_info(
sampleName,
sampleName_label,
sampleExternalId,
sampleExternalId_label,
nucleotideType,
nucleotideType_label,
sampleReference,
sampleReference_label,
runType,
applicationGroupName,
):
errors = []
if not validation.is_valid_chars(sampleName):
errors.append(validation.invalid_chars_error(sampleName_label))
if not validation.is_valid_leading_chars(sampleName):
errors.append(validation.invalid_leading_chars(sampleName_label))
if not validation.is_valid_length(sampleName, MAX_LENGTH_SAMPLE_NAME):
errors.append(
validation.invalid_length_error(
sampleName_label, MAX_LENGTH_SAMPLE_NAME, sampleName
)
)
sample_id_errors = validate_sample_id(
sampleExternalId, field_label=sampleExternalId_label
)
if sample_id_errors:
errors.extend(sample_id_errors)
nucleotideType_errors, sample_nucleotideType = validate_sample_nucleotideType(
nucleotideType, runType, applicationGroupName, field_label=nucleotideType_label
)
if nucleotideType_errors:
errors.extend(nucleotideType_errors)
ref_errors, ref_short_name = validate_reference(
sampleReference,
field_label=sampleReference_label,
runType=runType,
applicationGroupName=applicationGroupName,
application_label=ScientificApplication.verbose_name,
)
if ref_errors:
errors.extend(ref_errors)
return errors, ref_short_name, sample_nucleotideType
def validate_sample_nucleotideType(value, runType, applicationGroupName, field_label):
"""
validate nucleotide type case-insensitively with leading/trailing blanks in the input ignored
"""
errors = []
nucleotideType = ""
try:
runTypeObj = RunType.objects.filter(runType=runType)[0]
value_from_runType = (
runTypeObj.nucleotideType.upper() if runTypeObj.nucleotideType else ""
)
except Exception:
value_from_runType = ""
if value:
nucleotideType = value.strip().upper()
if nucleotideType == "FUSIONS":
nucleotideType = "RNA"
if value_from_runType:
if value_from_runType == "DNA_RNA":
valid_values = ["DNA", "RNA", "Fusions"]
else:
valid_values = [value_from_runType]
else:
valid_values = ["DNA", "RNA"]
if not validation.is_valid_keyword(nucleotideType, valid_values):
errors.append(validation.invalid_keyword_error(field_label, valid_values))
else:
# fill in nucleotideType from selected runType or applicationGroup
if value_from_runType and value_from_runType != "DNA_RNA":
nucleotideType = value_from_runType
elif applicationGroupName in ["DNA", "RNA"]:
nucleotideType = applicationGroupName
return errors, nucleotideType
def validate_reference(
value, field_label, runType, applicationGroupName, application_label
):
errors = []
ref_short_name = ""
value = value.strip() if value else ""
if value:
applProduct = ApplProduct.objects.filter(
isActive=True,
applType__runType=runType,
applicationGroup__name=applicationGroupName,
) or ApplProduct.objects.filter(isActive=True, applType__runType=runType)
if applProduct and not applProduct[0].isReferenceSelectionSupported:
errors.append(
validation.invalid_invalid_value_related(
field_label, value, application_label
)
)
else:
selectedRefs = (
ReferenceGenome.objects.filter(name=value)
or ReferenceGenome.objects.filter(name__iexact=value)
or ReferenceGenome.objects.filter(short_name=value)
or ReferenceGenome.objects.filter(short_name__iexact=value)
)
if selectedRefs:
ref_short_name = selectedRefs[0].short_name
else:
errors.append(validation.invalid_not_found_error(field_label, value))
return errors, ref_short_name
def validate_reference_short_name(value, field_label):
errors = []
if validation.has_value(value):
value = value.strip()
reference = ReferenceGenome.objects.filter(short_name=value, enabled=True)
if not reference.exists():
generic_not_found_error = validation.invalid_not_found_error(
field_label, value
)
error_fixing_message = ugettext_lazy(
"references_genome_download.messages.help.import"
) # "To import it, visit Settings > References > Import Preloaded Ion References"
reference_error_message = (
generic_not_found_error.strip() + error_fixing_message.strip()
) # avoid lazy
errors.append(reference_error_message)
return errors
def validate_sample_id(value, field_label):
errors = []
if not validation.is_valid_chars(value):
errors.append(validation.invalid_chars_error(field_label))
if not validation.is_valid_length(value, MAX_LENGTH_SAMPLE_ID):
errors.append(
validation.invalid_length_error(field_label, MAX_LENGTH_SAMPLE_ID, value)
)
return errors
def validate_sample_tube_label(value, field_label):
errors = []
if value:
if not validation.is_valid_chars(value):
errors.append(validation.invalid_chars_error(field_label))
if not validation.is_valid_length(value, MAX_LENGTH_SAMPLE_TUBE_LABEL):
errors.append(
validation.invalid_length_error(
field_label, MAX_LENGTH_SAMPLE_TUBE_LABEL, value
)
)
return errors
def validate_chip_type(value, field_label, isNewPlan=None):
errors = []
warnings = []
if not value:
errors.append(validation.required_error(field_label))
else:
value = value.strip()
query_args = (Q(name__iexact=value) | Q(description__iexact=value),)
chip = Chip.objects.filter(*query_args)
if chip:
if not chip[0].isActive:
if isNewPlan:
errors.append(
validation.invalid_not_active(
field_label, value, include_error_prefix=False
)
) # 'Chip %s not active' % value
else:
warnings.append(
validation.invalid_not_active(
field_label, value, include_error_prefix=False
)
) # "Found inactive %s %s" % (field_label, value))
else:
if chip[0].getChipWarning:
warnings.append(chip[0].getChipWarning)
else:
errors.append(
validation.invalid_invalid_value(field_label, value)
) # 'Chip %s not found' % value
return errors, warnings
def validate_flows(value, field_label):
errors = []
if type(value) != int and value.isdigit():
value2 = int(value)
else:
value2 = value
if type(value2) == int:
if not validation.is_valid_uint(value2):
errors.append(validation.invalid_uint(field_label))
elif value2 < MIN_FLOWS or value2 > MAX_FLOWS:
errors.append(validation.invalid_range(field_label, MIN_FLOWS, MAX_FLOWS))
else:
errors.append(validation.invalid_range(field_label, MIN_FLOWS, MAX_FLOWS))
return errors
def validate_library_key(value, field_label):
errors = []
selectedLibKey = None
if not value:
errors.append(validation.required_error(field_label))
else:
try:
selectedLibKey = LibraryKey.objects.get(name__iexact=value.strip())
except LibraryKey.DoesNotExist:
try:
selectedLibKey = LibraryKey.objects.get(sequence__iexact=value.strip())
except LibraryKey.DoesNotExist:
logger.debug("plan_validator.validate_lib_key ...%s not found" % input)
selectedLibKey = None
if not selectedLibKey:
errors.append(
validation.invalid_not_found_error(field_label, value)
) # 'Library key %s not found' % value
return errors, selectedLibKey
def validate_libraryReadLength(value, field_label):
errors = []
if type(value) != int and value.isdigit():
value2 = int(value)
else:
value2 = value
if type(value2) == int:
if not validation.is_valid_uint_n_zero(value2):
errors.append(validation.invalid_uint(field_label))
elif value2 < MIN_LIBRARY_READ_LENGTH:
errors.append(
validation.invalid_range(
field_label, MIN_LIBRARY_READ_LENGTH, MAX_LIBRARY_READ_LENGTH
)
)
elif value2 > MAX_LIBRARY_READ_LENGTH:
errors.append(
validation.invalid_range(
field_label, MIN_LIBRARY_READ_LENGTH, MAX_LIBRARY_READ_LENGTH
)
)
else:
errors.append(
validation.invalid_range(
field_label, MIN_LIBRARY_READ_LENGTH, MAX_LIBRARY_READ_LENGTH
)
)
return errors
'''
#templatingSize validation became obsolete, this field has been replaced by samplePrepProtocol
def validate_templatingSize(value, displayedName='Templating Size'):
"""
validate templating size case-insensitively with leading/trailing blanks in the input ignored
"""
errors = []
input = ""
valid_values = VALID_TEMPLATING_SIZES
if value:
input = value.strip().upper()
if not validation.is_valid_keyword(input, valid_values):
errors.append(validation.invalid_keyword_error(field_label, valid_values))
return errors
'''
def validate_QC(value, field_label):
errors = []
if not validation.is_valid_uint(value):
errors.append(validation.invalid_uint(field_label))
elif int(value) > MAX_QC_INT:
errors.append(validation.invalid_range(field_label, MIN_QC_INT, MAX_QC_INT))
return errors
def validate_projects(value, field_label, delim=","):
"""
validate projects case-insensitively with leading/trailing blanks in the input ignored
"""
errors = []
trimmed_projects = ""
if value:
for project in value.split(delim):
trimmed_project = project.strip()
if trimmed_project:
trimmed_projects = trimmed_projects + trimmed_project + delim
if not validation.is_valid_chars(trimmed_project):
errors.append(validation.invalid_chars_error(field_label))
if not validation.is_valid_length(trimmed_project, PROJECT_NAME_LENGTH):
errors.append(
validation.invalid_length_error(
field_label, PROJECT_NAME_LENGTH, trimmed_project
)
)
if errors:
break
return errors, trimmed_projects
def validate_barcode_kit_name(value, field_label):
errors = []
if validation.has_value(value):
value = value.strip()
kits = dnaBarcode.objects.filter(name=value)
if not kits:
errors.append(
validation.invalid_not_found_error(field_label, value)
) # "%s %s not found" % (displayedName, value)
return errors
def get_kitInfo_by_name_or_description(value, kitType=[]):
value = value.strip()
query_args = (Q(name__iexact=value) | Q(description__iexact=value),)
query_kwargs = {"kitType__in": kitType} if kitType else {}
kit = KitInfo.objects.filter(*query_args, **query_kwargs)
return kit[0] if kit else None
def validate_optional_kit_name(value, kitType, field_label, isNewPlan=None):
errors = []
warnings = []
if validation.has_value(value):
kit = get_kitInfo_by_name_or_description(value, kitType)
if not kit:
errors.append(validation.invalid_not_found_error(field_label, value))
elif kit and not kit.isActive:
if isNewPlan:
errors.append(validation.invalid_not_active(field_label, value))
else:
warnings.append(validation.invalid_not_active(field_label, value))
return errors, warnings
def _validate_spp_value_uid(value):
# validate if the user specified SPP matches the common_CV value or uid
# if not, send an appropriate error message with the valid SPP values.
    common_CVs = common_CV.objects.filter(cv_type__in=["samplePrepProtocol"])
    for cv in common_CVs:
        if (value.lower() == cv.value.lower()) or (value.lower() == cv.uid.lower()):
            return (True, cv, common_CVs)
    # No match found (or no samplePrepProtocol CVs defined at all)
    return (False, value, common_CVs)
def _validate_ssp_templatingKit(samplePrepValue, templatingKitName, cvLists):
# validate whether the samplePrepProtocol is supported for this templatingKit
isValid = False
validSPP_tempKit = []
allowedSpp = []
try:
# check if the user specified spp's category is in the Templating Kit categories lists
# If not, send an appropriate error message with the valid samplePrep Protocol values
# which is supported by the specific templatingKit
kit = KitInfo.objects.get(
kitType__in=["TemplatingKit", "IonChefPrepKit"], name=templatingKitName
)
allowedSpp = kit.categories
for value in samplePrepValue.split(";"):
if value in kit.categories:
isValid = True
return isValid, validSPP_tempKit
except Exception as err:
logger.debug("plan_validator._validate_ssp_templatingKit() : Error, %s" % err)
if not isValid:
try:
if "samplePrepProtocol" in allowedSpp:
for category in allowedSpp.split(";"):
if category == "samplePrepProtocol":
continue
if "sampleprep" in category.lower():
for cvlist in cvLists:
if category in cvlist.categories:
validSPP_tempKit.append(cvlist.value)
except Exception as err:
logger.debug(
"plan_validator._validate_ssp_templatingKit() : Error, %s" % err
)
return isValid, validSPP_tempKit
def validate_plan_samplePrepProtocol(
value, templatingKitName, field_label, templatingKitName_label
):
# validate if input matches the common_CV value or uid
errors = []
if value:
value = value.strip()
(isValid, cv, cvLists) = _validate_spp_value_uid(value)
if not isValid:
cvLists = [cvlist.value for cvlist in cvLists]
cvLists.append("undefined")
            errors.append(validation.invalid_choice(field_label, value, cvLists))
logger.debug(
"plan_validator.validate_plan_samplePrepProtocol() : Error, %s is not valid. Valid sample kit protocols are: %s"
% (value, cvLists)
)
only 1
if idx_ld == self._ord_caliper[-1] and idx == self.calipers[idx_ld].mru_index():
shape['fillcolor'] = CLR_CLPR_RECT_ACT
else:
shape['fillcolor'] = CLR_CLPR_RECT
def clear_measurements(self):
self._ord_caliper = []
for c in self.calipers:
c.clear_measurements()
def get_caliper_annotations(self, idx_ld):
return self.calipers[idx_ld].get_caliper_annotations()
def update_caliper_lead_removed(self, idx_ld_rmv):
""" When a single lead channel is removed
Returns true if caliper measurements removed
"""
idxs_rmv = [idx for idx, i in enumerate(self._ord_caliper) if i == idx_ld_rmv]
# Remove all caliper measurement history associated with this lead
remove_by_indices(self._ord_caliper, sorted(idxs_rmv, reverse=True))
self.calipers[idx_ld_rmv].clear_measurements() # Clear caliper measurements on the lead removed
        return idxs_rmv != []  # True if any caliper measurements were removed for this lead
def update_caliper_annotations_time(self, strt, end, figs, idxs_lead):
caliper_removed_lds, edited_prevs = map(list, zip(
*[self.calipers[idx].update_caliper_annotations_time(strt, end, figs[i])
for i, idx in enumerate(idxs_lead)]))
edited_prev = True
if self._ord_caliper:
idx_mru = self._ord_caliper[-1]
if not edited_prevs[idxs_lead.index(idx_mru)]:
edited_prev = False
while caliper_removed_lds[idxs_lead.index(idx_mru)]:
if len(self.calipers[idx_mru]) == 0: # No more caliper for most recent lead
del self._ord_caliper[-1]
if not self._ord_caliper: # No more caliper history for all leads
break
else:
idx_mru = self._ord_caliper[-1]
return True, edited_prev
else:
return False, edited_prev
def caliper_on_display(self, idx_ld, xc, yc, x0, y0):
return self.calipers[idx_ld].caliper_on_display(xc, yc, x0, y0)
class Caliper:
""" Handles caliper internal updates for a single lead,
depending on `EcgApp` time range navigation and caliper measurement creation, removal and edit.
"""
# MAX_CLP_RNG = pd.to_timedelta(20, unit='s') # Maximum range intended for caliper usage
def __init__(self, record, idx_lead):
self.rec = record
self.idx_lead = idx_lead # Each Caliper serves a particular lead, by index
        self._n_mesr = 0  # Number of caliper measurements currently stored
self.lst_coords = [] # List to keep track of plotly user-drawn shapes, in that order
self.lst_ann_mesr = [] # List of annotation pairs,
# synchronized with the above, on the corresponding text annotation measurements
self._ord_mesr_edit = [] # Keeps track of the order of elements modified by index
# Most recent one on end of list
self._idx_shape_last = None # Index of caliper last edit, per Dash `shapes` list storage order
def __len__(self):
return self._n_mesr
def to_synchronized(self):
# Each caliper coordinates with the associated lead index
return list(zip([self.idx_lead] * self._n_mesr, self.lst_coords))
def get_caliper_annotations(self):
return flatten(*self.lst_ann_mesr)
# return [ann for pr in self.lst_ann_mesr for ann in pr] # Flattens 2d list to 1d
def _update_caliper_edit_order_after_remove(self, idxs_rmv):
""" Update the attribute on (order of measurement edits based on index),
based on indices already removed
Indices removed should be reversely sorted """
remove_by_indices(self._ord_mesr_edit, idxs_rmv)
# ic(idxs_rmv, self._ord_mesr_edit)
for idx_rmv in idxs_rmv:
for idx_idx, idx in enumerate(self._ord_mesr_edit):
if idx > idx_rmv: # Decrement by the right amount for the remaining elements
self._ord_mesr_edit[idx_idx] -= 1
# Maintain that `ord_mesr_edit` must have uniquely every integer within [0, its length)
def update_caliper_annotations_shape(self, layout_changed):
"""
Expected to be called on every shape layout change, there will always be a change
:param layout_changed: The layout of the figure change
:return Caliper change enum type, and whether edited the same caliper as last edit
"""
if 'shapes' in layout_changed: # Add/removal of shape
lst_shape = layout_changed['shapes']
l = len(lst_shape)
if l > self._n_mesr: # New shape added by user
coords = _shape_dict_to_coords(lst_shape[-1])
self.lst_coords.append(coords)
self.lst_ann_mesr.append(_measure(*coords))
self._ord_mesr_edit.append(self._n_mesr) # Index of newly created shape is always the last one
self._idx_shape_last = self._n_mesr
self._n_mesr += 1
return CLP_CH.Add, False
# Linearly check membership for the removed rect
else: # A shape removed
# Must be that there were a single shape, and it's now removed
idx_rmv = _get_idx_removed(lst_shape, self.lst_coords)
del self.lst_coords[idx_rmv]
del self.lst_ann_mesr[idx_rmv]
self._n_mesr -= 1
self._idx_shape_last = None
self._update_caliper_edit_order_after_remove([idx_rmv])
return CLP_CH.Remove, False
else: # Change to an existing shape
def _get_idx_changed_shape(k):
""" User responsible for match success
"""
return int(re.match(r'^shapes\[([0-9]+)]\.(.){2}$', k).group(1))
# Any one of the keys suffice, e.g. {'shapes[2].x0', 'shapes[2].x1', 'shapes[2].y0', 'shapes[2].y1'}
idx_ch = _get_idx_changed_shape(list(layout_changed.keys())[0])
if idx_ch == self._idx_shape_last:
edited_prev = True
else:
edited_prev = False
self._idx_shape_last = idx_ch
coords = _shape_dict_to_coords(layout_changed, changed=True)
self.lst_coords[idx_ch] = coords
self.lst_ann_mesr[idx_ch] = _measure(*coords)
idx_edt = self._ord_mesr_edit.index(idx_ch) # Find the original ordering of this edited shape
del self._ord_mesr_edit[idx_edt]
self._ord_mesr_edit.append(idx_ch) # Promote to recently edited
return CLP_CH.Edit, edited_prev
def update_caliper_annotations_time(self, strt, end, fig):
""" Expected to be called on every display time range change, there might not be a change
Removes user-drawn shapes out of range if any, in the `figs` argument
# :return Updated list of text annotations within display time range if any measurement removed,
False otherwise
"""
removed, idxs = _get_caliper_indices_out_of_range(
self.lst_coords,
self.rec.count_to_pd_time(strt),
self.rec.count_to_pd_time(end)
)
if self._idx_shape_last in idxs:
edited_prev = False
self._idx_shape_last = None # Caliper removed, reset
else:
edited_prev = True
if self._idx_shape_last is not None:
self._idx_shape_last -= sum(i < self._idx_shape_last for i in idxs)
if removed:
idxs.sort(reverse=True)
self._n_mesr -= len(idxs)
remove_by_indices(self.lst_coords, idxs)
remove_by_indices(self.lst_ann_mesr, idxs)
remove_by_indices(fig['layout']['shapes'], idxs)
self._update_caliper_edit_order_after_remove(idxs)
return removed, edited_prev
def clear_measurements(self):
self._n_mesr = 0
self.lst_coords = []
self.lst_ann_mesr = []
self._ord_mesr_edit = []
def mru_index(self):
""" Most recently edited caliper measurement, as index into the """
return self._ord_mesr_edit[-1]
def get_mru_caliper_coords(self):
"""
:return: 4-tuple, (x0, x1, y0, y1) as pandas time objects and integer voltages
if a measurement exists, None otherwise """
if self.has_measurement():
return self.lst_coords[self._ord_mesr_edit[-1]]
def has_measurement(self):
return self._n_mesr > 0
def caliper_on_display(self, xc, yc, x0, y0):
for coords in self.lst_coords:
if coords == (xc, yc, x0, y0):
return True
return False
class CaliperS:
""" Synchronized caliper across all leads """
def __init__(self, record): # Linked record automatically updates by parent
self.rec = record
self._n_mesr = 0
self.lst_coords = []
        self._ld_idxs_coord = []  # Associate each caliper measurement to its MRU-edit lead index
self.lst_ann_mesr = []
self._ord_mesr_edit = []
self._idx_shape_last = None
self._idx_ld_last = None
# TODO: See `ecg_app`
# def from_independent(self, ord_caliper, ld_idxs_coord, lst_coords, lst_ann_mesr):
# ic(ord_caliper, ld_idxs_coord, lst_coords, lst_ann_mesr)
# self._n_mesr = len(self.lst_coords)
# self._ord_mesr_edit = ord_caliper
# self._ld_idxs_coord = ld_idxs_coord
# self.lst_coords = lst_coords
# self.lst_ann_mesr = lst_ann_mesr
#
# def render(self, figs_gra):
# """ Broadcast the shapes to every single channel """
# ic([f['layout']['shapes'] for f in figs_gra])
# shapes = flatten(*[f['layout']['shapes'] for f in figs_gra])
# ic(shapes)
# for f in figs_gra:
# f['layout']['shapes'] = shapes
def get_caliper_annotations(self):
return flatten(*self.lst_ann_mesr)
def _update_measurement_edit_order_after_remove(self, idxs_rmv):
""" Update the attribute on (order of measurement edits based on index), based on indices already removed
Indices removed should be reversely sorted """
remove_by_indices(self._ord_mesr_edit, idxs_rmv)
for idx_rmv in idxs_rmv:
for idx_idx, idx in enumerate(self._ord_mesr_edit):
if idx > idx_rmv: # Decrement by the right amount for the remaining elements
self._ord_mesr_edit[idx_idx] -= 1
# Maintain that `ord_mesr_edit` must have uniquely every integer within [0, its length)
def update_caliper_annotations_shape(self, layout_changed, idx_ld):
"""
Expected to be called on every shape layout change, there will always be a change
:param layout_changed: The layout of the figure change
:param idx_ld: The lead index that the shape edit is based on
"""
if 'shapes' in layout_changed: # Add/removal of shape
lst_shape = layout_changed['shapes']
l = len(lst_shape)
if l > self._n_mesr: # New shape added by user
coords = _shape_dict_to_coords(lst_shape[-1])
self.lst_coords.append(coords)
self._ld_idxs_coord.append(idx_ld)
self.lst_ann_mesr.append(_measure(*coords))
self._idx_shape_last = self._n_mesr
self._idx_ld_last = idx_ld
self._ord_mesr_edit.append(self._n_mesr)
self._n_mesr += 1
return CLP_CH.Add, False
# Linearly check membership for the removed rect
else: # A shape removed
# Must be that there were a single shape, and it's now removed
idx_rmv = _get_idx_removed(lst_shape, self.lst_coords)
del self.lst_coords[idx_rmv]
del self.lst_ann_mesr[idx_rmv]
del self._ld_idxs_coord[idx_rmv]
self._n_mesr -= 1
self._idx_shape_last = None
self._idx_ld_last = None
if idx_ld is not None and self.has_measurement():
self._ld_idxs_coord[-1] = idx_ld
self._update_measurement_edit_order_after_remove([idx_rmv])
return CLP_CH.Remove, False
else: # Change to an existing shape
# Any one of the keys suffice, e.g. {'shapes[2].x0', 'shapes[2].x1', 'shapes[2].y0', 'shapes[2].y1'}
idx_ch = self._get_idx_changed_shape(list(layout_changed.keys())[0])
coords = _shape_dict_to_coords(layout_changed, changed=True)
self.lst_coords[idx_ch] = coords
self.lst_ann_mesr[idx_ch] = _measure(*coords)
self._ld_idxs_coord[idx_ch] = idx_ld
if idx_ch == self._idx_shape_last and idx_ld == self._idx_ld_last:
edited_prev = True
else:
edited_prev = False
self._idx_shape_last = idx_ch
self._idx_ld_last = idx_ld
idx_edt = self._ord_mesr_edit.index(idx_ch) # Find the original ordering of this edited shape
del self._ord_mesr_edit[idx_edt]
self._ord_mesr_edit.append(idx_ch) # Promote to recently edited
return CLP_CH.Edit, edited_prev
@staticmethod
def _get_idx_changed_shape(k):
""" User responsible for match success
"""
return int(re.match(r'^shapes\[([0-9]+)]\.(.){2}$', k).group(1))
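    # --- Illustrative note (not in the original source) -------------------------------
    # Plotly relayout events for an edited shape arrive with keys such as 'shapes[2].x0',
    # 'shapes[2].x1', 'shapes[2].y0', 'shapes[2].y1'; the regex above recovers the shape
    # index from any one of them, e.g.
    #   re.match(r'^shapes\[([0-9]+)]\.(.){2}$', 'shapes[2].x0').group(1)  ->  '2'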
def update_caliper_annotations_time(self, strt, end, figs, idx_ld):
""" Expected to be called on every |