input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
things need to be taken into
account in the following priority:
1. If `noBivariates` is `True`, then `y` must be equal to `0`.
2. If `self.useMean` is `True`, then `y` is equal to the mean of the
full length of the secondary channel.
3. If `y` is provided, then use the provided value.
4. If `timestamp` is provided, then get the values for the given `timestamp`.
5. Otherwise, default to the mean of the secondary channel.
Depending on if `y` is scalar, one of two methods will be used, detailed below.
The method uses the following algebra, calculated in the `_build`
method, to reduce the equation to fewer operations. For brevity:
`x` = `values`
`xref`, `yref` = `references`
`an` = `coeffs[n]`
`bn` = `_fastCoeffs[n]`
`y` is not scalar:
`f(x) = a0*(x - xref)*(y - yref) + a1*(x - xref) + a2*(y - yref) + a3`
=>
`f(x) = x*y*(a0) + x*(a1 - a0*yref) + y*(a2 - a0*xref) + (a3 + a0*xref*yref - a1*xref - a2*yref)`
=>
`f(x) = b0*x*y + b1*x + b2*y + b3`
`y` is scalar:
`f(x) = b0*x*y + b1*x + b2*y + b3`
=>
`f(x) = x*(b0*y + b1) + (b2*y + b3)`
"""
scalar = np.isscalar(values)
if scalar:
values = float(values)
elif out is None:
out = np.zeros_like(values, dtype=np.float64)
session = self.dataset.lastSession if session is None else session
sessionId = None if session is None else session.sessionId
try:
if self._eventlist is None or self._sessionId != sessionId:
channel = self.dataset.channels[self.channelId][self.subchannelId]
self._eventlist = channel.getSession(session.sessionId)
self._sessionId = session.sessionId
except IndexError as err:
# In multithreaded environments, there's a rare race condition
# in which the main channel can be accessed before the calibration
# channel has loaded. This should fix it.
logger.warning("%s occurred in Bivariate polynomial %r" %
err.__class__.__name__, self.id)
return None
if noBivariates:
y = 0
elif self.useMean:
y = self._eventlist.getMean()
elif y is None and timestamp is None:
y = self._eventlist.getMean()
elif y is None and timestamp is not None:
y = np.fromiter((self._eventlist[self._eventlist.getEventIndexNear(t)][0] for t in timestamp), dtype=np.float64)
if len(self._fastCoeffs) == 1:
if scalar:
out = self._fastCoeffs[0]
else:
out[:] = self._fastCoeffs[0]
elif len(self._fastCoeffs) == 4:
if np.isscalar(y):
# a0*x*y + a1*x + a2*y + a3 =
# x*(a0*y + a1) + (a2*y + a3)
if scalar:
out = values
else:
out[:] = values
out *= self._fastCoeffs[0]*y + self._fastCoeffs[1]
out += self._fastCoeffs[2]*y + self._fastCoeffs[3]
else:
out += values*y*self._fastCoeffs[0]
out += values*self._fastCoeffs[1]
out += y*self._fastCoeffs[2]
out += self._fastCoeffs[3]
else:
raise
return out
def __call__(self, timestamp, value, session=None, noBivariates=False):
    """ Apply the polynomial to an event.

        :param timestamp: The time of the event to process.
        :param value: The value of the event to process.
        :keyword session: The session containing the event.
        :keyword noBivariates: If `True`, the reference channel will not
            be used.
        :return: The `(timestamp, corrected value)` pair, the original pair
            if the calibration channel is empty, or `None` if the
            calibration channel could not be read.
    """
    session = self.dataset.lastSession if session is None else session
    sessionId = None if session is None else session.sessionId
    try:
        # (Re)acquire the calibration channel's event list if the session
        # changed or it has not been loaded yet.
        if self._eventlist is None or self._sessionId != sessionId:
            channel = self.dataset.channels[self.channelId][self.subchannelId]
            self._eventlist = channel.getSession(session.sessionId)
            self._sessionId = session.sessionId
        if len(self._eventlist) == 0:
            return timestamp, value
        # Optimization: don't check the other channel if Y is unused
        if noBivariates:
            y = (0, 1)
        else:
            y = self._noY or self._eventlist.getMean()
        return timestamp, self._function(value, y)
    except (IndexError, ZeroDivisionError) as err:
        # In multithreaded environments, there's a rare race condition
        # in which the main channel can be accessed before the calibration
        # channel has loaded. This should fix it.
        # FIX: pass the arguments to the logger (lazy %-formatting). The
        # original applied `%` to only the first argument, which raised a
        # TypeError whenever this path was hit. A second, identical (and
        # unreachable) `except` clause was also removed.
        logger.warning("%s occurred in Bivariate polynomial %r",
                       err.__class__.__name__, self.id)
        return None
def asDict(self):
    """ Dump the polynomial as a dictionary. Intended for use when
        generating EBML.
    """
    result = super(Bivariate, self).asDict()
    result.update(
        BivariateCalReferenceValue=self._references[1],
        BivariateChannelIDRef=self.channelId,
        BivariateSubChannelIDRef=self.subchannelId,
    )
    return result
def isValid(self, session=None, noBivariates=False, _retries=3):
    """ Check the validity of the Transform.

        :keyword session: The session to check (could be valid in one and
            invalid in another, e.g. one session has no temperature data).
        :keyword noBivariates: If `True`, the reference channel will not
            be used.
        :keyword _retries: Internal retry budget for the race-condition
            workaround below.
        :return: `True` if the transform is usable, `False` otherwise.
    """
    valid = super(Bivariate, self).isValid(session, noBivariates)
    if noBivariates or not valid:
        return valid
    session = self.dataset.lastSession if session is None else session
    sessionId = None if session is None else session.sessionId
    try:
        if self._eventlist is None or self._sessionId != sessionId:
            channel = self.dataset.channels[self.channelId][self.subchannelId]
            self._eventlist = channel.getSession(session.sessionId)
            self._sessionId = session.sessionId
        if len(self._eventlist) == 0:
            return False
    except Exception:
        # HACK: In multithreaded environments, there's a rare race
        # condition in which the main channel can be accessed before the
        # calibration channel has loaded. Retry isValid() a few times.
        # (Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed.)
        if _retries == 0:
            return False
        return self.isValid(session, noBivariates, _retries-1)
    # FIX: the original fell off the end here and implicitly returned
    # None (falsy) even when every check passed; return the result.
    return valid
@property
def useMean(self):
    # Whether `y` is taken as the mean of the reference (secondary)
    # channel rather than looked up per-event.
    return self._useMean

@useMean.setter
def useMean(self, value):
    self._useMean = value
    # Propagate the setting to dependent transforms (e.g. CombinedPoly
    # instances registered as watchers of this polynomial).
    for w in self._watchers:
        w.useMean = self.useMean
#===============================================================================
#
#===============================================================================
class CombinedPoly(Bivariate):
""" Calibration transform that combines multiple polynomials into a single
function. Used for combining Channel and Subchannel transforms to make
them more efficient.
"""
def copy(self):
    """ Create a duplicate of this Transform.
    """
    cls = self.__class__
    return cls(self.poly,
               subchannel=self._subchannel,
               calId=self.id,
               dataset=self.dataset,
               **self.kwargs)
def __init__(self, poly, subchannel=None, calId=None, dataset=None,
             **kwargs):
    """ Constructor.

        :param poly: The 'outer' polynomial being combined.
        :keyword subchannel: If not `None`, the index used to select one
            polynomial from `poly`.
        :keyword calId: The calibration ID for the combined transform.
        :keyword dataset: Fallback `Dataset` reference, used if neither
            `poly` nor the first inner polynomial provides one.
        :keyword kwargs: The inner polynomials, keyed by the variable name
            each one replaces in `poly`'s source (see `_build`).
    """
    self.id = calId
    self.poly = poly
    self.subpolys = kwargs
    self._fastCoeffs = None
    if subchannel is not None:
        self.poly = self.poly[subchannel]
    self.kwargs = kwargs
    # Borrow bookkeeping attributes from the outer polynomial, falling
    # back to the first inner polynomial when the outer lacks them.
    # NOTE(review): assumes at least one keyword polynomial was supplied;
    # `list(kwargs.values())[0]` raises IndexError otherwise — confirm
    # callers always pass one.
    p = list(kwargs.values())[0]
    for attr in ('dataset', '_eventlist', '_sessionId', 'channelId',
                 'subchannelId', '_references', '_coeffs', '_variables'):
        setattr(self, attr, getattr(poly, attr, getattr(p, attr, None)))
    self.dataset = self.dataset or dataset
    self._subchannel = subchannel
    self._watchers = weakref.WeakSet()
    self._build()
    self._useMean = True
def _build(self):
    """ Compile the combined polynomial: splice each inner polynomial's
        source expression into the outer polynomial's source, `eval` the
        result into a callable, and — for plain Univariate/Bivariate
        outer polynomials — fold the component coefficients into a single
        `_fastCoeffs` tuple.
    """
    if self.poly is None:
        # No outer polynomial: substitute the identity transform.
        self.poly = Univariate((1, 0))
    if len(self.subpolys) == 1:
        p = list(self.subpolys.values())[0]
        if p is not None:
            # Single inner polynomial: seed this transform with its
            # already-compiled artifacts (overwritten below if rebuilt).
            for attr in ('_str', '_source', '_function', '_noY'):
                setattr(self, attr, getattr(p, attr, None))
    # Textually substitute each inner polynomial's expression for its
    # variable name in the outer source, e.g. "x" -> "(2.0*x + 1.0)".
    phead, _, src = self.poly.source.partition(": ")
    for k, v in self.subpolys.items():
        if v is None:
            ssrc = "lambda x: x"
        else:
            ssrc = v.source
        start, _, end = ssrc.rpartition(": ")
        s = "(%s)" % (end or start)
        src = self._reduce(src.replace(k, s))
    if self._subchannel is not None:
        # Index into the multi-value sample for this subchannel.
        src = src.replace('x', 'x[%d]' % self._subchannel)
    # Merge in all function globals, in case components use additional
    # libraries (e.g. math).
    evalGlobals = {'math': math}
    if self.poly is not None:
        evalGlobals.update(self.poly._function.__globals__)
    for p in self.subpolys.values():
        if p is not None:
            evalGlobals.update(p._function.__globals__)
    if isinstance(self.poly, Bivariate):
        # a0*(b0*x + b1)*(c0*y + c1) + a1*(b0*x + b1) + a2*(c0*y + c1) + a3 =
        # x*y*(a0*b0*c0) + x*(a0*b0*c1 + a1*b0) + y*(a0*b1*c0 + a2*c0) + (a0*b1*c1 + a1*b1 + a2*c1 + a3)
        a = self.poly._fastCoeffs
        # NOTE(review): membership is tested against `self.poly.variables`
        # but the lookup uses `self.variables` — these should be the same
        # object (copied in __init__), but confirm.
        if self.poly.variables[0] in self.subpolys:
            if self.subpolys[self.variables[0]] is not None and \
               not isinstance(self.subpolys[self.variables[0]], ComplexTransform):
                b = self.subpolys[self.variables[0]]._fastCoeffs
            else:
                b = (1, 0)
        else:
            b = (1, 0)
        if self.poly.variables[1] in self.subpolys:
            if self.subpolys[self.variables[1]] is not None and \
               not isinstance(self.subpolys[self.variables[1]], ComplexTransform):
                c = self.subpolys[self.variables[1]]._fastCoeffs
            else:
                c = (1, 0)
        else:
            c = (1, 0)
        self._fastCoeffs = (
            a[0]*b[0]*c[0],
            a[0]*b[0]*c[1] + a[1]*b[0],
            a[0]*b[1]*c[0] + a[2]*c[0],
            a[0]*b[1]*c[1] + a[1]*b[1] + a[2]*c[1] + a[3],
        )
    elif isinstance(self.poly, Univariate):
        # a0*(b0*x + b1) + a1 = a0*b0*x + (a1 + a0*b1)
        a = self.poly._fastCoeffs
        b = self.subpolys[self.poly.variables[0]]
        if b is None:
            b = (1., 0.)
        else:
            b = b._fastCoeffs
        self._fastCoeffs = (a[0]*b[0], a[1] + a[0]*b[1])
    self._str = src
    self._source = "%s: %s" % (phead, src)
    # NOTE: `eval` is applied to internally-generated source built from
    # the component polynomials' own `source` strings, not external input.
    self._function = eval(self._source, evalGlobals)
    # Sentinel used by __call__: (0, 1) means the combined function does
    # not actually use `y`.
    self._noY = (0, 1) if 'y' not in src else False
    # Re-register as a watcher so changes to components propagate here.
    for x in [self.poly] + list(self.subpolys.values()):
        if x is not None:
            x.addWatcher(self)
def inplace(self, values, y=None, timestamp=None, session=None, noBivariates=False, out=None):
""" In-place transform for the `CombinedPoly` transform. It reduces the
number of array allocations/operations compared to the normal
`__call__` method. The user can supply | |
import sys
import ctypes as ct
import os.path
import config.config as c
import NuToPos as ntp
import rdbparse as rp
import playDarts as pd
import GetNRCDailyInfo as nrc
import distance as sd
import dailyparse as dp
import numpy as np
import ArgParser as p
import PhysicsConstants as pc
# Directory containing this module; used to locate the ctypes shared
# libraries shipped alongside it.
basepath = os.path.dirname(__file__)
clibpath = os.path.abspath(os.path.join(basepath,"ctypes_libs"))
# Verbose-output flag, taken from the command-line argument parser.
DEBUG = p.debug #False
#Takes in an array of UnoscSpecGen classes (assume all have same energy points on y-axis)
#And returns the dNdE function that results from them (neutrino energy)
def build_Theory_dNdE(unosc_spectra, oscParams):
    """Sum the oscillated spectra of all reactors (no systematics) and
    return the resulting dNdE function in neutrino energy.

    All entries of `unosc_spectra` are assumed to share the same energy
    points (the first entry's `energy_array` is used).
    """
    energies = unosc_spectra[0].energy_array
    total = np.zeros(len(energies))
    for reactor_spectra in unosc_spectra:
        # [None] disables all systematic fluctuations for the "perfect" case.
        oscillated = Osc_CoreSysGen(reactor_spectra, oscParams, [None])
        total += oscillated.Summed_Spectra
    return dNdE(energies, total)
def build_Theory_dNdE_wCoreSys(unosc_spectra, oscParams):
    """Sum the oscillated spectra of all reactors, with the configured
    core systematics (`c.SYSTEMATICS`) applied, and return the resulting
    dNdE function in neutrino energy.
    """
    energies = unosc_spectra[0].energy_array
    total = np.zeros(len(energies))
    for reactor_spectra in unosc_spectra:
        varied = Osc_CoreSysGen(reactor_spectra, oscParams, c.SYSTEMATICS)
        total = total + varied.Summed_Spectra
    return dNdE(energies, total)
#Class takes in four RATDB type Isotope Info entries, one
#Spectrum entry (has the isotope compositions), and an energy to evaulate at.
#The class evaluates Isotope's associated small lambda, combines with the isotope
#fractions to make the reactor's Lambda function
#Isotopes should be fed as follows: iso_array[0] = 235U RATDB entry, iso_array[1]=238U,
#iso_array[2]=239Pu, iso_array[3]=241Pu
class Lambda(object):
    """Evaluates each isotope's "small lambda" exponential-polynomial
    spectrum and combines them, weighted by the isotope fractions, into
    the reactor's big-Lambda function.

    `iso_array` entries must provide a `poly_coeff` sequence; the
    expected order is 235U, 238U, 239Pu, 241Pu. The result is stored in
    `self.value`, evaluated at energy (or energy array) `E`.
    """

    def __init__(self, iso_array, isofracs, E):
        self.E = E
        self.iso_arr = iso_array
        # One small-lambda spectrum per isotope.
        self.sl_array = [self.smallLambda(iso) for iso in self.iso_arr]
        self.isofracs = isofracs
        self.value = 'none'  # placeholder; replaced by defineBigLambda()
        self.defineBigLambda()

    def smallLambda(self, iso):
        """exp of the isotope's polynomial: exp(sum_i a_i * E**i)."""
        terms = [self.polyTerm(coeff, self.E, power)
                 for power, coeff in enumerate(iso.poly_coeff)]
        return np.exp(np.sum(terms, axis=0))

    def defineBigLambda(self):
        """Weight each small lambda by its isotope fraction and sum."""
        weighted = [self.isofracs[i] * sl
                    for i, sl in enumerate(self.sl_array)]
        self.value = np.sum(weighted, axis=0)

    def polyTerm(self, a, e, c):
        """Single polynomial term a * e**c."""
        return a * (e ** c)
#Class takes in permanent details for a reactor plant (ReacDetails RATDB entry)
#And the reactor's status (ReacStatus RATDB entry), and the RATDB entries of
#isotopes to use, and the energys to calculate the spectrum at
#and builds an array of spectra arrays (self.Unosc_Spectra) evaluated at the
#given energies. There is one array for each core of the plant.
class UnoscSpecGen(object):
    """ Builds one unoscillated antineutrino spectrum per core of a
        reactor plant, evaluated at the given energies, then folds in
        power/load-factor information from the daily-update database via
        `coreGen`.
    """
    def __init__(self,ReacDetails,ReacStatus,iso_array,energy_array,Uptime):
        self.energy_array = energy_array
        self.ReacDetails = ReacDetails
        self.ReacStatus = ReacStatus
        self.iso_array = iso_array
        self.Uptime = Uptime
        self.__core_check()
        self.no_cores = self.ReacStatus.no_cores
        self.Core_Distances = []
        self.__CoreDistancesFromSNO()
        self.Unosc_Spectra = []
        self.__calcSpectra()
        #Now, incorporate Power information from daily_update database
        self.CoreSpecs = coreGen(ReacStatus,self.Unosc_Spectra,self.Uptime)
        self.Unosc_Spectra = self.CoreSpecs.Unosc_Spectra_wP
    def core_check(self):
        """ Warn if the REACTOR and REACTOR_STATUS entries disagree on the
            number of cores (the STATUS count is the one used).
        """
        if self.ReacDetails.no_cores != self.ReacStatus.no_cores:
            raise UserWarning("WARNING: Number of cores in REACTOR" + \
            "RATDB entry does not match REACTOR_STATUS entry. Using" + \
            "number of cores put in REACTOR STATUS entry.")
    def CoreDistancesFromSNO(self):
        """ Compute each core's distance (km) from the detector position
            configured in `c.LONGLATALT`.
        """
        self.Core_Distances = [] #Refresh array before adding core distances
        for i in np.arange(0,self.no_cores):
            longitude = self.ReacDetails.core_longitudes[i]
            latitude = self.ReacDetails.core_latitudes[i]
            altitude = self.ReacDetails.core_altitudes[i]
            coreDistance = sd.distance(c.LONGLATALT,[longitude,latitude,altitude])
            self.Core_Distances.append(coreDistance)
        if DEBUG == True:
            print("For core " + self.ReacStatus.index + "...")
            print("Core distances calculated! In km... " + str(self.Core_Distances))
    def calcSpectra(self):
        """ Evaluate each core's unoscillated spectrum: the normalized
            Lambda function divided by the 4*pi*d^2 flux dilution factor.
        """
        self.Unosc_Spectra = [] #Refresh array before adding spectrums
        for i in np.arange(0,self.no_cores):
            coreType = self.ReacStatus.core_types[i]
            coreDistance = self.Core_Distances[i]
            isotope_composition = rp.Reactor_Spectrum(coreType).param_composition
            #loop over energies, calculate spectra's values
            LambdaFunction = Lambda(self.iso_array, isotope_composition,self.energy_array).value
            coreLambda = LambdaFunction / self.spectrumDenom(isotope_composition)
            coreSpectrum = ( coreLambda / \
            (4.*np.pi * (coreDistance**2)))
            self.Unosc_Spectra.append(np.array(coreSpectrum))
    def spectrumDenom(self,isocomp):
        """ Average energy released per fission, weighted by the isotope
            composition; normalizes the Lambda function.
        """
        denominator = 0.0
        for i,iso in enumerate(self.iso_array):
            denominator += isocomp[i] * self.iso_array[i].Eperfission
        return denominator
    # Private (name-mangled) copies of the public methods, so subclasses
    # can override the public names without breaking __init__'s calls.
    __core_check = core_check
    __calcSpectra = calcSpectra
    __CoreDistancesFromSNO = CoreDistancesFromSNO
#Class takes in the unoscillated spectra associated with a reactor plant.
#generates each core's operation details based on the number of days ran
#in an experiment and the load factor of a reactor for each day. Builds the
#unoscillated spectra with power and load factor values included.
class coreGen(object):
    """ Folds thermal power and load-factor information into a plant's
        unoscillated core spectra. US reactors use daily-database
        averages; all others use the RATDB thermal powers directly.
    """
    def __init__(self,ReacStatus,Unosc_Spectra,Uptime):
        self.Uptime = Uptime
        self.ReacName = ReacStatus.index
        self.core_powers = ReacStatus.core_powers #Core powers as in RAT
        self.Unosc_Spectra = Unosc_Spectra
        #Number of days of data collected from the daily_updates DB
        self.numdays_ofdata = 0
        #Sum of licensed MWts grabbed for each day and sum of load factors
        self.TotMWts = []
        self.TotLFs = []
        #These are calculated as the total divided by the numdays_ofdata
        self.AvgLFs = []
        self.AvgMWts = []
        #Spectra with power corrections
        self.Unosc_Spectra_wP = []
        # Reactors not in the US list use RATDB core powers with no
        # statistical fluctuations; US reactors use daily-DB averages.
        if self.ReacName not in c.USList:
            self.__Power_Perfect()
        else:
            self.__Power_AvgAvailable()
    def Power_Perfect(self):
        '''
        Assumes all reactors run at 2015 averaged thermal power, no statistical
        fluctuations. Ends up scaling each reactor core's spectra by the
        approproate thermal power, time, and MeV conversion factor.
        '''
        for i,coreSpectrum in enumerate(self.Unosc_Spectra):
            coreSpectrum = coreSpectrum * self.Uptime * pc.MWHTOMEV * \
            self.core_powers[i]
            self.Unosc_Spectra_wP.append(coreSpectrum)
    def Power_AvgAvailable(self):
        '''
        Grabs all of the load factors and licensed MWts available for this reactors
        cores from the daily database, averages the licensed MWts and load
        factors, and uses these with the hard-coded runtime to generate each core
        power.
        '''
        self.numdays_ofdata, AllReacEntries = dp.getAllEntriesInDailyUpdates(self.ReacName)
        allLicensedMWts = []
        allLoadFactors = []
        for entry in AllReacEntries:
            allLicensedMWts.append(np.array(entry["lic_core_powers"]))
            allLoadFactors.append(np.array(entry["capacities"]))
        #Now, average over all values for each core
        self.TotMWts = np.sum(allLicensedMWts, axis=0)
        self.TotLFs = np.sum(allLoadFactors, axis=0)
        self.AvgMWts = self.TotMWts/self.numdays_ofdata
        self.AvgLFs = (self.TotLFs/self.numdays_ofdata) #Shown in DB as percentage
        #now, use the average values to rescale the spectrums
        for i,coreSpectrum in enumerate(self.Unosc_Spectra):
            # Load factor is a percentage, hence the /100 below.
            coreSpectrum = coreSpectrum * self.Uptime * pc.MWHTOMEV * \
            self.AvgMWts[i] * (self.AvgLFs[i] / 100.0)
            self.Unosc_Spectra_wP.append(coreSpectrum)
    # Private (name-mangled) copies so subclass overrides of the public
    # names do not break __init__'s internal calls.
    __Power_Perfect = Power_Perfect
    __Power_AvgAvailable = Power_AvgAvailable
#Class takes an UnoscSpecGen class (contains spectrums for each core of one reactor)
#and outputs the the oscillated Spectrums. oscParams should have two entries:
#[delta m-squared, sin^2(theta12)]
class Osc_CoreSysGen(object):
    """ Takes an `UnoscSpecGen` (spectra for each core of one reactor),
        optionally applies systematic fluctuations to each core, applies
        the survival probability Pee to each core spectrum, and sums
        them.

        :param UnoscSpecGen: An `UnoscSpecGen` instance for one plant.
        :param oscParams: ``[delta m-squared 21, sin^2(theta12)]``.
        :param Systematics: Sequence of systematic tags (e.g. "USSYS",
            "CASYS"); ``[None]`` disables systematics.
    """
    def __init__(self, UnoscSpecGen, oscParams,Systematics):
        self.Systematics = Systematics
        self.Unosc_Spectra = UnoscSpecGen.Unosc_Spectra
        self.ReacDetails = UnoscSpecGen.ReacDetails
        self.ReacStatus = UnoscSpecGen.ReacStatus
        self.energy_array = UnoscSpecGen.energy_array
        self.Core_Distances = UnoscSpecGen.Core_Distances
        #define your variable oscillation paramaters;
        self.SINSQT12 = oscParams[1]
        self.DELTAMSQ21 = oscParams[0]
        #calculate needed fixed oscillation parameters
        self.SINSQTWO12 = self.calcSINSQTWO(self.SINSQT12)
        self.COSSQT12 = self.calcCOSSQ(self.SINSQT12)
        self.AvgLFs = UnoscSpecGen.CoreSpecs.AvgLFs
        #Adds systematic fluctuations to each core
        self.__addCoreSystematics()
        #Oscillate each core spectra, then sum them
        self.Osc_Spectra = []
        self.__oscillateSpectra()
        self.Summed_Spectra = []
        self.__sumSpectra()
    def addCoreSystematics(self):
        '''
        If called, the spectra from each core is scaled according to the
        average load factor. Basically, sample from a gaussian of
        mu=LF and sigma = 25% for now. Can make a function of LF later.
        '''
        if ("USSYS" in self.Systematics) and \
           (self.ReacDetails.index in c.USList):
            sysSigmas = c.US_LF_VAR * np.ones(len(self.AvgLFs))
            #Get the fluctuation from each core's avg LF, in percentage
            sysFlucs = pd.RandShoot(self.AvgLFs,sysSigmas, \
                len(self.AvgLFs)) - self.AvgLFs
            Unosc_Spectra_wSys = []
            #For each spectrum, vary by the fluctuation calculated in sysFlucs
            for i,coreSpectrum in enumerate(self.Unosc_Spectra):
                coreSpectrum = coreSpectrum * (1 + (sysFlucs[i]/100.0))
                Unosc_Spectra_wSys.append(coreSpectrum)
            self.Unosc_Spectra = Unosc_Spectra_wSys
        #Vary each Canadian reactor core's flux around it's thermal power
        elif ("CASYS" in self.Systematics) and \
             (self.ReacDetails.index in c.CAList):
            numcores = len(self.ReacStatus.core_powers)
            #Thermal MWts for CA reactors already have LFs factored in
            #ReacStatus entries
            coreAvgs = 100.0 * np.ones(numcores)
            coreSigmas = (c.CA_LF_VAR) * np.ones(numcores)
            core_SysVar = pd.RandShoot(coreAvgs,coreSigmas, numcores)
            # NOTE(review): looks like a debug leftover; kept to preserve
            # existing console output.
            print(core_SysVar)
            Unosc_Spectra_wSys = []
            #For each spectrum, vary by the fluctuation calculated in sysFlucs
            for i,coreSpectrum in enumerate(self.Unosc_Spectra):
                coreSpectrum = coreSpectrum * (core_SysVar[i] / 100.0)
                Unosc_Spectra_wSys.append(coreSpectrum)
            self.Unosc_Spectra = Unosc_Spectra_wSys
    def oscillateSpectra(self):
        """ Multiply each core spectrum by its survival probability. """
        self.Osc_Spectra = [] #Refresh array before adding spectrums
        for i,spectrum in enumerate(self.Unosc_Spectra):
            # FIX: np.prod replaces np.product, which was deprecated and
            # removed in NumPy 2.0 (identical behavior).
            self.Osc_Spectra.append(np.prod([spectrum,self.Pee(self.energy_array, \
                self.Core_Distances[i])],axis=0))
    def calcSINSQTWO(self, sst12):
        """ sin^2(2*theta12) = 4*s^2*(1 - s^2) with s^2 = sin^2(theta12). """
        result = 4. * sst12 * (1. - sst12)
        return result
    def calcCOSSQ(self, sst12):
        """ cos^2(theta12) = 1 - sin^2(theta12). """
        result = 1. - sst12
        return result
    def Pee(self,E,L):
        """ Electron-antineutrino survival probability for energies `E`
            (MeV, array) over baseline `L` (km), using the
            Svoboda/Learned form. The 1E-12 factor folds the km/MeV unit
            conversion into the hbar*c constant — TODO confirm units.
        """
        term1 = pc.COS4THT13*self.SINSQTWO12*(np.sin(1E-12 * \
            self.DELTAMSQ21 * L /(4 * E * pc.hbarc))**2)
        result = (1. - term1) # + term2 + term3)
        #OR, USING 2-PARAMETER APPROXIMATION USED BY KAMLAND
        # result = 1 - (self.SINSQTWO12 * np.sin((1.27 * \
        # DELTAMSQ21*L)/(E/1000))**2)
        return result
    def sumSpectra(self):
        """ Sum the per-core oscillated spectra into one plant spectrum. """
        summed_spectra = np.sum(self.Osc_Spectra, axis=0)
        self.Summed_Spectra = summed_spectra
    #Make private copies of the public methods; important to let
    #subclasses override methods without breaking intraclass method calls.
    __sumSpectra = sumSpectra
    __oscillateSpectra = oscillateSpectra
    __addCoreSystematics = addCoreSystematics
#Class takes in a reactor spectrum array (oscillated or unoscillated) and the
#relative x-axis array (Energy_Array in the class) and calculates the
#dNdE function for the spectrum. Total Runtime and Thermal power associated
#with the spectrum's core must already be factored into the spectrum.
class dNdE(object):
def __init__(self,Energy_Array,Spectrum):
self.Nu_Energy_Array = Energy_Array
self.Spectrum = Spectrum
self.Array_Check()
self.resolution = None
self.Nu_dNdE = []
self.evalNudNdE()
self.Pos_Energy_Array | |
#from grounding import Grounder
import grounding
from pddl.parser import Parser
from task import Operator
from pddl.pddl import Type, Predicate, Effect, Action, Domain, Problem
def assert_equal(result, expected):
    """Fail the calling test unless *result* equals *expected*."""
    assert result == expected
def get_action(name, signature, precondition, addlist, dellist):
    """Build an Action whose effect adds *addlist* and deletes *dellist*."""
    eff = Effect()
    eff.dellist = set(dellist)
    eff.addlist = set(addlist)
    return Action(name, signature, precondition, eff)
"""
test domain and problem
"""
# types:
type_object = Type("object", None)
type_vehicle = Type("vehicle", type_object)
type_car = Type("car", type_vehicle)
type_truck = Type("truck", type_vehicle)
type_city = Type("city", type_object)
type_country = Type("country", type_object)
type_my_car = Type("my_car", type_vehicle)
type_color = Type("color", type_object)
types = {"object": type_object, "vehicle": type_vehicle, "car": type_car,
"truck": type_truck, "city": type_city, "country": type_country,
"my_car": type_my_car, "color": type_color}
# predicates:
predicate_car_orig = Predicate("at", [("car", types["car"]),
("orig", types["city"])])
predicate_car_dest = Predicate("at", [("car", types["car"]),
("dest", types["city"])])
predicate_veh_orig = Predicate("at", [("vehicle", types["vehicle"]),
("orig", types["city"])])
predicate_veh_dest = Predicate("at", [("vehicle", types["vehicle"]),
("dest", types["city"])])
predicate_in = Predicate("in", [("car", types["car"]), ("in", types["city"])])
#predicate which does not occur in any operator:
predicate_car_color = Predicate("car_color", [("car", types["car"]),
("color", types["color"])])
predicate_at = Predicate("at", [("vehicle", types["vehicle"]),
("city", types["city"])])
predicates = {"at": predicate_car_dest, "in": predicate_in,
"car_color": predicate_car_color}
# actions:
action_drive_car = get_action("DRIVE-CAR", [("car", [types["car"]]),
("orig", [types["city"]]), ("dest", [types["city"]])],
[predicate_car_dest], [predicate_car_orig],
[predicate_car_dest])
actions = {"drive-car": action_drive_car}
# objects:
objects = {"red_car": types["car"], "green_car": types["car"],
"blue_truck": types["truck"], "freiburg": types["city"],
"basel": types["city"], "green": types["color"],
"yellow": types["color"]}
# initial and goal state:
initial_state = [Predicate("at", [("red_car", types["car"]),
("freiburg", types["city"])]),
Predicate("at", [("green_car", types["car"]),
("basel", types["city"])]),
Predicate("at", [("blue_truck", types["truck"]),
("freiburg", types["city"])]),
Predicate("at", [("yellow_truck", types["truck"]),
("basel", types["city"])])]
goal_state = [Predicate("at", [("red_car", types["car"]),
("basel", types["city"])]),
Predicate("at", [("green_car", types["car"]),
("freiburg", types["city"])]),
Predicate("at", [("blue_truck", types["truck"]),
("basel", types["city"])]),
Predicate("at", [("yellow_truck", types["truck"]),
("freiburg", types["city"])])]
# domain and problem
standard_domain = Domain("test_domain_statics", types, predicates, actions)
standard_problem = Problem("test_problem_statics", standard_domain, objects,
initial_state, goal_state)
def test_statics1():
    """
    A static predicate is a predicate, which doesn't occur in an effect of an
    action.
    """
    # These locals shadow the module-level fixtures of the same names.
    type_object = Type("object", None)
    # NOTE(review): the parent here is the *module-level* type_vehicle
    # (no local is defined) — confirm this is intentional.
    type_car = Type("car", type_vehicle)
    type_city = Type("city", type_object)
    type_country = Type("country", type_object)
    types = {"object": type_object, "car": type_car, "city": type_city,
             "country": type_country}
    predicate_orig = Predicate("at", [("car", types["car"]),
                                      ("dest", types["city"])])
    predicate_dest = Predicate("at", [("car", types["car"]),
                                      ("orig", types["city"])])
    predicate_in = Predicate("in", [("city", types["city"]),
                                    ("country", types["country"])])
    action_drive_car = get_action("DRIVE-CAR", [("car", [types["car"]]),
                                  ("loc-orig", [types["city"]]),
                                  ("loc-dest", [types["city"]])],
                                  [predicate_orig], [predicate_dest],
                                  [predicate_orig])
    # "in" never appears in an effect -> static; the others do -> not static.
    expected = [("in", grounding._get_statics([predicate_in],
                                              [action_drive_car]), True),
                ("dest", grounding._get_statics([predicate_dest],
                                                [action_drive_car]), False),
                ("orig", grounding._get_statics([predicate_orig],
                                                [action_drive_car]), False)]
    # nose-style generator test: each yield is run as one test case.
    for pre, statics, element in expected:
        yield in_statics, pre, statics, element
def test_statics2():
    """Predicate "a" (precondition only) is static; "b" (in an effect) is not."""
    type_object = Type("object", None)
    pred_a = Predicate("a", [])
    pred_b = Predicate("b", [])
    action = get_action("the-action", [], [pred_a], [pred_b], [])
    statics = grounding._get_statics([pred_a, pred_b], [action])
    assert pred_a.name in statics
    assert pred_b.name not in statics
def in_statics(predicate, statics, element):
    """Assert *predicate* is in *statics* iff *element* is true."""
    if element:
        assert predicate in statics
    else:
        assert predicate not in statics
def test_type_map1():
    """type map: maps each type to a list of objects"""
    # Locals shadow the module-level fixtures; this test uses its own
    # small type hierarchy.
    type_object = Type("object", None)
    type_vehicle = Type("vehicle", type_object)
    type_car = Type("car", type_vehicle)
    type_truck = Type("truck", type_vehicle)
    type_city = Type("city", type_object)
    objects = {"red_car": type_car, "green_car": type_car,
               "blue_truck": type_truck, "motorbike": type_vehicle,
               "freiburg": type_city, "basel": type_city}
    type_map = grounding._create_type_map(objects)
    # Each object must appear under its own type and all ancestor types.
    expected = [("red_car", type_map[type_car]),
                ("green_car", type_map[type_car]),
                ("blue_truck", type_map[type_truck]),
                ("red_car", type_map[type_vehicle]),
                ("green_car", type_map[type_vehicle]),
                ("blue_truck", type_map[type_vehicle]),
                ("motorbike", type_map[type_vehicle]),
                ("freiburg", type_map[type_city]),
                ("basel", type_map[type_city]),
                ("green_car", type_map[type_object]),
                ("motorbike", type_map[type_object]),
                ("basel", type_map[type_object])]
    # nose-style generator test: each yield is run as one test case.
    for object, object_list in expected:
        yield in_object_set, object, object_list
def test_type_map2():
    """A single object maps to its own (root) type."""
    root = Type("object", None)
    mapping = grounding._create_type_map({"object1": root})
    assert "object1" in mapping[root]
def in_object_set(object, object_list):
    """Assert that *object* is contained in *object_list*."""
    found = object in object_list
    assert found
def test_collect_facts():
    """All preconditions, add- and delete-effects are collected as facts."""
    ops = [Operator("op1", {"var1"}, {}, {"var3"}),
           Operator("op2", {"var2"}, {"var3"}, {}),
           Operator("op3", {}, {"var1"}, {"var4"})]
    facts = grounding._collect_facts(ops)
    assert facts == {"var1", "var2", "var3", "var4"}
def test_operators():
    """Grounds several action shapes and checks the expected grounded
    operator names appear among the results (nose-style generator test).
    """
    # action with signature with 2 types
    action_drive_vehicle = get_action("DRIVE-VEHICLE",
                                      [("vehicle", [types["car"],
                                                    types["truck"]]),
                                       ("orig", [types["city"]]),
                                       ("dest", [types["city"]])],
                                      [predicate_veh_orig],
                                      [predicate_veh_dest],
                                      [predicate_veh_orig])
    # action with predicate in add & delete list
    action_add_delete = get_action("STAY", [("car", [types["car"]]),
                                            ("in", [types["city"]])],
                                   [predicate_in], [predicate_in],
                                   [predicate_in])
    # action with constant input
    action_constant = get_action("CONSTANT-ACTION",
                                 [("my_car", [types["my_car"]]),
                                  ("city", [types["city"]])],
                                 [],
                                 [Predicate("in", [("basel", [types["city"]]),
                                                   ("switzerland",
                                                    [types["country"]])])], [])
    # action with only delete effects
    action_only_delete = get_action("LEAVE",
                                    [("car", [types["car"]]),
                                     ("in", [types["city"]])],
                                    [predicate_in], [], [predicate_in])
    # action with delete effect which does not occur in precondition
    action_delete = get_action("DELETE", [("car", [types["car"]]),
                                          ("orig", [types["city"]]),
                                          ("dest", [types["city"]])],
                               [], [predicate_car_orig], [predicate_car_dest])
    type_map = grounding._create_type_map(objects)
    grounded_initial_state = grounding._get_partial_state(initial_state)
    # Ground each action against the module-level objects/initial state.
    grounded_drive_car = list(
        grounding._ground_action(action_drive_car, type_map, [],
                                 grounded_initial_state))
    grounded_drive_vehicle = list(
        grounding._ground_action(action_drive_vehicle, type_map, [],
                                 grounded_initial_state))
    grounded_add_delete = list(
        grounding._ground_action(action_add_delete, type_map, [],
                                 grounded_initial_state))
    grounded_only_delete = list(
        grounding._ground_action(action_only_delete, type_map, [],
                                 grounded_initial_state))
    grounded_delete = list(
        grounding._ground_action(action_delete, type_map, [],
                                 grounded_initial_state))
    # The constant action is grounded through the full ground() pipeline.
    domain = Domain("test_domain", types,
                    {"in": Predicate("in", [("city", types["city"]),
                                            ("country", types["country"])])},
                    {"action-constant": action_constant},
                    {"my_car": types["car"]})
    problem = Problem("test_problem", domain, objects, initial_state,
                      goal_state)
    task = grounding.ground(problem)
    grounded_constant = task.operators
    expected = [("(DRIVE-CAR red_car freiburg basel)", grounded_drive_car),
                ("(DRIVE-VEHICLE blue_truck freiburg basel)",
                 grounded_drive_vehicle),
                ("(STAY red_car freiburg)", grounded_add_delete),
                ("(LEAVE red_car freiburg)", grounded_only_delete),
                ("(DELETE red_car freiburg basel)", grounded_delete)]
    for operator, grounded_operators in expected:
        yield operator_grounded, operator, grounded_operators
def operator_grounded(operator, grounded_operators):
    """Assert that *operator* is the name of one of *grounded_operators*."""
    assert any(op.name == operator for op in grounded_operators)
def test_create_operator():
    """_create_operator builds a grounded operator from an action and a
    variable assignment.
    """
    statics = grounding._get_statics(standard_domain.predicates.values(),
                                     [action_drive_car])
    initial_state = [Predicate("at", [("ford", types["car"]),
                                      ("freiburg", types["city"])])]
    operator = grounding._create_operator(
        action_drive_car,
        {"car": "ford", "dest": "berlin", "orig": "freiburg"},
        [], initial_state)
    assert operator.name == "(DRIVE-CAR ford freiburg berlin)"
    # NOTE(review): the expected facts below mirror how action_drive_car's
    # fixture wires predicate_car_dest/predicate_car_orig (dest in the
    # precondition, orig in the add list) — they pin the grounder's actual
    # behavior, not an intuitive drive semantics.
    assert operator.preconditions == {'(at ford berlin)'}
    assert operator.add_effects == {'(at ford freiburg)'}
    assert operator.del_effects == {'(at ford berlin)'}
def test_get_grounded_string():
    """Name plus arguments are rendered as a parenthesized LISP-style atom."""
    expected = "(DRIVE-CAR ford freiburg berlin)"
    args = ["ford", "freiburg", "berlin"]
    assert grounding._get_grounded_string("DRIVE-CAR", args) == expected
def test_ground():
    """
    predicate which does not occur in any operator: "car_color"
    -> does it occurs in a variable?
    -> does it occur in an operator?
    """
    task = grounding.ground(standard_problem)
    assert not any(var.startswith("car_color") for var in task.facts)
    for op in task.operators:
        for fact_set in (op.preconditions, op.add_effects, op.del_effects):
            assert not any(fact.startswith("car_color") for fact in fact_set)
def test_regression():
parser = Parser('')
def parse_problem(domain, problem):
parser.domInput = domain
parser.probInput = problem
domain = parser.parse_domain(False)
return parser.parse_problem(domain, False)
prob_05 = """
;; See domain file for description of this test.
(define (problem regression-test-05)
(:domain regression-test)
(:objects y - object)
(:init)
(:goal (the-predicate x y)))
"""
dom_05 = """
;; Expected behaviour: plan of length one found
;; Observed behaviour (r265): plan of length zero found
(define (domain regression-test)
(:requirements :typing) ;; work around problem in regression test #4.
(:predicates (the-predicate ?v1 ?v2 - object))
(:constants x - object)
(:action theaction
:parameters (?x - object)
:precondition (and)
:effect (the-predicate x ?x)
)
)
"""
prob_06 = """
;; See domain file for description of this test.
(define (problem regression-test-06)
(:domain regression-test)
(:objects y - object)
(:init)
(:goal (the-predicate y y)))
"""
dom_06 = """
;; Expected behaviour: planner proves that no plan exists
;; Observed behaviour (r265): plan of length one found
(define (domain regression-test)
(:requirements :typing) ;; work around problem in regression test #4.
(:predicates (the-predicate ?v1 ?v2 - object))
(:constants x - object)
(:action theaction
:parameters (?x - object)
:precondition (and)
:effect (the-predicate x ?x)
)
)
"""
# problem / domain 07 contains a different action compared
# to the actions of domain 5 & 6
prob_07 = prob_06
dom_07 = """
(define (domain regression-test)
(:requirements :typing) ;; work around problem in regression test #4.
(:predicates (the-predicate ?v1 ?v2 - object))
(:constants y - object)
(:action theaction
:parameters (?x - object)
:precondition (and)
:effect (the-predicate y ?x)
)
)
"""
# action of problem / domain 8 differs only in the variable name compared
# to the actions of problem 5 and 6: After grounding there should be no
# difference between the grounded actions
prob_08 = prob_05
dom_08 = """
(define (domain regression-test)
(:requirements :typing) ;; work around problem in regression test #4.
(:predicates (the-predicate ?v1 ?v2 - object))
(:constants x - object)
(:action theaction
:parameters (?z - object)
:precondition (and)
:effect (the-predicate x ?z)
)
)
"""
parsed_problem5 = parse_problem(dom_05, prob_05)
parsed_problem6 = parse_problem(dom_06, prob_06)
parsed_problem7 = parse_problem(dom_07, prob_07)
parsed_problem8 = parse_problem(dom_08, prob_08)
#coded input:
type_object = Type("object", None)
types = {"object": type_object}
predicates = {"the_predicate": Predicate("the-predicate",
[("v1", type_object),
("v2", type_object)])}
constants = {"x": type_object}
actions = {"theaction": get_action("theaction",
[("?x", [type_object])], [],
[Predicate("the-predicate",
[("x", type_object),
("?x", type_object)])], [])}
domain = Domain("regression-test", types, predicates, actions, constants)
problem5 = Problem("regression-test-05", domain, {"y": type_object}, [],
[Predicate("the-predicate", [("x", type_object),
("y", type_object)])])
problem6 = Problem("regression-test-06", domain, {"y": type_object}, [],
[Predicate("the-predicate", [("y", type_object),
("y", type_object)])])
parsed_task5 = | |
if( pkey_i != pkey_j):
nblist_i.cnt += 1
nblist_i.list.append(pkey_j)
#
# Add extra index positions for key+1 call made by final key
#
nblist_i.index.append(nblist_i.cnt + 1)
# Clear 2D list from memory
del nd2D
#
return nblist_i
def guess_nblist(self,radius_type,radii_buffer=1.25):
"""
Create neighbor list of particles based on distance and element.covalent_radius of each particle
Args:
* radius_type (int)
* 0 - element.covalent_radius
* 1 - element.vdw_radius
* radii_buffer (float) to multiply radii cut off
Return:
* NBlist (object)
"""
nblist_i = NBlist()
nblist_i.list = []
nblist_i.index = []
nblist_i.cnt = -1
if( radius_type == 0 ):
logger.debug("Guessing neighbor list using the covalent radius of the particles element ")
elif( radius_type == 1 ):
logger.debug("Guessing neighbor list using the Van der Waals radius of the particles element ")
else:
error_msg = 'Argument "radius_type" needs to be an integer of 0 or 1'
error_msg += "\n Returning Empty NBlist object "
raise ValueError(error_string)
return nblist_i
# Create 2D list of lists of inter particle distances
npos_i = self.positions
npos_j = self.positions
dr_matrix, dist_matrix = self.lat.delta_npos(npos_i,npos_j)
# Loop over all particles
for pkey_i,particle_i in self.particles.iteritems():
if( radius_type == 0 ):
radii_i = particle_i.bonded_radius
elif( radius_type == 1 ):
radii_i = particle_i.nonbonded_radius
nblist_i.index.append(nblist_i.cnt + 1)
for pkey_j,particle_j in self.particles.iteritems():
if( pkey_i != pkey_j):
if( radius_type == 0 ):
radii_j = particle_j.bonded_radius
elif( radius_type == 1 ):
radii_j = particle_j.nonbonded_radius
dr_cut = radii_i + radii_j
dr_cut = dr_cut*radii_buffer
logger.debug("Particles i_%d - j_%d dr %f cut %f "%(pkey_i,pkey_j,dist_matrix[pkey_i,pkey_j],dr_cut))
if( dist_matrix[pkey_i,pkey_j] <= dr_cut ):
nblist_i.cnt += 1
nblist_i.list.append(pkey_j)
# Add extra index positions for key+1 call made by final key
nblist_i.index.append(nblist_i.cnt + 1)
# Clear list from memory
del dr_matrix
del dist_matrix
#
return nblist_i
def getSubStructure(self,pkeys,tag="blank"):
"""
Create new structure container from list of particle keys
"""
new_strucC = Structure(str(tag))
key_update = dict()
# Set lattice
new_strucC.lat = self.lat
# Set particles
for pkey_i in pkeys:
p_i = self.particles[pkey_i]
pos_i = self.positions[pkey_i]
new_strucC.add_partpos(p_i,pos_i, deepcopy = True)
key_update[pkey_i] = new_strucC.n_particles -1
if( len(self.bonded_nblist.index) > 0 ):
# Update bonded neighbor list
new_strucC.bonded_nblist = NBlist()
for pkey_i in pkeys:
new_strucC.bonded_nblist.index.append(new_strucC.bonded_nblist.cnt + 1)
for pkey_j in self.bonded_nblist.getnbs(pkey_i):
if( pkey_j in pkeys ):
new_strucC.bonded_nblist.cnt += 1
new_strucC.bonded_nblist.list.append(key_update[pkey_j])
new_strucC.bonded_nblist.index.append(new_strucC.bonded_nblist.cnt + 1)
new_strucC.bonded_bonds()
return new_strucC
def write_coord(self):
"""
Write coordinates into string
"""
coord = ''.join([" %5s %16.8f %16.8f %16.8f \n"%(particle_i.symbol,self.positions[pkey_i][0],self.positions[pkey_i][1],self.positions[pkey_i][2] ) for pkey_i,particle_i in self.particles.iteritems()])
return coord
def write_xyz_str(self):
'''
Write xyz file string
'''
xyz_str = " %d \n" % self.n_particles
xyz_str += " %s \n"%(self.tag)
xyz_str += self.write_coord()
return str(xyz_str)
def write_xyz(self, xyz_file=''):
'''
Write a structure to an xyz file
Kwargs:
* xyz_file (str) xyz file to write data to
'''
if( len(xyz_file) == 0 ):
xyz_file = "%s.xyz"%(self.tag)
xyz_str = self.write_xyz_str()
F = open(xyz_file,"w")
F.write(xyz_str)
F.close()
def write_xyz_list(self, list_i,xyz_file=''):
'''
Write a list of certain particles of the structure to an xyz file
Args:
* list_i (list) list of particle indexes
Kwargs:
* xyz_file (str) xyz file to write data to
'''
if( len(xyz_file) == 0 ):
xyz_file = "%s.xyz"%(self.tag)
F = open(xyz_file,"w")
#
# Loop over structures
#
F.write(" %d \n" % len(list_i) )
F.write(" %s \n"%" structures.Container ")
for pkey_i in list_i:
particle_i = self.particles[pkey_i]
pos_i = self.positions[pkey_i]
F.write(" %5s %16.8f %16.8f %16.8f \n"%(particle_i.symbol,pos_i[0],pos_i[1],pos_i[2] ))
F.close()
def read_xyz(self, xyz_file=''):
'''
Read a structure to an xmol file
Kwargs:
* xyz_file (str) xyz file to read data from
'''
if( len(xyz_file) == 0 ):
xyz_file = "%s.xyz"%(self.tag)
line_cnt = 0
try:
with open(xyz_file) as f:
for line in f:
line_cnt += 1
col = line.split()
if( line_cnt > 2 and len(col) >= 4 ):
# Read lines and add particles to structure
symbol = str(col[0])
pos_i = np.array( [float(col[1]),float(col[2]),float(col[3])] )
pt_i = Particle(symbol=symbol)
self.add_partpos(pt_i,pos_i,deepcopy = True)
except:
logger.warning(" File not found %s in %s "%(xyz_file,os.getcwd()))
return
def write_list(self,list_i,tag_i):
'''
Write list of particle keys to file to use in remote analysis
Args:
* list_i (list): list of particle indexes
* tag_i (str): string to be used as file name ``tag``.list
'''
list_str = [str(pkey) for pkey in list_i]
list_file = '%s.list'%(tag_i)
outfile = open(list_file,'wb')
outfile.write("\n".join(list_str))
outfile.close()
return list_file
    def shift(self, pkey, vec):
        """
        Shift position of pkey by vector

        Arguments:
            * pkey (int) particle key
            * vec (numpy array) vector
        """
        # In-place += keeps the same underlying array object, so any external
        # references to this position row observe the update.
        self._property['positions'][pkey] += vec
    def shift_pos(self,vec):
        '''
        Shift position of all particles by vector vec
        '''
        # Delegate to shift() so the in-place update logic lives in one place
        for pkey_i in self.particles.keys():
            self.shift( pkey_i, vec)
    def pbc_pos(self):
        '''
        Apply periodic boundary conditions to all particle positions (in place)
        '''
        # Minimum-image wrap of each coordinate. Only the diagonal of the
        # lattice matrix is used, so this assumes an orthorhombic (diagonal)
        # cell -- NOTE(review): confirm non-orthorhombic lattices never reach
        # this method.
        for r_i in self._property['positions']:
            for d in range(self.lat.n_dim ):
                r_i[d] = r_i[d] - self.lat.matrix[d][d] * round( r_i[d]/ self.lat.matrix[d][d] )
    def lat_cubic(self,len_o):
        '''
        Set lattice to cubic with lattice constant len_o
        '''
        # Thin delegation to the lattice object
        self.lat.set_cubic(len_o)
def calc_mass(self):
"""
Calculate total mass of structure
"""
self._property['mass'] = float(0.0)
for pkey_i, particle_i in self.particles.iteritems():
self._property['mass'] += particle_i.mass
return
def calc_charge(self):
"""
Calculate total charge of structure
"""
self._property['charge'] = 0.0
for pkey_i, particle_i in self.particles.iteritems():
self._property['charge'] += particle_i.charge
return
def calc_volume(self):
"""
Calculate volume of structure
.. math::
Volume = ( v_i x v_j ) * v_k
"""
v_i = self.lat.matrix[0]
v_j = self.lat.matrix[1]
v_k = self.lat.matrix[2]
v_ij = np.cross(v_i,v_j)
self._property['volume'] = np.dot(v_ij,v_k)
return
    def calc_density(self):
        """
        Calculate density of structure
        """
        # mass and volume are read from properties; callers are expected to
        # have run calc_mass()/calc_volume() first so they are up to date
        self._property['density'] = self.mass/self.volume
def calc_center_mass(self):
"""
Find center of mass of a structure
"""
self._property['center_mass'] = np.zeros(self.lat.n_dim)
for pkey_i, particle_i in self.particles.iteritems():
mass_i = particle_i.mass
# print self.positions[pkey_i][0],self.positions[pkey_i][1],self.positions[pkey_i][2],mass_i
for dim in range(self.lat.n_dim):
self._property['center_mass'][dim] += mass_i*np.array(self.positions[pkey_i][dim])
for dim in range(self.lat.n_dim):
self._property['center_mass'][dim] = self._property['center_mass'][dim]/self.mass
return
def calc_composition(self):
"""
Calculate composition
"""
# Find max mol and residue numbers
self.mol_max = -1
self.residue_max = -1
for pkey_i, particle_i in self.particles.iteritems():
if( particle_i.mol > self.mol_max ): self.mol_max = particle_i.mol
if( particle_i.residue > self.residue_max ): self.residue_max = particle_i.residue
self.composition = np.zeros(len(pymatgen_pt._pt_data),dtype=np.int)
for pkey_i, particle_i in self.particles.iteritems():
el_i = int( particle_i.element.number )
if( el_i >= 0 ):
self.composition[el_i] += 1
    def calc_formula(self):
        """
        Calculate chemical formula string (stored in self.chemicalformula)
        """
        self.chemicalformula = ""
        self.calc_composition()
        # Hill-style ordering: carbon (Z=6) first, hydrogen (Z=1) second,
        # then all remaining elements in atomic-number order.
        el_n_list = [6,1]
        el_n_list += [ i for i in range(1,len(pymatgen_pt._pt_data)) if( i != 6 and i != 1 ) ]
        for n_i in el_n_list:
            if( self.composition[n_i] > 0 ):
                el_i = pymatgen_pt.Element.from_Z(n_i)
                self.chemicalformula += "%s%d"%(el_i.symbol,self.composition[n_i])
def sum_charge(self,pkey_i,pkey_j):
'''
Sum charge of particle i into particle j
Args:
* pkey_i (int) Particle key
* pkey_j (int) Particle key
'''
# Sum charges of particles to be removed into attachment points
logger.debug(" Summing {} with charge {} into particle {}".format(self.particles[pkey_j].symbol,self.particles[pkey_j].charge,pkey_i))
self.particles[pkey_i].charge += self.particles[pkey_j].charge
self.particles[pkey_j].charge = 0.0
    def sum_prop(self,pkey_i,pkey_j):
        '''
        Sum property of particle i into particle j

        Args:
            * pkey_i (int) Particle key
            * pkey_j (int) Particle key

        .. TODO::
            This should be changed to sum_charge
        '''
        # Sum charges of particles to be removed into attachment points
        # NOTE(review): writes the backing _property dict directly, whereas
        # sum_charge goes through the .charge attribute -- confirm both paths
        # update the same storage.
        logger.debug(" Summing {} with charge {} into particle {}".format(self.particles[pkey_j].symbol,self.particles[pkey_j].charge,pkey_i))
        self.particles[pkey_i]._property['charge'] += self.particles[pkey_j].charge
        self.particles[pkey_j]._property['charge'] = 0.0
def maxtags(self):
"""
Find max mol and residue numbers
"""
self.mol_max = -1
self.residue_max = -1
for pkey_i, particle_i in self.particles.iteritems():
if( particle_i.mol > self.mol_max ): self.mol_max = particle_i.mol
if( particle_i.residue > self.residue_max ): self.residue_max = particle_i.residue
    def mol_mult(self):
        """
        Find value to multiply mol index by
        so residue index can be added to get a unique group value
        """
        # NOTE(review): len(s)*10.0/len(s) always equals 10.0, so this whole
        # expression reduces to the constant 100.0 regardless of mol_max --
        # confirm whether a digit-count-based scale was actually intended.
        self.mol_multiplier = float( len( str( abs( round(self.mol_max,0) )))*10.0/len( str( abs( round(self.mol_max,0) ))) )*10.0
def n_molecules(self):
"""
Number of molecules
.. TODO::
deprecate
"""
max_mol = 0
for pkey_i, particle_i in self.particles.iteritems():
if( max_mol < particle_i.mol ): max_mol = particle_i.mol
return max_mol
| |
: sparse matrix, dia
Inverse mass matrix, D^{-1}
lhsfac : cholesky Factor object
Factorized left side, solves biharmonic problem
notboundary : ndarray, int
Indices of non-boundary vertices
"""
try:
from scikits.sparse.cholmod import cholesky
factorize = lambda x: cholesky(x).solve_A
except ImportError:
factorize = sparse.linalg.dsolve.factorized
B, D, W, V = self.laplace_operator
npt = len(D)
g = np.nonzero(D > 0)[0] # Find vertices with non-zero mass
#g = np.nonzero((L.sum(0) != 0).A.ravel())[0] # Find vertices with non-zero mass
notboundary = np.setdiff1d(np.arange(npt)[g], boundary_verts) # find non-boundary verts
D = np.clip(D, clip_D, D.max())
Dinv = sparse.dia_matrix((D**-1,[0]), (npt,npt)).tocsr() # construct Dinv
L = Dinv.dot((V-W)) # construct Laplace-Beltrami operator
lhs = (V-W).dot(L) # construct left side, almost squared L-B operator
#lhsfac = cholesky(lhs[notboundary][:,notboundary]) # factorize
lhsfac = factorize(lhs[notboundary][:,notboundary]) # factorize
return lhs, D, Dinv, lhsfac, notboundary
    def _create_interp(self, verts, bhsolver=None):
        """Creates interpolator that will interpolate values at the given `verts` using
        biharmonic interpolation.

        Parameters
        ----------
        verts : 1D array-like of ints
            Indices of vertices that will serve as knot points for interpolation.
        bhsolver : (lhs, rhs, Dinv, lhsfac, notboundary), optional
            A 5-tuple representing a biharmonic equation solver. This structure
            is created by create_biharmonic_solver.

        Returns
        -------
        _interp : function
            Function that will interpolate a given set of values across the surface.
            The values can be 1D or 2D (number of dimensions by len `verts`). Any
            number of dimensions can be interpolated simultaneously.
        """
        # Build (or unpack) the factorized biharmonic system once; the
        # returned closure reuses it for every call.
        if bhsolver is None:
            lhs, D, Dinv, lhsfac, notb = self.create_biharmonic_solver(verts)
        else:
            lhs, D, Dinv, lhsfac, notb = bhsolver
        npt = len(D)
        def _interp(vals):
            """Interpolate function with values `vals` at the knot points."""
            # NOTE(review): assumes `vals` is an ndarray (uses .T below) --
            # confirm callers never pass a plain list.
            v2 = np.atleast_2d(vals)
            nd,nv = v2.shape
            # Scatter knot values into a sparse (npt x nd) right-hand side:
            # row = vertex index, column = value dimension.
            ij = np.zeros((2,nv*nd))
            ij[0] = np.array(verts)[np.repeat(np.arange(nv), nd)]
            ij[1] = np.tile(np.arange(nd), nv)
            r = sparse.csr_matrix((vals.T.ravel(), ij), shape=(npt,nd))
            vr = lhs.dot(r)
            #phi = lhsfac.solve_A(-vr.todense()[notb]) # 29.9ms
            #phi = lhsfac.solve_A(-vr[notb]).todense() # 29.3ms
            #phi = lhsfac.solve_A(-vr[notb].todense()) # 28.2ms
            phi = lhsfac(-vr[notb].todense())
            # Solution covers only non-boundary vertices; knot vertices keep
            # their exact input values.
            tphi = np.zeros((npt,nd))
            tphi[notb] = phi
            tphi[verts] = v2.T
            return tphi
        return _interp
def interp(self, verts, vals):
"""Interpolates a function between N knot points `verts` with the values `vals`.
`vals` can be a D x N array to interpolate multiple functions with the same
knot points.
Using this function directly is unnecessarily expensive if you want to interpolate
many different values between the same knot points. Instead, you should directly
create an interpolator function using _create_interp, and then call that function.
In fact, that's exactly what this function does.
See create_biharmonic_solver for math details.
Parameters
----------
verts : 1D array-like of ints
Indices of vertices that will serve as knot points for interpolation.
vals : 2D ndarray, shape (dimensions, len(verts))
Values at the knot points. Can be multidimensional.
Returns
-------
tphi : 2D ndarray, shape (total_verts, dimensions)
Interpolated value at every vertex on the surface.
"""
return self._create_interp(verts)(vals)
@property
@_memo
def _facenorm_cross_edge(self):
ppts = self.ppts
fnorms = self.face_normals
fe12 = np.cross(fnorms, ppts[:,1] - ppts[:,0])
fe23 = np.cross(fnorms, ppts[:,2] - ppts[:,1])
fe31 = np.cross(fnorms, ppts[:,0] - ppts[:,2])
return fe12, fe23, fe31
    def approx_geodesic_distance(self, verts, m=0.1):
        """Computes approximate geodesic distance (in mm) from each vertex in
        the surface to any vertex in the collection `verts`. This approximation
        is computed using Varadhan's formula for geodesic distance based on the
        heat kernel. This is very fast (quite a bit faster than `geodesic_distance`)
        but very inaccurate. Use with care.
        In short, we let heat diffuse across the surface from sources at `verts`,
        and then look at the resulting heat levels in every other vertex to
        approximate how far they are from the sources. In theory, this should
        be very accurate as the duration of heat diffusion goes to zero. In
        practice, short duration leads to numerical instability and error.

        Parameters
        ----------
        verts : 1D array-like of ints
            Set of vertices to compute distance from. This function returns the shortest
            distance to any of these vertices from every vertex in the surface.
        m : float, optional
            Scalar on the duration of heat propagation. Default 0.1.

        Returns
        -------
        1D ndarray, shape (total_verts,)
            Approximate geodesic distance (in mm) from each vertex in the
            surface to the closest vertex in `verts`.
        """
        npt = len(self.pts)
        t = m * self.avg_edge_length ** 2 # time of heat evolution
        # The backward-Euler factorization is cached per value of m and built
        # only on first use; _goodrows is cached alongside it.
        if m not in self._rlfac_solvers:
            B, D, W, V = self.laplace_operator
            nLC = W - V # negative laplace matrix
            spD = sparse.dia_matrix((D,[0]), (npt,npt)).tocsr() # lumped mass matrix
            lfac = spD - t * nLC # backward Euler matrix
            # Exclude rows with zero weight (zero-mass rows break the sparse LU)
            goodrows = np.nonzero(~np.array(lfac.sum(0) == 0).ravel())[0]
            self._goodrows = goodrows
            self._rlfac_solvers[m] = sparse.linalg.dsolve.factorized(lfac[goodrows][:,goodrows])
        # Solve system to get u, the heat values
        u0 = np.zeros((npt,)) # initial heat values
        u0[verts] = 1.0
        goodu = self._rlfac_solvers[m](u0[self._goodrows])
        u = np.zeros((npt,))
        u[self._goodrows] = goodu
        # Varadhan's formula relates distance to -4*t*log(u).
        # NOTE(review): this returns -4*t*log(u) without a square root --
        # confirm callers expect this quantity as the "distance in mm".
        return -4 * t * np.log(u)
def geodesic_distance(self, verts, m=1.0, fem=False):
"""Minimum mesh geodesic distance (in mm) from each vertex in surface to any
vertex in the collection `verts`.
Geodesic distance is estimated using heat-based method (see 'Geodesics in Heat',
Crane et al, 2012). Diffusion of heat along the mesh is simulated and then
used to infer geodesic distance. The duration of the simulation is controlled
by the parameter `m`. Larger values of `m` will smooth & regularize the distance
computation. Smaller values of `m` will roughen and will usually increase error
in the distance computation. The default value of 1.0 is probably pretty good.
This function caches some data (sparse LU factorizations of the laplace-beltrami
operator and the weighted adjacency matrix), so it will be much faster on
subsequent runs.
The time taken by this function is independent of the number of vertices in verts.
Parameters
----------
verts : 1D array-like of ints
Set of vertices to compute distance from. This function returns the shortest
distance to any of these vertices from every vertex in the surface.
m : float, optional
Reverse Euler step length. The optimal value is likely between 0.5 and 1.5.
Default is 1.0, which should be fine for most cases.
fem : bool, optional
Whether to use Finite Element Method lumped mass matrix. Wasn't used in
Crane 2012 paper. Doesn't seem to help any.
Returns
-------
1D ndarray, shape (total_verts,)
Geodesic distance (in mm) from each vertex in the surface to the closest
vertex in `verts`.
"""
npt = len(self.pts)
if m not in self._rlfac_solvers or m not in self._nLC_solvers:
B, D, W, V = self.laplace_operator
nLC = W - V # negative laplace matrix
if not fem:
spD = sparse.dia_matrix((D,[0]), (npt,npt)).tocsr() # lumped mass matrix
else:
spD = B
t = m * self.avg_edge_length ** 2 # time of heat evolution
lfac = spD - t * nLC # backward Euler matrix
# Exclude rows with zero weight (these break the sparse LU, that finicky fuck)
goodrows = np.nonzero(~np.array(lfac.sum(0) == 0).ravel())[0]
self._goodrows = goodrows
self._rlfac_solvers[m] = sparse.linalg.dsolve.factorized(lfac[goodrows][:,goodrows])
self._nLC_solvers[m] = sparse.linalg.dsolve.factorized(nLC[goodrows][:,goodrows])
# Solve system to get u, the heat values
u0 = np.zeros((npt,)) # initial heat values
u0[verts] = 1.0
goodu = self._rlfac_solvers[m](u0[self._goodrows])
u = np.zeros((npt,))
u[self._goodrows] = goodu
# Compute grad u at each face
gradu = self.surface_gradient(u, at_verts=False)
# Compute X (normalized grad u)
#X = np.nan_to_num((-gradu.T / np.sqrt((gradu**2).sum(1))).T)
graduT = gradu.T
gusum = ne.evaluate("sum(gradu ** 2, 1)")
X = np.nan_to_num(ne.evaluate("-graduT / sqrt(gusum)").T)
# Compute integrated divergence of X at each vertex
#x1 = x2 = x3 = np.zeros((X.shape[0],))
c32, c13, c21 = self._cot_edge
x1 = 0.5 * (c32 * X).sum(1)
x2 = 0.5 * (c13 * X).sum(1)
x3 = 0.5 * (c21 * X).sum(1)
conn1, conn2, conn3 = self._polyconn
divx = conn1.dot(x1) + conn2.dot(x2) + conn3.dot(x3)
# Compute phi (distance)
goodphi = self._nLC_solvers[m](divx[self._goodrows])
phi = np.zeros((npt,))
phi[self._goodrows] = goodphi - goodphi.min()
# Ensure that distance is zero for selected verts
phi[verts] = 0.0
return | |
"""
@author: eagle705
https://github.com/eagle705/pytorch-bert-crf-ner/
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import torch
from pathlib import Path
from tensorflow import keras
import numpy as np
from konlpy.tag import Twitter
from collections import Counter
from threading import Thread
import six
from torch import nn
class Config:
    """JSON-backed configuration whose keys are exposed as attributes."""

    def __init__(self, json_path):
        """Load the JSON file at `json_path` into this object's attributes."""
        with open(json_path, mode='r') as io:
            self.__dict__.update(json.loads(io.read()))

    def save(self, json_path):
        """Dump the current configuration to `json_path` as indented JSON."""
        with open(json_path, mode='w') as io:
            json.dump(self.__dict__, io, indent=4)

    def update(self, json_path):
        """Merge keys from the JSON file at `json_path` into this config."""
        with open(json_path, mode='r') as io:
            self.__dict__.update(json.loads(io.read()))

    @property
    def dict(self):
        """Plain-dict view of the configuration."""
        return self.__dict__
class CheckpointManager:
    """Save and load torch checkpoints under a fixed model directory."""

    def __init__(self, model_dir):
        # Accept either a str or a Path for the checkpoint directory
        self._model_dir = model_dir if isinstance(model_dir, Path) else Path(model_dir)

    def save_checkpoint(self, state, filename):
        """Serialize `state` to `filename` inside the model directory."""
        torch.save(state, self._model_dir / filename)

    def load_checkpoint(self, filename):
        """Load a checkpoint; always mapped to CPU so GPU saves load anywhere."""
        return torch.load(self._model_dir / filename, map_location=torch.device('cpu'))
class SummaryManager:
    """Accumulate a metrics-summary dict and persist it as JSON on disk."""

    def __init__(self, model_dir):
        # Accept either a str or a Path for the summary directory
        self._model_dir = model_dir if isinstance(model_dir, Path) else Path(model_dir)
        self._summary = {}

    def save(self, filename):
        """Write the current summary to `filename` as indented JSON."""
        with open(self._model_dir / filename, mode='w') as io:
            json.dump(self._summary, io, indent=4)

    def load(self, filename):
        """Read `filename` and merge its metrics into the current summary."""
        with open(self._model_dir / filename, mode='r') as io:
            self.update(json.loads(io.read()))

    def update(self, summary):
        """Merge the given dict into the current summary."""
        self._summary.update(summary)

    def reset(self):
        """Drop all accumulated metrics."""
        self._summary = {}

    @property
    def summary(self):
        return self._summary
class Vocabulary(object):
    """Vocab Class: bidirectional token <-> index mapping with BERT-style
    special tokens registered first."""

    def __init__(self, token_to_idx=None):
        """
        Args:
            token_to_idx: optional pre-built {token: index} mapping; when
                given it replaces the default special-token vocabulary.
        """
        self.token_to_idx = {}
        self.idx_to_token = {}
        self.idx = 0  # next free index handed out by add_token
        self.PAD = self.padding_token = "[PAD]"
        self.START_TOKEN = "<S>"
        self.END_TOKEN = "<T>"
        self.UNK = "[UNK]"
        self.CLS = "[CLS]"
        self.MASK = "[MASK]"
        self.SEP = "[SEP]"
        self.SEG_A = "[SEG_A]"
        self.SEG_B = "[SEG_B]"
        self.NUM = "<num>"
        self.cls_token = self.CLS
        self.sep_token = self.SEP
        self.special_tokens = [self.PAD,
                               self.START_TOKEN,
                               self.END_TOKEN,
                               self.UNK,
                               self.CLS,
                               self.MASK,
                               self.SEP,
                               self.SEG_A,
                               self.SEG_B,
                               self.NUM]
        self.init_vocab()

        if token_to_idx is not None:
            self.token_to_idx = token_to_idx
            self.idx_to_token = {v: k for k, v in token_to_idx.items()}
            # BUG FIX: was len(token_to_idx) - 1, which made the next
            # add_token() reuse (and clobber) the last existing index.
            self.idx = len(token_to_idx)

        # if pad token in token_to_idx dict, get pad_id
        if self.PAD in self.token_to_idx:
            self.PAD_ID = self.transform_token2idx(self.PAD)
        else:
            self.PAD_ID = 0

    def init_vocab(self):
        """Register the special tokens (ids 0..9) and set PAD_ID."""
        for special_token in self.special_tokens:
            self.add_token(special_token)
        self.PAD_ID = self.transform_token2idx(self.PAD)

    def __len__(self):
        return len(self.token_to_idx)

    def to_indices(self, tokens):
        """Map a list of tokens to indices; unknown tokens map to UNK."""
        return [self.transform_token2idx(X_token) for X_token in tokens]

    def add_token(self, token):
        """Add `token` if absent, assigning it the next free index."""
        if not token in self.token_to_idx:
            self.token_to_idx[token] = self.idx
            self.idx_to_token[self.idx] = token
            self.idx += 1

    def transform_token2idx(self, token, show_oov=False):
        """Return the index of `token`, falling back to the UNK index."""
        try:
            return self.token_to_idx[token]
        except KeyError:  # BUG FIX: was a bare except hiding unrelated errors
            if show_oov is True:
                print("key error: " + str(token))
            return self.token_to_idx[self.UNK]

    def transform_idx2token(self, idx):
        """Return the token at `idx`, falling back to the UNK token."""
        try:
            return self.idx_to_token[idx]
        except KeyError:  # BUG FIX: was a bare except hiding unrelated errors
            print("key error: " + str(idx))
            return self.idx_to_token[self.token_to_idx[self.UNK]]

    def build_vocab(self, list_of_str, threshold=1, vocab_save_path="./data_in/token_vocab.json",
                    split_fn=None):
        """Build a token vocab from a corpus using 4 tagging threads.

        Args:
            list_of_str: corpus as a list of strings.
            threshold: minimum token count for inclusion.
            vocab_save_path: path where the token->idx mapping is dumped as JSON.
            split_fn: tokenizer callable; defaults to konlpy Twitter().morphs.
                BUG FIX: the default was previously ``split_fn=Twitter().morphs``
                and therefore evaluated at import time, instantiating the heavy
                tokenizer (and requiring konlpy) even when build_vocab was
                never called.
        """
        if split_fn is None:
            split_fn = Twitter().morphs

        def do_concurrent_tagging(start, end, text_list, counter):
            # Tokenize one slice of the corpus and fold counts into the
            # shared Counter; per-text failures are logged and skipped.
            for i, text in enumerate(text_list[start:end]):
                text = text.strip()
                text = text.lower()
                try:
                    tokens_ko = split_fn(text)
                    counter.update(tokens_ko)
                    if i % 1000 == 0:
                        print("[%d/%d (total: %d)] Tokenized input text." % (
                            start + i, start + len(text_list[start:end]), len(text_list)))
                except Exception as e:  # OOM, Parsing Error
                    print(e)
                    continue

        counter = Counter()
        num_thread = 4
        thread_list = []
        num_list_of_str = len(list_of_str)
        for i in range(num_thread):
            thread_list.append(Thread(target=do_concurrent_tagging, args=(
                int(i * num_list_of_str / num_thread), int((i + 1) * num_list_of_str / num_thread), list_of_str,
                counter)))
        for thread in thread_list:
            thread.start()
        for thread in thread_list:
            thread.join()

        # vocab_report
        print(counter.most_common(10))  # print most common tokens
        tokens = [token for token, cnt in counter.items() if cnt >= threshold]
        for i, token in enumerate(tokens):
            self.add_token(str(token))

        print("len(self.token_to_idx): ", len(self.token_to_idx))
        with open(vocab_save_path, 'w', encoding='utf-8') as f:
            json.dump(self.token_to_idx, f, ensure_ascii=False, indent=4)

        return self.token_to_idx
def keras_pad_fn(token_ids_batch, maxlen, pad_id=0, padding='post', truncating='post'):
    """Pad/truncate a batch of token-id lists to `maxlen` (post-pad by default)."""
    return pad_sequences(token_ids_batch,
                         value=pad_id,  # vocab.transform_token2idx(PAD)
                         padding=padding,
                         truncating=truncating,
                         maxlen=maxlen)
def pad_sequences(sequences, maxlen=None, dtype='int32',
                  padding='pre', truncating='pre', value=0.):
    """Pads sequences to the same length.
    This function transforms a list of
    `num_samples` sequences (lists of integers)
    into a 2D Numpy array of shape `(num_samples, num_timesteps)`.
    `num_timesteps` is either the `maxlen` argument if provided,
    or the length of the longest sequence otherwise.
    Sequences that are shorter than `num_timesteps`
    are padded with `value` at the end.
    Sequences longer than `num_timesteps` are truncated
    so that they fit the desired length.
    The position where padding or truncation happens is determined by
    the arguments `padding` and `truncating`, respectively.
    Pre-padding is the default.
    # Arguments
        sequences: List of lists, where each element is a sequence.
        maxlen: Int, maximum length of all sequences.
        dtype: Type of the output sequences.
            To pad sequences with variable length strings, you can use `object`.
        padding: String, 'pre' or 'post':
            pad either before or after each sequence.
        truncating: String, 'pre' or 'post':
            remove values from sequences larger than
            `maxlen`, either at the beginning or at the end of the sequences.
        value: Float or String, padding value.
    # Returns
        x: Numpy array with shape `(len(sequences), maxlen)`
    # Raises
        ValueError: In case of invalid values for `truncating` or `padding`,
            or in case of invalid shape for a `sequences` entry.
    """
    if not hasattr(sequences, '__len__'):
        raise ValueError('`sequences` must be iterable.')
    num_samples = len(sequences)

    lengths = []
    for x in sequences:
        try:
            lengths.append(len(x))
        except TypeError:
            raise ValueError('`sequences` must be a list of iterables. '
                             'Found non-iterable: ' + str(x))

    if maxlen is None:
        maxlen = np.max(lengths)

    # take the sample shape from the first non empty sequence
    # checking for consistency in the main loop below.
    sample_shape = tuple()
    for s in sequences:
        if len(s) > 0:
            sample_shape = np.asarray(s).shape[1:]
            break

    # BUG FIX: np.unicode_ was removed in NumPy 2.0; np.str_ is the unicode
    # string scalar type on Python 3 and covers both former checks. Also use
    # the builtin str instead of six.string_types (Python 3 equivalent),
    # dropping the six dependency.
    is_dtype_str = np.issubdtype(dtype, np.str_)
    if isinstance(value, str) and dtype != object and not is_dtype_str:
        raise ValueError("`dtype` {} is not compatible with `value`'s type: {}\n"
                         "You should set `dtype=object` for variable length strings."
                         .format(dtype, type(value)))

    x = np.full((num_samples, maxlen) + sample_shape, value, dtype=dtype)
    for idx, s in enumerate(sequences):
        if not len(s):
            continue  # empty list/array was found
        if truncating == 'pre':
            trunc = s[-maxlen:]
        elif truncating == 'post':
            trunc = s[:maxlen]
        else:
            raise ValueError('Truncating type "%s" '
                             'not understood' % truncating)

        # check `trunc` has expected shape
        trunc = np.asarray(trunc, dtype=dtype)
        if trunc.shape[1:] != sample_shape:
            raise ValueError('Shape of sample %s of sequence at position %s '
                             'is different from expected shape %s' %
                             (trunc.shape[1:], idx, sample_shape))

        if padding == 'post':
            x[idx, :len(trunc)] = trunc
        elif padding == 'pre':
            x[idx, -len(trunc):] = trunc
        else:
            raise ValueError('Padding type "%s" not understood' % padding)
    return x
class Tokenizer:
    """Tokenizer class: splits strings into tokens, maps tokens to ids, and
    pads/truncates batches to a fixed length."""

    def __init__(self, vocab, split_fn, pad_fn, maxlen):
        """
        Args:
            vocab: Vocabulary-like object providing to_indices,
                transform_token2idx, transform_idx2token, PAD_ID,
                cls_token/sep_token and START_TOKEN/END_TOKEN.
            split_fn: callable mapping a string to a list of tokens.
            pad_fn: callable(batch, pad_id=..., maxlen=...) used for padding,
                or a falsy value to disable padding.
            maxlen: maximum sequence length passed to ``pad_fn``.
        """
        self._vocab = vocab
        self._split = split_fn
        self._pad = pad_fn
        self._maxlen = maxlen

    def split(self, string):
        """Split a string into a list of tokens."""
        return self._split(string)

    def transform(self, tokens):
        """Map tokens to ids, padding to ``maxlen`` when a pad_fn is set."""
        indices = self._vocab.to_indices(tokens)
        return self._pad(indices, pad_id=0, maxlen=self._maxlen) if self._pad else indices

    def split_and_transform(self, string):
        """Split a string and map the resulting tokens to (padded) ids."""
        return self.transform(self.split(string))

    @property
    def vocab(self):
        return self._vocab

    def list_of_tokens_to_list_of_token_ids(self, X_token_batch):
        """Map a batch of token lists to a batch of token-id lists.

        BUG FIX: this method used to be defined twice with identical bodies;
        the redundant duplicate definition has been removed.
        """
        X_ids_batch = []
        for X_tokens in X_token_batch:
            X_ids_batch.append([self._vocab.transform_token2idx(X_token) for X_token in X_tokens])
        return X_ids_batch

    def list_of_string_to_list_of_tokens(self, X_str_batch):
        """Split each string in the batch into tokens."""
        return [self._split(X_str) for X_str in X_str_batch]

    def list_of_string_to_list_token_ids(self, X_str_batch):
        """Split strings and map tokens to ids (no padding)."""
        X_token_batch = self.list_of_string_to_list_of_tokens(X_str_batch)
        return self.list_of_tokens_to_list_of_token_ids(X_token_batch)

    def list_of_string_to_arr_of_pad_token_ids(self, X_str_batch, add_start_end_token=False):
        """Split strings, map to ids and pad to ``maxlen``.

        When ``add_start_end_token`` is True, returns the (decoder-input,
        decoder-output) pair produced by ``add_start_end_token_with_pad``.
        """
        X_token_batch = self.list_of_string_to_list_of_tokens(X_str_batch)
        if add_start_end_token is True:
            return self.add_start_end_token_with_pad(X_token_batch)
        else:
            X_ids_batch = self.list_of_tokens_to_list_of_token_ids(X_token_batch)
            return self._pad(X_ids_batch, pad_id=self._vocab.PAD_ID, maxlen=self._maxlen)

    def list_of_tokens_to_list_of_cls_sep_token_ids(self, X_token_batch):
        """Wrap each token list in [CLS] ... [SEP] and map to ids."""
        X_ids_batch = []
        for X_tokens in X_token_batch:
            X_tokens = [self._vocab.cls_token] + X_tokens + [self._vocab.sep_token]
            X_ids_batch.append([self._vocab.transform_token2idx(X_token) for X_token in X_tokens])
        return X_ids_batch

    def list_of_string_to_arr_of_cls_sep_pad_token_ids(self, X_str_batch):
        """Split, wrap in [CLS]/[SEP], map to ids and pad to ``maxlen``."""
        X_token_batch = self.list_of_string_to_list_of_tokens(X_str_batch)
        X_ids_batch = self.list_of_tokens_to_list_of_cls_sep_token_ids(X_token_batch)
        return self._pad(X_ids_batch, pad_id=self._vocab.PAD_ID, maxlen=self._maxlen)

    def list_of_string_to_list_of_cls_sep_token_ids(self, X_str_batch):
        """Split, wrap in [CLS]/[SEP] and map to ids (no padding)."""
        X_token_batch = self.list_of_string_to_list_of_tokens(X_str_batch)
        return self.list_of_tokens_to_list_of_cls_sep_token_ids(X_token_batch)

    def add_start_end_token_with_pad(self, X_token_batch):
        """Build padded decoder input (<S> + tokens) and output (tokens + <T>)."""
        dec_input_token_batch = [[self._vocab.START_TOKEN] + X_token for X_token in X_token_batch]
        dec_output_token_batch = [X_token + [self._vocab.END_TOKEN] for X_token in X_token_batch]
        dec_input_token_batch = self.list_of_tokens_to_list_of_token_ids(dec_input_token_batch)
        pad_dec_input_ids_batch = self._pad(dec_input_token_batch, pad_id=self._vocab.PAD_ID, maxlen=self._maxlen)
        dec_output_ids_batch = self.list_of_tokens_to_list_of_token_ids(dec_output_token_batch)
        pad_dec_output_ids_batch = self._pad(dec_output_ids_batch, pad_id=self._vocab.PAD_ID, maxlen=self._maxlen)
        return pad_dec_input_ids_batch, pad_dec_output_ids_batch

    def decode_token_ids(self, token_ids_batch):
        """Map a batch of token-id lists back to token strings."""
        list_of_token_batch = []
        for token_ids in token_ids_batch:
            token_token = [self._vocab.transform_idx2token(token_id) for token_id in token_ids]
            list_of_token_batch.append(token_token)
        return list_of_token_batch
class BERTClassifier(nn.Module):
def __init__(self,
bert,
hidden_size = 768,
num_classes = 7,
dr_rate = None,
params = None):
super(BERTClassifier, self).__init__()
self.bert = bert
self.dr_rate = dr_rate
self.classifier = nn.Linear(hidden_size, num_classes)
if dr_rate:
self.dropout = nn.Dropout(p=dr_rate)
def gen_attention_mask(self, token_ids, valid_length):
attention_mask = torch.zeros_like(token_ids)
for i, v in enumerate(valid_length):
| |
<reponame>cosanlab/facesync<gh_stars>1-10
from __future__ import division
'''
FaceSync Utils Class
==========================================
VideoViewer: Watch video and plot data simultaneously.
AudioAligner: Manually align two audio files.
neutralface: landmark points that draw a neutral face.
ChangeAU: change Action Units (AUs) and return a new face.
'''
__all__ = ['VideoViewer','AudioAligner','neutralface','audict','plotface','ChangeAU','read_facet']
__author__ = ["<NAME>"]
__license__ = "MIT"
import os
import numpy as np
import matplotlib.pyplot as plt
def read_facet(facetfile,fullfacet=False,demean = False,demedian=False,zscore=False,fillna=False,sampling_hz=None, target_hz=None):
    '''
    Read an iMotions-FACET exported facial expression file.
    Uses a downsample function adapted from nltools.

    Arguments:
        facetfile: path to the tab-separated iMotions-FACET export.
        fullfacet(def: False): If True, Action Units are also provided in
            addition to the default emotion predictions.  A list of column
            names may also be passed to select a custom subset of columns.
        demean(def: False): Demean data
        demedian(def: False): Demedian data
        zscore(def: False): Zscore data
        fillna(def: False): forward-fill null values
        sampling_hz & target_hz: To downsample, specify the sampling hz and target hz.
    Returns:
        d: dataframe of processed facial expressions, indexed by time in seconds
    '''
    import pandas as pd

    def downsample(data, sampling_freq=None, target=None, target_type='samples', method='mean'):
        '''Downsample a pandas object to a new target frequency or number of
        samples using averaging.

        Args:
            data: Pandas DataFrame or Series
            sampling_freq: Sampling frequency of data
            target: downsampling target
            target_type: type of target, one of [samples, seconds, hz]
            method: (str) downsample method ['mean','median'], default: mean
        Returns:
            downsampled pandas object
        '''
        if not isinstance(data, (pd.DataFrame, pd.Series)):
            raise ValueError('Data must be a pandas DataFrame or Series instance.')
        if method not in ('mean', 'median'):
            raise ValueError("Metric must be either 'mean' or 'median' ")
        # bug fix: string comparisons previously used 'is', which relies on
        # interning and is a SyntaxWarning on modern Python; use '==' instead
        if target_type == 'samples':
            n_samples = target
        elif target_type == 'seconds':
            n_samples = target * sampling_freq
        elif target_type == 'hz':
            n_samples = sampling_freq / target
        else:
            raise ValueError('Make sure target_type is "samples", "seconds", '
                             ' or "hz".')
        # grouping index: each run of n_samples consecutive rows shares a label
        idx = np.sort(np.repeat(np.arange(1, data.shape[0] / n_samples, 1), n_samples))
        if data.shape[0] > len(idx):
            # pad the grouping index so every trailing row is covered
            idx = np.concatenate([idx, np.repeat(idx[-1] + 1, data.shape[0] - len(idx))])
        if method == 'mean':
            return data.groupby(idx).mean().reset_index(drop=True)
        return data.groupby(idx).median().reset_index(drop=True)

    # every column the FACET export provides (emotions, AUs and head pose)
    full_columns = ['Joy Evidence','Anger Evidence','Surprise Evidence','Fear Evidence','Contempt Evidence',
                    'Disgust Evidence','Sadness Evidence','Confusion Evidence','Frustration Evidence',
                    'Neutral Evidence','Positive Evidence','Negative Evidence','AU1 Evidence','AU2 Evidence',
                    'AU4 Evidence','AU5 Evidence','AU6 Evidence','AU7 Evidence','AU9 Evidence','AU10 Evidence',
                    'AU12 Evidence','AU14 Evidence','AU15 Evidence','AU17 Evidence','AU18 Evidence','AU20 Evidence',
                    'AU23 Evidence','AU24 Evidence','AU25 Evidence','AU26 Evidence','AU28 Evidence','AU43 Evidence',
                    'NoOfFaces','Yaw Degrees', 'Pitch Degrees', 'Roll Degrees']
    # default subset: emotion predictions plus the face counter
    emotion_columns = ['Joy Evidence','Anger Evidence','Surprise Evidence','Fear Evidence','Contempt Evidence',
                       'Disgust Evidence','Sadness Evidence','Confusion Evidence','Frustration Evidence',
                       'Neutral Evidence','Positive Evidence','Negative Evidence','NoOfFaces']
    d = pd.read_table(facetfile, skiprows=4, sep='\t',
                      usecols=['FrameTime'] + full_columns)
    # index by time in seconds (FrameTime is reported in milliseconds)
    d = d.set_index(d['FrameTime'].values / 1000.0)
    if isinstance(fullfacet, bool):
        facets = full_columns if fullfacet else emotion_columns
    else:
        # bug fix: passing a custom column list previously left `facets`
        # undefined and raised NameError
        facets = list(fullfacet)
    d = d[facets]
    if zscore:
        # the deprecated/removed .ix indexer was replaced by whole-frame ops
        d = (d - d.mean()) / d.std(ddof=0)
    if fillna:
        # ffill() works across old and new pandas (fillna(method=) is deprecated)
        d = d.ffill()
    if demedian:
        d = d - d.median()
    if demean:
        d = d - d.mean()
    if sampling_hz and target_hz:
        d = downsample(d, sampling_freq=sampling_hz, target=target_hz, target_type='hz')
    return d
def rec_to_time(vals, fps):
    '''Convert frame indices to "MM:SS" strings given frames per second.

    Args:
        vals: sequence of frame indices.
        fps: frames per second of the recording.
    Returns:
        list of zero-padded "MM:SS" strings.
    '''
    minutes = np.array(vals) / 60. / fps
    formatted = []
    for m in minutes:
        whole = int(np.floor(m))
        secs = int((m - np.floor(m)) * 60)
        formatted.append('%02d:%02d' % (whole, secs))
    return formatted
def VideoViewer(path_to_video, data_df,xlabel='', ylabel='',title='',figsize=(6.5,3),legend=False,xlim=None,ylim=None,plot_rows=False):
    """
    Play a video and plot the data underneath it, moving a cursor over the
    plot as the video plays.

    Plays videos using Jupyter_Video_Widget by https://github.com/Who8MyLunch/Jupyter_Video_Widget
    Currently working on: Python 3
    For plot update to work properly plotting needs to be set to: %matplotlib notebook

    Args:
        path_to_video : file path or url to a video. tested with mov and mp4 formats.
        data_df : pandas dataframe with columns to be plotted in 30hz. (plotting too many column can slowdown update)
        xlabel(str): add xlabel
        ylabel(str): add ylabel (currently unused; column names are used instead)
        title(str): plot title (only applied when plot_rows is False)
        figsize(tuple): matplotlib figure size
        legend(bool): toggle whether to plot legend
        xlim(list): pass xlimits [min,max]
        ylim(list): pass ylimits [min,max]
        plot_rows(bool): Draws individual plots for each column of data_df. (Default: False)
    """
    from jpy_video import Video
    from IPython.display import display, HTML
    # widen the notebook containers so the video and plots fit
    display(HTML(data="""
    <style>
    div#notebook-container { width: 95%; }
    div#menubar-container { width: 65%; }
    div#maintoolbar-container { width: 99%; }
    </style>
    """))
    f = os.path.abspath(path_to_video)
    wid = Video(f)
    wid.layout.width='640px'
    wid.display()
    lnwidth = 3
    fps = wid.timebase**-1 # time base is play rate hard coded at 30fps
    print(fps)
    if plot_rows:
        fig,axs = plt.subplots(data_df.shape[1],1,figsize=figsize) # hardcode figure size for now..
    else:
        fig,axs = plt.subplots(1,1,figsize=figsize)
    t=wid.current_time
    if plot_rows and data_df.shape[1]>1:
        for ixs, ax in enumerate(axs):
            ax.axvline(fps*t,color='k',linestyle='--',linewidth=lnwidth) # cursor is always first of ax
            # plot each column
            data_df.iloc[:,ixs].plot(ax=ax,legend=legend,xlim=xlim,ylim=ylim)
            # NOTE(review): this assigns over the Axes.set_xticks method rather
            # than calling it, so it has no effect -- likely intended
            # ax.set_xticks(np.arange(...)). Left unchanged here.
            ax.set_xticks = np.arange(0,data_df.shape[0],5)
            ax.set(ylabel =data_df.columns[ixs], xlabel=xlabel, xticklabels = rec_to_time(ax.get_xticks(),fps))
    else:
        axs.axvline(fps*t,color='k',linestyle='--',linewidth=lnwidth) # cursor is always first of ax
        # plot each column
        data_df.plot(ax=axs,legend=legend,xlim=xlim,ylim=ylim)
        # NOTE(review): same set_xticks assignment issue as above.
        axs.set_xticks = np.arange(0,data_df.shape[0],5)
        axs.set(ylabel = data_df.columns[0],xlabel=xlabel, title=title, xticklabels = rec_to_time(axs.get_xticks(),fps))
    if legend:
        plt.legend(loc=1)
    plt.tight_layout()
    def plot_dat(axs,t,fps=fps):
        '''Move the cursor line(s) to the frame matching video time t.'''
        if plot_rows and data_df.shape[1]>1:
            for ax in axs:
                if ax.lines:
                    ax.lines[0].set_xdata([np.round(fps*t),np.round(fps*t)])
        else:
            if axs.lines:
                axs.lines[0].set_xdata([np.round(fps*t),np.round(fps*t)])
        fig.canvas.draw()
    def on_value_change(change,ax=axs,fps=fps):
        '''Widget observer callback: redraw the cursor on video time updates.'''
        if change['name']=='_event':
            plot_dat(axs=axs, t=change['new']['currentTime'],fps=fps)
    # call on_value_change that will call plotting function plot_dat whenever there is cursor update
    wid.observe(on_value_change)
def AudioAligner(original, sample, search_start=0.0,search_end=15.0, xmax = 60,manual=False,reduce_orig_volume=1):
    """
    Pull up an interactive console (ipywidgets sliders plus an audio preview)
    to find the offset between two audio files.

    Args:
        original: path to original audio file (e.g. '../audios/original.wav')
        sample: path to the sample audio file (e.g. '../audios/sample.wav')
        search_start(float): start range for slider to search for offset
        search_end(float): end range for slider to search for offset
        xmax(int): Range of audio to plot from beginning
        manual(bool): set to True to turn off auto-refresh
        reduce_orig_volume(int or float): Original wav sounds are often larger so divide the volume by this number.
    """
    import scipy.io.wavfile as wav
    from IPython.display import Audio
    from IPython.display import display
    from ipywidgets import widgets
    orig_r,orig = wav.read(original)
    # volume is often louder on original so you can reduce it
    orig = orig/reduce_orig_volume
    # take one channel of target audio. probably not optimal
    if np.ndim(orig) >1:
        orig = orig[:,0]
    # grab one channel of sample audio
    tomatch_r,tomatch = wav.read(sample)
    if np.ndim(tomatch) >1:
        tomatch = tomatch[:,0]
    # playback/plotting rate
    # NOTE(review): hard-coded 44.1 kHz -- assumes both wavs use this rate;
    # verify against orig_r/tomatch_r for other inputs.
    fs = 44100
    def audwidg(offset,play_start):
        '''Mix a 30 s window of both audios at the given offset and display a player.'''
        allshift = play_start
        samplesize = 30
        tomatchcopy = tomatch[int((allshift+offset)*tomatch_r):int((allshift+offset)*tomatch_r)+fs*samplesize]
        shape = tomatchcopy.shape[0]
        origcopy = orig[int((allshift)*tomatch_r):int((allshift)*tomatch_r)+fs*samplesize]
        # when target audio is shorter, pad difference with zeros
        if origcopy.shape[0] < tomatchcopy.shape[0]:
            diff = tomatchcopy.shape[0] - origcopy.shape[0]
            origcopy = np.pad(origcopy, pad_width = (0,diff),mode='constant')
        toplay = origcopy + tomatchcopy
        display(Audio(data=toplay,rate=fs))
    def Plot_Audios(offset,x_min,x_max):
        '''Plot both waveforms with the sample shifted by `offset` seconds.'''
        # print('Precise offset : ' + str(offset))
        fig,ax = plt.subplots(figsize=(20,3))
        ax.plot(orig[int(fs*x_min):int(fs*x_max)],linewidth=.5,alpha=.8,color='r')
        ax.plot(tomatch[int(fs*x_min)+int(fs*offset) : int(fs*x_max)+int(fs*offset)],linewidth=.5,alpha=.8)
        ax.set_xticks([(tick-x_min)*fs for tick in range(int(x_min),int(x_max+1))])
        ax.set_xticklabels([tick for tick in range(int(x_min),int(x_max)+1)])
        ax.set_xlim([(x_min-x_min)*fs, (x_max-x_min)*fs] )
        ax.set_ylabel('Audio')
        ax.set_xlabel('Target Audio Time')
        audwidg(offset,x_min)
        plt.show()
    # interactive sliders drive Plot_Audios (and through it the audio preview)
    widgets.interact(Plot_Audios,
        offset=widgets.FloatSlider(value = 0.5*(search_start+search_end), readout_format='.3f', min = float(search_start), max = float(search_end), step = 0.001,
        description='Adjusted offset: ',layout=widgets.Layout(width='90%')),
        x_min=widgets.FloatSlider(description='Min X on audio plot', value=0.0,min=0.0,max=xmax,step=0.1, layout=widgets.Layout(width='50%')),
        x_max=widgets.FloatSlider(description='Max X on audio plot', value=xmax,min=0.0,max=xmax,step=0.1, layout=widgets.Layout(width='50%')),
        __manual=manual
        )
# Landmark lookup for a neutral face: landmark id -> (x, y) pixel coordinate.
# Negative and positive ids appear to mirror the left/right sides of the face
# (0 is the chin midpoint) -- NOTE(review): verify against plotface/ChangeAU.
neutralface = {-34: (212, 335),
    -33: (222, 342), -32: (237, 342), -30: (203, 335), -29: (222, 335),
    -28: (237, 328), -26: (227, 288), -25: (238, 292), -19: (201, 219),
    -18: (184, 220), -17: (169, 214), -16: (184, 204), -15: (201, 203),
    -14: (217, 215), -13: (225, 181), -12: (203, 172), -11: (180, 170),
    -10: (157, 174), -9: (142, 180), -8: (122, 222), -7: (126, 255),
    -6: (133, 286), -5: (139, 318), -4: (148, 349), -3: (165, 375),
    -2: (190, 397), -1: (219, 414),
    0: (252, 419),
    1: (285, 414), 2: (315, 398), 3: (341, 377), 4: (359, 351),
    5: (368, 319), 6: (371, 287), 7: (376, 254), 8: (378, 221),
    9: (354, 180), 10: (339, 173), 11: (316, 167), 12: (293, 171),
    13: (270, 180), 14: (281, 215), 15: (296, 203), 16: (314, 202),
    17: (328, 212), 18: (315, 219), 19: (297, 219), 20: (248, 207),
    21: (248, 227), 22: (248, 247), 23: (248, 268), 24: (248, 294),
    25: (260, 291), 26: (271, 287), 27: (248, 333), 28: (262, 328),
    29: (279, 335), 30: (296, 335), 31: (250, 340), 32: (264, 342),
    33: (280, 342), 34: (288, 335)}
audict = {'AU1' : {-11:(2,0),11:(-2,0),-12:(5,-8),12:(-5,-8),-13:(0,-20),13:(0,-20) },
# Brow Lowerer
'AU4': {-10:(4,5),10:(-4,5),-11:(4,15),11:(-4,15),-12:(5,20),12:(-5,20),-13:(0,15),13:(0,15) },
# Upper Lid Raiser
'AU5': {-9:(2,-9),9:(2,-9), -10:(2,-10),10:(-2,-10),-11:(2,-15),11:(-2,-15),
-12:(5,-12),12:(-5,-12),-13:(0,-10),13:(0,-10),
-16:(0,-10),-15:(0,-10),16:(0,-10),15:(0,-10),
-19:(0,10),-18:(0,10),19:(0,10),18:(0,10)},
# cheek raiser
'AU6': {-8:(20,0),8:(-20,0), -7:(10,-5),7:(-10,-5), -6:(2,-8), 6:(-2,-8),
-9:(5,5),9:(-5,5),
17:(-5,5),18:(-3,-3),19:(-3,-3),
-17:(5,5),-18:(3,-3),-19:(3,-3)},
# nose wrinkler
'AU9': {-15:(2,4),15:(-2,4),-14:(2,3),14:(-2,3),
20:(0,5), 21:(0,-5), 22:(0,-7), 23:(0,-10),
-26:(5,-15),-25:(0,-15),24:(0,-15),25:(0,-15),26:(-5,-15),
-10:(2,0),10:(-2,0),-11:(2,8),11:(-2,8),
-12:(5,12),12:(-5,12),-13:(0,10),13:(0,10)
},
# Upper Lip Raiser
'AU10': {-34:(0,5),-33:(0,-2),-30:(0,3),-29:(0,-10),-28:(0,-5),
-26:(-5,-8),-25:(0,-3),24:(0,-3),25:(0,-3),26:(5,-8),
27:(0,-10),28:(0,-5),29:(0,-10),30:(0,3),33:(0,-2),34:(0,5)},
# Lip corner Puller
'AU12': { -30: (-10,-15), -34: (-5,-5), 30:(10,-15), 34:(5,-5), -29:(0,0), 29:(0,0) },
#AU14 Dimpler
'AU14': {-33:(0,-5),-32:(0,-5),-30:(-5,-5),-28:(0,5),28:(0,5),30:(5,-5),31:(0,-5),32:(0,-5),33:(0,-5)},
# Chin raiser
'AU17': { -2:(5,0),-1:(5,-5),0:(0,-20),-1:(-5,-5),2:(-5,0)},
# Lip Puckerer
'AU18': {-30:(5,0), 30:(-5,0), -34:(5,0), 34:(-5,0),
-33:(5,0),33:(-5,0), -29:(5,0),29:(-5,0),30:(-5,0),
-28:(0,0),28:(0,0),27:(0,-8),31:(0,10),-32:(0,7),32:(0,7)} ,
# Lips Part
'AU25': {-28:(0,-3),28:(0,-3),27:(0,-5),31:(0,7),-32:(0,7),32:(0,7)},
# Lip Suck
'AU28': {-33:(0,-5),-32:(0,-5),-28:(0,5),24:(0,-3),28:(0,-5),31:(0,-5),32:(0,-5),33:(0,-5)}
| |
), # 15 Status Update Length was 15 but panel seems to send different lengths
0xA6 : PanelCallBack( 15, True, False ), # 15 Zone Types I think!!!!
0xA7 : PanelCallBack( 15, True, False ), # 15 Panel Status Change
0xAB : PanelCallBack( 15, False, False ), # 15 Enroll Request 0x0A OR Ping 0x03 Length was 15 but panel seems to send different lengths
0xB0 : PanelCallBack( None, True, False ),
0xF1 : PanelCallBack( 9, False, False ) # 9
}
# B0 message subtype -> human-readable description
pmReceiveMsgB0_t = {
   0x04 : "Zone status",
   0x18 : "Open/close status",
   0x39 : "Activity"
}
# Event-log event code -> description, keyed by language ("EN"/"NL").
# NOTE(review): the EN tuple carries extra trailing entries (newer
# partition-model events) that the NL tuple does not -- indexing the NL
# table with a high event code will raise IndexError; confirm intended.
pmLogEvent_t = {
   "EN" : (
      "None", "Interior Alarm", "Perimeter Alarm", "Delay Alarm", "24h Silent Alarm", "24h Audible Alarm",
      "Tamper", "Control Panel Tamper", "Tamper Alarm", "Tamper Alarm", "Communication Loss", "Panic From Keyfob",
      "Panic From Control Panel", "Duress", "Confirm Alarm", "General Trouble", "General Trouble Restore",
      "Interior Restore", "Perimeter Restore", "Delay Restore", "24h Silent Restore", "24h Audible Restore",
      "Tamper Restore", "Control Panel Tamper Restore", "Tamper Restore", "Tamper Restore", "Communication Restore",
      "Cancel Alarm", "General Restore", "Trouble Restore", "Not used", "Recent Close", "Fire", "Fire Restore",
      "No Active", "Emergency", "No used", "Disarm Latchkey", "Panic Restore", "Supervision (Inactive)",
      "Supervision Restore (Active)", "Low Battery", "Low Battery Restore", "AC Fail", "AC Restore",
      "Control Panel Low Battery", "Control Panel Low Battery Restore", "RF Jamming", "RF Jamming Restore",
      "Communications Failure", "Communications Restore", "Telephone Line Failure", "Telephone Line Restore",
      "Auto Test", "Fuse Failure", "Fuse Restore", "Keyfob Low Battery", "Keyfob Low Battery Restore", "Engineer Reset",
      "Battery Disconnect", "1-Way Keypad Low Battery", "1-Way Keypad Low Battery Restore", "1-Way Keypad Inactive",
      "1-Way Keypad Restore Active", "Low Battery", "Clean Me", "Fire Trouble", "Low Battery", "Battery Restore",
      "AC Fail", "AC Restore", "Supervision (Inactive)", "Supervision Restore (Active)", "Gas Alert", "Gas Alert Restore",
      "Gas Trouble", "Gas Trouble Restore", "Flood Alert", "Flood Alert Restore", "X-10 Trouble", "X-10 Trouble Restore",
      "Arm Home", "Arm Away", "Quick Arm Home", "Quick Arm Away", "Disarm", "Fail To Auto-Arm", "Enter To Test Mode",
      "Exit From Test Mode", "Force Arm", "Auto Arm", "Instant Arm", "Bypass", "Fail To Arm", "Door Open",
      "Communication Established By Control Panel", "System Reset", "Installer Programming", "Wrong Password",
      "Not Sys Event", "Not Sys Event", "Extreme Hot Alert", "Extreme Hot Alert Restore", "Freeze Alert",
      "Freeze Alert Restore", "Human Cold Alert", "Human Cold Alert Restore", "Human Hot Alert",
      "Human Hot Alert Restore", "Temperature Sensor Trouble", "Temperature Sensor Trouble Restore",
      # new values partition models
      "PIR Mask", "PIR Mask Restore", "", "", "", "", "", "", "", "", "", "",
      "Alarmed", "Restore", "Alarmed", "Restore", "", "", "", "", "", "", "", "", "", "",
      "", "", "", "", "", "Exit Installer", "Enter Installer", "", "", "", "", "" ),
   "NL" : (
      "Geen", "In alarm", "In alarm", "In alarm", "In alarm", "In alarm",
      "Sabotage alarm", "Systeem sabotage", "Sabotage alarm", "Add user", "Communicate fout", "Paniekalarm",
      "Code bedieningspaneel paniek", "Dwang", "Bevestig alarm", "Successful U/L", "Probleem herstel",
      "Herstel", "Herstel", "Herstel", "Herstel", "Herstel",
      "Sabotage herstel", "Systeem sabotage herstel", "Sabotage herstel", "Sabotage herstel", "Communicatie herstel",
      "Stop alarm", "Algemeen herstel", "Brand probleem herstel", "Systeem inactief", "Recent close", "Brand", "Brand herstel",
      "Niet actief", "Noodoproep", "Remove user", "Controleer code", "Bevestig alarm", "Supervisie",
      "Supervisie herstel", "Batterij laag", "Batterij OK", "230VAC uitval", "230VAC herstel",
      "Controlepaneel batterij laag", "Controlepaneel batterij OK", "Radio jamming", "Radio herstel",
      "Communicatie mislukt", "Communicatie hersteld", "Telefoonlijn fout", "Telefoonlijn herstel",
      "Automatische test", "Zekeringsfout", "Zekering herstel", "Batterij laag", "Batterij OK", "Monteur reset",
      "Accu vermist", "Batterij laag", "Batterij OK", "Supervisie",
      "Supervisie herstel", "Lage batterij bevestiging", "Reinigen", "Probleem", "Batterij laag", "Batterij OK",
      "230VAC uitval", "230VAC herstel", "Supervisie", "Supervisie herstel", "Gas alarm", "Gas herstel",
      "Gas probleem", "Gas probleem herstel", "Lekkage alarm", "Lekkage herstel", "Probleem", "Probleem herstel",
      "Deelschakeling", "Ingeschakeld", "Snel deelschakeling", "Snel ingeschakeld", "Uitgezet", "Inschakelfout (auto)", "Test gestart",
      "Test gestopt", "Force aan", "Geheel in (auto)", "Onmiddelijk", "Overbruggen", "Inschakelfout",
      "Log verzenden", "Systeem reset", "Installateur programmeert", "Foutieve code", "Overbruggen" )
}
pmLogUser_t = {
"EN" : [ "System ", "Zone 01", "Zone 02", "Zone 03", "Zone 04", "Zone 05", "Zone 06", "Zone 07", "Zone 08",
"Zone 09", "Zone 10", "Zone 11", "Zone 12", "Zone 13", "Zone 14", "Zone 15", "Zone 16", "Zone 17", "Zone 18",
"Zone 19", "Zone 20", "Zone 21", "Zone 22", "Zone 23", "Zone 24", "Zone 25", "Zone 26", "Zone 27", "Zone 28",
"Zone 29", "Zone 30", "Fob 01", "Fob 02", "Fob 03", "Fob 04", "Fob 05", "Fob 06", "Fob 07", "Fob 08",
"User 01", "User 02", "User 03", "User 04", "User 05", "User 06", "User 07", "User 08", "Pad 01", "Pad 02",
"Pad 03", "Pad 04", "Pad 05", "Pad 06", "Pad 07", "Pad 08", "Sir 01", "Sir 02", "2Pad 01", "2Pad 02",
"2Pad 03", "2Pad 04", "X10 01", "X10 02", "X10 03", "X10 04", "X10 05", "X10 06", "X10 07", "X10 08",
"X10 09", "X10 10", "X10 11", "X10 12", "X10 13", "X10 14", "X10 15", "PGM ", "GSM ", "P-LINK ",
"PTag 01", "PTag 02", "PTag 03", "PTag 04", "PTag 05", "PTag 06", "PTag 07", "PTag 08" ],
"NL" : [ "Systeem", "Zone 01", "Zone 02", "Zone 03", "Zone 04", "Zone 05", "Zone 06", "Zone 07", "Zone 08",
"Zone 09", "Zone 10", "Zone 11", "Zone 12", "Zone 13", "Zone 14", "Zone 15", "Zone 16", "Zone 17", "Zone 18",
"Zone 19", "Zone 20", "Zone 21", "Zone 22", "Zone 23", "Zone 24", "Zone 25", "Zone 26", "Zone 27", "Zone 28",
"Zone 29", "Zone 30", "Fob 01", "Fob 02", "Fob 03", "Fob 04", "Fob 05", "Fob 06", "Fob 07", "Fob 08",
"Gebruiker 01", "Gebruiker 02", "Gebruiker 03", "Gebruiker 04", "Gebruiker 05", "Gebruiker 06", "Gebruiker 07",
"Gebruiker 08", "Pad 01", "Pad 02",
"Pad 03", "Pad 04", "Pad 05", "Pad 06", "Pad 07", "Pad 08", "Sir 01", "Sir 02", "2Pad 01", "2Pad 02",
"2Pad 03", "2Pad 04", "X10 01", "X10 02", "X10 03", "X10 04", "X10 05", "X10 06", "X10 07", "X10 08",
"X10 09", "X10 10", "X10 11", "X10 12", "X10 13", "X10 14", "X10 15", "PGM ", "GSM ", "P-LINK ",
"PTag 01", "PTag 02", "PTag 03", "PTag 04", "PTag 05", "PTag 06", "PTag 07", "PTag 08" ]
}
# System status code (index) -> description, keyed by language ("EN"/"NL").
pmSysStatus_t = {
   "EN" : (
      "Disarmed", "Home Exit Delay", "Away Exit Delay", "Entry Delay", "Armed Home", "Armed Away", "User Test",
      "Downloading", "Programming", "Installer", "Home Bypass", "Away Bypass", "Ready", "Not Ready", "??", "??",
      "Disarmed Instant", "Home Instant Exit Delay", "Away Instant Exit Delay", "Entry Delay Instant", "Armed Home Instant",
      "Armed Away Instant" ),
   "NL" : (
      "Uitgeschakeld", "Deel uitloopvertraging", "Totaal uitloopvertraging", "Inloopvertraging", "Deel ingeschakeld",
      "Totaal ingeschakeld", "Gebruiker test", "Downloaden", "Programmeren", "Monteurmode", "Deel met overbrugging",
      "Totaal met overbrugging", "Klaar", "Niet klaar", "??", "??", "Direct uitschakelen", "Direct Deel uitloopvertraging",
      "Direct Totaal uitloopvertraging", "Direct inloopvertraging", "Direct Deel", "Direct Totaal" )
}
# System status flag bit (index) -> description, keyed by language ("EN"/"NL").
pmSysStatusFlags_t = {
   "EN" : ( "Ready", "Alert in memory", "Trouble", "Bypass on", "Last 10 seconds", "Zone event", "Status changed", "Alarm event" ),
   "NL" : ( "Klaar", "Alarm in geheugen", "Probleem", "Overbruggen aan", "Laatste 10 seconden", "Zone verstoord", "Status gewijzigd", "Alarm actief")
}
#pmArmed_t = {
# 0x03 : "", 0x04 : "", 0x05 : "", 0x0A : "", 0x0B : "", 0x13 : "", 0x14 : "", 0x15 : ""
#}
# Arm-mode name -> command byte sent to the panel
# (note: "Night"/"NightInstant" reuse the Stay/StayInstant codes)
pmArmMode_t = {
   "Disarmed" : 0x00, "Stay" : 0x04, "Armed" : 0x05, "UserTest" : 0x06, "StayInstant" : 0x14, "ArmedInstant" : 0x15, "Night" : 0x04, "NightInstant" : 0x14
}
# Detailed arm-mode code (index) -> state name reported by the panel
pmDetailedArmMode_t = (
   "Disarmed", "ExitDelay_ArmHome", "ExitDelay_ArmAway", "EntryDelay", "Stay", "Armed", "UserTest", "Downloading", "Programming", "Installer",
   "Home Bypass", "Away Bypass", "Ready", "NotReady", "??", "??", "Disarm", "ExitDelay", "ExitDelay", "EntryDelay", "StayInstant", "ArmedInstant"
) # Not used: Night, NightInstant, Vacation
# Zone/device event type code (index) -> description, keyed by language ("EN"/"NL").
pmEventType_t = {
   "EN" : (
      "None", "Tamper Alarm", "Tamper Restore", "Open", "Closed", "Violated (Motion)", "Panic Alarm", "RF Jamming",
      "Tamper Open", "Communication Failure", "Line Failure", "Fuse", "Not Active", "Low Battery", "AC Failure",
      "Fire Alarm", "Emergency", "Siren Tamper", "Siren Tamper Restore", "Siren Low Battery", "Siren AC Fail" ),
   "NL" : (
      "Geen", "Sabotage alarm", "Sabotage herstel", "Open", "Gesloten", "Verstoord (beweging)", "Paniek alarm", "RF verstoring",
      "Sabotage open", "Communicatie probleem", "Lijnfout", "Zekering", "Niet actief", "Lage batterij", "AC probleem",
      "Brandalarm", "Noodoproep", "Sirene sabotage", "Sirene sabotage herstel", "Sirene lage batterij", "Sirene AC probleem" )
}
pmPanelAlarmType_t = | |
<reponame>joakimzhang/python-electron
#!/usr/bin/env python
'''
display a image in a subprocess
<NAME>
June 2012
'''
import time
from wx_loader import wx
try:
import cv2.cv as cv
except ImportError:
import cv
from MAVProxy.modules.lib import mp_util
from MAVProxy.modules.lib import mp_widgets
from MAVProxy.modules.lib.mp_menu import *
class MPImageData:
    '''Snapshot of an image to display: dimensions plus the raw pixel bytes.

    The pixel data is copied out of the cv image so the object can cross
    the process boundary to the GUI child.
    '''
    def __init__(self, img):
        self.width, self.height = img.width, img.height
        self.data = img.tostring()
class MPImageTitle:
    '''Message carrying a new window title for the GUI child.'''
    def __init__(self, title):
        self.title = title
class MPImageBrightness:
    '''Message carrying a new image brightness factor for the GUI child.'''
    def __init__(self, brightness):
        self.brightness = brightness
class MPImageFitToWindow:
    '''fit image to window (marker message, carries no payload)'''
    def __init__(self):
        pass
class MPImageFullSize:
    '''show full image resolution (marker message, carries no payload)'''
    def __init__(self):
        pass
class MPImageMenu:
    '''Message carrying a window menu to add in the GUI child.'''
    def __init__(self, menu):
        self.menu = menu
class MPImagePopupMenu:
    '''Message carrying a popup menu to add in the GUI child.'''
    def __init__(self, menu):
        self.menu = menu
class MPImageNewSize:
    '''Event reported back to the parent when the window size changes.'''
    def __init__(self, size):
        self.size = size
class MPImage():
    '''
    a generic image viewer widget for use in MP tools

    All GUI work happens in a child process; the parent talks to it via two
    IPC queues (in_queue carries commands/images to the child, out_queue
    carries mouse/key/size events back).
    '''
    def __init__(self,
                 title='MPImage',
                 width=512,
                 height=512,
                 can_zoom = False,
                 can_drag = False,
                 mouse_events = False,
                 key_events = False,
                 auto_size = False,
                 report_size_changes = False,
                 daemon = False):
        import multiprocessing
        self.title = title
        self.width = width
        self.height = height
        self.can_zoom = can_zoom
        self.can_drag = can_drag
        self.mouse_events = mouse_events
        self.key_events = key_events
        self.auto_size = auto_size
        self.report_size_changes = report_size_changes
        self.menu = None
        self.popup_menu = None
        # IPC queues crossing the parent/child process boundary
        from multiprocessing_queue import makeIPCQueue
        self.in_queue = makeIPCQueue()
        self.out_queue = makeIPCQueue()
        self.default_menu = MPMenuSubMenu('View',
                                          items=[MPMenuItem('Fit Window', 'Fit Window', 'fitWindow'),
                                                 MPMenuItem('Full Zoom', 'Full Zoom', 'fullSize')])
        # spawn the child that owns all wx objects
        self.child = multiprocessing.Process(target=self.child_task)
        self.child.daemon = daemon
        self.child.start()
        self.set_popup_menu(self.default_menu)
    def child_task(self):
        '''child process - this holds all the GUI elements'''
        mp_util.child_close_fds()
        from wx_loader import wx
        state = self
        self.app = wx.App(False)
        self.app.frame = MPImageFrame(state=self)
        self.app.frame.Show()
        self.app.MainLoop()
    def is_alive(self):
        '''check if child is still going'''
        return self.child.is_alive()
    def set_image(self, img, bgr=False):
        '''set the currently displayed image; when bgr is True the image is
        converted to RGB before being queued to the child'''
        if not self.is_alive():
            return
        if bgr:
            # clone first so the caller's image is not modified in place
            img = cv.CloneImage(img)
            cv.CvtColor(img, img, cv.CV_BGR2RGB)
        self.in_queue.put(MPImageData(img))
    def set_title(self, title):
        '''set the frame title'''
        self.in_queue.put(MPImageTitle(title))
    def set_brightness(self, brightness):
        '''set the image brightness'''
        self.in_queue.put(MPImageBrightness(brightness))
    def fit_to_window(self):
        '''fit the image to the window'''
        self.in_queue.put(MPImageFitToWindow())
    def full_size(self):
        '''show the full image resolution'''
        self.in_queue.put(MPImageFullSize())
    def set_menu(self, menu):
        '''set a MPTopMenu on the frame'''
        self.menu = menu
        self.in_queue.put(MPImageMenu(menu))
    def set_popup_menu(self, menu):
        '''set a popup menu on the frame'''
        self.popup_menu = menu
        self.in_queue.put(MPImagePopupMenu(menu))
    def get_menu(self):
        '''get the current frame menu'''
        return self.menu
    def get_popup_menu(self):
        '''get the current popup menu'''
        return self.popup_menu
    def poll(self):
        '''check for events, returning one event (or None if none pending)'''
        if self.out_queue.qsize():
            return self.out_queue.get()
        return None
    def events(self):
        '''check for events, returning a list of all pending events'''
        ret = []
        while self.out_queue.qsize():
            ret.append(self.out_queue.get())
        return ret
    def terminate(self):
        '''terminate child process'''
        self.child.terminate()
        self.child.join()
class MPImageFrame(wx.Frame):
    """ The main frame of the viewer

    Hosts a single MPImagePanel inside a vertical sizer and wires the idle
    and resize events.
    """
    def __init__(self, state):
        wx.Frame.__init__(self, None, wx.ID_ANY, state.title)
        self.state = state
        state.frame = self
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        state.panel = MPImagePanel(self, state)
        self.sizer.Add(state.panel, 1, wx.EXPAND)
        self.SetSizer(self.sizer)
        self.Bind(wx.EVT_IDLE, self.on_idle)
        self.Bind(wx.EVT_SIZE, state.panel.on_size)
    def on_idle(self, event):
        '''prevent the main loop spinning too fast'''
        state = self.state
        time.sleep(0.1)
class MPImagePanel(wx.Panel):
""" The image panel
"""
    def __init__(self, parent, state):
        '''Build the image panel: redraw timer, zoom/drag state and the
        event bindings selected by the shared state flags.'''
        wx.Panel.__init__(self, parent)
        self.frame = parent
        self.state = state
        self.img = None
        # periodic timer drives on_redraw_timer, which drains the in_queue
        self.redraw_timer = wx.Timer(self)
        self.Bind(wx.EVT_TIMER, self.on_redraw_timer, self.redraw_timer)
        self.Bind(wx.EVT_SET_FOCUS, self.on_focus)
        self.redraw_timer.Start(100)
        self.mouse_down = None
        self.drag_step = 10
        self.zoom = 1.0
        self.menu = None
        self.popup_menu = None
        self.wx_popup_menu = None
        self.popup_pos = None
        self.last_size = None
        self.done_PIL_warning = False
        state.brightness = 1.0
        # dragpos is the top left position in image coordinates
        self.dragpos = wx.Point(0,0)
        self.need_redraw = True
        self.mainSizer = wx.BoxSizer(wx.VERTICAL)
        self.SetSizer(self.mainSizer)
        # panel for the main image
        self.imagePanel = mp_widgets.ImagePanel(self, wx.EmptyImage(state.width,state.height))
        self.mainSizer.Add(self.imagePanel, flag=wx.TOP|wx.LEFT|wx.GROW, border=0)
        # forward raw events to the owner when requested, otherwise handle locally
        if state.mouse_events:
            self.imagePanel.Bind(wx.EVT_MOUSE_EVENTS, self.on_event)
        else:
            self.imagePanel.Bind(wx.EVT_MOUSE_EVENTS, self.on_mouse_event)
        if state.key_events:
            self.imagePanel.Bind(wx.EVT_KEY_DOWN, self.on_event)
        else:
            self.imagePanel.Bind(wx.EVT_KEY_DOWN, self.on_key_event)
        self.imagePanel.Bind(wx.EVT_MOUSEWHEEL, self.on_mouse_wheel)
        self.redraw()
        state.frame.Fit()
def on_focus(self, event):
self.imagePanel.SetFocus()
def on_focus(self, event):
'''called when the panel gets focus'''
self.imagePanel.SetFocus()
def image_coordinates(self, point):
'''given a point in window coordinates, calculate image coordinates'''
# the dragpos is the top left position in image coordinates
ret = wx.Point(int(self.dragpos.x + point.x/self.zoom),
int(self.dragpos.y + point.y/self.zoom))
return ret
    def redraw(self):
        '''redraw the image with current settings (zoom, drag position and
        brightness); with no image loaded, just refresh the empty panel'''
        state = self.state
        if self.img is None:
            self.mainSizer.Fit(self)
            self.Refresh()
            state.frame.Refresh()
            self.SetFocus()
            return
        # get the current size of the containing window frame
        size = self.frame.GetSize()
        (width, height) = (self.img.GetWidth(), self.img.GetHeight())
        # visible sub-rectangle of the image, in image coordinates
        rect = wx.Rect(self.dragpos.x, self.dragpos.y, int(size.x/self.zoom), int(size.y/self.zoom))
        #print("redraw", self.zoom, self.dragpos, size, rect);
        # clamp the rectangle so it stays inside the image bounds
        if rect.x > width-1:
            rect.x = width-1
        if rect.y > height-1:
            rect.y = height-1
        if rect.width > width - rect.x:
            rect.width = width - rect.x
        if rect.height > height - rect.y:
            rect.height = height - rect.y
        # copy, crop and scale the visible region to window resolution
        scaled_image = self.img.Copy()
        scaled_image = scaled_image.GetSubImage(rect);
        scaled_image = scaled_image.Rescale(int(rect.width*self.zoom), int(rect.height*self.zoom))
        if state.brightness != 1.0:
            try:
                # brightness is applied via PIL when available
                from PIL import Image
                pimg = mp_util.wxToPIL(scaled_image)
                pimg = Image.eval(pimg, lambda x: int(x * state.brightness))
                scaled_image = mp_util.PILTowx(pimg)
            except Exception:
                if not self.done_PIL_warning:
                    print("Please install PIL for brightness control")
                    self.done_PIL_warning = True
                # ignore lack of PIL library
                pass
        self.imagePanel.set_image(scaled_image)
        self.need_redraw = False
        self.mainSizer.Fit(self)
        self.Refresh()
        state.frame.Refresh()
        self.SetFocus()
        # dead debug snippet left by the original author (heap profiling)
        '''
        from guppy import hpy
        h = hpy()
        print h.heap()
        '''
    def on_redraw_timer(self, event):
        '''the redraw timer ensures we show new map tiles as they
        are downloaded'''
        state = self.state
        # drain the inter-process input queue; each message type updates
        # one piece of display state
        while state.in_queue.qsize():
            obj = state.in_queue.get()
            if isinstance(obj, MPImageData):
                # new frame: build a wx image from the raw pixel data
                # NOTE(review): wx.EmptyImage is the classic-wx API; under
                # wxPython Phoenix this would be wx.Image -- confirm which
                # wx generation this file targets.
                img = wx.EmptyImage(obj.width, obj.height)
                img.SetData(obj.data)
                self.img = img
                self.need_redraw = True
                if state.auto_size:
                    # resize the frame to the image size plus the window
                    # chrome (difference between total and client area)
                    client_area = state.frame.GetClientSize()
                    total_area = state.frame.GetSize()
                    bx = max(total_area.x - client_area.x,0)
                    by = max(total_area.y - client_area.y,0)
                    state.frame.SetSize(wx.Size(obj.width+bx, obj.height+by))
            if isinstance(obj, MPImageTitle):
                state.frame.SetTitle(obj.title)
            if isinstance(obj, MPImageMenu):
                self.set_menu(obj.menu)
            if isinstance(obj, MPImagePopupMenu):
                self.set_popup_menu(obj.menu)
            if isinstance(obj, MPImageBrightness):
                state.brightness = obj.brightness
                self.need_redraw = True
            if isinstance(obj, MPImageFullSize):
                self.full_size()
            if isinstance(obj, MPImageFitToWindow):
                self.fit_to_window()
        # redraw at most once per timer tick, after all messages are applied
        if self.need_redraw:
            self.redraw()
def on_size(self, event):
'''handle window size changes'''
state = self.state
self.need_redraw = True
if state.report_size_changes:
# tell owner the new size
size = self.frame.GetSize()
if size != self.last_size:
self.last_size = size
state.out_queue.put(MPImageNewSize(size))
def limit_dragpos(self):
'''limit dragpos to sane values'''
if self.dragpos.x < 0:
self.dragpos.x = 0
if self.dragpos.y < 0:
self.dragpos.y = 0
if self.img is None:
return
if self.dragpos.x >= self.img.GetWidth():
self.dragpos.x = self.img.GetWidth()-1
if self.dragpos.y >= self.img.GetHeight():
self.dragpos.y = self.img.GetHeight()-1
def on_mouse_wheel(self, event):
'''handle mouse wheel zoom changes'''
state = self.state
if not state.can_zoom:
return
mousepos = self.image_coordinates(event.GetPosition())
rotation = event.GetWheelRotation() / event.GetWheelDelta()
oldzoom = self.zoom
if rotation > 0:
self.zoom /= 1.0/(1.1 * rotation)
elif rotation < 0:
self.zoom /= 1.1 * (-rotation)
if self.zoom > 10:
self.zoom = 10
elif self.zoom < 0.1:
self.zoom = 0.1
if oldzoom < 1 and self.zoom > 1:
self.zoom = 1
if oldzoom > 1 and self.zoom < 1:
self.zoom = 1
self.need_redraw = True
new = self.image_coordinates(event.GetPosition())
# adjust dragpos so the zoom doesn't change what pixel is under the mouse
self.dragpos = wx.Point(self.dragpos.x - (new.x-mousepos.x), self.dragpos.y - (new.y-mousepos.y))
self.limit_dragpos()
def on_drag_event(self, event):
'''handle mouse drags'''
state = self.state
if not state.can_drag:
return
newpos = self.image_coordinates(event.GetPosition())
dx = -(newpos.x - self.mouse_down.x)
dy = -(newpos.y - self.mouse_down.y)
self.dragpos = wx.Point(self.dragpos.x+dx,self.dragpos.y+dy)
self.limit_dragpos()
self.mouse_down = newpos
self.need_redraw = True
self.redraw()
def show_popup_menu(self, pos):
'''show a popup menu'''
self.popup_pos = self.image_coordinates(pos)
self.frame.PopupMenu(self.wx_popup_menu, pos)
def on_mouse_event(self, event):
'''handle mouse events'''
pos = event.GetPosition()
if event.RightDown() and self.popup_menu is not None:
self.show_popup_menu(pos)
return
if event.Leaving():
self.mouse_pos = None
else:
self.mouse_pos = pos
if event.LeftDown():
self.mouse_down = self.image_coordinates(pos)
if event.Dragging() and event.ButtonIsDown(wx.MOUSE_BTN_LEFT):
self.on_drag_event(event)
def on_key_event(self, event):
'''handle key events'''
keycode = event.GetKeyCode()
if keycode == wx.WXK_HOME:
self.zoom = 1.0
self.dragpos = wx.Point(0, 0)
self.need_redraw = True
def on_event(self, event):
'''pass events to the parent'''
state = self.state
if isinstance(event, wx.MouseEvent):
self.on_mouse_event(event)
if isinstance(event, wx.KeyEvent):
self.on_key_event(event)
if (isinstance(event, wx.MouseEvent) and
not event.ButtonIsDown(wx.MOUSE_BTN_ANY) and
event.GetWheelRotation() == 0):
# don't flood the queue with mouse movement
return
evt = mp_util.object_container(event)
pt = self.image_coordinates(wx.Point(evt.X,evt.Y))
evt.X = pt.x
evt.Y = pt.y
state.out_queue.put(evt)
def on_menu(self, event):
'''called on menu event'''
state = self.state
if self.popup_menu is not None:
ret = self.popup_menu.find_selected(event)
if ret is not None:
ret.popup_pos = self.popup_pos
if ret.returnkey == 'fitWindow':
self.fit_to_window()
elif ret.returnkey == 'fullSize':
self.full_size()
else:
state.out_queue.put(ret)
return
if self.menu is not None:
ret = self.menu.find_selected(event)
if ret is not None:
state.out_queue.put(ret)
return
def set_menu(self, menu):
'''add a menu from the parent'''
self.menu = menu
wx_menu = menu.wx_menu()
self.frame.SetMenuBar(wx_menu)
self.frame.Bind(wx.EVT_MENU, self.on_menu)
def set_popup_menu(self, menu):
'''add a popup menu from the parent'''
self.popup_menu | |
"""
:mod:`test_frbr_rda_expression` Tests FRBR RDA Expression and supporting
properties from RDF documents
"""
__author__ = '<NAME>'
import logging
import unittest,redis,config
import lib.common as common
import lib.frbr_rda as frbr_rda
import lib.namespaces as ns
# Shared connection used by all test fixtures below; points at the
# dedicated test database so tearDown() can safely flushdb().
redis_server = redis.StrictRedis(host=config.REDIS_HOST,
                                 port=config.REDIS_PORT,
                                 db=config.REDIS_TEST_DB)
class TestExpressionRDAGroup1Elements(unittest.TestCase):
    """Exercise RDA Group 1 element properties on frbr_rda.Expression.

    setUp stores a fixture value in Redis for each element, builds an
    Expression with every element as a constructor parameter, and the
    individual tests verify that each parameter round-trips through
    attribute access (and, where a value was stored, through Redis).

    Fixes over the previous revision:
    - removed a dead duplicate assignment of
      scale_of_still_image_or_three_dimensional_form_key in setUp
    - removed a stray copy-pasted assertion about
      additional_scale_information_key from test_accessibility_content
      (it belongs to test_additional_scale_information) and replaced it
    with a check of this test's own fixture value
    - replaced the deprecated assert_ alias with assertTrue
    """
    def setUp(self):
        """Create one Redis-backed fixture per RDA element and build the Expression."""
        self.accessibility_content_key = "mods:note:%s" % redis_server.incr("global:mods:note")
        redis_server.set(self.accessibility_content_key,"Test Expression Accessibility")
        self.additional_scale_information_key = "mods:note:%s" % redis_server.incr("global:mods:note")
        redis_server.hset(self.additional_scale_information_key,
                          "type",
                          "source dimensions")
        self.artistic_and_or_technical_credit_key = "frad:person:%s" % redis_server.incr("global:frad:person")
        redis_server.hset(self.artistic_and_or_technical_credit_key,
                          "frad:family",
                          "Wallace")
        self.aspect_ratio_key = "mods:note:%s" % redis_server.incr("global:mods:note")
        redis_server.set(self.aspect_ratio_key,"1:5")
        self.award_key = "mods:note:%s" % redis_server.incr("global:mods:note")
        redis_server.set(self.award_key,"Awarded first place")
        self.cataloguers_note_key = "mods:note:%s" % redis_server.incr("global:mods:note")
        redis_server.hset(self.cataloguers_note_key,"type","bibliographic history")
        redis_server.hset(self.cataloguers_note_key,"value","Test Cataloguer's Note")
        self.colour_content_key = "mods:note:%s" % redis_server.incr("global:mods:note")
        redis_server.set(self.colour_content_key,"256 Colors")
        self.content_type_key = "mime:type:HTTP"
        redis_server.set(self.content_type_key,"hypertext transfer protocol")
        self.date_of_capture_key = "mods:dateCaptured:%s" % redis_server.incr("global:mods:dateCaptured")
        redis_server.hset(self.date_of_capture_key,"year","1945")
        self.date_of_expression_key = self.date_of_capture_key
        self.duration_key = "mods:keyDate:%s" % redis_server.incr("global:mods:keyDate")
        redis_server.hset(self.duration_key,
                          "start",
                          "1950")
        redis_server.hset(self.duration_key,
                          "end",
                          "2010")
        self.form_of_musical_notation_key = "mods:note:%s" % redis_server.incr("global:mods:note")
        redis_server.set(self.form_of_musical_notation_key,"modern staff notation")
        self.form_of_notated_movement_key = "mods:note:%s" % redis_server.incr("global:mods:note")
        redis_server.set(self.form_of_notated_movement_key,"Eshkol-Wachman Movement Notation")
        self.form_of_notation_key = "mods:note:%s" % redis_server.incr("global:mods:note")
        redis_server.set(self.form_of_notation_key,"Test Expression Form of Notation")
        self.form_of_tactile_notation_key = "mods:note:%s" % redis_server.incr("global:mods:note")
        self.format_of_notated_music_key = "mods:note:%s" % redis_server.incr("global:mods:note")
        self.horizontal_scale_of_cartographic_content_key = "kvm:scale:%s" % redis_server.incr("global:kvm:scale")
        redis_server.set(self.horizontal_scale_of_cartographic_content_key,"kilometers")
        self.identifier_for_the_expression_key = "mods:identifier:%s" % redis_server.incr("global:mods:identifier")
        self.illustrative_content_key = "mods:note:%s" % redis_server.incr("global:mods:note")
        self.language_of_expression_key = "xml:lang:en"
        self.language_of_the_content_key = self.language_of_expression_key
        self.medium_of_performance_of_musical_content_key = "mods:note:%s" % redis_server.incr("global:mods:note")
        redis_server.set(self.medium_of_performance_of_musical_content_key,"Baritone (Musical instrument)")
        self.other_details_of_cartographic_content_key = "mods:note:%s" % redis_server.incr("global:mods:note")
        self.other_distinguishing_characteristic_of_the_expression_key = "mods:note:%s" % redis_server.incr("global:mods:note")
        redis_server.hset(self.other_distinguishing_characteristic_of_the_expression_key,
                          "type",
                          "source characteristics")
        redis_server.hset(self.other_distinguishing_characteristic_of_the_expression_key,
                          "value",
                          "Test Source Characteristic")
        self.performer_key = "frad:person:%s" % redis_server.incr("global:frad:person")
        redis_server.set(self.performer_key,"Test Expression Perfomer")
        self.scale_key = "mods:note:%s" % redis_server.incr("global:mods:note")
        # NOTE: scale_of_still_image_or_three_dimensional_form_key is
        # assigned once, further below (a dead duplicate assignment that
        # used to sit here was removed).
        self.place_and_date_of_capture_key = "mods:originInfo:%s" % redis_server.incr("global:mods:originInfo")
        self.place_of_capture_key = "mods:city:Colorado Springs"
        redis_server.hset(self.place_and_date_of_capture_key,"place",self.place_of_capture_key)
        redis_server.hset(self.place_and_date_of_capture_key,"mods:dateCaptured","2001")
        self.projection_of_cartographic_content_key = "mods:note:%s" % redis_server.incr("global:mods:note")
        self.script_key = "mods:note:%s" % redis_server.incr("global:mods:note")
        self.scale_of_still_image_or_three_dimensional_form_key = "mods:note:%s" % redis_server.incr("global:mods:note")
        redis_server.set(self.scale_of_still_image_or_three_dimensional_form_key,"1:100,000")
        self.sound_content_key = "mods:note:%s" % redis_server.incr("global:mods:note")
        redis_server.set(self.sound_content_key,"Test Sound Content for Expression")
        self.source_consulted_key = "frbr:Work:%s" % redis_server.incr("global:frbr:Work")
        redis_server.set(self.source_consulted_key,"Test Source Consulted for Expression")
        self.summarization_of_the_content_key = "mods:note:%s" % redis_server.incr("global:mods:note")
        redis_server.set(self.summarization_of_the_content_key,"Test Expression Summary")
        self.supplementary_content_key = "frbr:Expression:%s" % redis_server.incr("global:frbr:Expression")
        self.vertical_scale_of_cartographic_content_key = "kvm:scale:%s" % redis_server.incr("global:kvm:scale")
        redis_server.set(self.vertical_scale_of_cartographic_content_key,"meter")
        params = {'Accessibility content (Expression)':self.accessibility_content_key,
                  'Additional scale information (Expression)':self.additional_scale_information_key,
                  'Artistic and/or technical credit (Expression)':self.artistic_and_or_technical_credit_key,
                  'Aspect ratio (Expression)':self.aspect_ratio_key,
                  'Award (Expression)':self.award_key,
                  "Cataloguer's note (Expression)":self.cataloguers_note_key,
                  'Colour content (Expression)':self.colour_content_key,
                  'Colour content of resource designed for persons with visual impairments (Expression)':"No",
                  'Colour of moving images (Expression)':"Multiple",
                  'Colour of still image (Expression)':["green","blue"],
                  'Colour of three-dimensional form (Expression)':"black",
                  'Content type (Expression)':self.content_type_key,
                  'Date of capture (Expression)':self.date_of_capture_key,
                  'Date of expression':self.date_of_expression_key,
                  'Duration (Expression)':self.duration_key,
                  'Form of musical notation (Expression)':self.form_of_musical_notation_key,
                  'Form of notated movement (Expression)':self.form_of_notated_movement_key,
                  'Form of notation (Expression)':self.form_of_notation_key,
                  'Form of tactile notation (Expression)':self.form_of_tactile_notation_key,
                  'Format of notated music (Expression)':self.format_of_notated_music_key,
                  'Horizontal scale of cartographic content (Expression)':self.horizontal_scale_of_cartographic_content_key,
                  'Identifier for the expression':self.identifier_for_the_expression_key,
                  'Illustrative content (Expression)':self.illustrative_content_key,
                  'Language of expression':self.language_of_expression_key,
                  'Language of the content (Expression)':self.language_of_the_content_key,
                  'Medium of performance of musical content (Expression)':self.medium_of_performance_of_musical_content_key,
                  'Other details of cartographic content (Expression)':self.other_details_of_cartographic_content_key,
                  'Other distinguishing characteristic of the expression':self.other_distinguishing_characteristic_of_the_expression_key,
                  'Performer, narrator, and/or presenter (Expression)':self.performer_key,
                  'Place and date of capture (Expression)':self.place_and_date_of_capture_key,
                  'Place of capture (Expression)':self.place_of_capture_key,
                  'Projection of cartographic content (Expression)':self.projection_of_cartographic_content_key,
                  'Scale (Expression)':self.scale_key,
                  'Scale of still image or three-dimensional form (Expression)':self.scale_of_still_image_or_three_dimensional_form_key,
                  'Script (Expression)':self.script_key,
                  'Sound content (Expression)':self.sound_content_key,
                  'Source consulted (Expression)':self.source_consulted_key,
                  'Status of identification (Expression)':"established",
                  'Summarization of the content (Expression)':self.summarization_of_the_content_key,
                  'Supplementary content (Expression)':self.supplementary_content_key,
                  'Vertical scale of cartographic content (Expression)':self.vertical_scale_of_cartographic_content_key}
        self.expression = frbr_rda.Expression(redis_server=redis_server,
                                              **params)
    def test_init(self):
        """The Expression should receive a Redis identifier on creation."""
        self.assertTrue(self.expression.redis_ID)
    def test_accessibility_content(self):
        accessibility_content_key = getattr(self.expression,
                                            'Accessibility content (Expression)')
        self.assertEquals(self.accessibility_content_key,
                          accessibility_content_key)
        # verify this element's own fixture value (a misplaced assertion
        # about additional_scale_information_key was removed from here)
        self.assertEquals(redis_server.get(accessibility_content_key),
                          "Test Expression Accessibility")
    def test_additional_scale_information(self):
        additional_scale_information_key = getattr(self.expression,
                                                   'Additional scale information (Expression)')
        self.assertEquals(additional_scale_information_key,
                          self.additional_scale_information_key)
        self.assertEquals(redis_server.hget(additional_scale_information_key,
                                            "type"),
                          "source dimensions")
    def test_artistic_and_or_technical_credit(self):
        artistic_and_or_technical_credit_key = getattr(self.expression,
                                                       'Artistic and/or technical credit (Expression)')
        self.assertEquals(self.artistic_and_or_technical_credit_key,
                          artistic_and_or_technical_credit_key)
        self.assertEquals(redis_server.hget(artistic_and_or_technical_credit_key,
                                            "frad:family"),
                          "Wallace")
    def test_aspect_ratio(self):
        aspect_ratio_key = getattr(self.expression,
                                   'Aspect ratio (Expression)')
        self.assertEquals(aspect_ratio_key,
                          self.aspect_ratio_key)
        self.assertEquals(redis_server.get(aspect_ratio_key),
                          "1:5")
    def test_award(self):
        award_key = getattr(self.expression,
                            'Award (Expression)')
        self.assertEquals(self.award_key,award_key)
        self.assertEquals(redis_server.get(self.award_key),
                          "Awarded first place")
    def test_cataloguers_note(self):
        cataloguers_note_key = getattr(self.expression,
                                       "Cataloguer's note (Expression)")
        self.assertEquals(self.cataloguers_note_key,
                          cataloguers_note_key)
        self.assertEquals(redis_server.hget(cataloguers_note_key,
                                            "type"),
                          "bibliographic history")
        self.assertEquals(redis_server.hget(cataloguers_note_key,
                                            "value"),
                          "Test Cataloguer's Note")
    def test_colour_content(self):
        colour_content_key = getattr(self.expression,
                                     'Colour content (Expression)')
        self.assertEquals(self.colour_content_key,
                          colour_content_key)
        self.assertEquals(redis_server.get(colour_content_key),
                          "256 Colors")
    def test_colour_content_resource(self):
        self.assertEquals(getattr(self.expression,
                                  'Colour content of resource designed for persons with visual impairments (Expression)'),
                          "No")
    def test_colour_moving_images(self):
        self.assertEquals(getattr(self.expression,
                                  'Colour of moving images (Expression)'),
                          "Multiple")
    def test_colour_still_image(self):
        self.assertEquals(getattr(self.expression,
                                  'Colour of still image (Expression)'),
                          ["green","blue"])
    def test_colour_three_dimensional_form(self):
        self.assertEquals(getattr(self.expression,
                                  'Colour of three-dimensional form (Expression)'),
                          "black")
    def test_content_type(self):
        content_type_key = getattr(self.expression,
                                   'Content type (Expression)')
        self.assertEquals(self.content_type_key,
                          content_type_key)
        self.assertEquals(redis_server.get(self.content_type_key),
                          "hypertext transfer protocol")
    def test_date_of_capture(self):
        date_of_capture_key = getattr(self.expression,
                                      'Date of capture (Expression)')
        self.assertEquals(self.date_of_capture_key,
                          date_of_capture_key)
        self.assertEquals(redis_server.hget(date_of_capture_key,
                                            "year"),
                          "1945")
    def test_date_of_expression(self):
        date_of_expression_key = getattr(self.expression,
                                         'Date of expression')
        self.assertEquals(self.date_of_expression_key,
                          date_of_expression_key)
        self.assertEquals(redis_server.hget(date_of_expression_key,
                                            "year"),
                          "1945")
    def test_duration_key(self):
        duration_key = getattr(self.expression,
                               'Duration (Expression)')
        self.assertEquals(duration_key,self.duration_key)
        self.assertEquals(redis_server.hget(duration_key,
                                            "start"),
                          "1950")
        self.assertEquals(redis_server.hget(duration_key,
                                            "end"),
                          "2010")
    def test_form_of_musical_notation(self):
        form_of_musical_notation_key = getattr(self.expression,
                                               'Form of musical notation (Expression)')
        self.assertEquals(self.form_of_musical_notation_key,
                          form_of_musical_notation_key)
        self.assertEquals(redis_server.get(form_of_musical_notation_key),
                          "modern staff notation")
    def test_form_of_notated_movement(self):
        form_of_notated_movement_key = getattr(self.expression,
                                               'Form of notated movement (Expression)')
        self.assertEquals(self.form_of_notated_movement_key,
                          form_of_notated_movement_key)
        self.assertEquals(redis_server.get(form_of_notated_movement_key),
                          "Eshkol-Wachman Movement Notation")
    def test_form_of_notation(self):
        form_of_notation_key = getattr(self.expression,
                                       'Form of notation (Expression)')
        self.assertEquals(self.form_of_notation_key,
                          form_of_notation_key)
        self.assertEquals(redis_server.get(self.form_of_notation_key),
                          "Test Expression Form of Notation")
    def test_form_of_tactile_notation(self):
        form_of_tactile_notation_key = getattr(self.expression,
                                               'Form of tactile notation (Expression)')
        self.assertEquals(self.form_of_tactile_notation_key,
                          form_of_tactile_notation_key)
    def test_format_of_notated_music(self):
        format_of_notated_music_key = getattr(self.expression,
                                              'Format of notated music (Expression)')
        self.assertEquals(self.format_of_notated_music_key,
                          format_of_notated_music_key)
    def test_horizontal_scale_of_cartographic_content(self):
        horizontal_scale_of_cartographic_content_key = getattr(self.expression,
                                                               'Horizontal scale of cartographic content (Expression)')
        self.assertEquals(self.horizontal_scale_of_cartographic_content_key,
                          horizontal_scale_of_cartographic_content_key)
    def test_identifier_for_the_expression(self):
        identifier_for_the_expression_key = getattr(self.expression,
                                                    'Identifier for the expression')
        self.assertEquals(self.identifier_for_the_expression_key,
                          identifier_for_the_expression_key)
    def test_illustrative_content(self):
        illustrative_content_key = getattr(self.expression,
                                           'Illustrative content (Expression)')
        self.assertEquals(self.illustrative_content_key,
                          illustrative_content_key)
    def test_language_of_expression(self):
        language_of_expression_key = getattr(self.expression,
                                             'Language of expression')
        self.assertEquals(self.language_of_expression_key,
                          language_of_expression_key)
    def test_language_of_the_content(self):
        language_of_the_content_key = getattr(self.expression,
                                              'Language of the content (Expression)')
        self.assertEquals(self.language_of_the_content_key,
                          language_of_the_content_key)
    def test_medium_of_performance_of_musical_content(self):
        medium_of_performance_of_musical_content_key = getattr(self.expression,
                                                               'Medium of performance of musical content (Expression)')
        self.assertEquals(self.medium_of_performance_of_musical_content_key,
                          medium_of_performance_of_musical_content_key)
    def test_other_details_of_cartographic_content(self):
        other_details_of_cartographic_content_key = getattr(self.expression,
                                                            'Other details of cartographic content (Expression)')
        self.assertEquals(self.other_details_of_cartographic_content_key,
                          other_details_of_cartographic_content_key)
    def test_other_distinguishing_characteristic_of_the_expression(self):
        other_distinguishing_characteristic_of_the_expression_key = getattr(self.expression,
                                                                            'Other distinguishing characteristic of the expression')
        self.assertEquals(self.other_distinguishing_characteristic_of_the_expression_key,
                          other_distinguishing_characteristic_of_the_expression_key)
    def test_performer_narrator(self):
        performer_key = getattr(self.expression,
                                'Performer, narrator, and/or presenter (Expression)')
        self.assertEquals(self.performer_key,
                          performer_key)
    def test_place_and_date_of_capture(self):
        place_and_date_of_capture_key = getattr(self.expression,
                                                'Place and date of capture (Expression)')
        self.assertEquals(self.place_and_date_of_capture_key,
                          place_and_date_of_capture_key)
    def test_place_of_capture(self):
        place_of_capture_key = getattr(self.expression,
                                       'Place of capture (Expression)')
        self.assertEquals(self.place_of_capture_key,
                          place_of_capture_key)
    def test_projection_of_cartographic_content(self):
        projection_of_cartographic_content_key = getattr(self.expression,
                                                         'Projection of cartographic content (Expression)')
        self.assertEquals(self.projection_of_cartographic_content_key,
                          projection_of_cartographic_content_key)
    def test_scale(self):
        scale_key = getattr(self.expression,
                            'Scale (Expression)')
        self.assertEquals(self.scale_key,
                          scale_key)
    def test_scale_of_still_image_or_three_dimensional_form(self):
        scale_of_still_image_or_three_dimensional_form_key = getattr(self.expression,
                                                                     'Scale of still image or three-dimensional form (Expression)')
        self.assertEquals(self.scale_of_still_image_or_three_dimensional_form_key,
                          scale_of_still_image_or_three_dimensional_form_key)
    def test_script(self):
        script_key = getattr(self.expression,
                             'Script (Expression)')
        self.assertEquals(self.script_key,
                          script_key)
    def test_sound_content_key(self):
        sound_content_key = getattr(self.expression,
                                    'Sound content (Expression)')
        self.assertEquals(self.sound_content_key,
                          sound_content_key)
        self.assertEquals(redis_server.get(self.sound_content_key),
                          "Test Sound Content for Expression")
    def test_source_consulted(self):
        source_consulted_key = getattr(self.expression,
                                       'Source consulted (Expression)')
        self.assertEquals(self.source_consulted_key,
                          source_consulted_key)
        self.assertEquals(redis_server.get(source_consulted_key),
                          "Test Source Consulted for Expression")
    def test_status_of_identification(self):
        self.assertEquals(getattr(self.expression,
                                  'Status of identification (Expression)'),
                          "established")
    def test_summarization_of_the_content(self):
        summarization_of_the_content_key = getattr(self.expression,
                                                   'Summarization of the content (Expression)')
        self.assertEquals(self.summarization_of_the_content_key,
                          summarization_of_the_content_key)
        self.assertEquals(redis_server.get(self.summarization_of_the_content_key),
                          "Test Expression Summary")
    def test_supplementary_content(self):
        supplementary_content_key = getattr(self.expression,
                                            'Supplementary content (Expression)')
        self.assertEquals(self.supplementary_content_key,
                          supplementary_content_key)
    def test_vertical_scale_of_cartographic_content(self):
        vertical_scale_of_cartographic_content_key = getattr(self.expression,
                                                             'Vertical scale of cartographic content (Expression)')
        self.assertEquals(self.vertical_scale_of_cartographic_content_key,
                          vertical_scale_of_cartographic_content_key)
        self.assertEquals(redis_server.get(self.vertical_scale_of_cartographic_content_key),
                          "meter")
    def tearDown(self):
        """Wipe the test database so each test starts from a clean slate."""
        redis_server.flushdb()
class TestExpressionWEMIRelationships(unittest.TestCase):
def setUp(self):
self.abridged_as_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.abridgement_of_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.absorbed_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.absorbed_by_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.absorbed_in_part_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.absorbed_in_part_by_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.abstract_key = "mods:abstract:%s" % redis_server.incr("global:mods:abstract")
redis_server.set(self.abstract_key,"Test Abstract of Expression")
self.abstracted_in_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.abstracted_for_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.accompanying_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.adaptation_of_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.adapted_as_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.adapted_as_a_motion_pic_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.adapted_as_a_motion_pic_scrn_play_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.adapted_as_radio_programme_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.adapted_as_radio_script_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.adapted_as_a_screenplay_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.adapted_as_tv_programme_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.adapted_as_tv_scrn_play_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.adapted_as_video_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.adapted_as_a_video_scrn_play_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.addenda_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.addenda_to_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.analysed_in_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.analysis_of_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.appendix_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.appendix_to_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.augmentation_of_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.augmented_by_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.augmented_by_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.based_on_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.basis_for_libretto_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.cadenza_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.cadenza_composed_for_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.catalogue_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.catalogue_of_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.choreography_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.choreography_for_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.commentary_in_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.commentary_on_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.complemented_by_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.concordance_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.concordance_to_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.contains_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
| |
False )
if 67 - 67: o0oOOo0O0Ooo . I1Ii111 % iIii1I11I1II1 / I1Ii111
IIiIiII = ( prefix . addr_length ( ) * 8 ) - iIi1iii1
oOoo00oOoO0o = ( 2 ** iIi1iii1 - 1 ) << IIiIiII
return ( ( self . address & oOoo00oOoO0o ) == prefix . address )
if 18 - 18: I11i * ooOoO0o
if 46 - 46: IiII
def mask_address ( self , mask_len ) :
IIiIiII = ( self . addr_length ( ) * 8 ) - mask_len
oOoo00oOoO0o = ( 2 ** mask_len - 1 ) << IIiIiII
self . address &= oOoo00oOoO0o
if 96 - 96: iII111i / i11iIiiIii + Oo0Ooo . I1IiiI + iII111i % OoOoOO00
if 19 - 19: i11iIiiIii . Oo0Ooo . OoOoOO00 - I1IiiI
def is_exact_match ( self , prefix ) :
if ( self . instance_id != prefix . instance_id ) : return ( False )
O00OooO0o = self . print_prefix ( )
oo00oO000oOo = prefix . print_prefix ( ) if prefix else ""
return ( O00OooO0o == oo00oO000oOo )
if 57 - 57: I1IiiI - ooOoO0o
if 70 - 70: I1ii11iIi11i * ooOoO0o
def is_local ( self ) :
if ( self . is_ipv4 ( ) ) :
IIIIIi = lisp_myrlocs [ 0 ]
if ( IIIIIi == None ) : return ( False )
IIIIIi = IIIIIi . print_address_no_iid ( )
return ( self . print_address_no_iid ( ) == IIIIIi )
if 40 - 40: I1IiiI / I1ii11iIi11i / Oo0Ooo
if ( self . is_ipv6 ( ) ) :
IIIIIi = lisp_myrlocs [ 1 ]
if ( IIIIIi == None ) : return ( False )
IIIIIi = IIIIIi . print_address_no_iid ( )
return ( self . print_address_no_iid ( ) == IIIIIi )
if 28 - 28: OoO0O00 / I1ii11iIi11i % OOooOOo % I1IiiI + Ii1I
return ( False )
if 6 - 6: o0oOOo0O0Ooo % OOooOOo
if 71 - 71: oO0o + II111iiii * O0 / i11iIiiIii * o0oOOo0O0Ooo
def store_iid_range ( self , iid , mask_len ) :
if ( self . afi == LISP_AFI_NONE ) :
if ( iid is 0 and mask_len is 0 ) : self . afi = LISP_AFI_ULTIMATE_ROOT
else : self . afi = LISP_AFI_IID_RANGE
if 85 - 85: o0oOOo0O0Ooo - I1Ii111
self . instance_id = iid
self . mask_len = mask_len
if 90 - 90: OoO0O00 * I1Ii111 * iII111i * Ii1I + OoOoOO00 / iII111i
if 63 - 63: o0oOOo0O0Ooo * I1Ii111
 def lcaf_length ( self , lcaf_type ) :
  '''
  Return the LCAF payload length in bytes for this address when
  encoded with the given LCAF type: the AFI-encoded address length
  plus 2 bytes for the AFI field, plus a per-type fixed overhead.
  '''
  iiiIIiiIi = self . addr_length ( ) + 2
  if ( lcaf_type == LISP_LCAF_AFI_LIST_TYPE ) : iiiIIiiIi += 4
  if ( lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE ) : iiiIIiiIi += 4
  if ( lcaf_type == LISP_LCAF_ASN_TYPE ) : iiiIIiiIi += 4
  if ( lcaf_type == LISP_LCAF_APP_DATA_TYPE ) : iiiIIiiIi += 8
  if ( lcaf_type == LISP_LCAF_GEO_COORD_TYPE ) : iiiIIiiIi += 12
  if ( lcaf_type == LISP_LCAF_OPAQUE_TYPE ) : iiiIIiiIi += 0
  if ( lcaf_type == LISP_LCAF_NAT_TYPE ) : iiiIIiiIi += 4
  if ( lcaf_type == LISP_LCAF_NONCE_LOC_TYPE ) : iiiIIiiIi += 4
  # multicast info doubles the address length -- presumably because the
  # encoding carries both source and group -- plus an 8-byte header;
  # confirm against the LCAF spec (RFC 8060)
  if ( lcaf_type == LISP_LCAF_MCAST_INFO_TYPE ) : iiiIIiiIi = iiiIIiiIi * 2 + 8
  if ( lcaf_type == LISP_LCAF_ELP_TYPE ) : iiiIIiiIi += 0
  if ( lcaf_type == LISP_LCAF_SECURITY_TYPE ) : iiiIIiiIi += 6
  if ( lcaf_type == LISP_LCAF_SOURCE_DEST_TYPE ) : iiiIIiiIi += 4
  if ( lcaf_type == LISP_LCAF_RLE_TYPE ) : iiiIIiiIi += 4
  return ( iiiIIiiIi )
if 9 - 9: ooOoO0o . O0 + II111iiii . OoooooooOO
if 97 - 97: O0 / OoOoOO00 / ooOoO0o
if 11 - 11: II111iiii . i11iIiiIii - Ii1I . IiII
if 10 - 10: OOooOOo * OoooooooOO
if 12 - 12: II111iiii - O0 . i1IIi % oO0o % OoooooooOO
if 36 - 36: IiII * OoOoOO00 - iIii1I11I1II1 + II111iiii
if 65 - 65: I1IiiI * I11i . I1Ii111 % I1ii11iIi11i + O0
if 91 - 91: OoooooooOO % I1Ii111 * OoO0O00 - OoOoOO00
if 5 - 5: iIii1I11I1II1 * I11i - oO0o % oO0o % o0oOOo0O0Ooo . i1IIi
if 95 - 95: Oo0Ooo * I1ii11iIi11i + iII111i - o0oOOo0O0Ooo - Oo0Ooo . OoO0O00
if 62 - 62: I11i
if 58 - 58: I11i . OoOoOO00 + iII111i . iII111i
if 43 - 43: I1Ii111 + I1Ii111 % Oo0Ooo % OoO0O00 - ooOoO0o
if 61 - 61: OoOoOO00 + Ii1I % i11iIiiIii - I1IiiI * OoO0O00 % iIii1I11I1II1
if 66 - 66: iII111i + i1IIi
if 24 - 24: O0 / OoooooooOO - OoOoOO00
if 51 - 51: OoO0O00 + o0oOOo0O0Ooo - II111iiii * I11i + Ii1I
def lcaf_encode_iid ( self ) :
O000oo0O0OO0 = LISP_LCAF_INSTANCE_ID_TYPE
O0III1Iiii1i11 = socket . htons ( self . lcaf_length ( O000oo0O0OO0 ) )
o0OoO0000o = self . instance_id
O000oOOoOOO = self . afi
o00O0Oo = 0
if ( O000oOOoOOO < 0 ) :
if ( self . afi == LISP_AFI_GEO_COORD ) :
O000oOOoOOO = LISP_AFI_LCAF
o00O0Oo = 0
else :
O000oOOoOOO = 0
o00O0Oo = self . mask_len
if 16 - 16: I1Ii111 * i1IIi . I1IiiI . OOooOOo % Ii1I - o0oOOo0O0Ooo
if 89 - 89: Ii1I * I1ii11iIi11i * I1IiiI % iII111i % Ii1I + O0
if 53 - 53: i11iIiiIii % I1ii11iIi11i
oO0OoOo0oo = struct . pack ( "BBBBH" , 0 , 0 , O000oo0O0OO0 , o00O0Oo , O0III1Iiii1i11 )
oO0OoOo0oo += struct . pack ( "IH" , socket . htonl ( o0OoO0000o ) , socket . htons ( O000oOOoOOO ) )
if ( O000oOOoOOO == 0 ) : return ( oO0OoOo0oo )
if 63 - 63: IiII + oO0o + II111iiii * I11i
if ( self . afi == LISP_AFI_GEO_COORD ) :
oO0OoOo0oo = oO0OoOo0oo [ 0 : - 2 ]
oO0OoOo0oo += self . address . encode_geo ( )
return ( oO0OoOo0oo )
if 49 - 49: OoO0O00
if 78 - 78: I1IiiI - I1ii11iIi11i
oO0OoOo0oo += self . pack_address ( )
return ( oO0OoOo0oo )
if 24 - 24: Ii1I + I11i
if 5 - 5: I1Ii111 . Ii1I - ooOoO0o % OoooooooOO
def lcaf_decode_iid ( self , packet ) :
O00oO00oOO00O = "BBBBH"
ooOoooOoo0oO = struct . calcsize ( O00oO00oOO00O )
if ( len ( packet ) < ooOoooOoo0oO ) : return ( None )
if 2 - 2: OOooOOo . IiII . iII111i / Oo0Ooo
O0o000 , o00oo0 , O000oo0O0OO0 , o0Ooo , iiiIIiiIi = struct . unpack ( O00oO00oOO00O ,
packet [ : ooOoooOoo0oO ] )
packet = packet [ ooOoooOoo0oO : : ]
if 34 - 34: OoooooooOO % OoOoOO00 * o0oOOo0O0Ooo . oO0o
if ( O000oo0O0OO0 != LISP_LCAF_INSTANCE_ID_TYPE ) : return ( None )
if 94 - 94: O0 . I1ii11iIi11i . i11iIiiIii - I1Ii111 . IiII + oO0o
O00oO00oOO00O = "IH"
ooOoooOoo0oO = struct . calcsize ( O00oO00oOO00O )
if ( len ( packet ) < ooOoooOoo0oO ) : return ( None )
if 48 - 48: OoOoOO00 * I11i
o0OoO0000o , O000oOOoOOO = struct . unpack ( O00oO00oOO00O , packet [ : ooOoooOoo0oO ] )
packet = packet [ ooOoooOoo0oO : : ]
if 92 - 92: I1IiiI * I1IiiI
iiiIIiiIi = socket . ntohs ( iiiIIiiIi )
self . instance_id = socket . ntohl ( o0OoO0000o )
O000oOOoOOO = socket . ntohs ( O000oOOoOOO )
self . afi = O000oOOoOOO
if ( o0Ooo != 0 and O000oOOoOOO == 0 ) : self . mask_len = o0Ooo
if ( O000oOOoOOO == 0 ) :
self . afi = LISP_AFI_IID_RANGE if o0Ooo else LISP_AFI_ULTIMATE_ROOT
if 9 - 9: IiII * I1IiiI * OoO0O00 - I1IiiI * I1IiiI - OoO0O00
if 20 - 20: i1IIi + I1IiiI + i11iIiiIii + II111iiii + i1IIi
if 18 - 18: i11iIiiIii * O0 * Oo0Ooo + iII111i + OOooOOo
if 62 - 62: OOooOOo - oO0o + i1IIi % Ii1I . I1Ii111 . | |
np.array([[0.0025, 0.01, 0.0025], [0.01, 0.95, 0.01], [0.0025, 0.01, 0.0025]])
Pois = ndimage.convolve(Pois.copy(), kernel)
#Pois = ndimage.filters.gaussian_filter(Pois.copy(), sigma=0.4)
fourierSp = np.log10(np.abs(fftpack.fft2(Pois)))
print 'Poisson 2d Smoothed:', np.var(Pois)
print np.mean(fourierSp), np.median(fourierSp), np.std(fourierSp), np.max(fourierSp), np.min(fourierSp)
fig = plt.figure(figsize=(14.5, 6.5))
plt.suptitle('Fourier Analysis of Smoothed Poisson Data')
plt.suptitle('Original Image', x=0.32, y=0.26)
plt.suptitle(r'$\log_{10}$(2D Power Spectrum)', x=0.72, y=0.26)
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
i1 = ax1.imshow(Pois, origin='lower', interpolation=interpolation)
plt.colorbar(i1, ax=ax1, orientation='horizontal', format='%.1f', ticks=[99000, 100000, 101000])
i2 = ax2.imshow(fourierSp[0:ss, 0:ss], interpolation=interpolation, origin='lower',
rasterized=True, vmin=3, vmax=7)
plt.colorbar(i2, ax=ax2, orientation='horizontal')
ax1.set_xlabel('X [pixel]')
ax2.set_xlabel('$l_{x}$')
ax1.set_ylabel('Y [pixel]')
plt.savefig('FourierPoissonSmooth.pdf')
ax2.set_xlim(0, 10)
ax2.set_ylim(0, 10)
plt.savefig('FourierPoissonSmooth2.pdf')
ax2.set_xlim(ss-10, ss-1)
ax2.set_ylim(ss-10, ss-1)
plt.savefig('FourierPoissonSmooth3.pdf')
plt.close()
#difference
fig = plt.figure()
plt.suptitle('Power Spectrum of Smoothed Poisson Data / Power Spectrum of Poisson Data')
ax = fig.add_subplot(111)
i = ax.imshow(fourierSp[0:ss, 0:ss] / fourierSpectrum1[0:ss, 0:ss],
origin='lower', interpolation=interpolation, vmin=0.9, vmax=1.1)
plt.colorbar(i, ax=ax, orientation='horizontal')
plt.savefig('FourierPSDiv.pdf')
ax.set_xlim(0, 10)
ax.set_ylim(0, 10)
plt.savefig('FourierPSDiv2.pdf')
ax.set_xlim(ss-10, ss-1)
ax.set_ylim(ss-10, ss-1)
plt.savefig('FourierPSDiv3.pdf')
plt.close()
#x = np.arange(1024)
#y = 10 * np.sin(x / 30.) + 20
#img = np.vstack([y, ] * 1024)
x, y = np.mgrid[0:32, 0:32]
#img = 10*np.sin(x/40.) * 10*np.sin(y/40.)
img = 100 * np.cos(x*np.pi/4.) * np.cos(y*np.pi/4.)
kernel = np.array([[0.0025, 0.01, 0.0025], [0.01, 0.95, 0.01], [0.0025, 0.01, 0.0025]])
img = ndimage.convolve(img.copy(), kernel)
fourierSpectrum2 = np.abs(fftpack.fft2(img))
#fourierSpectrum2 = np.log10(np.abs(fftpack.fftshift(fftpack.fft2(img))))
print np.mean(fourierSpectrum2), np.median(fourierSpectrum2), np.std(fourierSpectrum2), np.max(fourierSpectrum2), np.min(fourierSpectrum2)
fig = plt.figure(figsize=(14.5, 6.5))
plt.suptitle('Fourier Analysis of Flat-field Data')
plt.suptitle('Original Image', x=0.32, y=0.26)
plt.suptitle(r'$\log_{10}$(2D Power Spectrum)', x=0.72, y=0.26)
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
i1 = ax1.imshow(img, origin='lower', interpolation=interpolation, rasterized=True)
plt.colorbar(i1, ax=ax1, orientation='horizontal', format='%.1e')
i2 = ax2.imshow(fourierSpectrum2[0:512, 0:512], interpolation=interpolation, origin='lower',
rasterized=True)
plt.colorbar(i2, ax=ax2, orientation='horizontal')
ax1.set_xlabel('X [pixel]')
ax2.set_xlabel('$l_{x}$')
ax2.set_ylim(0, 16)
ax2.set_xlim(0, 16)
ax1.set_ylabel('Y [pixel]')
plt.savefig('FourierSin.pdf')
plt.close()
x, y = np.mgrid[0:1024, 0:1024]
img = 10*np.sin(x/40.) * 10*np.sin(y/40.)
fourierSpectrum2 = np.log10(np.abs(fftpack.fft2(img)))
print np.mean(fourierSpectrum2), np.median(fourierSpectrum2), np.std(fourierSpectrum2), np.max(fourierSpectrum2), np.min(fourierSpectrum2)
fig = plt.figure(figsize=(14.5, 6.5))
plt.suptitle('Fourier Analysis of Flat-field Data')
plt.suptitle('Original Image', x=0.32, y=0.26)
plt.suptitle(r'$\log_{10}$(2D Power Spectrum)', x=0.72, y=0.26)
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
i1 = ax1.imshow(img, origin='lower', interpolation=interpolation)
plt.colorbar(i1, ax=ax1, orientation='horizontal', format='%.1e')
i2 = ax2.imshow(fourierSpectrum2[0:512, 0:512], interpolation=interpolation, origin='lower',
rasterized=True, vmin=-1, vmax=7)
plt.colorbar(i2, ax=ax2, orientation='horizontal')
ax1.set_xlabel('X [pixel]')
ax2.set_xlabel('$l_{x}$')
ax2.set_ylim(0, 20)
ax2.set_xlim(0, 20)
ax1.set_ylabel('Y [pixel]')
plt.savefig('FourierSin2.pdf')
plt.close()
def sinusoidalExample():
    """Animate the effect of Gaussian smoothing on a 2-D sinusoidal image.

    Left panel: the (smoothed) image; right panel: log10 of its 2-D power
    spectrum. One animation frame per sigma value; the result is written to
    FourierSmoothing.mp4 (requires a movie writer such as ffmpeg).
    """
    interpolation = 'none'
    x, y = np.mgrid[0:32, 0:32]
    img = 100 * np.cos(x*np.pi/4.) * np.cos(y*np.pi/4.)
    power = np.log10(np.abs(fftpack.fft2(img.copy())))
    sigma = np.linspace(0.2, 3.0, 20)
    fig = plt.figure(figsize=(14.5, 7))
    plt.suptitle('Fourier Analysis of Sinusoidal Data')
    plt.suptitle('Gaussian Smoothed', x=0.32, y=0.26)
    plt.suptitle(r'$\log_{10}$(2D Power Spectrum)', x=0.72, y=0.26)
    ax1 = fig.add_subplot(121)
    ax2 = fig.add_subplot(122)
    i1 = ax1.imshow(img, origin='lower', interpolation=interpolation, rasterized=True)
    p1 = plt.colorbar(i1, ax=ax1, orientation='horizontal', format='%.1f', ticks=[-100, -50, 0, 50, 100])
    # BUG FIX: the power spectrum belongs on ax2 (the panel that owns
    # colorbar p2, and the axis animate() draws on); the original used ax1.
    i2 = ax2.imshow(power[0:16, 0:16], interpolation=interpolation, origin='lower',
                    rasterized=True, vmin=-1, vmax=7)
    p2 = plt.colorbar(i2, ax=ax2, orientation='horizontal')
    sigma_text = ax1.text(0.02, 0.95, '', transform=ax1.transAxes)

    def init():
        # Draw the unsmoothed image and its spectrum for frame zero.
        i1 = ax1.imshow(img, origin='lower', interpolation=interpolation, rasterized=True)
        i2 = ax2.imshow(power[0:16, 0:16], interpolation=interpolation, origin='lower', rasterized=True, vmin=-1, vmax=7)
        sigma_text = ax1.text(0.02, 0.95, '', transform=ax1.transAxes)
        return i1, p1, i2, p2, sigma_text

    def animate(i):
        im = ndimage.filters.gaussian_filter(img.copy(), sigma=sigma[i])
        power = np.log10(np.abs(fftpack.fft2(im)))
        i1 = ax1.imshow(im, origin='lower', interpolation=interpolation)
        i2 = ax2.imshow(power[0:16, 0:16], interpolation=interpolation, origin='lower', rasterized=True, vmin=-1, vmax=7)
        sigma_text.set_text('sigma=%f' % sigma[i])
        # BUG FIX: return the spectrum image i2; the original returned p2 twice,
        # so the spectrum artist was never passed to the blitting machinery.
        return i1, p1, i2, p2, sigma_text

    # 'frames' defines how many times animate() is called.
    anim = animation.FuncAnimation(fig, animate, init_func=init, frames=20, interval=1, blit=True)
    anim.save('FourierSmoothing.mp4', fps=3)
def poissonExample():
    """Animate the effect of Gaussian smoothing on Poisson noise.

    Left panel: a 32x32 Poisson(100000) image after smoothing; right panel:
    log10 of its 2-D power spectrum. One frame per sigma value; saved to
    FourierSmoothingPoisson.mp4 (requires a movie writer such as ffmpeg).
    """
    interpolation = 'none'
    img = np.random.poisson(100000, size=(32, 32))
    power = np.log10(np.abs(fftpack.fft2(img.copy())))
    sigma = np.linspace(0.2, 3.0, 20)
    fig = plt.figure(figsize=(14.5, 7))
    plt.suptitle('Fourier Analysis of Poisson Data')
    plt.suptitle('Gaussian Smoothed', x=0.32, y=0.26)
    plt.suptitle(r'$\log_{10}$(2D Power Spectrum)', x=0.72, y=0.26)
    ax1 = fig.add_subplot(121)
    ax2 = fig.add_subplot(122)
    i1 = ax1.imshow(img, origin='lower', interpolation=interpolation, rasterized=True)
    p1 = plt.colorbar(i1, ax=ax1, orientation='horizontal', format='%.1f', ticks=[99500, 100000, 100500])
    # BUG FIX: the power spectrum belongs on ax2 (the panel that owns
    # colorbar p2, and the axis animate() draws on); the original used ax1.
    i2 = ax2.imshow(power[0:16, 0:16], interpolation=interpolation, origin='lower',
                    rasterized=True, vmin=2, vmax=7)
    p2 = plt.colorbar(i2, ax=ax2, orientation='horizontal')
    sigma_text = ax1.text(0.02, 0.95, '', transform=ax1.transAxes)

    def init():
        i1 = ax1.imshow(img, origin='lower', interpolation=interpolation, rasterized=True)
        i2 = ax2.imshow(power[0:16, 0:16], interpolation=interpolation, origin='lower', rasterized=True, vmin=2, vmax=7)
        sigma_text = ax1.text(0.02, 0.95, '', transform=ax1.transAxes)
        # BUG FIX: include i2 in the returned artists (was p2 twice).
        return i1, p1, i2, p2, sigma_text

    def animate(i):
        im = ndimage.filters.gaussian_filter(img.copy(), sigma=sigma[i])
        power = np.log10(np.abs(fftpack.fft2(im)))
        i1 = ax1.imshow(im, origin='lower', interpolation=interpolation)
        i2 = ax2.imshow(power[0:16, 0:16], interpolation=interpolation, origin='lower', rasterized=True, vmin=2, vmax=7)
        sigma_text.set_text('sigma=%f' % sigma[i])
        return i1, p1, i2, p2, sigma_text

    # 'frames' defines how many times animate() is called.
    anim = animation.FuncAnimation(fig, animate, init_func=init, frames=20, interval=1, blit=True)
    anim.save('FourierSmoothingPoisson.mp4', fps=3)
def poissonExampleLowpass():
    """Animate a uniform (lowpass) 3x3 kernel convolution of Poisson noise.

    Each frame convolves a 32x32 Poisson(100000) image with a constant 3x3
    kernel whose value is 1/sigma[i], and shows log10 of the resulting power
    spectrum. Saved to FourierSmoothingPoissonLowpass.mp4.
    """
    interpolation = 'none'
    img = np.random.poisson(100000, size=(32, 32))
    power = np.log10(np.abs(fftpack.fft2(img.copy())))
    sigma = np.linspace(0.1, 100.0, 20)
    fig = plt.figure(figsize=(14.5, 7))
    plt.suptitle('Fourier Analysis of Poisson Data (lowpass filtering)')
    plt.suptitle('Lowpass Filtered', x=0.32, y=0.26)
    plt.suptitle(r'$\log_{10}$(2D Power Spectrum)', x=0.72, y=0.26)
    ax1 = fig.add_subplot(121)
    ax2 = fig.add_subplot(122)
    i1 = ax1.imshow(img, origin='lower', interpolation=interpolation, rasterized=True)
    p1 = plt.colorbar(i1, ax=ax1, orientation='horizontal', format='%.1f', ticks=[99500, 100000, 100500])
    # BUG FIX: the power spectrum belongs on ax2 (the panel that owns
    # colorbar p2, and the axis animate() draws on); the original used ax1.
    i2 = ax2.imshow(power[0:16, 0:16], interpolation=interpolation, origin='lower',
                    rasterized=True, vmin=2, vmax=7)
    p2 = plt.colorbar(i2, ax=ax2, orientation='horizontal')
    sigma_text = ax1.text(0.02, 0.95, '', transform=ax1.transAxes)

    def init():
        i1 = ax1.imshow(img, origin='lower', interpolation=interpolation, rasterized=True)
        i2 = ax2.imshow(power[0:16, 0:16], interpolation=interpolation, origin='lower', rasterized=True, vmin=2, vmax=7)
        sigma_text = ax1.text(0.02, 0.95, '', transform=ax1.transAxes)
        # BUG FIX: include i2 in the returned artists (was p2 twice).
        return i1, p1, i2, p2, sigma_text

    def animate(i):
        # Uniform kernel: every tap is 1/sigma[i] (note: not normalised).
        kernel_low = [[1.0/sigma[i], 1.0/sigma[i], 1.0/sigma[i]],
                      [1.0/sigma[i], 1.0/sigma[i], 1.0/sigma[i]],
                      [1.0/sigma[i], 1.0/sigma[i], 1.0/sigma[i]]]
        im = ndimage.convolve(img.copy(), kernel_low)
        power = np.log10(np.abs(fftpack.fft2(im)))
        i1 = ax1.imshow(im, origin='lower', interpolation=interpolation)
        i2 = ax2.imshow(power[0:16, 0:16], interpolation=interpolation, origin='lower', rasterized=True, vmin=2, vmax=7)
        sigma_text.set_text('kernel %f' % (1./sigma[i]))
        return i1, p1, i2, p2, sigma_text

    # 'frames' defines how many times animate() is called.
    anim = animation.FuncAnimation(fig, animate, init_func=init, frames=20, interval=1, blit=True)
    anim.save('FourierSmoothingPoissonLowpass.mp4', fps=3)
def poissonExamplePixelSharing():
    """Animate charge-sharing-style cross-kernel smoothing of Poisson noise.

    Each frame convolves a 32x32 Poisson(100000) image with a cross-shaped
    kernel that moves a fraction sigma[i] of each pixel to its four
    neighbours, and shows log10 of the power spectrum. Saved to
    FourierSmoothingPoissonSharing.mp4.
    """
    interpolation = 'none'
    img = np.random.poisson(100000, size=(32, 32))
    power = np.log10(np.abs(fftpack.fft2(img.copy())))
    sigma = np.logspace(-4, 1, 100)
    fig = plt.figure(figsize=(14.5, 7))
    plt.suptitle('Fourier Analysis of Poisson Data (kernel smoothing)')
    plt.suptitle('Kernel Convolved', x=0.32, y=0.26)
    plt.suptitle(r'$\log_{10}$(2D Power Spectrum)', x=0.72, y=0.26)
    ax1 = fig.add_subplot(121)
    ax2 = fig.add_subplot(122)
    i1 = ax1.imshow(img, origin='lower', interpolation=interpolation, rasterized=True)
    p1 = plt.colorbar(i1, ax=ax1, orientation='horizontal', format='%.1f', ticks=[99500, 100000, 100500])
    # BUG FIX: the power spectrum belongs on ax2 (the panel that owns
    # colorbar p2, and the axis animate() draws on); the original used ax1.
    i2 = ax2.imshow(power[0:16, 0:16], interpolation=interpolation, origin='lower',
                    rasterized=True, vmin=2, vmax=7)
    p2 = plt.colorbar(i2, ax=ax2, orientation='horizontal')
    sigma_text = ax1.text(0.02, 0.95, '', transform=ax1.transAxes)

    def init():
        i1 = ax1.imshow(img, origin='lower', interpolation=interpolation, rasterized=True)
        i2 = ax2.imshow(power[0:16, 0:16], interpolation=interpolation, origin='lower', rasterized=True, vmin=2, vmax=7)
        sigma_text = ax1.text(0.02, 0.95, '', transform=ax1.transAxes)
        # BUG FIX: include i2 in the returned artists (was p2 twice).
        return i1, p1, i2, p2, sigma_text

    def animate(i):
        # Cross kernel: keep 1-sigma in the centre, share sigma/4 per neighbour.
        kernel = [[0.0, sigma[i]/4., 0.0],
                  [sigma[i]/4., 1.0 - sigma[i], sigma[i]/4.],
                  [0.0, sigma[i]/4., 0.0]]
        im = ndimage.convolve(img.copy(), kernel)
        power = np.log10(np.abs(fftpack.fft2(im)))
        i1 = ax1.imshow(im, origin='lower', interpolation=interpolation)
        i2 = ax2.imshow(power[0:16, 0:16], interpolation=interpolation, origin='lower', rasterized=True, vmin=2, vmax=7)
        sigma_text.set_text('kernel %f' % sigma[i])
        return i1, p1, i2, p2, sigma_text

    # 'frames' defines how many times animate() is called.
    anim = animation.FuncAnimation(fig, animate, init_func=init, frames=100, interval=1, blit=True)
    anim.save('FourierSmoothingPoissonSharing.mp4', fps=3)
def poissonExamplePixelSharing2():
interpolation = 'none'
flux = 100000
size = 2**6
ss = size /2
img = np.random.poisson(flux, size=(size, size))
power = np.log10(np.abs(fftpack.fft2(img.copy())))
sigma = np.logspace(-3, -0.1, 25)
fig = plt.figure(figsize=(14.5, 7))
plt.suptitle('Fourier Analysis of Poisson Data (kernel smoothing)')
plt.suptitle('Kernel Convolved', x=0.32, y=0.26)
plt.suptitle(r'$\log_{10}$(2D Power Spectrum)', x=0.72, y=0.26)
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
i1 = ax1.imshow(img, origin='lower', interpolation=interpolation, rasterized=True)
p1 = plt.colorbar(i1, ax=ax1, orientation='horizontal', format='%.1f', ticks=[99500, 100000, 100500])
i2 = ax1.imshow(power[0:ss, 0:ss], interpolation=interpolation, origin='lower', rasterized=True, vmin=2, vmax=6)
p2 = plt.colorbar(i2, ax=ax2, orientation='horizontal')
sigma_text = ax1.text(0.02, 0.95, '', transform=ax1.transAxes)
def init():
i1 = ax1.imshow(img, origin='lower', interpolation=interpolation, rasterized=True)
i2 = ax1.imshow(power[0:ss, 0:ss], interpolation=interpolation, origin='lower', rasterized=True, vmin=2, vmax=6)
sigma_text = ax1.text(0.02, 0.95, '', transform=ax1.transAxes)
return i1, p1, p2, p2, sigma_text
def animate(i):
kernel = [[0.0, sigma[i]/4., 0.0],
[sigma[i]/4., 1.0 - sigma[i], sigma[i]/4.],
[0.0, sigma[i]/4., 0.0]]
im = ndimage.convolve(img.copy(), kernel)
print 'smoothed', sigma[i], np.var(img), np.var(im)
power = np.log10(np.abs(fftpack.fft2(im)))
i1 = ax1.imshow(im, origin='lower', interpolation=interpolation)
i2 = ax2.imshow(power[0:ss, 0:ss], interpolation=interpolation, origin='lower', rasterized=True, vmin=2, vmax=6)
sigma_text.set_text('kernel %e' % sigma[i])
return i1, p1, p2, p2, sigma_text
#note that the frames defines the number of times animate functions is being called
anim = animation.FuncAnimation(fig, animate, init_func=init, frames=25, interval=1, blit=True)
anim.save('FourierSmoothingPoissonSharing2.mp4', fps=3)
def comparePower(file1='05Sep_14_35_00s_Euclid.fits', file2='05Sep_14_36_31s_Euclid.fits', gain=3.1):
d1 = pf.getdata(file1) * gain
d2 = pf.getdata(file2) * gain
#pre/overscans
overscan1 = d1[11:2056, 4150:4192].mean()
overscan2 = d2[11:2056, 4150:4192].mean()
#define quadrants and subtract the bias levels
Q11 = d1[11:2050, 2110:4131] - overscan1
Q21 = d2[11:2050, 2110:4131] - overscan2
#limit to 1024
Q11 = Q11[300:1324, 300:1324]
Q21 = Q21[300:1324, 300:1324]
#difference image
diff = Q11 - Q21
fourierSpectrumD = np.abs(fftpack.fft2(diff))[0:512, 0:512]
cornervalues = fourierSpectrumD[510:512, 510:512]
print 'data'
print cornervalues
print np.log10(cornervalues)
print fourierSpectrumD[511:512, 511:512]
print | |
freeze_weights(model.module.ext_embeddings)
if args.config_ext_emb_method == 'concat':
freeze_weights(model.module.roberta_classifier.roberta.encoder.layer[args.ext_emb_concat_layer].attention.self, ['query_t', 'key_t', 'value_t'])
if args.config_ext_emb_method.lower() == 'kar':
freeze_weights(model.module.roberta_classifier.roberta.encoder.knowbert_block)
elif (freeze_type is None):
freeze_weights(model.module, defreeze=True)
elif freeze_type == 'user_input':
# Unfreezing model
freeze_weights(model.module, defreeze=True)
if args.config_ext_emb_method == 'concat':
freeze_weights(model.module.roberta_classifier.roberta.encoder.layer[args.ext_emb_concat_layer].attention.self, ['query_t', 'key_t', 'value_t'], defreeze=True)
if args.config_ext_emb_method == 'kar':
freeze_weights(model.module.roberta_classifier.roberta.encoder.knowbert_block.kar_attention.multihead, ['query', 'key', 'value'], defreeze=True)
else:
raise ValueError("Invalid freeze_type")
## bypassing freeze_type (the input argument)
if args.freeze_bert: # freezing BERT model (but leaving the classifier and ext_embeddings trainable)
freeze_weights(model.module.roberta_classifier.roberta.encoder)
if args.freeze_ext_emb_and_kar: # freezing external embeddings weights and KAR's
freeze_weights(model.module.ext_embeddings)
if args.config_ext_emb_method == 'kar':
freeze_weights(model.module.roberta_classifier.roberta.encoder.knowbert_block.kar_attention.multihead, ['query', 'key', 'value'])
if args.freeze_ext_emb: # freezing external embeddings weights
freeze_weights(model.module.ext_embeddings)
def train_model(model, train_examples, optimizer, lr=args.learning_rate, freeze_type=None, nb_epochs=3):
    """Fine-tune `model` on `train_examples` for `nb_epochs` epochs.

    Args:
        model: DataParallel-wrapped classifier (bert or roberta variant,
            selected by args.model_type).
        train_examples: training examples; an empty list is a no-op.
        optimizer: optimizer; in fp16 mode it must provide .backward(loss)
            and the learning rate is warmed up manually.
        lr: base learning rate (only used on the fp16 path).
        freeze_type: forwarded to manage_freezing; 'user_input' additionally
            reads per-epoch freeze choices from args.freeze_input.
        nb_epochs: number of training epochs.

    Returns:
        (model, last_eval_loss, last_dev_acc, eval_acc_by_label_str,
        dist_by_label_str); the loss/accuracy entries are -1 (and the
        strings '') when no dev evaluation was run.

    Uses module-level state: args, device, n_gpu, global_step,
    num_train_optimization_steps, dev_examples, wan.
    """
    global global_step
    if train_examples == []:
        return model, -1, -1, '', ''
    # Sample a small subset of the training data for the train-accuracy check.
    if args.train_acc_size < len(train_examples):
        ind = sorted(random.sample(range(len(train_examples)), k=args.train_acc_size))
        train_examples_for_acc = [train_examples[i] for i in ind]
    else:
        train_examples_for_acc = train_examples
    train_dataloader = load_or_gen_features('train', train_examples)
    model.train()
    manage_freezing(args, model, freeze_type)
    epoch = 0
    eval_acc_by_label_str = ''
    dist_by_label_str = ''
    training_results = []
    # Sub-modules frozen 'manually' by snapshot/restore: requires_grad=False
    # alone is defeated by optimizer momentum.
    freeze_list = []
    for _ in trange(int(nb_epochs), desc="Epoch"):
        my_logger('\n\n\n------------ Epoch %d -----------' % epoch)
        if freeze_type == 'user_input':
            freeze_list, is_early_finish = get_interactive_freeze(args.freeze_input[epoch])
            if is_early_finish: break
        tr_loss = 0
        nb_tr_examples, nb_tr_steps = 0, 0
        if args.print_trained_layers:
            prev_model_weights, module_names = get_model_weights2(model)  # for debug only
        # Snapshot 'manually'-frozen sub-modules; restored after the epoch.
        prev_par = copy_model_params(freeze_list)
        for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
            batch = tuple(t.to(device) for t in batch)
            if args.model_type == 'bert':
                input_ids, input_mask, segment_ids, label_ids, ext_emb_ids, example_weight, example_source = batch
                loss = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask, labels=label_ids, ext_emb_ids=ext_emb_ids, example_weight=example_weight, fix_position=args.fix_position)
            elif args.model_type == 'roberta':
                # Same as 'bert' but without 'segment_ids'.
                input_ids, input_mask, label_ids, ext_emb_ids, example_weight, example_source = batch
                outputs = model(input_ids, token_type_ids=None, attention_mask=input_mask, labels=label_ids, ext_emb_ids=ext_emb_ids, example_weight=example_weight, fix_position=args.fix_position)
                loss = outputs[0]
            if n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu.
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            if args.fp16:
                optimizer.backward(loss)
            else:
                loss.backward()
            tr_loss += loss.item()
            nb_tr_examples += input_ids.size(0)
            nb_tr_steps += 1
            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
                    # Modify learning rate with the special warm-up BERT uses;
                    # when args.fp16 is False, BertAdam handles this automatically.
                    lr_this_step = lr * WarmupLinearSchedule(global_step / num_train_optimization_steps,
                                                             args.warmup_proportion)
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = lr_this_step
                optimizer.step()
                optimizer.zero_grad()
                global_step += 1
        # Restore the 'manually'-frozen sub-modules to their pre-epoch weights.
        paste_model_params(prev_par, freeze_list)
        epoch_loss = tr_loss / nb_tr_steps
        my_logger('Epoch loss: %1.2f' % epoch_loss)
        del prev_par
        if args.print_trained_layers:  # debug: track which modules changed
            cur_model_weights, _ = get_model_weights2(model)
            print_models_dif2(module_names, prev_model_weights, cur_model_weights)
        if args.do_eval_train:
            my_logger('\n\t\t\t Evaluating Training set and MNLI accuracy after Epoch %d:\n' % epoch)
            train_acc, eval_loss, eval_acc_by_label_str, dist_by_label_str = eval_examples_batch(train_examples_for_acc, model, data_type='train_eval')
            # BUG FIX: '%' binds tighter than '*', so the original
            # `" Train: %1.2f" % train_acc * 100` formatted the raw fraction
            # and then repeated the string 100 times.
            train_acc_str = " Train: %1.2f" % (train_acc * 100)
            mnli_acc, _, _, _, _ = get_MNLI_dev_acc(args.data_dir, model)
            my_logger('-------\nTraining set accuracy for Epoch %d: %s, MNLI Dev:= %1.2f\n' % (epoch, train_acc_str, mnli_acc*100))
        if args.test_during_train or (freeze_type == 'all_but_ext_emb' and args.num_of_rand_init > 1):
            my_logger('\nEvaluating Dev set accuracy after Epoch %d:\n' % epoch)
            dev_acc, eval_loss, eval_acc_by_label_str, dist_by_label_str = eval_examples_batch(dev_examples, model)
            my_logger('-------\nDev set accuracy for epoch %d=%1.2f%% %s, loss=%1.2f \n%s\n' % (epoch, dev_acc*100, eval_acc_by_label_str, eval_loss, dist_by_label_str))
            wan.update({'epoch %d acc' % (epoch): '%1.2f%%' % (dev_acc*100)})
            training_results.append((epoch_loss, eval_loss, dev_acc))
        epoch += 1
    if args.test_during_train or (freeze_type == 'all_but_ext_emb' and args.num_of_rand_init > 1):
        print_epochs_data(training_results)
        last_eval_loss = eval_loss
        last_dev_acc = dev_acc
    else:
        last_eval_loss = -1
        last_dev_acc = -1
    del loss
    torch.cuda.empty_cache()
    return model, last_eval_loss, last_dev_acc, eval_acc_by_label_str, dist_by_label_str
def print_epochs_data(training_results):
    """Log the per-epoch (train loss, eval loss, dev accuracy) history."""
    train_losses = str([round(r[0], 2) for r in training_results])
    eval_losses = str([round(r[1], 2) for r in training_results])
    accuracies = str([round(r[2] * 100, 1) for r in training_results])
    my_logger('\n\nTraining Results: \n\tEpoch losses: %s\n\tTest losses: %s\n\tTest accs: %s'
              % (train_losses, eval_losses, accuracies))
def print_models_dif(module_names, prev_model_weights, cur_model_weights):
    """Print the mean absolute difference between two weight snapshots.

    Both snapshot arguments are (weights, layers_weights) pairs as produced
    by get_model_weights. Per-module diffs are printed alongside a bar of
    'O' characters whose length scales with log10 of the diff, then the
    per-layer averages are printed.
    """
    print("\n\nprev - currnt*********************************************************")
    old_weights, old_layers = prev_model_weights
    new_weights, new_layers = cur_model_weights
    for name, old_w, new_w in zip(module_names, old_weights, new_weights):
        delta = (old_w - new_w).abs().mean()
        bar = 'O' * int(np.log10(delta.item() + 1e-12) + 12) * 3
        print("%22s: %1.0e" % (name, delta), bar)
    print('\nLayers:')
    for idx, (old_layer, new_layer) in enumerate(zip(old_layers, new_layers)):
        total = 0
        for old_w, new_w in zip(old_layer, new_layer):
            total += (old_w - new_w).abs().mean()
        total /= len(old_layer)
        print(" layer %2d: %1.0e" % (idx, total), 'O' * int(np.log10(total.item() + 1e-12) + 12) * 3)
    print('\n')
def find_models_dict_diffs(model_a, model_b):
    """Log every state-dict entry whose tensors differ between two models.

    Both arguments are state-dict-like mappings from module name to tensor;
    only entries with a non-zero mean absolute difference are logged.
    """
    my_logger("\n\nprev - currnt*********************************************************")
    for key in model_a:
        delta = (model_a[key] - model_b[key]).abs().mean()
        if delta > 0:
            my_logger('%s: %1.1e' % (key, delta))
def force_freeze(model, prev_modules, mudules_to_freeze):
    # NOTE(review): appears vestigial/incomplete -- the only call site in this
    # file is commented out (and passes a single argument), `model` and
    # `prev_modules` are unused, and assigning the int 1 to `module.data`
    # would clobber a tensor rather than freeze it. Confirm intent before use.
    for module in mudules_to_freeze:
        module.data =1
def get_model_weights(model):
    """Snapshot (clone) key weight tensors for debug weight-change tracking.

    Returns ((weights, layers_weights), names):
      - weights: clones of selected tensors (attention Q/K/V and dense layers
        of the concat layer, the three embedding tables, the classifier head,
        the KAR attention query, and the external embeddings);
      - layers_weights: per-layer tuples of attention/intermediate clones for
        all args.num_hidden_layers layers;
      - names: a label per entry of `weights` (alignment enforced by the
        assert below).
    Intended consumer is print_models_dif.

    NOTE(review): hard-codes the `bert_classifier` attribute path and a
    `knowbert_block`; presumably fails with AttributeError for model variants
    without them (e.g. roberta) -- confirm before reuse.
    """
    # Encoder layer whose weights are snapshotted individually.
    layerToDisplay = args.ext_emb_concat_layer
    weights = [
        model.module.bert_classifier.bert.encoder.layer[layerToDisplay].attention.self.query.weight.data.clone(),
        model.module.bert_classifier.bert.encoder.layer[layerToDisplay].attention.self.key.weight.data.clone(),
        model.module.bert_classifier.bert.encoder.layer[layerToDisplay].attention.self.value.weight.data.clone(),
        ## (next 3 lines are duplicated)
        # model.module.bert_classifier.bert.encoder.layer[layerToDisplay].attention.self.query_t.weight.data.clone(),
        # model.module.bert_classifier.bert.encoder.layer[layerToDisplay].attention.self.key_t.weight.data.clone(),
        # model.module.bert_classifier.bert.encoder.layer[layerToDisplay].attention.self.value_t.weight.data.clone(),
        model.module.bert_classifier.bert.encoder.layer[layerToDisplay].attention.output.dense.weight.data.clone(),
        model.module.bert_classifier.bert.encoder.layer[layerToDisplay].intermediate.dense.weight.data.clone(),
        model.module.bert_classifier.bert.embeddings.word_embeddings.weight.data.clone(),
        model.module.bert_classifier.bert.embeddings.position_embeddings.weight.data.clone(),
        model.module.bert_classifier.bert.embeddings.token_type_embeddings.weight.data.clone(),
        model.module.bert_classifier.classifier.weight.data.clone(),
        model.module.bert_classifier.bert.encoder.knowbert_block.kar_attention.multihead.query.weight.data.clone(),
        model.module.ext_embeddings.weight.data.clone(),
    ]
    layers_weights = [(
        model.module.bert_classifier.bert.encoder.layer[layer].attention.self.query.weight.data.clone(),
        model.module.bert_classifier.bert.encoder.layer[layer].attention.self.key.weight.data.clone(),
        model.module.bert_classifier.bert.encoder.layer[layer].attention.self.value.weight.data.clone(),
        ## (next 3 lines are duplicated)
        # model.module.bert_classifier.bert.encoder.layer[layer].attention.self.query_t.weight.data.clone(),
        # model.module.bert_classifier.bert.encoder.layer[layer].attention.self.key_t.weight.data.clone(),
        # model.module.bert_classifier.bert.encoder.layer[layer].attention.self.value_t.weight.data.clone(),
        model.module.bert_classifier.bert.encoder.layer[layer].attention.output.dense.weight.data.clone(),
        model.module.bert_classifier.bert.encoder.layer[layer].intermediate.dense.weight.data.clone() )
        for layer in range(args.num_hidden_layers)]
    names = ['Query', 'Key', 'Value', 'Norm Linear', 'Intermediate',
             'Word Embeddings', 'Position Embeddings', 'Token Type Embeddings', 'Classifier', 'KAR', 'Ext Embeddings']
    # Keep labels aligned with the snapshot entries.
    assert len(weights) == len(names)
    return (weights, layers_weights), names
def copy_model_params(sub_model_list):
    """Clone every parameter of every sub-module in `sub_model_list`.

    Returns a flat list of cloned tensors, ordered module-by-module (or
    None for an empty list, matching the original contract). Counterpart
    of paste_model_params: used to 'manually' freeze sub-modules whose
    weights would otherwise drift via optimizer momentum even with
    requires_grad=False.

    Bug fix: the original rebound `par` on every loop iteration, so only
    the LAST sub-module's parameters were kept and the rest were dropped.
    """
    if not sub_model_list:
        return None
    par = []
    for sub_model in sub_model_list:
        par.extend(p.clone() for p in sub_model.parameters())
    return par
def paste_model_params(source_par, sub_model_list):
    """Copy saved parameter tensors back into the given sub-modules.

    `source_par` is a list of tensors (as produced by copy_model_params);
    each sub-module's parameters are overwritten in order with clones of
    the saved values.
    """
    for sub_model in sub_model_list:
        for target, saved in zip(sub_model.parameters(), source_par):
            target.data = saved.data.clone()
def get_interactive_freeze(user_in=None):
    """Choose which sub-modules to train, from `user_in` or from stdin.

    Freezes the whole model first, then unfreezes the parts selected by
    the digits present in the input string ('' or 'all' unfreezes
    everything). Returns (freeze_list, is_early_finish): freeze_list holds
    sub-modules that must additionally be frozen 'manually' (snapshot and
    restore, since momentum defeats requires_grad=False), and
    is_early_finish is True when the user typed 'end'.
    """
    print("\n-------------> Please enter freeze typ (1- attention, 2- intermediate, 3- bert embeddings, 4- MLP class., 5- Ext. Emb: ")
    if user_in is None:
        user_in = input()
    manual_freeze_list = []
    freeze_weights(model.module)  # freeze all, then defreeze by user request
    if '1' in user_in:  # train SelfAttention
        print("--- Training SelfAttention")
        freeze_weights(model.module.bert_classifier.bert.encoder.layer[0].attention, defreeze=True)
    if '2' in user_in:  # train Intermediate
        print("--- Training Intermediate")
        freeze_weights(model.module.bert_classifier.bert.encoder.layer[0].intermediate, defreeze=True)
    if '3' in user_in:  # train bert embeddings
        print("--- Training bert embeddings")
        freeze_weights(model.module.bert_classifier.bert.embeddings, defreeze=True)
    if '4' in user_in:  # train the classifier MLP
        print("--- Training MLP of classifier")
        freeze_weights(model.module.bert_classifier.classifier, defreeze=True)
    if '5' in user_in:  # train the external embeddings
        print("--- Training external embeddings")
        freeze_weights(model.module.ext_embeddings, defreeze=True)
    if '6' in user_in:  # train layer 0, manually freeze layer 1
        print("--- Training layer 0")
        freeze_weights(model.module.bert_classifier.bert.encoder.layer[0], defreeze=True)
        manual_freeze_list = [model.module.bert_classifier.bert.encoder.layer[1]]
    if '7' in user_in:  # train layer 1, manually freeze layer 0
        print("--- Training layer 1")
        freeze_weights(model.module.bert_classifier.bert.encoder.layer[1], defreeze=True)
        manual_freeze_list = [model.module.bert_classifier.bert.encoder.layer[0]]
    if user_in in ['', 'all']:
        freeze_weights(model.module, defreeze=True)
    return manual_freeze_list, user_in.lower() == 'end'
def save_model(model, model_dir, model_name):
    """Save a trained model's state dict and its configuration JSON."""
    model_path = os.path.join(model_dir, model_name)
    my_logger('Saving model to %s' % model_path, 1)
    torch.save(model.state_dict(), model_path)
    # The config travels alongside the weights under NEW_CONFIG_NAME.
    config_path = os.path.join(model_dir, NEW_CONFIG_NAME)
    with open(config_path, 'w') as config_file:
        config_file.write(model.module.config.to_json_string())
def add_to_config(config, args):
    """Derive external-embedding settings on `config` from `args`.

    Maps args.ext_embeddings_type to (ext_emb_method, ext_emb_size) and
    copies the remaining embedding-related CLI flags onto the config.
    Also records the resolved method on args.config_ext_emb_method for
    internal use. Raises for an unrecognised type. Returns `config`.
    """
    # type -> (method, size source); sizes are resolved lazily so only the
    # attribute relevant to the chosen type is read.
    resolvers = {
        'fixed_added':                 ('add',     lambda: config.hidden_size),
        'class_fixed_added':           ('add',     lambda: config.hidden_size),
        'first_item_in_class':         ('add',     lambda: config.hidden_size),
        'class_auto_added':            ('add',     lambda: config.hidden_size),
        'class_auto_concat':           ('concat',  lambda: args.concat_embeddings_size),
        'class_fixed_manual_concat':   ('concat',  lambda: args.concat_embeddings_size),
        'class_fixed_concat':          ('concat',  lambda: config.hidden_size),
        'first_item_in_class_concat':  ('concat',  lambda: config.hidden_size),
        'class_auto_project':          ('project', lambda: args.concat_embeddings_size),
        'class_auto_attention':        ('kar',     lambda: args.kar_ext_emb_size),
        '':                            (None,      lambda: config.hidden_size),
    }
    if args.ext_embeddings_type not in resolvers:
        raise Exception("Invalid ext_embeddings_type")
    method, size_of = resolvers[args.ext_embeddings_type]
    config.ext_emb_size = size_of()
    config.ext_emb_method = method
    config.ext_vocab_size = 1000
    config.ext_emb_concat_layer = args.ext_emb_concat_layer
    config.norm_ext_emb = args.norm_ext_emb
    config.num_hidden_layers = args.num_hidden_layers
    config.uniform_ext_emb = args.uniform_ext_emb
    config.no_kar_norm = args.no_kar_norm
    config.debug_type = args.model_debug_type
    # For internal use only (to be added to the command-line record).
    args.config_ext_emb_method = config.ext_emb_method
    return config
def init_and_load_bert_classifier_model(model_dir, model_name):
load_model_file = os.path.join(model_dir, model_name)
load_config_file = os.path.join(model_dir, BERT_CONFIG_NAME)
my_logger("Loading fine-tuned / pre-trained BERT model %s..."%load_model_file)
config = BertConfig(load_config_file)
config = add_to_config(config, args)
model = BertWrapper(config, load_model_file, num_labels=num_labels, ext_embeddings_type=args.ext_embeddings_type)
model.to(device)
if args.local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model = DDP(model)
# ---- (unrelated file content follows) ----
# Match a path to a course.
# TODO(psimakov): linear search is unacceptable
for course in courses:
if path == course.get_slug() or path.startswith(
'%s/' % course.get_slug()) or course.get_slug() == '/':
return course
debug('No mapping for: %s' % path)
return None
def path_join(base, path):
    """Joins 'base' and 'path' ('path' is interpreted as a relative path).

    This method is like os.path.join(), but 'path' is interpreted relatively.
    E.g., os.path.join('/a/b', '/c') yields '/c', but this function yields
    '/a/b/c'.

    Args:
        base: The base path.
        path: The path to append to base; this is treated as a relative path.

    Returns:
        The path obtaining by appending 'path' to 'base'.
    """
    if not os.path.isabs(path):
        return os.path.join(base, path)
    # 'path' is absolute: drop the drive letter (if we are on Windows) and
    # the leading separator, then append what remains to 'base'.
    _, path_sans_drive = os.path.splitdrive(path)
    return os.path.join(base, path_sans_drive[1:])
def abspath(home_folder, filename):
    """Creates an absolute URL for a filename in a home folder."""
    relative = path_join(home_folder, filename)
    return path_join(appengine_config.BUNDLE_ROOT, relative)
def unprefix(path, prefix):
    """Remove the prefix from path. Append '/' if an empty string results."""
    if not path.startswith(prefix):
        raise Exception('Not prefixed.')
    if prefix == '/':
        # The root prefix leaves the path untouched.
        return path
    remainder = path[len(prefix):]
    return remainder if remainder else '/'
def set_static_resource_cache_control(handler):
    """Properly sets Cache-Control for a WebOb/webapp2 response.

    Clears 'no-cache' and marks the response publicly cacheable with the
    module-level default max-age.
    """
    handler.response.cache_control.no_cache = None
    handler.response.cache_control.public = DEFAULT_CACHE_CONTROL_PUBLIC
    handler.response.cache_control.max_age = DEFAULT_CACHE_CONTROL_MAX_AGE
def make_zip_handler(zipfilename):
    """Creates a handler that serves files from a zip file.

    Returns a handler *class* (not an instance) closing over 'zipfilename'.
    """
    class CustomZipHandler(zipserve.ZipHandler):
        """Custom ZipHandler that properly controls caching."""
        def get(self, name):
            """Handles GET request."""
            ZIP_HANDLER_COUNT.inc()
            # 'zipfilename' is captured from the enclosing factory call.
            self.ServeFromZipFile(zipfilename, name)
            count_stats(self)
        def SetCachingHeaders(self):  # pylint: disable=C6409
            """Properly controls caching."""
            set_static_resource_cache_control(self)
    return CustomZipHandler
class AssetHandler(webapp2.RequestHandler):
    """Handles serving of static resources located on the file system."""
    def __init__(self, app_context, filename):
        # NOTE(review): deliberately does not call the webapp2 base __init__;
        # request/response are attached externally by the dispatcher.
        self.app_context = app_context
        self.filename = filename
    def get_mime_type(self, filename, default='application/octet-stream'):
        """Returns the guessed MIME type of 'filename', or 'default'."""
        guess = mimetypes.guess_type(filename)[0]
        if guess is None:
            return default
        return guess
    def get(self):
        """Handles GET requests."""
        debug('File: %s' % self.filename)
        if not self.app_context.fs.isfile(self.filename):
            self.error(404)
            return
        set_static_resource_cache_control(self)
        self.response.headers['Content-Type'] = self.get_mime_type(
            self.filename)
        self.response.write(
            self.app_context.fs.open(self.filename).read())
class ApplicationContext(object):
    """An application context for a request/response."""
    @classmethod
    def get_namespace_name_for_request(cls):
        """Gets the name of the namespace to use for this request.

        (Examples of such namespaces are NDB and memcache.)

        Returns:
            The namespace for the current request, or None if no course matches
            the current request context path.
        """
        course = get_course_for_current_request()
        if course:
            return course.namespace
        return appengine_config.DEFAULT_NAMESPACE_NAME
    @classmethod
    def after_create(cls, instance):
        """Override this method to manipulate freshly created instance."""
        pass
    def __init__(self, site_type, slug, homefolder, namespace, fs=None):
        """Creates new application context.

        Args:
            site_type: Specifies the type of context. Must be 'course' for now.
            slug: A common context path prefix for all URLs in the context.
            homefolder: A folder with the assets belonging to this context.
            namespace: A name of a datastore namespace for use by this context.
            fs: A file system object to be used for accessing homefolder.

        Returns:
            The new instance of namespace object.
        """
        self.type = site_type
        self.slug = slug
        self.homefolder = homefolder
        self.namespace = namespace
        if fs:
            self._fs = fs
        else:
            # Default to a read-only view of the local file system.
            self._fs = LocalReadOnlyFileSystem()
        # Subclass hook for post-construction customization.
        self.after_create(self)
    @ property
    def fs(self):
        # Read-only access to the underlying file system abstraction.
        return self._fs
    def get_namespace_name(self):
        return self.namespace
    def get_home_folder(self):
        return self.homefolder
    def get_slug(self):
        return self.slug
    def get_config_filename(self):
        """Returns absolute location of a course configuration file."""
        filename = abspath(self.get_home_folder(), GCB_CONFIG_FILENAME)
        debug('Config file: %s' % filename)
        return filename
    def get_environ(self):
        """Returns a dict of course configuration variables."""
        course_data_filename = self.get_config_filename()
        # NOTE(review): yaml.load without an explicit Loader can execute
        # arbitrary constructors; course.yaml is presumed trusted here.
        try:
            return yaml.load(self.fs.open(course_data_filename))
        except Exception:
            logging.info('Error: course.yaml file at %s not accessible',
                         course_data_filename)
            raise
    def get_template_home(self):
        """Returns absolute location of a course template folder."""
        path = abspath(self.get_home_folder(), GCB_VIEWS_FOLDER_NAME)
        debug('Template home: %s' % path)
        return path
    def get_data_home(self):
        """Returns absolute location of a course data folder."""
        path = abspath(self.get_home_folder(), GCB_DATA_FOLDER_NAME)
        debug('Data home: %s' % path)
        return path
    def get_template_environ(self, locale, additional_dirs):
        """Create and configure jinja template evaluation environment."""
        template_dir = self.get_template_home()
        dirs = [template_dir]
        if additional_dirs:
            dirs += additional_dirs
        jinja_environment = self.fs.get_jinja_environ(dirs)
        # Install translations so gettext blocks render in 'locale'.
        i18n.get_i18n().set_locale(locale)
        jinja_environment.install_gettext_translations(i18n)
        return jinja_environment
class ApplicationRequestHandler(webapp2.RequestHandler):
    """Handles dispatching of all URL's to proper handlers."""

    @classmethod
    def bind_to(cls, urls, urls_map):
        """Recursively builds a map from a list of (URL, Handler) tuples."""
        for url in urls:
            path_prefix = url[0]
            handler = url[1]
            urls_map[path_prefix] = handler
            # Add child handlers, if the handler exposes any.
            if hasattr(handler, 'get_child_routes'):
                cls.bind_to(handler.get_child_routes(), urls_map)

    @classmethod
    def bind(cls, urls):
        """Builds the URL map and installs it on the class."""
        urls_map = {}
        cls.bind_to(urls, urls_map)
        cls.urls_map = urls_map

    def get_handler(self):
        """Finds a course suitable for handling this request.

        Returns:
            A handler instance bound to the matched course, or None if no
            course or path matches the current request.
        """
        course = get_course_for_current_request()
        if not course:
            return None
        path = get_path_info()
        if not path:
            return None
        return self.get_handler_for_course_type(
            course, unprefix(path, course.get_slug()))

    def get_handler_for_course_type(self, context, path):
        """Gets the right handler for the given context and path.

        Args:
            context: The ApplicationContext matched for this request.
            path: The request path relative to the course slug.

        Returns:
            A handler with app_context/request/response attached, or None if
            neither a static asset nor a dynamic route matches.
        """
        norm_path = os.path.normpath(path)

        # Handle static assets here.
        if norm_path.startswith(GCB_ASSETS_FOLDER_NAME):
            abs_file = abspath(context.get_home_folder(), norm_path)
            # Was AssetHandler(self, ...); the course context is the correct
            # app_context, and passing it here makes the reassignment below
            # merely redundant instead of load-bearing.
            handler = AssetHandler(context, abs_file)
            handler.request = self.request
            handler.response = self.response
            handler.app_context = context
            debug('Course asset: %s' % abs_file)
            STATIC_HANDLER_COUNT.inc()
            return handler

        # Handle all dynamic handlers here.
        if path in ApplicationRequestHandler.urls_map:
            factory = ApplicationRequestHandler.urls_map[path]
            handler = factory()
            handler.app_context = context
            handler.request = self.request
            handler.response = self.response
            debug('Handler: %s > %s' % (path, handler.__class__.__name__))
            DYNAMIC_HANDLER_COUNT.inc()
            return handler

        NO_HANDLER_COUNT.inc()
        return None

    def _dispatch(self, verb, path):
        """Shared dispatch logic for all HTTP verbs.

        Resolves the handler for 'path' and invokes its method named 'verb'
        ('get', 'post', 'put' or 'delete'); responds 404 if unresolved. Stats
        are counted and the thread-local path info is cleared either way.
        """
        try:
            set_path_info(path)
            handler = self.get_handler()
            if not handler:
                self.error(404)
            else:
                getattr(handler, verb)()
        finally:
            count_stats(self)
            unset_path_info()

    def get(self, path):
        self._dispatch('get', path)

    def post(self, path):
        self._dispatch('post', path)

    def put(self, path):
        self._dispatch('put', path)

    def delete(self, path):
        self._dispatch('delete', path)
def assert_mapped(src, dest):
    """Asserts that path 'src' maps to the course with slug 'dest' (or None)."""
    try:
        set_path_info(src)
        course = get_course_for_current_request()
        if not dest:
            assert course is None
        else:
            assert course.get_slug() == dest
    finally:
        # Always clear the thread-local path info, even on assertion failure.
        unset_path_info()
def assert_handled(src, target_handler):
    """Asserts 'src' resolves to an instance of 'target_handler'; returns it.

    Passing target_handler=None asserts that no handler matches.
    """
    try:
        set_path_info(src)
        handler = ApplicationRequestHandler().get_handler()
        if handler is None and target_handler is None:
            return None
        assert isinstance(handler, target_handler)
        return handler
    finally:
        unset_path_info()
def assert_fails(func):
    """Asserts that calling func() raises; raises Exception if it succeeds."""
    try:
        func()
    except Exception:  # pylint: disable=W0703
        # Expected: the callable failed.
        return
    raise Exception()
def test_unprefix():
    """Tests unprefix() on root, nested and file paths."""
    assert unprefix('/', '/') == '/'
    assert unprefix('/a/b/c', '/a/b') == '/c'
    assert unprefix('/a/b/index.html', '/a/b') == '/index.html'
    assert unprefix('/a/b', '/a/b') == '/'
def test_rule_definitions():
    """Test various rewrite rule definitions."""
    # Check that the default site is created when no rules are specified.
    assert len(get_all_courses()) == 1

    # Test that empty definition is ok.
    os.environ[GCB_COURSES_CONFIG_ENV_VAR_NAME] = ''
    assert len(get_all_courses()) == 1

    # Test one rule parsing.
    os.environ[GCB_COURSES_CONFIG_ENV_VAR_NAME] = (
        'course:/google/pswg:/sites/pswg')
    rules = get_all_courses()
    assert len(get_all_courses()) == 1
    rule = rules[0]
    assert rule.get_slug() == '/google/pswg'
    assert rule.get_home_folder() == '/sites/pswg'

    # Test two rule parsing.
    os.environ[GCB_COURSES_CONFIG_ENV_VAR_NAME] = (
        'course:/a/b:/c/d, course:/e/f:/g/h')
    assert len(get_all_courses()) == 2

    # Test that two of the same slugs are not allowed.
    os.environ[GCB_COURSES_CONFIG_ENV_VAR_NAME] = (
        'foo:/a/b:/c/d, bar:/a/b:/c/d')
    assert_fails(get_all_courses)

    # Test that only 'course' is supported.
    os.environ[GCB_COURSES_CONFIG_ENV_VAR_NAME] = (
        'foo:/a/b:/c/d, bar:/e/f:/g/h')
    assert_fails(get_all_courses)

    # Cleanup so later tests see the default configuration again.
    del os.environ[GCB_COURSES_CONFIG_ENV_VAR_NAME]

    # Test namespaces.
    set_path_info('/')
    try:
        os.environ[GCB_COURSES_CONFIG_ENV_VAR_NAME] = 'course:/:/c/d'
        assert ApplicationContext.get_namespace_name_for_request() == (
            'gcb-course-c-d')
    finally:
        unset_path_info()
def test_url_to_rule_mapping():
    """Tests mapping of a URL to a rule."""
    # default mapping
    assert_mapped('/favicon.ico', '/')
    assert_mapped('/assets/img/foo.png', '/')

    # explicit mapping
    os.environ[GCB_COURSES_CONFIG_ENV_VAR_NAME] = (
        'course:/a/b:/c/d, course:/e/f:/g/h')

    assert_mapped('/a/b', '/a/b')
    assert_mapped('/a/b/', '/a/b')
    assert_mapped('/a/b/c', '/a/b')
    assert_mapped('/a/b/c', '/a/b')
    assert_mapped('/e/f', '/e/f')
    assert_mapped('/e/f/assets', '/e/f')
    assert_mapped('/e/f/views', '/e/f')
    # Paths without a leading slash or with no matching slug must not map.
    assert_mapped('e/f', None)
    assert_mapped('foo', None)

    # Cleanup.
    del os.environ[GCB_COURSES_CONFIG_ENV_VAR_NAME]
def test_url_to_handler_mapping_for_course_type():
"""Tests mapping of a URL to a handler for course type."""
# setup rules
os.environ[GCB_COURSES_CONFIG_ENV_VAR_NAME] = (
'course:/a/b:/c/d, course:/e/f:/g/h')
# setup helper classes
class FakeHandler0(object):
def __init__(self):
self.app_context = None
class FakeHandler1(object):
def __init__(self):
self.app_context = None
class FakeHandler2(object):
def __init__(self):
self.app_context = None
# Setup handler.
handler0 = FakeHandler0
handler1 = FakeHandler1
handler2 = FakeHandler2
urls = [('/', handler0), ('/foo', handler1), ('/bar', handler2)]
ApplicationRequestHandler.bind(urls)
# Test proper handler mappings.
assert_handled('/a/b', FakeHandler0)
assert_handled('/a/b/', FakeHandler0)
assert_handled('/a/b/foo', FakeHandler1)
assert_handled('/a/b/bar', FakeHandler2)
# Test assets mapping.
handler = assert_handled('/a/b/assets/img/foo.png', AssetHandler)
assert os.path.normpath(handler.app_context.get_template_home()).endswith(
os.path.normpath('/coursebuilder/c/d/views'))
# This is allowed as we don't go out of /assets/...
handler = assert_handled(
'/a/b/assets/foo/../models/models.py', AssetHandler)
assert os.path.normpath(handler.filename).endswith(
os.path.normpath('/coursebuilder/c/d/assets/models/models.py'))
# This is not allowed as we do go out of /assets/...
assert_handled('/a/b/assets/foo/../../models/models.py', None)
# Test negative cases
assert_handled('/foo', None)
assert_handled('/baz', None)
# Site 'views' and 'data' are not accessible
assert_handled('/a/b/view/base.html', None)
assert_handled('/a/b/data/units.csv', None)
# Default mapping
del os.environ[GCB_COURSES_CONFIG_ENV_VAR_NAME]
urls = [('/', handler0), ('/foo', handler1), ('/bar', handler2)]
# Positive cases
assert_handled('/', FakeHandler0)
assert_handled('/foo', FakeHandler1)
assert_handled('/bar', FakeHandler2)
handler = assert_handled('/assets/js/main.js', AssetHandler)
assert os.path.normpath(handler.app_context.get_template_home()).endswith(
# ---- (unrelated file content follows) ----
from __future__ import unicode_literals
import re
from django import forms
from django.contrib.auth.models import User
from django.contrib.postgres.forms.array import SimpleArrayField
from django.db.models import Count, Q
from mptt.forms import TreeNodeChoiceField
from taggit.forms import TagField
from timezone_field import TimeZoneFormField
from extras.forms import AddRemoveTagsForm, CustomFieldForm, CustomFieldBulkEditForm, CustomFieldFilterForm
from ipam.models import IPAddress, VLAN, VLANGroup
from tenancy.forms import TenancyForm
from tenancy.models import Tenant
from utilities.forms import (
AnnotatedMultipleChoiceField, APISelect, add_blank_choice, ArrayFieldSelectMultiple, BootstrapMixin, BulkEditForm,
BulkEditNullBooleanSelect, ChainedFieldsMixin, ChainedModelChoiceField, CommentField, ComponentForm,
ConfirmationForm, CSVChoiceField, ExpandableNameField, FilterChoiceField, FilterTreeNodeMultipleChoiceField,
FlexibleModelChoiceField, Livesearch, SelectWithDisabled, SelectWithPK, SmallTextarea, SlugField,
)
from virtualization.models import Cluster
from .constants import (
CONNECTION_STATUS_CHOICES, CONNECTION_STATUS_CONNECTED, DEVICE_STATUS_CHOICES, IFACE_FF_CHOICES, IFACE_FF_LAG,
IFACE_MODE_ACCESS, IFACE_MODE_CHOICES, IFACE_MODE_TAGGED_ALL, IFACE_ORDERING_CHOICES, RACK_FACE_CHOICES,
RACK_TYPE_CHOICES, RACK_WIDTH_CHOICES, RACK_WIDTH_19IN, RACK_WIDTH_23IN, SITE_STATUS_CHOICES, SUBDEVICE_ROLE_CHILD,
SUBDEVICE_ROLE_PARENT, SUBDEVICE_ROLE_CHOICES,
)
from .formfields import MACAddressFormField
from .models import (
DeviceBay, DeviceBayTemplate, ConsolePort, ConsolePortTemplate, ConsoleServerPort, ConsoleServerPortTemplate,
Device, DeviceRole, DeviceType, Interface, InterfaceConnection, InterfaceTemplate, Manufacturer, InventoryItem,
Platform, PowerOutlet, PowerOutletTemplate, PowerPort, PowerPortTemplate, Rack, RackGroup, RackReservation,
RackRole, Region, Site, VirtualChassis
)
# Matches a device referenced by primary key, e.g. '{123}'.
DEVICE_BY_PK_RE = r'{\d+\}'

# Rendered as HTML in interface-mode form help.
INTERFACE_MODE_HELP_TEXT = """
Access: One untagged VLAN<br />
Tagged: One untagged VLAN and/or one or more tagged VLANs<br />
Tagged All: Implies all VLANs are available (w/optional untagged VLAN)
"""
def get_device_by_name_or_pk(name):
    """
    Attempt to retrieve a device by either its name or primary key ('{pk}').

    Raises Device.DoesNotExist if no matching device is found.
    """
    # Use fullmatch: re.match() only anchors the start, so a device *name*
    # that merely begins with '{<digits>}' (e.g. '{1}edge') would otherwise
    # be misinterpreted as a PK reference.
    if re.fullmatch(DEVICE_BY_PK_RE, name):
        pk = name.strip('{}')
        device = Device.objects.get(pk=pk)
    else:
        device = Device.objects.get(name=name)
    return device
class BulkRenameForm(forms.Form):
    """
    An extendable form to be used for renaming device components in bulk.
    """
    # Substring to search for and its replacement in each component name.
    find = forms.CharField()
    replace = forms.CharField()
#
# Regions
#
class RegionForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a Region."""
    slug = SlugField()
    class Meta:
        model = Region
        fields = ['parent', 'name', 'slug']
class RegionCSVForm(forms.ModelForm):
    """CSV import form for Regions; parent is referenced by name."""
    parent = forms.ModelChoiceField(
        queryset=Region.objects.all(),
        required=False,
        to_field_name='name',
        help_text='Name of parent region',
        error_messages={
            'invalid_choice': 'Region not found.',
        }
    )
    class Meta:
        model = Region
        fields = Region.csv_headers
        help_texts = {
            'name': 'Region name',
            'slug': 'URL-friendly slug',
        }
class RegionFilterForm(BootstrapMixin, forms.Form):
    """Free-text search filter for the region list view."""
    model = Site
    q = forms.CharField(required=False, label='Search')
#
# Sites
#
class SiteForm(BootstrapMixin, TenancyForm, CustomFieldForm):
    """Create/edit form for a Site."""
    region = TreeNodeChoiceField(queryset=Region.objects.all(), required=False)
    slug = SlugField()
    comments = CommentField()
    tags = TagField(required=False)
    class Meta:
        model = Site
        fields = [
            'name', 'slug', 'status', 'region', 'tenant_group', 'tenant', 'facility', 'asn', 'time_zone', 'description',
            'physical_address', 'shipping_address', 'latitude', 'longitude', 'contact_name', 'contact_phone',
            'contact_email', 'comments', 'tags',
        ]
        widgets = {
            'physical_address': SmallTextarea(attrs={'rows': 3}),
            'shipping_address': SmallTextarea(attrs={'rows': 3}),
        }
        help_texts = {
            'name': "Full name of the site",
            'facility': "Data center provider and facility (e.g. Equinix NY7)",
            'asn': "BGP autonomous system number",
            'time_zone': "Local time zone",
            'description': "Short description (will appear in sites list)",
            'physical_address': "Physical location of the building (e.g. for GPS)",
            'shipping_address': "If different from the physical address",
            'latitude': "Latitude in decimal format (xx.yyyyyy)",
            'longitude': "Longitude in decimal format (xx.yyyyyy)"
        }
class SiteCSVForm(forms.ModelForm):
    """CSV import form for Sites; region and tenant are referenced by name."""
    status = CSVChoiceField(
        choices=SITE_STATUS_CHOICES,
        required=False,
        help_text='Operational status'
    )
    region = forms.ModelChoiceField(
        queryset=Region.objects.all(),
        required=False,
        to_field_name='name',
        help_text='Name of assigned region',
        error_messages={
            'invalid_choice': 'Region not found.',
        }
    )
    tenant = forms.ModelChoiceField(
        queryset=Tenant.objects.all(),
        required=False,
        to_field_name='name',
        help_text='Name of assigned tenant',
        error_messages={
            'invalid_choice': 'Tenant not found.',
        }
    )
    class Meta:
        model = Site
        fields = Site.csv_headers
        help_texts = {
            'name': 'Site name',
            'slug': 'URL-friendly slug',
            'asn': '32-bit autonomous system number',
        }
class SiteBulkEditForm(BootstrapMixin, AddRemoveTagsForm, CustomFieldBulkEditForm):
    """Bulk edit form for Sites; blank fields are left unchanged."""
    pk = forms.ModelMultipleChoiceField(
        queryset=Site.objects.all(),
        widget=forms.MultipleHiddenInput
    )
    status = forms.ChoiceField(
        choices=add_blank_choice(SITE_STATUS_CHOICES),
        required=False,
        initial=''
    )
    region = TreeNodeChoiceField(
        queryset=Region.objects.all(),
        required=False
    )
    tenant = forms.ModelChoiceField(
        queryset=Tenant.objects.all(),
        required=False
    )
    asn = forms.IntegerField(
        min_value=1,
        max_value=4294967295,
        required=False,
        label='ASN'
    )
    description = forms.CharField(
        max_length=100,
        required=False
    )
    time_zone = TimeZoneFormField(
        choices=add_blank_choice(TimeZoneFormField().choices),
        required=False
    )
    class Meta:
        # Fields which may be set to empty/null via bulk edit.
        nullable_fields = ['region', 'tenant', 'asn', 'description', 'time_zone']
class SiteFilterForm(BootstrapMixin, CustomFieldFilterForm):
    """Filter form for the site list view."""
    model = Site
    q = forms.CharField(required=False, label='Search')
    status = AnnotatedMultipleChoiceField(
        choices=SITE_STATUS_CHOICES,
        annotate=Site.objects.all(),
        annotate_field='status',
        required=False
    )
    region = FilterTreeNodeMultipleChoiceField(
        queryset=Region.objects.annotate(filter_count=Count('sites')),
        to_field_name='slug',
        required=False,
    )
    tenant = FilterChoiceField(
        queryset=Tenant.objects.annotate(filter_count=Count('sites')),
        to_field_name='slug',
        null_label='-- None --'
    )
#
# Rack groups
#
class RackGroupForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a RackGroup."""
    slug = SlugField()
    class Meta:
        model = RackGroup
        fields = ['site', 'name', 'slug']
class RackGroupCSVForm(forms.ModelForm):
    """CSV import form for RackGroups; site is referenced by name."""
    site = forms.ModelChoiceField(
        queryset=Site.objects.all(),
        to_field_name='name',
        help_text='Name of parent site',
        error_messages={
            'invalid_choice': 'Site not found.',
        }
    )
    class Meta:
        model = RackGroup
        fields = RackGroup.csv_headers
        help_texts = {
            'name': 'Name of rack group',
            'slug': 'URL-friendly slug',
        }
class RackGroupFilterForm(BootstrapMixin, forms.Form):
    """Filter form for the rack group list view."""
    site = FilterChoiceField(queryset=Site.objects.annotate(filter_count=Count('rack_groups')), to_field_name='slug')
#
# Rack roles
#
class RackRoleForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a RackRole."""
    slug = SlugField()
    class Meta:
        model = RackRole
        fields = ['name', 'slug', 'color']
class RackRoleCSVForm(forms.ModelForm):
    """CSV import form for RackRoles."""
    slug = SlugField()
    class Meta:
        model = RackRole
        fields = RackRole.csv_headers
        help_texts = {
            'name': 'Name of rack role',
            'color': 'RGB color in hexadecimal (e.g. 00ff00)'
        }
#
# Racks
#
class RackForm(BootstrapMixin, TenancyForm, CustomFieldForm):
    """Create/edit form for a Rack; group choices are chained to site."""
    group = ChainedModelChoiceField(
        queryset=RackGroup.objects.all(),
        chains=(
            ('site', 'site'),
        ),
        required=False,
        widget=APISelect(
            api_url='/api/dcim/rack-groups/?site_id={{site}}',
        )
    )
    comments = CommentField()
    tags = TagField(required=False)
    class Meta:
        model = Rack
        fields = [
            'site', 'group', 'name', 'facility_id', 'tenant_group', 'tenant', 'role', 'serial', 'type', 'width',
            'u_height', 'desc_units', 'comments', 'tags',
        ]
        help_texts = {
            'site': "The site at which the rack exists",
            'name': "Organizational rack name",
            'facility_id': "The unique rack ID assigned by the facility",
            'u_height': "Height in rack units",
        }
        widgets = {
            # Changing the site re-filters the 'group' choices client-side.
            'site': forms.Select(attrs={'filter-for': 'group'}),
        }
class RackCSVForm(forms.ModelForm):
    """CSV import form for Racks; related objects are referenced by name."""
    site = forms.ModelChoiceField(
        queryset=Site.objects.all(),
        to_field_name='name',
        help_text='Name of parent site',
        error_messages={
            'invalid_choice': 'Site not found.',
        }
    )
    group_name = forms.CharField(
        help_text='Name of rack group',
        required=False
    )
    tenant = forms.ModelChoiceField(
        queryset=Tenant.objects.all(),
        required=False,
        to_field_name='name',
        help_text='Name of assigned tenant',
        error_messages={
            'invalid_choice': 'Tenant not found.',
        }
    )
    role = forms.ModelChoiceField(
        queryset=RackRole.objects.all(),
        required=False,
        to_field_name='name',
        help_text='Name of assigned role',
        error_messages={
            'invalid_choice': 'Role not found.',
        }
    )
    type = CSVChoiceField(
        choices=RACK_TYPE_CHOICES,
        required=False,
        help_text='Rack type'
    )
    width = forms.ChoiceField(
        choices=(
            (RACK_WIDTH_19IN, '19'),
            (RACK_WIDTH_23IN, '23'),
        ),
        help_text='Rail-to-rail width (in inches)'
    )
    class Meta:
        model = Rack
        fields = Rack.csv_headers
        help_texts = {
            'name': 'Rack name',
            'u_height': 'Height in rack units',
        }
    def clean(self):
        """Resolves the rack group and enforces per-group uniqueness."""
        super(RackCSVForm, self).clean()
        site = self.cleaned_data.get('site')
        group_name = self.cleaned_data.get('group_name')
        name = self.cleaned_data.get('name')
        facility_id = self.cleaned_data.get('facility_id')
        # Validate rack group (group_name is a plain string; resolve it
        # against the parent site).
        if group_name:
            try:
                self.instance.group = RackGroup.objects.get(site=site, name=group_name)
            except RackGroup.DoesNotExist:
                raise forms.ValidationError("Rack group {} not found for site {}".format(group_name, site))
        # Validate uniqueness of rack name within group
        if Rack.objects.filter(group=self.instance.group, name=name).exists():
            raise forms.ValidationError(
                "A rack named {} already exists within group {}".format(name, group_name)
            )
        # Validate uniqueness of facility ID within group
        if facility_id and Rack.objects.filter(group=self.instance.group, facility_id=facility_id).exists():
            raise forms.ValidationError(
                "A rack with the facility ID {} already exists within group {}".format(facility_id, group_name)
            )
class RackBulkEditForm(BootstrapMixin, AddRemoveTagsForm, CustomFieldBulkEditForm):
    """Bulk edit form for Racks; blank fields are left unchanged."""
    pk = forms.ModelMultipleChoiceField(queryset=Rack.objects.all(), widget=forms.MultipleHiddenInput)
    site = forms.ModelChoiceField(queryset=Site.objects.all(), required=False, label='Site')
    group = forms.ModelChoiceField(queryset=RackGroup.objects.all(), required=False, label='Group')
    tenant = forms.ModelChoiceField(queryset=Tenant.objects.all(), required=False)
    role = forms.ModelChoiceField(queryset=RackRole.objects.all(), required=False)
    serial = forms.CharField(max_length=50, required=False, label='Serial Number')
    type = forms.ChoiceField(choices=add_blank_choice(RACK_TYPE_CHOICES), required=False, label='Type')
    width = forms.ChoiceField(choices=add_blank_choice(RACK_WIDTH_CHOICES), required=False, label='Width')
    u_height = forms.IntegerField(required=False, label='Height (U)')
    desc_units = forms.NullBooleanField(required=False, widget=BulkEditNullBooleanSelect, label='Descending units')
    comments = CommentField(widget=SmallTextarea)
    class Meta:
        # Fields which may be set to empty/null via bulk edit.
        nullable_fields = ['group', 'tenant', 'role', 'serial', 'comments']
class RackFilterForm(BootstrapMixin, CustomFieldFilterForm):
    """Filter form for the rack list view."""
    model = Rack
    q = forms.CharField(required=False, label='Search')
    site = FilterChoiceField(
        queryset=Site.objects.annotate(filter_count=Count('racks')),
        to_field_name='slug'
    )
    group_id = FilterChoiceField(
        queryset=RackGroup.objects.select_related('site').annotate(filter_count=Count('racks')),
        label='Rack group',
        null_label='-- None --'
    )
    tenant = FilterChoiceField(
        queryset=Tenant.objects.annotate(filter_count=Count('racks')),
        to_field_name='slug',
        null_label='-- None --'
    )
    role = FilterChoiceField(
        queryset=RackRole.objects.annotate(filter_count=Count('racks')),
        to_field_name='slug',
        null_label='-- None --'
    )
#
# Rack reservations
#
class RackReservationForm(BootstrapMixin, TenancyForm, forms.ModelForm):
    """Create/edit form for a RackReservation."""
    units = SimpleArrayField(forms.IntegerField(), widget=ArrayFieldSelectMultiple(attrs={'size': 10}))
    user = forms.ModelChoiceField(queryset=User.objects.order_by('username'))
    class Meta:
        model = RackReservation
        fields = ['units', 'user', 'tenant_group', 'tenant', 'description']
    def __init__(self, *args, **kwargs):
        super(RackReservationForm, self).__init__(*args, **kwargs)
        # Populate rack unit choices
        self.fields['units'].widget.choices = self._get_unit_choices()
    def _get_unit_choices(self):
        """Builds unit choices for the rack, disabling already-reserved units.

        Units held by this reservation itself are excluded from the disabled
        set so they remain selectable when editing.
        """
        rack = self.instance.rack
        reserved_units = []
        for resv in rack.reservations.exclude(pk=self.instance.pk):
            for u in resv.units:
                reserved_units.append(u)
        unit_choices = [(u, {'label': str(u), 'disabled': u in reserved_units}) for u in rack.units]
        return unit_choices
class RackReservationFilterForm(BootstrapMixin, forms.Form):
    """Filter form for the rack reservation list view."""
    q = forms.CharField(required=False, label='Search')
    site = FilterChoiceField(
        queryset=Site.objects.annotate(filter_count=Count('racks__reservations')),
        to_field_name='slug'
    )
    group_id = FilterChoiceField(
        queryset=RackGroup.objects.select_related('site').annotate(filter_count=Count('racks__reservations')),
        label='Rack group',
        null_label='-- None --'
    )
    tenant = FilterChoiceField(
        queryset=Tenant.objects.annotate(filter_count=Count('rackreservations')),
        to_field_name='slug',
        null_label='-- None --'
    )
class RackReservationBulkEditForm(BootstrapMixin, BulkEditForm):
    """Bulk edit form for RackReservations."""
    pk = forms.ModelMultipleChoiceField(queryset=RackReservation.objects.all(), widget=forms.MultipleHiddenInput)
    user = forms.ModelChoiceField(queryset=User.objects.order_by('username'), required=False)
    tenant = forms.ModelChoiceField(queryset=Tenant.objects.all(), required=False)
    description = forms.CharField(max_length=100, required=False)
    class Meta:
        nullable_fields = []
#
# Manufacturers
#
class ManufacturerForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a Manufacturer."""
    slug = SlugField()
    class Meta:
        model = Manufacturer
        fields = ['name', 'slug']
class ManufacturerCSVForm(forms.ModelForm):
    """CSV import form for Manufacturers."""
    class Meta:
        model = Manufacturer
        fields = Manufacturer.csv_headers
        help_texts = {
            'name': 'Manufacturer name',
            'slug': 'URL-friendly slug',
        }
#
# Device types
#
class DeviceTypeForm(BootstrapMixin, CustomFieldForm):
    """Create/edit form for a DeviceType; slug is derived from the model name."""
    slug = SlugField(slug_source='model')
    tags = TagField(required=False)
    class Meta:
        model = DeviceType
        fields = [
            'manufacturer', 'model', 'slug', 'part_number', 'u_height', 'is_full_depth', 'is_console_server', 'is_pdu',
            'is_network_device', 'subdevice_role', 'interface_ordering', 'comments', 'tags',
        ]
        labels = {
            'interface_ordering': 'Order interfaces by',
        }
class DeviceTypeCSVForm(forms.ModelForm):
    """CSV import form for DeviceTypes; manufacturer is referenced by name."""
    manufacturer = forms.ModelChoiceField(
        queryset=Manufacturer.objects.all(),
        required=True,
        to_field_name='name',
        help_text='Manufacturer name',
        error_messages={
            'invalid_choice': 'Manufacturer not found.',
        }
    )
    subdevice_role = CSVChoiceField(
        choices=SUBDEVICE_ROLE_CHOICES,
        required=False,
        help_text='Parent/child status'
    )
    interface_ordering = CSVChoiceField(
        choices=IFACE_ORDERING_CHOICES,
        required=False,
        help_text='Interface ordering'
    )
    class Meta:
        model = DeviceType
        fields = DeviceType.csv_headers
        help_texts = {
            'model': 'Model name',
            'slug': 'URL-friendly slug',
        }
class DeviceTypeBulkEditForm(BootstrapMixin, | |
# Repository: bovlb/the-blue-alliance
import copy
import math
from collections import defaultdict
from typing import (
cast,
Dict,
List,
Literal,
Mapping,
MutableMapping,
Optional,
Tuple,
TypedDict,
)
import numpy as np
from pyre_extensions.refinement import none_throws, safe_cast
from backend.common.consts.alliance_color import (
ALLIANCE_COLORS,
AllianceColor,
OPPONENT,
TMatchWinner,
)
from backend.common.consts.comp_level import CompLevel
from backend.common.consts.event_type import (
CMP_EVENT_TYPES,
EventType,
SEASON_EVENT_TYPES,
)
from backend.common.helpers.event_helper import EventHelper
from backend.common.helpers.match_helper import MatchHelper
from backend.common.models.event import Event
from backend.common.models.event_predictions import (
MatchPrediction,
MatchPredictionStatsLevel,
TEventStatMeanVars,
TMatchPredictions,
TMatchPredictionStats,
TRankingPrediction,
TRankingPredictions,
TRankingPredictionStats,
TStatMeanVar,
)
from backend.common.models.keys import TeamKey
from backend.common.models.match import Match
from backend.common.models.team import Team
from backend.common.queries.event_query import TeamYearEventsQuery
class TComputedMatchInfo(TypedDict):
    # Per-team contribution estimates for a single match:
    # team key (e.g. 'frc254') -> posterior mean / variance of the stat.
    mean: Dict[TeamKey, float]
    var: Dict[TeamKey, float]
class ContributionCalculator:
    def __init__(
        self,
        event: Event,
        matches: List[Match],
        stat: str,
        default_mean: float,
        default_var: float,
    ) -> None:
        """
        Sets up the least-squares matrices used to estimate per-team
        contributions to an alliance stat.

        stat: 'score' or a specific breakdown like 'auto_points' or 'boulders'
        default_mean/default_var: priors for teams with no past-event data.
        """
        self._event = event
        self._matches = matches
        self._stat = stat
        self._default_mean = default_mean
        self._default_var = default_var
        self._team_list, self._team_id_map = self._build_team_mapping()
        # Setup matrices: one row per alliance per match (2 * m rows).
        m = len(self._matches)
        t = len(self._team_list)
        self._Ao = np.zeros((2 * m, t))  # Match teams
        self._Mmean = np.zeros((2 * m, 1))  # Means
        self._Mvar = np.zeros((2 * m, 1))  # Variances
        # Past event stats for initialization
        self._past_stats_mean, self._past_stats_var = self._get_past_stats(
            self._event, self._team_list
        )
        # For finding event averages for initialization
        self._mean_sums = []
        self._var_sums = []
        # These aren't used to persist state, just allocating space
        # (np.ndarray(t) is an *uninitialized* buffer, unlike np.zeros).
        self._Oe = np.zeros((t, 1))  # Prior estimates
        self._diags = np.ndarray(t)  # Prior estimates variances
        # Things to return
        self._means: Dict[TeamKey, float] = {}
        self._vars: Dict[TeamKey, float] = {}
def _build_team_mapping(self) -> Tuple[List[TeamKey], Dict[TeamKey, int]]:
"""
Returns (team_list, team_id_map)
team_list: A list of team_str such as 'frc254' or 'frc254B'
team_id_map: A dict of key: team_str, value: row index in x_matrix that corresponds to the team
"""
# Build team list
team_list = set()
for match in self._matches:
for alliance_color in ALLIANCE_COLORS:
for team in match.alliances[alliance_color]["teams"]:
team_list.add(team)
team_list = list(team_list)
team_id_map = {}
for i, team in enumerate(team_list):
team_id_map[team] = i
return team_list, team_id_map
def _get_past_stats(
    self, cur_event: Event, team_list: List[TeamKey]
) -> Tuple[Mapping[TeamKey, List[float]], Mapping[TeamKey, List[float]]]:
    """Fetch each team's stat mean/var from earlier same-season events.

    Only events with a season event type that started before `cur_event`
    (excluding CMP finals) and that carry stored qual predictions
    contribute. Returns (past_stats_mean, past_stats_var), each mapping
    team key -> list of values.
    """
    past_stats_mean: MutableMapping[TeamKey, List[float]] = defaultdict(
        list
    )  # team key > values
    past_stats_var: MutableMapping[TeamKey, List[float]] = defaultdict(
        list
    )  # team key > values

    no_priors_team_list = team_list
    # range(1) means only the current year is consulted; the loop shape and
    # the year_diff != 0 branches below look like scaffolding for a future
    # multi-year lookback (per the TODOs).
    for year_diff in range(1):
        # Kick off all event queries asynchronously before consuming any.
        team_events_futures = []
        for team in no_priors_team_list:
            team_events_futures.append(
                (
                    team,
                    TeamYearEventsQuery(
                        team, cur_event.year - year_diff
                    ).fetch_async(),
                )
            )

        no_priors_team_list = []
        for team, events_future in team_events_futures:
            events = events_future.get_result()
            events = EventHelper.sorted_events(events)
            no_past_mean = True
            for event in events:
                if (
                    event.event_type_enum in SEASON_EVENT_TYPES
                    and event.start_date < cur_event.start_date
                    and event.event_type_enum != EventType.CMP_FINALS
                    and event.details
                ):
                    # event.details is backed by in-context cache
                    predictions = event.details.predictions
                    if (
                        predictions
                        and predictions.get("stat_mean_vars")
                        and none_throws(predictions["stat_mean_vars"]).get("qual")
                    ):
                        team_mean = none_throws(predictions["stat_mean_vars"])[
                            "qual"
                        ][self._stat]["mean"].get(team)
                        if team_mean is not None:
                            if year_diff != 0:
                                team_mean *= (
                                    1  # TODO: Hacky; scale based on actual data
                                )
                            past_stats_mean[team].append(team_mean)
                            no_past_mean = False

                        team_var = none_throws(predictions["stat_mean_vars"])[
                            "qual"
                        ][self._stat]["var"].get(team)
                        if team_var is not None:
                            if year_diff != 0:
                                team_var = (
                                    self._default_var * 3
                                )  # TODO: Hacky; scale based on actual data
                            past_stats_var[team].append(team_var)
            if no_past_mean:
                # No usable prior found; this team would be retried at the
                # next year_diff if the lookback window were widened.
                no_priors_team_list.append(team)
    return past_stats_mean, past_stats_var
def _normpdf(self, x: float, mu: float, sigma: float) -> float:
x = float(x)
mu = float(mu)
sigma = float(sigma)
u = (x - mu) / abs(sigma)
y = (1.0 / (np.sqrt(2.0 * np.pi) * abs(sigma))) * np.exp(-u * u / 2.0)
return y
def calculate_before_match(self, i: int) -> TComputedMatchInfo:
# Used for both mean and var
AoT = self._Ao.transpose()
Aoo = np.dot(AoT, self._Ao)
####################################################################
# Estimate Team Means
# Populate priors
for team in self._team_list:
mean = self._default_mean
if team in self._past_stats_mean:
# Use team's past means
mean = 0
weight_sum = 0
for j, o in enumerate(reversed(self._past_stats_mean[team])):
weight = pow(0.1, j)
mean += weight * o
weight_sum += weight
mean /= weight_sum
else:
if self._past_stats_mean:
# Use averages from other past teams
past_means = [ato[-1] for ato in self._past_stats_mean.values()]
mean = np.mean(np.asarray(past_means))
elif self._mean_sums:
# Use averages from this event
mean = np.mean(self._mean_sums) / 3
self._Oe[self._team_id_map[team]] = mean
self._diags[self._team_id_map[team]] = 3 # TODO
# MMSE Contribution Mean
Omean = np.linalg.inv(Aoo + np.diag(self._diags)).dot(
AoT.dot(self._Mmean) + np.diag(self._diags).dot(self._Oe)
)
for team, Omean in zip(self._team_list, Omean):
self._means[team] = Omean[0]
####################################################################
# Estimate Team Variances
# Populate priors
for team in self._team_list:
var = self._default_var
if team in self._past_stats_var:
# Use team's past variances
var = 0
weight_sum = 0
for j, o in enumerate(reversed(self._past_stats_var[team])):
weight = pow(0.1, j)
var += weight * o
weight_sum += weight
var /= weight_sum
# else:
# if self._past_stats_var:
# # Use averages from other past teams
# var = np.mean([ato[-1] for ato in self._past_stats_var.values()])
# elif self._var_sums:
# # Use averages from this event
# var = np.mean(self._var_sums) / 3
self._Oe[self._team_id_map[team]] = var
self._diags[self._team_id_map[team]] = 3 # TODO
# MMSE Contribution Variance
Ovar = abs(
np.linalg.inv(Aoo + np.diag(self._diags)).dot(
AoT.dot(self._Mvar) + np.diag(self._diags).dot(self._Oe)
)
)
for team, stat in zip(self._team_list, Ovar):
self._vars[team] = stat[0]
####################################################################
# Add results for next iter
match = self._matches[i]
score_breakdown = match.score_breakdown
if match.has_been_played and score_breakdown:
means: Dict[AllianceColor, float] = {}
for color in ALLIANCE_COLORS:
if self._stat == "score":
score = match.alliances[color]["score"]
# Subtract bonus objective scores for playoffs, since they are accounted for explicitly
# 2016; these should be zero for qual matches
score -= score_breakdown[color].get("breachPoints", 0)
score -= score_breakdown[color].get("capturePoints", 0)
# 2017; these should be zero for qual matches
score -= score_breakdown[color].get("kPaBonusPoints", 0)
score -= score_breakdown[color].get("rotorBonusPoints", 0)
means[color] = score
elif self._stat == "auto_points":
means[color] = score_breakdown[color]["autoPoints"]
elif self._stat == "boulders":
means[color] = (
score_breakdown[color].get("autoBouldersLow", 0)
+ score_breakdown[color].get("autoBouldersHigh", 0)
+ score_breakdown[color].get("teleopBouldersLow", 0)
+ score_breakdown[color].get("teleopBouldersHigh", 0)
)
elif self._stat == "crossings":
means[color] = (
score_breakdown[color].get("position1crossings", 0)
+ score_breakdown[color].get("position2crossings", 0)
+ score_breakdown[color].get("position3crossings", 0)
+ score_breakdown[color].get("position4crossings", 0)
+ score_breakdown[color].get("position5crossings", 0)
)
elif self._stat == "pressure":
means[color] = (
float(score_breakdown[color].get("autoFuelHigh", 0))
+ float(score_breakdown[color].get("autoFuelLow", 0)) / 3
+ float(score_breakdown[color].get("teleopFuelHigh", 0)) / 3
+ float(score_breakdown[color].get("teleopFuelLow", 0)) / 9
)
elif self._stat == "gears":
# Guess gears from rotors.
if score_breakdown[color].get("rotor4Engaged"):
num_gears = 12
elif score_breakdown[color].get("rotor3Engaged"):
num_gears = 6
elif score_breakdown[color].get("rotor2Auto"):
num_gears = 3
elif score_breakdown[color].get("rotor2Engaged"):
num_gears = 2
elif score_breakdown[color].get("rotor1Auto"):
num_gears = 1
elif score_breakdown[color].get("rotor1Engaged"):
num_gears = 0 # Free gear
else:
num_gears = -1 # Failed to place reserve gear
means[color] = num_gears
elif self._stat == "endgame_points":
means[color] = score_breakdown[color]["endgamePoints"]
elif self._stat == "rocket_pieces_scored":
count = 0
for side1 in ["Far", "Near"]:
for side2 in ["Left", "Right"]:
for level in ["low", "mid", "top"]:
position = score_breakdown[color][
"{}{}Rocket{}".format(level, side2, side1)
]
if "Cargo" in position:
count += 2
elif "Panel" in position:
count += 1
means[color] = count
elif self._stat == "hab_climb_points":
means[color] = score_breakdown[color]["habClimbPoints"]
elif self._stat == "power_cells_scored":
count = 0
for mode in ["auto", "teleop"]:
for goal in ["Bottom", "Outer", "Inner"]:
count += score_breakdown[color][
"{}Cells{}".format(mode, goal)
]
means[color] = count
elif self._stat == "cargo_scored":
auto_count = 0
for goal in ["Lower", "Upper"]:
for exit in ["Near", "Far", "Red", "Blue"]:
auto_count += score_breakdown[color][
"autoCargo{}{}".format(goal, exit)
]
# Approximate Quintet by adding 2 to count
count = auto_count + 2 if auto_count >= 5 else auto_count
for goal in ["Lower", "Upper"]:
for exit in ["Near", "Far", "Red", "Blue"]:
count += score_breakdown[color][
"teleopCargo{}{}".format(goal, exit)
]
means[color] = count
else:
raise Exception("Unknown stat: {}".format(self._stat))
self._Mmean[2 * i] = means[AllianceColor.RED]
self._Mmean[2 * i + 1] = means[AllianceColor.BLUE]
self._mean_sums.append(means[AllianceColor.RED])
self._mean_sums.append(means[AllianceColor.BLUE])
predicted_mean_red = 0
for team in match.alliances[AllianceColor.RED]["teams"]:
self._Ao[2 * i, self._team_id_map[team]] = 1
predicted_mean_red += self._means[team]
predicted_mean_blue = 0
for team in match.alliances[AllianceColor.BLUE]["teams"]:
self._Ao[2 * i + 1, self._team_id_map[team]] = 1
predicted_mean_blue += self._means[team]
# Find max of prob over var_sum
best_prob = 0
best_var_sum = None
var_sum = 1.0
var_sum_step = 2.0**12
while var_sum > 0 and var_sum_step >= 1:
prob = self._normpdf(
means[AllianceColor.RED], predicted_mean_red, np.sqrt(var_sum)
)
if prob >= best_prob:
best_prob = prob
best_var_sum = var_sum
prob2 = self._normpdf(
means[AllianceColor.RED], predicted_mean_red, np.sqrt(var_sum + 1)
)
if prob2 >= best_prob:
best_prob = prob2
best_var_sum = var_sum + 1
if prob2 > prob:
var_sum += var_sum_step
else:
var_sum -= var_sum_step
var_sum_step /= 2
self._Mvar[2 * i] = | |
# <gh_stars>1-10  (dataset artifact, kept as a comment so the module parses)
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import abc
import os
import time
from pathlib import Path
from typing import Any, Callable, Optional, Union
import airbyte_api_client
import yaml
from airbyte_api_client.api import connection_api, destination_api, source_api
from airbyte_api_client.model.airbyte_catalog import AirbyteCatalog
from airbyte_api_client.model.connection_create import ConnectionCreate
from airbyte_api_client.model.connection_read import ConnectionRead
from airbyte_api_client.model.connection_read_list import ConnectionReadList
from airbyte_api_client.model.connection_search import ConnectionSearch
from airbyte_api_client.model.connection_status import ConnectionStatus
from airbyte_api_client.model.connection_update import ConnectionUpdate
from airbyte_api_client.model.destination_create import DestinationCreate
from airbyte_api_client.model.destination_read import DestinationRead
from airbyte_api_client.model.destination_read_list import DestinationReadList
from airbyte_api_client.model.destination_search import DestinationSearch
from airbyte_api_client.model.destination_update import DestinationUpdate
from airbyte_api_client.model.source_create import SourceCreate
from airbyte_api_client.model.source_id_request_body import SourceIdRequestBody
from airbyte_api_client.model.source_read import SourceRead
from airbyte_api_client.model.source_read_list import SourceReadList
from airbyte_api_client.model.source_search import SourceSearch
from airbyte_api_client.model.source_update import SourceUpdate
from click import ClickException
from .diff_helpers import compute_diff, hash_config
from .yaml_loaders import EnvVarLoader
class DuplicateResourceError(ClickException):
    """Raised when a remote search matches more than one resource."""

    pass
class NonExistingResourceError(ClickException):
    """Raised when an operation needs a remote resource that does not exist."""

    pass
class InvalidConfigurationError(ClickException):
    """Raised when the API rejects a create/update payload as invalid (HTTP 422)."""

    pass
class ResourceState:
    """Persisted link between a local configuration file and its remote resource."""

    def __init__(self, configuration_path: str, resource_id: str, generation_timestamp: int, configuration_hash: str):
        """This constructor is meant to be private. Construction shall be made with create or from_file class methods.
        Args:
            configuration_path (str): Path to the configuration this state relates to.
            resource_id (str): Id of the resource the state relates to.
            generation_timestamp (int): State generation timestamp.
            configuration_hash (str): Checksum of the configuration file.
        """
        self.configuration_path = configuration_path
        self.resource_id = resource_id
        self.generation_timestamp = generation_timestamp
        self.configuration_hash = configuration_hash
        # The state file always lives next to the configuration it describes.
        self.path = os.path.join(os.path.dirname(configuration_path), "state.yaml")

    def as_dict(self):
        """Return the state as a plain dict, ready for YAML serialization."""
        return dict(
            resource_id=self.resource_id,
            generation_timestamp=self.generation_timestamp,
            configuration_path=self.configuration_path,
            configuration_hash=self.configuration_hash,
        )

    def _save(self) -> None:
        """Save the state as a YAML file."""
        with open(self.path, "w") as state_file:
            yaml.dump(self.as_dict(), state_file)

    @classmethod
    def create(cls, configuration_path: str, configuration: dict, resource_id: str) -> "ResourceState":
        """Create a state for a resource configuration and persist it to disk.
        Args:
            configuration_path (str): Path to the YAML file defining the resource.
            configuration (dict): Configuration object that will be hashed.
            resource_id (str): UUID of the resource.
        Returns:
            ResourceState: state representing the resource.
        """
        state = ResourceState(
            configuration_path,
            resource_id,
            int(time.time()),
            hash_config(configuration),
        )
        state._save()
        return state

    @classmethod
    def from_file(cls, file_path: str) -> "ResourceState":
        """Deserialize a state from a YAML path.
        Args:
            file_path (str): Path to the YAML state.
        Returns:
            ResourceState: state deserialized from YAML.
        """
        with open(file_path, "r") as f:
            raw = yaml.safe_load(f)
        return ResourceState(
            raw["configuration_path"],
            raw["resource_id"],
            raw["generation_timestamp"],
            raw["configuration_hash"],
        )
class BaseResource(abc.ABC):
    """Abstract wrapper around an Airbyte resource (source, destination or connection).

    Subclasses declare the concrete API class plus the names and payload
    builders of its create/update/search endpoints; this base class
    implements local-state tracking, remote lookup, diffing and the
    create/update lifecycle.
    """

    APPLY_PRIORITY = 0  # Priority of the resource during the apply. 0 means the resource is top priority.

    @property
    @abc.abstractmethod
    def api(
        self,
    ):  # pragma: no cover
        pass

    @property
    @abc.abstractmethod
    def create_function_name(
        self,
    ):  # pragma: no cover
        pass

    @property
    @abc.abstractmethod
    def create_payload(
        self,
    ):  # pragma: no cover
        pass

    @property
    @abc.abstractmethod
    def update_payload(
        self,
    ):  # pragma: no cover
        pass

    @property
    @abc.abstractmethod
    def update_function_name(
        self,
    ):  # pragma: no cover
        pass

    @property
    @abc.abstractmethod
    def search_function_name(
        self,
    ):  # pragma: no cover
        pass

    @property
    @abc.abstractmethod
    def search_payload(
        self,
    ):  # pragma: no cover
        pass

    @property
    @abc.abstractmethod
    def resource_id_field(
        self,
    ):  # pragma: no cover
        pass

    @property
    @abc.abstractmethod
    def resource_type(
        self,
    ):  # pragma: no cover
        pass

    def __init__(
        self, api_client: airbyte_api_client.ApiClient, workspace_id: str, local_configuration: dict, configuration_path: str
    ) -> None:
        """Create a BaseResource object.
        Args:
            api_client (airbyte_api_client.ApiClient): the Airbyte API client.
            workspace_id (str): the workspace id.
            local_configuration (dict): The local configuration describing the resource.
            configuration_path (str): The path to the local configuration describing the resource with YAML.
        """
        self._create_fn = getattr(self.api, self.create_function_name)
        self._update_fn = getattr(self.api, self.update_function_name)
        self._search_fn = getattr(self.api, self.search_function_name)
        self.workspace_id = workspace_id
        self.local_configuration = local_configuration
        self.configuration_path = configuration_path
        self.api_instance = self.api(api_client)
        self.state = self._get_state_from_file()
        # No state file counts as "changed" so a first apply creates the resource.
        self.local_file_changed = True if self.state is None else hash_config(self.local_configuration) != self.state.configuration_hash

    @property
    def remote_resource(self):
        """The matching remote resource; fetched from the API on every access."""
        return self._get_remote_resource()

    def _get_comparable_configuration(
        self,
    ) -> Union[SourceRead, DestinationRead, dict]:  # pragma: no cover
        """Get the object to which local configuration will be compared to.
        Raises:
            NonExistingResourceError: Raised if the remote resource does not exists.
        Returns:
            Union[SourceRead, DestinationRead, dict]: The comparable configuration
        """
        if not self.was_created:
            raise NonExistingResourceError("Can't find a comparable configuration as the remote resource does not exists.")
        else:
            return self.remote_resource

    @property
    def was_created(self):
        """Whether the resource already exists on the remote Airbyte instance."""
        # bool() keeps the original truthiness semantics (None -> False).
        return bool(self.remote_resource)

    def __getattr__(self, name: str) -> Any:
        """Map attribute of the YAML config to the Resource object.
        Args:
            name (str): Attribute name
        Raises:
            AttributeError: Raised if the attributed was not found in the local configuration.
        Returns:
            [Any]: Attribute value
        """
        if name in self.local_configuration:
            return self.local_configuration.get(name)
        raise AttributeError(f"{self.__class__.__name__}.{name} is invalid.")

    def _search(self, check_return_type=True) -> Union[SourceReadList, DestinationReadList, ConnectionReadList]:
        """Run search of a resources on the remote Airbyte instance.
        Returns:
            Union[SourceReadList, DestinationReadList, ConnectionReadList]: Search results
        """
        return self._search_fn(self.api_instance, self.search_payload, _check_return_type=check_return_type)

    def _get_state_from_file(self) -> Optional[ResourceState]:
        """Retrieve a state object from a local YAML file if it exists.
        Returns:
            Optional[ResourceState]: the deserialized resource state if YAML file found.
        """
        expected_state_path = Path(os.path.join(os.path.dirname(self.configuration_path), "state.yaml"))
        if expected_state_path.is_file():
            return ResourceState.from_file(expected_state_path)
        return None

    def _get_remote_resource(self) -> Optional[Union[SourceRead, DestinationRead, ConnectionRead]]:
        """Find the remote resource on the Airbyte instance associated with the current resource.
        Raises:
            DuplicateResourceError: raised if the search results return multiple resources.
        Returns:
            Optional[Union[SourceRead, DestinationRead, ConnectionRead]]: The remote resource found.
        """
        search_results = self._search().get(f"{self.resource_type}s", [])
        if len(search_results) > 1:
            # Typo fix in the user-facing message: "ressources" -> "resources".
            raise DuplicateResourceError("Two or more resources exist with the same name.")
        if len(search_results) == 1:
            return search_results[0]
        else:
            return None

    def get_diff_with_remote_resource(self) -> str:
        """Compute the diff between current resource and the remote resource.
        Raises:
            NonExistingResourceError: Raised if the remote resource does not exist.
        Returns:
            str: The prettyfied diff.
        """
        if not self.was_created:
            raise NonExistingResourceError("Cannot compute diff with a non existing remote resource.")
        current_config = self.configuration
        remote_config = self._get_comparable_configuration()
        diff = compute_diff(remote_config, current_config)
        return diff.pretty()

    def _create_or_update(
        self,
        operation_fn: Callable,
        payload: Union[SourceCreate, SourceUpdate, DestinationCreate, DestinationUpdate, ConnectionCreate, ConnectionUpdate],
        _check_return_type: bool = True,
    ) -> "Tuple[Union[SourceRead, DestinationRead, ConnectionRead], ResourceState]":
        """Wrapper to trigger create or update of remote resource.

        The return annotation is a string forward reference so no extra
        typing import is required at runtime; the method returns the API
        result together with the freshly persisted ResourceState.
        Args:
            operation_fn (Callable): The API function to run.
            payload (Union[SourceCreate, SourceUpdate, DestinationCreate, DestinationUpdate]): The payload to send to create or update the resource.
        Kwargs:
            _check_return_type (boolean): Whether to check the types returned in the API agains airbyte-api-client open api spec.
        Raises:
            InvalidConfigurationError: Raised if the create or update payload is invalid.
            ApiException: Raised in case of other API errors.
        Returns:
            Tuple[Union[SourceRead, DestinationRead, ConnectionRead], ResourceState]: The created or updated resource and its new local state.
        """
        try:
            result = operation_fn(self.api_instance, payload, _check_return_type=_check_return_type)
            return result, ResourceState.create(self.configuration_path, self.local_configuration, result[self.resource_id_field])
        except airbyte_api_client.ApiException as api_error:
            if api_error.status == 422:
                # This API response error is really verbose, but it embodies all the details about why the config is not valid.
                # TODO alafanechere: try to parse it and display it in a more readable way.
                raise InvalidConfigurationError(api_error.body)
            else:
                raise api_error

    def create(self) -> "Tuple[Union[SourceRead, DestinationRead, ConnectionRead], ResourceState]":
        """Public function to create the resource on the remote Airbyte instance.
        Returns:
            Tuple[Union[SourceRead, DestinationRead, ConnectionRead], ResourceState]: The created resource and its new local state.
        """
        return self._create_or_update(self._create_fn, self.create_payload)

    def update(self) -> "Tuple[Union[SourceRead, DestinationRead, ConnectionRead], ResourceState]":
        """Public function to update the resource on the remote Airbyte instance.
        Returns:
            Tuple[Union[SourceRead, DestinationRead, ConnectionRead], ResourceState]: The updated resource and its new local state.
        """
        return self._create_or_update(self._update_fn, self.update_payload)

    @property
    def resource_id(self) -> Optional[str]:
        """Exposes the resource UUID of the remote resource
        Returns:
            str: Remote resource's UUID
        """
        return self.state.resource_id if self.state is not None else None
class Source(BaseResource):
    """Concrete BaseResource wrapper for Airbyte sources."""

    api = source_api.SourceApi
    create_function_name = "create_source"
    resource_id_field = "source_id"
    search_function_name = "search_sources"
    update_function_name = "update_source"
    resource_type = "source"

    @property
    def create_payload(self):
        """Defines the payload to create the remote source."""
        return SourceCreate(self.definition_id, self.configuration, self.workspace_id, self.resource_name)

    @property
    def search_payload(self):
        """Search by name when no local state exists, otherwise by the stored source id."""
        if self.state is None:
            return SourceSearch(source_definition_id=self.definition_id, workspace_id=self.workspace_id, name=self.resource_name)
        else:
            return SourceSearch(source_definition_id=self.definition_id, workspace_id=self.workspace_id, source_id=self.state.resource_id)

    @property
    def update_payload(self):
        """Defines the payload to update the remote source."""
        return SourceUpdate(
            source_id=self.resource_id,
            connection_configuration=self.configuration,
            name=self.resource_name,
        )

    def _get_comparable_configuration(self):
        """Compare the local config against the remote source's connection configuration only."""
        comparable_configuration = super()._get_comparable_configuration()
        return comparable_configuration.connection_configuration

    @property
    def resource_id_request_body(self) -> SourceIdRequestBody:
        """Creates SourceIdRequestBody from resource id.
        Raises:
            NonExistingResourceError: raised if the resource id is None.
        Returns:
            SourceIdRequestBody: The SourceIdRequestBody model instance.
        """
        if self.resource_id is None:
            raise NonExistingResourceError("The resource id could not be retrieved, the remote resource is not existing.")
        return SourceIdRequestBody(source_id=self.resource_id)

    @property
    def catalog(self) -> AirbyteCatalog:
        """Retrieves the source's Airbyte catalog.
        Returns:
            AirbyteCatalog: The catalog issued by schema discovery.
        """
        schema = self.api_instance.discover_schema_for_source(self.resource_id_request_body, _check_return_type=False)
        return schema.catalog
class Destination(BaseResource):
api = destination_api.DestinationApi
create_function_name = "create_destination"
resource_id_field = "destination_id"
search_function_name = "search_destinations"
update_function_name = "update_destination"
resource_type = "destination"
@property
def create_payload(self) -> DestinationCreate:
"""Defines the payload to create the remote resource.
Returns:
DestinationCreate: The DestinationCreate model instance
"""
return DestinationCreate(self.workspace_id, self.resource_name, self.definition_id, self.configuration)
@property
def search_payload(self) -> DestinationSearch:
"""Defines the payload to search the remote resource. Search by resource name if no state found, otherwise search by resource id found in the state.
Returns:
DestinationSearch: The DestinationSearch | |
self.bag_type()
pass
elif token in [PigParser.MAP]:
self.enterOuterAlt(localctx, 4)
self.state = 196
self.map_type()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Simple_typeContext(ParserRuleContext):
    """ANTLR-generated parse-tree node for the `simple_type` grammar rule."""

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def INT(self):
        return self.getToken(PigParser.INT, 0)

    def LONG(self):
        return self.getToken(PigParser.LONG, 0)

    def FLOAT(self):
        return self.getToken(PigParser.FLOAT, 0)

    def DOUBLE(self):
        return self.getToken(PigParser.DOUBLE, 0)

    def CHARARRAY(self):
        return self.getToken(PigParser.CHARARRAY, 0)

    def BYTEARRAY(self):
        return self.getToken(PigParser.BYTEARRAY, 0)

    def getRuleIndex(self):
        return PigParser.RULE_simple_type

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterSimple_type"):
            listener.enterSimple_type(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitSimple_type"):
            listener.exitSimple_type(self)
def simple_type(self):
    """Parse the `simple_type` rule: a single primitive-type token (ANTLR-generated)."""
    localctx = PigParser.Simple_typeContext(self, self._ctx, self.state)
    self.enterRule(localctx, 22, self.RULE_simple_type)
    self._la = 0  # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 199
        _la = self._input.LA(1)
        # Bitset membership test: accept only the primitive-type tokens.
        if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << PigParser.INT) | (1 << PigParser.LONG) | (1 << PigParser.FLOAT) | (1 << PigParser.DOUBLE) | (1 << PigParser.CHARARRAY) | (1 << PigParser.BYTEARRAY))) != 0)):
            self._errHandler.recoverInline(self)
        else:
            self._errHandler.reportMatch(self)
            self.consume()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Tuple_typeContext(ParserRuleContext):
    """ANTLR-generated parse-tree node for the `tuple_type` grammar rule."""

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def tuple_(self):
        return self.getTypedRuleContext(PigParser.Tuple_Context, 0)

    def tuple_def(self):
        return self.getTypedRuleContext(PigParser.Tuple_defContext, 0)

    def getRuleIndex(self):
        return PigParser.RULE_tuple_type

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterTuple_type"):
            listener.enterTuple_type(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitTuple_type"):
            listener.exitTuple_type(self)
def tuple_type(self):
    """Parse the `tuple_type` rule: tuple_ followed by tuple_def (ANTLR-generated)."""
    localctx = PigParser.Tuple_typeContext(self, self._ctx, self.state)
    self.enterRule(localctx, 24, self.RULE_tuple_type)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 201
        self.tuple_()
        self.state = 202
        self.tuple_def()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Bag_typeContext(ParserRuleContext):
    """ANTLR-generated parse-tree node for the `bag_type` grammar rule."""

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def BAG(self):
        return self.getToken(PigParser.BAG, 0)

    def tuple_def(self):
        return self.getTypedRuleContext(PigParser.Tuple_defContext, 0)

    def getRuleIndex(self):
        return PigParser.RULE_bag_type

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterBag_type"):
            listener.enterBag_type(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitBag_type"):
            listener.exitBag_type(self)
def bag_type(self):
    """Parse the `bag_type` rule: BAG token followed by tuple_def (ANTLR-generated)."""
    localctx = PigParser.Bag_typeContext(self, self._ctx, self.state)
    self.enterRule(localctx, 26, self.RULE_bag_type)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 204
        self.match(PigParser.BAG)
        self.state = 205
        self.tuple_def()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Map_typeContext(ParserRuleContext):
    """ANTLR-generated parse-tree node for the `map_type` grammar rule."""

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def MAP(self):
        return self.getToken(PigParser.MAP, 0)

    def LEFT_BRACKET(self):
        return self.getToken(PigParser.LEFT_BRACKET, 0)

    def RIGHT_BRACKET(self):
        return self.getToken(PigParser.RIGHT_BRACKET, 0)

    def getRuleIndex(self):
        return PigParser.RULE_map_type

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterMap_type"):
            listener.enterMap_type(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitMap_type"):
            listener.exitMap_type(self)
def map_type(self):
    """Parse the `map_type` rule: MAP '[' ']' (ANTLR-generated)."""
    localctx = PigParser.Map_typeContext(self, self._ctx, self.state)
    self.enterRule(localctx, 28, self.RULE_map_type)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 207
        self.match(PigParser.MAP)
        self.state = 208
        self.match(PigParser.LEFT_BRACKET)
        self.state = 209
        self.match(PigParser.RIGHT_BRACKET)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Func_clauseContext(ParserRuleContext):
    """ANTLR-generated parse-tree node for the `func_clause` grammar rule."""

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def func_name(self):
        return self.getTypedRuleContext(PigParser.Func_nameContext, 0)

    def LEFT_PAREN(self):
        return self.getToken(PigParser.LEFT_PAREN, 0)

    def RIGHT_PAREN(self):
        return self.getToken(PigParser.RIGHT_PAREN, 0)

    def func_args(self):
        return self.getTypedRuleContext(PigParser.Func_argsContext, 0)

    def NOT(self):
        return self.getToken(PigParser.NOT, 0)

    def FUNC(self):
        return self.getToken(PigParser.FUNC, 0)

    def getRuleIndex(self):
        return PigParser.RULE_func_clause

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterFunc_clause"):
            listener.enterFunc_clause(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitFunc_clause"):
            listener.exitFunc_clause(self)
def func_clause(self):
    """Parse the `func_clause` rule (ANTLR-generated).

    Two alternatives: func_name '(' func_args? ')' when the lookahead is an
    IDENTIFIER, or NOT FUNC func_name func_args? when it is NOT.
    """
    localctx = PigParser.Func_clauseContext(self, self._ctx, self.state)
    self.enterRule(localctx, 30, self.RULE_func_clause)
    self._la = 0  # Token type
    try:
        self.state = 224
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [PigParser.IDENTIFIER]:
            self.enterOuterAlt(localctx, 1)
            self.state = 211
            self.func_name()
            self.state = 212
            self.match(PigParser.LEFT_PAREN)
            self.state = 214
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Optional argument list between the parentheses.
            if _la == PigParser.QUOTEDSTRING:
                self.state = 213
                self.func_args()
            self.state = 216
            self.match(PigParser.RIGHT_PAREN)
            pass
        elif token in [PigParser.NOT]:
            self.enterOuterAlt(localctx, 2)
            self.state = 218
            self.match(PigParser.NOT)
            self.state = 219
            self.match(PigParser.FUNC)
            self.state = 220
            self.func_name()
            self.state = 222
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input, 17, self._ctx)
            # Optional trailing func_args, chosen by adaptive prediction.
            if la_ == 1:
                self.state = 221
                self.func_args()
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Func_nameContext(ParserRuleContext):
    """ANTLR-generated parse-tree node for the `func_name` grammar rule."""

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def IDENTIFIER(self, i: int = None):
        # i=None returns all IDENTIFIER tokens; an index returns the i-th one.
        if i is None:
            return self.getTokens(PigParser.IDENTIFIER)
        else:
            return self.getToken(PigParser.IDENTIFIER, i)

    def PERIOD(self, i: int = None):
        # i=None returns all PERIOD tokens; an index returns the i-th one.
        if i is None:
            return self.getTokens(PigParser.PERIOD)
        else:
            return self.getToken(PigParser.PERIOD, i)

    def getRuleIndex(self):
        return PigParser.RULE_func_name

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterFunc_name"):
            listener.enterFunc_name(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitFunc_name"):
            listener.exitFunc_name(self)
def func_name(self):
    """Parse the `func_name` rule (ANTLR-generated).

    Alternative 1: a dotted name IDENTIFIER ('.' IDENTIFIER)*.
    Alternative 2: one or more bare IDENTIFIER tokens.
    """
    localctx = PigParser.Func_nameContext(self, self._ctx, self.state)
    self.enterRule(localctx, 32, self.RULE_func_name)
    try:
        self.state = 239
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input, 21, self._ctx)
        if la_ == 1:
            self.enterOuterAlt(localctx, 1)
            self.state = 226
            self.match(PigParser.IDENTIFIER)
            self.state = 231
            self._errHandler.sync(self)
            _alt = self._interp.adaptivePredict(self._input, 19, self._ctx)
            # Zero-or-more loop over '.' IDENTIFIER pairs.
            while _alt != 2 and _alt != ATN.INVALID_ALT_NUMBER:
                if _alt == 1:
                    self.state = 227
                    self.match(PigParser.PERIOD)
                    self.state = 228
                    self.match(PigParser.IDENTIFIER)
                self.state = 233
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(
                    self._input, 19, self._ctx)
            pass
        elif la_ == 2:
            self.enterOuterAlt(localctx, 2)
            self.state = 235
            self._errHandler.sync(self)
            _alt = 1
            # One-or-more loop over IDENTIFIER tokens.
            while _alt != 2 and _alt != ATN.INVALID_ALT_NUMBER:
                if _alt == 1:
                    self.state = 234
                    self.match(PigParser.IDENTIFIER)
                else:
                    raise NoViableAltException(self)
                self.state = 237
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(
                    self._input, 20, self._ctx)
            pass
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Func_argsContext(ParserRuleContext):
    """ANTLR-generated parse-tree node for the `func_args` grammar rule."""

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def QUOTEDSTRING(self, i: int = None):
        # i=None returns all QUOTEDSTRING tokens; an index returns the i-th one.
        if i is None:
            return self.getTokens(PigParser.QUOTEDSTRING)
        else:
            return self.getToken(PigParser.QUOTEDSTRING, i)

    def COMMA(self, i: int = None):
        # i=None returns all COMMA tokens; an index returns the i-th one.
        if i is None:
            return self.getTokens(PigParser.COMMA)
        else:
            return self.getToken(PigParser.COMMA, i)

    def getRuleIndex(self):
        return PigParser.RULE_func_args

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterFunc_args"):
            listener.enterFunc_args(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitFunc_args"):
            listener.exitFunc_args(self)
def func_args(self):
    """Parse the func_args rule (ANTLR-generated; do not hand-tune state numbers).

    Two alternatives, chosen by adaptive prediction:
      1. QUOTEDSTRING (COMMA QUOTEDSTRING)*  -- comma-separated string args.
      2. QUOTEDSTRING+                       -- one or more bare strings.
    Returns the populated Func_argsContext.
    """
    localctx = PigParser.Func_argsContext(self, self._ctx, self.state)
    self.enterRule(localctx, 34, self.RULE_func_args)
    try:
        self.state = 254
        self._errHandler.sync(self)
        # Decision 24 selects between the two alternatives.
        la_ = self._interp.adaptivePredict(self._input, 24, self._ctx)
        if la_ == 1:
            # Alt 1: QUOTEDSTRING followed by zero or more ",QUOTEDSTRING".
            self.enterOuterAlt(localctx, 1)
            self.state = 241
            self.match(PigParser.QUOTEDSTRING)
            self.state = 246
            self._errHandler.sync(self)
            _alt = self._interp.adaptivePredict(self._input, 22, self._ctx)
            while _alt != 2 and _alt != ATN.INVALID_ALT_NUMBER:
                if _alt == 1:
                    self.state = 242
                    self.match(PigParser.COMMA)
                    self.state = 243
                    self.match(PigParser.QUOTEDSTRING)
                self.state = 248
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(
                    self._input, 22, self._ctx)
            pass
        elif la_ == 2:
            # Alt 2: one-or-more QUOTEDSTRING (first iteration is mandatory).
            self.enterOuterAlt(localctx, 2)
            self.state = 250
            self._errHandler.sync(self)
            _alt = 1
            while _alt != 2 and _alt != ATN.INVALID_ALT_NUMBER:
                if _alt == 1:
                    self.state = 249
                    self.match(PigParser.QUOTEDSTRING)
                else:
                    raise NoViableAltException(self)
                self.state = 252
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(
                    self._input, 23, self._ctx)
            pass
    # NOTE: generated code binds the exception as `re`, shadowing the re module
    # within this handler only.
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Store_clauseContext(ParserRuleContext):
    """Parse-tree context for store_clause: STORE alias INTO filename [USING func_clause]."""

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def STORE(self):
        return self.getToken(PigParser.STORE, 0)

    def alias(self):
        return self.getTypedRuleContext(PigParser.AliasContext, 0)

    def INTO(self):
        return self.getToken(PigParser.INTO, 0)

    def filename(self):
        return self.getTypedRuleContext(PigParser.FilenameContext, 0)

    def USING(self):
        return self.getToken(PigParser.USING, 0)

    def func_clause(self):
        return self.getTypedRuleContext(PigParser.Func_clauseContext, 0)

    def getRuleIndex(self):
        return PigParser.RULE_store_clause

    def enterRule(self, listener: ParseTreeListener):
        hook = getattr(listener, "enterStore_clause", None)
        if hook is not None:
            hook(self)

    def exitRule(self, listener: ParseTreeListener):
        hook = getattr(listener, "exitStore_clause", None)
        if hook is not None:
            hook(self)
def store_clause(self):
    """Parse the store_clause rule (ANTLR-generated; do not hand-tune state numbers).

    Grammar: STORE alias INTO filename (USING func_clause)?
    Returns the populated Store_clauseContext.
    """
    localctx = PigParser.Store_clauseContext(self, self._ctx, self.state)
    self.enterRule(localctx, 36, self.RULE_store_clause)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 256
        self.match(PigParser.STORE)
        self.state = 257
        self.alias()
        self.state = 258
        self.match(PigParser.INTO)
        self.state = 259
        self.filename()
        self.state = 262
        self._errHandler.sync(self)
        # Decision 25: optional USING func_clause suffix.
        la_ = self._interp.adaptivePredict(self._input, 25, self._ctx)
        if la_ == 1:
            self.state = 260
            self.match(PigParser.USING)
            self.state = 261
            self.func_clause()
    # NOTE: generated code binds the exception as `re`, shadowing the re module
    # within this handler only.
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Filter_clauseContext(ParserRuleContext):
    """Parse-tree context for filter_clause: FILTER alias BY cond."""

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def FILTER(self):
        return self.getToken(PigParser.FILTER, 0)

    def alias(self):
        return self.getTypedRuleContext(PigParser.AliasContext, 0)

    def BY(self):
        return self.getToken(PigParser.BY, 0)

    def cond(self):
        return self.getTypedRuleContext(PigParser.CondContext, 0)

    def getRuleIndex(self):
        return PigParser.RULE_filter_clause

    def enterRule(self, listener: ParseTreeListener):
        hook = getattr(listener, "enterFilter_clause", None)
        if hook is not None:
            hook(self)

    def exitRule(self, listener: ParseTreeListener):
        hook = getattr(listener, "exitFilter_clause", None)
        if hook is not None:
            hook(self)
def filter_clause(self):
    """Parse the filter_clause rule (ANTLR-generated; do not hand-tune state numbers).

    Grammar: FILTER alias BY cond
    Returns the populated Filter_clauseContext.
    """
    localctx = PigParser.Filter_clauseContext(self, self._ctx, self.state)
    self.enterRule(localctx, 38, self.RULE_filter_clause)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 264
        self.match(PigParser.FILTER)
        self.state = 265
        self.alias()
        self.state = 266
        self.match(PigParser.BY)
        self.state = 267
        self.cond()
    # NOTE: generated code binds the exception as `re`, shadowing the re module
    # within this handler only.
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class CondContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def or_cond(self):
return self.getTypedRuleContext(PigParser.Or_condContext, 0)
def getRuleIndex(self):
return PigParser.RULE_cond
def enterRule(self, | |
arg1.
3. The combined string is then filtered using filter1.
- The following characters must be escaped by preceding them with a single \:
- ()|
"""
# Lexer token table, matched in order. Actions (third field) are method names
# on this class; the fourth field is the lexer state to push, if any.
tokens = [
    # When in literal mode, only allow to escape }
    lexer.Token("Literal", r"\\[^{}]", "AppendArg", None),
    # Allow escaping of special characters
    lexer.Token(None, r"\\(.)", "Escape", None),
    # Literal sequence is %{....}. Literal states can not be nested further,
    # i.e. we include anything until the next }. It is still possible to
    # escape } if this character needs to be inserted literally.
    lexer.Token("Literal", r"\}", "EndLiteralExpression,PopState", None),
    lexer.Token("Literal", r"[^}\\]+", "AppendArg", None),
    lexer.Token(None, r"\%\{", "StartExpression,PushState", "Literal"),
    # Expansion sequence is %(....)
    lexer.Token(None, r"\%\(", "StartExpression", None),
    # Filter() splits its argument on commas, so the character class must
    # accept "," or comma-separated filter lists could never be tokenized.
    lexer.Token(None, r"\|([a-zA-Z_,]+)\)", "Filter", None),
    lexer.Token(None, r"\)", "ExpandArg", None),
    # Glob up as much data as possible to increase efficiency here.
    lexer.Token(None, r"[^()%{}|\\]+", "AppendArg", None),
    lexer.Token(None, r".", "AppendArg", None),
    # Empty input is also ok.
    lexer.Token(None, "^$", None, None)
]

# Translation table used by Escape(): each two-character escape sequence maps
# to the literal character it represents.
STRING_ESCAPES = {"\\\\": "\\",
                  "\\(": "(",
                  "\\)": ")",
                  "\\{": "{",
                  "\\}": "}",
                  "\\%": "%"}
def __init__(self, data, config, default_section="", parameter=None,
             context=None):
    """Record interpolation inputs, then hand *data* to the lexer base class."""
    self.default_section = default_section
    self.parameter = parameter
    self.config = config
    self.context = context
    # The stack accumulates partial results; one level per open expression.
    self.stack = [""]
    super(StringInterpolator, self).__init__(data)
def Escape(self, string="", **_):
    """Support standard string escaping.

    Known escape sequences are translated to their literal character;
    anything else passes through unchanged.
    """
    translated = self.STRING_ESCAPES.get(string, string)
    self.stack[-1] = self.stack[-1] + translated
def Error(self, e):
    """Parse errors are fatal: wrap and re-raise as a config format error."""
    message = "While parsing %s: %s" % (self.parameter, e)
    raise ConfigFormatError(message)
def StartExpression(self, **_):
    """Open a fresh accumulator level for a new nested expression."""
    self.stack.append("")
def EndLiteralExpression(self, **_):
    """Close a %{...} literal, merging its text into the enclosing level."""
    if len(self.stack) < 2:
        raise lexer.ParseError(
            "Unbalanced literal sequence: Can not expand '%s'" %
            self.processed_buffer)
    literal = self.stack.pop()
    self.stack[-1] += literal
def Filter(self, match=None, **_):
    """Apply the filter list captured in *match* to the current expression."""
    arg = self.stack.pop()
    # Filters can be specified as a comma separated list.
    for filter_name in match.group(1).split(","):
        filter_cls = ConfigFilter.classes_by_name.get(filter_name)
        if filter_cls is None:
            raise FilterError("Unknown filter function %r" % filter_name)
        logging.info("Applying filter %s for %s.", filter_name, arg)
        arg = filter_cls().Filter(arg)
    self.stack[-1] += arg
def ExpandArg(self, **_):
    """Expand the completed expression as a section.parameter config lookup."""
    # Called on a closing ")": the stack depth must exactly match the number
    # of "(" seen so far.
    if len(self.stack) < 2:
        raise lexer.ParseError(
            "Unbalanced parenthesis: Can not expand '%s'" % self.processed_buffer)

    # This is the full parameter name, e.g. Logging.path. Unqualified names
    # are resolved against the default section.
    parameter_name = self.stack.pop()
    if "." not in parameter_name:
        parameter_name = "%s.%s" % (self.default_section, parameter_name)

    final_value = self.config.Get(parameter_name, context=self.context)
    if final_value is None:
        final_value = ""

    # Encode the interpolated value according to its declared type.
    type_info_obj = self.config.FindTypeInfo(parameter_name) or type_info.String()
    self.stack[-1] += type_info_obj.ToString(final_value)
def AppendArg(self, string="", **_):
    """Accumulate literal text onto the innermost expression."""
    self.stack[-1] = self.stack[-1] + string
def Parse(self):
    """Finish lexing and return the fully interpolated string."""
    self.Close()
    if len(self.stack) == 1:
        return self.stack[0]
    raise lexer.ParseError("Nested expression not balanced.")
class GrrConfigManager(object):
"""Manage configuration system in GRR."""
def __init__(self):
    """Initialize the configuration manager."""
    # The context is a list of strings describing different aspects of the
    # running instance; it lets the configuration return specialized results
    # for different callers.
    self.context = []
    self.context_descriptions = {}

    self.raw_data = OrderedYamlDict()
    self.validated = set()
    self.writeback = None
    self.writeback_data = OrderedYamlDict()
    self.global_override = dict()
    self.constants = set()

    # Type descriptors for every known configuration parameter, with their
    # default values kept separately.
    self.type_infos = type_info.TypeDescriptorSet()
    self.defaults = {}

    # Start with an empty cache of validated and interpolated results.
    self.FlushCache()
    self.initialized = False
def FlushCache(self):
    """Drop every cached validated/interpolated result."""
    self.cache = dict()
def MakeNewConfig(self):
    """Creates a new configuration option based on this one.

    Note that it is not normally possible to just instantiate the config
    object directly: config options are defined at import time against the
    global CONFIG object, so a bare instance would have no type descriptors.
    Call this on the global CONFIG object to obtain a copy that inherits the
    regular option definitions.

    Returns:
      A new empty config object which shares this one's parameter
      definitions, defaults and context.
    """
    fresh = self.__class__()
    # These structures never change after import, so sharing (not copying)
    # them is safe.
    fresh.type_infos = self.type_infos
    fresh.defaults = self.defaults
    fresh.context = self.context
    return fresh
def SetWriteBack(self, filename):
    """Sets the config file which will receive any modifications.

    The main config file can stay read-only while all Set() operations are
    directed into this secondary location, which is parsed first and merged
    over the raw data of this object.

    Args:
      filename: A url or filename which will receive updates.
    """
    self.writeback = self.LoadSecondaryConfig(filename)
    self.MergeData(self.writeback.RawData(), self.writeback_data)
    logging.info("Configuration writeback is set to %s", filename)
def Validate(self, sections=None, parameters=None):
    """Validate sections or individual parameters.

    Components usually only care about their own config sections, so they
    declare up front which sections/parameters to validate rather than
    validating the whole file.

    Args:
      sections: A section name or list of sections; every parameter within
        each section is validated.
      parameters: A list of fully qualified parameters ("section.name") to
        validate.

    Returns:
      dict of {parameter: Exception}, keyed by "section.name" strings.
    """
    # NOTE: basestring — this module targets Python 2.
    if isinstance(sections, basestring):
        sections = [sections]
    if sections is None:
        sections = []
    if parameters is None:
        parameters = []

    validation_errors = {}

    def _probe(name):
        # Getting a value runs the full interpolation/validation pipeline.
        try:
            self.Get(name)
        except (Error, ValueError) as e:
            validation_errors[name] = e

    for section in sections:
        prefix = section + "."
        for descriptor in self.type_infos:
            if descriptor.name.startswith(prefix):
                _probe(descriptor.name)

    for parameter in parameters:
        for descriptor in self.type_infos:
            if descriptor.name == parameter:
                _probe(descriptor.name)

    return validation_errors
def AddContext(self, context_string, description=None):
    """Adds a context string to the global configuration.

    Contexts convey information about the caller; later AddContext() calls
    trump earlier ones when the configuration file specifies conflicting
    options, which lets command-line contexts override normal operation.

    Args:
      context_string: A string which describes the global program.
      description: A description as to when this context applies.
    """
    if context_string in self.context:
        return
    self.context.append(context_string)
    self.context_descriptions[context_string] = description
    # Cached results may depend on the active contexts, so invalidate them.
    self.FlushCache()
def SetRaw(self, name, value):
    """Store the raw string without verification or escaping.

    Raises:
      ConstModificationError: When attempting to change a constant option.
    """
    # Without a writeback location the change only lives in memory.
    if self.writeback is None:
        logging.warn("Attempting to modify a read only config object.")
    if name in self.constants:
        raise ConstModificationError(
            "Attempting to modify constant value %s" % name)
    self.writeback_data[name] = value
    self.FlushCache()
def Set(self, name, value):
    """Update the configuration option with a new value.

    The value is forced for all contexts and lands in the writeback data,
    which a later Save() persists.

    Args:
      name: The name of the parameter to set.
      value: The new value; validated against the option's type descriptor.

    Raises:
      ConstModificationError: When attempting to change a constant option.
    """
    # If the configuration system has a write back location we use it,
    # otherwise the update only lives in the primary (in-memory) object.
    if self.writeback is None:
        logging.warn("Attempting to modify a read only config object for %s.",
                     name)
    if name in self.constants:
        raise ConstModificationError(
            "Attempting to modify constant value %s" % name)

    destination = self.writeback_data
    if value is not None:
        # Normalize through the option's type descriptor, then escape any
        # characters the interpolator would otherwise act on.
        # NOTE: basestring — this module targets Python 2.
        descriptor = self.FindTypeInfo(name)
        value = descriptor.ToString(value)
        if isinstance(value, basestring):
            value = self.EscapeString(value)
    destination[name] = value
    self.FlushCache()
def EscapeString(self, string):
    """Escape special characters when encoding to a string.

    Backslash-escapes \\, %, ), { and } — everything the interpolation lexer
    treats specially (a bare "(" is inert, so it needs no escaping).
    """
    return re.sub(r"([\\%){}])", lambda m: "\\" + m.group(1), string)
def Write(self):
    """Write out the updated configuration to the writeback location."""
    if not self.writeback:
        raise RuntimeError("Attempting to write a configuration without a "
                           "writeback location.")
    self.writeback.SaveData(self.writeback_data)
def WriteToFD(self, fd):
    """Write out the updated configuration to the given file object."""
    if not self.writeback:
        raise RuntimeError("Attempting to write a configuration without a "
                           "writeback location.")
    self.writeback.SaveDataToFD(self.writeback_data, fd)
| |
# ExecTrial.py
#--------------------------------Imports---------------------------------------#
import pygame, sys
from pygame.locals import *
import random
#--------------------------------Imports---------------------------------------#
#----------------------------------Solve Methods-------------------------------#
## These are the functions to solve puzzles and return solved and partially
## solved puzzles as strings. Most definitions are straight out of Peter Norvig's
## method, with unnecessary utilities removed. Added method to convert dict to
## string, since he uses a dict to store and print values. String makes it easier
## to interface to pygame display. Removed his main program calls, left two grids
## and unit test untouched to save time on checking initialization and testing
## cases. A few changes still need to be made, most notably to parse_grid() to
## return the next logical conclusion, and how it arrived at that value.
## Module-level bookkeeping shared by the solver callbacks below. The
## `global` statements are kept for documentation value only (they are no-ops
## at module scope).
global solvedCells  # Every value assign() settles is logged here; cleared per puzzle.
global playerSolution
counter = 0
base = 0
playerSolution = [' '] * 81  # One slot per cell; ' ' means "not yet entered".
solvedCells = []  # Needs to be cleared every time a new puzzle is loaded.
def cross(A, B):
    """Cross product of elements in A and elements in B, concatenated as a+b."""
    products = []
    for a in A:
        for b in B:
            products.append(a + b)
    return products
digits = '123456789'
rows = 'ABCDEFGHI'
cols = digits
squares = cross(rows, cols)
# The 27 units, in this order: 9 columns, 9 rows, 9 boxes.
column_units = [cross(rows, c) for c in cols]
row_units = [cross(r, cols) for r in rows]
box_units = [cross(rs, cs)
             for rs in ('ABC', 'DEF', 'GHI')
             for cs in ('123', '456', '789')]
unitlist = column_units + row_units + box_units
# units[s]: the three units containing square s; peers[s]: the 20 squares
# sharing a unit with s (excluding s itself).
units = {s: [u for u in unitlist if s in u] for s in squares}
peers = {s: set(sum(units[s], [])) - {s} for s in squares}
def parse_grid(grid):
    """Convert grid to a dict of possible values, {square: digits}, or
    return False if a contradiction is detected.

    Side effects: resets the module-level `counter`, and reverses each clue's
    slice of `solvedCells` so the logged deductions read in propagation order.
    """
    values = dict((s, digits) for s in squares)
    global counter
    global base
    counter = 0
    for s,d in grid_values(grid).items():
        # `base` marks where this clue's deductions start in solvedCells.
        base = counter
        if d in digits and not assign(values, s, d):
            return False ## (Fail if we can't assign d to square s.)
        # Reverse the deductions recorded while propagating this clue.
        solvedCells[base:counter] = solvedCells[base:counter][::-1]
    return values
def grid_values(grid):
    "Convert grid into a dict of {square: char} with '0' or '.' or ' ' for empties."
    chars = []
    for c in grid:
        if c in digits or c in '0. ':
            chars.append(c)
    assert len(chars) == 81
    return dict(zip(squares, chars))
def decodeDict(values):
    """Convert a values dict into an 81-character string.

    Squares pinned to a single candidate contribute that digit; every other
    square contributes a space. The output matches the format the initial
    puzzle grids are stored in, so it can be fed straight back to parse_grid
    or pushed to the display without re-reading the screen.
    """
    cells = []
    for r in rows:
        for c in cols:
            candidates = values[r + c]
            cells.append(candidates if len(candidates) == 1 else " ")
    return ''.join(cells)
def assign(values, s, d):
    """Eliminate all the other values (except d) from values[s] and propagate.
    Return values, except return False if a contradiction is detected.

    Side effect: records [square, digit, kind] into the module-level
    solvedCells log (at most once per square/digit under any kind tag).
    Kind 2 marks a square whose solved peers cover all nine digits;
    kind 0 marks an ordinary propagation step.
    """
    peerList = []
    other_values = values[s].replace(d, '')
    global counter
    if all(eliminate(values, s, d2) for d2 in other_values):
        # Only log this deduction if it has not been recorded before.
        if [s, d, 0] not in solvedCells and [s, d, 1] not in solvedCells and [s, d, 2] not in solvedCells:
            if counter < len(solvedCells):
                counter = counter + 1
            # Gather the solved values among s's peers (plus d itself) to see
            # whether this square was fully forced.
            peerList.append(d)
            for s2 in peers[s]:
                if len(values[s2]) == 1 and values[s2] not in peerList:
                    peerList.append(values[s2])
            peerList.sort()
            if ''.join(peerList) == "123456789":
                solvedCells.append([s, d, 2])
            else:
                solvedCells.append([s, d, 0])
        return values
    else:
        return False
def eliminate(values, s, d):
    """Eliminate d from values[s]; propagate when values or places <= 2.
    Return values, except return False if a contradiction is detected.

    Mutually recursive with assign(): strategy (1) below re-enters eliminate
    on the peers, and strategy (2) re-enters assign.
    """
    if d not in values[s]:
        return values ## Already eliminated
    values[s] = values[s].replace(d,'')
    ## (1) If a square s is reduced to one value d2, then eliminate d2 from the peers.
    if len(values[s]) == 0:
        return False ## Contradiction: removed last value
    elif len(values[s]) == 1:
        d2 = values[s]
        if not all(eliminate(values, s2, d2) for s2 in peers[s]):
            return False
    ## (2) If a unit u is reduced to only one place for a value d, then put it there.
    for u in units[s]:
        # NOTE: the comprehension variable shadows the parameter s (and, under
        # Python 2 scoping, rebinds it) — harmless since s is not used below.
        dplaces = [s for s in u if d in values[s]]
        if len(dplaces) == 0:
            return False ## Contradiction: no place for this value
        elif len(dplaces) == 1:
            # d can only be in one place in unit; assign it there
            if not assign(values, dplaces[0], d):
                return False
    return values
## This is unnecessary in the final application — debugging aid only.
def display(values):
    "Display these values as a 2-D grid on stdout."
    # Python 2 print statements: this module predates Python 3.
    # Column width fits the widest candidate string in the grid.
    width = 1+max(len(values[s]) for s in squares)
    line = '+'.join(['-'*(width*3)]*3)
    for r in rows:
        print ''.join(values[r+c].center(width)+('|' if c in '36' else '')
                      for c in cols)
        # Horizontal separators after rows C and F delimit the 3x3 boxes.
        if r in 'CF': print line
    print
def solve(grid): return search(parse_grid(grid))
def search(values):
    "Using depth-first search and propagation, try all possible values."
    if values is False:
        return False  ## Propagation already failed upstream.
    if all(len(values[s]) == 1 for s in squares):
        return values  ## Every square pinned: solved.
    ## Branch on the unfilled square with the fewest remaining possibilities.
    _, target = min((len(values[s]), s) for s in squares if len(values[s]) > 1)
    attempts = (search(assign(values.copy(), target, d)) for d in values[target])
    return some(attempts)
def some(seq):
    "Return the first truthy element of seq, or False if there is none."
    return next((e for e in seq if e), False)
def square2Cell(squar):
'''Takes a cell's designation (eg: A1, E5, I9) (string), returns cell number
(eg: 0, 40, 80) (int).'''
if squar[0] == "A": cell = -1
elif squar[0] == "B": cell = 8
elif squar[0] == "C": cell = 17
elif squar[0] == "D": cell = 26
elif squar[0] == "E": cell = 35
elif squar[0] == "F": cell = 44
elif squar[0] == "G": cell = 53
elif squar[0] == "H": cell = 62
elif squar[0] == "I": cell = 71
return cell + int(squar[1])
def isSolved(stringGrid):
    "Checks if string representation of puzzle is complete (no blank cells)."
    return ' ' not in stringGrid
def Coord(cellNo):
    """Takes a cell number (0-80), returns its designation (eg 0 -> 'A1').

    Uses floor division so the row index stays an int: identical to the
    original `/` on Python 2 ints, and also correct under Python 3's true
    division (where `cellNo / 9` would be a float and fail as an index).
    """
    a = "ABCDEFGHI"
    return a[cellNo // 9] + str(cellNo % 9 + 1)
def genSol(stringGrid):
    """Accepts a string grid and generates an evolving partial solution.

    Solves the puzzle once up front, then replays it one cell at a time:
    each iteration copies the first cell that differs between the evolving
    partial grid and the full solution, logs it into solvedCells with kind
    tag 1 (a search-derived value rather than a pure propagation), and
    re-propagates. Returns the fully solved values dict.
    """
    b = solve(stringGrid)
    # Reset the deduction log before re-parsing the original clues.
    del solvedCells[:]
    a = parse_grid(stringGrid)
    sa = list(decodeDict(a))
    sb = list(decodeDict(b))
    while not(isSolved("".join(sa))):
        i = 0
        while i < 81:
            if sa[i] != sb[i]:
                sa[i] = sb[i]
                # Only log cells not already recorded under any kind tag.
                if [Coord(i), sb[i], 0] not in solvedCells and [Coord(i), sb[i], 1] not in solvedCells and [Coord(i), sb[i], 2] not in solvedCells:
                    solvedCells.append([Coord(i), sb[i], 1])
                break
            i = i + 1
        # Re-run propagation with the newly filled cell in place.
        sa = list(decodeDict(parse_grid("".join(sa))))
    return b
#----------------------------------Solve Methods-------------------------------#
#-------------------------------------Buttons!---------------------------------#
def writeButtons(mouse):
"Reads button values 1 through 9 from the button grid."
if mouse[0] in range(widthBuffer + 3 * cellSize, widthBuffer + 6 * cellSize) and mouse[1] in range(heightBuffer + 10 * cellSize, heightBuffer + 13 * cellSize):
if mouse[0] in range(widthBuffer + 3 * cellSize, widthBuffer + 4 * cellSize) and mouse[1] in range(heightBuffer + 10 * cellSize, heightBuffer + 11 * cellSize):
writeValue = "1"
if mouse[0] in range(widthBuffer + 4 * cellSize, widthBuffer + 5 * cellSize) and mouse[1] in range(heightBuffer + 10 * cellSize, heightBuffer + 11 * cellSize):
writeValue = "2"
if mouse[0] in range(widthBuffer + 5 * cellSize, widthBuffer + 6 * cellSize) and mouse[1] in range(heightBuffer + 10 * cellSize, heightBuffer + 11 * cellSize):
writeValue = "3"
if mouse[0] in range(widthBuffer | |
in range(3)]
for run in range(N_runs):
TV_loss_total += loss[run]['TV']
CostTrain_loss_total += loss[run][('%s' %cost_func[run], 'Train')]
CostTest_loss_total += loss[run][('%s' %cost_func[run], 'Test')]
N_epochs = N_epochs[0]
average_loss['TV'] = TV_loss_total/N_runs
average_loss[('%s' %cost_func[run], 'Train')] = CostTrain_loss_total/N_runs
average_loss[('%s' %cost_func[run], 'Test')] = CostTest_loss_total/N_runs
[TV_max, TV_min, cost_train_max, cost_train_min, cost_test_max, cost_test_min] = [np.zeros(N_epochs-1) for _ in range(6)]
for epoch in range(N_epochs-1):
temp_tv = []
temp_cost_test = []
temp_cost_train = []
for run in range(N_runs):
temp_tv.append(loss[run]['TV'][epoch])
temp_cost_test.append(loss[run][('%s' %cost_func[run], 'Test')][epoch])
temp_cost_train.append(loss[run][('%s' %cost_func[run], 'Train')][epoch])
TV_max[epoch] = max(temp_tv)
cost_train_max[epoch] = max(temp_cost_train)
cost_test_max[epoch] = max(temp_cost_test)
TV_min[epoch] = min(temp_tv)
cost_train_min[epoch] = min(temp_cost_train)
cost_test_min[epoch] = min(temp_cost_test)
error_upper['TV'] = np.absolute(average_loss['TV'] - TV_max)
error_upper[('%s' %cost_func[0], 'Train')] = np.absolute(average_loss[('%s' %cost_func[0], 'Train')] - cost_train_max)
error_upper[('%s' %cost_func[0], 'Test')] = np.absolute(average_loss[('%s' %cost_func[0], 'Test')] - cost_test_max)
error_lower['TV'] = np.absolute(average_loss['TV'] - TV_min)
error_lower[('%s' %cost_func[0], 'Train')] = np.absolute(average_loss[('%s' %cost_func[0], 'Train')] - cost_train_min)
error_lower[('%s' %cost_func[0], 'Test')] = np.absolute(average_loss[('%s' %cost_func[0], 'Test')] - cost_test_min)
return average_loss, error_upper, error_lower
def PrintAveragesToFiles(N_epochs, learning_rate, data_type, data_circuit, N_born_samples, N_data_samples, N_kernel_samples,
                         batch_size, kernel_type, cost_func, qc, score, stein_eigvecs, stein_eta, sinkhorn_eps, runs):
    """Average training losses over several runs and write them to disk.

    Every argument is a per-run list of hyperparameters. The averaged trial
    is filed under a single parameter combination, so the learning rates and
    the Sinkhorn regularisers must agree across runs.

    Writes (via np.savetxt) the averaged train/test losses, the averaged
    total variation, and the upper/lower error bands under the trial
    directory returned by MakeTrialNameFile.

    Raises:
        ValueError: If learning rates or Sinkhorn regularisers differ
            between runs.
    """
    # `if all(...) is False` relied on identity-testing a bool; `not all(...)`
    # is the idiomatic, equivalent form.
    if not all(x == learning_rate[0] for x in learning_rate):
        raise ValueError('All Learning Rates must be the same in all inputs.')
    if not all(x == sinkhorn_eps[0] for x in sinkhorn_eps):
        raise ValueError('All Sinkhorn regularisers must be the same in all inputs.')

    average_loss, error_upper, error_lower = AverageCost(N_epochs, learning_rate, data_type, data_circuit, N_born_samples, N_data_samples, N_kernel_samples,
                                                         batch_size, kernel_type, cost_func, qc, score,
                                                         stein_eigvecs, stein_eta, sinkhorn_eps, runs)

    # All runs share the same hyperparameters, so index [0] is representative.
    stein_params = [score[0], stein_eigvecs[0], stein_eta[0], kernel_type[0]]
    N_samples = [N_data_samples[0], N_born_samples[0], batch_size[0], N_kernel_samples[0]]
    trial_name = MakeTrialNameFile(cost_func[0], data_type[0], data_circuit[0], N_epochs[0], learning_rate[0], qc[0], kernel_type[0], N_samples, stein_params, sinkhorn_eps[0], 'Average')

    # Create directories to store output training information.
    loss_path = '%s/loss/%s/' % (trial_name, cost_func[0])
    TV_path = '%s/loss/TV/' % trial_name
    loss_path_upper_error = '%s/loss/%s/upper_error/' % (trial_name, cost_func[0])
    loss_path_lower_error = '%s/loss/%s/lower_error/' % (trial_name, cost_func[0])
    MakeDirectory(loss_path)
    MakeDirectory(TV_path)
    MakeDirectory(loss_path_upper_error)
    MakeDirectory(loss_path_lower_error)

    # Upper bounds on the loss errors.
    np.savetxt('%s/loss/%s/upper_error/train' % (trial_name, cost_func[0]), error_upper[('%s' % cost_func[0], 'Train')])
    np.savetxt('%s/loss/%s/upper_error/test' % (trial_name, cost_func[0]), error_upper[('%s' % cost_func[0], 'Test')])
    np.savetxt('%s/loss/TV/upper_error' % (trial_name), error_upper[('TV')])
    # Lower bounds on the loss errors.
    np.savetxt('%s/loss/%s/lower_error/train' % (trial_name, cost_func[0]), error_lower[('%s' % cost_func[0], 'Train')])
    np.savetxt('%s/loss/%s/lower_error/test' % (trial_name, cost_func[0]), error_lower[('%s' % cost_func[0], 'Test')])
    np.savetxt('%s/loss/TV/lower_error' % (trial_name), error_lower[('TV')])
    # Averaged train/test losses and total variation over training.
    np.savetxt('%s/loss/%s/train_avg' % (trial_name, cost_func[0]), average_loss[('%s' % cost_func[0], 'Train')])
    np.savetxt('%s/loss/%s/test_avg' % (trial_name, cost_func[0]), average_loss[('%s' % cost_func[0], 'Test')])
    np.savetxt('%s/loss/TV/average' % (trial_name), average_loss[('TV')])
    return
# N_epochs.append(200)
# learning_rate.append(0.01)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('Sinkhorn')
# qc.append('5q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(1)
# runs.append(0)
# N_epochs.append(200)
# learning_rate.append(0.01)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('Sinkhorn')
# qc.append('5q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(1)
# runs.append(1)
# N_epochs.append(200)
# learning_rate.append(0.01)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('Sinkhorn')
# qc.append('5q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(1)
# runs.append(2)
# N_epochs.append(200)
# learning_rate.append(0.01)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('Sinkhorn')
# qc.append('5q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(1)
# runs.append(3)
# N_epochs.append(200)
# learning_rate.append(0.01)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('Sinkhorn')
# qc.append('5q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(1)
# runs.append(4)
# PrintAveragesToFiles(N_epochs, learning_rate, data_type, data_circuit,N_born_samples, N_data_samples, N_kernel_samples,
# batch_size, kernel_type, cost_func, qc, score, stein_eigvecs, stein_eta, sinkhorn_eps, runs)
####################################################################################################################
# #Plot Single Cost
###################################################################################################################
def PlotSingleCostFunction(N_epochs, learning_rate, data_type, data_circuit,
                            N_born_samples, N_data_samples, N_kernel_samples,
                            batch_size, kernel_type, cost_func, qc, score,
                            stein_eigvecs, stein_eta, sinkhorn_eps, comparison, legend = True):
    """Plot the results of a single training configuration.

    Parameters mirror the file-naming convention used by ``ReadFromFile``
    and ``AverageCostsFromFile``.  ``comparison`` selects the quantity
    plotted:

    * ``'cost'`` -- loss on the training and test sets (averaged with
      error bands for MMD/Sinkhorn; single-run curves for Stein).
    * ``'tv'``   -- total variation distance between data and model.

    ``legend`` is accepted for interface compatibility; the legend is
    always drawn.
    """
    loss, born_final_probs, data_probs_final = ReadFromFile(N_epochs, learning_rate, data_type, data_circuit,
                                                            N_born_samples, N_data_samples, N_kernel_samples,
                                                            batch_size, kernel_type, cost_func, qc, score,
                                                            stein_eigvecs, stein_eta, sinkhorn_eps, 0)
    x = np.arange(0, N_epochs-1, 1)
    if comparison.lower() == 'cost':
        # Averages and error bands over all available runs of this config
        average_loss, upper_error, lower_error = AverageCostsFromFile(N_epochs, learning_rate, data_type, data_circuit, N_born_samples, N_data_samples, N_kernel_samples,
                                                                        batch_size, kernel_type, cost_func, qc, score, stein_eigvecs, stein_eta, sinkhorn_eps)
        if cost_func.lower() == 'mmd':
            plot_colour = 'r'
            plt.plot(x, average_loss['MMD', 'Train'], '%so-' % plot_colour,
                     label=r'MMD on training set for $\kappa_{%s}$, $\eta_{init}$ = %.3f.' % (kernel_type[0], learning_rate))
            plt.fill_between(x, average_loss['MMD', 'Train'] - lower_error['MMD', 'Train'],
                             average_loss['MMD', 'Train'] + upper_error['MMD', 'Train'], alpha=0.3, facecolor='%s' % plot_colour)
            plt.plot(x, average_loss['MMD', 'Test'], '%s-' % plot_colour,
                     label=r'MMD on test set for $\kappa_{%s}$, $\eta_{init}$ = %.3f.' % (kernel_type[0], learning_rate))
            plt.fill_between(x, average_loss['MMD', 'Test'] - lower_error['MMD', 'Test'],
                             average_loss['MMD', 'Test'] + upper_error['MMD', 'Test'], alpha=0.3)
        elif cost_func.lower() == 'sinkhorn':
            plot_colour = 'b'
            # Sinkhorn runs may be shorter than N_epochs-1, so size the x
            # axis from the data itself.
            x = np.arange(0, len(average_loss['Sinkhorn', 'Train']))
            plt.plot(x, average_loss['Sinkhorn', 'Train'], '%so-' % plot_colour,
                     label=r'Sinkhorn on training set, $\eta_{init}$ = %.3f.' % learning_rate)
            plt.fill_between(x, average_loss['Sinkhorn', 'Train'] - lower_error['Sinkhorn', 'Train'],
                             average_loss['Sinkhorn', 'Train'] + upper_error['Sinkhorn', 'Train'], alpha=0.5, facecolor=plot_colour)
            plt.plot(x, average_loss['Sinkhorn', 'Test'], '%s-' % plot_colour,
                     label=r'Sinkhorn on test set, $\eta_{init}$ = %.3f.' % learning_rate)
            plt.fill_between(x, average_loss['Sinkhorn', 'Test'] - lower_error['Sinkhorn', 'Test'],
                             average_loss['Sinkhorn', 'Test'] + upper_error['Sinkhorn', 'Test'], alpha=0.3, facecolor=plot_colour)
        elif cost_func.lower() == 'stein':
            # Stein curves come from the single run read above (no bands)
            if score.lower() == 'exact':
                plot_colour = 'c'
                plt.plot(loss[('Stein', 'Train')], '%so-' % (plot_colour), label=r'Stein, on training set using Exact score')
                plt.plot(loss[('Stein', 'Test')], '%sx--' % (plot_colour), label=r'Stein, on test set using Exact score ')
            elif score.lower() == 'spectral':
                plot_colour = 'm'
                plt.plot(loss[('Stein', 'Train')], '%sx-' % (plot_colour), label=r'Stein, on training set using Spectral score')
                plt.plot(loss[('Stein', 'Test')], '%s-' % (plot_colour), label=r'Stein, on test set using Spectral score.')
        plt.legend(loc='best', prop={'size': 20})
        plt.show()
    elif comparison.lower() == 'tv':
        try:
            # Best effort: averaged data may not exist for every config.
            # Only the Sinkhorn branch below needs these values.
            average_loss, upper_error, lower_error = AverageCostsFromFile(N_epochs, learning_rate, data_type, data_circuit, N_born_samples, N_data_samples, N_kernel_samples,
                                                                            batch_size, kernel_type, cost_func, qc, score, stein_eigvecs, stein_eta, sinkhorn_eps)
        except Exception:
            pass
        if cost_func.lower() == 'mmd':
            plot_colour = 'r'
            plt.plot(x, loss['TV'], label=r'MMD on test set for $\kappa_{%s}$, $\eta_{init}$ = %.3f.' % (kernel_type[0], learning_rate))
        elif cost_func.lower() == 'sinkhorn':
            # NOTE(review): this branch requires AverageCostsFromFile to
            # have succeeded above, otherwise average_loss is undefined.
            plot_colour = 'b'
            plt.plot(x, average_loss['TV'], label=r'Sinkhorn using Hamming cost, $\eta_{init}$ = %.3f.' % learning_rate)
            plt.fill_between(x, average_loss['TV'] - lower_error['TV'], average_loss['TV'] + upper_error['TV'], alpha=0.2)
        elif cost_func.lower() == 'stein':
            if score.lower() == 'exact':
                plot_colour = 'c'
                plt.plot(loss['TV'], '%so-' % (plot_colour), label=r'Stein using Exact score.')
            elif score.lower() == 'spectral':
                plot_colour = 'm'
                plt.plot(loss['TV'], '%so-' % (plot_colour), label=r'Stein using Spectral score.')
        plt.legend(loc='best', prop={'size': 20})
        plt.show()
    return
'''3 QUBIT SINKHORN'''
# N_epochs = 200
# learning_rate = 0.01
# data_type = 'Bernoulli_Data'
# data_circuit ='IQP'
# N_born_samples = 500
# N_data_samples = 500
# N_kernel_samples = 2000
# batch_size = 250
# kernel_type = 'Gaussian'
# cost_func = 'Sinkhorn'
# qc = '3q-qvm'
# score = 'Approx'
# stein_eigvecs = 3
# stein_eta = 0.01
# sinkhorn_eps = 0.1
# runs = 0
'''4 QUBIT SINKHORN'''
# N_epochs = 200
# learning_rate = 0.05
# data_type = 'Bernoulli_Data'
# data_circuit ='IQP'
# N_born_samples = 500
# N_data_samples = 500
# N_kernel_samples = 2000
# batch_size = 250
# kernel_type = 'Gaussian'
# cost_func = 'Sinkhorn'
# qc = '4q-qvm'
# score = 'Approx'
# stein_eigvecs = 3
# stein_eta = 0.01
# sinkhorn_eps = 1
# runs = 0
# PlotSingleCostFunction(N_epochs, learning_rate, data_type, data_circuit, N_born_samples, N_data_samples, N_kernel_samples,
# batch_size, kernel_type, cost_func, qc, score, stein_eigvecs, stein_eta, sinkhorn_eps, 'cost', legend = True)
# ###################################################################################################################
# #Compare Kernels
###################################################################################################################
def CompareKernelsPlot(N_epochs, learning_rate, data_type, data_circuit,
N_born_samples, N_data_samples, N_kernel_samples,
batch_size, kernel_type, cost_func, qc, score,
stein_eigvecs, stein_eta, sinkhorn_eps, comparison, runs, legend = True):
loss, born_final_probs, data_probs_final = ReadFromFile(N_epochs, learning_rate, data_type, data_circuit,
N_born_samples, N_data_samples, N_kernel_samples,
batch_size, kernel_type, cost_func, qc, score,
stein_eigvecs, stein_eta, sinkhorn_eps, runs)
# print(len(N_epochs), loss)
if all(x.lower() == 'mmd' for x in cost_func) is False:
#If all cost functions to be compared are the mmd
raise ValueError('All cost functions must be MMD')
else:
if comparison.lower() == 'tv':
if qc[0][0].lower() == '2':
plot_colour = ['rs-', 'r+-', 'ro-', 'bs-', 'b+-', 'bo-']
elif qc[0][0].lower() == '3':
plot_colour = ['rs-', 'b+-', 'ro-', 'bs-', 'b+-', 'bo-']
elif qc[0][0].lower() == '4':
plot_colour = ['rx-', 'bx-', 'c+-', 'mo-']
elif qc[0][0].lower() == '5':
plot_colour = ['rx-', 'bx-']
elif comparison.lower() == 'mmd':
if qc[0][0].lower() == '2':
plot_colour = ['rs-', 'b+-', 'ro-', 'bs-', 'b+-', 'bo-']
elif qc[0][0].lower() == '3':
plot_colour = ['rs-', 'b+-', 'ro-', 'bs-', 'b+-', 'bo-']
elif qc[0][0].lower() == '4':
plot_colour = ['rx-', 'bx-']
N_trials = len(N_epochs)
x = np.arange(0, N_epochs[0]-1, 1)
if comparison.lower() == 'probs':
fig, axs = plt.subplots()
axs.clear()
x = np.arange(len(data_probs_final[0]))
axs.bar(x, data_probs_final[0].values(), width=0.2, color= 'k' , align='center')
axs.bar(x-(0.2*(0+1)), born_final_probs[2].values(), width=0.2, color='b', align='center')
axs.bar(x-(0.2*(0+2)), born_final_probs[-1].values(), width=0.2, color='r', align='center')
axs.legend(('Data',r'$\mathsf{MMD}$ with $\kappa_G$',r'$\mathsf{MMD}$ with $\kappa_Q$'), | |
# Repository: paudetseis/SplitPy
# Copyright 2019 <NAME> & <NAME>
#
# This file is part of SplitPy.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
:mod:`~splitpy` defines the following base classes:
- :class:`~splitpy.classes.Split`
- :class:`~splitpy.classes.PickPlot`
- :class:`~splitpy.classes.DiagPlot`
The class :class:`~splitpy.classes.Split` contains attributes
and methods for the analysis of teleseismic shear-wave splitting
from three-component seismograms.
The class :class:`~splitpy.classes.PickPlot` defines figure handles
for a picking window showing the seismograms and the predicted teleseismic
shear-wave phase arrivals. This figure is interactive and new picks can
be generated to refine the analysis.
The class :class:`~splitpy.classes.DiagPlot` defines figure handles
for a diagnostic figure showing a summary of the splitting results. It can
be called after each application of the `split.analyze` method to show
the summary of the analysis as a figure. This figure can also be saved as
a .png file.
"""
# -*- coding: utf-8 -*-
from math import ceil
import numpy as np
from splitpy import utils, calc
from obspy import Trace, Stream
import matplotlib.pyplot as plt
import matplotlib.gridspec as gspec
class Meta(object):
    """
    A Meta object contains attributes associated with the station-event
    data for a single teleseismic event.

    Attributes
    ----------
    time : :class:`~obspy.core.UTCDateTime`
        Origin time of earthquake
    dep : float
        Depth of hypocenter (km; defaults to 10. when not reported)
    lon : float
        Longitude coordinate of epicenter
    lat : float
        Latitude coordinate of epicenter
    mag : float
        Magnitude of earthquake (-9. when not reported)
    gac : float
        Great arc circle between station and epicenter (degrees)
    epi_dist : float
        Epicentral distance between station and epicenter (km)
    baz : float
        Back-azimuth - pointing to earthquake from station (degrees)
    az : float
        Azimuth - pointing to station from earthquake (degrees)
    ttime : float
        Predicted travel time of the target phase (s); None when rejected
    slow : float
        Horizontal slowness of phase (s/km)
    inc : float
        Incidence angle of phase at surface (degrees)
    phase : str
        Name of the targeted teleseismic phase (e.g., 'SKS')
    accept : bool
        Whether the event passes the distance/arrival screening
    snrq, snrt : float
        Signal-to-noise ratios, filled in later by the analysis
    maxdt : float
        Maximum delay time searched in the splitting analysis (s)
    ddt : float
        Delay-time search increment (s)
    dphi : float
        Fast-axis azimuth search increment (degrees)
    align : str
        Alignment convention of the rotated seismograms ('LQT')
    rotated : bool
        Whether the seismograms have been rotated yet
    """

    def __init__(self, sta, event, gacmin=85., gacmax=180., phase='SKS',
                 maxdt=4., ddt=0.1, dphi=1.):
        # Function-scope imports: obspy is only needed when a Meta
        # object is actually constructed.
        from obspy.geodetics.base import gps2dist_azimuth as epi
        from obspy.geodetics import kilometer2degrees as k2d
        from obspy.taup import TauPyModel
        # Extract event 4D parameters (origin time and hypocenter)
        self.time = event.origins[0].time
        self.lon = event.origins[0].longitude
        self.lat = event.origins[0].latitude
        self.dep = event.origins[0].depth
        # Normalize depth to km: values > 1000 are assumed to be
        # catalogue depths reported in meters. Missing depth -> 10 km.
        if self.dep is not None:
            if self.dep > 1000.:
                self.dep = self.dep/1000.
        else:
            self.dep = 10.
        # Magnitude; -9. flags a missing catalogue magnitude
        self.mag = event.magnitudes[0].mag
        if self.mag is None:
            self.mag = -9.
        # Station-event geometry: distance (converted m -> km below),
        # azimuth and back-azimuth from the geodetic inverse problem
        self.epi_dist, self.az, self.baz = epi(
            self.lat, self.lon, sta.latitude, sta.longitude)
        self.epi_dist /= 1000
        self.gac = k2d(self.epi_dist)
        if self.gac > gacmin and self.gac < gacmax:
            # Get travel time info for the target phase (iasp91 model)
            tpmodel = TauPyModel(model='iasp91')
            # Predicted arrivals; self.dep has been normalized to km above
            arrivals = tpmodel.get_travel_times(
                distance_in_degree=self.gac,
                source_depth_in_km=self.dep,
                phase_list=[phase])
            if len(arrivals) > 1:
                print("arrival has many entries: ", len(arrivals))
            elif len(arrivals) == 0:
                print("no arrival found")
                self.accept = False
                # NOTE(review): early return leaves snrq/maxdt/etc. unset
                # on this path — confirm downstream code checks `accept`
                # before touching them.
                return
            # Use the first predicted arrival
            arrival = arrivals[0]
            # Attributes from parameters
            self.ttime = arrival.time
            # Convert ray parameter from s/deg to s/km (1 deg ~ 111 km)
            self.slow = arrival.ray_param_sec_degree/111.
            self.inc = arrival.incident_angle
            self.phase = phase
            self.accept = True
        else:
            # Event outside the accepted distance window: reject
            self.ttime = None
            self.slow = None
            self.inc = None
            self.phase = None
            self.accept = False
        # Attributes that get updated as analysis progresses
        self.snrq = None
        self.snrt = None
        self.maxdt = maxdt
        self.ddt = ddt
        self.dphi = dphi
        self.align = 'LQT'
        self.rotated = False
class Result(object):
    """Container for the products of one splitting measurement.

    Holds the outcome of a single shear-wave splitting analysis;
    applicable to both the RC and SC methods
    (see :func:`~splitpy.classes.analyze`).

    Attributes
    ----------
    Emat : :class:`~numpy.ndarray`
        Error minimization matrix
    trQ_c : :class:`~obspy.core.Trace`
        Corrected radial (Q) component
    trT_c : :class:`~obspy.core.Trace`
        Corrected transverse (T) component
    trFast : :class:`~obspy.core.Trace`
        Corrected Fast component
    trSlow : :class:`~obspy.core.Trace`
        Corrected Slow component
    phi : float
        Azimuth of fast axis (deg)
    dtt : float
        Delay time between fast and slow axes (sec)
    phi_min : float
        Azimuth used in plotting method
    edtt : float
        Error on delay time between fast and slow axes (sec)
    ephi : float
        Error on azimuth of fast axis (deg)
    errc : float
        Error contours on `Emat`
    """

    def __init__(self, Emat, trQ_c, trT_c, trFast,
                 trSlow, phi, dtt, phi_min, edtt, ephi, errc):
        # Every constructor argument is stored verbatim as an attribute
        # of the same name.
        for attr_name, attr_value in (
                ('Emat', Emat), ('trQ_c', trQ_c), ('trT_c', trT_c),
                ('trFast', trFast), ('trSlow', trSlow), ('phi', phi),
                ('dtt', dtt), ('phi_min', phi_min), ('edtt', edtt),
                ('ephi', ephi), ('errc', errc)):
            setattr(self, attr_name, attr_value)
class Split(object):
"""
A Split object contains dictionary attributes that associate
station information with single event (i.e., earthquake)
metadata, corresponding raw and rotated seismograms and
splitting results.
Note
----
The object is initialized with the ``sta`` field only, and
other attributes are added to the object as the analysis proceeds.
Attributes
----------
sta : object
Object containing station information - from :mod:`~stdb` database.
meta : :class:`~splitpy.classes.Meta`
Object of metadata information for single event.
dataZNE : :class:`~splitpy.classes.Data`
Object containing raw trace data in :class:`~obspy.core.Trace` format
dataLQT : :class:`~splitpy.classes.Data`
Object containing rotated trace data in :class:`~obspy.core.Trace` format
"""
def __init__(self, sta):
# # Load example data if initializing empty object
# if sta == 'demo' or sta == 'Demo':
# print("Uploading demo data - station NY.MMPY")
# import os
# import pickle
# sta = pickle.load(
# open(os.path.join(
# os.path.dirname(__file__),
# "examples/data", "MMPY.pkl"), 'rb'))['NY.MMPY']
# Attributes from parameters
self.sta = sta
# Initialize meta and data objects as None
self.meta = None
self.dataZNE = None
self.dataLQT = None
def add_event(self, event, gacmin=85., gacmax=120., phase='SKS',
returned=False):
"""
Adds event metadata to Split object as Meta object.
Parameters
----------
event : :class:`~obspy.core.event`
Event metadata
"""
from obspy.geodetics.base import gps2dist_azimuth as epi
from obspy.geodetics import kilometer2degrees as k2d
from obspy.taup import TauPyModel
from obspy.core.event.event import Event
# if event == 'demo' or event == 'Demo':
# from obspy.clients.fdsn import Client
# from obspy.core import UTCDateTime
# client = Client()
# # Get catalogue using deployment start and end
# event = client.get_events(
# starttime=UTCDateTime('2015-07-03T06:00:00'),
# endtime=UTCDateTime('2015-07-03T07:00:00'),
# minmagnitude=6.0,
# maxmagnitude=6.5)[0]
# print(event.short_str())
if not isinstance(event, Event):
raise(Exception("Event has incorrect type"))
# Store as object attributes
self.meta = Meta(sta=self.sta, event=event,
gacmin=gacmin, gacmax=gacmax,
phase=phase)
if returned:
return self.meta.accept
def add_data(self, stream, returned=False, new_sr=5.):
"""
Adds stream of raw data as object attribute
Parameters
----------
stream : :class:`~obspy.core.Stream`
Stream container for NEZ seismograms
returned : bool
Whether or not to return the ``accept`` attribute
Returns
-------
accept : bool
Whether or not the object is accepted for further analysis
"""
if not self.meta:
raise(Exception("No meta data available - aborting"))
if not self.meta.accept:
return
# # Load demo data
# if stream == 'demo' or stream == 'Demo':
# import os
# import pickle
# file = open(os.path.join(
# os.path.dirname(__file__),
# "examples/data", "ZNE_Data.pkl"), "rb")
# stream = pickle.load(file)
# print(stream)
if not isinstance(stream, Stream):
raise(Exception("Event has incorrect type"))
try:
self.dataZNE = stream
if not np.allclose(
[t.stats.npts for t in stream[1:]], stream[0].stats.npts):
self.meta.accept = False
# Filter Traces
self.dataZNE.filter('lowpass', freq=0.5*new_sr,
corners=2, zerophase=True)
self.dataZNE.resample(new_sr, no_filter=False)
except:
print("Error: Not all channels are available")
self.meta.accept = False
if returned:
return self.meta.accept
def download_data(self, client, stdata=[], dtype='SAC', ndval=np.nan,
new_sr=5., dts=120., returned=False, verbose=False):
"""
Downloads seismograms based on event origin time and
P phase arrival and adds as object attribute.
Parameters
----------
client : :class:`~obspy.client.fdsn.Client`
Client object
ndval : float
Fill in value for missing data
new_sr : float
New sampling rate (Hz)
dts : float
Time duration (sec)
stdata : List
Station list
returned : bool
Whether or not to return the ``accept`` attribute
Returns
-------
| |
'view']
_all_searchable_fields = ['comment', 'ipv4addr', 'name', 'view', 'zone']
_return_fields = ['extattrs', 'ipv4addr', 'name', 'view']
_remap = {}
_shadow_fields = ['_ref']
class RpzAaaaRecord(InfobloxObject):
    """Substitute (AAAA Record) rule of a DNS Response Policy Zone.

    Corresponds to WAPI object 'record:rpz:aaaa'.

    Maps a domain name to a substitute IPv6 address. Add this rule to a
    previously defined Response Policy Zone to establish a specific
    name-to-address mapping; it represents the substitution rule for
    DNS AAAA records.

    Fields:
        comment: comment for the record; maximum 256 characters.
        disable: whether the record is disabled (False = enabled).
        extattrs: extensible attributes associated with the object.
        ipv6addr: the IPv6 address of the substitute rule.
        name: record name in FQDN format (cannot be unicode).
        rp_zone: name of the response policy zone holding the record.
        ttl: record TTL, a 32-bit unsigned integer in seconds; zero
            means the record should not be cached.
        use_ttl: use flag for ttl.
        view: DNS view containing the record, e.g. "external".
        zone: zone containing the record, e.g. "zone.com"; when
            searching by zone without a view, the default view is used.
    """

    _infoblox_type = 'record:rpz:aaaa'
    _shadow_fields = ['_ref']
    _remap = {}
    _fields = ['comment', 'disable', 'extattrs', 'ipv6addr', 'name',
               'rp_zone', 'ttl', 'use_ttl', 'view', 'zone']
    _return_fields = ['extattrs', 'ipv6addr', 'name', 'view']
    _search_for_update_fields = ['ipv6addr', 'name', 'view']
    _updateable_search_fields = ['comment', 'ipv6addr', 'name', 'view']
    _all_searchable_fields = ['comment', 'ipv6addr', 'name', 'view', 'zone']
class RpzAaaaIpaddressRecord(InfobloxObject):
    """Substitute (IPv6 Address) rule of a DNS Response Policy Zone.

    Corresponds to WAPI object 'record:rpz:aaaa:ipaddress'.

    Maps an IP address (represented by a host name) to a substitute
    IPv6 address inside a previously defined Response Policy Zone. It
    represents the substitution rule for the IP trigger policy, matching
    IP addresses that would otherwise appear in AAAA records in the
    "answer" section of a DNS response. Prefer this object over
    CNAMEIpAddress for IP address substitution rules.

    Fields:
        comment: comment for the record; maximum 256 characters.
        disable: whether the record is disabled (False = enabled).
        extattrs: extensible attributes associated with the object.
        ipv6addr: the IPv6 address of the substitute rule.
        name: record name in FQDN format (cannot be unicode).
        rp_zone: name of the response policy zone holding the record.
        ttl: record TTL, a 32-bit unsigned integer in seconds; zero
            means the record should not be cached.
        use_ttl: use flag for ttl.
        view: DNS view containing the record, e.g. "external".
        zone: zone containing the record, e.g. "zone.com"; when
            searching by zone without a view, the default view is used.
    """

    _infoblox_type = 'record:rpz:aaaa:ipaddress'
    _shadow_fields = ['_ref']
    _remap = {}
    _fields = ['comment', 'disable', 'extattrs', 'ipv6addr', 'name',
               'rp_zone', 'ttl', 'use_ttl', 'view', 'zone']
    _return_fields = ['extattrs', 'ipv6addr', 'name', 'view']
    _search_for_update_fields = ['ipv6addr', 'name', 'view']
    _updateable_search_fields = ['comment', 'ipv6addr', 'name', 'view']
    _all_searchable_fields = ['comment', 'ipv6addr', 'name', 'view', 'zone']
class RpzCnameRecord(InfobloxObject):
    """DNS Response Policy Zone CNAME record.

    Corresponds to WAPI object 'record:rpz:cname'.

    Represents several RPZ rules for the QNAME trigger policy (applied
    to requested domain names), depending on the canonical name:

    * empty canonical name          -> Block Domain Name (No Such Domain);
    * asterisk canonical name       -> Block Domain Name (No Data);
    * canonical name == record name -> Passthru Domain Name (a name
      starting with a wildcard needs the special canonical value
      'infoblox-passthru' to create a Wildcard Passthru rule — see the
      Infoblox Administrator Guide);
    * anything else                 -> Substitute (Domain Name) rule.

    Fields:
        canonical: canonical name in FQDN format (may be unicode).
        comment: comment for the record; maximum 256 characters.
        disable: whether the record is disabled (False = enabled).
        extattrs: extensible attributes associated with the object.
        name: record name in FQDN format (cannot be unicode).
        rp_zone: name of the response policy zone holding the record.
        ttl: record TTL, a 32-bit unsigned integer in seconds; zero
            means the record should not be cached.
        use_ttl: use flag for ttl.
        view: DNS view containing the record, e.g. "external".
        zone: zone containing the record, e.g. "zone.com"; when
            searching by zone without a view, the default view is used.
    """

    _infoblox_type = 'record:rpz:cname'
    _shadow_fields = ['_ref']
    _remap = {}
    _fields = ['canonical', 'comment', 'disable', 'extattrs', 'name',
               'rp_zone', 'ttl', 'use_ttl', 'view', 'zone']
    _return_fields = ['canonical', 'extattrs', 'name', 'view']
    _search_for_update_fields = ['canonical', 'name', 'view']
    _updateable_search_fields = ['canonical', 'comment', 'name', 'view']
    _all_searchable_fields = ['canonical', 'comment', 'name', 'view', 'zone']
class RpzCnameClientipaddressRecord(InfobloxObject):
""" RpzCnameClientipaddressRecord: DNS RPZ CNAMEClientIpAddress
record object.
Corresponds to WAPI object 'record:rpz:cname:clientipaddress'
A DNS RPZ CNAMEClientIpAddress record represents different RPZ
rules, depending on the value of the canonical name. This record
represents Passthru IP Address Rule, Block IP Address (No Such
Domain) Rule, Block IP Address (No Data) Rule.
This record represents the IP trigger policy. It matches IP
addresses that would otherwise appear in A and AAAA records in the
"answer" section of a DNS response.
If canonical name is empty, it is a Block IP Address (No Such
Domain) Rule.
If canonical name is an asterisk, it is a Block IP Address (No Data)
Rule.
If canonical name is equal to 'rpz-passthru', it is a Passthru IP
Address Rule.
You cannot create Substitute (IPv4/IPv6 Address) Rule for this
record see
the record.rpz.a.ipaddress object
or
the record.rpz.aaaa.ipaddress object
for details.
Fields:
canonical: The canonical name in FQDN format. This value can be in
unicode format.
comment: The comment for the record; maximum 256 characters.
disable: Determines if the record is disabled or not. False means
that the record is enabled.
extattrs: Extensible attributes associated with the object.For valid
values for extensible attributes, see the following information.
is_ipv4: Indicates whether the record is an IPv4 record. If the
return value is "true", it is an IPv4 record. Ohterwise, it is
an IPv6 record.
name: The name for a record in FQDN format. This value cannot be in
unicode format.
rp_zone: The name of a response policy zone in which the record
resides.
ttl: The Time To Live (TTL) value for record. A 32-bit unsigned
integer that represents the duration, in seconds, for which the
record is valid (cached). Zero indicates that the record should
not be cached.
use_ttl: Use | |
from time import sleep
from datetime import datetime
from influxdb import InfluxDBClient
from influxdb.exceptions import InfluxDBClientError
import logging
import logging.config
import os.path
import pynvml as N
import psutil
import subprocess
import socket
import sys
import yaml
# Global LOGGER var
LOGGER = logging.getLogger(__name__)
# --------- Class GPUStat : query, functions and process needed to obtain the culprit (pods) that execute jobs in GPU -------- #
class GPUStat(object):
def __init__(self, gpus_pod_usage={}):
"""Constructor of GPUStat class
Args:
gpus_pod_usage (py dictionary, default empty): Information of GPU usage by Pods (should be dict)
Fields:
gpus_pod_usage (py dictionary) : A detailed information of per-container GPU utilization in each GPU on a machine
hostname (string) : The hostname of current machine
query_time (datetime) : Time information when the object created
"""
self.gpus_pod_usage = gpus_pod_usage
# attach host and time information of each GPUStat
self.hostname = socket.gethostname()
self.query_time = datetime.now()
@staticmethod
def new_query():
"""Query the information of all the GPUs on the machine & Trace Pod Processes that utilize them
Returns:
GPUStat Object : Statistics and details to account GPU usage by Pods
"""
        def get_process_info(nv_process):
            """Collect details about one GPU-using process.

            Args:
                nv_process : NVML process handle (as returned by
                    nvmlDeviceGet*RunningProcesses) exposing ``pid`` and
                    ``usedGpuMemory``.

            Returns:
                process (dict) : keys 'pid', 'gpu_memory_usage' (MiB),
                    'username' and 'command' (short executable name, or
                    '?' when the cmdline is unavailable).

            Note:
                psutil.NoSuchProcess / psutil.Error may propagate if the
                process exits between the NVML query and the psutil
                lookup; the caller is expected to handle them.
            """
            # init dict to store process' information
            process = {}
            # pid comes straight from the NVML handle
            process['pid'] = nv_process.pid
            # Bytes to MBytes
            process['gpu_memory_usage'] = int(nv_process.usedGpuMemory / 1024 / 1024)
            # OS-level view of the same pid via psutil
            ps_process = psutil.Process(pid = nv_process.pid)
            # get process username
            process['username'] = ps_process.username()
            # cmdline returns full path; as in `ps -o comm`, get short cmdnames.
            _cmdline = ps_process.cmdline()
            # sometimes, zombie or unknown (e.g. [kworker/8:2H])
            if not _cmdline:
                process['command'] = '?'
            else:
                process['command'] = os.path.basename(_cmdline[0])
            return process
        def get_parent_process_info(nv_process):
            """Walk up the process tree to the pod's container process.

            Args:
                nv_process : NVML process handle exposing ``pid``.

            Returns:
                process (psutil.Process) : the highest ancestor whose own
                    parent is the container runtime shim.

            Note:
                Assumes a Docker runtime where the container supervisor
                is named "docker-containerd-shim" — TODO confirm on
                newer runtimes (containerd-shim-runc-v2, CRI-O) where
                this name differs and the loop would not terminate
                normally. If no such ancestor exists, process.parent()
                eventually returns None and the loop raises
                AttributeError.
            """
            # OS-level process for the NVML pid
            process = psutil.Process(pid = nv_process.pid)
            # In context of kubernetes, pod is parent process of containers (jobs) that run across the nodes in kubernetes cluster
            # Loop until we find the parent container (pod), the keyword is "docker-containerd-shim" (daemonless containers)
            while process.parent().name() != "docker-containerd-shim":
                process = process.parent()
            return process
def get_pod_info(pod_pid):
"""Get the pod information ; pod pid, container's name, pod name, pod namespace, container's id
Args:
pod_pid (int) : Pid of given pod
Returns:
pod (py dictionary) : Contains the desired information of pod.
"""
# init dict to store pod's information
pod = {}
# Equal to "docker ps -q" in shell terminal, listing all ids of running containers
docker_ps = subprocess.Popen(
["docker", "ps", "-q"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
docker_ps_out, docker_ps_err = docker_ps.communicate()
# Store the result of "docker ps -q" as an array of containers' id
container_ids = docker_ps_out.split("\n")
# Loop through all container ids,
# inspect each and try to find container details that match given pod_pid
for container_id in container_ids:
# Equal to "docker inspect --format '{{.State.Pid}} {{.Name}} {{.Id}}' [container_id]" in shell,
# Inspect pid, docker process's name, and container's id of each container's id
# (some are redundants, but it is to ensure we inspect the same process)
docker_inspect = subprocess.Popen(
["docker", "inspect", "--format","'{{printf \"%.0f %s %s\" .State.Pid .Name .Id}}'",container_id],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
docker_inspect_out, docker_inspect_err = docker_inspect.communicate()
# The result of docker inspect should contain desired information
# Split the result of docker inspect: [process_pid/pod_pid] [docker_process_name] [container_id]
pod_info = docker_inspect_out.split("\n")[0].replace("'","").split()
# Break the loop if we find the desired container details of a pod
if pod_info[0] == str(pod_pid):
break
# The name format for each running docker process in kubernetes is as follows:
# /k8s_[pod-name]_[container_name]_[pod_namespace]_[random_hash]
# Split them as pod_containers_details to obtain "container_name", "name", "namespace"
pod_container_details = pod_info[1].split("_")
# Store the results
pod['pid'] = pod_info[0]
pod['container_name'] = pod_container_details[1]
pod['name'] = pod_container_details[2]
pod['namespace'] = pod_container_details[3]
pod['container_id'] = pod_info[2]
return pod
def benchmark_gpu():
    """Query the utilization of every NVIDIA GPU and resolve each running
    process to its pod (parent container) information.

    Returns:
        list[dict]: one entry per GPU with keys "gpu_name", "gpu_index",
        "gpu_uuid" and "gpu_usage" (a list of per-pod usage dicts, empty
        when process enumeration is unsupported or no process is running).
    """
    # Number of NVIDIA GPUs present on this machine.
    device_count = N.nvmlDeviceGetCount()
    # Usage records, one per GPU.
    gpus_usage = []
    for index in range(device_count):
        # NVML handle plus the static identity (name, uuid) of this GPU.
        handle = N.nvmlDeviceGetHandleByIndex(index)
        name = N.nvmlDeviceGetName(handle)
        uuid = N.nvmlDeviceGetUUID(handle)
        # process        = job (container) that utilizes the NVIDIA GPU
        # parent process = parent container (pod) that utilizes the NVIDIA GPU
        processes = []
        parentProcesses = []
        # Compute processes currently running on this GPU.
        try:
            nv_comp_processes = N.nvmlDeviceGetComputeRunningProcesses(handle)
        except N.NVMLError:
            nv_comp_processes = None  # Not supported
        # Graphics processes currently running on this GPU.
        try:
            nv_graphics_processes = N.nvmlDeviceGetGraphicsRunningProcesses(handle)
        except N.NVMLError:
            nv_graphics_processes = None  # Not supported
        if nv_comp_processes is None and nv_graphics_processes is None:
            processes = None  # Not supported (in both cases)
        else:
            nv_comp_processes = nv_comp_processes or []
            nv_graphics_processes = nv_graphics_processes or []
            # Inspect every running process and find the corresponding pod
            # (parent container) that runs it.
            for nv_process in (nv_comp_processes + nv_graphics_processes):
                try:
                    process = get_process_info(nv_process)
                    parentProcess = get_parent_process_info(nv_process)
                    processes.append(process)
                    parentProcesses.append(parentProcess)
                except psutil.NoSuchProcess:
                    LOGGER.error("PSutil No Such Process")
                except psutil.Error:
                    LOGGER.error("PSutil General Error")
        # Each GPU can host more than one process (but in Kubernetes 1.8 they
        # should come from the same container/pod).
        pod_details = []
        # BUG FIX: when neither process query is supported, `processes` is
        # None; the original code passed it straight into zip() and crashed
        # with "TypeError: 'NoneType' object is not iterable". Only resolve
        # pods when we actually collected processes.
        if processes:
            for proc, parentProc in zip(processes, parentProcesses):
                # Pod detail is looked up via the parent process pid.
                pod = get_pod_info(parentProc.pid)
                pod_detail = {
                    "pod_container_name": pod['container_name'],
                    "pod_name": pod['name'],
                    "pod_namespace": pod['namespace'],
                    "pod_proc_username": proc['username'],
                    "pod_gpu_usage": proc['gpu_memory_usage'],
                    "pod_proc_pid": proc['pid']  # long data type
                }
                # Information of each pod that runs jobs in the cluster.
                pod_details.append(pod_detail)
        # Utilization record for this GPU.
        per_gpu_usage = {
            "gpu_name": name,
            "gpu_index": index,
            "gpu_uuid": uuid,
            "gpu_usage": pod_details
        }
        gpus_usage.append(per_gpu_usage)
    return gpus_usage
# init the python-nvml driver
N.nvmlInit()
# get current utilization in each GPU and corresponding pods details
gpus_pod_usage = benchmark_gpu()
# close the python-nvml driver
N.nvmlShutdown()
# return query result as GPUStat object
return GPUStat(gpus_pod_usage)
# --------- Class InfluxdbDriver : handle write process of GPU stats into Influxdb server -------- #
class InfluxDBDriver:
def __init__(self, influxdb_host, influxdb_port, influxdb_user, influxdb_pass, influxdb_db, *args):
    """Constructor of InfluxDBDriver class
    Args:
        influxdb_host (string) : Hostname (URL) of influxdb server, to store the data for.
        influxdb_port (string) : Port which infludb server is running on.
        influxdb_user (string) : Access username.
        influxdb_pass (string) : Access password.
        influxdb_db (string) : db name to write the GPU stats for.
    Fields:
        client (InfluxDBClient): Connection object for the given Influxdb,
            or None when the client could not be created.
    """
    # Try connecting to the influxdb instance; on failure keep a None client
    # so callers can detect the broken connection.
    try:
        client = InfluxDBClient(influxdb_host,
                                influxdb_port,
                                influxdb_user,
                                influxdb_pass,
                                influxdb_db
                                )
    except InfluxDBClientError:
        client = None
        # BUG FIX: original message "Influxdb connection does not working"
        # was grammatically broken.
        LOGGER.error("InfluxDB connection is not working")
    # this->object->client
    self.client = client
def write(self, gpu_stats):
"""Write the gpus' usage statistics to influxdb server
Args:
gpu_stats (GPUStat Obj) : Statistics and details to account GPU usage by Pods.
Returns:
None
"""
# get hostname and timestamp of the query
nodename = gpu_stats.hostname
stat_time = gpu_stats.query_time
# iterate though all available GPU in machine
for gpu_stat in gpu_stats.gpus_pod_usage:
# Assign and gather identity of each GPU
gpu_name = gpu_stat["gpu_name"]
gpu_index = gpu_stat["gpu_index"]
gpu_uuid = gpu_stat["gpu_uuid"]
gpu_usage | |
<filename>src/tankoh2/control_cl.py
"""control a tank optimization"""
import sys
import statistics
from tankoh2.existingdesigns import kautextDesign, NGTBITDesign
#from builtins import True, False
#from builtins import
sys.path.append('C:/MikroWind/MyCrOChain_Version_0_95_4_x64/MyCrOChain_Version_0_95_4_x64/abaqus_interface_0_95_4')
import os
import numpy as np
from scipy.optimize import curve_fit
from tankoh2 import programDir, log, pychain
from tankoh2.service import indent, getRunDir
from tankoh2.settings import myCrOSettings as settings
from tankoh2.utilities import updateName, getRadiusByShiftOnMandrel, changeSimulationOptions
from tankoh2.contour import getLiner, getDome, getReducedDomePoints #, getLengthContourPath
from tankoh2.material import getMaterial, getComposite, readLayupData
from tankoh2.optimize import optimizeFriction, optimizeHoopShift, optimizeFrictionGlobal_differential_evolution, optimizeHoopShiftForPolarOpeningX,\
optimizeNegativeFrictionGlobal_differential_evolution
from tankoh2.control_sf import createWindingDesign
import tankoh2.existingdesigns
#import mymodels.myvesselAxSolid as vesselAxSolid
#from builtins import True
def builtVesselAsBuilt(symmetricTank, servicepressure, saftyFactor, layersToWind, optimizeWindingHelical, optimizeWindingHoop, tankname,
                       dataDir, dzyl, polarOpening, lzylinder, dpoints, defaultLayerthickness, hoopLayerThickness, helixLayerThickenss, rovingWidth, numberOfRovingsHelical,
                       numberOfRovingsHoop, tex, rho, hoopStart, hoopRisePerBandwidth, minThicknessValue, hoopLayerCompressionStart, domeContourFilename):
    """Re-build an existing ("as built") vessel from measured layup data and run the winding simulation.

    Reads the layup (angles, thicknesses, turning/flange radii) and material from
    files in ``dataDir``, builds liner + composite, winds ``layersToWind`` layers
    (optionally optimizing hoop shift and helical friction per layer), writes the
    realised winding per layer to ``<tankname>_realised_winding.txt``, then modifies
    the simulation options in the saved ``.vessel`` file, re-runs the winding and
    saves the winding results — all into a fresh run directory.

    NOTE(review): ``servicepressure`` and ``saftyFactor`` are only referenced in the
    commented-out ABAQUS section, and ``defaultLayerthickness`` is never used in the
    active code — confirm whether they are still needed in the signature.
    """
    # #########################################################################################
    # SET Parameters of vessel
    # #########################################################################################
    log.info(f'built tank with polar opening of {polarOpening}')
    # band width = roving width times number of rovings, per winding type
    bandWidthHelical = rovingWidth * numberOfRovingsHelical
    bandWidthHoop = rovingWidth * numberOfRovingsHoop
    log.info(f'for helical winding using {numberOfRovingsHelical} rovings with {rovingWidth}mm resulting in bandwith of {bandWidthHelical}')
    log.info(f'for hoop winding using {numberOfRovingsHoop} rovings with {rovingWidth}mm resulting in bandwith of {bandWidthHoop}')
    # fibre cross-section area from tex (g/km) and density rho
    sectionAreaFibre = tex / (1000. * rho)
    print(sectionAreaFibre)
    log.info(f'section fibre area within roving is {sectionAreaFibre}')
    # input files
    layupDataFilename = os.path.join(dataDir, "Winding_" + tankname + ".txt")
    #materialFilename = os.path.join(dataDir, "CFRP_T700SC_LY556.json")
    materialFilename = os.path.join(dataDir, "CFRP_T700SC_LY556.json")
    # a second dome contour file is only needed for unsymmetric tanks
    if symmetricTank == False:
        dome2ContourFilename = os.path.join(dataDir, "Dome2_contour_" + tankname + "_48mm.txt")
    # output files (all placed in a fresh run directory)
    runDir = getRunDir()
    fileNameReducedDomeContour = os.path.join(runDir, f"Dome_contour_{tankname}_reduced.dcon")
    if symmetricTank == False:
        fileNameReducedDome2Contour = os.path.join(runDir, f"Dome2_contour_{tankname}_reduced.dcon")
    linerFilename = os.path.join(runDir, tankname + ".liner")
    designFilename = os.path.join(runDir, tankname + ".design")
    windingFile = os.path.join(runDir, tankname + "_realised_winding.txt")
    vesselFilename = os.path.join(runDir, tankname + ".vessel")
    windingResultFilename = os.path.join(runDir, tankname + ".wresults")
    #print(getLengthContourPath(domeContourFilename, 24., 51.175/2., 1))
    # #########################################################################################
    # Create Liner
    # #########################################################################################
    # reduce the dome contour to `dpoints` points and build the dome(s)
    x, r = getReducedDomePoints(domeContourFilename,
                                dpoints, fileNameReducedDomeContour)
    dome = getDome(dzyl / 2., polarOpening, pychain.winding.DOME_TYPES.ISOTENSOID,
                   x, r)
    dome2 = None
    if symmetricTank == False:
        x, r = getReducedDomePoints(dome2ContourFilename,
                                    dpoints, fileNameReducedDome2Contour)
        dome2 = getDome(dzyl / 2., polarOpening, pychain.winding.DOME_TYPES.ISOTENSOID,
                        x, r)
    liner = getLiner(dome, lzylinder, linerFilename, tankname, dome2=dome2)
    # ###########################################
    # Create material
    # ###########################################
    log.info(f'get material')
    material = getMaterial(materialFilename)
    # measured layup: angles, thicknesses, turning radii and flange radii per layer
    angles, thicknesses, wendekreisradien, krempenradien = readLayupData(layupDataFilename)
    log.info(f'{angles[0:layersToWind]}')
    composite = getComposite(angles[0:layersToWind], thicknesses[0:layersToWind], hoopLayerThickness,
                             helixLayerThickenss, material, sectionAreaFibre, rovingWidth, numberOfRovingsHelical, numberOfRovingsHoop,
                             tex, designFilename, tankname)
    # create vessel and set liner and composite
    vessel = pychain.winding.Vessel()
    vessel.setLiner(liner)
    vessel.setComposite(composite)
    # #############################################################################
    # run winding simulation
    # #############################################################################
    # vessel.finishWinding()
    # write a header line; NOTE(review): this file is rewritten again after the
    # loop below with a different header — confirm the double write is intended
    with open(windingFile, "w") as file:
        file.write('\t'.join(["Layer number", "Angle", "Polar opening"]) + '\n')
    outArr = []
    vessel.resetWindingSimulation()
    # counters for hoop and helical layers wound so far
    anzHoop = 0.
    anzHelix = 0.
    for i, angle, krempenradius, wendekreisradius in zip(range(layersToWind), angles, krempenradien,
                                                         wendekreisradien):  # len(angle_degree)
        log.info('--------------------------------------------------')
        layerindex = i
        # Hoop Layer
        if abs(angle - 90.) < 1e-8:
            #po_goal = krempenradius
            # hoop target position moves by one rise per band width per hoop layer
            po_goal = hoopStart + lzylinder/2. - anzHoop*hoopRisePerBandwidth*bandWidthHoop
            anzHoop = anzHoop+1
            #po_goal = wendekreisradius
            log.info(f'apply layer {i+1} with angle {angle}, and hoop position {po_goal}')
            if optimizeWindingHoop:
                shift, err_wk, iterations = optimizeHoopShiftForPolarOpeningX(vessel, po_goal, layerindex)
                log.info(f'{iterations} iterations. Shift is {shift} resulting in a hoop position error {err_wk} '
                         f'as current polar opening is {vessel.getPolarOpeningR(layerindex, True)}')
            else:
                # winding without optimization, but direct correction of shift
                vessel.setHoopLayerShift(layerindex, 0., True)
                vessel.runWindingSimulation(layerindex + 1)
                coor = po_goal - vessel.getPolarOpeningX(layerindex, True)
                vessel.setHoopLayerShift(layerindex, coor, True)
                if symmetricTank == False:
                    vessel.setHoopLayerShift(layerindex, -coor, False)  # shift in opposite direction on opposite dome/mandrel
                vessel.runWindingSimulation(layerindex + 1)
        # Helix layer
        else:
            anzHelix = anzHelix+1
            # global arr_fric, arr_wk
            # global arr_fric, arr_wk
            # arr_fric = []
            # arr_wk = []
            po_goal = max(wendekreisradius, polarOpening)  # prevent bandmiddle path corssing polar opening
            log.info(f'apply layer {i+1} with band mid path at polar opening of {po_goal}')
            po = getRadiusByShiftOnMandrel(vessel.getVesselLayer(layerindex - 1).getOuterMandrel1(), wendekreisradius, bandWidthHelical)
            log.info(f'applied layer {i+1} with angle {angle} without friction with band outer path at polar opening {po}')
            log.info(f'radius difference is {po-wendekreisradius} with bandwith {bandWidthHelical}')
            # firts estimation with no frcition
            vessel.setLayerFriction(layerindex, 0., True)
            vessel.runWindingSimulation(layerindex + 1)
            log.info(f' polar opening with no friction is {vessel.getPolarOpeningR(layerindex, True)}')
            diff = vessel.getPolarOpeningR(layerindex, True)-po_goal
            if optimizeWindingHelical and abs(diff) > 0.:
                log.info(f'using optimizeFriction')
                #friction, err_wk, iterations = optimizeFriction(vessel, wendekreisradius, layerindex, verbose=False)
                #log.info(f'{iterations} iterations. Friction is {friction} resulting in a polar opening error of {err_wk} '
                #    f'as current polar opening is {vessel.getPolarOpeningR(layerindex, True)}')
                #po_local = vessel.getPolarOpeningR(layerindex, True)
                # sign of the polar-opening error decides the friction search direction
                if diff > 0:
                    log.info(f' current polar opening is too large, frcition musst be negative')
                    log.info(f'using optimizeFrictionGlobal_differential_evolution')
                    friction, err_wk, iterations = optimizeNegativeFrictionGlobal_differential_evolution(vessel, po_goal, layerindex, verbose=False)
                if diff < 0:
                    log.info(f' current polar opening is too small, frcition musst be positive')
                    log.info(f'using optimizeFrictionGlobal_differential_evolution')
                    friction, err_wk, iterations = optimizeFrictionGlobal_differential_evolution(vessel, po_goal, layerindex, verbose=False)
                log.info(f'{iterations} iterations. Friction is {friction} resulting in a polar opening error of {err_wk} '
                         f'as current polar opening is {vessel.getPolarOpeningR(layerindex, True)}')
                if err_wk > 1.:
                    log.info(f'!!!!! ERROR FOR POLAR OPEING IS LARGER THAN 1mm !!!')
            # file = open("data.txt", "w")
            # for j in range(len(arr_fric)):
            #     file.write(str(arr_fric[j])+'\t'+str(arr_wk[j])+'\n')
            # file.close()
            # plt.plot(arr_fric, arr_wk, marker = 'o', linewidth = 0.)
            # m, n = fitting_linear(arr_fric,arr_wk)
            # log.info(m,n)
            # friction_corr = (wendekreisradius[i] - n) / m
            # vessel.setLayerFriction(layerindex, friction_corr, True)
            # vessel.runWindingSimulation(layerindex+1)
            # wk_korr = vessel.getPolarOpeningR(layerindex, True)
            # print (friction_corr, wk_korr)
            # y = linear(arr_fric, np.ones(len(arr_fric))*m, np.ones(len(arr_fric))*n)
            # plt.plot(arr_fric, y,'k--', lw = 1.)
            # plt.plot(friction_corr, wk_korr, 'ro')
            # plt.xlim((0., 0.0001))
            # plt.ylim((25., 27.))
            # plt.show()
        # record the realised polar opening for this layer and append it to the file
        po = vessel.getPolarOpeningR(layerindex, True)
        outArr.append([i+1, angle, po, po*2, po_goal, abs(po-po_goal)])
        with open(windingFile, "a") as file:
            file.write('\t'.join([str(s) for s in outArr[-1]]) + '\n')
    # rewrite the whole winding file as an indented table
    # NOTE(review): header has 5 columns but each data row has 6 entries, and
    # "\#" is an invalid escape sequence in a normal string — confirm both.
    with open(windingFile, "w") as file:
        file.write(indent([["Layer \#", "Angle", "Polar opening", "Polar opening diameter", "Target Polar opening"]] + outArr))
    # save vessel
    vessel.saveToFile(vesselFilename)  # save vessel
    updateName(vesselFilename, tankname, ['vessel'])
    # manipulate .vessel-file and run winding simulation again
    changeSimulationOptions(vesselFilename, layersToWind, minThicknessValue, hoopLayerCompressionStart)
    # re-run winding simulation with modified simulation options
    vessel.loadFromFile(vesselFilename)
    vessel.finishWinding()
    # save winding results
    windingResults = pychain.winding.VesselWindingResults()
    windingResults.buildFromVessel(vessel)
    # NOTE: shadows the module-level `statistics` import inside this function
    statistics = vessel.calculateVesselStatistics()
    #print("working pressure", statistics.burstPressure)
    #import inspect
    #print("statistics", inspect.getmembers(statistics))
    windingResults.saveToFile(windingResultFilename)
    # #############################################################################
    # run internal calculation
    # #############################################################################
    # build shell model for internal calculation
    #converter = pychain.mycrofem.VesselConverter()
    #shellModel = converter.buildAxShellModell(vessel, 10)
    # run linear solver
    #linerSolver = pychain.mycrofem.LinearSolver(shellModel)
    #linerSolver.run(True)
    # get stresses in the fiber COS
    #S11, S22, S12 = shellModel.calculateLayerStressesBottom()
    # get x coordinates (element middle)
    #xCoords = shellModel.getElementCoordsX()
    # #############################################################################
    # run ABAQUS
    # #############################################################################
    # create model options for abaqus calculation
    #modelOptions = pychain.mycrofem.VesselFEMModelOptions()
    #modelOptions.modelName = tankname + "_Vessel"
    #modelOptions.jobName = tankname + "_Job"
    #modelOptions.windingResultsFileName = tankname
    #modelOptions.useMaterialPhi = False # false uses micromechanical estimations of fvg effect an porperties
    #modelOptions.fittingContactWinding = pychain.mycrofem.CONTACT_TYPE.PENALTY
    #modelOptions.frictionFitting = 0.3
    #modelOptions.globalMeshSize = 2.0
    #modelOptions.pressureInBar = servicepressure
    #modelOptions.saveCAE = True
    #modelOptions.buildMandrel1 = True
    #modelOptions.buildMandrel2 = False
    # write abaqus scripts
    #scriptGenerator = pychain.abaqus.AbaqusVesselScriptGenerator()
    #scriptGenerator.writeVesselAxSolidBuildScript(os.path.join(runDir, tankname + "_Build.py"), settings, modelOptions)
    #scriptGenerator.writeVesselAxSolidBuildScript(os.path.join(runDir, tankname + "_Eval.py"), settings, modelOptions)
    # create vessel model according to version 95_2 documentation 'Axis-Symmetric Vessel Model'
    #create vessel model
    #vesselAxSolid = mymodels.myvesselAxSolidContacts
    #model = vesselAxSolid.MyVesselAxSolid(modelName = tankname + "_Vessel", umat = True, buildFitting = True, saveCAE = True, useMaterialPhi = False, buildLiner = True)
    #load winding results
    #model.loadData(tankname)
    #build mandrel 1
    #model.buildOnlyMandrel1(servicepressure, 1, friction = 0.3, fittingContactWinding = pychain.mycrofem.CONTACT_TYPE.PENALT)
    #mesh model
    #model.mesh(2.0)
    #export inp file
    #model.exportInp(tankname + "_Job")
    import matplotlib.pylab as plt
    # fig = plt.figure()
    # ax = fig.gca()
    # ax.plot(S11[:, 0])
    # ax.plot(S11[:, 1])
    # ax.plot(S11[:, 2])
    # plt.show()
def builtVesselByOptimizedDesign(design, domeContourFilename):
    """Run the winding design optimization for the given design.

    Args:
        design (dict): keyword arguments forwarded to ``createWindingDesign``;
            expected to contain the key 'tankname'.
        domeContourFilename (str or None): path to a dome contour file. When
            None, the optimization runs without an explicit liner contour;
            otherwise the contour is reduced and passed as (x, r) data.
    """
    tankname = design.get('tankname')
    # number of data points used when reducing the liner contour
    dpoints = 4
    runDir = getRunDir()
    # BUG FIX: compare against None with an identity check (`is None`)
    # instead of `== None` (PEP 8).
    if domeContourFilename is None:
        createWindingDesign(**design)
    else:
        fileNameReducedDomeContour = os.path.join(runDir, f"Dome_contour_{tankname}_reduced.dcon")
        x, r = getReducedDomePoints(domeContourFilename,
                                    dpoints, fileNameReducedDomeContour)
        # start design optimization with specified design and given (x,r)-liner contour data
        createWindingDesign(**design, domeContour=(x, r), runDir=runDir)
def main():
#
# What do you want to do?
#
# - As-Built of existing vessel
AsBuilt = False
# --- Parameters for As-Built
symmetricTank = True
servicepressure = 700. #bar
saftyFactor = 1.
layersToWind = 48 #48
optimizeWindingHelical = True #False
optimizeWindingHoop = False
tankname = 'NGT-BIT-2020-09-16'
dataDir = os.path.join(programDir, 'data')
dzyl = 400. # mm
polarOpening = 46./2. # mm
lzylinder = 500. # mm
dpoints = 4 # data points for liner contour
defaultLayerthickness | |
#!/usr/bin/env python
""" Python script example
Usage:
test_maxram.py [--target=<ram_target>][--count=<word_count>]
Options:
--help Shows this help message.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from docopt import docopt
import sys
import io
import os
import time
import struct
import subprocess
import ctypes
from collections import OrderedDict
import threading
from newton_control_main import newton as newton
if __name__ == "__main__":
args = docopt(__doc__, version='0.1')
newtonTarget = os.environ["NEWTON_TARGET"]
if args['--target']:
ram_target = args['--target']
else:
ram_target = "all"
if args['--count']:
count = int( args['--count'] )
else:
count = 0
rc = newton.adi_newton_config( 0 )
if rc != 0:
print( "ERROR: newton.adi_newton_config return an error (" + str( rc ) + ")." )
sys.exit( rc )
if ram_target == "all" or ram_target == "useq_wave_ram":
if count == 0 or count > 2048:
count = 2048
cmd_file = os.path.expanduser( "~/host_api/dataFiles/useq_wave_ram.txt" )
cmd_file_bytes = cmd_file.encode(encoding='utf-8')
print( "INFO: Generating file \"" + cmd_file + "\" with count = " + str( count ) )
rc = os.system( "~/host_api/examples/python/generateBootImage.py useq_wave_ram " + cmd_file + " --seed=1 --count=" + str( count ) )
if rc != 0:
print( "ERROR: Error generateBootImage.py return an error (" + str( rc ) + ")." )
sys.exit( rc )
print( "INFO: Loading command file \"" + cmd_file + "\" ..." )
rc = newton.adi_load_command_file( cmd_file_bytes )
if rc != 0:
print( "ERROR: Error newton.adi_load_command_file returned error (" + str( rc ) + ")." )
sys.exit( rc )
print( "INFO: Verifying RAMs loaded by command file \"" + cmd_file + "\"" )
if newtonTarget == "FPGA":
rc = newton.adi_verify_command_file( cmd_file_bytes )
else:
rc = newton.adi_verify_command_file_hsp( cmd_file_bytes )
if rc != 0:
print( "ERROR: Error newton.adi_verify_command_file returned error (" + str( rc ) + ")." )
sys.exit( rc )
if ram_target == "all" or ram_target == "useq_map_ram":
if count == 0 or count > 128:
count = 128
cmd_file = os.path.expanduser( "~/host_api/dataFiles/useq_map_ram.txt" )
cmd_file_bytes = cmd_file.encode(encoding='utf-8')
print( "INFO: Generating file \"" + cmd_file + "\" with count = " + str( count ) )
rc = os.system( "~/host_api/examples/python/generateBootImage.py useq_map_ram " + cmd_file + " --seed=1 --count=" + str( count ) )
if rc != 0:
print( "ERROR: Error generateBootImage.py return an error (" + str( rc ) + ")." )
sys.exit( rc )
print( "INFO: Loading command file \"" + cmd_file + "\" ..." )
rc = newton.adi_load_command_file( cmd_file_bytes )
if rc != 0:
print( "ERROR: Error newton.adi_load_command_file returned error (" + str( rc ) + ")." )
sys.exit( rc )
print( "INFO: Verifying RAMs loaded by command file \"" + cmd_file + "\"" )
if newtonTarget == "FPGA":
rc = newton.adi_verify_command_file( cmd_file_bytes )
else:
rc = newton.adi_verify_command_file_hsp( cmd_file_bytes )
if rc != 0:
print( "ERROR: Error newton.adi_verify_command_file returned error (" + str( rc ) + ")." )
sys.exit( rc )
elif ram_target == "all" or ram_target == "useq_seq_ram":
if count == 0 or count > 4096:
count = 4096
cmd_file = os.path.expanduser( "~/host_api/dataFiles/useq_seq_ram.txt" )
cmd_file_bytes = cmd_file.encode(encoding='utf-8')
print( "INFO: Generating file \"" + cmd_file + "\" with count = " + str( count ) )
rc = os.system( "~/host_api/examples/python/generateBootImage.py useq_seq_ram " + cmd_file + " --seed=1 --count=" + str( count ) )
if rc != 0:
print( "ERROR: Error generateBootImage.py return an error (" + str( rc ) + ")." )
sys.exit( rc )
print( "INFO: Loading command file \"" + cmd_file + "\" ..." )
rc = newton.adi_load_command_file( cmd_file_bytes )
if rc != 0:
print( "ERROR: Error newton.adi_load_command_file returned error (" + str( rc ) + ")." )
sys.exit( rc )
print( "INFO: Verifying RAMs loaded by command file \"" + cmd_file + "\"" )
if newtonTarget == "FPGA":
rc = newton.adi_verify_command_file( cmd_file_bytes )
else:
rc = newton.adi_verify_command_file_hsp( cmd_file_bytes )
if rc != 0:
print( "ERROR: Error newton.adi_verify_command_file returned error (" + str( rc ) + ")." )
sys.exit( rc )
if ram_target == "all" or ram_target == "datapath_ram":
if count == 0 or count > 4096:
count = 4096
cmd_file = os.path.expanduser( "~/host_api/dataFiles/datapath_ram.txt" )
cmd_file_bytes = cmd_file.encode(encoding='utf-8')
print( "INFO: Generating file \"" + cmd_file + "\" with count = " + str( count ) )
rc = os.system( "~/host_api/examples/python/generateBootImage.py datapath_ram " + cmd_file + " --seed=1 --count=" + str( count ) )
if rc != 0:
print( "ERROR: Error generateBootImage.py return an error (" + str( rc ) + ")." )
sys.exit( rc )
print( "INFO: Loading command file \"" + cmd_file + "\" ..." )
rc = newton.adi_load_command_file( cmd_file_bytes )
if rc != 0:
print( "ERROR: Error newton.adi_load_command_file returned error (" + str( rc ) + ")." )
sys.exit( rc )
print( "INFO: Verifying RAMs loaded by command file \"" + cmd_file + "\"" )
if newtonTarget == "FPGA":
rc = newton.adi_verify_command_file( cmd_file_bytes )
else:
rc = newton.adi_verify_command_file_hsp( cmd_file_bytes )
if rc != 0:
print( "ERROR: Error newton.adi_verify_command_file returned error (" + str( rc ) + ")." )
sys.exit( rc )
if ram_target == "all" or ram_target == "de_ram":
if count == 0 or count > 512:
count = 512
cmd_file = os.path.expanduser( "~/host_api/dataFiles/de_ram.txt" )
cmd_file_bytes = cmd_file.encode(encoding='utf-8')
print( "INFO: Generating file \"" + cmd_file + "\" with count = " + str( count ) )
rc = os.system( "~/host_api/examples/python/generateBootImage.py de_ram " + cmd_file + " --seed=1 --count=" + str( count ) )
if rc != 0:
print( "ERROR: Error generateBootImage.py return an error (" + str( rc ) + ")." )
sys.exit( rc )
print( "INFO: Loading command file \"" + cmd_file + "\" ..." )
rc = newton.adi_load_command_file( cmd_file_bytes )
if rc != 0:
print( "ERROR: Error newton.adi_load_command_file returned error (" + str( rc ) + ")." )
sys.exit( rc )
print( "INFO: Verifying RAMs loaded by command file \"" + cmd_file + "\"" )
if newtonTarget == "FPGA":
rc = newton.adi_verify_command_file( cmd_file_bytes )
else:
rc = newton.adi_verify_command_file_hsp( cmd_file_bytes )
if rc != 0:
print( "ERROR: Error newton.adi_verify_command_file returned error (" + str( rc ) + ")." )
sys.exit( rc )
if ram_target == "all" or ram_target == "lps1_ram":
if count == 0 or count > 256:
count = 256
cmd_file = os.path.expanduser( "~/host_api/dataFiles/lps1_ram.txt" )
cmd_file_bytes = cmd_file.encode(encoding='utf-8')
print( "INFO: Generating file \"" + cmd_file + "\" with count = " + str( count ) )
rc = os.system( "~/host_api/examples/python/generateBootImage.py lps1_ram " + cmd_file + " --seed=1 --count=" + str( count ) )
if rc != 0:
print( "ERROR: Error generateBootImage.py return an error (" + str( rc ) + ")." )
sys.exit( rc )
print( "INFO: Loading command file \"" + cmd_file + "\" ..." )
rc = newton.adi_load_command_file( cmd_file_bytes )
if rc != 0:
print( "ERROR: Error newton.adi_load_command_file returned error (" + str( rc ) + ")." )
sys.exit( rc )
print( "INFO: Verifying RAMs loaded by command file \"" + cmd_file + "\"" )
if newtonTarget == "FPGA":
rc = newton.adi_verify_command_file( cmd_file_bytes )
else:
rc = newton.adi_verify_command_file_hsp( cmd_file_bytes )
if rc != 0:
print( "ERROR: Error newton.adi_verify_command_file returned error (" + str( rc ) + ")." )
sys.exit( rc )
if ram_target == "all" or ram_target == "lps2_ram":
if count == 0 or count > 256:
count = 256
cmd_file = os.path.expanduser( "~/host_api/dataFiles/lps2_ram.txt" )
cmd_file_bytes = cmd_file.encode(encoding='utf-8')
print( "INFO: Generating file \"" + cmd_file + "\" with count = " + str( count ) )
rc = os.system( "~/host_api/examples/python/generateBootImage.py lps2_ram \"" + cmd_file + "\" --seed=1 --count=" + str( count ) )
if rc != 0:
print( "ERROR: Error generateBootImage.py return an error (" + str( rc ) + ")." )
sys.exit( rc )
print( "INFO: Loading command file \"" + cmd_file + "\" ..." )
rc = newton.adi_load_command_file( cmd_file_bytes )
if rc != 0:
print( "ERROR: Error newton.adi_load_command_file returned error (" + str( rc ) + ")." )
sys.exit( rc )
print( "INFO: Verifying RAMs loaded by command file \"" + cmd_file + "\"" )
if newtonTarget == "FPGA":
rc = newton.adi_verify_command_file( cmd_file_bytes | |
from flask import Flask, jsonify
app = Flask(__name__)
# Get information about electricity generation without a license in Europe
# k1 = country
# k2 = Amount of the fine for generating electricity without a license while not connected to the national grid,
# for internal needs
# k3 = Number of years of imprisonment for generating electricity without a license while not linked to any grid
# or network, for internal needs
# k4 = Is the use of free-energy devices for internal needs possible without a licence?
# k5 = Is control of the power plant by any jurisdiction possible without the producer holding a license?
# k6 = Is Flixbus present in the capital?
@app.route("/international_electricity_generation_without_license_in_europe")
def international_electricity_generation_without_license_in_europe():
    """Return per-country JSON data on unlicensed electricity generation in Europe.

    Record keys:
        k1: country name
        k2: fine amount for generating electricity without a license (off-grid,
            internal needs)
        k3: years of imprisonment for the same offence
        k4: whether free-energy devices may be used for internal needs without
            a licence (and up to which power / device type)
        k5: whether any jurisdiction may control the power plant although the
            producer holds no license
        k6: whether Flixbus serves the capital
    A value of '?' means the information is unknown.
    """
    data = [
        {'k1': 'Albania', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to unlimited power / Any devices', 'k5': 'No',
         'k6': 'Yes'},
        {'k1': 'Germany', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'Yes'},
        {'k1': 'Andorra', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
        {'k1': 'Austria', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'Yes'},
        {'k1': 'Belgium', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to 5 Mws / Hydrogen', 'k5': 'No', 'k6': 'Yes'},
        {'k1': 'Bielorussia / Belarus', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'Yes'},
        {'k1': 'Bosnia Herzegovine', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'Yes'},
        {'k1': 'Bulgarie', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to 5 MW / Any devices', 'k5': 'No', 'k6': 'Yes'},
        {'k1': 'Chyprus', 'k2': '90000,00 €', 'k3': '3', 'k4': 'No', 'k5': 'Yes', 'k6': 'No'},
        {'k1': 'Croatia', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to 1 MW / Any devices', 'k5': 'No', 'k6': 'Yes'},
        {'k1': 'Danmark', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'Yes'},
        {'k1': 'Spain', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to 250 kW / any devices', 'k5': 'No', 'k6': 'Yes'},
        {'k1': 'Estonia', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to 100 kW / Any devices', 'k5': 'No', 'k6': 'Yes'},
        {'k1': 'Finland', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to unlimited power / Any devices', 'k5': 'No',
         'k6': 'No'},
        {'k1': 'France', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to 50 Mws / Hydrogen', 'k5': 'No', 'k6': 'Yes'},
        {'k1': 'Greece', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to 20 kW / Any devices', 'k5': 'No', 'k6': 'No'},
        {'k1': 'Hungary', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to 50 MW / Any devices', 'k5': 'No', 'k6': 'Yes'},
        {'k1': 'Ireland', 'k2': '100000,00 €', 'k3': '0', 'k4': 'No', 'k5': 'Yes', 'k6': 'No'},
        {'k1': 'Iceland', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to 1 MW / Hydrogen', 'k5': 'No', 'k6': 'No'},
        {'k1': 'Italy', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'Yes'},
        {'k1': 'Lettonie', 'k2': '?', 'k3': '?', 'k4': 'No', 'k5': 'Yes', 'k6': 'Yes'},
        {'k1': 'Liechtenstein', 'k2': '?', 'k3': '?', 'k4': 'No', 'k5': 'Yes', 'k6': 'No'},
        {'k1': 'Lithuania', 'k2': '?', 'k3': '?', 'k4': 'No', 'k5': 'Yes', 'k6': 'Yes'},
        {'k1': 'Luxembourg', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to 50 kW / Hydrogen', 'k5': 'No', 'k6': 'Yes'},
        {'k1': 'North Macedoine', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'Yes'},
        {'k1': 'Malta', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
        {'k1': 'Moldova', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to 20 MW / Any devices', 'k5': 'No', 'k6': 'No'},
        {'k1': 'Monaco', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
        {'k1': 'Montenegro', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to unlimited powers / Any devices', 'k5': 'No',
         'k6': 'No'},
        {'k1': 'Norway', 'k2': '?', 'k3': '1', 'k4': 'No', 'k5': 'Yes', 'k6': 'Yes'},
        {'k1': 'The Netherlands', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to 500 Mws / Hydrogen', 'k5': 'No',
         'k6': 'Yes'},
        {'k1': 'Poland', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'Yes'},
        {'k1': 'Portugal', 'k2': '?', 'k3': '?', 'k4': 'No', 'k5': 'Yes', 'k6': 'Yes'},
        {'k1': 'Republic Tcheque', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to unlimited power / Any devices',
         'k5': 'No', 'k6': 'Yes'},
        {'k1': 'Roumania', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to unlimited power / Any devices', 'k5': 'No',
         'k6': 'Yes'},
        {'k1': 'England', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to 100 Mws / Hydrogen', 'k5': 'No', 'k6': 'Yes'},
        {'k1': 'Russia', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to unlimited power / Any devices', 'k5': 'No',
         'k6': 'Yes'},
        {'k1': 'San Marino', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
        {'k1': 'Serbia', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to 1 MW / Any devices', 'k5': 'No', 'k6': 'Yes'},
        {'k1': 'Slovakia', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to 1 MW / Any devices', 'k5': 'No', 'k6': 'Yes'},
        {'k1': 'Slovenia', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to 1 MW / Any devices', 'k5': 'No', 'k6': 'Yes'},
        {'k1': 'Sweden', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'Yes'},
        {'k1': 'Switzerland', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to unlimited power / Any devices', 'k5': 'No',
         'k6': 'Yes'},
        {'k1': 'Ukraine', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to 5 MW / Any devices', 'k5': 'No', 'k6': 'No'},
        {'k1': 'Vatican', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'Yes'},
        {'k1': 'Guernesey', 'k2': '2500000,00 €', 'k3': '5', 'k4': '?', 'k5': 'Yes', 'k6': 'No'},
        {'k1': 'Isle of Man', 'k2': '5000,00 €', 'k3': '0', 'k4': '?', 'k5': 'Yes', 'k6': 'No'},
        {'k1': 'Jersey island', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': 'Yes', 'k6': 'No'},
        {'k1': 'Kosovo', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to unlimited powers / Any devices', 'k5': 'No',
         'k6': 'No'},
        {'k1': 'Ile Aland', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
        {'k1': 'Faroe Islands', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
        {'k1': 'Gibraltar', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': 'Yes', 'k6': 'No'},
        {'k1': 'Svalbard', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
        {'k1': 'Akrotiri and Dhekelia', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
        {'k1': 'North Chyprus', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
        {'k1': 'Transnistria', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'}
    ]
    return jsonify(data)
# Get information about electricity generation without a license in Asia
# k1 = country
# k2 = Amount of the fine for generating electricity without a license while not connected to the national grid,
# for internal needs
# k3 = Number of years of imprisonment for generating electricity without a license while not linked to any grid
# or network, for internal needs
# k4 = Is the use of free-energy devices for internal needs possible without a licence?
# k5 = Is control of the power plant by any jurisdiction possible without the producer holding a license?
# k6 = Is Flixbus present in the capital?
@app.route("/international_electricity_generation_without_license_in_asia")
def international_electricity_generation_without_license_in_asia():
data = [
{'k1': 'Afghanistan', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': 'Yes', 'k6': 'No'},
{'k1': 'Saudi Arabia', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to unlimited power / Any devices', 'k5': 'Yes', 'k6': 'No'},
{'k1': 'Armenia', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to unlimited power / Any devices', 'k5': 'No', 'k6': 'No'},
{'k1': 'Azerbaïdjan', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Bahrain', 'k2': '?', 'k3': '?', 'k4': '?', 'k5': '?', 'k6': 'No'},
{'k1': 'Bangladesh', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to unlimited power / Any devices', 'k5': 'No', 'k6': 'No'},
{'k1': 'Bhutan', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to 500 kW / Any devices', 'k5': 'No', 'k6': 'No'},
{'k1': 'Myanmar', 'k2': 'Three hundred thousand kyats to a maximum of one million kyats', 'k3': '0', 'k4': 'No', 'k5': 'Yes', 'k6': 'No'},
{'k1': 'Brunei', 'k2': '0,00 €', 'k3': '0', 'k4': 'Yes up to unlimited power / Any devices', 'k5': 'No', 'k6': 'No'},
{'k1': 'Cambodia', 'k2': '400,000 Riels to 4,000,000 Riels per day for every day ', 'k3': '1 to 3 years', 'k4': 'Yes up | |
status_change_index == 16:
self.b_arrive_point = 0
self.server_data_obj.mqtt_send_get_obj.b_draw = 1
# 断网返航到返航到家
elif status_change_index == 17:
pass
# 低电量返航到返航到家
elif status_change_index == 18:
pass
# 断网返航到电脑手动
elif status_change_index == 19:
pass
# 低电量返航到电脑手动
elif status_change_index == 20:
pass
# 断网返航到遥控器控制
elif status_change_index == 21:
pass
# 低电量返航到电脑手动
elif status_change_index == 22:
pass
# 返航到家到空闲
elif status_change_index == 23:
pass
if b_clear_status:
self.clear_all_status()
self.ship_status = target_status
# 检查是否需要返航
def check_backhome(self):
"""
返回返航状态或者None
:return:返回None为不需要返航,返回低电量返航或者断网返航
"""
return_ship_status = None
if config.network_backhome and self.server_data_obj.mqtt_send_get_obj.is_connected:
if int(config.network_backhome) > 600:
network_backhome_time = int(config.network_backhome)
else:
network_backhome_time = 600
# 使用过电脑端按键操作过才能进行断网返航
if self.server_data_obj.mqtt_send_get_obj.b_receive_mqtt:
if time.time() - self.server_data_obj.mqtt_send_get_obj.last_command_time > network_backhome_time:
return_ship_status = ShipStatus.backhome_network
if self.low_dump_energy_warnning:
# 记录是因为按了低电量判断为返航
return_ship_status = ShipStatus.backhome_low_energy
return return_ship_status
# 平滑路径
def smooth_path(self):
"""
平滑路径
:return:平滑路径线路
"""
smooth_path_lng_lat = []
distance_matrix = []
for index, target_lng_lat in enumerate(self.server_data_obj.mqtt_send_get_obj.path_planning_points_gps):
if index == 0:
theta = lng_lat_calculate.angleFromCoordinate(self.lng_lat[0],
self.lng_lat[1],
target_lng_lat[0],
target_lng_lat[1])
distance = lng_lat_calculate.distanceFromCoordinate(self.lng_lat[0],
self.lng_lat[1],
target_lng_lat[0],
target_lng_lat[1])
if distance < config.smooth_path_ceil_size:
smooth_path_lng_lat.append(target_lng_lat)
else:
for i in range(1, int((distance / config.smooth_path_ceil_size) + 1)):
cal_lng_lat = lng_lat_calculate.one_point_diatance_to_end(self.lng_lat[0],
self.lng_lat[1],
theta,
config.smooth_path_ceil_size * i)
smooth_path_lng_lat.append(cal_lng_lat)
smooth_path_lng_lat.append(target_lng_lat)
else:
theta = lng_lat_calculate.angleFromCoordinate(
self.server_data_obj.mqtt_send_get_obj.path_planning_points_gps[index - 1][0],
self.server_data_obj.mqtt_send_get_obj.path_planning_points_gps[index - 1][1],
target_lng_lat[0],
target_lng_lat[1])
distance = lng_lat_calculate.distanceFromCoordinate(
self.server_data_obj.mqtt_send_get_obj.path_planning_points_gps[index - 1][0],
self.server_data_obj.mqtt_send_get_obj.path_planning_points_gps[index - 1][1],
target_lng_lat[0],
target_lng_lat[1])
if distance < config.smooth_path_ceil_size:
smooth_path_lng_lat.append(target_lng_lat)
else:
for i in range(1, int(distance / config.smooth_path_ceil_size + 1)):
cal_lng_lat = lng_lat_calculate.one_point_diatance_to_end(
self.server_data_obj.mqtt_send_get_obj.path_planning_points_gps[index - 1][0],
self.server_data_obj.mqtt_send_get_obj.path_planning_points_gps[index - 1][1],
theta,
config.smooth_path_ceil_size * i)
smooth_path_lng_lat.append(cal_lng_lat)
smooth_path_lng_lat.append(target_lng_lat)
for smooth_lng_lat_i in smooth_path_lng_lat:
distance_list = []
for sampling_points_gps_i in self.server_data_obj.mqtt_send_get_obj.sampling_points_gps:
s_d = lng_lat_calculate.distanceFromCoordinate(sampling_points_gps_i[0],
sampling_points_gps_i[1],
smooth_lng_lat_i[0],
smooth_lng_lat_i[1])
distance_list.append(s_d)
distance_matrix.append(distance_list)
a_d_m = np.asarray(distance_matrix)
for k in range(len(distance_matrix[0])):
temp_a = a_d_m[:, k]
temp_list = temp_a.tolist()
index_l = temp_list.index(min(temp_list))
self.smooth_path_lng_lat_index.append(index_l)
return smooth_path_lng_lat
# 根据当前点和路径计算下一个经纬度点
    def calc_target_lng_lat(self, index_):
        """
        根据当前点和路径计算下一个经纬度点
        Pick the next waypoint on the smoothed path toward sampling point index_.

        :param index_: index of the current sampling (target) point
        :return: [lng, lat] of the next waypoint to steer toward
        """
        # Lazily build the smoothed track (fixed-spacing discretization) on first use.
        if not self.smooth_path_lng_lat:
            self.smooth_path_lng_lat = self.smooth_path()
        # Search the nearest waypoint within this sampling point's track segment.
        distance_list = []
        start_index = self.smooth_path_lng_lat_index[index_]
        print('self.smooth_path_lng_lat, index_,', self.smooth_path_lng_lat_index, index_)
        if index_ == 0:
            self.search_list = copy.deepcopy(self.smooth_path_lng_lat[:start_index])
        else:
            self.search_list = copy.deepcopy(
                self.smooth_path_lng_lat[self.smooth_path_lng_lat_index[index_ - 1]:start_index])
        for target_lng_lat in self.search_list:
            distance = lng_lat_calculate.distanceFromCoordinate(self.lng_lat[0],
                                                                self.lng_lat[1],
                                                                target_lng_lat[0],
                                                                target_lng_lat[1])
            distance_list.append(distance)
        # No candidate waypoints in this segment: fall back to the sampling point.
        if len(distance_list) == 0:
            return self.server_data_obj.mqtt_send_get_obj.sampling_points_gps[index_]
        index = distance_list.index(min(distance_list))
        # if index + 1 == len(self.search_list):
        #     return self.server_data_obj.mqtt_send_get_obj.sampling_points_gps[index_]
        lng_lat = self.search_list[index]
        index_point_distance = lng_lat_calculate.distanceFromCoordinate(self.lng_lat[0],
                                                                        self.lng_lat[1],
                                                                        lng_lat[0],
                                                                        lng_lat[1])
        # Advance along the track until the candidate waypoint is at least one
        # grid spacing away from the ship.
        # NOTE(review): the loop reads search_list[index] *before* incrementing
        # index, so the returned point is one step past the last point whose
        # distance was tested — presumably an intentional look-ahead; confirm.
        while config.smooth_path_ceil_size > index_point_distance and (index + 1) < len(
                self.search_list):
            lng_lat = self.search_list[index]
            index_point_distance = lng_lat_calculate.distanceFromCoordinate(self.lng_lat[0],
                                                                            self.lng_lat[1],
                                                                            lng_lat[0],
                                                                            lng_lat[1])
            index += 1
        # Past the first sampling point, add the counts of earlier segments so
        # path_info reports overall progress: [current index, total points].
        if index_ > 0:
            self.path_info = [self.smooth_path_lng_lat_index[index_ - 1] + index, len(self.smooth_path_lng_lat)]
        else:
            self.path_info = [index, len(self.smooth_path_lng_lat)]
        return self.search_list[index]
# 构建障碍物地图
def build_obstacle_map(self):
"""
根据超声波距离构建障碍物地图
:return: 障碍物位置举证
"""
method = 1
if method == 0:
map_size = int(20 / 0.5)
obstacle_map = np.zeros((map_size, map_size))
# 判断前方距离是否有障碍物,根据障碍物改变目标点
for k, v in self.pi_main_obj.distance_dict.items():
v = min(v, 20)
row = int(map_size - math.ceil(math.cos(math.radians(k)) * v / 0.5))
col = int((map_size / 2) - 1 - math.ceil(math.sin(math.radians(k)) * v / 0.5))
for row_index in range(row):
obstacle_map[row_index, col] = 1
else:
obstacle_map = [0] * len(self.pi_main_obj.distance_dict.items())
for k, v in self.pi_main_obj.distance_dict.items():
if v < 5:
obstacle_map[10 + int(k / 0.9)] = 1
return obstacle_map
# 发送数据
    def send(self, method, data, topic='test', qos=0, http_type='POST', url=''):
        """
        Send data to the server over HTTP or MQTT.

        :param url: target URL (HTTP only)
        :param http_type: HTTP verb, 'POST' or 'GET'
        :param qos: MQTT quality-of-service level
        :param topic: MQTT topic
        :param data: payload to send
        :param method: transport to use, one of 'http', 'mqtt', 'com'
        :return: pool id for map/save POSTs, binding data or map list for GETs,
                 otherwise None
        """
        # NOTE(review): 'com' passes the assert but no branch below handles it,
        # so a 'com' call silently returns None — confirm whether intended.
        assert method in ['http', 'mqtt', 'com'], 'method error not in http mqtt com'
        if method == 'http':
            return_data = self.server_data_obj.send_server_http_data(http_type, data, url)
            self.logger.info({'请求 url': url, 'status_code': return_data.status_code})
            # map/save POST: extract and return the id of the saved map (lake).
            # Errors are logged but execution continues (best-effort).
            if http_type == 'POST' and r'map/save' in url:
                content_data = json.loads(return_data.content)
                self.logger.info({'map/save content_data success': content_data["success"]})
                if not content_data["success"]:
                    self.logger.error('POST请求发送地图数据失败')
                # POST response carries the lake (pool) id
                pool_id = content_data['data']['id']
                return pool_id
            # data/save POST: upload detection (measurement) data.
            elif http_type == 'POST' and r'data/save' in url:
                content_data = json.loads(return_data.content)
                self.logger.debug({'data/save content_data success': content_data["success"]})
                if not content_data["success"]:
                    self.logger.error('POST发送检测请求失败')
            elif http_type == 'GET' and r'device/binding' in url:
                content_data = json.loads(return_data.content)
                if not content_data["success"]:
                    self.logger.error('GET请求失败')
                save_data_binding = content_data["data"]
                return save_data_binding
            else:
                # Any other GET: return the list of saved maps.
                content_data = json.loads(return_data.content)
                if not content_data["success"]:
                    self.logger.error('GET请求失败')
                save_data_map = content_data["data"]["mapList"]
                return save_data_map
        elif method == 'mqtt':
            self.server_data_obj.send_server_mqtt_data(data=data, topic=topic, qos=qos)
# 往东南西北运动控制
def nesw_control(self, nest):
if nest == Nwse.north:
angle = 0
elif nest == Nwse.west:
angle = 90
elif nest == Nwse.south:
angle = 180
else:
angle = 270
point = lng_lat_calculate.one_point_diatance_to_end(self.lng_lat[0],
self.lng_lat[1],
angle,
config.min_steer_distance * 5)
self.points_arrive_control(point, point, False, False)
# 计算障碍物下目标点
    def get_avoid_obstacle_point(self, path_planning_point_gps=None):
        """
        根据障碍物地图获取下一个运动点
        Compute the next movement point given the obstacle readings.

        :param path_planning_point_gps: planned target point [lng, lat]
        :return: tuple (next target point, whether an emergency stop is required)
        """
        next_point_lng_lat = copy.deepcopy(path_planning_point_gps)
        if config.b_millimeter_wave:
            print('config.obstacle_avoid_type', config.obstacle_avoid_type)
            # Mode 0: no obstacle avoidance — keep the planned point.
            if config.obstacle_avoid_type == 0:
                return path_planning_point_gps, False
            # Mode 1: stop when any obstacle sits in the central sector
            # (3 cells either side of the middle of the obstacle list).
            elif config.obstacle_avoid_type == 1:
                if 1 in self.pi_main_obj.obstacle_list[
                    int(self.pi_main_obj.cell_size / 2) - 3:int(self.pi_main_obj.cell_size / 2) + 3]:
                    return next_point_lng_lat, True
                else:
                    return path_planning_point_gps, False
            # Mode 2: detour — pick a new heading via the VFH histogram.
            elif config.obstacle_avoid_type == 2:
                angle = vfh.vfh_func(9, self.pi_main_obj.obstacle_list)
                print('angle', angle)
                if angle == -1:
                    # No passable direction: back away on the reciprocal heading.
                    abs_angle = (self.pi_main_obj.theta + 180) % 360
                    next_point_lng_lat = lng_lat_calculate.one_point_diatance_to_end(self.lng_lat[0],
                                                                                     self.lng_lat[1],
                                                                                     abs_angle,
                                                                                     config.min_steer_distance)
                    print('abs_angle', abs_angle)
                    return next_point_lng_lat, False
                elif angle == 0:
                    # 0 means the original path is passable; keep the planned point.
                    return next_point_lng_lat, False
                else:
                    # Steer by the suggested relative angle.
                    abs_angle = (self.pi_main_obj.theta + angle) % 360
                    next_point_lng_lat = lng_lat_calculate.one_point_diatance_to_end(self.lng_lat[0],
                                                                                     self.lng_lat[1],
                                                                                     abs_angle,
                                                                                     config.min_steer_distance)
                    print('abs_angle', abs_angle)
                    return next_point_lng_lat, False
            # NOTE(review): any other obstacle_avoid_type falls through and
            # returns None implicitly — confirm whether that is intended.
        else:
            return path_planning_point_gps, False
# 控制到达目标点
    def points_arrive_control(self, target_lng_lat_gps, sample_lng_lat_gps, b_force_arrive=False, b_back_home=False):
        """
        Drive the ship toward a target point until arrival or abort.

        :param target_lng_lat_gps: real GPS of the steering target point
        :param sample_lng_lat_gps: real GPS of the next sampling point
        :param b_force_arrive: when True, keep looping until actual arrival
        :param b_back_home: True while the ship is returning home
        :return: True once arrived (or skipped by timeout), False when aborted
        """
        distance = lng_lat_calculate.distanceFromCoordinate(
            self.lng_lat[0],
            self.lng_lat[1],
            sample_lng_lat_gps[0],
            sample_lng_lat_gps[1])
        self.distance_p = distance
        if distance < config.arrive_distance:
            return True
        # Timeout skip: once within 30 m, give the point at most 60 s.
        if distance < 30 and self.point_arrive_start_time is None:
            self.point_arrive_start_time = time.time()
        elif self.point_arrive_start_time and time.time() - self.point_arrive_start_time > 60:
            return True
        # NOTE(review): `distance` is never recomputed inside the loop; the
        # loop only exits via the return statements below — confirm intended.
        while distance >= config.arrive_distance:
            if distance < 30 and self.point_arrive_start_time is None:
                self.point_arrive_start_time = time.time()
            elif self.point_arrive_start_time and time.time() - self.point_arrive_start_time > 60:
                return True
            distance_sample = lng_lat_calculate.distanceFromCoordinate(
                self.lng_lat[0],
                self.lng_lat[1],
                sample_lng_lat_gps[0],
                sample_lng_lat_gps[1])
            self.distance_p = distance_sample
            if distance_sample < config.arrive_distance:
                return True
            # Obstacle avoidance may substitute a detour target point.
            b_stop = False
            if not config.home_debug:
                target_lng_lat_gps, b_stop = self.get_avoid_obstacle_point(target_lng_lat_gps)
            all_distance = lng_lat_calculate.distanceFromCoordinate(
                self.lng_lat[0], self.lng_lat[1], target_lng_lat_gps[0],
                target_lng_lat_gps[1])
            # Bearing from the current position to the target point.
            point_theta = lng_lat_calculate.angleFromCoordinate(self.lng_lat[0],
                                                                self.lng_lat[1],
                                                                target_lng_lat_gps[0],
                                                                target_lng_lat_gps[1])
            # Wrap the heading error into [-180, 180].
            theta_error = point_theta - self.current_theta
            if abs(theta_error) > 180:
                if theta_error > 0:
                    theta_error = theta_error - 360
                else:
                    theta_error = 360 + theta_error
            self.theta_error = theta_error
            left_pwm, right_pwm = self.path_track_obj.pid_pwm_2(distance=all_distance,
                                                                theta_error=theta_error)
            self.last_left_pwm = left_pwm
            self.last_right_pwm = right_pwm
            # Home-debug mode: simulate motion by dead-reckoning the GPS.
            if config.home_debug:
                time.sleep(0.1)
                # Accumulate the travelled distance.
                if self.last_lng_lat:
                    speed_distance = lng_lat_calculate.distanceFromCoordinate(self.last_lng_lat[0],
                                                                              self.last_lng_lat[1],
                                                                              self.lng_lat[0],
                                                                              self.lng_lat[1])
                    self.run_distance += speed_distance
                left_delta_pwm = int(self.last_left_pwm + left_pwm) / 2 - config.stop_pwm
                right_delta_pwm = int(self.last_right_pwm + right_pwm) / 2 - config.stop_pwm
                steer_power = left_delta_pwm - right_delta_pwm
                forward_power = left_delta_pwm + right_delta_pwm
                # Empirical scale factors converting PWM to motion increments.
                delta_distance = forward_power * 0.002
                delta_theta = steer_power * 0.08
                self.last_lng_lat = copy.deepcopy(self.lng_lat)
                if self.current_theta is not None:
                    self.current_theta = (self.current_theta - delta_theta / 2) % 360
                self.lng_lat = lng_lat_calculate.one_point_diatance_to_end(self.lng_lat[0],
                                                                           self.lng_lat[1],
                                                                           self.current_theta,
                                                                           delta_distance)
            else:
                # Real hardware: stop on obstacle, otherwise apply the PWM pair.
                print('b_stop', b_stop)
                if b_stop:
                    self.obstacle_info = '1'
                    self.pi_main_obj.stop()
                    # Record that tracking stopped (treated like a pause).
                    self.b_stop_path_track = True
                    return False
                else:
                    self.obstacle_info = '0'
                    self.pi_main_obj.set_pwm(left_pwm, right_pwm)
            # Abort as soon as the ship leaves computer-auto mode.
            if self.ship_status != ShipStatus.computer_auto:
                self.b_stop_path_track = True
                return False
            # Without b_force_arrive a single control step is enough.
            if not b_force_arrive:
                return False
            if b_back_home:
                if distance_sample < config.arrive_distance:
                    return True
# 处理状态切换
def change_status(self):
while True:
# 删除任务模式,将抽水单独控制
time.sleep(0.1)
d = int(self.server_data_obj.mqtt_send_get_obj.control_move_direction)
self.direction = d
# 判断是否需要返航
return_ship_status = None
if self.ship_status != ShipStatus.at_home:
return_ship_status = self.check_backhome()
# 判断空闲状态切换到其他状态
if self.ship_status == ShipStatus.idle:
# 切换到遥控器控制模式
if not config.home_debug and self.pi_main_obj.b_start_remote:
self.change_status_info(target_status=ShipStatus.remote_control)
# 切换到电脑手动模式
elif d in [-1, 0, 90, 180, 270, 10, 190, 1180, 1270]:
self.change_status_info(target_status=ShipStatus.computer_control)
# 切换到返航
elif return_ship_status is not None:
self.change_status_info(target_status=return_ship_status)
# 切换到自动巡航模式
elif self.server_data_obj.mqtt_send_get_obj.keep_point and \
len(self.server_data_obj.mqtt_send_get_obj.path_planning_points) > 0:
if self.lng_lat is None:
self.logger.error('无当前GPS,不能自主巡航')
time.sleep(0.5)
else:
self.change_status_info(target_status=ShipStatus.computer_auto)
# 判断电脑手动状态切换到其他状态
if self.ship_status == ShipStatus.computer_control:
# 切换到遥控器控制
if not config.home_debug and self.pi_main_obj.b_start_remote:
# 此时为遥控器控制模式 清除d控制状态
self.change_status_info(target_status=ShipStatus.remote_control)
# 切换到自动巡航模式
elif d == -1 and \
self.server_data_obj.mqtt_send_get_obj.keep_point and \
len(self.server_data_obj.mqtt_send_get_obj.path_planning_points) > 0:
if self.lng_lat is None:
self.logger.error('无当前GPS,不能自主巡航')
time.sleep(0.5)
else:
self.change_status_info(target_status=ShipStatus.computer_auto)
d = int(self.server_data_obj.mqtt_send_get_obj.control_move_direction)
# 点击抽水
elif self.server_data_obj.mqtt_send_get_obj.b_draw:
self.last_ship_status = ShipStatus.computer_control
self.change_status_info(target_status=ShipStatus.tasking)
# 切换到返航
elif return_ship_status is not None:
self.change_status_info(target_status=return_ship_status)
# 判断电脑自动切换到其他状态情况
if self.ship_status == ShipStatus.computer_auto:
# 切换到遥控器控制
if not config.home_debug and self.pi_main_obj.b_start_remote:
self.change_status_info(target_status=ShipStatus.remote_control, b_clear_status=True)
# 切换到返航
elif return_ship_status is not None:
self.change_status_info(target_status=return_ship_status)
# 取消自动模式
elif d == -1:
self.change_status_info(target_status=ShipStatus.computer_control, b_clear_status=True)
# 切换到手动
elif d in [0, 90, 180, 270, 10, 190, 1180, 1270]:
self.change_status_info(target_status=ShipStatus.computer_control)
# 到点
elif self.b_arrive_point:
self.last_ship_status = ShipStatus.computer_auto
self.change_status_info(target_status=ShipStatus.tasking)
# 点击抽水
| |
# Tile the flat_values for the uniform dimensions (i.e., for `axis=0` plus
# `axis=range(ragged_rank, rank)`).
inner_repeats = array_ops.concat([multiples[:1], multiples[ragged_rank + 1:]],
axis=0)
return array_ops.tile(ragged_tiled_values, inner_repeats)
def _tile_ragged_splits(rt_input, multiples, const_multiples=None):
  """Builds nested_split tensors for a tiled `RaggedTensor`.

  Returns a list of split tensors that can be used to construct the
  `RaggedTensor` that tiles `rt_input` as specified by `multiples`.

  Args:
    rt_input: The `RaggedTensor` that is being tiled.
    multiples: A 1-D integer `tensor`, indicating how many times each dimension
      should be repeated.
    const_multiples: Optional constant value for multiples.  Used to skip tiling
      dimensions where `multiples=1`.

  Returns:
    A list of 1-D `int64` `Tensor`s (one for each ragged dimension in
    `rt_input`).

  #### Example:
    ```python
    >>> rt = tf.ragged.constant([[1, 2], [3]])
    >>> _tile_ragged_splits(rt, [3, 2])
    [0, 4, 6, 10, 12, 16, 18]
    ```
  """
  ragged_rank = rt_input.ragged_rank
  nested_splits = rt_input.nested_row_splits
  # projected_splits[src_axis, dst_axis] contains the split points that divide
  # the rows from src_axis in the list of dst_axis values. E.g.,
  # projected_splits[i, i] = nested_splits[i], and
  # projected_splits[i, i+1] = gather(nested_splits[i+1], nested_splits[i]).
  projected_splits = [{i: nested_splits[i]} for i in range(ragged_rank)]
  for src_axis in range(ragged_rank):
    # Build each projection from the previous one, one axis at a time.
    for dst_axis in range(src_axis + 1, ragged_rank - 1):
      projected_splits[src_axis][dst_axis] = array_ops.gather(
          nested_splits[dst_axis],
          projected_splits[src_axis][dst_axis - 1])
  # For each ragged dimension: nested_splits[axis] -> result_splits[axis].
  result_splits = []
  for axis in range(ragged_rank):
    # Get the length of each row for the input tensor for this dimension.
    input_lengths = nested_splits[axis][1:] - nested_splits[axis][:-1]
    # Multiply those lengths by the `multiples` of dimension axis+1, since
    # each value will be repeated that number of times.
    output_lengths = input_lengths * multiples[axis + 1]
    # Repeat ranges of the row lengths as necessary for them to be tiled in
    # each ragged dimension `d < axis`. (Start with dimension d=axis-1, and
    # work our way up to dimension d=0.)
    repeats = 1
    for d in range(axis - 1, -1, -1):
      # Skip dimensions statically known to have multiple 1 (no tiling needed).
      if const_multiples is None or const_multiples[d + 1] != 1:
        splits = projected_splits[d][axis - 1] * repeats
        output_lengths = ragged_util.repeat_ranges(output_lengths, splits,
                                                   multiples[d + 1])
      repeats *= multiples[d + 1]
    # Tile splits for the outermost (uniform) dimension.
    output_lengths = array_ops.tile(output_lengths, multiples[:1])
    # Convert to splits.
    result_splits.append(ragged_util.lengths_to_splits(output_lengths))
  return result_splits
#===============================================================================
# Reshaping
#===============================================================================
def expand_dims(input, axis, name=None):  # pylint: disable=redefined-builtin
  """Inserts a dimension with shape 1 into a potentially ragged tensor's shape.

  Given a potentially ragged tensor `input`, this operation inserts a
  dimension with size 1 at the dimension `axis` of `input`'s shape.

  * If `input` is a `Tensor`, then this is equivalent to
    `tf.expand_dims`.
  * If `input` is ragged, and `axis=0`, then the new dimension will be
    uniform; but the previously outermost dimension will become ragged.
  * If `input` is ragged, and `0 < axis < input.ragged_rank`, then the
    new dimension will be ragged.
  * If `input` is ragged, and `axis >= input.ragged_rank`, then the new
    dimension will be uniform.

  The following table gives some examples showing how `ragged.expand_dims`
  impacts the shapes of different input tensors.  Ragged dimensions are
  indicated by enclosing them in parentheses.

  input.shape             | axis | result.shape
  ----------------------- | ---- | -----------------------------
  `[D1, D2]`              | `0`  | `[1, D1, D2]`
  `[D1, D2]`              | `1`  | `[D1, 1, D2]`
  `[D1, D2]`              | `2`  | `[D1, D2, 1]`
  `[D1, (D2), (D3), D4]`  | `0`  | `[1, (D1), (D2), (D3), D4]`
  `[D1, (D2), (D3), D4]`  | `1`  | `[D1, (1), (D2), (D3), D4]`
  `[D1, (D2), (D3), D4]`  | `2`  | `[D1, (D2), (1), (D3), D4]`
  `[D1, (D2), (D3), D4]`  | `3`  | `[D1, (D2), (D3), 1, D4]`
  `[D1, (D2), (D3), D4]`  | `4`  | `[D1, (D2), (D3), D4, 1]`

  Args:
    input: The potentially ragged tensor that should be expanded with a new
      dimension.
    axis: An integer constant indicating where the new dimension should be
      inserted.
    name: A name for the operation (optional).

  Returns:
    A tensor with the same values as `input`, with an added dimension of
    size 1 at `axis`.

  #### Examples:
    ```python
    >>> rt = tf.ragged.constant([[1, 2], [3]])
    >>> print(rt.shape)
    TensorShape([2, None])

    >>> expanded = ragged.expand_dims(rt, axis=0)
    >>> print(expanded.shape, expanded)
    TensorShape([1, None, None]) [[[1, 2], [3]]]

    >>> expanded = ragged.expand_dims(rt, axis=1)
    >>> print(expanded.shape, expanded)
    TensorShape([2, None, None]) [[[1, 2]], [[3]]]

    >>> expanded = ragged.expand_dims(rt, axis=2)
    >>> print(expanded.shape, expanded)
    TensorShape([2, None, 1]) [[[1], [2]], [[3]]]
    ```
  """
  with ops.name_scope(name, 'RaggedExpandDims', [input]):
    input = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        input, name='input')
    # Dense inputs: defer to the standard op.
    if not ragged_tensor.is_ragged(input):
      return array_ops.expand_dims(input, axis)
    ndims = None if input.shape.ndims is None else input.shape.ndims + 1
    axis = ragged_util.get_positive_axis(axis, ndims)
    if axis == 0:
      # New uniform outermost dimension of size 1.
      values = input
      splits = array_ops.stack([0, input.nrows()])
    elif axis == 1:
      # New ragged dimension with one value per original row.
      values = input
      splits = math_ops.range(input.nrows() + 1)
    else:
      # Recurse into the values, keeping the outer splits unchanged.
      values = expand_dims(input.values, axis - 1)
      splits = input.row_splits
    return ragged_tensor.RaggedTensor.from_row_splits(values, splits)
#===============================================================================
# ragged.where
#===============================================================================
def where(condition, x=None, y=None, name=None):
  """Return the elements, either from `x` or `y`, depending on the `condition`.

  : If both `x` and `y` are `None`:
    Returns the coordinates of true elements of `condition`. The coordinates
    are returned in a 2-D tensor with shape
    `[num_true_values, dim_size(condition)]`, where `result[i]` is the
    coordinates of the `i`th true value (in row-major order).

  : If both `x` and `y` are non-`None`:
    Returns a tensor formed by selecting values from `x` where condition is
    true, and from `y` when condition is false.  In particular:

    : If `condition`, `x`, and `y` all have the same shape:

      * `result[i1...iN] = x[i1...iN]` if `condition[i1...iN]` is true.
      * `result[i1...iN] = y[i1...iN]` if `condition[i1...iN]` is false.

    : Otherwise:

      * `condition` must be a vector.
      * `x` and `y` must have the same number of dimensions.
      * The outermost dimensions of `condition`, `x`, and `y` must all have the
        same size.
      * `result[i] = x[i]` if `condition[i]` is true.
      * `result[i] = y[i]` if `condition[i]` is false.

  Args:
    condition: A potentially ragged tensor of type `bool`
    x: A potentially ragged tensor (optional).
    y: A potentially ragged tensor (optional).  Must be specified if `x` is
      specified.  Must have the same rank and type as `x`.
    name: A name for the operation (optional)

  Returns:
    : If both `x` and `y` are `None`:
      A `Tensor` with shape `(num_true, dim_size(condition))`.
    : Otherwise:
      A potentially ragged tensor with the same type, rank, and outermost
      dimension size as `x` and `y`.
      `result.ragged_rank = max(x.ragged_rank, y.ragged_rank)`.

  Raises:
    ValueError: When exactly one of `x` or `y` is non-`None`; or when
      `condition`, `x`, and `y` have incompatible shapes.

  #### Examples:
    ```python
    >>> # Coordinates where condition is true.
    >>> condition = tf.ragged.constant_value(
    ...     [[True, False, True], [False, True]])
    >>> ragged.where(condition)
    [[0, 0], [0, 2], [1, 1]]

    >>> # Elementwise selection between x and y, based on condition.
    >>> condition = tf.ragged.constant_value(
    ...     [[True, False, True], [False, True]])
    >>> x = tf.ragged.constant_value([['A', 'B', 'C'], ['D', 'E']])
    >>> y = tf.ragged.constant_value([['a', 'b', 'c'], ['d', 'e']])
    >>> ragged.where(condition, x, y)
    [['A', 'b', 'C'], ['d', 'E']]

    >>> # Row selection between x and y, based on condition.
    >>> condition = [True, False]
    >>> x = tf.ragged.constant_value([['A', 'B', 'C'], ['D', 'E']])
    >>> y = tf.ragged.constant_value([['a', 'b', 'c'], ['d', 'e']])
    >>> ragged.where(condition, x, y)
    [['A', 'B', 'C'], ['d', 'e']]
    ```
  """
  if (x is None) != (y is None):
    raise ValueError('x and y must be either both None or both non-None')
  with ops.name_scope('RaggedWhere', name, [condition, x, y]):
    condition = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        condition, name='condition')
    # One-argument form: coordinates of true values.
    if x is None:
      return _coordinate_where(condition)
    else:
      # Three-argument form: elementwise / row selection.
      x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x, name='x')
      y = ragged_tensor.convert_to_tensor_or_ragged_tensor(y, name='y')
      return _elementwise_where(condition, x, y)
def _elementwise_where(condition, x, y):
  """Ragged version of tf.where(condition, x, y)."""
  ragged_flags = (isinstance(condition, ragged_tensor.RaggedTensor),
                  isinstance(x, ragged_tensor.RaggedTensor),
                  isinstance(y, ragged_tensor.RaggedTensor))
  if not any(ragged_flags):
    # Purely dense inputs: defer to the standard op.
    return array_ops.where(condition, x, y)
  if all(ragged_flags):
    # All inputs ragged: apply where() over the flat values.
    return ragged_functional_ops.map_flat_values(array_ops.where, condition, x,
                                                 y)
  if not ragged_flags[0]:
    # Dense vector condition with ragged x/y: select whole rows.
    # Concatenate x and y, and then use `gather` to assemble the selected rows.
    condition.shape.assert_has_rank(1)
    x_nrows = _nrows(x)
    x_and_y = concat([x, y], axis=0)
    indices = array_ops.where(condition, math_ops.range(x_nrows),
                              x_nrows + math_ops.range(_nrows(y)))
    return gather(x_and_y, indices)
  raise ValueError('Input shapes do not match.')
def _coordinate_where(condition):
"""Ragged version of tf.where(condition)."""
if not isinstance(condition, ragged_tensor.RaggedTensor):
| |
# <gh_stars>1-10  (repository metadata artifact; commented out so the module parses)
from typing import Any, Sequence
from datetime import datetime
from threading import Thread
from vnpy.api.xtp.vnxtp import (
XTP,
set_async_callback_exception_handler,
AsyncDispatchException,
OrderBookStruct,
XTPMarketDataStruct,
XTPQuoteStaticInfo,
XTPRspInfoStruct,
XTPSpecificTickerStruct,
XTPTickByTickStruct,
XTPTickerPriceInfo,
XTPOrderInsertInfo,
XTPOrderInfo,
XTPTradeReport,
XTPOrderCancelInfo,
XTPCrdDebtInfo,
XTPQueryStkPositionRsp,
XTPQueryAssetRsp,
XTPStructuredFundInfo,
XTPFundTransferNotice,
XTPQueryETFBaseRsp,
XTPQueryETFComponentRsp,
XTPQueryIPOTickerRsp,
XTPQueryIPOQuotaRsp,
XTPQueryOptionAuctionInfoRsp,
XTP_EXCHANGE_TYPE,
XTP_LOG_LEVEL,
XTP_PROTOCOL_TYPE,
XTP_TE_RESUME_TYPE,
XTP_SIDE_BUY,
XTP_SIDE_SELL,
XTP_SIDE_MARGIN_TRADE,
XTP_SIDE_SHORT_SELL,
XTP_SIDE_REPAY_MARGIN,
XTP_SIDE_REPAY_STOCK,
XTP_ACCOUNT_TYPE,
XTP_BUSINESS_TYPE,
XTP_TICKER_TYPE,
XTP_MARKET_TYPE,
XTP_PRICE_TYPE,
XTP_ORDER_STATUS_TYPE
)
from vnpy.event import EventEngine
from vnpy.trader.event import EVENT_TIMER
from vnpy.trader.constant import Exchange, Product, Direction, OrderType, Status, Offset
from vnpy.trader.gateway import BaseGateway
from vnpy.trader.object import (CancelRequest, OrderRequest, SubscribeRequest,
TickData, ContractData, OrderData, TradeData,
PositionData, AccountData)
from vnpy.trader.utility import get_folder_path
# Shorthand for the wrapped XTP API namespace.
API = XTP.API
# Exchange code mappings between XTP and VN Trader.
EXCHANGE_XTP2VT = {
    XTP_EXCHANGE_TYPE.XTP_EXCHANGE_SH: Exchange.SSE,
    XTP_EXCHANGE_TYPE.XTP_EXCHANGE_SZ: Exchange.SZSE,
}
EXCHANGE_VT2XTP = {v: k for k, v in EXCHANGE_XTP2VT.items()}
# Market (trading board) mappings.
MARKET_XTP2VT = {
    XTP_MARKET_TYPE.XTP_MKT_SH_A: Exchange.SSE,
    XTP_MARKET_TYPE.XTP_MKT_SZ_A: Exchange.SZSE
}
MARKET_VT2XTP = {v: k for k, v in MARKET_XTP2VT.items()}
# Instrument type mappings.
PRODUCT_XTP2VT = {
    XTP_TICKER_TYPE.XTP_TICKER_TYPE_STOCK: Product.EQUITY,
    XTP_TICKER_TYPE.XTP_TICKER_TYPE_INDEX: Product.INDEX,
    XTP_TICKER_TYPE.XTP_TICKER_TYPE_FUND: Product.FUND,
    XTP_TICKER_TYPE.XTP_TICKER_TYPE_BOND: Product.BOND,
    XTP_TICKER_TYPE.XTP_TICKER_TYPE_OPTION: Product.OPTION
}
# DIRECTION_VT2XTP = {
#     Direction.LONG: XTP_SIDE_BUY,
#     Direction.SHORT: XTP_SIDE_SELL
# }
# Direction/offset mappings, including the margin-trading sides.
DIRECTION_VT2XTP = {
    (Direction.LONG, Offset.OPEN): XTP_SIDE_MARGIN_TRADE,
    (Direction.SHORT, Offset.CLOSE): XTP_SIDE_REPAY_MARGIN,
    (Direction.SHORT, Offset.OPEN): XTP_SIDE_SHORT_SELL,
    (Direction.LONG, Offset.CLOSE): XTP_SIDE_REPAY_STOCK,
    (Direction.SHORT, Offset.NONE): XTP_SIDE_BUY,
    (Direction.LONG, Offset.NONE): XTP_SIDE_SELL,
}
DIRECTION_XTP2VT = {v: k for k, v in DIRECTION_VT2XTP.items()}
# Order price-type mappings; MARKET maps to "best five or cancel".
ORDERTYPE_VT2XTP = {
    OrderType.LIMIT: XTP_PRICE_TYPE.XTP_PRICE_LIMIT,
    OrderType.MARKET: XTP_PRICE_TYPE.XTP_PRICE_BEST5_OR_CANCEL
}
ORDERTYPE_XTP2VT = {v: k for k, v in ORDERTYPE_VT2XTP.items()}
# Order status mappings; note "part traded, not queueing" maps to CANCELLED.
STATUS_XTP2VT = {
    XTP_ORDER_STATUS_TYPE.XTP_ORDER_STATUS_INIT: Status.SUBMITTING,
    XTP_ORDER_STATUS_TYPE.XTP_ORDER_STATUS_ALLTRADED: Status.ALLTRADED,
    XTP_ORDER_STATUS_TYPE.XTP_ORDER_STATUS_PARTTRADEDQUEUEING: Status.PARTTRADED,
    XTP_ORDER_STATUS_TYPE.XTP_ORDER_STATUS_PARTTRADEDNOTQUEUEING: Status.CANCELLED,
    XTP_ORDER_STATUS_TYPE.XTP_ORDER_STATUS_NOTRADEQUEUEING: Status.NOTTRADED,
    XTP_ORDER_STATUS_TYPE.XTP_ORDER_STATUS_CANCELED: Status.CANCELLED,
    XTP_ORDER_STATUS_TYPE.XTP_ORDER_STATUS_REJECTED: Status.REJECTED,
}
# Caches filled while querying contracts: symbol -> name / exchange.
symbol_name_map = {}
symbol_exchange_map = {}
class XtpGateway(BaseGateway):
    """VN Trader gateway for the XTP counter (market data + trading)."""

    default_setting = {
        "账号": "",
        "密码": "",
        "客户号": 1,
        "行情地址": "",
        "行情端口": 0,
        "交易地址": "",
        "交易端口": 0,
        "行情协议": ["TCP", "UDP"],
        "授权码": ""
    }

    exchanges = list(EXCHANGE_VT2XTP.keys())

    def __init__(self, event_engine: EventEngine):
        """Create the quote/trader API wrappers and install the async
        callback exception handler."""
        super().__init__(event_engine, "XTP")
        self.quote_api = XtpQuoteApi(self)
        self.trader_api = XtpTraderApi(self)
        # Polling state: initialized here so process_timer_event cannot hit
        # an AttributeError if a timer event arrives before init_query().
        self.count = 0
        self.query_functions = []
        set_async_callback_exception_handler(
            self._async_callback_exception_handler)

    def connect(self, setting: dict):
        """Connect both quote and trader APIs using the UI settings."""
        userid = setting['账号']
        password = setting['密码']
        client_id = int(setting['客户号'])
        quote_ip = setting['行情地址']
        quote_port = int(setting['行情端口'])
        trader_ip = setting['交易地址']
        trader_port = int(setting['交易端口'])
        quote_protocol = setting["行情协议"]
        software_key = setting["授权码"]
        self.quote_api.connect(userid, password, client_id,
                               quote_ip, quote_port, quote_protocol)
        self.trader_api.connect(userid, password, client_id,
                                trader_ip, trader_port, software_key)
        self.init_query()

    def close(self):
        """Release both underlying APIs."""
        self.quote_api.close()
        self.trader_api.close()

    def subscribe(self, req: SubscribeRequest):
        """Subscribe to market data for one contract."""
        self.quote_api.subscrbie(req)

    def send_order(self, req: OrderRequest) -> str:
        """Send a new order and return its id string."""
        return self.trader_api.send_order(req)

    def cancel_order(self, req: CancelRequest):
        """Cancel an existing order."""
        self.trader_api.cancel_order(req)

    def query_account(self):
        """Query account funds."""
        self.trader_api.query_account()

    def query_position(self):
        """Query open positions."""
        self.trader_api.query_position()

    def process_timer_event(self, event):
        """Run one queued query roughly every 2 timer ticks, round-robin."""
        self.count += 1
        if self.count < 2:
            return
        self.count = 0
        # Guard against pop() on an empty queue (before init_query ran).
        if not self.query_functions:
            return
        func = self.query_functions.pop(0)
        func()
        self.query_functions.append(func)

    def init_query(self):
        """Start the round-robin account/position polling."""
        self.count = 0
        self.query_functions = [self.query_account, self.query_position]
        self.event_engine.register(EVENT_TIMER, self.process_timer_event)

    def _async_callback_exception_handler(self, e: AsyncDispatchException):
        """Log exceptions raised inside XTP async callbacks.

        BUGFIX: the original glued the three f-strings together without
        separators and printed to stdout instead of using the gateway log.
        """
        error_str = (f"发生内部错误:\n"
                     f"位置:{e.instance}.{e.function_name}\n"
                     f"详细信息:{e.what}")
        self.write_log(error_str)
class XtpQuoteApi(API.QuoteSpi):
    def __init__(self, gateway: BaseGateway):
        """Store the parent gateway and initialize connection state."""
        super().__init__()
        self.gateway = gateway
        self.gateway_name = gateway.gateway_name
        # Login credentials and server endpoint; filled in by connect().
        self.userid = ""
        self.password = ""
        self.client_id: int = 0
        self.server_ip = ""
        self.server_port: int = 0
        self.server_protocol = ""
        # Underlying XTP QuoteApi object; created in connect().
        self.api = None
def connect(
self,
userid: str,
password: str,
client_id: int,
server_ip: str,
server_port: int,
quote_protocol: str
):
""""""
if self.api:
return
self.userid = userid
self.password = password
self.client_id = client_id
self.server_ip = server_ip
self.server_port = server_port
if quote_protocol == "CTP":
self.quote_protocol = XTP_PROTOCOL_TYPE.XTP_PROTOCOL_TCP
else:
self.quote_protocol = XTP_PROTOCOL_TYPE.XTP_PROTOCOL_UDP
# Create API object
path = str(get_folder_path(self.gateway_name.lower()))
self.api = API.QuoteApi.CreateQuoteApi(
self.client_id,
path,
XTP_LOG_LEVEL.XTP_LOG_LEVEL_TRACE
)
self.api.RegisterSpi(self)
self.gateway.write_log("行情接口初始化成功")
# Login to server
Thread(target=self.login).start()
def login(self):
""""""
ret = self.api.Login(
self.server_ip,
self.server_port,
self.userid,
self.password,
self.quote_protocol
)
if not ret:
msg = "行情服务器登录成功"
self.query_contract()
else:
msg = f"行情服务器登录失败,原因:{ret}"
self.gateway.write_log(msg)
def close(self):
""""""
if self.api:
self.api.RegisterSpi(None)
self.api.Release()
def subscrbie(self, req: SubscribeRequest):
""""""
xtp_exchange = EXCHANGE_VT2XTP.get(req.exchange, "")
self.api.SubscribeMarketData([req.symbol], xtp_exchange)
def query_contract(self):
""""""
for exchange_id in EXCHANGE_XTP2VT.keys():
self.api.QueryAllTickers(exchange_id)
def check_error(self, func_name: str, error_info: XTPRspInfoStruct):
""""""
if error_info and error_info.error_id:
msg = f"{func_name}发生错误, 代码:{error_info.error_id},信息:{error_info.error_msg}"
self.gateway.write_log(msg)
return True
else:
return False
def OnDisconnected(self, reason: int) -> Any:
""""""
self.gateway.write_log("行情服务器连接断开")
self.login()
def OnError(self, error_info: XTPRspInfoStruct) -> Any:
""""""
self.check_error("行情接口", error_info)
def OnSubMarketData(self, ticker: XTPSpecificTickerStruct, error_info: XTPRspInfoStruct,
is_last: bool) -> Any:
""""""
self.check_error("订阅行情", error_info)
def OnUnSubMarketData(self, ticker: XTPSpecificTickerStruct, error_info: XTPRspInfoStruct,
is_last: bool) -> Any:
""""""
pass
def OnDepthMarketData(self, market_data: XTPMarketDataStruct, bid1_qty: Sequence[int],
bid1_count: int, max_bid1_count: int, ask1_qty: Sequence[int],
ask1_count: int, max_ask1_count: int) -> Any:
""""""
timestamp = str(market_data.data_time)
dt = datetime.strptime(timestamp, "%Y%m%d%H%M%S%f")
tick = TickData(
symbol=market_data.ticker,
exchange=EXCHANGE_XTP2VT[market_data.exchange_id],
datetime=dt,
volume=market_data.qty,
last_price=market_data.last_price,
limit_up=market_data.upper_limit_price,
limit_down=market_data.lower_limit_price,
open_price=market_data.open_price,
high_price=market_data.high_price,
low_price=market_data.low_price,
pre_close=market_data.pre_close_price,
bid_price_1=market_data.bid[0],
bid_price_2=market_data.bid[1],
bid_price_3=market_data.bid[2],
bid_price_4=market_data.bid[3],
bid_price_5=market_data.bid[4],
ask_price_1=market_data.ask[0],
ask_price_2=market_data.ask[1],
ask_price_3=market_data.ask[2],
ask_price_4=market_data.ask[3],
ask_price_5=market_data.ask[4],
bid_volume_1=market_data.bid_qty[0],
bid_volume_2=market_data.bid_qty[1],
bid_volume_3=market_data.bid_qty[2],
bid_volume_4=market_data.bid_qty[3],
bid_volume_5=market_data.bid_qty[4],
ask_volume_1=market_data.ask_qty[0],
ask_volume_2=market_data.ask_qty[1],
ask_volume_3=market_data.ask_qty[2],
ask_volume_4=market_data.ask_qty[3],
ask_volume_5=market_data.ask_qty[4],
gateway_name=self.gateway_name
)
tick.name = symbol_name_map.get(tick.vt_symbol, tick.symbol)
self.gateway.on_tick(tick)
def OnSubOrderBook(self, ticker: XTPSpecificTickerStruct, error_info: XTPRspInfoStruct,
is_last: bool) -> Any:
""""""
pass
def OnUnSubOrderBook(self, ticker: XTPSpecificTickerStruct, error_info: XTPRspInfoStruct,
is_last: bool) -> Any:
""""""
pass
def OnOrderBook(self, order_book: OrderBookStruct) -> Any:
""""""
pass
def OnSubTickByTick(self, ticker: XTPSpecificTickerStruct, error_info: XTPRspInfoStruct,
is_last: bool) -> Any:
""""""
pass
def OnUnSubTickByTick(self, ticker: XTPSpecificTickerStruct, error_info: XTPRspInfoStruct,
is_last: bool) -> Any:
""""""
pass
def OnTickByTick(self, tbt_data: XTPTickByTickStruct) -> Any:
""""""
pass
def OnSubscribeAllMarketData(self, exchange_id: XTP_EXCHANGE_TYPE,
error_info: XTPRspInfoStruct) -> Any:
""""""
pass
def OnUnSubscribeAllMarketData(self, exchange_id: XTP_EXCHANGE_TYPE,
error_info: XTPRspInfoStruct) -> Any:
""""""
pass
def OnSubscribeAllOrderBook(self, exchange_id: XTP_EXCHANGE_TYPE,
error_info: XTPRspInfoStruct) -> Any:
""""""
pass
def OnUnSubscribeAllOrderBook(self, exchange_id: XTP_EXCHANGE_TYPE,
error_info: XTPRspInfoStruct) -> Any:
""""""
pass
def OnSubscribeAllTickByTick(self, exchange_id: XTP_EXCHANGE_TYPE,
error_info: XTPRspInfoStruct) -> Any:
""""""
pass
def OnUnSubscribeAllTickByTick(self, exchange_id: XTP_EXCHANGE_TYPE,
error_info: XTPRspInfoStruct) -> Any:
""""""
pass
def OnQueryAllTickers(self, ticker_info: XTPQuoteStaticInfo, error_info: XTPRspInfoStruct,
is_last: bool) -> Any:
""""""
if self.check_error("查询合约", error_info):
return
contract = ContractData(
symbol=ticker_info.ticker,
exchange=EXCHANGE_XTP2VT[ticker_info.exchange_id],
name=ticker_info.ticker_name,
product=PRODUCT_XTP2VT[ticker_info.ticker_type],
size=1,
pricetick=ticker_info.price_tick,
min_volume=ticker_info.buy_qty_unit,
gateway_name=self.gateway_name
)
self.gateway.on_contract(contract)
symbol_name_map[contract.vt_symbol] = contract.name
if contract.product != Product.INDEX:
symbol_exchange_map[contract.symbol] = contract.exchange
if is_last:
self.gateway.write_log(f"{contract.exchange.value}合约信息查询成功")
def OnQueryTickersPriceInfo(self, ticker_info: XTPTickerPriceInfo, error_info: XTPRspInfoStruct,
is_last: bool) -> Any:
""""""
pass
def OnSubscribeAllOptionMarketData(self, exchange_id: XTP_EXCHANGE_TYPE,
error_info: XTPRspInfoStruct) -> Any:
""""""
pass
def OnUnSubscribeAllOptionMarketData(self, exchange_id: XTP_EXCHANGE_TYPE,
error_info: XTPRspInfoStruct) -> Any:
""""""
pass
def OnSubscribeAllOptionOrderBook(self, exchange_id: XTP_EXCHANGE_TYPE,
error_info: XTPRspInfoStruct) -> Any:
""""""
pass
def OnUnSubscribeAllOptionOrderBook(self, exchange_id: XTP_EXCHANGE_TYPE,
error_info: XTPRspInfoStruct) -> Any:
""""""
pass
def OnSubscribeAllOptionTickByTick(self, exchange_id: XTP_EXCHANGE_TYPE,
error_info: XTPRspInfoStruct) -> Any:
""""""
pass
def OnUnSubscribeAllOptionTickByTick(self, exchange_id: XTP_EXCHANGE_TYPE,
error_info: XTPRspInfoStruct) -> Any:
""""""
pass
class XtpTraderApi(API.TraderSpi):
def __init__(self, gateway: BaseGateway):
""""""
super().__init__()
self.gateway = gateway
self.gateway_name = gateway.gateway_name
self.userid = ""
self.password = ""
self.client_id = ""
self.server_ip = ""
self.server_port = ""
self.software_key = ""
self.api = None
self.session_id = 0
self.reqid = 0
# Whether current account supports margin or option
self.margin_trading = False
self.option_trading = False
#
self.short_positions = {}
    def connect(
        self,
        userid: str,
        password: str,
        client_id: int,
        server_ip: str,
        server_port: int,
        software_key: str
    ):
        """Create the trader API object and log in on a background thread.

        Becomes a no-op once the API object exists.
        """
        if self.api:
            return
        self.userid = userid
        self.password = password
        self.client_id = client_id
        self.server_ip = server_ip
        self.server_port = server_port
        self.software_key = software_key
        # Create API object
        path = str(get_folder_path(self.gateway_name.lower()))
        self.api = API.TraderApi.CreateTraderApi(
            self.client_id,
            path,
            XTP_LOG_LEVEL.XTP_LOG_LEVEL_TRACE
        )
        self.api.RegisterSpi(self)
        self.api.SetSoftwareKey(self.software_key)
        # Receive public-topic (order/trade) flow from the session start.
        self.api.SubscribePublicTopic(XTP_TE_RESUME_TYPE.XTP_TERT_RESTART)
        self.gateway.write_log("交易接口初始化成功")
        # Login to server
        Thread(target=self.login).start()
    def login(self):
        """Blocking login; a non-zero session id indicates success."""
        self.session_id = self.api.Login(
            self.server_ip,
            self.server_port,
            self.userid,
            self.password,
            XTP_PROTOCOL_TYPE.XTP_PROTOCOL_TCP
        )
        if self.session_id:
            msg = "交易服务器登录成功"
        else:
            error = self.api.GetApiLastError()
            msg = f"交易服务器登录失败,原因:{error.error_msg}"
        self.gateway.write_log(msg)
def close(self):
""""""
if self.api:
self.api.RegisterSpi(None)
self.api.Release()
    def send_order(self, req: OrderRequest) -> str:
        """Insert a new order.

        Returns the vt_orderid of the created order, or "" when the request
        uses an unsupported exchange or order type.
        """
        if req.exchange not in MARKET_VT2XTP:
            self.gateway.write_log(f"委托失败,不支持的交易所{req.exchange.value}")
            return ""
        if req.type not in ORDERTYPE_VT2XTP:
            self.gateway.write_log(f"委托失败,不支持的委托类型{req.type.value}")
            return ""
        xtp_req = XTPOrderInsertInfo()
        xtp_req.ticker = req.symbol
        xtp_req.market = MARKET_VT2XTP[req.exchange]
        xtp_req.price = req.price
        xtp_req.quantity = int(req.volume)
        xtp_req.side = DIRECTION_VT2XTP.get((req.direction, req.offset), "")
        xtp_req.price_type = ORDERTYPE_VT2XTP[req.type]
        # No offset means plain cash trading; any offset implies margin.
        if req.offset == Offset.NONE:
            xtp_req.business_type = XTP_BUSINESS_TYPE.XTP_BUSINESS_TYPE_CASH
        else:
            xtp_req.business_type = XTP_BUSINESS_TYPE.XTP_BUSINESS_TYPE_MARGIN
        orderid = self.api.InsertOrder(xtp_req, self.session_id)
        # Push a local copy of the order immediately so the UI shows it.
        order = req.create_order_data(str(orderid), self.gateway_name)
        self.gateway.on_order(order)
        return order.vt_orderid
    def cancel_order(self, req: CancelRequest):
        """Cancel an existing order by its XTP order id."""
        self.api.CancelOrder(int(req.orderid), self.session_id)
def query_account(self):
""""""
if not self.api:
return
self.reqid += 1
self.api.QueryAsset(self.session_id, self.reqid)
def query_position(self):
""""""
if not self.api:
return
self.reqid += 1
self.api.QueryPosition("", self.session_id, self.reqid)
if self.margin_trading:
self.reqid += 1
self.api.QueryCreditDebtInfo(self.session_id, self.reqid)
def check_error(self, func_name: str, error_info: XTPRspInfoStruct):
""""""
if error_info and error_info.error_id:
msg = f"{func_name}发生错误, 代码:{error_info.error_id},信息:{error_info.error_msg}"
self.gateway.write_log(msg)
return True
else:
return False
    def OnDisconnected(self, session_id: int, reason: int) -> Any:
        """Callback: connection lost; log it and re-login.

        NOTE(review): unlike connect(), this logs in synchronously on the
        callback thread -- confirm this is acceptable for the XTP API.
        """
        self.gateway.write_log("交易服务器连接断开")
        self.login()
    def OnError(self, error_info: XTPRspInfoStruct) -> Any:
        """Callback: generic API error; log it."""
        self.check_error("交易接口", error_info)
    def OnOrderEvent(self, order_info: XTPOrderInfo, error_info: XTPRspInfoStruct,
                     session_id: int) -> Any:
        """Callback: order status update; convert to OrderData and push."""
        self.check_error("委托下单", error_info)
        direction, offset = DIRECTION_XTP2VT[order_info.side]
        order = OrderData(
            symbol=order_info.ticker,
            exchange=MARKET_XTP2VT[order_info.market],
            orderid=str(order_info.order_xtp_id),
            type=ORDERTYPE_XTP2VT[order_info.price_type],
            direction=direction,
            offset=offset,
            price=order_info.price,
            volume=order_info.quantity,
            traded=order_info.qty_traded,
            status=STATUS_XTP2VT[order_info.order_status],
            time=order_info.insert_time,
            gateway_name=self.gateway_name
        )
        self.gateway.on_order(order)
    def OnTradeEvent(self, trade_info: XTPTradeReport, session_id: int) -> Any:
        """Callback: fill report; convert to TradeData and push."""
        direction, offset = DIRECTION_XTP2VT[trade_info.side]
        trade = TradeData(
            symbol=trade_info.ticker,
            exchange=MARKET_XTP2VT[trade_info.market],
            orderid=str(trade_info.order_xtp_id),
            tradeid=str(trade_info.exec_id),
            direction=direction,
            offset=offset,
            price=trade_info.price,
            volume=trade_info.quantity,
            time=trade_info.trade_time,
            gateway_name=self.gateway_name
        )
        self.gateway.on_trade(trade)
    def OnCancelOrderError(self, cancel_info: XTPOrderCancelInfo, error_info: XTPRspInfoStruct,
                           session_id: int) -> Any:
        """Callback: a cancel request failed; log the error."""
        self.check_error("委托撤单", error_info)
    def OnQueryOrder(self, order_info: XTPOrderInfo, error_info: XTPRspInfoStruct,
                     is_last: bool, session_id: int) -> Any:
        """Callback: one row of the order-query result."""
        if self.check_error("查询委托", error_info):
            return
        # NOTE(review): updateOrder is not defined in the visible part of
        # this class -- confirm it exists further down in the file.
        self.updateOrder(order_info)
        if is_last:
            self.gateway.write_log("查询委托信息成功")
def OnQueryTrade(self, trade_info: | |
only printed in label.
All other remaining parameters can be used to further customize the Map
configuration.
When producing the map, only one of 'centermap_geo' or 'centermap_delta'
options can be used at a time.
"""
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import cartopy.feature as cfeature
allowed_kwargs = ['alpha', 'arrow', 'atcolor', 'atm', 'centermap_delta', 'centermap_geo', 'centerproj',
'chcolor', 'chord_delta', 'chord_geo', 'countries', 'cpoints', 'cscale', 'dpi', 'ercolor',
'error', 'fmt', 'hcolor', 'heights', 'labels', 'lncolor', 'mapsize', 'mapstyle', 'meridians',
'nameimg', 'nscale', 'offset', 'outcolor', 'parallels', 'path', 'pscale', 'ptcolor',
'resolution', 'ring', 'rncolor', 'site_name', 'sites', 'sscale', 'states', 'zoom',
'site_box_alpha']
input_tests.check_kwargs(kwargs, allowed_kwargs=allowed_kwargs)
if not type(name) == str:
raise TypeError('name keyword must be a string')
radius = radius*u.km
occs = {}
try:
occs['stars'] = SkyCoord(coord, frame='icrs', unit=(u.hourangle, u.degree))
except:
raise KeyError('"star" keyword is not in the format: "hh mm ss.sss dd mm ss.sss" or "hh.hhhhhhhh dd.dddddddd"')
try:
occs['datas'] = Time(time)
except:
raise KeyError('"time" keyword is not a iso or isot time format')
occs['ca'] = ca*u.arcsec
occs['posa'] = pa*u.deg
occs['vel'] = vel*(u.km/u.s)
occs['dist'] = dist*u.AU
occs['magG'] = mag
occs['longi'] = longi
mapstyle = kwargs.get('mapstyle', 1)
if mapstyle not in [1, 2]:
raise ValueError('mapstyle must be 1 or 2]')
resolution = kwargs.get('resolution', 2)
if resolution not in [1, 2, 3]:
raise TypeError('resolution keyword must be one of these: [1, 2, 3] where 1=10m, 2=50m and 3=100m')
res = ['10m', '50m', '110m']
resolution = res[resolution-1]
nameimg = kwargs.get('nameimg', '{}_{}'.format(name, occs['datas'].isot.replace(':', '_')))
fmt = kwargs.get('fmt', 'png')
dpi = kwargs.get('dpi', 100)
step = kwargs.get('step', 1)
mapsize = kwargs.get('mapsize', [46.0, 38.0])*u.cm
erro = kwargs.get('error', None)
ring = kwargs.get('ring', None)
atm = kwargs.get('atm', None)
cpoints = kwargs.get('cpoints', 60)
states = kwargs.get('states', True)
labels = kwargs.get('labels', True)
meridians = kwargs.get('meridians', 30)
parallels = kwargs.get('parallels', 30)
nscale = kwargs.get('nscale', 1)
cscale = kwargs.get('cscale', 1)
sscale = kwargs.get('sscale', 1)
pscale = kwargs.get('pscale', 1)
heights = np.array(kwargs.get('heights'), None)
alpha = kwargs.get('alpha', 0.2)
site_box_alpha = kwargs.get('site_box_alpha', 0.0)
centermap_geo = kwargs.get('centermap_geo', None)
centermap_delta = kwargs.get('centermap_delta', None)
if 'centermap_geo' in kwargs and 'centermap_delta' in kwargs:
raise ValueError('User must give "centermap_geo" OR "centermap_delta"')
zoom = kwargs.get('zoom', 1)
if zoom <= 0:
raise ValueError('zoom can not be equal or smaller than 0.')
off_ra, off_de = kwargs.get('offset', [0.0, 0.0])*u.mas
arrow = kwargs.get('arrow', True)
site_name = kwargs.get('site_name', True)
path = kwargs.get('path', '.')
if not os.path.exists(path):
raise IOError('Path does not exists')
chord_delta = np.array(kwargs.get('chord_delta', []), ndmin=1)*u.km
chord_geo = kwargs.get('chord_geo', [])
if len(chord_geo) > 0:
try:
b = np.array(chord_geo, ndmin=2)
chord_geo = b.reshape(len(b), 2)
except:
raise ValueError('chord_geo must a set of pairs with longitude and latitude')
chord_geo = EarthLocation(*chord_geo.T)
sites = {}
if 'sites' in kwargs.keys():
if type(kwargs['sites']) == str and os.path.isfile(kwargs['sites']):
data = np.loadtxt(kwargs['sites'], dtype={'names': ('name', 'lon', 'lat', 'offx', 'offy', 'color'),
'formats': ('S30', 'f8', 'f8', 'f8', 'f8', 'S30')},
delimiter=',', ndmin=1)
for i, s in enumerate(data):
sites[s['name'].strip().decode()] = [s['lon'], s['lat'], s['offx'], s['offy'], s['color'].strip().decode()]
elif type(kwargs['sites']) == dict:
sites = kwargs['sites']
else:
raise TypeError('sites keyword must be a file or a dictionary')
countries = {}
if 'countries' in kwargs.keys():
if type(kwargs['countries']) == str and os.path.isfile(kwargs['countries']):
data = np.loadtxt(kwargs['countries'], dtype={'names': ('name', 'lon', 'lat'), 'formats': ('S30', 'f8', 'f8')},
delimiter=',', ndmin=1)
for i, c in enumerate(data):
countries[c['name'].strip().decode()] = [c['lon'], c['lat']]
elif type(kwargs['countries']) == dict:
countries = kwargs['countries']
else:
raise TypeError('country keyword must be a file or a dictionary')
# calculates offsets
dca = off_ra*np.sin(occs['posa']) + off_de*np.cos(occs['posa'])
dt = (-(off_ra * np.cos(occs['posa']) - off_de * np.sin(occs['posa'])).to(u.rad) * occs['dist'].to(u.km) / np.absolute(
occs['vel'])).value * u.s
ca1 = occs['ca'] + dca
data = occs['datas'] + dt
# define map parameters
center_gcrs = GCRS(occs['stars'].ra, occs['stars'].dec, 1*u.R_earth, obstime=data)
center_itrs = center_gcrs.transform_to(ITRS(obstime=data))
center_map = center_itrs.earth_location
centert = True
if 'centerproj' in kwargs.keys():
if type(kwargs['centerproj']) == EarthLocation:
center_map = kwargs['centerproj']
elif np.array(kwargs['centerproj']).shape == (2,):
center_map = EarthLocation.from_geodetic(*kwargs['centerproj'], 0.0)
else:
raise TypeError('centerproj must be an Astropy EarthLocation Object or an array with Longitude and Latitude only')
centert = False
fig = plt.figure(figsize=(mapsize.to(u.imperial.inch).value),facecolor='w')
projection = ccrs.Orthographic(central_longitude=center_map.lon.value, central_latitude=center_map.lat.value)
if labels:
axf = plt.axes(projection=projection)
else:
axf = plt.axes([-0.001, -0.001, 1.002, 1.002], projection=projection)
axf.set_global()
# calculates regions for zoom
limits = None
r = const.R_earth.to(u.m).value
if centermap_geo is not None:
cx, cy = latlon2xy(centermap_geo[0], centermap_geo[1], center_map.lon.value, center_map.lat.value)
limits = [cx/1000.0, cy/1000.0]
if np.any(np.absolute(limits) > r):
raise ValueError('Coordinates for centermap_geo are outside the visible range.')
elif centermap_delta is not None:
limits = centermap_delta
elif zoom != 1:
limits = [0, 0]
if limits is not None:
dr = r/zoom
l0 = (limits[0]*u.km).to(u.m).value
l1 = (limits[1]*u.km).to(u.m).value
dmsize = mapsize[0]/mapsize[1]
if mapsize[1] < mapsize[0]:
lx = l0 - dr*dmsize
ux = l0 + dr*dmsize
ly = l1 - dr
uy = l1 + dr
else:
lx = l0 - dr
ux = l0 + dr
ly = l1 - dr/dmsize
uy = l1 + dr/dmsize
axf.set_xlim(lx, ux)
axf.set_ylim(ly, uy)
if labels and zoom > 1:
centert = False
# plots features
axf.coastlines(resolution=resolution, color='0.3')
ocean = cfeature.NaturalEarthFeature('physical', 'ocean', resolution)
land = cfeature.NaturalEarthFeature('physical', 'land', resolution)
border = cfeature.NaturalEarthFeature('cultural', 'admin_0_countries', resolution)
if mapstyle == 1:
axf.add_feature(ocean, zorder=0, color='0.9')
axf.add_feature(land, zorder=0, edgecolor='None', color='1.0')
axf.add_feature(border, zorder=0.1, edgecolor='0.4', facecolor='None')
axf.add_feature(cfeature.RIVERS, zorder=0, edgecolor='0.7')
axf.add_feature(cfeature.LAKES, zorder=0, color='0.7')
ptcolor = 'black'
lncolor = 'blue'
ercolor = 'blue'
rncolor = 'blue'
atcolor = 'blue'
outcolor = 'red'
hcolor = 'black'
chcolor = 'gray'
elif mapstyle == 2:
axf.add_feature(ocean, zorder=0, facecolor=cfeature.COLORS['water'])
axf.add_feature(land, zorder=0, edgecolor='None', facecolor=cfeature.COLORS['land'])
axf.add_feature(border, zorder=0, edgecolor='0.5', facecolor=cfeature.COLORS['land'])
axf.add_feature(border, zorder=0.1, edgecolor='0.5', facecolor='None')
axf.add_feature(cfeature.RIVERS, zorder=0)
axf.add_feature(cfeature.LAKES, zorder=0)
ptcolor = 'red'
lncolor = 'blue'
ercolor = 'red'
rncolor = 'black'
atcolor = 'black'
outcolor = 'red'
hcolor = 'black'
chcolor = 'gray'
if states:
states_r = cfeature.NaturalEarthFeature('cultural', 'admin_1_states_provinces', resolution)
axf.add_feature(states_r, zorder=0, edgecolor='0.6', facecolor='None')
gl = axf.gridlines(xlocs=np.arange(-180, 180.001, meridians), ylocs=np.arange(-90, 90.001, parallels))
gl.n_steps = 180
sun = get_sun(data)
sun_lat = sun.dec
sun_lon = sun.ra - data.sidereal_time('mean', 'greenwich')
pole_lon = sun_lon.deg
pole_lat = sun_lat.deg
proj_sun = ccrs.Orthographic(central_longitude=pole_lon+180, central_latitude=-pole_lat)
bordx = r*np.cos(np.arange(0, 361, 0.5)*u.deg)
bordy = r*np.sin(np.arange(0, 361, 0.5)*u.deg)
axf.fill(bordx, bordy, transform=proj_sun, linewidth=0, color='black', alpha=alpha)
axf.fill(bordx*np.cos(18*u.deg), bordy*np.cos(18*u.deg), transform=proj_sun, linewidth=0, color='black', alpha=alpha)
ptcolor = kwargs.get('ptcolor', ptcolor)
lncolor = kwargs.get('lncolor', lncolor)
ercolor = kwargs.get('ercolor', ercolor)
rncolor = kwargs.get('rncolor', rncolor)
atcolor = kwargs.get('atcolor', atcolor)
outcolor = kwargs.get('outcolor', outcolor)
hcolor = kwargs.get('hcolor', hcolor)
chcolor = kwargs.get('chcolor', chcolor)
# calculates path
vec = np.arange(0, int(8000/(np.absolute(occs['vel'].value))), step)
vec = np.sort(np.concatenate((vec, -vec[1:]), axis=0))
pa = Angle(occs['posa'])
pa.wrap_at('180d', inplace=True)
if pa > 90*u.deg:
paplus = pa - 180*u.deg
elif pa < -90*u.deg:
paplus = pa + 180*u.deg
else:
paplus = pa
deltatime = vec*u.s
datas1 = data + deltatime
centers_gcrs = GCRS(np.repeat(occs['stars'].ra, len(datas1)), np.repeat(occs['stars'].dec, len(datas1)),
1*u.R_earth, obstime=datas1)
centers_itrs = centers_gcrs.transform_to(ITRS(obstime=datas1))
centers = centers_itrs.earth_location
dista = (occs['dist'].to(u.km)*ca1.to(u.rad)).value*u.km
ax = dista*np.sin(pa) + (deltatime*occs['vel'])*np.cos(paplus)
by = dista*np.cos(pa) - (deltatime*occs['vel'])*np.sin(paplus)
ax2 = ax - radius * np.sin(paplus)
by2 = by - radius * np.cos(paplus)
lon1, lat1 = xy2latlon(ax2.to(u.m).value, by2.to(u.m).value, centers.lon.value, centers.lat.value, datas1)
j = np.where(lon1 < 1e+30)
axf.plot(lon1[j], lat1[j], transform=ccrs.Geodetic(), color=lncolor)
j = np.where(lon1 > 1e+30)
if 'centerproj' not in kwargs:
plt.plot(ax2[j].to(u.m).value, by2[j].to(u.m).value, color=outcolor, clip_on=(not centert), zorder=-0.2)
ax3 = ax + radius * np.sin(paplus)
by3 = by + radius * np.cos(paplus)
lon2, lat2 = xy2latlon(ax3.to(u.m).value, by3.to(u.m).value, centers.lon.value, centers.lat.value, datas1)
j = np.where(lon2 < 1e+30)
axf.plot(lon2[j], lat2[j], transform=ccrs.Geodetic(), color=lncolor)
j = np.where(lon2 > 1e+30)
if 'centerproj' not in kwargs:
plt.plot(ax3[j].to(u.m).value, by3[j].to(u.m).value, color=outcolor, clip_on=(not centert), zorder=-0.2)
# plots chords_delta
for val in chord_delta:
ax2 = ax + val*np.sin(paplus)
by2 = by + val*np.cos(paplus)
lon1, lat1 = xy2latlon(ax2.to(u.m).value, by2.to(u.m).value, centers.lon.value, centers.lat.value, datas1)
j = np.where(lon1 < 1e+30)
axf.plot(lon1[j], lat1[j], transform=ccrs.Geodetic(), color=chcolor)
# plots chords_geo
for coord_geo in chord_geo:
xt, yt = latlon2xy(coord_geo.lon.deg, coord_geo.lat.deg, centers.lon.value, centers.lat.value)*u.m
val = np.sqrt((xt-ax)**2 + (yt-by)**2)
k = val.argmin()
ang = np.arctan2((yt-by)[k], (xt-ax)[k])
val = np.sign(np.sin(ang))*val[k]
ax2 = ax + val*np.sin(paplus)
by2 = by + val*np.cos(paplus)
lon1, lat1 = xy2latlon(ax2.to(u.m).value, by2.to(u.m).value, centers.lon.value, centers.lat.value, datas1)
j = np.where(lon1 < 1e+30)
axf.plot(lon1[j], lat1[j], transform=ccrs.Geodetic(), color=chcolor)
# plots error
if erro is not None:
err = erro*u.mas
errd = (occs['dist'].to(u.km)*err.to(u.rad)).value*u.km
ax2 = ax - errd*np.sin(paplus) - radius*np.sin(paplus)
by2 = by - errd*np.cos(paplus) - radius*np.cos(paplus)
lon1, lat1 = xy2latlon(ax2.to(u.m).value, by2.to(u.m).value, centers.lon.value, centers.lat.value, datas1)
j = np.where(lon1 < 1e+30)
axf.plot(lon1[j], lat1[j], '--', | |
# Source repository: rgraebert/skia
#!/usr/bin/python
"""
Copyright 2013 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
Calculate differences between image pairs, and store them in a database.
"""
import contextlib
import csv
import logging
import os
import re
import shutil
import sys
import tempfile
import urllib
try:
from PIL import Image, ImageChops
except ImportError:
raise ImportError('Requires PIL to be installed; see '
+ 'http://www.pythonware.com/products/pil/')
# Set the PYTHONPATH to include the tools directory.
sys.path.append(
os.path.join(
os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir,
'tools'))
import find_run_binary
SKPDIFF_BINARY = find_run_binary.find_path_to_program('skpdiff')
DEFAULT_IMAGE_SUFFIX = '.png'
DEFAULT_IMAGES_SUBDIR = 'images'
# Raw string avoids invalid-escape-sequence warnings for \w and \- in the
# pattern; the compiled regex is unchanged.
DISALLOWED_FILEPATH_CHAR_REGEX = re.compile(r'[^\w\-]')
DIFFS_SUBDIR = 'diffs'
WHITEDIFFS_SUBDIR = 'whitediffs'
# Number of intensity levels per color band (8-bit images).
VALUES_PER_BAND = 256
# Keys used within DiffRecord dictionary representations.
# NOTE: Keep these in sync with static/constants.js
KEY__DIFFERENCE_DATA__MAX_DIFF_PER_CHANNEL = 'maxDiffPerChannel'
KEY__DIFFERENCE_DATA__NUM_DIFF_PIXELS = 'numDifferingPixels'
KEY__DIFFERENCE_DATA__PERCENT_DIFF_PIXELS = 'percentDifferingPixels'
KEY__DIFFERENCE_DATA__PERCEPTUAL_DIFF = 'perceptualDifference'
KEY__DIFFERENCE_DATA__WEIGHTED_DIFF = 'weightedDiffMeasure'
class DiffRecord(object):
  """ Record of differences between two images. """
  def __init__(self, storage_root,
               expected_image_url, expected_image_locator,
               actual_image_url, actual_image_locator,
               expected_images_subdir=DEFAULT_IMAGES_SUBDIR,
               actual_images_subdir=DEFAULT_IMAGES_SUBDIR,
               image_suffix=DEFAULT_IMAGE_SUFFIX):
    """Download this pair of images (unless we already have them on local disk),
    and prepare a DiffRecord for them.
    TODO(epoger): Make this asynchronously download images, rather than blocking
    until the images have been downloaded and processed.
    Args:
      storage_root: root directory on local disk within which we store all
          images
      expected_image_url: file or HTTP url from which we will download the
          expected image
      expected_image_locator: a unique ID string under which we will store the
          expected image within storage_root (probably including a checksum to
          guarantee uniqueness)
      actual_image_url: file or HTTP url from which we will download the
          actual image
      actual_image_locator: a unique ID string under which we will store the
          actual image within storage_root (probably including a checksum to
          guarantee uniqueness)
      expected_images_subdir: the subdirectory expected images are stored in.
      actual_images_subdir: the subdirectory actual images are stored in.
      image_suffix: the suffix of images.
    """
    # Make locators safe for use as filename components.
    expected_image_locator = _sanitize_locator(expected_image_locator)
    actual_image_locator = _sanitize_locator(actual_image_locator)
    # Download the expected/actual images, if we don't have them already.
    # TODO(rmistry): Add a parameter that makes _download_and_open_image raise
    # an exception if images are not found locally (instead of trying to
    # download them).
    expected_image_file = os.path.join(
        storage_root, expected_images_subdir,
        str(expected_image_locator) + image_suffix)
    actual_image_file = os.path.join(
        storage_root, actual_images_subdir,
        str(actual_image_locator) + image_suffix)
    try:
      expected_image = _download_and_open_image(
          expected_image_file, expected_image_url)
    except Exception:
      # Log with traceback, then re-raise so the caller can decide what to do.
      logging.exception('unable to download expected_image_url %s to file %s' %
                        (expected_image_url, expected_image_file))
      raise
    try:
      actual_image = _download_and_open_image(
          actual_image_file, actual_image_url)
    except Exception:
      logging.exception('unable to download actual_image_url %s to file %s' %
                        (actual_image_url, actual_image_file))
      raise
    # Generate the diff image (absolute diff at each pixel) and
    # max_diff_per_channel.
    diff_image = _generate_image_diff(actual_image, expected_image)
    diff_histogram = diff_image.histogram()
    (diff_width, diff_height) = diff_image.size
    self._weighted_diff_measure = _calculate_weighted_diff_metric(
        diff_histogram, diff_width * diff_height)
    self._max_diff_per_channel = _max_per_band(diff_histogram)
    # Generate the whitediff image (any differing pixels show as white).
    # This is tricky, because when you convert color images to grayscale or
    # black & white in PIL, it has its own ideas about thresholds.
    # We have to force it: if a pixel has any color at all, it's a '1'.
    bands = diff_image.split()
    graydiff_image = ImageChops.lighter(ImageChops.lighter(
        bands[0], bands[1]), bands[2])
    # point() maps any nonzero intensity to VALUES_PER_BAND before the
    # 1-bit conversion, so no pixel is lost to PIL's default threshold.
    whitediff_image = (graydiff_image.point(lambda p: p > 0 and VALUES_PER_BAND)
                       .convert('1', dither=Image.NONE))
    # Calculate the perceptual difference percentage.
    skpdiff_csv_dir = tempfile.mkdtemp()
    try:
      skpdiff_csv_output = os.path.join(skpdiff_csv_dir, 'skpdiff-output.csv')
      expected_img = os.path.join(storage_root, expected_images_subdir,
                                  str(expected_image_locator) + image_suffix)
      actual_img = os.path.join(storage_root, actual_images_subdir,
                                str(actual_image_locator) + image_suffix)
      find_run_binary.run_command(
          [SKPDIFF_BINARY, '-p', expected_img, actual_img,
           '--csv', skpdiff_csv_output, '-d', 'perceptual'])
      with contextlib.closing(open(skpdiff_csv_output)) as csv_file:
        # NOTE(review): if the CSV has several rows, only the last row's
        # value survives this loop -- presumably skpdiff emits one row.
        for row in csv.DictReader(csv_file):
          perceptual_similarity = float(row[' perceptual'].strip())
          if not 0 <= perceptual_similarity <= 1:
            # skpdiff outputs -1 if the images are different sizes. Treat any
            # output that does not lie in [0, 1] as having 0% perceptual
            # similarity.
            perceptual_similarity = 0
          # skpdiff returns the perceptual similarity, convert it to get the
          # perceptual difference percentage.
          self._perceptual_difference = 100 - (perceptual_similarity * 100)
    finally:
      # Always remove the temporary CSV directory, even on failure.
      shutil.rmtree(skpdiff_csv_dir)
    # Final touches on diff_image: use whitediff_image as an alpha mask.
    # Unchanged pixels are transparent; differing pixels are opaque.
    diff_image.putalpha(whitediff_image)
    # Store the diff and whitediff images generated above.
    diff_image_locator = _get_difference_locator(
        expected_image_locator=expected_image_locator,
        actual_image_locator=actual_image_locator)
    basename = str(diff_image_locator) + image_suffix
    _save_image(diff_image, os.path.join(
        storage_root, DIFFS_SUBDIR, basename))
    _save_image(whitediff_image, os.path.join(
        storage_root, WHITEDIFFS_SUBDIR, basename))
    # Calculate difference metrics.
    (self._width, self._height) = diff_image.size
    # In the 1-bit whitediff image, the last histogram bin counts the
    # white (differing) pixels.
    self._num_pixels_differing = (
        whitediff_image.histogram()[VALUES_PER_BAND - 1])
  def get_num_pixels_differing(self):
    """Returns the absolute number of pixels that differ."""
    return self._num_pixels_differing
  def get_percent_pixels_differing(self):
    """Returns the percentage of pixels that differ, as a float between
    0 and 100 (inclusive)."""
    return ((float(self._num_pixels_differing) * 100) /
            (self._width * self._height))
  def get_perceptual_difference(self):
    """Returns the perceptual difference percentage."""
    return self._perceptual_difference
  def get_weighted_diff_measure(self):
    """Returns a weighted measure of image diffs, as a float between 0 and 100
    (inclusive).
    TODO(epoger): Delete this function, now that we have perceptual diff?
    """
    return self._weighted_diff_measure
  def get_max_diff_per_channel(self):
    """Returns the maximum difference between the expected and actual images
    for each R/G/B channel, as a list."""
    return self._max_diff_per_channel
  def as_dict(self):
    """Returns a dictionary representation of this DiffRecord, as needed when
    constructing the JSON representation."""
    return {
        KEY__DIFFERENCE_DATA__NUM_DIFF_PIXELS: self._num_pixels_differing,
        KEY__DIFFERENCE_DATA__PERCENT_DIFF_PIXELS:
            self.get_percent_pixels_differing(),
        KEY__DIFFERENCE_DATA__WEIGHTED_DIFF: self.get_weighted_diff_measure(),
        KEY__DIFFERENCE_DATA__MAX_DIFF_PER_CHANNEL: self._max_diff_per_channel,
        KEY__DIFFERENCE_DATA__PERCEPTUAL_DIFF: self._perceptual_difference,
    }
class ImageDiffDB(object):
""" Calculates differences between image pairs, maintaining a database of
them for download."""
  def __init__(self, storage_root):
    """
    Args:
      storage_root: string; root path within which the DB will store all of
          its images and metadata
    """
    self._storage_root = storage_root
    # Dictionary of DiffRecords, keyed by (expected_image_locator,
    # actual_image_locator) tuples.
    self._diff_dict = {}
def add_image_pair(self,
expected_image_url, expected_image_locator,
actual_image_url, actual_image_locator):
"""Download this pair of images (unless we already have them on local disk),
and prepare a DiffRecord for them.
TODO(epoger): Make this asynchronously download images, rather than blocking
until the images have been downloaded and processed.
When we do that, we should probably add a new method that will block
until all of the images have been downloaded and processed. Otherwise,
we won't know when it's safe to start calling get_diff_record().
jcgregorio notes: maybe just make ImageDiffDB thread-safe and create a
thread-pool/worker queue at a higher level that just uses ImageDiffDB?
Args:
expected_image_url: file or HTTP url from which we will download the
expected image
expected_image_locator: a unique ID string under which we will store the
expected image within storage_root (probably including a checksum to
guarantee uniqueness)
actual_image_url: file or HTTP url from which we will download the
actual image
actual_image_locator: a unique ID string under which we will store the
actual image within storage_root (probably including a checksum to
guarantee uniqueness)
"""
expected_image_locator = _sanitize_locator(expected_image_locator)
actual_image_locator = _sanitize_locator(actual_image_locator)
key = (expected_image_locator, actual_image_locator)
if not key in self._diff_dict:
try:
new_diff_record = DiffRecord(
self._storage_root,
expected_image_url=expected_image_url,
expected_image_locator=expected_image_locator,
actual_image_url=actual_image_url,
actual_image_locator=actual_image_locator)
except Exception:
logging.exception('got exception while creating new DiffRecord')
return
self._diff_dict[key] = new_diff_record
def get_diff_record(self, expected_image_locator, actual_image_locator):
"""Returns the DiffRecord for this image pair.
Raises a KeyError if we don't have a DiffRecord for this image pair.
"""
key = (_sanitize_locator(expected_image_locator),
_sanitize_locator(actual_image_locator))
return self._diff_dict[key]
# Utility functions
def _calculate_weighted_diff_metric(histogram, num_pixels):
  """Given the histogram of a diff image (per-channel diff at each
  pixel between two images), calculate the weighted diff metric (a
  stab at how different the two images really are).

  TODO(epoger): Delete this function, now that we have perceptual diff?

  Args:
    histogram: PIL histogram of a per-channel diff between two images
    num_pixels: integer; the total number of pixels in the diff image

  Returns: a weighted diff metric, as a float between 0 and 100 (inclusive).
  """
  # TODO(epoger): As a wild guess at an appropriate metric, weight each
  # different pixel by the square of its delta value. (The more different
  # a pixel is from its expectation, the more we care about it.)
  assert len(histogram) % VALUES_PER_BAND == 0
  # Floor division keeps num_bands an int on both Python 2 and 3.
  num_bands = len(histogram) // VALUES_PER_BAND
  max_diff = num_pixels * num_bands * (VALUES_PER_BAND - 1)**2
  # enumerate() replaces the Python-2-only xrange() index loop.
  total_diff = sum(count * (index % VALUES_PER_BAND)**2
                   for index, count in enumerate(histogram))
  return float(100 * total_diff) / max_diff
def _max_per_band(histogram):
"""Given the histogram of an image, return the maximum value of each band
(a.k.a. "color channel", such as R/G/B) across the entire image.
Args:
histogram: PIL histogram
Returns the maximum value of each band within the image histogram, as a list.
"""
max_per_band = []
assert(len(histogram) % VALUES_PER_BAND == 0)
num_bands = len(histogram) / VALUES_PER_BAND
for band in xrange(num_bands):
# Assuming that VALUES_PER_BAND is | |
keys
irobs = sum([v.values() for v in matching_irobs.values()], [])
# list of lists of IROBs
irobs = sum(irobs, [])
# list of IROBs
dropped_irobs = filter(lambda x: x.wasDropped(), irobs)
fail = (len(dropped_irobs) > 0)
if fail:
dprint("Session: %s" % session)
dprint(" IROBs: " % irobs)
return fail
failover_sessions = filter(failed_over, self._sessions)
print ("Failover sessions: %d/%d%s, total %f seconds" %
(len(failover_sessions), num_sessions,
perc_str(failover_sessions, num_sessions),
sum([duration(s) for s in failover_sessions])))
# check the sessions that started in a single-network period
# but finished after wifi arrived.
def needed_reevaluation(session):
session_start = self.getAdjustedTime(session['start'])
session_end = session_start + duration(session)
for start, length in wifi_periods:
if session_start >= start and session_start <= (start + length):
return False
# didn't start during this wifi period.
# did this wifi period come in the middle of the session?
if start > session_start and start < session_end:
# wifi arrived sometime during session
return True
return False
reevaluation_sessions = filter(needed_reevaluation, self._sessions)
print ("Needed-reevaluation sessions: %d/%d%s" %
(len(reevaluation_sessions), num_sessions,
perc_str(reevaluation_sessions, num_sessions)))
# TODO: print average wifi, 3G session times
self._debug_sessions = failover_sessions
#self._debug_sessions = reevaluation_sessions
    def _printIROBTimesByNetwork(self):
        """Print the mean IROB duration for every (network, direction) pair.

        Pairs with no matching IROBs are reported as '(no IROBs)'.
        """
        irobs = self._getIROBs()
        print "Average IROB durations:"
        for network_type, direction in product(['wifi', '3G'], ['down', 'up']):
            dprint("%s sessions:" % network_type)
            times = [irob.getDuration() for irob in irobs[network_type][direction]]
            if len(times) > 0:
                # NOTE(review): assumes getDuration() returns floats; integer
                # durations would truncate under Python 2 division -- confirm.
                avg = sum(times) / len(times)
                print " %5s, %4s: %f" % (network_type, direction, avg)
            else:
                print " %5s, %4s: (no IROBs)" % (network_type, direction)
def _getIROBs(self, start=-1.0, end=None):
"""Get all IROBs that start in the specified time range.
Returns a dictionary: d[network_type][direction] => [IROB(),...]
start -- relative starting time
end -- relative ending time
"""
if end is None:
end = self._end + 1.0
matching_irobs = {'wifi': {'down': [], 'up': []},
'3G': {'down': [], 'up': []}}
def time_matches(irob):
irob_start, irob_end = [self.getAdjustedTime(t) for t in irob.getTimeInterval()]
return (irob_start >= start and irob_end <= end)
for network_type, direction in product(['wifi', '3G'], ['down', 'up']):
if network_type in self._networks and direction in self._networks[network_type]:
irobs = self._networks[network_type][direction].values()
matching_irobs[network_type][direction] = filter(time_matches, irobs)
return matching_irobs
def _drawDebugging(self):
self._drawSomeSessions(self._debug_sessions,
marker='s', color='red', markersize=10,
markerfacecolor='none', linestyle='none')
def getIROBPosition(self, irob):
# TODO: allow for simultaneous (stacked) IROB plotting.
return (self._network_pos_offsets[irob.network_type] +
self._direction_pos_offsets[irob.direction])
def getIROBHeight(self, irob):
# TODO: adjust based on the number of stacked IROBs.
return self._irob_height
def getIROBColor(self, irob):
return self._irob_colors[irob.network_type]
def getAdjustedTime(self, timestamp):
return timestamp - self._start
def getAdjustedTraceLatency(self, latency):
if not self._cross_country_latency or latency < 0.0001:
return latency
LATENCY_ADJUSTMENT = 0.100 # 100ms cross-country
return latency + LATENCY_ADJUSTMENT
def setStart(self, start):
# for resetting the experiment start, to avoid including the
# setup transfers and waiting time at the server.
self._start = start
    def parseLine(self, line):
        """Dispatch one log line to the matching handler.

        Keeps self._start/_end tracking the first/last timestamp seen, and
        self._last_line as context for duration computations.
        """
        timestamp = getTimestamp(line)
        if self._start == None:
            self._start = timestamp
        self._end = timestamp
        if "Got update from scout" in line:
            #[time][pid][tid] Got update from scout: 192.168.1.2 is up,
            # bandwidth_down 43226 bandwidth_up 12739 bytes/sec RTT 97 ms
            # type wifi
            ip, status, network_type = re.search(self._network_regex, line).groups()
            if not self._is_server:
                self._modifyNetwork(timestamp, ip, status, network_type)
        elif "Successfully bound" in line:
            # [time][pid][CSockSender 57] Successfully bound osfd 57 to 192.168.1.2:0
            self._addConnection(line)
        elif "Adding connection" in line:
            # [time][pid][Listener 13] Adding connection 14 from 192.168.1.2
            # bw_down 43226 bw_up 12739 RTT 97
            # type wifi(peername 192.168.127.12)
            self._addIncomingConnection(line, timestamp)
        elif re.search(self._csocket_destroyed_regex, line) != None:
            # [time][pid][CSockSender 57] CSocket 57 is being destroyed
            self._removeConnection(line)
        elif "Getting bytes to send from IROB" in line:
            # [time][pid][CSockSender 57] Getting bytes to send from IROB 6
            irob = int(line.strip().split()[-1])
            network = self._getNetworkType(line)
            # Remember which IROB is being sent so the following
            # "...returning N bytes" line can be attributed to it.
            self._currentSendingIROB = irob
            self._addIROB(timestamp, network, irob, 'up')
        elif "...returning " in line:
            # [time][pid][CSockSender 57] ...returning 1216 bytes, seqno 0
            assert self._currentSendingIROB != None
            datalen = int(line.strip().split()[3])
            network = self._getNetworkType(line)
            self._addIROBBytes(timestamp, network, self._currentSendingIROB,
                               datalen, 'up')
        elif "About to send message" in line:
            # [time][pid][CSockSender 57] About to send message: Type: Begin_IROB(1)
            # Send labels: FG,SMALL IROB: 0 numdeps: 0
            self._addTransfer(line, 'up')
        elif "Received message" in line:
            # [time][pid][CSockReceiver 57] Received message: Type: Begin_IROB(1)
            # Send labels: FG,SMALL IROB: 0 numdeps: 0
            self._addTransfer(line, 'down')
        elif "network estimator" in line:
            network_type = re.search(self._network_estimator_regex, line).group(1)
            dprint("got observation: %s" % line)
            bw_match = re.search(self._network_bandwidth_regex, line)
            lat_match = re.search(self._network_latency_regex, line)
            bw, latency = None, None
            # Only keep strictly positive observations; zeros mean "no data".
            if bw_match and float(bw_match.groups()[0]) > 0.0:
                bw = bw_match.groups()
            if lat_match and float(lat_match.groups()[0]) > 0.0:
                latency = lat_match.groups()
            self._addEstimates(network_type, timestamp, bw=bw, latency=latency)
        elif "New spot values" in line:
            # TODO: parse values, call self._addNetworkObservation
            network_type = self._getNetworkType(line)
            pass
        elif "New estimates" in line:
            # TODO: parse values, call self._addNetworkEstimate
            network_type = self._getNetworkType(line)
            pass
        elif "chooseNetwork" in line:
            # Fall back to wall-clock delta when the line carries no
            # explicit duration.
            duration = timestamp - getTimestamp(self._last_line)
            time_match = re.search(choose_network_time_regex, line)
            if time_match:
                duration = float(time_match.group(1))
            self._choose_network_calls.append((timestamp, duration))
        elif "redundancy_strategy_type" in line:
            # [timestamp][pid][Bootstrapper 49] Sending hello: Type: Hello(0)
            # Send labels: listen port: 42424
            # num_ifaces: 2
            # redundancy_strategy_type: intnw_redundant
            redundancy_strategy = \
                re.search(self._redundancy_strategy_regex, line).group(1)
            if redundancy_strategy not in self._title:
                self._title += " - " + redundancy_strategy
        else:
            pass # ignore it
        self._last_line = line
    def _initRegexps(self):
        """Compile all regular expressions used by parseLine and its helpers."""
        self._irob_regex = re.compile("IROB: ([0-9]+)")
        self._datalen_regex = re.compile("datalen: ([0-9]+)")
        self._expected_bytes_regex = re.compile("expected_bytes: ([0-9]+)")
        self._network_regex = re.compile("scout: (.+) is (down|up).+ type ([A-Za-z0-9]+)")
        ip_regex_string = "([0-9]+(?:\.[0-9]+){3})"
        self._ip_regex = re.compile(ip_regex_string)
        self._socket_regex = re.compile("\[CSock(?:Sender|Receiver) ([0-9]+)\]")
        self._intnw_message_type_regex = \
            re.compile("(?:About to send|Received) message: Type: ([A-Za-z_]+)")
        self._csocket_destroyed_regex = re.compile("CSocket ([0-9]+) is being destroyed")
        self._network_estimator_regex = \
            re.compile("Adding new stats to (.+) network estimator")
        # Bandwidth/latency lines carry an observation and an estimate value.
        float_regex = "([0-9]+" + "(?:\.[0-9]+)?)"
        stats_regex = "obs %s est %s" % (float_regex, float_regex)
        self._network_bandwidth_regex = re.compile("bandwidth: " + stats_regex)
        self._network_latency_regex = re.compile("latency: " + stats_regex)
        self._redundancy_strategy_regex = \
            re.compile("redundancy_strategy_type: ([a-z_]+)\s*")
        self._incoming_connection_regex = \
            re.compile("Adding connection ([0-9]+) from " + ip_regex_string +
                       ".+type ([A-Za-z0-9]+)")
def _getIROBId(self, line):
return int(re.search(self._irob_regex, line).group(1))
def _getSocket(self, line):
return int(re.search(self._socket_regex, line).group(1))
def _getIP(self, line):
return re.search(self._ip_regex, line).group(1)
def _addNetworkType(self, network_type):
if network_type not in self._networks:
self._networks[network_type] = {
'down': {}, # download IROBs
'up': {} # upload IROBs
}
self._network_periods[network_type] = []
    def _modifyNetwork(self, timestamp, ip, status, network_type):
        """Handle a scout up/down notification for a network.

        'down' closes the network's current period; 'up' opens a new one and
        promotes any placeholder socket that bound before the scout update.
        """
        self._addNetworkType(network_type)
        if status == 'down':
            period = self._network_periods[network_type][-1]
            if period['end'] is not None:
                print "Warning: double-ending %s period at %f" % (network_type, timestamp)
            period['end'] = timestamp
            if ip in self._network_type_by_ip:
                del self._network_type_by_ip[ip]
        elif status == 'up':
            self._startNetworkPeriod(network_type, ip,
                                     start=timestamp, end=None, sock=None)
            placeholder = (ip in self._network_type_by_ip and
                           self._network_type_by_ip[ip] == "placeholder")
            self._network_type_by_ip[ip] = network_type
            if placeholder:
                # A socket bound to this IP before we learned its network
                # type; attach it to the newly started period now.
                sock = self._placeholder_sockets[ip]
                del self._placeholder_sockets[ip]
                self._addNetworkPeriodSocket(sock, ip)
        else: assert False
def _startNetworkPeriod(self, network_type, ip, start, end=None, sock=None):
periods = self._network_periods[network_type]
if len(periods) > 0 and periods[-1]['end'] == None:
# two perfectly adjacent periods with no 'down' in between. whatevs.
periods[-1]['end'] = start
periods.append({
'start': start, 'end': end,
'ip': ip, 'sock': sock
})
def _addConnection(self, line):
sock = self._getSocket(line)
ip = self._getIP(line)
if ip not in self._network_type_by_ip:
self._network_type_by_ip[ip] = "placeholder"
assert ip not in self._placeholder_sockets
self._placeholder_sockets[ip] = sock
else:
self._addNetworkPeriodSocket(sock, ip)
def _addEstimates(self, network_type, timestamp, bw=None, latency=None):
if network_type not in self._estimates:
self._estimates[network_type] = {}
for values, name in zip((bw, latency), ("bandwidth_up", "latency")):
if values:
obs, est = values
self._addNetworkObservation(network_type, name,
float(timestamp), float(obs))
self._addNetworkEstimate(network_type, name, float(est))
def _getEstimates(self, network_type, name):
all_estimates = self._estimates[network_type]
if name not in all_estimates:
all_estimates[name] = []
return all_estimates[name]
def _addNetworkObservation(self, network_type, name, timestamp, obs):
if obs == 0.0:
debug_trace()
estimates = self._getEstimates(network_type, name)
estimates.append({'timestamp': float(timestamp),
'observation': float(obs),
'estimate': None})
def _addNetworkEstimate(self, network_type, name, est):
estimates = self._getEstimates(network_type, name)
assert estimates[-1]['estimate'] is None
estimates[-1]['estimate'] = est
def _addNetworkPeriodSocket(self, sock, ip):
network_type = self._network_type_by_ip[ip]
network_period = self._network_periods[network_type][-1]
assert network_period['start'] != None
assert network_period['ip'] == ip
assert network_period['sock'] == None
network_period['sock'] = sock
assert sock not in self._network_type_by_sock
self._network_type_by_sock[sock] = network_type
def _addIncomingConnection(self, line, timestamp):
match = re.search(self._incoming_connection_regex, line)
sock, ip, network_type = match.groups()
sock = int(sock)
self._addNetworkType(network_type)
self._startNetworkPeriod(network_type, ip, start=timestamp, end=None, sock=None)
self._network_type_by_ip[ip] = network_type
self._addNetworkPeriodSocket(sock, ip)
def _removeConnection(self, line):
timestamp = getTimestamp(line)
sock = int(re.search(self._csocket_destroyed_regex, line).group(1))
if sock in self._network_type_by_sock:
network_type = self._network_type_by_sock[sock]
network_period = self._network_periods[network_type][-1]
network_period['sock'] = None
del self._network_type_by_sock[sock]
self._markDroppedIROBs(timestamp, network_type)
if self._is_server:
# client will | |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train utility functions for T5X."""
import os
from typing import Any, Callable, List, Mapping, MutableMapping, Optional, Tuple, Union
from absl import logging
import dataclasses
from flax import jax_utils
from flax.optim.base import Optimizer
from flax.training import common_utils
import jax
from jax import lax
from jax import random
import jax.numpy as jnp
import ml_collections
import numpy as np
from t5x import decode
from t5x import models
from tensorflow.io import gfile
# pylint:disable=invalid-name
# Type aliases used throughout this module.
Array = Any  # Any array-like value; kept deliberately loose.
ConfigDict = ml_collections.ConfigDict
# Concrete pytree-definition type, obtained from an empty tree.
PyTreeDef = type(jax.tree_structure(None))
TransformerConfig = models.TransformerConfig
TpuMesh = Tuple[int, int, int, int]  # (x, y, z, core) device mesh on TPU.
OtherMesh = Tuple[int, int]  # (hosts, devices per host) on non-TPU platforms.
Mesh = Union[TpuMesh, OtherMesh]
@dataclasses.dataclass(frozen=True)
class Topology:
  """Info about the overall topology and the current host's position in it.

  TODO(danielandor): Split into overall and current host information.
  """
  # Total number of data-parallel replicas across all devices.
  num_replicas: int
  # Number of "replica sets": minimal device groups containing an integer
  # number of replicas and an integer number of hosts.
  num_replica_sets: int
  # Physical-mesh tile occupied by one replica (units: devices).
  per_replica_mesh: Mesh
  # Physical-mesh tile occupied by one replica set (units: devices).
  per_replica_set_mesh: Mesh
  # Number of replicas contained in each replica set.
  per_replica_set_num_replicas: int
  # Number of model partitions whose devices are all attached to this host.
  per_host_num_partitions: int
  # Linear index of this host's replica set within the mesh of replica sets.
  replica_set_id: int
  # Index of this host within its replica set -- TODO confirm ordering.
  per_replica_set_host_id: int
  # Flattened device assignment; presumably global device order -- verify.
  device_assignment: List[jax.lib.xla_client.Device]
  # Subset of device_assignment attached to this host -- verify.
  this_host_device_assignment: List[jax.lib.xla_client.Device]
# -----------------------------------------------------------------------------
# Jax utility functions
# -----------------------------------------------------------------------------
def _unbroadcast(x):
  """Assuming `x` is replicated along its leading axis, remove that axis.

  Args:
    x: a ShardedDeviceArray whose leading logical axis is a pmap output axis
      (sharded as Unstacked over the local devices).

  Returns:
    A ShardedDeviceArray viewing the same device buffers, with the leading
    axis removed and expressed as replication in the sharding spec.
  """
  # Unbroadcast is a hack to take the output of a pmap with out_axes=0 and turn
  # it into the input of a pmap with in_axes=None. This is necessary because we
  # don't have out_axes=None in pmap, so the output arrays of the training step
  # function all still end up with an extra leading logical axis of size
  # `num_local_devices`.
  sharding_spec = x.sharding_spec
  # The leading logical axis should be sharded like the result of a pmap with
  # out_axes=0.
  assert sharding_spec.sharding[0] == jax.pxla.Unstacked(x.shape[0])
  # Remove that leading logical axis and its corresponding sharding.
  aval = jax.abstract_arrays.ShapedArray(x.shape[1:], x.dtype)
  sharding = sharding_spec.sharding[1:]
  # Replace the mesh mapping entry that pointed to that axis with Replicated,
  # and decrement the other entries.
  def replace_mesh_mapping(mm):
    if isinstance(mm, jax.pxla.ShardedAxis):
      if mm.axis == 0:
        return jax.pxla.Replicated(x.shape[0])
      return jax.pxla.ShardedAxis(mm.axis - 1)
    return mm
  # NOTE(review): map() is a lazy iterator on Python 3; ShardingSpec appears
  # to accept it -- confirm.
  mesh_mapping = map(replace_mesh_mapping, sharding_spec.mesh_mapping)
  sharding_spec = jax.pxla.ShardingSpec(sharding, mesh_mapping)
  return jax.pxla.ShardedDeviceArray(aval, sharding_spec, x.device_buffers)
def unbroadcast(tree):
  """Assuming `tree` is replicated along its leading axis, remove that axis.

  Applies `_unbroadcast` to every leaf of the pytree.
  """
  strip_leading_axis = _unbroadcast
  return jax.tree_map(strip_leading_axis, tree)
def broadcast(tree,
              num_replicas,
              num_partitions,
              devices=None):
  """Broadcast `tree` according to `num_replicas` and `num_partitions`.

  Replications are duplicates of `tree` along the leading axis. Partitions are
  further replications of `tree` using `replication_factors` in the
  `ShardingSpec` of the returned arrays.

  Args:
    tree: pytree of arrays
    num_replicas: number of replicas (i.e. pmap dimension size).
    num_partitions: number of partitions
    devices: flattened device assignment (defaults to jax.local_devices())

  Returns:
    A tree of ShardedDeviceArrays with leading sharded axis of size
    `num_replicas`, each of which contains a copy of the tree element, and is
    further replicated `num_partitions` times. This is suitable for passing to
    pmap(sharded_jit) if the data should be replicated on every device.
  """
  # Every local device holds a copy, split between replicas and partitions.
  assert num_replicas * num_partitions == jax.local_device_count()
  # Replicate across all devices.
  replicated = jax_utils.replicate(tree, devices=devices)
  # Rewrite the sharding specs to include replicated partitioning.
  def redo_sharding_spec(x):
    assert isinstance(x, jax.pxla.ShardedDeviceArray)
    sharding_spec = x.sharding_spec
    # We replicated `tree` across all devices, but we only want a leading axis
    # of size `num_replicas`.
    aval = jax.abstract_arrays.ShapedArray((num_replicas,) + x.shape[1:],
                                           x.dtype)
    # Fix the size of the corresponding sharding.
    sharding = (jax.pxla.Unstacked(num_replicas),) + sharding_spec.sharding[1:]
    # Add replication over the remaining axis of the mesh.
    mesh_mapping = sharding_spec.mesh_mapping + (
        jax.pxla.Replicated(num_partitions),)
    sharding_spec = jax.pxla.ShardingSpec(sharding, mesh_mapping)
    return jax.pxla.ShardedDeviceArray(aval, sharding_spec, x.device_buffers)
  # With a single partition the plain replicated tree is already correct.
  if num_partitions > 1:
    return jax.tree_map(redo_sharding_spec, replicated)
  else:
    return replicated
def compute_multihost_topology(num_partitions):
"""Logic to handle the multi-host data+model parallel topology.
We need to relate three things:
- the physical topology of devices and their interconnect
- the logical topology of replicas + partitions (data + model parallelism)
- the topology of which devices are connected to which hosts
Since model parallelism involves more communication, partitions are
assumed to be local. Both hosts and replicas enclose rectangular subgroups
of devices that tile the overall physical mesh.
Variables referring to tilings of the physical mesh are (x, y, z, core).
Most such mesh variables are in units of devices, although variables called
X_mesh and X_coords are shapes of and positions within meshes in units of X,
and variables called per_X_Y are shapes of, or counts/indices within, a
particular instance of X.
Args:
num_partitions: Requested number of partitions.
Returns:
a Topology object containing the device assignments etc.
"""
num_replicas = max(1, jax.device_count() // num_partitions)
logging.info('num_replicas: %d; num_partitions: %d', num_replicas,
num_partitions)
def bounds_from_last_device(device):
# Must be passed the device at the highest-coordinate corner of the
# relevant mesh, which is a requirement we know is satisfied by the last
# device in jax.devices()
if hasattr(device, 'coords'):
x, y, z = device.coords
return x + 1, y + 1, z + 1, device.id % 2 + 1
else:
# On non-TPU platforms, the "mesh" is hosts x devices per host in order
# to take advantage of faster within-host interconnect
return jax.host_count(), jax.local_device_count()
global_mesh = bounds_from_last_device(jax.devices()[-1])
logging.info('global_mesh: %s', global_mesh)
if jax.local_devices()[0].platform == 'tpu':
# TODO(jekbradbury): potentially move per_replica_mesh to config
if num_partitions == 1:
per_replica_mesh = (1, 1, 1, 1)
elif num_partitions == 2:
per_replica_mesh = (1, 1, 1, 2)
elif num_partitions == 4:
per_replica_mesh = (1, 2, 1, 2)
elif num_partitions == global_mesh[1] * 2:
# The y-axis is more likely to have the wraparound torus links, e.g. on
# 16x32 or multipod topologies
per_replica_mesh = (1, num_partitions // 2, 1, 2)
elif num_partitions == 8:
per_replica_mesh = (2, 2, 1, 2)
elif num_partitions == 16:
per_replica_mesh = (4, 2, 1, 2)
else:
raise NotImplementedError()
else:
per_replica_mesh = (max(1, num_partitions // jax.local_device_count()),
min(num_partitions, jax.local_device_count()))
logging.info('per_replica_mesh: %s', per_replica_mesh)
per_host_mesh = bounds_from_last_device(jax.local_devices(0)[-1])
for per_replica, per_host in zip(per_replica_mesh, per_host_mesh):
assert per_replica % per_host == 0 or per_host % per_replica == 0
per_host_partition_mesh = tuple(
min(pr, ph) for pr, ph in zip(per_replica_mesh, per_host_mesh))
per_host_num_partitions = np.prod(per_host_partition_mesh)
# Hosts and replicas are both tilings of the physical device mesh, and they're
# not aligned with each other. But they both affect the data pipeline: all
# devices within a host get fed data together, while all devices within a
# replica need to be fed the same data, whether or not they're attached to the
# same host.
# A "replica set" is the least common multiple of hosts and replicas, or the
# minimal group of devices that contains both an integer number of replicas
# and an integer number of hosts. Each replica set will correspond to a unique
# instance of the data pipeline, and the data coming out of the data pipeline
# on each replica set will be the same on each host in that replica set and
# will be split among the constituent replicas.
per_replica_set_mesh = tuple(
max(pr, ph) for pr, ph in zip(per_replica_mesh, per_host_mesh))
num_replica_sets = max(1, jax.device_count() // np.prod(per_replica_set_mesh))
per_replica_set_num_replicas = num_replicas // num_replica_sets
# Here we begin to compute values that are specific to this host.
first_local_device = jax.local_devices()[0]
def get_coords(device):
if hasattr(device, 'coords'):
return (*device.coords, device.id % 2)
return (device.host_id, device.id % jax.local_device_count())
# The device coordinates of this host are those of its "first" device
device_coords = get_coords(first_local_device)
replica_set_coords = tuple(
dc // prsm for dc, prsm in zip(device_coords, per_replica_set_mesh))
# An X_id is a linear index of a particular X within the mesh of Xs (a value
# in 0 <= X_id < num_Xs). The order of enumeration is arbitrary but must be
# computed consistently between hosts.
replica_set_id = 0
for gm, prsm, rsc in zip(global_mesh, per_replica_set_mesh,
replica_set_coords):
replica_set_id = replica_set_id * gm // prsm + rsc
per_replica_set_host_coords = tuple(dc % prsm // phm for dc, prsm, phm in zip(
device_coords, per_replica_set_mesh, per_host_mesh))
per_replica_set_host_id = 0
for prshc, | |
channels 音频数/声道数
self.channels = channels # type: int
# code_name 音频编码模式
self.code_name = code_name # type: str
# duration 单位 秒
self.duration = duration # type: str
# sample_rate 音频采样率
self.sample_rate = sample_rate # type: str
    def validate(self):
        """No-op: this model has no constrained or nested fields to validate."""
        pass
def to_map(self):
result = {}
if self.bit_rate is not None:
result['bit_rate'] = self.bit_rate
if self.channel_layout is not None:
result['channel_layout'] = self.channel_layout
if self.channels is not None:
result['channels'] = self.channels
if self.code_name is not None:
result['code_name'] = self.code_name
if self.duration is not None:
result['duration'] = self.duration
if self.sample_rate is not None:
result['sample_rate'] = self.sample_rate
return result
def from_map(self, map={}):
if map.get('bit_rate') is not None:
self.bit_rate = map.get('bit_rate')
if map.get('channel_layout') is not None:
self.channel_layout = map.get('channel_layout')
if map.get('channels') is not None:
self.channels = map.get('channels')
if map.get('code_name') is not None:
self.code_name = map.get('code_name')
if map.get('duration') is not None:
self.duration = map.get('duration')
if map.get('sample_rate') is not None:
self.sample_rate = map.get('sample_rate')
return self
class VideoMediaResponse(TeaModel):
    """Media metadata for a video: location/time fields plus per-stream
    audio and video details.
    """

    # Scalar fields serialized verbatim (attribute name == map key), in the
    # order the generated serializer emitted them.
    _SCALAR_FIELDS = ('address_line', 'city', 'country', 'district',
                      'duration', 'height', 'location', 'province',
                      'time', 'township')

    def __init__(self, address_line=None, city=None, country=None, district=None, duration=None, height=None,
                 location=None, province=None, time=None, township=None, video_media_audio_stream=None,
                 video_media_video_stream=None, width=None):
        # address_line
        self.address_line = address_line  # type: str
        # city
        self.city = city  # type: str
        # country
        self.country = country  # type: str
        # district
        self.district = district  # type: str
        # duration, in seconds
        self.duration = duration  # type: str
        # height, in pixels
        self.height = height  # type: int
        # location
        self.location = location  # type: str
        # province
        self.province = province  # type: str
        # time
        self.time = time  # type: str
        # township
        self.township = township  # type: str
        self.video_media_audio_stream = video_media_audio_stream  # type: List[VideoMediaAudioStream]
        self.video_media_video_stream = video_media_video_stream  # type: List[VideoMediaVideoStream]
        # width, in pixels
        self.width = width  # type: int

    def validate(self):
        """Recursively validate the nested audio/video stream models."""
        for stream in (self.video_media_audio_stream or []):
            if stream:
                stream.validate()
        for stream in (self.video_media_video_stream or []):
            if stream:
                stream.validate()

    def to_map(self):
        """Serialize to a dict: scalar fields are omitted when None; the two
        stream-list keys are always present (possibly empty)."""
        result = {}
        for field in self._SCALAR_FIELDS:
            value = getattr(self, field)
            if value is not None:
                result[field] = value
        result['video_media_audio_stream'] = []
        if self.video_media_audio_stream is not None:
            for stream in self.video_media_audio_stream:
                result['video_media_audio_stream'].append(
                    stream.to_map() if stream else None)
        result['video_media_video_stream'] = []
        if self.video_media_video_stream is not None:
            for stream in self.video_media_video_stream:
                result['video_media_video_stream'].append(
                    stream.to_map() if stream else None)
        if self.width is not None:
            result['width'] = self.width
        return result

    def from_map(self, map=None):
        """Populate from a dict (``None`` is treated as empty); returns self.

        Fixes the original mutable-default ``map={}`` argument; the parameter
        keeps its builtin-shadowing name for backward compatibility.
        """
        source = map or {}
        for field in self._SCALAR_FIELDS:
            value = source.get(field)
            if value is not None:
                setattr(self, field, value)
        # Stream lists are always reset, matching the original behavior.
        self.video_media_audio_stream = []
        if source.get('video_media_audio_stream') is not None:
            for item in source.get('video_media_audio_stream'):
                self.video_media_audio_stream.append(
                    VideoMediaAudioStream().from_map(item))
        self.video_media_video_stream = []
        if source.get('video_media_video_stream') is not None:
            for item in source.get('video_media_video_stream'):
                self.video_media_video_stream.append(
                    VideoMediaVideoStream().from_map(item))
        if source.get('width') is not None:
            self.width = source.get('width')
        return self
class VideoMediaVideoStream(TeaModel):
    """Metadata for a single video stream within a media file."""

    # Fields serialized verbatim (attribute name == map key).
    _FIELDS = ('bitrate', 'clarity', 'code_name', 'duration', 'fps')

    def __init__(self, bitrate=None, clarity=None, code_name=None, duration=None, fps=None):
        # bitrate, in bits/second
        self.bitrate = bitrate  # type: str
        # clarity (scan quality)
        self.clarity = clarity  # type: str
        # video codec name
        self.code_name = code_name  # type: str
        # duration, in seconds
        self.duration = duration  # type: str
        # average frame rate
        self.fps = fps  # type: str

    def validate(self):
        """No-op: all fields are optional, unconstrained scalars."""
        pass

    def to_map(self):
        """Serialize to a dict, omitting unset (None) fields."""
        result = {}
        for field in self._FIELDS:
            value = getattr(self, field)
            if value is not None:
                result[field] = value
        return result

    def from_map(self, map=None):
        """Populate from a dict (``None`` is treated as empty); returns self.

        Fixes the original mutable-default ``map={}`` argument; the parameter
        keeps its builtin-shadowing name for backward compatibility.
        """
        source = map or {}
        for field in self._FIELDS:
            value = source.get(field)
            if value is not None:
                setattr(self, field, value)
        return self
class VideoPreviewResponse(TeaModel):
    """Video preview response: basic media properties plus sprite and transcode sub-models."""

    def __init__(self, audio_format=None, bitrate=None, duration=None, frame_rate=None, height=None,
                 sprite_info=None, template_list=None, thumbnail=None, video_format=None, width=None):
        # Audio codec/format.
        self.audio_format = audio_format  # type: str
        # Overall bitrate.
        self.bitrate = bitrate  # type: str
        # Media duration.
        self.duration = duration  # type: str
        # Video frame rate.
        self.frame_rate = frame_rate  # type: str
        # Frame height in pixels.
        self.height = height  # type: int
        # Sprite (thumbnail grid) info sub-model.
        self.sprite_info = sprite_info  # type: VideoPreviewSprite
        # Transcode template status list.
        self.template_list = template_list  # type: List[VideoPreviewTranscode]
        # Thumbnail reference.
        self.thumbnail = thumbnail  # type: str
        # Video codec/format.
        self.video_format = video_format  # type: str
        # Frame width in pixels.
        self.width = width  # type: int

    def validate(self):
        """Recursively validate nested sub-models."""
        if self.sprite_info:
            self.sprite_info.validate()
        if self.template_list:
            for k in self.template_list:
                if k:
                    k.validate()

    def to_map(self):
        """Serialize to a plain dict, omitting unset scalar fields.

        Note: 'template_list' is always emitted (as a list, possibly empty),
        mirroring the original generated serializer.
        """
        result = {}
        if self.audio_format is not None:
            result['audio_format'] = self.audio_format
        if self.bitrate is not None:
            result['bitrate'] = self.bitrate
        if self.duration is not None:
            result['duration'] = self.duration
        if self.frame_rate is not None:
            result['frame_rate'] = self.frame_rate
        if self.height is not None:
            result['height'] = self.height
        if self.sprite_info is not None:
            result['sprite_info'] = self.sprite_info.to_map()
        result['template_list'] = []
        if self.template_list is not None:
            for k in self.template_list:
                result['template_list'].append(k.to_map() if k else None)
        if self.thumbnail is not None:
            result['thumbnail'] = self.thumbnail
        if self.video_format is not None:
            result['video_format'] = self.video_format
        if self.width is not None:
            result['width'] = self.width
        return result

    def from_map(self, map=None):
        """Populate from a dict and return self.

        Fix: default is ``None`` instead of a shared mutable ``{}``;
        observable behavior is unchanged for all callers.
        """
        if map is None:
            map = {}
        if map.get('audio_format') is not None:
            self.audio_format = map.get('audio_format')
        if map.get('bitrate') is not None:
            self.bitrate = map.get('bitrate')
        if map.get('duration') is not None:
            self.duration = map.get('duration')
        if map.get('frame_rate') is not None:
            self.frame_rate = map.get('frame_rate')
        if map.get('height') is not None:
            self.height = map.get('height')
        if map.get('sprite_info') is not None:
            temp_model = VideoPreviewSprite()
            self.sprite_info = temp_model.from_map(map['sprite_info'])
        self.template_list = []
        if map.get('template_list') is not None:
            for k in map.get('template_list'):
                temp_model = VideoPreviewTranscode()
                self.template_list.append(temp_model.from_map(k))
        if map.get('thumbnail') is not None:
            self.thumbnail = map.get('thumbnail')
        if map.get('video_format') is not None:
            self.video_format = map.get('video_format')
        if map.get('width') is not None:
            self.width = map.get('width')
        return self
class VideoPreviewSprite(TeaModel):
    """Sprite (thumbnail grid) information: grid layout, frame counts/sizes, status."""

    def __init__(self, col=None, count=None, frame_count=None, frame_height=None, frame_width=None, row=None,
                 status=None):
        # Number of grid columns.
        self.col = col  # type: int
        # Number of sprite images.
        self.count = count  # type: int
        # Total frame count.
        self.frame_count = frame_count  # type: int
        # Frame height in pixels.
        self.frame_height = frame_height  # type: int
        # Frame width in pixels.
        self.frame_width = frame_width  # type: int
        # Number of grid rows.
        self.row = row  # type: int
        # Generation status.
        self.status = status  # type: str

    def validate(self):
        """No field constraints to enforce for this model."""
        pass

    def to_map(self):
        """Serialize to a plain dict, omitting unset (None) fields."""
        result = {}
        if self.col is not None:
            result['col'] = self.col
        if self.count is not None:
            result['count'] = self.count
        if self.frame_count is not None:
            result['frame_count'] = self.frame_count
        if self.frame_height is not None:
            result['frame_height'] = self.frame_height
        if self.frame_width is not None:
            result['frame_width'] = self.frame_width
        if self.row is not None:
            result['row'] = self.row
        if self.status is not None:
            result['status'] = self.status
        return result

    def from_map(self, map=None):
        """Populate from a dict and return self.

        Fix: default is ``None`` instead of a shared mutable ``{}``.
        """
        if map is None:
            map = {}
        if map.get('col') is not None:
            self.col = map.get('col')
        if map.get('count') is not None:
            self.count = map.get('count')
        if map.get('frame_count') is not None:
            self.frame_count = map.get('frame_count')
        if map.get('frame_height') is not None:
            self.frame_height = map.get('frame_height')
        if map.get('frame_width') is not None:
            self.frame_width = map.get('frame_width')
        if map.get('row') is not None:
            self.row = map.get('row')
        if map.get('status') is not None:
            self.status = map.get('status')
        return self
class VideoPreviewTranscode(TeaModel):
    """Per-template transcode status entry."""

    def __init__(self, status=None, template_id=None):
        # Transcode status.
        self.status = status  # type: str
        # Transcode template ID.
        self.template_id = template_id  # type: str

    def validate(self):
        """No field constraints to enforce for this model."""
        pass

    def to_map(self):
        """Serialize to a plain dict, omitting unset (None) fields."""
        result = {}
        if self.status is not None:
            result['status'] = self.status
        if self.template_id is not None:
            result['template_id'] = self.template_id
        return result

    def from_map(self, map=None):
        """Populate from a dict and return self.

        Fix: default is ``None`` instead of a shared mutable ``{}``.
        """
        if map is None:
            map = {}
        if map.get('status') is not None:
            self.status = map.get('status')
        if map.get('template_id') is not None:
            self.template_id = map.get('template_id')
        return self
class AdminListStoresModel(TeaModel):
def __init__(self, headers=None, body=None):
self.headers = headers # type: Dict[str, str]
self.body = body # type: ListStoresResponse
| |
""" resolves an entity synchronously
Args:
input_umf_: G2 style JSON
"""
if type(input_umf_) == str:
input_umf_string = input_umf_.encode('utf-8')
elif type(input_umf_) == bytearray:
input_umf_string = str(input_umf_)
else:
input_umf_string = input_umf_
responseBuf = c_char_p(addressof(tls_var.buf))
responseSize = c_size_t(tls_var.bufSize)
self._lib_handle.G2_processWithResponseResize.argtypes = [c_char_p, POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
ret_code = self._lib_handle.G2_processWithResponseResize(input_umf_string,
pointer(responseBuf),
pointer(responseSize),
self._resize_func)
if ret_code == -1:
raise G2ModuleNotInitialized('G2Engine has not been succesfully initialized')
elif ret_code < 0:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
response += responseBuf.value
    def checkRecord(self, input_umf_, recordQueryList, response):
        # type: (str,str,str) -> str
        """ Scores the input record against the specified records
        Args:
            input_umf_: A JSON document containing the attribute information
                for the observation.
            recordQueryList: JSON document identifying the record(s) to score against.
            response: bytearray that receives the native JSON response (appended in place).
        """
        _inputUmfString = self.prepareStringArgument(input_umf_)
        _recordQueryList = self.prepareStringArgument(recordQueryList)
        # Hand the native layer the thread-local buffer; it may grow it via
        # self._resize_func, so re-read responseBuf.value afterwards.
        responseBuf = c_char_p(addressof(tls_var.buf))
        responseSize = c_size_t(tls_var.bufSize)
        self._lib_handle.G2_checkRecord.argtypes = [c_char_p, c_char_p, POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
        ret_code = self._lib_handle.G2_checkRecord(_inputUmfString,
                                                 _recordQueryList,
                                                 pointer(responseBuf),
                                                 pointer(responseSize),
                                                 self._resize_func)
        if ret_code == -1:
            # -1 is the native sentinel for "engine not initialized".
            raise G2ModuleNotInitialized('G2Engine has not been succesfully initialized')
        elif ret_code < 0:
            # Any other negative code: fetch and translate the native exception text.
            self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
            raise TranslateG2ModuleException(tls_var.buf.value)
        response += responseBuf.value
    def exportJSONEntityReport(self, exportFlags):
        """ Generate a JSON export
        This is used to export entity data from known entities.  This function
        returns an export-handle that can be read from to get the export data
        in the requested format.  The export-handle should be read using the "G2_fetchNext"
        function, and closed when work is complete.
        Args:
            exportFlags: integer bitmask controlling what is exported.
        Returns:
            An opaque native export handle (pass to fetchNext/closeExport).
        """
        self._lib_handle.G2_exportJSONEntityReport.restype = c_void_p
        self._lib_handle.G2_exportJSONEntityReport.argtypes = [c_int]
        exportHandle = self._lib_handle.G2_exportJSONEntityReport(exportFlags)
        # A NULL handle signals failure; fetch and translate the native exception.
        if exportHandle == None:
            self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
            raise TranslateG2ModuleException(tls_var.buf.value)
        return exportHandle
    def exportCSVEntityReportV2(self, headersForCSV, exportFlags):
        """ Generate a CSV export
        This is used to export entity data from known entities.  This function
        returns an export-handle that can be read from to get the export data
        in the requested format.  The export-handle should be read using the "G2_fetchNext"
        function, and closed when work is complete.  The first output row returned
        by the export-handle contains the CSV column headers as a string.  Each
        following row contains the exported entity data.
        Args:
            headersForCSV: comma-separated list of CSV columns to include.
            exportFlags: integer bitmask controlling what is exported.
        Returns:
            An opaque native export handle (pass to fetchNext/closeExport).
        """
        _headersForCSV = self.prepareStringArgument(headersForCSV)
        self._lib_handle.G2_exportCSVEntityReport_V2.restype = c_void_p
        self._lib_handle.G2_exportCSVEntityReport_V2.argtypes = [c_char_p, c_int]
        exportHandle = self._lib_handle.G2_exportCSVEntityReport_V2(_headersForCSV,exportFlags)
        # A NULL handle signals failure; fetch and translate the native exception.
        if exportHandle == None:
            self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
            raise TranslateG2ModuleException(tls_var.buf.value)
        return exportHandle
def fetchNext(self,exportHandle,response):
""" Fetch a record from an export
Args:
exportHandle: handle from generated export
Returns:
str: Record fetched, empty if there is no more data
"""
response[::]=b''
self._lib_handle.G2_fetchNext.restype = c_longlong
self._lib_handle.G2_fetchNext.argtypes = [c_void_p, c_char_p, c_size_t]
resultValue = self._lib_handle.G2_fetchNext(c_void_p(exportHandle),tls_var.buf,sizeof(tls_var.buf))
while resultValue != 0:
if resultValue == -1:
raise G2ModuleNotInitialized('G2Engine has not been succesfully initialized')
elif resultValue < 0:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
response += tls_var.buf.value
if (response)[-1] == 0x0a:
break
else:
resultValue = self._lib_handle.G2_fetchNext(c_void_p(exportHandle),tls_var.buf,sizeof(tls_var.buf))
return response
    def closeExport(self, exportHandle):
        """ Close an export handle obtained from one of the export*Report methods. """
        self._lib_handle.G2_closeExport.restype = None
        self._lib_handle.G2_closeExport.argtypes = [c_void_p]
        self._lib_handle.G2_closeExport(c_void_p(exportHandle))
    def exportJSONEntityReportV3(self, exportFlags):
        """ Generate a JSON export
        This is used to export entity data from known entities.  This function
        returns an export-handle that can be read from to get the export data
        in the requested format.  The export-handle should be read using the "G2_fetchNext"
        function, and closed when work is complete.
        Args:
            exportFlags: integer bitmask controlling what is exported.
        Returns:
            The native export handle value (V3 API returns it via out-parameter).
        """
        self._lib_handle.G2_exportJSONEntityReport_V3.restype = c_int
        self._lib_handle.G2_exportJSONEntityReport_V3.argtypes = [c_int,POINTER(c_void_p)]
        # V3 API reports errors via the return code and writes the handle
        # through an out-parameter, unlike the V1/V2 NULL-handle convention.
        exportHandle = c_void_p(0)
        ret_code = self._lib_handle.G2_exportJSONEntityReport_V3(exportFlags,byref(exportHandle))
        if ret_code == -1:
            raise G2ModuleNotInitialized('G2Engine has not been succesfully initialized')
        elif ret_code < 0:
            self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
            raise TranslateG2ModuleException(tls_var.buf.value)
        return exportHandle.value
    def exportCSVEntityReportV3(self, headersForCSV, exportFlags):
        """ Generate a CSV export
        This is used to export entity data from known entities.  This function
        returns an export-handle that can be read from to get the export data
        in the requested format.  The export-handle should be read using the "G2_fetchNext"
        function, and closed when work is complete.  The first output row returned
        by the export-handle contains the CSV column headers as a string.  Each
        following row contains the exported entity data.
        Args:
            headersForCSV: comma-separated list of CSV columns to include.
            exportFlags: integer bitmask controlling what is exported.
        Returns:
            The native export handle value (V3 API returns it via out-parameter).
        """
        _headersForCSV = self.prepareStringArgument(headersForCSV)
        self._lib_handle.G2_exportCSVEntityReport_V3.restype = c_int
        self._lib_handle.G2_exportCSVEntityReport_V3.argtypes = [c_char_p,c_int,POINTER(c_void_p)]
        # V3 API reports errors via the return code and writes the handle
        # through an out-parameter.
        exportHandle = c_void_p(0)
        ret_code = self._lib_handle.G2_exportCSVEntityReport_V3(_headersForCSV,exportFlags,byref(exportHandle))
        if ret_code == -1:
            raise G2ModuleNotInitialized('G2Engine has not been succesfully initialized')
        elif ret_code < 0:
            self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
            raise TranslateG2ModuleException(tls_var.buf.value)
        return exportHandle.value
def fetchNextV3(self,exportHandle,response):
""" Fetch a record from an export
Args:
exportHandle: handle from generated export
Returns:
str: Record fetched, empty if there is no more data
"""
response[::]=b''
self._lib_handle.G2_fetchNext_V3.restype = c_int
self._lib_handle.G2_fetchNext_V3.argtypes = [c_void_p, c_char_p, c_size_t]
resultValue = self._lib_handle.G2_fetchNext_V3(c_void_p(exportHandle),tls_var.buf,sizeof(tls_var.buf))
while resultValue != 0:
if resultValue == -1:
raise G2ModuleNotInitialized('G2Engine has not been succesfully initialized')
elif resultValue < 0:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
response += tls_var.buf.value
if (response)[-1] == 0x0a:
break
else:
resultValue = self._lib_handle.G2_fetchNext_V3(c_void_p(exportHandle),tls_var.buf,sizeof(tls_var.buf))
return response
    def closeExportV3(self, exportHandle):
        """ Close an export handle obtained from one of the export*ReportV3 methods.
        Note: the native return code is declared (c_int) but not checked here.
        """
        self._lib_handle.G2_closeExport_V3.restype = c_int
        self._lib_handle.G2_closeExport_V3.argtypes = [c_void_p]
        self._lib_handle.G2_closeExport_V3(c_void_p(exportHandle))
def prepareStringArgument(self, stringToPrepare):
# type: (str) -> str
""" Internal processing function """
#handle null string
if stringToPrepare == None:
return None
#if string is unicode, transcode to utf-8 str
if type(stringToPrepare) == str:
return stringToPrepare.encode('utf-8')
#if input is bytearray, assumt utf-8 and convert to str
elif type(stringToPrepare) == bytearray:
return stringToPrepare.decode().encode('utf-8')
elif type(stringToPrepare) == bytes:
return str(stringToPrepare).encode('utf-8')
#input is already a str
return stringToPrepare
def prepareIntArgument(self, valueToPrepare):
# type: (str) -> int
""" Internal processing function """
""" This converts many types of values to an integer """
#handle null string
if valueToPrepare == None:
return None
#if string is unicode, transcode to utf-8 str
if type(valueToPrepare) == str:
return int(valueToPrepare.encode('utf-8'))
#if input is bytearray, assumt utf-8 and convert to str
elif type(valueToPrepare) == bytearray:
return int(valueToPrepare)
elif type(valueToPrepare) == bytes:
return int(valueToPrepare)
#input is already an int
return valueToPrepare
def addRecord(self,dataSourceCode,recordId,jsonData,loadId=None):
# type: (str,str,str,str) -> int
""" Loads the JSON record
Args:
dataSourceCode: The data source for the observation.
recordID: The ID for the record
jsonData: A JSON document containing the attribute information
for the observation.
loadID: The observation load ID for the record, can be null and will default to dataSourceCode
"""
_dataSourceCode = self.prepareStringArgument(dataSourceCode)
_loadId = self.prepareStringArgument(loadId)
_recordId = self.prepareStringArgument(recordId)
_jsonData = self.prepareStringArgument(jsonData)
self._lib_handle.G2_addRecord.argtypes = [c_char_p, c_char_p, c_char_p]
ret_code = self._lib_handle.G2_addRecord(_dataSourceCode,_recordId,_jsonData,_loadId)
if ret_code == -1:
raise G2ModuleNotInitialized('G2Engine has not been succesfully initialized')
elif ret_code < 0:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
    def addRecordWithReturnedRecordID(self,dataSourceCode,recordID,jsonData,loadId=None):
        # type: (str,str,str,str) -> int
        """ Loads the JSON record
        Args:
            dataSourceCode: The data source for the observation.
            recordID: A memory buffer (bytearray) for returning the generated recordID
            jsonData: A JSON document containing the attribute information
                for the observation.
            loadId: The observation load ID for the record, can be null and will default to dataSourceCode
        """
        _dataSourceCode = self.prepareStringArgument(dataSourceCode)
        _jsonData = self.prepareStringArgument(jsonData)
        _loadId = self.prepareStringArgument(loadId)
        # Clear the caller's bytearray in place; the generated ID is appended below.
        recordID[::]=b''
        self._lib_handle.G2_addRecordWithReturnedRecordID.argtypes = [c_char_p, c_char_p, c_char_p, c_char_p, c_size_t]
        ret_code = self._lib_handle.G2_addRecordWithReturnedRecordID(_dataSourceCode,_jsonData,_loadId, tls_var.buf, sizeof(tls_var.buf))
        if ret_code == -1:
            raise G2ModuleNotInitialized('G2Engine has not been succesfully initialized')
        elif ret_code < 0:
            self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
            raise TranslateG2ModuleException(tls_var.buf.value)
        recordID += tls_var.buf.value
    def addRecordWithInfo(self,dataSourceCode,recordId,jsonData,response,loadId=None,flags=0):
        # type: (str,str,str,str,str,int) -> str
        """ Loads the JSON record and returns info about the load
        Args:
            dataSourceCode: The data source for the observation.
            recordId: The ID for the record
            jsonData: A JSON document containing the attribute information
                for the observation.
            response: Json document with info about the modified resolved entities
            loadId: The observation load ID for the record, can be null and will default to dataSourceCode
            flags: reserved for future use
        """
        _dataSourceCode = self.prepareStringArgument(dataSourceCode)
        _loadId = self.prepareStringArgument(loadId)
        _recordId = self.prepareStringArgument(recordId)
        _jsonData = self.prepareStringArgument(jsonData)
        # Clear the caller's bytearray; the "with info" JSON is appended below.
        response[::]=b''
        # Hand the native layer the thread-local buffer; it may grow it via
        # self._resize_func.
        responseBuf = c_char_p(addressof(tls_var.buf))
        responseSize = c_size_t(tls_var.bufSize)
        self._lib_handle.G2_addRecordWithInfo.argtypes = [c_char_p, c_char_p, c_char_p, c_char_p, c_int, POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
        ret_code = self._lib_handle.G2_addRecordWithInfo(_dataSourceCode,_recordId,_jsonData,_loadId,flags,pointer(responseBuf),pointer(responseSize),self._resize_func)
        if ret_code == -1:
            raise G2ModuleNotInitialized('G2Engine has not been succesfully initialized')
        elif ret_code < 0:
            self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
            raise TranslateG2ModuleException(tls_var.buf.value)
        #Add the bytes to the response bytearray from calling function
        response += tls_var.buf.value
def addRecordWithInfoWithReturnedRecordID(self,dataSourceCode,jsonData,flags,recordID,info,loadId=None):
""" Loads the JSON record
Args:
dataSourceCode: The data source for the observation.
recordID: A memory buffer for returning the recordID
jsonData: A JSON document containing the attribute information
for the observation.
info: Json document with info about the modified resolved entities
loadID: The observation load ID for the record, can be null and will default to dataSourceCode
flags: reserved for future use
"""
_dataSourceCode = self.prepareStringArgument(dataSourceCode)
_jsonData = self.prepareStringArgument(jsonData)
_loadId = self.prepareStringArgument(loadId)
recordID[::]=b''
info[::]=b''
infoBuf = c_char_p(addressof(tls_var3.buf))
infoBufSize = c_size_t(tls_var3.bufSize)
self._lib_handle.G2_addRecordWithInfoWithReturnedRecordID.restype = c_int
self._lib_handle.G2_addRecordWithInfoWithReturnedRecordID.argtypes = [c_char_p, c_char_p, c_char_p, c_int, c_char_p, c_size_t, | |
self._entity_data.get('DefaultAnim')
return ""
class info_npc_spawn_destination(Targetname, Parentname):
    """Generated FGD wrapper: destination point for NPC spawning; keys read lazily from raw entity data."""
    pass
    icon_sprite = "editor/info_target.vmat"
    @property
    def ReuseDelay(self):
        # Delay before this destination may be reused; defaults to 1.
        if "ReuseDelay" in self._entity_data:
            return float(self._entity_data.get('ReuseDelay'))
        return float(1)
    @property
    def RenameNPC(self):
        # Optional name applied to NPCs spawned at this destination.
        if "RenameNPC" in self._entity_data:
            return self._entity_data.get('RenameNPC')
        return ""
class BaseNPCMaker(Targetname, Parentname):
    """Generated FGD wrapper: base NPC spawner entity; each property reads a key
    from the raw entity data dict and falls back to the FGD default."""
    pass
    icon_sprite = "editor/npc_maker.vmat"
    @property
    def StartDisabled(self):
        # NOTE(review): bool() on a non-empty string (e.g. "0") is True —
        # confirm upstream value types before relying on this coercion.
        if "StartDisabled" in self._entity_data:
            return bool(self._entity_data.get('StartDisabled'))
        return bool(1)
    @property
    def spawnflags(self):
        # Decode the spawnflags bitmask into a list of readable flag names.
        flags = []
        if "spawnflags" in self._entity_data:
            value = self._entity_data.get("spawnflags", None)
            for name, (key, _) in {'Fade Corpse': (16, 0), 'Infinite Children': (32, 0), 'Do Not Drop': (64, 0),
                                   "Don't Spawn While Visible": (128, 0)}.items():
                if value & key > 0:
                    flags.append(name)
        return flags
    @property
    def Radius(self):
        if "Radius" in self._entity_data:
            return float(self._entity_data.get('Radius'))
        return float(256)
    @property
    def DestinationGroup(self):
        if "DestinationGroup" in self._entity_data:
            return self._entity_data.get('DestinationGroup')
        return None
    @property
    def CriterionVisibility(self):
        if "CriterionVisibility" in self._entity_data:
            return self._entity_data.get('CriterionVisibility')
        return "2"
    @property
    def CriterionDistance(self):
        if "CriterionDistance" in self._entity_data:
            return self._entity_data.get('CriterionDistance')
        return "2"
    @property
    def MinSpawnDistance(self):
        if "MinSpawnDistance" in self._entity_data:
            return int(self._entity_data.get('MinSpawnDistance'))
        return int(0)
    @property
    def MaxNPCCount(self):
        if "MaxNPCCount" in self._entity_data:
            return int(self._entity_data.get('MaxNPCCount'))
        return int(1)
    @property
    def SpawnFrequency(self):
        if "SpawnFrequency" in self._entity_data:
            return int(self._entity_data.get('SpawnFrequency'))
        return int(5)
    @property
    def RetryFrequency(self):
        if "RetryFrequency" in self._entity_data:
            return int(self._entity_data.get('RetryFrequency'))
        return int(-1)
    @property
    def MaxLiveChildren(self):
        if "MaxLiveChildren" in self._entity_data:
            return int(self._entity_data.get('MaxLiveChildren'))
        return int(5)
    @property
    def HullCheckMode(self):
        if "HullCheckMode" in self._entity_data:
            return self._entity_data.get('HullCheckMode')
        return "0"
class npc_template_maker(BaseNPCMaker):
    """Generated FGD wrapper: NPC maker that spawns copies of a named template entity."""
    pass
    icon_sprite = "editor/npc_maker.vmat"
    @property
    def spawnflags(self):
        # Decode this subclass's extra spawnflag bit into a readable name.
        flags = []
        if "spawnflags" in self._entity_data:
            value = self._entity_data.get("spawnflags", None)
            for name, (key, _) in {"Don't preload template models": (512, 0)}.items():
                if value & key > 0:
                    flags.append(name)
        return flags
    @property
    def TemplateName(self):
        # Name of the template entity to spawn copies of.
        if "TemplateName" in self._entity_data:
            return self._entity_data.get('TemplateName')
        return ""
class BaseHelicopter(BaseNPC):
    """Generated FGD wrapper: base class for helicopter-style NPCs."""
    pass
    @property
    def InitialSpeed(self):
        if "InitialSpeed" in self._entity_data:
            return self._entity_data.get('InitialSpeed')
        return "0"
    @property
    def target(self):
        # Name of the path/target entity to fly toward, if any.
        if "target" in self._entity_data:
            return self._entity_data.get('target')
        return None
    @property
    def spawnflags(self):
        # Decode the spawnflags bitmask into a list of readable flag names.
        flags = []
        if "spawnflags" in self._entity_data:
            value = self._entity_data.get("spawnflags", None)
            for name, (key, _) in {'No Rotorwash': (32, 0), 'Await Input': (64, 0)}.items():
                if value & key > 0:
                    flags.append(name)
        return flags
class PlayerClass:
    """Marker mixin for player-class entities; simply holds the raw key/value data."""

    def __init__(self, entity_data: dict):
        # Keep a reference to the raw entity key/value dictionary.
        self._entity_data = entity_data
class Light:
    """Mixin exposing classic light keyvalues (_light color, HDR overrides,
    attenuation, style/pattern) read lazily from the raw entity data dict."""
    pass
    def __init__(self, entity_data: dict):
        self._entity_data = entity_data
    @property
    def _light(self):
        # Light color and brightness; default white at 1500.
        if "_light" in self._entity_data:
            return parse_int_vector(self._entity_data.get('_light'))
        return parse_int_vector("255 255 255 1500")
    @property
    def _lightHDR(self):
        # HDR override color; -1 -1 -1 means "use _light".
        if "_lightHDR" in self._entity_data:
            return parse_int_vector(self._entity_data.get('_lightHDR'))
        return parse_int_vector("-1 -1 -1 1")
    @property
    def _lightscaleHDR(self):
        if "_lightscaleHDR" in self._entity_data:
            return float(self._entity_data.get('_lightscaleHDR'))
        return float(1)
    @property
    def style(self):
        if "style" in self._entity_data:
            return self._entity_data.get('style')
        return "0"
    @property
    def pattern(self):
        if "pattern" in self._entity_data:
            return self._entity_data.get('pattern')
        return ""
    @property
    def _constant_attn(self):
        if "_constant_attn" in self._entity_data:
            return self._entity_data.get('_constant_attn')
        return "0"
    @property
    def _linear_attn(self):
        if "_linear_attn" in self._entity_data:
            return self._entity_data.get('_linear_attn')
        return "0"
    @property
    def _quadratic_attn(self):
        if "_quadratic_attn" in self._entity_data:
            return self._entity_data.get('_quadratic_attn')
        return "1"
    @property
    def _fifty_percent_distance(self):
        if "_fifty_percent_distance" in self._entity_data:
            return self._entity_data.get('_fifty_percent_distance')
        return "0"
    @property
    def _zero_percent_distance(self):
        if "_zero_percent_distance" in self._entity_data:
            return self._entity_data.get('_zero_percent_distance')
        return "0"
    @property
    def _hardfalloff(self):
        if "_hardfalloff" in self._entity_data:
            return int(self._entity_data.get('_hardfalloff'))
        return int(0)
class Node:
    """Base wrapper for navigation-node entities; holds the raw key/value data."""

    def __init__(self, entity_data: dict):
        # Raw entity key/value dictionary.
        self._entity_data = entity_data

    @property
    def nodeid(self):
        """Node id as an int; 0 when the key is absent."""
        data = self._entity_data
        return int(data['nodeid']) if 'nodeid' in data else int(0)
class HintNode(Node):
    """Generated FGD wrapper: AI hint node; each property reads a key from the
    raw entity data dict and falls back to the FGD default."""
    pass
    @property
    def spawnflags(self):
        # Decode the spawnflags bitmask into a list of readable flag names.
        flags = []
        if "spawnflags" in self._entity_data:
            value = self._entity_data.get("spawnflags", None)
            for name, (key, _) in {'Allow jump up': (65536, 0)}.items():
                if value & key > 0:
                    flags.append(name)
        return flags
    @property
    def hinttype(self):
        if "hinttype" in self._entity_data:
            return self._entity_data.get('hinttype')
        return "0"
    @property
    def generictype(self):
        if "generictype" in self._entity_data:
            return self._entity_data.get('generictype')
        return ""
    @property
    def hintactivity(self):
        if "hintactivity" in self._entity_data:
            return self._entity_data.get('hintactivity')
        return ""
    @property
    def nodeFOV(self):
        if "nodeFOV" in self._entity_data:
            return self._entity_data.get('nodeFOV')
        return "180"
    @property
    def StartHintDisabled(self):
        # NOTE(review): bool() on a non-empty string (e.g. "0") is True —
        # confirm upstream value types before relying on this coercion.
        if "StartHintDisabled" in self._entity_data:
            return bool(self._entity_data.get('StartHintDisabled'))
        return bool(0)
    @property
    def Group(self):
        if "Group" in self._entity_data:
            return self._entity_data.get('Group')
        return ""
    @property
    def TargetNode(self):
        # -1 means "no target node".
        if "TargetNode" in self._entity_data:
            return int(self._entity_data.get('TargetNode'))
        return int(-1)
    @property
    def radius(self):
        if "radius" in self._entity_data:
            return int(self._entity_data.get('radius'))
        return int(0)
    @property
    def IgnoreFacing(self):
        if "IgnoreFacing" in self._entity_data:
            return self._entity_data.get('IgnoreFacing')
        return "2"
    @property
    def MinimumState(self):
        if "MinimumState" in self._entity_data:
            return self._entity_data.get('MinimumState')
        return "1"
    @property
    def MaximumState(self):
        if "MaximumState" in self._entity_data:
            return self._entity_data.get('MaximumState')
        return "3"
class TriggerOnce(Targetname, Parentname, EnableDisable, Global):
    """Generated FGD wrapper: one-shot trigger volume; spawnflags select which
    entity categories can activate it."""
    pass
    @property
    def spawnflags(self):
        # Decode the spawnflags bitmask into a list of readable flag names.
        flags = []
        if "spawnflags" in self._entity_data:
            value = self._entity_data.get("spawnflags", None)
            for name, (key, _) in {'Clients': (1, 1), 'NPCs': (2, 0), 'Pushables': (4, 0), 'Physics Objects': (8, 0),
                                   'Only player ally NPCs': (16, 0), 'Only clients in vehicles': (32, 0),
                                   'Everything (not including physics debris)': (64, 0),
                                   'Only clients *not* in vehicles': (512, 0), 'Physics debris': (1024, 0),
                                   'Only NPCs in vehicles (respects player ally flag)': (2048, 0),
                                   'Correctly account for object mass (trigger_push used to assume 100Kg) and multiple component physobjs (car, blob...)': (
                                       4096, 1), "Ignore client's hands": (8192, 0)}.items():
                if value & key > 0:
                    flags.append(name)
        return flags
    @property
    def filtername(self):
        # Optional filter entity restricting what can activate the trigger.
        if "filtername" in self._entity_data:
            return self._entity_data.get('filtername')
        return None
class Trigger(TriggerOnce):
    """Generated FGD wrapper: repeatable trigger volume; same keys/flags as TriggerOnce."""
    pass
class worldbase:
    """Generated FGD wrapper: worldspawn base keyvalues (sky, PVS, prop screen
    width limits, VR settings), read lazily from the raw entity data dict."""
    pass
    def __init__(self, entity_data: dict):
        self._entity_data = entity_data
    @property
    def targetname(self):
        if "targetname" in self._entity_data:
            return self._entity_data.get('targetname')
        return None
    @property
    def skyname(self):
        if "skyname" in self._entity_data:
            return self._entity_data.get('skyname')
        return "sky_day01_01"
    @property
    def startdark(self):
        # NOTE(review): bool() on a non-empty string (e.g. "0") is True —
        # confirm upstream value types before relying on this coercion.
        if "startdark" in self._entity_data:
            return bool(self._entity_data.get('startdark'))
        return bool(0)
    @property
    def startcolor(self):
        if "startcolor" in self._entity_data:
            return parse_int_vector(self._entity_data.get('startcolor'))
        return parse_int_vector("0 0 0")
    @property
    def pvstype(self):
        if "pvstype" in self._entity_data:
            return self._entity_data.get('pvstype')
        return "10"
    @property
    def newunit(self):
        if "newunit" in self._entity_data:
            return self._entity_data.get('newunit')
        return "0"
    @property
    def maxpropscreenwidth(self):
        # -1 disables the limit.
        if "maxpropscreenwidth" in self._entity_data:
            return float(self._entity_data.get('maxpropscreenwidth'))
        return float(-1)
    @property
    def minpropscreenwidth(self):
        if "minpropscreenwidth" in self._entity_data:
            return float(self._entity_data.get('minpropscreenwidth'))
        return float(0)
    @property
    def vrchaperone(self):
        if "vrchaperone" in self._entity_data:
            return self._entity_data.get('vrchaperone')
        return "0"
    @property
    def vrmovement(self):
        if "vrmovement" in self._entity_data:
            return self._entity_data.get('vrmovement')
        return "0"
class ambient_generic(Targetname, Parentname):
    """Generated FGD wrapper: legacy ambient sound entity; sound name, volume/
    pitch envelope and LFO keys are read lazily from the raw entity data dict."""
    pass
    icon_sprite = "editor/ambient_generic.vmat"
    @property
    def message(self):
        # Sound name/path to play.
        if "message" in self._entity_data:
            return self._entity_data.get('message')
        return ""
    @property
    def health(self):
        # Legacy key: ambient_generic historically stored volume (0-10) in 'health'.
        # NOTE(review): semantics not visible here — confirm against engine docs.
        if "health" in self._entity_data:
            return int(self._entity_data.get('health'))
        return int(10)
    @property
    def preset(self):
        if "preset" in self._entity_data:
            return self._entity_data.get('preset')
        return "0"
    @property
    def volstart(self):
        if "volstart" in self._entity_data:
            return int(self._entity_data.get('volstart'))
        return int(0)
    @property
    def fadeinsecs(self):
        if "fadeinsecs" in self._entity_data:
            return int(self._entity_data.get('fadeinsecs'))
        return int(0)
    @property
    def fadeoutsecs(self):
        if "fadeoutsecs" in self._entity_data:
            return int(self._entity_data.get('fadeoutsecs'))
        return int(0)
    @property
    def pitch(self):
        # 100 = unmodified pitch.
        if "pitch" in self._entity_data:
            return int(self._entity_data.get('pitch'))
        return int(100)
    @property
    def pitchstart(self):
        if "pitchstart" in self._entity_data:
            return int(self._entity_data.get('pitchstart'))
        return int(100)
    @property
    def spinup(self):
        if "spinup" in self._entity_data:
            return int(self._entity_data.get('spinup'))
        return int(0)
    @property
    def spindown(self):
        if "spindown" in self._entity_data:
            return int(self._entity_data.get('spindown'))
        return int(0)
    @property
    def lfotype(self):
        if "lfotype" in self._entity_data:
            return int(self._entity_data.get('lfotype'))
        return int(0)
    @property
    def lforate(self):
        if "lforate" in self._entity_data:
            return int(self._entity_data.get('lforate'))
        return int(0)
    @property
    def lfomodpitch(self):
        if "lfomodpitch" in self._entity_data:
            return int(self._entity_data.get('lfomodpitch'))
        return int(0)
    @property
    def lfomodvol(self):
        if "lfomodvol" in self._entity_data:
            return int(self._entity_data.get('lfomodvol'))
        return int(0)
    @property
    def cspinup(self):
        if "cspinup" in self._entity_data:
            return int(self._entity_data.get('cspinup'))
        return int(0)
    @property
    def radius(self):
        # Audible radius; kept as a string to match the raw keyvalue format.
        if "radius" in self._entity_data:
            return self._entity_data.get('radius')
        return "1250"
    @property
    def spawnflags(self):
        # Decode the spawnflags bitmask into a list of readable flag names.
        flags = []
        if "spawnflags" in self._entity_data:
            value = self._entity_data.get("spawnflags", None)
            for name, (key, _) in {'Play everywhere': (1, 0), 'Start Silent': (16, 1),
                                   'Is NOT Looped': (32, 1)}.items():
                if value & key > 0:
                    flags.append(name)
        return flags
    @property
    def SourceEntityName(self):
        # Optional entity the sound is emitted from instead of this one.
        if "SourceEntityName" in self._entity_data:
            return self._entity_data.get('SourceEntityName')
        return None
class point_soundevent(Targetname, Parentname):
    """Generated FGD wrapper: modern sound-event entity; keys read lazily from
    the raw entity data dict."""
    pass
    icon_sprite = "editor/snd_event.vmat"
    @property
    def soundName(self):
        # Name of the sound event to start.
        if "soundName" in self._entity_data:
            return self._entity_data.get('soundName')
        return ""
    @property
    def sourceEntityName(self):
        # Optional entity the event is positioned on.
        if "sourceEntityName" in self._entity_data:
            return self._entity_data.get('sourceEntityName')
        return ""
    @property
    def startOnSpawn(self):
        # NOTE(review): default is bool() == False; non-empty string values
        # coerce to True regardless of content — confirm upstream value types.
        if "startOnSpawn" in self._entity_data:
            return bool(self._entity_data.get('startOnSpawn'))
        return bool()
    @property
    def toLocalPlayer(self):
        if "toLocalPlayer" in self._entity_data:
            return bool(self._entity_data.get('toLocalPlayer'))
        return bool()
    @property
    def stopOnNew(self):
        if "stopOnNew" in self._entity_data:
            return bool(self._entity_data.get('stopOnNew'))
        return bool(1)
    @property
    def saveAndRestore(self):
        if "saveAndRestore" in self._entity_data:
            return bool(self._entity_data.get('saveAndRestore'))
        return bool(0)
    @property
    def sourceEntityAttachment(self):
        # Optional attachment point on the source entity.
        if "sourceEntityAttachment" in self._entity_data:
            return self._entity_data.get('sourceEntityAttachment')
        return None
class snd_event_point(point_soundevent):
    """Generated FGD wrapper: point-positioned sound event; behavior inherited from point_soundevent."""
    pass
    icon_sprite = "editor/snd_event.vmat"
class snd_event_alignedbox(point_soundevent):
    """Generated FGD wrapper: sound event emitted from an axis-aligned box volume."""
    pass
    icon_sprite = "editor/snd_event.vmat"
    @property
    def box_mins(self):
        # Box minimum corner, relative coordinates.
        if "box_mins" in self._entity_data:
            return parse_int_vector(self._entity_data.get('box_mins'))
        return parse_int_vector("-64 -64 -64")
    @property
    def box_maxs(self):
        # Box maximum corner, relative coordinates.
        if "box_maxs" in self._entity_data:
            return parse_int_vector(self._entity_data.get('box_maxs'))
        return parse_int_vector("64 64 64")
class snd_stack_save(Targetname):
    """Entity that names a sound stack to be saved."""

    icon_sprite = "editor/snd_event.vmat"

    @property
    def stackToSave(self):
        """The 'stackToSave' value (empty string when missing)."""
        return self._entity_data.get('stackToSave', "")
class snd_event_param(Targetname, Parentname):
    """Entity carrying a named float parameter for a sound event."""

    icon_sprite = "editor/snd_opvar_set.vmat"

    @property
    def parameterName(self):
        """The 'parameterName' value (empty string when missing)."""
        return self._entity_data.get('parameterName', "")

    @property
    def floatValue(self):
        """The 'floatValue' value coerced to float (0.0 when missing)."""
        return float(self._entity_data.get('floatValue', 0.0))
class snd_opvar_set(Targetname):
pass
icon_sprite | |
oprot.writeListBegin(TType.I32, len(self.success))
for iter366 in self.success:
oprot.writeI32(iter366)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.svEx is not None:
oprot.writeFieldBegin('svEx', TType.STRUCT, 1)
self.svEx.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
    def validate(self):
        # Generated structs perform no field validation.
        return
    def __repr__(self):
        # Debug representation: "ClassName(attr1=..., attr2=...)" built from
        # the instance dict, so every set attribute is shown.
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        # Structural equality: exactly the same class and identical attributes.
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        # Explicit inverse of __eq__ (emitted by the code generator).
        return not (self == other)
class compareHyperparameters_args(object):
    """Argument struct for the compareHyperparameters RPC.

    Appears to be Thrift-generated code; do not hand-edit the wire logic.

    Attributes:
     - modelId1
     - modelId2
    """
    thrift_spec = (
        None,  # 0
        (1, TType.I32, 'modelId1', None, None, ),  # 1
        (2, TType.I32, 'modelId2', None, None, ),  # 2
    )

    def __init__(self, modelId1=None, modelId2=None,):
        self.modelId1 = modelId1
        self.modelId2 = modelId2

    def read(self, iprot):
        # Fast path: C-accelerated decoding when the transport supports it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
            return
        # Slow path: walk the fields until STOP, skipping unknown ids/types.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I32:
                    self.modelId1 = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.I32:
                    self.modelId2 = iprot.readI32()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated encoding.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
            return
        # Slow path: None fields are simply omitted from the wire.
        oprot.writeStructBegin('compareHyperparameters_args')
        if self.modelId1 is not None:
            oprot.writeFieldBegin('modelId1', TType.I32, 1)
            oprot.writeI32(self.modelId1)
            oprot.writeFieldEnd()
        if self.modelId2 is not None:
            oprot.writeFieldBegin('modelId2', TType.I32, 2)
            oprot.writeI32(self.modelId2)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated structs perform no field validation.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class compareHyperparameters_result(object):
    """Result struct for the compareHyperparameters RPC.

    Appears to be Thrift-generated code; do not hand-edit the wire logic.

    Attributes:
     - success
     - rnfEx
     - svEx
    """
    thrift_spec = (
        (0, TType.STRUCT, 'success', (CompareHyperParametersResponse, CompareHyperParametersResponse.thrift_spec), None, ),  # 0
        (1, TType.STRUCT, 'rnfEx', (ResourceNotFoundException, ResourceNotFoundException.thrift_spec), None, ),  # 1
        (2, TType.STRUCT, 'svEx', (ServerLogicException, ServerLogicException.thrift_spec), None, ),  # 2
    )

    def __init__(self, success=None, rnfEx=None, svEx=None,):
        self.success = success
        self.rnfEx = rnfEx
        self.svEx = svEx

    def read(self, iprot):
        # Fast path: C-accelerated decoding when the transport supports it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
            return
        # Slow path: field 0 is the return value, 1-2 the declared exceptions.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRUCT:
                    self.success = CompareHyperParametersResponse()
                    self.success.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.rnfEx = ResourceNotFoundException()
                    self.rnfEx.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.svEx = ServerLogicException()
                    self.svEx.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated encoding.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('compareHyperparameters_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        if self.rnfEx is not None:
            oprot.writeFieldBegin('rnfEx', TType.STRUCT, 1)
            self.rnfEx.write(oprot)
            oprot.writeFieldEnd()
        if self.svEx is not None:
            oprot.writeFieldBegin('svEx', TType.STRUCT, 2)
            self.svEx.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated structs perform no field validation.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class compareFeatures_args(object):
    """Argument struct for the compareFeatures RPC.

    Appears to be Thrift-generated code; do not hand-edit the wire logic.

    Attributes:
     - modelId1
     - modelId2
    """
    thrift_spec = (
        None,  # 0
        (1, TType.I32, 'modelId1', None, None, ),  # 1
        (2, TType.I32, 'modelId2', None, None, ),  # 2
    )

    def __init__(self, modelId1=None, modelId2=None,):
        self.modelId1 = modelId1
        self.modelId2 = modelId2

    def read(self, iprot):
        # Fast path: C-accelerated decoding when the transport supports it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
            return
        # Slow path: walk the fields until STOP, skipping unknown ids/types.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I32:
                    self.modelId1 = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.I32:
                    self.modelId2 = iprot.readI32()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated encoding.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
            return
        # Slow path: None fields are simply omitted from the wire.
        oprot.writeStructBegin('compareFeatures_args')
        if self.modelId1 is not None:
            oprot.writeFieldBegin('modelId1', TType.I32, 1)
            oprot.writeI32(self.modelId1)
            oprot.writeFieldEnd()
        if self.modelId2 is not None:
            oprot.writeFieldBegin('modelId2', TType.I32, 2)
            oprot.writeI32(self.modelId2)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated structs perform no field validation.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class compareFeatures_result(object):
    """Result struct for the compareFeatures RPC.

    Appears to be Thrift-generated code; do not hand-edit the wire logic.

    Attributes:
     - success
     - rnfEx
     - svEx
    """
    thrift_spec = (
        (0, TType.STRUCT, 'success', (CompareFeaturesResponse, CompareFeaturesResponse.thrift_spec), None, ),  # 0
        (1, TType.STRUCT, 'rnfEx', (ResourceNotFoundException, ResourceNotFoundException.thrift_spec), None, ),  # 1
        (2, TType.STRUCT, 'svEx', (ServerLogicException, ServerLogicException.thrift_spec), None, ),  # 2
    )

    def __init__(self, success=None, rnfEx=None, svEx=None,):
        self.success = success
        self.rnfEx = rnfEx
        self.svEx = svEx

    def read(self, iprot):
        # Fast path: C-accelerated decoding when the transport supports it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
            return
        # Slow path: field 0 is the return value, 1-2 the declared exceptions.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRUCT:
                    self.success = CompareFeaturesResponse()
                    self.success.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.rnfEx = ResourceNotFoundException()
                    self.rnfEx.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.svEx = ServerLogicException()
                    self.svEx.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated encoding.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('compareFeatures_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        if self.rnfEx is not None:
            oprot.writeFieldBegin('rnfEx', TType.STRUCT, 1)
            self.rnfEx.write(oprot)
            oprot.writeFieldEnd()
        if self.svEx is not None:
            oprot.writeFieldBegin('svEx', TType.STRUCT, 2)
            self.svEx.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated structs perform no field validation.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class groupByProblemType_args(object):
    """Argument struct for the groupByProblemType RPC.

    Appears to be Thrift-generated code; do not hand-edit the wire logic.

    Attributes:
     - modelIds
    """
    thrift_spec = (
        None,  # 0
        (1, TType.LIST, 'modelIds', (TType.I32, None, False), None, ),  # 1
    )

    def __init__(self, modelIds=None,):
        self.modelIds = modelIds

    def read(self, iprot):
        # Fast path: C-accelerated decoding when the transport supports it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
            return
        # Slow path: decode the i32 list element by element.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.LIST:
                    self.modelIds = []
                    (_etype370, _size367) = iprot.readListBegin()
                    for _i371 in range(_size367):
                        _elem372 = iprot.readI32()
                        self.modelIds.append(_elem372)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated encoding.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('groupByProblemType_args')
        if self.modelIds is not None:
            oprot.writeFieldBegin('modelIds', TType.LIST, 1)
            oprot.writeListBegin(TType.I32, len(self.modelIds))
            for iter373 in self.modelIds:
                oprot.writeI32(iter373)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated structs perform no field validation.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class groupByProblemType_result(object):
    """Result struct for the groupByProblemType RPC.

    Appears to be Thrift-generated code; do not hand-edit the wire logic.
    `success` is an i32 -> list<i32> map -- presumably problem-type id to
    model ids; confirm against the service IDL.

    Attributes:
     - success
     - svEx
    """
    thrift_spec = (
        (0, TType.MAP, 'success', (TType.I32, None, TType.LIST, (TType.I32, None, False), False), None, ),  # 0
        (1, TType.STRUCT, 'svEx', (ServerLogicException, ServerLogicException.thrift_spec), None, ),  # 1
    )
    def __init__(self, success=None, svEx=None,):
        # Plain field assignment; generated structs carry no other state.
        self.success = success
        self.svEx = svEx
    def read(self, iprot):
        # Fast path: C-accelerated decoding when the transport supports it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
            return
        # Slow path: decode the map of i32 keys to i32 lists element by element.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.MAP:
                    self.success = {}
                    (_ktype375, _vtype376, _size374) = iprot.readMapBegin()
                    for _i378 in range(_size374):
                        _key379 = iprot.readI32()
                        _val380 = []
                        (_etype384, _size381) = iprot.readListBegin()
                        for _i385 in range(_size381):
                            _elem386 = iprot.readI32()
                            _val380.append(_elem386)
                        iprot.readListEnd()
                        self.success[_key379] = _val380
                    iprot.readMapEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.svEx = ServerLogicException()
                    self.svEx.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated encoding.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
            return
        # Slow path: None fields are simply omitted from the wire.
        oprot.writeStructBegin('groupByProblemType_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.MAP, 0)
            oprot.writeMapBegin(TType.I32, TType.LIST, len(self.success))
            for kiter387, viter388 in self.success.items():
                oprot.writeI32(kiter387)
                oprot.writeListBegin(TType.I32, len(viter388))
                for iter389 in viter388:
                    oprot.writeI32(iter389)
                oprot.writeListEnd()
            oprot.writeMapEnd()
            oprot.writeFieldEnd()
        if self.svEx is not None:
            oprot.writeFieldBegin('svEx', TType.STRUCT, 1)
            self.svEx.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # Generated structs perform no field validation.
        return
    def __repr__(self):
        # Debug representation listing every set attribute as name=value.
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ | |
test_no_copy(self):
assert copy(HYPOT) is HYPOT
assert deepcopy(HYPOT) is HYPOT
    def test_call(self):
        # HYPOT behaves like math.hypot: sqrt(x**2 + y**2), broadcasting
        # over numpy arrays on either or both arguments.
        assert_token(HYPOT, [1, 1], [math.sqrt(2)], approx=True)
        assert_token(HYPOT, [math.sqrt(3), 1], [2], approx=True)
        assert_token(
            HYPOT,
            [1, np.array([np.sqrt(3), 1])],
            [np.array([2, np.sqrt(2)])],
            approx=True,
        )
        assert_token(
            HYPOT,
            [np.array([np.sqrt(3), 1]), 1],
            [np.array([2, np.sqrt(2)])],
            approx=True,
        )
        assert_token(
            HYPOT,
            [np.array([np.sqrt(3), 1]), np.array([1, 1])],
            [np.array([2, np.sqrt(2)])],
            approx=True,
        )
        # extra stack elements
        assert_token(HYPOT, [0, math.sqrt(3), 1], [0, 2], approx=True)
        # not enough stack elements
        with pytest.raises(StackUnderflowError):
            HYPOT([], {})
        with pytest.raises(StackUnderflowError):
            HYPOT([1], {})
class TestR2Operator:
    """Tests for R2: pops x and y, pushes x**2 + y**2 (broadcasting)."""

    def test_repr(self):
        assert repr(R2) == "R2"

    def test_pops(self):
        assert R2.pops == 2

    def test_puts(self):
        assert R2.puts == 1

    def test_no_copy(self):
        # Operators behave as singletons: copying returns the same object.
        assert copy(R2) is R2
        assert deepcopy(R2) is R2

    def test_call(self):
        assert_token(R2, [2, 3], [13])
        assert_token(R2, [2, np.array([3, 4])], [np.array([13, 20])])
        assert_token(R2, [np.array([3, 4]), 2], [np.array([13, 20])])
        assert_token(R2, [np.array([1, 2]), np.array([3, 4])], [np.array([10, 20])])
        # extra stack elements
        assert_token(R2, [0, 2, 3], [0, 13], approx=True)
        # not enough stack elements
        with pytest.raises(StackUnderflowError):
            R2([], {})
        with pytest.raises(StackUnderflowError):
            R2([1], {})
class TestEQOperator:
    """Tests for EQ: element-wise equality; NaN compares unequal to NaN."""

    def test_repr(self):
        assert repr(EQ) == "EQ"

    def test_pops(self):
        assert EQ.pops == 2

    def test_puts(self):
        assert EQ.puts == 1

    def test_no_copy(self):
        # Operators behave as singletons: copying returns the same object.
        assert copy(EQ) is EQ
        assert deepcopy(EQ) is EQ

    def test_call(self):
        assert_token(EQ, [2, 2], [True])
        assert_token(EQ, [2, 3], [False])
        assert_token(
            EQ, [2, np.array([1, np.nan, 2])], [np.array([False, False, True])]
        )
        assert_token(
            EQ, [np.array([1, np.nan, 2]), 2], [np.array([False, False, True])]
        )
        assert_token(
            EQ,
            [np.array([1, np.nan, 3, 3]), np.array([1, np.nan, 2, 3])],
            [np.array([True, False, False, True])],
        )
        # extra stack elements
        assert_token(EQ, [0, 2, 2], [0, True])
        # not enough stack elements
        with pytest.raises(StackUnderflowError):
            EQ([], {})
        with pytest.raises(StackUnderflowError):
            EQ([1], {})
class TestNEOperator:
    """Tests for NE: element-wise inequality; NaN is unequal even to NaN."""

    def test_repr(self):
        assert repr(NE) == "NE"

    def test_pops(self):
        assert NE.pops == 2

    def test_puts(self):
        assert NE.puts == 1

    def test_no_copy(self):
        # Operators behave as singletons: copying returns the same object.
        assert copy(NE) is NE
        assert deepcopy(NE) is NE

    def test_call(self):
        assert_token(NE, [2, 2], [False])
        assert_token(NE, [2, 3], [True])
        assert_token(NE, [2, np.array([1, np.nan, 2])], [np.array([True, True, False])])
        assert_token(NE, [np.array([1, np.nan, 2]), 2], [np.array([True, True, False])])
        assert_token(
            NE,
            [np.array([1, np.nan, 3, 3]), np.array([1, np.nan, 2, 3])],
            [np.array([False, True, True, False])],
        )
        # extra stack elements
        assert_token(NE, [0, 2, 2], [0, False])
        # not enough stack elements
        with pytest.raises(StackUnderflowError):
            NE([], {})
        with pytest.raises(StackUnderflowError):
            NE([1], {})
class TestLTOperator:
    """Tests for LT: pops x and y, pushes x < y (broadcasting)."""

    def test_repr(self):
        assert repr(LT) == "LT"

    def test_pops(self):
        assert LT.pops == 2

    def test_puts(self):
        assert LT.puts == 1

    def test_no_copy(self):
        # Operators behave as singletons: copying returns the same object.
        assert copy(LT) is LT
        assert deepcopy(LT) is LT

    def test_call(self):
        assert_token(LT, [2, 3], [True])
        assert_token(LT, [2, 2], [False])
        assert_token(LT, [3, 2], [False])
        assert_token(LT, [2, np.array([1, 2, 3])], [np.array([False, False, True])])
        assert_token(LT, [np.array([1, 2, 3]), 2], [np.array([True, False, False])])
        assert_token(
            LT,
            [np.array([1, 2, 3]), np.array([3, 2, 1])],
            [np.array([True, False, False])],
        )
        # extra stack elements
        assert_token(LT, [0, 2, 3], [0, True])
        # not enough stack elements
        with pytest.raises(StackUnderflowError):
            LT([], {})
        with pytest.raises(StackUnderflowError):
            LT([1], {})
class TestLEOperator:
    """Tests for LE: pops x and y, pushes x <= y (broadcasting)."""

    def test_repr(self):
        assert repr(LE) == "LE"

    def test_pops(self):
        assert LE.pops == 2

    def test_puts(self):
        assert LE.puts == 1

    def test_no_copy(self):
        # Operators behave as singletons: copying returns the same object.
        assert copy(LE) is LE
        assert deepcopy(LE) is LE

    # Renamed from `test_le` to `test_call` for consistency with every
    # sibling operator test class in this module; assertions are unchanged.
    def test_call(self):
        assert_token(LE, [2, 3], [True])
        assert_token(LE, [2, 2], [True])
        assert_token(LE, [3, 2], [False])
        assert_token(LE, [2, np.array([1, 2, 3])], [np.array([False, True, True])])
        assert_token(LE, [np.array([1, 2, 3]), 2], [np.array([True, True, False])])
        assert_token(
            LE,
            [np.array([1, 2, 3]), np.array([3, 2, 1])],
            [np.array([True, True, False])],
        )
        # extra stack elements
        assert_token(LE, [0, 2, 3], [0, True])
        # not enough stack elements
        with pytest.raises(StackUnderflowError):
            LE([], {})
        with pytest.raises(StackUnderflowError):
            LE([1], {})
class TestGTOperator:
    """Tests for GT: pops x and y, pushes x > y (broadcasting)."""

    def test_repr(self):
        assert repr(GT) == "GT"

    def test_pops(self):
        assert GT.pops == 2

    def test_puts(self):
        assert GT.puts == 1

    def test_no_copy(self):
        # Operators behave as singletons: copying returns the same object.
        assert copy(GT) is GT
        assert deepcopy(GT) is GT

    def test_call(self):
        assert_token(GT, [2, 3], [False])
        assert_token(GT, [2, 2], [False])
        assert_token(GT, [3, 2], [True])
        assert_token(GT, [2, np.array([1, 2, 3])], [np.array([True, False, False])])
        assert_token(GT, [np.array([1, 2, 3]), 2], [np.array([False, False, True])])
        assert_token(
            GT,
            [np.array([1, 2, 3]), np.array([3, 2, 1])],
            [np.array([False, False, True])],
        )
        # extra stack elements
        assert_token(GT, [0, 2, 3], [0, False])
        # not enough stack elements
        with pytest.raises(StackUnderflowError):
            GT([], {})
        with pytest.raises(StackUnderflowError):
            GT([1], {})
class TestGEOperator:
    """Tests for GE: pops x and y, pushes x >= y (broadcasting)."""

    def test_repr(self):
        assert repr(GE) == "GE"

    def test_pops(self):
        assert GE.pops == 2

    def test_puts(self):
        assert GE.puts == 1

    def test_no_copy(self):
        # Operators behave as singletons: copying returns the same object.
        assert copy(GE) is GE
        assert deepcopy(GE) is GE

    def test_call(self):
        assert_token(GE, [2, 3], [False])
        assert_token(GE, [2, 2], [True])
        assert_token(GE, [3, 2], [True])
        assert_token(GE, [2, np.array([1, 2, 3])], [np.array([True, True, False])])
        assert_token(GE, [np.array([1, 2, 3]), 2], [np.array([False, True, True])])
        assert_token(
            GE,
            [np.array([1, 2, 3]), np.array([3, 2, 1])],
            [np.array([False, True, True])],
        )
        # extra stack elements
        assert_token(GE, [0, 2, 3], [0, False])
        # not enough stack elements
        with pytest.raises(StackUnderflowError):
            GE([], {})
        with pytest.raises(StackUnderflowError):
            GE([1], {})
class TestNANOperator:
    """Tests for NAN: pops x and y, pushes x with NaN where x == y."""

    def test_repr(self):
        assert repr(NAN) == "NAN"

    def test_pops(self):
        assert NAN.pops == 2

    def test_puts(self):
        assert NAN.puts == 1

    def test_no_copy(self):
        # Operators behave as singletons: copying returns the same object.
        assert copy(NAN) is NAN
        assert deepcopy(NAN) is NAN

    def test_call(self):
        assert_token(NAN, [2, 2], [float("nan")])
        assert_token(NAN, [2, 3], [2])
        assert_token(NAN, [2, np.array([2, 3])], [np.array([np.nan, 2])])
        assert_token(NAN, [np.array([2, 3]), 2], [np.array([np.nan, 3])])
        assert_token(
            NAN, [np.array([1, 2, 3]), np.array([3, 2, 1])], [np.array([1, np.nan, 3])]
        )
        # as float
        assert_token(
            NAN,
            [np.array([1.0, 2.0, 3.0]), np.array([3, 2, 1])],
            [np.array([1, np.nan, 3])],
            approx=True,
        )
        # extra stack elements
        assert_token(NAN, [0, 2, 2], [0, float("nan")])
        # not enough stack elements
        with pytest.raises(StackUnderflowError):
            NAN([], {})
        with pytest.raises(StackUnderflowError):
            NAN([1], {})
class TestANDOperator:
    """Tests for AND: pops x and y, pushes x with NaNs replaced by y."""

    def test_repr(self):
        assert repr(AND) == "AND"

    def test_pops(self):
        assert AND.pops == 2

    def test_puts(self):
        assert AND.puts == 1

    def test_no_copy(self):
        # Operators behave as singletons: copying returns the same object.
        assert copy(AND) is AND
        assert deepcopy(AND) is AND

    def test_call(self):
        assert_token(AND, [2, 3], [2])
        assert_token(AND, [float("nan"), 3], [3])
        assert_token(AND, [float("nan"), np.array([2, 3])], [np.array([2, 3])])
        assert_token(AND, [np.array([np.nan, 3]), 2], [np.array([2, 3])])
        assert_token(
            AND,
            [np.array([10, np.nan, 30]), np.array([1, 2, 3])],
            [np.array([10, 2, 30])],
        )
        # extra stack elements
        assert_token(AND, [0, float("nan"), 3], [0, 3])
        # not enough stack elements
        with pytest.raises(StackUnderflowError):
            AND([], {})
        with pytest.raises(StackUnderflowError):
            AND([1], {})
class TestOROperator:
    """Tests for OR: pops x and y, pushes x with NaN wherever y is NaN."""

    def test_repr(self):
        assert repr(OR) == "OR"

    def test_pops(self):
        assert OR.pops == 2

    def test_puts(self):
        assert OR.puts == 1

    def test_no_copy(self):
        # Operators behave as singletons: copying returns the same object.
        assert copy(OR) is OR
        assert deepcopy(OR) is OR

    def test_call(self):
        assert_token(OR, [2, 3], [2])
        assert_token(OR, [2, float("nan")], [float("nan")])
        assert_token(OR, [2, np.array([3, np.nan])], [np.array([2, np.nan])])
        assert_token(OR, [np.array([2, 3]), np.nan], [np.array([np.nan, np.nan])])
        assert_token(
            OR,
            [np.array([1, 2, 3]), np.array([10, np.nan, 30])],
            [np.array([1, np.nan, 3])],
        )
        # as float
        assert_token(
            OR,
            [np.array([1.0, 2.0, 3.0]), np.array([10, np.nan, 30])],
            [np.array([1, np.nan, 3])],
        )
        # extra stack elements
        assert_token(OR, [0, 2, float("nan")], [0, float("nan")])
        # not enough stack elements
        with pytest.raises(StackUnderflowError):
            OR([], {})
        with pytest.raises(StackUnderflowError):
            OR([1], {})
class TestIANDOperator:
    """Tests for IAND: bitwise AND of integer operands; floats raise TypeError."""

    def test_repr(self):
        assert repr(IAND) == "IAND"

    def test_pops(self):
        assert IAND.pops == 2

    def test_puts(self):
        assert IAND.puts == 1

    def test_no_copy(self):
        # Operators behave as singletons: copying returns the same object.
        assert copy(IAND) is IAND
        assert deepcopy(IAND) is IAND

    def test_call(self):
        assert_token(IAND, [5, 3], [1])
        assert_token(IAND, [15, 21], [5])
        assert_token(IAND, [21, 15], [5])
        assert_token(IAND, [15, np.array([9, 21, 35])], [np.array([9, 5, 3])])
        assert_token(IAND, [np.array([9, 21, 35]), 15], [np.array([9, 5, 3])])
        assert_token(
            IAND,
            [np.array([9, 21, 35]), np.array([3, 15, 127])],
            [np.array([1, 5, 35])],
        )
        # extra stack elements
        assert_token(IAND, [0, 15, 21], [0, 5])
        # floats are not supported
        with pytest.raises(TypeError):
            IAND([1.0, 2], {})
        with pytest.raises(TypeError):
            IAND([1, 2.0], {})
        with pytest.raises(TypeError):
            IAND([1, np.array([2.0, 3.0])], {})
        with pytest.raises(TypeError):
            IAND([np.array([2.0, 3.0]), 1], {})
        # not enough stack elements
        with pytest.raises(StackUnderflowError):
            IAND([], {})
        with pytest.raises(StackUnderflowError):
            IAND([1], {})
class TestIOROperator:
    """Tests for IOR: bitwise OR of integer operands; floats raise TypeError."""

    def test_repr(self):
        assert repr(IOR) == "IOR"

    def test_pops(self):
        assert IOR.pops == 2

    def test_puts(self):
        assert IOR.puts == 1

    def test_no_copy(self):
        # Operators behave as singletons: copying returns the same object.
        assert copy(IOR) is IOR
        assert deepcopy(IOR) is IOR

    def test_call(self):
        assert_token(IOR, [5, 3], [7])
        assert_token(IOR, [15, 21], [31])
        assert_token(IOR, [21, 15], [31])
        assert_token(IOR, [15, np.array([9, 21, 35])], [np.array([15, 31, 47])])
        assert_token(IOR, [np.array([9, 21, 35]), 15], [np.array([15, 31, 47])])
        assert_token(
            IOR,
            [np.array([9, 21, 35]), np.array([3, 15, 127])],
            [np.array([11, 31, 127])],
        )
        # extra stack elements
        assert_token(IOR, [0, 15, 21], [0, 31])
        # floats are not supported
        with pytest.raises(TypeError):
            IOR([1.0, 2], {})
        with pytest.raises(TypeError):
            IOR([1, 2.0], {})
        with pytest.raises(TypeError):
            IOR([1, np.array([2.0, 3.0])], {})
        with pytest.raises(TypeError):
            IOR([np.array([2.0, 3.0]), 1], {})
        # not enough stack elements
        with pytest.raises(StackUnderflowError):
            IOR([], {})
        with pytest.raises(StackUnderflowError):
            IOR([1], {})
class TestBTESTOperator:
def test_repr(self):
assert repr(BTEST) == "BTEST"
def test_pops(self):
assert BTEST.pops == 2
def test_puts(self):
assert BTEST.puts == 1
def test_no_copy(self):
assert copy(BTEST) is BTEST
assert deepcopy(BTEST) is BTEST
def test_call(self):
| |
from __future__ import annotations
import asyncio
import datetime
import typing
from . import base, fields
from .chat_invite_link import ChatInviteLink
from .chat_location import ChatLocation
from .chat_member import ChatMember
from .chat_permissions import ChatPermissions
from .chat_photo import ChatPhoto
from .input_file import InputFile
from ..utils import helper, markdown
from ..utils.deprecated import deprecated, DeprecatedReadOnlyClassVar
class Chat(base.TelegramObject):
    """
    This object represents a chat.

    https://core.telegram.org/bots/api#chat
    """
    id: base.Integer = fields.Field()  # unique chat identifier
    type: base.String = fields.Field()  # compared against ChatType constants below
    title: base.String = fields.Field()  # used as full_name for non-private chats
    username: base.String = fields.Field()  # public username, used by `mention`
    first_name: base.String = fields.Field()  # read only in the private-chat branches
    last_name: base.String = fields.Field()
    all_members_are_administrators: base.Boolean = fields.Field()
    photo: ChatPhoto = fields.Field(base=ChatPhoto)
    bio: base.String = fields.Field()
    description: base.String = fields.Field()
    invite_link: base.String = fields.Field()  # cached; refreshed via update_chat()
    pinned_message: 'Message' = fields.Field(base='Message')  # forward ref, defined elsewhere
    permissions: ChatPermissions = fields.Field(base=ChatPermissions)
    slow_mode_delay: base.Integer = fields.Field()
    sticker_set_name: base.String = fields.Field()
    can_set_sticker_set: base.Boolean = fields.Field()
    linked_chat_id: base.Integer = fields.Field()
    # NOTE(review): unlike photo/permissions, no base=ChatLocation is passed
    # here -- confirm whether this field is meant to deserialize to ChatLocation.
    location: ChatLocation = fields.Field()
    def __hash__(self):
        # Hash by the raw Telegram chat id. NOTE(review): pairs with an
        # equality definition elsewhere -- confirm __eq__ is consistent.
        return self.id
@property
def full_name(self) -> base.String:
if self.type == ChatType.PRIVATE:
full_name = self.first_name
if self.last_name:
full_name += ' ' + self.last_name
return full_name
return self.title
@property
def mention(self) -> typing.Optional[base.String]:
"""
Get mention if a Chat has a username, or get full name if this is a Private Chat, otherwise None is returned
"""
if self.username:
return '@' + self.username
if self.type == ChatType.PRIVATE:
return self.full_name
return None
@property
def user_url(self) -> base.String:
if self.type != ChatType.PRIVATE:
raise TypeError('`user_url` property is only available in private chats!')
return f"tg://user?id={self.id}"
@property
def shifted_id(self) -> int:
"""
Get shifted id of chat, e.g. for private links
For example: -1001122334455 -> 1122334455
"""
if self.type == ChatType.PRIVATE:
raise TypeError('`shifted_id` property is not available for private chats')
shift = -1_000_000_000_000
return shift - self.id
    def get_mention(self, name=None, as_html=True) -> base.String:
        """Build a mention link for this chat, HTML or Markdown formatted.

        NOTE(review): `as_html` defaults to True, so the `as_html is None`
        parse-mode check below only fires when a caller explicitly passes
        None -- confirm whether the default was meant to be None.
        """
        if as_html is None and self.bot.parse_mode and self.bot.parse_mode.lower() == 'html':
            as_html = True
        if name is None:
            name = self.mention
        if as_html:
            return markdown.hlink(name, self.user_url)
        return markdown.link(name, self.user_url)
    async def get_url(self) -> base.String:
        """
        Use this method to get chat link.

        Private chat returns user link.
        Other chat types return either username link (if they are public) or invite link (if they are private).

        :return: link
        :rtype: :obj:`base.String`
        """
        if self.type == ChatType.PRIVATE:
            return f"tg://user?id={self.id}"
        if self.username:
            return f'https://t.me/{self.username}'
        if self.invite_link:
            return self.invite_link
        # No cached invite link: refresh this chat from the API and re-read it.
        # NOTE(review): may still return None if the refreshed chat carries no
        # invite_link -- confirm callers tolerate that.
        await self.update_chat()
        return self.invite_link
    async def update_chat(self):
        """
        Use this method to update Chat data by re-fetching it from the Bot API.

        :return: None
        """
        other = await self.bot.get_chat(self.id)
        # Assumes iterating a TelegramObject yields (key, value) pairs and that
        # item assignment updates this object in place -- TODO confirm.
        for key, value in other:
            self[key] = value
    async def set_photo(self, photo: InputFile) -> base.Boolean:
        """
        Use this method to set a new profile photo for the chat. Photos can't be changed for private chats.
        The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.

        Note: In regular groups (non-supergroups), this method will only work if the ‘All Members Are Admins’
        setting is off in the target group.

        Source: https://core.telegram.org/bots/api#setchatphoto

        :param photo: New chat photo, uploaded using multipart/form-data
        :type photo: :obj:`base.InputFile`
        :return: Returns True on success.
        :rtype: :obj:`base.Boolean`
        """
        # Thin delegation to the Bot API client with this chat's id.
        return await self.bot.set_chat_photo(self.id, photo)
    async def delete_photo(self) -> base.Boolean:
        """
        Use this method to delete a chat photo. Photos can't be changed for private chats.
        The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.

        Note: In regular groups (non-supergroups), this method will only work if the ‘All Members Are Admins’
        setting is off in the target group.

        Source: https://core.telegram.org/bots/api#deletechatphoto

        :return: Returns True on success.
        :rtype: :obj:`base.Boolean`
        """
        # Thin delegation to the Bot API client with this chat's id.
        return await self.bot.delete_chat_photo(self.id)
    async def set_title(self, title: base.String) -> base.Boolean:
        """
        Use this method to change the title of a chat. Titles can't be changed for private chats.
        The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.

        Note: In regular groups (non-supergroups), this method will only work if the ‘All Members Are Admins’
        setting is off in the target group.

        Source: https://core.telegram.org/bots/api#setchattitle

        :param title: New chat title, 1-255 characters
        :type title: :obj:`base.String`
        :return: Returns True on success.
        :rtype: :obj:`base.Boolean`
        """
        # Thin delegation to the Bot API client with this chat's id.
        return await self.bot.set_chat_title(self.id, title)
    async def set_description(self, description: base.String) -> base.Boolean:
        """
        Use this method to change the description of a supergroup or a channel.
        The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.

        Source: https://core.telegram.org/bots/api#setchatdescription

        :param description: New chat description, 0-255 characters
        :type description: :obj:`typing.Optional[base.String]`
        :return: Returns True on success.
        :rtype: :obj:`base.Boolean`
        """
        # Thin delegation to the Bot API client with this chat's id.
        return await self.bot.set_chat_description(self.id, description)
    async def kick(self,
                   user_id: base.Integer,
                   until_date: typing.Union[base.Integer, datetime.datetime,
                                            datetime.timedelta, None] = None,
                   revoke_messages: typing.Optional[base.Boolean] = None,
                   ) -> base.Boolean:
        """
        Use this method to kick a user from a group, a supergroup or a channel.
        In the case of supergroups and channels, the user will not be able to return
        to the chat on their own using invite links, etc., unless unbanned first.

        The bot must be an administrator in the chat for this to work and must have
        the appropriate admin rights.

        Source: https://core.telegram.org/bots/api#kickchatmember

        :param user_id: Unique identifier of the target user
        :type user_id: :obj:`base.Integer`
        :param until_date: Date when the user will be unbanned. If user is banned
            for more than 366 days or less than 30 seconds from the current time they
            are considered to be banned forever. Applied for supergroups and channels
            only.
        :type until_date: :obj:`typing.Union[base.Integer, datetime.datetime,
            datetime.timedelta, None]`
        :param revoke_messages: Pass True to delete all messages from the chat for
            the user that is being removed. If False, the user will be able to see
            messages in the group that were sent before the user was removed. Always
            True for supergroups and channels.
        :type revoke_messages: :obj:`typing.Optional[base.Boolean]`
        :return: Returns True on success
        :rtype: :obj:`base.Boolean`
        """
        # Thin delegation to the Bot API client with this chat's id.
        return await self.bot.kick_chat_member(
            chat_id=self.id,
            user_id=user_id,
            until_date=until_date,
            revoke_messages=revoke_messages,
        )
    async def unban(self,
                    user_id: base.Integer,
                    only_if_banned: typing.Optional[base.Boolean] = None,
                    ) -> base.Boolean:
        """
        Use this method to unban a previously kicked user in a supergroup or channel.
        The user will not return to the group or channel automatically, but will be
        able to join via link, etc. The bot must be an administrator for this to
        work. By default, this method guarantees that after the call the user is not
        a member of the chat, but will be able to join it. So if the user is a member
        of the chat they will also be removed from the chat. If you don't want this,
        use the parameter only_if_banned. Returns True on success.

        Source: https://core.telegram.org/bots/api#unbanchatmember

        :param user_id: Unique identifier of the target user
        :type user_id: :obj:`base.Integer`
        :param only_if_banned: Do nothing if the user is not banned
        :type only_if_banned: :obj:`typing.Optional[base.Boolean]`
        :return: Returns True on success.
        :rtype: :obj:`base.Boolean`
        """
        # Thin delegation to the Bot API client with this chat's id.
        return await self.bot.unban_chat_member(
            chat_id=self.id,
            user_id=user_id,
            only_if_banned=only_if_banned,
        )
async def restrict(self, user_id: base.Integer,
                   permissions: typing.Optional[ChatPermissions] = None,
                   until_date: typing.Union[base.Integer, datetime.datetime, datetime.timedelta, None] = None,
                   can_send_messages: typing.Optional[base.Boolean] = None,
                   can_send_media_messages: typing.Optional[base.Boolean] = None,
                   can_send_other_messages: typing.Optional[base.Boolean] = None,
                   can_add_web_page_previews: typing.Optional[base.Boolean] = None) -> base.Boolean:
    """
    Use this method to restrict a user in a supergroup.
    The bot must be an administrator in the supergroup for this to work and must have the appropriate admin rights.
    Pass True for all boolean parameters to lift restrictions from a user.

    Source: https://core.telegram.org/bots/api#restrictchatmember

    :param user_id: Unique identifier of the target user
    :type user_id: :obj:`base.Integer`
    :param permissions: New user permissions
    :type permissions: :obj:`ChatPermissions`
    :param until_date: Date when restrictions will be lifted for the user, unix time.
    :type until_date: :obj:`typing.Union[base.Integer, datetime.datetime, datetime.timedelta, None]`
    :param can_send_messages: Pass True, if the user can send text messages, contacts, locations and venues
    :type can_send_messages: :obj:`typing.Optional[base.Boolean]`
    :param can_send_media_messages: Pass True, if the user can send audios, documents, photos, videos,
        video notes and voice notes, implies can_send_messages
    :type can_send_media_messages: :obj:`typing.Optional[base.Boolean]`
    :param can_send_other_messages: Pass True, if the user can send animations, games, stickers and
        use inline bots, implies can_send_media_messages
    :type can_send_other_messages: :obj:`typing.Optional[base.Boolean]`
    :param can_add_web_page_previews: Pass True, if the user may add web page previews to their messages,
        implies can_send_media_messages
    :type can_add_web_page_previews: :obj:`typing.Optional[base.Boolean]`
    :return: Returns True on success.
    :rtype: :obj:`base.Boolean`
    """
    # Chat id is supplied implicitly; all other arguments pass straight through.
    return await self.bot.restrict_chat_member(self.id, user_id=user_id,
                                               permissions=permissions,
                                               until_date=until_date,
                                               can_send_messages=can_send_messages,
                                               can_send_media_messages=can_send_media_messages,
                                               can_send_other_messages=can_send_other_messages,
                                               can_add_web_page_previews=can_add_web_page_previews)
async def promote(self,
user_id: base.Integer,
is_anonymous: typing.Optional[base.Boolean] | |
import os
from datetime import datetime
import numpy as np
from msl.io import JSONWriter, read
from .. import __version__
from ..log import log
from ..constants import REL_UNC, DELTA_STR, SUFFIX, MU_STR
def num_to_eng_format(num):
    """Format *num* using the engineering-notation suffixes in ``SUFFIX``.

    Iterates over ``SUFFIX`` (mapping of suffix string -> multiplier; relies
    on the dict's insertion order — presumably smallest multiplier first,
    TODO confirm) and returns the first rescaled value whose magnitude is
    below 1000, rounded to 3 decimal places, e.g. ``"1.234 k"``.

    Parameters
    ----------
    num : float
        Number to format.

    Returns
    -------
    str or None
        Formatted number. If no suffix brings the magnitude below 1000,
        the value scaled by the last suffix is returned instead of the
        previous behaviour of silently returning None. None is only
        returned if ``SUFFIX`` is empty.
    """
    eng_num = None
    for key, val in SUFFIX.items():
        renum = num / val
        eng_num = "{} {}".format(round(renum, 3), key)
        if abs(renum) < 1000:
            return eng_num
    # Fallback: num exceeds the range covered by the suffix table; return it
    # scaled by the last suffix rather than returning None.
    return eng_num
def filter_mass_set(masses, inputdata):
    """Takes a set of masses and returns a copy with only the masses included in the data which will be
    input into the final mass calculation.

    Uses the 'Set type' key to determine which other keys are present in the masses dictionary.

    Parameters
    ----------
    masses : dict
        mass set as stored in the Configuration class object (from AdminDetails).
        Must contain a 'Set type' key of 'Standard', 'Check' or 'Client'.
    inputdata : numpy structured array
        use format np.asarray(<data>, dtype =[('+ weight group', object), ('- weight group', object),
        ('mass difference (g)', 'float64'), ('balance uncertainty (ug)', 'float64')])

    Returns
    -------
    dict of only the masses which appear in inputdata, or None if the
    'Set type' is not recognised.
    """
    # Collect every individual weight ID appearing in the input data.
    # str.split('+') returns [i] when no '+' is present, so combined groups
    # such as 'a+b' and single IDs are handled uniformly.
    weightgroups = []
    for group in np.append(inputdata['+ weight group'], inputdata['- weight group']):
        weightgroups.extend(group.split('+'))

    # Shallow copy of the mass set; per-mass list keys are replaced below.
    masses_new = dict(masses)

    if masses['Set type'] in ('Standard', 'Check'):
        to_append = ['Shape/Mark', 'Nominal (g)', 'Weight ID', 'mass values (g)', 'u_cal', 'uncertainties (' + MU_STR + 'g)', 'u_drift']
    elif masses['Set type'] == 'Client':
        to_append = ['Weight ID', 'Nominal (g)', 'Shape/Mark', 'Container',
                     'u_mag (mg)', 'Density (kg/m3)', 'u_density (kg/m3)']
    else:
        # Error message now matches the values actually accepted above.
        log.error("Mass Set type not recognised: must be 'Standard', 'Check' or 'Client'")
        return None

    for key in to_append:
        masses_new[key] = []

    # Add info for included masses only.
    for i, item in enumerate(masses['Weight ID']):
        if item in weightgroups:
            for key in to_append:
                masses_new[key].append(masses[key][i])

    return masses_new
class FinalMassCalc(object):
def __init__(self, folder, client, client_masses, check_masses, std_masses, inputdata, nbc=True, corr=None):
    """Initialises the calculation of mass values using matrix least squares methods.

    The calculation result is written to ``<folder>/<client>_finalmasscalc.json``,
    containing a structured array of weight IDs, mass values, and uncertainties,
    along with a record of the input data and other relevant information.

    Parameters
    ----------
    folder : url
        folder in which to save json file with output data; ideally an absolute path
    client : str
        name of client
    client_masses : dict
        dict of client weights.
        Weight IDs are the strings used in the circular weighing scheme
    check_masses : dict or None
        dict of check weights as for std_masses, or None if no check weights are used
    std_masses : dict
        keys: 'MASSREF file', 'Sheet name', 'Set name', 'Set type', 'Set identifier', 'Calibrated',
        'Shape/Mark', 'Nominal (g)', 'Weight ID', 'mass values (g)', 'u_cal', 'uncertainties (' + MU_STR + 'g)',
        'u_drift'.
        Weight ID values must match those used in the circular weighing scheme
    inputdata : numpy structured array
        use format np.asarray(<data>, dtype =[('+ weight group', object), ('- weight group', object),
        ('mass difference (g)', 'float64'), ('balance uncertainty (ug)', 'float64')])
    nbc : bool, optional
        flag stored for later use; meaning not evident from this block — TODO confirm
    corr : numpy.ndarray, optional
        correlation matrix for the standard masses; used in do_least_squares
        to fill the off-diagonal terms of the measurement correlation matrix
    """
    self.folder = folder
    self.client = client
    self.filesavepath = os.path.join(folder, client + '_finalmasscalc.json')

    # Root metadata recorded in the output JSON file.
    metadata = {
        'Program Version': __version__,
        'Timestamp': datetime.now().isoformat(sep=' ', timespec='minutes'),
        "Client": client
    }
    self.finalmasscalc = JSONWriter(metadata=metadata)
    self.structure_jsonfile()

    self.client_masses = client_masses
    self.client_wt_IDs = client_masses["Weight ID"]
    self.check_masses = check_masses
    self.std_masses = std_masses
    self.inputdata = inputdata
    self.nbc = nbc
    self.corr = corr

    # Counters filled in by import_mass_lists().
    self.num_client_masses = None
    self.num_check_masses = None
    self.num_stds = None
    self.num_unknowns = None
    self.allmassIDs = None
    self.num_obs = None

    # Metadata accumulated during the least-squares solution.
    self.leastsq_meta = {}

    # Observation vectors; extended with standard-mass rows in
    # parse_inputdata_to_matrices().
    self.differences = np.empty(len(inputdata))
    self.uncerts = np.empty(len(inputdata))

    # Results populated by parse_inputdata_to_matrices()/do_least_squares().
    self.designmatrix = None
    self.inputdatares = None
    self.b = None
    self.psi_bmeas = None
    self.std_uncert_b = None
    self.summarytable = None
def structure_jsonfile(self):
    """Create the '1: Mass Sets' group and its three mass-set subgroups."""
    mass_sets = self.finalmasscalc.require_group('1: Mass Sets')
    for set_name in ('Client', 'Check', 'Standard'):
        mass_sets.require_group(set_name)
def import_mass_lists(self, ):
    """Record the client/check/standard mass sets in the JSON structure.

    Populates num_client_masses, num_check_masses, num_stds, num_unknowns,
    allmassIDs, num_obs and the corresponding leastsq_meta entries.
    Relies on make_stds_dataset (defined elsewhere in this module) to store
    the check and standard sets and return their sizes.
    """
    # import lists of masses from supplied info
    log.info('Beginning mass calculation for the following client masses:\n' + str(self.client_wt_IDs))
    # get client Weight IDs for metadata
    self.num_client_masses = len(self.client_wt_IDs)
    self.finalmasscalc['1: Mass Sets']['Client'].add_metadata(**{
        'Number of masses': self.num_client_masses,
        'Weight ID': self.client_wt_IDs
    })

    # get number of check masses, if used, and save as dataset
    if not self.check_masses:
        self.num_check_masses = 0
        check_wt_IDs = []
        self.finalmasscalc['1: Mass Sets']['Check'].add_metadata(**{
            'Number of masses': self.num_check_masses,
            'Set identifier': 'No check set'})
        log.info('Checks: None')
    else:
        check_wt_IDs = self.check_masses['Weight ID']
        self.num_check_masses = make_stds_dataset('Checks', self.check_masses, self.finalmasscalc['1: Mass Sets']['Check'])

    # get number of standards, and save as dataset
    self.num_stds = make_stds_dataset('Standards', self.std_masses, self.finalmasscalc['1: Mass Sets']['Standard'])

    self.num_unknowns = self.num_client_masses + self.num_check_masses + self.num_stds
    log.info('Number of unknowns = '+str(self.num_unknowns))
    # note that stds are grouped last
    self.allmassIDs = np.append(np.append(self.client_wt_IDs, check_wt_IDs), self.std_masses['Weight ID'])

    # Each standard mass contributes one extra "observation" row (its known value).
    self.num_obs = len(self.inputdata) + self.num_stds
    self.leastsq_meta['Number of observations'] = self.num_obs
    self.leastsq_meta['Number of unknowns'] = self.num_unknowns
    self.leastsq_meta['Degrees of freedom'] = self.num_obs - self.num_unknowns
def parse_inputdata_to_matrices(self, ):
    """Build the design matrix and the differences/uncerts vectors.

    Each measured mass difference becomes one row of the design matrix with
    +1 entries for masses in the '+ weight group' and -1 entries for the
    '- weight group'. One extra row per standard mass (entry 1) constrains
    the fit to the known standard values, which are appended to
    ``differences`` (in g) with their uncertainties (in ug) in ``uncerts``.
    """
    if self.allmassIDs is None:
        self.import_mass_lists()
    # Create design matrix and collect relevant data into differences and uncerts arrays
    designmatrix = np.zeros((self.num_obs, self.num_unknowns))
    rowcounter = 0

    log.debug('Input data: \n+ weight group, - weight group, mass difference (g), balance uncertainty (' + MU_STR + 'g)'
              '\n' + str(self.inputdata))
    for entry in self.inputdata:
        log.debug("{} {} {} {}".format(entry[0], entry[1], entry[2], entry[3]))
        # Both weight groups are handled identically apart from the sign.
        # Previously only the '+' group was guarded against an unknown mass
        # (empty np.where result -> IndexError in the debug lookup); the
        # '-' group now gets the same protection.
        for group, sign in ((entry[0], 1), (entry[1], -1)):
            for massid in group.split('+'):
                try:
                    position = np.where(self.allmassIDs == massid)
                    log.debug('mass ' + massid + ' is in position ' + str(position[0][0]))
                    designmatrix[rowcounter, position] = sign
                except IndexError:
                    log.error("Index error raised at mass {}".format(massid))
        self.differences[rowcounter] = entry[2]
        self.uncerts[rowcounter] = entry[3]
        rowcounter += 1

    # One constraint row per standard mass: its value is "observed" directly.
    for std in self.std_masses['Weight ID']:
        designmatrix[rowcounter, np.where(self.allmassIDs == std)] = 1
        rowcounter += 1

    self.differences = np.append(self.differences, self.std_masses['mass values (g)'])  # corresponds to Y, in g
    self.uncerts = np.append(self.uncerts, self.std_masses['uncertainties (' + MU_STR + 'g)'])  # balance uncertainties in ug
    log.debug('differences:\n' + str(self.differences))
    log.debug('uncerts:\n' + str(self.uncerts))

    self.designmatrix = designmatrix
def check_design_matrix(self,):
    """Verify that every unknown mass appears in at least one observation.

    A column of all zeros in the design matrix means that mass is never
    involved in any comparison, so its value would be unconstrained in the
    least-squares fit.

    Returns
    -------
    bool
        True when every column has at least one non-zero entry; otherwise
        logs each offending mass and returns False.
    """
    if self.designmatrix is None:
        self.parse_inputdata_to_matrices()

    # Vectorised column check replaces the previous per-element Python
    # loops (which also shadowed the builtin `sum`).
    column_weights = (self.designmatrix ** 2).sum(axis=0)
    error_tally = 0
    for i in range(self.num_unknowns):
        if not column_weights[i]:
            log.error(f"No comparisons in design matrix for {self.allmassIDs[i]}")
            error_tally += 1

    return error_tally == 0
def do_least_squares(self):
    """Solve the weighted least-squares system for the mass values.

    Populates ``b`` (solution vector, g), ``psi_bmeas`` (covariance of the
    solution), the residuals metadata in ``leastsq_meta``, and
    ``inputdatares`` (input data with residuals appended).

    Returns
    -------
    False when the design matrix check fails; otherwise None.
    """
    if not self.check_design_matrix():
        log.error("Error in design matrix. Calculation aborted")
        return False
    # Calculate least squares solution, following the mathcad example in Tech proc MSLT.M.001.008
    x = self.designmatrix
    xT = self.designmatrix.T

    # Hadamard product: element-wise multiplication
    # Outer product of the uncertainty vector with itself -> u_i * u_j.
    uumeas = np.vstack(self.uncerts) * np.hstack(self.uncerts)     # becomes square matrix dim num_obs
    rmeas = np.identity(self.num_obs)
    if type(self.corr) == np.ndarray:  # Add off-diagonal terms for correlations
        # Standard-mass rows sit at the end of the observation vector,
        # hence the len(self.inputdata) offset into rmeas.
        for mass1 in self.std_masses['Weight ID']:
            i = np.where(self.std_masses['Weight ID'] == mass1)
            for mass2 in self.std_masses['Weight ID']:
                j = np.where(self.std_masses['Weight ID'] == mass2)
                rmeas[len(self.inputdata)+i[0], len(self.inputdata)+j[0]] = self.corr[i, j]
        log.debug(f'rmeas matrix includes correlations for stds:\n{rmeas[:, len(self.inputdata)-self.num_obs:]}')

    # psi_y = (u_i * u_j) ∘ r_ij, skipping zero correlation entries.
    psi_y_hadamard = np.zeros((self.num_obs, self.num_obs))  # Hadamard product is element-wise multiplication
    for i in range(self.num_obs):
        for j in range(self.num_obs):
            if not rmeas[i, j] == 0:
                psi_y_hadamard[i, j] = uumeas[i, j] * rmeas[i, j]

    psi_y_inv = np.linalg.inv(psi_y_hadamard)

    # Generalised least squares: b = (X^T W X)^-1 X^T W y, W = psi_y^-1.
    psi_bmeas_inv = np.linalg.multi_dot([xT, psi_y_inv, x])
    self.psi_bmeas = np.linalg.inv(psi_bmeas_inv)

    self.b = np.linalg.multi_dot([self.psi_bmeas, xT, psi_y_inv, self.differences])
    log.debug('Mass values before corrections:\n'+str(self.b))

    r0 = (self.differences - np.dot(x, self.b))*1e6              # residuals, converted from g to ug
    sum_residues_squared = np.dot(r0, r0)
    self.leastsq_meta['Sum of residues squared (' + MU_STR + 'g^2)'] = np.round(sum_residues_squared, 6)
    log.debug('Residuals:\n'+str(np.round(r0, 4)))  # also save as column with input data for checking

    # Assemble the input data with residuals for later inspection; the last
    # num_stds rows are the standard-mass constraint "observations".
    inputdata = self.inputdata
    inputdatares = np.empty((self.num_obs, 5), dtype=object)
    # dtype =[('+ weight group', object), ('- weight group', object), ('mass difference (g)', object),
    #         ('balance uncertainty (ug)', 'float64'), ('residual (ug)', 'float64')])
    inputdatares[0:len(inputdata), 0] = inputdata['+ weight group']
    inputdatares[len(inputdata):, 0] = self.std_masses['Weight ID']
    inputdatares[0:len(inputdata), 1] = inputdata['- weight group']
    inputdatares[:, 2] = self.differences
    inputdatares[:, 3] = self.uncerts
    inputdatares[:, 4] = np.round(r0, 3)

    self.inputdatares = inputdatares
def check_residuals(self):
    """Flag observations whose residual exceeds twice the balance uncertainty.

    Runs the least-squares solution first if it has not been done yet.
    Offending '+group - -group' pairs are recorded under
    ``leastsq_meta['Residuals greater than 2 balance uncerts']``.
    """
    if self.inputdatares is None:
        self.do_least_squares()

    # check that the calculated residuals are less than twice the balance uncertainties in ug
    flag = []
    for entry in self.inputdatares:
        if np.absolute(entry[4]) > 2 * entry[3]:
            flag.append(str(entry[0]) + ' - ' + str(entry[1]))
            # Logger.warn is a deprecated alias of Logger.warning.
            log.warning(f"A residual for {entry[0]} - {entry[1]} is too large")

    if flag:
        self.leastsq_meta['Residuals greater than 2 balance uncerts'] = flag
def cal_rel_unc(self, ):
| |
import os
import socket
import sys
import tempfile
from collections import OrderedDict
from typing import List, Tuple
import click
import click_spinner
from src import settings
from src.cli import console
from src.graphql import GraphQL
from src.local.providers.helper import get_cluster_or_exit
from src.local.system import Docker, KubeAPI, KubeCtl, Telepresence
from src.settings import UNIKUBE_FILE
from src.unikubefile.selector import unikube_file_selector
def _is_local_port_free(port):
    """Return True if nothing accepts TCP connections on 127.0.0.1:*port*.

    ``connect_ex`` returning 0 means the connection succeeded, i.e. the
    port is in use.
    """
    # Use the socket as a context manager so the file descriptor is always
    # closed; the previous version leaked it.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as probe:
        return probe.connect_ex(("127.0.0.1", int(port))) != 0
def get_deck_from_arguments(ctx, organization_id: str, project_id: str, deck_id: str):
    """Resolve CLI arguments into a ``(cluster_data, deck)`` pair.

    Falls back to an interactive deck selection when no deck is given,
    fetches the deck via GraphQL, and verifies that the project cluster is
    up and known to the cluster manager. Exits the process on any failure.
    """
    # context
    organization_id, project_id, deck_id = ctx.context.get_context_ids_from_arguments(
        organization_argument=organization_id, project_argument=project_id, deck_argument=deck_id
    )

    # argument
    if not deck_id:
        deck_id = console.deck_list(ctx, organization_id=organization_id, project_id=project_id)
        if not deck_id:
            # sys.exit instead of the site-provided exit() builtin, which is
            # not guaranteed to be available in every interpreter setup.
            sys.exit(1)

    # GraphQL
    try:
        graph_ql = GraphQL(authentication=ctx.auth)
        data = graph_ql.query(
            """
            query($id: UUID) {
                deck(id: $id) {
                    id
                    title
                    environment {
                        namespace
                    }
                    project {
                        id
                    }
                }
            }
            """,
            query_variables={"id": deck_id},
        )
        deck = data["deck"]
        project_id = deck["project"]["id"]
    except Exception as e:
        console.debug(e)
        console.exit_generic_error()

    # cluster data
    cluster_list = ctx.cluster_manager.get_cluster_list(ready=True)
    if project_id not in [cluster.id for cluster in cluster_list]:
        console.info(f"The project cluster for '{project_id}' is not up or does not exist yet.", _exit=True)

    cluster_data = ctx.cluster_manager.get(id=project_id)
    if not cluster_data:
        console.error("The cluster could not be found.", _exit=True)

    return cluster_data, deck
def argument_apps(k8s, apps: List[str], multiselect: bool = False) -> List[str]:
    """Resolve and validate the list of app (pod) names to operate on.

    When *apps* is empty, the user is prompted to pick from the pods that
    are not Terminating/Evicted/Pending. All requested apps are then
    validated against the current pod names; exits on failure.
    """
    # Fetch the pod list once and reuse it for both the prompt choices and
    # the validation below (previously queried twice).
    pods = k8s.get_pods().items

    if not apps:
        app_choices = [
            pod.metadata.name
            for pod in pods
            if pod.status.phase not in ["Terminating", "Evicted", "Pending"]
        ]
        message = "Please select an app" if not multiselect else "Please select one or multiple apps"
        kwargs = {
            "message": message,
            "choices": app_choices,
            "multiselect": multiselect,
        }
        if multiselect:
            kwargs["transformer"] = lambda result: f"{', '.join(result)}"
            apps = console.list(**kwargs)
        else:
            apps = [console.list(**kwargs)]

    if not apps:
        console.error("No apps available.", _exit=True)

    # O(1) membership tests instead of rebuilding the name list per app.
    known_names = {pod.metadata.name for pod in pods}
    if apps and any(c_app not in known_names for c_app in apps):
        console.error("Some apps do not exist.", _exit=True)

    return apps
def argument_app(k8s, app: str) -> str:
    """Resolve a single app name, prompting interactively when *app* is falsy."""
    preselected = [app] if app else []
    return argument_apps(k8s, preselected)[0]
@click.command()
@click.option("--organization", "-o", help="Select an organization")
@click.option("--project", "-p", help="Select a project")
@click.option("--deck", "-d", help="Select a deck")
@click.pass_obj
def list(ctx, organization, project, deck, **kwargs):
    """List all apps."""
    cluster_data, deck = get_deck_from_arguments(ctx, organization, project, deck)

    # get cluster
    cluster = get_cluster_or_exit(ctx, cluster_data.id)
    provider_data = cluster.storage.get()

    # list
    k8s = KubeAPI(provider_data, deck)
    pod_table = []

    def _ready_ind(c) -> Tuple[bool, str]:
        """Return (all containers ready?, 'ready/total') for statuses *c*.

        *c* may be None for pods without container statuses; the previous
        version crashed with TypeError when summing over None.
        """
        statuses = c or []
        ready_count = sum(val.ready for val in statuses)
        return ready_count == len(statuses), f"{ready_count}/{len(statuses)}"

    for pod in k8s.get_pods().items:
        # Skip pods that are not (or no longer) serving.
        if pod.status.phase in ["Terminating", "Evicted", "Pending"]:
            continue
        all_ready, count = _ready_ind(pod.status.container_statuses)
        pod_table.append(
            OrderedDict({"name": pod.metadata.name, "ready": count, "state": "Ok" if all_ready else "Not Ok"})
        )

    console.table(
        data=pod_table,
        headers={
            "name": "Name",
            "ready": "Ready",
            "state": "State",
        },
    )
@click.command()
@click.argument("app", required=False)
@click.option("--organization", "-o", help="Select an organization")
@click.option("--project", "-p", help="Select a project")
@click.option("--deck", "-d", help="Select a deck")
@click.pass_obj
def info(ctx, app, organization, project, deck, **kwargs):
    """Display the status for the given app name."""
    cluster_data, deck = get_deck_from_arguments(ctx, organization, project, deck)

    # get cluster
    cluster = get_cluster_or_exit(ctx, cluster_data.id)
    provider_data = cluster.storage.get()

    # shell
    k8s = KubeAPI(provider_data, deck)
    app = argument_app(k8s, app)

    # get the data of the selected pod
    data = k8s.get_pod(app)
    pod_status = data.status

    console.info(f"This app runs {len(pod_status.container_statuses)} container(s).")
    for idx, status in enumerate(pod_status.container_statuses):
        console.info(f"Container {idx + 1}: {status.image}")

    # NOTE(review): `idx` and `status` below refer to the LAST container from
    # the loop above — the command/args and status tables only describe the
    # final container, not all of them. Confirm this is intended.
    print("\nStartup command from workload manifest:")
    console.table(
        [
            ("Command", " ".join(data.spec.containers[idx].command) if data.spec.containers[idx].command else None),
            ("Args", " ".join(data.spec.containers[idx].args) if data.spec.containers[idx].args else None),
        ]
    )

    print("\nApp status:")
    console.table(
        [
            {"State": "Running", "Value": status.state.running.started_at if status.state.running else None},
            {
                "State": "Terminated",
                "Value": status.state.terminated.finished_at if status.state.terminated else None,
            },
            {"State": "Waiting", "Value": status.state.waiting.message if status.state.waiting else None},
        ]
    )

    # Collect the pod's conditions for a chronological overview.
    conditions = []
    for condition in pod_status.conditions:
        conditions.append(
            OrderedDict(
                {
                    "type": condition.type,
                    "status": condition.status,
                    "reason": condition.reason,
                    "last_transition_time": condition.last_transition_time,
                    "last_probe_time": condition.last_probe_time,
                    "message": condition.message,
                }
            )
        )

    if conditions:
        # Oldest transition first.
        conditions = sorted(conditions, key=lambda x: x.get("last_transition_time").timestamp())
        # print a line for padding on the console
        print()
        console.info("All conditions for this app:")
        console.table(
            conditions,
            headers={
                "type": "Type",
                "status": "Status",
                "reason": "Reason",
                "last_transition_time": "Time",
                "last_probe_time": "Probe Time",
                "message": "Message",
            },
        )
    else:
        console.info("No condition to display")
@click.command()
@click.argument("app", required=False)
@click.option("--organization", "-o", help="Select an organization")
@click.option("--project", "-p", help="Select a project")
@click.option("--deck", "-d", help="Select a deck")
@click.option("--container", "-c", help="Specify the container in this app")
@click.pass_obj
def shell(ctx, app, organization=None, project=None, deck=None, container=None, **kwargs):
    """
    Drop into an interactive shell.
    """
    cluster_data, deck = get_deck_from_arguments(ctx, organization, project, deck)

    # get cluster
    cluster = get_cluster_or_exit(ctx, cluster_data.id)
    provider_data = cluster.storage.get()

    # shell
    k8s = KubeAPI(provider_data, deck)
    app = argument_app(k8s, app)

    # get the data of the selected pod
    data = k8s.get_pod(app)

    telepresence = Telepresence(provider_data)

    # the corresponding deployment by getting rid of the pod name suffix
    # (pod names look like <deployment>-<replicaset-hash>-<pod-hash>)
    deployment = "-".join(data.metadata.name.split("-")[0:-2])

    # 1. check if this pod is of a switched deployment (in case of an active Telepresence)
    if telepresence.is_swapped(deployment, namespace=data.metadata.namespace):
        # the container name generated in "app switch" for that pod
        container_name = settings.TELEPRESENCE_DOCKER_IMAGE_FORMAT.format(
            project=cluster_data.name.lower(), deck=deck["title"].lower(), name=deployment.lower()
        ).replace(":", "")

        if Docker().check_running(container_name):
            # 2. Connect to that container
            # 2.a connect using Docker
            Docker().exec(container_name, "/bin/sh", interactive=True)
        else:
            console.error(
                "This is a Telepresence Pod with no corresponding Docker container "
                "running in order to connect (inconsistent state?)"
            )
    else:
        # Let the user pick a container when the pod has several and none
        # was given on the command line; abort silently if nothing chosen.
        if not container and len(data.spec.containers) > 1:
            container = console.container_list(data=data)
            if not container:
                return None

        # 2.b connect using kubernetes
        KubeCtl(provider_data).exec_pod(
            app, deck["environment"][0]["namespace"], "/bin/sh", interactive=True, container=container
        )
@click.command()
@click.argument("app", required=False)
@click.option("--organization", "-o", help="Select an organization")
@click.option("--project", "-p", help="Select a project")
@click.option("--deck", "-d", help="Select a deck")
@click.pass_context
def exec(ctx, **kwargs):
    # Alias for "shell": forward all received arguments/options unchanged.
    # Intentionally no docstring — adding one would change the click --help text.
    ctx.forward(shell)
@click.command()
@click.argument("app", required=False)
@click.option("--organization", "-o", help="Select an organization")
@click.option("--project", "-p", help="Select a project")
@click.option("--deck", "-d", help="Select a deck")
@click.option("--deployment", help="Specify the deployment if not set in the Unikubefile")
@click.option("--unikubefile", help="Specify the path to the Unikubefile", type=str)
@click.option(
"--no-build", "-n", is_flag=True, help="Do not build a new container image for the switch operation", default=False
)
@click.pass_obj
def switch(ctx, app, organization, project, deck, deployment, unikubefile, no_build, **kwargs):
"""
Switch a running deployment with a local Docker container.
"""
cluster_data, deck = get_deck_from_arguments(ctx, organization, project, deck)
# get cluster
cluster = get_cluster_or_exit(ctx, cluster_data.id)
# unikube file input
if unikubefile:
path_unikube_file = unikubefile
else:
path_unikube_file = os.path.join(os.getcwd(), UNIKUBE_FILE)
unikube_file = unikube_file_selector.get(path_unikube_file=path_unikube_file)
# 2: Get a deployment
# 2.1.a Check the deployment identifier
if not deployment and unikube_file:
# 1.1.b check the unikubefile
deployment = unikube_file.get_deployment()
if not deployment:
console.error("Please specify the 'deployment' key of your app in your unikube.yaml.", _exit=True)
else:
console.error(
"Please specify the deployment either using the '--deployment' option or in the unikube.yaml. "
"Run 'unikube app switch' in a directory containing the unikube.yaml file.",
_exit=True,
)
# 2.2 Fetch available "deployment:", deployments
# GraphQL
try:
graph_ql = GraphQL(authentication=ctx.auth)
data = graph_ql.query(
"""
query($id: UUID) {
deck(id: $id) {
deployments(level: "local") {
id
title
description
ports
isSwitchable
}
environment {
id
type
valuesPath
namespace
}
}
}
""",
query_variables={
"id": deck["id"],
},
)
except Exception as e:
console.debug(e)
console.exit_generic_error()
target_deployment = None
for _deployment in data["deck"]["deployments"]:
if _deployment["title"] == deployment:
target_deployment = _deployment
# 2.3 Check and select deployment data
if target_deployment is None:
console.error(
f"The deployment '{deployment}' you specified could not be found.",
_exit=True,
)
ports = target_deployment["ports"].split(",")
deployment = target_deployment["title"]
namespace = deck["environment"][0]["namespace"]
console.info("Please wait while unikube prepares the switch.")
with click_spinner.spinner(beep=False, disable=False, force=False, stream=sys.stdout):
# check telepresence
provider_data = cluster.storage.get()
telepresence = Telepresence(provider_data)
available_deployments = telepresence.list(namespace, flat=True)
if deployment not in available_deployments:
console.error(
"The given deployment cannot be switched. " f"You may have to run 'unikube deck install {deck}' first.",
_exit=True,
)
is_swapped = telepresence.is_swapped(deployment, namespace)
k8s = KubeAPI(provider_data, deck)
# service account token, service cert
service_account_tokens = k8s.get_serviceaccount_tokens(deployment)
# 3: Build an new Docker image
# 3.1 Grab the docker file
context, dockerfile, target = unikube_file.get_docker_build()
console.debug(f"{context}, {dockerfile}, {target}")
# 3.2 Set an image name
image_name = settings.TELEPRESENCE_DOCKER_IMAGE_FORMAT.format(
project=cluster_data.name.replace(" ", "").lower(), deck=deck["title"], name=deployment
)
docker = Docker()
if is_swapped:
console.warning("It seems this app is already switched in another process. ")
if click.confirm("Do you want to kill it and switch here?"):
telepresence.leave(deployment, namespace, silent=True)
if docker.check_running(image_name):
docker.kill(name=image_name)
else:
sys.exit(0)
# 3.3 Build image
if not docker.image_exists(image_name) or not no_build:
if no_build:
console.warning(f"Ignoring --no-build since the required image '{image_name}' does not exist")
console.info(f"Building a Docker image for {dockerfile} with context {context}")
with click_spinner.spinner(beep=False, disable=False, force=False, stream=sys.stdout):
status, msg = docker.build(image_name, context, dockerfile, target)
if not status:
console.debug(msg)
console.error("Failed to build Docker image.", _exit=True)
console.info(f"Docker image successfully built: {image_name}")
# 4. Start the | |
"""
nucs = set()
for child in self.getChildren():
nucs.update(child.getNuclides())
return nucs
def getFissileMass(self):
    """Return the fissile mass of this object in grams."""
    fissileNucs = nuclideBases.NuclideBase.fissile
    return self.getMass(fissileNucs)
def getHMMass(self):
    """Return the heavy metal mass of this object in grams.

    Heavy metal nuclides are identified via ``nucDir.isHeavyMetal``.
    """
    # Comprehension replaces the previous build-by-append loop.
    hmNucNames = [nucName for nucName in self.getNuclides() if nucDir.isHeavyMetal(nucName)]
    return self.getMass(hmNucNames)
def getHMMoles(self):
    """
    Get the number of moles of heavy metal in this object in full symmetry.

    Notes
    -----
    If an object is on a symmetry line, the number of moles will be scaled up by the
    symmetry factor. This is done because this is typically used for tracking
    burnup, and BOL moles are computed in full objects too so there are no
    complications as things move on and off of symmetry lines.

    Warning
    -------
    getHMMoles is different than every other get mass call since it multiplies by
    symmetry factor but getVolume() on the block level divides by symmetry factor
    causing them to cancel out.

    This was needed so that HM moles mass did not change based on if the
    block/assembly was on a symmetry line or not.
    """
    molesPerCc = self.getHMDens() / units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM
    fullSymmetryVolume = self.getVolume() * self.getSymmetryFactor()
    return molesPerCc * fullSymmetryVolume
def getHMDens(self):
    """
    Compute the total heavy metal number density of this object.

    Returns
    -------
    hmDens : float
        The total heavy metal number (atom) density in atoms/bn-cm.
    """
    heavyMetals = []
    for nuclide in self.getNuclides():
        if nucDir.isHeavyMetal(nuclide):
            heavyMetals.append(nuclide)
    return sum(self.getNuclideNumberDensities(heavyMetals))
def getPuMass(self):
    """Get the mass of Pu (Z=94) in this object in grams."""
    # Direct comprehension; the old loop copied an inline comprehension
    # element-by-element into another list.
    puNucNames = [nuc.name for nuc in elements.byZ[94].nuclideBases]
    return self.getMass(puNucNames)
def getPuFrac(self):
    """
    Compute the Pu/HM mass fraction in this object.

    Returns
    -------
    puFrac : float
        The Pu mass fraction in heavy metal; 0.0 when there is no heavy metal.
    """
    hmMass = self.getHMMass()
    if hmMass == 0.0:
        return 0.0
    return self.getPuMass() / hmMass
def getZrFrac(self):
    """Return the total Zr/(HM+Zr) mass fraction in this assembly (0.0 when both are zero)."""
    zrNucNames = [nuc.name for nuc in elements.bySymbol["ZR"].nuclideBases]
    zrMass = self.getMass(zrNucNames)
    hmMass = self.getHMMass()
    denominator = hmMass + zrMass
    if denominator > 0:
        return zrMass / denominator
    return 0.0
def getMaxUraniumMassEnrich(self):
    """Return the maximum uranium mass enrichment among children (floored at 0)."""
    # The 0 seed means negative enrichments (or no children) still yield 0,
    # matching the original accumulator behaviour.
    maxEnrich = 0
    for child in self:
        maxEnrich = max(maxEnrich, child.getUraniumMassEnrich())
    return maxEnrich
def getFPMass(self):
    """Return the mass of (lumped) fission products in this block in grams.

    Fission-product nuclides are identified by "LFP" appearing in the name.
    """
    # Comprehension replaces the previous build-by-append loop.
    fpNucNames = [nucName for nucName in self.getNuclides() if "LFP" in nucName]
    return self.getMass(fpNucNames)
def getFuelMass(self):
    """Return the total mass of all fuel components in grams."""
    total = 0.0
    for fuel in self.iterComponents(Flags.FUEL):
        total += fuel.getMass()
    return total
def constituentReport(self):
    """Return a formatted table of U/Pu/FP/MA mass percentages vs. HM and fuel mass."""
    from armi.utils import iterables

    rows = [["Constituent", "HMFrac", "FuelFrac"]]
    # columns[0] is a -1 sentinel: dividing the constituent *name* by it
    # raises TypeError below, which leaves the name string untouched.
    columns = [-1, self.getHMMass(), self.getFuelMass()]

    for base_ele in ["U", "PU"]:
        total = sum(
            [self.getMass(nuclide.name) for nuclide in elements.bySymbol[base_ele]]
        )
        rows.append([base_ele, total, total])

    fp_total = self.getFPMass()
    rows.append(["FP", fp_total, fp_total])

    # Minor actinides: every element with Z > 94.
    ma_nuclides = iterables.flatten(
        [
            ele.nuclideBases
            for ele in [
                elements.byZ[key] for key in elements.byZ.keys() if key > 94
            ]
        ]
    )
    ma_total = sum([self.getMass(nuclide.name) for nuclide in ma_nuclides])
    rows.append(["MA", ma_total, ma_total])

    # Convert masses in place to percentages of the column totals.
    for i, row in enumerate(rows):
        for j, entry in enumerate(row):
            try:
                percent = entry / columns[j] * 100.0
                # NOTE: an exact 0% is rendered as "-" (falsy value).
                rows[i][j] = percent or "-"
            except ZeroDivisionError:
                rows[i][j] = "NaN"
            except TypeError:
                pass  # trying to divide the string name
    return "\n".join(["{:<14}{:<10}{:<10}".format(*row) for row in rows])
def getAtomicWeight(self):
    r"""
    Calculate the atomic weight of this object in g/mole of atoms.

    .. warning:: This is not the molecular weight, which is grams per mole of
        molecules (grams/gram-molecule). That requires knowledge of the chemical
        formula. Don't be surprised when you run this on UO2 and find it to be 90;
        there are a lot of Oxygen atoms in UO2.

    .. math::

        A = \frac{\sum_i N_i A_i }{\sum_i N_i}
    """
    weightedSum = 0.0
    totalDensity = 0.0
    for nucName, nDen in self.getNumberDensities().items():
        weightedSum += nuclideBases.byName[nucName].weight * nDen
        totalDensity += nDen
    # Raises ZeroDivisionError when there are no nuclides, as before.
    return weightedSum / totalDensity
def getMasses(self):
    """
    Return a dictionary of masses indexed by their nuclide names.

    Notes
    -----
    Implemented to get number densities and then convert to mass
    because getMass is too slow on a large tree.
    """
    vol = self.getVolume()
    masses = {}
    for nucName, ndens in self.getNumberDensities().items():
        masses[nucName] = densityTools.getMassInGrams(nucName, vol, ndens)
    return masses
def getIntegratedMgFlux(self, adjoint=False, gamma=False):
    """Return the volume-integrated multigroup flux; must be implemented by subclasses."""
    raise NotImplementedError
def getMgFlux(self, adjoint=False, average=False, volume=None, gamma=False):
    """
    Return the multigroup neutron flux in [n/cm^2/s].

    The first entry is the first energy group (fastest neutrons). Each additional
    group is the next energy group, as set in the ISOTXS library.

    On blocks, it is stored integrated over volume on <block>.p.mgFlux

    Parameters
    ----------
    adjoint : bool, optional
        Return adjoint flux instead of real
    average : bool, optional
        If true, will return average flux between latest and previous. Doesn't work
        for pin detailed yet
    volume: float, optional
        If average=True, the volume-integrated flux is divided by volume before
        being returned. The user may specify a volume here, or the function will
        obtain the block volume directly.
    gamma : bool, optional
        Whether to return the neutron flux or the gamma flux.

    Returns
    -------
    flux : numpy.array
        multigroup neutron flux in [n/cm^2/s]
    """
    if average:
        raise NotImplementedError(
            "{} class has no method for producing average MG flux -- try"
            "using blocks".format(self.__class__)
        )

    # Fall back to the object's own volume when none (or 0) is supplied.
    volume = volume or self.getVolume()
    integratedFlux = self.getIntegratedMgFlux(adjoint=adjoint, gamma=gamma)
    return integratedFlux / volume
def removeMass(self, nucName, mass):
    """Remove ``mass`` grams of ``nucName`` by adding the negated mass."""
    negatedMass = -mass
    self.addMass(nucName, negatedMass)
def addMass(self, nucName, mass):
    """
    Add mass of a nuclide to this armi object by bumping its number density.

    Parameters
    ----------
    nucName : str
        nuclide name e.g. 'U235'
    mass : float
        mass in grams of nuclide to be added to this armi Object
    """
    vol = self.getVolume()
    deltaNDens = densityTools.calculateNumberDensity(nucName, mass, vol)
    newNDens = self.getNumberDensity(nucName) + deltaNDens
    self.setNumberDensity(nucName, newNDens)
def addMasses(self, masses):
    """
    Add a vector of masses.

    Parameters
    ----------
    masses : dict
        a dictionary of masses (g) indexed by nucNames (string)
    """
    for nucName in masses:
        grams = masses[nucName]
        # Skip zero/empty entries; adding nothing is a no-op anyway.
        if grams:
            self.addMass(nucName, grams)
def setMass(self, nucName, mass):
    """
    Set the mass in an object by adjusting the ndens of the nuclides.

    Parameters
    ----------
    nucName : str
        Nuclide name to set mass of
    mass : float
        Mass in grams to set.
    """
    # Consistency fix: use the same densityTools helper that addMass uses;
    # the bare calculateNumberDensity name is not referenced anywhere else
    # in this module and may not be in scope.
    ndens = densityTools.calculateNumberDensity(nucName, mass, self.getVolume())
    self.setNumberDensity(nucName, ndens)
def setMasses(self, masses):
    """
    Set a vector of masses, clearing all existing number densities first.

    Parameters
    ----------
    masses : dict
        a dictionary of masses (g) indexed by nucNames (string)
    """
    self.clearNumberDensities()
    for nucName, grams in masses.items():
        self.setMass(nucName, grams)
def getSymmetryFactor(self):
    """
    Return a scaling factor due to symmetry on the area of the object or its children.

    See Also
    --------
    armi.reactor.blocks.HexBlock.getSymmetryFactor : concrete implementation
    """
    # Base implementation: object is not split by any symmetry line.
    return 1.0
def getBoundingIndices(self):
    """
    Find the 3-D index bounds (min, max) of all children in the spatial grid of this object.

    Returns
    -------
    bounds : tuple
        ((minI, maxI), (minJ, maxJ), (minK, maxK))
    """
    inf = float("inf")
    # Each axis tracks [minimum, maximum]; start at (+inf, -inf) so any
    # real index replaces the sentinel.
    iBounds = [inf, -inf]
    jBounds = [inf, -inf]
    kBounds = [inf, -inf]
    for child in self:
        i, j, k = child.spatialLocator.getCompleteIndices()
        iBounds = [min(iBounds[0], i), max(iBounds[1], i)]
        jBounds = [min(jBounds[0], j), max(jBounds[1], j)]
        kBounds = [min(kBounds[0], k), max(kBounds[1], k)]
    return (tuple(iBounds), tuple(jBounds), tuple(kBounds))
def getComponentNames(self):
r"""
Get all unique component names of this Composite.
Returns
-------
set or str
A set of all unique component names found in this Composite.
"""
return set(c.getName() for c in self.iterComponents())
def getComponentsOfShape(self, shapeClass):
    """
    Return list of components in this block of a particular shape.

    Parameters
    ----------
    shapeClass : Component
        The class of component, e.g. Circle, Helix, Hexagon, etc.

    Returns
    -------
    param : list
        List of components in this block that are of the given shape.
    """
    matches = []
    for component in self.iterComponents():
        if isinstance(component, shapeClass):
            matches.append(component)
    return matches
def getComponentsOfMaterial(self, material=None, materialName=None):
"""
Return list of components in this block that are made of a particular material
Only one of the selectors may be used
Parameters
----------
material : Material object, optional
The material to match
materialName : str, optional
The material name to match.
Returns
-------
componentsWithThisMat | |
# -*- coding=utf-8 -*- #
# @author: zhangyihao
# @date: 2021-01
'''爬取指定网站的讲座信息并保存至本地
功能介绍
1. Selenium框架
2. 全部/指定网站爬取
3. 接口支持后期拓展
Typical usage example:
sp=SpiderMan()
sp.scrapy()
'''
from selenium import webdriver
import requests
import pandas as pd
import time
import os
from bs4 import BeautifulSoup
import warnings
import argparse
warnings.filterwarnings("ignore")
# TODO(<EMAIL>):
# 1. 定时爬虫
# 2. 详细信息精准提取
def get_args():
    """Parse command-line options for the spider.

    Returns
    -------
    argparse.Namespace
        ``chrome``: path to the chromedriver binary; ``target_web``: which
        site scraper to run; ``if_all``: crawl every supported site at once.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--chrome', type=str, default='./chromedriver.exe')
    # sjtu_law_seminar, ecupl_seminar, ecupl_gjf_seminar, shupl_seminar
    # sufe_law_seminar, ecupl_ipschool_seminar, shu_law_seminar, lawyers_seminar
    parser.add_argument('--target_web', type=str, default='shu_law_seminar')
    # Fix: ``type=bool`` treats ANY non-empty string (including "False") as
    # True. A store_true flag gives the intended boolean behavior; the
    # default stays False, so existing callers are unaffected.
    parser.add_argument('--if_all', action='store_true', default=False)
    args = parser.parse_known_args()[0]
    return args
class SpiderMan(object):
def __init__(self, args=None):
    """Build the spider: record the target site and launch the Chrome driver.

    Parameters
    ----------
    args : argparse.Namespace, optional
        Parsed CLI options; resolved via ``get_args()`` when omitted.

    Fix: the old default ``args=get_args()`` ran argparse once at class
    definition time (module import), not per instance; defaults are now
    resolved lazily at call time.
    """
    if args is None:
        args = get_args()
    self.target_web = args.target_web
    self.if_all = args.if_all  # crawl every supported site in one run?
    self.driver = webdriver.Chrome(args.chrome)
    self.headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 '
                                  '(KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',}
    self.driver.implicitly_wait(10)
    self.chrome = args.chrome
def scrapy(self):
    """Dispatch to the scraper matching ``self.target_web``.

    Bug fix: the original compared strings with ``is``, which tests object
    identity and is not a reliable equality test for strings (it only worked
    by accident of CPython literal interning); replaced with ``==``.
    """
    if self.if_all:
        # Crawl-all-sites mode is not implemented yet.
        pass
    else:
        if self.target_web == 'sjtu_law_seminar':
            case_url = 'https://law.sjtu.edu.cn/Article0903_0.aspx'  # make sure this opens; otherwise update it
            url = 'https://law.sjtu.edu.cn/Article0903_{}.aspx'
            self.sjtu_law_seminar(url, pages=2)  # set the number of pages to crawl
        elif self.target_web == 'ecupl_seminar':
            case_url = 'https://www.ecupl.edu.cn/649/list1.htm'
            url = 'https://www.ecupl.edu.cn/649/list{}.htm'
            self.ecupl_seminar(url, pages=2)
        elif self.target_web == 'ecupl_gjf_seminar':
            case_url = 'https://gjf.ecupl.edu.cn/8595/list.htm'
            url = 'https://gjf.ecupl.edu.cn/8595/list{}.htm'
            self.ecupl_gjf_seminar(url, pages=2)  # paginated
        elif self.target_web == 'shupl_seminar':
            case_url = 'http://www.shupl.edu.cn/1219/list.htm'
            url = 'http://www.shupl.edu.cn/1219/list{}.htm'
            self.shupl_seminar(url, pages=2)
        elif self.target_web == 'sufe_law_seminar':
            case_url = 'http://law.sufe.edu.cn/jzyg2/list.htm'
            url = 'http://law.sufe.edu.cn/jzyg2/list{}.htm'
            self.sufe_law_seminar(url, pages=2)
        elif self.target_web == 'ecupl_ipschool_seminar':
            case_url = 'https://ipschool.ecupl.edu.cn/3962/list.htm'
            url = 'https://ipschool.ecupl.edu.cn/3962/list{}.htm'
            self.ecupl_ipschool_seminar(url, pages=2)
        elif self.target_web == 'shu_law_seminar':
            case_url = 'https://law.shu.edu.cn/zxzx/jzhyth.htm'
            url = 'https://law.shu.edu.cn/zxzx/jzhyth.htm'
            self.shu_law_seminar(url)  # first page only
        else:
            case_url = 'http://www.lawyers.org.cn/member/unionnotice?currentPageNo=1'
            url = 'http://www.lawyers.org.cn/member/unionnotice?currentPageNo={}'
            self.lawyers_seminar(url, pages=2)  # paginated
def sjtu_law_seminar(self,url: str, pages: int) -> None:
    """Scrape lecture listings from SJTU KoGuan Law School.

    Args:
        url:
            Page URL template; ``url.format(n)`` yields page ``n``.
        pages:
            Number of pages to crawl (exclusive bound of ``range(1, pages)``).

    Returns:
        None
    """
    count = 0
    organizer = '上海交通大学凯源法学院'
    ## columns: |id|organizer|title|post date|speaker|lecture date|venue
    ids, organizers, titles_, post_dates, presenters,pre_dates, addresses = [], [],[],[],[],[],[]
    # Conference/symposium entries are recorded separately (url + title only).
    other_webs,others_titles=[],[]
    for num in range(1,pages):
        print(url.format(num))
        self.driver.get(url.format(num))
        mainWindow = self.driver.current_window_handle
        elements = self.driver.find_elements_by_css_selector('.body1 li')
        if not elements:
            # Selector matched nothing: the site layout changed, skip the page.
            print('该网站布局改变,跳过!!')
            continue
        else:
            for ele in elements:
                times = ele.find_elements_by_css_selector('.box_r')
                titles = ele.find_elements_by_css_selector('a')
                for ind, _ in enumerate(titles):
                    flag=0
                    # Entries tagged 【会议】/【研讨会】 (conference/symposium):
                    # only keep the listing URL and the title.
                    if titles[ind].text.startswith('【会议】') or titles[ind].text.startswith('【研讨会】'):
                        other_webs.append(url.format(num))
                        others_titles.append(titles[ind].text)
                        continue
                    # search in page
                    print(titles[ind].text)  # title
                    try:
                        self.driver.find_element_by_css_selector("[title='{}']".format(titles[ind].text)).click()
                        #css="//*[starts-with(text(),'{}')]".format(titles[ind].text)
                        #print(css)
                        #self.driver.find_element_by_css_selector(css).click()
                    except Exception:
                        # The anchor's @title may carry trailing spaces; retry
                        # with up to 4 appended spaces before giving up.
                        title_text = titles[ind].text
                        cnt=0
                        while True:
                            title_text = title_text + ' '
                            cnt+=1
                            m=self.driver.find_elements_by_css_selector("[title='{}']".format(title_text))
                            if not m:
                                if cnt >= 4:
                                    flag=1  # give up on this entry
                                    break
                                else:
                                    continue
                            else:
                                m[0].click()
                                break
                        #css = "//*[starts-with(text(),'{}')]".format(titles[ind].text)
                        #print(css)
                        #self.driver.find_element_by_css_selector(css).click()
                    if flag==1:
                        continue
                    titles_.append(titles[ind].text)
                    post_dates.append(times[ind].text)
                    count += 1
                    ids.append(count)
                    organizers.append(organizer)
                    self.driver.switch_to.window(self.driver.window_handles[1])  # first of the newly opened windows
                    # print(self.driver.current_url)
                    time.sleep(2)
                    ######### speaker: special handling #######
                    try:
                        speaker = self.driver.find_element_by_xpath("//*[starts-with(text(),'主讲')]")
                        presenters.append(speaker.text.split(':')[1])
                    except Exception:
                        # Fall back to the parent of a span starting with '主'.
                        speaker_father = self.driver.find_elements_by_xpath(
                            "//span[1 and starts-with(text(),'主')]/..")
                        if not speaker_father:
                            print('no speaker')
                            presenters.append('NAN')
                        else:
                            father_sons = speaker_father[0].find_elements_by_xpath("span")
                            content = ''
                            if len(father_sons) == 1:
                                # Text is wrapped in multiple spans nested under one span.
                                speaker_father_sons = father_sons[0].find_elements_by_xpath("span")
                                for so in speaker_father_sons:
                                    content = content + so.text
                            else:
                                for line in father_sons:
                                    content = content + line.text
                            presenters.append(content)
                    ######### lecture date/time: special handling #######
                    try:
                        pre_date = self.driver.find_element_by_xpath(
                            "//*[starts-with(text(),'时间') or starts-with(text(),'讲座时间') or contains(text(),'间:')]")
                        pre_dates.append(pre_date.text.split(':')[1])
                    except Exception:
                        speaker_father = self.driver.find_elements_by_xpath(
                            "//span[1 and starts-with(text(),'时')]/..")
                        if not speaker_father:
                            print('no time')
                            pre_dates.append('NAN')
                        else:
                            father_sons = speaker_father[0].find_elements_by_xpath("span")
                            content = ''
                            if len(father_sons) == 1:
                                # Text is wrapped in multiple spans nested under one span.
                                speaker_father_sons = father_sons[0].find_elements_by_xpath("span")
                                for so in speaker_father_sons:
                                    content = content + so.text
                            else:
                                for line in father_sons:
                                    content = content + line.text
                            pre_dates.append(content)
                    ######### venue: special handling #######
                    try:
                        address = self.driver.find_element_by_xpath("//*[contains(text(),'地点')]")  # starts-with
                        addresses.append(address.text.split(':')[1])
                    except Exception:
                        speaker_father = self.driver.find_elements_by_xpath(
                            "//span[1 and contains(text(),'地')]/..")  # starts-with
                        if not speaker_father:
                            print('no place')
                            addresses.append('NAN')
                        else:
                            father_sons = speaker_father[0].find_elements_by_xpath("span")
                            content = ''
                            if len(father_sons) == 1:
                                # Text is wrapped in multiple spans nested under one span.
                                speaker_father_sons = father_sons[0].find_elements_by_xpath("span")
                                for so in speaker_father_sons:
                                    content = content + so.text
                            else:
                                for line in father_sons:
                                    content = content + line.text
                            addresses.append(content)
                    # Sanity check: all column lists must stay the same length.
                    print(len(ids),len(organizers),len(titles_),len(post_dates),len(presenters),len(pre_dates),len(addresses))
                    self.driver.close()
                    self.driver.switch_to.window(mainWindow)
    # Persist results.
    data = {'编号': ids, '主办单位': organizers, '标题': titles_, '公告日期': post_dates,
            '主讲人': presenters, '讲座日期': pre_dates, '讲座地址': addresses}
    res=pd.DataFrame(data)
    res.to_csv(os.path.join('data','{}_results.csv'.format(self.target_web)))
    print(res)
    other_data={'标题':others_titles,'网址':other_webs}
    other_res = pd.DataFrame(other_data)
    print(other_res)
    other_res.to_csv(os.path.join('data','{}_other_results.csv'.format(self.target_web)))
    self.driver.quit()
def ecupl_seminar(self,url: str, pages: int) -> None:
    """Scrape lecture listings from the ECUPL main site.

    Args:
        url: Page URL template; ``url.format(n)`` yields page ``n``.
        pages: Number of pages to crawl (exclusive bound of ``range(1, pages)``).
    """
    count = 0
    organizer = '华东政法大学官网'
    ## columns: |id|organizer|title|post date|content
    ids, organizers, titles_, post_dates, contents = [], [], [], [], []
    for num in range(1, pages):
        print(url.format(num))
        self.driver.get(url.format(num))
        mainWindow = self.driver.current_window_handle
        elements = self.driver.find_elements_by_css_selector('.list_item')
        for ele in elements:
            times = ele.find_elements_by_css_selector('.Article_PublishDate')
            titles = ele.find_elements_by_css_selector('.Article_Title')
            for ind, _ in enumerate(titles):
                # search in page
                print(titles[ind].text)  # title
                titles_.append(titles[ind].text)
                post_dates.append(times[ind].text)
                count += 1
                ids.append(count)
                organizers.append(organizer)
                # path="//*[starts-with(@title,'{}')]".format(titles[ind].text)
                # (bare-string note kept verbatim below: must use
                # find_element_by_xpath here, not find_element_by_css_selector)
                '''
                此处应该用find_element_by_xpath 而不是 find_element_by_css_selector
                '''
                try:
                    self.driver.find_element_by_xpath("//*[starts-with(@title,'{}')]".format(titles[ind].text)).click()
                except Exception:
                    # Long titles are truncated in @title; match on the first
                    # 42 characters instead.
                    part_title=titles[ind].text[:42]
                    self.driver.find_element_by_xpath(
                        "//*[starts-with(@title,'{}')]".format(part_title)).click()
                time.sleep(1)
                # self.driver.find_element_by_css_selector(
                #     "[title='{}']".format(titles[ind].text)).click()
                # time.sleep(1)
                hand= self.driver.window_handles
                self.driver.switch_to.window(hand[1])  # switch to the newest window handle
                currentPageUrl = self.driver.current_url
                # Fetch the article body with requests + BeautifulSoup.
                html_ = requests.get(currentPageUrl, headers=self.headers)
                html_.encoding = 'utf-8'
                text = html_.text
                soup = BeautifulSoup(text, 'html.parser')
                lines = soup.find_all('div', class_='wp_articlecontent')
                contents.append(lines[0].text)
                self.driver.close()
                self.driver.switch_to.window(mainWindow)
    # Persist results.
    data = {'编号': ids, '主办单位': organizers, '标题': titles_, '公告日期': post_dates,'内容': contents}
    res = pd.DataFrame(data)
    res.to_csv(os.path.join('data','{}_results.csv'.format(self.target_web)))
    self.driver.quit()
def ecupl_gjf_seminar(self, url: str, pages: int):
    """Scrape lecture listings from the ECUPL International Law School.

    This site does not open articles in a new window, so after collecting
    the titles the driver is torn down and re-created for every article to
    avoid page-refresh/staleness issues.
    """
    count = 0
    organizer = '华东政法大学国际法学院'
    ## columns: |id|organizer|title|post date|content
    ids, organizers, titles_, post_dates, contents = [], [], [], [], []
    mainWindow = self.driver.current_window_handle
    mainPageUrl = self.driver.current_url
    for num in range(1, pages):
        print(url.format(num))
        self.driver.get(url.format(num))
        elements = self.driver.find_elements_by_css_selector('h3 a')
        for ind,ele in enumerate(elements):  # non-popup pages: collect titles first to dodge refresh issues
            title = ele.text
            print(title)
            titles_.append(title)
            count += 1
            ids.append(count)
            organizers.append(organizer)
    self.driver.close()
    print('**********************')
    for val in titles_:
        print(val)
        # NOTE(review): ``num`` here is the last value of the page loop above,
        # so every title is looked up on the final listing page — confirm this
        # is intended for pages > 2.
        self.driver = webdriver.Chrome(self.chrome)
        self.driver.get(url.format(num))
        self.driver.find_element_by_css_selector(
            "[title='{}']".format(val)).click()
        time.sleep(1)
        hand = self.driver.window_handles
        self.driver.switch_to.window(hand[0])  # switch to the top window handle
        currentPageUrl = self.driver.current_url
        html_ = requests.get(currentPageUrl, headers=self.headers)
        html_.encoding = 'utf-8'
        text = html_.text
        soup = BeautifulSoup(text, 'html.parser')
        lines = soup.find_all('div', class_='wp_articlecontent')
        try:
            contents.append(lines[0].text)
        except Exception:
            # Article body not found; keep column lengths aligned.
            contents.append([])
        self.driver.close()
        #self.driver.switch_to.window(mainWindow)
        #self.driver.get(url.format(num))
        #time.sleep(1)
    # Persist results.
    data = {'编号': ids, '主办单位': organizers, '标题': titles_, '内容': contents}
    res = pd.DataFrame(data)
    res.to_csv(os.path.join('data','{}_results.csv'.format(self.target_web)))
    self.driver.quit()
def shupl_seminar(self,url,pages):
    """Scrape lecture listings from Shanghai University of Political Science and Law.

    Args:
        url: Page URL template; ``url.format(n)`` yields page ``n``.
        pages: Number of pages to crawl (exclusive bound of ``range(1, pages)``).
    """
    count = 0
    organizer = '上海政法学院'
    ## columns: |id|organizer|title|post date|content
    ids, organizers, titles_, post_dates, contents = [], [], [], [], []
    for num in range(1, pages):
        print(url.format(num))
        self.driver.get(url.format(num))
        mainWindow = self.driver.current_window_handle
        elements = self.driver.find_elements_by_xpath('//*[starts-with(@class,"column-news-item")]')
        for ele in elements:
            times = ele.find_elements_by_xpath('./span[2]')  # the leading '.' is required, else the search is global
            titles = ele.find_elements_by_xpath('./span[1]')
            for ind, _ in enumerate(titles):
                # search in page
                print(titles[ind].text)  # title
                titles_.append(titles[ind].text)
                post_dates.append(times[ind].text)
                print(times[ind].text)
                count += 1
                ids.append(count)
                organizers.append(organizer)
                try:
                    self.driver.find_element_by_xpath(
                        "//*[starts-with(text(),'{}')]".format(titles[ind].text)).click()
                except Exception:
                    part_title = titles[ind].text[:42]  # if longer than 42 chars, match the first 42
                    self.driver.find_element_by_xpath(
                        "//*[starts-with(text(),'{}')]".format(part_title)).click()
                time.sleep(1)
                # self.driver.find_element_by_css_selector(
                #     "[title='{}']".format(titles[ind].text)).click()
                # time.sleep(1)
                hand = self.driver.window_handles
                self.driver.switch_to.window(hand[1])  # switch to the newest window handle
                currentPageUrl = self.driver.current_url
                html_ = requests.get(currentPageUrl, headers=self.headers)
                html_.encoding = 'utf-8'
                text = html_.text
                soup = BeautifulSoup(text, 'html.parser')
                lines = soup.find_all('div', class_='wp_articlecontent')
                contents.append(lines[0].text)
                self.driver.close()
                self.driver.switch_to.window(mainWindow)
    # Persist results.
    data = {'编号': ids, '主办单位': organizers, '标题': titles_, '公告日期': post_dates, '内容': contents}
    res = pd.DataFrame(data)
    res.to_csv(os.path.join('data','{}_results.csv'.format(self.target_web)))
    self.driver.quit()
def sufe_law_seminar(self,url,pages):
    """Scrape lecture listings from SUFE Law School.

    Args:
        url: Page URL template; ``url.format(n)`` yields page ``n``.
        pages: Number of pages to crawl (exclusive bound of ``range(1, pages)``).
    """
    count = 0
    organizer = '上海财经大学法学院'
    ## columns: |id|organizer|title|post date|content
    ids, organizers, titles_, post_dates, contents = [], [], [], [], []
    for num in range(1, pages):
        print(url.format(num))
        self.driver.get(url.format(num))
        mainWindow = self.driver.current_window_handle
        whole=self.driver.find_element_by_css_selector('[class="news_list list2"]')
        elements = whole.find_elements_by_css_selector("li[class^='news']")
        for ele in elements:
            titles = ele.find_elements_by_css_selector('.news_title')
            times = ele.find_elements_by_css_selector('.news_meta')
            for ind, _ in enumerate(titles):
                # search in page
                print(titles[ind].text)  # title
                titles_.append(titles[ind].text)
                post_dates.append(times[ind].text)
                print(times[ind].text)
                count += 1
                ids.append(count)
                organizers.append(organizer)
                # try:
                #     self.driver.find_element_by_xpath(
                #         "//*[starts-with(text(),'{}')]".format(titles[ind].text)).click()
                # except Exception:
                #     part_title = titles[ind].text[:42]
                #     self.driver.find_element_by_xpath(
                #         "//*[starts-with(text(),'{}')]".format(part_title)).click()
                self.driver.find_element_by_css_selector(
                    "[title='{}']".format(titles[ind].text)).click()
                time.sleep(1)
                hand = self.driver.window_handles
                self.driver.switch_to.window(hand[1])  # switch to the newest window handle
                currentPageUrl = self.driver.current_url
                html_ = requests.get(currentPageUrl, headers=self.headers)
                html_.encoding = 'utf-8'
                text = html_.text
                soup = BeautifulSoup(text, 'html.parser')
                lines = soup.find_all('div', class_='wp_articlecontent')
                contents.append(lines[0].text)
                self.driver.close()
                self.driver.switch_to.window(mainWindow)
    # Persist results.
    data = {'编号': ids, '主办单位': organizers, '标题': titles_, '公告日期': post_dates, '内容': contents}
    res = pd.DataFrame(data)
    res.to_csv(os.path.join('data', '{}_results.csv'.format(self.target_web)))
    self.driver.quit()
def ecupl_ipschool_seminar(self,url,pages):
    """Scrape lecture listings from the ECUPL IP School.

    Args:
        url: Page URL template; ``url.format(n)`` yields page ``n``.
        pages: Number of pages to crawl (exclusive bound of ``range(1, pages)``).
    """
    count = 0
    organizer = '华东政法大学知识产权学院'
    ## columns: |id|organizer|title|post date|content
    ids, organizers, titles_, post_dates, contents = [], [], [], [], []
    for num in range(1, pages):
        print(url.format(num))
        self.driver.get(url.format(num))
        mainWindow = self.driver.current_window_handle
        elements = self.driver.find_elements_by_css_selector('[id="wp_news_w05"] li')
        # NOTE(review): this loop variable shadows the outer page counter ``num``.
        for num,ele in enumerate(elements):
            titles = ele.find_elements_by_css_selector('[title]')
            times = ele.find_elements_by_css_selector('span')  # (stale note from an xpath version: a leading '.' would be required there)
            for ind, _ in enumerate(titles):
                # search in page
                print(titles[ind].text)  # title
                titles_.append(titles[ind].text)
                post_dates.append(times[ind].text)
                print(times[ind].text)
                count += 1
                ids.append(count)
                organizers.append(organizer)
                try:
                    self.driver.find_element_by_css_selector(
                        "[title='{}']".format(titles[ind].text)).click()
                except Exception:
                    # Fallback: click the row's second anchor directly.
                    ele.find_element_by_css_selector("a:nth-of-type(2)").click()
                time.sleep(1)
                hand = self.driver.window_handles
                self.driver.switch_to.window(hand[1])  # switch to the newest window handle
                currentPageUrl = self.driver.current_url
                html_ = requests.get(currentPageUrl, headers=self.headers)
                html_.encoding = 'utf-8'
                text = html_.text
                soup = BeautifulSoup(text, 'html.parser')
                lines = soup.find_all('div', class_='wp_articlecontent')
                contents.append(lines[0].text)
                self.driver.close()
                self.driver.switch_to.window(mainWindow)
    # Persist results.
    data = {'编号': ids, '主办单位': organizers, '标题': titles_, '公告日期': post_dates, '内容': contents}
    res = pd.DataFrame(data)
    res.to_csv(os.path.join('data', '{}_results.csv'.format(self.target_web)))
    self.driver.quit()
def shu_law_seminar(self, url):
    """Scrape the lecture list from Shanghai University Law School.

    Only the first page is read; each row yields a title and a post date,
    written to data/<target_web>_results.csv.
    """
    organizer = '上海大学法学院'
    ids, organizers, titles_, post_dates = [], [], [], []
    self.driver.get(url)
    rows = self.driver.find_elements_by_css_selector('.d-right-down ul li')
    # No popups on this site: everything is read straight off the list page.
    for seq, row in enumerate(rows, start=1):
        heading = row.find_element_by_css_selector('a').text
        posted = row.find_element_by_css_selector('span').text
        print(heading)
        titles_.append(heading)
        post_dates.append(posted)
        ids.append(seq)
        organizers.append(organizer)
    # Persist results.
    frame = pd.DataFrame({'编号': ids, '主办单位': organizers, '标题': titles_, '公告日期': post_dates,})
    frame.to_csv(os.path.join('data', '{}_results.csv'.format(self.target_web)))
    self.driver.quit()
def lawyers_seminar(self,url,pages):
count = 0
organizer = '东方律师'
## |编号|主办单位|标题|公告日期|主讲人|讲座日期|讲座地址
ids, organizers, titles_, | |
# Repository: TwinklePie/evennia-nvn
"""
Contains everything involved in editplayer/editchar
"""
from ev import syscmdkeys
from ev import Command
from contrib.menusystem import MenuNode, MenuTree
from ev import utils
CMD_NOMATCH = syscmdkeys.CMD_NOMATCH
CMD_NOINPUT = syscmdkeys.CMD_NOINPUT
def printchar(caller):
    """
    Send the caller one message listing the character's current values.
    """
    db = caller.db
    rows = [
        (" Full Name", db.full_name),
        (" Gender", db.gender),
        (" Species", db.species),
        (" Alignment", db.alignment),
        (" Age", db.age),
        (" Apparent Age", db.apparent_age),
        (" Sexuality", db.sexuality),
        (" Coat", db.coat),
        (" Mane", db.mane),
        (" Cutie Mark", db.cutie_mark),
        (" Eyes", db.eyes),
        (" Height", db.height),
        (" Weight", db.weight),
        ("Character Notes", db.character_notes),
        (" Player Notes", db.player_notes),
        (" RP Preferences", db.rp_prefs),
    ]
    sheet = "".join("%s: %s\n" % (label, value) for label, value in rows)
    caller.msg(sheet)
    return
class CmdBackToStart(Command):
    """
    Return the menu to the START (top-level) node.
    """
    key = CMD_NOINPUT
    locks = "cmd:all()"
    def func(self):
        """Jump the menu tree back to its START node."""
        target = "START"
        self.menutree.goto(target)
class CmdBackToFullName(Command):
    """
    Return the menu to the 'fullname' node.
    """
    # Fix: copy-pasted docstring said "Step back to node0", but this
    # command goes to the fullname node.
    key = CMD_NOINPUT
    locks = "cmd:all()"
    def func(self):
        """Jump the menu back to the fullname node."""
        self.menutree.goto("fullname")
class CmdFullNameSelect(Command):
    """
    Store the entered text as the character's full name.
    """
    key = CMD_NOMATCH
    locks = "cmd:all()"
    def func(self):
        """Save raw input to db.full_name, then redisplay the node."""
        entered = self.args
        self.caller.db.full_name = entered
        self.menutree.goto("fullname")
class CmdFullNameDelete(Command):
    """
    Clear the character's full name.
    """
    # Fix: copy-pasted docstring said "Handle setting the full name",
    # but this command clears it.
    key = CMD_NOMATCH
    locks = "cmd:all()"
    def func(self):
        """Blank db.full_name, confirm, and redisplay the node."""
        self.caller.db.full_name = ""
        self.caller.msg("Full Name Cleared.")
        self.menutree.goto("fullname")
class CmdBackToAlignment(Command):
    """
    Return the menu to the 'alignment' node.
    """
    # Fix: copy-pasted docstring said "Step back to node0".
    key = CMD_NOINPUT
    locks = "cmd:all()"
    def func(self):
        """Jump the menu back to the alignment node."""
        self.menutree.goto("alignment")
class CmdAlignmentSelect(Command):
    """
    Store the entered text as the character's alignment.
    """
    # Fix: copy-pasted docstring said "Handle setting the full name".
    key = CMD_NOMATCH
    locks = "cmd:all()"
    def func(self):
        """Save raw input to db.alignment, then redisplay the node."""
        self.caller.db.alignment = self.args
        self.menutree.goto("alignment")
class CmdAlignmentDelete(Command):
    """
    Clear the character's alignment.
    """
    # Fix: copy-pasted docstring said "Handle setting the full name".
    key = CMD_NOMATCH
    locks = "cmd:all()"
    def func(self):
        """Blank db.alignment, confirm, and redisplay the node."""
        self.caller.db.alignment = ""
        self.caller.msg("Alignment Cleared.")
        self.menutree.goto("alignment")
class CmdBackToAge(Command):
    """
    Return the menu to the 'age' node.
    """
    # Fix: copy-pasted docstring said "Step back to node0".
    key = CMD_NOINPUT
    locks = "cmd:all()"
    def func(self):
        """Jump the menu back to the age node."""
        self.menutree.goto("age")
class CmdAgeSelect(Command):
    """
    Store the entered text as the character's age.
    """
    # Fix: copy-pasted docstring said "Handle setting the full name".
    key = CMD_NOMATCH
    locks = "cmd:all()"
    def func(self):
        """Save raw input to db.age, then redisplay the node."""
        self.caller.db.age = self.args
        self.menutree.goto("age")
class CmdAgeDelete(Command):
    """
    Clear the character's age.
    """
    # Fix: copy-pasted docstring said "Handle setting the full name".
    key = CMD_NOMATCH
    locks = "cmd:all()"
    def func(self):
        """Blank db.age, confirm, and redisplay the node."""
        self.caller.db.age = ""
        self.caller.msg("Age Cleared.")
        self.menutree.goto("age")
class CmdBackToApparentAge(Command):
    """
    Return the menu to the 'apparent_age' node.
    """
    # Fix: copy-pasted docstring said "Step back to node0".
    key = CMD_NOINPUT
    locks = "cmd:all()"
    def func(self):
        """Jump the menu back to the apparent_age node."""
        self.menutree.goto("apparent_age")
class CmdApparentAgeSelect(Command):
    """
    Store the entered text as the character's apparent age.
    """
    # Fix: copy-pasted docstring said "Handle setting the full name".
    key = CMD_NOMATCH
    locks = "cmd:all()"
    def func(self):
        """Save raw input to db.apparent_age, then redisplay the node."""
        self.caller.db.apparent_age = self.args
        self.menutree.goto("apparent_age")
class CmdApparentAgeDelete(Command):
    """
    Clear the character's apparent age.
    """
    # Fix: copy-pasted docstring said "Handle setting the full name".
    key = CMD_NOMATCH
    locks = "cmd:all()"
    def func(self):
        """Blank db.apparent_age, confirm, and redisplay the node."""
        self.caller.db.apparent_age = ""
        self.caller.msg("Apparent Age Cleared.")
        self.menutree.goto("apparent_age")
class CmdBackToSexuality(Command):
    """
    Return the menu to the 'sexuality' node.
    """
    # Fix: copy-pasted docstring said "Step back to node0".
    key = CMD_NOINPUT
    locks = "cmd:all()"
    def func(self):
        """Jump the menu back to the sexuality node."""
        self.menutree.goto("sexuality")
class CmdSexualitySelect(Command):
    """
    Store the entered text as the character's sexuality.
    """
    # Fix: copy-pasted docstring said "Handle setting the full name".
    key = CMD_NOMATCH
    locks = "cmd:all()"
    def func(self):
        """Save raw input to db.sexuality, then redisplay the node."""
        self.caller.db.sexuality = self.args
        self.menutree.goto("sexuality")
class CmdSexualityDelete(Command):
    """
    Clear the character's sexuality.
    """
    # Fix: copy-pasted docstring said "Handle setting the full name".
    key = CMD_NOMATCH
    locks = "cmd:all()"
    def func(self):
        """Blank db.sexuality, confirm, and redisplay the node."""
        self.caller.db.sexuality = ""
        self.caller.msg("Sexuality Cleared.")
        self.menutree.goto("sexuality")
class CmdBackToGender(Command):
    """
    Return the menu to the 'gender' node.
    """
    # Fix: copy-pasted docstring said "Step back to node0".
    key = CMD_NOINPUT
    locks = "cmd:all()"
    def func(self):
        """Jump the menu back to the gender node."""
        self.menutree.goto("gender")
class CmdGenderSelect(Command):
    """
    Store the entered text as the character's gender.
    """
    # Fix: copy-pasted docstring said "Handle setting the full name".
    key = CMD_NOMATCH
    locks = "cmd:all()"
    def func(self):
        """Save raw input to db.gender, then redisplay the node."""
        self.caller.db.gender = self.args
        self.menutree.goto("gender")
class CmdGenderDelete(Command):
    """
    Clear the character's gender.
    """
    # Fix: copy-pasted docstring said "Handle setting the full name".
    key = CMD_NOMATCH
    locks = "cmd:all()"
    def func(self):
        """Blank db.gender, confirm, and redisplay the node."""
        self.caller.db.gender = ""
        self.caller.msg("Gender Cleared.")
        self.menutree.goto("gender")
class CmdBackToJob(Command):
    """
    Return the menu to the 'job' node.
    """
    # Fix: copy-pasted docstring said "Step back to node0".
    key = CMD_NOINPUT
    locks = "cmd:all()"
    def func(self):
        """Jump the menu back to the job node."""
        self.menutree.goto("job")
class CmdJobSelect(Command):
    """
    Store the entered text as the character's job or class.
    """
    # Fix: copy-pasted docstring said "Handle setting the full name".
    key = CMD_NOMATCH
    locks = "cmd:all()"
    def func(self):
        """Save raw input to db.job, then redisplay the node."""
        self.caller.db.job = self.args
        self.menutree.goto("job")
class CmdJobDelete(Command):
    """
    Clear the character's job or class.
    """
    # Fix: copy-pasted docstring said "Handle setting the full name".
    key = CMD_NOMATCH
    locks = "cmd:all()"
    def func(self):
        """Blank db.job, confirm, and redisplay the node."""
        self.caller.db.job = ""
        self.caller.msg("Job Cleared.")
        self.menutree.goto("job")
class CmdBackToMane(Command):
    """
    Return the menu to the 'mane' node.
    """
    # Fix: copy-pasted docstring said "Step back to node0".
    key = CMD_NOINPUT
    locks = "cmd:all()"
    def func(self):
        """Jump the menu back to the mane node."""
        self.menutree.goto("mane")
class CmdManeSelect(Command):
    """
    Store the entered text as the character's mane description.
    """
    # Fix: copy-pasted docstring said "Handle setting the full name".
    key = CMD_NOMATCH
    locks = "cmd:all()"
    def func(self):
        """Save raw input to db.mane, then redisplay the node."""
        self.caller.db.mane = self.args
        self.menutree.goto("mane")
class CmdManeDelete(Command):
    """
    Clear the character's mane description.
    """
    # Fix: copy-pasted docstring said "Handle setting the full name".
    key = CMD_NOMATCH
    locks = "cmd:all()"
    def func(self):
        """Blank db.mane, confirm, and redisplay the node."""
        self.caller.db.mane = ""
        self.caller.msg("Mane Cleared.")
        self.menutree.goto("mane")
class CmdBackToEyes(Command):
    """
    Return the menu to the 'eyes' node.
    """
    # Fix: copy-pasted docstring said "Step back to node0".
    key = CMD_NOINPUT
    locks = "cmd:all()"
    def func(self):
        """Jump the menu back to the eyes node."""
        self.menutree.goto("eyes")
class CmdEyesSelect(Command):
    """
    Store the entered text as the character's eye color.
    """
    # Fix: copy-pasted docstring said "Handle setting the full name".
    key = CMD_NOMATCH
    locks = "cmd:all()"
    def func(self):
        """Save raw input to db.eyes, then redisplay the node."""
        self.caller.db.eyes = self.args
        self.menutree.goto("eyes")
class CmdEyesDelete(Command):
    """
    Clear the character's eye color.
    """
    # Fix: copy-pasted docstring said "Handle setting the full name".
    key = CMD_NOMATCH
    locks = "cmd:all()"
    def func(self):
        """Blank db.eyes, confirm, and redisplay the node."""
        self.caller.db.eyes = ""
        self.caller.msg("Eye Color Cleared.")
        self.menutree.goto("eyes")
class CmdBackToHeight(Command):
    """
    Return the menu to the 'height' node.
    """
    # Fix: copy-pasted docstring said "Step back to node0".
    key = CMD_NOINPUT
    locks = "cmd:all()"
    def func(self):
        """Jump the menu back to the height node."""
        self.menutree.goto("height")
class CmdHeightSelect(Command):
    """
    Store the entered text as the character's height.
    """
    # Fix: copy-pasted docstring said "Handle setting the full name".
    key = CMD_NOMATCH
    locks = "cmd:all()"
    def func(self):
        """Save raw input to db.height, then redisplay the node."""
        self.caller.db.height = self.args
        self.menutree.goto("height")
class CmdHeightDelete(Command):
    """
    Clear the character's height.
    """
    # Fix: copy-pasted docstring said "Handle setting the full name".
    key = CMD_NOMATCH
    locks = "cmd:all()"
    def func(self):
        """Blank db.height, confirm, and redisplay the node."""
        self.caller.db.height = ""
        self.caller.msg("Height Cleared.")
        self.menutree.goto("height")
class CmdBackToWeight(Command):
    """
    Return the menu to the 'weight' node.
    """
    # Fix: copy-pasted docstring said "Step back to node0".
    key = CMD_NOINPUT
    locks = "cmd:all()"
    def func(self):
        """Jump the menu back to the weight node."""
        self.menutree.goto("weight")
class CmdWeightSelect(Command):
    """
    Store the entered text as the character's weight.
    """
    # Fix: copy-pasted docstring said "Handle setting the full name".
    key = CMD_NOMATCH
    locks = "cmd:all()"
    def func(self):
        """Save raw input to db.weight, then redisplay the node."""
        self.caller.db.weight = self.args
        self.menutree.goto("weight")
class CmdWeightDelete(Command):
    """
    Clear the character's weight.
    """
    # Fix: copy-pasted docstring said "Handle setting the full name".
    key = CMD_NOMATCH
    locks = "cmd:all()"
    def func(self):
        """Blank db.weight, confirm, and redisplay the node."""
        self.caller.db.weight = ""
        self.caller.msg("Weight Cleared.")
        self.menutree.goto("weight")
class CmdBackToCutieMark(Command):
    """
    Return the menu to the 'cutie_mark' node.
    """
    # Fix: copy-pasted docstring said "Step back to node0".
    key = CMD_NOINPUT
    locks = "cmd:all()"
    def func(self):
        """Jump the menu back to the cutie_mark node."""
        self.menutree.goto("cutie_mark")
class CmdCutieMarkSelect(Command):
    """
    Store the entered text as the character's cutie mark.
    """
    # Fix: copy-pasted docstring said "Handle setting the full name".
    key = CMD_NOMATCH
    locks = "cmd:all()"
    def func(self):
        """Save raw input to db.cutie_mark, then redisplay the node."""
        self.caller.db.cutie_mark = self.args
        self.menutree.goto("cutie_mark")
class CmdCutieMarkDelete(Command):
    """
    Clear the character's cutie mark.
    """
    # Fix: copy-pasted docstring said "Handle setting the full name".
    key = CMD_NOMATCH
    locks = "cmd:all()"
    def func(self):
        """Blank db.cutie_mark, confirm, and redisplay the node."""
        self.caller.db.cutie_mark = ""
        self.caller.msg("Cutie Mark Cleared.")
        self.menutree.goto("cutie_mark")
class CmdBackToShortDesc(Command):
    """
    Return the menu to the 'short_desc' node.
    """
    # Fix: copy-pasted docstring said "Step back to node0".
    key = CMD_NOINPUT
    locks = "cmd:all()"
    def func(self):
        """Jump the menu back to the short_desc node."""
        self.menutree.goto("short_desc")
class CmdShortDescSelect(Command):
    """
    Store the entered text as the character's short description.
    """
    # Fix: copy-pasted docstring said "Handle setting the full name".
    key = CMD_NOMATCH
    locks = "cmd:all()"
    def func(self):
        """Save raw input to db.short_desc, then redisplay the node."""
        self.caller.db.short_desc = self.args
        self.menutree.goto("short_desc")
class CmdShortDescDelete(Command):
    """
    Clear the character's short description.
    """
    # Fix: copy-pasted docstring said "Handle setting the full name".
    key = CMD_NOMATCH
    locks = "cmd:all()"
    def func(self):
        """Blank db.short_desc, confirm, and redisplay the node."""
        self.caller.db.short_desc = ""
        self.caller.msg("Short Desc Cleared.")
        self.menutree.goto("short_desc")
class CmdEditCharacter(Command):
"""
This allows the player to edit their character.
Usage:
+editcharacter
This command allows the player to set fields on the current character. Must be @ic in order to set
values.
"""
key = "+editcharacter"
aliases = ["editplayer", "editcharacter", "+editplayer"]
help_category = "General"
def func(self):
"""This performs the actual command"""
caller = self.caller
printchar(self.caller)
"Testing the menu system"
node0 = MenuNode("START", text="Character Editing",
links=["fullname", "gender", "species", "alignment", "job", "age", "apparent_age", "coat",
"mane", "eyes", "cutie_mark", "sexuality", "height", "weight", "short_desc", "END"],
linktexts=[
"Full Name", "Gender", "Species", "Alignment", "Job or Class", "Age", "Apparent Age",
"Coat", "Mane", "Eyes", "Cutie Mark", "Sexuality", "Height when Standing", "Weight",
"Short Desc for ws", "Quit"],
keywords=["F", "G", "S", "AL", "J", "A", "AP", "C", "M", "E", "CM", "SE", "H", "W", "SD",
"Q"],
cols=2)
node1 = MenuNode("fullname",
links=["fullname_change", None, "END", "START"],
selectcmds=[None, CmdFullNameDelete, None, None],
linktexts=["Change Value", "Delete Value", "Quit", "Back to start"],
keywords=["C", "D", "Q", "B"],
code="self.caller.msg('Current Full Name: %s' % self.caller.db.full_name)")
node1b = MenuNode("fullname_change",
text="What do you want your full name to be? (Space + CR to make no changes)",
links=["START", "END"],
keywords=[CMD_NOINPUT, CMD_NOMATCH],
selectcmds=[CmdBackToFullName, CmdFullNameSelect],
nodefaultcmds=True,
code="self.caller.msg('Current Full Name: %s' % self.caller.db.full_name)")
node2 = MenuNode("gender",
links=["gender_change", None, "END", "START"],
selectcmds=[None, CmdGenderDelete, None, None],
linktexts=["Change Value", "Delete Value", "Quit", "Back to start"],
keywords=["C", "D", "Q", "B"],
code="self.caller.msg('Current Gender: %s' % self.caller.db.gender)")
node2b = MenuNode("gender_change",
text="What do you want | |
scheme',
min_entries=1)
identifiers = FieldList(
FormField(IdentifierForm), 'Identifiers for this scheme',
min_entries=1)
versions = FieldList(
FormField(VersionForm), 'Version history', min_entries=1)
# Editing organizations
# ---------------------
class OrganizationForm(FlaskForm):
    """Edit form for an organization record.

    Field declaration order determines render order; do not reorder.
    """
    # Display name of the organization.
    name = StringField('Name of organization')
    description = TextAreaField('Description')
    # Multi-select over the module-level ``organization_types`` vocabulary.
    types = SelectMultipleField(
        'Type of organization', choices=organization_types)
    # min_entries=1 keeps at least one (possibly empty) row rendered.
    locations = FieldList(
        FormField(LocationForm), 'Relevant links', min_entries=1)
    identifiers = FieldList(
        FormField(IdentifierForm), 'Identifiers for this organization',
        min_entries=1)
# Editing tools
# -------------
class ToolForm(FlaskForm):
    """Edit form for tool ('t' series) records.

    Choices for the SelectMultipleField fields are not set here; the
    edit_record view fills them in per-request from the database.
    """
    title = StringField('Name of tool')
    description = TextAreaField('Description')
    supported_schemes = SelectMultipleField(
        'Metadata scheme(s) supported by this tool')
    # Free-text type constrained to the controlled vocabulary pattern.
    types = FieldList(
        StringField('Type', validators=[
            validators.Regexp(tool_type_regexp, message=tool_type_help)]),
        'Type of tool', min_entries=1)
    creators = FieldList(
        FormField(CreatorForm), 'People responsible for this tool',
        min_entries=1)
    maintainers = SelectMultipleField('Organizations that maintain this tool')
    funders = SelectMultipleField('Organizations that funded this tool')
    locations = FieldList(
        FormField(LocationForm), 'Links to this tool', min_entries=1)
    identifiers = FieldList(
        FormField(IdentifierForm), 'Identifiers for this tool', min_entries=1)
    versions = FieldList(
        FormField(VersionForm), 'Version history', min_entries=1)
# Editing mappings
# ----------------
class MappingForm(FlaskForm):
    """Edit form for mapping ('c' series) records."""
    description = TextAreaField('Description')
    input_schemes = SelectMultipleField('Input metadata scheme(s)')
    output_schemes = SelectMultipleField('Output metadata scheme(s)')
    creators = FieldList(
        FormField(CreatorForm), 'People responsible for this mapping',
        min_entries=1)
    # NOTE(review): unlike the sibling forms, choices are evaluated here at
    # class-definition (import) time; the edit_record view overwrites them
    # per-request anyway, so this default may be redundant/stale — confirm.
    maintainers = SelectMultipleField(
        'Organizations that maintain this mapping',
        choices=get_choices('g'))
    funders = SelectMultipleField('Organizations that funded this mapping')
    locations = FieldList(
        FormField(FreeLocationForm), 'Links to this mapping', min_entries=1)
    identifiers = FieldList(
        FormField(IdentifierForm), 'Identifiers for this mapping',
        min_entries=1)
    versions = FieldList(
        FormField(VersionForm), 'Version history', min_entries=1)
# Editing endorsements
# --------------------
class EndorsementForm(FlaskForm):
    """Edit form for endorsement ('e' series) records."""
    citation = StringField('Citation')
    issued = NativeDateField('Endorsement date')
    # valid_from/valid_to together express the endorsement validity period.
    valid_from = NativeDateField('Endorsement period')
    valid_to = NativeDateField('until')
    locations = FieldList(
        FormField(LocationForm), 'Links to this endorsement', min_entries=1)
    identifiers = FieldList(
        FormField(IdentifierForm), 'Identifiers for this endorsement',
        min_entries=1)
    endorsed_schemes = FieldList(
        FormField(SchemeVersionForm), 'Endorsed schemes', min_entries=1)
    originators = SelectMultipleField('Endorsing organizations')
# Maps a record-series letter to its editing form class:
# m = scheme, g = organization, t = tool, c = mapping, e = endorsement.
Forms = {
    'm': SchemeForm,
    'g': OrganizationForm,
    't': ToolForm,
    'c': MappingForm,
    'e': EndorsementForm}
# Ensuring consistency of data type/URL pairs
# -------------------------------------------
def _merge_data_type_labels(old_dataTypes, dataType):
    """Return (new_list, changed): a copy of old_dataTypes in which every
    entry sharing dataType's URL but carrying a different label is replaced
    by dataType itself. `changed` reports whether any entry was replaced."""
    changed = False
    new_dataTypes = list()
    for entry in old_dataTypes:
        if (entry.get('url') == dataType['url'] and
                entry.get('label') != dataType['label']):
            changed = True
            new_dataTypes.append(dataType)
        else:
            new_dataTypes.append(entry)
    return new_dataTypes, changed


def propagate_data_types(msc_data, table, t):
    """Takes a record, a table, and a transaction. For each data type URL/label
    pair, ensures all other occurrences of the URL in the table are accompanied
    by the same label. Returns the number of updated records."""
    changes_made = 0
    if 'dataTypes' not in msc_data:
        return changes_made
    Scheme = Version = DataType = Query()
    for dataType in msc_data['dataTypes']:
        # Only complete URL/label pairs can be propagated.
        if not dataType.get('url'):
            continue
        if not dataType.get('label'):
            continue
        # Records whose top-level dataTypes list mentions this URL.
        matches = table.search(
            Scheme.dataTypes.any(
                (DataType.url == dataType['url'])))
        for match in matches:
            new_dataTypes, needs_updating = _merge_data_type_labels(
                match['dataTypes'], dataType)
            if needs_updating:
                # NOTE: `eids` is the legacy TinyDB keyword (doc_ids in
                # newer releases); kept to match the rest of this file.
                t.update({'dataTypes': new_dataTypes}, eids=[match.doc_id])
                changes_made += 1
        # Records where the URL appears inside a version's dataTypes list.
        matches = table.search(
            Scheme.versions.any(
                Version.dataTypes.any(
                    (DataType.url == dataType['url']))))
        for match in matches:
            needs_updating = False
            new_versions = list()
            for version in match['versions']:
                if 'dataTypes' not in version:
                    new_versions.append(version)
                    continue
                new_dataTypes, changed = _merge_data_type_labels(
                    version['dataTypes'], dataType)
                if changed:
                    needs_updating = True
                new_version = version
                new_version['dataTypes'] = new_dataTypes
                new_versions.append(new_version)
            if needs_updating:
                t.update({'versions': new_versions}, eids=[match.doc_id])
                changes_made += 1
    return changes_made
# Generic editing form view
# -------------------------
@app.route('/edit/<string(length=1):series><int:number>',
           methods=['GET', 'POST'])
@login_required
def edit_record(series, number):
    """Generic create/edit view for one record.

    `series` selects the record type (see the Forms mapping); `number` is the
    TinyDB document id, with 0 meaning "create a new record". An optional
    `version` request parameter switches to editing the version-specific
    overrides of an existing record.
    """
    document = tables[series].get(doc_id=number)
    version = request.values.get('version')
    if version and request.referrer == request.base_url:
        # This is the version screen, opened from the main screen
        flash('Only provide information here that is different from the'
              ' information in the main (non-version-specific) record.')
    # Instantiate form
    if document:
        # Translate from internal data model to form data
        if version:
            # Find the matching version entry; for-else falls back to an
            # empty form when the version is not in the record.
            for release in document['versions']:
                if 'number' in release and\
                        str(release['number']) == str(version):
                    form = Forms[series](
                        data=msc_to_form(release))
                    break
            else:
                form = Forms[series]()
            # A version-specific form must not nest its own version history.
            del form['versions']
        else:
            form = Forms[series](data=msc_to_form(document))
    else:
        # Unknown id: force the "new record" URL (number 0).
        if number != 0:
            return redirect(url_for('edit_record', series=series, number=0))
        form = Forms[series]()
    # Form-specific value lists
    params = dict()
    scheme_choices = get_choices('m')
    organization_choices = get_choices('g')
    if series == 'm':
        # Subject keyword help
        subject_list = get_subject_terms(complete=True)
        params['subjects'] = subject_list
        # Data type help
        # Collect every dataType URL and label already in use, so the
        # template can offer them as suggestions.
        type_url_set = set()
        type_label_set = set()
        for scheme in tables['m'].all():
            if 'dataTypes' in scheme:
                for type in scheme['dataTypes']:
                    type_url = type.get('url')
                    if type_url:
                        type_url_set.add(type_url)
                    type_label = type.get('label')
                    if type_label:
                        type_label_set.add(type_label)
        type_url_list = list(type_url_set)
        type_label_list = list(type_label_set)
        type_url_list.sort(key=lambda k: k.lower())
        type_label_list.sort(key=lambda k: k.lower())
        params['dataTypeURLs'] = type_url_list
        params['dataTypeLabels'] = type_label_list
        # Validation for parent schemes
        form.parent_schemes.choices = scheme_choices
        # Validation for organizations
        form.maintainers.choices = organization_choices
        form.funders.choices = organization_choices
        form.users.choices = organization_choices
        # Validation for URL types
        for f in form.locations:
            f['type'].choices = scheme_locations
    elif series == 'g':
        # Validation for URL types
        for f in form.locations:
            f['type'].choices = organization_locations
    elif series == 't':
        # Tool type help
        params['toolTypes'] = tool_type_list
        # Validation for parent schemes
        form.supported_schemes.choices = scheme_choices
        # Validation for organizations
        form.maintainers.choices = organization_choices
        form.funders.choices = organization_choices
        # Validation for URL types
        for f in form.locations:
            f['type'].choices = tool_locations
    elif series == 'c':
        # Validation for parent schemes
        form.input_schemes.choices = scheme_choices
        form.output_schemes.choices = scheme_choices
        # Validation for organizations
        form.maintainers.choices = organization_choices
        form.funders.choices = organization_choices
        # Validation for URL types
        for f in form.locations:
            f['type'].validators.append(
                validators.Regexp(
                    regex=mapping_location_regexp, message=mapping_location_help))
        params['locationTypes'] = mapping_location_list
    elif series == 'e':
        # Validation for organizations
        form.originators.choices = organization_choices
        # Validation for URL types; note that as there is a choice of one,
        # we apply it automatically, not via the form.
        for f in form.locations:
            f['type'].choices = endorsement_locations
            f.url.validators = [validators.Optional()]
            f['type'].validators = [validators.Optional()]
    # Processing the request
    if request.method == 'POST' and form.validate():
        form_data = form.data
        if series == 'e':
            # Here is where we automatically insert the URL type
            filtered_locations = list()
            for f in form.locations:
                if f.url.data:
                    location = {'url': f.url.data, 'type': 'document'}
                    filtered_locations.append(location)
            form_data['locations'] = filtered_locations
        # Translate form data into internal data model
        msc_data = form_to_msc(form_data, document)
        if version:
            # Editing the version-specific overrides
            if document and 'versions' in document:
                version_list = document['versions']
                for index, item in enumerate(version_list):
                    if str(item['number']) == str(version):
                        # Keep only the identifying/date keys of the old
                        # entry, then overlay the submitted data.
                        version_dict = {
                            k: v for k, v in item.items()
                            if k in ['number', 'available', 'issued', 'valid']}
                        version_dict.update(msc_data)
                        version_list[index] = version_dict
                        Record = Query()
                        Version = Query()
                        tables[series].update(
                            {'versions': version_list},
                            Record.versions.any(Version.number == version),
                            doc_ids=[number])
                        records_updated = 0
                        if 'dataTypes' in msc_data:
                            with transaction(tables[series]) as t:
                                records_updated = propagate_data_types(
                                    msc_data, tables[series], t)
                        flash('Successfully updated record for version {}.'
                              .format(version), 'success')
                        if records_updated:
                            flash('Also updated the data types of {:/1 other'
                                  ' record/N other records}.'
                                  .format(Pluralizer(records_updated)),
                                  'success')
                        flash('If this page opened in a new window or tab, feel'
                              ' free to close it now.')
                        break
                else:
                    # This version is not in the list
                    flash('Could not apply changes. Have you saved details for'
                          ' version {} in the main record?'.format(version),
                          'error')
            else:
                # The version list or the main record is missing
                flash('Could not apply changes. Have you saved details for'
                      ' version {} in the main record?'.format(version),
                      'error')
            return redirect('{}?version={}'.format(url_for(
                'edit_record', series=series, number=number), version))
        elif document:
            # Editing an existing record
            msc_data = fix_admin_data(msc_data, series, number)
            with transaction(tables[series]) as t:
                # Drop keys the user removed, then apply the new data.
                for key in (k for k in document if k not in msc_data):
                    t.update_callable(delete(key), eids=[number])
                t.update(msc_data, eids=[number])
            # Ensure consistency of dataType url/label pairs
            records_updated = 0
            if 'dataTypes' in msc_data:
                with transaction(tables[series]) as t:
                    records_updated = propagate_data_types(
                        msc_data, tables[series], t)
            flash('Successfully updated record.', 'success')
            if records_updated:
                flash('Also updated the data types of {:/1 other record/N other'
                      ' records}.'.format(Pluralizer(records_updated)),
                      'success')
        else:
            # Adding a new record
            msc_data = fix_admin_data(msc_data, series, number)
            number = tables[series].insert(msc_data)
            flash('Successfully added record.', 'success')
        return redirect(url_for('edit_record', series=series, number=number))
    if form.errors:
        flash('Could not save changes as there {:/was an error/were N errors}.'
              ' See below for details.'.format(Pluralizer(len(form.errors))),
              'error')
        # Deduplicate repeated validation messages before rendering.
        for field, errors in form.errors.items():
            if len(errors) > 0:
                if isinstance(errors[0], str):
                    # Simple field
                    form[field].errors = clean_error_list(form[field])
                else:
                    # Subform
                    for subform in errors:
                        for subfield, suberrors in subform.items():
                            for f in form[field]:
                                f[subfield].errors = clean_error_list(f[subfield])
    return render_template(
        'edit-' + templates[series], form=form, doc_id=number, version=version,
        idSchemes=id_scheme_list, **params)
def clean_error_list(field):
    """Return the field's error messages with duplicates removed.

    Unlike the previous list(set(...)) implementation, this preserves the
    first-seen order of the messages, so the rendered error list is stable.
    """
    seen_errors = set()
    unique_errors = []
    for error in field.errors:
        if error not in seen_errors:
            seen_errors.add(error)
            unique_errors.append(error)
    return unique_errors
# Generic API contribution handling
# =================================
#
# Conformance checking function
# -----------------------------
def assess_conformance(series, document):
"""Examines the contents of an document and assesses its compliance with the
MSC data model, giving the result as an integer score.
Arguments:
series (str): Record series
document (dict or Document): MSC record
Returns:
dict: 'level' contains the conformance level of the record as an int,
| |
#!/usr/bin/env python
import signal
import rospy
import smach
import smach_ros
import math
from geometry_msgs.msg import Twist
import numpy as np
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import Joy
from geometry_msgs.msg import PoseWithCovarianceStamped
from tf.transformations import euler_from_quaternion, quaternion_from_euler
import time
import trig
import actionlib
from std_srvs.srv import Empty
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from geometry_msgs.msg import PoseWithCovarianceStamped
from kobuki_msgs.msg import Led
from kobuki_msgs.msg import Sound
from geometry_msgs.msg import Point
import event_two
import sys
sys.path.insert(1, '/home/malcolm/Documents/CMPUT_412/Competition/CS412T1C4/shapeTesting')
import v2
import traceback
global box8_position
global box2_position
class Localize(smach.State):
    """Initial state: arcs forward to help the localizer converge, republishes
    the estimated pose with a fixed covariance, then turns ~350 degrees to
    scan the surroundings before handing off to the next state."""
    def __init__(self, callbacks):
        smach.State.__init__(self, outcomes=['done4', 'find_markers', 'box2', 'box8', 'box1'])
        self.initial = PoseWithCovarianceStamped()
        self.callbacks = callbacks
        self.initial_pub = rospy.Publisher('/initialpose', PoseWithCovarianceStamped, queue_size=1)
        self.led1_pub = rospy.Publisher('/mobile_base/commands/led1', Led, queue_size=1)
        self.led2_pub = rospy.Publisher('/mobile_base/commands/led2', Led, queue_size=1)
        self.sound_pub = rospy.Publisher('/mobile_base/commands/sound', Sound, queue_size=1)
        self.twist = Twist()
        self.cmd_vel_pub = rospy.Publisher('cmd_vel_mux/input/teleop', Twist, queue_size=1)
        #self.notified_box_found = False
        #self.notified_box_target_found = False
    def execute(self, userdata):
        global shutdown_requested
        # Drive a gentle right-hand arc for 5 seconds.
        start = time.time()
        while time.time() - start < 5:
            self.twist.linear.x = 0.5
            self.twist.angular.z = -0.3
            self.cmd_vel_pub.publish(self.twist)
            if shutdown_requested:
                return 'done4'
        self.twist.linear.x = 0
        self.twist.angular.z = 0
        # Turn target: just short of a full revolution from the current heading.
        target_heading = (self.callbacks.bot_odom_heading + 350) % 360
        self.initial = self.callbacks.bot_map_pose
        # Fixed covariance (0.25 m^2 x/y, ~0.069 rad^2 yaw) for the re-seed.
        self.initial.pose.covariance = [0.25, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.25, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                                        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                                        0.0, 0.0, 0.0, 0.0, 0.06853891945200942]
        self.initial_pub.publish(self.initial) # publish to get a more precise location
        # Rotate in place until within 1 degree of the target heading.
        turning = True
        previous_difference = None
        while turning:
            if shutdown_requested:
                return 'done4'
            difference = trig.minimum_angle_between_headings(target_heading, self.callbacks.bot_odom_heading)
            if previous_difference is None:
                self.twist.angular.z = 0.4
                self.cmd_vel_pub.publish(self.twist)
            else:
                if abs(difference) < 1:
                    turning = False
                    self.twist.angular.z = 0
                    self.cmd_vel_pub.publish(self.twist)
                else:
                    self.twist.angular.z = 0.4
                    self.cmd_vel_pub.publish(self.twist)
            if previous_difference != difference:
                previous_difference = difference
        '''
        if self.callbacks.box_position is not None:
            self.led2_pub.publish(3) # red
            self.sound_pub.publish(1)
            time.sleep(1)
            print("Found box")
        if self.callbacks.box_target_position is not None:
            self.led1_pub.publish(1) # Green
            self.sound_pub.publish(1)
            time.sleep(1)
            print("Found box target")
        '''
        if shutdown_requested:
            return 'done4'
        # Marker-based branching below is disabled; always proceed to box1.
        return 'box1' # TODO: return 'box1'
        '''
        if self.callbacks.box_target_position is None or self.callbacks.box_position is None:
            return 'find_markers'
        else:
            self.led2_pub.publish(0) # Off
            self.led1_pub.publish(0) # Off
            distance_from_box_target = trig.get_distance(self.callbacks.bot_map_position, self.callbacks.box_target_position)
            distance_from_box = trig.get_distance(self.callbacks.bot_map_position, self.callbacks.box_position)
            if distance_from_box < distance_from_box_target:
                return 'box2'
            else:
                return 'box8'
        '''
class FindMarkers(smach.State):
    """Navigate to a fixed vantage pose, then spin while watching for the box
    and box-target markers, signalling each find with LEDs and a sound."""
    def __init__(self, callbacks):
        smach.State.__init__(self, outcomes=['done4', 'box1', 'box2', 'box8', 'return'])
        self.client = actionlib.SimpleActionClient('move_base', MoveBaseAction)
        self.client.wait_for_server()
        # Hard-coded vantage pose in the map frame.
        self.target = MoveBaseGoal()
        self.target.target_pose.header.frame_id = "map"
        self.target.target_pose.header.stamp = rospy.Time.now()
        self.target.target_pose.pose.position.x = -0.534573222531
        self.target.target_pose.pose.position.y = -2.64808881431
        self.target.target_pose.pose.orientation.x = 0.0
        self.target.target_pose.pose.orientation.y = 0.0
        self.target.target_pose.pose.orientation.z = -0.990251526545
        self.target.target_pose.pose.orientation.w = 0.13929075409
        self.callbacks = callbacks
        self.led1_pub = rospy.Publisher('/mobile_base/commands/led1', Led, queue_size=1)
        self.led2_pub = rospy.Publisher('/mobile_base/commands/led2', Led, queue_size=1)
        self.sound_pub = rospy.Publisher('/mobile_base/commands/sound', Sound, queue_size=1)
        self.twist = Twist()
        self.cmd_vel_pub = rospy.Publisher('cmd_vel_mux/input/teleop', Twist, queue_size=1)
    def execute(self, userdata):
        global shutdown_requested
        self.client.send_goal(self.target)
        self.client.wait_for_result()
        print("Goal reached")
        # Spin almost a full turn, stopping early once both markers are seen.
        target_heading = (self.callbacks.bot_odom_heading + 350) % 360
        turning = True
        previous_difference = None
        while turning:
            if shutdown_requested:
                return 'done4'
            difference = trig.minimum_angle_between_headings(target_heading, self.callbacks.bot_odom_heading)
            if previous_difference is None:
                self.twist.angular.z = 0.4
                self.cmd_vel_pub.publish(self.twist)
            else:
                if abs(difference) < 1 or (self.callbacks.box_position is not None and self.callbacks.box_target_position is not None):
                    turning = False
                    self.twist.angular.z = 0
                    self.cmd_vel_pub.publish(self.twist)
                else:
                    self.twist.angular.z = 0.4
                    self.cmd_vel_pub.publish(self.twist)
            if previous_difference != difference:
                previous_difference = difference
        # Signal which markers were spotted.
        if self.callbacks.box_position is not None:
            self.led2_pub.publish(3) # red
            self.sound_pub.publish(1)
            time.sleep(1)
        if self.callbacks.box_target_position is not None:
            self.led1_pub.publish(1) # Green
            self.sound_pub.publish(1)
            time.sleep(1)
        self.led2_pub.publish(0) # Off
        self.led1_pub.publish(0) # Off
        if self.callbacks.box_target_position is None or self.callbacks.box_position is None:
            return 'return' # Can't find box and/or box target, so forget about pushing boxes
        else:
            global box2_position
            global box8_position
            # Choose the next waypoint based on which marker lies nearer box 8.
            box_to_8_distance = trig.get_distance(self.callbacks.box_position, box8_position)
            box_target_to_8_distance = trig.get_distance(self.callbacks.box_target_position, box8_position)
            if box_to_8_distance < box_target_to_8_distance:
                return 'box8'
            else:
                return 'box2'
class MoveCloseToBox(smach.State):
    """Drive to a pushing pose behind the box, on the line from the box to its
    target. If the robot is on the goal side of the box, it first detours via
    a waypoint 0.8 m to the box's left or right (whichever is closer)."""
    def __init__(self, callbacks):
        smach.State.__init__(self, outcomes=['done4', 'push', 'box1', 'return'])
        self.callbacks = callbacks
        self.client = actionlib.SimpleActionClient('move_base', MoveBaseAction)
        self.client.wait_for_server()
        self.goal = MoveBaseGoal()
        # ServiceProxy takes the service *class*; the previous code passed an
        # instance (Empty()), which only worked by attribute fallback.
        self.clear_costmap = rospy.ServiceProxy('/move_base/clear_costmaps', Empty)
    def execute(self, userdata):
        global shutdown_requested
        if shutdown_requested:
            return 'done4'
        # Heading from the box towards its target drop position.
        get_point_heading = trig.get_heading_between_points(self.callbacks.box_position, self.callbacks.box_target_position)
        # BUG FIX: the box-to-goal distance was previously computed from
        # bot_map_position (identical to bot_distance_to_goal), so the
        # detour branch below could never trigger.
        box_distance_to_goal = trig.get_distance(self.callbacks.box_position, self.callbacks.box_target_position)
        bot_distance_to_goal = trig.get_distance(self.callbacks.bot_map_position, self.callbacks.box_target_position)
        offset = 0
        self.clear_costmap()
        if bot_distance_to_goal < box_distance_to_goal:
            # Robot is closer to the goal than the box: swing around the box
            # via the nearer of two side waypoints before lining up to push.
            offset = 0.32
            print("Going to intermediate goal")
            left_get_point_heading = get_point_heading - 90
            if left_get_point_heading < 0:
                left_get_point_heading += 360
            left_goal_position = trig.get_point(self.callbacks.box_position, 0.8, left_get_point_heading)
            right_goal_position = trig.get_point(self.callbacks.box_position, 0.8, (get_point_heading + 90) % 360)
            bot_distance_to_left = trig.get_distance(self.callbacks.bot_map_position, left_goal_position)
            bot_distance_to_right = trig.get_distance(self.callbacks.bot_map_position, right_goal_position)
            self.goal.target_pose.header.frame_id = "map"
            self.goal.target_pose.header.stamp = rospy.Time.now()
            if bot_distance_to_left < bot_distance_to_right:
                print("Going to left side")
                self.goal.target_pose.pose.position.x = left_goal_position.x
                self.goal.target_pose.pose.position.y = left_goal_position.y
            else:
                print("Going to right side")
                self.goal.target_pose.pose.position.x = right_goal_position.x
                self.goal.target_pose.pose.position.y = right_goal_position.y
            # Fixed orientation for the side waypoint (-90 degrees yaw).
            goal_yaw = math.radians(90 - 180)
            goal_quaternion = quaternion_from_euler(0.0, 0.0, goal_yaw)
            self.goal.target_pose.pose.orientation.x = goal_quaternion[0]
            self.goal.target_pose.pose.orientation.y = goal_quaternion[1]
            self.goal.target_pose.pose.orientation.z = goal_quaternion[2]
            self.goal.target_pose.pose.orientation.w = goal_quaternion[3]
            print("sending intermediate goal")
            self.client.send_goal(self.goal)
            self.client.wait_for_result()
            if self.callbacks.move_base_status == 4:  # actionlib GoalStatus.ABORTED
                return 'return'
            self.clear_costmap()
        # Move to the pushing pose: behind the box on the box->target line,
        # facing the box (heading rotated 180 degrees).
        goal_position = trig.get_point(self.callbacks.box_position, offset + 0.5, get_point_heading)
        goal_heading = (get_point_heading + 180) % 360
        goal_yaw = math.radians(goal_heading - 180)
        goal_quaternion = quaternion_from_euler(0.0, 0.0, goal_yaw)
        self.goal.target_pose.header.frame_id = "map"
        self.goal.target_pose.header.stamp = rospy.Time.now()
        self.goal.target_pose.pose.position.x = goal_position.x
        self.goal.target_pose.pose.position.y = goal_position.y
        self.goal.target_pose.pose.orientation.x = goal_quaternion[0]
        self.goal.target_pose.pose.orientation.y = goal_quaternion[1]
        self.goal.target_pose.pose.orientation.z = goal_quaternion[2]
        self.goal.target_pose.pose.orientation.w = goal_quaternion[3]
        if shutdown_requested:
            return 'done4'
        print("Going to main goal")
        self.client.send_goal(self.goal)
        self.client.wait_for_result()
        print(self.callbacks.move_base_status)
        if self.callbacks.move_base_status == 4:  # actionlib GoalStatus.ABORTED
            return 'return'
        if shutdown_requested:
            return 'done4'
        else:
            return 'push'
class Push(smach.State):
    """Drive forward until the centre bumper contacts the box, then keep
    pushing while steering towards the target position until contact is lost
    or the box reaches the target."""
    def __init__(self, callbacks):
        smach.State.__init__(self, outcomes=['done4', 'box1', 'reverse', 'return'])
        self.callbacks = callbacks
        self.cmd_vel_pub = rospy.Publisher('mobile_base/commands/velocity', Twist, queue_size=1)
        self.twist = Twist()
        self.led1_pub = rospy.Publisher('/mobile_base/commands/led1', Led, queue_size=1)
        self.led2_pub = rospy.Publisher('/mobile_base/commands/led2', Led, queue_size=1)
        self.sound_pub = rospy.Publisher('/mobile_base/commands/sound', Sound, queue_size=1)
    def execute(self, userdata):
        # self.callbacks.left_bumper_pressed
        # self.callbacks.middle_bumper_pressed
        # self.callbacks.right_bumper_pressed
        # NOTE(review): initX/currX are assigned but never used below.
        initX = self.callbacks.bot_odom_position.x
        currX = self.callbacks.bot_odom_position.x
        # Timer incase we can't backup or are taking too long
        start = time.time()
        duration = 10
        # Approach: creep forward until the middle bumper registers contact
        # or the 10-second window expires.
        detected_first_press = False
        returnState = 'reverse'
        while time.time() - start < duration and not detected_first_press:
            self.move_command(0.2, False)
            if shutdown_requested:
                return 'done4'
            # If we hit the box, the bumper should be pressed and we keep going
            if self.callbacks.middle_bumper_pressed:
                returnState = self.push_until_fail()
                detected_first_press = True
        return returnState
    def move_command(self, speed, turn_enabled):
        """Publish one velocity command; optionally steer towards the target."""
        # this way I didn't have to write the 2 lines of code a million times
        target_heading = trig.get_heading_between_points(self.callbacks.box_target_position, self.callbacks.bot_map_position)
        error = trig.minimum_angle_between_headings(self.callbacks.bot_map_heading, target_heading)
        if turn_enabled:
            # Proportional steering towards the push line.
            self.twist.angular.z = -error * 0.1
        self.twist.linear.x = speed
        self.cmd_vel_pub.publish(self.twist)
    def push_until_fail(self):
        """Push while bumper contact is fresh (< 2 s old); return 'return' on
        success at the target, 'reverse' when contact is lost."""
        global shutdown_requested
        hit_time = time.time()
        # push forever
        while time.time() - hit_time < 2:
            if self.callbacks.middle_bumper_pressed:
                hit_time = time.time()
            self.move_command(0.2, True)
            if shutdown_requested:
                return 'done4'
            # Estimate the box centre 0.405 m ahead of the robot — presumably
            # robot radius plus half the box width; TODO confirm.
            box_position = trig.get_point(self.callbacks.bot_map_position, 0.405, self.callbacks.bot_map_heading)
            if trig.get_distance(box_position, self.callbacks.box_target_position) < 0.15:
                self.led1_pub.publish(1) # green
                self.led2_pub.publish(3) # red
                self.sound_pub.publish(1)
                time.sleep(5)
                self.led1_pub.publish(0) # off
                self.led2_pub.publish(0) # off
                return 'return'
        self.twist.linear.x = 0
        self.cmd_vel_pub.publish(self.twist)
        return 'reverse'
class Reverse(smach.State):
    """Back away from the box until the box marker becomes visible again."""
    def __init__(self, callbacks):
        smach.State.__init__(self, outcomes=['done4', 'move_close_to_box', 'find_markers'])
        self.callbacks = callbacks
        self.cmd_vel_pub = rospy.Publisher('mobile_base/commands/velocity', Twist, queue_size=1)
        self.twist = Twist()
    def execute(self, userdata):
        global shutdown_requested
        # Clear the stale marker so the loop waits for a fresh detection.
        self.callbacks.box_position = None
        # Get the current x position, we'll move half a meter backwards
        # initX = self.callbacks.bot_odom_position.x
        # currX = self.callbacks.bot_odom_position.x
        # move backwards while moved < 0.5 meters and taking < 5 secs
        # NOTE(review): no timeout — loops until the box is re-detected;
        # currX below is assigned but unused.
        while self.callbacks.box_position is None:
            self.twist.linear.x = -0.4 # TODO 0.7
            self.cmd_vel_pub.publish(self.twist)
            currX = self.callbacks.bot_odom_position.x
            if shutdown_requested:
                return 'done4'
        # rospy.loginfo("Time! duration: {}".format(rospy.Time.now() - time))
        return 'move_close_to_box' # TODO: handle lost box
class Box1(smach.State):
    """Navigate to the box-1 pose, run red-shape detection, and signal with
    LEDs/sound if the detected shape matches the previously seen one."""
    def __init__(self, callbacks):
        smach.State.__init__(self, outcomes=['done4', 'box4'])
        self.client = actionlib.SimpleActionClient('move_base', MoveBaseAction)
        #self.client.wait_for_server()
        '''
        position:
            x: -1.5099166388
            y: -0.890386754885
            z: 0.0
        orientation:
            x: 0.0
            y: 0.0
            z: 0.808986962348
            w: 0.587826585611
        '''
        # Hard-coded goal pose in the map frame (see recorded pose above).
        self.target = MoveBaseGoal()
        self.target.target_pose.header.frame_id = "map"
        self.target.target_pose.header.stamp = rospy.Time.now()
        self.target.target_pose.pose.position.x = -1.5099166388
        self.target.target_pose.pose.position.y = -0.890386754885
        self.target.target_pose.pose.orientation.x = 0.0
        self.target.target_pose.pose.orientation.y = 0.0
        self.target.target_pose.pose.orientation.z = 0.853781929879
        self.target.target_pose.pose.orientation.w = 0.520630786846
        self.callbacks = callbacks
        self.led1_pub = rospy.Publisher('/mobile_base/commands/led1', Led, queue_size=1)
        self.led2_pub = rospy.Publisher('/mobile_base/commands/led2', Led, queue_size=1)
        self.sound_pub = rospy.Publisher('/mobile_base/commands/sound', Sound, queue_size=1)
    def execute(self, userdata):
        global shutdown_requested
        self.client.send_goal(self.target)
        self.client.wait_for_result()
        print("Goal reached")
        try:
            shape = v2.shapeDetection('red', 1)
            print("red shape detected:" + shape)
        except Exception as e:
            # Detection failure falls back to a default shape so the run
            # can continue.
            print(e)
            traceback.print_exc()
            shape = "triangle"
        if shape == event_two.previous_shape:
            self.led1_pub.publish(Led.GREEN)
            self.led2_pub.publish(Led.ORANGE)
            self.sound_pub.publish(1)
            time.sleep(1)
            self.led1_pub.publish(0)
            self.led2_pub.publish(0)
        if shutdown_requested:
            return 'done4'
        else:
            return 'box4'
class Box2(smach.State):
def __init__(self, callbacks):
smach.State.__init__(self, outcomes=['done4', 'move_close_to_box'])
self.client = actionlib.SimpleActionClient('move_base', MoveBaseAction)
self.client.wait_for_server()
'''
position:
x: -2.36646936837
y: -1.27823477483
z: 0.0
orientation:
x: 0.0
y: 0.0
z: -0.991971583453
w: 0.126460972722
'''
self.target = MoveBaseGoal()
self.target.target_pose.header.frame_id = | |
############################################
# Copyright (c) 2012 Microsoft Corporation
#
# Z3 Python interface
#
# Author: <NAME> (leonardo)
############################################
"""Z3 is a high performance theorem prover developed at Microsoft Research. Z3 is used in many applications such as: software/hardware verification and testing, constraint solving, analysis of hybrid systems, security, biology (in silico analysis), and geometrical problems.
Several online tutorials for Z3Py are available at:
http://rise4fun.com/Z3Py/tutorial/guide
Please send feedback, comments and/or corrections on the Issue tracker for https://github.com/Z3prover/z3.git. Your comments are very valuable.
Small example:
>>> x = Int('x')
>>> y = Int('y')
>>> s = Solver()
>>> s.add(x > 0)
>>> s.add(x < 2)
>>> s.add(y == x + 1)
>>> s.check()
sat
>>> m = s.model()
>>> m[x]
1
>>> m[y]
2
Z3 exceptions:
>>> try:
... x = BitVec('x', 32)
... y = Bool('y')
... # the expression x + y is type incorrect
... n = x + y
... except Z3Exception as ex:
... print("failed: %s" % ex)
failed: sort mismatch
"""
from . import z3core
from .z3core import *
from .z3types import *
from .z3consts import *
from .z3printer import *
from fractions import Fraction
import sys
import io
import math
import copy
# Snapshot of Python's __debug__ flag; gates Z3Py's internal sanity checks.
Z3_DEBUG = __debug__
def z3_debug():
    """Return True when Z3Py debug checks are enabled."""
    return Z3_DEBUG
# Portable integer test. Python 2 has both `int` and `long`; Python 3 only
# `int`. Use sys.version_info rather than a lexicographic comparison on the
# sys.version string, which is fragile (string ordering is not version
# ordering). Note bool is a subclass of int, so _is_int(True) is True.
if sys.version_info[0] < 3:
    def _is_int(v):
        """Return True if *v* is a Python 2 integer (int or long)."""
        return isinstance(v, (int, long))
else:
    def _is_int(v):
        """Return True if *v* is an int."""
        return isinstance(v, int)
def enable_trace(msg):
    """Enable Z3 trace messages tagged with *msg*."""
    Z3_enable_trace(msg)
def disable_trace(msg):
    """Disable Z3 trace messages tagged with *msg*."""
    Z3_disable_trace(msg)
def get_version_string():
    """Return the loaded Z3 version as a "major.minor.build" string.

    The revision number is queried but intentionally omitted from the
    string; use get_version() for the full 4-tuple.
    """
    fields = [ctypes.c_uint(0) for _ in range(4)]
    Z3_get_version(*fields)
    major, minor, build, _rev = fields
    return "%s.%s.%s" % (major.value, minor.value, build.value)
def get_version():
    """Return the loaded Z3 version as a (major, minor, build, rev) tuple."""
    fields = [ctypes.c_uint(0) for _ in range(4)]
    Z3_get_version(*fields)
    return tuple(f.value for f in fields)
def get_full_version():
    """Return the full Z3 version string reported by the native library."""
    return Z3_get_full_version()
# We use _z3_assert instead of the assert command because we want to
# produce nice error messages in Z3Py at rise4fun.com
def _z3_assert(cond, msg):
    """Raise Z3Exception(msg) if *cond* is false (used instead of `assert`
    so the message survives python -O and renders nicely for users)."""
    if not cond:
        raise Z3Exception(msg)
def _z3_check_cint_overflow(n, name):
    """Raise if *n* does not round-trip through a C int (i.e. would overflow
    when passed to the Z3 C API)."""
    _z3_assert(ctypes.c_int(n).value == n, name + " is too large")
def open_log(fname):
    """Log interaction to a file. This function must be invoked immediately after init(). """
    Z3_open_log(fname)
def append_log(s):
    """Append user-defined string to interaction log. """
    Z3_append_log(s)
def to_symbol(s, ctx=None):
    """Convert an integer or string into a Z3 symbol.

    Integers become numbered symbols; any other value is passed to the
    string-symbol constructor. Uses the global context when *ctx* is None.
    """
    if _is_int(s):
        return Z3_mk_int_symbol(_get_ctx(ctx).ref(), s)
    else:
        return Z3_mk_string_symbol(_get_ctx(ctx).ref(), s)
def _symbol2py(ctx, s):
    """Convert a Z3 symbol back into a Python object. """
    # Numbered symbols are rendered as "k!<n>"; string symbols round-trip.
    if Z3_get_symbol_kind(ctx.ref(), s) == Z3_INT_SYMBOL:
        return "k!%s" % Z3_get_symbol_int(ctx.ref(), s)
    else:
        return Z3_get_symbol_string(ctx.ref(), s)
# Hack for having nary functions that can receive one argument that is the
# list of arguments.
# Use this when function takes a single list of arguments
def _get_args(args):
    """Normalize an n-ary argument pack: a single tuple/list argument is
    returned directly, a single set/AstVector is expanded to a list, and
    anything else is returned unchanged."""
    try:
        if len(args) != 1:
            return args
        first = args[0]
        if isinstance(first, (tuple, list)):
            return first
        # Keep the short-circuit: set is tested before AstVector.
        if isinstance(first, set) or isinstance(first, AstVector):
            return [elem for elem in first]
        return args
    except:  # len() is not necessarily defined when args is not a sequence
        return args
# Use this when function takes multiple arguments
def _get_args_ast_list(args):
    """Coerce a set/AstVector/tuple of arguments into a list; lists and
    anything unrecognized are returned as-is."""
    try:
        # Keep the short-circuit evaluation order of the membership tests.
        expandable = (isinstance(args, set) or isinstance(args, AstVector)
                      or isinstance(args, tuple))
        if expandable:
            return [arg for arg in args]
        return args
    except:
        return args
def _to_param_value(val):
    """Render a Python value as a Z3 parameter string; booleans map to the
    lowercase literals "true"/"false"."""
    if isinstance(val, bool):
        return "true" if val else "false"
    return str(val)
def z3_error_handler(c, e):
    """No-op error callback installed into the Z3 C library.

    Its only job is to prevent the library's default behaviour of exiting
    the process; the wrappers in z3core.py raise a Z3Exception when an
    error is detected.
    """
    # Do nothing error handler, just avoid exit(0)
    # The wrappers in z3core.py will raise a Z3Exception if an error is detected
    return
class Context:
    """A Context manages all other Z3 objects, global configuration options, etc.

    Z3Py uses a default global context. For most applications this is sufficient.
    An application may use multiple Z3 contexts. Objects created in one context
    cannot be used in another one. However, several objects may be "translated" from
    one context to another. It is not safe to access Z3 objects from multiple threads.
    The only exception is the method `interrupt()` that can be used to interrupt() a long
    computation.
    The initialization method receives global configuration options for the new context.
    """
    def __init__(self, *args, **kws):
        if z3_debug():
            _z3_assert(len(args) % 2 == 0, "Argument list must have an even number of elements.")
        conf = Z3_mk_config()
        # Keyword options are upper-cased; positional args are consumed in
        # (name, value) pairs.
        for key in kws:
            value = kws[key]
            Z3_set_param_value(conf, str(key).upper(), _to_param_value(value))
        prev = None
        for a in args:
            if prev is None:
                prev = a
            else:
                Z3_set_param_value(conf, str(prev), _to_param_value(a))
                prev = None
        # Reference-counted context; the config object is only needed for
        # construction and is freed immediately afterwards.
        self.ctx = Z3_mk_context_rc(conf)
        # Install the no-op handler so C-level errors raise Z3Exception
        # instead of terminating the process.
        self.eh = Z3_set_error_handler(self.ctx, z3_error_handler)
        Z3_set_ast_print_mode(self.ctx, Z3_PRINT_SMTLIB2_COMPLIANT)
        Z3_del_config(conf)
    def __del__(self):
        # NOTE(review): assumes __init__ completed; self.ctx would be missing
        # here if context construction raised — confirm before hardening.
        Z3_del_context(self.ctx)
        self.ctx = None
        self.eh = None
    def ref(self):
        """Return a reference to the actual C pointer to the Z3 context."""
        return self.ctx
    def interrupt(self):
        """Interrupt a solver performing a satisfiability test, a tactic processing a goal, or simplify functions.

        This method can be invoked from a thread different from the one executing the
        interruptible procedure.
        """
        Z3_interrupt(self.ref())
# Global Z3 context
_main_ctx = None  # lazily created on first call to main_ctx()
def main_ctx():
    """Return a reference to the global Z3 context.
    >>> x = Real('x')
    >>> x.ctx == main_ctx()
    True
    >>> c = Context()
    >>> c == main_ctx()
    False
    >>> x2 = Real('x', c)
    >>> x2.ctx == c
    True
    >>> eq(x, x2)
    False
    """
    global _main_ctx
    # Create the default context lazily on first use.
    if _main_ctx is None:
        _main_ctx = Context()
    return _main_ctx
def _get_ctx(ctx):
if ctx is None:
return main_ctx()
else:
return ctx
def get_ctx(ctx):
    """Public alias for _get_ctx(): map None to the global default context."""
    return _get_ctx(ctx)
def set_param(*args, **kws):
    """Set Z3 global (or module) parameters.

    Keyword arguments recognized by the pretty printer are consumed here;
    all remaining options are forwarded to Z3. Positional arguments are
    alternating name/value pairs.

    >>> set_param(precision=10)
    """
    if z3_debug():
        _z3_assert(len(args) % 2 == 0, "Argument list must have an even number of elements.")
    # Keep only the keyword options the pretty printer did not claim.
    remaining = {k: v for k, v in kws.items() if not set_pp_option(k, v)}
    for key, value in remaining.items():
        Z3_global_param_set(str(key).upper(), _to_param_value(value))
    for name, value in zip(args[::2], args[1::2]):
        Z3_global_param_set(str(name), _to_param_value(value))
def reset_params():
    """Reset all global (or module) parameters.

    Undoes any values previously applied via set_param()/set_option().
    """
    Z3_global_param_reset_all()
def set_option(*args, **kws):
    """Alias for 'set_param' for backward compatibility.

    Accepts the same positional name/value pairs and keyword arguments.
    """
    return set_param(*args, **kws)
def get_param(name):
    """Return the value of a Z3 global (or module) parameter

    >>> get_param('nlsat.reorder')
    'true'
    """
    # Z3_global_param_get writes the value into a one-element C string
    # buffer and returns a success flag.
    ptr = (ctypes.c_char_p * 1)()
    if Z3_global_param_get(str(name), ptr):
        r = z3core._to_pystr(ptr[0])
        return r
    raise Z3Exception("failed to retrieve value for '%s'" % name)
#########################################
#
# ASTs base class
#
#########################################
# Mark objects that use pretty printer
class Z3PPObject:
    """Superclass for all Z3 objects that have support for pretty printing."""

    def use_pp(self):
        """Return True to opt in to the Z3 pretty printer."""
        return True

    def _repr_html_(self):
        """Jupyter hook: render repr() with HTML mode temporarily enabled."""
        in_html = in_html_mode()
        set_html_mode(True)
        try:
            res = repr(self)
        finally:
            # Restore the caller's mode even if repr() raises; previously an
            # exception here left HTML mode permanently enabled.
            set_html_mode(in_html)
        return res
class AstRef(Z3PPObject):
"""AST are Direct Acyclic Graphs (DAGs) used to represent sorts, declarations and expressions."""
    def __init__(self, ast, ctx=None):
        """Wrap a raw Z3_ast handle and take a reference count on it."""
        self.ast = ast
        self.ctx = _get_ctx(ctx)
        # Keep the underlying node alive while this wrapper exists.
        Z3_inc_ref(self.ctx.ref(), self.as_ast())
    def __del__(self):
        # Guard: ctx/ast may already be torn down (e.g. during interpreter
        # shutdown or after an earlier release); only dec-ref once.
        if self.ctx.ref() is not None and self.ast is not None:
            Z3_dec_ref(self.ctx.ref(), self.as_ast())
            self.ast = None
def __deepcopy__(self, memo={}):
return _to_ast_ref(self.ast, self.ctx)
    def __str__(self):
        """Delegate formatting to obj_to_string()."""
        return obj_to_string(self)
    def __repr__(self):
        """Same rendering as __str__: delegate to obj_to_string()."""
        return obj_to_string(self)
    def __eq__(self, other):
        """`==` on AST references delegates to eq() (structural identity)."""
        return self.eq(other)
    def __hash__(self):
        """Delegate to the hash() method so __eq__/__hash__ stay consistent."""
        return self.hash()
    def __nonzero__(self):
        """Python 2 truth-value protocol: defer to __bool__."""
        return self.__bool__()
def __bool__(self):
if is_true(self):
return True
elif is_false(self):
return False
elif is_eq(self) and self.num_args() == 2:
return self.arg(0).eq(self.arg(1))
else:
raise Z3Exception("Symbolic expressions cannot be cast to concrete Boolean values.")
    def sexpr(self):
        """Return a string representing the AST node in s-expression notation.
        >>> x = Int('x')
        >>> ((x + 1)*x).sexpr()
        '(* (+ x 1) x)'
        """
        # Formatting is done by the C-level printer (Z3_ast_to_string).
        return Z3_ast_to_string(self.ctx_ref(), self.as_ast())
    def as_ast(self):
        """Return a pointer to the corresponding C Z3_ast object."""
        # Subclasses may override to coerce their handle type to Z3_ast.
        return self.ast
    def get_id(self):
        """Return unique identifier for object. It can be used for hash-tables and maps.

        The identifier comes from the C API (Z3_get_ast_id).
        """
        return Z3_get_ast_id(self.ctx_ref(), self.as_ast())
    def ctx_ref(self):
        """Return a reference to the C context where this AST node is stored."""
        # Convenience shortcut for self.ctx.ref().
        return self.ctx.ref()
    def eq(self, other):
        """Return `True` if `self` and `other` are structurally identical.
        >>> x = Int('x')
        >>> n1 = x + 1
        >>> n2 = 1 + x
        >>> n1.eq(n2)
        False
        >>> n1 = simplify(n1)
        >>> n2 = simplify(n2)
        >>> n1.eq(n2)
        True
        """
        if z3_debug():
            _z3_assert(is_ast(other), "Z3 AST expected")
        # Structural comparison is performed in the C layer.
        return Z3_is_eq_ast(self.ctx_ref(), self.as_ast(), other.as_ast())
    def translate(self, target):
        """Translate `self` to the context `target`. That is, return a copy of `self` in the context `target`.
        >>> c1 = Context()
        >>> c2 = Context()
        >>> x = Int('x', c1)
        >>> y = Int('y', c2)
        >>> # Nodes in different contexts can't be mixed.
        >>> # However, we can translate nodes from one context to another.
        >>> x.translate(c2) + y
        x + y
        """
        if z3_debug():
            _z3_assert(isinstance(target, Context), "argument must be a Z3 context")
        # Z3_translate copies the node into the target context; the result is
        # wrapped back into the appropriate Python AST class.
        return _to_ast_ref(Z3_translate(self.ctx.ref(), self.as_ast(), target.ref()), target)
    def __copy__(self):
        """Shallow-copy hook: translating into our own context yields a new reference."""
        return self.translate(self.ctx)
def hash(self):
"""Return a hashcode for the `self`.
>>> n1 = simplify(Int('x') + 1)
>>> n2 = simplify(2 + Int('x') - 1)
>>> n1.hash() == n2.hash()
True
"""
| |
<filename>SatQuMA_1.1.py
# -*- coding: utf-8 -*-
"""
This script calculates the secure key length for an asymmetric BB84 security
protocol with weak coherent pulses and three signal states (or 2 'decoy'
states).
The 5 main protocol parameters can be either optimised or specified:
Px = X basis polarisation bias (Alice & Bob)
pk1 = Probability Alice sends a signal with intensity 1
pk2 = Probability Alice sends a signal with intensity 2
mu1 = Intensity of signal 1
mu2 = Intensity of signal 2
This version employs either the Chernoff or Hoeffding tail bounds for finite
block keys. It can also calculate the asymptotic key length (accumulated over
an infinite number of identical satellite overpasses).
This script is based on the Mathematica script SatQKD_finite_key.nb written by
<NAME> and <NAME>.
Both are primarily based upon the paper,
[1] <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>, "Concise
security bounds for practical decoy-state quantum key distribution", Phys. Rev.
A, 89, 022307 (2014),
with the bounds on the statistical fluctuations for the number of n-photon
events taken from,
[2] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>, "Tight
security bounds for decoy-state quantum key distribution", Sci. Rep., vol. 10,
14312 (2020),
and the estimation of the error correction term from,
[3] <NAME>, <NAME>, <NAME>, and <NAME>, "Fundamental
finite key limits for one-way information reconciliation in quantum key
distribution," Quant. Inf. Proc., vol. 16, 280, (2017).
----------
When running the code the user should check the values in the input sections
marked (1) and (2) below, where:
(1) The type of calculation is selected and the optimised/specified
parameters are initialised.
(2) The global parameters defining the system are set.
"""
import numpy as np
from sys import exit
from time import perf_counter, process_time
from os.path import join
#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
F_or_T = [False, True] # List used to switch between False or True values
#******************************************************************************
#******************************************************************************
# --- USER INPUT SECTION (1) ---
# Select the type of secure key length calculation and initialise the
# parameters
#******************************************************************************
#******************************************************************************
#******************************************************************************
# Select SKL calculation type via tOptimise
#******************************************************************************
# True: Optimise over the main protocol parameters.
# False: Specify the main protocol parameters.
#******************************************************************************
tOptimise = F_or_T[1] # False (0) or True (1)
if (tOptimise):
#**************************************************************************
# Calculate the secure key length by optimising over the protocol
# parameters.
#**************************************************************************
#**************************************************************************
# Limit the bounds for each parameter for the optimisation search.
# Each row of the array xb gives the lower and upper bound [lb, ub] for
# each of the parameters in the order [Px,pk1,pk2,mu1,mu2]
#**************************************************************************
xb = np.array([[0.3,1.0],[0.6,0.9999],[0.0,0.4],[0.3,1.0],[0.1,0.5]])
#xb = np.array([[0.0,1.0],[0.0,1.0],[0.0,1.0],[0.0,1.0],[0.0,1.0]]) # Default
#**************************************************************************
# Select parameter initialisation procedure via tInit
#**************************************************************************
# True: Provide initial values for optimised parameters.
# False: Randomly select initial values for optimised parameters
# based on the protocol bounds and constraints.
#**************************************************************************
tInit = F_or_T[0] # False (0) or True (1)
if (tInit):
#**********************************************************************
# Provide initial values for optimised parameters
#**********************************************************************
Px_i = 0.5 # Asymmetric polarisation probability
pk1_i = 0.7 # Probability Alice prepares intensity 1
pk2_i = 0.1 # Probability Alice prepares intensity 2
mu1_i = 0.8 # Intensity 1
mu2_i = 0.3 # Intensity 2
else:
#**************************************************************************
# Calculate the secure key length using specified values for the protocol
# parameters
#**************************************************************************
Px_i = 0.7611 # Asymmetric polarisation probability
pk1_i = 0.7501 # Probability Alice prepares intensity 1
pk2_i = 0.1749 # Probability Alice prepares intensity 2
mu1_i = 0.7921 # Intensity 1
mu2_i = 0.1707 # Intensity 2
#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#******************************************************************************
#******************************************************************************
# --- USER INPUT SECTION (2) ---
# Define the global parameters used to calculate the secure key length
#******************************************************************************
#******************************************************************************
#******************************************************************************
# Input file options
#******************************************************************************
# Path to loss file (empty = current directory)
# E.g. loss_path = 'C:\\path\\to\\directory'
loss_path = '..'
# File containing loss data (for given xi value below)
loss_file = 'FS_loss_XI0.csv'
lc = 3 # Column containing loss data in file (counting from 1)
#******************************************************************************
# Fixed system parameters
#******************************************************************************
# Angle between receiver zenith and satellite (from Earth's centre)
xi = 0.0 # [rad.]
#mu3 = 10**(-9) # Weak coherent pulse 3 intensity, mu_3
mu3 = 0 # Intensity of pulse 3 (fixed)
# Prescribed errors in protocol correctness and secrecy
eps_c = 10**(-15) # Correctness parameter
eps_s = 10**(-9) # Secrecy parameter
# Intrinsic Quantum Bit Error Rate (QBER_I)
QBERI_list = [0.001,0.003,0.005] # list, array, tuple or singleton
# Extraneous count probability
Pec_list = [1e-8,1e-7,1e-6] # list, array, tuple or singleton
# Afterpulse probability
Pap = 0.001 # After-pulse probability
# Number of satellite passes
NoPass = 1 # Number of satellite passes
# Repetition rate of the source in Hz
Rrate = 1*10**(9) # Source rate (Hz)
# Number of pulses sent in Hz
Npulse = NoPass*Rrate
#******************************************************************************
# Define (inclusive) range for looped parameters: dt and ls
#******************************************************************************
# Index for windowing time slot arrays, e.g. A(t)[t0-dt:t0+dt], with dt <= 346
dt_range = np.array([200, 350, 10]) # Start, stop, step index (defualt)
# Set a minimum elevation angle for transmission, this value will override
# the values corresponding to dt_range[1] if necessary.
min_elev = 10.0 # Minimum elevation transmission angle (degs)
shift_elev0 = 0.0 # Shift the elevation angle taken as t = 0 (degs).
# Excess system loss in dB
ls_range = np.array([0, 12, 2]) # Start, stop, step value
#******************************************************************************
# NOTE: All nominal losses have been moved into FS_loss_XI<xi>.csv for now and
# so we simply set the efficiencies listed below to unity.
#******************************************************************************
# Detector efficiency
eta_d = 1.0 # Typically 40-60%
# Internal telescope transmitter loss
eta_transmitter = 1.0 # Typically ~50%
# Combined losses
eta = eta_d * eta_transmitter
#******************************************************************************
# Output file options
#******************************************************************************
# Write output to CSV file?
tFullData = F_or_T[1] # False (0) or True (1)
# Write out only max values of SKL for dt?
tOptiData = F_or_T[1] # False (0) or True (1)
# Write out optimised values for each system in one file?
tMultiOpt = F_or_T[1] # False (0) or True (1)
# Write out optimiser metrics for each (final) calculation?
tMetrics = F_or_T[1] # False (0) or True (1)
# Path for output files (empty = current directory)
# E.g. outpath = 'C:\\path\\to\\directory'
outpath = ''
# Basename for output file (excluding .csv)
outbase = "out"
# Print values to StdOut during calculations?
tPrint = F_or_T[1] # False (0) or True (1)
#******************************************************************************
# Advanced/additional parameters
#******************************************************************************
# Use the Chernoff bounds when estimating statistical fluctuations in the
# count statistics? Otherwise use the Hoeffding bound.
#tChernoff = F_or_T[1] # False (0) or True (1)
# Select the type of tail bounds to use for estimating statistical fluctuations
# in the count statistics.
# 'Chernoff' = boundFunc[0] => Different upper and lower bound terms
# 'Hoeffding' = boundFunc[1] => Same upper and lower bound terms
# 'Asymptotic' = boundFunc[2] => No tail bounds. This choice forces also
# errcorrFunc = 'block' below.
boundOpts = ['Chernoff','Hoeffding','Asymptotic']
boundFunc = boundOpts[0] # Select an option from the list above.
# Select the method for estimating the number of bits sacrificed for error
# correction, listed below in order of decreasing precision (increasing
# smoothness).
# 'logM' = logM(nX, QBERx, eps_c) = errcorrOpts[0]
# 'block' = 1.16 * nX * h(QBERx) = errcorrOpts[1]
# 'mXtot' = 1.16 * mXtot = errcorrOpts[2]
# 'None' = 0 = errcorrOpts[3]
errcorrOpts = ['logM','block','mXtot','None']
errcorrFunc = errcorrOpts[0] # Select a method from the list above.
# Compare SKL optimised with/without EC (for optimised calculation only).
tCompareEC = F_or_T[0] # False (0) or True (1)
# Numerical value to use when denominator values are potentially zero
num_zero = 10**(-10) # Note to self: Use num_min below?
opt_methods = ['COBYLA','SLSQP','trust-constr']
method = opt_methods[0] # Select a optimisation method
NoptMin = 10 # Minimum No. of optimisations to strive for
NoptMax = 1000 # Maximum No. of optimisations (not used)
tStopZero = F_or_T[1] # Stop optimizing if the first NoptMin return SKL = 0?
tStopBetter = F_or_T[1] # Stop after NoptMin optimizations if SKL improved?
if method == 'trust-constr':
# Optimiser options: minimize, method='trust-constr'
xtol = 1.0e-8 # Tolerance to terminate by change of independent variable(s)
gtol = 1.0e-10 # Tolerance to terminate Lagrangian gradient
btol = 1.0e-8 # Threshold on the barrier parameter for termination
Nmax = 1000 # Max No. of iterations
const_pen = 1.0 # Initial constraint penalty parameter (default = 1.0)
tr_rad = 1.0 # Initial trust radius (default = 1.0)
barr_par = 0.1 # Initial barrier parameter (default = 0.1)
barr_tol = 0.1 # Initial barrier tolerance for barrier sub- (default = 0.1)
elif method == 'COBYLA':
# Optimiser | |
<gh_stars>10-100
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
import scipy.io as sio
import gym
import time
import random
import datetime
import os
import imageio
import glob
import tqdm
import json
# tf.random.set_seed(11)
# tf.keras.backend.set_floatx('float64')
def discrete_circle_sample_count(n):
    """Enumerate every integer grid offset inside a disc of radius `n`.

    Returns (count, moves) where `moves` maps a running index to an
    np.array([dy, dx]) offset satisfying dx*dx + dy*dy <= n*n, and `count`
    is the number of such offsets.
    """
    moves = {}
    idx = 0
    for dx in range(-n, n + 1):
        # Largest |dy| that stays inside the disc for this column.
        span = int(np.floor(np.sqrt(n ** 2 - dx ** 2)))
        for dy in range(-span, span + 1):
            moves[idx] = np.array([dy, dx])
            idx += 1
    return idx, moves
def _huber_loss(self, y_true, y_pred, clip_delta=1.0):
    """Mean Huber loss: quadratic for |error| <= clip_delta, linear beyond.

    NOTE(review): this is a module-level function that still takes an unused
    `self` parameter (it was apparently written as a method); the signature
    is kept for compatibility with existing callers.
    """
    error = y_true - y_pred
    cond = tf.abs(error) <= clip_delta
    squared_loss = 0.5 * tf.square(error)
    # Linear branch for |error| > clip_delta. Algebraically this equals
    # clip_delta*|error| - 0.5*clip_delta**2 (the original, misleadingly,
    # called this `quadratic_loss`).
    linear_loss = 0.5 * tf.square(clip_delta) + clip_delta * (tf.abs(error) - clip_delta)
    return tf.math.reduce_mean(tf.where(cond, squared_loss, linear_loss))
# agent actor net: inputs state map,pos,buffer,operation,bandwidth; outputs: move,operation
def agent_actor(input_dim_list, cnn_kernel_size, move_r):
    """Build the per-agent actor network.

    Inputs (shapes from input_dim_list, in order): local state map,
    total-buffer state, done-buffer state, bandwidth scalar. Outputs: a
    spatial map of move scores and a concatenated operation distribution
    (execute softmax + offload softmax).

    NOTE(review): assumes input_dim_list[0] is an (H, W, C) map shape with
    H divisible by (2*move_r + 1) -- confirm against the environment.
    """
    state_map = keras.Input(shape=input_dim_list[0])
    # position = keras.Input(shape=input_dim_list[1])
    total_buffer = keras.Input(shape=input_dim_list[1])
    done_buffer = keras.Input(shape=input_dim_list[2])
    bandwidth = keras.Input(shape=input_dim_list[3])
    # CNN for map
    cnn_map = layers.Conv2D(input_dim_list[0][2], cnn_kernel_size, activation='relu', padding='same')(state_map)
    # Pool so the spatial output matches the (2*move_r+1)^2 move window.
    cnn_map = layers.AveragePooling2D(pool_size=int(input_dim_list[0][0] / (2 * move_r + 1)))(cnn_map)
    cnn_map = layers.AlphaDropout(0.2)(cnn_map)
    move_out = layers.Dense(1, activation='relu')(cnn_map)
    # move_out = move_out / tf.reduce_sum(move_out, [1, 2, 3], keepdims=True)
    # move_out = tf.exp(move_out) / tf.reduce_sum(tf.exp(move_out), [1, 2, 3], keepdims=True)
    # cnn_map = layers.Conv2D(input_dim_list[0][2], cnn_kernel_size, activation='relu', padding='same')(cnn_map)
    # cnn_map = layers.MaxPooling2D(pool_size=cnn_kernel_size)(cnn_map)
    # cnn_map = layers.Dropout(0.2)(cnn_map)
    # cnn_output = layers.Flatten()(cnn_map)
    # cnn_output = layers.Dense(128, activation='relu')(cnn_output)
    # move_dist = layers.Dense(move_count, activation='softmax')(move_out)
    # operation
    # total_mlp = layers.Dense(2, activation='relu')(total_buffer)
    # done_mlp = layers.Dense(2, activation='relu')(done_buffer)
    # buffer_mlp = layers.concatenate([total_mlp, done_mlp], axis=-1)
    # bandwidth_in = tf.expand_dims(bandwidth, axis=-1)
    # bandwidth_in = tf.tile(bandwidth_in, [1, 2, 1])
    # # concatenate on dim[1] batch*new*2
    # op_output = layers.concatenate([buffer_mlp, bandwidth_in], axis=-1)
    # op_dist = layers.Dense(input_dim_list[2][1], activation='softmax')(op_output)
    # Execute-operation head: mix buffer rows, then softmax over slots.
    total_mlp = tf.transpose(total_buffer, perm=[0, 2, 1])
    total_mlp = layers.Dense(1, activation='relu')(total_mlp)
    total_mlp = tf.transpose(total_mlp, perm=[0, 2, 1])
    exe_op = layers.Dense(input_dim_list[1][1], activation='softmax')(total_mlp)
    # Offload-operation head: done buffer combined with bandwidth.
    done_mlp = tf.transpose(done_buffer, perm=[0, 2, 1])
    done_mlp = layers.Dense(1, activation='relu')(done_mlp)
    done_mlp = tf.transpose(done_mlp, perm=[0, 2, 1])
    bandwidth_in = tf.expand_dims(bandwidth, axis=-1)
    bandwidth_in = layers.Dense(1, activation='relu')(bandwidth_in)
    done_mlp = layers.concatenate([done_mlp, bandwidth_in], axis=-1)
    off_op = layers.Dense(input_dim_list[2][1], activation='softmax')(done_mlp)
    # Stack the two operation distributions along dim 1.
    op_dist = layers.concatenate([exe_op, off_op], axis=1)
    model = keras.Model(inputs=[state_map, total_buffer, done_buffer, bandwidth], outputs=[move_out, op_dist])
    # model.compile(loss=huber_loss, optimizer=keras.optimizers.Adam(learning_rate=self.lr_aa))
    return model
# center actor net: inputs sensor_map,agent_map,bandwidth_vector; outputs: bandwidth_vec
def center_actor(input_dim_list, cnn_kernel_size):
    """Build the central actor network.

    Inputs: the list of all agents' done-buffer states and their positions.
    Output: a softmax bandwidth allocation vector (one entry per agent).
    `cnn_kernel_size` is unused in the active path (it belongs to the
    commented-out map-based variant below).
    """
    done_buffer_list = keras.Input(shape=input_dim_list[0])
    pos_list = keras.Input(shape=input_dim_list[1])
    # buffer
    buffer_state = layers.Dense(1, activation='relu')(done_buffer_list)
    buffer_state = tf.squeeze(buffer_state, axis=-1)
    # pos list
    pos = layers.Dense(2, activation='relu')(pos_list)
    # Per-agent features -> scalar score -> softmax over agents.
    bandwidth_out = layers.concatenate([buffer_state, pos], axis=-1)
    # bandwidth_out = layers.AlphaDropout(0.2)(bandwidth_out)
    bandwidth_out = layers.Dense(1, activation='relu')(bandwidth_out)
    bandwidth_out = tf.squeeze(bandwidth_out, axis=-1)
    # bandwidth_out += 1 / (input_dim_list[2] * 5)
    bandwidth_out = layers.Softmax()(bandwidth_out)
    # bandwidth_out += 1 / (input_dim_list[2] * 5)
    # bandwidth_out = bandwidth_out / tf.reduce_sum(bandwidth_out, 1, keepdims=True)
    # bandwidth_out = bandwidth_out / tf.expand_dims(tf.reduce_sum(bandwidth_out, 1), axis=-1)
    model = keras.Model(inputs=[done_buffer_list, pos_list], outputs=bandwidth_out, name='center_actor_net')
    # model.compile(loss=huber_loss, optimizer=keras.optimizers.Adam(learning_rate=self.lr_ca))
    # sensor_map = keras.Input(shape=input_dim_list[0])
    # agent_map = keras.Input(shape=input_dim_list[1])
    # # sensor map:cnn*2
    # sensor_cnn = layers.Conv2D(input_dim_list[0][2], cnn_kernel_size, activation='relu', padding='same')(sensor_map)
    # sensor_cnn = layers.MaxPooling2D(pool_size=cnn_kernel_size)(sensor_cnn)
    # # sensor_cnn = layers.Dropout(0.2)(sensor_cnn)
    # # sensor_cnn = layers.Conv2D(input_dim_list[0][2], cnn_kernel_size, activation='relu', padding='same')(sensor_cnn)
    # # sensor_cnn = layers.MaxPooling2D(pool_size=cnn_kernel_size)(sensor_cnn)
    # # sensor_cnn = layers.Dropout(0.2)(sensor_cnn)
    # sensor_cnn = layers.Flatten()(sensor_cnn)
    # sensor_cnn = layers.Dense(4, activation='softmax')(sensor_cnn)
    # # agent map
    # agent_cnn = layers.Conv2D(input_dim_list[1][2], cnn_kernel_size, activation='relu', padding='same')(agent_map)
    # agent_cnn = layers.MaxPooling2D(pool_size=cnn_kernel_size)(agent_cnn)
    # # agent_cnn = layers.Dropout(0.2)(agent_cnn)
    # # agent_cnn = layers.Conv2D(input_dim_list[1][2], cnn_kernel_size, activation='relu', padding='same')(agent_cnn)
    # # agent_cnn = layers.MaxPooling2D(pool_size=cnn_kernel_size)(agent_cnn)
    # # agent_cnn = layers.Dropout(0.2)(agent_cnn)
    # agent_cnn = layers.Flatten()(agent_cnn)
    # agent_cnn = layers.Dense(4, activation='softmax')(agent_cnn)
    # # add bandwidth
    # bandwidth_out = layers.concatenate([sensor_cnn, agent_cnn], axis=-1)
    # bandwidth_out = layers.Dense(input_dim_list[2], activation='softmax')(bandwidth_out)
    # model = keras.Model(inputs=[sensor_map, agent_map], outputs=bandwidth_out, name='center_actor_net')
    # # model.compile(loss=huber_loss, optimizer=keras.optimizers.Adam(learning_rate=self.lr_ca))
    return model
# agent aritic net
def agent_critic(input_dim_list, cnn_kernel_size):
    """Build the per-agent critic (Q) network.

    Inputs: state map, total buffer, done buffer, the chosen move map,
    the one-hot operation, and the bandwidth. Output: a scalar value
    estimate for the state/action pair.
    """
    state_map = keras.Input(shape=input_dim_list[0])
    # position = keras.Input(shape=input_dim_list[1])
    total_buffer = keras.Input(shape=input_dim_list[1])
    done_buffer = keras.Input(shape=input_dim_list[2])
    move = keras.Input(shape=input_dim_list[3])
    onehot_op = keras.Input(shape=input_dim_list[4])
    bandwidth = keras.Input(shape=input_dim_list[5])
    # map CNN
    # merge last dim
    map_cnn = layers.Dense(1, activation='relu')(state_map)
    map_cnn = layers.Conv2D(1, kernel_size=cnn_kernel_size, activation='relu', padding='same')(map_cnn)
    map_cnn = layers.AveragePooling2D(pool_size=cnn_kernel_size * 2)(map_cnn)
    map_cnn = layers.AlphaDropout(0.2)(map_cnn)
    # map_cnn = layers.Conv2D(input_dim_list[0][2], kernel_size=cnn_kernel_size, activation='relu', padding='same')(map_cnn)
    # map_cnn = layers.MaxPooling2D(pool_size=cnn_kernel_size)(map_cnn)
    # map_cnn = layers.Dropout(0.2)(map_cnn)
    map_cnn = layers.Flatten()(map_cnn)
    map_cnn = layers.Dense(2, activation='relu')(map_cnn)
    # mlp
    # pos_mlp = layers.Dense(1, activation='relu')(position)
    band_mlp = layers.Dense(1, activation='relu')(bandwidth)
    # Buffer branches: transpose so Dense mixes across buffer slots.
    total_mlp = tf.transpose(total_buffer, perm=[0, 2, 1])
    total_mlp = layers.Dense(1, activation='relu')(total_mlp)
    total_mlp = tf.squeeze(total_mlp, axis=-1)
    total_mlp = layers.Dense(2, activation='relu')(total_mlp)
    done_mlp = tf.transpose(done_buffer, perm=[0, 2, 1])
    done_mlp = layers.Dense(1, activation='relu')(done_mlp)
    done_mlp = tf.squeeze(done_mlp, axis=-1)
    done_mlp = layers.Dense(2, activation='relu')(done_mlp)
    move_mlp = layers.Flatten()(move)
    move_mlp = layers.Dense(1, activation='relu')(move_mlp)
    onehot_mlp = layers.Dense(1, activation='relu')(onehot_op)
    onehot_mlp = tf.squeeze(onehot_mlp, axis=-1)
    # Fuse all branches into the scalar reward estimate.
    all_mlp = layers.concatenate([map_cnn, band_mlp, total_mlp, done_mlp, move_mlp, onehot_mlp], axis=-1)
    reward_out = layers.Dense(1, activation='relu')(all_mlp)
    model = keras.Model(inputs=[state_map, total_buffer, done_buffer, move, onehot_op, bandwidth], outputs=reward_out)
    # model.compile(loss=_huber_loss, optimizer=keras.optimizers.Adam(learning_rate=0.02))
    return model
# center critic
def center_critic(input_dim_list, cnn_kernel_size):
    """Build the central critic (Q) network.

    Inputs: all agents' done-buffer states, their positions, and the
    bandwidth allocation vector. Output: a scalar value estimate.
    `cnn_kernel_size` is unused in the active path (kept for the
    commented-out map-based variant below).
    """
    done_buffer_list = keras.Input(shape=input_dim_list[0])
    pos_list = keras.Input(shape=input_dim_list[1])
    bandwidth_vec = keras.Input(shape=input_dim_list[2])
    # buffer
    buffer_state = layers.Dense(1, activation='relu')(done_buffer_list)
    buffer_state = tf.squeeze(buffer_state, axis=-1)
    buffer_state = layers.Dense(1, activation='relu')(buffer_state)
    buffer_state = tf.squeeze(buffer_state, axis=-1)
    # pos list
    pos = layers.Dense(1, activation='relu')(pos_list)
    pos = tf.squeeze(pos, axis=-1)
    # bandvec
    # band_in = layers.Dense(2, activation='relu')(bandwidth_vec)
    r_out = layers.concatenate([buffer_state, pos, bandwidth_vec])
    # r_out = layers.AlphaDropout(0.2)(r_out)
    r_out = layers.Dense(1, activation='relu')(r_out)
    model = keras.Model(inputs=[done_buffer_list, pos_list, bandwidth_vec], outputs=r_out, name='center_critic_net')
    # sensor_map = keras.Input(shape=input_dim_list[0])
    # agent_map = keras.Input(shape=input_dim_list[1])
    # bandwidth_vec = keras.Input(shape=input_dim_list[2])
    # # sensor map:cnn*2
    # sensor_cnn = layers.Conv2D(input_dim_list[0][2], cnn_kernel_size, activation='relu', padding='same')(sensor_map)
    # sensor_cnn = layers.MaxPooling2D(pool_size=cnn_kernel_size)(sensor_cnn)
    # # sensor_cnn = layers.Dropout(0.2)(sensor_cnn)
    # # sensor_cnn = layers.Conv2D(input_dim_list[0][2], cnn_kernel_size, activation='relu', padding='same')(sensor_cnn)
    # # sensor_cnn = layers.MaxPooling2D(pool_size=cnn_kernel_size)(sensor_cnn)
    # # sensor_cnn = layers.Dropout(0.2)(sensor_cnn)
    # sensor_cnn = layers.Flatten()(sensor_cnn)
    # sensor_cnn = layers.Dense(4, activation='relu')(sensor_cnn)
    # # agent map
    # agent_cnn = layers.Conv2D(input_dim_list[1][2], cnn_kernel_size, activation='relu', padding='same')(agent_map)
    # agent_cnn = layers.MaxPooling2D(pool_size=cnn_kernel_size)(agent_cnn)
    # # agent_cnn = layers.Dropout(0.2)(agent_cnn)
    # # agent_cnn = layers.Conv2D(input_dim_list[1][2], cnn_kernel_size, activation='relu', padding='same')(agent_cnn)
    # # agent_cnn = layers.MaxPooling2D(pool_size=cnn_kernel_size)(agent_cnn)
    # # agent_cnn = layers.Dropout(0.2)(agent_cnn)
    # agent_cnn = layers.Flatten()(agent_cnn)
    # agent_cnn = layers.Dense(4, activation='relu')(agent_cnn)
    # # add bandwidth
    # bandwidth_out = layers.concatenate([sensor_cnn, agent_cnn, bandwidth_vec], axis=-1)
    # bandwidth_out = layers.Dense(1, activation='relu')(bandwidth_out)
    # model = keras.Model(inputs=[sensor_map, agent_map, bandwidth_vec], outputs=bandwidth_out, name='center_critic_net')
    # # model.compile(loss=_huber_loss, optimizer=keras.optimizers.Adam(learning_rate=0.02))
    return model
def update_target_net(model, target, tau=0.8):
    """Polyak-style soft update: target <- tau*target + (1-tau)*model.

    `tau` is the fraction of the *old* target weights retained, so tau=0
    copies the source model outright and tau=1 leaves the target unchanged.
    """
    blended = [
        src_w * (1 - tau) + tgt_w * tau
        for src_w, tgt_w in zip(model.get_weights(), target.get_weights())
    ]
    target.set_weights(blended)
def merge_fl(nets, omega=0.5):
    """Federated-style blending: each net keeps `omega` of its own weights and
    takes (1 - omega) of the average of its peers' weights.

    The update is sequential and in place: by the time agent k is merged,
    agents 0..k-1 already carry their *new* weights, and those feed into
    agent k's peer average.
    """
    for agent_no, net in enumerate(nets):
        own = net.get_weights()
        peer_weights = [peer.get_weights() for j, peer in enumerate(nets) if j != agent_no]
        merged = []
        for layer_idx, layer in enumerate(own):
            peer_avg = np.sum([w[layer_idx] for w in peer_weights], axis=0) / len(peer_weights)
            merged.append(omega * layer + peer_avg * (1 - omega))
            # print([peer_avg.shape, layer.shape])
        net.set_weights(merged)
def circle_argmax(move_dist, move_r):
    """Return the [row, col] of the maximal move score, breaking ties by
    picking the position closest to the window centre (move_r, move_r).

    NOTE(review): move_dist is squeezed with tf.squeeze, so it is assumed to
    carry a trailing singleton channel axis -- confirm against callers.
    """
    max_pos = np.argwhere(tf.squeeze(move_dist, axis=-1) == np.max(move_dist))
    # print(tf.squeeze(move_dist, axis=-1))
    # Distance of each tied maximum from the centre of the move window.
    pos_dist = np.linalg.norm(max_pos - np.array([move_r, move_r]), axis=1)
    # print(max_pos)
    return max_pos[np.argmin(pos_dist)]
class MAACAgent(object):
def __init__(self, env, tau, gamma, lr_aa, lr_ac, lr_ca, lr_cc, batch, epsilon=0.2):
self.env = env
self.agents = self.env.agents
self.agent_num = self.env.agent_num
self.index_dim = 2
self.obs_r = self.env.obs_r
self.state_map_shape = (self.obs_r * 2 + 1, self.obs_r * 2 + 1, self.index_dim)
self.pos_shape = (2)
self.band_shape = (1)
self.buffstate_shape = (self.index_dim, self.env.max_buffer_size)
# self.sensor_map_shape = (self.env.map_size, self.env.map_size, self.index_dim)
# self.agent_map_shape = (self.env.map_size, self.env.map_size, self.index_dim)
self.buffer_list_shape = (self.agent_num, self.index_dim, self.env.max_buffer_size)
self.pos_list_shape = (self.agent_num, 2)
self.bandvec_shape = (self.env.agent_num)
self.op_shape = (self.index_dim, self.env.max_buffer_size)
self.move_count, self.move_dict = discrete_circle_sample_count(self.env.move_r)
self.movemap_shape = (self.env.move_r * 2 + 1, self.env.move_r * 2 + 1)
self.epsilon = epsilon
# learning params
self.tau = tau
self.cnn_kernel_size = 3
self.gamma = gamma
self.lr_aa = lr_aa
self.lr_ac = lr_ac
self.lr_ca = lr_ca
self.lr_cc = lr_cc
self.batch_size = batch
self.agent_memory = {}
self.softmax_memory = {}
self.center_memory = []
self.sample_prop = 1 / 4
# net init
self.agent_actors = []
self.center_actor = center_actor([self.buffer_list_shape, self.pos_list_shape, self.bandvec_shape], self.cnn_kernel_size)
self.agent_critics = []
self.center_critic = center_critic([self.buffer_list_shape, self.pos_list_shape, self.bandvec_shape], self.cnn_kernel_size)
self.target_agent_actors = []
self.target_center_actor = center_actor([self.buffer_list_shape, self.pos_list_shape, self.bandvec_shape], self.cnn_kernel_size)
update_target_net(self.center_actor, self.target_center_actor, tau=0)
self.target_agent_critics = []
self.target_center_critic = center_critic([self.buffer_list_shape, self.pos_list_shape, self.bandvec_shape], self.cnn_kernel_size)
update_target_net(self.center_critic, self.target_center_critic, tau=0)
self.agent_actor_opt = []
self.agent_critic_opt = []
self.center_actor_opt = keras.optimizers.Adam(learning_rate=lr_ca)
self.center_critic_opt = keras.optimizers.Adam(learning_rate=lr_cc)
self.summaries = {}
for i in range(self.env.agent_num):
self.agent_critic_opt.append(keras.optimizers.Adam(learning_rate=lr_ac))
self.agent_actor_opt.append(keras.optimizers.Adam(learning_rate=lr_aa))
new_agent_actor = agent_actor([self.state_map_shape, self.buffstate_shape, self.buffstate_shape, self.band_shape], self.cnn_kernel_size, self.env.move_r)
target_agent_actor = agent_actor([self.state_map_shape, self.buffstate_shape, self.buffstate_shape, self.band_shape], self.cnn_kernel_size, self.env.move_r)
# new_agent_actor = agent_actor([self.state_map_shape, self.pos_shape, self.buffstate_shape, self.buffstate_shape, self.band_shape], self.cnn_kernel_size, self.env.move_r)
# target_agent_actor = agent_actor([self.state_map_shape, self.pos_shape, self.buffstate_shape, self.buffstate_shape, self.band_shape], self.cnn_kernel_size, self.env.move_r)
update_target_net(new_agent_actor, target_agent_actor, tau=0)
self.agent_actors.append(new_agent_actor)
self.target_agent_actors.append(target_agent_actor)
# new_agent_critic = agent_critic([self.state_map_shape, self.pos_shape, self.buffstate_shape, self.buffstate_shape,
# self.movemap_shape, self.op_shape, self.band_shape], self.cnn_kernel_size)
# t_agent_critic = agent_critic([self.state_map_shape, self.pos_shape,
# | |
<reponame>cpausmit/Tapas
# ==================================================================================================
#
# >> create table Courses (Number char(10), Name char(80), Version int);
#
# >> create table Teachers (FirstName char(20), LastName char(20),
# Email char(40), Position char(20), Status char(20));
#
# >> create table Students (FirstName char(20), LastName char(20),
# Email char(40), AdvisorEmail char(40), SupervisorEmail char(40),
# Year int, Division char(4), Research char(6));
# ==================================================================================================
import MySQLdb
import sys
class DatabaseHandle:
    'Class to provide a unique database handle for all work on the database.'
    # Credentials and host settings come from a MySQL option file, so no
    # passwords appear in the code.
    def __init__(self,
                 file = "/etc/my.cnf",
                 group = "mysql-teaching",
                 database = "Teaching"):
        self.handle = MySQLdb.connect(read_default_file=file,
                                      read_default_group=group,
                                      db=database)
    def getCursor(self):
        # Return a fresh cursor on the shared connection.
        return self.handle.cursor()
    def getHandle(self):
        # Return the raw MySQLdb connection object.
        return self.handle
    def commit(self):
        # Commit the current transaction.
        self.handle.commit()
    def disco(self):
        # "Disconnect": close the underlying connection.
        self.handle.close()
class CourseResource:
    'Class resources for any MIT course.'

    def __init__(self, row):
        # Database rows arrive as strings; convert all counters to int.
        self.fill(row[0], row[1],
                  int(row[2]), int(row[3]), int(row[4]), int(row[5]),
                  int(row[6]), int(row[7]), int(row[8]), int(row[9]))

    def fill(self, term, number, nA, nL, nR, nFRTa, nHRTa, nFUTa, nHUTa, nPUTa):
        """Store term, course number and all staffing counts."""
        self.term = term
        self.number = number
        self.numAdmins = nA
        self.numLecturers = nL
        self.numRecitators = nR
        self.numFullRecTas = nFRTa
        self.numHalfRecTas = nHRTa
        self.numFullUtilTas = nFUTa
        self.numHalfUtilTas = nHUTa
        self.numPartUtilTas = nPUTa

    def show(self):
        """Print all fields as a single comma-separated line."""
        fields = (self.term, self.number,
                  self.numAdmins, self.numLecturers, self.numRecitators,
                  self.numFullRecTas, self.numHalfRecTas,
                  self.numFullUtilTas, self.numHalfUtilTas,
                  self.numPartUtilTas)
        print("%s,%s,%d,%d,%d,%d,%d,%d,%d,%d" % fields)

    def printSlots(self):
        """Print one 'EMPTY' placeholder line per open TA slot.

        Lecturer and recitation-instructor slots are counted but produce no
        output (matching the original behaviour, where those loops printed
        nothing).
        """
        # TA slot types, printed in this fixed order.
        slot_groups = (('TaFR', self.numFullRecTas),
                       ('TaFU', self.numFullUtilTas),
                       ('TaHR', self.numHalfRecTas),
                       ('TaHU', self.numHalfUtilTas),
                       ('TaPU', self.numPartUtilTas))
        for tag, count in slot_groups:
            for slot in range(1, count + 1):
                print('EMPTY,%s-%s-%s-%s' % (self.term, self.number, tag, slot))
class Course:
    'Base class for any MIT course.'

    def __init__(self, number, name, version, teacher = '<EMAIL>', admin = '<EMAIL>.edu'):
        self.number = number
        self.name = name
        self.version = version
        # '<EMAIL>' acts as the "not yet assigned" placeholder.
        self.teacher = teacher
        self.admin = admin

    def setTeacher(self, teacher):
        """Record a teacher; the first assignment may also set the admin."""
        if self.teacher != '<EMAIL>':
            # Already have a teacher: append to the comma-separated list.
            self.teacher += ',' + teacher
            return
        self.teacher = teacher
        if self.admin == '<EMAIL>':
            self.admin = teacher

    def setAdmin(self, admin):
        """Overwrite the course administrator."""
        self.admin = admin

    def show(self):
        """Print a one-line human-readable summary of the course."""
        print(" Course: %s: %s -- version %d (Teacher: %s (%s))" %
              (self.number, self.name, self.version, self.teacher, self.admin))
class Task:
    'Class to describe a teaching task.'
    # Task identifiers are encoded as 'TERM-COURSE-TYPE-N'.

    def __init__(self, task):
        self.task = task
        # Decode the dash-separated identifier once.
        parts = task.split('-')
        self.term = parts[0]
        self.number = parts[1]
        self.type = parts[2]
        self.n = parts[3]

    def show(self):
        """Print the raw identifier and its decoded fields."""
        print(" Task: %s\n - Term: %s Course: %s Type: %s N: %s )"
              % (self.task, self.term, self.number, self.type, self.n))
class BaseTeacher:
    'Base class for any teaching Personnel.'
    # Holds the identity fields shared by teachers and students.

    def __init__(self, firstName, lastName, eMail):
        self.firstName = firstName
        self.lastName = lastName
        self.eMail = eMail

    def show(self):
        """Print 'Last, First (email)' on a single line."""
        print(" Name (Last, First): %s, %s (%s)"
              % (self.lastName, self.firstName, self.eMail))
class Student(BaseTeacher):
    'Students that fill the slots of Teaching Assistants in the department.'

    def __init__(self, firstName, lastName, eMail, advisorEmail, supervisorEmail, year, division, research):
        BaseTeacher.__init__(self, firstName, lastName, eMail)
        self.advisorEmail = advisorEmail
        self.supervisorEmail = supervisorEmail
        self.year = year
        self.division = division
        self.research = research

    def show(self):
        """Print identity plus advisor/supervisor and study details."""
        BaseTeacher.show(self)
        print(" Visors (Ad, Super): %s, %s -- %4d %s %s"
              % (self.advisorEmail, self.supervisorEmail,
                 self.year, self.division, self.research))

    def insertString(self):
        """Return a SQL VALUES tuple literal for the Students table."""
        return "('%s','%s','%s','%s','%s',%d,'%s','%s')" % (
            self.firstName, self.lastName, self.eMail,
            self.advisorEmail, self.supervisorEmail,
            self.year, self.division, self.research)
class Teacher(BaseTeacher):
    'Teachers that are teacher and therefore either lecture or give recitations.'

    def __init__(self, firstName, lastName, eMail, position, status):
        BaseTeacher.__init__(self, firstName, lastName, eMail)
        self.position = position
        self.status = status

    def show(self):
        """Print identity plus position and status."""
        BaseTeacher.show(self)
        print(" Position: %s Status, %s" % (self.position, self.status))

    def insertString(self):
        """Return a SQL VALUES tuple literal for the Teachers table."""
        return "('%s','%s','%s','%s','%s')" % (
            self.firstName, self.lastName, self.eMail, self.position, self.status)
class Assignment:
    """One (term, task, person) teaching assignment with an overall evaluation.

    NOTE(review): all SQL here is built by string interpolation, which is only
    safe for trusted admin input; parameterized queries
    (cursor.execute(sql, args)) would be safer if any field can come from an
    untrusted source.
    """

    def __init__(self, term, task, person, evalO):
        self.term = term
        self.task = task
        self.person = person
        self.evalO = float(evalO)  # overall evaluation score (float)

    def insertString(self):
        """Return a SQL VALUES tuple literal for the Assignments table."""
        # %f (not %d): evalO is a float score -- %d would silently truncate
        # it, and updateDb() already writes the same column with %f.
        return "('%s','%s','%s',%f)" % (self.term, self.task, self.person, self.evalO)

    def show(self):
        """Print a one-line summary of the assignment."""
        print(" Number: %5s %20s %s %3.1f" % (self.term, self.task, self.person, self.evalO))

    def update(self, evalO):
        """Replace the evaluation score (in memory only; see updateDb)."""
        self.evalO = evalO

    def selectDb(self, database, task):
        """Load this assignment's fields from the row matching task.

        Returns 0 on success, 1 on failure.
        """
        cursor = database.getCursor()
        sql = "SELECT * FROM Assignments WHERE Task = '%s'" % (task)
        try:
            rc = cursor.execute(sql)
            print(' Executed: %s (%d)' % (sql, rc))
            for row in cursor.fetchall():
                # Last matching row wins (Task is expected to be unique).
                self.term = row[0]
                self.task = row[1]
                self.person = row[2]
                self.evalO = row[3]
        except Exception as err:
            print(" ERROR - selecting into Assignments table (%s): %s" % (sql, err))
            return 1
        return 0

    def insertDb(self, database):
        """INSERT this assignment. Returns 0 on success, 1 on failure."""
        cursor = database.getCursor()
        sql = "INSERT INTO Assignments VALUES " + self.insertString()
        try:
            rc = cursor.execute(sql)
            database.commit()
            print(' Executed: %s (%d)' % (sql, rc))
        except Exception as err:
            print(" ERROR - inserting into Assignments table (%s): %s" % (sql, err))
            return 1
        return 0

    def deleteDb(self, database):
        """DELETE the row matching self.task. Returns 0 on success, 1 on failure."""
        cursor = database.getCursor()
        sql = "DELETE FROM Assignments where Task = '%s'" % (self.task)
        try:
            rc = cursor.execute(sql)
            database.commit()
            print(' Executed: %s (%d)' % (sql, rc))
        except Exception as err:
            print(" ERROR - deleting from Assignments table (%s): %s" % (sql, err))
            return 1
        return 0

    def updateDb(self, database):
        """UPDATE EvalO for the row matching self.task. Returns 0 on success, 1 on failure."""
        cursor = database.getCursor()
        sql = "UPDATE Assignments SET EvalO = %f WHERE Task = '%s'" % (self.evalO, self.task)
        try:
            rc = cursor.execute(sql)
            database.commit()
            print(' Executed: %s (%d)' % (sql, rc))
        except Exception as err:
            print(" ERROR - updating Assignments table (%s): %s" % (sql, err))
            return 1
        return 0
class Container:
'Container class for any type of teacher or course. Basically a hash array. Keys: email or course number.'
    def __init__(self):
        # One flat mapping: key (email or course number) -> stored object.
        self.hash = { }
    def addElement(self, key, element):
        # Store element under key, silently replacing any previous entry.
        self.hash[key] = element
    def retrieveElement(self, key):
        # Look up an element; raises KeyError if the key is absent.
        ##print(" Key: %s"%(key))
        return self.hash[key]
    def popElement(self, key):
        # Remove and return the element stored under key (KeyError if absent).
        return self.hash.pop(key)
    def getHash(self):
        # Expose the underlying dict; mutations affect this container.
        return self.hash
def show(self):
for key, value in self.hash.iteritems():
sys.stdout.write(" Key: %-6s -- "%key)
value.show()
def fillWithAssignments(self,database,debug=False):
if debug:
print(" Start fill assignments.")
# grab the cursor
cursor = database.cursor()
# Prepare SQL query to select all courses from the Courses table
sql = "SELECT * FROM Assignments"
if debug:
print(" SQL> " + sql)
try:
# Execute the SQL command
cursor.execute(sql)
# Fetch all the results
results = cursor.fetchall()
for row in results:
term = row[0]
task = row[1]
person = row[2]
evalO = row[3]
# Now print fetched result
if debug:
print(" found Assignment with ('%s','%s''%s',%d);"\
%(term,task,person,evalO))
# create a new course and add it to our courses object
assignment = Assignment(term,task,person,evalO)
if debug:
print(" Assignment created.")
self.addElement(task,assignment);
except:
print(" ERROR - unable to fetch data from Assignments table.")
return 1
# all went well
return 0
def fillWithCourseResources(self,database,term='F2018',debug=False):
# grab the cursor
cursor = database.cursor()
# Prepare SQL query to select all courses from the Courses table
sql = "select * from CourseResources where term = '%s'"%(term)
try:
# Execute the SQL command
cursor.execute(sql)
# Fetch all the results
results = cursor.fetchall()
for row in results:
# create a new course and add it to our courses object
courseResource = CourseResource(row)
number = row[1]
self.addElement(number,courseResource);
except:
print(" ERROR - unable to fetch data from CourseResources table.")
return 1
# all went well
return 0
def fillWithCourses(self,database,debug=False):
if debug:
print(" Start fill courses.")
# grab the cursor
cursor = database.cursor()
# Prepare SQL query to select all courses from the Courses table
sql = "SELECT * FROM Courses"
if debug:
print(" SQL> " + sql)
try:
# Execute the SQL command
cursor.execute(sql)
# Fetch all the results
results = cursor.fetchall()
for row in results:
number = row[0]
name = row[1]
version = row[2]
# Now print fetched result
if debug:
print(" found Course with ('%s','%s',%d);"%(number,name,version))
# create a new course and add it to our courses object
course = Course(number,name,version)
if debug:
print(" Course create.")
self.addElement(number,course);
except:
print(" ERROR - unable to fetch data from Courses table.")
return 1
# all went well
return 0
def fillWithTeachers(self,database,debug=False):
if debug:
print(" Start fill Teachers.")
# grab the cursor
cursor = database.cursor()
# Prepare SQL query to select all courses from the Courses table
sql = "SELECT * FROM Teachers"
if debug:
print(" SQL> " + sql)
try:
# Execute the SQL command
cursor.execute(sql)
# Fetch all the results
results = cursor.fetchall()
for row in results:
firstName = row[0]
lastName = row[1]
eMail = row[2]
position = row[3]
status = row[4]
# Now print fetched result
if debug:
print(" found Teacher with ('%s','%s','%s','%s','%s');"% \
(firstName,lastName,eMail,position,status))
# create a new teacher and add | |
# -*- coding: utf-8 -*-
'''
Created on Fri Jan 18 10:59:43 2019
@author:
<NAME>
Turku University Hospital
January 2019
@description:
This code is used for feature selection for different classification models
'''
#%% clear variables
%reset -f
%clear
#%% import necessary libraries
import os
import time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.svm import SVC
#from sklearn.feature_selection import SelectKBest, chi2, f_classif, mutual_info_classif
#from sklearn.utils.class_weight import compute_class_weight
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.metrics import f1_score
from skfeature.function.similarity_based import fisher_score
from skfeature.function.similarity_based import reliefF
from skfeature.function.similarity_based import trace_ratio
from skfeature.function.statistical_based import gini_index
from skfeature.function.statistical_based import chi_square # same as chi2
from skfeature.function.statistical_based import f_score # same as f_classif
#from skfeature.function.statistical_based import CFS
#from skfeature.function.statistical_based import t_score # only for binary
from skfeature.function.information_theoretical_based import JMI
from skfeature.function.information_theoretical_based import CIFE
from skfeature.function.information_theoretical_based import DISR
from skfeature.function.information_theoretical_based import MIM
from skfeature.function.information_theoretical_based import CMIM
from skfeature.function.information_theoretical_based import ICAP
from skfeature.function.information_theoretical_based import MRMR
from skfeature.function.information_theoretical_based import MIFS
from save_load_variables import save_load_variables
#%% define logging and data display format
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
pd.options.mode.chained_assignment = None # disable imputation warnings
#%% read data
# One row per treated patient/fibroid case.
dataframe = pd.read_csv(r'fibroid_dataframe.csv', sep = ',')

#%% calculate nan percent for each label
# Missing-value percentage per column, for data-quality inspection.
nan_percent = pd.DataFrame(dataframe.isnull().mean() * 100, columns = ['NaN ratio'])

#%% display NPV histogram
dataframe['NPV ratio'].hist(bins = 20)

#%% categorise NPV into classes according to bins
# Ordinal target classes from the non-perfused-volume (NPV) ratio:
# class 0: (-1, 29.9], class 1: (29.9, 80], class 2: (80, 100].
NPV_bins = [-1, 29.9, 80, 100]
dataframe['NPV class'] = dataframe[['NPV ratio']].apply(lambda x: pd.cut(x, NPV_bins, labels = False))

#%% define feature and target labels
# Candidate predictors; 'ADC' is deliberately excluded (kept as a comment).
feature_labels = ['White',
                  'Black',
                  'Asian',
                  'Age',
                  'Weight',
                  'Height',
                  'Gravidity',
                  'Parity',
                  'Previous pregnancies',
                  'Live births',
                  'C-section',
                  'Esmya',
                  'Open myomectomy',
                  'Laparoscopic myomectomy',
                  'Hysteroscopic myomectomy',
                  'Embolisation',
                  'Subcutaneous fat thickness',
                  'Front-back distance',
                  'Abdominal scars',
                  'Bleeding',
                  'Pain',
                  'Mass',
                  'Urinary',
                  'Infertility',
                  'Fibroid diameter',
                  'Fibroid distance',
                  'Intramural',
                  'Subserosal',
                  'Submucosal',
                  'Anterior',
                  'Posterior',
                  'Lateral',
                  'Fundus',
                  'Anteverted',
                  'Retroverted',
                  'Type I',
                  'Type II',
                  'Type III',
                  # 'ADC',
                  'Fibroid volume'
                  ]

target_label = ['NPV class']
#%% define parameters for iteration

# define number of iterations
# Each iteration re-splits the data with a fresh random state.
n_iterations = 200

# define split ratio for training and testing sets
split_ratio = 0.2

# define scaling type ('log', 'minmax', 'standard' or None)
scaling_type = 'log'

# define number of features
# Feature-subset sizes evaluated for every selection method.
n_features = [2, 4, 6, 8, 10, 12, 14, 16, 18, 20]

# define scorer methods
# Short method names; must stay index-aligned with `scorers` and `rankers`.
methods = ['FISH',
           'RELF',
           'TRAC',
           'GINI',
           'CHI2',
           'FSCR',
           'DISR',
           'CMIM',
           'ICAP',
           'JMI',
           'CIFE',
           'MIM',
           'MRMR',
           'MIFS'
           ]

# define scorer functions
scorers = [fisher_score.fisher_score,
           reliefF.reliefF,
           trace_ratio.trace_ratio,
           gini_index.gini_index,
           chi_square.chi_square,
           f_score.f_score,
           DISR.disr,
           CMIM.cmim,
           ICAP.icap,
           JMI.jmi,
           CIFE.cife,
           MIM.mim,
           MRMR.mrmr,
           MIFS.mifs
           ]

# define scorer rankers (for scikit-feature only)
# None where the scorer already returns ranked indices (the
# information-theoretic methods and trace_ratio, handled separately below).
rankers = [fisher_score.feature_ranking,
           reliefF.feature_ranking,
           None,
           gini_index.feature_ranking,
           chi_square.feature_ranking,
           f_score.feature_ranking,
           None,
           None,
           None,
           None,
           None,
           None,
           None,
           None
           ]

# define parameters for parameter search
grid_param = {
        'kernel': ['rbf'],
        'C': list(np.logspace(-1, 4, 6)),
        'gamma': list(np.logspace(-2, 4, 7)),
        'random_state': [None]
        }

# define data imputation values
# Only these labels contain missing values; imputed from the training split.
impute_labels = ['Height',
                 'Gravidity'
                 ]

# define classification model
max_iter = 200000
class_weight = 'balanced'
clf_model = SVC(probability = True, class_weight = class_weight, cache_size = 4000,
                max_iter = max_iter)

# define parameter search method
cv = 10
scoring = 'f1_micro'
# NOTE(review): the `iid` argument was deprecated in scikit-learn 0.22 and
# removed in 0.24 -- this call requires an older scikit-learn version.
clf_grid = GridSearchCV(clf_model, grid_param, n_jobs = -1, cv = cv,
                        scoring = scoring, refit = True, iid = False)

# initialise variables
clf_results = pd.DataFrame()
feature_rankings = pd.DataFrame()
k = len(feature_labels)
#%% start the iteration

timestr = time.strftime('%Y%m%d-%H%M%S')
start_time = time.time()

for iteration in range(0, n_iterations):

    # define random state
    random_state = np.random.randint(0, 10000)

    # assign random state to grid parameters
    grid_param['random_state'] = [random_state]

    # print progress
    print('Iteration %d with random state %d at %.1f min' % (iteration, random_state,
          ((time.time() - start_time) / 60)))

    # randomise and divive data for cross-validation
    training_set, testing_set = train_test_split(dataframe, test_size = split_ratio,
                                                 stratify = dataframe[target_label],
                                                 random_state = random_state)

    # impute missing values using training-set statistics only (no leakage)
    impute_values = {}
    for label in impute_labels:
        if label in {'Height', 'ADC'}:
            # continuous labels: impute with the training-set mean
            impute_values[label] = training_set[label].mean()
            training_set[label] = training_set[label].fillna(impute_values[label])
            testing_set[label] = testing_set[label].fillna(impute_values[label])
        else:
            # discrete labels: impute with the training-set mode
            impute_values[label] = training_set[label].mode()[0]
            training_set[label] = training_set[label].fillna(impute_values[label])
            testing_set[label] = testing_set[label].fillna(impute_values[label])
    del label

    # define features and targets
    training_features = training_set[feature_labels]
    testing_features = testing_set[feature_labels]
    training_targets = training_set[target_label]
    testing_targets = testing_set[target_label]

    # scale features (scalers are fitted on the training split only)
    if scaling_type == 'log':
        training_features = np.log1p(training_features)
        testing_features = np.log1p(testing_features)
    elif scaling_type == 'minmax':
        scaler = MinMaxScaler(feature_range = (0, 1))
        training_features = pd.DataFrame(scaler.fit_transform(training_features),
                                         columns = training_features.columns,
                                         index = training_features.index)
        testing_features = pd.DataFrame(scaler.transform(testing_features),
                                        columns = testing_features.columns,
                                        index = testing_features.index)
    elif scaling_type == 'standard':
        scaler = StandardScaler()
        training_features = pd.DataFrame(scaler.fit_transform(training_features),
                                         columns = training_features.columns,
                                         index = training_features.index)
        testing_features = pd.DataFrame(scaler.transform(testing_features),
                                        columns = testing_features.columns,
                                        index = testing_features.index)

    # find k best features for each feature selection method
    k_features = pd.DataFrame(index = range(0, k), columns = methods)
    for scorer, ranker, method in zip(scorers, rankers, methods):
        if method in ('DISR', 'CMIM', 'ICAP', 'JMI', 'CIFE', 'MIM', 'MRMR', 'MIFS', 'TRAC'):
            # these scorers return ranked feature indices directly
            indices, _, _ = scorer(training_features.values, training_targets.values[:, 0], n_selected_features = k)
            k_features[method] = pd.DataFrame(training_features.columns.values[indices], columns = [method])
            del indices
        else:
            # these scorers return raw scores; a separate ranker orders them
            scores = scorer(training_features.values, training_targets.values[:, 0])
            indices = ranker(scores)
            k_features[method] = pd.DataFrame(training_features.columns.values[indices[0:k]], columns = [method])
            del scores, indices
    del scorer, ranker, method

    # calculate feature scores
    # NOTE(review): argsort over the transposed feature-name matrix is assumed
    # to recover each feature's per-method rank with columns sorted by name --
    # verify this mapping if feature_labels changes.
    k_rankings = pd.DataFrame(k_features.T.values.argsort(1),
                              columns = np.sort(k_features.iloc[:, 0].values),
                              index = k_features.columns)
    k_rankings['method'] = k_rankings.index
    k_rankings['iteration'] = iteration
    k_rankings['random_state'] = random_state
    # DataFrame.append is deprecated in recent pandas (use pd.concat there)
    feature_rankings = feature_rankings.append(k_rankings, sort = False, ignore_index = True)
    del k_rankings

    # train model using parameter search
    for n in n_features:
        for method in methods:

            # fit parameter search on the top-n features of this method
            clf_fit = clf_grid.fit(training_features[k_features[method][0:n]].values, training_targets.values[:, 0])

            # calculate predictions
            testing_predictions = clf_fit.predict(testing_features[k_features[method][0:n]].values)
            # scoring[3:] strips the 'f1_' prefix to get the averaging mode
            test_score = f1_score(testing_targets.values[:, 0], testing_predictions, average = scoring[3:])

            # save results
            df = pd.DataFrame(clf_fit.best_params_, index = [0])
            df['method'] = method
            df['validation_score'] = clf_fit.best_score_
            df['test_score'] = test_score
            df['n_features'] = n
            df['iteration'] = iteration
            df['random_state'] = random_state
            clf_results = clf_results.append(df, sort = False, ignore_index = True)

            del clf_fit, testing_predictions, test_score, df
    del n, method

    del k_features, random_state, impute_values
    del training_set, training_features, training_targets
    del testing_set, testing_features, testing_targets
del iteration

end_time = time.time()
print('Total execution time: %.1f min' % ((end_time - start_time) / 60))
#%% calculate summaries

# summarise results
# Mean and standard deviation of validation/test F1 per (method, n_features).
mean_vscores = clf_results.groupby(['method', 'n_features'], as_index = False)['validation_score'].mean()
mean_tscores = clf_results.groupby(['method', 'n_features'])['test_score'].mean().values
std_vscores = clf_results.groupby(['method', 'n_features'])['validation_score'].std().values
std_tscores = clf_results.groupby(['method', 'n_features'])['test_score'].std().values

clf_summary = mean_vscores.copy()
clf_summary['test_score'] = mean_tscores
clf_summary['validation_score_std'] = std_vscores
clf_summary['test_score_std'] = std_tscores

del mean_vscores, mean_tscores, std_vscores, std_tscores

# calculate heatmaps for test scores, validation scores and feature rankings
heatmap_vscore_mean = clf_summary.pivot(index = 'method', columns = 'n_features', values = 'validation_score')
heatmap_vscore_mean.columns = heatmap_vscore_mean.columns.astype(int)

heatmap_tscore_mean = clf_summary.pivot(index = 'method', columns = 'n_features', values = 'test_score')
heatmap_tscore_mean.columns = heatmap_tscore_mean.columns.astype(int)

heatmap_rankings_mean = feature_rankings.groupby(['method'], as_index = False)[feature_labels].mean()
heatmap_rankings_mean = heatmap_rankings_mean.set_index('method')

heatmap_rankings_median = feature_rankings.groupby(['method'], as_index = False)[feature_labels].median()
heatmap_rankings_median = heatmap_rankings_median.set_index('method')

# calculate box plot
# Long-format (feature, ranking) pairs pooled over all methods and iterations.
feature_boxplot = feature_rankings[feature_labels].melt(var_name = 'feature', value_name = 'ranking')

# calculate top features based on mean and median values
top_features_mean = feature_boxplot.groupby(['feature'], as_index = False)['ranking'].mean()
top_features_mean['std'] = feature_boxplot.groupby(['feature'])['ranking'].std().values
top_features_mean = top_features_mean.sort_values('ranking', ascending = True)
top_features_mean = top_features_mean.reset_index(drop = True)
top_features_mean['method'] = 'TOPN'

top_features_median = feature_boxplot.groupby(['feature'], as_index = False)['ranking'].median()
top_features_median['std'] = feature_boxplot.groupby(['feature'])['ranking'].std().values
top_features_median = top_features_median.sort_values('ranking', ascending = True)
top_features_median = top_features_median.reset_index(drop = True)
top_features_median['method'] = 'TOPN'

#%% train model with only top features

top_results = pd.DataFrame()
# Re-use the exact random states of the first experiment so the splits match.
random_states = clf_results.random_state.unique()
iteration = 0
time_stamp = time.time()
for random_state in random_states:
# assign random state to grid parameters
grid_param['random_state'] = [random_state]
# print progress
print('Iteration %d with random state %d at %.1f min' % (iteration, random_state,
((time.time() - time_stamp) / 60)))
# randomise and divive data for cross-validation
training_set, testing_set = train_test_split(dataframe, test_size = split_ratio,
stratify = dataframe[target_label],
random_state = random_state)
impute_values = {}
for label in impute_labels:
if label in {'Height', 'ADC'}:
impute_values[label] = training_set[label].mean()
training_set[label] = training_set[label].fillna(impute_values[label])
testing_set[label] = testing_set[label].fillna(impute_values[label])
else:
impute_values[label] = training_set[label].mode()[0]
training_set[label] = training_set[label].fillna(impute_values[label])
testing_set[label] = testing_set[label].fillna(impute_values[label])
del label
# define features and targets
training_features = training_set[feature_labels]
testing_features = testing_set[feature_labels]
training_targets = training_set[target_label]
testing_targets = testing_set[target_label]
# scale features
if scaling_type == 'log':
training_features = np.log1p(training_features)
testing_features = np.log1p(testing_features)
elif scaling_type == 'minmax':
scaler = MinMaxScaler(feature_range = (0, 1))
training_features = pd.DataFrame(scaler.fit_transform(training_features),
columns = training_features.columns,
index = training_features.index)
testing_features = pd.DataFrame(scaler.transform(testing_features),
columns = testing_features.columns,
index = testing_features.index)
| |
shuffle=data_type != 'test',
to_fit=data_type != 'test'), _data_samples['crossing']) # set y to None
_data_opt_flow = (DataGenerator(data=[_data_samples['optical_flow']],
labels=_data_samples['crossing'],
data_sizes=[data_type_sizes_dict['optical_flow']],
process=process,
global_pooling=self._global_pooling,
input_type_list=['optical_flow'],
batch_size=model_opts['batch_size'],
shuffle=data_type != 'test',
to_fit=data_type != 'test',
stack_feats=True), _data_samples['crossing']) # set y to None
else:
_data_rgb = (_data_samples[feature_type], _data_samples['crossing'])
_data_opt_flow = (_data_samples['optical_flow'], _data_samples['crossing'])
return {'data_rgb': _data_rgb,
'ped_id': data['ped_id'],
'tte': data['tte'],
'data_opt_flow': _data_opt_flow,
'data_params_rgb': {'data_types': [feature_type],
'data_sizes': [data_type_sizes_dict[feature_type]]},
'data_params_opt_flow': {'data_types': ['optical_flow'],
'data_sizes': [data_type_sizes_dict['optical_flow']]},
'effective_dimension': effective_dimension,
'count': {'neg_count': neg_count, 'pos_count': pos_count}}
def add_dropout(self, model, add_new_pred=False):
"""
Adds dropout layers to a given vgg16 network. If specified, changes the dimension of
the last layer (predictions)
Args:
model: A given vgg16 model
add_new_pred: Whether to change the final layer
Returns:
Returns the new model
"""
# Change to a single class output and add dropout
fc1_dropout = Dropout(self._dropout)(model.layers[-3].output)
fc2 = model.layers[-2](fc1_dropout)
fc2_dropout = Dropout(self._dropout)(fc2)
if add_new_pred:
output = Dense(self._num_classes, name='predictions', activation='sigmoid')(fc2_dropout)
else:
output = model.layers[-1](fc2_dropout)
return Model(inputs=model.input, outputs=output)
def get_model(self, data_params):
K.clear_session()
data_size = data_params['data_sizes'][0]
net_model = self._conv_model(input_shape=data_size,
include_top=True, weights=self._weights)
net_model = self.add_dropout(net_model, add_new_pred=True)
if self._freeze_conv_layers and self._weights:
for layer in net_model.layers:
if 'conv' in layer.name:
layer.trainable = False
net_model.summary()
return net_model
    def train(self, data_train,
              data_val=None,
              batch_size=32,
              epochs=60,
              lr=0.000005,
              optimizer='sgd',
              learning_scheduler=None,
              model_opts=None):
        """Train both streams (optical flow and rgb) of the two-stream model.

        Args:
            data_train: Raw training data.
            data_val: Optional raw validation data.
            batch_size, epochs, lr, optimizer: Training hyperparameters.
            learning_scheduler: Learning-scheduler/callback options.
            model_opts: Model/data options; must contain at least 'dataset'
                and 'apply_class_weights'.
        Returns:
            Path where the trained models and configurations were saved.
        """
        # Set the path for saving models
        model_folder_name = time.strftime("%d%b%Y-%Hh%Mm%Ss")
        path_params = {'save_folder': os.path.join(self.__class__.__name__, model_folder_name),
                       'save_root_folder': 'data/models/',
                       'dataset': model_opts['dataset']}
        model_opts['reshape'] = True
        # Read train data
        data_train = self.get_data('train', data_train, {**model_opts, 'batch_size': batch_size})
        if data_val is not None:
            data_val = self.get_data('val', data_val, {**model_opts, 'batch_size': batch_size})
        # Train the model
        class_w = self.class_weights(model_opts['apply_class_weights'], data_train['count'])
        # Get a copy of local parameters in the function minus self parameter.
        # NOTE: train_model() receives these as **kwargs, so the local variable
        # names in this function must stay in sync with train_model's
        # parameter names -- do not rename them.
        local_params = {k: v for k, v in locals().items() if k != 'self'}
        ##### Optical flow model
        # Flow data shape: (1, num_frames, 224, 224, 2)
        self.train_model(model_type='opt_flow', **local_params)
        ##### rgb model
        self.train_model(model_type='rgb', **local_params)
        # Save settings
        model_opts_path, saved_files_path = get_path(**path_params,
                                                     file_name='model_opts.pkl')
        with open(model_opts_path, 'wb') as fid:
            pickle.dump(model_opts, fid, pickle.HIGHEST_PROTOCOL)
        config_path, _ = get_path(**path_params, file_name='configs.yaml')
        self.log_configs(config_path, batch_size, epochs, lr, model_opts)
        return saved_files_path
    def train_model(self, model_type, data_train, data_val,
                    class_w, learning_scheduler, path_params, optimizer,
                    batch_size, epochs, lr, **kwargs):
        """
        Trains a single stream of the model.

        Args:
            model_type: The model type, 'rgb' or 'opt_flow'
            data_train: Training data (as produced by get_data())
            data_val: Validation data in the same format, or None
            class_w: Class weights
            learning_scheduler: Scheduler/callback options; if it contains a
                'checkpoint' entry, the callbacks handle saving the model
            path_params: Parameters for generating paths for saving models and
                configurations
            optimizer: Name of the optimizer to instantiate
            For other parameters refer to train()
        """
        learning_scheduler = learning_scheduler or {}
        # The optical-flow stream is always trained from scratch.
        if model_type == 'opt_flow':
            self._weights = None

        optimizer = self.get_optimizer(optimizer)(lr=lr)
        train_model = self.get_model(data_train['data_params_' + model_type])
        train_model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])

        data_path_params = {**path_params, 'sub_folder': model_type}
        model_path, _ = get_path(**data_path_params, file_name='model.h5')
        callbacks = self.get_callbacks(learning_scheduler, model_path)

        if data_val:
            data_val = data_val['data_' + model_type]
            if self._generator:
                # The generator already carries the labels; keep only it.
                data_val = data_val[0]

        history = train_model.fit(x=data_train['data_' + model_type][0],
                                  y=None if self._generator else data_train['data_' + model_type][1],
                                  batch_size=batch_size,
                                  epochs=epochs,
                                  validation_data=data_val,
                                  class_weight=class_w,
                                  verbose=1,
                                  callbacks=callbacks)
        if 'checkpoint' not in learning_scheduler:
            # No checkpoint callback saved weights, so save the final model.
            print('{} train model is saved to {}'.format(model_type, model_path))
            train_model.save(model_path)

        # Save training history
        history_path, saved_files_path = get_path(**data_path_params, file_name='history.pkl')
        with open(history_path, 'wb') as fid:
            pickle.dump(history.history, fid, pickle.HIGHEST_PROTOCOL)
    def test(self, data_test, model_path=''):
        """Evaluate the saved two-stream model on test data.

        Loads the stored model options and both stream models from
        model_path, averages the per-sequence predictions of the rgb and
        optical-flow streams, and reports classification metrics.

        Returns:
            (acc, auc, f1, precision, recall)
        """
        with open(os.path.join(model_path, 'model_opts.pkl'), 'rb') as fid:
            try:
                model_opts = pickle.load(fid)
            except:
                # Fallback for pickles written under Python 2.
                model_opts = pickle.load(fid, encoding='bytes')

        test_data = self.get_data('test', data_test, {**model_opts, 'batch_size': 1})
        rgb_model = load_model(os.path.join(model_path, 'rgb', 'model.h5'))
        opt_flow_model = load_model(os.path.join(model_path, 'opt_flow', 'model.h5'))

        # Evaluate rgb model: mean score over each sample's effective frames.
        results_rgb = rgb_model.predict(test_data['data_rgb'][0], verbose=1)
        results_rgb = np.reshape(results_rgb, (-1, test_data['effective_dimension'], 1))
        results_rgb = np.mean(results_rgb, axis=1)

        # Evaluate optical flow model
        results_opt_flow = opt_flow_model.predict(test_data['data_opt_flow'][0], verbose=1)
        results_opt_flow = np.reshape(results_opt_flow, (-1, test_data['effective_dimension'], 1))
        results_opt_flow = np.mean(results_opt_flow, axis=1)

        # Average the predictions for both streams
        results = (results_rgb + results_opt_flow) / 2.0

        gt = np.reshape(test_data['data_rgb'][1], (-1, test_data['effective_dimension'], 1))[:, 1, :]
        acc = accuracy_score(gt, np.round(results))
        f1 = f1_score(gt, np.round(results))
        # NOTE(review): AUC is computed on rounded predictions while roc_curve
        # below uses raw scores; roc_auc_score(gt, results) would be the
        # conventional choice -- confirm before changing reported numbers.
        auc = roc_auc_score(gt, np.round(results))
        roc = roc_curve(gt, results)
        precision = precision_score(gt, np.round(results))
        recall = recall_score(gt, np.round(results))
        pre_recall = precision_recall_curve(gt, results)

        print('acc:{:.2f} auc:{:0.2f} f1:{:0.2f} precision:{:0.2f} recall:{:0.2f}'.format(acc, auc, f1, precision,
                                                                                          recall))
        save_results_path = os.path.join(model_path, '{:.2f}'.format(acc) + '.yaml')
        if not os.path.exists(save_results_path):
            # `results` is rebound here from the score array to a metrics dict.
            results = {'acc': acc,
                       'auc': auc,
                       'f1': f1,
                       'roc': roc,
                       'precision': precision,
                       'recall': recall,
                       'pre_recall_curve': pre_recall}
            with open(save_results_path, 'w') as fid:
                yaml.dump(results, fid)
        return acc, auc, f1, precision, recall
class TwoStreamFusion(ActionPredict):
"""
This is an implementation of two-stream network with fusion mechanisms based
on Feichtenhofer, Christoph et al. "Convolutional two-stream network fusion for
video action recognition." CVPR, 2016.
"""
    def __init__(self,
                 dropout=0.5,
                 dense_activation='sigmoid',
                 freeze_conv_layers=True,
                 weights='imagenet',
                 fusion_point='early',  # early, late, two-stage
                 fusion_method='sum',
                 num_classes=1,
                 backbone='vgg16',
                 **kwargs):
        """
        Class init function
        Args:
            dropout: Dropout value for fc6-7 of vgg16.
            dense_activation: Activation of last dense (predictions) layer.
            freeze_conv_layers: If set true, only fc layers of the networks are trained
            weights: Pre-trained weights for networks.
            fusion_point: At what point the networks are fused (for details refer to the paper).
            Options are: 'early' (streams are fused after block 4),'late' (before the loss layer),
            'two-stage' (streams are fused after block 5 and before loss).
            fusion_method: How the weights of fused layers are combined.
            Options are: 'sum' (weights are summed), 'conv' (weights are concatenated and fed into
            a 1x1 conv to reduce dimensions to the original size).
            num_classes: Number of activity classes to predict.
            backbone: Backbone network. Only vgg16 is supported.
        """
        super().__init__(**kwargs)
        # Network parameters
        assert fusion_point in ['early', 'late', 'two-stage'], \
            "fusion point {} is not supported".format(fusion_point)
        assert fusion_method in ['sum', 'conv'], \
            "fusion method {} is not supported".format(fusion_method)
        self._dropout = dropout
        self._dense_activation = dense_activation
        self._freeze_conv_layers = freeze_conv_layers
        self._weights = weights
        self._num_classes = num_classes
        # Unlike fusion_point/fusion_method, an unsupported backbone is only
        # warned about here; VGG16 is used regardless.
        if backbone != 'vgg16':
            print("Only vgg16 backbone is supported")
        self._conv_models = vgg16.VGG16
        self._fusion_point = fusion_point
        self._fusion_method = fusion_method
    def get_data_sequence(self, data_type, data_raw, opts):
        """
        Build fixed-length observation sequences (and time-to-event values)
        from raw tracks.

        Args:
            data_type: Split name; 'train' enables balancing and overlap.
            data_raw: Dict of raw tracks; 'bbox', 'activities', 'pid',
                'image' (and 'image_dimension' when balancing) are read.
            opts: Model options; 'balance_data', 'obs_length',
                'time_to_event' and (for train) 'overlap' are used.

        Returns:
            Tuple (d, neg_count, pos_count): d maps feature names to numpy
            arrays of sequences; the counts give the class balance of the
            'crossing' labels.
        """
        print('\n#####################################')
        print('Generating raw data')
        print('#####################################')
        d = {'box': data_raw['bbox'].copy(),
             'crossing': data_raw['activities'].copy(),
             'ped_id': data_raw['pid'].copy(),
             'image': data_raw['image'].copy()}
        balance = opts['balance_data'] if data_type == 'train' else False
        obs_length = opts['obs_length']
        time_to_event = opts['time_to_event']
        if balance:
            self.balance_data_samples(d, data_raw['image_dimension'][0])
        d['box_org'] = d['box'].copy()
        d['tte'] = []
        if isinstance(time_to_event, int):
            # Fixed time-to-event: one window per track, ending exactly
            # time_to_event frames before the event.
            for k in d.keys():
                for i in range(len(d[k])):
                    d[k][i] = d[k][i][- obs_length - time_to_event:-time_to_event]
            # NOTE(review): every entry shares the same inner list object
            # here; in-place mutation of one would affect all of them.
            d['tte'] = [[time_to_event]]*len(data_raw['bbox'])
        else:
            # Ranged time-to-event: slide windows over [tte[0], tte[1]] with
            # a stride derived from the requested overlap.
            overlap = opts['overlap'] if data_type == 'train' else 0.0
            olap_res = obs_length if overlap == 0 else int((1 - overlap) * obs_length)
            olap_res = 1 if olap_res < 1 else olap_res
            for k in d.keys():
                seqs = []
                for seq in d[k]:
                    start_idx = len(seq) - obs_length - time_to_event[1]
                    end_idx = len(seq) - obs_length - time_to_event[0]
                    seqs.extend([seq[i:i + obs_length] for i in
                                range(start_idx, end_idx + 1, olap_res)])
                d[k] = seqs
            # NOTE(review): at this point d['box'] already holds the windowed
            # sequences (each of length obs_length), so len(seq) below equals
            # obs_length and start/end indices are negative. Verify against
            # the upstream benchmark code, which derives tte from the
            # original (unwindowed) tracks.
            for seq in d['box']:
                start_idx = len(seq) - obs_length - time_to_event[1]
                end_idx = len(seq) - obs_length - time_to_event[0]
                d['tte'].extend([[len(seq) - (i + obs_length)] for i in
                                range(start_idx, end_idx + 1, olap_res)])
        for k in d.keys():
            d[k] = np.array(d[k])
        # Class balance of the first-step crossing labels.
        dcount = d['crossing'][:, 0, :]
        pos_count = np.count_nonzero(dcount)
        neg_count = len(dcount) - pos_count
        print("Negative {} and positive {} sample counts".format(neg_count, pos_count))
        return d, neg_count, pos_count
def get_data(self, data_type, data_raw, model_opts):
model_opts['normalize_boxes'] = False
process = False
aux_name = '_'.join([self._backbone, 'raw']).strip('_')
dataset = model_opts['dataset']
eratio = model_opts['enlarge_ratio']
self._generator = model_opts.get('generator', False)
data, neg_count, pos_count = self.get_data_sequence(data_type, data_raw, model_opts)
feature_type = model_opts['obs_input_type'][0]
# Only 3 types of rgb features are supported
assert feature_type in ['local_box', 'local_context', 'scene']
_data_samples = {'crossing': data['crossing']}
data_type_sizes_dict = {}
data_gen_params = {'data_type': data_type, 'crop_type': 'none'}
if feature_type == 'local_box':
data_gen_params['crop_type'] = 'bbox'
data_gen_params['crop_mode'] = 'pad_resize'
elif feature_type == 'local_context':
data_gen_params['crop_type'] = 'context'
data_gen_params['crop_resize_ratio'] = eratio
print('\n#####################################')
print('Generating {} {}'.format(feature_type, data_type))
print('#####################################')
save_folder_name = '_'.join([feature_type, aux_name, str(eratio)]) \
if feature_type in ['local_context', 'local_surround'] \
else '_'.join([feature_type, aux_name])
path_to_features, _ = get_path(save_folder=save_folder_name,
dataset=dataset,
save_root_folder='data/features')
data_gen_params['save_path'] = path_to_features
# Extract relevant rgb frames based on the optical flow length
# Optical flow length is either 5 or 10. For example, for length of 10, and
# sequence size of 16, 7 rgb frames are selected.
ofl = model_opts['optical_flow_length']
stidx = ofl - round((ofl + 1) / 2)
endidx = (ofl + 1) // 2
_data_samples['crossing'] = _data_samples['crossing'][:, stidx:-endidx, ...]
effective_dimension = _data_samples['crossing'].shape[1]
_data_samples[feature_type], feat_shape = self.load_images_crop_and_process(data['image'][:, stidx:-endidx, ...],
data['box_org'][:, stidx:-endidx, ...],
data['ped_id'][:, stidx:-endidx, ...],
process=process,
**data_gen_params)
data_type_sizes_dict[feature_type] = feat_shape
print('\n#####################################')
print('Generating {} optical flow {}'.format(feature_type, data_type))
print('#####################################')
save_folder_name = '_'.join([feature_type, 'flow', str(eratio)]) \
if feature_type in ['local_context', 'local_surround'] \
| |
<filename>green/midhaul_heuristic.py
"""Midhaul Heuristic Module.
Heuristics for Journal 3.
"""
from datetime import datetime
from hcran_generic import HcranGeneric
from hcran_monitor import *
from midhaul import Midhaul
MAGIC_NUMBER = -999987.53463
class MidhaulHeuristic(HcranGeneric):
def __init__(self, city_name, traffic_scen, splitting_method, day):
self.midhaul_generic = Midhaul(self)
HcranGeneric.__init__(self, city_name, traffic_scen, splitting_method)
self.day = day # getting the current it is important to calculate remaining battery energy
self.unstored_energy = [[0 for x in self.r_t] for x in self.r_cloud]
self.reserved_energy = [[MAGIC_NUMBER for x in self.r_t] for x in self.r_cloud]
self.du_load = np.array([[0 for x in self.r_t] for x in self.r_du], dtype='float')
# DECISION VARIABLES
def add_variables(self):
self.decision_s = [[0 for x in self.r_t] for x in self.r_cloud] # amount of renewable energy consumption
self.decision_a = np.array([[0 for x in self.r_t] for x in self.r_du], dtype='bool')
self.decision_m = np.array([[[[0 for x in self.r_t] for x in self.r_up] for x in self.r_du] for x in self.r_i], dtype='bool')
self.decision_be = np.array([[0 for x in self.r_t] for x in self.r_cloud], dtype='float') # amount of remaining renewable energy in battery
if self.ENERGY_TRANSFER_AVAILABLE:
self.decision_x = [[[0 for x in self.r_t] for x in self.r_cloud] for x in self.r_cloud]
self.decision_p = [[0 for x in self.r_t] for x in self.r_cloud]
def _calculate_batt_and_unstored_in_next_time_slot(self, r, t, next_battery_energy):
if next_battery_energy > self.battery_capacity[r]:
self.decision_be[r][t] = self.battery_capacity[r]
self.unstored_energy[r][t] = next_battery_energy - self.battery_capacity[r]
else:
self.decision_be[r][t] = next_battery_energy
self.unstored_energy[r][t] = 0
'''
if self.decision_be[r][t] < 0:
print "Aiee"
'''
def update_the_battery_state(self):
for r in self.r_cloud:
for t in self.r_t:
if t == 0: # initial time
current_battery_energy = self.initial_battery_energy[r]
else:
current_battery_energy = self.decision_be[r][t - 1]
next_battery_energy = current_battery_energy + self.ge[r][t] - self.decision_s[r][t]
self._calculate_batt_and_unstored_in_next_time_slot(r, t, next_battery_energy)
def _delay_constraint_check_pass(self, user_index, time_index):
current_number_of_function_operated_in_cs = 0
for d in self.r_du_in_cc:
for f in self.r_up:
if self.decision_m[user_index][d][f][time_index] == 1:
current_number_of_function_operated_in_cs += 1
next_number_of_function_operated_in_cs = current_number_of_function_operated_in_cs + 1
if next_number_of_function_operated_in_cs > self.delay_threshold[user_index][time_index]:
return False
else:
return True
def _is_fossil_consumption(self, r, t):
if r == self.cc_index:
du_set = self.r_du_in_cc
static_cons = self.PC_CC_STATIC
dynamic_cons = self.PC_CC_DU
else:
du_set = list(range(r * self.n_of_du_per_ec, (r + 1) * self.n_of_du_per_ec))
static_cons = self.PC_EC_STATIC
dynamic_cons = self.PC_EC_DU
number_of_active_du = 0
for d in du_set:
number_of_active_du += self.decision_a[d][t]
total_energy_consumption = int(static_cons + number_of_active_du * dynamic_cons)
if total_energy_consumption - self.decision_s[r][t] > 0:
return True
else:
return False
    def _offload_the_function(self, i, d, f, t, rs2cs=True):
        """Move function ``f`` of user ``i`` away from DU ``d`` at slot ``t``.

        The function is detached from ``d`` (deactivating the DU when it
        becomes empty) and re-hosted first-fit on a DU with spare capacity:
        in the central cloud when ``rs2cs`` is True, otherwise in the user's
        own edge site.
        """
        self.decision_m[i][d][f][t] = 0
        self.du_load[d][t] -= self.traffic_load[i][t]
        # Deactivate the DU if this was its last assignment.
        if self._is_there_any_assignment_in_this_du(d, t) is False:
            self.decision_a[d][t] = False
        if rs2cs:
            du_range = self.r_du_in_cc
            capacity = self.L_CC
        else:
            r = self._get_site_from_user(i)
            du_range = list(range(r * self.n_of_du_per_ec, (r + 1) * self.n_of_du_per_ec))
            capacity = self.L_EC
        # First-fit placement on the target site.
        for d_newhost in du_range:
            if self.du_load[d_newhost][t] + self.traffic_load[i][t] <= capacity:
                self.du_load[d_newhost][t] += self.traffic_load[i][t]
                # print "i:{} t:{} d_newhost:{} self.du_load[d_newhost][t]:{}".format(i, t, d_newhost, self.du_load[d_newhost][t])
                self.decision_m[i][d_newhost][f][t] = 1
                self.decision_a[d_newhost][t] = 1
                break
        # NOTE(review): if no DU on the target site has spare capacity the
        # function stays unassigned; callers snapshot decision_m/decision_a
        # before calling — confirm they always roll back in that case.
    def _revise_the_du_assignment(self, r, t):
        """Repack all function assignments of site ``r`` at slot ``t``.

        The current assignments are cleared and replayed sequentially onto the
        site's DUs, filling each DU up to capacity before moving on, so the
        same load ends up on the minimum number of DUs.
        """
        if r == self.cc_index:
            user_set = self.r_i
            du_set = self.r_du_in_cc
            capacity = self.L_CC
        else:
            user_set = list(range(r * self.NUMBER_OF_UE_PER_EC, (r + 1) * self.NUMBER_OF_UE_PER_EC))
            du_set = list(range(r * self.n_of_du_per_ec, (r + 1) * self.n_of_du_per_ec))
            capacity = self.L_EC
        # Snapshot the assignment so it can be replayed after clearing.
        decision_m_copy = np.copy(self.decision_m)
        for d in du_set:
            self.decision_a[d][t] = 0
            self.du_load[d][t] = 0
            for i in user_set:
                for f in self.r_up:
                    self.decision_m[i][d][f][t] = 0
        current_du_index = r * self.n_of_du_per_ec
        for i in user_set:
            for f in self.r_up:
                for check_du in du_set:
                    if decision_m_copy[i][check_du][f][t] == 1:
                        # NOTE(review): when the current DU overflows, only the
                        # index advances — the item's load is never added to
                        # the next DU's du_load. Also current_du_index is not
                        # bounded by du_set; with enough load it could walk
                        # past the site's DU range. Verify both.
                        if self.du_load[current_du_index][t] + self.traffic_load[i][t] <= capacity:
                            self.du_load[current_du_index][t] += self.traffic_load[i][t]
                        else:
                            current_du_index += 1
                        self.decision_m[i][self.r_du[current_du_index]][f][t] = 1
                        break
        # print "sum of activation :{}".format( np.sum(self.decision_m))
        # Re-derive the activation flags from the repacked assignment.
        for d in du_set:
            if self._is_there_any_assignment_in_this_du(d, t):
                self.decision_a[d][t] = 1
    def offloading_to_server_site_experimental(self):
        """Variant of the offloading pass that visits edge sites in
        decreasing order of their fossil-energy consumption instead of plain
        index order."""
        for t in self.r_t:
            number_of_active_du_per_site, total_number_of_active_du_cs, total_number_of_active_du_rs = self.calculate_number_of_active_du_per_site(self.decision_a)
            total_consumption = self.calculate_total_consumption(number_of_active_du_per_site)
            fossil_consumption = self.calculate_fossil_consumption_in_specific_time(t, total_consumption)
            for r in self.r_edge_cloud:
                print("site:{} fossil_consumption:{}".format(r, fossil_consumption[r]))
            # Sort sites by fossil consumption, highest first.
            ordered_remote_site = np.argsort(fossil_consumption, axis=-1)[::-1]
            print("ordered_remote_site:{}".format(ordered_remote_site))
            # NOTE(review): hard-coded window — drops the top-ranked site and
            # keeps the next 20. Confirm this matches the experiment setup.
            ordered_remote_site = ordered_remote_site[1:21]
            print("ordered_remote_site:{}".format(ordered_remote_site))
            # self._offloading_to_server_site(self.r_edge_cloud, t)
            self._offloading_to_server_site(ordered_remote_site, t)
    def _offloading_to_server_site(self, ordered_remote_site, t):
        """Try to empty DUs of the given remote sites at slot ``t`` by moving
        their functions to the central site.

        DUs are visited from the highest index down; all of a DU's functions
        whose users still satisfy the delay constraint are offloaded, the
        site is repacked, and the attempt is kept only when it reduces the
        global active-DU count — otherwise the snapshot taken before the
        attempt is restored.
        """
        # print "TIME_SLOT:{}".format(t)
        for r in ordered_remote_site:
            if r == self.cc_index:
                continue
            # print "REMOTE SITE:{}".format(r)
            du_set_in_r = list(range(r * self.n_of_du_per_ec, (r + 1) * self.n_of_du_per_ec))
            du_set_in_r_reverse = du_set_in_r[::-1]
            user_set_in_r = list(range(r * self.NUMBER_OF_UE_PER_EC, (r + 1) * self.NUMBER_OF_UE_PER_EC))
            # Snapshot so an unprofitable attempt can be rolled back.
            copy_m = np.copy(self.decision_m)
            copy_a = np.copy(self.decision_a)
            copy_du_load = np.copy(self.du_load)
            prev_active_du, prev_fossil_consumption = self.get_active_du_and_fossil_consumption(t)
            # prev_active_du = self.calculate_number_of_active_du_specific_site(r, t, self.decision_a)
            for d in du_set_in_r_reverse:
                if self.decision_a[d][t] == 0:
                    continue
                for i in user_set_in_r:
                    for f in self.r_up:
                        if self.decision_m[i][d][f][t] == 1:
                            if self._delay_constraint_check_pass(i, t):
                                self._offload_the_function(i, d, f, t)
                self._revise_the_du_assignment(r, t)
                active_du = self.calculate_number_of_active_du_specific_site(r, t, self.decision_a)
                # NOTE(review): leftover debug toggle — the branch always runs.
                if True:
                    active_du, fossil_consumption = self.get_active_du_and_fossil_consumption(t)
                    # if prev_active_du <= active_du or prev_fossil_consumption <= fossil_consumption:
                    if prev_active_du <= active_du:
                        # print "TIME[{}] We could not empty the du:{} in RS:{} prev:{} active:{}".format(t, d, r, prev_active_du, active_du)
                        self.decision_m = np.copy(copy_m)
                        self.decision_a = np.copy(copy_a)
                        self.du_load = np.copy(copy_du_load)
                    else:
                        pass
                        # print "TIME[{}] Empty DU:{} in RS:{} prev:{} active:{}".format(t, d, r, prev_active_du, active_du)
    def offloading_to_server_site(self):
        """Greedy offloading pass: for every slot, try to empty each edge
        site's DUs (highest index first) by moving functions to the central
        site, keeping a move only if it reduces that site's active-DU count.

        NOTE(review): near-duplicate of ``_offloading_to_server_site``; this
        version visits sites in index order and compares the per-site
        active-DU count rather than the global metric.
        """
        for t in self.r_t:
            # print "TIME_SLOT:{}".format(t)
            for r in self.r_edge_cloud:
                # print "REMOTE SITE:{}".format(r)
                du_set_in_r = list(range(r * self.n_of_du_per_ec, (r + 1) * self.n_of_du_per_ec))
                du_set_in_r_reverse = du_set_in_r[::-1]
                user_set_in_r = list(range(r * self.NUMBER_OF_UE_PER_EC, (r + 1) * self.NUMBER_OF_UE_PER_EC))
                # Snapshot so an unprofitable attempt can be rolled back.
                copy_m = np.copy(self.decision_m)
                copy_a = np.copy(self.decision_a)
                copy_du_load = np.copy(self.du_load)
                prev_active_du = self.calculate_number_of_active_du_specific_site(r, t, self.decision_a)
                for d in du_set_in_r_reverse:
                    if self.decision_a[d][t] == 0:
                        continue
                    for i in user_set_in_r:
                        for f in self.r_up:
                            if self.decision_m[i][d][f][t] == 1:
                                if self._delay_constraint_check_pass(i, t):
                                    self._offload_the_function(i, d, f, t)
                    self._revise_the_du_assignment(r, t)
                    active_du = self.calculate_number_of_active_du_specific_site(r, t, self.decision_a)
                    # NOTE(review): leftover debug toggle — always runs.
                    if True:
                        if prev_active_du <= active_du:
                            # print "TIME[{}] We could not empty the du:{} in RS:{} prev:{} active:{}".format(t, d, r, prev_active_du, active_du)
                            self.decision_m = np.copy(copy_m)
                            self.decision_a = np.copy(copy_a)
                            self.du_load = np.copy(copy_du_load)
                        else:
                            pass
                            # print "TIME[{}] Empty DU:{} in RS:{} prev:{} active:{}".format(t, d, r, prev_active_du, active_du)
def _calculate_unstored_energy(self):
unstored_energy = [[0 for x in self.r_t] for x in self.r_cloud]
for r in self.r_cloud:
for t in self.r_t:
if t == 0:
remaining_battery_energy_before_consumption = self.initial_battery_energy[r]
else:
remaining_battery_energy_before_consumption = self.decision_be[r][t - 1]
energy_in_an_unlimited_battery = remaining_battery_energy_before_consumption + self.ge[r][t] - self.decision_s[r][t]
if energy_in_an_unlimited_battery > self.battery_capacity[r]:
unstored_energy[r][t] = energy_in_an_unlimited_battery - self.battery_capacity[r]
else:
unstored_energy[r][t] = 0
return unstored_energy
def sold_energy(self):
self.decision_p = [[0 for x in self.r_t] for x in self.r_cloud]
unstored_energy = self._calculate_unstored_energy()
for r in self.r_cloud:
for t in self.r_t:
self.decision_p[r][t] = unstored_energy[r][t]
def get_active_du_and_fossil_consumption(self, t):
active_du = self.calculate_number_of_active_du_specific_site(self.cc_index, t, self.decision_a)
number_of_active_du_per_site, total_number_of_active_du_cs, total_number_of_active_du_rs = self.calculate_number_of_active_du_per_site(self.decision_a)
total_consumption = self.calculate_total_consumption(number_of_active_du_per_site)
fossil_consumption = self.calculate_fossil_consumption_total_in_specific_time(t, total_consumption)
return active_du, fossil_consumption
    def user_migration(self):
        """Migrate users between central and remote sites based on where
        renewable energy is overflowing.

        When the central site has overflow, the RS2CS direction is only
        logged (see NOTE below); otherwise users of overflowing remote sites
        are moved from the central site back to their own site, and the move
        is rolled back if it does not reduce the central active-DU count.
        """
        unstored_energy = self._calculate_unstored_energy()
        for t in self.r_t:
            if unstored_energy[self.cc_index][t] > 0:
                for r in self.r_edge_cloud:
                    if unstored_energy[r][t] < 0:
                        # NOTE(review): 'pass' followed by a print — this
                        # branch only logs; the RS2CS migration itself looks
                        # unimplemented. Confirm intent.
                        pass
                        print("RS2CS Migration R:{} T:{} CS uns:{} RS uns:{}".format(r, t, unstored_energy[self.cc_index][t], unstored_energy[r][t]))
            else:
                # Snapshot so an unprofitable migration can be rolled back.
                copy_m = np.copy(self.decision_m)
                copy_a = np.copy(self.decision_a)
                copy_du_load = np.copy(self.du_load)
                prev_active_du, prev_fossil_consumption = self.get_active_du_and_fossil_consumption(t)
                for r in self.r_edge_cloud:
                    if unstored_energy[r][t] > 0:
                        print("CS2RS Migration R:{} T:{} CS uns:{} RS uns:{}".format(r, t, unstored_energy[self.cc_index][t], unstored_energy[r][t]))
                        # find the user set that is related with RS but serve in CS
                        du_set_in_r_reverse = self.r_du_in_cc[::-1]
                        user_set_in_r = list(range(r * self.NUMBER_OF_UE_PER_EC, (r + 1) * self.NUMBER_OF_UE_PER_EC))
                        for d in du_set_in_r_reverse:
                            if self.decision_a[d][t] == 0:
                                continue
                            '''
                            if not self._is_fossil_consumption(self.cc_index, t):
                                continue
                            '''
                            for i in user_set_in_r:
                                for f in self.r_up:
                                    if self.decision_m[i][d][f][t] == 1:
                                        # print "Migration::t:{} d:{} i:{} f:{}".format(t, d, i, f)
                                        rs2cs = False
                                        self._offload_the_function(i, d, f, t, rs2cs)
                        self._revise_the_du_assignment(self.cc_index, t)
                        active_du, fossil_consumption = self.get_active_du_and_fossil_consumption(t)
                        # if prev_active_du <= active_du or prev_fossil_consumption <= fossil_consumption:
                        if prev_active_du <= active_du:
                            print("TIME[{}] MIGRATION CANCELLED! in CS prev_active:{} active:{} prev_fossil_consumption:{} fossil_consumption:{}" \
                                  .format(t, prev_active_du, active_du, prev_fossil_consumption, fossil_consumption))
                            self.decision_m = np.copy(copy_m)
                            self.decision_a = np.copy(copy_a)
                            self.du_load = np.copy(copy_du_load)
                        else:
                            pass
                            print("TIME[{}] MIGRATION COMPLETED! in CS prev_active:{} active:{} prev_fossil_consumption:{} fossil_consumption:{}" \
                                  .format(t, prev_active_du, active_du, prev_fossil_consumption, fossil_consumption))
def initial_ren_en_assignment(self, site_list):
ordered_energy_price_time_slot = np.argsort(self.energy_prices_per_hour, axis=-1)
ordered_energy_price_time_slot = ordered_energy_price_time_slot[::-1]
number_of_active_du_per_site, total_number_of_active_du_cs, total_number_of_active_du_rs = self.calculate_number_of_active_du_per_site(self.decision_a)
total_energy_consumption = self.calculate_total_consumption(number_of_active_du_per_site)
for r in site_list:
for current_t in ordered_energy_price_time_slot:
if self.reserved_energy[r][current_t] == MAGIC_NUMBER:
availableEn = self.ge[r][current_t]
else:
availableEn = - max(self.reserved_energy[r][current_t], 0)
if current_t == 0:
next_battery_energy = self.initial_battery_energy[r] + availableEn
else:
next_battery_energy = self.decision_be[r][current_t - 1] + availableEn
if next_battery_energy >= total_energy_consumption[r][current_t]:
self.decision_s[r][current_t] = total_energy_consumption[r][current_t]
else:
self.decision_s[r][current_t] = next_battery_energy
self.update_the_battery_state()
if current_t != 0: # if it is the first time slot we did not need to think about the previous day
# in the "current_t" we spend renewable energy | |
scrolling of your selected Effect.
"""
# Text attributes for use when printing to the Screen.
A_BOLD = constants.A_BOLD
A_NORMAL = constants.A_NORMAL
A_REVERSE = constants.A_REVERSE
A_UNDERLINE = constants.A_UNDERLINE
# Text colours for use when printing to the Screen.
COLOUR_BLACK = constants.COLOUR_BLACK
COLOUR_RED = constants.COLOUR_RED
COLOUR_GREEN = constants.COLOUR_GREEN
COLOUR_YELLOW = constants.COLOUR_YELLOW
COLOUR_BLUE = constants.COLOUR_BLUE
COLOUR_MAGENTA = constants.COLOUR_MAGENTA
COLOUR_CYAN = constants.COLOUR_CYAN
COLOUR_WHITE = constants.COLOUR_WHITE
# Standard extended key codes.
KEY_ESCAPE = -1
KEY_F1 = -2
KEY_F2 = -3
KEY_F3 = -4
KEY_F4 = -5
KEY_F5 = -6
KEY_F6 = -7
KEY_F7 = -8
KEY_F8 = -9
KEY_F9 = -10
KEY_F10 = -11
KEY_F11 = -12
KEY_F12 = -13
KEY_F13 = -14
KEY_F14 = -15
KEY_F15 = -16
KEY_F16 = -17
KEY_F17 = -18
KEY_F18 = -19
KEY_F19 = -20
KEY_F20 = -21
KEY_F21 = -22
KEY_F22 = -23
KEY_F23 = -24
KEY_F24 = -25
KEY_PRINT_SCREEN = -100
KEY_INSERT = -101
KEY_DELETE = -102
KEY_HOME = -200
KEY_END = -201
KEY_LEFT = -203
KEY_UP = -204
KEY_RIGHT = -205
KEY_DOWN = -206
KEY_PAGE_UP = -207
KEY_PAGE_DOWN = -208
KEY_BACK = -300
KEY_TAB = -301
KEY_BACK_TAB = -302
KEY_NUMPAD0 = -400
KEY_NUMPAD1 = -401
KEY_NUMPAD2 = -402
KEY_NUMPAD3 = -403
KEY_NUMPAD4 = -404
KEY_NUMPAD5 = -405
KEY_NUMPAD6 = -406
KEY_NUMPAD7 = -407
KEY_NUMPAD8 = -408
KEY_NUMPAD9 = -409
KEY_MULTIPLY = -410
KEY_ADD = -411
KEY_SUBTRACT = -412
KEY_DECIMAL = -413
KEY_DIVIDE = -414
KEY_CAPS_LOCK = -500
KEY_NUM_LOCK = -501
KEY_SCROLL_LOCK = -502
KEY_SHIFT = -600
KEY_CONTROL = -601
KEY_MENU = -602
    def __init__(self, height, width, buffer_height, unicode_aware):
        """
        Don't call this constructor directly.

        :param height: The visible height of the Screen.
        :param width: The visible width of the Screen.
        :param buffer_height: The height of the scroll-back buffer.
        :param unicode_aware: Whether the Screen can handle unicode or not.
        """
        super(Screen, self).__init__(
            height, width, buffer_height, 0, unicode_aware)
        # Initialize base class variables - e.g. those used for drawing.
        self.height = height
        self.width = width
        self._last_start_line = 0
        # Set up internal state for colours - used by children to determine
        # changes to text colour when refreshing the screen.
        self._colour = 0
        self._attr = 0
        self._bg = 0
        # tracking of current cursor position - used in screen refresh.
        self._cur_x = 0
        self._cur_y = 0
        # Control variables for playing out a set of Scenes.
        self._scenes = []
        self._scene_index = 0
        self._frame = 0
        self._idle_frame_count = 0
        self._forced_update = False
        # Default handler for input events no Effect consumed.
        self._unhandled_input = self._unhandled_event_default
    @classmethod
    def open(cls, height=None, catch_interrupt=False, unicode_aware=None):
        """
        Construct a new Screen for any platform. This will just create the
        correct Screen object for your environment. See :py:meth:`.wrapper` for
        a function to create and tidy up once you've finished with the Screen.

        :param height: The buffer height for this window (for testing only).
        :param catch_interrupt: Whether to catch and prevent keyboard
            interrupts. Defaults to False to maintain backwards compatibility.
        :param unicode_aware: Whether the application can use unicode or not.
            If None, try to detect from the environment if UTF-8 is enabled.
        :returns: A platform-specific Screen (``_WindowsScreen`` on win32,
            ``_CursesScreen`` elsewhere).
        """
        if sys.platform == "win32":
            # Clone the standard output buffer so that we can do whatever we
            # need for the application, but restore the buffer at the end.
            # Note that we need to resize the clone to ensure that it is the
            # same size as the original in some versions of Windows.
            old_out = win32console.PyConsoleScreenBufferType(
                win32file.CreateFile("CONOUT$",
                                     win32file.GENERIC_READ | win32file.GENERIC_WRITE,
                                     win32file.FILE_SHARE_WRITE,
                                     None,
                                     win32file.OPEN_ALWAYS,
                                     0,
                                     None))
            try:
                info = old_out.GetConsoleScreenBufferInfo()
            except pywintypes.error:
                # Not a real console buffer (e.g. redirected output).
                info = None
            win_out = win32console.CreateConsoleScreenBuffer()
            if info:
                win_out.SetConsoleScreenBufferSize(info['Size'])
            else:
                win_out.SetStdHandle(win32console.STD_OUTPUT_HANDLE)
            win_out.SetConsoleActiveScreenBuffer()
            # Get the standard input buffer.
            win_in = win32console.PyConsoleScreenBufferType(
                win32file.CreateFile("CONIN$",
                                     win32file.GENERIC_READ | win32file.GENERIC_WRITE,
                                     win32file.FILE_SHARE_READ,
                                     None,
                                     win32file.OPEN_ALWAYS,
                                     0,
                                     None))
            win_in.SetStdHandle(win32console.STD_INPUT_HANDLE)
            # Hide the cursor.
            win_out.SetConsoleCursorInfo(1, 0)
            # Disable scrolling
            out_mode = win_out.GetConsoleMode()
            win_out.SetConsoleMode(
                out_mode & ~ win32console.ENABLE_WRAP_AT_EOL_OUTPUT)
            # Enable mouse input, disable quick-edit mode and disable ctrl-c
            # if needed.
            in_mode = win_in.GetConsoleMode()
            new_mode = (in_mode | win32console.ENABLE_MOUSE_INPUT |
                        ENABLE_EXTENDED_FLAGS)
            new_mode &= ~ENABLE_QUICK_EDIT_MODE
            if catch_interrupt:
                # Ignore ctrl-c handlers if specified.
                new_mode &= ~win32console.ENABLE_PROCESSED_INPUT
            win_in.SetConsoleMode(new_mode)
            screen = _WindowsScreen(win_out, win_in, height, old_out, in_mode,
                                    unicode_aware=unicode_aware)
        else:
            # Reproduce curses.wrapper()
            stdscr = curses.initscr()
            curses.noecho()
            curses.cbreak()
            stdscr.keypad(1)
            # Fed up with linters complaining about original curses code - trying to be a bit better...
            # noinspection PyBroadException
            # pylint: disable=broad-except
            try:
                curses.start_color()
            except Exception as e:
                # Colour support is optional - carry on without it.
                logger.debug(e)
            screen = _CursesScreen(stdscr, height,
                                   catch_interrupt=catch_interrupt,
                                   unicode_aware=unicode_aware)
        return screen
    @abstractmethod
    def close(self, restore=True):
        """
        Close down this Screen and tidy up the environment as required.

        :param restore: whether to restore the environment or not.
        """
@classmethod
def wrapper(cls, func, height=None, catch_interrupt=False, arguments=None,
unicode_aware=None):
"""
Construct a new Screen for any platform. This will initialize the
Screen, call the specified function and then tidy up the system as
required when the function exits.
:param func: The function to call once the Screen has been created.
:param height: The buffer height for this Screen (only for test purposes).
:param catch_interrupt: Whether to catch and prevent keyboard
interrupts. Defaults to False to maintain backwards compatibility.
:param arguments: Optional arguments list to pass to func (after the
Screen object).
:param unicode_aware: Whether the application can use unicode or not.
If None, try to detect from the environment if UTF-8 is enabled.
"""
screen = Screen.open(height,
catch_interrupt=catch_interrupt,
unicode_aware=unicode_aware)
restore = True
try:
try:
if arguments:
return func(screen, *arguments)
else:
return func(screen)
except ResizeScreenError:
restore = False
raise
finally:
screen.close(restore)
def _reset(self):
"""
Reset the Screen.
"""
self._last_start_line = 0
self._colour = None
self._attr = None
self._bg = None
self._cur_x = None
self._cur_y = None
    def refresh(self):
        """
        Refresh the screen.

        Flushes any pending scrolling, then redraws only the cells that
        changed since the last refresh (as tracked by the double-buffer).
        """
        # Scroll the screen now - we've already sorted the double-buffer to reflect this change.
        if self._last_start_line != self._start_line:
            self._scroll(self._start_line - self._last_start_line)
            self._last_start_line = self._start_line
        # Now draw any deltas to the scrolled screen. Note that CJK character sets sometimes
        # use double-width characters, so don't try to draw the next 2nd char (of 0 width).
        for y, x in self._buffer.deltas(0, self.height):
            new_cell = self._buffer.get(x, y)
            if new_cell[4] > 0:
                self._change_colours(new_cell[1], new_cell[2], new_cell[3])
                self._print_at(new_cell[0], x, y, new_cell[4])
        # Resynch for next refresh.
        self._buffer.sync()
    def clear(self):
        """
        Clear the Screen of all content.

        Note that this will instantly clear the Screen and reset all buffers to the default state,
        without waiting for you to call :py:meth:`~.Screen.refresh`.
        """
        # Clear the actual terminal
        self.reset()
        # Restore the default colours before wiping the display.
        self._change_colours(Screen.COLOUR_WHITE, 0, 0)
        self._clear()
def get_key(self):
"""
Check for a key without waiting. This method is deprecated. Use
:py:meth:`.get_event` instead.
"""
event = self.get_event()
if event and isinstance(event, KeyboardEvent):
return event.key_code
return None
    @abstractmethod
    def get_event(self):
        """
        Check for any events (e.g. key-press or mouse movement) without waiting.

        :returns: A :py:obj:`.Event` object if anything was detected, otherwise
            it returns None.
        """
@staticmethod
def ctrl(char):
"""
Calculate the control code for a given key. For example, this converts
"a" to 1 (which is the code for ctrl-a).
:param char: The key to convert to a control code.
:return: The control code as an integer or None if unknown.
"""
# Convert string to int... assuming any non-integer is a string.
# TODO: Consider asserting a more rigorous test without falling back to past basestring.
if not isinstance(char, int):
char = ord(char.upper())
# Only deal with the characters between '@' and '_'
return char & 0x1f if 64 <= char <= 95 else None
    @abstractmethod
    def has_resized(self):
        """
        Check whether the screen has been re-sized.

        :returns: True when the screen has been re-sized since the last check.
        """
    def getch(self, x, y):
        """
        Get the character at a specified location. This method is deprecated.
        Use :py:meth:`.get_from` instead.

        :param x: The x coordinate.
        :param y: The y coordinate.
        :return: Whatever :py:meth:`.get_from` returns for that cell.
        """
        return self.get_from(x, y)
def putch(self, text, x, y, colour=7, attr=0, bg=0, transparent=False):
"""
Print text at the specified location. This method is deprecated. Use
:py:meth:`.print_at` instead.
:param text: The (single line) text to be printed.
:param x: The column (x coord) for the start of the text.
:param y: The line (y coord) for the start of the text.
:param colour: The colour of the text to be displayed.
:param attr: The cell attribute of the | |
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
import numpy as np
from numbers import Number
from geotext import GeoText
import random
from functools import reduce
from nltk import pos_tag
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
from scipy.sparse import lil_matrix
#%%Read the entries from the file and add them to a list
#with optional filtering
def read_words(filename, raw=False):
    '''
    Returns a list of [tokens per line]; blank lines are removed.

    Keyword arguments:
    raw: False => expected format: each line consists of: word features label
                  (features and labels optional)
         True  => a raw text file
    '''
    # NOTE(review): the raw branch only seeds an empty token list and the
    # line-splitting loop below still runs; confirm whether raw files were
    # meant to be passed through word_tokenize instead.
    if raw:
        words = word_tokenize('')
    else:
        words = []
    # 'with' guarantees the file handle is closed even if parsing fails
    # (the original left the file open on any exception).
    with open(filename, 'r') as f:
        for line in f:
            l = line.replace('\n', '').split(' ')
            # Blank lines and single-token lines are skipped.
            if len(l) == 1:
                continue
            words.append(l)
    return words
#%% Generate new features based on words:
def word_shape(word):
    '''
    Generate a shape string for the input string.

    Rules:
    1. Each character is classified: capital letters -> 'X', lowercase -> 'x',
       digits -> 'd', punctuation/other -> unchanged.
    2. The first and last two classified characters are kept verbatim; for the
       middle section only one representative per character class is kept
       ('x', then 'X', then 'd', then any remaining punctuation in order).

    Example:
        a2cdEFG:HI-j -> xdxxXXX:XX-x -> xdxX:-x

    Note: for words shorter than 5 characters the head and tail slices
    overlap, so characters can appear twice (e.g. 'ab' -> 'xxxx'); this
    matches the historical behaviour and is kept for feature compatibility.
    An empty input now yields '' (the old reduce() call raised TypeError).

    Credit for the idea: "Information Extraction and Named Entity Recognition"
    lecture; short explanation: https://youtu.be/wxyZTSc2tM0?t=708
    '''
    # Classify each character; ''.join is linear, unlike the original
    # character-by-character string concatenation via reduce().
    shape = ''.join('x' if c.islower() else
                    'X' if c.isupper() else
                    'd' if c.isdigit() else c
                    for c in word)
    middle = shape[2:-2]
    # Collapse the middle: one marker per letter/digit class present,
    # punctuation characters kept in their original order.
    collapsed = (('x' if 'x' in middle else '') +
                 ('X' if 'X' in middle else '') +
                 ('d' if 'd' in middle else '') +
                 middle.replace('x', '').replace('X', '').replace('d', ''))
    return shape[:2] + collapsed + shape[-2:]
class feature_list:
    """Toggle switches for the features produced by append_features()."""
    word_itself = True
    POS = True              # part-of-speech tag
    stem = True             # Porter stem of the word
    is_lowercase = True     # all letters lowercase
    is_uppercase = True     # all letters uppercase
    is_digit = True         # contains only digits
    is_capitalized = True #the first letter
    contains_x = True       # contains 'x' or 'X'
    is_long = True #longer than 6 chars
    is_location = True      # city/country name according to GeoText
    last_2_chars = True
    last_3_chars = True
    word_shape = True       # see word_shape()
    prev_word = True        # context feature: previous token
    prev_POS = True
    next_word = True        # context feature: next token
    next_POS = True
def append_features(words, features_to_add = None, is_training_set = True,
                    is_POS_present=False):
    '''
    Appends features to the words.

    Keyword arguments:
    words: format list of [word POS class/label].
           To omit POS tag set is_POS_present to False;
           to omit class/label set is_training_set to False.
    features_to_add:
            None => all possible features added
            otherwise expects a feature_list object
    is_training_set: set to False if no output class/labels are provided
    is_POS_present: set to False if no POS labels are in the text

    Returns a new list; each entry holds the enabled features for one word,
    with the (B-/I- stripped) label appended last when is_training_set is True.
    '''
    # 'is None' identity check instead of the original '== None'.
    if features_to_add is None:
        feat = feature_list()
    else:
        feat = features_to_add
    if feat.POS and not is_POS_present:
        # Tag the whole sequence at once - much faster than per-word calls.
        poss = pos_tag([word[0] for word in words])
    stemmer = PorterStemmer()
    words_upgraded = []
    maxi = len(words)
    for i in range(maxi):
        word = words[i][0]
        geo = GeoText(word)
        #word+features
        wpf = []
        if feat.word_itself: wpf.append(word) #word itself
        if feat.stem: wpf.append(stemmer.stem(word))
        if feat.POS:
            if is_POS_present: #POS
                wpf.append(words[i][1])
            else:
                wpf.append(poss[i][1])
        if feat.is_lowercase: wpf.append('_lower:'+ str(word.islower())) #all letters lowercase
        if feat.is_uppercase: wpf.append('_upper:'+ str(word.isupper())) #all letters uppercase
        if feat.is_digit: wpf.append('_digit:'+ str(word.isdigit())) #contains only digits
        if feat.is_capitalized: wpf.append('_title:'+ str(word.istitle())) #first letter capitalized
        if feat.contains_x: wpf.append('_x:' + str('x' in word or 'X' in word)) #contains 'x' or 'X'
        if feat.is_long: wpf.append('_long:' + str(len(word)>6)) #longer than 6 characters
        if feat.is_location: wpf.append('loc:' + str(any(geo.cities) or any(geo.country_mentions))) #city or country
        if feat.last_2_chars: wpf.append(word[-2:]) #last 2 characters
        if feat.last_3_chars: wpf.append(word[-3:]) #last 3 characters
        if feat.word_shape: wpf.append(word_shape(word)) #see word_shape(word) function
        # Context features fall back to a '-' placeholder at the boundaries.
        # (The original one-liners carried trailing commas that built and
        # discarded throwaway tuples on every iteration.)
        if feat.prev_word:
            wpf.append('-1:' + words[i - 1][0] if i > 0 else '-1:-')
        if feat.prev_POS:
            # NOTE(review): reads column 1 of the previous entry even when
            # is_POS_present is False - confirm the input always has it.
            wpf.append('-1:' + words[i - 1][1] if i > 0 else '-1:-')
        if feat.next_word:
            wpf.append('+1:' + words[i + 1][0] if i < maxi - 1 else '+1:-')
        if feat.next_POS:
            wpf.append('+1:' + words[i + 1][1] if i < maxi - 1 else '+1:-')
        #the label (will be split from X into Y in createDataset(words))
        #Also strips the I or B tag
        if is_training_set:
            if words[i][3]=='O':
                wpf.append(words[i][3])
            else:
                wpf.append(words[i][3][2:])
        words_upgraded.append(wpf)
    return words_upgraded
#%%Reshape to the desired one;
#doing this while reading it would increase speed, but this is better for modularity
class translator:
    '''
    converts strings into a unique number
    Example: If called (in this order) on each of the of the elements in the array:
    ['a',b','asdac','b','a'] -> [0, 1, 2, 1, 0]
    [0, 'a' , 2] -> ['a', 0, 'asdac']
    Input can be any non-numeric type. Numbers will be looked up and converted
    back to the original object
    '''
    def __init__(self):
        self.words = {}    # object -> assigned number
        self.numbers = {}  # number -> object (reverse index, kept in sync)
        self.idx = 0       # next number to hand out
    def translate(self,word):
        '''
        Works both ways.
        Number will be converted into the stored object,
        new objects will be assigned new numbers

        Returns -1 when a number was never assigned to any object.
        '''
        if isinstance(word,Number):
            # O(1) reverse lookup via the inverse dict instead of scanning
            # all items; float keys (e.g. numpy labels) hash-match their
            # integer ids just like the old `v == word` comparison did.
            return self.numbers.get(word, -1)
        else:
            if not (word in self.words):
                self.words[word] = self.idx
                self.numbers[self.idx] = word
                self.idx += 1
                return self.idx - 1
            else:
                return self.words[word]
#ensure all words are similarly split
def create_dataset(words, input_only = False):
    '''
    Converts the strings from the dataset into numbers.
    (can be used as a pre-cursor) to vectorization (in nltk sense).
    Returns the translator used, the list of numbers and the list of labels
    for the output classes
    '''
    T = translator()
    n_rows = len(words)
    n_cols = len(words[0]) - 1
    if input_only:
        # no label column present: every column is a feature
        n_cols += 1
    Y = None
    labels_num = None
    labels_name = None
    X = np.zeros([n_rows, n_cols])
    if not input_only:
        # translate the label column first so label ids precede feature ids
        Y = np.zeros([n_rows])
        for row, word in enumerate(words):
            Y[row] = T.translate(word[-1])
        labels_num = sorted(list(set(Y)))
        labels_name = [T.translate(num) for num in labels_num]
    # column-major order preserves the id-assignment sequence of the features
    for col in range(n_cols):
        for row, word in enumerate(words):
            X[row][col] = T.translate(word[col])
    return X, Y, T, labels_num, labels_name
#The one-hot encoding greatly increases performance
#Potential bug: if there is e.g. a word "B-LOC" in the corpus, it will be encoded
#This should however not be the case, since the input should not contain output classes
#the same way as the actual NE label
def one_hot(X, transl):
    '''
    Converts the numbers from the dataset into one-hot encoding.
    To obtain X and transl, run createDataset(words)
    :param X: 1-D or 2-D numpy array of translated ids
    :param transl: translator; its `idx` attribute is the number of known ids
    :return: sparse lil_matrix, one row per sample with a 1 for each id present
    '''
    X_new = lil_matrix((len(X), transl.idx), dtype=np.int8)
    #X_new = np.zeros([len(X),transl.idx])
    # Dimensionality is a property of X, not of a row: test it once instead
    # of re-evaluating len(X.shape) on every loop iteration.
    if len(X.shape) == 1:
        for i in range(X.shape[0]):
            X_new[i, int(X[i])] = 1
    else:
        for i in range(X.shape[0]):
            for j in range(X.shape[1]):
                X_new[i, int(X[i][j])] = 1
    return X_new
def shuffle_parallel(a, b):
    '''Shuffles while keeping the indices together. a[i]->a[j]=>b[i]->b[j]. a and b mutated'''
    # Capture the RNG state so the exact same permutation can be replayed
    # on `b` and on the returned index array.
    saved_state = np.random.get_state()
    np.random.shuffle(a)
    np.random.set_state(saved_state)
    np.random.shuffle(b)
    np.random.set_state(saved_state)
    permutation = np.arange(len(b))
    np.random.shuffle(permutation)
    return permutation
class data_wrap:
    '''
    Bundles the train/test splits together with the translator metadata so a
    single object can be passed around instead of seven separate arguments.
    '''
    def __init__(self, x_train, y_train, x_test, y_test,
                 transl=None, labels_num=None, labels_name=None):
        # Train/test feature and label partitions
        self.x_train = x_train
        self.y_train = y_train
        self.x_test = x_test
        self.y_test = y_test
        # Optional translator and label metadata from create_dataset
        self.trans = transl
        self.labels_num = labels_num
        self.labels_name = labels_name
#%%Preparation for using ML algorithms
#Split training and test sets
def split_tr(X, Y, ratio):
    '''
    Split X and Y into train/test partitions at `ratio`.
    returns: x_train,y_train,x_test,y_test
    '''
    # np.int was a deprecated alias removed in NumPy 1.24; the builtin int()
    # truncates identically. Catch only AttributeError (no .shape) instead of
    # a bare except that would hide real errors.
    try:
        lim = max(int(X.shape[0] * ratio), int(Y.shape[0] * ratio))
    except AttributeError:
        # plain lists / tuples have no .shape attribute
        lim = max(int(len(X) * ratio), int(len(Y) * ratio))
    return X[:lim], Y[:lim], X[lim:], Y[lim:]
#%%Construct dictionary for CRF
def sentence_end(word, POS_index):
    '''
    Return True when `word` (one feature row) terminates a sentence.
    :param word: feature list for a token; word[0] is the token text
    :param POS_index: column of the POS tag, or None to test the token text
        against the '.?!' characters instead
    '''
    # `is None` instead of `== None` (identity test for the singleton)
    if POS_index is None:
        # substring test: '.', '?' and '!' tokens end a sentence
        return word[0] in '.?!'
    return word[POS_index] == '.'
def words2dictionary(words, feature_names=None, POS_index=None):
    '''
    Transforms word list into sentences (list of list -> list of list of dict)
    Used for CRF model.
    Keyword arguments:
    words: list of [x1, x2,...,y]
    feature_names: optional. The dict returned will use this list as keys.
        Having different nr of feature_names and x colums is not supported
    POS_index: if specified, this will be used to identify sentence endings
        The POS character associated to the sentence end should be '.'.
        If left None, '.?!' set will be used to identify sentence ending
    Returns (token_sentences, label_sentences). Tokens after the last
    sentence terminator are dropped (unchanged from the original behaviour).
    '''
    new_tokens = []
    new_labels = []
    if feature_names is None:
        feature_names = [str(i) for i in range(len(words[0]) - 1)]
    token_sentence = []
    label_sentence = []
    for word in words:
        # map the feature columns onto the requested key names
        token_sentence.append({name: word[i] for i, name in enumerate(feature_names)})
        # label sits directly after the feature columns (previously word[idx+1],
        # which depended on the loop variable leaking out of the inner loop)
        label_sentence.append(word[len(feature_names)])
        if sentence_end(word, POS_index):
            new_tokens.append(token_sentence)
            token_sentence = []
            new_labels.append(label_sentence)
            label_sentence = []
    return new_tokens, new_labels
def words2tuples(words,feature_used = 0,POS_index = None):
'''
Trasnforms word list into | |
'Hyrule Castle Exit (South)', player)
connect_exit(world, 'Hyrule Castle Exit (South)', 'Hyrule Castle Entrance (South)', player)
caves.append(('Hyrule Castle Exit (West)', 'Hyrule Castle Exit (East)'))
else:
doors.append('Hyrule Castle Entrance (South)')
entrances.append('Hyrule Castle Entrance (South)')
caves.append(('Hyrule Castle Exit (South)', 'Hyrule Castle Exit (West)', 'Hyrule Castle Exit (East)'))
# now let's deal with mandatory reachable stuff
        def extract_reachable_exit(cavelist):
            # Pick a random multi-exit cave from `cavelist` and return it,
            # removing it from the list (the list is also shuffled in place).
            # Uses `world.random` from the enclosing scope for seeded RNG.
            world.random.shuffle(cavelist)
            candidate = None
            for cave in cavelist:
                if isinstance(cave, tuple) and len(cave) > 1:
                    # special handling: TRock has two entries that we should consider entrance only
                    # ToDo this should be handled in a more sensible manner
                    if cave[0] in ['Turtle Rock Exit (Front)', 'Spectacle Rock Cave Exit (Peak)'] and len(cave) == 2:
                        continue
                    candidate = cave
                    break
            if candidate is None:
                raise KeyError('No suitable cave.')
            cavelist.remove(candidate)
            return candidate
        def connect_reachable_exit(entrance, caves, doors):
            # Guarantee `entrance` is escapable: wire the last exit of a
            # randomly chosen multi-exit cave to it, attach a door, and push
            # the cave's remaining exits back into the pool.
            cave = extract_reachable_exit(caves)
            exit = cave[-1]
            cave = cave[:-1]
            connect_exit(world, exit, entrance, player)
            connect_entrance(world, doors.pop(), exit, player)
            # rest of cave now is forced to be in this world
            caves.append(cave)
# connect mandatory exits
for entrance in entrances_must_exits:
connect_reachable_exit(entrance, caves, doors)
# place old man, has limited options
# exit has to come from specific set of doors, the entrance is free to move about
old_man_entrances = [entrance for entrance in old_man_entrances if entrance in entrances]
world.random.shuffle(old_man_entrances)
old_man_exit = old_man_entrances.pop()
entrances.remove(old_man_exit)
connect_exit(world, 'Old Man Cave Exit (East)', old_man_exit, player)
connect_entrance(world, doors.pop(), 'Old Man Cave Exit (East)', player)
caves.append('Old Man Cave Exit (West)')
# place blacksmith, has limited options
blacksmith_doors = [door for door in blacksmith_doors if door in doors]
world.random.shuffle(blacksmith_doors)
blacksmith_hut = blacksmith_doors.pop()
connect_entrance(world, blacksmith_hut, 'Blacksmiths Hut', player)
doors.remove(blacksmith_hut)
# place dam and pyramid fairy, have limited options
bomb_shop_doors = [door for door in bomb_shop_doors if door in doors]
world.random.shuffle(bomb_shop_doors)
bomb_shop = bomb_shop_doors.pop()
connect_entrance(world, bomb_shop, 'Big Bomb Shop', player)
doors.remove(bomb_shop)
# handle remaining caves
for cave in caves:
if isinstance(cave, str):
cave = (cave,)
for exit in cave:
connect_exit(world, exit, entrances.pop(), player)
connect_entrance(world, doors.pop(), exit, player)
# place remaining doors
connect_doors(world, doors, door_targets, player)
elif world.shuffle[player] == 'insanity_legacy':
world.fix_fake_world[player] = False
# beware ye who enter here
entrances = LW_Entrances + LW_Dungeon_Entrances + DW_Entrances + DW_Dungeon_Entrances + Old_Man_Entrances + ['Skull Woods Second Section Door (East)', 'Skull Woods First Section Door', 'Kakariko Well Cave', 'Bat Cave Cave', 'North Fairy Cave', 'Sanctuary', 'Lost Woods Hideout Stump', 'Lumberjack Tree Cave']
entrances_must_exits = DW_Entrances_Must_Exit + DW_Dungeon_Entrances_Must_Exit + LW_Dungeon_Entrances_Must_Exit + ['Skull Woods Second Section Door (West)']
doors = LW_Entrances + LW_Dungeon_Entrances + LW_Dungeon_Entrances_Must_Exit + ['Kakariko Well Cave', 'Bat Cave Cave', 'North Fairy Cave', 'Sanctuary', 'Lost Woods Hideout Stump', 'Lumberjack Tree Cave'] + Old_Man_Entrances +\
DW_Entrances + DW_Dungeon_Entrances + DW_Entrances_Must_Exit + DW_Dungeon_Entrances_Must_Exit + ['Skull Woods First Section Door', 'Skull Woods Second Section Door (East)', 'Skull Woods Second Section Door (West)']
world.random.shuffle(doors)
old_man_entrances = list(Old_Man_Entrances) + ['Tower of Hera']
caves = Cave_Exits + Dungeon_Exits + Cave_Three_Exits + ['Old Man House Exit (Bottom)', 'Old Man House Exit (Top)', 'Skull Woods First Section Exit', 'Skull Woods Second Section Exit (East)', 'Skull Woods Second Section Exit (West)',
'Kakariko Well Exit', 'Bat Cave Exit', 'North Fairy Cave Exit', 'Lost Woods Hideout Exit', 'Lumberjack Tree Exit', 'Sanctuary Exit']
# shuffle up holes
hole_entrances = ['Kakariko Well Drop', 'Bat Cave Drop', 'North Fairy Cave Drop', 'Lost Woods Hideout Drop', 'Lumberjack Tree Tree', 'Sanctuary Grave',
'Skull Woods First Section Hole (East)', 'Skull Woods First Section Hole (West)', 'Skull Woods First Section Hole (North)', 'Skull Woods Second Section Hole']
hole_targets = ['Kakariko Well (top)', 'Bat Cave (right)', 'North Fairy Cave', 'Lost Woods Hideout (top)', 'Lumberjack Tree (top)', 'Sewer Drop', 'Skull Woods Second Section (Drop)',
'Skull Woods First Section (Left)', 'Skull Woods First Section (Right)', 'Skull Woods First Section (Top)']
if world.mode[player] == 'standard':
# cannot move uncle cave
connect_entrance(world, 'Hyrule Castle Secret Entrance Drop', 'Hyrule Castle Secret Entrance', player)
connect_exit(world, 'Hyrule Castle Secret Entrance Exit', 'Hyrule Castle Secret Entrance Stairs', player)
connect_entrance(world, 'Hyrule Castle Secret Entrance Stairs', 'Hyrule Castle Secret Entrance Exit', player)
else:
hole_entrances.append('Hyrule Castle Secret Entrance Drop')
hole_targets.append('Hyrule Castle Secret Entrance')
doors.append('Hyrule Castle Secret Entrance Stairs')
entrances.append('Hyrule Castle Secret Entrance Stairs')
caves.append('Hyrule Castle Secret Entrance Exit')
if not world.shuffle_ganon:
connect_two_way(world, 'Ganons Tower', 'Ganons Tower Exit', player)
connect_two_way(world, 'Pyramid Entrance', 'Pyramid Exit', player)
connect_entrance(world, 'Pyramid Hole', 'Pyramid', player)
else:
entrances.append('Ganons Tower')
caves.extend(['Ganons Tower Exit', 'Pyramid Exit'])
hole_entrances.append('Pyramid Hole')
hole_targets.append('Pyramid')
entrances_must_exits.append('Pyramid Entrance')
doors.extend(['Ganons Tower', 'Pyramid Entrance'])
world.random.shuffle(hole_entrances)
world.random.shuffle(hole_targets)
world.random.shuffle(entrances)
# fill up holes
for hole in hole_entrances:
connect_entrance(world, hole, hole_targets.pop(), player)
# hyrule castle handling
if world.mode[player] == 'standard':
# must connect front of hyrule castle to do escape
connect_entrance(world, 'Hyrule Castle Entrance (South)', 'Hyrule Castle Exit (South)', player)
connect_exit(world, 'Hyrule Castle Exit (South)', 'Hyrule Castle Entrance (South)', player)
caves.append(('Hyrule Castle Exit (West)', 'Hyrule Castle Exit (East)'))
else:
doors.append('Hyrule Castle Entrance (South)')
entrances.append('Hyrule Castle Entrance (South)')
caves.append(('Hyrule Castle Exit (South)', 'Hyrule Castle Exit (West)', 'Hyrule Castle Exit (East)'))
# now let's deal with mandatory reachable stuff
        def extract_reachable_exit(cavelist):
            # Pick a random multi-exit cave from `cavelist` and return it,
            # removing it from the list (the list is also shuffled in place).
            # Uses `world.random` from the enclosing scope for seeded RNG.
            world.random.shuffle(cavelist)
            candidate = None
            for cave in cavelist:
                if isinstance(cave, tuple) and len(cave) > 1:
                    # special handling: TRock has two entries that we should consider entrance only
                    # ToDo this should be handled in a more sensible manner
                    if cave[0] in ['Turtle Rock Exit (Front)', 'Spectacle Rock Cave Exit (Peak)'] and len(cave) == 2:
                        continue
                    candidate = cave
                    break
            if candidate is None:
                raise KeyError('No suitable cave.')
            cavelist.remove(candidate)
            return candidate
        def connect_reachable_exit(entrance, caves, doors):
            # Guarantee `entrance` is escapable: wire the last exit of a
            # randomly chosen multi-exit cave to it, attach a door, and push
            # the cave's remaining exits back into the pool.
            cave = extract_reachable_exit(caves)
            exit = cave[-1]
            cave = cave[:-1]
            connect_exit(world, exit, entrance, player)
            connect_entrance(world, doors.pop(), exit, player)
            # rest of cave now is forced to be in this world
            caves.append(cave)
# connect mandatory exits
for entrance in entrances_must_exits:
connect_reachable_exit(entrance, caves, doors)
# place old man, has limited options
# exit has to come from specific set of doors, the entrance is free to move about
old_man_entrances = [entrance for entrance in old_man_entrances if entrance in entrances]
world.random.shuffle(old_man_entrances)
old_man_exit = old_man_entrances.pop()
entrances.remove(old_man_exit)
connect_exit(world, 'Old Man Cave Exit (East)', old_man_exit, player)
connect_entrance(world, doors.pop(), 'Old Man Cave Exit (East)', player)
caves.append('Old Man Cave Exit (West)')
# handle remaining caves
for cave in caves:
if isinstance(cave, str):
cave = (cave,)
for exit in cave:
connect_exit(world, exit, entrances.pop(), player)
connect_entrance(world, doors.pop(), exit, player)
# handle simple doors
single_doors = list(Single_Cave_Doors)
bomb_shop_doors = list(Bomb_Shop_Single_Cave_Doors)
blacksmith_doors = list(Blacksmith_Single_Cave_Doors)
door_targets = list(Single_Cave_Targets)
# place blacksmith, has limited options
world.random.shuffle(blacksmith_doors)
blacksmith_hut = blacksmith_doors.pop()
connect_entrance(world, blacksmith_hut, 'Blacksmiths Hut', player)
bomb_shop_doors.extend(blacksmith_doors)
# place dam and pyramid fairy, have limited options
world.random.shuffle(bomb_shop_doors)
bomb_shop = bomb_shop_doors.pop()
connect_entrance(world, bomb_shop, 'Big Bomb Shop', player)
single_doors.extend(bomb_shop_doors)
# tavern back door cannot be shuffled yet
connect_doors(world, ['Tavern North'], ['Tavern'], player)
# place remaining doors
connect_doors(world, single_doors, door_targets, player)
else:
raise NotImplementedError(
f'{world.shuffle[player]} Shuffling not supported yet. Player {world.get_player_names(player)}')
# check for swamp palace fix
if world.get_entrance('Dam', player).connected_region.name != 'Dam' or world.get_entrance('Swamp Palace', player).connected_region.name != 'Swamp Palace (Entrance)':
world.swamp_patch_required[player] = True
# check for potion shop location
if world.get_entrance('Potion Shop', player).connected_region.name != 'Potion Shop':
world.powder_patch_required[player] = True
# check for ganon location
if world.get_entrance('Pyramid Hole', player).connected_region.name != 'Pyramid':
world.ganon_at_pyramid[player] = False
# check for Ganon's Tower location
if world.get_entrance('Ganons Tower', player).connected_region.name != 'Ganons Tower (Entrance)':
world.ganonstower_vanilla[player] = False
def link_inverted_entrances(world, player):
# Link's house shuffled freely, Houlihan set in mandatory_connections
Dungeon_Exits = Inverted_Dungeon_Exits_Base.copy()
Cave_Exits = Cave_Exits_Base.copy()
Old_Man_House = Old_Man_House_Base.copy()
Cave_Three_Exits = Cave_Three_Exits_Base.copy()
unbias_some_entrances(world, Dungeon_Exits, Cave_Exits, Old_Man_House, Cave_Three_Exits)
# setup mandatory connections
for exitname, regionname in inverted_mandatory_connections:
connect_simple(world, exitname, regionname, player)
# if we do not shuffle, set default connections
if world.shuffle[player] == 'vanilla':
for exitname, regionname in inverted_default_connections:
connect_simple(world, exitname, regionname, player)
for exitname, regionname in inverted_default_dungeon_connections:
connect_simple(world, exitname, regionname, player)
elif world.shuffle[player] == 'dungeonssimple':
for exitname, regionname in inverted_default_connections:
connect_simple(world, exitname, regionname, player)
simple_shuffle_dungeons(world, player)
elif world.shuffle[player] == 'dungeonsfull':
for exitname, regionname in inverted_default_connections:
connect_simple(world, exitname, regionname, player)
skull_woods_shuffle(world, player)
dungeon_exits = list(Dungeon_Exits)
lw_entrances = list(Inverted_LW_Dungeon_Entrances)
lw_dungeon_entrances_must_exit = list(Inverted_LW_Dungeon_Entrances_Must_Exit)
dw_entrances = list(Inverted_DW_Dungeon_Entrances)
# randomize which desert ledge door is a must-exit
if world.random.randint(0, 1) == 0:
lw_dungeon_entrances_must_exit.append('Desert Palace Entrance (North)')
| |
"""
Freqtrade is the main module of this bot. It contains the class Freqtrade()
"""
import copy
import logging
import traceback
import os
from datetime import datetime, timezone
from math import isclose
from os import getpid
from typing import Any, Dict, List, Optional, Tuple
import arrow
from requests.exceptions import RequestException
from freqtrade import (DependencyException, InvalidOrderException, __version__,
constants, persistence)
from freqtrade.configuration import validate_config_consistency
from freqtrade.data.converter import order_book_to_dataframe
from freqtrade.data.dataprovider import DataProvider
from freqtrade.edge import Edge
from freqtrade.exchange import timeframe_to_minutes, timeframe_to_next_date
from freqtrade.persistence import Trade
from freqtrade.resolvers import (ExchangeResolver, PairListResolver,
StrategyResolver)
from freqtrade.rpc import RPCManager, RPCMessageType
from freqtrade.state import State
from freqtrade.strategy.interface import IStrategy, SellType
from freqtrade.wallets import Wallets
logger = logging.getLogger(__name__)
class FreqtradeBot:
"""
Freqtrade is the main class of the bot.
This is from here the bot start its logic.
"""
    def __init__(self, config: Dict[str, Any]) -> None:
        """
        Init all variables and objects the bot needs to work
        :param config: configuration dict, you can use Configuration.get_config()
        to get the config dict.
        """
        logger.info('Starting freqtrade %s', __version__)
        # Init bot state
        self.state = State.STOPPED
        # Init objects
        self.config = config
        # Timestamp (epoch seconds) of the last heartbeat log line
        self._heartbeat_msg = 0
        self.heartbeat_interval = self.config.get('internals', {}).get('heartbeat_interval', 60)
        self.strategy: IStrategy = StrategyResolver(self.config).strategy
        # Check config consistency here since strategies can set certain options
        validate_config_consistency(config)
        self.exchange = ExchangeResolver(self.config['exchange']['name'], self.config).exchange
        self.wallets = Wallets(self.config, self.exchange)
        self.dataprovider = DataProvider(self.config, self.exchange)
        # Attach Dataprovider to Strategy baseclass
        IStrategy.dp = self.dataprovider
        # Attach Wallets to Strategy baseclass
        IStrategy.wallets = self.wallets
        pairlistname = self.config.get('pairlist', {}).get('method', 'StaticPairList')
        self.pairlists = PairListResolver(pairlistname, self, self.config).pairlist
        # Initializing Edge only if enabled
        self.edge = Edge(self.config, self.exchange, self.strategy) if \
            self.config.get('edge', {}).get('enabled', False) else None
        # Start from the static config whitelist; refreshed later in process()
        self.active_pair_whitelist: List[str] = self.config['exchange']['pair_whitelist']
        persistence.init(self.config.get('db_url', None),
                         clean_open_orders=self.config.get('dry_run', False))
        # Set initial bot state from config
        initial_state = self.config.get('initial_state')
        self.state = State[initial_state.upper()] if initial_state else State.STOPPED
        # RPC runs in separate threads, can start handling external commands just after
        # initialization, even before Freqtradebot has a chance to start its throttling,
        # so anything in the Freqtradebot instance should be ready (initialized), including
        # the initial state of the bot.
        # Keep this at the end of this initialization method.
        self.rpc: RPCManager = RPCManager(self)
    def cleanup(self) -> None:
        """
        Cleanup pending resources on an already stopped bot
        :return: None
        """
        logger.info('Cleaning up modules ...')
        # Stop the RPC handlers first, then close the persistence layer
        self.rpc.cleanup()
        persistence.cleanup()
    def startup(self) -> None:
        """
        Called on startup and after reloading the bot - triggers notifications and
        performs startup tasks
        """
        self.rpc.startup_messages(self.config, self.pairlists)
        if not self.edge:
            # Adjust stoploss if it was changed
            Trade.stoploss_reinitialization(self.strategy.stoploss)
        # NOTE(review): reinitialization is skipped when Edge is enabled —
        # presumably Edge manages stoploss itself; confirm upstream.
    def process(self) -> None:
        """
        One iteration of the bot's main loop: reload markets, refresh the
        whitelist and candles, handle open trades, then look for new buys.
        :return: None (the old docstring claimed a bool; nothing is returned)
        """
        # Check whether markets have to be reloaded
        self.exchange._reload_markets()
        # Refresh whitelist
        self.pairlists.refresh_pairlist()
        self.active_pair_whitelist = self.pairlists.whitelist
        # Calculating Edge positioning
        if self.edge:
            self.edge.calculate()
            self.active_pair_whitelist = self.edge.adjust(self.active_pair_whitelist)
        # Query trades from persistence layer
        trades = Trade.get_open_trades()
        # Extend active-pair whitelist with pairs from open trades
        # It ensures that tickers are downloaded for open trades
        self._extend_whitelist_with_trades(self.active_pair_whitelist, trades)
        # Refreshing candles
        self.dataprovider.refresh(self._create_pair_whitelist(self.active_pair_whitelist),
                                  self.strategy.informative_pairs())
        # First process current opened trades
        self.process_maybe_execute_sells(trades)
        # Then looking for buy opportunities
        if len(trades) < self.config['max_open_trades']:
            self.process_maybe_execute_buys()
        if 'unfilledtimeout' in self.config:
            # Check and handle any timed out open orders
            self.check_handle_timedout()
        Trade.session.flush()
        # Emit a periodic heartbeat log so external monitoring can see liveness
        if (self.heartbeat_interval
                and (arrow.utcnow().timestamp - self._heartbeat_msg > self.heartbeat_interval)):
            logger.info(f"Bot heartbeat. PID={getpid()}")
            self._heartbeat_msg = arrow.utcnow().timestamp
def _extend_whitelist_with_trades(self, whitelist: List[str], trades: List[Any]):
"""
Extend whitelist with pairs from open trades
"""
whitelist.extend([trade.pair for trade in trades if trade.pair not in whitelist])
def _create_pair_whitelist(self, pairs: List[str]) -> List[Tuple[str, str]]:
"""
Create pair-whitelist tuple with (pair, ticker_interval)
"""
return [(pair, self.config['ticker_interval']) for pair in pairs]
def get_target_bid(self, pair: str, tick: Dict = None) -> float:
"""
Calculates bid target between current ask price and last price
:return: float: Price
"""
config_bid_strategy = self.config.get('bid_strategy', {})
if 'use_order_book' in config_bid_strategy and\
config_bid_strategy.get('use_order_book', False):
logger.info('Getting price from order book')
order_book_top = config_bid_strategy.get('order_book_top', 1)
order_book = self.exchange.get_order_book(pair, order_book_top)
logger.debug('order_book %s', order_book)
# top 1 = index 0
order_book_rate = order_book['bids'][order_book_top - 1][0]
logger.info('...top %s order book buy rate %0.8f', order_book_top, order_book_rate)
used_rate = order_book_rate
else:
if not tick:
logger.info('Using Last Ask / Last Price')
ticker = self.exchange.get_ticker(pair)
else:
ticker = tick
if ticker['ask'] < ticker['last']:
ticker_rate = ticker['ask']
else:
balance = self.config['bid_strategy']['ask_last_balance']
ticker_rate = ticker['ask'] + balance * (ticker['last'] - ticker['ask'])
used_rate = ticker_rate
return used_rate
    def _get_trade_stake_amount(self, pair) -> Optional[float]:
        """
        Check if stake amount can be fulfilled with the available balance
        for the stake currency
        :param pair: pair to trade (used only for Edge position sizing)
        :return: float: Stake Amount, or None when no trade slot is free
        :raises DependencyException: when the free balance is below the
            configured stake amount
        """
        if self.edge:
            # Delegate position sizing to Edge when it is enabled
            return self.edge.stake_amount(
                pair,
                self.wallets.get_free(self.config['stake_currency']),
                self.wallets.get_total(self.config['stake_currency']),
                Trade.total_open_trades_stakes()
            )
        else:
            stake_amount = self.config['stake_amount']
            available_amount = self.wallets.get_free(self.config['stake_currency'])
            if stake_amount == constants.UNLIMITED_STAKE_AMOUNT:
                # "unlimited" stake: split the free balance evenly across the
                # remaining open-trade slots
                open_trades = len(Trade.get_open_trades())
                if open_trades >= self.config['max_open_trades']:
                    logger.warning("Can't open a new trade: max number of trades is reached")
                    return None
                return available_amount / (self.config['max_open_trades'] - open_trades)
            # Check if stake_amount is fulfilled
            if available_amount < stake_amount:
                raise DependencyException(
                    f"Available balance({available_amount} {self.config['stake_currency']}) is "
                    f"lower than stake amount({stake_amount} {self.config['stake_currency']})"
                )
            return stake_amount
    def _get_min_pair_stake_amount(self, pair: str, price: float) -> Optional[float]:
        """
        Compute the minimal stake the exchange accepts for `pair` at `price`,
        inflated by the configured reserve and the strategy stoploss.
        :return: minimal stake amount, or None when no limits are published
        :raises ValueError: when the pair is unknown to the exchange
        """
        try:
            market = self.exchange.markets[pair]
        except KeyError:
            raise ValueError(f"Can't get market information for symbol {pair}")
        if 'limits' not in market:
            return None
        min_stake_amounts = []
        limits = market['limits']
        if ('cost' in limits and 'min' in limits['cost']
                and limits['cost']['min'] is not None):
            min_stake_amounts.append(limits['cost']['min'])
        if ('amount' in limits and 'min' in limits['amount']
                and limits['amount']['min'] is not None):
            # amount limit is in base currency; convert to stake via price
            min_stake_amounts.append(limits['amount']['min'] * price)
        if not min_stake_amounts:
            return None
        # reserve some percent defined in config (5% default) + stoploss
        amount_reserve_percent = 1.0 - self.config.get('amount_reserve_percent',
                                                       constants.DEFAULT_AMOUNT_RESERVE_PERCENT)
        if self.strategy.stoploss is not None:
            amount_reserve_percent += self.strategy.stoploss
        # clamp the multiplier to >= 0.5 so the combined reserve (config
        # percent plus stoploss, both subtracted above) never exceeds 50%
        amount_reserve_percent = max(amount_reserve_percent, 0.5)
        return min(min_stake_amounts) / amount_reserve_percent
def create_trades(self) -> bool:
"""
Checks the implemented trading strategy for buy-signals, using the active pair whitelist.
If a pair triggers the buy_signal a new trade record gets created.
Checks pairs as long as the open trade count is below `max_open_trades`.
:return: True if at least one trade has been created.
"""
whitelist = copy.deepcopy(self.active_pair_whitelist)
if not whitelist:
logger.info("Active pair whitelist is empty.")
return False
# Remove currently opened and latest pairs from whitelist
for trade in Trade.get_open_trades():
if trade.pair in whitelist:
whitelist.remove(trade.pair)
logger.debug('Ignoring %s in pair whitelist', trade.pair)
if not whitelist:
logger.info("No currency pair in active pair whitelist, "
"but checking to sell open trades.")
return False
buycount = 0
# running get_signal on historical data fetched
for _pair in whitelist:
if self.strategy.is_pair_locked(_pair):
logger.info(f"Pair {_pair} is currently locked.")
continue
(buy, sell) = self.strategy.get_signal(
_pair, self.strategy.ticker_interval,
self.dataprovider.ohlcv(_pair, self.strategy.ticker_interval))
if buy and not sell and len(Trade.get_open_trades()) < self.config['max_open_trades']:
stake_amount = self._get_trade_stake_amount(_pair)
if not stake_amount:
continue
logger.info(f"Buy signal found: about create a new trade with stake_amount: "
f"{stake_amount} ...")
bidstrat_check_depth_of_market = self.config.get('bid_strategy', {}).\
get('check_depth_of_market', {})
if (bidstrat_check_depth_of_market.get('enabled', False)) and\
(bidstrat_check_depth_of_market.get('bids_to_ask_delta', 0) > 0):
if self._check_depth_of_market_buy(_pair, bidstrat_check_depth_of_market):
buycount += self.execute_buy(_pair, stake_amount)
else:
continue
buycount += self.execute_buy(_pair, stake_amount)
return buycount > 0
def _check_depth_of_market_buy(self, pair: str, conf: Dict) -> bool:
"""
Checks depth of market before executing a buy
"""
conf_bids_to_ask_delta = conf.get('bids_to_ask_delta', 0)
logger.info('checking depth of market for %s', pair)
order_book = self.exchange.get_order_book(pair, 1000)
order_book_data_frame = order_book_to_dataframe(order_book['bids'], order_book['asks'])
order_book_bids = order_book_data_frame['b_size'].sum()
order_book_asks = order_book_data_frame['a_size'].sum()
bids_ask_delta = order_book_bids / order_book_asks
logger.info('bids: %s, asks: %s, delta: %s', order_book_bids,
order_book_asks, bids_ask_delta)
if bids_ask_delta >= conf_bids_to_ask_delta:
return True
return False
def execute_buy(self, pair: str, stake_amount: float, price: Optional[float] = None) -> bool:
"""
Executes a limit buy for the given pair
:param pair: pair for which we want to create a LIMIT_BUY
:return: None
"""
pair_s = pair.replace('_', '/')
stake_currency = self.config['stake_currency']
fiat_currency = self.config.get('fiat_display_currency', None)
time_in_force = self.strategy.order_time_in_force['buy']
if price:
buy_limit_requested = price
else:
# Calculate amount
buy_limit_requested = self.get_target_bid(pair)
min_stake_amount = self._get_min_pair_stake_amount(pair_s, buy_limit_requested)
if min_stake_amount is not None and min_stake_amount > stake_amount:
logger.warning(
f"Can't open a new trade for {pair_s}: stake amount "
f"is too small ({stake_amount} < {min_stake_amount})"
)
return False
amount = stake_amount / buy_limit_requested
order_type = self.strategy.order_types['buy']
order = self.exchange.buy(pair=pair, ordertype=order_type,
amount=amount, rate=buy_limit_requested,
time_in_force=time_in_force)
order_id = order['id']
order_status = order.get('status', None)
# we assume the order is executed at the price | |
# -*- coding: utf-8 -*-
import numpy as np
import cv2
from pathlib import Path
from moviepy.editor import VideoFileClip
from IPython.display import HTML
def calibrateCamera(nx, ny, imgPath):
    """
    Calibrate the camera from chessboard calibration images.
    :param nx: number of inner corners along the board's x direction
    :param ny: number of inner corners along the board's y direction
    :param imgPath: directory containing 'calibration*.jpg' images
    :return: (camera matrix, distortion coefficients)
    :raises FileNotFoundError: when no calibration images are found
    """
    objPts = []  # 3D points in object plane
    imgPts = []  # 2D points in image plane
    # Build the ideal corner grid from nx/ny instead of the hard-coded 9x6,
    # so the function actually honours its parameters (9, 6 is unchanged).
    objP = np.zeros((nx * ny, 3), np.float32)
    objP[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)
    gray = None
    for path in Path(imgPath).glob('calibration*.jpg'):
        img = cv2.imread(str(path))
        # Convert to grayscale
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Find the chessboard corners
        ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
        # If found, record and draw corners
        if ret:
            imgPts.append(corners)
            objPts.append(objP)
            cv2.drawChessboardCorners(img, (nx, ny), corners, ret)
    if gray is None:
        # Previously this fell through to a NameError on `gray`
        raise FileNotFoundError("no 'calibration*.jpg' images found in %s" % imgPath)
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objPts, imgPts, gray.shape[::-1], None, None)
    return mtx, dist
# Run calibration once at import time with a 9x6 chessboard; the resulting
# camera matrix and distortion coefficients are reused by the pipeline below.
print("calibrating...")
mtx, dist = calibrateCamera(9, 6, 'camera_cal')
print("calibration done")
def binaryImage(img, s_thresh=(170, 255), sx_thresh=(20, 100)):
    """
    Build a binary lane-pixel mask from an RGB road image.

    Two detectors are OR-ed together: an x-direction Sobel gradient
    threshold on the HLS lightness channel, and a value threshold on the
    HLS saturation channel.
    Returns (combined_mask, color_binary); color_binary stacks the two
    detector masks into the G and B channels for visual debugging.
    """
    work = np.copy(img)
    hls = cv2.cvtColor(work, cv2.COLOR_RGB2HLS)
    lightness = hls[:, :, 1]
    saturation = hls[:, :, 2]
    # x-derivative of lightness accentuates near-vertical lane edges
    grad_x = cv2.Sobel(lightness, cv2.CV_64F, 1, 0)
    abs_grad = np.absolute(grad_x)
    grad_scaled = np.uint8(255 * abs_grad / np.max(abs_grad))
    # Threshold the scaled gradient
    grad_mask = np.zeros_like(grad_scaled)
    grad_mask[(grad_scaled >= sx_thresh[0]) & (grad_scaled <= sx_thresh[1])] = 1
    # Threshold the saturation channel
    color_mask = np.zeros_like(saturation)
    color_mask[(saturation >= s_thresh[0]) & (saturation <= s_thresh[1])] = 1
    color_binary = np.dstack((np.zeros_like(grad_mask), grad_mask, color_mask)) * 255
    combined = grad_mask | color_mask
    return combined, color_binary
def corners_unwarp(img, points, offset=100):
    """
    Perspective-warp `img` so the quadrilateral `points` (four [x, y]
    corners, ordered to match the destination rectangle) maps onto an
    axis-aligned rectangle inset `offset` pixels from the side borders.
    """
    h = img.shape[0]
    w = img.shape[1]
    src = np.float32(points)
    # Destination rectangle: full height, inset horizontally by `offset`
    dst = np.float32([[offset, 0],
                      [w - offset, 0],
                      [w - offset, h],
                      [offset, h]])
    M = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(img, M, (w, h))
def corners_unwarp_inverse(img, points, offset=100):
    """
    Inverse of corners_unwarp: warp `img` from the axis-aligned rectangle
    back onto the original quadrilateral `points`.
    """
    h = img.shape[0]
    w = img.shape[1]
    src = np.float32(points)
    dst = np.float32([[offset, 0],
                      [w - offset, 0],
                      [w - offset, h],
                      [offset, h]])
    # Swapped argument order (dst -> src) yields the inverse transform
    Minv = cv2.getPerspectiveTransform(dst, src)
    return cv2.warpPerspective(img, Minv, (w, h))
def find_lane_pixels(binary_warped):
    """Locate left/right lane-line pixels via a sliding-window search.

    A histogram of the bottom half of the binary image seeds the two
    starting columns; nine windows per line are slid upward, each
    recentering on the mean x of the pixels it captures.

    Args:
        binary_warped: 2-D binary (0/1) top-down view of the road.

    Returns:
        leftx, lefty, rightx, righty: arrays of pixel coordinates for the
        left and right lane lines.
    """
    # Histogram of the bottom half: lane lines appear as the two peaks.
    histogram = np.sum(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)
    # Peaks of the left and right halves seed the window search.
    # Fixed: np.int was removed in NumPy 1.24 -- use builtin int / floor div.
    midpoint = histogram.shape[0] // 2
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    # HYPERPARAMETERS
    nwindows = 9    # number of sliding windows per line
    margin = 100    # window half-width, in pixels
    minpix = 50     # min pixels found to recenter the next window
    # Window height derived from the image height.
    window_height = binary_warped.shape[0] // nwindows
    # x and y positions of all nonzero pixels in the image.
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current window centers, updated as the search climbs the image.
    leftx_current = leftx_base
    rightx_current = rightx_base
    # Per-window lists of pixel indices.
    left_lane_inds = []
    right_lane_inds = []
    # Step through the windows one by one, bottom to top.
    for window in range(nwindows):
        win_y_low = binary_warped.shape[0] - (window + 1) * window_height
        win_y_high = binary_warped.shape[0] - window * window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        # Nonzero pixels falling inside each window.
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                          (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                           (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # If enough pixels were found, recenter the next window on their mean.
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    # Flatten the per-window index lists.
    try:
        left_lane_inds = np.concatenate(left_lane_inds)
        right_lane_inds = np.concatenate(right_lane_inds)
    except ValueError:
        # Avoids an error if the above is not implemented fully.
        pass
    # Extract left and right line pixel positions.
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    return leftx, lefty, rightx, righty
def fit_polynomial(binary_warped):
    """Fit second-order polynomials x(y) to each lane line and draw the
    enclosed lane region into a blank color image.

    Args:
        binary_warped: 2-D binary top-down view of the road.

    Returns:
        window_img: color image with the detected lane polygon filled.
        left_fit, right_fit: fitted coefficients, highest power first.
        left_fitx, right_fitx: fitted x position for every image row.
    """
    # Collect candidate pixels with the sliding-window search.
    leftx, lefty, rightx, righty = find_lane_pixels(binary_warped)
    # Fit x = A*y**2 + B*y + C for each line.
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    # One y sample per image row.
    ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
    try:
        left_fitx = left_fit[0] * ploty**2 + left_fit[1] * ploty + left_fit[2]
        right_fitx = right_fit[0] * ploty**2 + right_fit[1] * ploty + right_fit[2]
    except TypeError:
        # The fit produced no usable coefficients; fall back to a dummy curve.
        print('The function failed to fit a line!')
        left_fitx = 1 * ploty**2 + 1 * ploty
        right_fitx = 1 * ploty**2 + 1 * ploty
    ## Visualization ##
    # Blank color canvas matching the warped image size.
    window_img = np.zeros_like(np.dstack((binary_warped, binary_warped, binary_warped)) * 255)
    # Left boundary top-to-bottom plus right boundary bottom-to-top
    # (flipud) forms one closed polygon.
    left_line_pts = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    right_line_pts = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    all_lane_points = np.hstack((left_line_pts, right_line_pts))
    cv2.fillPoly(window_img, np.int_([all_lane_points]), (0, 255, 255))
    return window_img, left_fit, right_fit, left_fitx, right_fitx
def measure_curvature_real(image, left_fit_cr, right_fit_cr):
    '''
    Calculates the curvature of polynomial functions in meters.

    The incoming fits are in PIXEL space (x_px = A*y_px**2 + B*y_px + C),
    so the coefficients are first rescaled to meter space
    (A_m = A*xm/ym**2, B_m = B*xm/ym) before applying the radius formula
    R = (1 + (2*A*y + B)**2)**1.5 / |2*A|.
    Fixed: the original applied the pixel-space coefficients directly and
    never used xm_per_pix, yielding radii in mixed units.

    Args:
        image: any array whose first dimension is the image height in pixels.
        left_fit_cr, right_fit_cr: pixel-space polynomial coefficients,
            highest power first.

    Returns:
        (left_curverad, right_curverad): radii of curvature in meters,
        evaluated at the bottom of the image.
    '''
    # Define conversions in x and y from pixels space to meters
    ym_per_pix = 30 / 720  # meters per pixel in y dimension
    xm_per_pix = 3.7 / 700  # meters per pixel in x dimension
    ploty = np.linspace(0, image.shape[0] - 1, image.shape[0])
    # Evaluate at the maximum y: the bottom of the image, nearest the car.
    y_eval = np.max(ploty)

    def _radius(fit):
        # Rescale pixel-space coefficients to meter space.
        a = fit[0] * xm_per_pix / (ym_per_pix ** 2)
        b = fit[1] * xm_per_pix / ym_per_pix
        y_m = y_eval * ym_per_pix
        return ((1 + (2 * a * y_m + b) ** 2) ** 1.5) / np.absolute(2 * a)

    return _radius(left_fit_cr), _radius(right_fit_cr)
"pipeline"
def pipeline(ipImage):
dstImg = cv2.undistort(ipImage, mtx, dist, None, mtx)
binaryImg, color_binary = binaryImage(dstImg)
pts = [[590,450], [710,450], [1150,700], [200,700]]
topViewImg = corners_unwarp(binaryImg, pts, 150 )
imgWindow, left_fit, right_fit, left_fitx, right_fitx = fit_polynomial(topViewImg)
#result = search_around_poly(topViewImg, left_fit, right_fit)
unwarpedImg = corners_unwarp_inverse(imgWindow, pts, 150 )
left_curverad, right_curverad = measure_curvature_real(imgWindow, left_fit, right_fit)
center = (left_fitx[(imgWindow.shape[0])-1] + right_fitx[(imgWindow.shape[0])-1])/2
position = dstImg.shape[1]/2
xm_per_pix = 3.7/700 # meters per pixel in x dimension
#opImage = color_binary
#opImage = np.dstack(( topViewImg, topViewImg, topViewImg)) * 255
#opImage = imgWindow
#opImage = result
#opImage = unwarpedImg
opImage = cv2.addWeighted(dstImg, 1, unwarpedImg, 0.4, 0)
opImage = cv2.putText(opImage, "Radius of Curvature = %.2f m" % ((left_curverad+right_curverad)/2), (100,50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255),3,cv2.LINE_AA)
if position > center:
opImage = cv2.putText(opImage, "Vehicle is %.2f m right of center" % ((position-center)*xm_per_pix), (100,100), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255),3,cv2.LINE_AA)
elif position < center:
opImage = cv2.putText(opImage, "Vehicle is %.2f m left of center" % ((center-position)*xm_per_pix), (100,100), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255),3,cv2.LINE_AA)
else:
opImage = cv2.putText(opImage, "Vehicle is in center" | |
from abc import ABC, abstractmethod
import scipy
import numpy as np
class Server(ABC):
def __init__(self, server_model, merged_update):
self.model = server_model
self.merged_update = merged_update
self.total_weight = 0
@abstractmethod
def train_model(self, my_round, num_syncs, clients_per_group,
sampler, batch_size, base_dist):
"""Aggregate clients' models after each iteration. If
num_syncs synchronizations are reached, middle servers'
models are then aggregated at the top server.
Args:
my_round: The current training round, used for learning rate
decay.
num_syncs: Number of client - middle server synchronizations
in each round before sending to the top server.
clients_per_group: Number of clients to select in
each synchronization.
sampler: Sample method, could be "random", "brute",
"probability", "bayesian", "ga" (namely genetic algorithm),
and "gbp-cs" (namely gradient-based binary permutation
client selection).
batch_size: Number of samples in a batch data.
base_dist: Real data distribution, usually global_dist.
Returns:
update: The trained model after num_syncs synchronizations.
"""
return None
def merge_updates(self, weight, update):
"""Aggregate updates based on their weights.
Args:
weight: Weight for this update.
update: The trained model.
"""
merged_update_ = list(self.merged_update.get_params())
current_update_ = list(update)
num_params = len(merged_update_)
self.total_weight += weight
for p in range(num_params):
merged_update_[p].set_data(
merged_update_[p].data() +
(weight * current_update_[p].data()))
def update_model(self):
"""Update self.model with averaged merged update."""
merged_update_ = list(self.merged_update.get_params())
num_params = len(merged_update_)
for p in range(num_params):
merged_update_[p].set_data(
merged_update_[p].data() / self.total_weight)
self.model.set_params(self.merged_update.get_params())
self.total_weight = 0
self.merged_update.reset_zero()
@abstractmethod
def test_model(self, set_to_use):
"""Test self.model on all clients.
Args:
set_to_use: Dataset to test on, either "train" or "test".
Returns:
metrics: Dict of metrics returned by the model.
"""
return None
def save_model(self, log_dir):
"""Save self.model to specified directory.
Args:
log_dir: Directory to save model file.
"""
self.model.save(log_dir)
class TopServer(Server):
    """Top-level server that fans training out to its middle servers and
    aggregates their updates into a single model."""

    def __init__(self, server_model, merged_update, servers):
        self.middle_servers = []
        self.register_middle_servers(servers)
        super(TopServer, self).__init__(server_model, merged_update)

    def register_middle_servers(self, servers):
        """Register middle servers.

        Args:
            servers: A MiddleServer instance or a list of them.
        """
        # Fixed: use isinstance instead of a type() equality check; this
        # also accepts MiddleServer subclasses.
        if isinstance(servers, MiddleServer):
            servers = [servers]
        self.middle_servers.extend(servers)

    def train_model(self, my_round, num_syncs, clients_per_group,
                    sampler, batch_size, base_dist):
        """Call middle servers to train their models and aggregate
        their updates. The averaged result is stored in self.model."""
        for server in self.middle_servers:
            server.set_model(self.model)
            update = server.train_model(
                my_round, num_syncs, clients_per_group, sampler,
                batch_size, base_dist)
            # Each middle server's update speaks for clients_per_group
            # clients, so that is its merge weight.
            self.merge_updates(clients_per_group, update)
        self.update_model()

    def test_model(self, set_to_use="test"):
        """Call middle servers to test their models.

        Args:
            set_to_use: Dataset to test on, either "train" or "test".

        Returns:
            metrics: Dict of per-client metrics from all middle servers.
        """
        metrics = {}
        for middle_server in self.middle_servers:
            middle_server.set_model(self.model)
            metrics.update(middle_server.test_model(set_to_use))
        return metrics
class MiddleServer(Server):
    def __init__(self, server_id, server_model, merged_update, clients_in_group):
        """Middle server managing one group of clients.

        Args:
            server_id: Identifier of this middle server / client group.
            server_model: Model held by this server.
            merged_update: Accumulator model for weighted updates.
            clients_in_group: A single client or a list of clients to
                register with this server.
        """
        self.server_id = server_id
        self.clients = []
        # Register clients before the base initializer so the group is
        # complete as soon as construction finishes.
        self.register_clients(clients_in_group)
        super(MiddleServer, self).__init__(server_model, merged_update)
def register_clients(self, clients):
"""Register clients of this middle server.
Args:
clients: Clients to be registered.
"""
if type(clients) is not list:
clients = [clients]
self.clients.extend(clients)
    def select_clients(self, my_round, clients_per_group, sampler="random",
                       batch_size=32, base_dist=None, display=False,
                       metrics_dir="metrics", rand_per_group=2):
        """Select clients_per_group clients for this round.

        rand_per_group clients are picked uniformly at random; the rest
        are chosen by the configured sampler so that the combined
        selection approximates base_dist.

        Args:
            my_round: The current training round, used as random seed.
            clients_per_group: Total number of clients to select.
            sampler: "random", "probability", "brute", "bayesian", "ga",
                or "gbp-cs".
            batch_size: Number of samples in a batch (used by "gbp-cs").
            base_dist: Real data distribution, usually global_dist.
            display: Whether to plot the selected clients' distributions.
            metrics_dir: Output directory for the optional plot.
            rand_per_group: Number of purely random selections.

        Returns:
            selected_clients: List of selected clients.
        """
        # self.online presumably filters to currently-available clients
        # (defined outside this view) -- TODO confirm.
        online_clients = self.online(self.clients)
        num_clients = len(online_clients)
        # NOTE(review): this can go negative when fewer than
        # rand_per_group clients are online -- confirm upstream guards.
        num_sample_clients = min(clients_per_group, num_clients) \
            - rand_per_group
        # Randomly select part of num_clients clients
        np.random.seed(my_round)
        rand_clients_idx = np.random.choice(
            range(num_clients), rand_per_group, replace=False)
        rand_clients = np.take(online_clients, rand_clients_idx).tolist()
        # Select rest clients to meet approximate i.i.d. dist
        sample_clients = []
        rest_clients = np.delete(online_clients, rand_clients_idx).tolist()
        if sampler == "random":
            sample_clients = self.random_sampling(
                rest_clients, num_sample_clients, my_round, base_dist, rand_clients)
        elif sampler == "probability":
            sample_clients = self.probability_sampling(
                rest_clients, num_sample_clients, my_round, base_dist, rand_clients)
        elif sampler == "brute":
            sample_clients = self.brute_sampling(
                rest_clients, num_sample_clients, base_dist, rand_clients)
        elif sampler == "bayesian":
            sample_clients = self.bayesian_sampling(
                rest_clients, num_sample_clients, my_round, base_dist, rand_clients)
        elif sampler == "ga":
            sample_clients = self.genetic_sampling(
                rest_clients, num_sample_clients, my_round, base_dist, rand_clients)
        elif sampler == "gbp-cs":
            sample_clients = self.gbp_cs_sampling(
                rest_clients, num_sample_clients, batch_size, base_dist, rand_clients)
        selected_clients = rand_clients + sample_clients
        # Measure the distance of base distribution and mean distribution
        distance = self.get_dist_distance(selected_clients, base_dist)
        print("Dist Distance on Middle Server %i:"
              % self.server_id, distance, flush=True)
        # Visualize distributions if needed
        if display:
            from metrics.visualization_utils import plot_clients_dist
            plot_clients_dist(clients=selected_clients,
                              global_dist=base_dist,
                              draw_mean=True,
                              metrics_dir=metrics_dir)
        return selected_clients
def random_sampling(self, clients, num_clients, my_round, base_dist=None,
exist_clients=[], num_iter=1):
"""Randomly sample num_clients clients from given clients.
Args:
clients: List of clients to be sampled.
num_clients: Number of clients to sample.
my_round: The current training round, used as random seed.
base_dist: Real data distribution, usually global_dist.
exist_clients: List of existing clients.
num_iter: Number of iterations for sampling.
Returns:
rand_clients: List of randomly sampled clients.
"""
np.random.seed(my_round)
rand_clients_ = []
if num_iter == 1:
rand_clients_ = np.random.choice(
clients, num_clients, replace=False).tolist()
elif num_iter > 1:
min_distance_ = 1
rand_clients_ = []
while num_iter > 0:
rand_clients_tmp_ = np.random.choice(
clients, num_clients, replace=False).tolist()
all_clients_ = exist_clients + rand_clients_tmp_
distance_ = self.get_dist_distance(all_clients_, base_dist)
if distance_ < min_distance_:
min_distance_ = distance_
rand_clients_[:] = rand_clients_tmp_
num_iter -= 1
return rand_clients_
def probability_sampling(self, clients, num_clients, my_round, base_dist,
exist_clients=[], num_iter=100):
"""Randomly sample num_clients clients from given clients, according
to real-time learning probability.
Args:
clients: List of clients to be sampled.
num_clients: Number of clients to sample.
my_round: The current training round, used as random seed.
base_dist: Real data distribution, usually global_dist.
exist_clients: List of existing clients.
num_iter: Number of iterations for sampling.
Returns:
rand_clients: List of sampled clients.
"""
assert num_iter > 1, "Invalid num_iter=%s (num_iter>1)" % num_iter
np.random.seed(my_round)
min_distance_ = 1
rand_clients_ = []
prob_ = np.array([1. / len(clients)] * len(clients))
while num_iter > 0:
rand_clients_idx_ = np.random.choice(
range(len(clients)), num_clients, p=prob_, replace=False)
rand_clients_tmp_ = np.take(clients, rand_clients_idx_).tolist()
all_clients_ = exist_clients + rand_clients_tmp_
distance_ = self.get_dist_distance(all_clients_, base_dist)
if distance_ < min_distance_:
min_distance_ = distance_
rand_clients_[:] = rand_clients_tmp_
# update probability of sampled clients
prob_[rand_clients_idx_] += 1. / len(clients)
prob_ /= prob_.sum()
num_iter -= 1
return rand_clients_
def brute_sampling(self, clients, num_clients, base_dist, exist_clients=[]):
"""Brute search all possible combinations to find best clients.
Args:
clients: List of clients to be sampled.
num_clients: Number of clients to sample.
base_dist: Real data distribution, usually global_dist.
exist_clients: List of existing clients.
Returns:
best_clients: List of sampled clients, which makes
selected_clients most satisfying i.i.d. distribution.
"""
best_clients_ = []
min_distance_ = [np.inf]
clients_tmp_ = []
def recursive_combine(
clients_, start, num_clients_, best_clients_, min_distance_):
if num_clients_ == 0:
all_clients_ = exist_clients + clients_tmp_
distance_ = self.get_dist_distance(all_clients_, base_dist)
if distance_ < min_distance_[0]:
best_clients_[:] = clients_tmp_
min_distance_[0] = distance_
elif num_clients_ > 0:
for i in range(start, len(clients_) - num_clients_ + 1):
clients_tmp_.append(clients_[i])
recursive_combine(
clients_, i + 1, num_clients_ - 1, best_clients_, min_distance_)
clients_tmp_.remove(clients_[i])
recursive_combine(clients, 0, num_clients, best_clients_, min_distance_)
return best_clients_
def bayesian_sampling(self, clients, num_clients, my_round, base_dist,
exist_clients=[], init_points=5, n_iter=25, verbose=0):
"""Search for an approximate optimal solution using bayesian optimization.
Please refer to the link below for more details.
https://github.com/fmfn/BayesianOptimization
Args:
clients: List of clients to be sampled.
num_clients: Number of clients to sample.
my_round: The current training round, used as random seed.
base_dist: Real data distribution, usually global_dist.
exist_clients: List of existing clients.
init_points: Number of iterations before the explorations starts. Random
exploration can help by diversifying the exploration space.
n_iter: Number of iterations to perform bayesian optimization.
verbose: The level of verbosity, set verbose>0 to it.
Returns:
approx_clients: List of sampled clients, which makes
selected_clients approximate to i.i.d. distribution.
"""
from bayes_opt import BayesianOptimization
from bayes_opt.logger import JSONLogger
from bayes_opt.event import Events
def get_indexes_(**kwargs):
c_idx_ = map(int, kwargs.values())
c_idx_ = list(c_idx_)
return c_idx_
def distance_blackbox_(**kwargs):
# Get clients' indexes
c_idx_ = get_indexes_(**kwargs)
assert len(set(c_idx_)) == len(c_idx_), \
"Repeat clients are sampled."
# Get clients and calculate distance
sample_clients_ = np.take(clients, c_idx_).tolist()
all_clients_ = exist_clients + sample_clients_
distance = self.get_dist_distance(all_clients_, base_dist)
# Aim to maximize -distance
return -distance
pbounds_ = {}
interval_ = len(clients) / num_clients
for i in range(num_clients):
bound_left_ = int(i * interval_)
bound_right_ = int(min((i + 1) * interval_, len(clients))) - 1e-12
pbounds_["p%i" % (i + 1)] = (bound_left_, bound_right_)
optimizer = BayesianOptimization(
f=distance_blackbox_,
pbounds=pbounds_,
random_state=my_round,
verbose=verbose
)
optimizer.maximize(
init_points=init_points,
n_iter=n_iter,
)
optimal_params = optimizer.max["params"]
c_idx_ = get_indexes_(**optimal_params)
approx_clients_ = np.take(clients, c_idx_).tolist()
return approx_clients_
def genetic_sampling(self, clients, num_clients, my_round, base_dist,
exist_clients=[], num_iter=100, size_pop=100,
prob_mutation=0.001):
"""Search for an approximate optimal solution using genetic algorithm.
Args:
clients: List of clients to be sampled.
num_clients: Number of clients to sample.
my_round: The current training round, used as random seed.
base_dist: Real data distribution, usually global_dist.
exist_clients: List of existing clients.
num_iter: Number of iterations for sampling.
size_pop: Size of population.
prob_mutation: Probability of mutation.
Returns:
approx_clients: List of sampled clients.
"""
from opt.genetic_algorithm import GeneticAlgorithm
assert size_pop >= 50, | |
charge=0)
# --- Core metabolites for the EEO compartment and acetate kinase ---
# NOTE(review): ATP_HYDR_EEO / ATP_TRANS_EEO (and ATP_SLP_EEO used below)
# carry empty formulas; they look like ATP-accounting pseudo-metabolites
# rather than chemical species -- confirm against the rest of the model.
ATP_HYDR_EEO = Metabolite('ATP_HYDR_EEO', formula='', name='', compartment='EEOe', charge=0)
ATP_TRANS_EEO = Metabolite('ATP_TRANS_EEO', formula='', name='', compartment='EEOe', charge=0)
lac__D_EEOc = Metabolite('lac__D_EEOc', formula='C3H5O3', name='D-Lactate', compartment='EEOc', charge=-1)
nad_EEOc = Metabolite('nad_EEOc', formula='C21H26N7O14P2', name='Nicotinamide adenine dinucleotide', compartment='EEOc', charge=-1)
nadh_EEOc = Metabolite('nadh_EEOc', formula='C21H27N7O14P2', name='Nicotinamide adenine dinucleotide - reduced', compartment='EEOc', charge=-2)
h_EEOc = Metabolite('h_EEOc', formula='H', name='H+', compartment='EEOc', charge=1)
pyr_EEOc = Metabolite('pyr_EEOc', formula='C3H3O3', name='Pyruvate', compartment='EEOc', charge=-1)
##Acetate Metabolism
#ac_EEOc + atp_EEOc <-> actp_EEOc + adp_EEOc
ac_EEOc = Metabolite('ac_EEOc', formula='C2H3O2', name='Acetate', compartment='EEOc', charge=-1)
atp_EEOc = Metabolite('atp_EEOc', formula='C10H12N5O13P3', name='ATP', compartment='EEOc', charge=-4)
adp_EEOc = Metabolite('adp_EEOc', formula='C10H12N5O10P2', name='ADP', compartment='EEOc', charge=-3)
# NOTE(review): name 'xylose-D' looks like a copy-paste slip; formula HO4P
# is inorganic phosphate -- confirm and rename (runtime string, left as-is).
pi_EEOc = Metabolite('pi_EEOc', formula='HO4P', name='xylose-D', compartment='EEOc', charge=-2)
actp_EEOc = Metabolite('actp_EEOc', formula='C2H3O5P', name='Acetyl phosphate', compartment='EEOc', charge=-2)
# Acetate kinase (reversible); ATP_SLP_EEO tracks substrate-level
# phosphorylation ATP turnover alongside the chemistry.
reaction = Reaction('EEO_ACKr')
reaction.name = 'Acetate kinase'
reaction.subsystem = 'Acetate Metabolism'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({ac_EEOc: -1.0,
                          atp_EEOc: -1.0,
                          actp_EEOc: 1.0,
                          adp_EEOc: 1.0,
                          ATP_SLP_EEO: -1.0})
model.add_reactions([reaction])
# Print the mass-balance check (an empty dict means balanced).
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#accoa_EEOc + pi_EEOc <-> actp_EEOc + coa_EEOc
accoa_EEOc = Metabolite('accoa_EEOc', formula='C23H34N7O17P3S', name='Acetyl-CoA', compartment='EEOc', charge=-4)
coa_EEOc = Metabolite('coa_EEOc', formula='C21H32N7O16P3S', name='Coenzyme A', compartment='EEOc', charge=-4)
# Phosphotransacetylase: acetyl-CoA + Pi <-> acetyl phosphate + CoA (reversible).
reaction = Reaction('EEO_PTAr')
reaction.name = 'Phosphotransacetylase'
reaction.subsystem = 'Acetate Metabolism'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({accoa_EEOc: -1.0,
                          pi_EEOc: -1.0,
                          actp_EEOc: 1.0,
                          coa_EEOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#accoa_EEOc + h2o_EEOc -> ac_EEOc + coa_EEOc + h_EEOc
h2o_EEOc = Metabolite('h2o_EEOc', formula='H2O', name='H2O', compartment='EEOc', charge=0)
# Acetyl-CoA hydrolase: irreversible (lower bound 0).
# NOTE(review): 'Acteyl' typo in the display name -- runtime string, left as-is.
reaction = Reaction('EEO_ACOAH')
reaction.name = 'Acteyl-CoA hydrolase'
reaction.subsystem = 'Acetate Metabolism'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({accoa_EEOc: -1.0,
                          h2o_EEOc: -1.0,
                          ac_EEOc: 1.0,
                          coa_EEOc: 1.0,
                          h_EEOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#Pyruvate Oxidation
# --- Two routes from pyruvate to acetyl-CoA: ferredoxin-linked (PFOR)
#     and NAD-linked (PDH) ---
#coa_EEOc + pyr_EEOc + fdox_EEOc <-> accoa_EEOc + co2_EEOc + fdred_EEOc + h_EEOc
fdred_EEOc = Metabolite('fdred_EEOc', formula='Fe8S8X', name='Ferredoxin (reduced) 2[4Fe-4S]', compartment='EEOc', charge= -2)
fdox_EEOc = Metabolite('fdox_EEOc', formula='Fe8S8X', name='Ferredoxin (oxidized) 2[4Fe-4S]', compartment='EEOc', charge= 0)
co2_EEOc = Metabolite('co2_EEOc', formula='CO2', name='CO2', compartment='EEOc', charge= 0)
reaction = Reaction('EEO_PFOR')
#This reaction differs from BiGG database because a different ferredoxin is used and H+ is a product for mass and charge balance
reaction.name = '*Pyruvate flavodoxin oxidoreductase'
reaction.subsystem = 'Pyruvate Oxidation'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({coa_EEOc: -1.0,
                          pyr_EEOc: -1.0,
                          fdox_EEOc: -1.0,
                          accoa_EEOc: 1.0,
                          co2_EEOc: 1.0,
                          fdred_EEOc: 1.0,
                          h_EEOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#coa_EEOc + nad_EEOc + pyr_EEOc <-> accoa_EEOc + co2_EEOc + nadh_EEOc
# NOTE(review): 'dehdyrogenase' typo in the display name -- runtime string,
# left as-is. Irreversible (lower bound 0).
reaction = Reaction('EEO_PDH')
#This reaction differs from BiGG database because a different ferredoxin is used and H+ is a product for mass and charge balance
reaction.name = 'Pyruvate dehdyrogenase'
reaction.subsystem = 'Pyruvate Oxidation'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({coa_EEOc: -1.0,
                          pyr_EEOc: -1.0,
                          nad_EEOc: -1.0,
                          accoa_EEOc: 1.0,
                          co2_EEOc: 1.0,
                          nadh_EEOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
##Reverse Beta Oxidation
#Butyrate Production (Cycle 1)
# --- RBO cycle 1: condense 2x acetyl-CoA and reduce stepwise to
#     butanoyl-CoA, then release butyrate via hydrolase or CoA transfer ---
#2.0 accoa_EEOc <-> aacoa_EEOc + coa_EEOc
aacoa_EEOc = Metabolite('aacoa_EEOc', formula='C25H36N7O18P3S', name='Acetoacetyl-CoA', compartment='EEOc', charge=-4)
reaction = Reaction('EEO_ACACT1r')
reaction.name = 'Acetyl-CoA C-acetyltransferase'
reaction.subsystem = 'Reverse Beta Oxidation'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({accoa_EEOc: -2.0,
                          aacoa_EEOc: 1.0,
                          coa_EEOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#aacoa_EEOc + h_EEOc + nadh_EEOc <-> 3hbcoa_EEOc + nad_EEOc
_3hbcoa_EEOc = Metabolite('_3hbcoa_EEOc', formula='C25H38N7O18P3S', name='(S)-3-Hydroxybutanoyl-CoA', compartment='EEOc', charge=-4)
reaction = Reaction('EEO_HACD1')
reaction.name = '3-hydroxyacyl-CoA dehydrogenase (acetoacetyl-CoA)'
reaction.subsystem = 'Reverse Beta Oxidation'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({aacoa_EEOc: -1.0,
                          h_EEOc: -1.0,
                          nadh_EEOc: -1.0,
                          _3hbcoa_EEOc: 1.0,
                          nad_EEOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#3hbcoa_EEOc <-> b2coa_EEOc + h2o_EEOc
b2coa_EEOc = Metabolite('b2coa_EEOc', formula='C25H36N7O17P3S', name='Crotonoyl-CoA', compartment='EEOc', charge=-4)
reaction = Reaction('EEO_ECOAH1')
reaction.name = '3-hydroxyacyl-CoA dehydratase (3-hydroxybutanoyl-CoA)'
reaction.subsystem = 'Reverse Beta Oxidation'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({_3hbcoa_EEOc: -1.0,
                          b2coa_EEOc: 1.0,
                          h2o_EEOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#b2coa_EEOc + 2.0 nadh_EEOc + fdox_EEOc <-> btcoa_EEOc + 2.0 nad_EEOc + fdred_EEOc
btcoa_EEOc = Metabolite('btcoa_EEOc', formula='C25H38N7O17P3S', name='Butanoyl-CoA', compartment='EEOc', charge= -4)
reaction = Reaction('EEO_EBACD1')
#BiGG does not have an electron bifurcating acyl-CoA dehydrogenase reaction
reaction.name = '*Electron Bifurcating Acyl-CoA Dehydrogenase (C4)'
reaction.subsystem = 'Reverse Beta Oxidation'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({b2coa_EEOc: -1.0,
                          nadh_EEOc: -2.0,
                          fdox_EEOc: -1.0,
                          btcoa_EEOc: 1.0,
                          nad_EEOc: 2.0,
                          fdred_EEOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#b2coa_EEOc + h_EEOc + nadh_EEOc <-> btcoa_EEOc + nad_EEOc
# Non-bifurcating alternative to EEO_EBACD1.
reaction = Reaction('EEO_ACOAD1')
reaction.name = "Acyl-CoA dehydrogenase (butanoyl-CoA)"
reaction.subsystem = 'Reverse Beta Oxidation'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({b2coa_EEOc: -1.0,
                          nadh_EEOc: -1.0,
                          h_EEOc: -1.0,
                          btcoa_EEOc: 1.0,
                          nad_EEOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#btcoa_EEOc + h2o_EEOc <-> but_EEOc + coa_EEOc + h_EEOc
but_EEOc = Metabolite('but_EEOc', formula='C4H7O2', name='Butyrate (n-C4:0)', compartment='EEOc', charge= -1)
reaction = Reaction('EEO_ACHC4')
#BiGG does not have this specific acyl-CoA hydrolase reaction
reaction.name = '*Acyl-CoA Hydrolase (C4:0)'
reaction.subsystem = 'Reverse Beta Oxidation'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({btcoa_EEOc: -1.0,
                          h2o_EEOc: -1.0,
                          but_EEOc: 1.0,
                          coa_EEOc: 1.0,
                          h_EEOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#btcoa_EEOc + ac_EEOc <-> but_EEOc + accoa_EEOc
reaction = Reaction('EEO_CoATC4')
#BiGG does not have this specific CoAT hydrolase reaction
reaction.name = '*CoA Transferase (C4:0-C2:0)'
reaction.subsystem = 'Reverse Beta Oxidation'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({btcoa_EEOc: -1.0,
                          ac_EEOc: -1.0,
                          but_EEOc: 1.0,
                          accoa_EEOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#Hexanoate Production (Cycle 2)
# --- RBO cycle 2: elongate butanoyl-CoA with acetyl-CoA to hexanoyl-CoA,
#     then release hexanoate via the acyl-CoA hydrolase ---
#accoa_EEOc + btcoa_EEOc <-> coa_EEOc + 3ohcoa_EEOc
# NOTE(review): metabolite id '3ohcoa_EEOc' lacks the leading underscore
# used by the variable (other ids such as '_3hbcoa_EEOc' keep it) --
# confirm the id is intentional.
_3ohcoa_EEOc = Metabolite('3ohcoa_EEOc', formula='C27H40N7O18P3S', name='3-Oxohexanoyl-CoA', compartment='EEOc', charge=-4)
reaction = Reaction('EEO_ACACT2')
reaction.name = 'Butanoyl-CoA:acetyl-CoA C-butanoyltransferase'
reaction.subsystem = 'Reverse Beta Oxidation'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({accoa_EEOc: -1.0,
                          btcoa_EEOc: -1.0,
                          _3ohcoa_EEOc: 1.0,
                          coa_EEOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#_3ohcoa_EEOc + h_EEOc + nadh_EEOc <-> _3hhcoa_EEOc + nad_EEOc
_3hhcoa_EEOc = Metabolite('_3hhcoa_EEOc', formula='C27H42N7O18P3S', name='(S)-3-Hydroxyhexanoyl-CoA', compartment='EEOc', charge=-4)
reaction = Reaction('EEO_HACD2')
reaction.name = '3-hydroxyacyl-CoA dehydrogenase (3-oxohexanoyl-CoA)'
reaction.subsystem = 'Reverse Beta Oxidation'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({_3ohcoa_EEOc: -1.0,
                          h_EEOc: -1.0,
                          nadh_EEOc: -1.0,
                          _3hhcoa_EEOc: 1.0,
                          nad_EEOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#_3hhcoa_EEOc <-> h2o_EEOc + hx2coa_EEOc
hx2coa_EEOc = Metabolite('hx2coa_EEOc', formula='C27H40N7O17P3S', name='Trans-Hex-2-enoyl-CoA', compartment='EEOc', charge=-4)
reaction = Reaction('EEO_ECOAH2')
reaction.name = '3-hydroxyacyl-CoA dehydratase (3-hydroxyhexanoyl-CoA)'
reaction.subsystem = 'Reverse Beta Oxidation'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({_3hhcoa_EEOc: -1.0,
                          hx2coa_EEOc: 1.0,
                          h2o_EEOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#hx2coa_EEOc + 2.0 nadh_EEOc + fdox_EEOc <-> hxcoa_EEOc + 2.0 nad_EEOc + fdred_EEOc
hxcoa_EEOc = Metabolite('hxcoa_EEOc', formula='C27H42N7O17P3S', name='Hexanoyl-CoA (n-C6:0CoA)', compartment='EEOc', charge= -4)
reaction = Reaction('EEO_EBACD2')
#BiGG does not have an electron bifurcating acyl-CoA dehydrogenase reaction
reaction.name = '*Electron Bifurcating Acyl-CoA Dehydrogenase (C6)'
reaction.subsystem = 'Reverse Beta Oxidation'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({hx2coa_EEOc: -1.0,
                          nadh_EEOc: -2.0,
                          fdox_EEOc: -1.0,
                          hxcoa_EEOc: 1.0,
                          nad_EEOc: 2.0,
                          fdred_EEOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#h_EEOc + hx2coa_EEOc + nadh_EEOc <-> hxcoa_EEOc + nad_EEOc
# Non-bifurcating alternative to EEO_EBACD2.
reaction = Reaction('EEO_ACOAD2')
reaction.name = "Acyl-CoA dehydrogenase (hexanoyl-CoA)"
reaction.subsystem = 'Reverse Beta Oxidation'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({hx2coa_EEOc: -1.0,
                          nadh_EEOc: -1.0,
                          h_EEOc: -1.0,
                          hxcoa_EEOc: 1.0,
                          nad_EEOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#hxcoa_EEOc + h2o_EEOc <-> hxa_EEOc + coa_EEOc + h_EEOc
hxa_EEOc = Metabolite('hxa_EEOc', formula='C6H11O2', name='Hexanoate (n-C6:0)', compartment='EEOc', charge= -1)
reaction = Reaction('EEO_ACH-C6')
#BiGG does not have this specific acyl-CoA hydrolase reaction
reaction.name = '*Acyl-CoA Hydrolase (C6:0)'
reaction.subsystem = 'Reverse Beta Oxidation'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({hxcoa_EEOc: -1.0,
h2o_EEOc: | |
from sunpy.net import vso, Fido
from numpy import unique
import glob
from sunpy.sun import carrington_rotation_number
import astropy.units as u
from astropy.io import fits
from datetime import datetime,timedelta
import ftplib
import sys,os
import gzip
import shutil
class download_cms_files:
# inital information about the directoies and time for sigmoid
def __init__(self,time='2009/02/17 11:44:00',nproc=4,cmsdir='',outdir='%Y/%m/%d/%H%M/',x=None,y=None):
"""Sets up inital variables to pass to rest of download_cms_file functions.
Really only need to set the input time string "YYYY/MM/DD HH:MM:SS" and full path to the CMS2 directory.
Then assuming you set up the sigmoid directory to be YYYY/MM/DD/HHMM (can change with outdir variable if needed) you are set."""
if cmsdir == '': cmsdir = open('cms2_dir','r').readlines()[0][:-1]#read in first line of cms2_dir and format to send to script
if cmsdir[-1] != '/': cmsdir=cmsdir+'/'
self.time = time
self.nproc = nproc
self.sform = '%Y/%m/%d %H:%M:%S'
self.dttime = datetime.strptime(self.time,self.sform)
self.basedir = datetime.strftime(self.dttime,outdir)
self.cmsdir = cmsdir
self.x = x
self.y = y
#start date from using sdo data
self.sdo_start = datetime(2010,05,01,00,00,00)
#copy hinode synoptic files
def get_hinode(self):
#Hinode archive
self.hinode_date = '%Y/%m/%d/'
self.hinode_arch = '/archive/hinode/xrt/level1/'+datetime.strftime(self.dttime,self.hinode_date)
#look for timeline to get synoptic timelines
self.find_synoptic_times()
#find times for synoptics
    def find_synoptic_times(self):
        """Walk backwards in time (up to 14 days) looking for exported XRT timelines.

        Sets ``self.hinode_time``/``self.hinode_tldr`` to the most recent timeline
        directory that exists and calls ``copy_synoptics`` for it. The outer
        two-pass loop repeats the search (with ``m`` carried over) so that a
        timeline starting on the request day does not yield only one set of
        synoptics. Returns without copying if nothing is found within 14 days.
        """
        self.hinode_tfmt = self.hinode_date+'%Y%m%d_exported/'#location of hinode timelines
        m = 0 # counter for number of days looking back (shared across both passes)
        for p in range(2):#dirty way to prevent timeline start day from only returning one set of synoptics
            lookt = True # found the proper timeline
            while lookt:
                # candidate timeline directory, m days before the request time
                self.hinode_time = self.dttime-timedelta(days=m)
                self.hinode_tldr = '/archive/hinode/xrt/timelines/'+datetime.strftime(self.hinode_time,self.hinode_tfmt)
                foundt = os.path.exists(self.hinode_tldr)
                m += 1 #increment by 1 day
                if m >= 14: return #exit downloading hinode data if no timeline found for 14 days
                if foundt: lookt = False #if find hinode directory exit loop
            self.copy_synoptics()
#copy xrt synoptics to local directory
def copy_synoptics(self):
self.read_xrt_timeline()
self.xrt_file_list()
for i in self.xrt_files: shutil.copy(i,self.cmsdir+self.basedir)
#get list of files in timerange
def xrt_file_list(self):
#get formatted list of hours
self.xrt_hours = []
for i in self.xrt_beg: self.xrt_hours.append('H{0:%H}00'.format(i))
for i in self.xrt_end: self.xrt_hours.append('H{0:%H}00'.format(i))
#get unique hours
self.xrt_hours = unique(self.xrt_hours)
#get all files in hours and their associated times
self.xrt_files = []
#loop over unique hours
for i in self.xrt_hours:
tempfiles = glob.glob('{0}/{1}/*fits'.format(self.hinode_arch,i))
for j in tempfiles:
temptime = datetime.strptime(j.split('/')[-1].split('.')[0],'L1_XRT%Y%m%d_%H%M%S')
#check if time is in range
for k in range(len(self.xrt_beg)):
if ((temptime >= self.xrt_beg[k]) & (temptime <= self.xrt_end[k])):
#check if header information is compatible with a syntopic
dat = fits.open(j)
hdr = dat[0].header
#check for acceptable filters
fil_check = (((hdr['EC_FW2_'] == 'Ti_poly') | (hdr['EC_FW1_'] == 'Al_poly') | (hdr['EC_FW2_'] == 'Al_mesh') | (hdr['EC_FW1_'] == 'Be_thin') | (hdr['EC_FW2_'] != 'Gband')))
#check header information on fits files to get just synoptics
if ((hdr['NAXIS1'] == 1024) & (hdr['NAXIS2'] == 1024) & (fil_check)):
self.xrt_files.append(j)
#check to make sure self.x and self.y are defined
if ((self.x != None) & (self.y != None)):
#Also check to see if there are any small FOV XRT files within 100'' of y and x
dist = (((hdr['CRVAL1']-self.x)**2.+(hdr['CRVAL2']-self.y)**2.))**.5
if ((dist <= 100) & (fil_check)):
self.xrt_files.append(j)
#read timeline and return synoptic times
def read_xrt_timeline(self):
fmtrept = self.hinode_tldr+'re-point_{0:%Y%m%d}*.txt'.format(self.hinode_time)
repfile = glob.glob(fmtrept)
repoint = open(repfile[-1],'r') # read the repoint file
#list of beginning and end time for synoptics
self.xrt_beg = []
self.xrt_end = []
ender = False #gets the end time for synoptic
timefmt = '%Y/%m/%d %H:%M:%S' #format of time in pointing file
#do not start with an end
end = False
#get the begging and end times of xrt syntopics
#loop through repoint file lines
for i in repoint:
if end:
end = False
try:
self.xrt_end.append(datetime.strptime(i[20:39],timefmt))
except:
self.xrt_end.append(self.xrt_beg[-1]+timedelta(minutes=20)) #if syntopic is last pointing just add 20 minutes
#look for lines containing the word synoptic
if 'synoptic' in i.lower():
end = True
self.xrt_beg.append(datetime.strptime(i[20:39],timefmt))
#Add continue to prevent errors when synoptic pointing is close to observed AR
continue
#if you want to look for local AR files
if ((self.x != None) & (self.y != None)):
#check for nearby pointings with small FoV <NAME> 2018/01/24
try:
p_x = float(i[72:78])
p_y = float(i[79:87])
#if values are not floats continue
except:
continue
#distance from pointing to planned AR
dist = (((p_x-self.x)**2.+(p_y-self.y)**2.))**.5
#if distance less than 100'' from AR add to list to look for XRT files
if dist < 100:
end = True
self.xrt_beg.append(datetime.strptime(i[20:39],timefmt))
#find carrington rotation number and as politely for the files
def get_carrington(self):
self.rotnum = carrington_rotation_number(self.time)
#connect to ftp directory
self.ftp = ftplib.FTP('solis.nso.<EMAIL>','anonymous')
#change to carrigoton rotation number directory
self.ftp.cwd('synoptic/level3/vsm/merged/carr-rot/')
#format of the fits file
self.forfil = "svsm_m21lr_B3_cr{0:4d}.fts.gz"
try:
self.grab_car()
self.ftp.close()
except:
print('Failed unexpectedly, closing ftp access',sys.exc_info()[0])
self.ftp.close()
raise
#get carrington roation files
def grab_car(self):
#primary rotation number
prot = int(self.rotnum)
#rotation number +/- 1
brot = prot-1
arot = prot+1
#only get 3 carrington rotations
rot_list = [brot,prot,arot]
#only get exact carrington rotation number
#rot_list = [prot]
#NSO synoptic maps only go until 2166
if self.rotnum > 2166:
#Use HMI synoptics
import urllib2
fname = 'hmi.Synoptic_Mr_small.{0:1.0f}.fits'.format(self.rotnum)
hmi_url = 'http://jsoc.stanford.edu/data/hmi/synoptic/'+fname
res = urllib2.urlopen(hmi_url)
#read binary fits file
f_carrot = res.read()
#write fits file locally
with open(self.cmsdir+self.basedir+fname,'wb') as f:
f.write(f_carrot)
#print("Carrington rotation {0:1.0f} is beyond NSO archive".format(self.rotnum))
else:
for rot in rot_list:
fname = self.forfil.format(rot)
#see if file exists with or without .gz
testfile = ((os.path.isfile(self.cmsdir+self.basedir+fname)) | (os.path.isfile(self.cmsdir+self.basedir+fname[:-3])))
#if file does not exist download new file
if testfile == False:
try:
fhandle = open(self.cmsdir+self.basedir+fname,'wb')
self.ftp.retrbinary('RETR {0}'.format(fname),fhandle.write)
fhandle.close()
self.unzip_fil(self.cmsdir+self.basedir+fname)
except:
print("Unable to download carrington rotation map at {0}".format(fname))
#unzip carrington file
def unzip_fil(self,fname):
oname = fname[:-3]
with gzip.open(fname,'rb') as infile:
with open(oname,'wb') as outfile:
for line in infile:
outfile.write(line)
#Get AIA 1024x1024 synoptics from Stanford, since CMS2 cannot handle 4096x4096 files
def get_aia_syn(self):
import urllib
#synoptic archive location
syn_arch = 'http://jsoc.stanford.edu/data/aia/synoptic/'
#check if current minute is even, since synoptics are every 2 minutes
if self.dttime.minute % 2 == 0:
inp_time = self.dttime
#otherwise add 1 minute to current time
else:
inp_time = self.dttime+timedelta(minutes=1)
#reduced time to 12 seconds for AIA observation download <NAME> 2018/01/24
dt = timedelta(minutes=2)
start = inp_time-dt
end = inp_time
#wavelengths to download
d_wav = [131,171,193,211,304]
#create directory path minus the wavelength
f_dir = '{0:%Y/%m/%d/H%H00/AIA%Y%m%d_%H%M_}'
s_dir = f_dir.format(start)
e_dir = f_dir.format(end)
#wavelength format
w_fmt = '{0:04d}.fits'
#download files from archive for each wavelength
for i in d_wav:
#format wavelength
w_fil = w_fmt.format(i)
urllib.urlretrieve(syn_arch+s_dir+w_fil,self.cmsdir+self.basedir+s_dir.split('/')[-1]+w_fil)
urllib.urlretrieve(syn_arch+e_dir+w_fil,self.cmsdir+self.basedir+e_dir.split('/')[-1]+w_fil)
#Get aia files from VSO
def get_aia(self):
#Get Stereo observations
client = vso.VSOClient()
#reduced time to 12 seconds for AIA observation download J. Prchlik 2018/01/18
dt = timedelta(seconds=12)
start = datetime.strftime(self.dttime-dt,self.sform)
end = datetime.strftime(self.dttime+dt,self.sform)
#set time span
time = vso.attrs.Time(start,end)
#grabs both stereo a and b
ins = vso.attrs.Instrument('aia')
#grab particular (UV) wavelengths
wave = vso.attrs.Wavelength(171*u.AA,171*u.AA)
qr1 = client.query(time,ins,wave)
res1 = client.get(qr1,path=self.cmsdir+self.basedir+'{file}').wait()
#grab particular (UV) wavelengths
wave = vso.attrs.Wavelength(193*u.AA,193*u.AA)
qr2 = client.query(time,ins,wave)
res2 = client.get(qr2,path=self.cmsdir+self.basedir+'{file}').wait()
#grab particular (UV) wavelengths
wave = vso.attrs.Wavelength(304*u.AA,304*u.AA)
qr3 = client.query(time,ins,wave)
res3 = client.get(qr3,path=self.cmsdir+self.basedir+'{file}').wait()
#grab particular (UV) wavelengths
wave = vso.attrs.Wavelength(211*u.AA,211*u.AA)
qr4 = client.query(time,ins,wave)
res4 = client.get(qr4,path=self.cmsdir+self.basedir+'{file}').wait()
#grab stereo files from stereo archive
def grab_stereo(self):
#look in both stereo ahead and behind
beacons = ['ahead','behind']
#set time range around to look for stereo files
dt = timedelta(minutes=30)
start = self.dttime-dt
end = self.dttime+dt
#base directory for start and end directory
f_bdir = '{0:%Y%m%d}/*fts'
s_bdir = f_bdir.format(start)
e_bdir = f_bdir.format(end)
#loop over subdirectories if start and end time cross days
if s_bdir == e_bdir:
l_dir = [s_bdir]
else:
l_dir = [s_bdir,e_bdir]
#loop over stereo ahead and behind
for bea in beacons:
#change to stereo ahead and behind directory continue if directories do not exist
try:
self.s_ftp.cwd('/pub/beacon/{0}/secchi/img/euvi/'.format(bea))
except:
print('No STEREO {0} OBS'.format(bea))
continue
#get list of files in subdirectory
fit_list = []
try:
for days in l_dir: fit_list.append(self.s_ftp.nlst(days))
except:
print('No STEREO {0} OBS at {1}'.format(bea,days))
continue
#flatten the list
flat_list = [item for sublist in fit_list for item in sublist]
#list of files to download
d_list = []
#time reange for looping
t_r = 1
#try expanding the time search range
loop = True
#make sure you get at least 4 files
while ((len(d_list) <= | |
#!/usr/bin/env python3
import datetime
import glob
import os
import re
import statistics
from tkinter import filedialog
from tkinter import * # noqa

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from natsort import natsorted, ns

from eventcodes import eventcodes_dictionary
__all__ = ["loop_over_days", "load_file", "concat_lickometer_files",
"extract_info_from_file", "DNAMIC_extract_info_from_file",
"DNAMIC_loop_over_days", "get_events_indices", "reward_retrieval", "cue_iti_responding",
"cue_iti_responding_PavCA", "binned_responding",
"cue_responding_duration", "lever_pressing", "lever_press_latency", "lever_press_latency_PavCA",
"total_head_pokes",
"num_successful_go_nogo_trials", "count_go_nogo_trials", "num_switch_trials", "bin_by_time",
"lever_press_lat_gng", "RVI_gng_weird", "RVI_nogo_latency", "lever_press_latency_Switch",
"response_rate_across_cue_iti", "duration_across_cue_iti"]
def date_sort_key(date_as_string, date_fmt='%b_%d_%y', date_grep_fmt=r'\w{3}_\d{1,2}_\d{2}'):
    '''
    :param date_as_string: a string containing a date, typically from a file or directory name
    :param date_fmt: The formatting to use with datetime to extract the raw date information.
        A default is provided for ease of use.
    :param date_grep_fmt: A regex pattern used to pull the date itself out of date_as_string.
        Default matches that of date_fmt.
    :return: A tuple containing date information in Month, Day, Year order to be used by
        a sort function, such as sorted, as a key for sorting a list of dates.
    '''
    # Normalise separators so one pattern handles 'Feb-17-09' and 'Feb_17_09' alike.
    date_as_string = date_as_string.replace('-', '_')
    try:
        # NOTE: requires 'import re' at module level (the original file never
        # imported re, so this raised NameError).
        sanitized_string_date = re.search(date_grep_fmt, date_as_string).group(0)
        date_info = datetime.datetime.strptime(sanitized_string_date, date_fmt)
    except (AttributeError, ValueError) as e:
        print(e)
        # If the desired string is not matched, re.search will return NoneType and
        # group(0) will yield an AttributeError; strptime raises ValueError on a
        # format mismatch.
        print(f'The date is {date_as_string}\n\
The regex pattern is {date_grep_fmt}\n\
The datetime format is {date_fmt}.)')
        date_grep_fmt = input('Enter desired regex pattern to match date string: ')
        date_fmt = input('Enter desired date format string for strptime: ')
        # and then try it again with the user-supplied pattern/format.
        sanitized_string_date = re.search(date_grep_fmt, date_as_string).group(0)
        date_info = datetime.datetime.strptime(sanitized_string_date, date_fmt)
    return date_info.month, date_info.day, date_info.year
def loop_over_days(column_list, behavioral_test_function, master_data_folder=''):
    """
    :param column_list: list of strings/column titles for analysis that will be output in a table
    :param behavioral_test_function: function that contains all the analysis functions to run on each file
    :param master_data_folder: A directory which contains all single-day directories of interest. Used instead
        of GUI selection with Tk. Path provided by sys.argv in executing script.
    :return: tuple (days analyzed, one concatenated data table of analysis for each animal for each day)
    """
    # If a data folder is passed, skip user input.
    if master_data_folder == '':
        days = int(input("How many days would you like to analyze?"))
        gui = True
    else:
        data_folders = glob.glob(os.path.join(master_data_folder, '*'))
        data_folders = natsorted(data_folders, key=date_sort_key)
        print('I found {}'.format(data_folders))
        continue_script = input('Are these in the right order (y/n)? ')
        if continue_script == 'y':
            pass
        elif continue_script == 'n':
            date_fmt = input('Enter desired date format string for strptime: ')
            regex_fmt = input('Enter desired regex pattern to match date string: ')
            # BUG FIX: the key must be a callable; the original called
            # date_sort_key immediately (missing its positional argument,
            # raising TypeError before sorting).
            data_folders = natsorted(
                data_folders,
                key=lambda name: date_sort_key(name, date_fmt=date_fmt, date_grep_fmt=regex_fmt))
        days = len(data_folders)
        gui = False
    frames = []
    for i in range(days):
        # Ask user to specify data folder if necessary.
        if gui:
            root = Tk()  # noqa
            root.withdraw()
            folder_selected = filedialog.askdirectory()
        else:
            folder_selected = data_folders[i]
        # Iterate over single animal datafiles in current folder.
        file_pattern = os.path.join(folder_selected, '*')
        for file in sorted(glob.glob(file_pattern)):
            loaded_file = load_file(file)
            df2 = behavioral_test_function(loaded_file, i)
            # DataFrame.append was removed in pandas 2.0: collect results and
            # concatenate once. Dict results are wrapped as single-row frames,
            # matching the old append(ignore_index=True) semantics.
            frames.append(df2 if isinstance(df2, pd.DataFrame) else pd.DataFrame([df2]))
    df = pd.DataFrame(columns=column_list)
    if frames:
        df = pd.concat([df] + frames, ignore_index=True)
    return days, df
def loop_over_days_lickometer(column_list, behavioral_test_function):
    """
    :param column_list: list of strings/column titles for analysis that will be output in a table
    :param behavioral_test_function: function that contains all the analysis functions to run on each file
    :return: tuple (days analyzed, one concatenated data table of analysis for each animal for each day)
    """
    days = int(input("How many days would you like to analyze?"))
    frames = []
    for i in range(days):
        # one GUI folder selection per day
        root = Tk()  # noqa
        root.withdraw()
        folder_selected = filedialog.askdirectory()
        file_pattern = os.path.join(folder_selected, '*')
        for file in sorted(glob.glob(file_pattern)):
            loaded_file = load_file(file)
            df2 = behavioral_test_function(loaded_file, i)
            # DataFrame.append was removed in pandas 2.0: collect and concat once
            frames.append(df2 if isinstance(df2, pd.DataFrame) else pd.DataFrame([df2]))
    df = pd.DataFrame(columns=column_list)
    if frames:
        df = pd.concat([df] + frames, ignore_index=True)
    return days, df
def load_file(filename):
    """
    :param filename: string that refers to single operant file location, file is txt
    :return: dictionary of all the fields and their values contained in the file (like subject, group, or w array)
    """
    with open(filename, "r") as fileref:
        filelines = fileref.readlines()
    fields_dictionary = {}
    for line in filelines:
        if line[0] != ' ' and line[0] != '\n':
            # 'Field: value' header line starts a new entry; strip the label,
            # newlines, and all spaces from the value
            name = line.split(':')[0]
            value = line.replace(name + ':', '')
            fields_dictionary[name] = value.replace('\n', '').replace(' ', '')
        elif line[0] == ' ':
            # indented continuation lines (arrays) extend the current field
            fields_dictionary[name] = (fields_dictionary[name] + line).replace('\n', '')
    # The Group field can carry extra 'Key:value' entries separated by '/'
    group_identities = fields_dictionary['Group'].split('/')
    fields_dictionary['Group'] = group_identities.pop(0)
    for remaining in group_identities:
        if ':' in remaining:
            next_group = remaining.split(':')
            fields_dictionary[next_group[0]] = next_group[1]
    return fields_dictionary
def concat_lickometer_files():
    """
    Select a folder of lickometer .txt files via a GUI dialog and build one
    table with animals as columns; the table is also written to 'output.xlsx'.

    :return: data frame for lickometer analysis
    """
    root = Tk()  # noqa
    root.withdraw()
    home = os.path.expanduser('~')  # returns the home directory on any OS --> ex) /Users/jhl
    selected_folder = filedialog.askdirectory(initialdir=home)
    file_pattern = os.path.join(selected_folder, '*.txt')
    data_dict = {}
    for fname in natsorted(glob.glob(file_pattern), alg=ns.IGNORECASE):  # loop through all the txt files
        with open(fname, "r") as file:
            filelines = file.readlines()  # read the lines in each file
        subject_line = filelines[5]  # Animal ID will always be at the 6th index (5+1)
        subject = subject_line.split(",")[-1].strip()  # subject is the last element; strip whitespace
        values = filelines[-1].strip().split(",")  # split by delimiter to make the list
        data_dict[subject] = values
    lick_df = pd.DataFrame.from_dict(data_dict, orient='index')
    lick_final = lick_df.T
    # drop the row of ones at the top
    lick_final = lick_final.drop([lick_final.index[0]])
    lick_final.reset_index(inplace=True)
    for c in lick_final.columns:
        lick_final[c] = pd.to_numeric(lick_final[c], errors='coerce')
    lick_final = lick_final.drop(lick_final.columns[[0]], axis=1)
    # pd.np was removed in pandas 1.x/2.x; use numpy's nan directly
    lick_final.fillna(value=np.nan, inplace=True)
    # NOTE(review): the original computed
    #   lick_final.rename(columns=lick_final.iloc[0]).drop(lick_final.index[0])
    # and discarded the result (a no-op), so it is dropped here -- confirm
    # whether that rename/drop was meant to be assigned back to lick_final.
    lick_final.to_excel("output.xlsx")
    return lick_final
def extract_info_from_file(dictionary_from_file, time_conversion):
    """
    :param dictionary_from_file: dictionary of all the fields and their values contained in the file (like subject, group, or w array)
    :param time_conversion: conversion number the timecode needs to be divided by to get seconds
    :return: timecode and eventcode lists derived from the w array
    """
    # BUG FIX: the original removed items from the list while iterating over it,
    # which skips the element following every removal (adjacent ':' entries
    # survived and later crashed the float conversion). Filter instead.
    time_event_codes = [num for num in dictionary_from_file["W"].split() if ':' not in num]
    # normalise each entry to a plain integer string (drops '.0' suffixes)
    time_event_codes = [str(int(float(num))) for num in time_event_codes]
    timecode = []
    eventcode = []
    # times are reported relative to the first entry, which maps to 0.0
    first_timecode = float(time_event_codes[0][:-4]) / time_conversion
    for idx, num in enumerate(time_event_codes):
        if idx == 0:
            # BUG FIX: compare by position, not value -- the original compared
            # num == time_event_codes[0], so any later duplicate of the first
            # entry was also given time 0.0.
            timecode.append(0.0)
        else:
            timecode.append(round(float(num[:-4]) / time_conversion - first_timecode, 2))
        # last four digits encode the event; the rest encode the time
        eventcode.append(eventcodes_dictionary[int(num[-4:])])
    return timecode, eventcode
def DNAMIC_loop_over_days(column_list, behavioral_test_function):
    """
    :param column_list: list of strings/column titles for analysis that will be output in a table
    :param behavioral_test_function: function that contains all the analysis functions to run on each file
    :return: tuple (days analyzed, one concatenated data table of analysis for each animal for each day)
    """
    days = int(input("How many days would you like to analyze?"))
    frames = []
    for i in range(days):
        # one GUI folder selection per day
        root = Tk()  # noqa
        root.withdraw()
        folder_selected = filedialog.askdirectory()
        file_pattern = os.path.join(folder_selected, '*')
        for file in sorted(glob.glob(file_pattern)):
            (eventcode, timecode, fields_dictionary) = DNAMIC_extract_info_from_file(file)
            df2 = behavioral_test_function(eventcode, timecode, fields_dictionary, i)
            # DataFrame.append was removed in pandas 2.0: collect and concat once
            frames.append(df2 if isinstance(df2, pd.DataFrame) else pd.DataFrame([df2]))
    df = pd.DataFrame(columns=column_list)
    if frames:
        df = pd.concat([df] + frames, ignore_index=True)
    return days, df
def DNAMIC_extract_info_from_file(filename):
    """Parse a DNAMIC output file into event codes, time codes, and header fields.

    :param filename: path to a DNAMIC file of 'event:timestamp' rows
    :return: (eventcode list, timecode list in seconds, header fields dict)
    """
    df = pd.read_csv(filename, sep=':', names=['event', 'timestamp'])
    df['timestamp'] = df['timestamp'].str.strip()
    # 0, 0, 0 appears after successful initialization --> serves as a cutoff mark
    body_start_idx = df.loc[df['timestamp'] == '0'].index[-1] + 1
    # everything above the cutoff is header metadata
    header = df[:body_start_idx]
    fields_dictionary = dict(zip(header['event'].tolist(), header['timestamp'].tolist()))
    # the body holds the event stream; the last two rows are trailer lines
    df_body = df[body_start_idx:-2]
    eventcode = [eventcodes_dictionary[int(code)] for code in df_body['event'].tolist()]
    # timestamps are recorded in milliseconds
    timecode = [int(stamp) / 1000 for stamp in df_body['timestamp'].tolist()]
    return eventcode, timecode, fields_dictionary
def get_events_indices(eventcode, eventtypes):
    """
    :param eventcode: list of event codes from operant conditioning file
    :param eventtypes: list of event types to index
    :return: list of indices of target events
    """
    indices = []
    for position, event in enumerate(eventcode):
        if event in eventtypes:
            indices.append(position)
    return indices
def reward_retrieval(timecode, eventcode):
"""
:param timecode: list of time codes from operant conditioning file
:param eventcode: list of event codes from operant conditioning file
:return: number of reinforcers (dippers) presented, number retrieved, and latency to retrieve as floats
"""
dip_on = get_events_indices(eventcode, ['DipOn'])
dip_off = get_events_indices(eventcode, ['DipOff', 'EndSession'])
poke_on = get_events_indices(eventcode, ['PokeOn1'])
poke_off = get_events_indices(eventcode, ['PokeOff1'])
dips_retrieved = 0
latency_dip_retrieval = []
for i in range(len(dip_on)):
for x in range(len(poke_off)):
dip_on_idx = dip_on[i]
dip_off_idx = dip_off[i]
if poke_on[x] < dip_on_idx < poke_off[x]:
dips_retrieved += 1
latency_dip_retrieval += [0]
break
elif 'PokeOn1' in eventcode[dip_on_idx:dip_off_idx]:
dips_retrieved += 1
poke_during_dip_idx = eventcode[dip_on_idx:dip_off_idx].index('PokeOn1')
latency_dip_retrieval += [round(timecode[poke_during_dip_idx + dip_on_idx] - timecode[dip_on_idx], 2)]
break
if dips_retrieved == 0:
return len(dip_on), dips_retrieved, 0
else:
return len(dip_on), | |
Phi_n=0 since h=b here)
rhs[:,-1] = 0 # but we again set it explicitly to be absolutely clear
# For the xi=1 boundary condition we implicitly make the 2nd derivative zero
A_p0_m1 = np.zeros(Phi_n_old.shape)
A_p0_m2 = np.zeros(Phi_n_old.shape)
A_p0_m3 = np.zeros(Phi_n_old.shape) # required for higher order stencil
A_p0_p0[-1,2:-1] = 2.0 # 1 low order, 2 higher order
A_p0_m1[-1,2:-1] = -5.0 # -2 low order, -5 higher order
A_p0_m2[-1,2:-1] = 4.0 # 1 low order, 4 higher order
A_p0_m3[-1,2:-1] = -1.0 # -1 higher order
rhs[-1,2:-1] = 0
# Now the BC at r=0 (and the artificial one I enforce next to it)
A_p1_p0 = np.zeros(Phi_n_old.shape)
A_p2_p0 = np.zeros(Phi_n_old.shape)
A_m1_p0 = np.zeros(Phi_n_old.shape) # required for the artificial BC
A_p3_p0 = np.zeros(Phi_n_old.shape) # required for higher order stencil at r=0
A_p0_p0[:,0] = 3.0;A_p1_p0[:,0] = -4.0;A_p2_p0[:,0] = 1.0
rhs[:,0] = 0
if self._use_artificial_dr_bc: # if artificial 'BC' is also enforced near r=0
A_p0_p0[:,0] = 3.0;A_p1_p0[:,0] = -4.0;A_p2_p0[:,0] = 1.0
A_m1_p0[:,1] = 4.0;A_p0_p0[:,1] = -7.0;A_p1_p0[:,1] = 4.0;A_p2_p0[:,1] = -1.0
rhs[:,1] = 0
if False: # if high order BC enforcement at r=0
A_p0_p0[:,0] = 11.0;A_p1_p0[:,0] = -18.0;A_p2_p0[:,0] = 9.0;A_p3_p0[:,0] = - 2.0
if False: # if both high order BC enforcement at r=0 and additional artificial BC near r=0
A_p0_p0[:,0] = 11.0;A_p1_p0[:,0] = -18.0;A_p2_p0[:,0] = 9.0;A_p3_p0[:,0] = -2.0
A_m1_p0[:,1] = 4.0;A_p0_p0[:,1] = - 7.0;A_p1_p0[:,1] = 4.0;A_p2_p0[:,1] = -1.0
rhs[:,1] = 0
# Add the forcing terms on the 'interior' (this need not be implicit really)
A_p0_p0[1:-1,2:-1] += -dt*(g_b_old-Psi_d)[np.newaxis,2:-1] # implicit forcing...
#rhs[1:-1,2:-1] += dt*Phi_n_old[1:-1,2:-1]*(g_b_old-Psi_d)[np.newaxis,2:-1] # explicit forcing...
# Setup the vertical velocity factor
# Note: the second line of each v_z terms do not include r=0 or r=R parts, they are not needed regardless
# The v_z terms also exclude the 1/h factor...
fot_old = self._advective_term(r,h_old)
v_z_old = (1.0+Psi_m)*g_b_old[np.newaxis,:]*(Phi_n_old-XI*(Phi_n_old[-1,:])[np.newaxis,:])
v_z_old[:,1:-1] += gamma_ast/6.0*(2.0*XI+XI**3-3*XI**2)[:,1:-1]/R[:,1:-1]*(fot_old)[np.newaxis,1:-1]
# Now add this to the appropriate diagonals...
A_p0_p1 = np.zeros(Phi_n_old.shape)
A_p0_m1[1:-1,2:-1] = -dt/(2*dxi)*v_z_old[1:-1,2:-1]/h_old[np.newaxis,2:-1] # central...
A_p0_p1[1:-1,2:-1] = +dt/(2*dxi)*v_z_old[1:-1,2:-1]/h_old[np.newaxis,2:-1]
#A_p0_m1[1:-1,2:-1] += -dt/dxi*(v_z_old[1:-1,2:-1]>0)*v_z_old[1:-1,2:-1]/h_old[np.newaxis,2:-1] # upwinded...
#A_p0_p0[1:-1,2:-1] += +dt/dxi*(v_z_old[1:-1,2:-1]>0)*v_z_old[1:-1,2:-1]/h_old[np.newaxis,2:-1]
#A_p0_p0[1:-1,2:-1] += -dt/dxi*(v_z_old[1:-1,2:-1]<0)*v_z_old[1:-1,2:-1]/h_old[np.newaxis,2:-1]
#A_p0_p1[1:-1,2:-1] += +dt/dxi*(v_z_old[1:-1,2:-1]<0)*v_z_old[1:-1,2:-1]/h_old[np.newaxis,2:-1]
# Setup the horizontal 'advection' stencil
if explicit_r_advection: # true - explicit, false - implicit
Phi_int_dxi_old = np.cumsum(0.5*((Phi_n_old*(1-XI))[1:,:]+(Phi_n_old*(1-XI))[:-1,:]),axis=0)*dxi
integral_old = np.empty(Phi_n_old.shape)
integral_old[0 ,:] = 0
integral_old[1:,:] = Phi_int_dxi_old
f_old = (Phi_n_old*(0.5*XI**2-XI)+integral_old)*h_old[np.newaxis,:]**2
if np.isfinite(lambda_ast):
f_old -= Phi_n_old*h_old[np.newaxis,:]/lambda_ast
adv_old = self._advective_term_alt(r,h_old,f_old)
# Add the horizontal 'advection' part to the system
# Note: currently this is treated explicitly, which seems to work okay for the most part...
rhs[1:-1,2:-1] += dt*gamma_ast*adv_old[1:-1,2:-1]/r[np.newaxis,2:-1]
else:
# Note: we can re-use the _advective_term_f_gradient function here
diagonals_h2,offsets_h2 = self._advective_term_f_gradient(r,h_old,2,Phi_n_old)
assert offsets_h2[0]==-1
A_m1_p0[1:-1,2:-1] += -dt*gamma_ast*(0.5*XI**2-XI)[1:-1,2:-1]*diagonals_h2[0][np.newaxis,2:-1]/r[np.newaxis,2:-1]
A_p0_p0[1:-1,2:-1] += -dt*gamma_ast*(0.5*XI**2-XI)[1:-1,2:-1]*diagonals_h2[1][np.newaxis,2:-1]/r[np.newaxis,2:-1]
A_p1_p0[1:-1,2:-1] += -dt*gamma_ast*(0.5*XI**2-XI)[1:-1,2:-1]*diagonals_h2[2][np.newaxis,2:-1]/r[np.newaxis,2:-1]
if np.isfinite(lambda_ast):
diagonals_h1,offsets_h1 = self._advective_term_f_gradient(r,h_old,1,Phi_n_old)
assert offsets_h1[0]==-1
A_m1_p0[1:-1,2:-1] += +dt*gamma_ast/lambda_ast*diagonals_h1[0][np.newaxis,2:-1]/r[np.newaxis,2:-1]
A_p0_p0[1:-1,2:-1] += +dt*gamma_ast/lambda_ast*diagonals_h1[1][np.newaxis,2:-1]/r[np.newaxis,2:-1]
A_p1_p0[1:-1,2:-1] += +dt*gamma_ast/lambda_ast*diagonals_h1[2][np.newaxis,2:-1]/r[np.newaxis,2:-1]
# Now add the integral component (note this is somewhat denser than usual)
# (Note: it might be easier to build the entire matrix directly in coo format?)
r_i,xi_i = np.meshgrid(range(nr),range(nxi))
indices = xi_i*nr+r_i
H = (h_old>h_ast) # Use this to zero out bits where h is too small...
row,col,dat = [],[],[]
for j in range(1,nxi-1): # exclude the first and last index... (the first is 0 regardless)
for k in range(j+1):
c = 0.5*dxi if (k==0 or k==j) else dxi
row.append(indices[j,2:-1])
col.append(indices[k,1:-2])
dat.append(-dt*gamma_ast*c*(1-XI[k,2:-1])*H[2:-1]*diagonals_h2[0][2:-1]/r[2:-1])
row.append(indices[j,2:-1])
col.append(indices[k,2:-1])
dat.append(-dt*gamma_ast*c*(1-XI[k,2:-1])*H[2:-1]*diagonals_h2[1][2:-1]/r[2:-1])
row.append(indices[j,2:-1])
col.append(indices[k,3: ])
dat.append(-dt*gamma_ast*c*(1-XI[k,2:-1])*H[2:-1]*diagonals_h2[2][2:-1]/r[2:-1])
M_trap = coo_matrix((np.concatenate(dat),(np.concatenate(row),np.concatenate(col))),shape=(nr*nxi,nr*nxi))
# Zero parts where h is still too small
h_small = (h_old<=h_ast)
A_p0_p0[:,h_small] = 1
rhs[:,h_small] = 0
A_m1_p0[:,h_small] = 0;A_p1_p0[:,h_small] = 0;A_p2_p0[:,h_small] = 0;#A_p3_p0[:,h_small] = 0;
A_p0_m3[:,h_small] = 0;A_p0_m2[:,h_small] = 0;A_p0_m1[:,h_small] = 0;A_p0_p1[:,h_small] = 0;
# Now setup the sparse linear system...
if explicit_r_advection:
A_11 = diags([A_p0_p0.ravel(),
A_m1_p0.ravel()[1:],A_p1_p0.ravel()[:-1],A_p2_p0.ravel()[:-2],#A_p3_p0.ravel()[:-3],
A_p0_m3.ravel()[3*nr:],A_p0_m2.ravel()[2*nr:],A_p0_m1.ravel()[nr:],A_p0_p1.ravel()[:-nr]],
[0,
-1,1,2,#3,
-3*nr,-2*nr,-nr,nr],
format="csr")
else:
A_11_partial = diags([A_p0_p0.ravel(),
A_m1_p0.ravel()[1:],A_p1_p0.ravel()[:-1],A_p2_p0.ravel()[:-2],#A_p3_p0.ravel()[:-3],
A_p0_m3.ravel()[3*nr:],A_p0_m2.ravel()[2*nr:],A_p0_m1.ravel()[nr:],A_p0_p1.ravel()[:-nr]],
[0,
-1,1,2,#3,
-3*nr,-2*nr,-nr,nr],
format="coo")
A_11 = (A_11_partial+M_trap).tocsr()
# Now solve the sparse linear system...
Phi_n_new = spsolve(A_11,rhs.ravel()).reshape(Phi_n_old.shape)
# done
return Phi_n_new
    def _Phi_n_equation_RHS(self,v_old,v_new,dt=None):
        """
        Calculate the RHS vector component corresponding to the Phi_n equation.
        (Here Phi_n = int_0^{h xi} phi_n dz, the input v_old,v_new must contain Phi_n rather than phi_n)
        The internal time step dt is used if one is not provided.

        Parameters
        ----------
        v_old : sequence (h, Phi_n, g_s, g_b) of fields at the previous time level.
        v_new : sequence (h, Phi_n, g_s, g_b) at the current time level / iterate.
        dt : float, optional
            Time step; defaults to ``self._dt``.

        Returns
        -------
        numpy.ndarray
            The flattened (1-D) RHS residual over the (xi, r) grid.
        """
        r,xi = self._r,self._xi
        nr,nxi = len(r),len(xi)
        dr,dxi = r[1],xi[1]
        R,XI = self._R,self._XI
        r_half = self._r_half
        h_ast = self._h_ast
        gamma_ast = self._gamma_ast
        Psi_d = self._Psi_d
        Psi_m = self._Psi_m
        lambda_ast = self._lambda_ast
        h_old,Phi_n_old,g_s_old,g_b_old = v_old
        h_new,Phi_n_new,g_s_new,g_b_new = v_new
        if dt is None:
            dt = self._dt
        # Some extra fields for convenience
        H_old,H_new = (h_old>h_ast),(h_new>h_ast) # Use this to zero out bits where h is too small...
        Hor_old,Hor_new = H_old[2:-1]/r[2:-1],H_new[2:-1]/r[2:-1]
        # Setup the rhs field and initialise on the interior with the difference in the fields
        rhs = np.zeros(Phi_n_new.shape)
        rhs[1:-1,2:-1] = -(Phi_n_new[1:-1,2:-1]-Phi_n_old[1:-1,2:-1]*H_old[np.newaxis,2:-1])
        # Note: the H_old in the above line should ensure that delta_Phi will be 0 where-ever h remains small
        # (although it should be redundant since Phi_n_old should be zero here regardless)
        # Add the forcing term (Crank-Nicolson: average of old and new levels)
        rhs[1:-1,2:-1] += 0.5*dt*( Phi_n_old[1:-1,2:-1]*(H_old*(g_b_old-Psi_d))[np.newaxis,2:-1]\
                                  +Phi_n_new[1:-1,2:-1]*(H_new*(g_b_new-Psi_d))[np.newaxis,2:-1])
        # Setup the vertical velocity factor
        # Note: the second line of each v_z terms do not include r=0 or r=R parts, they are not needed regardless
        # The v_z terms also exclude the 1/h factor...
        fot_new = self._advective_term(r,h_new)
        fot_old = self._advective_term(r,h_old)
        #v_z_old = (1.0+Psi_m)*g_b_old[np.newaxis,:]*(Phi_n_old-XI*(Phi_n_old[-1,:])[np.newaxis,:])\
        #          +gamma_ast/6.0*(2.0*XI+XI**3-3*XI**2)/R*(fot_old)[np.newaxis,:]
        v_z_old = (1.0+Psi_m)*g_b_old[np.newaxis,:]*(Phi_n_old-XI*(Phi_n_old[-1,:])[np.newaxis,:])
        v_z_old[:,1:-1] += gamma_ast/6.0*(2.0*XI+XI**3-3*XI**2)[:,1:-1]/R[:,1:-1]*fot_old[np.newaxis,1:-1]
        #v_z_new = +(1.0+Psi_m)*g_b_new[np.newaxis,:]*(Phi_n_new-XI*(Phi_n_new[-1,:])[np.newaxis,:])\
        #          +gamma_ast/6.0*(2.0*XI+XI**3-3*XI**2)/R*fot_new[np.newaxis,:]
        v_z_new = (1.0+Psi_m)*g_b_new[np.newaxis,:]*(Phi_n_new-XI*(Phi_n_new[-1,:])[np.newaxis,:])
        v_z_new[:,1:-1] += gamma_ast/6.0*(2.0*XI+XI**3-3*XI**2)[:,1:-1]/R[:,1:-1]*fot_new[np.newaxis,1:-1]
        # Add the vertical advection part (note: no flux through the top or bottom)
        # Central difference in xi; the (H/h) factor restores the 1/h omitted from v_z.
        rhs[1:-1,2:-1] -= 0.25*dt/dxi*( v_z_old[1:-1,2:-1]*(Phi_n_old[2:,2:-1]-Phi_n_old[:-2,2:-1])*(H_old/h_old)[np.newaxis,2:-1]\
                                       +v_z_new[1:-1,2:-1]*(Phi_n_new[2:,2:-1]-Phi_n_new[:-2,2:-1])*(H_new/h_new)[np.newaxis,2:-1])
        # Setup the horizontal 'advection' stencil
        # Trapezoidal cumulative integral of Phi_n*(1-xi) in the xi direction.
        Phi_int_dxi_old = np.cumsum(0.5*((Phi_n_old*(1-XI))[1:,:]+(Phi_n_old*(1-XI))[:-1,:]),axis=0)*dxi
        Phi_int_dxi_new = np.cumsum(0.5*((Phi_n_new*(1-XI))[1:,:]+(Phi_n_new*(1-XI))[:-1,:]),axis=0)*dxi
        integral_old = np.empty(Phi_n_old.shape)
        integral_old[0 ,:] = 0
        integral_old[1:,:] = Phi_int_dxi_old
        integral_new = np.empty(Phi_n_new.shape)
        integral_new[0 ,:] = 0
        integral_new[1:,:] = Phi_int_dxi_new
        f_old = (Phi_n_old*(0.5*XI**2-XI)+integral_old)*h_old[np.newaxis,:]**2
        f_new = (Phi_n_new*(0.5*XI**2-XI)+integral_new)*h_new[np.newaxis,:]**2
        if np.isfinite(lambda_ast):
            # Slip-length correction; skipped entirely in the no-slip limit lambda_ast=inf.
            f_old -= Phi_n_old*h_old[np.newaxis,:]/lambda_ast
            f_new -= Phi_n_new*h_new[np.newaxis,:]/lambda_ast
        adv_new = self._advective_term_alt(r,h_new,f_new)
        adv_old = self._advective_term_alt(r,h_old,f_old)
        # Add the horizontal 'advection' part
        rhs[1:-1,2:-1] += 0.5*dt*gamma_ast*( adv_new[1:-1,2:-1]*Hor_new[np.newaxis,:]
                                            +adv_old[1:-1,2:-1]*Hor_old[np.newaxis,:])
        # Set all of the entries relating to boundary conditions
        # Set the RHS corresponding to the \xi=0 boundary condition (delta_Phi+Phi_n_new)=0
        rhs[0,2:-1] = -Phi_n_new[0,2:-1]
        # Set the RHS corresponding to the r=R boundary condition (delta_Phi+Phi_n_new)=0 (since h=b~0 is enforced)
        rhs[:, -1] = -Phi_n_new[:, -1]
        if self._add_top_Phi_bc:
            # Set the RHS corresponding to the \xi=1 boundary condition d^2/dr^2(delta_Phi+Phi_n_new)=0
            # (one-sided second-order stencil in xi)
            rhs[-1,2:-1] = -2*Phi_n_new[-1,2:-1]+5*Phi_n_new[-2,2:-1]-4*Phi_n_new[-3,2:-1]+Phi_n_new[-4,2:-1]
        else:
            # Implement the discretisation of the horizontal advection on the top row instead
            rhs[-1,2:-1] = -(Phi_n_new[-1,2:-1]-Phi_n_old[-1,2:-1]*H_old[2:-1])\
                           +0.5*dt*gamma_ast*(adv_new[-1,2:-1]*Hor_new+adv_old[-1,2:-1]*Hor_old)\
                           +0.5*dt*( Phi_n_old[-1,2:-1]*(H_old*(g_b_old-Psi_d))[2:-1]\
                                    +Phi_n_new[-1,2:-1]*(H_new*(g_b_new-Psi_d))[2:-1])
        # Set the RHS corresponding to the r=0 boundary condition d/dr(delta_Phi+Phi_n_new)=0
        rhs[:, 0] = -3.0*Phi_n_new[:,0]+4.0*Phi_n_new[:,1]-Phi_n_new[:,2]
        if False: # optional, higher order stencil
            rhs[:, 0] = -11*Phi_n_new[:,0]+18*Phi_n_new[:,1]-9*Phi_n_new[:,2]+2*Phi_n_new[:,3]
        if self._use_artificial_dr_bc:
            # Set the RHS corresponding to the introduced r=dr condition Phi(dr)=Phi(0)+0.5*dr^2*Phi''(0)
            rhs[:, 1] = 4*Phi_n_new[:,0]-7*Phi_n_new[:,1]+4*Phi_n_new[:,2]-Phi_n_new[:,3]
        # done
        return rhs.ravel()
def _Phi_n_equation_LHS0(self,v_new,dt=None):
"""
Calculate the LHS matrix block corresponding to the
h dependence in the Phi_n equation.
(Here Phi_n = int_0^{h xi} phi_n dz, the input v_old,v_new must contain Phi_n rather than phi_n)
"""
r,xi = self._r,self._xi
nr,nxi = len(r),len(xi)
dr,dxi = r[1],xi[1]
R,XI = self._R,self._XI
r_half = self._r_half
h_ast = self._h_ast
gamma_ast = self._gamma_ast
Psi_m = self._Psi_m
lambda_ast = self._lambda_ast
h_new,Phi_n_new,g_s_new,g_b_new = v_new
if dt is None:
dt = self._dt
# Note this block has rectangular shape
# Setup some index arrays for constructing the matrix in coo format
r_i,xi_i = np.meshgrid(range(nr),range(nxi))
indices = xi_i*nr+r_i
row,col,dat = [],[],[]
H = (h_new>h_ast) # Use this to zero out bits where h is too small...
Hor = H[1:-1]/r[1:-1]
# Setup the vertical advection components first
# Do the easier part first
fot_new = self._advective_term(r,h_new)
#v_z_new = +(1.0+Psi_m)*g_b_new[np.newaxis,:]*(Phi_n_new-XI*(Phi_n_new[-1,:])[np.newaxis,:])\
# +gamma_ast/6.0*(2.0*XI+XI**3-3*XI**2)/R*fot_new[np.newaxis,:]
v_z_new = (1.0+Psi_m)*g_b_new[np.newaxis,:]*(Phi_n_new-XI*(Phi_n_new[-1,:])[np.newaxis,:])
v_z_new[:,1:-1] += gamma_ast/6.0*(2.0*XI+XI**3-3*XI**2)[:,1:-1]/R[:,1:-1]*fot_new[np.newaxis,1:-1]
xi_adv_term1 = -0.25*dt/dxi*v_z_new[1:-1,:]*(Phi_n_new[2:,:]-Phi_n_new[:-2,:])*(H/h_new**2)[np.newaxis,:]
row.append(indices[1:-1,1:-1].ravel())
col.append(r_i[1:-1,1:-1].ravel())
dat.append(xi_adv_term1[:,1:-1].ravel())
if self._use_artificial_dr_bc:
#xi_adv_term1[:,1] = 0 # Need to modify this in conjunction with the 'artificial' BC at r=dr
row[-1] = indices[1:-1,2:-1].ravel()
col[-1] = r_i[1:-1,2:-1].ravel()
dat[-1] = xi_adv_term1[:,2:-1].ravel()
# Now the more difficult/involved part...
# First get diagonals relating to the fourth order h term
diagonals_h3,offsets_h3 = self._advective_term_h_gradient(r,h_new,3)
if self._use_artificial_dr_bc:
# Need to modify diagonals in conjunction with the 'artificial' BC | |
<filename>code_mod1.py
# Model I
# Version 23rd July 2015
# The python code solves Model I for the optimization of a transit line
# and for the strategic assessment of mode choice, see:
# <NAME>, and <NAME>, 'Improved models for technology choice in a transit corridor with fixed demand', 2015
# For comments, please write to:
# <EMAIL>
# Model from Tirachini et al 2010 published in TRE
# Demand fixed
# Minimization of total cost defined as the sum of user and agency costs
# frequency is the only variable of this objective function
# Import formulae, and code shared by all models
from common_code import *
# Import parameters shared by all models
from common_parameters import *
# Computed cost parameters for Model I
# Annual operating hours used to annualise capital costs into $/h figures.
hours_per_year = 2947.0
# c0_m: fixed infrastructure cost per hour for each mode ($/h).
c0_m = [infra_fixed_cost_per_hour(m, hours_per_year, L, width_m, land_cost_per_hectare, \
        infracost_m, infralife_m, inframaint_m, discount_rate) \
        for m in range(num_modes)]
# c1_m: cost per vehicle-hour, rolling-stock capital plus time-based operating cost ($/veh-h).
c1_m = [nfix_m[m] * rolling_stock_cost_per_hour(m, hours_per_year, vehcost_m, vehlife_m, discount_rate, \
        res_value_rs) + c1t_m[m] \
        for m in range(num_modes)]
#Old values from Tirachini et al 2010
#c0_m = [ 0.0, 9489.0,14866.0, 24910.0]# Unit fixed operator cost per hour ($/h)
#c1_m = [54.0, 63.0, 164.0, 354.0]# Unit operator cost per vehicle-hour ($/veh-h)
# R_m: round-trip running time 2L/S for each mode (h).
R_m = [2 * L / S_m[m] for m in range(num_modes)] # Running time at speed S (h)
def Cu(Pa, Pw, Pv, d, v, y, tw, mu, epsilon, f, l, L, beta, R):
    # Total user cost: access + waiting + in-vehicle time.
    # Note: tw and mu are accepted for interface compatibility but not used
    # here; waiting-time parameters (t01, t02, t11, t12, fbar) are read from
    # module-level globals via Cost_waiting.
    access_cost = (Pa * d * y) / (2.0 * v)
    waiting_cost = Cost_waiting(f, y, t01, t02, t11, t12, fbar, Pw, epsilon)
    cycle_time = (y / f) * beta / 3600.0 + R
    in_vehicle_cost = Pv * l / (2.0 * L) * cycle_time * y
    return access_cost + waiting_cost + in_vehicle_cost
def Co(c0, c1, y, beta, R, f, c2, L):
    # Agency (operator) cost: fixed cost + vehicle-hour cost + vehicle-km cost.
    vehicle_hours = y * beta / 3600.0 + R * f
    distance_cost = 2.0 * c2 * L * f
    return c0 + c1 * vehicle_hours + distance_cost
def Ctot(Pa, Pw, Pv, d, v, y, tw, mu, epsilon, f, l, L, beta, R, c0, c1, c2):
    # Total cost: user cost plus agency cost.
    user_cost = Cu(Pa, Pw, Pv, d, v, y, tw, mu, epsilon, f, l, L, beta, R)
    agency_cost = Co(c0, c1, y, beta, R, f, c2, L)
    return user_cost + agency_cost
def Cop_of_f(f):
    # Operator cost as a function of f.
    # NOTE: reads module-level globals (c0, c1, y, beta, R, c2, L) that are
    # rebound in the mode/demand loops below before each evaluation.
    return Co(c0, c1, y, beta, R, f, c2, L)
def Cuser_of_f(f):
    # User cost as a function of f.
    # NOTE: reads module-level globals rebound in the mode/demand loops below.
    return Cu(Pa, Pw, Pv, d, v, y, tw, mu, epsilon, f, l, L, beta, R)
def Ctot_of_f(f):
    # Total cost as a function of f (user + operator).
    # NOTE: reads module-level globals rebound in the mode/demand loops below.
    return Ctot(Pa, Pw, Pv, d, v, y, tw, mu, epsilon, f, l, L, beta, R, c0, c1, c2)
def fmin_closed(fmin, fmax):
    # Compute optimal frequency by closed form, clamped to [fmin, fmax].
    # Relies on module-level globals (Pw, Pv, l, y, beta, L, epsilon, c1, R,
    # c2, fbar, t01, t02, t11, t12) being set before the call.
    def t_param_of_f(f):
        # Waiting-time parameters switch at the threshold frequency fbar.
        if (f >= fbar):
            t0 = t02
            t1 = t12
        else:
            t0 = t01
            t1 = t11
        return t0, t1
    def my_fopt():
        # Compute fopt with unpacked t1 value.
        # NOTE: closes over ``t1`` assigned in the enclosing scope just before
        # each call; ``t0`` is unpacked alongside but unused by this formula.
        from math import sqrt
        return sqrt((Pw * t1 * epsilon * y + Pv * l * y ** 2 * beta / (3600.0 * 2.0 * L))
                    / (c1 * R + 2.0 * c2 * L))
    #Case 1, if fmin >= fbar then only t02 and t12 are relevant
    if (fmin >= fbar):
        t0, t1 = t_param_of_f(fmin)
        fopt = my_fopt()
        if fopt <= fmin:
            fopt = fmin
        elif fopt > fmax:
            fopt = fmax
    else:
        # First compute minimum for case t01 and t11
        t0, t1 = t_param_of_f(fmin)
        fopt1 = my_fopt()
        if fopt1 < fmin:
            fopt1 = fmin
        elif fopt1 > fmax:
            fopt1 = fmax
        if fopt1 > fbar:
            # Keep candidate 1 strictly inside the f < fbar regime.
            fopt1 = fbar - 0.01
        Copt1 = Ctot_of_f(fopt1)
        # Now compute minimum for case t02 and t12
        t0, t1 = t_param_of_f(fbar)
        fopt2 = my_fopt()
        if fopt2 < fmin:
            fopt2 = fmin
        elif fopt2 > fmax:
            fopt2 = fmax
        if fopt2 < fbar:
            # NOTE(review): if fmax < fbar this pushes fopt2 above fmax --
            # presumably fmax >= fbar always holds; confirm against callers.
            fopt2 = fbar
        Copt2 = Ctot_of_f(fopt2)
        # Determine fopt as the cheaper of the two regime candidates
        if Copt1 < Copt2:
            fopt = fopt1
        else:
            fopt = fopt2
    return fopt
def unpack_mode_specific_par(mode):
    # Return the mode-specific parameters (c0, c1, c2, beta, d, R) for ``mode``.
    per_mode_tables = (c0_m, c1_m, c2_m, beta_m, d_m, R_m)
    return tuple(table[mode] for table in per_mode_tables)
#Print to stdout a latex table of the mode-specific parameters
from tabulate import tabulate
headers = ['Parameter', 'Unit'] + mode_label
# One row per parameter; per-mode values formatted to an appropriate precision.
row1 = ['c_0', '$/h'] + ["%.0f" % c0_m[m] for m in range(num_modes)]
row2 = ['c_1', '$/h'] + ["%.1f" % c1_m[m] for m in range(num_modes)]
row3 = ['c_2', '$/h'] + ["%.2f" % c2_m[m] for m in range(num_modes)]
row4 = ['d', 'km'] + ["%.1f" % d_m[m] for m in range(num_modes)]
row5 = ['fmax', 'TU/h'] +["%.0f" % fmax_m[m] for m in range(num_modes)]
row6 = ['K', 'pax/TU'] + ["%.0f" % K_m[m] for m in range(num_modes)]
row7 = ['S', 'km/h'] + ["%.0f" % S_m[m] for m in range(num_modes)]
row8 = ['beta', 's/pax'] + ["%.2f" % beta_m[m] for m in range(num_modes)]
table = [row1, row2, row3, row4, row5, row6, row7, row8]
print 'The following is a table of the mode-specific parameters (almost) ready for latex (math mode must be corrected)'
print tabulate(table, headers=headers, tablefmt="latex")
# COMPUTE RESULTS
import numpy as np
from scipy.optimize import fminbound
# Per-mode result containers; one array per mode, filled over the demand range.
max_demand = np.empty(num_modes)
y_range = [np.array([]) for m in range(num_modes)]
min_f_range = [np.array([]) for m in range(num_modes)]
min_f_range2 = [np.array([]) for m in range(num_modes)]
f_min_range = [np.array([]) for m in range(num_modes)]
min_avg_op_cost = [np.array([]) for m in range(num_modes)]
min_avg_user_cost = [np.array([]) for m in range(num_modes)]
min_avg_tot_cost = [np.array([]) for m in range(num_modes)]
for m in range(num_modes):
    # Capacity-limited maximum demand for this mode, capped at the study range.
    max_demand[m] = K_m[m] * fmax_m[m] * nu / alpha
    if max_demand[m] > max_demand_studied:
        max_demand[m] = max_demand_studied
    y_range[m] = np.arange(min_demand_studied, max_demand[m], step=step_demand)
    min_f_range[m] = np.empty_like(y_range[m])
    min_f_range2[m] = np.empty_like(y_range[m])
    f_min_range[m] = np.empty_like(y_range[m])
    min_avg_tot_cost[m] = np.empty_like(y_range[m])
    min_avg_op_cost[m] = np.empty_like(y_range[m])
    min_avg_user_cost[m]= np.empty_like(y_range[m])
    fmax = fmax_m[m]
    # Rebind the module-level globals read by Cop_of_f/Cuser_of_f/Ctot_of_f.
    c0, c1, c2, beta, d, R = unpack_mode_specific_par(m)
    for i, y in enumerate(y_range[m]):
        fmin = alpha * y /(nu * K_m[m]) # Minimal frequency
        min_f_range[m][i] = fmin_closed(fmin, fmax)
        # Double check the closed form against a numerical bounded minimiser
        min_f_range2[m][i] = fminbound(Ctot_of_f, x1=fmin, x2=fmax)
        # NOTE(review): import inside the inner loop; harmless (module caching)
        # but conventionally belongs at the top of the file.
        from math import fabs
        if fabs(min_f_range[m][i] - min_f_range2[m][i]) > 0.0001:
            print min_f_range[m][i], min_f_range2[m][i]
        # Average (per-passenger) costs at the optimal frequency.
        min_avg_tot_cost[m][i] = Ctot_of_f(min_f_range[m][i]) / y
        min_avg_op_cost[m][i] = Cop_of_f(min_f_range[m][i]) / y
        min_avg_user_cost[m][i] = Cuser_of_f(min_f_range[m][i]) / y
        f_min_range[m][i] = fmin
# PLOTS
import matplotlib.pyplot as plt
# Print points of total cost intersection between modes
print_itersection_points(y_range, min_avg_tot_cost, mode_label)
# A plot with all 4 figures (2x2 grid: frequency, operator, user, total cost)
fig, axes = plt.subplots(2,2)
fig.set_size_inches(9,9)
i = 0
from math import floor
while (i < 4):
    # Map the flat panel index i to (row ix, column iy) of the 2x2 grid.
    ix = int(floor(i/2))
    iy = i - ix * 2
    axes[ix][iy].set_xlabel('Demand (pax/h)')
    axes[ix][iy].grid(ls=':', lw=0.5)
    # Uniform small font for titles, axis labels and tick labels.
    for item in ([axes[ix][iy].title, axes[ix][iy].xaxis.label, axes[ix][iy].yaxis.label] +
                 axes[ix][iy].get_xticklabels() + axes[ix][iy].get_yticklabels()):
        item.set_fontsize(10)
    # One curve per mode on each panel.
    for m in range(num_modes):
        if i == 0:
            axes[ix][iy].plot(y_range[m], min_f_range[m], ls=linestyle[m], lw=3,
                              color=colors[m], label=mode_label[m])
            axes[ix][iy].set_ylabel('Frequency (veh/h)')
            axes[ix][iy].set_title('a) Optimal frequency')
        elif i == 1:
            axes[ix][iy].plot(y_range[m], min_avg_op_cost[m], ls=linestyle[m], lw=3,
                              color=colors[m], label=mode_label[m])
            axes[ix][iy].set_ylabel('Avg operator cost ($/pax)')
            axes[ix][iy].set_title('b) Average operator cost')
        elif i == 2:
            axes[ix][iy].plot(y_range[m], min_avg_user_cost[m], ls=linestyle[m], lw=3,
                              color=colors[m], label=mode_label[m])
            axes[ix][iy].set_ylabel('Avg user cost ($/pax)')
            axes[ix][iy].set_title('c) Average user cost')
        elif i == 3:
            axes[ix][iy].plot(y_range[m], min_avg_tot_cost[m], ls=linestyle[m], lw=3,
                              color=colors[m], label=mode_label[m])
            axes[ix][iy].set_ylabel('Avg total cost ($/pax)')
            axes[ix][iy].set_title('d) Average total cost')
        axes[ix][iy].legend(loc='upper right')
    i += 1
name_outputfile0 = 'mod1_all_plots.pdf'
plt.savefig(name_outputfile0)
plt.close()
# Individual single-panel figures, prefixed with the model id.
model_id = 'fig_M1_'
name_outputfile1 = model_id + 'freq.pdf'
plot_frequency(y_range, min_f_range, f_min_range, 'Demand (pax/h)', 'Frequency (TU/h)', name_outputfile1, linestyle, colors, mode_label)
name_outputfile2 = model_id + 'tot_cost.pdf'
plot_single(y_range, min_avg_tot_cost, 'Demand (pax/h)', 'Average total cost ($/pax)', name_outputfile2, linestyle, colors, mode_label)
name_outputfile3 = model_id + 'op_cost.pdf'
plot_single(y_range, min_avg_op_cost, 'Demand (pax/h)', 'Average operator cost ($/pax)', name_outputfile3, linestyle, colors, mode_label)
name_outputfile4 = model_id + 'user_cost.pdf'
plot_single(y_range, min_avg_user_cost, 'Demand (pax/h)', 'Average user cost ($/pax)', name_outputfile4, linestyle, colors, mode_label)
# A figure with all plots related to the economical aspects
name_outputfile0e = model_id + 'all_eco_plots.pdf'
plot_three_economics(y_range, min_avg_op_cost, min_avg_user_cost, min_avg_tot_cost, name_outputfile0e, linestyle, colors, mode_label)
# If MAC OS open the pdf output files
import platform
if platform.system() == 'Darwin':
    import os
    command = 'open -a Preview.app ' + name_outputfile1 + ' ' + name_outputfile2 \
        + ' ' + name_outputfile3 + ' ' + name_outputfile4 + ' ' + name_outputfile0 \
        + ' ' + name_outputfile0e
    os.system(command)
# Check specific values for a mode and a level y
# m = 1
# c0, c1, c2, beta, d, R = unpack_mode_specific_par(m)
# y = 10000.0
# print 'Check specific values for mode ', m, ' and demand ', y, ' (pax/h)'
# fmin = alpha * y /(nu | |
# -*- coding: utf-8 -*-
# Copyright 2016 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ensure the expected functioning of ``memote.support.basic``."""
from __future__ import absolute_import
import cobra
import pytest
import memote.support.basic as basic
import memote.support.helpers as helpers
from memote.utils import register_with
# Registry mapping fixture names to model-builder functions; populated by the
# ``register_with`` decorator applied to each builder below.
MODEL_REGISTRY = dict()
@register_with(MODEL_REGISTRY)
def three_missing(base):
    """Provide a model with three metabolites lacking formula and charge."""
    new_mets = [cobra.Metabolite(id="M{0:d}".format(num)) for num in range(1, 4)]
    base.add_metabolites(new_mets)
    return base
@register_with(MODEL_REGISTRY)
def three_present(base):
    """Provide a model with three metabolites that have formula and charge."""
    new_mets = []
    for num in range(1, 4):
        new_mets.append(
            cobra.Metabolite(id="M{0:d}".format(num), formula="CH4", charge=-1))
    base.add_metabolites(new_mets)
    return base
@register_with(MODEL_REGISTRY)
def gpr_present(base):
    """Provide a model in which every reaction carries a GPR rule."""
    reaction = cobra.Reaction("RXN1")
    reaction.gene_reaction_rule = 'gene1 or gene2'
    reaction.add_metabolites(
        {cobra.Metabolite("met1"): 1, cobra.Metabolite("met2"): -1})
    base.add_reactions([reaction])
    return base
@register_with(MODEL_REGISTRY)
def gpr_present_complex(base):
    """Provide a model whose reactions all have (partly nested) GPR rules."""
    rule_by_id = [
        ("RXN1", 'gene1 and gene2'),
        ("RXN2", '(gene4 and gene7) or (gene9 and (gene10 or gene14))'),
        ("RXN3", 'gene1 and gene2'),
    ]
    reactions = []
    for rxn_id, rule in rule_by_id:
        rxn = cobra.Reaction(rxn_id)
        rxn.gene_reaction_rule = rule
        reactions.append(rxn)
    base.add_reactions(reactions)
    return base
@register_with(MODEL_REGISTRY)
def gpr_missing(base):
    """Provide a model with a reaction that lacks a GPR rule."""
    reaction = cobra.Reaction("RXN1")
    reaction.add_metabolites(
        {cobra.Metabolite("met1"): 1, cobra.Metabolite("met2"): -1})
    base.add_reactions([reaction])
    return base
@register_with(MODEL_REGISTRY)
def gpr_missing_with_exchange(base):
    """Provide a model with a GPR-less reaction plus an exchange reaction."""
    rxn_1 = cobra.Reaction("RXN1")
    met_1 = cobra.Metabolite("met1")
    met_2 = cobra.Metabolite("met2")
    rxn_1.add_metabolites({met_1: 1, met_2: -1})
    rxn_2 = cobra.Reaction("EX_met1_c")
    # NOTE(review): this rebinds ``met_1`` to a *second* Metabolite object with
    # the same id "met1" -- presumably reconciled by id when the reactions are
    # added; confirm the duplication is intentional.
    met_1 = cobra.Metabolite("met1")
    rxn_2.add_metabolites({met_1: 1})
    base.add_reactions([rxn_1, rxn_2])
    return base
@register_with(MODEL_REGISTRY)
def gpr_present_not_lumped(base):
    """Provide a model whose sole reaction has a single-gene GPR rule."""
    reaction = cobra.Reaction("RXN1")
    reaction.gene_reaction_rule = 'gene1'
    reaction.add_metabolites(
        {cobra.Metabolite("met1"): 1, cobra.Metabolite("met2"): -1})
    base.add_reactions([reaction])
    return base
@register_with(MODEL_REGISTRY)
def unconstrained_rxn(base):
    """Provide a model with one fully reversible (unconstrained) reaction."""
    reaction = cobra.Reaction("RXN1")
    reaction.add_metabolites(
        {cobra.Metabolite("met1"): 1, cobra.Metabolite("met2"): -1})
    reaction.bounds = -1000, 1000
    base.add_reactions([reaction])
    return base
@register_with(MODEL_REGISTRY)
def irreversible_rxn(base):
    """Provide a model with one irreversible (forward-only) reaction."""
    reaction = cobra.Reaction("RXN1")
    reaction.add_metabolites(
        {cobra.Metabolite("met1"): 1, cobra.Metabolite("met2"): -1})
    reaction.bounds = 0, 1000
    base.add_reactions([reaction])
    return base
@register_with(MODEL_REGISTRY)
def zero_constrained_rxn(base):
    """Provide a model with one reaction blocked by zero bounds."""
    reaction = cobra.Reaction("RXN1")
    reaction.add_metabolites(
        {cobra.Metabolite("met1"): 1, cobra.Metabolite("met2"): -1})
    reaction.bounds = 0, 0
    base.add_reactions([reaction])
    return base
@register_with(MODEL_REGISTRY)
def nonzero_constrained_rxn(base):
    """Provide a model in which one reaction has a nonzero-constrained bound."""
    mets = [cobra.Metabolite("met{0:d}".format(num)) for num in range(1, 5)]
    rxn_1 = cobra.Reaction("RXN1")
    rxn_1.add_metabolites({mets[0]: 1, mets[1]: -1})
    rxn_2 = cobra.Reaction("RXN2")
    rxn_2.add_metabolites({mets[1]: -1, mets[2]: -1})
    rxn_3 = cobra.Reaction("RXN3")
    rxn_3.add_metabolites({mets[2]: -1, mets[3]: 1})
    rxn_1.bounds = -1000, 1000
    rxn_2.bounds = -1000, 1000
    # The distinguishing feature: an upper bound below the default of 1000.
    rxn_3.bounds = 0, 10
    base.add_reactions([rxn_1, rxn_2, rxn_3])
    return base
@register_with(MODEL_REGISTRY)
def no_nonzero_constrained_rxn(base):
    """Provide a model with no nonzero-constrained reactions."""
    mets = [cobra.Metabolite("met{0:d}".format(num)) for num in range(1, 5)]
    rxn_1 = cobra.Reaction("RXN1")
    rxn_1.add_metabolites({mets[0]: 1, mets[1]: -1})
    rxn_2 = cobra.Reaction("RXN2")
    rxn_2.add_metabolites({mets[1]: -1, mets[2]: -1})
    rxn_3 = cobra.Reaction("RXN3")
    rxn_3.add_metabolites({mets[2]: -1, mets[3]: 1})
    # All bounds are at the defaults, so nothing is specially constrained.
    rxn_1.bounds = -1000, 1000
    rxn_2.bounds = -1000, 1000
    rxn_3.bounds = 0, 1000
    base.add_reactions([rxn_1, rxn_2, rxn_3])
    return base
@register_with(MODEL_REGISTRY)
def ngam_present(base):
    """Provide a model with a correctly set up NGAM (ATP maintenance) reaction."""
    atp = cobra.Metabolite("atp_c", "C10H12N5O13P3", compartment="c")
    adp = cobra.Metabolite("adp_c", "C10H12N5O10P2", compartment="c")
    proton = cobra.Metabolite("h_c", "H", compartment="c")
    water = cobra.Metabolite("h2o_c", "H2O", compartment="c")
    phosphate = cobra.Metabolite("pi_c", "HO4P", compartment="c")
    atpm = cobra.Reaction("ATPM", name="non-growth associated maintenance")
    atpm.add_metabolites(
        {atp: -1, water: -1, adp: 1, proton: 1, phosphate: 1})
    atpm.lower_bound = 8.39
    base.add_reactions([atpm])
    return base
@register_with(MODEL_REGISTRY)
def ngam_and_atpsynthase(base):
    """Provide a model with both an ATP hydrolysis and an NGAM reaction."""
    atp = cobra.Metabolite("atp_c", "C10H12N5O13P3", compartment="c")
    adp = cobra.Metabolite("adp_c", "C10H12N5O10P2", compartment="c")
    proton_ext = cobra.Metabolite("h_e", "H", compartment="e")
    water = cobra.Metabolite("h2o_c", "H2O", compartment="c")
    phosphate = cobra.Metabolite("pi_c", "HO4P", compartment="c")
    proton_cyt = cobra.Metabolite("h_c", "H", compartment="c")
    # Reversible ATPase translocating the proton to the external compartment.
    atpase = cobra.Reaction("ATPS", name="ATPase cytosolic")
    atpase.add_metabolites(
        {atp: -1, water: -1, adp: 1, proton_ext: 1, phosphate: 1})
    atpase.bounds = -1000, 1000
    # Irreversible maintenance reaction releasing the proton in the cytosol.
    ngam = cobra.Reaction("NGAM", name="non-growth associated maintenance")
    ngam.add_metabolites(
        {atp: -1, water: -1, adp: 1, proton_cyt: 1, phosphate: 1})
    ngam.bounds = 0, 1000
    base.add_reactions([atpase, ngam])
    return base
@register_with(MODEL_REGISTRY)
def sufficient_compartments(base):
    """Provide a model covering the minimal set of compartments (c, p, e)."""
    cytosolic = cobra.Metabolite("a_c", compartment="c")
    periplasmic = cobra.Metabolite("a_p", compartment="p")
    extracellular = cobra.Metabolite("a_e", compartment="e")
    transfer_c_p = cobra.Reaction("AB")
    transfer_c_p.add_metabolites({cytosolic: 1, periplasmic: -1})
    transfer_p_e = cobra.Reaction("BC")
    transfer_p_e.add_metabolites({periplasmic: 1, extracellular: -1})
    base.add_reactions([transfer_p_e, transfer_c_p])
    return base
@register_with(MODEL_REGISTRY)
def insufficient_compartments(base):
    """Provide a model with fewer than the minimal number of compartments."""
    cytosolic = cobra.Metabolite("a_c", compartment="c")
    extracellular = cobra.Metabolite("a_e", compartment="e")
    transfer = cobra.Reaction("AC")
    transfer.add_metabolites({cytosolic: 1, extracellular: -1})
    base.add_reactions([transfer])
    return base
@register_with(MODEL_REGISTRY)
def non_metabolic_reactions(base):
    """Provide a model with several kinds of not-purely-metabolic reactions."""
    met_in = cobra.Metabolite("a_c", formula='CH4', compartment="c")
    met_out = cobra.Metabolite("a_e", formula='CH4', compartment="e")
    transport = cobra.Reaction("AC")
    transport.add_metabolites({met_in: 1, met_out: -1})
    biomass = cobra.Reaction("BIOMASS")  # pseudo-reaction without metabolites
    exchange = cobra.Reaction("EX_a_e")
    exchange.add_metabolites({met_out: -1})
    base.add_reactions([transport, biomass, exchange])
    return base
@register_with(MODEL_REGISTRY)
def transport_gpr(base):
    """Provide a model with one transport reaction (SYM) lacking a GPR rule."""
    co2_c = cobra.Metabolite("co2_c", formula='CO2', compartment="c")
    co2_e = cobra.Metabolite("co2_e", formula='CO2', compartment="e")
    na_c = cobra.Metabolite("na_c", formula='Na', compartment="c")
    na_e = cobra.Metabolite("na_e", formula='Na', compartment="e")
    uni = cobra.Reaction("UNI")
    uni.gene_reaction_rule = "X and Y"
    uni.add_metabolites({co2_c: 1, co2_e: -1})
    anti = cobra.Reaction("ANTI")
    anti.gene_reaction_rule = "W or V"
    anti.add_metabolites({co2_c: 1, na_e: 1, co2_e: -1, na_c: -1})
    sym = cobra.Reaction("SYM")  # deliberately left without a GPR rule
    sym.add_metabolites({co2_c: 1, na_c: 1, co2_e: -1, na_e: -1})
    base.add_reactions([uni, anti, sym])
    return base
@register_with(MODEL_REGISTRY)
def transport_gpr_constrained(base):
    """Provide a model with a constrained, GPR-less transport reaction (SYM)."""
    co2_c = cobra.Metabolite("co2_c", formula='CO2', compartment="c")
    co2_e = cobra.Metabolite("co2_e", formula='CO2', compartment="e")
    na_c = cobra.Metabolite("na_c", formula='Na', compartment="c")
    na_e = cobra.Metabolite("na_e", formula='Na', compartment="e")
    uni = cobra.Reaction("UNI")
    uni.gene_reaction_rule = "X and Y"
    uni.add_metabolites({co2_c: 1, co2_e: -1})
    anti = cobra.Reaction("ANTI")
    anti.gene_reaction_rule = "W or V"
    anti.add_metabolites({co2_c: 1, na_e: 1, co2_e: -1, na_c: -1})
    sym = cobra.Reaction("SYM")  # no GPR, and constrained via the lower bound
    sym.add_metabolites({co2_c: 1, na_c: 1, co2_e: -1, na_e: -1})
    sym.lower_bound = 8.39
    base.add_reactions([uni, anti, sym])
    return base
@register_with(MODEL_REGISTRY)
def reversible_oxygen_flow(base):
    """Provide a model with a reversible oxygen-containing reaction."""
    o2s_e = cobra.Metabolite("o2s_e", formula="O2", compartment="e")
    o2s_p = cobra.Metabolite("o2s_p", formula="O2", compartment="p")
    diffusion = cobra.Reaction("O2Stex")
    diffusion.add_metabolites({o2s_e: -1, o2s_p: 1})
    diffusion.bounds = -1000, 1000
    base.add_reactions([diffusion])
    return base
@register_with(MODEL_REGISTRY)
def non_reversible_oxygen_flow(base):
    """Provide a model with a non-reversible oxygen-containing reaction."""
    met_a = cobra.Metabolite("o2s_c", formula="O2", compartment="c")
    met_b = cobra.Metabolite("o2_c", formula="O2", compartment="c")
    met_c = cobra.Metabolite("h2o2_c", formula='H2O2', compartment="c")
    met_i = cobra.Metabolite("h_c", "H", compartment="c")
    rxn = cobra.Reaction("SPODM")
    rxn.add_metabolites({met_a: -2, met_b: 1, met_c: 1, met_i: -2})
    rxn.lower_bound = 0
    # NOTE(review): an upper bound of 0 blocks the reaction entirely rather
    # than making it merely irreversible -- confirm 0 (vs e.g. 1000) is intended.
    rxn.upper_bound = 0
    base.add_reactions([rxn])
    return base
@register_with(MODEL_REGISTRY)
def dup_mets_in_c(base):
    """Provide a model with duplicate metabolites in the same compartment."""
    met_a = cobra.Metabolite("a_c", compartment="c")
    dup_a = cobra.Metabolite("x_c", compartment="c")
    not_a = cobra.Metabolite("b_c", compartment="c")
    # Identical annotations mark ``met_a`` and ``dup_a`` as the same compound.
    for met, inchikey, kegg in ((met_a, "1231", "123"),
                                (dup_a, "1231", "123"),
                                (not_a, "3211", "321")):
        met.annotation["inchikey"] = inchikey
        met.annotation["kegg"] = kegg
    met_b = cobra.Metabolite("a_p", compartment="p")
    met_c = cobra.Metabolite("a_e", compartment="e")
    rxn_a_b = cobra.Reaction("AB")
    rxn_a_b.add_metabolites({dup_a: 1, met_a: 1, met_b: -1})
    rxn_b_c = cobra.Reaction("BC")
    rxn_b_c.add_metabolites({not_a: 1, met_b: 1, met_c: -1})
    base.add_reactions([rxn_b_c, rxn_a_b])
    return base
@register_with(MODEL_REGISTRY)
def dup_mets_in_c_wrong_annotation(base):
    """Provide a model like ``dup_mets_in_c`` but with improper annotations."""
    met_a = cobra.Metabolite("a_c", compartment="c")
    dup_a = cobra.Metabolite("x_c", compartment="c")
    not_a = cobra.Metabolite("b_c", compartment="c")
    # Only KEGG ids are set (no InChIKey), i.e. the "improper" annotation level.
    for met, kegg in ((met_a, "123"), (dup_a, "123"), (not_a, "321")):
        met.annotation["kegg"] = kegg
    met_b = cobra.Metabolite("a_p", compartment="p")
    met_c = cobra.Metabolite("a_e", compartment="e")
    rxn_a_b = cobra.Reaction("AB")
    rxn_a_b.add_metabolites({dup_a: 1, met_a: 1, met_b: -1})
    rxn_b_c = cobra.Reaction("BC")
    rxn_b_c.add_metabolites({not_a: 1, met_b: 1, met_c: -1})
    base.add_reactions([rxn_b_c, rxn_a_b])
    return base
@register_with(MODEL_REGISTRY)
def dup_rxns(base):
    """Provide a model containing a pair of duplicate reactions."""
    met_a = cobra.Metabolite("a_c", compartment="c")
    met_b = cobra.Metabolite("b_c", compartment="c")
    met_a.annotation["inchikey"] = "123"
    met_b.annotation["inchikey"] = "456"
    original = cobra.Reaction("rxn1")
    duplicate = cobra.Reaction("dup1")
    # Same annotation and same stoichiometry mark the pair as duplicates.
    for rxn in (original, duplicate):
        rxn.annotation["kegg.reaction"] = "HEX"
        rxn.add_metabolites({met_a: -1, met_b: 1})
    base.add_reactions([original, duplicate])
    return base
@register_with(MODEL_REGISTRY)
def dup_rxns_multiple_anns(base):
    """Provide a model like ``dup_rxns`` but with two annotations per reaction."""
    met_a = cobra.Metabolite("a_c", compartment="c")
    met_b = cobra.Metabolite("b_c", compartment="c")
    original = cobra.Reaction("rxn1")
    duplicate = cobra.Reaction("dup1")
    # Both annotation namespaces agree between the two reactions.
    for rxn in (original, duplicate):
        rxn.annotation["kegg.reaction"] = "HEX"
        rxn.annotation["metanetx.reaction"] = "MNXR1"
        rxn.add_metabolites({met_a: -1, met_b: 1})
    base.add_reactions([original, duplicate])
    return base
@register_with(MODEL_REGISTRY)
def dup_rxns_partial_matching_multiple_anns(base):
    """Provide a model like ``dup_rxns_multiple_anns`` but with partial matches."""
    met_a = cobra.Metabolite("a_c", compartment="c")
    met_b = cobra.Metabolite("b_c", compartment="c")
    original = cobra.Reaction("rxn1")
    duplicate = cobra.Reaction("dup1")
    original.annotation["kegg.reaction"] = "HEX"
    original.annotation["metanetx.reaction"] = "MNXR1"
    duplicate.annotation["kegg.reaction"] = "HEX"
    # Only the KEGG annotation matches; the MetaNetX ids differ.
    duplicate.annotation["metanetx.reaction"] = "MNXR2"
    original.add_metabolites({met_a: -1, met_b: 1})
    duplicate.add_metabolites({met_a: -1, met_b: 1})
    base.add_reactions([original, duplicate])
    return base
@register_with(MODEL_REGISTRY)
def dup_rxns_no_matching_multiple_anns(base):
"""Provide a model like `dup_rxns_multiple_anns` but with no matches"""
met_a = cobra.Metabolite("a_c", compartment="c")
met_b = cobra.Metabolite("b_c", | |
# -*- coding: utf-8 -*-
"""Siamese Network for performing training of a Deep Convolutional
Network for Face Verification on the Olivetti and LFW Faces datasets.
Dependencies:
python 3.4+, numpy>=1.10.4, sklearn>=0.17, scipy>=0.17.0, theano>=0.7.0, lasagne>=0.1, cv2, dlib>=18.18 (only required if using the 'trees' crop mode).
Part of the package siamese_net:
siamese_net/
siamese_net/faces.py
siamese_net/datasets.py
siamese_net/normalization.py
siamese_net/siamese_net.py
Copyright 2016 Kadenze, Inc.
Kadenze(R) and Kannu(R) are Registered Trademarks of Kadenze, Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
"""
import sys
import pickle
import os
# base_compiledir = os.path.expandvars("$HOME/.theano/slot-%d" % (os.getpid()))
# os.environ['THEANO_FLAGS'] = "base_compiledir=%s" % base_compiledir
import matplotlib.pyplot as plt
import numpy as np
import theano
import theano.tensor as T
import time
import lasagne
# For training the final output network
from sklearn import cross_validation
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
# Custom code for parsing datasets and normalizing images
from datasets import Datasets
from normalization import LCN, ZCA
# plt.style.use('ggplot')
# Use single-precision floats for all Theano computation.
theano.config.floatX = 'float32'
def montage(x):
    """Tile a 4-D batch of images into a single 2-D montage image.

    Two layouts are supported, selected from the shape of ``x``:

    * ``x.shape[1]`` is 1 or 3: ``x`` is treated as a batch of
      channel-second images; a montage of shape
      (n * x.shape[3], n * x.shape[2], x.shape[1]) is produced.
    * otherwise: ``x.shape[1]`` is treated as the number of (grayscale)
      images, e.g. convolution filters, and a 2-D montage is produced.

    Each tile is normalized by its own maximum (plus 1e-15 to avoid
    division by zero).

    NOTE(review): the inner branch switches on ``x.shape[0] == 1`` while
    ``num_img`` may come from ``x.shape[0]`` or ``x.shape[1]`` depending
    on the outer branch; the slice starts also use ``x.shape[3]`` with
    lengths from ``x.shape[2]`` (only consistent for square images).
    Confirm intended input shapes against callers.
    """
    if x.shape[1] == 1 or x.shape[1] == 3:
        num_img = x.shape[0]
        num_img_per_dim = np.ceil(np.sqrt(num_img)).astype(int)
        montage_img = np.zeros((
            num_img_per_dim * x.shape[3],
            num_img_per_dim * x.shape[2], x.shape[1]))
    else:
        num_img_per_dim = np.ceil(np.sqrt(x.shape[1])).astype(int)
        montage_img = np.zeros((
            num_img_per_dim * x.shape[3],
            num_img_per_dim * x.shape[2]))
        num_img = x.shape[1]
    for img_i in range(num_img_per_dim):
        for img_j in range(num_img_per_dim):
            if img_i * num_img_per_dim + img_j < num_img:
                if x.shape[0] == 1:
                    # Grayscale tile, normalized by its own maximum.
                    montage_img[
                        img_i * x.shape[3]: (img_i + 1) * x.shape[2],
                        img_j * x.shape[3]: (img_j + 1) * x.shape[2]
                    ] = np.squeeze(np.squeeze(
                        x[0, img_i * num_img_per_dim + img_j, ...]
                    ) / (np.max(x[0, img_i * num_img_per_dim + img_j, ...]
                                ) + 1e-15))
                else:
                    # Color tile: channels moved last via swapaxes, then normalized.
                    montage_img[
                        img_i * x.shape[3]: (img_i + 1) * x.shape[2],
                        img_j * x.shape[3]: (img_j + 1) * x.shape[2],
                        :
                    ] = np.swapaxes(np.squeeze(
                        x[img_i * num_img_per_dim + img_j, ...]
                    ) / (np.max(x[img_i * num_img_per_dim + img_j, ...]
                                ) + 1e-15), 0, 2)
    return montage_img
def get_image_manifold(images, features, res=64, n_neighbors=5):
    '''Creates a montage of the images laid out on a 2-D spectral embedding
    of the associated image features.

    Parameters
    ----------
    images : sequence of arrays; each image is assumed to be
        (res, res, 3) -- TODO confirm against callers.
    features : array-like of shape (n_samples, n_features)
        Per-image feature vectors to embed.
    res : int
        Pixel resolution of each montage cell.
    n_neighbors : int
        Number of nearest images averaged into each cell.

    Returns
    -------
    montage_img : np.ndarray of shape (res * nx, res * ny, 3), scaled so
        its maximum value is 1.
    '''
    from sklearn import manifold
    mapper = manifold.SpectralEmbedding()
    transform = mapper.fit_transform(features)
    nx = int(np.ceil(np.sqrt(len(transform))))
    ny = int(np.ceil(np.sqrt(len(transform))))
    montage_img = np.zeros((res * nx, res * ny, 3))
    from sklearn.neighbors import NearestNeighbors
    nn = NearestNeighbors()
    nn.fit(transform)
    # Sample a regular grid covering +/- 3 standard deviations of the embedding.
    min_x = np.mean(transform[:, 0]) - np.std(transform[:, 0]) * 3.0
    max_x = np.mean(transform[:, 0]) + np.std(transform[:, 0]) * 3.0
    min_y = np.mean(transform[:, 1]) - np.std(transform[:, 1]) * 3.0
    max_y = np.mean(transform[:, 1]) + np.std(transform[:, 1]) * 3.0
    for n_i in range(nx):
        for n_j in range(ny):
            x = min_x + (max_x - min_x) / nx * n_i
            y = min_y + (max_y - min_y) / ny * n_j
            # FIX: kneighbors expects a 2-D (n_queries, n_features) array;
            # passing a bare [x, y] list raises on modern scikit-learn.
            idx = nn.kneighbors([[x, y]], n_neighbors=n_neighbors)[1][0][:]
            # Average the n_neighbors closest images into this grid cell.
            for neighbor_i in idx:
                montage_img[
                    n_i * res: (n_i + 1) * res, n_j * res: (n_j + 1) * res, :] += images[neighbor_i]
            montage_img[
                n_i * res: (n_i + 1) * res, n_j * res: (n_j + 1) * res, :] /= float(len(idx))
    montage_img = montage_img / np.max(montage_img)
    return montage_img
def make_image_pairs(X, y, unique_labels):
    '''Build matched (same-person) and unmatched (imposter) image pairs.

    For every label in `unique_labels`, every 2-combination of that
    person's images becomes one matched pair: N_matched =
    P * (N choose 2).  For every 2-combination of distinct labels, the
    full cross product of their images becomes the unmatched pairs:
    N_unmatched = (P choose 2) * N * N.

    Parameters
    ----------
    X : np.ndarray
        Image array, first axis indexed by the positions labeled in `y`.
    y : np.ndarray
        Label of each row of `X`.
    unique_labels : iterable
        Labels (people) to pair up.

    Returns
    -------
    (X_matched, y_matched, X_unmatched, y_unmatched)
        Each element of `X_*` stacks the two images of one pair along
        its leading axis; `y_matched` is all ones and `y_unmatched` all
        zeros.
    '''
    from itertools import combinations

    def _paired(first_idx, second_idx):
        # Stack the two images of one pair along their leading axis and
        # add a pair axis up front.
        joined = np.concatenate((X[first_idx, ...], X[second_idx, ...]),
                                axis=0)
        return joined[np.newaxis, ...]

    # Matched pairs: all 2-combinations of each person's own images.
    matched = []
    for label in unique_labels:
        same_person = np.where(label == y)[0]
        matched.extend(_paired(i, j)
                       for i, j in combinations(same_person, 2))

    # Imposter pairs: cross product of images of every pair of people.
    unmatched = []
    for label_a, label_b in combinations(unique_labels, 2):
        idx_a = np.where(label_a == y)[0]
        idx_b = np.where(label_b == y)[0]
        unmatched.extend(_paired(i, j) for i in idx_a for j in idx_b)

    return (np.concatenate(matched),
            np.ones(len(matched), dtype=int),
            np.concatenate(unmatched),
            np.zeros(len(unmatched), dtype=int))
def make_image_pair_idxs(y, unique_labels):
    '''Build index pairs for matched and unmatched (imposter) images.

    For every label in `unique_labels`, every 2-combination of that
    person's image indices becomes one matched pair: N_matched =
    P * (N choose 2).  For every 2-combination of distinct labels, the
    full cross product of their image indices becomes the unmatched
    pairs: N_unmatched = (P choose 2) * N * N.

    Parameters
    ----------
    y : np.ndarray
        Label of each image.
    unique_labels : iterable
        Labels (people) to pair up.

    Returns
    -------
    (X_matched, y_matched, X_unmatched, y_unmatched)
        `X_*` are [n_pairs x 2] arrays of indices into the image set
        (each row is one pair); `y_matched` is all ones and
        `y_unmatched` all zeros.
    '''
    from itertools import combinations
    X_pairs_matched = list()
    y_pairs_matched = list()
    # One matched pair per 2-combination of each person's images.
    for person in unique_labels:
        # Find images of this person.
        im_idx = np.where(person == y)[0]
        for el in combinations(im_idx, 2):
            X_pairs_matched.append(np.array([el[0], el[1]])[np.newaxis, ...])
            y_pairs_matched.append(1)
    X_pairs_unmatched = list()
    y_pairs_unmatched = list()
    # One unmatched pair per cross-product of images of distinct people.
    # (The former enumerate() loop index was never used and was removed.)
    for pair in combinations(unique_labels, 2):
        # Find images of those two people.
        im1_idx = np.where(pair[0] == y)[0]
        im2_idx = np.where(pair[1] == y)[0]
        for im1_idx_it in im1_idx:
            for im2_idx_it in im2_idx:
                X_pairs_unmatched.append(
                    np.array([im1_idx_it, im2_idx_it])[np.newaxis, ...])
                y_pairs_unmatched.append(0)
    return (np.concatenate(X_pairs_matched),
            np.array(y_pairs_matched),
            np.concatenate(X_pairs_unmatched),
            np.array(y_pairs_unmatched))
def draw_image_pair(X, y, idx=None):
    '''Plot an image pair side by side, titled Matched or Unmatched.

    Parameters
    ----------
    X : np.ndarray
        Either [N x 2 x W x H], where each row holds both images of a
        pair, or [N x 1 x W x H], where consecutive rows form a pair.
    y : np.ndarray
        Pair labels; y[idx] == 0 means unmatched, otherwise matched.
    idx : int, optional
        Which pair to show.  If None is given, a random one is
        picked. [None]
    '''
    if idx is None:
        idx = np.random.randint(len(X) - 2)
    if X.shape[1] == 1:
        # Images stored one per row: align idx to a pair boundary.
        # NOTE(review): idx + (idx % 2) rounds odd indices up to the next
        # even value; confirm this matches the intended pairing
        # convention (pairs starting at even indices).
        idx = idx + (idx % 2)
    fig, [ax1, ax2] = plt.subplots(1, 2, figsize=(10, 8))
    if X.shape[1] == 2:
        # Both images of the pair live in the same row.
        ax1.imshow(np.squeeze(X[idx, 0, ...]), cmap='gray')
        ax2.imshow(np.squeeze(X[idx, 1, ...]), cmap='gray')
    else:
        # Pair spans two consecutive rows.
        ax1.imshow(np.squeeze(X[idx, ...]), cmap='gray')
        ax2.imshow(np.squeeze(X[idx + 1, ...]), cmap='gray')
    ax1.grid(False)
    ax2.grid(False)
    if y[idx] == 0:
        fig.suptitle('Unmatched: %d' % idx, fontsize=30)
    else:
        fig.suptitle('Matched: %d' % idx, fontsize=30)
def load_pairs(
dataset='lfw',
normalization='LCN',
split=(0.8, 0.1, 0.1),
resolution=(128, 128),
crop_style='none',
crop_factor=1.2,
n_files_per_person=5,
path_to_data=None,
b_load_idxs_only=True,
b_convert_to_grayscale=True):
'''
Given a dataset name, generate the training, validation, and testing
data of matched and unmatched pairs, optionally applying normalization
to each image.
Note this method only returns the idxs of the original dataset.
Parameters
----------
dataset -- string
The name of the dataset to load, 'olivetti', ['lfw'].
normalization -- string
The type of | |
be changed in the future, and at
# the IR level it makes sense to include the dependency
if annotation.namespace.name != self.namespace.name:
self.namespace.add_imported_namespace(
annotation.namespace,
imported_annotation=True)
# Indicate that the attributes of the type have been populated.
self._is_forward_ref = False
@property
def all_fields(self):
    # Abstract: subclasses must return every field, including any
    # inherited from parent types.
    raise NotImplementedError
def has_documented_type_or_fields(self, include_inherited_fields=False):
    """Whether this type, or any of its fields, carries documentation.

    Use this when deciding whether to create a block of documentation
    for this type.
    """
    # Guard-clause form: the type's own doc short-circuits the field scan.
    if self.doc:
        return True
    return self.has_documented_fields(include_inherited_fields)
def has_documented_fields(self, include_inherited_fields=False):
    """Whether at least one field (optionally including inherited
    fields) is documented."""
    candidates = self.all_fields if include_inherited_fields else self.fields
    return any(field.doc for field in candidates)
def get_all_omitted_callers(self):
    """Return the set of distinct, truthy omitted callers across this
    type's own fields."""
    omitted = set()
    for field in self.fields:
        if field.omitted_caller:
            omitted.add(field.omitted_caller)
    return omitted
@property
def name(self):
    # Public name of the type as declared in the spec.
    return self._name
def copy(self):
    # Deep copy so mutations of the copy never leak back into this type.
    return copy.deepcopy(self)
def prepend_field(self, field):
    # Insert at the front so the field precedes all existing fields.
    self.fields.insert(0, field)
def get_examples(self, compact=False):
    """
    Returns an OrderedDict mapping labels to Example objects.

    Args:
        compact (bool): If True, union members of void type are converted
            to their compact representation: no ".tag" key or containing
            dict, just the tag as a string.
    """
    # Copy it just in case the caller wants to mutate the object.
    examples = copy.deepcopy(self._examples)
    if not compact:
        return examples

    def make_compact(d):
        # Traverse through dicts looking for ones that have a lone .tag
        # key, which can be converted into the compact form.
        if not isinstance(d, dict):
            return
        for key in d:
            if isinstance(d[key], dict):
                inner_d = d[key]
                if len(inner_d) == 1 and '.tag' in inner_d:
                    # Collapse {'k': {'.tag': 't'}} into {'k': 't'}.
                    # Rebinding d[key] while iterating is safe here:
                    # no keys are added or removed.
                    d[key] = inner_d['.tag']
                else:
                    make_compact(inner_d)
            if isinstance(d[key], list):
                # Not elif: d[key] may have just been rebound above, so
                # its current type is re-checked.
                for item in d[key]:
                    make_compact(item)

    for example in examples.values():
        if (isinstance(example.value, dict) and
                len(example.value) == 1 and '.tag' in example.value):
            # Handle the case where the top-level of the example can be
            # made compact.
            example.value = example.value['.tag']
        else:
            make_compact(example.value)
    return examples
class Example(object):
    """An example of a struct or union type."""

    def __init__(self, label, text, value, ast_node=None):
        # label: name of the example; must be unicode text.
        assert isinstance(label, six.text_type), type(label)
        self.label = label
        # text: optional human-readable description; doc_unwrap
        # presumably collapses hard-wrapped lines -- see its definition.
        assert isinstance(text, (six.text_type, type(None))), type(text)
        self.text = doc_unwrap(text) if text else text
        # value: the example payload, either raw text or an ordered map.
        assert isinstance(value, (six.text_type, OrderedDict)), type(value)
        self.value = value
        # Original AST node, kept for error reporting; may be None.
        self._ast_node = ast_node

    def __repr__(self):
        return 'Example({!r}, {!r}, {!r})'.format(
            self.label, self.text, self.value)
class Struct(UserDefined):
"""
Defines a product type: Composed of other primitive and/or struct types.
"""
# pylint: disable=attribute-defined-outside-init
composite_type = 'struct'
def set_attributes(self, doc, fields, parent_type=None):
    """
    See :meth:`Composite.set_attributes` for parameter definitions.
    """
    if parent_type:
        # A struct may only extend another struct.
        assert isinstance(parent_type, Struct)
    # Structs that directly subclass this one; populated by each
    # subclass when its own attributes are set (see below).
    self.subtypes = []
    # These are only set if this struct enumerates subtypes.
    self._enumerated_subtypes = None  # Optional[List[Tuple[str, DataType]]]
    self._is_catch_all = None  # Optional[Bool]
    super(Struct, self).set_attributes(doc, fields, parent_type)
    if self.parent_type:
        # Register ourselves with the parent so it can track subtypes.
        self.parent_type.subtypes.append(self)
def check(self, val):
    # Value validation is not implemented for structs.
    raise NotImplementedError
def check_example(self, ex_field):
    """Validate a struct-valued example field: it must reference another
    example by label, not provide an inline value."""
    if isinstance(ex_field.value, AstExampleRef):
        return
    raise InvalidSpec(
        "example must reference label of '%s'" % self.name,
        ex_field.lineno, ex_field.path)
def check_attr_repr(self, attrs):
    """Validate route attributes against this struct's fields.

    Returns a dict mapping every field name to its validated value
    (fields with no supplied attribute are validated against None).
    Raises InvalidSpec if `attrs` contains a key no field defines.
    """
    # Work on a copy so the caller's mapping is never mutated.
    remaining = attrs.copy()
    validated = {}
    for field in self.all_fields:
        validated[field.name] = field.check_attr_repr(
            remaining.pop(field.name, None))
    if remaining:
        # Anything left over was never claimed by a field.
        unknown_name, unknown_field = remaining.popitem()
        raise InvalidSpec(
            "Route attribute '%s' is not defined in 'stone_cfg.Route'."
            % unknown_name, unknown_field.lineno, unknown_field.path)
    return validated
@property
def all_fields(self):
    """
    Returns an iterator of all fields. Required fields before optional
    fields. Super type fields before type fields.
    """
    # NOTE(review): despite the docstring, this returns a list (the
    # concatenation of two lists), not a lazy iterator.
    return self.all_required_fields + self.all_optional_fields
def _filter_fields(self, filter_function):
"""
Utility to iterate through all fields (super types first) of a type.
:param filter: A function that takes in a Field object. If it returns
True, the field is part of the generated output. If False, it is
omitted.
"""
fields = []
if self.parent_type:
fields.extend(self.parent_type._filter_fields(filter_function))
fields.extend(filter(filter_function, self.fields))
return fields
@property
def all_required_fields(self):
    """
    All required fields, parent-type fields first, then this type's.

    A field is required when it is neither nullable nor defaulted.
    """
    return self._filter_fields(
        lambda f: not is_nullable_type(f.data_type) and not f.has_default)
@property
def all_optional_fields(self):
    """
    All optional fields, parent-type fields first, then this type's.

    A field is optional when it is nullable or has a default.
    """
    return self._filter_fields(
        lambda f: is_nullable_type(f.data_type) or f.has_default)
def has_enumerated_subtypes(self):
    """
    Whether this struct enumerates its subtypes.
    """
    # True only once set_enumerated_subtypes() has populated the
    # (non-empty) list; None and [] both mean "no".
    return (self._enumerated_subtypes is not None and
            len(self._enumerated_subtypes) > 0)
def get_enumerated_subtypes(self):
    """
    Returns a list of subtype fields. Each field has a `name` attribute
    which is the tag for the subtype. Each field also has a `data_type`
    attribute that is a `Struct` object representing the subtype.
    """
    # Only valid after set_enumerated_subtypes() has been called.
    assert self._enumerated_subtypes is not None
    return self._enumerated_subtypes
def is_member_of_enumerated_subtypes_tree(self):
    """
    Whether this struct enumerates subtypes or is itself enumerated by
    its parent type. Because such structs are serialized and
    deserialized differently, use this method to detect them.
    """
    if self.has_enumerated_subtypes():
        return True
    # Deliberately returned unchanged (may be None when there is no
    # parent) to preserve the original truthiness contract.
    return self.parent_type and self.parent_type.has_enumerated_subtypes()
def is_catch_all(self):
    """
    Indicates whether this struct should be used in the event that none of
    its known enumerated subtypes match a received type tag.

    Use this method only if the struct has enumerated subtypes.

    Returns: bool
    """
    # Only valid after set_enumerated_subtypes() has been called.
    assert self._enumerated_subtypes is not None
    return self._is_catch_all
def set_enumerated_subtypes(self, subtype_fields, is_catch_all):
    """
    Sets the list of "enumerated subtypes" for this struct. This differs
    from regular subtyping in that each subtype is associated with a tag
    that is used in the serialized format to indicate the subtype. Also,
    this list of subtypes was explicitly defined in an "inner-union" in the
    specification. The list of fields must include all defined subtypes of
    this struct.

    NOTE(kelkabany): For this to work with upcoming forward references, the
    hierarchy of parent types for this struct must have had this method
    called on them already.

    :type subtype_fields: List[UnionField]
    """
    assert self._enumerated_subtypes is None, \
        'Enumerated subtypes already set.'
    assert isinstance(is_catch_all, bool), type(is_catch_all)
    self._is_catch_all = is_catch_all
    self._enumerated_subtypes = []
    if self.parent_type:
        raise InvalidSpec(
            "'%s' enumerates subtypes so it cannot extend another struct."
            % self.name, self._ast_node.lineno, self._ast_node.path)
    # Require that if this struct enumerates subtypes, its parent (and thus
    # the entire hierarchy above this struct) does as well.
    # NOTE(review): this branch is unreachable -- the check just above
    # already raises for any struct with a parent type. Confirm which
    # rule is intended before removing either check.
    if self.parent_type and not self.parent_type.has_enumerated_subtypes():
        raise InvalidSpec(
            "'%s' cannot enumerate subtypes if parent '%s' does not." %
            (self.name, self.parent_type.name),
            self._ast_node.lineno, self._ast_node.path)
    enumerated_subtype_names = set()  # Set[str]
    for subtype_field in subtype_fields:
        path = subtype_field._ast_node.path
        lineno = subtype_field._ast_node.lineno
        # Require that a subtype only has a single type tag.
        if subtype_field.data_type.name in enumerated_subtype_names:
            raise InvalidSpec(
                "Subtype '%s' can only be specified once." %
                subtype_field.data_type.name, lineno, path)
        # Require that a subtype has this struct as its parent.
        if subtype_field.data_type.parent_type != self:
            raise InvalidSpec(
                "'%s' is not a subtype of '%s'." %
                (subtype_field.data_type.name, self.name), lineno, path)
        # Check for subtype tags that conflict with this struct's
        # non-inherited fields.
        if subtype_field.name in self._fields_by_name:
            # Since the union definition comes first, use its line number
            # as the source of the field's original declaration.
            orig_field = self._fields_by_name[subtype_field.name]
            raise InvalidSpec(
                "Field '%s' already defined on line %d." %
                (subtype_field.name, lineno),
                orig_field._ast_node.lineno,
                orig_field._ast_node.path)
        # Walk up parent tree hierarchy to ensure no field conflicts.
        # Checks for conflicts with subtype tags and regular fields.
        cur_type = self.parent_type
        while cur_type:
            if subtype_field.name in cur_type._fields_by_name:
                orig_field = cur_type._fields_by_name[subtype_field.name]
                raise InvalidSpec(
                    "Field '%s' already defined in parent '%s' (%s:%d)."
                    % (subtype_field.name, cur_type.name,
                       orig_field._ast_node.path, orig_field._ast_node.lineno),
                    lineno, path)
            cur_type = cur_type.parent_type
        # Note the discrepancy between `fields` which contains only the
        # struct fields, and `_fields_by_name` which contains the struct
        # fields and enumerated subtype fields.
        self._fields_by_name[subtype_field.name] = subtype_field
        enumerated_subtype_names.add(subtype_field.data_type.name)
        self._enumerated_subtypes.append(subtype_field)
    assert len(self._enumerated_subtypes) > 0
    # Check that all known subtypes are listed in the enumeration.
    for subtype in self.subtypes:
        if subtype.name not in enumerated_subtype_names:
            raise InvalidSpec(
                "'%s' does not enumerate all subtypes, missing '%s'" %
                (self.name, subtype.name),
                # NOTE(review): unlike every other InvalidSpec raised in
                # this method, no `path` argument is passed here --
                # likely an omission.
                self._ast_node.lineno)
def get_all_subtypes_with_tags(self):
"""
Unlike other enumerated-subtypes-related functionality, this method
returns not just direct subtypes, but all subtypes of this struct. The
tag of each subtype is the list of tags from which the type descends.
This method only applies to structs that enumerate subtypes.
Use this when you need to generate a lookup table for a root struct
that maps a generated class representing a subtype to the tag it needs
in the serialized | |
Constraint(expr= m.x420 == 0)
# Auto-generated Pyomo model section (disjunctive/big-M style
# reformulation). Each unit contributes: equality constraints fixing the
# inactive-branch variables to zero, flow-splitting balances of the form
# x_total - x_active - x_inactive == 0, big-M bounds tying each
# continuous variable to its selector binary, and (per unit) nonlinear
# log relations scaled by (1e-6 + binary) to stay well-defined when the
# binary is 0.
m.c124 = Constraint(expr= m.x421 == 0)
m.c125 = Constraint(expr= m.x443 == 0)
m.c126 = Constraint(expr= m.x444 == 0)
m.c127 = Constraint(expr= m.x445 == 0)
# Splitting balances for unit b914-b916.
m.c128 = Constraint(expr= m.x146 - m.x416 - m.x419 == 0)
m.c129 = Constraint(expr= m.x147 - m.x417 - m.x420 == 0)
m.c130 = Constraint(expr= m.x148 - m.x418 - m.x421 == 0)
m.c131 = Constraint(expr= m.x158 - m.x440 - m.x443 == 0)
m.c132 = Constraint(expr= m.x159 - m.x441 - m.x444 == 0)
m.c133 = Constraint(expr= m.x160 - m.x442 - m.x445 == 0)
# Big-M bounds for unit b914-b916.
m.c134 = Constraint(expr= m.x416 - 4.45628648004517*m.b914 <= 0)
m.c135 = Constraint(expr= m.x417 - 4.45628648004517*m.b915 <= 0)
m.c136 = Constraint(expr= m.x418 - 4.45628648004517*m.b916 <= 0)
m.c137 = Constraint(expr= m.x419 + 4.45628648004517*m.b914 <= 4.45628648004517)
m.c138 = Constraint(expr= m.x420 + 4.45628648004517*m.b915 <= 4.45628648004517)
m.c139 = Constraint(expr= m.x421 + 4.45628648004517*m.b916 <= 4.45628648004517)
m.c140 = Constraint(expr= m.x440 - 3.34221486003388*m.b914 <= 0)
m.c141 = Constraint(expr= m.x441 - 3.34221486003388*m.b915 <= 0)
m.c142 = Constraint(expr= m.x442 - 3.34221486003388*m.b916 <= 0)
m.c143 = Constraint(expr= m.x443 + 3.34221486003388*m.b914 <= 3.34221486003388)
m.c144 = Constraint(expr= m.x444 + 3.34221486003388*m.b915 <= 3.34221486003388)
m.c145 = Constraint(expr= m.x445 + 3.34221486003388*m.b916 <= 3.34221486003388)
# Unit b917-b919: scaled log conversion plus zero-fixing, balances and
# big-M bounds.
m.c146 = Constraint(expr=(m.x446/(1e-6 + m.b917) - 1.5*log(1 + m.x422/(1e-6 + m.b917)))*(1e-6 + m.b917) <= 0)
m.c147 = Constraint(expr=(m.x447/(1e-6 + m.b918) - 1.5*log(1 + m.x423/(1e-6 + m.b918)))*(1e-6 + m.b918) <= 0)
m.c148 = Constraint(expr=(m.x448/(1e-6 + m.b919) - 1.5*log(1 + m.x424/(1e-6 + m.b919)))*(1e-6 + m.b919) <= 0)
m.c149 = Constraint(expr= m.x425 == 0)
m.c150 = Constraint(expr= m.x426 == 0)
m.c151 = Constraint(expr= m.x427 == 0)
m.c152 = Constraint(expr= m.x452 == 0)
m.c153 = Constraint(expr= m.x453 == 0)
m.c154 = Constraint(expr= m.x454 == 0)
m.c155 = Constraint(expr= m.x149 - m.x422 - m.x425 == 0)
m.c156 = Constraint(expr= m.x150 - m.x423 - m.x426 == 0)
m.c157 = Constraint(expr= m.x151 - m.x424 - m.x427 == 0)
m.c158 = Constraint(expr= m.x161 - m.x446 - m.x452 == 0)
m.c159 = Constraint(expr= m.x162 - m.x447 - m.x453 == 0)
m.c160 = Constraint(expr= m.x163 - m.x448 - m.x454 == 0)
m.c161 = Constraint(expr= m.x422 - 4.45628648004517*m.b917 <= 0)
m.c162 = Constraint(expr= m.x423 - 4.45628648004517*m.b918 <= 0)
m.c163 = Constraint(expr= m.x424 - 4.45628648004517*m.b919 <= 0)
m.c164 = Constraint(expr= m.x425 + 4.45628648004517*m.b917 <= 4.45628648004517)
m.c165 = Constraint(expr= m.x426 + 4.45628648004517*m.b918 <= 4.45628648004517)
m.c166 = Constraint(expr= m.x427 + 4.45628648004517*m.b919 <= 4.45628648004517)
m.c167 = Constraint(expr= m.x446 - 2.54515263975353*m.b917 <= 0)
m.c168 = Constraint(expr= m.x447 - 2.54515263975353*m.b918 <= 0)
m.c169 = Constraint(expr= m.x448 - 2.54515263975353*m.b919 <= 0)
m.c170 = Constraint(expr= m.x452 + 2.54515263975353*m.b917 <= 2.54515263975353)
m.c171 = Constraint(expr= m.x453 + 2.54515263975353*m.b918 <= 2.54515263975353)
m.c172 = Constraint(expr= m.x454 + 2.54515263975353*m.b919 <= 2.54515263975353)
# Unit b920-b922: linear mixing relations plus zero-fixing, balances and
# big-M bounds.
m.c173 = Constraint(expr= - m.x428 + m.x458 == 0)
m.c174 = Constraint(expr= - m.x429 + m.x459 == 0)
m.c175 = Constraint(expr= - m.x430 + m.x460 == 0)
m.c176 = Constraint(expr= - 0.5*m.x434 + m.x458 == 0)
m.c177 = Constraint(expr= - 0.5*m.x435 + m.x459 == 0)
m.c178 = Constraint(expr= - 0.5*m.x436 + m.x460 == 0)
m.c179 = Constraint(expr= m.x431 == 0)
m.c180 = Constraint(expr= m.x432 == 0)
m.c181 = Constraint(expr= m.x433 == 0)
m.c182 = Constraint(expr= m.x437 == 0)
m.c183 = Constraint(expr= m.x438 == 0)
m.c184 = Constraint(expr= m.x439 == 0)
m.c185 = Constraint(expr= m.x461 == 0)
m.c186 = Constraint(expr= m.x462 == 0)
m.c187 = Constraint(expr= m.x463 == 0)
m.c188 = Constraint(expr= m.x152 - m.x428 - m.x431 == 0)
m.c189 = Constraint(expr= m.x153 - m.x429 - m.x432 == 0)
m.c190 = Constraint(expr= m.x154 - m.x430 - m.x433 == 0)
m.c191 = Constraint(expr= m.x155 - m.x434 - m.x437 == 0)
m.c192 = Constraint(expr= m.x156 - m.x435 - m.x438 == 0)
m.c193 = Constraint(expr= m.x157 - m.x436 - m.x439 == 0)
m.c194 = Constraint(expr= m.x164 - m.x458 - m.x461 == 0)
m.c195 = Constraint(expr= m.x165 - m.x459 - m.x462 == 0)
m.c196 = Constraint(expr= m.x166 - m.x460 - m.x463 == 0)
m.c197 = Constraint(expr= m.x428 - 4.45628648004517*m.b920 <= 0)
m.c198 = Constraint(expr= m.x429 - 4.45628648004517*m.b921 <= 0)
m.c199 = Constraint(expr= m.x430 - 4.45628648004517*m.b922 <= 0)
m.c200 = Constraint(expr= m.x431 + 4.45628648004517*m.b920 <= 4.45628648004517)
m.c201 = Constraint(expr= m.x432 + 4.45628648004517*m.b921 <= 4.45628648004517)
m.c202 = Constraint(expr= m.x433 + 4.45628648004517*m.b922 <= 4.45628648004517)
m.c203 = Constraint(expr= m.x434 - 30*m.b920 <= 0)
m.c204 = Constraint(expr= m.x435 - 30*m.b921 <= 0)
m.c205 = Constraint(expr= m.x436 - 30*m.b922 <= 0)
m.c206 = Constraint(expr= m.x437 + 30*m.b920 <= 30)
m.c207 = Constraint(expr= m.x438 + 30*m.b921 <= 30)
m.c208 = Constraint(expr= m.x439 + 30*m.b922 <= 30)
m.c209 = Constraint(expr= m.x458 - 15*m.b920 <= 0)
m.c210 = Constraint(expr= m.x459 - 15*m.b921 <= 0)
m.c211 = Constraint(expr= m.x460 - 15*m.b922 <= 0)
m.c212 = Constraint(expr= m.x461 + 15*m.b920 <= 15)
m.c213 = Constraint(expr= m.x462 + 15*m.b921 <= 15)
m.c214 = Constraint(expr= m.x463 + 15*m.b922 <= 15)
# Unit b923-b925.
m.c215 = Constraint(expr=(m.x494/(1e-6 + m.b923) - 1.25*log(1 + m.x464/(1e-6 + m.b923)))*(1e-6 + m.b923) <= 0)
m.c216 = Constraint(expr=(m.x495/(1e-6 + m.b924) - 1.25*log(1 + m.x465/(1e-6 + m.b924)))*(1e-6 + m.b924) <= 0)
m.c217 = Constraint(expr=(m.x496/(1e-6 + m.b925) - 1.25*log(1 + m.x466/(1e-6 + m.b925)))*(1e-6 + m.b925) <= 0)
m.c218 = Constraint(expr= m.x467 == 0)
m.c219 = Constraint(expr= m.x468 == 0)
m.c220 = Constraint(expr= m.x469 == 0)
m.c221 = Constraint(expr= m.x500 == 0)
m.c222 = Constraint(expr= m.x501 == 0)
m.c223 = Constraint(expr= m.x502 == 0)
m.c224 = Constraint(expr= m.x167 - m.x464 - m.x467 == 0)
m.c225 = Constraint(expr= m.x168 - m.x465 - m.x468 == 0)
m.c226 = Constraint(expr= m.x169 - m.x466 - m.x469 == 0)
m.c227 = Constraint(expr= m.x182 - m.x494 - m.x500 == 0)
m.c228 = Constraint(expr= m.x183 - m.x495 - m.x501 == 0)
m.c229 = Constraint(expr= m.x184 - m.x496 - m.x502 == 0)
m.c230 = Constraint(expr= m.x464 - 3.34221486003388*m.b923 <= 0)
m.c231 = Constraint(expr= m.x465 - 3.34221486003388*m.b924 <= 0)
m.c232 = Constraint(expr= m.x466 - 3.34221486003388*m.b925 <= 0)
m.c233 = Constraint(expr= m.x467 + 3.34221486003388*m.b923 <= 3.34221486003388)
m.c234 = Constraint(expr= m.x468 + 3.34221486003388*m.b924 <= 3.34221486003388)
m.c235 = Constraint(expr= m.x469 + 3.34221486003388*m.b925 <= 3.34221486003388)
m.c236 = Constraint(expr= m.x494 - 1.83548069293539*m.b923 <= 0)
m.c237 = Constraint(expr= m.x495 - 1.83548069293539*m.b924 <= 0)
m.c238 = Constraint(expr= m.x496 - 1.83548069293539*m.b925 <= 0)
m.c239 = Constraint(expr= m.x500 + 1.83548069293539*m.b923 <= 1.83548069293539)
m.c240 = Constraint(expr= m.x501 + 1.83548069293539*m.b924 <= 1.83548069293539)
m.c241 = Constraint(expr= m.x502 + 1.83548069293539*m.b925 <= 1.83548069293539)
# Unit b926-b928.
m.c242 = Constraint(expr=(m.x506/(1e-6 + m.b926) - 0.9*log(1 + m.x470/(1e-6 + m.b926)))*(1e-6 + m.b926) <= 0)
m.c243 = Constraint(expr=(m.x507/(1e-6 + m.b927) - 0.9*log(1 + m.x471/(1e-6 + m.b927)))*(1e-6 + m.b927) <= 0)
m.c244 = Constraint(expr=(m.x508/(1e-6 + m.b928) - 0.9*log(1 + m.x472/(1e-6 + m.b928)))*(1e-6 + m.b928) <= 0)
m.c245 = Constraint(expr= m.x473 == 0)
m.c246 = Constraint(expr= m.x474 == 0)
m.c247 = Constraint(expr= m.x475 == 0)
m.c248 = Constraint(expr= m.x512 == 0)
m.c249 = Constraint(expr= m.x513 == 0)
m.c250 = Constraint(expr= m.x514 == 0)
m.c251 = Constraint(expr= m.x170 - m.x470 - m.x473 == 0)
m.c252 = Constraint(expr= m.x171 - m.x471 - m.x474 == 0)
m.c253 = Constraint(expr= m.x172 - m.x472 - m.x475 == 0)
m.c254 = Constraint(expr= m.x185 - m.x506 - m.x512 == 0)
m.c255 = Constraint(expr= m.x186 - m.x507 - m.x513 == 0)
m.c256 = Constraint(expr= m.x187 - m.x508 - m.x514 == 0)
m.c257 = Constraint(expr= m.x470 - 3.34221486003388*m.b926 <= 0)
m.c258 = Constraint(expr= m.x471 - 3.34221486003388*m.b927 <= 0)
m.c259 = Constraint(expr= m.x472 - 3.34221486003388*m.b928 <= 0)
m.c260 = Constraint(expr= m.x473 + 3.34221486003388*m.b926 <= 3.34221486003388)
m.c261 = Constraint(expr= m.x474 + 3.34221486003388*m.b927 <= 3.34221486003388)
m.c262 = Constraint(expr= m.x475 + 3.34221486003388*m.b928 <= 3.34221486003388)
m.c263 = Constraint(expr= m.x506 - 1.32154609891348*m.b926 <= 0)
m.c264 = Constraint(expr= m.x507 - 1.32154609891348*m.b927 <= 0)
m.c265 = Constraint(expr= m.x508 - 1.32154609891348*m.b928 <= 0)
m.c266 = Constraint(expr= m.x512 + 1.32154609891348*m.b926 <= 1.32154609891348)
m.c267 = Constraint(expr= m.x513 + 1.32154609891348*m.b927 <= 1.32154609891348)
m.c268 = Constraint(expr= m.x514 + 1.32154609891348*m.b928 <= 1.32154609891348)
# Unit b929-b931.
m.c269 = Constraint(expr=(m.x518/(1e-6 + m.b929) - log(1 + m.x449/(1e-6 + m.b929)))*(1e-6 + m.b929) <= 0)
m.c270 = Constraint(expr=(m.x519/(1e-6 + m.b930) - log(1 + m.x450/(1e-6 + m.b930)))*(1e-6 + m.b930) <= 0)
m.c271 = Constraint(expr=(m.x520/(1e-6 + m.b931) - log(1 + m.x451/(1e-6 + m.b931)))*(1e-6 + m.b931) <= 0)
m.c272 = Constraint(expr= m.x455 == 0)
m.c273 = Constraint(expr= m.x456 == 0)
m.c274 = Constraint(expr= m.x457 == 0)
m.c275 = Constraint(expr= m.x521 == 0)
m.c276 = Constraint(expr= m.x522 == 0)
m.c277 = Constraint(expr= m.x523 == 0)
m.c278 = Constraint(expr= m.x161 - m.x449 - m.x455 == 0)
m.c279 = Constraint(expr= m.x162 - m.x450 - m.x456 == 0)
m.c280 = Constraint(expr= m.x163 - m.x451 - m.x457 == 0)
m.c281 = Constraint(expr= m.x188 - m.x518 - m.x521 == 0)
m.c282 = Constraint(expr= m.x189 - m.x519 - m.x522 == 0)
m.c283 = Constraint(expr= m.x190 - m.x520 - m.x523 == 0)
m.c284 = Constraint(expr= m.x449 - 2.54515263975353*m.b929 <= 0)
m.c285 = Constraint(expr= m.x450 - 2.54515263975353*m.b930 <= 0)
m.c286 = Constraint(expr= m.x451 - 2.54515263975353*m.b931 <= 0)
m.c287 = Constraint(expr= m.x455 + 2.54515263975353*m.b929 <= 2.54515263975353)
m.c288 = Constraint(expr= m.x456 + 2.54515263975353*m.b930 <= 2.54515263975353)
m.c289 = Constraint(expr= m.x457 + 2.54515263975353*m.b931 <= 2.54515263975353)
m.c290 = Constraint(expr= m.x518 - 1.26558121681553*m.b929 <= 0)
m.c291 = Constraint(expr= m.x519 - 1.26558121681553*m.b930 <= 0)
m.c292 = Constraint(expr= m.x520 - 1.26558121681553*m.b931 <= | |
'latconst(c)'\n")
fout.write("set zlabel 'Energy'\n")
fout.write("splot 'energy-latconst.data'\n")
fout.write("EOF\n")
else:
fout.write(" energy=`cat ../relax-${a}/optimization.out | grep 'Total =' | tail -n -1 | cut -d \"=\" -f 2`\n")
fout.write(" cat >> energy-latconst.data <<EOF\n")
fout.write("${a} ${energy:32:-36}\n")
fout.write("EOF\n")
fout.write("done\n")
fout.write("cat > energy-latconst.gp<<EOF\n")
fout.write("set term gif\n")
fout.write("set output 'energy-latconst.gif'\n")
fout.write("set title 'Energy Latconst'\n")
fout.write("set xlabel 'latconst(a)'\n")
fout.write("set ylabel 'Energy'\n")
fout.write("plot 'energy-latconst.data' w l\n")
fout.write("EOF\n")
else:
if nc >= 2:
fout.write("for c in `seq -w %f %f %f`\n" % (c-nc/2*stepc, stepc, c+nc/2*stepc))
fout.write("do\n")
fout.write(" energy=`cat ../relax-${c}/optimization.out | grep 'Total =' | tail -n -1 | cut -d \"=\" -f 2`\n")
fout.write(" cat >> energy-latconst.data <<EOF\n")
fout.write("${c} ${energy:32:-36}\n")
fout.write("EOF\n")
fout.write("done\n")
fout.write("cat > energy-latconst.gp<<EOF\n")
fout.write("set term gif\n")
fout.write("set output 'energy-latconst.gif'\n")
fout.write("set title 'Energy Latconst'\n")
fout.write("set xlabel 'latconst(c)'\n")
fout.write("set ylabel 'Energy'\n")
fout.write("plot 'energy-latconst.data' w l\n")
fout.write("EOF\n")
else:
# nothing to do
pass
#os.system("cd post-processing; bash get_energy.sh; cd ../")
os.chdir("../")
if runopt == "run" or runopt == "genrun":
# run the simulation
os.chdir(directory)
os.system("bash opt-tetragonal.sh")
os.chdir("../")
server_handle(auto=auto, directory=directory, jobfilebase="opt-tetragonal", server=self.run_params["server"])
def abc(self, directory="tmp-qe-opt-abc", runopt="gen", auto=0, range_a=[-0.1, 0.1, 0.01], range_b=[-0.1, 0.1, 0.01], range_c=[-0.1, 0.1, 0.01]):
"""
"""
na = len(np.arange(range_a[0], range_a[1], range_a[2]))
nb = len(np.arange(range_b[0], range_b[1], range_b[2]))
nc = len(np.arange(range_c[0], range_c[1], range_c[2]))
if self.batch_a == None:
# namely all in one batch
self.batch_a = na
else:
pass
if self.batch_b == None:
# namely all in one batch
self.batch_b = nb
else:
pass
if self.batch_c == None:
# namely all in one batch
self.batch_c = nc
else:
pass
if na % self.batch_a == 0:
n_batch_a = int(na / self.batch_a)
else:
n_batch_a = int(na / self.batch_a) + 1
if nb % self.batch_b == 0:
n_batch_b = int(nb / self.batch_b)
else:
n_batch_b = int(nb / self.batch_b) + 1
if nc % self.batch_c == 0:
n_batch_c = int(nc / self.batch_c)
else:
n_batch_c = int(nc / self.batch_c) + 1
#
if os.path.exists(directory):
shutil.rmtree(directory)
os.mkdir(directory)
for element in self.system.xyz.specie_labels:
shutil.copyfile("%s.psf" % element, os.path.join(directory, "%s.psf" % element))
shutil.copyfile(self.system.xyz.file, os.path.join(directory, os.path.basename(self.system.xyz.file)))
#
os.chdir(directory)
for i_batch_a in range(n_batch_a):
for i_batch_b in range(n_batch_b):
for i_batch_c in range(n_batch_c):
range_a_start = range_a[0] + i_batch_a * self.batch_a * range_a[2]
range_a_end = range_a[0] + (i_batch_a+1) * self.batch_a * range_a[2] - range_a[2] / 2
# - range_a[2] / 2, so that the last value is ignored which is actually the begining of next batch
if range_a_end > range_a[1]:
range_a_end = range_a[1]
range_b_start = range_b[0] + i_batch_b * self.batch_b * range_b[2]
range_b_end = range_b[0] + (i_batch_b+1) * self.batch_b * range_b[2] - range_b[2] / 2
# - range_b[2] / 2, so that the last value is ignored which is actually the begining of next batch
if range_b_end > range_b[1]:
range_b_end = range_b[1]
range_c_start = range_c[0] + i_batch_c * self.batch_c * range_c[2]
range_c_end = range_c[0] + (i_batch_c+1) * self.batch_c * range_c[2] - range_c[2] / 2
# - range_c[2] / 2, so that the last value is ignored which is actually the begining of next batch
if range_c_end > range_c[1]:
range_c_end = range_c[1]
a = np.sqrt(self.dataset[0].system.xyz.cell[0][0]**2+self.dataset[0].system.xyz.cell[0][1]**2+self.dataset[0].system.xyz.cell[0][2]**2)
b = np.sqrt(self.dataset[0].system.xyz.cell[1][0]**2+self.dataset[0].system.xyz.cell[1][1]**2+self.dataset[0].system.xyz.cell[1][2]**2)
c = np.sqrt(self.dataset[0].system.xyz.cell[2][0]**2+self.dataset[0].system.xyz.cell[2][1]**2+self.dataset[0].system.xyz.cell[2][2]**2)
# gen llhpc script
with open("opt-abc-%d-%d-%d.slurm" % (i_batch_a, i_batch_b, i_batch_c), 'w') as fout:
fout.write("#!/bin/bash\n")
fout.write("#SBATCH -p %s\n" % self.run_params["partition"])
fout.write("#SBATCH -N %d\n" % self.run_params["nodes"])
fout.write("#SBATCH -n %d\n" % self.run_params["ntask"])
fout.write("#SBATCH -J %s\n" % self.run_params["jobname"])
fout.write("#SBATCH -o %s\n" % self.run_params["stdout"])
fout.write("#SBATCH -e %s\n" % self.run_params["stderr"])
fout.write("cat > optimization.fdf<<EOF\n")
fout.write(self.system.to_string())
fout.write(self.electrons.to_string())
fout.write(self.ions.to_string())
fout.write("EOF\n")
fout.write("a_in=%f\n" % a)
fout.write("b_in=%f\n" % b)
fout.write("c_in=%f\n" % c)
fout.write("a1=%f\n" % self.system.xyz.cell[0][0])
fout.write("a2=%f\n" % self.system.xyz.cell[0][1])
fout.write("a3=%f\n" % self.system.xyz.cell[0][2])
fout.write("b1=%f\n" % self.system.xyz.cell[1][0])
fout.write("b2=%f\n" % self.system.xyz.cell[1][1])
fout.write("b3=%f\n" % self.system.xyz.cell[1][2])
fout.write("c1=%f\n" % self.system.xyz.cell[2][0])
fout.write("c2=%f\n" % self.system.xyz.cell[2][1])
fout.write("c3=%f\n" % self.system.xyz.cell[2][2])
fout.write("lat_vec_begin=`cat optimization.fdf | grep -n \'%block LatticeVectors\' | cut -d \":\" -f 1`\n")
fout.write("lat_vec_end=`cat optimization.fdf | grep -n \'%endblock LatticeVectors\' | cut -d \":\" -f 1`\n")
fout.write("for a in `seq -w %f %f %f`\n" % (a+range_a_start, range_a[2], a+range_a_end))
fout.write("do\n")
fout.write("for b in `seq -w %f %f %f`\n" % (b+range_b_start, range_b[2], b+range_b_end))
fout.write("do\n")
fout.write("for c in `seq -w %f %f %f`\n" % (c+range_c_start, range_c[2], c+range_c_end))
fout.write("do\n")
fout.write(" mkdir relax-${a}-${b}-${c}\n")
fout.write(" cp *.psf relax-${a}-${b}-${c}/\n")
fout.write(" cat optimization.fdf | head -n +${lat_vec_begin} > relax-${a}-${b}-${c}/optimization.fdf\n")
fout.write(" vec11=$(printf \"%-.6f\" `echo \"scale=6; result=${a1} * ${a} / ${a_in}; print result\" | `bc`)\n")
fout.write(" vec12=$(printf \"%-.6f\" `echo \"scale=6; result=${a2} * ${a} / ${a_in}; print result\" | `bc`)\n")
fout.write(" vec13=$(printf \"%-.6f\" `echo \"scale=6; result=${a3} * ${a} / ${a_in}; print result\" | `bc`)\n")
fout.write(" vec21=$(printf \"%-.6f\" `echo \"scale=6; result=${b1} * ${b} / ${b_in}; print result\" | `bc`)\n")
fout.write(" vec22=$(printf \"%-.6f\" `echo \"scale=6; result=${b2} * ${b} / ${b_in}; print result\" | `bc`)\n")
fout.write(" vec23=$(printf \"%-.6f\" `echo \"scale=6; result=${b3} * ${b} / ${b_in}; print result\" | `bc`)\n")
fout.write(" vec31=$(printf \"%-.6f\" `echo \"scale=6; result=${c1} * ${c} / ${c_in}; print result\" | `bc`)\n")
fout.write(" vec32=$(printf \"%-.6f\" `echo \"scale=6; result=${c2} * ${c} / ${c_in}; print result\" | `bc`)\n")
fout.write(" vec33=$(printf \"%-.6f\" `echo \"scale=6; result=${c3} * ${c} / ${c_in}; print result\" | `bc`)\n")
fout.write(" cat >> relax-${a}-${b}-${c}/optimization.fdf<<EOF\n")
fout.write("${vec11} ${vec12} ${vec13}\n")
fout.write("${vec21} ${vec22} ${vec23}\n")
fout.write("${vec31} ${vec32} ${vec33}\n")
fout.write("EOF\n")
fout.write(" cat optimization.fdf | tail -n +${lat_vec_end} >> relax-${a}-${b}-${c}/optimization.fdf\n")
fout.write(" cd relax-${a}-${b}-${c}/\n")
fout.write(" yhrun $PMF_SIESTA < optimization.fdf > optimization.out\n")
fout.write(" cd ../\n")
fout.write("done\n")
fout.write("done\n")
fout.write("done\n")
# gen pbs script
with open("opt-abc-%d-%d-%d.pbs" % (i_batch_a, i_batch_b, i_batch_c), 'w') as fout:
fout.write("#!/bin/bash\n")
fout.write("#PBS -N %s-%d-%d-%d\n" % (self.run_params["jobname"], i_batch_a, i_batch_b, i_batch_c))
fout.write("#PBS -l nodes=%d:ppn=%d\n" % (self.run_params["nodes"], self.run_params["ppn"]))
if "queue" in self.run_params and self.run_params["queue"] != None:
fout.write("#PBS -q %s\n" %self.run_params["queue"])
fout.write("\n")
fout.write("cd $PBS_O_WORKDIR\n")
fout.write("NP=`cat $PBS_NODEFILE | wc -l`\n")
fout.write("cat > optimization.fdf<<EOF\n")
fout.write(self.system.to_string())
fout.write(self.electrons.to_string())
fout.write(self.ions.to_string())
fout.write("EOF\n")
fout.write("a_in=%f\n" % a)
fout.write("b_in=%f\n" % b)
fout.write("c_in=%f\n" % c)
fout.write("a1=%f\n" % self.system.xyz.cell[0][0])
fout.write("a2=%f\n" % self.system.xyz.cell[0][1])
fout.write("a3=%f\n" % self.system.xyz.cell[0][2])
fout.write("b1=%f\n" % self.system.xyz.cell[1][0])
fout.write("b2=%f\n" % self.system.xyz.cell[1][1])
fout.write("b3=%f\n" % self.system.xyz.cell[1][2])
fout.write("c1=%f\n" % self.system.xyz.cell[2][0])
fout.write("c2=%f\n" % self.system.xyz.cell[2][1])
fout.write("c3=%f\n" % self.system.xyz.cell[2][2])
fout.write("lat_vec_begin=`cat optimization.fdf | grep -n \'%block LatticeVectors\' | cut -d \":\" -f 1`\n")
fout.write("lat_vec_end=`cat optimization.fdf | grep -n \'%endblock LatticeVectors\' | cut -d \":\" -f 1`\n")
fout.write("for a in `seq -w %f %f %f`\n" % (a+range_a_start, range_a[2], a+range_a_end))
fout.write("do\n")
fout.write("for b in `seq -w %f %f %f`\n" % (b+range_b_start, range_b[2], b+range_b_end))
fout.write("do\n")
fout.write("for c in `seq -w %f %f %f`\n" % (c+range_c_start, range_c[2], c+range_c_end))
fout.write("do\n")
fout.write(" mkdir relax-${a}-${b}-${c}\n")
fout.write(" cp *.psf relax-${a}-${b}-${c}/\n")
fout.write(" cat optimization.fdf | head -n +${lat_vec_begin} > relax-${a}-${b}-${c}/optimization.fdf\n")
fout.write(" vec11=$(printf \"%-.6f\" `echo \"scale=6; result=${a1} * ${a} / ${a_in}; print result\" | `bc`)\n")
fout.write(" vec12=$(printf \"%-.6f\" `echo \"scale=6; result=${a2} * ${a} / ${a_in}; print result\" | `bc`)\n")
fout.write(" vec13=$(printf \"%-.6f\" `echo \"scale=6; result=${a3} * ${a} / ${a_in}; print result\" | `bc`)\n")
fout.write(" vec21=$(printf \"%-.6f\" `echo \"scale=6; result=${b1} * ${b} / ${b_in}; print result\" | `bc`)\n")
fout.write(" vec22=$(printf \"%-.6f\" `echo \"scale=6; result=${b2} * ${b} / ${b_in}; print result\" | `bc`)\n")
fout.write(" vec23=$(printf \"%-.6f\" `echo \"scale=6; result=${b3} * ${b} / ${b_in}; print result\" | `bc`)\n")
fout.write(" vec31=$(printf \"%-.6f\" `echo \"scale=6; result=${c1} * ${c} / ${c_in}; print result\" | `bc`)\n")
fout.write(" vec32=$(printf \"%-.6f\" `echo \"scale=6; result=${c2} * ${c} / ${c_in}; print result\" | `bc`)\n")
fout.write(" vec33=$(printf \"%-.6f\" `echo \"scale=6; result=${c3} * ${c} / ${c_in}; print result\" | `bc`)\n")
fout.write(" cat >> relax-${a}-${b}-${c}/optimization.fdf<<EOF\n")
fout.write("${vec11} ${vec12} ${vec13}\n")
fout.write("${vec21} ${vec22} ${vec23}\n")
fout.write("${vec31} ${vec32} ${vec33}\n")
fout.write("EOF\n")
fout.write(" cat optimization.fdf | tail -n +${lat_vec_end} >> relax-${a}-${b}-${c}/optimization.fdf\n")
fout.write(" cd relax-${a}-${b}-${c}/\n")
fout.write(" mpirun -np $NP -machinefile $PBS_NODEFILE $PMF_SIESTA < optimization.fdf > optimization.out\n")
fout.write(" cd ../\n")
fout.write("done\n")
fout.write("done\n")
fout.write("done\n")
# gen local bash script
with open("opt-abc-%d-%d-%d.sh" % (i_batch_a, i_batch_b, i_batch_c), 'w') as fout:
fout.write("#!/bin/bash\n")
fout.write("cat > optimization.fdf<<EOF\n")
fout.write(self.system.to_string())
fout.write(self.electrons.to_string())
fout.write(self.ions.to_string())
fout.write("EOF\n")
fout.write("a_in=%f\n" % a)
fout.write("b_in=%f\n" % b)
fout.write("c_in=%f\n" % c)
fout.write("a1=%f\n" % self.system.xyz.cell[0][0])
fout.write("a2=%f\n" % self.system.xyz.cell[0][1])
fout.write("a3=%f\n" % self.system.xyz.cell[0][2])
fout.write("b1=%f\n" % self.system.xyz.cell[1][0])
fout.write("b2=%f\n" % self.system.xyz.cell[1][1])
fout.write("b3=%f\n" % self.system.xyz.cell[1][2])
fout.write("c1=%f\n" % self.system.xyz.cell[2][0])
fout.write("c2=%f\n" % self.system.xyz.cell[2][1])
fout.write("c3=%f\n" % self.system.xyz.cell[2][2])
fout.write("lat_vec_begin=`cat optimization.fdf | grep -n \'%block LatticeVectors\' | cut -d \":\" -f 1`\n")
fout.write("lat_vec_end=`cat optimization.fdf | grep -n \'%endblock LatticeVectors\' | cut -d \":\" -f 1`\n")
fout.write("for a in `seq -w %f %f %f`\n" % (a+range_a_start, range_a[2], a+range_a_end))
fout.write("do\n")
fout.write("for b in `seq -w %f %f %f`\n" | |
connection type.')
# Activation function
self._input_activation_fn = None
if activation_fn_in_separable_conv:
activation_fn = (torch.nn.ReLU6(inplace=False) if
use_bounded_activation else
torch.nn.ReLU(inplace=False))
else:
if use_bounded_activation:
# When use_bounded_activation is True, we clip the feature
# values and apply relu6 for activation.
activation_fn = lambda x: torch.clamp(x, -_CLIP_CAP, _CLIP_CAP)
self._input_activation_fn = torch.nn.ReLU6(inplace=False)
else:
# Original network design.
activation_fn = None
self._input_activation_fn = torch.nn.ReLU(inplace=False)
self._use_bounded_activation = use_bounded_activation
self._output_activation_fn = None
if use_bounded_activation:
self._output_activation_fn = torch.nn.ReLU6(inplace=True)
# Separable conv block.
layers = []
in_channels_ = in_channels
for i in range(3):
if self._input_activation_fn is not None:
layers += [self._input_activation_fn]
layers += [
SeparableConv2dSame(in_channels_,
depth_list[i],
kernel_size=3,
depth_multiplier=1,
regularize_depthwise=regularize_depthwise,
rate=rate*unit_rate_list[i],
stride=stride if i==2 else 1,
activation_fn=activation_fn,
use_explicit_padding=use_explicit_padding)]
in_channels_ = depth_list[i]
self._separable_conv_block = torch.nn.Sequential(*layers)
# Skip connection
self._skip_connection_type = skip_connection_type
if skip_connection_type == 'conv':
self._conv_skip_connection = torch.nn.Conv2d(in_channels,
depth_list[-1],
kernel_size=1,
stride=stride)
self._batch_norm_shortcut = torch.nn.BatchNorm2d(
depth_list[-1], **_BATCH_NORM_PARAMS)
def forward(self, x):
"""
Args:
x: A 4-D tensor with shape [batch, height, width, channels].
Returns:
The Xception module's output.
"""
residual = self._separable_conv_block(x)
if self._skip_connection_type == 'conv':
shortcut = self._conv_skip_connection(x)
shortcut = self._batch_norm_shortcut(shortcut)
if self._use_bounded_activation:
residual = torch.clamp(residual, -_CLIP_CAP, _CLIP_CAP)
shortcut = torch.clamp(shortcut, -_CLIP_CAP, _CLIP_CAP)
outputs = residual + shortcut
if self._use_bounded_activation:
outputs = self._output_activation_fn(outputs)
elif self._skip_connection_type == 'sum':
if self._use_bounded_activation:
residual = torch.clamp(residual, -_CLIP_CAP, _CLIP_CAP)
x = torch.clamp(x, -_CLIP_CAP, _CLIP_CAP)
outputs = residual + x
if self._use_bounded_activation:
outputs = self._output_activation_fn(outputs)
else:
outputs = residual
return outputs
class StackBlocksDense(torch.nn.Module):
    """Stacks Xception blocks and controls output feature density.

    This class allows the user to explicitly control the output stride, which
    is the ratio of the input to output spatial resolution. This is useful for
    dense prediction tasks such as semantic segmentation or object detection.

    Control of the output feature density is implemented by atrous convolution.
    """
    def __init__(self, blocks, output_stride=None):
        """Constructor.

        Args:
            blocks: A list of length equal to the number of Xception blocks.
                Each element is an Xception Block object describing the units
                in the block.
            output_stride: If None, then the output will be computed at the
                nominal network stride. If output_stride is not None, it
                specifies the requested ratio of input to output spatial
                resolution, which needs to be equal to the product of unit
                strides from the start up to some level of Xception. For
                example, if the Xception employs units with strides 1, 2, 1,
                3, 4, 1, then valid values for the output_stride are 1, 2, 6,
                24 or None (which is equivalent to output_stride=24).

        Raises:
            ValueError: If the target output_stride is not valid.
        """
        super(StackBlocksDense, self).__init__()

        # The current_stride variable keeps track of the effective stride of
        # the activations. This allows us to invoke atrous convolution whenever
        # applying the next residual unit would result in the activations
        # having stride larger than the target output_stride.
        current_stride = 1

        # The atrous convolution rate parameter.
        rate = 1

        layers = []
        for block in blocks:
            for unit in block.args:
                if output_stride is not None and current_stride > output_stride:
                    raise ValueError('The target output_stride cannot be '
                                     'reached.')
                # If we have reached the target output_stride, then we need to
                # employ atrous convolution with stride=1 and multiply the
                # atrous rate by the current unit's stride for use in
                # subsequent layers.
                if output_stride is not None and current_stride == output_stride:
                    layers += [block.unit_fn(rate=rate, **dict(unit, stride=1))]
                    rate *= unit.get('stride', 1)
                else:
                    layers += [block.unit_fn(rate=1, **unit)]
                    current_stride *= unit.get('stride', 1)

        if output_stride is not None and current_stride != output_stride:
            # Fixed typo in the error message ('ouput_stride').
            raise ValueError('The target output_stride cannot be reached.')
        self._blocks = torch.nn.Sequential(*layers)

    def forward(self, x):
        """
        Args:
            x: A tensor of shape [batch, height, width, channels].

        Returns:
            Output tensor with stride equal to the specified output_stride.
        """
        x = self._blocks(x)
        return x
class Xception(torch.nn.Module):
    """Generator for Xception models.

    This class generates a family of Xception models. See the xception_*()
    methods for specific model instantiations, obtained by selecting different
    block instantiations that produce Xception of various depths.
    """
    def __init__(self, blocks, num_classes=None, global_pool=True,
                 keep_prob=0.5, output_stride=None, scope=None):
        """Constructor.

        Args:
            blocks: A list of length equal to the number of Xception blocks.
                Each element is an Xception Block object describing the units
                in the block.
            num_classes: Number of predicted classes for classification tasks.
                If 0 or None, we return the features before the logit layer.
            global_pool: If True, we perform global average pooling before
                computing logits. Set to True for image classification, False
                for dense prediction.
            keep_prob: Keep probability used in the pre-logits dropout layer.
            output_stride: If None, the output will be computed at the
                nominal network stride. If output_stride is not None, it
                specifies the requested ratio of input to output spatial
                resolution.
            scope: Optional variable_scope.

        Raises:
            ValueError: If the target output_stride is not valid.
        """
        super(Xception, self).__init__()
        self._scope = scope

        layers = []
        if output_stride is not None:
            if output_stride % 2 != 0:
                raise ValueError('The output_stride must be a multiple of 2.')
            # The root block below already downsamples by a factor of 2.
            output_stride /= 2

        # Root block function operated on inputs.
        layers += [Conv2dSame(3, 32, 3, stride=2),
                   Conv2dSame(32, 64, 3, stride=1)]

        # Extract features for entry_flow, middle_flow, and exit_flow.
        layers += [StackBlocksDense(blocks, output_stride)]

        if global_pool:
            # Global average pooling.
            layers += [torch.nn.AdaptiveAvgPool2d(output_size=(1, 1))]
        if num_classes:
            # BUG FIX: keep_prob is a *keep* probability while Dropout2d
            # expects a *drop* probability, so pass the complement.
            # (Unchanged for the default keep_prob=0.5.)
            layers += [torch.nn.Dropout2d(p=1.0 - keep_prob, inplace=True),
                       torch.nn.Conv2d(blocks[-1].args[-1]['depth_list'][-1],
                                       num_classes, 1)]
        self._layers = torch.nn.Sequential(*layers)

    def forward(self, x):
        """
        Args:
            x: A tensor of shape [batch, height, width, channels].

        Returns:
            Tuple (output, low_level_features): the final feature map (stride
            equal to the specified output_stride) and the activations after
            the first Xception block.
        """
        # Stem convolutions.
        x = self._layers[0](x)
        x = self._layers[1](x)
        stack = self._layers[2]
        # Low-level features are the activations after the first Xception
        # block. Reuse the intermediate activations instead of running the
        # stem and first block twice (the original implementation recomputed
        # them after already evaluating the full network).
        low_level_features = stack._blocks[0](x)
        # Remaining Xception blocks (slicing a Sequential yields a
        # Sequential), then any pooling / logits layers.
        output = stack._blocks[1:](low_level_features)
        for layer in self._layers[3:]:
            output = layer(output)
        return output, low_level_features

    @property
    def scope(self):
        # Optional scope name recorded at construction time.
        return self._scope
def xception_block(scope,
                   in_channels,
                   depth_list,
                   skip_connection_type,
                   activation_fn_in_separable_conv,
                   regularize_depthwise,
                   num_units,
                   stride,
                   unit_rate_list=None):
    """Helper function for creating a Xception block.

    Args:
        scope: The scope of the block.
        in_channels: The number of input filters.
        depth_list: The depth of the bottleneck layer for each unit.
        skip_connection_type: Skip connection type for the residual path. Only
            supports 'conv', 'sum', or 'none'.
        activation_fn_in_separable_conv: Includes activation function in the
            separable convolution or not.
        regularize_depthwise: Whether or not apply L2-norm regularization on
            the depthwise convolution weights.
        num_units: The number of units in the block.
        stride: The stride of the block, implemented as a stride in the last
            unit. All other units have stride=1.
        unit_rate_list: A list of three integers, determining the unit rate in
            the corresponding xception block.

    Returns:
        An xception block.
    """
    rates = _DEFAULT_MULTI_GRID if unit_rate_list is None else unit_rate_list
    unit_args = {
        'in_channels': in_channels,
        'depth_list': depth_list,
        'skip_connection_type': skip_connection_type,
        'activation_fn_in_separable_conv': activation_fn_in_separable_conv,
        'regularize_depthwise': regularize_depthwise,
        'stride': stride,
        'unit_rate_list': rates,
    }
    # Every unit in the block is configured identically.
    return Block(scope, XceptionModule,
                 [dict(unit_args) for _ in range(num_units)])
def Xception41(num_classes=None,
               global_pool=True,
               keep_prob=0.5,
               output_stride=None,
               regularize_depthwise=False,
               multi_grid=None,
               scope='xception_41'):
    """Xception-41 model."""
    # (scope, in_channels, depth_list, skip_type, act_in_sep_conv,
    #  num_units, stride, unit_rate_list)
    specs = [
        ('entry_flow/block1', 64, [128, 128, 128], 'conv', False, 1, 2, None),
        ('entry_flow/block2', 128, [256, 256, 256], 'conv', False, 1, 2, None),
        ('entry_flow/block3', 256, [728, 728, 728], 'conv', False, 1, 2, None),
        ('middle_flow/block1', 728, [728, 728, 728], 'sum', False, 8, 1, None),
        ('exit_flow/block1', 728, [728, 1024, 1024], 'conv', False, 1, 2, None),
        ('exit_flow/block2', 1024, [1536, 1536, 2048], 'none', True, 1, 1,
         multi_grid),
    ]
    blocks = [
        xception_block(name,
                       in_channels=cin,
                       depth_list=depths,
                       skip_connection_type=skip,
                       activation_fn_in_separable_conv=act,
                       regularize_depthwise=regularize_depthwise,
                       num_units=units,
                       stride=unit_stride,
                       unit_rate_list=rates)
        for name, cin, depths, skip, act, units, unit_stride, rates in specs
    ]
    return Xception(blocks=blocks, num_classes=num_classes,
                    global_pool=global_pool, keep_prob=keep_prob,
                    output_stride=output_stride, scope=scope)
def xception_41(num_classes=None,
                global_pool=True,
                keep_prob=0.5,
                output_stride=None,
                regularize_depthwise=False,
                multi_grid=None,
                scope='xception_41',
                pretrained=True,
                checkpoint_path='./pretrained/xception_41.pth'):
    """Build an Xception-41 model, optionally loading pretrained weights.

    Args:
        num_classes: Number of predicted classes; None returns features only.
        global_pool: Whether to apply global average pooling before logits.
        keep_prob: Keep probability for the pre-logits dropout layer.
        output_stride: Requested ratio of input to output spatial resolution.
        regularize_depthwise: Whether to L2-regularize depthwise weights.
        multi_grid: Unit rates for the last block (atrous multi-grid).
        scope: Optional variable scope name.
        pretrained: If True, load weights from checkpoint_path.
        checkpoint_path: Path to the pretrained state dict.

    Returns:
        The constructed Xception module.
    """
    # BUG FIX: regularize_depthwise and multi_grid were accepted but silently
    # dropped; forward them to the model builder so they take effect.
    xception = Xception41(num_classes=num_classes,
                          global_pool=global_pool,
                          keep_prob=keep_prob,
                          output_stride=output_stride,
                          regularize_depthwise=regularize_depthwise,
                          multi_grid=multi_grid,
                          scope=scope)
    if pretrained:
        _load_state_dict(xception, num_classes, checkpoint_path)
    return xception
def Xception65(num_classes=None,
global_pool=True,
keep_prob=0.5,
output_stride=None,
regularize_depthwise=False,
multi_grid=None,
scope='xception_65'):
"""Xception-65 model."""
blocks = [
xception_block('entry_flow/block1',
in_channels=64,
depth_list=[128, 128, 128],
skip_connection_type='conv',
activation_fn_in_separable_conv=False,
regularize_depthwise=regularize_depthwise,
num_units=1,
stride=2),
xception_block('entry_flow/block2',
in_channels=128,
depth_list=[256, 256, 256],
skip_connection_type='conv',
activation_fn_in_separable_conv=False,
regularize_depthwise=regularize_depthwise,
num_units=1,
stride=2),
xception_block('entry_flow/block3',
in_channels=256,
depth_list=[728, 728, 728],
skip_connection_type='conv',
activation_fn_in_separable_conv=False,
regularize_depthwise=regularize_depthwise,
num_units=1,
stride=2),
xception_block('middle_flow/block1',
in_channels=728,
depth_list=[728, 728, | |
#----------------------------------------------------------------------------
# Name: wx.lib.intctrl.py
# Author: <NAME>
# Created: 01/16/2003
# Copyright: (c) 2003 by <NAME>
# License: wxWindows license
# Tags: phoenix-port, py3-port, unittest, documented
#----------------------------------------------------------------------------
# NOTE:
# This was written to provide a standard integer edit control for wxPython.
#
# IntCtrl permits integer (long) values to be retrieved or set via
# .GetValue() and .SetValue(), and provides an EVT_INT() event function
# for trapping changes to the control.
#
# It supports negative integers as well as the naturals, and does not
# permit leading zeros or an empty control; attempting to delete the
# contents of the control will result in a (selected) value of zero,
# thus preserving a legitimate integer value, or an empty control
# (if a value of None is allowed for the control.) Similarly, replacing the
# contents of the control with '-' will result in a selected (absolute)
# value of -1.
#
# IntCtrl also supports range limits, with the option of either
# enforcing them or simply coloring the text of the control if the limits
# are exceeded.
#----------------------------------------------------------------------------
# 12/08/2003 - <NAME> (<EMAIL>)
#
# o 2.5 Compatibility changes
#
# 12/20/2003 - <NAME> (<EMAIL>)
#
# o wxIntUpdateEvent -> IntUpdateEvent
# o wxIntValidator -> IntValidator
# o wxIntCtrl -> IntCtrl
#
import sys
import string
import types
import wx
import six
#----------------------------------------------------------------------------
# Largest/smallest platform-native integers (``six.MAXSIZE`` is
# ``sys.maxsize``); used as the default, effectively-unbounded limits.
MAXSIZE = six.MAXSIZE # (constants should be in upper case)
MINSIZE = -six.MAXSIZE-1

# Integer type used when coercing values: Python 2's ``long`` is unbounded,
# and on Python 3 plain ``int`` already is.
if six.PY2:
    LONGTYPE = long
else:
    LONGTYPE = int

#----------------------------------------------------------------------------
# Used to trap events indicating that the current
# integer value of the control has been changed.
wxEVT_COMMAND_INT_UPDATED = wx.NewEventType()
# Binder used by clients as ``ctrl.Bind(EVT_INT, handler)``; expects one id.
EVT_INT = wx.PyEventBinder(wxEVT_COMMAND_INT_UPDATED, 1)
#----------------------------------------------------------------------------
# wxWindows' wxTextCtrl translates Composite "control key"
# events into single events before returning them to its OnChar
# routine. The doc says that this results in 1 for Ctrl-A, 2 for
# Ctrl-B, etc. However, there are no wxPython or wxWindows
# symbols for them, so codes for Ctrl-C (copy), Ctrl-X (cut) and
# Ctrl-V (paste) are defined here for readability:
# BUG FIX: WXK_CTRL_C is referenced in IntValidator.OnChar (wxMac branch)
# but was never defined, causing a NameError on Ctrl-C under macOS.
WXK_CTRL_C = (ord('C')+1) - ord('A')
WXK_CTRL_X = (ord('X')+1) - ord('A')
WXK_CTRL_V = (ord('V')+1) - ord('A')
class IntUpdatedEvent(wx.PyCommandEvent):
    """Event sent from the :class:`~lib.intctrl.IntCtrl` when control is updated."""

    def __init__(self, id, value = 0, object=None):
        """
        Default class constructor.

        :param int `id`: the object id
        :param int `value`: the value
        :param `object`: the object of the event
        """
        super(IntUpdatedEvent, self).__init__(wxEVT_COMMAND_INT_UPDATED, id)
        # Snapshot of the control's value when the event was generated.
        self.__value = value
        self.SetEventObject(object)

    def GetValue(self):
        """
        Retrieve the value of the control at the time
        this event was generated."""
        return self.__value
#----------------------------------------------------------------------------
class IntValidator(wx.Validator):
"""
Validator class used with :class:`~lib.intctrl.IntCtrl` handles all validation of
input prior to changing the value of the underlying :class:`TextCtrl`.
"""
def __init__(self):
"""Standard constructor"""
wx.Validator.__init__(self)
self.Bind(wx.EVT_CHAR, self.OnChar)
def Clone (self):
"""
Standard cloner
..note::
Every validator must implement the Clone() method.
"""
return self.__class__()
def Validate(self, window): # window here is the *parent* of the ctrl
"""
Because each operation on the control is vetted as it's made,
the value of the control is always valid.
"""
return 1
def OnChar(self, event):
"""
Validates keystrokes to make sure the resulting value will a legal
value. Erasing the value causes it to be set to 0, with the value
selected, so it can be replaced. Similarly, replacing the value
with a '-' sign causes the value to become -1, with the value
selected. Leading zeros are removed if introduced by selection,
and are prevented from being inserted.
"""
key = event.GetKeyCode()
ctrl = event.GetEventObject()
if 'wxMac' in wx.PlatformInfo:
if event.CmdDown() and key == ord('c'):
key = WXK_CTRL_C
elif event.CmdDown() and key == ord('v'):
key = WXK_CTRL_V
value = ctrl.GetValue()
textval = wx.TextCtrl.GetValue(ctrl)
allow_none = ctrl.IsNoneAllowed()
pos = ctrl.GetInsertionPoint()
sel_start, sel_to = ctrl.GetSelection()
select_len = sel_to - sel_start
# (Uncomment for debugging:)
## print('keycode:', key)
## print('pos:', pos)
## print('sel_start, sel_to:', sel_start, sel_to)
## print('select_len:', select_len)
## print('textval:', textval)
# set defaults for processing:
allow_event = 1
set_to_none = 0
set_to_zero = 0
set_to_minus_one = 0
paste = 0
internally_set = 0
new_value = value
new_text = textval
new_pos = pos
# Validate action, and predict resulting value, so we can
# range check the result and validate that too.
if key in (wx.WXK_DELETE, wx.WXK_BACK, WXK_CTRL_X):
if select_len:
new_text = textval[:sel_start] + textval[sel_to:]
elif key == wx.WXK_DELETE and pos < len(textval):
new_text = textval[:pos] + textval[pos+1:]
elif key == wx.WXK_BACK and pos > 0:
new_text = textval[:pos-1] + textval[pos:]
# (else value shouldn't change)
if new_text in ('', '-'):
# Deletion of last significant digit:
if allow_none and new_text == '':
new_value = None
set_to_none = 1
else:
new_value = 0
set_to_zero = 1
else:
try:
new_value = ctrl._fromGUI(new_text)
except ValueError:
allow_event = 0
elif key == WXK_CTRL_V: # (see comments at top of file)
# Only allow paste if number:
paste_text = ctrl._getClipboardContents()
new_text = textval[:sel_start] + paste_text + textval[sel_to:]
if new_text == '' and allow_none:
new_value = None
set_to_none = 1
else:
try:
# Convert the resulting strings, verifying they
# are legal integers and will fit in proper
# size if ctrl limited to int. (if not,
# disallow event.)
new_value = ctrl._fromGUI(new_text)
if paste_text:
paste_value = ctrl._fromGUI(paste_text)
else:
paste_value = 0
new_pos = sel_start + len(str(paste_value))
# if resulting value is 0, truncate and highlight value:
if new_value == 0 and len(new_text) > 1:
set_to_zero = 1
elif paste_value == 0:
# Disallow pasting a leading zero with nothing selected:
if( select_len == 0
and value is not None
and ( (value >= 0 and pos == 0)
or (value < 0 and pos in [0,1]) ) ):
allow_event = 0
paste = 1
except ValueError:
allow_event = 0
elif key < wx.WXK_SPACE or key > 255:
pass # event ok
elif chr(key) == '-':
# Allow '-' to result in -1 if replacing entire contents:
if( value is None
or (value == 0 and pos == 0)
or (select_len >= len(str(abs(value)))) ):
new_value = -1
set_to_minus_one = 1
# else allow negative sign only at start, and only if
# number isn't already zero or negative:
elif pos != 0 or (value is not None and value < 0):
allow_event = 0
else:
new_text = '-' + textval
new_pos = 1
try:
new_value = ctrl._fromGUI(new_text)
except ValueError:
allow_event = 0
elif chr(key) in string.digits:
# disallow inserting a leading zero with nothing selected
if( chr(key) == '0'
and select_len == 0
and value is not None
and ( (value >= 0 and pos == 0)
or (value < 0 and pos in [0,1]) ) ):
allow_event = 0
# disallow inserting digits before the minus sign:
elif value is not None and value < 0 and pos == 0:
allow_event = 0
else:
new_text = textval[:sel_start] + chr(key) + textval[sel_to:]
try:
new_value = ctrl._fromGUI(new_text)
except ValueError:
allow_event = 0
else:
# not a legal char
allow_event = 0
if allow_event:
# Do range checking for new candidate value:
if ctrl.IsLimited() and not ctrl.IsInBounds(new_value):
allow_event = 0
elif new_value is not None:
# ensure resulting text doesn't result in a leading 0:
if not set_to_zero and not set_to_minus_one:
if( (new_value > 0 and new_text[0] == '0')
or (new_value < 0 and new_text[1] == '0')
or (new_value == 0 and select_len > 1 ) ):
# Allow replacement of leading chars with
# zero, but remove the leading zero, effectively
# making this like "remove leading digits"
# Account for leading zero when positioning cursor:
if( key == wx.WXK_BACK
or (paste and paste_value == 0 and new_pos > 0) ):
new_pos = new_pos - 1
wx.CallAfter(ctrl.SetValue, new_value)
wx.CallAfter(ctrl.SetInsertionPoint, new_pos)
internally_set = 1
elif paste:
# Always do paste numerically, to remove
# leading/trailing spaces
wx.CallAfter(ctrl.SetValue, new_value)
wx.CallAfter(ctrl.SetInsertionPoint, new_pos)
internally_set = 1
elif (new_value == 0 and len(new_text) > 1 ):
allow_event = 0
if allow_event:
ctrl._colorValue(new_value) # (one way or t'other)
# (Uncomment for debugging:)
## if allow_event:
## print('new value:', new_value)
## if paste: print('paste')
## if set_to_none: print('set_to_none')
## if set_to_zero: print('set_to_zero')
## if set_to_minus_one: print('set_to_minus_one')
## if internally_set: print('internally_set')
## else:
## print('new text:', new_text)
## print('disallowed')
## print()
if allow_event:
if set_to_none:
wx.CallAfter(ctrl.SetValue, new_value)
elif set_to_zero:
# select to "empty" numeric value
wx.CallAfter(ctrl.SetValue, new_value)
wx.CallAfter(ctrl.SetInsertionPoint, 0)
wx.CallAfter(ctrl.SetSelection, 0, 1)
elif set_to_minus_one:
wx.CallAfter(ctrl.SetValue, new_value)
wx.CallAfter(ctrl.SetInsertionPoint, 1)
wx.CallAfter(ctrl.SetSelection, 1, 2)
elif | |
-5): (-1, 1),
(2, 32, -1, -4): (1, 1),
(2, 32, -1, -3): (-1, 1),
(2, 32, -1, -2): (0, 1),
(2, 32, -1, -1): (0, 1),
(2, 32, -1, 0): (0, 1),
(2, 32, -1, 1): (0, 1),
(2, 32, -1, 2): (0, 1),
(2, 32, -1, 3): (0, 0),
(2, 32, -1, 4): (-1, -1),
(2, 32, -1, 5): (0, 1),
(2, 32, 0, -5): (-1, 1),
(2, 32, 0, -4): (0, 1),
(2, 32, 0, -3): (0, 1),
(2, 32, 0, -2): (0, 1),
(2, 32, 0, -1): (-1, 1),
(2, 32, 0, 0): (-1, 1),
(2, 32, 0, 1): (-1, 1),
(2, 32, 0, 2): (-1, 1),
(2, 32, 0, 3): (-1, 0),
(2, 32, 0, 4): (-1, -1),
(2, 32, 0, 5): (0, 1),
(2, 32, 1, -5): (0, 1),
(2, 32, 1, -4): (-1, 1),
(2, 32, 1, -3): (-1, 1),
(2, 32, 1, -2): (-1, 1),
(2, 32, 1, -1): (-1, 1),
(2, 32, 1, 0): (-1, 1),
(2, 32, 1, 1): (-1, 1),
(2, 32, 1, 2): (-1, 0),
(2, 32, 1, 3): (-1, 1),
(2, 32, 1, 4): (-1, 1),
(2, 32, 1, 5): (-1, 1),
(2, 32, 2, -5): (0, 1),
(2, 32, 2, -4): (0, 1),
(2, 32, 2, -3): (-1, 1),
(2, 32, 2, -2): (-1, 1),
(2, 32, 2, -1): (-1, 0),
(2, 32, 2, 0): (-1, -1),
(2, 32, 2, 1): (-1, 1),
(2, 32, 2, 2): (-1, 1),
(2, 32, 2, 3): (-1, 1),
(2, 32, 2, 4): (-1, 1),
(2, 32, 2, 5): (-1, 1),
(2, 32, 3, -5): (0, 1),
(2, 32, 3, -4): (0, 1),
(2, 32, 3, -3): (0, 1),
(2, 32, 3, -2): (-1, 1),
(2, 32, 3, -1): (-1, 0),
(2, 32, 3, 0): (-1, -1),
(2, 32, 3, 1): (-1, 1),
(2, 32, 3, 2): (-1, 1),
(2, 32, 3, 3): (-1, 1),
(2, 32, 3, 4): (-1, 1),
(2, 32, 3, 5): (-1, 1),
(2, 32, 4, -5): (-1, 1),
(2, 32, 4, -4): (-1, 1),
(2, 32, 4, -3): (-1, 1),
(2, 32, 4, -2): (-1, 1),
(2, 32, 4, -1): (-1, 0),
(2, 32, 4, 0): (-1, -1),
(2, 32, 4, 1): (-1, 1),
(2, 32, 4, 2): (-1, 1),
(2, 32, 4, 3): (-1, 1),
(2, 32, 4, 4): (-1, 1),
(2, 32, 4, 5): (-1, 1),
(2, 32, 5, -5): (0, 1),
(2, 32, 5, -4): (0, 1),
(2, 32, 5, -3): (0, 1),
(2, 32, 5, -2): (0, 1),
(2, 32, 5, -1): (0, 1),
(2, 32, 5, 0): (0, 1),
(2, 32, 5, 1): (0, 1),
(2, 32, 5, 2): (0, 1),
(2, 32, 5, 3): (-1, 1),
(2, 32, 5, 4): (-1, 1),
(2, 32, 5, 5): (-1, 1),
(2, 33, -5, -5): (0, 1),
(2, 33, -5, -4): (0, 1),
(2, 33, -5, -3): (0, 1),
(2, 33, -5, -2): (0, 1),
(2, 33, -5, -1): (0, 1),
(2, 33, -5, 0): (0, 1),
(2, 33, -5, 1): (0, 1),
(2, 33, -5, 2): (0, 0),
(2, 33, -5, 3): (-1, -1),
(2, 33, -5, 4): (0, 1),
(2, 33, -5, 5): (0, 1),
(2, 33, -4, -5): (0, 1),
(2, 33, -4, -4): (0, 1),
(2, 33, -4, -3): (0, 1),
(2, 33, -4, -2): (0, 1),
(2, 33, -4, -1): (0, 1),
(2, 33, -4, 0): (0, 1),
(2, 33, -4, 1): (0, 1),
(2, 33, -4, 2): (0, 0),
(2, 33, -4, 3): (-1, -1),
(2, 33, -4, 4): (0, 1),
(2, 33, -4, 5): (0, 1),
(2, 33, -3, -5): (0, 1),
(2, 33, -3, -4): (0, 1),
(2, 33, -3, -3): (0, 1),
(2, 33, -3, -2): (0, 1),
(2, 33, -3, -1): (0, 1),
(2, 33, -3, 0): (0, 1),
(2, 33, -3, 1): (0, 1),
(2, 33, -3, 2): (0, 0),
(2, 33, -3, 3): (-1, -1),
(2, 33, -3, 4): (0, 1),
(2, 33, -3, 5): (0, 1),
(2, 33, -2, -5): (0, 1),
(2, 33, -2, -4): (0, 1),
(2, 33, -2, -3): (0, 1),
(2, 33, -2, -2): (0, 1),
(2, 33, -2, -1): (0, 1),
(2, 33, -2, 0): (0, 1),
(2, 33, -2, 1): (0, 1),
(2, 33, -2, 2): (0, 0),
(2, 33, -2, 3): (-1, -1),
(2, 33, -2, 4): (0, 1),
(2, 33, -2, 5): (0, 1),
(2, 33, -1, -5): (1, 1),
(2, 33, -1, -4): (-1, 1),
(2, 33, -1, -3): (0, 1),
(2, 33, -1, -2): (0, 1),
(2, 33, -1, -1): (0, 1),
(2, 33, -1, 0): (0, 1),
(2, 33, -1, 1): (0, 1),
(2, 33, -1, 2): (0, 0),
(2, 33, -1, 3): (-1, -1),
(2, 33, -1, 4): (0, 1),
(2, 33, -1, 5): (0, 1),
(2, 33, 0, -5): (0, 1),
(2, 33, 0, -4): (0, 1),
(2, 33, 0, -3): (0, 1),
(2, 33, 0, -2): (-1, 1),
(2, 33, 0, -1): (-1, 1),
(2, 33, 0, 0): (-1, 1),
(2, 33, 0, 1): (-1, 1),
(2, 33, 0, 2): (-1, 0),
(2, 33, 0, 3): (-1, -1),
(2, 33, 0, 4): (0, 1),
(2, 33, 0, 5): (0, 1),
(2, 33, 1, -5): (-1, 1),
(2, 33, 1, -4): (-1, 1),
(2, 33, 1, -3): (-1, 1),
(2, 33, 1, -2): (-1, 1),
(2, 33, 1, -1): (-1, 1),
(2, 33, 1, 0): (-1, 1),
(2, 33, 1, 1): (-1, 0),
(2, 33, 1, 2): (-1, 1),
(2, 33, 1, 3): (-1, 1),
(2, 33, 1, 4): (-1, 1),
(2, 33, 1, 5): (-1, 1),
(2, 33, 2, -5): (0, 1),
(2, 33, 2, -4): (-1, 1),
(2, 33, 2, -3): (-1, 1),
(2, 33, 2, -2): (-1, 0),
(2, 33, 2, -1): (-1, -1),
(2, 33, 2, 0): (-1, -1),
(2, 33, 2, 1): (-1, 1),
(2, 33, 2, 2): (-1, 1),
(2, 33, 2, 3): (-1, 1),
(2, 33, 2, 4): (-1, 1),
(2, 33, 2, 5): (-1, 1),
(2, 33, 3, -5): (0, 1),
(2, 33, 3, -4): (0, 1),
(2, 33, 3, -3): (-1, 1),
(2, 33, 3, -2): (-1, 0),
(2, 33, 3, -1): (-1, -1),
(2, 33, 3, 0): (-1, 1),
(2, 33, 3, 1): (-1, 1),
(2, 33, 3, 2): (-1, 1),
(2, 33, 3, 3): (-1, 1),
(2, 33, 3, 4): (-1, 1),
(2, 33, 3, 5): (-1, 1),
(2, 33, 4, -5): (-1, 1),
(2, 33, 4, -4): (-1, 1),
(2, 33, 4, -3): (-1, 1),
(2, 33, 4, -2): (-1, 0),
(2, 33, 4, -1): (-1, -1),
(2, 33, 4, 0): (-1, 1),
(2, 33, 4, 1): (-1, 1),
(2, 33, 4, 2): (-1, 1),
(2, 33, 4, 3): (-1, 1),
(2, 33, 4, 4): (-1, 1),
(2, 33, 4, 5): (-1, 1),
(2, 33, 5, -5): (0, 1),
(2, 33, 5, -4): (0, 1),
(2, 33, 5, -3): (0, 1),
(2, 33, 5, -2): (0, 0),
(2, 33, 5, -1): (0, 1),
(2, 33, 5, 0): (0, 1),
(2, 33, 5, 1): (0, 1),
(2, 33, 5, 2): (-1, 1),
(2, 33, 5, 3): (-1, 1),
(2, 33, 5, 4): (-1, 1),
(2, 33, 5, 5): (-1, 1),
(2, 34, -5, -5): (0, 1),
(2, 34, -5, -4): (0, 1),
(2, 34, -5, -3): (0, 1),
(2, 34, -5, -2): (0, 1),
(2, 34, -5, -1): (0, 1),
(2, 34, -5, 0): (0, 1),
(2, 34, -5, 1): (0, 0),
(2, 34, -5, 2): (-1, -1),
(2, 34, -5, 3): (0, 1),
(2, 34, -5, 4): (0, 1),
(2, 34, -5, 5): (0, 1),
(2, 34, -4, -5): (0, 1),
(2, 34, -4, -4): (0, 1),
(2, 34, -4, -3): (0, 1),
(2, 34, -4, -2): (0, 1),
(2, 34, -4, -1): (0, | |
<reponame>piyush1301/plotly.py
import _plotly_utils.basevalidators
class YsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the waterfall trace's `ysrc` property."""

    def __init__(self, plotly_name='ysrc', parent_name='waterfall', **kwargs):
        # Install generated defaults unless the caller supplied overrides.
        kwargs.setdefault('edit_type', 'none')
        kwargs.setdefault('role', 'info')
        super(YsrcValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class YAxisValidator(_plotly_utils.basevalidators.SubplotidValidator):
    """Validator for the waterfall trace's `yaxis` subplot reference."""

    def __init__(self, plotly_name='yaxis', parent_name='waterfall', **kwargs):
        # Install generated defaults unless the caller supplied overrides.
        kwargs.setdefault('dflt', 'y')
        kwargs.setdefault('edit_type', 'calc+clearAxisTypes')
        kwargs.setdefault('role', 'info')
        super(YAxisValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class Y0Validator(_plotly_utils.basevalidators.AnyValidator):
    """Validator for the waterfall trace's `y0` property."""

    def __init__(self, plotly_name='y0', parent_name='waterfall', **kwargs):
        # Install generated defaults unless the caller supplied overrides.
        kwargs.setdefault('anim', True)
        kwargs.setdefault('edit_type', 'calc+clearAxisTypes')
        kwargs.setdefault('role', 'info')
        super(Y0Validator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class YValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the waterfall trace's `y` data array."""

    def __init__(self, plotly_name='y', parent_name='waterfall', **kwargs):
        # Install generated defaults unless the caller supplied overrides.
        kwargs.setdefault('anim', True)
        kwargs.setdefault('edit_type', 'calc+clearAxisTypes')
        kwargs.setdefault('role', 'data')
        super(YValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class XsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the waterfall trace's `xsrc` property."""

    def __init__(self, plotly_name='xsrc', parent_name='waterfall', **kwargs):
        # Install generated defaults unless the caller supplied overrides.
        kwargs.setdefault('edit_type', 'none')
        kwargs.setdefault('role', 'info')
        super(XsrcValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class XAxisValidator(_plotly_utils.basevalidators.SubplotidValidator):
    """Validator for the waterfall trace's `xaxis` subplot reference."""

    def __init__(self, plotly_name='xaxis', parent_name='waterfall', **kwargs):
        # Install generated defaults unless the caller supplied overrides.
        kwargs.setdefault('dflt', 'x')
        kwargs.setdefault('edit_type', 'calc+clearAxisTypes')
        kwargs.setdefault('role', 'info')
        super(XAxisValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class X0Validator(_plotly_utils.basevalidators.AnyValidator):
    """Validator for the waterfall trace's `x0` property."""

    def __init__(self, plotly_name='x0', parent_name='waterfall', **kwargs):
        # Install generated defaults unless the caller supplied overrides.
        kwargs.setdefault('anim', True)
        kwargs.setdefault('edit_type', 'calc+clearAxisTypes')
        kwargs.setdefault('role', 'info')
        super(X0Validator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class XValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the waterfall trace's `x` data array."""

    def __init__(self, plotly_name='x', parent_name='waterfall', **kwargs):
        # Install generated defaults unless the caller supplied overrides.
        kwargs.setdefault('anim', True)
        kwargs.setdefault('edit_type', 'calc+clearAxisTypes')
        kwargs.setdefault('role', 'data')
        super(XValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class WidthsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the waterfall trace's `widthsrc` property."""

    def __init__(self, plotly_name='widthsrc', parent_name='waterfall', **kwargs):
        # Install generated defaults unless the caller supplied overrides.
        kwargs.setdefault('edit_type', 'none')
        kwargs.setdefault('role', 'info')
        super(WidthsrcValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class WidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the waterfall trace's `width` property (>= 0)."""

    def __init__(self, plotly_name='width', parent_name='waterfall', **kwargs):
        # Install generated defaults unless the caller supplied overrides.
        kwargs.setdefault('array_ok', True)
        kwargs.setdefault('edit_type', 'calc')
        kwargs.setdefault('min', 0)
        kwargs.setdefault('role', 'info')
        super(WidthValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class VisibleValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the waterfall trace's `visible` property."""

    def __init__(self, plotly_name='visible', parent_name='waterfall', **kwargs):
        # Install generated defaults unless the caller supplied overrides.
        kwargs.setdefault('edit_type', 'calc')
        kwargs.setdefault('role', 'info')
        kwargs.setdefault('values', [True, False, 'legendonly'])
        super(VisibleValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class UirevisionValidator(_plotly_utils.basevalidators.AnyValidator):
    """Validator for the waterfall trace's `uirevision` property."""

    def __init__(self, plotly_name='uirevision', parent_name='waterfall', **kwargs):
        # Install generated defaults unless the caller supplied overrides.
        kwargs.setdefault('edit_type', 'none')
        kwargs.setdefault('role', 'info')
        super(UirevisionValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class UidValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the waterfall trace's `uid` property."""

    def __init__(self, plotly_name='uid', parent_name='waterfall', **kwargs):
        # Install generated defaults unless the caller supplied overrides.
        kwargs.setdefault('anim', True)
        kwargs.setdefault('edit_type', 'plot')
        kwargs.setdefault('role', 'info')
        super(UidValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class TotalsValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the waterfall trace's `totals` property."""

    def __init__(
        self, plotly_name='totals', parent_name='waterfall', **kwargs
    ):
        super(TotalsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop('data_class_str', 'Totals'),
            # data_docs is a runtime string consumed to build generated class
            # documentation; the literal must stay byte-identical.
            data_docs=kwargs.pop(
                'data_docs', """
            marker
            plotly.graph_objs.waterfall.totals.Marker
            instance or dict with compatible properties
            """
            ),
            **kwargs
        )
import _plotly_utils.basevalidators
class TextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the waterfall trace's `textsrc` property."""

    def __init__(self, plotly_name='textsrc', parent_name='waterfall', **kwargs):
        # Install generated defaults unless the caller supplied overrides.
        kwargs.setdefault('edit_type', 'none')
        kwargs.setdefault('role', 'info')
        super(TextsrcValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class TextpositionsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the waterfall trace's `textpositionsrc` property."""

    def __init__(self, plotly_name='textpositionsrc', parent_name='waterfall', **kwargs):
        # Install generated defaults unless the caller supplied overrides.
        kwargs.setdefault('edit_type', 'none')
        kwargs.setdefault('role', 'info')
        super(TextpositionsrcValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class TextpositionValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the waterfall trace's `textposition` property."""

    def __init__(self, plotly_name='textposition', parent_name='waterfall', **kwargs):
        # Install generated defaults unless the caller supplied overrides.
        kwargs.setdefault('array_ok', True)
        kwargs.setdefault('edit_type', 'calc')
        kwargs.setdefault('role', 'info')
        kwargs.setdefault('values', ['inside', 'outside', 'auto', 'none'])
        super(TextpositionValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class TextfontValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the waterfall trace's `textfont` property."""

    def __init__(
        self, plotly_name='textfont', parent_name='waterfall', **kwargs
    ):
        super(TextfontValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop('data_class_str', 'Textfont'),
            # data_docs is a runtime string consumed to build generated class
            # documentation; the literal must stay byte-identical.
            data_docs=kwargs.pop(
                'data_docs', """
            color
            colorsrc
            Sets the source reference on plot.ly for color
            .
            family
            HTML font family - the typeface that will be
            applied by the web browser. The web browser
            will only be able to apply a font if it is
            available on the system which it operates.
            Provide multiple font families, separated by
            commas, to indicate the preference in which to
            apply fonts if they aren't available on the
            system. The plotly service (at https://plot.ly
            or on-premise) generates images on a server,
            where only a select number of fonts are
            installed and supported. These include "Arial",
            "Balto", "Courier New", "Droid Sans",, "Droid
            Serif", "Droid Sans Mono", "Gravitas One", "Old
            Standard TT", "Open Sans", "Overpass", "PT Sans
            Narrow", "Raleway", "Times New Roman".
            familysrc
            Sets the source reference on plot.ly for
            family .
            size
            sizesrc
            Sets the source reference on plot.ly for size
            .
            """
            ),
            **kwargs
        )
import _plotly_utils.basevalidators
class TextValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the waterfall trace's `text` property."""

    def __init__(self, plotly_name='text', parent_name='waterfall', **kwargs):
        # Install generated defaults unless the caller supplied overrides.
        kwargs.setdefault('array_ok', True)
        kwargs.setdefault('edit_type', 'calc')
        kwargs.setdefault('role', 'info')
        super(TextValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class StreamValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the waterfall trace's `stream` property."""

    def __init__(
        self, plotly_name='stream', parent_name='waterfall', **kwargs
    ):
        super(StreamValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop('data_class_str', 'Stream'),
            # data_docs is a runtime string consumed to build generated class
            # documentation; the literal must stay byte-identical.
            data_docs=kwargs.pop(
                'data_docs', """
            maxpoints
            Sets the maximum number of points to keep on
            the plots from an incoming stream. If
            `maxpoints` is set to 50, only the newest 50
            points will be displayed on the plot.
            token
            The stream id number links a data trace on a
            plot with a stream. See
            https://plot.ly/settings for more details.
            """
            ),
            **kwargs
        )
import _plotly_utils.basevalidators
class ShowlegendValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Validator for the waterfall trace's `showlegend` property."""

    def __init__(self, plotly_name='showlegend', parent_name='waterfall', **kwargs):
        # Install generated defaults unless the caller supplied overrides.
        kwargs.setdefault('edit_type', 'style')
        kwargs.setdefault('role', 'info')
        super(ShowlegendValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class SelectedpointsValidator(_plotly_utils.basevalidators.AnyValidator):
    """Validator for the waterfall trace's `selectedpoints` property."""

    def __init__(self, plotly_name='selectedpoints', parent_name='waterfall', **kwargs):
        # Install generated defaults unless the caller supplied overrides.
        kwargs.setdefault('edit_type', 'calc')
        kwargs.setdefault('role', 'info')
        super(SelectedpointsValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class OutsidetextfontValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the waterfall trace's `outsidetextfont` property."""

    def __init__(
        self, plotly_name='outsidetextfont', parent_name='waterfall', **kwargs
    ):
        super(OutsidetextfontValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop('data_class_str', 'Outsidetextfont'),
            # data_docs is a runtime string consumed to build generated class
            # documentation; the literal must stay byte-identical.
            data_docs=kwargs.pop(
                'data_docs', """
            color
            colorsrc
            Sets the source reference on plot.ly for color
            .
            family
            HTML font family - the typeface that will be
            applied by the web browser. The web browser
            will only be able to apply a font if it is
            available on the system which it operates.
            Provide multiple font families, separated by
            commas, to indicate the preference in which to
            apply fonts if they aren't available on the
            system. The plotly service (at https://plot.ly
            or on-premise) generates images on a server,
            where only a select number of fonts are
            installed and supported. These include "Arial",
            "Balto", "Courier New", "Droid Sans",, "Droid
            Serif", "Droid Sans Mono", "Gravitas One", "Old
            Standard TT", "Open Sans", "Overpass", "PT Sans
            Narrow", "Raleway", "Times New Roman".
            familysrc
            Sets the source reference on plot.ly for
            family .
            size
            sizesrc
            Sets the source reference on plot.ly for size
            .
            """
            ),
            **kwargs
        )
import _plotly_utils.basevalidators
class OrientationValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the waterfall trace's `orientation` property ('v' or 'h')."""

    def __init__(self, plotly_name='orientation', parent_name='waterfall', **kwargs):
        # Install generated defaults unless the caller supplied overrides.
        kwargs.setdefault('edit_type', 'calc+clearAxisTypes')
        kwargs.setdefault('role', 'info')
        kwargs.setdefault('values', ['v', 'h'])
        super(OrientationValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class OpacityValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the waterfall trace's `opacity` property (0..1)."""

    def __init__(self, plotly_name='opacity', parent_name='waterfall', **kwargs):
        # Install generated defaults unless the caller supplied overrides.
        kwargs.setdefault('edit_type', 'style')
        kwargs.setdefault('max', 1)
        kwargs.setdefault('min', 0)
        kwargs.setdefault('role', 'style')
        super(OpacityValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class OffsetsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the waterfall trace's `offsetsrc` property."""

    def __init__(self, plotly_name='offsetsrc', parent_name='waterfall', **kwargs):
        # Install generated defaults unless the caller supplied overrides.
        kwargs.setdefault('edit_type', 'none')
        kwargs.setdefault('role', 'info')
        super(OffsetsrcValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class OffsetgroupValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the waterfall trace's `offsetgroup` property."""

    def __init__(self, plotly_name='offsetgroup', parent_name='waterfall', **kwargs):
        # Install generated defaults unless the caller supplied overrides.
        kwargs.setdefault('edit_type', 'calc')
        kwargs.setdefault('role', 'info')
        super(OffsetgroupValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class OffsetValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the waterfall trace's `offset` property."""

    def __init__(self, plotly_name='offset', parent_name='waterfall', **kwargs):
        # Install generated defaults unless the caller supplied overrides.
        kwargs.setdefault('array_ok', True)
        kwargs.setdefault('edit_type', 'calc')
        kwargs.setdefault('role', 'info')
        super(OffsetValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class NameValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the waterfall trace's `name` property."""

    def __init__(self, plotly_name='name', parent_name='waterfall', **kwargs):
        # Install generated defaults unless the caller supplied overrides.
        kwargs.setdefault('edit_type', 'style')
        kwargs.setdefault('role', 'info')
        super(NameValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class MeasuresrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the waterfall trace's `measuresrc` property."""

    def __init__(self, plotly_name='measuresrc', parent_name='waterfall', **kwargs):
        # Install generated defaults unless the caller supplied overrides.
        kwargs.setdefault('edit_type', 'none')
        kwargs.setdefault('role', 'info')
        super(MeasuresrcValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class MeasureValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the waterfall trace's `measure` data array."""

    def __init__(self, plotly_name='measure', parent_name='waterfall', **kwargs):
        # Install generated defaults unless the caller supplied overrides.
        kwargs.setdefault('edit_type', 'calc')
        kwargs.setdefault('role', 'data')
        super(MeasureValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class LegendgroupValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the waterfall trace's `legendgroup` property."""

    def __init__(self, plotly_name='legendgroup', parent_name='waterfall', **kwargs):
        # Install generated defaults unless the caller supplied overrides.
        kwargs.setdefault('edit_type', 'style')
        kwargs.setdefault('role', 'info')
        super(LegendgroupValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class InsidetextfontValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the waterfall trace's `insidetextfont` property."""

    def __init__(
        self, plotly_name='insidetextfont', parent_name='waterfall', **kwargs
    ):
        super(InsidetextfontValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop('data_class_str', 'Insidetextfont'),
            # data_docs is a runtime string consumed to build generated class
            # documentation; the literal must stay byte-identical.
            data_docs=kwargs.pop(
                'data_docs', """
            color
            colorsrc
            Sets the source reference on plot.ly for color
            .
            family
            HTML font family - the typeface that will be
            applied by the web browser. The web browser
            will only be able to apply a font if it is
            available on the system which it operates.
            Provide multiple font families, separated by
            commas, to indicate the preference in which to
            apply fonts if they aren't available on the
            system. The plotly service (at https://plot.ly
            or on-premise) generates images on a server,
            where only a select number of fonts are
            installed and supported. These include "Arial",
            "Balto", "Courier New", "Droid Sans",, "Droid
            Serif", "Droid Sans Mono", "Gravitas One", "Old
            Standard TT", "Open Sans", "Overpass", "PT Sans
            Narrow", "Raleway", "Times New Roman".
            familysrc
            Sets the source reference on plot.ly for
            family .
            size
            sizesrc
            Sets the source reference on plot.ly for size
            .
            """
            ),
            **kwargs
        )
import _plotly_utils.basevalidators
class IncreasingValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the waterfall trace's `increasing` property."""

    def __init__(
        self, plotly_name='increasing', parent_name='waterfall', **kwargs
    ):
        super(IncreasingValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop('data_class_str', 'Increasing'),
            # data_docs is a runtime string consumed to build generated class
            # documentation; the literal must stay byte-identical.
            data_docs=kwargs.pop(
                'data_docs', """
            marker
            plotly.graph_objs.waterfall.increasing.Marker
            instance or dict with compatible properties
            """
            ),
            **kwargs
        )
import _plotly_utils.basevalidators
class IdssrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the waterfall trace's `idssrc` property."""

    def __init__(self, plotly_name='idssrc', parent_name='waterfall', **kwargs):
        # Install generated defaults unless the caller supplied overrides.
        kwargs.setdefault('edit_type', 'none')
        kwargs.setdefault('role', 'info')
        super(IdssrcValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class IdsValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the waterfall trace's `ids` data array."""

    def __init__(self, plotly_name='ids', parent_name='waterfall', **kwargs):
        # Install generated defaults unless the caller supplied overrides.
        kwargs.setdefault('anim', True)
        kwargs.setdefault('edit_type', 'calc')
        kwargs.setdefault('role', 'data')
        super(IdsValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class HovertextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the waterfall trace's `hovertextsrc` property."""

    def __init__(self, plotly_name='hovertextsrc', parent_name='waterfall', **kwargs):
        # Install generated defaults unless the caller supplied overrides.
        kwargs.setdefault('edit_type', 'none')
        kwargs.setdefault('role', 'info')
        super(HovertextsrcValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class HovertextValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the waterfall trace's `hovertext` property."""

    def __init__(self, plotly_name='hovertext', parent_name='waterfall', **kwargs):
        # Install generated defaults unless the caller supplied overrides.
        kwargs.setdefault('array_ok', True)
        kwargs.setdefault('edit_type', 'style')
        kwargs.setdefault('role', 'info')
        super(HovertextValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class HovertemplatesrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the waterfall trace's `hovertemplatesrc` property."""

    def __init__(self, plotly_name='hovertemplatesrc', parent_name='waterfall', **kwargs):
        # Install generated defaults unless the caller supplied overrides.
        kwargs.setdefault('edit_type', 'none')
        kwargs.setdefault('role', 'info')
        super(HovertemplatesrcValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class HovertemplateValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name='hovertemplate', parent_name='waterfall', **kwargs
):
super(HovertemplateValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
| |
<filename>core/snapshot.py
import abc
import numpy as np
from namelist import load_nml, NML
from seren3 import config
class Snapshot(object):
    """
    Base class for loading RAMSES snapshots.

    Concrete subclasses must implement the abstract accessors (``g``, ``p``,
    ``d``, ``s``, ``get_io_source``, ``get_sphere``, ``get_cube``, ``camera``).
    """
    # Python 2 style ABC declaration (kept as-is for compatibility with the
    # rest of the codebase; under Python 3 this attribute has no effect).
    __metaclass__ = abc.ABCMeta

    def __init__(self, path, ioutput, **kwargs):
        """
        :param path: simulation directory holding ``output_XXXXX`` folders;
            ``'.'`` (or ``'./'``) resolves to the current working directory.
        :param ioutput: integer output number of the snapshot to load.
        :keyword metal: force metal tracking on even when the namelist
            does not enable it.
        :raises Exception: if *ioutput* is not an integral value.
        """
        import os
        from snapshot_quantities import Quantity
        from pymses.utils import constants as C

        self.path = os.getcwd() if path.strip('/') == '.' else path
        if ioutput != int(ioutput):
            raise Exception("Must provide integer output number (got %f)" % ioutput)
        self.ioutput = int(ioutput)
        self.C = C

        # Known particles
        self.known_particles = ["part", "dm", "star", "gmc"]
        self.particle_field_list = ["mass", "pos", "vel", "epoch", "id"]

        # Load the namelist file
        self.nml = load_nml(self)

        # Tracking metals?
        self.metals = False
        # First check if the namelist is using PHYSICS_PARAMS (legacy)
        # or if it is using COOLING_PARAMS (new)
        if NML.PHYSICS_PARAMS in self.nml:
            print("Using legacy namelist block (PHYSICS_PARAMS)")
            if 'metal' in self.nml[NML.PHYSICS_PARAMS]:
                self.metals = self.nml[NML.PHYSICS_PARAMS]['metal'] == '.true.' or kwargs.pop("metal", False)
        elif NML.COOLING_PARAMS in self.nml:
            if 'metal' in self.nml[NML.COOLING_PARAMS]:
                self.metals = self.nml[NML.COOLING_PARAMS]['metal'] == '.true.' or kwargs.pop("metal", False)
        else:
            print("Neither PHYSICS_PARAMS or COOLING_PARAMS found.")

        if self.metals:
            self.particle_field_list.append("metal")

        # Which RAMSES flavour produced this output ('rt'/'cudaton'/'ramses')?
        self.patch = self.detect_rt_module()

        # Lazily-computed caches (filled on first property access)
        self._friedmann = None
        self._info = None

        # Quantities object
        self.quantities = Quantity(self)

    def ancestor(self):
        """Return a weak reference to this snapshot."""
        import weakref
        return weakref.ref(self)

    def array(self, array, units=None, **kwargs):
        """Wrap *array* in a SimArray bound to this snapshot."""
        from seren3.array import SimArray
        return SimArray(array, units, snapshot=self, **kwargs)

    @abc.abstractmethod
    def g(self):
        return

    @abc.abstractmethod
    def p(self):
        return

    @abc.abstractmethod
    def d(self):
        return

    @abc.abstractmethod
    def s(self):
        return

    @abc.abstractmethod
    def get_io_source(self, family):
        return

    @abc.abstractmethod
    def get_sphere(self, pos, r):
        return

    @abc.abstractmethod
    def get_cube(self, pos, l):
        return

    @property
    def boxsize(self):
        """Comoving box size as a SimArray in 'Mpc a h**-1'."""
        return self.array(self.info["unit_length"]).in_units("Mpc a h**-1")

    def pickle_dump(self, fname, data):
        '''
        Dumps data (safely) to a pickle database
        '''
        import pickle
        with open(fname, "wb") as f:
            pickle.dump(data, f)

    def pickle_load(self, fname):
        '''
        Loads data (safely) from a pickle databse
        '''
        # NOTE(review): unpickling an untrusted file can execute arbitrary
        # code -- only load databases written by pickle_dump above.
        import pickle
        data = None
        with open(fname, "rb") as f:
            data = pickle.load(f)
        return data

    def halos(self, finder=config.get("halo", "default_finder"), **kwargs):
        """Return a halo catalogue ('ahf', 'rockstar' or 'ctrees')."""
        if finder.lower() == 'ahf':
            from seren3.halos.halos import AHFCatalogue
            return AHFCatalogue(self, **kwargs)
        elif finder.lower() == 'rockstar':
            from seren3.halos.halos import RockstarCatalogue
            return RockstarCatalogue(self, **kwargs)
        elif finder.lower() == 'ctrees':
            from seren3.halos.halos import ConsistentTreesCatalogue
            return ConsistentTreesCatalogue(self, **kwargs)
        else:
            raise Exception("Unknown halo finder: %s" % finder)

    @property
    def h(self):
        """Halo catalogue built with the configured default finder."""
        return self.halos()

    @abc.abstractmethod
    def camera(self, **kwargs):
        return

    @property
    def friedmann(self):
        """Cached Friedmann integration results (computed on first access)."""
        if self._friedmann is None:
            self._friedmann = self.integrate_friedmann()
        return self._friedmann

    def detect_rt_module(self):
        '''
        Checks if RAMSES-RT or RAMSES-CUDATON simulation.
        Returns string 'rt', 'cudaton' or 'ramses'.
        '''
        import os, glob
        if len(glob.glob("%s/output_%05i/rt_*.out*" % (self.path, self.ioutput))):
            return 'rt'
        elif os.path.isfile("%s/output_%05i/rad_%05i.out00001" % (self.path, self.ioutput, self.ioutput)):
            return 'cudaton'
        else:
            return 'ramses'

    @property
    def info_fname(self):
        """Path of the RAMSES info file for this output."""
        return '%s/output_%05d/info_%05d.txt' % (self.path, self.ioutput, self.ioutput)

    @property
    def info_rt_fname(self):
        """Path of the RAMSES-RT info file for this output."""
        return '%s/output_%05d/info_rt_%05d.txt' % (self.path, self.ioutput, self.ioutput)

    @property
    def info(self):
        '''
        Expose info API
        '''
        import os
        if self._info is None:
            fname = self.info_fname
            from pymses.sources.ramses import info as info_utils
            info_dict = info_utils.read_ramses_info_file(fname)
            # Merge in RT info when an info_rt file exists on disk.
            if (os.path.isfile(self.info_rt_fname)):
                full_info = info_dict.copy()
                full_info.update(self.info_rt)
                self._info = full_info
            else:
                self._info = info_dict
        return self._info

    @property
    def info_rt(self):
        '''
        Expose RT info API
        '''
        from pymses.sources.ramses import info as info_utils
        fname = self.info_rt_fname
        return info_utils.read_ramses_rt_info_file(fname)

    @property
    def unit_l(self):
        """Code length unit as a SimArray."""
        return self.array(self.info["unit_length"])

    @property
    def hilbert_dom_decomp(self):
        '''
        Expose Hilbert domain decomposition API
        '''
        from pymses.sources.ramses.hilbert import HilbertDomainDecomp
        info = self.info
        keys = info['dom_decomp_Hilbert_keys']
        dom_decomp = HilbertDomainDecomp(
            info['ndim'], keys[:-1], keys[1:], (info['levelmin'], info['levelmax']))
        return dom_decomp

    def cpu_list(self, bounding_box):
        '''
        Return the list of CPUs which cover the bounding box
        - bounding box: (2, ndim) ndarray containing min/max bounding box
        '''
        return self.hilbert_dom_decomp.map_box(bounding_box)

    @property
    def z(self):
        """Redshift of this snapshot."""
        # BUGFIX: this property used to be defined twice in the class body
        # (once as self.cosmo["z"], once as below); the first definition was
        # dead code silently shadowed by the second.  Both computed the same
        # value; the single surviving definition preserves behaviour.
        return (1. / self.info['aexp']) - 1.

    @property
    def age(self):
        """Age of the universe at this snapshot, as a SimArray in Gyr."""
        from seren3.array import SimArray
        fr = self.friedmann
        age_simu = fr["age_simu"]
        return SimArray(age_simu, "Gyr")

    @property
    def cosmo(self):
        """
        Returns a cosmolopy compatible dict
        """
        par = self.info
        cosmo = {'omega_M_0': round(par['omega_m'], 3),
                 'omega_lambda_0': round(par['omega_l'], 3),
                 'omega_k_0': round(par['omega_k'], 3),
                 'omega_b_0': round(par['omega_b'], 3),
                 'h': par['H0'] / 100.,
                 'aexp': par['aexp'],
                 'z': (1. / par['aexp']) - 1.,
                 'omega_n_0': 0.,
                 'N_nu': 0.,
                 'n': 0.96}  # TODO - read this from somewhere
        return cosmo

    def integrate_friedmann(self, aexp=None):
        """
        Integrate the Friedmann equation for this snapshot's cosmology.

        :param aexp: expansion factor to evaluate at (defaults to the
            snapshot's own aexp).
        :returns: dict with integration tables and the interpolated
            simulation time/age ('age_simu', 'time_simu', ...).
        """
        from seren3.utils.f90 import friedmann as fm

        cosmology = self.cosmo
        omega_m_0 = cosmology['omega_M_0']
        omega_l_0 = cosmology['omega_lambda_0']
        omega_k_0 = cosmology['omega_k_0']
        if aexp is None:
            aexp = cosmology['aexp']
        H0 = cosmology['h'] * 100

        alpha = 1e-6
        axpmin = 1e-3
        ntable = 1000

        axp_out, hexp_out, tau_out, t_out, age_tot = fm.friedmann(
            omega_m_0, omega_l_0, omega_k_0, alpha, axpmin, ntable)

        # Find neighbouring expansion factor
        i = 1
        while ((axp_out[i] > aexp) and (i < ntable)):
            i += 1

        # Interpolate time
        time_simu = t_out[i] * (aexp - axp_out[i - 1]) / (axp_out[i] - axp_out[i - 1]) + \
            t_out[i - 1] * (aexp - axp_out[i]) / (axp_out[i - 1] - axp_out[i])

        # Convert code time to Gyr using H0 in cgs-ish units.
        age_simu = (time_simu + age_tot) / \
            (H0 * 1e5 / 3.08e24) / (365 * 24 * 3600 * 1e9)

        friedmann = {
            'axp_out': axp_out,
            'hexp_out': hexp_out,
            'tau_out': tau_out,
            't_out': t_out,
            'age_tot': age_tot,
            'age_simu': age_simu,
            'time_simu': time_simu
        }
        return friedmann

    def pynbody_snapshot(self, **kwargs):
        """Conversion hook for subclasses; the base class cannot convert."""
        raise NotImplementedError("Conversion not implemented for base snapshot")
class Family(object):
"""
Class to load family specific fields
"""
def __init__(self, snapshot, family):
import weakref
self._base = weakref.ref(snapshot)
self.path = snapshot.path
self.ioutput = snapshot.ioutput
self.quantities = snapshot.quantities
self.family = family.lower()
self.C = snapshot.C
def __str__(self):
return "Family<%s>" % self.family
def __repr__(self):
return self.__str__()
def __len__(self):
dset = self["pos"].flatten()["pos"]
return len(dset)
    @property
    def base(self):
        # Dereference the weakref created in __init__; returns None when the
        # parent snapshot has been garbage collected.
        return self._base()
    @property
    def ro(self):
        # Delegate to the parent snapshot's pymses reader object.
        return self.base.ro
def array(self, *args, **kwargs):
return self.base.array(*args, **kwargs)
    @property
    def info(self):
        # Delegate to the parent snapshot's info dict.
        return self.base.info
    @property
    def cosmo(self):
        # Delegate to the parent snapshot's cosmology dict.
        return self.base.cosmo
    @property
    def friedmann(self):
        # Delegate to the parent snapshot's cached Friedmann integration.
        return self.base.friedmann
    @property
    def patch(self):
        # Delegate to the parent snapshot's detected RAMSES flavour.
        return self.base.patch
    @property
    def nml(self):
        # Delegate to the parent snapshot's parsed namelist.
        return self.base.nml
    def camera(self, **kwargs):
        # Delegate camera construction to the parent snapshot.
        return self.base.camera(**kwargs)
    def compute_required_fields(self, fields):
        """
        Computes which of the tracked scalar quantities are needed to fully derive a field.

        Derived fields are expanded recursively until only fields RAMSES
        itself tracks remain.

        Parameters
        ----------
        fields : str or iterable of str
            Requested field name(s).

        Returns
        -------
        list of str
            Unique set of tracked fields required to evaluate ``fields``.

        Raises
        ------
        Exception
            If a field is neither derived nor present in the tracked field list.
        """
        from seren3.utils import derived_utils as derived
        # NOTE(review): under Python 3 a plain str also has __iter__, so a bare
        # string would NOT be wrapped here and would be iterated character by
        # character below; this guard assumes Python 2 semantics -- confirm.
        if not hasattr(fields, "__iter__"):
            fields = [fields]
        field_list = None  # Fields RAMSES knows about
        if self.family == 'amr':
            field_list = self.ro._amr_fields().field_name_list
            # pos and dx are made available on top of the raw AMR fields
            field_list.extend(["pos", "dx"])
        else:
            field_list = self.base.particle_field_list
            # NOTE(review): this appends to the snapshot's own list in place --
            # repeated calls will accumulate duplicates; confirm intended.
            field_list.append("pos")
        known_fields = set()

        def _get_rules(field):
            # Recursively expand a derived field into its tracked dependencies.
            if derived.is_derived(self, field):
                required_fields = [r for r in derived.required_for_field(self, field)]
                for r in required_fields:
                    if derived.is_derived(self, r):
                        _get_rules(r)
                    else:
                        known_fields.add(r)
            else:
                if field in field_list:
                    known_fields.add(field)
                else:
                    raise Exception("Unknown %s field: %s" % (self.family, field))

        for f in fields:
            _get_rules(f)
        return list(known_fields)
def get_source(self, fields, return_required_fields=False):
"""
Data access via pymses for family specific tracked/derived fields
"""
from serensource import SerenSource
required_fields = self.compute_required_fields(fields)
if self.family in self.base.known_particles:
required_fields.append("level") # required for fft projections of particle fields
if "epoch" not in required_fields:
required_fields.append("epoch") # required for particle filtering
if "id" not in required_fields:
required_fields.append("id") # required for particle filtering
source = None
if "dx" in required_fields or "pos" in required_fields:
source = self.base.get_io_source(self.family, [r for r in required_fields if r != "dx" and r != "pos"])
else:
source = self.base.get_io_source(self.family, required_fields)
if return_required_fields:
return source, required_fields
return source
def __getitem__(self, fields):
"""
Data access via pymses for family specific tracked/derived fields
"""
from serensource import SerenSource
if not hasattr(fields, "__iter__"):
fields = [fields]
source, required_fields = self.get_source(fields, return_required_fields=True)
if self.family in ['amr', 'rt']:
from pymses.filters import CellsToPoints
source = CellsToPoints(source)
cpu_list = None
if hasattr(self.base, "region"):
from pymses.filters import RegionFilter
source = RegionFilter(self.base.region, source)
return SerenSource(self, source)
def bin_spherical(self, field, npoints, nbins, divide_by_counts=False, **kwargs):
'''
Spherical binning function
'''
from seren3.array import SimArray
from seren3.utils.derived_utils import is_derived, get_derived_field
from seren3.analysis.profile_binners import SphericalProfileBinner
center = kwargs.pop("center", None)
if center is None:
if hasattr(self.base, "region"):
center = self.base.region.center
else:
raise Exception("center not specified")
# if not isinstance(center, SimArray):
# raise Exception("Center must be a SimArray")
radius = kwargs.pop("radius", None)
if radius is None:
if hasattr(self.base, "region"):
radius = float(self.base.region.radius)
else:
raise Exception("radius not specified")
# if not isinstance(radius, SimArray):
# raise Exception("Radius must be a SimArray")
profile_func = kwargs.pop("profile_func", None)
if profile_func is None:
if is_derived(self, field):
fn = get_derived_field(self, field)
profile_func = lambda dset: fn(self, dset)
else:
profile_func = lambda dset: dset[field]
bin_bounds = np.linspace(0., radius, nbins)
dset = None
source = self[[field, "pos"]]
if self.family == "amr":
sphere = | |
#!/usr/bin/env python
# Import Modules
#-------------------------------------------------------
# Scipy and Numpy
from scipy import *
import scipy.special as special
# Mesh generator
import mesh as tri
# Non-symmetric Gaussian Solver
import nsymgauss as NSG
# Quadrature data
import quad as GQD
# Shape function module
import sfntri as SFN
# GUI - wxPython busted at moment
## import femGUI
# Define python functions
#-------------------------------------------------------
def vanilla(r, K, dt, sigma, S):
    """
    Analytic Black-Scholes value of a European vanilla put on a single
    underlying asset, evaluated over a range of spot prices.

    The solution used is from The Mathematics of Financial Derivatives,
    Wilmott, et al. Uses ndtr and exp from scipy and scipy.special.

    r     : risk free rate (float)
    K     : strike price (float)
    dt    : time to expiry (float)
    sigma : volatility of S (float)
    S     : range of underlying values (array[float])

    Usage:
        put_value = vanilla(r, K, dt, sigma, S)

    Improvement: the original per-element Python loop is replaced by
    vectorized numpy operations (scipy.special.ndtr accepts arrays), which
    is both faster and shorter while returning the same values.
    """
    S = asarray(S, dtype=float)
    b = sigma * sqrt(dt)            # total volatility over the period
    dsct = exp(-1.0 * r * dt)       # discount factor
    d1 = (log(S / K) + (r + 0.5 * sigma ** 2) * dt) / b
    # d2 = d1 - sigma*sqrt(dt) is algebraically identical to using
    # (r - sigma^2/2) in the numerator
    d2 = d1 - b
    n1 = special.ndtr(-1.0 * d1)
    n2 = special.ndtr(-1.0 * d2)
    # Put value: K e^{-r dt} N(-d2) - S N(-d1)
    return K * dsct * n2 - S * n1
# Create the boundary information arrays only once to
# save calculations during time-dependent BCs
def findbc(gnodes, s1max, s2max, nnm):
    """
    Classify global nodes against the four domain boundaries.

    bnode : 0 = interior node, 1 = boundary node
    mind* : indices of the nodes on the s1min / s2min axes
    maxd* : indices of the nodes on the s1max / s2max edges (kept for later
            use; not consumed by the present program)
    s1y0  : global S1 coordinates of the y = 0 boundary nodes
    s2x0  : global S2 coordinates of the x = 0 boundary nodes

    gnodes is (num of nodes) x 2, with gnodes[:,0] = x and gnodes[:,1] = y.

    Usage:
        bnode, mindx, maxdx, mindy, maxdy, s1y0, s2x0 \
            = findbc(gnodes, s1max, s2max, nnm)
    """
    bnode = zeros(nnm, dtype=int)
    mnndx = zeros(nnm, dtype=int)
    mxndx = zeros(nnm, dtype=int)
    mnndy = zeros(nnm, dtype=int)
    mxndy = zeros(nnm, dtype=int)
    # Single scan: the x and y tests are independent, so both axes can be
    # classified in one pass over the nodes.
    for k in range(nnm):
        xk = gnodes[k, 0]
        yk = gnodes[k, 1]
        if allclose(xk, 0.0):           # axis -> (x=0, y[:]); BC = vanilla(s2, t)
            bnode[k] = 1
            mnndx[k] = k
        elif allclose(xk, s1max):       # edge -> (x=s1max, y[:]); BC = 0.0
            bnode[k] = 1
            mxndx[k] = k
        if allclose(yk, 0.0):           # axis -> (x[:], y=0); BC = vanilla(s1, t)
            bnode[k] = 1
            mnndy[k] = k
        elif allclose(yk, s2max):       # edge -> (x[:], y=s2max); BC = 0.0
            bnode[k] = 1
            mxndy[k] = k
    # The zero entries are placeholders, so keep only the non-zero indices,
    # then re-insert the origin (node 0) which was lost by nonzero().
    origin = 0
    mindx = sort(append(mnndx[mnndx.nonzero()], origin))
    maxdx = sort(append(mxndx[mxndx.nonzero()], origin))
    mindy = sort(append(mnndy[mnndy.nonzero()], origin))
    maxdy = sort(append(mxndy[mxndy.nonzero()], origin))
    # Global coordinates of the axis nodes, needed for the time-dependent
    # BCs. Convention:
    #   -> s1 = 0.0 and all S2 is the y-axis
    #   -> s2 = 0.0 and all S1 is the x-axis
    s1y0 = gnodes[mindy, 0].astype(float)
    s2x0 = gnodes[mindx, 1].astype(float)
    return bnode, mindx, maxdx, mindy, maxdy, s1y0, s2x0
# Create inital value (actually the "final" sol'n here)
def initialVal(K, gnodes, nnm, etype):
    """
    Final-time payoff of the two-asset put (the "initial" value for the
    backwards-in-time solve).

    Toggle between the two exit strategies of ACHDOU & PIRONNEAU:
      etype close to 1.0 -> eqn [2.64], payoff on S1 + S2
      otherwise          -> eqn [2.65], payoff on max(S1, S2)
    The two choices give very different solution surfaces.
    """
    u0 = zeros(nnm, dtype=float)
    use_sum = allclose(etype, 1.0)
    for k in range(nnm):
        if use_sum:
            underlying = gnodes[k, 0] + gnodes[k, 1]      # [2.64]
        else:
            underlying = max(gnodes[k, 0], gnodes[k, 1])  # [2.65]
        u0[k] = max(K - underlying, 0.0)
    return u0
# Create the function to update the time-dependent BCs
def newBound(nnm, mindx, mindy, r, K, vol1, vol2, dt, s1y0, s2x0):
    """
    Rebuild the time-dependent Dirichlet boundary values at time-to-expiry dt.

    On the x-axis (s2 = 0) the option reduces to a 1-D vanilla put in S1,
    and on the y-axis (s1 = 0) to a 1-D vanilla put in S2; every other
    boundary entry stays zero. Both assets are assumed to share the same
    strike K, so the value at the origin is consistent between the two calls.
    """
    newBC = zeros(nnm, dtype=float)
    s1bc = vanilla(r, K, dt, vol1, s1y0)
    s2bc = vanilla(r, K, dt, vol2, s2x0)
    # Scatter the 1-D values onto their global boundary nodes
    for node, value in zip(mindy, s1bc):
        newBC[node] = value
    for node, value in zip(mindx, s2bc):
        newBC[node] = value
    return newBC
# Get user input
#-------------------------------------------------------
## app = femGUI.MyApp(False)
## app.MainLoop()
## inputs = femGUI.values
## # Multiply everything by 1.0 or 1 to ensure we have SciPy dtype
## # floats or integers as the GUI passes UNICODE STRINGS!!!
## etype = float(inputs[0])*1.0
## s1high = float(inputs[1])*1.0
## s2high = float(inputs[2])*1.0
## vol1 = float(inputs[3])*1.0
## vol2 = float(inputs[4])*1.0
## rate = float(inputs[5])*1.0
## pcorr = float(inputs[6])*1.0
## K = float(inputs[7])*1.0
## lastT = float(inputs[8])*1.0
## dt = float(inputs[9])*1.0
## nx = int(inputs[10])*1
## ny = int(inputs[11])*1
##
# Below for comparison purposes
# Comment out GUI inputs and run with the below values
#
# These values are the same used by ACHDOU & PIRONNEAU
# for the creation of Figures [4.11] and [4.12]
# These are the equivalent values for their THETA matrix
# using this formulation
#
# CHANGE ETYPE FOR THE DIFFERENT FINAL CONDITIONS
# Use either 1.0 or 0.0
# -- Model parameters (hard-coded; see the commented GUI block above) --
etype = 0.0       # final-condition toggle: ~1.0 -> payoff on S1+S2, else max(S1,S2)
s1high = 150.0    # upper bound of the S1 grid
s2high = 150.0    # upper bound of the S2 grid
vol1 = 0.1414     # volatility of asset 1
vol2 = 0.1414     # volatility of asset 2
rate = 0.1        # risk-free rate
pcorr = -0.6      # correlation between the two assets
K = 100.0         # shared strike price
lastT = 0.70      # time to expiry
dt = 0.01         # time step
nx = 50           # elements in x
ny = 50           # elements in y
# Specify zero as the minimum value for the grid.
s1low = 0.0
s2low = 0.0
# Initialize vectors/matrices
# Integer values for loops/sizes
nex1 = nx + 1       # nodes per row
ney1 = ny + 1       # nodes per column
nem = 2*nx*ny       # number of elements (two triangles per grid cell)
nnm = nex1*ney1     # number of global nodes
npe = 3             # nodes per (triangular) element
ndf = 1             # degrees of freedom per node
neq = nnm*ndf       # total number of equations
nn = npe*ndf        # element matrix size
# Number of quadrature points
nipf = 3
# Floats and arrays
x0 = s1low
y0 = s2low
# Uniform spacings; the trailing entry is zeroed for the mesh generator
dx = ones(nex1,float)*float((s1high/nx))
dy = ones(ney1,float)*float((s2high/ny))
dx[-1] = 0.0
dy[-1] = 0.0
# Create the differential eqn's coefficients
f0 = 0.0
c0 = 1.0
a110 = 0.5*(vol1**2.0)      # diffusion in S1
a220 = 0.5*(vol2**2.0)      # diffusion in S2
a120 = pcorr*vol1*vol2      # cross-diffusion from correlation
b10 = rate                  # convection in S1
b20 = rate                  # convection in S2
G = -1.0*rate               # reaction (discounting) term
# Call Fortran Mesh routine
# NOTA BENE: The connectivity matrix NODF has indices
# according to the FORTRAN CONVENTION!
nodf,glxy = tri.mesh(nx,ny,nex1,ney1,nem,nnm,dx,dy,x0,y0)
# Switch NODF indices for the Python convention (subtract 1 everywhere)
fort2py = ones(shape(nodf),dtype=int)
nodp = nodf - fort2py
# Find IdiagF and Idiag: the Fortran and Python index of the diagonal for
# the non-symmetric banded stiffness matrix (RECALL: Python indexes from 0).
# The half-bandwidth is the largest node-number spread within any element.
IdiagF = 0
for i in range(nem):
    for j in range(npe):
        for k in range(npe):
            nw = (int(abs(nodf[i,j] - nodf[i,k])+1))*ndf
            if IdiagF < nw:
                IdiagF = nw
# Band width of sparse matrix
band = (IdiagF*2) - 1
Idiag = IdiagF - 1
#-------------------------------------------------------#
#                                                       #
#                 Begin FEM Routine                     #
#                                                       #
#-------------------------------------------------------#
# [1] Set time values
# Time dependent variables & Crank-Nicolson parameters (alfa = 0.5)
alfa = 0.5
ntime = int(lastT/dt) + 1
a1 = alfa*dt
a2 = (1.0 - alfa)*dt
# Create storage matrices for values at each time step
optionValue = zeros((nnm,ntime),dtype=float)
optionParam = zeros((nnm,ntime),dtype=float)
# [2] Initialize BCs
# Create "final" condition and store for option price calculation
# once all the values in time have been calculated
u0 = initialVal(K,glxy,nnm,etype)
glu = u0
# Generate boundary information matrices from global matrix
bnode,mindx,maxdx,mindy,maxdy,s1y0,s2x0 = \
    findbc(glxy,s1high,s2high,nnm)
# An array of Python indices
nwld = arange(nnm,dtype=int)
# [3] Enter time loop
time = 0.0
ncount = 0
while ncount < ntime :
# Find new BCs for future time step
time += dt
newBC = \
newBound(nnm,mindx,mindy,rate,K,vol1,vol2,time,s1y0,s2x0)
# Global matrices
glk = zeros((neq,band),dtype=float)
glf = zeros(neq,dtype=float)
# Begin loop over each element
for n in range(nem):
# Element matrices
elxy = zeros((npe,2),dtype=float)
elu = zeros(npe,dtype=float)
elf = zeros(npe,dtype=float)
elm = zeros((npe,npe),dtype=float)
elk = zeros((npe,npe),dtype=float)
for i in range(npe):
# Assign global values for each node in the element
ni = nodp[n,i]
elxy[i,0] = glxy[ni,0]
elxy[i,1] = glxy[ni,1]
elu[i] = glu[ni]
# [4] Now compute elemental matrices
# Load quadrature data from Fortran Module
l1,l2,l3,lwt = GQD.quad()
# [5] Begin quadtrature loop
for nl in range(npe):
ac1 = l1[nl]
ac2 = l2[nl]
ac3 = l3[nl]
# Call Fortran Shape Function Module
det,sf,gdsf = SFN.sfntri(ac1,ac2,ac3,elxy)
cnst = 0.5*det*lwt[nl]
# Global x an y in terms of the unit triangle
x = 0.0
y = 0.0
for it in range(npe):
x += elxy[it,0]*sf[it]
| |
is a naming change of variable. X in fit()
is changed to Y here, and design in fit() is changed to X here.
This is because we follow the tradition that X expresses the
variable defined (controlled) by the experimenter, i.e., the
time course of experimental conditions convolved by an HRF,
and Y expresses data.
However, in wrapper function fit(), we follow the naming
routine of scikit-learn.
"""
rank = self.rank
n_subj = len(Y)
n_V = [np.size(y, axis=1) for y in Y]
n_T = [np.size(y, axis=0) for y in Y]
n_C = np.size(X[0], axis=1)
l_idx, rank = self._chol_idx(n_C, rank)
n_l = np.size(l_idx[0]) # the number of parameters for L
t_start = time.time()
logger.info('Starting to fit the model. Maximum iteration: '
'{}.'.format(self.n_iter))
# log_SNR_grids, SNR_weights \
# = np.polynomial.hermite.hermgauss(SNR_bins)
# SNR_weights = SNR_weights / np.pi**0.5
# SNR_grids = np.exp(log_SNR_grids * self.logS_range * 2**.5)
SNR_grids, SNR_weights = self._set_SNR_grids()
logger.info('The grids of pseudo-SNR used for numerical integration '
'is {}.'.format(SNR_grids))
assert np.max(SNR_grids) < 1e10, \
'ATTENTION!! The range of grids of pseudo-SNR' \
' to be marginalized is too large. Please ' \
'consider reducing logS_range to 1 or 2'
rho_grids, rho_weights = self._set_rho_grids()
logger.info('The grids of rho used to do numerical integration '
'is {}.'.format(rho_grids))
n_grid = self.SNR_bins * self.rho_bins
log_weights = np.reshape(
np.log(SNR_weights[:, None]) + np.log(rho_weights), n_grid)
all_rho_grids = np.reshape(np.repeat(
rho_grids[None, :], self.SNR_bins, axis=0), n_grid)
all_SNR_grids = np.reshape(np.repeat(
SNR_grids[:, None], self.rho_bins, axis=1), n_grid)
# Prepare the data for fitting. These pre-calculated matrices
# will be re-used a lot in evaluating likelihood function and
# gradient.
D = [None] * n_subj
F = [None] * n_subj
run_TRs = [None] * n_subj
n_run = [None] * n_subj
XTY = [None] * n_subj
XTDY = [None] * n_subj
XTFY = [None] * n_subj
YTY_diag = [None] * n_subj
YTDY_diag = [None] * n_subj
YTFY_diag = [None] * n_subj
XTX = [None] * n_subj
XTDX = [None] * n_subj
XTFX = [None] * n_subj
X0TX0 = [None] * n_subj
X0TDX0 = [None] * n_subj
X0TFX0 = [None] * n_subj
XTX0 = [None] * n_subj
XTDX0 = [None] * n_subj
XTFX0 = [None] * n_subj
X0TY = [None] * n_subj
X0TDY = [None] * n_subj
X0TFY = [None] * n_subj
X0 = [None] * n_subj
X_res = [None] * n_subj
n_X0 = [None] * n_subj
idx_DC = [None] * n_subj
log_fixed_terms = [None] * n_subj
# Initialization for L.
# There are several possible ways of initializing the covariance.
# (1) start from the point estimation of covariance
cov_point_est = np.zeros((n_C, n_C))
for subj in range(n_subj):
D[subj], F[subj], run_TRs[subj], n_run[subj] = self._prepare_DF(
n_T[subj], scan_onsets=scan_onsets[subj])
XTY[subj], XTDY[subj], XTFY[subj], YTY_diag[subj], \
YTDY_diag[subj], YTFY_diag[subj], XTX[subj], XTDX[subj], \
XTFX[subj] = self._prepare_data_XY(
X[subj], Y[subj], D[subj], F[subj])
# The contents above stay fixed during fitting.
# Initializing X0 as DC baseline
# DC component will be added to the nuisance regressors.
# In later steps, we do not need to add DC components again
X0TX0[subj], X0TDX0[subj], X0TFX0[subj], XTX0[subj], XTDX0[subj], \
XTFX0[subj], X0TY[subj], X0TDY[subj], X0TFY[subj], X0[subj], \
X_base[subj], n_X0[subj], idx_DC[subj] = \
self._prepare_data_XYX0(
X[subj], Y[subj], X_base[subj], None, D[subj], F[subj],
run_TRs[subj], no_DC=False)
X_joint = np.concatenate((X0[subj], X[subj]), axis=1)
beta_hat = np.linalg.lstsq(X_joint, Y[subj])[0]
residual = Y[subj] - np.dot(X_joint, beta_hat)
# point estimates of betas and fitting residuals without assuming
# the Bayesian model underlying RSA.
cov_point_est += np.cov(beta_hat[n_X0[subj]:, :]
/ np.std(residual, axis=0))
log_fixed_terms[subj] = - (n_T[subj] - n_X0[subj]) \
/ 2 * np.log(2 * np.pi) + n_run[subj] \
/ 2 * np.log(1 - all_rho_grids**2) \
+ scipy.special.gammaln(
(n_T[subj] - n_X0[subj] - 2) / 2) \
+ (n_T[subj] - n_X0[subj] - 2) / 2 * np.log(2)
# These are terms in the log likelihood that do not
# depend on L. Notice that the last term comes from
# ther term of marginalizing sigma. We take the 2 in
# the denominator out. Accordingly, the "denominator"
# variable in the _raw_loglike_grids() function is not
# divided by 2
cov_point_est = cov_point_est / n_subj
current_vec_U_chlsk_l = np.linalg.cholesky(
(cov_point_est + np.eye(n_C)) / 2)[l_idx]
# We use the average of covariance of point estimation and an identity
# matrix as the initial value of the covariance matrix, just in case
# the user provides data in which n_V is smaller than n_C.
# (2) start from identity matrix
# current_vec_U_chlsk_l = np.eye(n_C)[l_idx]
# (3) random initialization
# current_vec_U_chlsk_l = self.random_state_.randn(n_l)
# vectorized version of L, Cholesky factor of U, the shared
# covariance matrix of betas across voxels.
L = np.zeros((n_C, rank))
L[l_idx] = current_vec_U_chlsk_l
X0TAX0 = [None] * n_subj
X0TAX0_i = [None] * n_subj
XTAcorrX = [None] * n_subj
s2XTAcorrX = [None] * n_subj
YTAcorrY_diag = [None] * n_subj
XTAcorrY = [None] * n_subj
sXTAcorrY = [None] * n_subj
X0TAY = [None] * n_subj
XTAX0 = [None] * n_subj
half_log_det_X0TAX0 = [None] * n_subj
s_post = [None] * n_subj
rho_post = [None] * n_subj
sigma_post = [None] * n_subj
beta_post = [None] * n_subj
beta0_post = [None] * n_subj
# The contents below can be updated during fitting.
# e.g., X0 will be re-estimated
logger.info('start real fitting')
LL = np.zeros(n_subj)
for it in range(self.n_iter):
logger.info('Iteration {}'.format(it))
# Re-estimate part of X0: X_res
for subj in range(n_subj):
if self.auto_nuisance and it > 0:
residuals = Y[subj] - np.dot(X[subj], beta_post[subj]) \
- np.dot(
X_base[subj],
beta0_post[subj][:np.shape(X_base[subj])[1], :])
X_res[subj] = self.nureg_method(
self.n_nureg_[subj]).fit_transform(
self.preprocess_residual(residuals))
X0TX0[subj], X0TDX0[subj], X0TFX0[subj], XTX0[subj],\
XTDX0[subj], XTFX0[subj], X0TY[subj], X0TDY[subj], \
X0TFY[subj], X0[subj], X_base[subj], n_X0[subj], _ = \
self._prepare_data_XYX0(
X[subj], Y[subj], X_base[subj], X_res[subj],
D[subj], F[subj], run_TRs[subj], no_DC=True)
X0TAX0[subj], X0TAX0_i[subj], XTAcorrX[subj], XTAcorrY[subj],\
YTAcorrY_diag[subj], X0TAY[subj], XTAX0[subj] \
= self._precompute_ar1_quad_forms_marginalized(
XTY[subj], XTDY[subj], XTFY[subj], YTY_diag[subj],
YTDY_diag[subj], YTFY_diag[subj], XTX[subj],
XTDX[subj], XTFX[subj], X0TX0[subj], X0TDX0[subj],
X0TFX0[subj], XTX0[subj], XTDX0[subj], XTFX0[subj],
X0TY[subj], X0TDY[subj], X0TFY[subj], rho_grids,
n_V[subj], n_X0[subj])
# Now we expand to another dimension including SNR
# and collapse the dimension again.
half_log_det_X0TAX0[subj], X0TAX0[subj], X0TAX0_i[subj], \
s2XTAcorrX[subj], YTAcorrY_diag[subj], sXTAcorrY[subj], \
X0TAY[subj], XTAX0[subj] = self._matrix_flattened_grid(
X0TAX0[subj], X0TAX0_i[subj], SNR_grids,
XTAcorrX[subj], YTAcorrY_diag[subj], XTAcorrY[subj],
X0TAY[subj], XTAX0[subj], n_C, n_V[subj], n_X0[subj],
n_grid)
res = scipy.optimize.minimize(
self._sum_loglike_marginalized, current_vec_U_chlsk_l
+ self.random_state_.randn(n_l) *
np.linalg.norm(current_vec_U_chlsk_l)
/ n_l**0.5 * np.exp(-it / self.n_iter
* self.anneal_speed - 1),
args=(s2XTAcorrX, YTAcorrY_diag, sXTAcorrY,
half_log_det_X0TAX0,
log_weights, log_fixed_terms,
l_idx, n_C, n_T, n_V, n_X0,
n_grid, rank),
method=self.optimizer, jac=True, tol=self.tol,
options=self.minimize_options)
param_change = res.x - current_vec_U_chlsk_l
current_vec_U_chlsk_l = res.x.copy()
# Estimating a few parameters.
L[l_idx] = current_vec_U_chlsk_l
for subj in range(n_subj):
LL_raw, denominator, L_LAMBDA, L_LAMBDA_LT = \
self._raw_loglike_grids(
L, s2XTAcorrX[subj], YTAcorrY_diag[subj],
sXTAcorrY[subj], half_log_det_X0TAX0[subj],
log_weights, log_fixed_terms[subj], n_C, n_T[subj],
n_V[subj], n_X0[subj], n_grid, rank)
result_sum, max_value, result_exp = utils.sumexp_stable(LL_raw)
LL[subj] = np.sum(np.log(result_sum) + max_value)
weight_post = result_exp / result_sum
s_post[subj] = np.sum(all_SNR_grids[:, None] * weight_post,
axis=0)
# Mean-posterior estimate of SNR.
rho_post[subj] = np.sum(all_rho_grids[:, None] * weight_post,
axis=0)
# Mean-posterior estimate of rho.
sigma_means = denominator ** 0.5 \
* (np.exp(scipy.special.gammaln(
(n_T[subj] - n_X0[subj] - 3) / 2)
- scipy.special.gammaln(
(n_T[subj] - n_X0[subj] - 2) / 2)) / 2**0.5)
sigma_post[subj] = np.sum(sigma_means * weight_post, axis=0)
# The mean of inverse-Gamma distribution is beta/(alpha-1)
# The mode is beta/(alpha+1). Notice that beta here does not
# refer to the brain activation, but the scale parameter of
# inverse-Gamma distribution. In the _UV version, we use the
# maximum likelihood estimate of sigma^2. So we divide by
# (alpha+1), which is (n_T - n_X0).
beta_post[subj] = np.zeros((n_C, n_V[subj]))
beta0_post[subj] = np.zeros((n_X0[subj], n_V[subj]))
for grid in range(n_grid):
beta_post[subj] += np.dot(L_LAMBDA_LT[grid, :, :],
sXTAcorrY[subj][grid, :, :])\
* all_SNR_grids[grid] \
* weight_post[grid, :]
beta0_post[subj] += weight_post[grid, :] * np.dot(
X0TAX0_i[subj][grid, :, :],
(X0TAY[subj][grid, :, :]
- np.dot(np.dot(XTAX0[subj][grid, :, :].T,
L_LAMBDA_LT[grid, :, :]),
sXTAcorrY[subj][grid, :, :])
* all_SNR_grids[grid]))
if np.max(np.abs(param_change)) < self.tol:
logger.info('The change of parameters is smaller than '
'the tolerance value {}. Fitting is finished '
'after {} iterations'.format(self.tol, it + 1))
break
for subj in range(n_subj):
if idx_DC[subj].size > 1:
collapsed_DC = np.sum(X0[subj][:, idx_DC[subj]], axis=1)
X0[subj] = np.insert(np.delete(X0[subj], idx_DC[subj], axis=1),
0, collapsed_DC, axis=1)
collapsed_beta0 = np.mean(beta0_post[subj][idx_DC[subj], :],
axis=0)
beta0_post[subj] = np.insert(
np.delete(beta0_post[subj], idx_DC[subj], axis=0),
0, collapsed_beta0, | |
from nutils import mesh, function, solver, util, export, cli, testing
import numpy as np, treelog
from CoolProp.CoolProp import PropsSI
import scipy.special as sc
from matplotlib import pyplot as plt
from scipy.stats import norm
from matplotlib import collections, colors
import pandas as pd
# import seaborn as sns
import matplotlib.pyplot as plt
import math
#################### Doublet model library #########################
#Objects
class Aquifer:
    """
    Bundle of aquifer/reservoir properties built from a parameter dict.

    Raw inputs are copied onto attributes; the constructor also precomputes
    a few derived quantities: mass flow rate, well diameter, and the
    porosity-weighted bulk density, heat capacity and thermal conductivity
    of the combined rock + fluid system.
    """

    def __init__(self, aquifer):
        # --- stochastic parameters (when not sampled elsewhere) ---
        self.H = aquifer['H']                   # aquifer thickness [m]
        self.φ = aquifer['porosity']            # porosity [-]
        self.K = aquifer['K']                   # permeability
        self.Q = aquifer['Q']                   # pumping rate (negative value = extraction)

        # --- deterministic parameters ---
        self.dtop = aquifer['dtop']             # depth to top aquifer
        self.dsensor = aquifer['dsensor']       # depth to esp sensor
        self.dpump = aquifer['dpump']           # depth to pump location
        self.labda = aquifer['labda']           # geothermal gradient
        self.Tsur = aquifer['Tsurface']         # surface temperature
        self.ρf = self.rhof = aquifer['rhof']   # fluid density (two aliases kept)
        self.rhos = aquifer['rhos']             # rock density
        self.cpf = aquifer['cpf']               # fluid specific heat capacity
        self.cps = aquifer['cps']               # stone specific heat capacity (limestone) [J/kg K]
        self.labdas = aquifer['labdas']         # thermal conductivity solid [W/mK]
        self.labdaf = aquifer['labdaf']         # thermal conductivity fluid [W/mK]
        self.mu = aquifer['viscosity']
        self.pref = aquifer['pref']             # initial reservoir pressure [Pa]
        self.Tref = aquifer['Tref']             # initial reservoir temperature [K]
        self.rw = aquifer['rw']                 # well radius [m]
        self.rmax = aquifer['rmax']             # well radius of influence [m]
        self.L = aquifer['L']                   # distance between injection and production well
        self.Tinj = aquifer['Tinj']             # reinjection temperature
        self.patm = aquifer['patm']             # atmospheric pressure
        self.ε = aquifer['ε']                   # tubing roughness [m]
        self.ct = aquifer['ct']
        self.g = 9.81                           # gravitational acceleration [m/s2]

        # --- derived quantities ---
        self.mdot = self.Q * self.rhof          # mass flow rate [kg/s]
        self.D = 2 * self.rw                    # well diameter [m]
        # NOTE(review): 2*pi*rw is a perimeter, not a cross-sectional area --
        # confirm the intended use of Aw before relying on it.
        self.Aw = 2 * np.pi * self.rw

        # porosity-weighted rock + fluid mixture properties
        self.ρ = self.φ * self.rhof + (1 - self.φ) * self.rhos
        self.cp = self.φ * self.cpf + (1 - self.φ) * self.cps
        self.λ = self.φ * self.labdaf + (1 - self.φ) * self.labdas
# class Well:
#
# def __init__(self, well, aquifer):
#
# self.Q = well['Q'] # pumping rate from well (negative value = extraction)
# self.mdot = self.Q * aquifer['rho_f']
# self.D = 2 * aquifer['rw']
# self.Aw = 2 * np.pi * aquifer['rw']
class DoubletGenerator:
"""Generates all properties for a doublet
Args:
"""
    def __init__(self, aquifer, sol, params=None):
        """
        Assemble the doublet state from an Aquifer and a solution vector.

        Parameters
        ----------
        aquifer : Aquifer
            Reservoir / well property container.
        sol : sequence
            sol[0] = pressure at node 9, sol[1] = temperature at node 9
            (the downhole sensor location on the production side).
        params : optional
            Stochastic parameter set; currently unused (see commented block).
        """
        # Initialize deterministic parameters
        self.aquifer = aquifer
        self.time = 365*24*60*60 #1 year [s]
        self.H = self.aquifer.H
        self.Q = self.aquifer.Q
        self.alpha = self.aquifer.labdas / ( self.aquifer.rhos * self.aquifer.cps) #thermal diffusion of rock
        self.gamma = 0.577216 #euler constant
        self.pnode9 = sol[0]
        self.Tnode9 = sol[1]
        # broadcast the scalar injection temperature to the shape of Tnode9
        self.Tinj = self.aquifer.Tinj * np.ones_like(self.Tnode9)
        # if params:
        # Stoichastic parameters with effect on well test
        # self.params = params
        # self.H = np.mean(params[0])
        # self.Q = np.mean(params[4])

        # Set lengths in system: pipe length == sensor depth
        self.lpipe = self.z = self.aquifer.dsensor
        self.dpump = self.aquifer.dpump

        # Set specs
        self.effpump = 0.61 # Efficiency of pump [-]
        self.eta = 0.61 # Efficiency of heat exchanger [-]
        self.Ppump = 2.671e5/2 # Power of pump [W]

        # Evaluate objects within doublet: undisturbed aquifer T/P at both wells
        self.T_aqinjector = self.Tinj
        self.T_aqproducer = self._get_Tz(self.lpipe)
        self.P_aqproducer = self._get_pgz(self.aquifer.patm, self.lpipe, self.T_aqproducer)
        self.P_aqinjector = self._get_pgz(self.aquifer.patm, self.lpipe, self.Tinj)
        self.ppump = self._get_ppump(self.Ppump, self.Q)

        # Evaluate Tnodes within doublet (production leg upward, then
        # heat exchanger, then injection leg downward)
        self.Tnode10 = self.T_aqproducer # Tref when based on depth of sensor
        self.Tnode8 = self.get_Tnode8(self.Tnode9)
        self.Tnode6 = self.Tnode7 = self.get_Tnode7(self.Tnode9)
        self.Tnode4 = self.Tnode5 = self.Tinj
        self.Tnode3 = self.get_Tnode3(self.Tnode4)
        self.Tnode2 = self.get_Twinj(self.z - self.dpump, self.Tinj)
        self.Tnode1 = self.T_aqproducer

        # Evaluate pnodes within doublet
        self.pnode10 = self.P_aqproducer # pref when based on depth
        self.pnode8 = self.get_pnode8(self.pnode9)
        self.pnode6 = self.pnode7 = self.get_pnode7(self.pnode8)
        self.pnode4 = self.pnode5 = self.pnode6
        self.pnode3 = self.get_pnode3(self.pnode4)
        self.pnode2 = self.get_pnode2(self.pnode3)
        self.pnode1 = self.P_aqinjector # pref when based on depth and injection temperature

        # Calculate power output system: heat delivered across the exchanger
        self.Phe = self.aquifer.mdot * self.aquifer.cpf * (self.Tnode6 - self.Tinj)
def get_Tw(self, dz, Tw):
Tw = Tw.copy()
dl = 10 # pipe segment [m]
zi = np.linspace(self.z, self.z - dz, dz/dl + 1)
for i in range(len(zi)-1):
Tw -= dl * self._getqw(Tw, zi[i]) / ( self.aquifer.mdot * self.aquifer.cpf )
return Tw
def get_Twinj(self, dz, Tw):
Tw = Tw.copy()
dl = 10 # pipe segment [m]
zi = np.linspace(0, dz, dz/dl + 1)
for i in range(len(zi)-1):
Tw += dl * self._getqw(Tw, zi[i]) / ( self.aquifer.mdot * self.aquifer.cpf )
return Tw
def _getqw(self, Tw, zi):
qw = 4 * math.pi * self.aquifer.labdas * ( Tw - self._get_Tz(zi) ) / math.log( ( 4 * self.alpha * self.time ) / (math.exp(self.gamma) * self.aquifer.rw**2 ) )
return qw
def get_Tnode8(self, Tnode9):
Tnode8 = self.get_Tw(self.z - self.dpump, Tnode9)
return Tnode8
def get_Tnode7(self, Tnode9):
Tnode7 = self.get_Tw(self.z, Tnode9)
return Tnode7
def get_Tnode3(self, Tnode4):
Tnode3 = self.get_Twinj(self.dpump, Tnode4)
return Tnode3
def get_Tnode2(self, Tnode4):
Tnode2 = self.get_Twinj(self.z, Tnode4)
return Tnode2
def get_pnode8(self, pnode9):
pnode8 = pnode9 - self._get_pgz(0, (self.z - self.dpump), self.Tnode9) - self._get_pfriction(self.z - self.dpump)
# print('loss of pressure by height', self._get_pgz(0, (self.z - self.dpump), self.Tnode9))
# print('loss of pressure by friction', self._get_pfriction(self.z - self.dpump))
return pnode8
def get_pnode7(self, pnode8):
pnode7 = pnode8 - self._get_pgz(0, self.dpump, self._get_Tz(self.lpipe)) - self._get_pfriction(self.dpump) + self._get_ppump(self.Ppump, self.Q)
return pnode7
def get_pnode3(self, pnode4):
pnode3 = pnode4 + self._get_pgz(0, self.dpump, self._get_Tz(self.lpipe)) + self._get_pfriction(self.dpump) #+ self._get_ppump(self.Ppump, self.Q)
return pnode3
def get_pnode2(self, pnode3):
pnode2 = pnode3 + self._get_pgz(0, (self.z - self.dpump), self.T_aqinjector) + self._get_pfriction(self.z - self.dpump)
return pnode2
def _get_ppump(self, Ppump, Q):
ppump = Ppump / (Q * self.effpump) # appropiate value is 20e5 Pa
# print('pump added pressure', ppump)
return ppump
def _get_pgz(self, patm, z, T):
""" Computes pressure of the aquifer as a function of the depth, temperature and pressure
Arguments:
z (float): depth (downwards from groundlevel is positive)
Returns:
p (float): value of pressure
"""
pgz = patm + self.aquifer.g * self.aquifer.rhof * z # density as a constant
# pgz = patm + self.aquifer.g * self.rho(np.mean(T)-273, pgz) * z # density as a function of temperature and pressure
return pgz
def _get_pfriction(self, z):
pfriction = (self._get_f() * self.aquifer.rhof * self.get_vmean(self.Q) * z) / 2 * self.aquifer.D
return pfriction
def _get_f(self):
f = ( 1.14 - 2 * math.log10( self.aquifer.ε / self.aquifer.D + 21.25 / ( self.get_Re( self.get_vmean(self.Q) )**0.9 ) ) )**-2
return f
def get_vmean(self, Q):
vmean = 4 * Q / ( math.pi * ( self.aquifer.D ** 2 ) )
return vmean
def get_Re(self, vmean):
Re = ( self.aquifer.rhof * vmean ) / self.aquifer.mu
return Re
# Theis solution, temperature and pressure as a function of depth
# def _get_P_wb(self, P_aquifer, T_aquifer):
# """ Computes pressure at wellbore
#
# Arguments:
# d (float): depth (downwards from groundlevel is positive)
# Returns:
# P_wb (float): value of pressure at well bore
# """
# if P_aquifer == self.P_aqproducer:
# Q = -self.Q
# else:
# Q = self.Q
#
# P_wb = P_aquifer + ( ( Q * self.mu(T_aquifer, P_aquifer) ) / ( 2 * math.pi * self.aquifer.K * self.aquifer.H ) ) * np.log ( self.aquifer.L / self.aquifer.rw)
# return P_wb
def _get_Tz(self, z):
""" Computes temperature of the aquifer as a function of the depth
Arguments:
z (float): depth (downwards from groundlevel is positive)
Returns:
T (float): value of temperature
"""
T = self.aquifer.Tsur + z * self.aquifer.labda
return T
# Thermophysical properties
def rho(self, Twater, Pwater):
# rho = (1 + 10e-6 * (-80 * T - 3.3 * T**2 + 0.00175 * T**3 + 489 * p - 2 * T * p + 0.016 * T**2 * p - 1.3e-5 * T**3\
# * p - 0.333 * p**2 - 0.002 * T * p**2) )
rho = PropsSI('D', 'T', Twater, 'P', Pwater, 'IF97::Water')
# rho = self.aquifer.rhof * (1 - 3.17e-4 * (Twater - 298.15) - 2.56e-6 * (Twater - 298.15) ** 2)
return rho
def mu(self, Twater, Pwater):
# mu = 0.1 + 0.333 * saltcontent + (1.65 + 91.9 * saltcontent**3) * math.exp(-(0.42*(saltcontent**0.8 - 0.17)**2 + 0.045) * Twater**0.8)
mu = PropsSI('V', 'T', Twater, 'P', Pwater, 'IF97::Water')
return mu
## Graphical variables for GUI ##
# self.Dx = self.aquifer.L * 3 # domain of x
# self.Dy = - (2 * self.aquifer.dtop + self.aquifer.H) # domain of y
# self.Nx = 24 # number of nodes by | |
<filename>spikeinterface/comparison/paircomparisons.py
import numpy as np
import pandas as pd
from ..toolkit.postprocessing import compute_template_similarity
from .basecomparison import BasePairComparison, MixinSpikeTrainComparison, MixinTemplateComparison
from .comparisontools import (do_count_event, make_match_count_matrix,
make_agreement_scores_from_count, do_score_labels, do_confusion_matrix,
do_count_score, compute_performance)
class BasePairSorterComparison(BasePairComparison, MixinSpikeTrainComparison):
    """
    Base class shared by SymmetricSortingComparison and GroundTruthComparison.

    On construction it initialises both parent classes, caches the unit ids
    of the two sortings, and eagerly computes the agreement scores and the
    unit matching.
    """

    def __init__(self, sorting1, sorting2, sorting1_name=None, sorting2_name=None,
                 delta_time=0.4, match_score=0.5, chance_score=0.1, n_jobs=1,
                 verbose=False):
        # Fall back to generic display names when none are supplied
        if sorting1_name is None:
            sorting1_name = 'sorting1'
        if sorting2_name is None:
            sorting2_name = 'sorting2'
        BasePairComparison.__init__(self, object1=sorting1, object2=sorting2,
                                    name1=sorting1_name, name2=sorting2_name,
                                    match_score=match_score, chance_score=chance_score,
                                    verbose=verbose)
        MixinSpikeTrainComparison.__init__(self, delta_time=delta_time, n_jobs=n_jobs)
        self.set_frames_and_frequency(self.object_list)
        # Cache unit ids of both sortings for repeated use
        self.unit1_ids = self.sorting1.get_unit_ids()
        self.unit2_ids = self.sorting2.get_unit_ids()
        # Compute agreement scores and matching up-front
        self._do_agreement()
        self._do_matching()

    @property
    def sorting1(self):
        """First sorting object under comparison."""
        return self.object_list[0]

    @property
    def sorting2(self):
        """Second sorting object under comparison."""
        return self.object_list[1]

    @property
    def sorting1_name(self):
        """Display name of the first sorting."""
        return self.name_list[0]

    @property
    def sorting2_name(self):
        """Display name of the second sorting."""
        return self.name_list[1]

    def _do_agreement(self):
        """Compute event counts, the match-count matrix and agreement scores."""
        if self._verbose:
            print('Agreement scores...')
        # common to GroundTruthComparison and SymmetricSortingComparison
        # spike count for each spike train
        self.event_counts1 = do_count_event(self.sorting1)
        self.event_counts2 = do_count_event(self.sorting2)
        # matrix of event match count for each pair
        self.match_event_count = make_match_count_matrix(self.sorting1, self.sorting2, self.delta_frames,
                                                         n_jobs=self.n_jobs)
        # agreement matrix score for each pair
        self.agreement_scores = make_agreement_scores_from_count(self.match_event_count, self.event_counts1,
                                                                 self.event_counts2)
class SymmetricSortingComparison(BasePairSorterComparison):
    """
    Compares two spike sorter outputs.

    - Spike trains are matched based on their agreement scores
    - Individual spikes are labelled as true positives (TP), false negatives (FN), false positives 1 (FP from spike
      train 1), false positives 2 (FP from spike train 2), misclassifications (CL)

    It also allows to get confusion matrix and agreement fraction, false positive fraction and
    false negative fraction.

    Parameters
    ----------
    sorting1: SortingExtractor
        The first sorting for the comparison
    sorting2: SortingExtractor
        The second sorting for the comparison
    sorting1_name: str
        The name of sorter 1
    sorting2_name: str
        The name of sorter 2
    delta_time: float
        Number of ms to consider coincident spikes (default 0.4 ms)
    match_score: float
        Minimum agreement score to match units (default 0.5)
    chance_score: float
        Minimum agreement score for a possible match (default 0.1)
    n_jobs: int
        Number of cores to use in parallel. Uses all available if -1
    verbose: bool
        If True, output is verbose

    Returns
    -------
    sorting_comparison: SortingComparison
        The SortingComparison object
    """

    def __init__(self, sorting1, sorting2, sorting1_name=None, sorting2_name=None,
                 delta_time=0.4, sampling_frequency=None, match_score=0.5, chance_score=0.1,
                 n_jobs=-1, verbose=False):
        # NOTE(review): sampling_frequency is accepted but unused here —
        # presumably kept for backward compatibility; confirm before removal.
        BasePairSorterComparison.__init__(self, sorting1, sorting2, sorting1_name=sorting1_name,
                                          sorting2_name=sorting2_name,
                                          delta_time=delta_time,
                                          match_score=match_score, chance_score=chance_score,
                                          n_jobs=n_jobs, verbose=verbose)

    def get_matching(self):
        """Return the hungarian match series in both directions (1->2, 2->1)."""
        return self.hungarian_match_12, self.hungarian_match_21

    def get_matching_event_count(self, unit1, unit2):
        """Return the coincident-event count for the pair (unit1, unit2)."""
        if (unit1 is not None) and (unit2 is not None):
            return self.match_event_count.at[unit1, unit2]
        else:
            raise Exception(
                'get_matching_event_count: unit1 and unit2 must not be None.')

    def get_best_unit_match1(self, unit1):
        """Best-matching unit of sorting2 for unit1 of sorting1."""
        return self.best_match_12[unit1]

    def get_best_unit_match2(self, unit2):
        """Best-matching unit of sorting1 for unit2 of sorting2."""
        return self.best_match_21[unit2]

    def get_matching_unit_list1(self, unit1):
        """All possible matches in sorting2 for unit1 of sorting1."""
        return self.possible_match_12[unit1]

    def get_matching_unit_list2(self, unit2):
        """All possible matches in sorting1 for unit2 of sorting2."""
        return self.possible_match_21[unit2]

    def get_agreement_fraction(self, unit1=None, unit2=None):
        """Agreement score of a pair; 0 when either unit is missing (None or -1)."""
        if unit1 is None or unit1 == -1 or unit2 is None or unit2 == -1:
            return 0
        else:
            return self.agreement_scores.at[unit1, unit2]
def compare_two_sorters(*args, **kwargs):
    # Thin functional wrapper around SymmetricSortingComparison.
    return SymmetricSortingComparison(*args, **kwargs)


# Copy the class docstring onto the wrapper so help() stays useful.
compare_two_sorters.__doc__ = SymmetricSortingComparison.__doc__
class GroundTruthComparison(BasePairSorterComparison):
"""
Compares a sorter to a ground truth.
This class can:
* compute a match between gt_sorting and tested_sorting
* compute optionally the score label (TP, FN, CL, FP) for each spike
* count by unit of GT the total of each (TP, FN, CL, FP) into a Dataframe
GroundTruthComparison.count
* compute the confusion matrix .get_confusion_matrix()
* compute some performance metric with several strategy based on
the count score by unit
* count well detected units
* count false positive detected units
* count redundant units
* count overmerged units
* summary all this
Parameters
----------
gt_sorting: SortingExtractor
The first sorting for the comparison
tested_sorting: SortingExtractor
The second sorting for the comparison
gt_name: str
The name of sorter 1
tested_name: str
The name of sorter 2
delta_time: float
Number of ms to consider coincident spikes (default 0.4 ms)
match_score: float
Minimum agreement score to match units (default 0.5)
chance_score: float
Minimum agreement score for a possible match (default 0.1)
redundant_score: float
Agreement score above which units are redundant (default 0.2)
overmerged_score: float
Agreement score above which units can be overmerged (default 0.2)
well_detected_score: float
Agreement score above which units are well detected (default 0.8)
exhaustive_gt: bool (default True)
Tell if the ground truth is "exhaustive" or not. In other words, whether the
GT contains all possible units. It allows more performance measurements.
For instance, MEArec simulated dataset have exhaustive_gt=True
match_mode: 'hungarian', or 'best'
What is match used for counting : 'hungarian' or 'best match'.
n_jobs: int
Number of cores to use in parallel. Uses all available if -1
compute_labels: bool
If True, labels are computed at instantiation (default False)
compute_misclassifications: bool
If True, misclassifications are computed at instantiation (default False)
verbose: bool
If True, output is verbose
Returns
-------
sorting_comparison: SortingComparison
The SortingComparison object
"""
def __init__(self, gt_sorting, tested_sorting, gt_name=None, tested_name=None,
             delta_time=0.4, sampling_frequency=None, match_score=0.5, well_detected_score=0.8,
             redundant_score=0.2, overmerged_score=0.2, chance_score=0.1, exhaustive_gt=False, n_jobs=-1,
             match_mode='hungarian', compute_labels=False, compute_misclassifications=False, verbose=False):
    """Build the GT-vs-tested comparison; counting runs at construction."""
    # Default display names for the two sides of the comparison.
    if gt_name is None:
        gt_name = 'ground truth'
    if tested_name is None:
        tested_name = 'tested'
    BasePairSorterComparison.__init__(self, gt_sorting, tested_sorting, sorting1_name=gt_name,
                                      sorting2_name=tested_name, delta_time=delta_time,
                                      match_score=match_score, chance_score=chance_score,
                                      n_jobs=n_jobs, verbose=verbose)
    self.exhaustive_gt = exhaustive_gt

    self._compute_misclassifications = compute_misclassifications
    self.redundant_score = redundant_score
    self.overmerged_score = overmerged_score
    self.well_detected_score = well_detected_score

    assert match_mode in ['hungarian', 'best']
    self.match_mode = match_mode
    self._compute_labels = compute_labels

    # Raw counting is always done at instantiation.
    self._do_count()

    # Per-spike labels are optional (lazy unless compute_labels=True).
    self._labels_st1 = None
    self._labels_st2 = None
    if self._compute_labels:
        self._do_score_labels()

    # confusion matrix is computed on demand
    self._confusion_matrix = None
def get_labels1(self, unit_id):
    """Return per-spike score labels for GT unit *unit_id*.

    Labels are computed lazily on first request.
    """
    if self._labels_st1 is None:
        self._do_score_labels()
    if unit_id not in self.sorting1.get_unit_ids():
        raise Exception("Unit_id is not a valid unit")
    return self._labels_st1[unit_id]
def get_labels2(self, unit_id):
    """Return per-spike score labels for tested unit *unit_id*.

    Labels are computed lazily on first request.
    """
    # _labels_st1 and _labels_st2 are filled together, so checking
    # _labels_st1 is sufficient to know whether labels exist.
    if self._labels_st1 is None:
        self._do_score_labels()
    if unit_id not in self.sorting2.get_unit_ids():
        raise Exception("Unit_id is not a valid unit")
    return self._labels_st2[unit_id]
def _do_count(self):
    """
    Do raw count into a dataframe.
    Internally use hungarian match or best match.
    """
    # match_mode is validated in __init__ ('hungarian' or 'best').
    match_12 = (self.hungarian_match_12 if self.match_mode == 'hungarian'
                else self.best_match_12)
    self.count_score = do_count_score(self.event_counts1, self.event_counts2,
                                      match_12, self.match_event_count)
def _do_confusion_matrix(self):
    """Compute and cache the confusion matrix (see get_confusion_matrix)."""
    if self._verbose:
        print("Computing confusion matrix...")
    # match_mode is validated in __init__ ('hungarian' or 'best').
    match_12 = (self.hungarian_match_12 if self.match_mode == 'hungarian'
                else self.best_match_12)
    self._confusion_matrix = do_confusion_matrix(self.event_counts1, self.event_counts2, match_12,
                                                 self.match_event_count)
def get_confusion_matrix(self):
    """
    Computes the confusion matrix.

    Returns
    -------
    confusion_matrix: pandas.DataFrame
        The confusion matrix
    """
    # Lazily computed once, then cached.
    if self._confusion_matrix is None:
        self._do_confusion_matrix()
    return self._confusion_matrix
def _do_score_labels(self):
    """Label every spike of both sortings (TP/FP/FN, optionally CL)."""
    # Per-spike labelling is only meaningful for a one-to-one match.
    assert self.match_mode == 'hungarian', \
        'Labels (TP, FP, FN) can be computed only with hungarian match'

    if self._verbose:
        print("Adding labels...")

    self._labels_st1, self._labels_st2 = do_score_labels(self.sorting1, self.sorting2,
                                                         self.delta_frames, self.hungarian_match_12,
                                                         self._compute_misclassifications)
def get_performance(self, method='by_unit', output='pandas'):
    """
    Get performance rate with several method:
      * 'raw_count' : just render the raw count table
      * 'by_unit' : render perf as rate unit by unit of the GT
      * 'pooled_with_average' : compute rate unit by unit and average

    Parameters
    ----------
    method: str
        'raw_count', 'by_unit', or 'pooled_with_average'
    output: str
        'pandas' or 'dict'

    Returns
    -------
    perf: pandas dataframe/series (or dict)
        dataframe/series (based on 'output') with performance entries
    """
    possibles = ('raw_count', 'by_unit', 'pooled_with_average')
    if method not in possibles:
        raise Exception("'method' can be " + ' or '.join(possibles))

    if method == 'raw_count':
        perf = self.count_score
    elif method == 'by_unit':
        perf = compute_performance(self.count_score)
    else:  # 'pooled_with_average': average of the per-unit rates
        perf = self.get_performance(method='by_unit').mean(axis=0)

    if output == 'dict' and isinstance(perf, pd.Series):
        perf = perf.to_dict()
    return perf
def print_performance(self, method='pooled_with_average'):
    """
    Print performance with the selected method
    """
    # _template_txt_performance is a module-level template string
    # (defined elsewhere in this file).
    template_txt_performance = _template_txt_performance

    if method == 'by_unit':
        perf = self.get_performance(method=method, output='pandas')
        perf = perf * 100  # express rates as percentages
        # one list of values per metric (one entry per GT unit)
        d = {k: perf[k].tolist() for k in perf.columns}
        txt = template_txt_performance.format(method=method, **d)
        print(txt)

    elif method == 'pooled_with_average':
        perf = self.get_performance(method=method, output='pandas')
        perf = perf * 100  # express rates as percentages
        txt = template_txt_performance.format(
            method=method, **perf.to_dict())
        print(txt)
def print_summary(self, well_detected_score=None, redundant_score=None, overmerged_score=None):
    """
    Print a global performance summary that depends on the context:
      * exhaustive = True/False
      * how many gt units (one or several)

    This summary mixes several performance metrics.
    """
    # _template_summary_part1/_template_summary_part2 are module-level
    # template strings (defined elsewhere in this file).
    txt = _template_summary_part1

    d = dict(
        num_gt=len(self.unit1_ids),
        num_tested=len(self.unit2_ids),
        num_well_detected=self.count_well_detected_units(
            well_detected_score),
        num_redundant=self.count_redundant_units(redundant_score),
        num_overmerged=self.count_overmerged_units(overmerged_score),
    )

    # The extra counters only make sense when the GT is exhaustive.
    if self.exhaustive_gt:
        txt = txt + _template_summary_part2
        d['num_false_positive_units'] = self.count_false_positive_units()
        d['num_bad'] = self.count_bad_units()

    txt = txt.format(**d)

    print(txt)
def get_well_detected_units(self, well_detected_score=None):
"""
Return units list of "well detected units" from tested_sorting.
"well detected units" are defined as units in tested that
are well matched to GT units.
Parameters
----------
well_detected_score: float (default 0.8)
The agreement score above which tested units
are counted as "well detected".
"""
if well_detected_score is not None:
self.well_detected_score = well_detected_score
matched_units2 = self.hungarian_match_12
well_detected_ids | |
color='red', linewidth=5)
Ax.plot((30*dz[l],30*dz[l]), (98*dr[l],103*dr[l]), '-', color='red', linewidth=5)
Ax.plot((32*dz[l],32*dz[l]), (98*dr[l],103*dr[l]), '-', color='red', linewidth=5)
Ax.plot((30*dz[l],32*dz[l]), (98*dr[l],98*dr[l]), '-', color='red', linewidth=5)
Ax.plot((30*dz[l],32*dz[l]), (103*dr[l],103*dr[l]), '-', color='red', linewidth=5)
Ax.plot((36*dz[l],36*dz[l]), (98*dr[l],103*dr[l]), '-', color='red', linewidth=5)
Ax.plot((38*dz[l],38*dz[l]), (98*dr[l],103*dr[l]), '-', color='red', linewidth=5)
Ax.plot((36*dz[l],38*dz[l]), (98*dr[l],98*dr[l]), '-', color='red', linewidth=5)
Ax.plot((36*dz[l],38*dz[l]), (103*dr[l],103*dr[l]), '-', color='red', linewidth=5)
Ax.plot((42*dz[l],42*dz[l]), (98*dr[l],103*dr[l]), '-', color='red', linewidth=5)
Ax.plot((44*dz[l],44*dz[l]), (98*dr[l],103*dr[l]), '-', color='red', linewidth=5)
Ax.plot((42*dz[l],44*dz[l]), (98*dr[l],98*dr[l]), '-', color='red', linewidth=5)
Ax.plot((42*dz[l],44*dz[l]), (103*dr[l],103*dr[l]), '-', color='red', linewidth=5)
Ax.plot((42*dz[l],44*dz[l]), (103*dr[l],103*dr[l]), '-', color='red', linewidth=5)
#Horseshoe Solenoid
Ax.plot((68*dz[l],68*dz[l]), (4*dr[l],18*dr[l]), '-', color='lightgreen', linewidth=5)
Ax.plot((70*dz[l],70*dz[l]), (4*dr[l],15*dr[l]), '-', color='lightgreen', linewidth=5)
Ax.plot((68*dz[l],70*dz[l]), (18*dr[l],22*dr[l]), '-', color='lightgreen', linewidth=5)
Ax.plot((70*dz[l],72*dz[l]), (15*dr[l],18*dr[l]), '-', color='lightgreen', linewidth=5)
Ax.plot((72*dz[l],72*dz[l]), (18*dr[l],22*dr[l]), '-', color='lightgreen', linewidth=5)
Ax.plot((70*dz[l],72*dz[l]), (22*dr[l],22*dr[l]), '-', color='lightgreen', linewidth=5)
Ax.plot((68*dz[l],68*dz[l]), (4*dr[l],18*dr[l]), '-', color='lightgreen', linewidth=5)
Ax.plot((70*dz[l],70*dz[l]), (4*dr[l],15*dr[l]), '-', color='lightgreen', linewidth=5)
Ax.plot((68*dz[l],70*dz[l]), (18*dr[l],22*dr[l]), '-', color='lightgreen', linewidth=5)
Ax.plot((70*dz[l],72*dz[l]), (15*dr[l],18*dr[l]), '-', color='lightgreen', linewidth=5)
Ax.plot((72*dz[l],72*dz[l]), (18*dr[l],22*dr[l]), '-', color='lightgreen', linewidth=5)
Ax.plot((70*dz[l],72*dz[l]), (22*dr[l],22*dr[l]), '-', color='lightgreen', linewidth=5)
#Iron Magnetic Focus
Ax.plot((71*dz[l],71*dz[l]), (43*dr[l],46*dr[l]), '-', color='g', linewidth=6.5)
Ax.plot((72*dz[l],72*dz[l]), (43*dr[l],53*dr[l]), '-', color='g', linewidth=6.5)
Ax.plot((71*dz[l],71*dz[l]), (65*dr[l],68*dr[l]), '-', color='g', linewidth=6.5)
Ax.plot((72*dz[l],72*dz[l]), (58*dr[l],68*dr[l]), '-', color='g', linewidth=6.5)
#enddef
#=============#
def ManualEVgenyMesh(Ax=None):
    """Overlay the EVgeny ICP reactor geometry on matplotlib axis Ax.

    Draws the chamber walls (dimgrey), Macor dielectric (cyan), LaB6
    cathode (orange), metal anode and powered ICP coils (red) and the
    solenoid (lightgreen).  Coordinates are mesh-cell indices scaled by
    the module-level cell sizes dz[l], dr[l].

    Args:
        Ax: matplotlib axes to draw on; defaults to the *current* axes,
            resolved at call time.  (The previous ``Ax=plt.gca()`` default
            was evaluated once at import time, pinning whatever axes
            existed when the module was loaded.)
    """
    if Ax is None:
        Ax = plt.gca()
    #Plot upstream ICP material dimensions.
    Ax.plot((2.5*dz[l],2.5*dz[l]), (0*dr[l],20*dr[l]), '-', color='dimgrey', linewidth=4)
    Ax.plot((2.5*dz[l],2.5*dz[l]), (-20*dr[l],0*dr[l]), '-', color='dimgrey', linewidth=4)
    Ax.plot((2.5*dz[l],41*dz[l]), (20*dr[l],20*dr[l]), '-', color='dimgrey', linewidth=4)
    Ax.plot((2.5*dz[l],41*dz[l]), (-20*dr[l],-20*dr[l]), '-', color='dimgrey', linewidth=4)
    Ax.plot((41*dz[l],41*dz[l]), (20*dr[l],90*dr[l]), '-', color='dimgrey', linewidth=4)
    Ax.plot((41*dz[l],41*dz[l]), (-20*dr[l],-90*dr[l]), '-', color='dimgrey', linewidth=4)
    Ax.plot((41*dz[l],87*dz[l]), (90*dr[l],90*dr[l]), '-', color='dimgrey', linewidth=4)
    Ax.plot((41*dz[l],87*dz[l]), (-90*dr[l],-90*dr[l]), '-', color='dimgrey', linewidth=4)
    Ax.plot((87*dz[l],87*dz[l]), (0*dr[l],90*dr[l]), '-', color='dimgrey', linewidth=4)
    Ax.plot((87*dz[l],87*dz[l]), (-90*dr[l],0*dr[l]), '-', color='dimgrey', linewidth=4)
    #Macor Dielectric
    Ax.plot((2.5*dz[l],2.5*dz[l]), (0*dr[l],20*dr[l]), 'c-', linewidth=4)
    Ax.plot((2.5*dz[l],2.5*dz[l]), (-20*dr[l],0*dr[l]), 'c-', linewidth=4)
    Ax.plot((3*dz[l],6*dz[l]), (20*dr[l],20*dr[l]), 'c-', linewidth=4)
    Ax.plot((3*dz[l],6*dz[l]), (-20*dr[l],-20*dr[l]), 'c-', linewidth=4)
    Ax.plot((23*dz[l],41*dz[l]), (20*dr[l],20*dr[l]), 'c-', linewidth=4)
    Ax.plot((23*dz[l],41*dz[l]), (-20*dr[l],-20*dr[l]), 'c-', linewidth=4)
    Ax.plot((41*dz[l],41*dz[l]), (20*dr[l],90*dr[l]), 'c-', linewidth=4)
    Ax.plot((41*dz[l],41*dz[l]), (-20*dr[l],-90*dr[l]), 'c-', linewidth=4)
    #Powered Electrode - LaB6 Cathode
    Ax.plot((6*dz[l],23*dz[l]), (20*dr[l],20*dr[l]), '-', color='orange', linewidth=5)
    Ax.plot((6*dz[l],23*dz[l]), (-20*dr[l],-20*dr[l]), '-', color='orange', linewidth=5)
    #Powered Electrode - 'Metal' Anode
    Ax.plot((42*dz[l],86*dz[l]), (91*dr[l],91*dr[l]), '-', color='red', linewidth=5)
    Ax.plot((42*dz[l],86*dz[l]), (-91*dr[l],-91*dr[l]), '-', color='red', linewidth=5)
    #Powered ICP Coils - 'Metal'
    #Three identical rectangular coil cross-sections, mirrored about r=0
    #(replaces 24 hand-written, copy-pasted plot calls with one loop).
    for z0, z1 in ((6, 8), (13.5, 15.5), (21, 23)):
        for r in (25, -25, 35, -35):  #Inboard/Outboard edges
            Ax.plot((z0*dz[l],z1*dz[l]), (r*dr[l],r*dr[l]), '-', color='red', linewidth=5)
        for z in (z0, z1):  #Upstream/Downstream edges
            Ax.plot((z*dz[l],z*dz[l]), (25*dr[l],35*dr[l]), '-', color='red', linewidth=5)
            Ax.plot((z*dz[l],z*dz[l]), (-25*dr[l],-35*dr[l]), '-', color='red', linewidth=5)
    #Solenoid
    Ax.plot((42*dz[l],86*dz[l]), (93*dr[l],93*dr[l]), '-', color='lightgreen', linewidth=5)
    Ax.plot((42*dz[l],86*dz[l]), (-93*dr[l],-93*dr[l]), '-', color='lightgreen', linewidth=5)
#enddef
def ManualEVgenyMeshOLD(Ax=None):
    """Overlay the OLD EVgeny ICP reactor geometry on matplotlib axis Ax.

    Same feature classes as ManualEVgenyMesh but with the older mesh
    coordinates.  Coordinates are mesh-cell indices scaled by the
    module-level cell sizes dz[l], dr[l].

    Args:
        Ax: matplotlib axes to draw on; defaults to the *current* axes,
            resolved at call time (fixes the import-time ``plt.gca()``
            default-argument pitfall).
    """
    if Ax is None:
        Ax = plt.gca()
    #Plot upstream ICP material dimensions.
    Ax.plot((0.5*dz[l],0.5*dz[l]), (0*dr[l],10*dr[l]), '-', color='dimgrey', linewidth=4)
    Ax.plot((0.5*dz[l],0.5*dz[l]), (-10*dr[l],0*dr[l]), '-', color='dimgrey', linewidth=4)
    Ax.plot((0.5*dz[l],39*dz[l]), (10*dr[l],10*dr[l]), '-', color='dimgrey', linewidth=4)
    Ax.plot((0.5*dz[l],39*dz[l]), (-10*dr[l],-10*dr[l]), '-', color='dimgrey', linewidth=4)
    Ax.plot((39*dz[l],39*dz[l]), (10*dr[l], 80*dr[l]), '-', color='dimgrey', linewidth=4)
    Ax.plot((39*dz[l],39*dz[l]), (-10*dr[l], -80*dr[l]), '-', color='dimgrey', linewidth=4)
    Ax.plot((39*dz[l],85*dz[l]), (80*dr[l],80*dr[l]), '-', color='dimgrey', linewidth=4)
    Ax.plot((39*dz[l],85*dz[l]), (-80*dr[l],-80*dr[l]), '-', color='dimgrey', linewidth=4)
    Ax.plot((85*dz[l],85*dz[l]), (0*dr[l],80*dr[l]), '-', color='dimgrey', linewidth=4)
    Ax.plot((85*dz[l],85*dz[l]), (-80*dr[l],0*dr[l]), '-', color='dimgrey', linewidth=4)
    #Macor Dielectric
    Ax.plot((1*dz[l],4*dz[l]), (10*dr[l],10*dr[l]), 'c-', linewidth=4)
    Ax.plot((1*dz[l],4*dz[l]), (-10*dr[l],-10*dr[l]), 'c-', linewidth=4)
    Ax.plot((21*dz[l],39*dz[l]), (10*dr[l],10*dr[l]), 'c-', linewidth=4)
    Ax.plot((21*dz[l],39*dz[l]), (-10*dr[l],-10*dr[l]), 'c-', linewidth=4)
    #Powered Electrode - LaB6 Cathode
    Ax.plot((4*dz[l],21*dz[l]), (10*dr[l],10*dr[l]), '-', color='orange', linewidth=5)
    Ax.plot((4*dz[l],21*dz[l]), (-10*dr[l],-10*dr[l]), '-', color='orange', linewidth=5)
    #Powered Electrode - 'Metal' Anode
    Ax.plot((40*dz[l],84*dz[l]), (81*dr[l],81*dr[l]), '-', color='red', linewidth=5)
    Ax.plot((40*dz[l],84*dz[l]), (-81*dr[l],-81*dr[l]), '-', color='red', linewidth=5)
    #Powered ICP Coils - 'Metal'
    #Three identical rectangular coil cross-sections, mirrored about r=0
    #(replaces 24 hand-written, copy-pasted plot calls with one loop).
    for z0, z1 in ((4, 6), (11.5, 13.5), (19, 21)):
        for r in (15, -15, 25, -25):  #Inboard/Outboard edges
            Ax.plot((z0*dz[l],z1*dz[l]), (r*dr[l],r*dr[l]), '-', color='red', linewidth=5)
        for z in (z0, z1):  #Upstream/Downstream edges
            Ax.plot((z*dz[l],z*dz[l]), (15*dr[l],25*dr[l]), '-', color='red', linewidth=5)
            Ax.plot((z*dz[l],z*dz[l]), (-15*dr[l],-25*dr[l]), '-', color='red', linewidth=5)
    #Solenoid
    Ax.plot((40*dz[l],84*dz[l]), (83*dr[l],83*dr[l]), '-', color='lightgreen', linewidth=5)
    Ax.plot((40*dz[l],84*dz[l]), (-83*dr[l],-83*dr[l]), '-', color='lightgreen', linewidth=5)
#enddef
#=============#
def ManualGECMesh(Ax=None): #Greg's GEC overlay code
    """Overlay the GEC reference-cell geometry on matplotlib axis Ax.

    Args:
        Ax: matplotlib axes to draw on; defaults to the *current* axes,
            resolved at call time (fixes the import-time ``plt.gca()``
            default-argument pitfall).
    """
    if Ax is None:
        Ax = plt.gca()
    #Line widths for the different feature classes.
    thin = 2
    thick = 2
    verythik = 2  # used only by the commented-out dielectric spacer below
    superthick = 3
    #Plot upstream ICP material dimensions.
    Ax.plot((56.5*dz[l],56.5*dz[l]), (1.5*dr[l],55.5*dr[l]), '-', color='dimgrey', linewidth=thin) #vertical right edge
    Ax.plot((34.5*dz[l],34.5*dz[l]), (1.5*dr[l],14.75*dr[l]), '-', color='dimgrey', linewidth=thin) #vertical left edge coil house
    Ax.plot((30.5*dz[l],30.5*dz[l]), (1.5*dr[l],12.5*dr[l]), '-', color='dimgrey', linewidth=thin) #vertical internal edge coil house
    Ax.plot((40*dz[l],40*dz[l]), (14.75*dr[l],20.75*dr[l]), '-', color='dimgrey', linewidth=thin) #vertical left edge coil rim
    Ax.plot((27.5*dz[l],27.5*dz[l]), (39*dr[l],55.5*dr[l]), '-', color='dimgrey', linewidth=thin) #vertical left edge electrode
    Ax.plot((27.5*dz[l],27.5*dz[l]), (17.75*dr[l],20.75*dr[l]), '-', color='dimgrey', linewidth=thin) #vertical right edge coil house rim
    Ax.plot((27.5*dz[l],34.5*dz[l]), (55.5*dr[l],55.5*dr[l]), '-', color='dimgrey', linewidth=thin) #horizontal base edge left
    Ax.plot((46*dz[l],56.5*dz[l]), (55.5*dr[l],55.5*dr[l]), '-', color='dimgrey', linewidth=thin) #horizontal base edge right
    Ax.plot((34.5*dz[l],56.5*dz[l]), (1.5*dr[l],1.5*dr[l]), '-', color='dimgrey', linewidth=thin) #horizontal top edge
    Ax.plot((34.5*dz[l],40*dz[l]), (14.75*dr[l],14.75*dr[l]), '-', color='dimgrey', linewidth=thin) #horizontal top edge coil house rim
    Ax.plot((27.5*dz[l],40*dz[l]), (20.75*dr[l],20.75*dr[l]), '-', color='dimgrey', linewidth=thin) #horizontal bottom edge coil house rim
    Ax.plot((0.01*dz[l],30.5*dz[l]), (1.5*dr[l],1.5*dr[l]), '-', color='dimgrey', linewidth=thin) #horizontal top edge internal
    #Dielectric window
    Ax.plot((31.5*dz[l],31.5*dz[l]), (12.5*dr[l],17.75*dr[l]), 'orange', linewidth=thin) #Vertical right edege dielectric
    Ax.plot((0.01*dz[l],31.5*dz[l]), (12.5*dr[l],12.5*dr[l]), 'orange', linewidth=thin)
    Ax.plot((0.01*dz[l],31.5*dz[l]), (17.75*dr[l],17.75*dr[l]), 'orange', linewidth=thin)
    #Powered Electrode
    Ax.plot((0.01*dz[l],27.5*dz[l]), (39*dr[l],39*dr[l]), 'dimgrey', linewidth=superthick)
    #Dielectric spacer on Electrode
    #Ax.plot((27.55*dz[l],27.55*dz[l]), (39.5*dr[l],40*dr[l]), 'orange', linewidth=verythik)
    #Gas inlet - 'Metal' A
    Ax.plot((34*dz[l],46*dz[l]), (55.5*dr[l],55.5*dr[l]), 'dimgrey', linewidth=thick)
    #Powered ICP Coils - 'Metal'
    #Five identical rectangular coil cross-sections
    #(replaces 20 hand-written, copy-pasted plot calls with one loop).
    for z0, z1 in ((1.5, 3.5), (7.25, 9.25), (13.1, 15), (19, 20.9), (24.75, 26.5)):
        Ax.plot((z0*dz[l],z1*dz[l]), (12.25*dr[l],12.25*dr[l]), '-', color='red', linewidth=thick) #coil bot
        Ax.plot((z0*dz[l],z1*dz[l]), (10*dr[l],10*dr[l]), '-', color='red', linewidth=thick) #coil top
        Ax.plot((z0*dz[l],z0*dz[l]), (10*dr[l],12.25*dr[l]), '-', color='red', linewidth=thick) #coil left
        Ax.plot((z1*dz[l],z1*dz[l]), (10*dr[l],12.25*dr[l]), '-', color='red', linewidth=thick) #coil right
    Ax.plot((0.01*dz[l], 0.2*dz[l]), (57*dr[l],57*dr[l]), color= 'black', linewidth = 14)
#===================##===================#
#===================##===================#
#====================================================================#
#READING DATA INTO MEMORY#
#====================================================================#
print('----------------------')
print('Beginning Data Readin:')
print('----------------------')
#Extraction and organization of data from .PDT files.
for l in tqdm(range(0,numfolders)):
#Load data from TECPLOT2D file and unpack into 1D array.
rawdata, nn_2D = ExtractRawData(Dir,'TECPLOT2D.PDT',l)
rawdata_2D.append(rawdata)
#Read through all variables for each file and stop when list ends.
Variablelist,HeaderEndMarker = ['Radius','Height'],'ZONE'
for i in range(2,nn_2D):
if HeaderEndMarker in str(rawdata_2D[l][i]): break
else: Variablelist.append(str(rawdata_2D[l][i][:-2].strip(' \t\n\r\"')))
#endif
#endfor
numvariables_2D,header_2D = len(Variablelist),len(Variablelist)+2
header_2Dlist.append(header_2D)
#Separate total 1D data array into sets of data for each variable.
CurrentFolderData = SDFileFormatConvertorHPEM(rawdata_2D[l],header_2D,numvariables_2D)
#Save all variables for folder[l] to Data.
#Data is now 3D array of form [folder,variable,datapoint(R,Z)]
Data.append(CurrentFolderData)
#===================##===================#
#===================##===================#
# #Kinetics data readin - NOT CURRENTLY EMPLOYED IN ANY DIAGNOSTICS
# if True == True:
#
# #Load data from TECPLOT_KIN file and unpack into 1D array.
# rawdata, nn_kin = ExtractRawData(Dir,'TECPLOT_KIN.PDT',l)
# rawdata_kin.append(rawdata)
#
# #Read through all variables for each file and stop when list ends.
# KinVariablelist,KinHeaderEndMarker = ['T (S)'],'ZONE'
# for i in range(2,nn_2D):
# if KinHeaderEndMarker in str(rawdata_kin[l][i]):
# I = int(filter(lambda x: x.isdigit(), rawdata_kin[l][i].split(',')[0]))
# break
# else: KinVariablelist.append(str(rawdata_kin[l][i][:-2].strip(' \t\n\r\"')))
# #endif
# #endfor
# numvariables_kin,header_kin = len(KinVariablelist),len(KinVariablelist)+2
# header_kinlist.append(header_kin)
#
# #Seperate total 1D data array into sets of data for each variable.
# CurrentFolderData = SDFileFormatConvertorHPEM(rawdata_kin[l],header_kin,numvariables_kin, Zmesh=I,Dimension='1D')
#
# #Save all variables for folder[l] to Data.
# #Data is now 3D array of form [folder,variable,datapoint(R,Z)]
# DataKin.append(CurrentFolderData)
# #endif
#===================##===================#
#===================##===================#
#IEDF/NEDF file readin.
if True in [savefig_IEDFangular,savefig_IEDFtrends]:
#Define arguments and autorun conv_prof.exe if possible.
#### THIS IS HACKY, WON'T ALWAYS WORK, ARGS LIST NEEDS AUTOMATING ####
IEDFVarArgs = ['1','1','1','1','1'] #Works for 2 species 1 surface.
ExtraArgs = ['1','1','1','1','1','1','1','1','1','1']#[] #Hack For Additional Species
Args = ['pcmc.prof','title','1','1','1'] + IEDFVarArgs + ExtraArgs + ['0','0']
DirAdditions = ['iprofile_tec2d.pdt','nprofile_tec2d.pdt','iprofile_tec1d.pdt', 'nprofile_tec1d.pdt','iprofile_zones_tec1d.pdt','nprofile_zones_tec1d.pdt']
#try: AutoConvProf('./conv_prof.exe',Args,DirAdditions)
#except: print('ConvProf Failure:'+Dirlist[l])
AutoConvProf('./conv_prof.exe',Args,DirAdditions)
#Load data from IEDFprofile file and unpack into 1D array.
rawdata, nn_IEDF = ExtractRawData(Dir,'iprofile_tec2d.pdt',l)
rawdata_IEDF.append(rawdata)
#Read through all variables for each file and stop when list ends.
IEDFVariablelist,HeaderEndMarker = ['Theta [deg]','Energy [eV]'],'ZONE'
for i in range(2,nn_IEDF):
#Grab EDFangle(I),EDFbins(J) values from the ZONE line, these outline the datablock size.
if HeaderEndMarker in str(rawdata_IEDF[l][i]):
I = list(filter(lambda x: x.isdigit(), rawdata_IEDF[l][i].split(',')[0])) #discrete digits
I = int( ''.join(I) ); EDFangle = I #Number of EDF angle bins [Integer]
J = list(filter(lambda x: x.isdigit(), rawdata_IEDF[l][i].split(',')[1])) #discrete digits
J = int( ''.join(J) ); EDFbins = J #Number of EDF energy bins [Integer]
break
else: IEDFVariablelist.append(str(rawdata_IEDF[l][i][:-2].strip(' \t\n\r\"')))
#endif
#endfor
numvariables_IEDF,header_IEDF = len(IEDFVariablelist),len(IEDFVariablelist)+2
header_IEDFlist.append(header_IEDF)
#Separate total 1D data array into sets of data for each variable.
#Data is stored in 2D array of shape: [EDFangle,EDFbins] or [I,J]
CurrentFolderData = SDFileFormatConvertorHPEM(rawdata_IEDF[l],header_IEDF,numvariables_IEDF,0,I,J)
#Save all variables for folder[l] to Data.
#Data is now 3D array of form [folder,variable,datapoint(R,Z)]
DataIEDF.append(CurrentFolderData)
#endif
#===================##===================#
#===================##===================#
#EEDF data readin.
# EEDF read-in: parse boltz_tec.pdt into (Energy, F(e)) pairs per ZONE and
# plot the first few distributions.
if savefig_EEDF == True:
    #Load data from MCS.PDT file and unpack into 1D array.
    rawdata, nn_mcs = ExtractRawData(Dir,'boltz_tec.pdt',l)
    rawdata_mcs.append(rawdata)

    #Unpack each row of data points into single array of floats.
    #Removing 'spacing' between the floats and ignoring variables above data.
    Energy,Fe = list(),list()
    for i in range(3,len(rawdata_mcs[l])):
        # A ZONE line starts a new distribution: record its title and flush
        # the pairs accumulated so far.
        if 'ZONE' in rawdata_mcs[l][i]:
            EEDF_TDlist.append( rawdata_mcs[l][i].split('"')[-2].strip(' ') )
            DataEEDF.append([Energy,Fe])
            Energy,Fe = list(),list()
        #endif
        # NOTE(review): the pairs gathered after the *last* ZONE line are never
        # appended to DataEEDF -- looks like the final zone is dropped; confirm.
        try:
            Energy.append( float(rawdata_mcs[l][i].split()[0]) )
            Fe.append( float(rawdata_mcs[l][i].split()[1]) )
        except:
            # Bare except deliberately skips non-numeric rows (headers, blank
            # lines); NaN_Value is set but not read in this excerpt.
            NaN_Value = 1
        #endtry
    #endfor

    # Plot the first five zones only (hard-coded window [a, b)).
    a,b = 0,5
    for i in range(a,b):
        plt.plot(DataEEDF[i][0],DataEEDF[i][1], lw=2)
    plt.legend(EEDF_TDlist[a:b])
    plt.xlabel('Energy [eV]')
    plt.ylabel('F(e) [eV-3/2]')
    plt.show()
#endif
#===================##===================#
#===================##===================#
if True in [savefig_convergence,savefig_temporalprofiles]:
#Load data from movie_icp file and unpack into 1D array.
rawdata,nn_itermovie = ExtractRawData(Dir,'movie_icp.pdt',l)
rawdata_itermovie.append(rawdata)
#Read through all variables for each file and stop when list ends.
#movie_icp has geometry at top, therefore | |
AccountUnits = sentinel):
Model.__init__(**locals())
class ArrayCalculatedTradeState(Array, contains=CalculatedTradeState):
    """Typed array of :class:`CalculatedTradeState` objects (see ``Array`` base)."""
    pass
class MarketOrderDelayedTradeClose(Model):
    """Details for the Market Order extensions specific to a Market Order placed
    with the intent of fully closing a specific open trade that should have
    already been closed but wasn't due to halted market conditions

    Attributes:
        trade_id: :class:`~async_v20.TradeID`
            The ID of the Trade being closed
        client_trade_id: :class:`~async_v20.TradeID`
            The Client ID of the Trade being closed
        source_transaction_id: :class:`~async_v20.TransactionID`
            The Transaction ID of the DelayedTradeClosure transaction
            to which this Delayed Trade Close belongs
    """

    # ``sentinel`` defaults mark "argument not supplied"; Model.__init__
    # reads every parameter back out of locals() by name.
    def __init__(self, trade_id: TradeID = sentinel, client_trade_id: TradeID = sentinel,
                 source_transaction_id: TransactionID = sentinel):
        Model.__init__(**locals())
class MarketOrderPositionCloseout(Model):
    """A MarketOrderPositionCloseout specifies the extensions to a Market Order
    when it has been created to closeout a specific Position.

    Attributes:
        instrument: :class:`~async_v20.InstrumentName`
            The instrument of the Position being closed out.
        units: :class:`str`
            Indication of how much of the Position to close. Either "ALL", or a DecimalNumber reflecting a
            partial close of the Trade. The DecimalNumber must always be positive,
            and represent a number that doesn't exceed the absolute size of the Position.
    """

    # ``sentinel`` defaults mark "argument not supplied"; Model.__init__
    # reads every parameter back out of locals() by name.
    def __init__(self, instrument: InstrumentName = sentinel, units: str = sentinel):
        Model.__init__(**locals())
class MarketOrderTradeClose(Model):
    """A MarketOrderTradeClose specifies the extensions to a Market Order that has
    been created specifically to close a Trade.

    Attributes:
        trade_id: :class:`~async_v20.TradeID`
            The ID of the Trade requested to be closed
        client_trade_id: :class:`str` :class:`~async_v20.TradeID`
            The client ID of the Trade requested to be closed
        units: :class:`str`
            Indication of how much of the Trade to close. Either
            "ALL", or a DecimalNumber reflecting a partial close of the Trade.
    """

    # NOTE(review): ``client_trade_id`` is annotated ``str`` although the
    # docstring also mentions TradeID. The annotation drives the framework's
    # type coercion, so it is left untouched -- confirm against the OANDA spec.
    def __init__(self, trade_id: TradeID = sentinel, client_trade_id: str = sentinel, units: str = sentinel):
        Model.__init__(**locals())
class OpenTradeFinancing(Model):
    """OpenTradeFinancing is used to pay/collect daily financing charge for an
    open Trade within an Account

    Attributes:
        trade_id: :class:`~async_v20.TradeID`
            The ID of the Trade that financing is being paid/collected for.
        financing: :class:`~async_v20.AccountUnits`
            The amount of financing paid/collected for the Trade.
    """

    # ``sentinel`` defaults mark "argument not supplied"; Model.__init__
    # reads every parameter back out of locals() by name.
    def __init__(self, trade_id: TradeID = sentinel, financing: AccountUnits = sentinel):
        Model.__init__(**locals())


class ArrayOpenTradeFinancing(Array, contains=OpenTradeFinancing):
    """Typed array of :class:`OpenTradeFinancing` objects."""
    pass
class PositionFinancing(Model):
    """PositionFinancing is used to pay/collect daily financing charge for a
    Position within an Account

    Attributes:
        instrument: :class:`~async_v20.InstrumentName`
            The instrument of the Position that financing is being paid/collected for.
        financing: :class:`~async_v20.AccountUnits`
            The amount of financing paid/collected for the Position.
        open_trade_financings: ( :class:`~async_v20.OpenTradeFinancing`, ...)
            The financing paid/collected for each open Trade within the Position.
    """

    # ``sentinel`` defaults mark "argument not supplied"; Model.__init__
    # reads every parameter back out of locals() by name.
    def __init__(self, instrument: InstrumentName = sentinel, financing: AccountUnits = sentinel,
                 open_trade_financings: ArrayOpenTradeFinancing = sentinel):
        Model.__init__(**locals())


class ArrayPositionFinancing(Array, contains=PositionFinancing):
    """Typed array of :class:`PositionFinancing` objects."""
    pass
class TradeOpen(Model):
    """A TradeOpen object represents a Trade for an instrument that was opened in
    an Account. It is found embedded in Transactions that affect the position
    of an instrument in the Account, specifically the OrderFill Transaction.

    Attributes:
        price: :class:`~async_v20.DecimalNumber`
            The price the Trade was opened at (undocumented upstream --
            confirm against the OANDA v20 specification).
        trade_id: :class:`~async_v20.TradeID`
            The ID of the Trade that was opened
        units: :class:`~async_v20.DecimalNumber`
            The number of units opened by the Trade
        client_extensions: :class:`~async_v20.ClientExtensions`
            The client extensions for the newly opened Trade
        guaranteed_execution_fee: :class:`~async_v20.AccountUnits`
            Fee for guaranteed execution, if any (undocumented upstream --
            confirm against the OANDA v20 specification).
        half_spread_cost: :class:`~async_v20.AccountUnits`
            Half-spread cost attributed to opening the Trade (undocumented
            upstream -- confirm against the OANDA v20 specification).
        initial_margin_required: :class:`~async_v20.AccountUnits`
            The margin required at the time the Trade was created. Note, this is the
            'pure' margin required, it is not the 'effective' margin used that
            factors in the trade risk if a GSLO is attached to the trade.
    """

    def __init__(self, price: DecimalNumber = sentinel, trade_id: TradeID = sentinel, units: DecimalNumber = sentinel,
                 client_extensions: ClientExtensions = sentinel,
                 guaranteed_execution_fee: AccountUnits = sentinel,
                 half_spread_cost: AccountUnits = sentinel,
                 initial_margin_required: AccountUnits = sentinel):
        Model.__init__(**locals())
class VWAPReceipt(Model):
    """A VWAP Receipt provides a record of how the price for an Order fill is
    constructed. If the Order is filled with multiple buckets in a depth of
    market, each bucket will be represented with a VWAP Receipt.

    Attributes:
        units: :class:`~async_v20.DecimalNumber`
            The number of units filled
        price: :class:`~async_v20.PriceValue`
            The price at which the units were filled
    """

    # ``sentinel`` defaults mark "argument not supplied"; Model.__init__
    # reads every parameter back out of locals() by name.
    def __init__(self, units: DecimalNumber = sentinel, price: PriceValue = sentinel):
        Model.__init__(**locals())
class UserInfo(Model):
    """A representation of user information, as provided to the user themselves.

    Attributes:
        username: :class:`str`
            The user-provided username.
        user_id: :class:`str`
            The user's OANDA-assigned user ID.
        country: :class:`str`
            The country that the user is based in.
        email_address: :class:`str`
            The user's email address.
    """

    # ``sentinel`` defaults mark "argument not supplied"; Model.__init__
    # reads every parameter back out of locals() by name.
    def __init__(self, username: str = sentinel, user_id: str = sentinel, country: str = sentinel,
                 email_address: str = sentinel):
        Model.__init__(**locals())
class AccountProperties(Model):
    """Properties related to an Account.

    Attributes:
        id: :class:`~async_v20.AccountID`
            The Account's identifier
        mt4_account_id: :class:`int`
            The Account's associated MT4 Account ID. This field will not
            be present if the Account is not an MT4 account.
        tags: ( :class:`str`, ...)
            The Account's tags
    """

    # NOTE(review): upstream docs describe the MT4 account ID as an AccountID,
    # but the annotation (which drives the framework's coercion) says ``int``
    # -- left untouched; confirm against the OANDA v20 specification.
    def __init__(self, id: AccountID = sentinel, mt4_account_id: int = sentinel, tags: ArrayStr = sentinel):
        Model.__init__(**locals())


class ArrayAccountProperties(Array, contains=AccountProperties):
    """Typed array of :class:`AccountProperties` objects."""
    pass
class Candlestick(Model):
    """The Candlestick representation

    Attributes:
        time: :class:`~async_v20.DateTime`
            The start time of the candlestick
        bid: :class:`~async_v20.CandlestickData`
            The candlestick data based on bids.
            Only provided if bid-based candles were requested.
        ask: :class:`~async_v20.CandlestickData`
            The candlestick data based on asks.
            Only provided if ask-based candles were requested.
        mid: :class:`~async_v20.CandlestickData`
            The candlestick data based on midpoints.
            Only provided if midpoint-based candles were requested.
        volume: :class:`int`
            The number of prices created during
            the time-range represented by the candlestick.
        complete: :class:`bool`
            A flag indicating if the candlestick is complete. A complete
            candlestick is one whose ending time is not in the future.
    """

    # ``sentinel`` defaults mark "argument not supplied"; Model.__init__
    # reads every parameter back out of locals() by name.
    def __init__(self, time: DateTime = sentinel, bid: CandlestickData = sentinel, ask: CandlestickData = sentinel,
                 mid: CandlestickData = sentinel, volume: int = sentinel, complete: bool = sentinel):
        Model.__init__(**locals())


class ArrayCandlestick(Array, contains=Candlestick):
    """Typed array of :class:`Candlestick` objects."""
    pass
class OrderBook(Model):
    """The representation of an instrument's order book at a point in time

    Attributes:
        instrument: :class:`~async_v20.InstrumentName`
            The order book's instrument
        time: :class:`~async_v20.DateTime`
            The time when the order book snapshot was created.
        unix_time: :class:`~async_v20.DateTime`
            The time when the order book snapshot was created in unix format.
        price: :class:`~async_v20.PriceValue`
            The price (midpoint) for the order book's instrument
            at the time of the order book snapshot
        bucket_width: :class:`~async_v20.PriceValue`
            The price width for each bucket. Each bucket covers the price
            range from the bucket's price to the bucket's price + bucketWidth.
        buckets: ( :class:`~async_v20.OrderBookBucket`, ...)
            The partitioned order book, divided into buckets using a default bucket width. These
            buckets are only provided for price ranges which actually contain order or position data.
    """

    # ``sentinel`` defaults mark "argument not supplied"; Model.__init__
    # reads every parameter back out of locals() by name.
    def __init__(self, instrument: InstrumentName = sentinel, time: DateTime = sentinel, unix_time: DateTime = sentinel,
                 price: PriceValue = sentinel, bucket_width: PriceValue = sentinel,
                 buckets: ArrayOrderBookBucket = sentinel):
        Model.__init__(**locals())
class PositionBook(Model):
    """The representation of an instrument's position book at a point in time

    Attributes:
        instrument: :class:`~async_v20.InstrumentName`
            The position book's instrument
        time: :class:`~async_v20.DateTime`
            The time when the position book snapshot was created
        unix_time: :class:`~async_v20.DateTime`
            The time when the position book snapshot was created in unix format.
        price: :class:`~async_v20.PriceValue`
            The price (midpoint) for the position book's instrument
            at the time of the position book snapshot
        bucket_width: :class:`~async_v20.PriceValue`
            The price width for each bucket. Each bucket covers the price
            range from the bucket's price to the bucket's price + bucketWidth.
        buckets: ( :class:`~async_v20.PositionBookBucket`, ...)
            The partitioned position book, divided into buckets using a default bucket width. These
            buckets are only provided for price ranges which actually contain order or position data.
    """

    # ``sentinel`` defaults mark "argument not supplied"; Model.__init__
    # reads every parameter back out of locals() by name.
    def __init__(self, instrument: InstrumentName = sentinel, time: DateTime = sentinel, unix_time: DateTime = sentinel,
                 price: PriceValue = sentinel,
                 bucket_width: PriceValue = sentinel, buckets: ArrayPositionBookBucket = sentinel):
        Model.__init__(**locals())
class Order(Model):
"""The base Order definition. Contains all possible attributes an Order
may contain
Attributes:
id: :class:`~async_v20.OrderID`
The Order's identifier, unique within the Order's Account.
create_time: :class:`~async_v20.DateTime`
The time when the Order was created.
state: :class:`~async_v20.OrderState`
The current state of the Order.
client_extensions: :class:`~async_v20.ClientExtensions`
The client extensions of the Order. Do not set, modify,
or delete clientExtensions if your account is associated with MT4.
trade_id: :class:`~async_v20.TradeID`
price: :class:`~async_v20.PriceValue`
type: :class:`~async_v20.OrderType`
client_trade_id: :class:`~async_v20.ClientID`
time_in_force: :class:`~async_v20.TimeInForce`
gtd_time: :class:`~async_v20.DateTime`
trigger_condition: :class:`~async_v20.OrderTriggerCondition`
filling_transaction_id: :class:`~async_v20.TransactionID`
filled_time: :class:`~async_v20.DateTime`
trade_opened_id: :class:`~async_v20.TradeID`
trade_reduced_id: :class:`~async_v20.TradeID`
trade_closed_ids: ( :class:`~async_v20.TradeID`, ...),
cancelling_transaction_id: :class:`~async_v20.TransactionID`
cancelled_time: :class:`~async_v20.DateTime`
replaces_order_id: :class:`~async_v20.OrderID`
replaced_by_order_id: :class:`~async_v20.OrderID`
distance: :class:`~async_v20.PriceValue`
trailing_stop_value: :class:`~async_v20.PriceValue`
instrument: :class:`~async_v20.InstrumentName`
units: :class:`~async_v20.DecimalNumber`
partial_fill: :class:`str`
position_fill: :class:`~async_v20.OrderPositionFill`
take_profit_on_fill: :class:`~async_v20.TakeProfitDetails`
stop_loss_on_fill: :class:`~async_v20.StopLossDetails`
trailing_stop_loss_on_fill: :class:`~async_v20.TrailingStopLossDetails`
trade_client_extensions: :class:`~async_v20.ClientExtensions`
price_bound: :class:`~async_v20.PriceValue`
initial_market_price: :class:`~async_v20.PriceValue`
trade_close: :class:`~async_v20.MarketOrderTradeClose`
long_position_closeout: :class:`~async_v20.MarketOrderPositionCloseout`
short_position_closeout: :class:`~async_v20.MarketOrderPositionCloseout`
margin_closeout: :class:`~async_v20.MarketOrderMarginCloseout`
delayed_trade_close: :class:`~async_v20.MarketOrderDelayedTradeClose`
trigger_distance: :class:`~async_v20.PriceValue`
is_trigger_distance_exact: :class:`bool`
"""
# TODO: Update the annotation for partial_fill when OANDA responds to email, & `guaranteed`
def __init__(self, id: OrderID = sentinel, | |
amplification'
print 'Multipliers for %s:' % method_name
for idx in top_concept_multiplier_indices:
concept = concepts[idx]
nonzero_indices = term_concept_index.getcol(idx).nonzero()[0]
print '%s (%f): ' % (concept, concept_multiplier_lil[0, idx]),
for word_idx in nonzero_indices:
print '(%s, %f)' % (mapping[word_idx], docs_term_index_lil[0, word_idx]),
print
print
with Timing('Multiplying coefficients...', self.logging):
interpretation_vector = interpretation_vector + interpretation_vector.multiply(concept_multiplier)
return interpretation_vector
def _take_top_phrases(self, interpretation_vector, test_doc_term, candidate_phrases, named_entities=[], n=10,
with_score=False, k=25, n_ranked=25, rank_sim='spearman_rank_similarity', text=None,
boost_ne = 0.15, max_phrase=0):
"""Return the top n concepts, word, and phrases
interpretation_vector is expected to be a row vector as csr_matrix
"""
concepts = self.concepts_
forward_index = self.forward_index_
vocabulary = self.vocabulary_
mapping = self.mapping_
#num_features = self.num_features_
#num_concepts = self.num_concepts_
word_info = self.word_info_
tokenizer, postprocessor = self.dtf.get_tokenizer(), self.dtf.get_postprocessor()
tokenized = list(tokenizer(text))
# Error checking to make sure that we pass the correct variable types
if not isinstance(interpretation_vector, csr_matrix):
raise TypeError('Expecting csr_matrix for interpretation_vector, got %s' % type(interpretation_vector))
if interpretation_vector.shape[0] != 1:
raise ValueError('Expecting a row vector, found a matrix with %d rows' % interpretation_vector.shape[0])
if not isinstance(test_doc_term, csr_matrix):
raise TypeError('Expecting csr_matrix for test_doc_term, got %s' % type(test_doc_term))
if not isinstance(forward_index, csr_matrix):
raise TypeError('Expecting csr_matrix for forward_index, got %s' % type(forward_index))
with Timing('Sorting concepts...', self.logging):
doc_score = interpretation_vector.toarray()[0]
sorted_concept_indices = np.argsort(doc_score)
top_concept_indices = sorted_concept_indices[:-n - 1:-1]
top_k_indices = sorted_concept_indices[:-k - 1:-1]
if n_ranked > 0:
rank_sim_func = getattr(KeyTermsExtractor, rank_sim)
with Timing('Reranking top concepts...', self.logging):
top_2k_indices = sorted_concept_indices[:-2 * k - 1:-1]
# Find the top n_ranked terms in the input document
word_indices_input = test_doc_term.indices[np.argsort(test_doc_term.data)[:-n_ranked - 1:-1]]
word_indices_set_input = set(word_indices_input)
test_doc_term = test_doc_term.tolil()
concept_to_words = []
#min_overlap = 2000
#max_overlap = 0
#sum_overlap = 0
for concept_idx in top_2k_indices:
concept_vector = forward_index.getrow(concept_idx).tolil()
concept_vector_col_idx = np.array(concept_vector.rows[0])
concept_vector_data = concept_vector.data[0]
# Find the top n_ranked terms in the concept
word_indices_concept = concept_vector_col_idx[np.argsort(concept_vector_data)[:-n_ranked - 1:-1]]
word_indices_set_concept = set(word_indices_concept)
# Combine the top terms in input and in concept
word_indices_union = np.array(list(word_indices_set_input | word_indices_set_concept))
# Gather overlap statistics for analysis purpose (non-essential)
#overlap = len(word_indices_set_concept)+len(word_indices_set_input)-len(word_indices_union)
#min_overlap = min(min_overlap, overlap)
#max_overlap = max(max_overlap, overlap)
#sum_overlap += overlap
# Take the scores for each term in the combined list
filtered_word_scores_input = test_doc_term[:, word_indices_union].toarray()[0]
filtered_word_scores_concept = concept_vector[:, word_indices_union].toarray()[0]
# The next four lines to get sorted list of term indices (i.e. the ranking)
ranked_word_union_indices_input = np.argsort(filtered_word_scores_input)
ranked_word_union_indices_concept = np.argsort(filtered_word_scores_concept)
ranked_word_indices_input = word_indices_union[ranked_word_union_indices_input]
ranked_word_indices_concept = word_indices_union[ranked_word_union_indices_concept]
# The sorted list of term indices are then compared
rank_similarity_score = rank_sim_func(ranked_word_indices_input, ranked_word_indices_concept)
doc_score[concept_idx] *= rank_similarity_score
if DEBUG:
words_input = [mapping[idx] for idx in ranked_word_indices_input]
words_concept = [mapping[idx] for idx in ranked_word_indices_concept]
concept_to_words.append([concept_idx, concepts[concept_idx], words_input, words_concept,
rank_similarity_score, doc_score[concept_idx],
doc_score[concept_idx] * rank_similarity_score])
if DEBUG:
from pprint import pprint
pprint(concept_to_words)
sorted_concept_indices = top_2k_indices[np.argsort(doc_score[top_2k_indices])]
top_concept_indices = list(sorted_concept_indices[:-n - 1:-1])
k = len(sorted_concept_indices >= 1)
top_k_indices = list(sorted_concept_indices[:-k - 1:-1])
#LOGGER.debug('Min overlaps: %d\nMax overlaps: %d\nAvg overlaps: %.2f' %
# (min_overlap, max_overlap, sum_overlap/(2.0*k)))
with Timing('Sorting terms...', self.logging):
# This part is explained in https://wiki.knorex.asia/x/rAMYBQ the "Key Words Extraction Part" section
# Take top k concepts score from the interpretation vector (shape: 1 x k)
top_k_concepts = csr_matrix(doc_score[top_k_indices])
# Multiply each term in the top k concept vectors by term weight in the input text (shape: k x |V|)
concept_word_matrix = forward_index[top_k_indices, :].multiply(scipy.sparse.vstack([test_doc_term] * k))
# Find the maximum term score in each concept (shape: 1 x k)
padded_data = np.pad(concept_word_matrix.data, (0, 1), 'constant', constant_values=0)
scale = csr_matrix(np.maximum.reduceat(padded_data, concept_word_matrix.indptr[:-1]))
# Find the normalizing constant for the top k concepts of the interpretation vector, multiply it to scale
# Now scale contains the normalizing constant multiplied by the maximum term score in each concept
scale = scale * top_k_concepts.sum(axis=1)[0, 0]
# Invert the scale so that later division is just a multiplication with this scale
scale.data = 1 / scale.data
# Normalize the interpretation vector as well as divide each with the maximum term score of each concept
# This completes step 3 (normalizing interpretation vector top_k_concepts) and prepare for step 2
scale = scale.multiply(top_k_concepts)
# When scale is multiplied (matrix multiplication) with the top k concept vectors, we are doing
# step 2 and 4 simultaneously, resulting in a 1 x |V| vector containing the desired term score
word_affinity_vector = scale * concept_word_matrix
word_affinity_vector = word_affinity_vector.toarray()[0]
top_terms_indices = [i for i in np.argsort(word_affinity_vector)[:-n - 1:-1]
if word_affinity_vector[i] > 0]
# top_terms_indices = []
def WordLength(tokens):
    # Count tokens that are not themselves punctuation marks.
    # NOTE(review): the membership test compares the *whole token* against the
    # punctuation string, so single punctuation characters are excluded but a
    # token merely containing punctuation still counts -- presumably intended.
    punct = '.,()&[]\'\"-/\\\n'
    res = 0
    for x in tokens:
        if x not in punct:
            res += 1
    return res

def dist(word, tokenized):
    # Position-based weight: earlier occurrences get values near 0.5, the last
    # position yields 0.0. Uses the *first* occurrence of ``word``; raises
    # ValueError if absent -- callers appear to pass words from ``tokenized``.
    pos = tokenized.index(word)
    return 0.5 - 1.0/(len(tokenized) - pos + 1)

def calcMaxPhrases(lDoc):
    # Heuristic cap on how many phrases to keep, based on the number of
    # candidate phrases (closure over the enclosing method's variable).
    # NOTE(review): this file is Python 2 -- ``nPhrases/3`` is integer division.
    nPhrases = len(candidate_phrases)
    if nPhrases < 3:
        return nPhrases
    if nPhrases < 19:
        return nPhrases/3 + 2
    return int(round(nPhrases/math.log(nPhrases))) + 1
# tokset = set()
# for phrase in candidate_phrases:
# for x in tokenizer(phrase):
# tokset.add(postprocessor(x))
# nToks = len(tokset)
# return (nPhrases + nToks)/10, (nPhrases + nToks + lDoc)/20
shortThres = 250
#Update word_affinity_vector for short text
lDoc = WordLength(tokenized)
if lDoc < shortThres:
tokenized = map(postprocessor, tokenized)
tokenized = [token for token in tokenized if token in vocabulary]
for word in set(tokenized):
wid = vocabulary[word]
word_affinity_vector[wid] = word_info[wid]*dist(word, tokenized)
def ILPSolver(named_entities=[], regularizer=0.00, max_phrase=15, min_len=0.0, TOL=0.00001, w_ne=2.0,
postprocessor=postprocessor, tokenizer=tokenizer):
def build_word_list(token_phrases):
res = []
for x in token_phrases:
res.extend(x)
return list(set(res))
def build_substr(token_phrases):
def cal_prefix_score(l1, l2):
len_l1, len_l2, res = len(l1), len(l2), 0.0
for i, x in enumerate(l1):
if i == len_l2:
break
if x == l2[i]:
res += 1.0
else:
break
return res/len_l1
def cal_suffix_score(t1, t2):
l1, l2 = list(reversed(t1)), list(reversed(t2))
len_l1, len_l2, res = len(l1), len(l2), 0.0
for i, x in enumerate(l1):
if i == len_l2:
break
if x == l2[i]:
res += 1.0
else:
break
return res/len_l1
res = []
for x1, ls1 in enumerate(token_phrases):
count = 0.0
s1 = ' '.join(ls1)
for x2, ls2 in enumerate(token_phrases):
if x1 != x2:
s2 = ' '.join(ls2)
if s2.find(s1) != -1 and len(s2) != 0:
count += float(len(ls1))/len(ls2)
elif s1.find(s2) == -1 and len(s1)!= 0:
count += cal_suffix_score(ls1, ls2)
count += cal_prefix_score(ls1, ls2)
res.append(count)
return res
def build_ne_reg(phrases_list, named_entities):
res = []
for phrase in phrases_list:
tmp = 0.0
for ne in named_entities:
if phrase.find(ne) != -1:
tmp = w_ne
break
res.append(tmp)
return res
def build_occ_termphrase(TERMS, PHRASES):
res = dict()
for id_terms in TERMS:
tmp = []
for id_phrase in PHRASES:
if occ(id_terms, id_phrase) == 1:
tmp.append(id_phrase)
res[id_terms] = tmp
return res
def occ(id_term, id_phrase):
# term, phrase = mapping[id_term], token_phrases[id_phrase]
term, phrase = word_map(id_term), token_phrases[id_phrase]
if term in phrase:
return 1
return 0
def length_phrase(id_phrase):
tokens = token_phrases[id_phrase]
return len(tokens)
def cal_phrase_score(id_phrase):
score = 0.00
for word in token_phrases[id_phrase]:
wid = word_index(word)
if wid == -1:
continue
if term_vars[wid].varValue > TOL:
score += word_score(wid)
score /= length_phrase(id_phrase)
return abs(score - regularizer*(length_phrase(j) - min_len)/(1.0 + substr[j] + ne_reg[j]))
def phrase_tokenize(phrase, tokenizer=None):
if tokenizer:
res = [x.strip('.,()&[]\'\"-/\\\n ') for x in tokenizer(phrase)]
else:
res = [x.strip('.,()&[]\'\"-/\\\n ') for x in phrase.split()]
res = [x.replace(u'\n', u' ') for x in res if len(x) > 0]
return [postprocessor(x) for x in res]
def word_score(wordIdx):
if wordIdx >= 0:
return word_affinity_vector[wordIdx]
else:
return ne_score[wordIdx]
def word_index(word):
if word in vocabulary:
return vocabulary[word]
if word in ne_vocab:
return ne_vocab[word]
return -1
def word_map(wid):
if wid >= 0:
return mapping[wid]
if wid < -1:
return ne_mapping[wid]
def build_ne_word_score(named_entities, tokenizer=None):
neVocab, neMap, neScore = {}, {}, {}
idx = -2
for named_entity in named_entities:
tokens = phrase_tokenize(named_entity, tokenizer)
boostScore = boost_ne*1.0/len(tokens)
for token in tokens:
if token not in vocabulary:
if token not in neVocab:
neVocab[token] = idx
neMap[idx] = token
neScore[idx] = boostScore
idx -= 1
elif neScore[neVocab[token]] < boostScore:
neScore[neVocab[token]] = boostScore
# elif word_affinity_vector[vocabulary[token]] < TOL:
# word_affinity_vector[vocabulary[token]] = boostScore
return neVocab, neMap, neScore
phrases_list = list(candidate_phrases)
token_phrases = [phrase_tokenize(x, tokenizer) for x in phrases_list]
word_list = build_word_list(token_phrases)
substr = build_substr(token_phrases)
ne_reg = build_ne_reg(phrases_list, named_entities)
ne_vocab, ne_mapping, ne_score = build_ne_word_score(named_entities, tokenizer)
TERMS = [word_index(word) for word in word_list if word_index(word) != -1]
# TERMS | |
<filename>photobuchauswahltool/__main__.py<gh_stars>0
"""
Graphisches Programm zum sortieren von Bildern in verschiedene Ordner.
"""
from typing import List, Dict, Callable, Tuple, Optional
import imghdr
import tkinter as tk
from tkinter import ttk, N, E, S, W
import tkinter.filedialog as tkfd
import tkinter.messagebox as tkmb
import pathlib
import shutil
import copy
from dataclasses import dataclass
import argparse
import sys
import PIL.Image
import PIL.ImageTk
# Note tkinter-lifetime
# Assignments such as
# image = ttk.Label(self, image=photo)
# image.image = photo
# seem strange and redundant (and raise a mypy warning because image has not
# member image) but they are necessary!
# This makes sure, that the photo is not garbage collected -- see
# https://web.archive.org/web/20201111190625/http://effbot.org/pyfaq/why-do-my-tkinter-images-not-appear.htm
def get_images_in(directory: pathlib.Path) -> List[pathlib.Path]:
    """Return all image files directly inside *directory*, sorted by path.

    A file counts as an image when :func:`imghdr.what` recognises its header.
    """
    return sorted(
        entry
        for entry in directory.glob("*")
        if entry.is_file() and imghdr.what(entry) is not None
    )
def get_expected_file_in_directory(file: pathlib.Path, directory: pathlib.Path) -> pathlib.Path:
    """
    Return the path *file* would occupy inside *directory* (existing or not).
    """
    return directory / file.name


def is_file_in_directory(file: pathlib.Path, directory: pathlib.Path) -> bool:
    """
    Tell whether *directory* already holds a regular file named like *file*.

    Raises ValueError when *directory* is not a directory.
    """
    if not directory.is_dir():
        raise ValueError(f"{directory} is not a directory.")
    candidate = get_expected_file_in_directory(file, directory)
    # Path.is_file() is False for non-existing paths, so no separate
    # exists() check is needed.
    return candidate.is_file()


def copy_file_to_directory(file: pathlib.Path, directory: pathlib.Path) -> None:
    """
    Copy *file* into *directory* unless a file with the same name is there.
    """
    if is_file_in_directory(file, directory):
        print(f"File {file} already in {directory}")
        return
    print(f"Copy {file} to {directory}", end="... ")
    shutil.copy(file, directory)
    print("done")


def delete_file_in_directory(file: pathlib.Path, directory: pathlib.Path) -> None:
    """
    Delete the file named like *file* inside *directory*, if present.

    Raises ValueError when the matching path exists but is not a regular file.
    """
    expected_file = get_expected_file_in_directory(file, directory)
    if not expected_file.exists():
        print(f"File {file} not in {directory}")
        return
    if not expected_file.is_file():
        raise ValueError(f"File {file} in {directory} (i.e. {expected_file} is not a file.")
    print(f"Delete {file} in {directory}, i.e. {expected_file}", end="... ")
    expected_file.unlink()
    print("done")
class FileAction:
    """
    Callable that freezes a (file, destination) pair together with an action.

    Note: The ``action`` to perform is not copied.

    The file and destination are deep-copied so that later rebinding or
    mutation of the caller's variables cannot change what this action operates
    on. Every callable in ``callbacks`` is invoked after each run.
    """

    def __init__(
        self,
        file: pathlib.Path,
        directory: pathlib.Path,
        action: Callable[[pathlib.Path, pathlib.Path], None],
    ) -> None:
        # Snapshot the arguments -- see class docstring for why.
        self.source = copy.deepcopy(file)
        self.destination = copy.deepcopy(directory)
        self.action = action
        self.callbacks: List[Callable[[], None]] = []

    def __call__(self):
        self.action(self.source, self.destination)
        for notify in self.callbacks:
            notify()
def set_button_active(button: ttk.Button, should_be_active: bool) -> None:
    """Enable or disable *button* via its ttk state flags."""
    # ttk uses "disabled" to disable and the negated flag "!disabled" to enable.
    button.state(["!disabled" if should_be_active else "disabled"])
@dataclass
class FileCopyUI:
    """
    The UI elements and metadata responsible to copy/delete/... files.

    ``update`` re-reads the filesystem and syncs label text/colours and
    button enabled-states accordingly.
    """

    file: pathlib.Path
    destination_directory: pathlib.Path
    current_state: ttk.Label
    copy_button: ttk.Button
    delete_button: ttk.Button

    def update(self) -> None:
        """
        Update state of UI according to files found on disk.
        """
        copied = is_file_in_directory(self.file, self.destination_directory)
        prefix = "schon" if copied else "noch nicht"
        self.current_state["text"] = f"{prefix} in {self.destination_directory.name}"
        self.current_state["background"] = "green" if copied else "blue"
        self.current_state["foreground"] = "black" if copied else "white"
        # Copy is offered only while the file is absent; delete only once present.
        set_button_active(self.copy_button, not copied)
        set_button_active(self.delete_button, copied)
class SelectableImage(ttk.Frame):
    """
    UI elements to display a single image and the information in what
    destination directories it is available.

    Shows a thumbnail, the file name, and per-destination status/copy/delete
    controls wired to :class:`FileAction` instances.
    """

    def draw(self):
        """
        Update the image to display.

        Currently the logic for file actions will fail if we change the name as
        the FileActions still have the old name set.
        """
        # image
        content = PIL.Image.open(self.file)
        # LANCZOS is the filter the ANTIALIAS alias pointed to; ANTIALIAS was
        # removed in Pillow 10, so use the stable name.
        content.thumbnail(self.size, PIL.Image.LANCZOS)
        photo = PIL.ImageTk.PhotoImage(content)
        self.image["image"] = photo
        self.image.image = photo  # type: ignore See tkinter-lifetime above
        # label
        self.label["text"] = self.file.name
        # controls
        for ui in self.file_uis.values():
            ui.update()

    def __init__(
        self,
        parent: ttk.Frame,
        file: pathlib.Path,
        destination_directories: List[pathlib.Path],
        size: Tuple[int, int] = (300, 300),
    ):
        """
        Build the widget tree for *file* and one control column per entry of
        *destination_directories*; *size* is the thumbnail bounding box.
        """
        super().__init__(parent)
        self.file = file
        self.size = size
        self.image = ttk.Label(self)
        self.image.grid(column=0, row=0)
        self.label = ttk.Label(self, text=file.name)
        self.label.grid(column=0, row=1)
        destinations = ttk.Frame(self)
        # Keyed by the destination directory path (the previous Dict[str, ...]
        # annotation was wrong -- keys are pathlib.Path objects).
        self.file_uis: Dict[pathlib.Path, FileCopyUI] = {}
        for index, possible_destination in enumerate(destination_directories):
            is_copied = is_file_in_directory(file, possible_destination)
            text_prefix = "schon" if is_copied else "noch nicht"
            state = ttk.Label(
                destinations,
                text=f"{text_prefix} in {possible_destination.name}",
                background="green" if is_copied else "blue",
            )
            state.grid(row=0, column=index)
            copyer = FileAction(file, possible_destination, copy_file_to_directory)
            copy_button = ttk.Button(
                destinations,
                text=f"Kopiere nach {possible_destination.name}",
                command=copyer,
            )
            copy_button.grid(row=1, column=index)
            deleter = FileAction(file, possible_destination, delete_file_in_directory)
            delete_button = ttk.Button(
                destinations,
                text=f"Loesche in {possible_destination.name}",
                command=deleter,
            )
            delete_button.grid(row=2, column=index)
            ui = FileCopyUI(
                file=file,
                destination_directory=possible_destination,
                current_state=state,
                copy_button=copy_button,
                delete_button=delete_button,
            )
            # Refresh the status row right after either action ran.
            for action in [copyer, deleter]:
                action.callbacks.append(ui.update)
            self.file_uis[possible_destination] = ui
        destinations.grid(column=0, row=2)
        self.draw()
class CurrentImagesProvider:
    """
    The logic to display one or a list of current images.

    Holds the sorted image list of a source directory and a cursor
    (``self.current``) pointing at the image currently shown.
    """

    def __init__(self, source_directory: pathlib.Path) -> None:
        self.images = get_images_in(source_directory)
        self.current = 0

    def skim(self, number_of_images: int):
        """
        Go ``number_of_images`` to the next (positive) or previous (negative)
        image, clamping the cursor to the valid index range.
        """
        self.current = max(0, min(self.current + number_of_images, len(self.images) - 1))

    def get(self, number_of_images: int) -> List[pathlib.Path]:
        """
        Return paths to ``number_of_images`` images (or less, if only less are available).
        """
        # corner case: too few images
        if len(self.images) < number_of_images:
            print(f"Only {len(self.images)} images available, but {number_of_images} requested.")
            number_of_images = len(self.images)
        # corner case: current image too close to the end of images
        index = self.current
        if index + number_of_images > len(self.images):
            index = len(self.images) - number_of_images
            print(f"Already at image num {self.current} of {len(self.images)} -- showing from image num {index}.")
        images = self.images[index : index + number_of_images]
        return images

    def progress(self) -> float:
        """
        Return progress in percent.

        An empty source directory counts as fully done (100%); previously this
        raised ZeroDivisionError.
        """
        if not self.images:
            return 100.0
        return (100 * (self.current + 1)) / len(self.images)
class PhotoSelectionGUI:
    """
    UI to display controls and images to copy.

    Builds the main window: a column of controls (navigation buttons,
    spinboxes for number/size of displayed images, a progress bar) next
    to a frame holding the currently displayed images.
    """
    def __init__(
        self,
        root: tk.Tk,
        source_directory: pathlib.Path,
        destination_directories: List[pathlib.Path],
    ) -> None:
        """
        Build the widget tree inside ``root``.

        Raises:
            ValueError: if any given path is not an existing directory.
        """
        # Validate all directories up front before any widget is created
        for folder in [source_directory] + destination_directories:
            if not folder.exists() or not folder.is_dir():
                raise ValueError(f"{folder} is not a directory.")
        self.images_provider = CurrentImagesProvider(source_directory)
        root.title("Photobuchauswahltool")
        mainframe = ttk.Frame(root, padding="3 3 12 12")
        mainframe.grid(column=0, row=0, sticky=(N, W, E, S))
        # fill extra space if window is resized
        root.columnconfigure(0, weight=1)
        root.rowconfigure(0, weight=1)
        # BEGIN frame with actions/overview/... next to the actual images
        actions_frame = ttk.Frame(mainframe)
        # Navigation: previous/next image buttons
        ttk.Label(actions_frame, text="Bild:").grid(row=0, column=0)
        back_button = ttk.Button(actions_frame, text="<-", command=self.previous_image)
        back_button.grid(row=1, column=0)
        forward_button = ttk.Button(actions_frame, text="->", command=self.next_image)
        forward_button.grid(row=2, column=0)
        # Spinbox: how many images to show at once
        ttk.Label(actions_frame, text="Anzahl Bilder:").grid(row=10, column=0)
        num_images = tk.IntVar(root, value=1)
        num_images_spinbox = ttk.Spinbox(
            actions_frame,
            from_=1,
            increment=1,
            to=11,
            textvariable=num_images,
            width=5,
        )
        num_images_spinbox.grid(row=11, column=0)
        num_images_spinbox.value = num_images # type: ignore See tkinter-lifetime above
        # Spinbox: edge length (pixels) for the displayed images
        ttk.Label(actions_frame, text="Groesse:").grid(row=20, column=0)
        size_images = tk.IntVar(root, value=500)
        size_images_spinbox = ttk.Spinbox(
            actions_frame,
            from_=100,
            increment=100,
            to=2000,
            textvariable=size_images,
            width=5,
        )
        size_images_spinbox.grid(row=21, column=0)
        size_images_spinbox.value = size_images # type: ignore See tkinter-lifetime above
        # Progress bar showing how far through the image list we are
        ttk.Label(actions_frame, text="Fortschritt:").grid(row=30, column=0)
        progress = tk.DoubleVar(root, value=0)
        progress_bar = ttk.Progressbar(
            actions_frame,
            variable=progress,
            orient=tk.HORIZONTAL,
        )
        progress_bar.value = progress # type: ignore See tkinter-lifetime above
        progress_bar.grid(row=31, column=0)
        actions_frame.grid(row=0, column=0, sticky=N)
        # END frame
        self.num_images = num_images
        self.size_images = size_images
        self.progress = progress
        # callback to react on numeric UI input
        self.num_images.trace_add("write", lambda _, __, ___: self.display_current_images())
        self.size_images.trace_add("write", lambda _, __, ___: self.display_current_images())
        self.destination_directories = destination_directories
        # Frame that holds the SelectableImage widgets
        self.images = ttk.Frame(mainframe)
        self.images.grid(row=0, column=1)
    def next_image(self) -> None:
        """Advance the cursor by one image and refresh the display."""
        self.images_provider.skim(+1)
        self.display_current_images()
    def previous_image(self) -> None:
        """Move the cursor back by one image and refresh the display."""
        self.images_provider.skim(-1)
        self.display_current_images()
    def display_current_images(self) -> None:
        """Fetch the current window of images, show them, update progress."""
        number_of_images = self.num_images.get()
        images = self.images_provider.get(number_of_images)
        self.display_images(images)
        self.progress.set(self.images_provider.progress())
    def display_images(self, image_files: List[pathlib.Path]) -> None:
        """Replace the currently shown images with ``image_files``."""
        # remove the old ones
        for image in self.images.grid_slaves():
            image.destroy()
        # set the new ones
        size = self.size_images.get()
        for index, image_file in enumerate(image_files):
            image = SelectableImage(self.images, image_file, self.destination_directories, (size, size))
            image.grid(row=0, column=index)
@dataclass
class ProgramOptions:
    """Fully resolved options the program runs with."""
    # Directory the images to sort are read from.
    source_directory: pathlib.Path
    # Directories images can be sorted into.
    # NOTE(review): the name is singular but it holds a *list* (it is
    # filled from ``target_directories``) -- confirm before renaming.
    destination_directory: List[pathlib.Path]
@dataclass
class CommandLineArguments:
    """Options parsed from the command line; fields may still be missing."""
    # None when not supplied on the command line.
    source_directory: Optional[pathlib.Path]
    # Possibly empty when not supplied on the command line.
    target_directories: List[pathlib.Path]
def ask_for_directory(description: str, root: tk.Tk) -> Optional[pathlib.Path]:
    """
    Open a directory chooser titled ``description`` and return the
    selection as a path, or ``None`` if the user cancelled.
    """
    chosen = tkfd.askdirectory(
        parent=root,
        initialdir=pathlib.Path.home(),
        title=description,
    )
    # The dialog reports "nothing chosen" as an empty selection
    return pathlib.Path(chosen) if len(chosen) != 0 else None
def insist_for_directory(description: str, explanation: str, root: tk.Tk) -> pathlib.Path:
    """
    Keep asking for a directory until the user actually picks one,
    showing ``explanation`` as a warning after every refusal.
    """
    while True:
        chosen = ask_for_directory(description, root)
        if chosen is not None:
            return chosen
        tkmb.showwarning(message=explanation)
def ask_for_missing_options(arguments: CommandLineArguments, root: tk.Tk) -> ProgramOptions:
    """
    Complete any information missing from ``arguments`` by asking the
    user interactively and return the resulting program options.
    """
    completed = copy.deepcopy(arguments)
    # Source directory is mandatory -- insist until one is chosen
    if completed.source_directory is None:
        completed.source_directory = insist_for_directory(
            "Ordner mit allen Bildern auswaehlen.",
            "Quellverzeichnis muss ausgewaehlt sein.",
            root,
        )
    # At least one target directory is mandatory as well
    if not completed.target_directories:
        first_target = insist_for_directory(
            "Ordner in den die Bilder einsortiert werden sollen auswaehlen.",
            "Mindestens ein Zielverzeichnis muss ausgewaehlt sein.",
            root,
        )
        completed.target_directories.append(first_target)
    # Offer to add further target directories until the user declines
    wants_more = tkmb.askyesno(message="Ein weiteres Zielverzeichnis angeben?")
    while wants_more:
        candidate = ask_for_directory(
            "Ordner in den die Bilder einsortiert werden sollen auswaehlen.",
            root,
        )
        if candidate is None:
            tkmb.showwarning(message="Kein Verzeichnis gewaehlt!")
        else:
            completed.target_directories.append(candidate)
        wants_more = tkmb.askyesno(message="Noch ein weiteres Zielverzeichnis angeben?")
    return ProgramOptions(
        completed.source_directory,
        completed.target_directories,
    )
def parse_arguments(argv=sys.argv[1:]) -> CommandLineArguments:
| |
###############################################################################################################################
# This script implements a simplification of the evolutionary process proposed by Real et al.: https://arxiv.org/abs/1802.01548v7#
###############################################################################################################################
import sys
import os
import space
import numpy as np
import csv
import json
import copy
import datetime
from jsonschema import Draft4Validator, validators, exceptions
from utility_functions import *
from collections import defaultdict
from scipy import stats
import numpy.random as rd
def mutation(param_space, config, mutation_rate, list=False):
    """
    Mutates given configuration.
    :param param_space: space.Space(), will give us information about parameters
    :param config: dict, the configuration to mutate
    :param mutation_rate: integer for how many parameters to mutate
    :param list: boolean whether returning one or more alternative configs
        (NOTE: this parameter shadows the ``list`` builtin inside this
        function; renaming it would break callers that pass ``list=True``)
    :return: list of dicts, list of mutated configurations
    """
    # For every parameter, draw a replacement value that differs from the
    # one in 'config' whenever the parameter admits more than one value
    parameter_object_list = param_space.get_input_parameters_objects()
    rd_config = dict()
    for name, obj in parameter_object_list.items():
        x = obj.randomly_select()
        single_valued_param = False
        param_type = param_space.get_type(name)
        if param_type == 'real' or param_type == 'integer':
            # A numeric parameter with max == min admits only one value
            if obj.get_max() == obj.get_min():
                single_valued_param = True
        else:
            # A categorical/ordinal parameter with a single element
            if obj.get_size() == 1:
                single_valued_param = True
        mutation_attempts = 0
        # Re-sample until the value differs from the current one; the
        # attempt cap prevents an infinite loop on pathological parameters
        while x == config[name] and single_valued_param == False:
            x = obj.randomly_select()
            mutation_attempts += 1
            if mutation_attempts > 1000000:
                break
        rd_config[name] = x
    parameter_names_list = param_space.get_input_parameters()
    nbr_params = len(parameter_names_list)
    configs = []
    # One alternative when 'list' is False, otherwise one per parameter
    n_configs = nbr_params if list else 1
    for _ in range(n_configs):
        # Pick 'mutation_rate' distinct parameters to mutate
        indices = rd.permutation(nbr_params)[:mutation_rate]
        for idx in indices:
            mutation_param = parameter_names_list[idx]
            # Should I do something if they are the same?
            # NOTE(review): 'config' is copied inside this inner loop, so
            # each appended configuration differs from 'config' in exactly
            # ONE parameter, and mutation_rate configs are appended per
            # outer iteration -- confirm this is intended for
            # mutation_rate > 1 (vs. one config with several mutations)
            temp = config.copy()
            temp[mutation_param] = rd_config[mutation_param]
            configs.append(temp)
    return configs
# Taken from local_search and slightly modified
def run_objective_function(
        configurations,
        hypermapper_mode,
        param_space,
        beginning_of_time,
        run_directory,
        evolution_data_array,
        fast_addressing_of_data_array,
        enable_feasible_predictor=False,
        evaluation_limit=float("inf"),
        black_box_function=None,
        number_of_cpus=0):
    """
    Evaluate a list of configurations using the black-box function being optimized.
    This method avoids evaluating repeated points by recovering their value from the history of evaluated points.
    :param configurations: list of configurations to evaluate.
    :param hypermapper_mode: which HyperMapper mode is being used.
    hypermapper_mode == "default"
    :param param_space: a space object containing the search space.
    :param beginning_of_time: timestamp of when the optimization started.
    :param run_directory: directory where HyperMapper is running.
    :param evolution_data_array: a dictionary containing all of the configurations that have been evaluated.
    :param fast_addressing_of_data_array: a dictionary containing evaluated configurations and their index in
                                          the evolution_data_array.
    :param enable_feasible_predictor: whether to use constrained optimization.
    :param evaluation_limit: the maximum number of function evaluations allowed for the evolutionary search.
    :param black_box_function: the black_box_function being optimized in the evolutionary search.
    :param number_of_cpus: an integer for the number of cpus to be used in parallel.
    :return: configurations with evaluations for all points in configurations and the number of evaluated configurations
    """
    new_configurations = []
    new_evaluations = {}
    previous_evaluations = defaultdict(list)
    number_of_new_evaluations = 0
    t0 = datetime.datetime.now()
    absolute_configuration_index = len(fast_addressing_of_data_array)
    # Split the batch: previously seen configurations have their results
    # recovered from history; unseen ones are queued for evaluation
    # (subject to the evaluation limit)
    for configuration in configurations:
        str_data = param_space.get_unique_hash_string_from_values(configuration)
        if str_data in fast_addressing_of_data_array:
            configuration_idx = fast_addressing_of_data_array[str_data]
            for key in evolution_data_array:
                previous_evaluations[key].append(evolution_data_array[key][configuration_idx])
        else:
            if absolute_configuration_index + number_of_new_evaluations < evaluation_limit:
                new_configurations.append(configuration)
                number_of_new_evaluations += 1
    # Evaluates new configurations, if there are any
    t1 = datetime.datetime.now()
    if number_of_new_evaluations > 0:
        new_evaluations = param_space.run_configurations(hypermapper_mode, new_configurations, beginning_of_time,
                                                         black_box_function, run_directory=run_directory)
    # Values for all given configurations (recovered + newly evaluated)
    all_evaluations = concatenate_data_dictionaries(previous_evaluations, new_evaluations)
    # NOTE(review): this indexing raises IndexError when 'all_evaluations'
    # is empty (no history hits and no new evaluations) -- confirm callers
    # never pass a batch that yields nothing
    all_evaluations_size = len(all_evaluations[list(all_evaluations.keys())[0]])
    # Record the new evaluations into the shared history arrays; note that
    # only the NEW evaluations are returned as the population
    population = list()
    for idx in range(number_of_new_evaluations):
        configuration = get_single_configuration(new_evaluations, idx)
        population.append(configuration)
        for key in configuration:
            evolution_data_array[key].append(configuration[key])
        str_data = param_space.get_unique_hash_string_from_values(configuration)
        fast_addressing_of_data_array[str_data] = absolute_configuration_index
        absolute_configuration_index += 1
    # 'sys.stdout' is replaced elsewhere by a logger object that provides
    # write_to_logfile -- presumably set up in main(); verify before reuse
    sys.stdout.write_to_logfile(("Time to run new configurations %10.4f sec\n" %
                                ((datetime.datetime.now() - t1).total_seconds())))
    sys.stdout.write_to_logfile(("Total time to run configurations %10.4f sec\n" %
                                ((datetime.datetime.now() - t0).total_seconds())))
    return population, all_evaluations_size
def evolution(population_size, generations, mutation_rate, crossover, regularize, batch_size, fitness_measure,
              param_space, fast_addressing_of_data_array, optimization_function, optimization_function_parameters):
    """
    Do the entire evolutionary process from config to best config
    :param population_size: an integer for the number of configs to keep. All will be initiated randomly
    :param generations: an integer for the number of iterations through the evolutionary loop
    :param mutation_rate: an integer for the number of parameters to change in a mutation
    :param crossover: a boolean whether to use crossover in the algorithm
    :param regularize: boolean, whether to use regularized or non-regularized evolution strategy
    :param batch_size: an integer for how many individuals to compare in a generation
    :param fitness_measure: a string name of the objective that should be optimized
    :param param_space: a space object containing the search space.
    :param fast_addressing_of_data_array: an array that keeps track of all evaluated configurations
    :param optimization_function: the function that will be optimized by the evolutionary search.
    :param optimization_function_parameters: a dictionary containing the parameters that will be passed to the
                                             optimization function.
    :return: all points evaluated and the best config at each generation of the Evolutionary Algorithm.
    """
    t0 = datetime.datetime.now()
    tmp_fast_addressing_of_data_array = copy.deepcopy(fast_addressing_of_data_array)
    input_params = param_space.get_input_parameters()
    data_array = {}
    ### Initialize a random population ###
    default_configuration = param_space.get_default_or_random_configuration()
    str_data = param_space.get_unique_hash_string_from_values(default_configuration)
    if str_data not in fast_addressing_of_data_array:
        tmp_fast_addressing_of_data_array[str_data] = 1
    if population_size - 1 > 0:  # Will always be true
        configurations = [default_configuration] + param_space.random_sample_configurations_without_repetitions(
            tmp_fast_addressing_of_data_array, population_size - 1)
    else:
        configurations = param_space.random_sample_configurations_without_repetitions(tmp_fast_addressing_of_data_array,
                                                                                      population_size)
    population, function_values_size = optimization_function(configurations=configurations, **optimization_function_parameters)
    # This will concatenate the entire data array if all configurations were evaluated
    new_data_array = concatenate_list_of_dictionaries(configurations[:function_values_size])
    data_array = concatenate_data_dictionaries(data_array, new_data_array)
    ### Evolutionary loop ###
    for gen in range(1, generations + 1):
        if not gen % 10:
            print('Now we are at generation: ', gen)
        # pick a random batch from the population and find the two best and the worst of the batch
        cand_idxs = rd.permutation(len(population))[:batch_size]
        infty = float("inf")
        best = (-1, infty)
        second = (-1, infty)
        worst = (-1, -infty)
        # Single pass tournament: track (index, fitness) for best, runner-up
        # and worst individual of the sampled batch (lower fitness is better)
        for ci in cand_idxs:
            val = population[ci][fitness_measure]
            if val < best[1]:
                second = best
                best = (ci, val)
            elif val < second[1]:
                second = (ci, val)
            if val > worst[1]:
                worst = (ci, val)
        # checks that candidate loop was successful
        # NOTE(review): message below contains a typo ("fined" -> "find");
        # left untouched in this documentation-only pass
        if min(best[0], second[0], worst[0]) < 0:
            print('failed to fined best and/or worst individual. Script will terminate')
            sys.exit()
        # Make a child by copy/crossover from parent(s)
        child = dict()
        parent = population[best[0]]
        if crossover:
            # Uniform crossover: each parameter comes from either parent
            # with probability 0.5
            parent2 = population[second[0]]
            for param in input_params:
                if rd.uniform(0, 1) < 0.5:
                    child[param] = parent[param]
                else:
                    child[param] = parent2[param]
        else:
            for param in input_params:
                child[param] = parent[param]
        # Get mutation candidates, evaluate and add to population
        child_list = mutation(param_space, child, mutation_rate, list=True)
        need_random = True
        # Try mutated candidates one by one and keep the first that is new
        # (optimization_function returns an empty population for repeats)
        for c in child_list:
            evaluated_child_list, func_val_size = optimization_function(configurations=[c],
                                                                        **optimization_function_parameters)
            if evaluated_child_list:
                # NOTE(review): '[c][:func_val_size]' slices the one-element
                # list [c] -- it yields [] when func_val_size == 0 and [c]
                # otherwise; presumably intended, but confirm
                new_data_array = concatenate_list_of_dictionaries([c][:func_val_size])
                data_array = concatenate_data_dictionaries(data_array, new_data_array)
                population.append(evaluated_child_list[0])
                need_random = False
                break
        # If no new configs where found, draw some random configurations instead.
        if need_random:
            tmp_fast_addressing_of_data_array = copy.deepcopy(
                optimization_function_parameters['fast_addressing_of_data_array'])
            random_children = param_space.random_sample_configurations_without_repetitions(
                tmp_fast_addressing_of_data_array, 1)
            evaluated_random_children, func_val_size = optimization_function(configurations=random_children,
                                                                             **optimization_function_parameters)
            new_data_array = concatenate_list_of_dictionaries(random_children[:func_val_size])
            data_array = concatenate_data_dictionaries(data_array, new_data_array)
            population.append(evaluated_random_children[0])
        # Remove a configuration to keep the population size constant
        # ('killed' is assigned for readability only; the value is unused)
        if regularize:  # removing oldest, which will be first as we append new last
            killed = population.pop(0)
        else:  # removing the worst in the subset
            killed = population.pop(worst[0])
    sys.stdout.write_to_logfile(("Evolution time %10.4f sec\n" % ((datetime.datetime.now() - t0).total_seconds())))
    return data_array
def main(config, black_box_function=None, output_file=""):
"""
Run design-space exploration using evolution.
:param config: dictionary containing all the configuration parameters of this design-space exploration.
:param black_box_function: The function hypermapper seeks to optimize
:param output_file: a name for the file used to save the dse results.
:return:
"""
param_space = space.Space(config)
run_directory = config["run_directory"]
application_name = config["application_name"]
hypermapper_mode = config["hypermapper_mode"]["mode"]
if hypermapper_mode == "default":
if black_box_function == None:
print("Error: the black box function must be provided")
raise SystemExit
if not callable(black_box_function):
print("Error: the black box function parameter is not callable")
raise SystemExit
optimization_metrics = config["optimization_objectives"]
number_of_objectives = len(optimization_metrics)
if number_of_objectives != 1:
print("the evolutionary optimization does not support multi-objective optimization. Exiting.")
sys.exit()
fitness_measure = optimization_metrics[0]
population_size = config["evolution_population_size"]
generations = config["evolution_generations"]
mutation_rate = config["mutation_rate"]
if mutation_rate > len(param_space.get_input_parameters()):
print("mutation rate cannot be higher than the number of parameters. Exiting.")
sys.exit()
if mutation_rate < 1:
print("mutation rate must be at least 1 for evolution to work. Exiting.")
sys.exit()
crossover = config["evolution_crossover"]
regularize = config["regularize_evolution"]
batch_size = config["batch_size"]
if batch_size > population_size:
print("population_size must be bigger than batch_size. Exiting.")
sys.exit()
elif batch_size < 2 and not crossover:
print("batch_size cannot be smaller than 2. Exiting.")
sys.exit()
elif batch_size < 3 and crossover:
print("batch_size must be at least 3 when using crossover. Exiting.")
sys.exit()
log_file = deal_with_relative_and_absolute_path(run_directory, config["log_file"])
sys.stdout.change_log_file(log_file)
if hypermapper_mode | |
#!/usr/bin/env python
#
# clean.sgd: cleaning operations for SGD GFF data
# Copyright (C) University of Manchester 2020 <NAME>
#
import logging
from ..GFFFile import GFFID
from ..GFFFile import OrderedDictionary
#######################################################################
# Functions
#######################################################################
def GroupByID(gff_data):
    """
    Group GFF features into subsets based on ID

    Grouping is based on the ID attribute, which is assumed
    to be of the form '<prefix>:<name>:<index>' (e.g.
    'CDS:YEL0W:3').

    Features are placed in the same subset as long as their
    index values increase by exactly one from line to line; a
    break in the sequence starts a new subset.

    Arguments:
      gff_data: a list of GFF data lines

    Returns:
      A list of subsets with one or more GFF data lines, where
      each subset corresponds to a run of consecutive indices
    """
    logging.debug("%d genes submitted for grouping" %
                  len(gff_data))
    subsets = []
    current_subset = []
    previous_index = None
    for line in gff_data:
        index = GFFID(line['attributes']['ID']).index
        # A non-consecutive index terminates the current subset
        if previous_index is not None and index != previous_index + 1:
            subsets.append(current_subset)
            current_subset = []
        current_subset.append(line)
        previous_index = index
    # Flush the final subset, if any
    if current_subset:
        subsets.append(current_subset)
    # Report the grouping for debugging purposes
    for subset in subsets:
        logging.debug("--Subset--")
        for gene in subset:
            logging.debug("\t%s" % GFFID(gene['attributes']['ID']))
    return subsets
def GFFGetDuplicateSGDs(gff_data):
    """
    Return GFF data with duplicate SGD names

    Returns an OrderedDictionary where the keys are
    the duplicate SGD names and the values are
    lists of the associated GFF records, e.g.

    >>> dups = GFFGetDuplicateSGDs(gff)
    >>> for sgd in dups.keys():
    >>> ... loops over SGD names ...
    >>>     for data in dups[sgd]:
    >>> ... loops over duplicates ...

    Note that duplicates are determined purely by SGD
    name; no account is taken of chromosome or strand.

    Arguments:
      gff_data: a GFFFile object containing the GFF
        file data

    Returns:
      OrderedDictionary with SGDs as keys for lists
      of the duplicate TabDataLines corresponding to
      the SGD
    """
    # Use ordered dictionary to sort info on duplicates
    duplicates = OrderedDictionary()
    # Process data line-by-line, collecting all lines per SGD name
    for data in gff_data:
        attributes = data['attributes']
        if 'SGD' in attributes:
            # Store data
            sgd = attributes['SGD']
            if sgd != '':
                # Check for duplicates
                if sgd in duplicates:
                    duplicates[sgd].append(data)
                else:
                    duplicates[sgd] = [data]
    # Filter out true duplicates i.e. SGDs with at least two
    # GFF data lines.
    # Fix: iterate over a snapshot of the keys -- entries are deleted
    # inside the loop, and mutating a mapping while iterating its live
    # key view is unsafe (RuntimeError for built-in dicts).
    for sgd in list(duplicates.keys()):
        if len(duplicates[sgd]) < 2:
            del(duplicates[sgd])
    # Finished
    return duplicates
def GFFResolveDuplicateSGDs(gff_data,mapping_data,duplicates,
                            overlap_margin=1000):
    """Resolve duplicate SGD names in GFF data

    Attempts to resolve duplicate SGDs by referring to a
    list of 'best' genes.

    Note that this function doesn't remove any of the data
    from the GFF input; the calling subprogram should do
    this based on the list of "discards".

    Arguments:
      gff_data: a GFFFile object containing the GFF file data
      mapping_data: a TabFile object containing candidate genes
        to insert into the GFF data if not present
      duplicates: a dictionary with keys representing SGDs
        (each key maps to a list of duplicate GFF data lines for
        that SGD) returned by the GFFGetDuplicateSGDs function
      overlap_margin: additional number of bases either side of
        candidate gene start and end positions to consider when
        looking for overlaps with duplicates

    Returns:
      Dictionary with the following keys (each key linked to a list):
        resolved_sgds: list of SGDs that were resolved
        unresolved_sgds: SGDs that were unresolved
        unresolved_sgds_no_mapping_genes: SGDs that were unresolved
          because there were no mapping genes with the same name
        unresolved_sgds_no_mapping_genes_after_filter: SGDs that were
          unresolved after filtering on chromosome and strand
        unresolved_sgds_no_overlaps: SGDs that were unresolved after
          checking for overlaps
        unresolved_sgds_multiple_matches: SGDs that were unresolved
          because at least two mapping genes matched
        discard: list of GFF data lines which were selected for
          discard after resolution
    """
    # Dictionary for storing results
    result = { 'resolved_sgds': [],
               'unresolved_sgds': [],
               'unresolved_sgds_no_mapping_genes': [],
               'unresolved_sgds_no_mapping_genes_after_filter': [],
               'unresolved_sgds_no_overlaps': [],
               'unresolved_sgds_multiple_matches': [],
               'discard': [] }
    # Try to resolve each duplicated SGD name in turn
    for sgd in duplicates.keys():
        # Look up genes with the same SGD name
        logging.debug("* * * * * * * * * * * * * * * * * * * * * * *")
        logging.debug("SGD = %s" % sgd)
        mapping_genes = mapping_data.lookup('name',sgd)
        if len(mapping_genes) == 0:
            logging.debug("No genes in mapping file with matching SGD to "
                          "resolve:")
            for duplicate in duplicates[sgd]:
                attr = duplicate['attributes']
                logging.debug("\t%s %s %s %s %s L%d %s" %
                              (attr['ID'],
                               duplicate['seqname'],
                               duplicate['start'],
                               duplicate['end'],
                               duplicate['strand'],
                               duplicate.lineno(),
                               duplicate['feature']))
            # Record unresolved duplicates for reporting
            result['unresolved_sgds_no_mapping_genes'].append(sgd)
            continue
        # At least one mapping gene available
        matches = []
        rejects = []
        # Match duplicates to mapping genes
        genes_to_duplicates = {}
        for duplicate in duplicates[sgd]:
            assigned = False
            for gene in mapping_genes:
                # Filter on chromosome and strand
                if gene['chr'] == duplicate['seqname'] and \
                   gene['strand'] == duplicate['strand']:
                    if gene not in genes_to_duplicates.keys():
                        genes_to_duplicates[gene] = []
                    genes_to_duplicates[gene].append(duplicate)
                    assigned = True
            # No match for this duplicate, add to provisional rejects
            if not assigned:
                if duplicate in rejects:
                    logging.warning("Duplicate added multiple times to "
                                    "rejects list")
                rejects.append(duplicate)
        # Check if there are any matches
        if len(genes_to_duplicates.keys()) == 0:
            logging.debug("No mapping genes matched on chromosome and "
                          "strand")
            result['unresolved_sgds_no_mapping_genes_after_filter'].append(sgd)
            continue
        # Cluster duplicates for each gene and filter by overlap
        for gene in genes_to_duplicates.keys():
            # Determine overlap region
            region = (gene['start'] - overlap_margin,
                      gene['end'] + overlap_margin)
            # Group duplicates into subsets
            genes_to_duplicates[gene] = GroupByID(genes_to_duplicates[gene])
            # Check for overlaps for each subset
            for duplicate in genes_to_duplicates[gene]:
                if duplicate[0]['start'] > region[0] and \
                   duplicate[-1]['end'] < region[1]:
                    # Found a match
                    matches.append(duplicate)
                else:
                    # Not a match, unpack and add to provisional rejects
                    for d in duplicate:
                        if d in rejects:
                            logging.warning("Duplicate added multiple "
                                            "times to rejects list")
                        rejects.append(d)
        # End of filtering process - see what we're left with
        if len(matches) == 1:
            # Resolved
            logging.debug("Duplication resolved for %s" % sgd)
            result['resolved_sgds'].append(sgd)
            for duplicate in matches[0]:
                logging.debug("\t%s %s %s %s L%d %s" %
                              (duplicate['seqname'],
                               duplicate['start'],
                               duplicate['end'],
                               duplicate['strand'],
                               duplicate.lineno(),
                               duplicate['feature']))
            # Add rejects to discard pile
            for duplicate in rejects:
                result['discard'].append(duplicate)
        elif len(matches) == 0:
            # Unresolved, no overlaps
            result['unresolved_sgds_no_overlaps'].append(sgd)
        else:
            # Multiple matches left
            # Fix: the original call had a '%s' placeholder but supplied
            # no argument, so the SGD name never appeared in the log
            logging.debug("Unable to resolve duplication for %s between:" % sgd)
            result['unresolved_sgds_multiple_matches'].append(sgd)
            for match in matches:
                for duplicate in match:
                    logging.debug("\t%s %s %s %s L%d %s" %
                                  (duplicate['seqname'],
                                   duplicate['start'],
                                   duplicate['end'],
                                   duplicate['strand'],
                                   duplicate.lineno(),
                                   duplicate['feature']))
    # Finished - make list of unresolved SGDs
    for unresolved in ('unresolved_sgds_no_mapping_genes',
                       'unresolved_sgds_no_mapping_genes_after_filter',
                       'unresolved_sgds_no_overlaps',
                       'unresolved_sgds_multiple_matches'):
        result['unresolved_sgds'].extend(result[unresolved])
    return result
def GFFGroupSGDs(gff_data):
    """
    Update ID attribute of GFF data to indicate SGD groups

    For each line in the GFF data, looks for a non-blank SGD
    value in the GFF attributes and updates the ID attribute
    with the format:

    ID=CDS:<SGD>:<i>

    where <SGD> is the SGD value and <i> is an integer index
    starting from 1.

    For lines in the GFF within 5 lines of each other and
    with matching SGDs, the integer index increases by 1 each
    time to indicate that the lines form a group, for example
    CDS:YEL0W:1, CDS:YEL0W:2 etc.

    Arguments:
      gff_data: a GFFFile object containing the GFF file data

    Returns:
      The same GFFFile object with the ID attributes updated
      in place
    """
    logging.debug("Starting grouping of SGDs")
    next_ln = 0
    for data in gff_data:
        # Increment the line index
        next_ln += 1
        # Process the attributes data
        attributes = data['attributes']
        # Get the SGD value
        try:
            sgd = attributes['SGD']
        except KeyError:
            # SGD not in the attributes, treat as blank
            sgd = ''
        if sgd != '':
            # Check the ID
            idx = GFFID(attributes['ID'])
            if idx.code != 'CDS':
                # Set the CDS prefix and index and update ID attribute
                idx.code = 'CDS'
                idx.name = sgd
                idx.index = 1
                attributes['ID'] = str(idx)
            # NOTE(review): 'ln' is assigned but never used below
            ln = data.lineno()
            # Loop over next 5 data lines after this looking for
            # matching SGD
            for data0 in gff_data[next_ln:next_ln+5]:
                attr0 = data0['attributes']
                # NOTE(review): unlike the guarded lookup above, this
                # raises KeyError when a look-ahead line has no 'SGD'
                # attribute -- confirm all lines carry the key
                sgd0 = attr0['SGD']
                if sgd0 == sgd:
                    # Found a match
                    idx0 = GFFID(attr0['ID'])
                    if idx0.code != '':
                        # NOTE(review): the warning claims the index will
                        # be overwritten, but this branch leaves the ID
                        # unchanged -- confirm which behavior is intended
                        logging.warning("ID already has code assigned "
                                        "(L%d)" % data0.lineno())
                        logging.warning("Index will be overwritten")
                    else:
                        # Continue the group: next consecutive index
                        idx0.code = "CDS"
                        idx0.name = sgd
                        idx0.index = idx.index + 1
                        attr0['ID'] = str(idx0)
                    logging.debug("%d %s\t%d %s" % (next_ln,
                                                    idx,
                                                    data0.lineno(),
                                                    idx0))
                    # Don't look any further
                    break
    # Finished grouping by SGD
    return gff_data
def GFFInsertMissingGenes(gff_data,mapping_data):
"""Insert 'missing' genes from mapping file into GFF data
A gene is considered | |
<filename>skbio/diversity/beta/tests/test_unifrac.py<gh_stars>100-1000
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from io import StringIO
from unittest import main, TestCase
import numpy as np
from skbio import TreeNode
from skbio.tree import DuplicateNodeError, MissingNodeError
from skbio.diversity.beta import unweighted_unifrac, weighted_unifrac
from skbio.diversity.beta._unifrac import (_unweighted_unifrac,
_weighted_unifrac,
_weighted_unifrac_branch_correction)
class UnifracTests(TestCase):
def setUp(self):
self.b1 = np.array(
[[1, 3, 0, 1, 0],
[0, 2, 0, 4, 4],
[0, 0, 6, 2, 1],
[0, 0, 1, 1, 1],
[5, 3, 5, 0, 0],
[0, 0, 0, 3, 5]])
self.sids1 = list('ABCDEF')
self.oids1 = ['OTU%d' % i for i in range(1, 6)]
self.t1 = TreeNode.read(
StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,OTU5:0.75):1.25):0.0)root;'))
self.t1_w_extra_tips = TreeNode.read(
StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,(OTU5:0.25,(OTU6:0.5,OTU7:0.5):0.5):0.5):1.25):0.0'
')root;'))
self.t2 = TreeNode.read(
StringIO('((OTU1:0.1, OTU2:0.2):0.3, (OTU3:0.5, OTU4:0.7):1.1)'
'root;'))
self.oids2 = ['OTU%d' % i for i in range(1, 5)]
def test_unweighted_otus_out_of_order(self):
# UniFrac API does not assert the observations are in tip order of the
# input tree
shuffled_ids = self.oids1[:]
shuffled_b1 = self.b1.copy()
shuffled_ids[0], shuffled_ids[-1] = shuffled_ids[-1], shuffled_ids[0]
shuffled_b1[:, [0, -1]] = shuffled_b1[:, [-1, 0]]
for i in range(len(self.b1)):
for j in range(len(self.b1)):
actual = unweighted_unifrac(
self.b1[i], self.b1[j], self.oids1, self.t1)
expected = unweighted_unifrac(
shuffled_b1[i], shuffled_b1[j], shuffled_ids, self.t1)
self.assertAlmostEqual(actual, expected)
def test_weighted_otus_out_of_order(self):
# UniFrac API does not assert the observations are in tip order of the
# input tree
shuffled_ids = self.oids1[:]
shuffled_b1 = self.b1.copy()
shuffled_ids[0], shuffled_ids[-1] = shuffled_ids[-1], shuffled_ids[0]
shuffled_b1[:, [0, -1]] = shuffled_b1[:, [-1, 0]]
for i in range(len(self.b1)):
for j in range(len(self.b1)):
actual = weighted_unifrac(
self.b1[i], self.b1[j], self.oids1, self.t1)
expected = weighted_unifrac(
shuffled_b1[i], shuffled_b1[j], shuffled_ids, self.t1)
self.assertAlmostEqual(actual, expected)
def test_unweighted_extra_tips(self):
# UniFrac values are the same despite unobserved tips in the tree
for i in range(len(self.b1)):
for j in range(len(self.b1)):
actual = unweighted_unifrac(
self.b1[i], self.b1[j], self.oids1, self.t1_w_extra_tips)
expected = unweighted_unifrac(
self.b1[i], self.b1[j], self.oids1, self.t1)
self.assertAlmostEqual(actual, expected)
def test_weighted_extra_tips(self):
# UniFrac values are the same despite unobserved tips in the tree
for i in range(len(self.b1)):
for j in range(len(self.b1)):
actual = weighted_unifrac(
self.b1[i], self.b1[j], self.oids1, self.t1_w_extra_tips)
expected = weighted_unifrac(
self.b1[i], self.b1[j], self.oids1, self.t1)
self.assertAlmostEqual(actual, expected)
def test_unweighted_minimal_trees(self):
# two tips
tree = TreeNode.read(StringIO('(OTU1:0.25, OTU2:0.25)root;'))
actual = unweighted_unifrac([1, 0], [0, 0], ['OTU1', 'OTU2'],
tree)
expected = 1.0
self.assertEqual(actual, expected)
def test_weighted_minimal_trees(self):
# two tips
tree = TreeNode.read(StringIO('(OTU1:0.25, OTU2:0.25)root;'))
actual = weighted_unifrac([1, 0], [0, 0], ['OTU1', 'OTU2'], tree)
expected = 0.25
self.assertEqual(actual, expected)
def test_unweighted_root_not_observed(self):
# expected values computed with QIIME 1.9.1 and by hand
# root node not observed, but branch between (OTU1, OTU2) and root
# is considered shared
actual = unweighted_unifrac([1, 1, 0, 0], [1, 0, 0, 0],
self.oids2, self.t2)
# for clarity of what I'm testing, compute expected as it would
# based on the branch lengths. the values that compose shared was
# a point of confusion for me here, so leaving these in for
# future reference
expected = 0.2 / (0.1 + 0.2 + 0.3) # 0.3333333333
self.assertAlmostEqual(actual, expected)
# root node not observed, but branch between (OTU3, OTU4) and root
# is considered shared
actual = unweighted_unifrac([0, 0, 1, 1], [0, 0, 1, 0],
self.oids2, self.t2)
# for clarity of what I'm testing, compute expected as it would
# based on the branch lengths. the values that compose shared was
# a point of confusion for me here, so leaving these in for
# future reference
expected = 0.7 / (1.1 + 0.5 + 0.7) # 0.3043478261
self.assertAlmostEqual(actual, expected)
def test_weighted_root_not_observed(self):
# expected values computed by hand, these disagree with QIIME 1.9.1
# root node not observed, but branch between (OTU1, OTU2) and root
# is considered shared
actual = weighted_unifrac([1, 0, 0, 0], [1, 1, 0, 0],
self.oids2, self.t2)
expected = 0.15
self.assertAlmostEqual(actual, expected)
# root node not observed, but branch between (OTU3, OTU4) and root
# is considered shared
actual = weighted_unifrac([0, 0, 1, 1], [0, 0, 1, 0],
self.oids2, self.t2)
expected = 0.6
self.assertAlmostEqual(actual, expected)
def test_weighted_normalized_root_not_observed(self):
# expected values computed by hand, these disagree with QIIME 1.9.1
# root node not observed, but branch between (OTU1, OTU2) and root
# is considered shared
actual = weighted_unifrac([1, 0, 0, 0], [1, 1, 0, 0],
self.oids2, self.t2, normalized=True)
expected = 0.1764705882
self.assertAlmostEqual(actual, expected)
# root node not observed, but branch between (OTU3, OTU4) and root
# is considered shared
actual = weighted_unifrac([0, 0, 1, 1], [0, 0, 1, 0],
self.oids2, self.t2, normalized=True)
expected = 0.1818181818
self.assertAlmostEqual(actual, expected)
def test_unweighted_unifrac_identity(self):
for i in range(len(self.b1)):
actual = unweighted_unifrac(
self.b1[i], self.b1[i], self.oids1, self.t1)
expected = 0.0
self.assertAlmostEqual(actual, expected)
def test_unweighted_unifrac_symmetry(self):
for i in range(len(self.b1)):
for j in range(len(self.b1)):
actual = unweighted_unifrac(
self.b1[i], self.b1[j], self.oids1, self.t1)
expected = unweighted_unifrac(
self.b1[j], self.b1[i], self.oids1, self.t1)
self.assertAlmostEqual(actual, expected)
def test_invalid_input(self):
# Many of these tests are duplicated from
# skbio.diversity.tests.test_base, but I think it's important to
# confirm that they are being run when *unifrac is called.
# tree has duplicated tip ids
t = TreeNode.read(
StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,OTU2:0.75):1.25):0.0)root;'))
u_counts = [1, 2, 3]
v_counts = [1, 1, 1]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(DuplicateNodeError, unweighted_unifrac,
u_counts, v_counts, otu_ids, t)
self.assertRaises(DuplicateNodeError, weighted_unifrac, u_counts,
v_counts, otu_ids, t)
# unrooted tree as input
t = TreeNode.read(StringIO('((OTU1:0.1, OTU2:0.2):0.3, OTU3:0.5,'
'OTU4:0.7);'))
u_counts = [1, 2, 3]
v_counts = [1, 1, 1]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, unweighted_unifrac, u_counts,
v_counts, otu_ids, t)
self.assertRaises(ValueError, weighted_unifrac, u_counts,
v_counts, otu_ids, t)
# otu_ids has duplicated ids
t = TreeNode.read(
StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,OTU5:0.75):1.25):0.0)root;'))
u_counts = [1, 2, 3]
v_counts = [1, 1, 1]
otu_ids = ['OTU1', 'OTU2', 'OTU2']
self.assertRaises(ValueError, unweighted_unifrac, u_counts,
v_counts, otu_ids, t)
self.assertRaises(ValueError, weighted_unifrac, u_counts,
v_counts, otu_ids, t)
# len of vectors not equal
t = TreeNode.read(
StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,OTU5:0.75):1.25):0.0)root;'))
u_counts = [1, 2]
v_counts = [1, 1, 1]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, unweighted_unifrac, u_counts,
v_counts, otu_ids, t)
self.assertRaises(ValueError, weighted_unifrac, u_counts,
v_counts, otu_ids, t)
u_counts = [1, 2, 3]
v_counts = [1, 1]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, unweighted_unifrac, u_counts,
v_counts, otu_ids, t)
self.assertRaises(ValueError, weighted_unifrac, u_counts,
v_counts, otu_ids, t)
u_counts = [1, 2, 3]
v_counts = [1, 1, 1]
otu_ids = ['OTU1', 'OTU2']
self.assertRaises(ValueError, unweighted_unifrac, u_counts,
v_counts, otu_ids, t)
self.assertRaises(ValueError, weighted_unifrac, u_counts,
v_counts, otu_ids, t)
# negative counts
t = TreeNode.read(
StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,OTU5:0.75):1.25):0.0)root;'))
u_counts = [1, 2, -3]
v_counts = [1, 1, 1]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, unweighted_unifrac, u_counts,
v_counts, otu_ids, t)
self.assertRaises(ValueError, weighted_unifrac, u_counts,
v_counts, otu_ids, t)
u_counts = [1, 2, 3]
v_counts = [1, 1, -1]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, unweighted_unifrac, u_counts,
v_counts, otu_ids, t)
self.assertRaises(ValueError, weighted_unifrac, u_counts,
v_counts, otu_ids, t)
# tree with no branch lengths
t = TreeNode.read(
StringIO('((((OTU1,OTU2),OTU3)),(OTU4,OTU5));'))
u_counts = [1, 2, 3]
v_counts = [1, 1, 1]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, unweighted_unifrac, u_counts,
v_counts, otu_ids, t)
self.assertRaises(ValueError, weighted_unifrac, u_counts,
v_counts, otu_ids, t)
# tree missing some branch lengths
t = TreeNode.read(
StringIO('(((((OTU1,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,OTU5:0.75):1.25):0.0)root;'))
u_counts = [1, 2, 3]
v_counts = [1, 1, 1]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, unweighted_unifrac, u_counts,
v_counts, otu_ids, t)
self.assertRaises(ValueError, weighted_unifrac, u_counts,
v_counts, otu_ids, t)
# otu_ids not present in tree
t = TreeNode.read(
StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,OTU5:0.75):1.25):0.0)root;'))
u_counts = [1, 2, 3]
v_counts = [1, 1, 1]
otu_ids = ['OTU1', 'OTU2', 'OTU42']
self.assertRaises(MissingNodeError, unweighted_unifrac, u_counts,
v_counts, otu_ids, t)
self.assertRaises(MissingNodeError, weighted_unifrac, u_counts,
v_counts, otu_ids, t)
def test_unweighted_unifrac_non_overlapping(self):
# these communities only share the root node
actual = unweighted_unifrac(
self.b1[4], self.b1[5], self.oids1, self.t1)
expected = 1.0
self.assertAlmostEqual(actual, expected)
actual = unweighted_unifrac(
[1, 1, 1, 0, 0], [0, 0, 0, 1, 1], self.oids1, self.t1)
expected = 1.0
self.assertAlmostEqual(actual, expected)
def test_unweighted_unifrac_zero_counts(self):
actual = unweighted_unifrac(
[1, 1, 1, 0, 0], [0, 0, 0, 0, 0], self.oids1, self.t1)
expected = 1.0
self.assertAlmostEqual(actual, expected)
actual = unweighted_unifrac(
[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], self.oids1, self.t1)
expected = 0.0
self.assertAlmostEqual(actual, expected)
actual = unweighted_unifrac(
[], [], [], self.t1)
expected = 0.0
self.assertAlmostEqual(actual, expected)
def test_unweighted_unifrac(self):
# expected results derived from QIIME 1.9.1, which
# is a completely different implementation skbio's initial
# unweighted unifrac implementation
| |
<filename>src/graph_transpiler/webdnn/frontend/tensorflow/ops/gen_array_ops.py
from typing import List
import numpy as np
import tensorflow as tf
from webdnn import ConstantVariable
from webdnn.frontend.constraints import AxisVar, unify_order
from webdnn.graph.operators.reshape import Reshape
from webdnn.graph.operators.zero_padding_2d import ZeroPadding2D
from webdnn.graph.variable import Variable
from webdnn.graph.axis import Axis
from webdnn.graph.graph import Graph
from webdnn.graph.order import Order, OrderNC, OrderNTC, OrderNHWC, OrderC
from webdnn.graph.placeholder import Placeholder
from webdnn.frontend.tensorflow.converter import TensorFlowConverter
# --- Unsupported array ops --------------------------------------------------
# Each handler below is a registered placeholder: it fails fast with
# NotImplementedError when the corresponding TensorFlow op appears in a graph,
# so unsupported graphs are rejected with a clear message instead of
# producing a silently wrong conversion.
@TensorFlowConverter.register_handler("BatchMatrixBandPart")
def batch_matrix_band_part_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("BatchMatrixDiag")
def batch_matrix_diag_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("BatchMatrixDiagPart")
def batch_matrix_diag_part_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("BatchMatrixSetDiag")
def batch_matrix_set_diag_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("BatchToSpace")
def batch_to_space_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("BatchToSpaceND")
def batch_to_space_nd_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Bitcast")
def bitcast_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("CheckNumerics")
def check_numerics_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Concat")
def concat_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("ConcatOffset")
def concat_offset_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("ConcatV2")
def concat_v2_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Const")
def const_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    """Register a WebDNN variable for a TensorFlow ``Const`` op.

    FIXME: should output ConstantVariable?
    """
    tensor = tf_op.outputs[0]
    shape = [Placeholder() if dim.value is None else dim.value for dim in tensor.shape.dims]
    if shape:
        variable = Variable(shape, Order([AxisVar() for _ in shape]))
    else:
        # Scalar constant: WebDNN's variable should have at least 1 dimension,
        # so the single float value is wrapped into a 1-element C-order array.
        scalar_value = tf_op.get_attr("value").float_val._values[0]
        variable = ConstantVariable(np.array([scalar_value], dtype=np.float32),
                                    Order([Axis.C]))
    converter.set_variable(tensor, variable)
# --- Unsupported array ops (continued) ---------------------------------------
# Registered placeholders: each fails fast with NotImplementedError when the
# corresponding TensorFlow op appears in a converted graph.
@TensorFlowConverter.register_handler("DepthToSpace")
def depth_to_space_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Dequantize")
def dequantize_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Diag")
def diag_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("DiagPart")
def diag_part_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("EditDistance")
def edit_distance_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("ExpandDims")
def expand_dims_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("ExtractImagePatches")
def extract_image_patches_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("FakeQuantWithMinMaxArgs")
def fake_quant_with_min_max_args_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("FakeQuantWithMinMaxArgsGradient")
def fake_quant_with_min_max_args_gradient_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("FakeQuantWithMinMaxVars")
def fake_quant_with_min_max_vars_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("FakeQuantWithMinMaxVarsGradient")
def fake_quant_with_min_max_vars_gradient_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("FakeQuantWithMinMaxVarsPerChannel")
def fake_quant_with_min_max_vars_per_channel_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("FakeQuantWithMinMaxVarsPerChannelGradient")
def fake_quant_with_min_max_vars_per_channel_gradient_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Fill")
def fill_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Gather")
def gather_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("GatherNd")
def gather_nd_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("GatherV2")
def gather_v2_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Identity")
def identity_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    """``Identity`` is a no-op: forward the input variable to the output."""
    source = converter.get_variable(tf_op.inputs[0])
    converter.set_variable(tf_op.outputs[0], source)
# --- Unsupported array ops (continued) ---------------------------------------
# Registered placeholders: each fails fast with NotImplementedError when the
# corresponding TensorFlow op appears in a converted graph.
@TensorFlowConverter.register_handler("ImmutableConst")
def immutable_const_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("InvertPermutation")
def invert_permutation_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("ListDiff")
def list_diff_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("MatrixBandPart")
def matrix_band_part_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("MatrixDiag")
def matrix_diag_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("MatrixDiagPart")
def matrix_diag_part_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("MatrixSetDiag")
def matrix_set_diag_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("MirrorPad")
def mirror_pad_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("MirrorPadGrad")
def mirror_pad_grad_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("OneHot")
def one_hot_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("OnesLike")
def ones_like_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Pack")
def pack_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Pad")
def pad_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    """Convert a TensorFlow ``Pad`` op into a WebDNN ``ZeroPadding2D``.

    Zero padding only. The symmetric pad size per axis is derived from the
    difference between the static input and output shapes.
    FIXME: currently, determining padding from shape of input / output.
    Originally, determining by inputs[1] is correct.
    """
    in_var = converter.get_variable(tf_op.inputs[0])
    unify_order(in_var.order, OrderNHWC)  # FIXME: assuming input order as NHWC
    out_tf_var = tf_op.outputs[0]
    # calculate output shape from out_tf_var.shape and in_var.shape
    # ZeroPadding2D operator only accepts padding for H and W axes.
    padding = [0, 0]
    for dim in range(in_var.ndim):
        in_size = in_var.shape[dim]
        out_size = out_tf_var.shape.dims[dim].value
        assert isinstance(in_size, int), "[TensorFlowConverter] Pad: Placeholder for input shape is not supported yet."
        assert isinstance(out_size, int), "[TensorFlowConverter] Pad: Placeholder for output shape is not supported yet."
        axis = in_var.order.axes[dim]
        if axis in [Axis.H, Axis.W]:
            # The pad is applied symmetrically (same amount on both sides),
            # so the total size difference must be even.
            # BUGFIX: the original check `(out_size - in_size % 2) != 0` had
            # wrong operator precedence (`%` bound to `in_size` only) and an
            # inverted comparison, so it rejected valid even padding.
            assert (out_size - in_size) % 2 == 0, "[TensorFlowConverter] Pad: Uneven padding is not supported yet."
            pad_size = (out_size - in_size) // 2
            if axis == Axis.H:
                padding[0] = pad_size
            elif axis == Axis.W:
                padding[1] = pad_size
        else:
            assert out_size == in_size, "[TensorFlowConverter] Pad: padding for axis other than H and W is not supported yet."
    out_var, = ZeroPadding2D(None, padding=tuple(padding))(in_var)
    converter.set_variable(out_tf_var, out_var)
# --- Unsupported array ops (continued) ---------------------------------------
# Registered placeholders: each fails fast with NotImplementedError when the
# corresponding TensorFlow op appears in a converted graph.
@TensorFlowConverter.register_handler("PadV2")
def pad_v2_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("ParallelConcat")
def parallel_concat_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Placeholder")
def placeholder_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("PlaceholderV2")
def placeholder_v2_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("PlaceholderWithDefault")
def placeholder_with_default_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("PreventGradient")
def prevent_gradient_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("QuantizeAndDequantize")
def quantize_and_dequantize_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("QuantizeAndDequantizeV2")
def quantize_and_dequantize_v2_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("QuantizeAndDequantizeV3")
def quantize_and_dequantize_v3_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("QuantizeV2")
def quantize_v2_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("QuantizedConcat")
def quantized_concat_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("QuantizedInstanceNorm")
def quantized_instance_norm_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("QuantizedReshape")
def quantized_reshape_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Rank")
def rank_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("RefIdentity")
def ref_identity_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Reshape")
def reshape_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    """Convert a TensorFlow ``Reshape`` op into a WebDNN ``Reshape``.

    input: data, output_shape / output: reshaped_data.
    The ``output_shape`` input tensor is currently ignored; the target shape
    is taken from the static shape of the output tensor, which may contain at
    most one unknown (placeholder) dimension that is inferred from the input
    size.
    """
    in_var = converter.get_variable(tf_op.inputs[0])
    out_tf_var = tf_op.outputs[0]

    # Collect the statically known output shape and locate any unknown dims.
    out_shape = [dim_size.value for dim_size in out_tf_var.shape.dims]
    unknown_positions = [i for i, size in enumerate(out_shape) if size is None]

    if len(unknown_positions) > 1:
        raise NotImplementedError(
            "[TensorFlowConverter] Reshape: output with more than one placeholder is not supported yet.")
    if unknown_positions:
        # Infer the single unknown dimension from the total element count.
        known_prod = 1
        for size in out_shape:
            if size is not None:
                known_prod *= size
        if in_var.size % known_prod != 0:
            raise ValueError("[TensorFlowConverter] Reshape: invalid reshape output value.")
        out_shape[unknown_positions[0]] = in_var.size // known_prod

    out_var, = Reshape(None, in_order=in_var.order, out_order=Order([AxisVar() for _ in out_shape]),
                       out_shape=out_shape)(in_var)
    converter.set_variable(out_tf_var, out_var)
# --- Unsupported array ops (continued) ---------------------------------------
# Registered placeholders: each fails fast with NotImplementedError when the
# corresponding TensorFlow op appears in a converted graph.
@TensorFlowConverter.register_handler("ResourceStridedSliceAssign")
def resource_strided_slice_assign_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Reverse")
def reverse_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("ReverseSequence")
def reverse_sequence_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("ReverseV2")
def reverse_v2_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("ScatterNd")
def scatter_nd_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("ScatterNdNonAliasingAdd")
def scatter_nd_non_aliasing_add_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Shape")
def shape_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("ShapeN")
def shape_n_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Size")
def size_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Slice")
def slice_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("SpaceToBatch")
def space_to_batch_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("SpaceToBatchND")
def space_to_batch_nd_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("SpaceToDepth")
def space_to_depth_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Split")
def split_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("SplitV")
def split_v_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Squeeze")
def squeeze_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    """Convert a TensorFlow ``Squeeze`` op into a WebDNN ``Reshape``.

    Removes the dimensions listed in the ``squeeze_dims`` attribute, each of
    which must have size 1. Generalized to the full TensorFlow semantics:
    an empty ``squeeze_dims`` means "squeeze every dimension of size 1", and
    negative indices count from the last dimension. Behavior for positive,
    non-empty ``squeeze_dims`` is unchanged.
    """
    squeeze_dims = tf_op.get_attr("squeeze_dims")  # type: List[int]
    in_var = converter.get_variable(tf_op.inputs[0])
    in_var_shape = in_var.shape
    ndim = len(in_var_shape)
    if squeeze_dims:
        # Normalize negative axis indices (TensorFlow allows them).
        target_dims = {dim % ndim for dim in squeeze_dims}
    else:
        # TensorFlow: empty squeeze_dims squeezes every size-1 dimension.
        target_dims = {dim for dim in range(ndim) if in_var_shape[dim] == 1}
    out_var_shape = []  # type: List[int]
    out_var_order = []  # type: List[Axis]
    for dim in range(ndim):
        if dim in target_dims:
            assert in_var_shape[dim] == 1, f"[TensorFlowConverter] {tf_op.type}: dimension to be squeezed have to be 1."
        else:
            out_var_shape.append(in_var_shape[dim])
            out_var_order.append(in_var.order.axes[dim])
    out_var, = Reshape(None, in_order=in_var.order, out_order=Order(out_var_order), out_shape=out_var_shape)(in_var)
    out_tf_var = tf_op.outputs[0]
    converter.set_variable(out_tf_var, out_var)
# --- Unsupported array ops (continued) ---------------------------------------
# Registered placeholders: each fails fast with NotImplementedError when the
# corresponding TensorFlow op appears in a converted graph.
@TensorFlowConverter.register_handler("StopGradient")
def stop_gradient_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("StridedSlice")
def strided_slice_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("StridedSliceAssign")
def strided_slice_assign_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("StridedSliceGrad")
def strided_slice_grad_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Tile")
def tile_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
raise | |
<reponame>alex123012/biotite
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
__name__ = "biotite.structure.io.pdb"
__author__ = "<NAME>, <NAME>"
__all__ = ["PDBFile"]
import warnings
import numpy as np
from ...atoms import AtomArray, AtomArrayStack
from ...bonds import BondList, connect_via_residue_names
from ...box import vectors_from_unitcell, unitcell_from_vectors
from ....file import TextFile, InvalidFileError
from ..general import _guess_element as guess_element
from ...error import BadStructureError
from ...filter import filter_first_altloc, filter_highest_occupancy_altloc, \
filter_solvent
from .hybrid36 import encode_hybrid36, decode_hybrid36, max_hybrid36_number
import copy
from warnings import warn
# (start, stop) column slices (0-based, stop exclusive) of the fixed-width
# fields in PDB ATOM/HETATM records, per the wwPDB format description.
# One-element tuples denote a single-character column.
_atom_records = {"hetero" : (0, 6),
                 "atom_id" : (6, 11),
                 "atom_name" : (12, 16),
                 "alt_loc" : (16, ),
                 "res_name" : (17, 20),
                 "chain_id" : (21, ),
                 "res_id" : (22, 26),
                 "ins_code" : (26, ),
                 "coord_x" : (30, 38),
                 "coord_y" : (38, 46),
                 "coord_z" : (46, 54),
                 "occupancy" : (54, 60),
                 "temp_f" : (60, 66),
                 "element" : (76, 78),
                 "charge" : (78, 80),}
class PDBFile(TextFile):
r"""
This class represents a PDB file.
The usage of PDBxFile is encouraged in favor of this class.
This class only provides support for reading/writing the pure atom
information (*ATOM*, *HETATM*, *MODEL* and *ENDMDL* records). *TER*
records cannot be written.
See also
--------
PDBxFile
Examples
--------
Load a `\\*.pdb` file, modify the structure and save the new
structure into a new file:
>>> import os.path
>>> file = PDBFile.read(os.path.join(path_to_structures, "1l2y.pdb"))
>>> array_stack = file.get_structure()
>>> array_stack_mod = rotate(array_stack, [1,2,3])
>>> file = PDBFile()
>>> file.set_structure(array_stack_mod)
>>> file.write(os.path.join(path_to_directory, "1l2y_mod.pdb"))
"""
def get_model_count(self):
"""
Get the number of models contained in the PDB file.
Returns
-------
model_count : int
The number of models.
"""
model_count = 0
for line in self.lines:
if line.startswith("MODEL"):
model_count += 1
if model_count == 0:
# It could be an empty file or a file with a single model,
# where the 'MODEL' line is missing
for line in self.lines:
if line.startswith(("ATOM", "HETATM")):
return 1
return 0
else:
return model_count
    def get_coord(self, model=None):
        """
        Get only the coordinates of the PDB file.

        Parameters
        ----------
        model : int, optional
            If this parameter is given, the function will return a
            2D coordinate array from the atoms corresponding to the
            given model number (starting at 1).
            Negative values are used to index models starting from the
            last model instead of the first model.
            If this parameter is omitted, a 3D coordinate array
            containing all models will be returned, even if
            the structure contains only one model.

        Returns
        -------
        coord : ndarray, shape=(m,n,3) or shape=(n,3), dtype=float
            The coordinates read from the ATOM and HETATM records of the
            file.

        Notes
        -----
        Note that :func:`get_coord()` may output more coordinates than
        the atom array (stack) from the corresponding
        :func:`get_structure()` call has.
        The reason for this is, that :func:`get_structure()` filters
        *altloc* IDs, while `get_coord()` does not.

        Examples
        --------
        Read an :class:`AtomArrayStack` from multiple PDB files, where
        each PDB file contains the same atoms but different positions.
        This is an efficient approach when a trajectory is spread into
        multiple PDB files, as done e.g. by the *Rosetta* modeling
        software.

        For the purpose of this example, the PDB files are created from
        an existing :class:`AtomArrayStack`.

        >>> import os.path
        >>> from tempfile import gettempdir
        >>> file_names = []
        >>> for i in range(atom_array_stack.stack_depth()):
        ...     pdb_file = PDBFile()
        ...     pdb_file.set_structure(atom_array_stack[i])
        ...     file_name = os.path.join(gettempdir(), f"model_{i+1}.pdb")
        ...     pdb_file.write(file_name)
        ...     file_names.append(file_name)
        >>> print(file_names)
        ['...model_1.pdb', '...model_2.pdb', ..., '...model_38.pdb']

        Now the PDB files are used to create an :class:`AtomArrayStack`,
        where each model represents a different model.

        Construct a new :class:`AtomArrayStack` with annotations taken
        from one of the created files used as template and coordinates
        from all of the PDB files.

        >>> template_file = PDBFile.read(file_names[0])
        >>> template = template_file.get_structure()
        >>> coord = []
        >>> for i, file_name in enumerate(file_names):
        ...     pdb_file = PDBFile.read(file_name)
        ...     coord.append(pdb_file.get_coord(model=1))
        >>> new_stack = from_template(template, np.array(coord))

        The newly created :class:`AtomArrayStack` should now be equal to
        the :class:`AtomArrayStack` the PDB files were created from.

        >>> print(np.allclose(new_stack.coord, atom_array_stack.coord))
        True
        """
        # Line indices where a new model starts
        model_start_i = np.array([i for i in range(len(self.lines))
                                  if self.lines[i].startswith("MODEL")],
                                 dtype=int)
        # Line indices with ATOM or HETATM records
        atom_line_i = np.array([i for i in range(len(self.lines)) if
                                self.lines[i].startswith(("ATOM", "HETATM"))],
                               dtype=int)
        # Structures containing only one model may omit MODEL record
        # In these cases model starting index is set to 0
        if len(model_start_i) == 0:
            model_start_i = np.array([0])
        if model is None:
            depth = len(model_start_i)
            length = self._get_model_length(model_start_i, atom_line_i)
            coord_i = atom_line_i
        else:
            last_model = len(model_start_i)
            if model == 0:
                raise ValueError("The model index must not be 0")
            # Negative models mean index starting from last model
            model = last_model + model + 1 if model < 0 else model
            if model < last_model:
                # Atom lines between this model's MODEL record and the next
                line_filter = ( ( atom_line_i >= model_start_i[model-1] ) &
                                ( atom_line_i < model_start_i[model ] ) )
            elif model == last_model:
                # Last model: all atom lines after its MODEL record
                line_filter = (atom_line_i >= model_start_i[model-1])
            else:
                raise ValueError(
                    f"The file has {last_model} models, "
                    f"the given model {model} does not exist"
                )
            coord_i = atom_line_i[line_filter]
            length = len(coord_i)
        # Fill in coordinates
        if model is None:
            coord = np.zeros((depth, length, 3), dtype=np.float32)
            m = 0
            i = 0
            for line_i in atom_line_i:
                # Advance to the next model once the atom line lies past the
                # next MODEL record.
                # NOTE(review): this assumes every model contains at least
                # one atom line; an atom-less model would desynchronize 'm'.
                if m < len(model_start_i)-1 and line_i > model_start_i[m+1]:
                    m += 1
                    i = 0
                line = self.lines[line_i]
                # Fixed PDB coordinate columns: slices 30:38, 38:46, 46:54
                coord[m,i,0] = float(line[30:38])
                coord[m,i,1] = float(line[38:46])
                coord[m,i,2] = float(line[46:54])
                i += 1
            return coord
        else:
            coord = np.zeros((length, 3), dtype=np.float32)
            for i, line_i in enumerate(coord_i):
                line = self.lines[line_i]
                coord[i,0] = float(line[30:38])
                coord[i,1] = float(line[38:46])
                coord[i,2] = float(line[46:54])
            return coord
def get_structure(self, model=None, altloc="first", extra_fields=[],
include_bonds=False):
"""
Get an :class:`AtomArray` or :class:`AtomArrayStack` from the PDB file.
This function parses standard base-10 PDB files as well as
hybrid-36 PDB.
Parameters
----------
model : int, optional
If this parameter is given, the function will return an
:class:`AtomArray` from the atoms corresponding to the given
model number (starting at 1).
Negative values are used to index models starting from the
last model insted of the first model.
If this parameter is omitted, an :class:`AtomArrayStack`
containing all models will be returned, even if the
structure contains only one model.
altloc : {'first', 'occupancy', 'all'}
This parameter defines how *altloc* IDs are handled:
- ``'first'`` - Use atoms that have the first
*altloc* ID appearing in a residue.
- ``'occupancy'`` - Use atoms that have the *altloc* ID
with the highest occupancy for a residue.
- ``'all'`` - Use all atoms.
Note that this leads to duplicate atoms.
When this option is chosen, the ``altloc_id``
annotation array is added to the returned structure.
extra_fields : list of str, optional
The strings in the list are optional annotation categories
that should be stored in the output array or stack.
These are valid values:
``'atom_id'``, ``'b_factor'``, ``'occupancy'`` and
``'charge'``.
include_bonds : bool, optional
If set to true, a :class:`BondList` will be created for the
resulting :class:`AtomArray` containing the bond information
from the file.
All bonds have :attr:`BondType.ANY`, since the PDB format
does not support bond orders.
Returns
-------
array : AtomArray or AtomArrayStack
The return type depends on the `model` parameter.
"""
# Line indices where a new model starts
model_start_i = np.array([i for i in range(len(self.lines))
if self.lines[i].startswith(("MODEL"))],
dtype=int)
# Line indices with ATOM or HETATM records
atom_line_i = np.array([i for i in range(len(self.lines)) if
self.lines[i].startswith(("ATOM", "HETATM"))],
dtype=int)
# Structures containing only one model may omit MODEL record
# In these cases model starting index is set to 0
if len(model_start_i) == 0:
model_start_i = np.array([0])
if model is None:
depth = len(model_start_i)
length = self._get_model_length(model_start_i, atom_line_i)
array = AtomArrayStack(depth, length)
# Line indices for annotation determination
# Annotation is determined from model 1,
# therefore from ATOM records before second MODEL record
if len(model_start_i) == 1:
annot_i = atom_line_i
else:
annot_i = atom_line_i[atom_line_i < model_start_i[1]]
# Line indices for coordinate determination
coord_i = atom_line_i
else:
last_model = len(model_start_i)
if model | |
from django.shortcuts import render, redirect, get_object_or_404
from django.core.urlresolvers import reverse
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.contrib.auth.models import User
from django.contrib.auth import login as auth_login, authenticate, logout as auth_logout
from django.contrib.auth.decorators import login_required
from django.contrib.staticfiles.templatetags.staticfiles import static
from datetime import datetime
from django.utils import formats
import time, requests, json, urllib
from time import *
from django.core.cache import caches
from django.db import transaction
from hashlib import sha1 as sha_constructor
from xml.etree import ElementTree
from telle.models import *
from telle.forms import *
# Trakt.tv OAuth application credentials.
# NOTE(review): secrets are hard-coded in source control — they should be
# moved to environment variables / settings and rotated.
CLIENT_ID = "4a441871572e1939a9421e9138c6a5a95dcd1ac4776d71818590ef521e0b6110"
CLIENT_SECRET = "215d5f7969c36a06735ba85abae86f8e2c2b4bfc35eff64e9aca1f7a5a5da8b3"
REDIRECT_URI = "http://localhost:8000/token"
################### TESTING #######################
# Hard-coded placeholder data backing the shows() view until real show
# tracking is implemented.
import random
shows_example = [{
"title":"The Walking Dead",
"trakt_id":random.randint(1,1000),
"poster_url":"https://walter.trakt.us/images/shows/000/001/393/posters/thumb/dec5cd226c.jpg",
"fanart_url":"https://walter.trakt.us/images/shows/000/001/393/fanarts/original/fc68b3b649.jpg"
},
{
"title":"11.22.63",
"trakt_id":random.randint(1,1000),
"poster_url":"https://walter.trakt.us/images/shows/000/102/771/posters/thumb/e9cfb8f315.jpg",
"fanart_url":"https://walter.trakt.us/images/shows/000/102/771/fanarts/original/3c06324bd8.jpg"
},
{
"title":"Shameless",
"trakt_id":random.randint(1,1000),
"poster_url":"https://walter.trakt.us/images/shows/000/034/160/posters/thumb/0fed195982.jpg",
"fanart_url":"https://walter.trakt.us/images/shows/000/034/160/fanarts/original/c04bebba45.jpg"
},
{
"title":"<NAME>",
"trakt_id":random.randint(1,1000),
"poster_url":"https://walter.trakt.us/images/shows/000/077/938/posters/thumb/2d62b7fe39.jpg",
"fanart_url":"https://walter.trakt.us/images/shows/000/077/938/fanarts/original/caf0eacd29.jpg"
},
{
"title":"Better Call Saul",
"trakt_id":random.randint(1,1000),
"poster_url":"https://walter.trakt.us/images/shows/000/059/660/posters/thumb/a847b27956.jpg",
"fanart_url":"https://walter.trakt.us/images/shows/000/059/660/fanarts/original/5885092434.jpg"
},
{
"title":"The Big Bang Theory",
"trakt_id":random.randint(1,1000),
"poster_url":"https://walter.trakt.us/images/shows/000/001/409/posters/thumb/8adfe77938.jpg",
"fanart_url":"https://walter.trakt.us/images/shows/000/001/409/fanarts/original/cff0b01ee7.jpg"
},
{
"title":"House of Cards",
"trakt_id":random.randint(1,1000),
"poster_url":"https://walter.trakt.us/images/shows/000/001/416/posters/thumb/d157e5bbb2.jpg",
"fanart_url":"https://walter.trakt.us/images/shows/000/001/416/fanarts/original/28b9159c81.jpg"
},
{
"title":"Game of Thrones",
"trakt_id":random.randint(1,1000),
"poster_url":"https://walter.trakt.us/images/shows/000/001/390/posters/thumb/93df9cd612.jpg",
"fanart_url":"https://walter.trakt.us/images/shows/000/001/390/fanarts/original/76d5df8aed.jpg"
},
{
"title":"Arrow",
"trakt_id":random.randint(1,1000),
"poster_url":"https://walter.trakt.us/images/shows/000/001/403/posters/thumb/e68cd618e2.jpg",
"fanart_url":"https://walter.trakt.us/images/shows/000/001/403/fanarts/original/bbe10773b5.jpg"
}]
################### TESTING #######################
# List names users may not create: the default list names plus what appear
# to be CSS class names used by the grid templates — TODO confirm.
blacklisted_listnames = ["col-xs-4", "nopadding", "poster", "fanart", "grid-item", "watched",
    "watching", "to-watch", "all"]
@transaction.atomic
def register(request):
    """Display the registration form; create and log in a new user on POST."""
    context = {"register_form": RegisterForm()}
    # Non-POST requests just get the empty form.
    if request.method != "POST":
        return render(request, "register.html", context)
    form = RegisterForm(request.POST)
    context["register_form"] = form
    if not form.is_valid():
        return render(request, "register.html", context)
    cleaned = form.cleaned_data
    new_user = User.objects.create_user(
        username=cleaned["email"], email=cleaned["email"],
        first_name=cleaned["first_name"], password=cleaned["password"])
    new_user.save()
    # Per-user settings row.
    Settings(user=new_user).save()
    # Create the fixed set of default lists for the new account.
    for slug, pretty in (("all", "All"),
                         ("favorites", "Favorites"),
                         ("to-watch", "To Watch"),
                         ("watching", "Watching"),
                         ("watched", "Watched")):
        List(user=new_user, name=slug, pretty_name=pretty,
             type="default").save()
    # Log the freshly registered user in right away.
    new_user = authenticate(username=cleaned["email"],
                            password=cleaned["password"])
    auth_login(request, new_user)
    return home(request)
# @login_required
@transaction.atomic
def delete_account(request):
    """Log the current user out and delete their account entirely."""
    if request.user.is_authenticated():
        # Grab the username before logout detaches the user from the request.
        name = request.user.username
        auth_logout(request)
        User.objects.get(username=name).delete()
        return HttpResponseRedirect(reverse("register"))
@login_required
def home(request):
    """Redirect to the user's configured default landing page."""
    prefs = request.user.settings
    if prefs.default_home_page == "movies":
        target = reverse('movies', args=[prefs.default_movie_list])
    else:
        target = reverse('shows', args=[prefs.default_show_list])
    return HttpResponseRedirect(target)
@transaction.atomic
@login_required
def settings(request):
    """Render the settings page; on POST, apply the submitted settings (AJAX)."""
    user_lists = List.objects.filter(user=request.user).order_by('id')
    context = {'lists': user_lists}
    if request.method == "POST":
        payload = {"error": True}
        form = SettingsForm(request.POST, instance=request.user.settings)
        if form.is_valid():
            payload["error_messages"] = []
            payload["error"] = False
            form.save()
        else:
            payload["error_messages"] = "An error has occured"
        return HttpResponse(json.dumps(payload), content_type='application/json')
    return render(request, 'settings.html', context)
def policy(request):
    """Serve the static privacy-policy page."""
    return render(request, 'privacy.html', {})
@transaction.atomic
@login_required
def add_list(request):
    """Create a new custom list for the current user (AJAX endpoint)."""
    payload = {"error": True, "error_messages": "Invalid name"}
    if request.method == "POST" and request.POST['name']:
        display_name = request.POST['name']
        slug = display_name.lower().replace(" ", "-")
        try:
            # Reject duplicate list names for this user.
            List.objects.get(user=request.user, name=slug)
            payload["error_messages"] = "List already exists"
        except List.DoesNotExist:
            # Reserved names may not be used; a blacklisted name falls
            # through and keeps the generic "Invalid name" error.
            if slug not in blacklisted_listnames:
                List(user=request.user, name=slug,
                     pretty_name=display_name, type="custom").save()
                payload["lists"] = {
                    "name": slug,
                    "pretty_name": display_name,
                    "type": "custom"
                }
                payload["error"] = False
    return HttpResponse(json.dumps(payload), content_type='application/json')
@transaction.atomic
@login_required
def delete_list(request):
    """Delete one of the current user's lists by name (AJAX endpoint)."""
    payload = {"error": True, "error_messages": "Invalid input"}
    if request.method == "POST" and request.POST['name']:
        target = request.POST['name']
        try:
            List.objects.get(user=request.user, name=target).delete()
            payload["error_messages"] = ""
            payload["error"] = False
        except List.DoesNotExist:
            payload["error_messages"] = "List does not exist"
    return HttpResponse(json.dumps(payload), content_type='application/json')
# @transaction.atomic
# @login_required
# def update_settings(request):
# response_text = {
# "error" : True,
# "error_messages" : ["Request was not a post"]
# }
# if request.method == "POST":
# settings = request.user.settings
# settings_form = SettingsForm(request.POST, user=request.user,
# instance=settings)
# if settings_form.is_valid():
# response_text["error_messages"] = []
# response_text["error"] = False
# settings_form.save()
# else:
#             response_text["error_messages"] = ["An error has occurred"]
# return HttpResponse(json.dumps(response_text), content_type='application/json')
@transaction.atomic
@login_required
def manage_list(request):
    """Toggle a movie/show's membership in one of the user's lists (AJAX).

    POST params (via ManageListForm): ``trakt_id`` and ``name`` (list name).
    If the item is already in the list it is removed (except from the
    watch-state lists); otherwise it is added.  Adding to one of the
    mutually exclusive watch-state lists ("watched", "watching",
    "to-watch") first removes the item from whichever of them holds it.
    Returns JSON: {"error": bool, "error_messages": [str]}.
    """
    response_text = {
        "error" : True,
        "error_messages" : ["Error Occurred"]
    }
    if request.method == "POST":
        form = ManageListForm(request.POST)
        print(form)
        print(request.POST)
        print("MANAGED")
        if form.is_valid():
            print("FORM VALID")
            trakt_id = form.cleaned_data["trakt_id"]
            name = form.cleaned_data["name"]
            # Initialize so failed lookups below cannot leave these unbound
            # (the original raised UnboundLocalError when neither a Movie
            # nor a Show matched, and crashed when the list was missing).
            curr_list = None
            media = None
            media_type = None
            # Get the target list, if it exists.
            try:
                curr_list = List.objects.get(user=request.user, name=name)
            except List.DoesNotExist:
                response_text["error_messages"] = ["List does not exist"]
            # Resolve the media item: movies first, then shows.
            try:
                media = Movie.objects.get(trakt_id=trakt_id)
                media_type = "movies"
            except Movie.DoesNotExist:
                try:
                    media = Show.objects.get(trakt_id=trakt_id)
                    media_type = "shows"
                except Show.DoesNotExist:
                    response_text["error_messages"] = ["Media item does not exist"]
            if curr_list is not None and media is not None:
                # Is the item already in the requested list?
                in_list = getattr(curr_list, media_type).filter(
                    trakt_id=trakt_id).first()
                if in_list:
                    # Watch-state lists are only left via a move, not a toggle.
                    if curr_list.name not in ["watched", "watching", "to-watch"]:
                        getattr(curr_list, media_type).remove(media)
                else:
                    if name in ["watched", "to-watch", "watching"]:
                        # Watch-state lists are mutually exclusive: drop the
                        # item from whichever one currently holds it.
                        defaults = {l.name: l for l in List.objects.filter(
                            user=request.user, type="default")}
                        for state in ("watched", "to-watch", "watching"):
                            state_list = defaults[state]
                            if getattr(state_list, media_type).filter(
                                    trakt_id=trakt_id).exists():
                                getattr(state_list, media_type).remove(media)
                                state_list.save()
                                break
                    getattr(curr_list, media_type).add(media)
                curr_list.save()
                response_text["error"] = False
                response_text["error_messages"] = []
    return HttpResponse(json.dumps(response_text), content_type='application/json')
@transaction.atomic
@login_required
def remove_media(request):
    """Remove a movie/show from every one of the current user's lists (AJAX).

    POST param: ``trakt_id``.
    Returns JSON: {"error": bool, "error_messages": [str]}.
    """
    response_text = {
        "error" : True,
        "error_messages" : ["Request was not a post"]
    }
    if request.method == "POST":
        trakt_id = request.POST["trakt_id"]
        # Initialize so failed lookups cannot leave these unbound (the
        # original raised NameError at 'print media_type' when neither a
        # Movie nor a Show matched the given trakt_id).
        media = None
        media_type = None
        # Resolve the media item: movies first, then shows.
        try:
            media = Movie.objects.get(trakt_id=trakt_id)
            media_type = "movies"
        except Movie.DoesNotExist:
            try:
                media = Show.objects.get(trakt_id=trakt_id)
                media_type = "shows"
            except Show.DoesNotExist:
                response_text["error_messages"] = ["Media item does not exist"]
        print(media_type)
        if media is not None:
            # Every list of this user that contains the item.
            if media_type == "movies":
                lists = List.objects.filter(user=request.user,
                                            movies__trakt_id=trakt_id)
            else:
                lists = List.objects.filter(user=request.user,
                                            shows__trakt_id=trakt_id)
            print(lists)
            for l in lists:
                getattr(l, media_type).remove(media)
                l.save()
            response_text["error"] = False
        else:
            response_text["error_messages"] = ["The media does not exist"]
    return HttpResponse(json.dumps(response_text), content_type='application/json')
def movie_info_page(request, trakt_id):
    """Render the detail page for a single movie, fetched from the Trakt API."""
    if request.method != "GET":
        return HttpResponseRedirect(reverse("movies"))
    headers = {
        'Content-Type': 'application/json',
        'trakt-api-version' : '2',
        'trakt-api-key' : CLIENT_ID
    }
    url = 'https://api-v2launch.trakt.tv/movies/{0}/people?extended=images'.format(trakt_id)
    result = requests.get(url, headers=headers)
    try:
        movie_people = result.json()
    except:
        return home(request)
    # Flatten the per-department crew mapping into one list, in a fixed order.
    orig_crew = movie_people.get('crew', {})
    full_crew = []
    for dept in ('directing', 'production', 'writing', 'camera', 'art',
                 'costume & make-up', 'sound', 'crew'):
        full_crew += orig_crew.get(dept, [])
    movie_people['crew'] = full_crew
    # Fetch the movie itself and attach the fields the template expects.
    movie_info = get_from_API(trakt_id, 'movie')
    movie_info["genre_set"] = {}
    movie_info["genre_set"]["all"] = movie_info["genres"]
    movie_info["fanart_url"] = movie_info["images"]["fanart"]["thumb"]
    movie_info["poster_url"] = movie_info["images"]["poster"]["medium"]
    watched = False
    add = True
    if request.user.is_authenticated():
        lists = List.objects.filter(user=request.user)
        if Movie.objects.filter(trakt_id=trakt_id, list__in=lists).exists():
            # The movie is already tracked somewhere: offer "remove" instead.
            add = False
            movie = Movie.objects.get(trakt_id=trakt_id)
            watched_list = List.objects.get(user=request.user, name="watched")
            if watched_list in movie.list_set.all():
                watched = True
    context = {
        "add": add,
        "watched": watched,
        "media_info": movie_info,
        "media_people": movie_people
    }
    return render(request, 'movie_info.html', context)
def person_info_page(request, trakt_id):
    """Render the detail page for a person: bio plus movie and show credits."""
    if request.method != "GET":
        return home(request)
    headers = {
        'Content-Type': 'application/json',
        'trakt-api-version' : '2',
        'trakt-api-key' : CLIENT_ID
    }
    crew_departments = ('directing', 'production', 'writing', 'camera', 'art',
                        'costume & make-up', 'sound', 'crew')

    def flatten_credits(payload):
        # Merge the cast list and every crew department into one flat list,
        # preserving a fixed department order.
        merged = []
        merged += payload.get('cast', [])
        crew = payload.get('crew', {})
        for dept in crew_departments:
            merged += crew.get(dept, [])
        return merged

    url = 'https://api-v2launch.trakt.tv/people/{0}?extended=full,images'.format(trakt_id)
    result = requests.get(url, headers=headers)
    try:
        person_info = result.json()
    except:
        return home(request)
    # Convert the ISO date strings into datetime objects for the template.
    for key in ('birthday', 'death'):
        if person_info[key] != None:
            parsed = strptime(person_info[key], "%Y-%m-%d")
            person_info[key] = datetime.fromtimestamp(mktime(parsed))
    url = 'https://api-v2launch.trakt.tv/people/{0}/movies?extended=images'.format(trakt_id)
    result = requests.get(url, headers=headers)
    try:
        person_movies = result.json()
    except:
        return home(request)
    person_movies = flatten_credits(person_movies)
    url = 'https://api-v2launch.trakt.tv/people/{0}/shows?extended=images'.format(trakt_id)
    result = requests.get(url, headers=headers)
    try:
        person_shows = result.json()
    except:
        return home(request)
    person_shows = flatten_credits(person_shows)
    context = {
        "person_info": person_info,
        "person_movies": person_movies,
        "person_shows": person_shows
    }
    return render(request, 'person_info.html', context)
@login_required
def movies(request, listname):
    """Show the user's movies for the given list ("all" = union of all lists)."""
    # "Watching" does not apply to movies, so it is hidden from the sidebar.
    user_lists = List.objects.filter(user=request.user).exclude(
        name="watching").order_by('id')
    sort_order = Settings.objects.get(user=request.user).sort_movie_by
    curr_list = get_object_or_404(
        List.objects.filter(user=request.user, name=listname))
    if listname == "all":
        result = Movie.objects.filter(list__in=user_lists)
    else:
        result = Movie.objects.filter(list=curr_list)
    context = {
        'media': result.distinct().order_by(sort_order),
        'lists' : user_lists,
        'curr_list': curr_list
    }
    return render(request, 'movies.html', context)
@login_required
def shows(request, listname):
    """Show page — currently backed by the hard-coded example data."""
    user_lists = List.objects.filter(user=request.user).order_by('id')
    curr_list = get_object_or_404(
        List.objects.filter(user=request.user, name=listname))
    context = {
        'media': shows_example,
        'lists' : user_lists,
        'curr_list': curr_list
    }
    return render(request, 'shows.html', context)
@login_required
@transaction.atomic
def search(request):
context = {
"movies" : [],
"shows" : [],
"people" : [],
"lists" : List.objects.filter(user=request.user),
}
if request.method != "GET" or "query" not in request.GET:
return render(request, "search.html", context)
# get query
query = request.GET["query"]
# limit types to movies, shows, people
content_types = "movie,show,person"
# API call url
url = 'https://api-v2launch.trakt.tv/search?query={0}&type={1}&limit=50'.format(query, content_types)
headers = {
'Content-Type': 'application/json',
'trakt-api-version' : '2',
'trakt-api-key' : CLIENT_ID,
}
tag = "search-{0}".format(query.strip().lower())
# send API call
result = requests.get(url, headers=headers)
# convert to readable JSON
result = result.json()
# split into categories
context["movies"] = [item["movie"] for item in result if item["type"] == "movie"]
context["shows"] = [item["show"] for item in result if item["type"] == "show"]
context["people"] | |
armor', variants=[
Variant('festive_2_heavy.jpg', 'heavy armor'),
]),
Shot("Flames", 'flames_1_regular.jpg', 'light armor', variants=[
Variant('flames_2_heavy.jpg', 'heavy armor'),
]),
Shot("Forest", 'forest_1_regular.jpg', 'light armor', variants=[
Variant('forest_2_heavy.jpg', 'heavy armor'),
]),
Shot("Gearbox", 'gearbox_1_regular.jpg', 'light armor', variants=[
Variant('gearbox_2_heavy.jpg', 'heavy armor'),
]),
Shot("Golden Hog", 'golden_hog_1_regular.jpg', 'light armor', variants=[
Variant('golden_hog_2_heavy.jpg', 'heavy armor'),
]),
Shot("Grog Lube", 'grog_lube_1_regular.jpg', 'light armor', variants=[
Variant('grog_lube_2_heavy.jpg', 'heavy armor'),
]),
Shot("Hexagons", 'hexagons_1_regular.jpg', 'light armor', variants=[
Variant('hexagons_2_heavy.jpg', 'heavy armor'),
]),
Shot("Historic Racing", 'historic_racing_1_regular.jpg', 'light armor', variants=[
Variant('historic_racing_2_heavy.jpg', 'heavy armor'),
]),
Shot("Houndstooth", 'houndstooth_1_regular.jpg', 'light armor', variants=[
Variant('houndstooth_2_heavy.jpg', 'heavy armor'),
]),
Shot("Hyperion", 'hyperion_1_regular.jpg', 'light armor', variants=[
Variant('hyperion_2_heavy.jpg', 'heavy armor'),
]),
Shot("Infection", 'infection_1_regular.jpg', 'light armor', variants=[
Variant('infection_2_heavy.jpg', 'heavy armor'),
]),
Shot("Jakobs", 'jakobs_1_regular.jpg', 'light armor', variants=[
Variant('jakobs_2_heavy.jpg', 'heavy armor'),
]),
Shot("Maliwan", 'maliwan_1_regular.jpg', 'light armor', variants=[
Variant('maliwan_2_heavy.jpg', 'heavy armor'),
]),
Shot("Pirate", 'pirate_1_regular.jpg', 'light armor', variants=[
Variant('pirate_2_heavy.jpg', 'heavy armor'),
]),
Shot("Prisa's Garage", 'prisas_garage_1_regular.jpg', 'light armor', variants=[
Variant('prisas_garage_2_heavy.jpg', 'heavy armor'),
]),
Shot("Psycho-mobile", 'psycho-mobile_1_regular.jpg', 'light armor', variants=[
Variant('psycho-mobile_2_heavy.jpg', 'heavy armor'),
]),
Shot("Rage Cage", 'rage_cage_1_regular.jpg', 'light armor', variants=[
Variant('rage_cage_2_heavy.jpg', 'heavy armor'),
]),
Shot("Red Machine", 'red_machine_1_regular.jpg', 'light armor - BUGGED, can only spawn at exactly level 9', variants=[
Variant('red_machine_2_heavy.jpg', 'heavy armor - BUGGED, can only spawn at exactly level 9'),
]),
Shot("Shirking Convention", 'shirking_convention_1_regular.jpg', 'light armor', variants=[
Variant('shirking_convention_2_heavy.jpg', 'heavy armor'),
]),
Shot("Slow and Mad", 'slow_and_mad_1_regular.jpg', 'light armor', variants=[
Variant('slow_and_mad_2_heavy.jpg', 'heavy armor'),
]),
Shot("Spicy Tuna Roll", 'spicy_tuna_roll_1_regular.jpg', 'light armor', variants=[
Variant('spicy_tuna_roll_2_heavy.jpg', 'heavy armor'),
]),
Shot("Stealth", 'stealth_1_regular.jpg', 'light armor', variants=[
Variant('stealth_2_heavy.jpg', 'heavy armor'),
]),
Shot("Tentacar", 'tentacar_1_regular.jpg', 'light armor', variants=[
Variant('tentacar_2_heavy.jpg', 'heavy armor'),
]),
Shot("Torgue", 'torgue_1_regular.jpg', 'light armor', variants=[
Variant('torgue_2_heavy.jpg', 'heavy armor'),
]),
Shot("Triangles", 'triangles_1_regular.jpg', 'light armor', variants=[
Variant('triangles_2_heavy.jpg', 'heavy armor'),
]),
Shot("Vladof", 'vladof_1_regular.jpg', 'light armor', variants=[
Variant('vladof_2_heavy.jpg', 'heavy armor'),
]),
]))
# Cyclone vehicle-skin gallery: each Shot is the heavy-armor/blade-wheel
# screenshot, with a light-armor/hover-wheel Variant attached.
vehicles.append(Collection('cyclone',
    'Cyclone Skins',
    'Cyclone Skin',
    'vehicle_skins/cyclone',
    'Mar 26, 2020',
    [
        Shot("Atlas", 'atlas_1_heavy_blade.jpg', 'heavy armor, blade wheel', variants=[
            Variant('atlas_2_regular_hover.jpg', 'light armor, hover wheel'),
            ]),
        Shot("Bubblegum", 'bubblegum_1_heavy_blade.jpg', 'heavy armor, blade wheel', variants=[
            Variant('bubblegum_2_regular_hover.jpg', 'light armor, hover wheel'),
            ]),
        Shot("Children of the Vault", 'children_of_the_vault_1_heavy_blade.jpg', 'heavy armor, blade wheel', variants=[
            Variant('children_of_the_vault_2_regular_hover.jpg', 'light armor, hover wheel'),
            ]),
        Shot("Chopper", 'chopper_1_heavy_blade.jpg', 'heavy armor, blade wheel', variants=[
            Variant('chopper_2_regular_hover.jpg', 'light armor, hover wheel'),
            ]),
        Shot("Dahl", 'dahl_1_heavy_blade.jpg', 'heavy armor, blade wheel', variants=[
            Variant('dahl_2_regular_hover.jpg', 'light armor, hover wheel'),
            ]),
        Shot("Dark", 'dark_1_heavy_blade.jpg', 'heavy armor, blade wheel', variants=[
            Variant('dark_2_regular_hover.jpg', 'light armor, hover wheel'),
            ]),
        Shot("Default", 'default_1_heavy_blade.jpg', 'heavy armor, blade wheel (default skin)', order=1, variants=[
            Variant('default_2_regular_hover.jpg', 'light armor, hover wheel (default skin)'),
            ]),
        Shot("Ellie", 'ellie_1_heavy_blade.jpg', 'heavy armor, blade wheel', variants=[
            Variant('ellie_2_regular_hover.jpg', 'light armor, hover wheel'),
            ]),
        Shot("Emergency", 'emergency_1_heavy_blade.jpg', 'heavy armor, blade wheel', variants=[
            Variant('emergency_2_regular_hover.jpg', 'light armor, hover wheel'),
            ]),
        Shot("Firehawk", 'firehawk_1_heavy_blade.jpg', 'heavy armor, blade wheel', variants=[
            Variant('firehawk_2_regular_hover.jpg', 'light armor, hover wheel'),
            ]),
        Shot("Forest", 'forest_1_heavy_blade.jpg', 'heavy armor, blade wheel', variants=[
            Variant('forest_2_regular_hover.jpg', 'light armor, hover wheel'),
            ]),
        Shot("Gamescom 2019", 'gamescom_2019_1_heavy_blade.jpg', 'heavy armor, blade wheel', variants=[
            Variant('gamescom_2019_2_regular_hover.jpg', 'light armor, hover wheel'),
            ]),
        Shot("Gearbox", 'gearbox_1_heavy_blade.jpg', 'heavy armor, blade wheel', variants=[
            Variant('gearbox_2_regular_hover.jpg', 'light armor, hover wheel'),
            ]),
        Shot("Golden Wheel", 'golden_wheel_1_heavy_blade.jpg', 'heavy armor, blade wheel', variants=[
            Variant('golden_wheel_2_regular_hover.jpg', 'light armor, hover wheel'),
            ]),
        Shot("Hyperion", 'hyperion_1_heavy_blade.jpg', 'heavy armor, blade wheel', variants=[
            Variant('hyperion_2_regular_hover.jpg', 'light armor, hover wheel'),
            ]),
        Shot("Jakobs", 'jakobs_1_heavy_blade.jpg', 'heavy armor, blade wheel', variants=[
            Variant('jakobs_2_regular_hover.jpg', 'light armor, hover wheel'),
            ]),
        Shot("Lifeline", 'lifeline_1_heavy_blade.jpg', 'heavy armor, blade wheel', variants=[
            Variant('lifeline_2_regular_hover.jpg', 'light armor, hover wheel'),
            ]),
        Shot("Lollipop", 'lollipop_1_heavy_blade.jpg', 'heavy armor, blade wheel', variants=[
            Variant('lollipop_2_regular_hover.jpg', 'light armor, hover wheel'),
            ]),
        Shot("Maliwan", 'maliwan_1_heavy_blade.jpg', 'heavy armor, blade wheel', variants=[
            Variant('maliwan_2_regular_hover.jpg', 'light armor, hover wheel'),
            ]),
        Shot("Ninja Shark", 'ninja_shark_1_heavy_blade.jpg', 'heavy armor, blade wheel', variants=[
            Variant('ninja_shark_2_regular_hover.jpg', 'light armor, hover wheel'),
            ]),
        Shot("Police", 'police_1_heavy_blade.jpg', 'heavy armor, blade wheel', variants=[
            Variant('police_2_regular_hover.jpg', 'light armor, hover wheel'),
            ]),
        Shot("Psycho", 'psycho_1_heavy_blade.jpg', 'heavy armor, blade wheel', variants=[
            Variant('psycho_2_regular_hover.jpg', 'light armor, hover wheel'),
            ]),
        Shot("Spicy Tuna Roll", 'spicy_tuna_roll_1_heavy_blade.jpg', 'heavy armor, blade wheel', variants=[
            Variant('spicy_tuna_roll_2_regular_hover.jpg', 'light armor, hover wheel'),
            ]),
        Shot("Stealth", 'stealth_1_heavy_blade.jpg', 'heavy armor, blade wheel', variants=[
            Variant('stealth_2_regular_hover.jpg', 'light armor, hover wheel'),
            ]),
        Shot("Torgue", 'torgue_1_heavy_blade.jpg', 'heavy armor, blade wheel', variants=[
            Variant('torgue_2_regular_hover.jpg', 'light armor, hover wheel'),
            ]),
        Shot("Vladof", 'vladof_1_heavy_blade.jpg', 'heavy armor, blade wheel', variants=[
            Variant('vladof_2_regular_hover.jpg', 'light armor, hover wheel'),
            ]),
    ]))
# Technical vehicle skins: one Shot per skin (light-armor primary image),
# each with a heavy-armor Variant; 'Default' is pinned first via order=1.
vehicles.append(Collection('technical',
'Technical Skins',
'Technical Skin',
'vehicle_skins/technical',
'Mar 26, 2020',
[
Shot("Atlas", 'atlas_1_regular.jpg', 'light armor', variants=[
Variant('atlas_2_heavy.jpg', 'heavy armor'),
]),
Shot("Blue Angels", 'blue_angels_1_regular.jpg', 'light armor', variants=[
Variant('blue_angels_2_heavy.jpg', 'heavy armor'),
]),
Shot("Bubblegum", 'bubblegum_1_regular.jpg', 'light armor', variants=[
Variant('bubblegum_2_heavy.jpg', 'heavy armor'),
]),
Shot("Children of the Vault", 'children_of_the_vault_1_regular.jpg', 'light armor', variants=[
Variant('children_of_the_vault_2_heavy.jpg', 'heavy armor'),
]),
Shot("Dahl", 'dahl_1_regular.jpg', 'light armor', variants=[
Variant('dahl_2_heavy.jpg', 'heavy armor'),
]),
Shot("Default", 'default_1_regular.jpg', 'light armor (default skin)', order=1, variants=[
Variant('default_2_heavy.jpg', 'heavy armor (default skin)'),
]),
Shot("E3 2019", 'e3_2019_1_regular.jpg', 'light armor', variants=[
Variant('e3_2019_2_heavy.jpg', 'heavy armor'),
]),
Shot("Ellie", 'ellie_1_regular.jpg', 'light armor', variants=[
Variant('ellie_2_heavy.jpg', 'heavy armor'),
]),
Shot("Festi-Flesh", 'festi-flesh_1_regular.jpg', 'light armor', variants=[
Variant('festi-flesh_2_heavy.jpg', 'heavy armor'),
]),
Shot("Follow-Me", 'follow-me_1_regular.jpg', 'light armor', variants=[
Variant('follow-me_2_heavy.jpg', 'heavy armor'),
]),
Shot("Forest Camo", 'forest_camo_1_regular.jpg', 'light armor', variants=[
Variant('forest_camo_2_heavy.jpg', 'heavy armor'),
]),
Shot("Frost Rider", 'frost_rider_1_regular.jpg', 'light armor', variants=[
Variant('frost_rider_2_heavy.jpg', 'heavy armor'),
]),
Shot("Gearbox", 'gearbox_1_regular.jpg', 'light armor', variants=[
Variant('gearbox_2_heavy.jpg', 'heavy armor'),
]),
Shot("Golden Ticket", 'golden_ticket_1_regular.jpg', 'light armor', variants=[
Variant('golden_ticket_2_heavy.jpg', 'heavy armor'),
]),
Shot("Halftone", 'halftone_1_regular.jpg', 'light armor', variants=[
Variant('halftone_2_heavy.jpg', 'heavy armor'),
]),
Shot("Hyperion", 'hyperion_1_regular.jpg', 'light armor', variants=[
Variant('hyperion_2_heavy.jpg', 'heavy armor'),
]),
Shot("I like dinosaurs.", 'i_like_dinosaurs_1_regular.jpg', 'light armor', variants=[
Variant('i_like_dinosaurs_2_heavy.jpg', 'heavy armor'),
]),
Shot("Jakobs", 'jakobs_1_regular.jpg', 'light armor', variants=[
Variant('jakobs_2_heavy.jpg', 'heavy armor'),
]),
Shot("Leather", 'leather_1_regular.jpg', 'light armor', variants=[
Variant('leather_2_heavy.jpg', 'heavy armor'),
]),
Shot("Magic Number", 'magic_number_1_regular.jpg', 'light armor', variants=[
Variant('magic_number_2_heavy.jpg', 'heavy armor'),
]),
Shot("Maliwan", 'maliwan_1_regular.jpg', 'light armor', variants=[
Variant('maliwan_2_heavy.jpg', 'heavy armor'),
]),
Shot("Maya", 'maya_1_regular.jpg', 'light armor', variants=[
Variant('maya_2_heavy.jpg', 'heavy armor'),
]),
Shot("Plaid", 'plaid_1_regular.jpg', 'light armor', variants=[
Variant('plaid_2_heavy.jpg', 'heavy armor'),
]),
Shot("Roadkill", 'roadkill_1_regular.jpg', 'light armor', variants=[
Variant('roadkill_2_heavy.jpg', 'heavy armor'),
]),
Shot("Skag", 'skag_1_regular.jpg', 'light armor', variants=[
Variant('skag_2_heavy.jpg', 'heavy armor'),
]),
Shot("Spicy Tuna Roll", 'spicy_tuna_roll_1_regular.jpg', 'light armor', variants=[
Variant('spicy_tuna_roll_2_heavy.jpg', 'heavy armor'),
]),
Shot("Stealth", 'stealth_1_regular.jpg', 'light armor', variants=[
Variant('stealth_2_heavy.jpg', 'heavy armor'),
]),
Shot("Tentacar", 'tentacar_1_regular.jpg', 'light armor', variants=[
Variant('tentacar_2_heavy.jpg', 'heavy armor'),
]),
Shot("Thunderbird", 'thunderbird_1_regular.jpg', 'light armor', variants=[
Variant('thunderbird_2_heavy.jpg', 'heavy armor'),
]),
Shot("Torgue", 'torgue_1_regular.jpg', 'light armor', variants=[
Variant('torgue_2_heavy.jpg', 'heavy armor'),
]),
Shot("Vaughn's", 'vaughns_1_regular.jpg', 'light armor', variants=[
Variant('vaughns_2_heavy.jpg', 'heavy armor'),
]),
Shot("Vladof", 'vladof_1_regular.jpg', 'light armor', variants=[
Variant('vladof_2_heavy.jpg', 'heavy armor'),
]),
Shot("Woodland Camo", 'woodland_camo_1_regular.jpg', 'light armor', variants=[
Variant('woodland_camo_2_heavy.jpg', 'heavy armor'),
]),
]))
# Jetbeast vehicle skins: single image per skin, no armor variants;
# 'Default' is pinned first via order=1.
vehicles.append(Collection('jetbeast',
'Jetbeast Skins',
'Jetbeast Skin',
'vehicle_skins/jetbeast',
'Jun 30, 2020',
[
Shot('Beefy', 'beefy.jpg'),
Shot('Company Car', 'company_car.jpg'),
Shot('Default', 'default.jpg', '(default skin)', order=1),
Shot('Devil Rider', 'devil_rider.jpg'),
Shot('Reptile', 'reptile.jpg'),
Shot('Woodsy', 'woodsy.jpg'),
]))
###
### Main rendering functions + such follow
###
def main(base_img_href, thumb_size, urls=False, verbose=False):
    """
    Do the generation.

    `base_img_href` is the base href prepended to image links and
    `thumb_size` the thumbnail dimensions handed to each shot's renderer.
    If `urls` is `True`, use URLs instead of local files to do the linking.
    `verbose` is passed through to shot rendering.
    """
    global bl3collections

    # Cache of file mtimes, so we know whether or not to regenerate thumbnails.
    # NOTE(review): `cache_data` is loaded but not consumed in this function
    # body -- presumably used elsewhere (e.g. by shot.output()); confirm
    # before removing.
    cache_dir = '.cache'
    cache_file = os.path.join(cache_dir, 'shots.json.xz')
    cache_data = {}
    if os.path.exists(cache_file):
        with lzma.open(cache_file, 'r') as df:
            cache_data = json.load(df)

    # Navigation skeleton; per-category entries are appended below.
    navigation = {
        'main': [
            # Actually, I don't think I want that link up there...
            #{'title': 'Home', 'url': '/'},
        ],
        'side': [
            {'title': 'Main Links', 'children': [
                {'title': 'Home', 'url': '/'},
                {'title': 'Changelog', 'url': '/changelog/'},
                {'title': 'Original Imgur', 'url': '/original/'},
            ]},
        ],
    }

    # Loop through our categories and process 'em
    for category in categories:
        # Add the category to our sidebar
        new_cat = {'title': category.label, 'children': []}
        navigation['side'].append(new_cat)

        # Loop through collections and write those out
        for collection in category:
            # ... but first add the collection to our sidebar
            report_url = f'/{collection.slug}/'
            new_cat['children'].append({'title': collection.name_plural, 'url': report_url})

            # Now get to actually processing
            filename = collection.get_filename()
            # BUG FIX: the original f-string contained no placeholder and
            # printed a literal instead of the target filename.
            print(f'Writing to {filename}...')
            with open(filename, 'w') as odf:

                # Jekyll front-matter header
                print('---', file=odf)
                print(f'title: {collection.name_plural}', file=odf)
                print(f'permalink: {report_url}', file=odf)
                print('---', file=odf)
                print('', file=odf)
                print(f'<h1>{collection.name_plural}</h1>', file=odf)
                print('', file=odf)

                # Last updated, and (if we have it) extra text
                print('<p class="last_updated"><strong>Last Updated:</strong> {}</p>'.format(collection.last_updated), file=odf)
                if collection.extra_text:
                    print('<p class="extra_text">{}</p>'.format(html.escape(collection.extra_text)), file=odf)
                print('', file=odf)

                # Report on the total count (singular vs. plural label)
                if len(collection) == 1:
                    report = collection.name_single
                else:
                    report = collection.name_plural
                print('<div class="cosmetic_count">Total {}: {}</div>'.format(
                    report,
                    len(collection),
                    ), file=odf)
                print('', file=odf)

                # Loop over shots
                if collection.grid:
                    print('<div class="grid-area">', file=odf)
                    print('', file=odf)
                for shot in sorted(collection):
                    shot.output(odf, collection, base_img_href, thumb_size, urls, verbose)
                if collection.grid:
                    print('</div>', file=odf)

    # Write out navigation
    print('Writing out navigation')
    with open('_data/navigation.yml', 'w') as odf:
        yaml.dump(navigation, odf, indent=2, default_flow_style=False)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Generate BL3 Cosmetics Archive',
)
parser.add_argument('-b', '--base-img-href',
default='/bl3cosmetics',
help='Base image | |
:math:`[h^{-1}\mathrm{Mpc}]`
"""
logdens = np.log10(self.mass_to_dens(Mthre, redshift))
return self.get_wauto_cut(R2d, logdens, logdens, redshift, pimax, integration)
def get_wauto_mass(self, R2d, M1, M2, redshift):
    """get_wauto_mass

    Projected halo-halo correlation function w_hh(R; M1, M2) for two
    fixed-mass halo samples, obtained by finite-differencing the
    mass-threshold predictions around M1 and M2 (+/- 1 per cent).

    Args:
        R2d (numpy array): 2D projected separation in [h^-1 Mpc]
        M1 (float): halo mass of the first sample in [h^-1 Msun]
        M2 (float): halo mass of the second sample in [h^-1 Msun]
        redshift (float): redshift at which the signal is evaluated

    Returns:
        numpy array: projected halo correlation function in [h^-1 Mpc]
    """
    # Cumulative number densities at masses perturbed by +/- 1 per cent.
    densities = [self.mass_to_dens(m, redshift)
                 for m in (M1 * 1.01, M1 * 0.99, M2 * 1.01, M2 * 0.99)]
    d1p, d1m, d2p, d2m = densities
    l1p, l1m, l2p, l2m = (np.log10(d) for d in densities)
    # Threshold-sample correlations at the four perturbation corners.
    w_mm = self.get_wauto(R2d, l1m, l2m, redshift)
    w_mp = self.get_wauto(R2d, l1m, l2p, redshift)
    w_pm = self.get_wauto(R2d, l1p, l2m, redshift)
    w_pp = self.get_wauto(R2d, l1p, l2p, redshift)
    # Mixed second-order finite difference of n1*n2*w, normalized by the
    # corresponding difference of the density products.
    numer = (w_mm * d1m * d2m - w_mp * d1m * d2p
             - w_pm * d1p * d2m + w_pp * d1p * d2p)
    denom = d1m * d2m - d1m * d2p - d1p * d2m + d1p * d2p
    return numer / denom
def get_wauto_mass_cut(self, R2d, M1, M2, redshift, pimax):
    """get_wauto_mass_cut

    Projected halo-halo correlation function w_hh(R; M1, M2) for two
    fixed-mass halo samples. Unlike `get_wauto_mass`, the line-of-sight
    integration runs over the finite range -pi_max .. +pi_max.

    Args:
        R2d (numpy array): 2D projected separation in [h^-1 Mpc]
        M1 (float): halo mass of the first sample in [h^-1 Msun]
        M2 (float): halo mass of the second sample in [h^-1 Msun]
        redshift (float): redshift at which the signal is evaluated
        pimax (float): pi_max, upper limit of the radial integral

    Returns:
        numpy array: projected halo correlation function in [h^-1 Mpc]
    """
    # Cumulative number densities at masses perturbed by +/- 1 per cent.
    densities = [self.mass_to_dens(m, redshift)
                 for m in (M1 * 1.01, M1 * 0.99, M2 * 1.01, M2 * 0.99)]
    d1p, d1m, d2p, d2m = densities
    l1p, l1m, l2p, l2m = (np.log10(d) for d in densities)
    # Threshold-sample correlations (finite pi_max) at the four corners.
    w_mm = self.get_wauto_cut(R2d, l1m, l2m, redshift, pimax)
    w_mp = self.get_wauto_cut(R2d, l1m, l2p, redshift, pimax)
    w_pm = self.get_wauto_cut(R2d, l1p, l2m, redshift, pimax)
    w_pp = self.get_wauto_cut(R2d, l1p, l2p, redshift, pimax)
    # Mixed finite difference converts threshold samples to fixed masses.
    numer = (w_mm * d1m * d2m - w_mp * d1m * d2p
             - w_pm * d1p * d2m + w_pp * d1p * d2p)
    denom = d1m * d2m - d1m * d2p - d1p * d2m + d1p * d2p
    return numer / denom
def _get_pkmatter_tree(self, redshift):
    # Tree-level matter power spectrum on a fixed log-k grid: the linear
    # spectrum dressed with the squared dark-matter propagator g1_dm.
    ks = np.logspace(-3, 3, 2000)
    propagator = self.g1.get_dm(ks, redshift)
    return propagator * propagator * self.get_pklin(ks)
# TN suppressed this because it is a duplication of get_pknl
# def get_pmnl(self,ks,redshift):
# xs = np.logspace(-3,3,2000)
# xi = self.get_xinl(xs,redshift)
# return pyfftlog_interface.xi2pk_pyfftlog(iuspline(xs,xi))(ks)
def _get_pkmatter_tree_spline(self, redshift):
    # Spline interpolator over the tree-level matter power spectrum
    # (same quantity as _get_pkmatter_tree, wrapped for reuse in FFTLog).
    ks = np.logspace(-3, 3, 2000)
    propagator = self.g1.get_dm(ks, redshift)
    return iuspline(ks, propagator * propagator * self.get_pklin(ks))
def _get_pkcross_tree(self, logdens, redshift):
    # Tree-level halo-matter cross spectrum on a fixed log-k grid:
    # halo propagator * dark-matter propagator * linear P(k).
    ks = np.logspace(-3, 3, 2000)
    return (self.g1.get(ks, redshift, logdens)
            * self.g1.get_dm(ks, redshift)
            * self.get_pklin(ks))
def _get_pkcross_tree_spline(self, logdens, redshift):
    # Spline interpolator over the tree-level halo-matter cross spectrum.
    ks = np.logspace(-3, 3, 2000)
    pk_tree = (self.g1.get(ks, redshift, logdens)
               * self.g1.get_dm(ks, redshift)
               * self.get_pklin(ks))
    return iuspline(ks, pk_tree)
def _get_xicross_tree(self, xs, logdens, redshift):
    # Tree-level halo-matter correlation: FFTLog transform of the
    # tree-level cross power spectrum, evaluated at separations `xs`.
    return pyfftlog_interface.pk2xi_pyfftlog(self._get_pkcross_tree_spline(logdens, redshift))(xs)
def _get_xicross_direct(self, xs, logdens, redshift):
    # Direct emulator prediction for the halo-matter correlation function.
    return self.xi_cross.get(xs, redshift, logdens)
def get_xicross(self, xs, logdens, redshift):
    """get_xicross

    Halo-matter cross correlation function xi_hm(x; n_h) for a mass
    threshold halo sample specified by its cumulative number density.

    Args:
        xs (numpy array): separations in [h^-1 Mpc]
        logdens (float): log10 of the cumulative halo number density of
            the sample taken from the most massive,
            log10[n_h/(h^-1 Mpc)^3]
        redshift (float): redshift at which the signal is evaluated

    Returns:
        numpy array: halo-matter cross correlation function
    """
    direct = self._get_xicross_direct(xs, logdens, redshift)
    tree = self._get_xicross_tree(xs, logdens, redshift)
    # Blend scale: bounded by 60 Mpc/h or half the approximate BAO scale.
    rswitch = min(60., 0.5 * self.cosmo.get_BAO_approx())
    # Smoothly switch from the direct emulator (small scales) to the
    # tree-level prediction (large scales).
    weight = np.exp(-(xs / rswitch)**4)
    return direct * weight + tree * (1 - weight)
def get_xicross_massthreshold(self, xs, Mthre, redshift):
    """get_xicross_massthreshold

    Halo-matter cross correlation function xi_hm(x; >M_th) for a mass
    threshold halo sample.

    Args:
        xs (numpy array): separations in [h^-1 Mpc]
        Mthre (float): minimum mass threshold of the halo sample in
            [h^-1 Msun]
        redshift (float): redshift at which the signal is evaluated

    Returns:
        numpy array: halo-matter cross correlation function
    """
    # Convert the threshold mass to a cumulative number density and
    # delegate to the density-based interface.
    logdens = np.log10(self.mass_to_dens(Mthre, redshift))
    return self.get_xicross(xs, logdens, redshift)
def get_xicross_mass(self, xs, M, redshift):
    """get_xicross_mass

    Halo-matter cross correlation function xi_hm(x; M) for halos of a
    fixed mass M, de-cumulated from two threshold samples at
    M * (1 +/- 0.01).

    Args:
        xs (numpy array): separations in [h^-1 Mpc]
        M (float): halo mass in [h^-1 Msun]
        redshift (float): redshift at which the signal is evaluated

    Returns:
        numpy array: halo-matter cross correlation function
    """
    # Number densities of the two bracketing threshold samples
    # (higher mass => lower cumulative density).
    logdens_hi = np.log10(self.mass_to_dens(M * 1.01, redshift))
    logdens_lo = np.log10(self.mass_to_dens(M * 0.99, redshift))
    xi_hi = self.get_xicross(xs, logdens_hi, redshift)
    xi_lo = self.get_xicross(xs, logdens_lo, redshift)
    n_hi = 10**logdens_hi
    n_lo = 10**logdens_lo
    # Finite difference of n * xi with respect to the number density.
    return (xi_lo * n_lo - xi_hi * n_hi) / (n_lo - n_hi)
def _get_phm_tree(self, ks, logdens, redshift):
    # Tree-level halo-matter cross spectrum at user-supplied wavenumbers.
    return (self.g1.get(ks, redshift, logdens)
            * self.g1.get_dm(ks, redshift)
            * self.get_pklin(ks))
def _get_phm_direct(self, ks, logdens, redshift):
    # Fourier transform of the direct-emulator xi_hm back to P_hm(k).
    xs = np.logspace(-3, 3, 2000)
    xi_spline = iuspline(xs, self.xi_cross.get(xs, redshift, logdens))
    return pyfftlog_interface.xi2pk_pyfftlog(xi_spline)(ks)
def get_phm(self, ks, logdens, redshift):
    """get_phm

    Compute the halo-matter cross power spectrum P_hm(k; n_h) for a
    mass threshold halo sample specified by the corresponding cumulative
    number density.

    Args:
        ks (numpy array): Wavenumbers in [h Mpc^-1]
        logdens (float): Logarithm of the cumulative halo number density
            of the halo sample taken from the most massive,
            log10[n_h/(h^-1 Mpc)^3]
        redshift (float): redshift at which the power spectrum is evaluated

    Returns:
        numpy array: Halo-matter cross power spectrum in [(h^-1 Mpc)^3]
    """
    xs = np.logspace(-4, 3, 4000)
    # CONSISTENCY: delegate the direct/tree small-vs-large-scale blending
    # to get_xicross (the original duplicated the same formula inline),
    # then FFTLog-transform the blended correlation back to Fourier space.
    xi = self.get_xicross(xs, logdens, redshift)
    return pyfftlog_interface.xi2pk_pyfftlog(
        iuspline(xs, xi), logrmin=-4.0, logrmax=3.0)(ks)
def _get_phm_tree_cut(self, ks, logdens, redshift):
    # Large-scale (tree-level) contribution to P_hm: the tree xi damped
    # below the switch radius, transformed back to Fourier space.
    xs = np.logspace(-4, 3, 4000)
    rswitch = min(60., 0.5 * self.cosmo.get_BAO_approx())
    damping = 1 - np.exp(-(xs / rswitch)**4)
    xi = self._get_xicross_tree(xs, logdens, redshift) * damping
    return pyfftlog_interface.xi2pk_pyfftlog(
        iuspline(xs, xi), logrmin=-4.0, logrmax=3.0)(ks)
def _get_phm_direct_cut(self, ks, logdens, redshift):
    # Small-scale (direct-emulator) contribution to P_hm: the direct xi
    # damped above the switch radius, transformed back to Fourier space.
    xs = np.logspace(-4, 3, 4000)
    rswitch = min(60., 0.5 * self.cosmo.get_BAO_approx())
    damping = np.exp(-(xs / rswitch)**4)
    xi = self._get_xicross_direct(xs, logdens, redshift) * damping
    return pyfftlog_interface.xi2pk_pyfftlog(
        iuspline(xs, xi), logrmin=-4.0, logrmax=3.0)(ks)
def get_phm_massthreshold(self, ks, Mthre, redshift):
    """get_phm_massthreshold

    Halo-matter cross power spectrum P_hm(k; >M_th) for a mass threshold
    halo sample.

    Args:
        ks (numpy array): wavenumbers in [h Mpc^-1]
        Mthre (float): minimum halo mass threshold in [h^-1 Msun]
        redshift (float): redshift at which the spectrum is evaluated

    Returns:
        numpy array: halo-matter cross power spectrum in [(h^-1 Mpc)^3]
    """
    # Map the threshold mass onto a cumulative number density, then
    # delegate to the density-based interface.
    logdens = np.log10(self.mass_to_dens(Mthre, redshift))
    return self.get_phm(ks, logdens, redshift)
def get_phm_mass(self, ks, M, redshift):
    """get_phm_mass

    Halo-matter cross power spectrum P_hm(k; M) for halos of a fixed
    mass M, de-cumulated from two threshold samples at M * (1 +/- 0.01).

    Args:
        ks (numpy array): wavenumbers in [h Mpc^-1]
        M (float): halo mass in [h^-1 Msun]
        redshift (float): redshift at which the spectrum is evaluated

    Returns:
        numpy array: halo-matter cross power spectrum in [(h^-1 Mpc)^3]
    """
    # Bracketing threshold samples around M.
    logdens_hi = np.log10(self.mass_to_dens(M * 1.01, redshift))
    logdens_lo = np.log10(self.mass_to_dens(M * 0.99, redshift))
    p_hi = self.get_phm(ks, logdens_hi, redshift)
    p_lo = self.get_phm(ks, logdens_lo, redshift)
    n_hi = 10**logdens_hi
    n_lo = 10**logdens_lo
    # Finite difference of n * P with respect to the number density.
    return (p_lo * n_lo - p_hi * n_hi) / (n_lo - n_hi)
def _get_DeltaSigma_tree(self, R2d, logdens, redshift):
    # Tree-level excess surface mass density: Omega_m0 * rho_cr times the
    # J2 projection of the tree-level cross power spectrum. The 1e12 factor
    # presumably converts Mpc^-2 to pc^-2 units -- confirm against rho_cr.
    return self.cosmo.get_Omega0() * self.cosmo.rho_cr / 1e12 * pyfftlog_interface.pk2xiproj_J2_pyfftlog(self._get_pkcross_tree_spline(logdens, redshift))(R2d)
def _get_DeltaSigma_direct(self, R2d, logdens, redshift):
    # Direct-emulator excess surface density: xi_hm -> P(k) -> J2 projection,
    # scaled by the mean matter density (in h Msun / pc^2 after / 1e12).
    xs = np.logspace(-3, 3, 2000)
    pk_spl = pyfftlog_interface.xi2pk_pyfftlog(
        iuspline(xs, self._get_xicross_direct(xs, logdens, redshift)))
    prefactor = self.cosmo.get_Omega0() * self.cosmo.rho_cr / 1e12
    return prefactor * pyfftlog_interface.pk2xiproj_J2_pyfftlog(pk_spl)(R2d)
def get_DeltaSigma(self, R2d, logdens, redshift):
    """get_DeltaSigma

    Halo-galaxy lensing signal (excess surface mass density)
    DeltaSigma(R; n_h) for a mass threshold halo sample specified by its
    cumulative number density.

    Args:
        R2d (numpy array): 2D projected separation in [h^-1 Mpc]
        logdens (float): log10 of the cumulative halo number density
            taken from the most massive, log10[n_h/(h^-1 Mpc)^3]
        redshift (float): redshift at which the lens halos are located

    Returns:
        numpy array: excess surface mass density in [h Msun pc^-2]
    """
    # Blended xi_hm -> P(k) -> J2 projection, scaled by the mean matter
    # density in h Msun / pc^2.
    xs = np.logspace(-3, 3, 2000)
    pk_spl = pyfftlog_interface.xi2pk_pyfftlog(
        iuspline(xs, self.get_xicross(xs, logdens, redshift)))
    prefactor = self.cosmo.get_Omega0() * self.cosmo.rho_cr / 1e12
    return prefactor * pyfftlog_interface.pk2xiproj_J2_pyfftlog(pk_spl)(R2d)
def get_DeltaSigma_massthreshold(self, R2d, Mthre, redshift):
"""get_DeltaSigma_massthreshold
Compute the halo-galaxy lensing signal, the excess surface mass density, :math:`\Delta\Sigma(R;>M_\mathrm{th})`, for a mass threshold halo sample.
Args:
R2d (numpy array): 2 dimensional projected separation in :math:`h^{-1}\mathrm{Mpc}`
Mthre (float): | |
#
import numpy as np
import torch
from torch.utils.data import DataLoader
import sklearn.model_selection as skm
from apps.drl.chpA01.e01.chp_a01_e01_ds import ChpA01E01Ds
from apps.drl.chpA01.e01.chp_a01_e01_model import ChpA01E01Model
class ChpA01E01(object):
def __init__(self):
    """Set up experiment identity and the checkpoint location."""
    self.name = ''
    # Destination for torch.save()/torch.load() of the model state_dict.
    self.model_file = './work/lnrn.pt'
def startup(self, args={}):
    """Entry point: run the currently selected experiment variant.

    NOTE(review): `args` uses a mutable default dict; it is unused in
    this body, but callers should not rely on it being fresh per call.
    """
    print('线性回归 adam')
    # Earlier experiment variants, kept commented out for quick switching:
    #self.lnrn_plain()
    #self.lnrn_sgd()
    #self.lnrn_adam()
    #self.lnrn_adam_mse()
    #self.lnrn_with_ds()
    #self.lnrn_with_model()
    #self.lnrn_gpu()
    #self.lnrn_eval()
    #self.lnrn_save_load()
    self.lnrn_ds_split() # train and valid split
def lnrn_ds_split(self):
    """Train/valid/test split demo with per-batch validation,
    best-model checkpointing and early stopping.

    BUG FIXES vs. the original:
      * `best_loss` was overwritten with the current validation loss on
        every batch, so it tracked the *last* loss rather than the best
        one -- checkpointing and the improvement test were both wrong.
      * Validation/test losses summed per-batch means and divided by the
        sample count; they are now averaged over the batch count.
    """
    print('分配训练、验证、测试数据集 v0.0.1')
    # Load the dataset and split it 70/20/10 into train/valid/test.
    ds = ChpA01E01Ds(num=1000)
    total_count = len(ds)
    train_count = int(0.7 * total_count)
    valid_count = int(0.2 * total_count)
    test_count = total_count - train_count - valid_count
    train_ds, valid_ds, test_ds = torch.utils.data.random_split(
        ds, (train_count, valid_count, test_count))
    train_dl = DataLoader(train_ds, batch_size=10, shuffle=True)
    valid_dl = DataLoader(valid_ds, batch_size=23, shuffle=False)
    test_dl = DataLoader(test_ds, batch_size=88, shuffle=False)
    # Model, loss and optimizer. Per-parameter learning rates are used
    # because plain model.parameters() with the global lr needs ~100
    # epochs to converge.
    device = self.get_exec_device()
    model = ChpA01E01Model().to(device)
    criterion = torch.nn.MSELoss()
    learning_params = []
    for k, v in model.named_parameters():
        if k == 'w001':
            learning_params.append({'params': v, 'lr': 0.01})
        elif k == 'b001':
            learning_params.append({'params': v, 'lr': 0.1})
    optimizer = torch.optim.Adam(learning_params, lr=0.001)
    epochs = 10
    best_loss = float('inf')
    unimproved_loop = 0
    improved_threshold = 1e-9
    max_unimproved_loop = 5
    train_done = False
    for epoch in range(epochs):
        model.train()
        for X, y_hat in train_dl:
            optimizer.zero_grad()
            X, y_hat = X.to(device), y_hat.to(device)
            y = model(X)
            loss = criterion(y, y_hat)
            # Validate after every training batch (expensive, but gives
            # fine-grained early stopping for this small demo).
            lossv = 0.0
            with torch.no_grad():
                for Xv, yv_hat in valid_dl:
                    Xv, yv_hat = Xv.to(device), yv_hat.to(device)
                    lossv += criterion(model(Xv), yv_hat)
            lossv /= len(valid_dl)  # average of per-batch mean losses
            # Count loops without a meaningful improvement.
            if lossv < best_loss - improved_threshold:
                unimproved_loop = 0
            else:
                unimproved_loop += 1
            # Checkpoint only when the validation loss actually improves.
            if lossv < best_loss:
                torch.save(model.state_dict(), self.model_file)
                best_loss = lossv
            # Early stopping.
            if unimproved_loop >= max_unimproved_loop:
                train_done = True
                break
            loss.backward()
            optimizer.step()
        print('{0}: w={1}; b={2}; loss={3};'.format(epoch, model.w001, model.b001, loss))
        if train_done:
            break
    # Evaluate on the held-out test set.
    test_loss = 0
    with torch.no_grad():
        for X, y_hat in test_dl:
            X, y_hat = X.to(device), y_hat.to(device)
            test_loss += criterion(model(X), y_hat)
    test_loss /= len(test_dl)
    print('测试集上代价函数值:{0};'.format(test_loss))
    # Reload the best checkpoint into a fresh model to verify save/load.
    ckpt = torch.load(self.model_file)
    m1 = ChpA01E01Model()
    print('初始值:w={0}; b={1};'.format(m1.w001, m1.b001))
    m1.load_state_dict(ckpt)
    print('载入值:w={0}; b={1};'.format(m1.w001, m1.b001))
def lnrn_eval(self):
    """Train on the full dataset, then evaluate on a fresh test set.

    BUG FIX vs. the original: the evaluation DataLoader was built from
    the 1000-sample training set `ds` instead of `test_ds`, while the
    loss was normalized by `test_num` (100) -- both the data and the
    scale were wrong.
    """
    # Load dataset.
    ds = ChpA01E01Ds(num=1000)
    dl = DataLoader(ds, batch_size=10, shuffle=True)
    # Model, loss, optimizer (per-parameter learning rates; plain
    # model.parameters() with the global lr needs ~100 epochs).
    device = self.get_exec_device()
    model = ChpA01E01Model().to(device)
    criterion = torch.nn.MSELoss()
    learning_params = []
    for k, v in model.named_parameters():
        if k == 'w001':
            learning_params.append({'params': v, 'lr': 0.01})
        elif k == 'b001':
            learning_params.append({'params': v, 'lr': 0.1})
    optimizer = torch.optim.Adam(learning_params, lr=0.001)
    epochs = 10
    for epoch in range(epochs):
        model.train()
        for X, y_hat in dl:
            optimizer.zero_grad()
            X, y_hat = X.to(device), y_hat.to(device)
            y = model(X)
            loss = criterion(y, y_hat)
            loss.backward()
            optimizer.step()
        print('{0}: w={1}; b={2}; loss={3};'.format(epoch, model.w001, model.b001, loss))
    # Model evaluation on a freshly generated test set.
    test_num = 100
    test_ds = ChpA01E01Ds(num=test_num)
    model.eval()
    test_dl = DataLoader(test_ds, batch_size=30, shuffle=False)
    test_loss = 0
    with torch.no_grad():
        for X, y_hat in test_dl:
            X, y_hat = X.to(device), y_hat.to(device)
            test_loss += criterion(model(X), y_hat)
    # criterion returns per-batch means; average over the batch count.
    test_loss /= len(test_dl)
    print('测试集上代价函数值:{0};'.format(test_loss))
def lnrn_save_load(self):
    """Train, evaluate, then exercise the checkpoint save/load round-trip.

    BUG FIX vs. the original: the evaluation DataLoader was built from
    the training set `ds` instead of `test_ds`, while the loss was
    normalized by `test_num`.
    """
    # Load dataset.
    ds = ChpA01E01Ds(num=1000)
    dl = DataLoader(ds, batch_size=10, shuffle=True)
    # Model, loss, optimizer (per-parameter learning rates; plain
    # model.parameters() with the global lr needs ~100 epochs).
    device = self.get_exec_device()
    model = ChpA01E01Model().to(device)
    criterion = torch.nn.MSELoss()
    learning_params = []
    for k, v in model.named_parameters():
        if k == 'w001':
            learning_params.append({'params': v, 'lr': 0.01})
        elif k == 'b001':
            learning_params.append({'params': v, 'lr': 0.1})
    optimizer = torch.optim.Adam(learning_params, lr=0.001)
    epochs = 10
    for epoch in range(epochs):
        model.train()
        for X, y_hat in dl:
            optimizer.zero_grad()
            X, y_hat = X.to(device), y_hat.to(device)
            y = model(X)
            loss = criterion(y, y_hat)
            loss.backward()
            optimizer.step()
        print('{0}: w={1}; b={2}; loss={3};'.format(epoch, model.w001, model.b001, loss))
    # Model evaluation on a freshly generated test set.
    test_num = 100
    test_ds = ChpA01E01Ds(num=test_num)
    model.eval()
    test_dl = DataLoader(test_ds, batch_size=30, shuffle=False)
    test_loss = 0
    with torch.no_grad():
        for X, y_hat in test_dl:
            X, y_hat = X.to(device), y_hat.to(device)
            test_loss += criterion(model(X), y_hat)
    # criterion returns per-batch means; average over the batch count.
    test_loss /= len(test_dl)
    print('测试集上代价函数值:{0};'.format(test_loss))
    print('模型保存和加载测试')
    # Save the trained weights, then reload them into a fresh model and
    # show the parameters before/after loading.
    torch.save(model.state_dict(), self.model_file)
    ckpt = torch.load(self.model_file)
    m1 = ChpA01E01Model()
    print('初始值:w={0}; b={1};'.format(m1.w001, m1.b001))
    m1.load_state_dict(ckpt)
    print('载入值:w={0}; b={1};'.format(m1.w001, m1.b001))
def ds_exp(self):
    """Peek at a single mini-batch coming out of the DataLoader."""
    loader = DataLoader(ChpA01E01Ds(num=1000), batch_size=10, shuffle=True)
    for X, y in loader:
        print('X: {0}; y: {1};'.format(X, y))
        break  # only the first batch is of interest
def lnrn_with_ds(self):
    """Linear regression trained from a Dataset/DataLoader with Adam."""
    # Dataset and mini-batch loader.
    loader = DataLoader(ChpA01E01Ds(num=1000), batch_size=10, shuffle=True)
    # Model parameters of y = w*x + b.
    w = torch.tensor(1.0, requires_grad=True)
    b = torch.tensor(0.0, requires_grad=True)
    criterion = torch.nn.MSELoss()
    # Adam with per-parameter learning rates.
    optimizer = torch.optim.Adam([
        {'params': w, 'lr': 0.01},
        {'params': b, 'lr': 0.1}
    ], lr=0.001)
    for epoch in range(10):
        for X, y_hat in loader:
            optimizer.zero_grad()
            loss = criterion(w * X + b, y_hat)
            loss.backward()
            optimizer.step()
        print('{0}: w={1}; b={2}; loss={3};'.format(epoch, w, b, loss))
def lnrn_with_model(self):
    """Linear regression using the nn.Module wrapper and Adam."""
    loader = DataLoader(ChpA01E01Ds(num=1000), batch_size=10, shuffle=True)
    model = ChpA01E01Model()
    criterion = torch.nn.MSELoss()
    # Per-parameter learning rates; plain model.parameters() with the
    # single global lr needs ~100 epochs to converge.
    param_groups = []
    for name, param in model.named_parameters():
        if name == 'w001':
            param_groups.append({'params': param, 'lr': 0.01})
        elif name == 'b001':
            param_groups.append({'params': param, 'lr': 0.1})
    optimizer = torch.optim.Adam(param_groups, lr=0.001)
    for epoch in range(10):
        model.train()
        for X, y_hat in loader:
            optimizer.zero_grad()
            loss = criterion(model(X), y_hat)
            loss.backward()
            optimizer.step()
        print('{0}: w={1}; b={2}; loss={3};'.format(epoch, model.w001, model.b001, loss))
def get_exec_device(self):
    """Pick the compute device string: 'cuda:<i>' when CUDA is
    available (preferring GPU 0), otherwise 'cpu'."""
    # List the visible GPUs (informational side effect, kept as-is).
    for gi in range(torch.cuda.device_count()):
        print(torch.cuda.get_device_name(gi))
    pref_gi = 0
    if not torch.cuda.is_available():
        return 'cpu'
    # A concrete index when one is preferred, generic 'cuda' otherwise.
    return 'cuda:{0}'.format(pref_gi) if pref_gi is not None else 'cuda'
def lnrn_gpu(self):
    """Same training loop as lnrn_with_model, run on the best device."""
    loader = DataLoader(ChpA01E01Ds(num=1000), batch_size=10, shuffle=True)
    device = self.get_exec_device()
    model = ChpA01E01Model().to(device)
    criterion = torch.nn.MSELoss()
    # Per-parameter learning rates; plain model.parameters() with the
    # single global lr needs ~100 epochs to converge.
    param_groups = []
    for name, param in model.named_parameters():
        if name == 'w001':
            param_groups.append({'params': param, 'lr': 0.01})
        elif name == 'b001':
            param_groups.append({'params': param, 'lr': 0.1})
    optimizer = torch.optim.Adam(param_groups, lr=0.001)
    for epoch in range(10):
        model.train()
        for X, y_hat in loader:
            optimizer.zero_grad()
            # Move each mini-batch onto the selected device.
            X, y_hat = X.to(device), y_hat.to(device)
            loss = criterion(model(X), y_hat)
            loss.backward()
            optimizer.step()
        print('{0}: w={1}; b={2}; loss={3};'.format(epoch, model.w001, model.b001, loss))
def lnrn_plain(self):
    """Manual gradient-descent linear regression (no torch optimizer)."""
    X, y_hat = self.load_ds()
    X = torch.tensor(X)
    y_hat = torch.tensor(y_hat)
    # Parameters of y = w*x + b with their individual step sizes.
    w = torch.tensor(1.0, requires_grad=True)
    b = torch.tensor(0.0, requires_grad=True)
    w_lr, b_lr = 0.01, 0.1
    for epoch in range(6000):
        y = w * X + b
        # Mean of 0.5*(y - y_hat)^2 over the 1000 samples.
        loss = (0.5 * (y - y_hat)**2).sum() / 1000.0
        loss.backward()
        # Manual SGD step, then reset the accumulated gradients.
        with torch.no_grad():
            w -= w_lr * w.grad
            w.grad = torch.zeros_like(w.grad)
            b -= b_lr * b.grad
            b.grad = torch.zeros_like(b.grad)
        print('{0}: w={1}; b={2}; loss={3};'.format(epoch, w, b, loss))
def lnrn_sgd(self):
    """Linear regression with torch.optim.SGD and per-parameter lrs."""
    X, y_hat = self.load_ds()
    X = torch.tensor(X)
    y_hat = torch.tensor(y_hat)
    w = torch.tensor(1.0, requires_grad=True)
    b = torch.tensor(0.0, requires_grad=True)
    # SGD with one learning rate per parameter (the trailing 0.001 is
    # the required default lr positional argument).
    optimizer = torch.optim.SGD([
        {'params': w, 'lr': 0.01},
        {'params': b, 'lr': 0.1}
    ], 0.001)
    for epoch in range(6000):
        optimizer.zero_grad()
        # Mean of 0.5*(w*X + b - y_hat)^2 over the 1000 samples.
        loss = (0.5 * (w * X + b - y_hat)**2).sum() / 1000.0
        loss.backward()
        optimizer.step()
        print('{0}: w={1}; b={2}; loss={3};'.format(epoch, w, b, loss))
def lnrn_adam(self):
    """Linear regression with torch.optim.Adam and per-parameter lrs."""
    X, y_hat = self.load_ds()
    X = torch.tensor(X)
    y_hat = torch.tensor(y_hat)
    w = torch.tensor(1.0, requires_grad=True)
    b = torch.tensor(0.0, requires_grad=True)
    # Adam with one learning rate per parameter.
    optimizer = torch.optim.Adam([
        {'params': w, 'lr': 0.01},
        {'params': b, 'lr': 0.1}
    ], lr=0.001)
    for epoch in range(6000):
        optimizer.zero_grad()
        # Mean of 0.5*(w*X + b - y_hat)^2 over the 1000 samples.
        loss = (0.5 * (w * X + b - y_hat)**2).sum() / 1000.0
        loss.backward()
        optimizer.step()
        print('{0}: w={1}; b={2}; loss={3};'.format(epoch, w, b, loss))
def lnrn_adam_mse(self):
X, y_hat = self.load_ds()
w = torch.tensor(1.0, requires_grad=True)
w_lr = 0.01
b = torch.tensor(0.0, requires_grad=True)
b_lr = 0.1
epochs = 1000
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam([
{'params': w, 'lr': 0.01},
{'params': b, 'lr': 0.1}
], lr=0.001)
X = torch.tensor(X)
y_hat = torch.tensor(y_hat)
for epoch in range(epochs):
optimizer.zero_grad()
y = w * X + b
loss = criterion(y, y_hat)
loss.backward()
| |
c_v_combined_propagated,
fill_value="extrapolate"
)(_np.concatenate((v_load, v_wash), axis=0))
c_combined_propagated[c_combined_propagated < 0] = 0
c_unbound_propagated = c_combined_propagated[:, :v_load.size]
c_wash_desorbed_propagated = c_combined_propagated[:, v_load.size:]
return c_unbound_propagated, c_wash_desorbed_propagated
def _sim_c_elution_desorption(self,
                              m_bound: _np.ndarray
                              ) -> _typing.Tuple[_np.ndarray,
                                                 _np.ndarray]:
    """Simulate elution step.

    The pre-computed elution peak shape is scaled by the bound amount
    of each species and converted to an outlet concentration via the
    elution flow rate.

    Parameters
    ----------
    m_bound
        Vector with amount of product being bound to the column.
        `m_bound.size == n_species`

    Returns
    -------
    c_elution
        Outlet concentration profile during the elution.
    b_elution_peak
        Boolean vector. Peak is collected where the value is `True`.
    """
    assert self._elution_f > 0
    assert self._elution_t > 0
    n_steps = int(round(self._elution_t / self._dt))
    # Scale the peak profile by the captured amount of each species and
    # divide by the flow rate to obtain concentrations.
    peak_shape = self._p_elution_peak[_np.newaxis, :n_steps]
    c_elution = peak_shape * m_bound[:, _np.newaxis] / self._elution_f
    # Zero-pad so the profile spans the entire elution duration.
    shortfall = n_steps - c_elution.shape[1]
    if shortfall > 0:
        c_elution = _np.pad(c_elution,
                            ((0, 0), (0, shortfall)),
                            mode="constant")
    # Boolean mask - `True` where the peak is being collected.
    return c_elution, self._elution_peak_mask
def _sim_c_elution_buffer(self, n_time_steps: int) -> _np.ndarray:
    """Get elution buffer composition at the outlet of the column.

    The composition is constant throughout the elution step. Override
    this method to simulate a linear gradient or transient phenomena at
    the beginning of the peak cut.

    Parameters
    ----------
    n_time_steps
        Duration of elution step in number of time steps.

    Returns
    -------
    ndarray
        Buffer concentration profile at the outlet of the column
        during the elution step.
    """
    # Column vector with one concentration entry per species.
    buffer_c = \
        self.elution_buffer_c.reshape(self.elution_buffer_c.size, 1)
    assert buffer_c.size == 0 \
        or buffer_c.size == self._n_species, \
        f"Elution buffer composition must be either empty or have" \
        f" a concentration value for each specie."
    assert _np.all(buffer_c >= 0), \
        "Concentration values in elution buffer must be >= 0"
    if buffer_c.size == 0:
        # An empty definition means an all-zero buffer.
        buffer_c = _np.zeros([self._n_species, 1])
    self.log.i_data(self._log_tree,
                    "elution_buffer_composition",
                    buffer_c)
    # Broadcast the constant composition across the time axis.
    return buffer_c * _np.ones_like(self._t[:n_time_steps])
# noinspection PyMethodMayBeStatic,PyUnusedLocal
def _sim_c_regeneration(self,
                        m_bound: _np.ndarray
                        ) -> _typing.Optional[_np.ndarray]:
    """Simulate regeneration step.

    The base implementation provides no regeneration model; `None`
    (interpreted as an all-zero profile) is returned. Override to
    simulate e.g. a regeneration peak.

    Parameters
    ----------
    m_bound
        Vector with amount of product being bound to the column at
        the beginning of the regeneration step.
        `m_bound.size == n_species`.

    Returns
    -------
    Optional[ndarray]
        Outlet concentration profile during regeneration step.
    """
    return None
def _sim_c_out_cycle(self,
                     f_load: _np.ndarray,
                     c_load: _np.ndarray
                     ) -> _typing.Tuple[_typing.Optional[_np.ndarray],
                                        _typing.Optional[_np.ndarray],
                                        _np.ndarray,
                                        _np.ndarray,
                                        _typing.Optional[_np.ndarray]]:
    """Simulates load-wash-elution-regeneration steps.

    Regeneration is optional.

    This function can be replaced in case user wants to use some
    other variation of bind-elution dynamics.

    Elution peak cut is applied in this function.
    Elution peak shape must be defined by now.

    Return profiles that are `None` are considered being zero.

    Parameters
    ----------
    f_load
        Inlet (recycle + load) flow rate profile for a cycle.
        The flow rate might be different during wash recycle.
    c_load
        Inlet (recycle + load) concentration profile.

    Returns
    -------
    c_load
        Conc profile at the outlet of the column during load.
    c_wash
        Conc profile at the outlet of the column during wash.
    c_elution
        Conc profile at the outlet of the column during elution.
    b_elution
        Boolean mask for elution step. `True` where peak is being
        collected.
    c_regeneration
        Conc profile at the outlet of the column during
        regeneration.
    """
    # Sanity-check that `_prepare_simulation` populated the step
    # parameters. (Fix: the original asserted `self._load_f > 0`
    # twice; the duplicate is removed.)
    assert self._load_f > 0
    assert self._wash_f > 0
    assert self._wash_t > 0
    assert self._elution_f > 0
    assert self._elution_t > 0
    assert self._cv > 0
    # Evaluate binding.
    c_bound, c_unbound = self._sim_c_load_binding(f_load, c_load)
    # Log masses (concentration * flow * dt, summed over time).
    m_load = (c_load * f_load[_np.newaxis, :]).sum(1) * self._dt
    m_bound = (c_bound * f_load[_np.newaxis, :]).sum(1) * self._dt
    self.log.i_data(self._cycle_tree,
                    "column_utilization",
                    m_bound / self._cv / self.load_bt.get_total_bc())
    self.log.i_data(self._cycle_tree, "m_load", m_load)
    self.log.i_data(self._cycle_tree, "m_bound", m_bound)
    self.log.i_data(self._cycle_tree, "m_unbound", m_load - m_bound)
    self.log.d_data(self._cycle_tree, "f_load", f_load)
    self.log.d_data(self._cycle_tree, "c_load", c_load)
    self.log.d_data(self._cycle_tree, "c_bound", c_bound)
    self.log.d_data(self._cycle_tree, "c_unbound", c_unbound)
    # Evaluate desorption during wash.
    c_wash_desorbed = None
    if self.wash_desorption:
        c_wash_desorbed = self._sim_c_wash_desorption(f_load, c_bound)
        if c_wash_desorbed.size > 0:
            # Subtract desorbed material from bound material.
            # NOTE(review): the logged mass below includes the
            # `* self._wash_f * self._dt` factor while this
            # subtraction does not — confirm the units of
            # `_sim_c_wash_desorption`'s return value.
            m_bound -= c_wash_desorbed.sum(1)
            # Log.
            self.log.i_data(self._cycle_tree,
                            "m_wash_desorbed",
                            c_wash_desorbed.sum(1) * self._wash_f * self._dt)
            self.log.d_data(self._cycle_tree,
                            "c_wash_desorbed",
                            c_wash_desorbed)
    # Propagate unbound and desorbed material throughout the column.
    c_out_load = c_unbound
    c_out_wash = c_wash_desorbed
    if self.load_recycle or self.wash_recycle:
        c_out_load, c_out_wash = \
            self._sim_c_recycle_propagation(f_load,
                                            c_unbound,
                                            c_wash_desorbed)
    # Get elution peak.
    c_out_elution, elution_peak_mask = \
        self._sim_c_elution_desorption(m_bound)
    # Log elution masses; the peak mask selects the collected cut.
    m_elution_peak = (c_out_elution * elution_peak_mask[_np.newaxis, :]
                      ).sum(1) * self._elution_f * self._dt
    m_elution = c_out_elution.sum(1) * self._elution_f * self._dt
    self.log.i_data(self._cycle_tree,
                    "m_elution_peak", m_elution_peak)
    self.log.i_data(self._cycle_tree,
                    "m_elution", m_elution)
    self.log.i_data(self._cycle_tree,
                    "m_elution_peak_cut_loss", m_elution - m_elution_peak)
    # Get regeneration peak from the material left after elution.
    c_out_regeneration = self._sim_c_regeneration(
        m_bound - c_out_elution.sum(1) * self._elution_f * self._dt)
    return c_out_load, c_out_wash, c_out_elution, \
        elution_peak_mask, c_out_regeneration
def _calculate(self):
    """Run the cyclic load-wash-elution simulation.

    Consumes the inlet profiles in `self._c` / `self._f`, simulates
    each cycle via `_sim_c_out_cycle`, applies load/wash recycling
    onto the inlet of subsequent cycles, and writes the collected
    elution peaks back into `self._c` / `self._f` as the outlet.
    """
    # Pre calculate parameters and repetitive profiles.
    self._prepare_simulation()
    # Assert proper list of binding species.
    binding_species = [i for i in range(self._n_species)
                       if i not in self.non_binding_species]
    assert len(binding_species) > 0
    # Copy inlet vectors.
    c_in_load = self._c[binding_species].copy()
    f_in_load = self._f.copy()
    f_in_i_end = min(_utils.vectors.true_end(f_in_load > 0), self._t.size)
    # Zero out concentrations after the inlet flow has stopped.
    c_in_load[:, f_in_i_end:] = 0
    # Clear for results.
    self._c[:] = 0
    self._f[:] = 0
    # Prepare logger.
    log_data_cycles = list()
    self.log.set_branch(self._log_tree, "cycles", log_data_cycles)
    # Variable to store wash recycle to.
    previous_c_bt_wash: _typing.Optional[_np.ndarray] = None
    # Loop across cycles.
    for i in range(self._cycle_start_i_list.size):
        # Load-wash-elution-regeneration-equilibration steps for a column.
        # Load step starts at `self._cycle_start_i_list[i]`.
        # Prepare logger for this cycle.
        self._cycle_tree = dict()
        log_data_cycles.append(self._cycle_tree)
        # Load start and end time as the column sees it.
        if i > 0 and self.load_recycle:
            # Column sees leftovers from previous load during recycling.
            cycle_load_i_start = self._cycle_start_i_list[i - 1]
        else:
            cycle_load_i_start = self._cycle_start_i_list[i]
        # Calc cycle end (either next cycle or end or simulation time).
        if i + 1 < self._cycle_start_i_list.size:
            cycle_load_i_end = self._cycle_start_i_list[i + 1]
        else:
            cycle_load_i_end = f_in_i_end - 1
        # Log results.
        self.log.i_data(self._cycle_tree,
                        "i_cycle_load_start",
                        cycle_load_i_start)
        self.log.i_data(self._cycle_tree,
                        "i_cycle_load_step_start",
                        self._cycle_start_i_list[i])
        self.log.i_data(self._cycle_tree,
                        "i_cycle_load_end",
                        cycle_load_i_end)
        # Calc profiles at column outlet.
        c_out_load, c_out_wash, c_out_elution, \
            b_out_elution, c_out_regeneration = self._sim_c_out_cycle(
                f_in_load[cycle_load_i_start:cycle_load_i_end],
                c_in_load[:, cycle_load_i_start:cycle_load_i_end]
            )
        self.log.d_data(self._cycle_tree,
                        "c_out_load", c_out_load)
        self.log.d_data(self._cycle_tree,
                        "c_out_wash", c_out_wash)
        self.log.d_data(self._cycle_tree,
                        "c_out_elution", c_out_elution)
        self.log.d_data(self._cycle_tree,
                        "b_out_elution", b_out_elution)
        self.log.d_data(self._cycle_tree,
                        "c_out_regeneration", c_out_regeneration)
        # Load recycle.
        if self.load_recycle:
            # Recycle load during the load step: breakthrough from
            # this column is fed into the next cycle's inlet.
            i_load_start_rel = self._cycle_start_i_list[i] \
                - cycle_load_i_start
            c_load_recycle = c_out_load[:, i_load_start_rel:]
            c_in_load[:, self._cycle_start_i_list[i]:cycle_load_i_end] = \
                c_load_recycle
            self.log.i_data(self._cycle_tree, "m_load_recycle",
                            c_load_recycle.sum(1)
                            * self._load_f * self._dt)
            self.log.d_data(self._cycle_tree, "c_load_recycle",
                            c_load_recycle)
            # Losses during load == bt through 2nd column.
            # NOTE(review): this indexes a single time step
            # (`[:, i_load_start_rel]`) rather than the slice
            # `[:, :i_load_start_rel]` — confirm whether the losses
            # should cover the whole pre-recycle span.
            c_loss_bt_2nd_column = c_out_load[:, i_load_start_rel]
            self.log.i_data(self._cycle_tree, "m_loss_bt_2nd_column",
                            c_loss_bt_2nd_column.sum()
                            * self._dt * self._load_f)
            self.log.d_data(self._cycle_tree, "c_loss_bt_2nd_column",
                            c_loss_bt_2nd_column)
        else:
            # report losses during load
            m_loss_load = c_out_load.sum() * self._dt * self._load_f
            self.log.i_data(self._cycle_tree, "m_loss_load", m_loss_load)
        # Wash recycle.
        if self.wash_recycle:
            if previous_c_bt_wash is not None \
                    and previous_c_bt_wash.size > 0:
                # Clip wash recycle duration if needed.
                i_wash_duration = min(
                    self._wash_recycle_i_duration,
                    self._t.size - self._cycle_start_i_list[i])
                # Log losses due to discarding load bt during wash recycle.
                # `s` is a view into `c_in_load`; it must be read for
                # logging BEFORE being overwritten below.
                s = c_in_load[
                    :,
                    self._cycle_start_i_list[i]:self._cycle_start_i_list[i]
                    + i_wash_duration]
                self.log.i_data(self._cycle_tree,
                                "m_loss_load_bt_during_wash_recycle",
                                s.sum() * self._dt * self._load_f)
                self.log.d_data(self._cycle_tree,
                                "c_lost_load_during_wash_recycle", s)
                self.log.d_data(self._cycle_tree, "c_wash_recycle",
                                previous_c_bt_wash[:, :i_wash_duration])
                self.log.i_data(
                    self._cycle_tree, "m_wash_recycle",
                    previous_c_bt_wash[:, :i_wash_duration].sum(1)
                    * self._dt * self._wash_f)
                # Apply previous wash recycle onto the inlet profile
                # (in-place write through the `s` view) and switch the
                # inlet flow to wash flow for that span.
                s[:] = previous_c_bt_wash[:, :i_wash_duration]
                f_in_load[self._cycle_start_i_list[i]:
                          self._cycle_start_i_list[i]
                          + i_wash_duration] = self._wash_f
            # Save wash from this cycle to be used during the next cycle.
            previous_c_bt_wash = c_out_wash
        else:
            # Report losses during wash.
            if c_out_wash is None:
                c_out_wash = _np.zeros(
                    [len(binding_species),
                     int(round(self._wash_t / self._dt))])
            # NOTE(review): wash losses are scaled by `self._load_f`;
            # verify this should not be `self._wash_f`.
            m_loss_wash = c_out_wash.sum() * self._dt * self._load_f
            self.log.i_data(self._cycle_tree, "m_loss_wash", m_loss_wash)
        # Elution: locate the collected peak cut within the global
        # time vector, clipped to the simulation end.
        [i_el_rel_start, i_el_rel_end] = \
            _utils.vectors.true_start_and_end(b_out_elution)
        # NOTE(review): `c_out_wash.shape[1]` assumes `c_out_wash` is
        # not `None` on the wash-recycle path — confirm against
        # `_sim_c_out_cycle`'s return contract.
        i_el_start = min(
            self._t.size,
            cycle_load_i_end + c_out_wash.shape[1] + i_el_rel_start)
        i_el_end = min(
            self._t.size,
            cycle_load_i_end + c_out_wash.shape[1] + i_el_rel_end)
        i_el_rel_end = i_el_rel_start + i_el_end - i_el_start
        # Log.
        self.log.i_data(self._cycle_tree, "i_elution_start", i_el_start)
        self.log.i_data(self._cycle_tree, "i_elution_end", i_el_end)
        # Write to global outlet.
        self._f[i_el_start:i_el_end] = self._elution_f
        self._c[binding_species, i_el_start:i_el_end] = \
            c_out_elution[:, i_el_rel_start:i_el_rel_end]
class ACC(AlternatingChromatography):
"""Alternating column chromatography without recycling.
Alternating load-bind-elution twin-column chromatography without
recycling of overloaded or washed out material.
This class offers no dynamics for desorption during wash step.
Parameters
----------
t
Simulation time vector.
Starts with 0 and has a constant time step.
uo_id
Unique identifier.
load_bt
Load breakthrough logic.
peak_shape_pdf
Elution peak shape.
gui_title
Readable title for GUI. Default = "ACC".
Notes
-----
For list of attributes refer to :class:`AlternatingChromatography`.
See Also
--------
:class:`AlternatingChromatography`
Examples
--------
>>> | |
['client+industry', 'client+business'],
'client_interview': ['client+interview'],
'client_meeting': ['client+meeting'],
'client_mission': ['client+mission', 'client_business+mission'],
'client_relationship': ['client+relationship'],
'client_request': ['client+request'],
'client_requirement': ['client+requirement'],
'client_role': ['client+role'],
'client_satisfaction': ['client+satisfaction'],
'client_server': ['client+server'],
'client_skill': ['client+skill', 'direct+client'],
'client_solution_executive': ['client+solution+executive'],
'client_success': ['client+success', 'customer+success'],
'client_technical_architect': [ 'client+technical+architect',
'customer+technical+architect'],
'client_technical_solutioner': ['client+technical+solutioner'],
'client_technical_specialist': [ 'client+technical+specialist',
'customer+technical+specialist'],
'client_training': ['client+training'],
'client_transformation': ['client+transformation'],
'client_travel': ['client+travel', 'travel+client', 'travel+onsite'],
'client_unit_executive': ['client+unit+executive'],
'clique': ['clique'],
'clojure': ['clojure'],
'cloud': ['cloud'],
'cloud_9': ['cloud+9'],
'cloud_9_sclm': ['cloud+9+sclm'],
'cloud_9_sclm_zos': ['cloud+9+sclm+zos'],
'cloud_access_security_broker': ['cloud+access+security+broker'],
'cloud_albedo': ['cloud+albedo'],
'cloud_analytics': ['cloud+analytics'],
'cloud_application': ['cloud+application', 'cloud+app'],
'cloud_architect': ['cloud+architect'],
'cloud_atlas': ['cloud+atlas'],
'cloud_backup': ['cloud+backup'],
'cloud_cms': ['cloud+cms'],
'cloud_collaboration': ['cloud+collaboration'],
'cloud_communications': ['cloud+communications', 'cloud+communication'],
'cloud_computing': ['cloud+computing'],
'cloud_computing_architecture': [ 'cloud+computing+architecture',
'cloud+computing+architect'],
'cloud_computing_company': ['cloud+computing+company'],
'cloud_computing_comparison': ['cloud+computing+comparison'],
'cloud_computing_issues': [ 'cloud+computing+issues',
'cloud+computing+problem'],
'cloud_computing_platform': ['cloud+computing+platform', 'cloud+computing'],
'cloud_computing_roi_model': ['cloud+computing+roi+model'],
'cloud_computing_security': ['cloud+computing+security'],
'cloud_computing_system': ['cloud+computing+system', 'cloud+computing'],
'cloud_computing_tco_model': ['cloud+computing+tco+model'],
'cloud_container': ['cloud+container'],
'cloud_container_mordernization': ['cloud+container+mordernization'],
'cloud_container_platform_architecture': [ 'cloud+container+platform+architecture'],
'cloud_container_re_platform': ['cloud+container+re+platform'],
'cloud_cover': ['cloud+cover'],
'cloud_cruiser': ['cloud+cruiser'],
'cloud_data_management_interface': [ 'cloud+data+management+interface',
'cloud+management+interface'],
'cloud_database': ['cloud+database'],
'cloud_deployment': ['cloud+deployment'],
'cloud_desktop': ['cloud+desktop', 'cloud+pc'],
'cloud_developer': ['cloud+developer'],
'cloud_elements': ['cloud+elements', 'cloud+element'],
'cloud_engineering': ['cloud+engineering'],
'cloud_experiment': ['cloud+experiment'],
'cloud_feedback': ['cloud+feedback', 'cloud+status'],
'cloud_files': ['cloud+files', 'cloud+document'],
'cloud_forcing': ['cloud+forcing'],
'cloud_foundry': ['cloud+foundry'],
'cloud_foundry_certified_developer': [ 'cloud+foundry+certified+developer',
'cfcd exam+certification'],
'cloud_fraction': ['cloud+fraction'],
'cloud_front': ['cloud+front'],
'cloud_gaming': ['cloud+gaming'],
'cloud_gate': ['cloud+gate'],
'cloud_gateway': ['cloud+gateway'],
'cloud_hosting': ['cloud+hosting'],
'cloud_ide': ['cloud+ide', 'cloud+integrated_development_environment'],
'cloud_imperium_games': ['cloud+imperium+game'],
'cloud_imperium_games_corporation': ['cloud+imperium+game+company'],
'cloud_infrastructure': ['cloud+infrastructure'],
'cloud_infrastructure_management_interface': [ 'cloud+infrastructure+management+interface'],
'cloud_iridescence': ['cloud+iridescence'],
'cloud_load_balancing': ['cloud+load+balancing', 'cloud+load+balance'],
'cloud_management': ['cloud+management'],
'cloud_manufacturing': ['cloud+manufacturing'],
'cloud_microphysics': ['cloud+microphysics'],
'cloud_migration': ['cloud+migration'],
'cloud_native_computing_foundation': [ 'cloud+native+computing+foundation',
'cloud+native+computing'],
'cloud_printing': ['cloud+printing', 'cloud+print'],
'cloud_provider': ['cloud+provider', 'cloud+provide'],
'cloud_research': ['cloud+research'],
'cloud_robotics': ['cloud+robotics'],
'cloud_school': ['cloud+school'],
'cloud_security': ['cloud+security'],
'cloud_security_alliance': ['cloud+security+alliance', 'cloud+security'],
'cloud_server': ['cloud+server'],
'cloud_service': ['cloud+service'],
'cloud_skill': ['cloud+skill'],
'cloud_software': ['cloud+software'],
'cloud_sponge': ['cloud+sponge'],
'cloud_storage': ['cloud+storage'],
'cloud_storage_gateway': ['cloud+storage+gateway'],
'cloud_storage_service': ['cloud+storage+service', 'cloud+storage'],
'cloud_technology': ['cloud+technology'],
'cloud_tool': ['cloud+tool'],
'cloud_trail': ['cloud+trail'],
'cloud_watch': ['cloud+watch'],
'cloudburst': ['cloudburst'],
'cloudburst_power': ['cloudburst+power'],
'cloudburst_power_systems': ['cloudburst+power+systems'],
'cloudera': ['cloudera'],
'cloudera_administrator_certification': [ 'cloudera+administrator+certification'],
'cloudera_certification': ['cloudera+certification'],
'cloudera_certified_associate': ['cloudera+certified+associate'],
'cloudera_certified_data_engineer': ['cloudera+certified+data+engineer'],
'cloudera_certified_professional': ['cloudera+certified+professional'],
'cloudera_data_analyst_certification': [ 'cloudera+data+analyst+certification'],
'cloudera_spark_hadoop_certification': [ 'cloudera+spark+hadoop+certification'],
'cluster': ['cluster'],
'cluster_analysis': ['cluster+analysis'],
'cluster_computing': ['cluster+computing'],
'cluster_systems': ['cluster+systems'],
'cluster_systems_management': ['cluster+systems+management'],
'cluster_systems_management_linux': ['cluster+systems+management+linux'],
'clustered_file_system': ['clustered+file+system'],
'clustering_model': ['clustering+model'],
'cmos_device': ['cmos+device'],
'coach': ['coach'],
'coachable': ['coachable', 'receptive+request', 'know+follow'],
'coaching_skill': ['coaching+skill'],
'cobol': ['cobol'],
'cobol_aix': ['cobol+aix'],
'cobol_cics': ['cobol+cics'],
'cobol_cicsvs': ['cobol+cicsvs'],
'cobol_cicsvs_command': ['cobol+cicsvs+command'],
'cobol_cicsvs_command_level': ['cobol+cicsvs+command+level'],
'cobol_cicsvs_command_level_conversion': [ 'cobol+cicsvs+command+level+conversion'],
'cobol_cicsvs_command_level_conversion_aid': [ 'cobol+cicsvs+command+level+conversion+aid'],
'cobol_os390': ['cobol+os390'],
'cobol_os390_vm': ['cobol+os390+vm'],
'cobol_report_writer': ['cobol+report+writer'],
'cobol_systems': ['cobol+systems'],
'cobol_vseesa': ['cobol+vseesa'],
'cobol_windows': ['cobol+windows'],
'coca_cola': ['coca+cola'],
'code_quality': ['code+quality'],
'coffeescript': ['coffeescript'],
'cognitive_framework': ['cognitive+framework'],
'cognitive_neuroscience': ['cognitive+neuroscience'],
'cognitive_psychology': ['cognitive+psychology'],
'cognitive_science': ['cognitive+science'],
'cognitive_skill': ['cognitive+skill'],
'cognitive_test': ['cognitive+test'],
'cognitive_testing': ['cognitive+testing'],
'cognitive_training': ['cognitive+training', 'cognitive+reskill'],
'cognizant': ['cognizant'],
'cognos': ['cognos', 'go+_search'],
'cognos_8': ['cognos+8'],
'cognos_8_business': ['cognos+8+business'],
'cognos_8_business_intelligence': ['cognos+8+business+intelligence'],
'cognos_8_controller': ['cognos+8+controller'],
'cognos_8_go!': ['cognos+8+go!'],
'cognos_8_go!_mobile': ['cognos+8+go!+mobile'],
'cognos_8_planning': ['cognos+8+planning'],
'cognos_analysis': ['cognos+analysis'],
'cognos_analysis_microsoft': ['cognos+analysis+microsoft'],
'cognos_analysis_microsoft_excel': ['cognos+analysis+microsoft+excel'],
'cognos_analytic_applications': ['cognos+analytic+applications'],
'cognos_application': ['cognos+application'],
'cognos_application_development': ['cognos+application+development'],
'cognos_application_development_tools': [ 'cognos+application+development+tools'],
'cognos_axiant_4gl': ['cognos+axiant+4gl'],
'cognos_business': ['cognos+business'],
'cognos_business_intelligence': ['cognos+business+intelligence'],
'cognos_business_intelligence_financial': [ 'cognos+business+intelligence+financial'],
'cognos_business_intelligence_financial_performance': [ 'cognos+business+intelligence+financial+performance'],
'cognos_business_intelligence_financial_performance_management': [ 'cognos+business+intelligence+financial+performance+management'],
'cognos_business_viewpoint': ['cognos+business+viewpoint'],
'cognos_consolidator': ['cognos+consolidator'],
'cognos_consumer_insight': ['cognos+consumer+insight'],
'cognos_controller': ['cognos+controller'],
'cognos_customer': ['cognos+customer'],
'cognos_customer_performance': ['cognos+customer+performance'],
'cognos_customer_performance_sales': ['cognos+customer+performance+sales'],
'cognos_customer_performance_sales_analytics': [ 'cognos+customer+performance+sales+analytics'],
'cognos_decisionstream': ['cognos+decisionstream'],
'cognos_executive_viewer': ['cognos+executive+viewer'],
'cognos_express': ['cognos+express'],
'cognos_finance': ['cognos+finance'],
'cognos_financial': ['cognos+financial'],
'cognos_financial_statement': ['cognos+financial+statement'],
'cognos_financial_statement_reporting': [ 'cognos+financial+statement+reporting'],
'cognos_impromptu': ['cognos+impromptu'],
'cognos_impromptu_web': ['cognos+impromptu+web'],
'cognos_impromptu_web_reports': ['cognos+impromptu+web+reports'],
'cognos_insight': ['cognos+insight'],
'cognos_metrics_manager': ['cognos+metrics+manager'],
'cognos_microsoft': ['cognos+microsoft'],
'cognos_microsoft_office': ['cognos+microsoft+office'],
'cognos_mobile': ['cognos+mobile'],
'cognos_noticecast': ['cognos+noticecast'],
'cognos_now!': ['cognos+now!'],
'cognos_planning': ['cognos+planning'],
'cognos_powerhouse_4gl': ['cognos+powerhouse+4gl'],
'cognos_powerplay': ['cognos+powerplay'],
'cognos_query': ['cognos+query'],
'cognos_real-time_monitoring': ['cognos+real-time+monitoring'],
'cognos_series_7': ['cognos+series+7'],
'cognos_statistics': ['cognos+statistics'],
'cognos_supply': ['cognos+supply'],
'cognos_supply_chain': ['cognos+supply+chain'],
'cognos_supply_chain_performance': ['cognos+supply+chain+performance'],
'cognos_supply_chain_performance_procurement': [ 'cognos+supply+chain+performance+procurement'],
'cognos_supply_chain_performance_procurement_analytics': [ 'cognos+supply+chain+performance+procurement+analytics'],
'cognos_tm1': ['cognos+tm1'],
'cognos_visualizer': ['cognos+visualizer'],
'cognos_workforce_performance': ['cognos+workforce+performance'],
'collaborate': ['collaborate'],
'collaboration': ['collaboration'],
'collaboration_solutions_specialist': [ 'collaboration+solutions+specialist'],
'collaborative_culture': ['collaborative+culture'],
'collaborative_filtering': ['collaborative+filtering'],
'collaborative_software': ['collaborative+software'],
'collection': ['collection'],
'column_oriented_database': ['column+oriented+database'],
'combat_logger': ['combat+logger'],
'command': ['command'],
'commerce': ['commerce'],
'commerical_license': ['commerical+license'],
'commerical_software': ['commerical+software'],
'common_interface': ['ci+'],
'common_lisp': ['common+lisp'],
'commonstore': ['commonstore'],
'commonstore_exchange': ['commonstore+exchange'],
'commonstore_exchange_server': ['commonstore+exchange+server'],
'commonstore_lotus': ['commonstore+lotus'],
'commonstore_lotus_domino': ['commonstore+lotus+domino'],
'commonstore_sap': ['commonstore+sap'],
'communication': ['communication'],
'communication_bus': ['communication+bus'],
'communication_controller': ['communication+controller'],
'communication_controller_for_linux': ['communication+controller+linux'],
'communication_controller_for_linux_on_system': [ 'communication+controller+linux+system'],
'communication_controller_linux': ['communication+controller+linux'],
'communication_controller_linux_on_system': [ 'communication+controller+linux+on+system'],
'communication_controller_linux_on_system_z': [ 'communication+controller+linux+on+system+z'],
'communication_designer': ['communication+designer'],
'communication_device': ['communication+device'],
'communication_player': ['communication+player'],
'communication_protocol': ['communication+protocol'],
'communication_security': ['communication+security'],
'communication_skill': [ 'communication+skill',
'communication+relate',
'explain+mission',
'confident+verbal',
'clarity+message'],
'communications_server': ['communications+server', 'communication+server'],
'communications_server_aix': ['communications+server+aix'],
'communications_server_for_aix': ['aix+communication+server'],
'communications_server_for_linux': ['communication+linux+server'],
'communications_server_for_linux_on_zseries': [ 'communication+linux+server+zseries'],
'communications_server_for_windows': ['communication+server+windows'],
'communications_server_linux': ['communications+server+linux'],
'communications_server_linux_on_zseries': [ 'communications+server+linux+on+zseries'],
'communications_server_windows': ['communications+server+windows'],
'community_cloud': ['community+cloud'],
'company': ['company'],
'company_asset': ['company+asset'],
'comparative_literature': ['comparative+literature'],
'comparative_religion': ['comparative+religion'],
'comparative_studies': ['comparative+studies'],
'comparison_of_java_and_c++': [ 'c++_versus_java',
'c++_vs_java',
'comparison_of_c++_and_java',
'comparison_of_java_&_c++',
'comparison_of_java_to_c++',
'java_versus_c++',
'java_vs_c++'],
'competitive_analysis': ['competitive+analysis'],
'competitive_research': ['competitive+research'],
'competitive_workplace': ['competitive+workplace'],
'competitiveness': ['competitiveness'],
'compiler': ['compiler'],
'compiler_and_library': ['compiler+library'],
'compiler_and_library_for_rexx': ['compiler+library+rexx'],
'compiler_library': ['compiler+library'],
'compiler_library_rexx': ['compiler+library+rexx'],
'compiler_library_rexx_on_zseries': ['compiler+library+rexx+on+zseries'],
'complex_circuit': ['complex+circuit'],
'complexity_theory': ['complexity+theory'],
'compliance': ['compliance'],
'compliance_engineer': ['compliance+engineer'],
'compliance_lead': ['compliance+lead'],
'compliance_policy': ['compliance+policy'],
'compliance_test': ['compliance+test'],
'compliance_testing': ['compliance+testing'],
'component_object_model': ['com+_events'],
'computational_biology': ['computational+biology'],
'computational_chemistry': ['computational+chemistry'],
'computational_complexity': ['computational+complexity'],
'computational_linguistics': ['computational+linguistics'],
'computational_neuroscience': ['computational+neuroscience'],
'computational_physics': ['computational+physics'],
'computational_science': ['computational+science'],
'computer_architecture': ['computer+architecture', 'pc+architect'],
'computer_configuration': ['computer+configuration'],
'computer_hardware': ['computer+hardware'],
'computer_model': ['computer+model'],
'computer_operator': ['computer+operator'],
'computer_readable_medium': ['computer+readable+medium'],
'computer_science': ['computer+science'],
'computer_security': ['computer+security'],
'computer_vision': ['computer+vision'],
'computerworld': ['computerworld'],
'computing_platform': ['computing+platform'],
'concept_artist': ['concept+artist'],
'concurrency': ['concurrency'],
'concurrent_versioning_system': ['concurrent+versioning+system'],
'conda': ['conda'],
'confidence': ['confidence'],
'configurability': ['configurability'],
'configuration': ['configuration'],
'configuration_management': ['configuration+management'],
'configuration_management_software': ['configuration+management+software'],
'configure': ['configure'],
'conflict_resolution': ['conflict+resolution', 'combat+description'],
'connections_enterprise': ['connections+enterprise', 'connect+enterprise'],
'connections_enterprise_content': [ 'connections+enterprise+content',
'connect+content+enterprise'],
'connections_enterprise_content_edition': [ 'connections+enterprise+content+edition',
'connect+content+edition+enterprise'],
'connectivity': ['connectivity'],
'constantly_strive': ['constantly+strive'],
'construction': ['construction'],
'consult': ['consult'],
'consultant': ['consultant'],
'consulting': ['consulting'],
'consulting_skill': ['consulting+skill'],
'consumer_good': ['consumer+good'],
'consumer_research': ['consumer+research'],
'container': ['container'],
'container_software': ['container+software'],
'containerization': ['containerization'],
'content_analytics': ['content+analytics', 'analytics+content'],
'content_analytics_enterprise': ['content+analytics+enterprise'],
'content_analytics_enterprise_search': [ 'content+analytics+enterprise+search'],
'content_analytics_with_enterprise': ['analytics+content+enterprise'],
'content_analytics_with_enterprise_search': [ 'analytics+content+enterprise+find'],
'content_analyzer': ['content+analyzer', 'analyzer+content'],
'content_based_filtering': ['content+based+filtering'],
'content_classification': [ 'content+classification',
'classification+content'],
'content_collector': ['content+collector', 'collector+content'],
'content_collector_for_sap': ['collector+content+sap'],
'content_collector_for_sap_applications': ['app+collector+content+sap'],
'content_collector_sap': ['content+collector+sap'],
'content_collector_sap_applications': [ 'content+collector+sap+applications'],
'content_designer': ['content+designer'],
'content_integrator': ['content+integrator'],
'content_management': ['content+management'],
'content_management_framework': ['content+management+framework'],
'content_manager': ['content+manager'],
'content_manager_enterprise': [ 'content+manager+enterprise',
'content+enterprise+manager'],
'content_manager_enterprise_edition': [ 'content+manager+enterprise+edition',
'content+edition+enterprise+manager'],
'content_manager_for_iseries': ['content+iseries+manager'],
'content_manager_for_z_os': ['content+manager+zos'],
'content_manager_iseries': ['content+manager+iseries'],
'content_manager_ondemand': ['content+manager+ondemand'],
'content_manager_ondemand_for_multiplatforms': [ 'content+manager+multiplatforms+ondemand'],
'content_manager_ondemand_for_z_os': ['content+manager+ondemand+zos'],
'content_manager_ondemand_i': ['content+manager+ondemand+i'],
'content_manager_ondemand_i5os': ['content+manager+ondemand+i5os'],
'content_manager_ondemand_multiplatforms': [ 'content+manager+ondemand+multiplatforms'],
'content_manager_ondemand_zos': ['content+manager+ondemand+zos'],
'content_manager_videocharger': ['content+manager+videocharger'],
'content_manager_zos': ['content+manager+zos'],
'content_navigator': ['content+navigator'],
'continental_casualty': ['continental+casualty'],
'continental_philosophy': ['continental+philosophy'],
'continuity': ['continuity'],
'continuous_deployment': ['continuous+deployment'],
'continuous_improvement': ['continuous+improvement', 'lessen+load'],
'continuous_integration': ['continuous+integration'],
'contract': ['contract'],
'contract_number': [ 'avp[\\d]+',
'avpn[\\d]+',
'bvp[\\d]+',
'con[\\d]+',
'contract [x]+',
'contract avp[\\d]+',
'contract avpn[\\d]+',
'contract bvp[\\d]+',
'contract con[\\d]+',
'contract csm[\\d]+',
'contract mis[\\d]+',
'contract nbi[\\d]+',
'csm[\\d]+',
'mis[\\d]+',
'nbi[\\d]+'],
'contract_renewal': ['contract+renewal', 'contract+renew'],
'contract_template': ['contract+template'],
'contractor': ['contractor'],
'contrail_cloud': ['contrail+cloud'],
'contrail_edge_cloud': ['contrail+edge+cloud'],
'contrail_enterprise_multicloud': ['contrail+enterprise+multicloud'],
'contrail_product': ['contrail+product'],
'contrail_sdwan': ['contrail+sdwan'],
'contrail_service_orchestration': ['contrail+service+orchestration'],
'control_framework': ['control+framework'],
'control_protocol': ['control+protocol'],
'control_theory': ['control+theory', 'risk_management+theory'],
'conversation': ['conversation'],
'conversion': ['conversion'],
'convert': ['convert'],
'convolutional_neural_networks': ['convolutional+neural+networks'],
'cooperation': ['cooperation'],
'cooperative_storage_cloud': ['cooperative+storage+cloud'],
'coordinator': ['coordinator'],
'core_configuration_ats': ['core+configuration+ats'],
'corporate_audit': ['corporate+audit'],
'corporate_customer': ['corporate+customer'],
'corporate_division': ['corporate+division'],
'corporate_finance': ['corporate+finance'],
'corporation': ['corporation'],
'corrective_action': ['corrective+action'],
'correlated_topic_model': ['correlated+topic+model'],
'correlation': ['correlation'],
'correlation_explanation': ['correlation+explanation'],
'correspondence_analysis': ['correspondence+analysis'],
'cosmology': ['cosmology'],
'cosmos_db': ['cosmos+db'],
'cost': ['cost'],
'cost_accounting': ['count+nickel', 'save+dime'],
'cost_cutting': [ 'cost+cutting',
'pricing_info+cut',
'pricing_info+cutting'],
'cost_living': ['cost+living'],
'cost_reduction': [ 'cost+reduction',
'cut+cost',
'pricing_info+reduction',
'pricing_info+save'],
'couch_db': ['couch+db'],
'couchbase': ['couchbase'],
'couchdb': ['couchdb'],
'counterfeit': ['counterfeit'],
'coursera': ['coursera'],
'courtesy': ['courtesy'],
'cp_system': ['cps+qsound', 'cps1+qsound', 'cps1_+_qsound', 'cps_+_qsound'],
'cplex': ['cplex'],
'cplex_optimizer_for_z_os': ['cplex+optimizer+zos'],
'cplex_optimizer_zos': ['cplex+optimizer+zos'],
'cpu': ['cpu'],
'creative': ['creative'],
'creative_cloud': ['creative+cloud'],
'creative_cloud_controversy': ['creative+cloud+controversy'],
'creative_director': ['creative+director'],
'creativity': ['creativity'],
'credit_card': ['credit+card'],
'credit_card_company': ['credit+card+company'],
'crisc_certification': [ 'crisc+certification',
'certification+information_system+risk_management'],
'crisp_dm': ['crisp+dm'],
'critical_observation': ['critical+observation'],
'critical_theory': ['critical+theory'],
'critical_thinking': ['critical+thinking'],
'crm': ['crm'],
'cross_kill': ['cross+skill'],
'cross_platform': ['cross+platform'],
'cross_platform_software': ['cross+platform+software'],
'crossing_the_line': ['crossing+the+line'],
'cryptocurrency': ['cryptocurrency'],
'cryptographic_protocol': ['cryptographic+protocol'],
'cryptography': ['cryptography'],
'cryptomining': ['cryptomining'],
'crystal_report': ['crystal+report'],
'csa': ['csa'],
'csa_certification': ['csa+certification'],
'css': ['css'],
'cssa_certification': [ 'cssa+certification',
'certification+scada+security_architect'],
'csslp_certification': [ 'csslp+certification',
'application_security+certification+sdlc',
'application_security+certification+software_development_process',
'certification+secure+software_development_process'],
'cubist_model': ['cubist+model'],
'cultural_anthropology': ['cultural+anthropology'],
'cultural_studies': ['cultural+studies'],
'cultural_study': ['cultural+study'],
'culture': ['culture'],
'cumuliform_cloud': ['cumuliform+cloud'],
'cumulonimbus_cloud': ['cumulonimbus+cloud'],
'cumulus_cloud': ['cumulus+cloud'],
'cumulus_congestus_cloud': ['cumulus+congestus+cloud'],
'cumulus_humilis_cloud': ['cumulus+humilis+cloud'],
'cumulus_mediocris_cloud': ['cumulus+mediocris+cloud'],
'customer': ['customer'],
'customer_behavior': ['customer+behavior'],
'customer_benefit': ['customer+benefit'],
'customer_engagement': ['customer+engagement'],
'customer_experience': ['customer+experience'],
'customer_management': [ 'customer+management',
'client+management',
'client+opinion',
'customer+opinion',
'stakeholder+opinion',
'stakeholder+understand'],
'customer_oriented': ['customer+oriented'],
'customer_service': ['customer+service'],
'customer_service_lead': ['customer+service+lead', 'team_lead+csr'],
'customer_support': ['customer+support', 'customer+service'],
'customer_team': ['customer+team'],
'cyberdefense': ['cyberdefense'],
'cybersecurity': ['cybersecurity'],
'cybersecurity_company': ['cybersecurity+company'],
'cython': ['cython'],
'd3.js': ['d3.js'],
'danske_bank': ['danske+bank'],
'dashboard': ['dashboard'],
'data': ['data'],
'data_access_layer': ['data+access+layer'],
'data_analysis': ['data+analysis'],
'data_analysis_skill': ['data+analysis+skill'],
'data_analyst': ['data+analyst', 'analyst+data'],
'data_architect': ['data+architect'],
'data_architecture': ['data+architecture'],
'data_artifact': ['data+artifact'],
'data_at_rest': ['data+at+rest'],
'data_at_rest_encryption': [ 'data+at+rest+encryption',
'data_at_rest+encryption'],
'data_audit': ['data+audit'],
'data_center': ['data+center'],
'data_center_specialist': ['data+center+specialist'],
'data_communication_hardware': ['data+communication+hardware'],
'data_dictionary': ['data+dictionary'],
'data_dimension': ['data+dimension'],
'data_encryption': ['data+encryption'],
'data_encryption_for_ims': ['data+encryption+ims'],
'data_encryption_for_ims_and_db2': ['data+db2+encryption+ims'],
'data_encryption_for_ims_and_db2_databases': [ 'data+databases+db2+encryption+ims'],
'data_encryption_ims': ['data+encryption+ims'],
'data_encryption_ims_db2': ['data+encryption+ims+db2'],
'data_encryption_ims_db2_databases': ['data+encryption+ims+db2+databases'],
'data_engineering_azure': ['data+engineering+azure'],
'data_error': ['data+error'],
'data_governance': ['data+governance'],
'data_integration_sap_data_services_4.2': [ 'data+integration+sap+data+services+4.2'],
'data_integration_skill': ['data+integration+skill'],
'data_lake': ['data+lake'],
'data_management': ['data+management', 'information_governance+management'],
'data_mart': ['data+mart'],
'data_methodology': ['data+methodology'],
'data_migration': ['data+migration', 'data+migrate'],
'data_mining': ['data+mining', 'mine+social web'],
'data_model': ['data+model'],
'data_modeling_skill': ['data+modeling+skill'],
'data_privacy': ['data+privacy'],
'data_privacy_regulation': ['data+privacy+regulation'],
'data_processing': ['data+processing', 'prepare+data'],
'data_quality': ['data+quality'],
'data_scentist': ['data+scentist'],
'data_science': ['data+science'],
'data_scientist': ['data+scientist'],
'data_scientist_workbench': ['data+scientist+workbench'],
'data_security': ['data+security'],
'data_server': ['data+server'],
'data_server_client': ['data+server+client', 'client+data+server'],
'data_server_client_packages': [ 'data+server+client+packages',
'client+data+package+server'],
'data_skill': ['data+skill'],
'data_storage': ['data+storage'],
'data_storage_library': ['data+storage+library'],
'data_structure': ['data+structure'],
'data_studio': ['data+studio'],
'data_studio_purequery': ['data+studio+purequery', 'data+purequery+studio'],
'data_studio_purequery_runtime': [ 'data+studio+purequery+runtime',
'data+purequery+runtime+studio'],
'data_warehouse': ['data+warehouse'],
'data_wrangling': ['data+wrangling'],
'database': ['database'],
'database_administrator': ['database+administrator'],
'database_certification': ['database+certification'],
'database_design': ['database+design'],
'database_dimension': ['database+dimension'],
'database_function': ['database+function'],
'database_index': ['database+index'],
'database_query': ['database+query'],
'database_schema': ['database+schema'],
'database_skill': ['database+skill'],
'database_table': ['database+table'],
'databases_modernization': ['databases+modernization'],
'datacap': ['datacap'],
'datacap_fastdoc_capture': [ 'datacap+fastdoc+capture',
'capture+datacap+fastdoc'],
'datacap_taskmaster_capture': [ 'datacap+taskmaster+capture',
'capture+datacap+taskmaster'],
'day_in_the_life_training': [ 'day+in+the+life+training',
'education+day_in_the_life'],
'db2': ['db2'],
'db2_administration': ['db2+administration', 'administrator+db2'],
'db2_administration_tool': [ 'db2+administration+tool',
'administrator+db2+tool'],
'db2_administration_tool_for_z_os': ['administrator+db2+tool+zos'],
'db2_administration_tool_zos': ['db2+administration+tool+zos'],
'db2_alphablox': ['db2+alphablox', 'alphablox+db2'],
'db2_analytics': ['db2+analytics', 'analytics+db2'],
'db2_analytics_accelerator': [ 'db2+analytics+accelerator',
'accelerator+analytics+db2'],
'db2_analytics_accelerator_for_z_os': | |
= m.group(1)
val = m.group(2)
val = val.rstrip()
# shlex isn't up to the task of parsing shell. Whatever,
# we can only parse shell to a certain degree and this is
# good enough for now.
try:
c = list(cls._shlex.split(val, comments=True, posix=True))
except:
return None
if len(c) != 1:
return None
return (key, c[0])
@classmethod
def ifcfg_parse(cls, content):
if content is None:
return None
ifcfg = {}
for line in content.splitlines():
val = cls.ifcfg_parse_line(line)
if val:
ifcfg[val[0]] = val[1]
return ifcfg
@classmethod
def content_from_dict(cls, ifcfg_all, file_type = None, header = None):
content = {}
for file_type in cls._file_types(file_type):
h = ifcfg_all[file_type]
if file_type == 'ifcfg':
if header is not None:
s = header + '\n'
else:
s = ""
for key in sorted(h.keys()):
value = h[key]
if not cls.KeyValid(key):
raise MyError('invalid ifcfg key %s' % (key))
if value is not None:
s += key + '=' + cls.ValueEscape(value) + '\n'
content[file_type] = s
else:
content[file_type] = h
return content
@classmethod
def content_to_dict(cls, content, file_type = None):
ifcfg_all = {}
for file_type in cls._file_types(file_type):
ifcfg_all[file_type] = cls.ifcfg_parse(content[file_type])
return ifcfg_all
@classmethod
def content_from_file(cls, name, file_type = None):
content = {}
for file_type in cls._file_types(file_type):
path = cls.ifcfg_path(name, file_type)
try:
with open(path, 'r') as content_file:
i_content = content_file.read()
except Exception as e:
i_content = None
content[file_type] = i_content
return content
@classmethod
def content_to_file(cls, name, content, file_type = None):
for file_type in cls._file_types(file_type):
path = cls.ifcfg_path(name, file_type)
h = content[file_type]
if h is None:
try:
os.unlink(path)
except OSError as e:
import errno
if e.errno != errno.ENOENT:
raise
else:
with open(path, 'w') as text_file:
text_file.write(h)
@classmethod
def connection_seems_active(cls, name):
# we don't know whether a ifcfg file is currently active,
# and we also don't know which.
#
# Do a very basic guess based on whether the interface
# is in operstate "up".
#
# But first we need to find the interface name. Do
# some naive parsing and check for DEVICE setting.
content = cls.content_from_file(name, 'ifcfg')
if content['ifcfg'] is not None:
content = cls.ifcfg_parse(content['ifcfg'])
else:
content = {}
if 'DEVICE' not in content:
return None
path = '/sys/class/net/' + content['DEVICE'] + '/operstate'
try:
with open(path, 'r') as content_file:
i_content = str(content_file.read())
except Exception as e:
return None
if i_content.strip() != 'up':
return False
return True
###############################################################################
class NMUtil:
def __init__(self, nmclient = None):
if nmclient is None:
nmclient = Util.NM().Client.new(None)
self.nmclient = nmclient
def setting_ip_config_get_routes(self, s_ip):
if s_ip is not None:
for i in range(0, s_ip.get_num_routes()):
yield s_ip.get_route(i)
def connection_ensure_setting(self, connection, setting_type):
setting = connection.get_setting(setting_type)
if not setting:
setting = setting_type.new()
connection.add_setting(setting)
return setting
def device_is_master_type(self, dev):
if dev:
NM = Util.NM()
GObject = Util.GObject()
if GObject.type_is_a(dev, NM.DeviceBond) \
or GObject.type_is_a(dev, NM.DeviceBridge) \
or GObject.type_is_a(dev, NM.DeviceTeam):
return True
return False
def active_connection_list(self, connections = None, black_list = None):
active_cons = self.nmclient.get_active_connections()
if connections:
connections = set(connections)
active_cons = [ac for ac in active_cons if ac.get_connection() in connections]
if black_list:
active_cons = [ac for ac in active_cons if ac not in black_list]
return list(active_cons)
def connection_list(self, name = None, uuid = None, black_list = None, black_list_names = None, black_list_uuids = None):
cons = self.nmclient.get_connections()
if name is not None:
cons = [c for c in cons if c.get_id() == name]
if uuid is not None:
cons = [c for c in cons if c.get_uuid() == uuid]
if black_list:
cons = [c for c in cons if c not in black_list]
if black_list_uuids:
cons = [c for c in cons if c.get_uuid() not in black_list_uuids]
if black_list_names:
cons = [c for c in cons if c.get_id() not in black_list_names]
cons = list(cons)
def _cmp(a, b):
s_a = a.get_setting_connection()
s_b = b.get_setting_connection()
if not s_a and not s_b:
return 0
if not s_a:
return 1
if not s_b:
return -1
t_a = s_a.get_timestamp()
t_b = s_b.get_timestamp()
if t_a == t_b:
return 0
if t_a <= 0:
return 1
if t_b <= 0:
return -1
return cmp(t_a, t_b)
cons.sort(cmp = _cmp)
return cons
def connection_compare(self, con_a, con_b, normalize_a = False, normalize_b = False, compare_flags = None):
NM = Util.NM()
if normalize_a:
con_a = NM.SimpleConnection.new_clone(con_a)
try:
con_a.normalize()
except:
pass
if normalize_b:
con_b = NM.SimpleConnection.new_clone(con_b)
try:
con_b.normalize()
except:
pass
if compare_flags == None:
compare_flags = NM.SettingCompareFlags.IGNORE_TIMESTAMP
return not(not(con_a.compare (con_b, compare_flags)))
def connection_is_active(self, con):
NM = Util.NM()
for ac in self.active_connection_list(connections=[con]):
if ac.get_state() >= NM.ActiveConnectionState.ACTIVATING \
and ac.get_state() <= NM.ActiveConnectionState.ACTIVATED:
return True
return False
    def connection_create(self, connections, idx, connection_current = None):
        """Translate the normalized dict connections[idx] into an NM.SimpleConnection.

        Args:
            connections: list of normalized connection dicts; siblings are
                consulted to resolve parent/master references via ArgUtil.
            idx: index of the connection to build.
            connection_current: existing connection whose routes are merged
                in when ip['route_append_only'] is set.

        Returns the normalized NM.SimpleConnection.
        Raises MyError for unsupported connection types, unrecognized
        macvlan modes, or a connection that fails to normalize.
        """
        NM = Util.NM()
        connection = connections[idx]
        con = NM.SimpleConnection.new()
        # base [connection] section shared by every type
        s_con = self.connection_ensure_setting(con, NM.SettingConnection)
        s_con.set_property(NM.SETTING_CONNECTION_ID, connection['name'])
        s_con.set_property(NM.SETTING_CONNECTION_UUID, connection['nm.uuid'])
        s_con.set_property(NM.SETTING_CONNECTION_AUTOCONNECT, connection['autoconnect'])
        s_con.set_property(NM.SETTING_CONNECTION_INTERFACE_NAME, connection['interface_name'])
        # type-specific settings
        if connection['type'] == 'ethernet':
            s_con.set_property(NM.SETTING_CONNECTION_TYPE, '802-3-ethernet')
            s_wired = self.connection_ensure_setting(con, NM.SettingWired)
            s_wired.set_property(NM.SETTING_WIRED_MAC_ADDRESS, connection['mac'])
        elif connection['type'] == 'infiniband':
            s_con.set_property(NM.SETTING_CONNECTION_TYPE, 'infiniband')
            s_infiniband = self.connection_ensure_setting(con, NM.SettingInfiniband)
            s_infiniband.set_property(NM.SETTING_INFINIBAND_MAC_ADDRESS, connection['mac'])
            s_infiniband.set_property(NM.SETTING_INFINIBAND_TRANSPORT_MODE, connection['infiniband']['transport_mode'])
            # p_key == -1 means "no partition key configured"
            if connection['infiniband']['p_key'] != -1:
                s_infiniband.set_property(NM.SETTING_INFINIBAND_P_KEY, connection['infiniband']['p_key'])
                if connection['parent']:
                    s_infiniband.set_property(NM.SETTING_INFINIBAND_PARENT, ArgUtil.connection_find_master(connection['parent'], connections, idx))
        elif connection['type'] == 'bridge':
            s_con.set_property(NM.SETTING_CONNECTION_TYPE, 'bridge')
            s_bridge = self.connection_ensure_setting(con, NM.SettingBridge)
            s_bridge.set_property(NM.SETTING_BRIDGE_STP, False)
        elif connection['type'] == 'bond':
            s_con.set_property(NM.SETTING_CONNECTION_TYPE, 'bond')
            s_bond = self.connection_ensure_setting(con, NM.SettingBond)
            s_bond.add_option('mode', connection['bond']['mode'])
            if connection['bond']['miimon'] is not None:
                s_bond.add_option('miimon', str(connection['bond']['miimon']))
        elif connection['type'] == 'team':
            s_con.set_property(NM.SETTING_CONNECTION_TYPE, 'team')
        elif connection['type'] == 'vlan':
            s_vlan = self.connection_ensure_setting(con, NM.SettingVlan)
            s_vlan.set_property(NM.SETTING_VLAN_ID, connection['vlan']['id'])
            s_vlan.set_property(NM.SETTING_VLAN_PARENT, ArgUtil.connection_find_master_uuid(connection['parent'], connections, idx))
        elif connection['type'] == 'macvlan':
            # convert mode name to a number (which is actually expected by nm)
            mode = connection['macvlan']['mode']
            try:
                mode_id = int(getattr( NM.SettingMacvlanMode, mode.upper() ))
            except AttributeError as e:
                raise MyError('Macvlan mode "%s" is not recognized' % (mode))
            s_macvlan = self.connection_ensure_setting(con, NM.SettingMacvlan)
            s_macvlan.set_property(NM.SETTING_MACVLAN_MODE, mode_id)
            s_macvlan.set_property(NM.SETTING_MACVLAN_PROMISCUOUS, connection['macvlan']['promiscuous'])
            s_macvlan.set_property(NM.SETTING_MACVLAN_TAP, connection['macvlan']['tap'])
            s_macvlan.set_property(NM.SETTING_MACVLAN_PARENT, ArgUtil.connection_find_master(connection['parent'], connections, idx))
        else:
            raise MyError('unsupported type %s' % (connection['type']))
        # optional ethernet link negotiation settings (any type with a wired setting)
        if 'ethernet' in connection:
            if connection['ethernet']['autoneg'] is not None:
                s_wired = self.connection_ensure_setting(con, NM.SettingWired)
                s_wired.set_property(NM.SETTING_WIRED_AUTO_NEGOTIATE, connection['ethernet']['autoneg'])
                s_wired.set_property(NM.SETTING_WIRED_DUPLEX, connection['ethernet']['duplex'])
                s_wired.set_property(NM.SETTING_WIRED_SPEED, connection['ethernet']['speed'])
        # MTU lives on the infiniband setting for infiniband, the wired setting otherwise
        if connection['mtu']:
            if connection['type'] == 'infiniband':
                s_infiniband = self.connection_ensure_setting(con, NM.SettingInfiniband)
                s_infiniband.set_property(NM.SETTING_INFINIBAND_MTU, connection['mtu'])
            else:
                s_wired = self.connection_ensure_setting(con, NM.SettingWired)
                s_wired.set_property(NM.SETTING_WIRED_MTU, connection['mtu'])
        if connection['master'] is not None:
            # slaves carry no zone/IP configuration of their own
            s_con.set_property(NM.SETTING_CONNECTION_SLAVE_TYPE, connection['slave_type'])
            s_con.set_property(NM.SETTING_CONNECTION_MASTER, ArgUtil.connection_find_master_uuid(connection['master'], connections, idx))
        else:
            if connection['zone']:
                s_con.set_property(NM.SETTING_CONNECTION_ZONE, connection['zone'])
            ip = connection['ip']
            s_ip4 = self.connection_ensure_setting(con, NM.SettingIP4Config)
            s_ip6 = self.connection_ensure_setting(con, NM.SettingIP6Config)
            # start from 'auto' and refine below based on dhcp4/auto6/addresses
            s_ip4.set_property(NM.SETTING_IP_CONFIG_METHOD, 'auto')
            s_ip6.set_property(NM.SETTING_IP_CONFIG_METHOD, 'auto')
            addrs4 = list([a for a in ip['address'] if a['family'] == socket.AF_INET])
            addrs6 = list([a for a in ip['address'] if a['family'] == socket.AF_INET6])
            if ip['dhcp4']:
                s_ip4.set_property(NM.SETTING_IP_CONFIG_METHOD, 'auto')
                # dhcp4_send_hostname defaults to enabled unless explicitly False
                s_ip4.set_property(NM.SETTING_IP_CONFIG_DHCP_SEND_HOSTNAME, ip['dhcp4_send_hostname'] != False)
            elif addrs4:
                s_ip4.set_property(NM.SETTING_IP_CONFIG_METHOD, 'manual')
            else:
                s_ip4.set_property(NM.SETTING_IP_CONFIG_METHOD, 'disabled')
            for a in addrs4:
                s_ip4.add_address(NM.IPAddress.new(a['family'], a['address'], a['prefix']))
            if ip['gateway4'] is not None:
                s_ip4.set_property(NM.SETTING_IP_CONFIG_GATEWAY, ip['gateway4'])
            if ip['route_metric4'] is not None and ip['route_metric4'] >= 0:
                s_ip4.set_property(NM.SETTING_IP_CONFIG_ROUTE_METRIC, ip['route_metric4'])
            for d in ip['dns']:
                if d['family'] == socket.AF_INET:
                    s_ip4.add_dns(d['address'])
            for s in ip['dns_search']:
                s_ip4.add_dns_search(s)
            if ip['auto6']:
                s_ip6.set_property(NM.SETTING_IP_CONFIG_METHOD, 'auto')
            elif addrs6:
                s_ip6.set_property(NM.SETTING_IP_CONFIG_METHOD, 'manual')
            else:
                s_ip6.set_property(NM.SETTING_IP_CONFIG_METHOD, 'ignore')
            for a in addrs6:
                s_ip6.add_address(NM.IPAddress.new(a['family'], a['address'], a['prefix']))
            if ip['gateway6'] is not None:
                s_ip6.set_property(NM.SETTING_IP_CONFIG_GATEWAY, ip['gateway6'])
            if ip['route_metric6'] is not None and ip['route_metric6'] >= 0:
                s_ip6.set_property(NM.SETTING_IP_CONFIG_ROUTE_METRIC, ip['route_metric6'])
            for d in ip['dns']:
                if d['family'] == socket.AF_INET6:
                    s_ip6.add_dns(d['address'])
            # keep the routes of the currently-active profile when appending
            if ip['route_append_only'] and connection_current:
                for r in self.setting_ip_config_get_routes(connection_current.get_setting(NM.SettingIP4Config)):
                    s_ip4.add_route(r)
                for r in self.setting_ip_config_get_routes(connection_current.get_setting(NM.SettingIP6Config)):
                    s_ip6.add_route(r)
            for r in ip['route']:
                rr = NM.IPRoute.new(r['family'], r['network'], r['prefix'], r['gateway'], r['metric'])
                if r['family'] == socket.AF_INET:
                    s_ip4.add_route(rr)
                else:
                    s_ip6.add_route(rr)
        try:
            con.normalize()
        except Exception as e:
            raise MyError('created connection failed to normalize: %s' % (e))
        return con
    def connection_add(self, con, timeout = 10):
        """Add a connection profile via the async libnm API, blocking on the result.

        Starts add_connection_async and runs the GLib main loop until the
        completion callback fires or `timeout` seconds pass. Returns the
        connection object produced by add_connection_finish; raises
        MyError on timeout or failure.
        """
        def add_cb(client, result, cb_args):
            # completion callback: record the result (or error text) and
            # quit the main loop so the blocked caller can resume
            con = None
            try:
                con = client.add_connection_finish(result)
            except Exception as e:
                if Util.error_is_cancelled(e):
                    # cancelled by the timeout path below, which already
                    # reports the failure itself
                    return
                cb_args['error'] = str(e)
            cb_args['con'] = con
            Util.GMainLoop().quit()
        cancellable = Util.create_cancellable()
        cb_args = {}
        self.nmclient.add_connection_async(con, True, cancellable, add_cb, cb_args)
        if not Util.GMainLoop_run(timeout):
            # the main loop timed out: cancel the pending operation
            cancellable.cancel()
            raise MyError('failure to add connection: %s' % ('timeout'))
        if not cb_args.get('con', None):
            raise MyError('failure to add connection: %s' % (cb_args.get('error', 'unknown error')))
        return cb_args['con']
    def connection_update(self, con, con_new, timeout = 10):
        """Replace con's settings with con_new's and commit them asynchronously.

        Blocks on the GLib main loop until commit_changes_async completes
        or `timeout` seconds pass. Returns True on success; raises MyError
        on timeout or failure.
        """
        con.replace_settings_from_connection(con_new)
        def update_cb(connection, result, cb_args):
            # completion callback: record success (or error text) and quit
            # the main loop so the blocked caller can resume
            success = False
            try:
                success = connection.commit_changes_finish(result)
            except Exception as e:
                if Util.error_is_cancelled(e):
                    # cancelled by the timeout path below, which already
                    # reports the failure itself
                    return
                cb_args['error'] = str(e)
            cb_args['success'] = success
            Util.GMainLoop().quit()
        cancellable = Util.create_cancellable()
        cb_args = {}
        con.commit_changes_async(True, cancellable, update_cb, cb_args)
        if not Util.GMainLoop_run(timeout):
            # the main loop timed out: cancel the pending operation
            cancellable.cancel()
            raise MyError('failure to update connection: %s' % ('timeout'))
        if not cb_args.get('success', False):
            raise MyError('failure to update connection: %s' % (cb_args.get('error', 'unknown error')))
        return True
def connection_delete(self, connection, timeout = 10):
c_uuid = | |
result.returncode != 0:
print( "[ERROR]: unable to export a dts" )
print( "\n%s" % textwrap.indent(result.stderr.decode(), ' ') )
return result
@staticmethod
def dt_to_fdt( dtb, rmode='rb' ):
"""takes a dtb and returns a flattened device tree object
Args:
dtb: a compiled device tree
rmode (string,optional): the read mode of the file, see libfdt for possible values
default is 'rb'
Returns:
A flattended device tree object (as defined by libfdt)
"""
fdt = libfdt.Fdt(open(dtb, mode=rmode).read())
return fdt
@staticmethod
def node_getphandle( fdt, node_number ):
"""utility command to get a phandle (as a number) from a node
Args:
fdt (FDT): flattened device tree
node_number (int): node number in the fdt
Returns:
int: the phandle of the node number, if successful, -1 if not
"""
prop = fdt.get_phandle( node_number )
return prop
@staticmethod
def property_get( fdt, node_number, prop_name, ftype=LopperFmt.SIMPLE, encode=LopperFmt.DEC ):
"""utility command to get a property (as a string) from a node
A more robust way to get the value of a property in a node, when
you aren't sure of the format of that property. This routine takes
hints when getting the property in the form of a "format type" and
an encoding.
The format and encoding options are in the following enum type:
class LopperFmt(Enum):
SIMPLE = 1 (format)
COMPOUND = 2 (format)
HEX = 3 (encoding)
DEC = 4 (encoding)
STRING = 5 (encoding)
MULTI_STRING = 5 (encoding)
Args:
fdt (FDT): flattened device tree
node_number (int): node number in the fdt
property (string): property name whose value to get
ftype (LopperFmt,optional): format of the property. Default SIMPLE.
encode (LopperFmt,optional); encoding of the property. Default DEC
Returns:
string: if format is SIMPLE: string value of the property, or "" if not found
list: if format is COMPOUND: list of property values as strings, [] if not found
"""
try:
prop = fdt.getprop( node_number, prop_name )
val = LopperFDT.property_value_decode( prop, 0, ftype, encode )
except Exception as e:
val = ""
return val
    @staticmethod
    def property_set( fdt, node_number, prop_name, prop_val, ftype=LopperFmt.SIMPLE, verbose=False ):
        """utility command to set a property in a node

        A more robust way to set the value of a property in a node, This routine
        takes hints when getting the property in the form of a "format type"

        The format options are in the following enum type:

           class LopperFmt(Enum):
              SIMPLE = 1 (format)
              COMPOUND = 2 (format)

        Based on the format hint, and the passed value, the property is encoded
        into a byte array and stored into the flattened device tree node.

        Writes are retried up to MAX_RETRIES times, growing the fdt by 1k on
        each failure, since libfdt raises when the tree runs out of space.

        Args:
           fdt_dst (FDT): flattened device tree
           node_number (int): node number in the fdt
           prop_name (string): property name whose value to set
           prop_val (int, string or list): value to encode and store
           ftype (LopperFmt,optional): format of the property. Default SIMPLE.
           verbose (bool,optional): print warnings on write retries. Default False.

        Returns:
           Nothing

        """
        # if it's a list, we dig in a bit to see if it is a single item list.
        # if so, we grab the value so it can be propery encoded. We also have
        # a special case if the '' string is the only element .. we explicity
        # set the empty list, so it will encode properly.
        if type(prop_val) == list:
            if len(prop_val) == 1 and prop_val[0] != '':
                prop_val = prop_val[0]
            elif len(prop_val) == 1 and prop_val[0] == '':
                pass
        try:
            prop_val_converted = int(prop_val,0)
            # if it works, that's our new prop_val. This covers the case where
            # a string is passed in, but it is really just a single number.
            # note: we may need to consult "ftype" in the future so the caller
            # can override this automatical conversion
            prop_val = prop_val_converted
        except:
            # do nothing. let propval go through as whatever it was
            pass
        # we have to re-encode based on the type of what we just decoded.
        if type(prop_val) == int:
            # this seems to break some operations, but a variant may be required
            # to prevent overflow situations
            # if sys.getsizeof(prop_val) >= 32:
            # retry loop: on any write failure, grow the fdt and try again
            for _ in range(MAX_RETRIES):
                try:
                    # NOTE(review): sys.getsizeof measures the Python object,
                    # not the integer's bit width -- confirm this is the
                    # intended u64-vs-u32 selection criterion
                    if sys.getsizeof(prop_val) > 32:
                        fdt.setprop_u64( node_number, prop_name, prop_val )
                    else:
                        fdt.setprop_u32( node_number, prop_name, prop_val )
                    break
                except Exception as e:
                    fdt.resize( fdt.totalsize() + 1024 )
                    continue
                else:
                    # unreachable: the try body always breaks on success
                    break
            else:
                # it wasn't set all all, we could thrown an error
                pass
        elif type(prop_val) == str:
            # retry loop: on any write failure, grow the fdt and try again
            for _ in range(MAX_RETRIES):
                try:
                    fdt.setprop_str( node_number, prop_name, prop_val )
                    break
                except Exception as e:
                    if verbose:
                        print( "[WARNING]: property set exception: %s" % e)
                    fdt.resize( fdt.totalsize() + 1024 )
                    continue
                else:
                    # unreachable: the try body always breaks on success
                    break
            else:
                # we totally failed!
                pass
        elif type(prop_val) == list:
            if len(prop_val) > 1:
                val_to_sync = []
                iseq = iter(prop_val)
                first_type = type(next(iseq))
                # check for a mixed type, we get "false" if it is not all the same, or
                # the type otherwise
                the_same = first_type if all( (type(x) is first_type) for x in iseq ) else False
                if the_same == False:
                    # convert everything to strings
                    val_to_sync = []
                    for v in prop_val:
                        val_to_sync.append( str(v) )
                else:
                    val_to_sync = prop_val
            else:
                val_to_sync = prop_val
            prop_val = val_to_sync
            # list is a compound value, or an empty one!
            if len(prop_val) >= 0:
                # prefer the string encoder; fall back to the generic byte
                # array encoder when the values aren't all strings
                try:
                    bval = LopperFDT.encode_byte_array_from_strings(prop_val)
                except Exception as e:
                    bval = LopperFDT.encode_byte_array(prop_val)
                # retry loop: the try/else "break" is the success exit here,
                # since the try body itself does not break
                for _ in range(MAX_RETRIES):
                    try:
                        fdt.setprop( node_number, prop_name, bval)
                    except Exception as e:
                        fdt.resize( fdt.totalsize() + 1024 )
                        continue
                    else:
                        break
                else:
                    # fail!
                    print( "[WARNING]: lopper_fdt: unable to write property '%s' to fdt" % prop_name )
        else:
            print( "[WARNING]: %s: unknown type was used: %s" % (prop_name,type(prop_val)) )
@staticmethod
def property_remove( fdt, node_name, prop_name, verbose=0 ):
"""removes a property from a fdt
Removes a property (if it exists) from a node (and optionally its children).
Args:
fdt (FDT): flattened device tree to modify
node_name (int or string): the node number or name to process
prop_name (string): name of property to remove
Returns:
Boolean: True if the property was deleted, False if it wasn't
"""
node = LopperFDT.node_find( fdt, node_name )
if node == -1:
node, nodes = LopperFDT.node_find_by_name( fdt, node_name )
if node == -1:
return False
prop_list = []
poffset = fdt.first_property_offset(node, QUIET_NOTFOUND)
while poffset > 0:
# if we delete the only property of a node, all calls to the FDT
# will throw an except. So if we get an exception, we set our poffset
# to zero to escape the loop.
try:
prop = fdt.get_property_by_offset(poffset)
except:
poffset = 0
continue
prop_list.append(prop.name)
poffset = fdt.next_property_offset(poffset, QUIET_NOTFOUND)
if prop_name in prop_list:
# node is an integer offset, prop_name is a string
if verbose:
print( "[INFO]: removing property %s from %s" % (prop_name, fdt.get_name(node)) )
fdt.delprop(node, prop_name)
else:
return False
return True
@staticmethod
def dt_compile( dts_file, i_files, includes, force_overwrite=False, outdir="./",
save_temps=False, verbose=0, enhanced = True ):
"""Compile a dts file to a dtb
This routine takes a dts input file, other dts include files,
include search path and then uses standard tools (cpp, dtc, etc).
Environment variables can be used tweak the execution of the various
tools and stages:
LOPPER_CPP: set if a different cpp than the standard one should
be used, or if cpp is not on the path
LOPPER_PPFLAGS: flags to be used when calling cpp
LOPPER_DTC: set if a non standard dtc should be used, or if dtc
is not on the path
LOPPER_DTC_FLAGS: flags to use when calling dtc
LOPPER_DTC_OFLAGS: extra dtc flags if an overlay is being compiled
LOPPER_DTC_BFLAGS: extra dtc args/flags
Args:
dts_file (string): path to the dts file to be compiled
i_files (list): files to be included
includes (list): list of include directories (translated into -i <foo>
for dtc calls)
force_overwrite (bool,optional): should files be overwritten.
Default is False
save_temps (bool, optional): should temporary files be saved on failure
verbose (bool,optional): verbosity level
Returns:
string: Name of the compiled dtb
"""
output_dtb = ""
# Note: i_files is not currently used. They are typically concatenated
# before calling this routine due to pecularities in include
# processing
# | |
from __future__ import print_function
import argparse
import datetime
import dateutil.parser
import decimal
import facepy
import json
import logging
import numpy
import pandas
import pickle
import pdb
import pprint
import sys
import time
import traceback
import weakref
import matplotlib.pyplot as plt
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
def main():
    """Parse CLI args, load or fetch BDMY group data, plot engagement trends
    against a fixed WCKL snapshot, then drop into a pdb REPL.

    NOTE(review): this relies on pre-0.18 pandas APIs (pandas.rolling_mean
    and resample(how=...)) that were removed in later pandas releases --
    confirm the pinned pandas version before upgrading.
    """
    parser = argparse.ArgumentParser(description="BDMY FB group data wrangler")
    parser.add_argument("--last-n-pages", action="store", type=int, default=None,
                        help="Only fetch the last N most recent pages worth of data. Not applicable if loading from file.")
    parser.add_argument("--group-id", action="store", type=str, default="497068793653308",
                        help="Group ID (default is the group id of https://www.facebook.com/groups/bigdatamy/.)")
    parser.add_argument("--load-from-file", action="store", type=str, default=None,
                        help="File to unpickle from; if not specified, download from Facebook servers.")
    args = parser.parse_args()
    # reference data set: the WCKL group, loaded from a fixed local snapshot
    wckl = Group("179139768764782")
    wckl.unpickle_posts_from_file("wckl-2016-06-24.dat")
    wckl.generate_standard_data_sets()
    wckl_resample_engagement_cnt_daily = wckl.series_engagement_cnt.resample('1D',
                                                                             how='sum').fillna(0)
    wckl_resample_unique_engagers_cnt_daily = wckl.series_unique_engagers_cnt.resample('1D',
                                                                                       how='sum').fillna(0)
    wckl_resample_unique_engagers_cum_cnt_daily = wckl.series_unique_engagers_cum_cnt.resample('1D',
                                                                                               how='max').fillna(method='pad')
    wckl_rolling_average_30d_engagement_cnt = pandas.rolling_mean(wckl_resample_engagement_cnt_daily,
                                                                  window=30)
    wckl_rolling_average_30d_unique_engagers_cnt_daily = pandas.rolling_mean(wckl_resample_unique_engagers_cnt_daily,
                                                                             window=30)
    wckl_resample_engagement_ave_daily = wckl.series_engagement_cnt.resample('1D', how='mean').fillna(0)
    wckl_rolling_average_30d_engagement_ave_daily = pandas.rolling_mean(wckl_resample_engagement_ave_daily, window=30)
    # "uq" series: mean + one standard deviation, used as a popularity threshold
    wckl_resample_engagement_uq_daily = wckl.series_engagement_cnt.resample('1D', how=lambda x: x.mean() + x.std()).fillna(0)
    wckl_rolling_average_30d_engagement_uq_daily = pandas.rolling_mean(wckl_resample_engagement_uq_daily, window=30)
    try:
        bdmy = Group(args.group_id)
        if args.load_from_file is not None:
            bdmy.unpickle_posts_from_file(args.load_from_file)
        else:
            # fetch live data; the OAuth token is read from a local file
            oauth_access_token_file = 'oauth_file'
            with open(oauth_access_token_file, 'r') as oauth_fd:
                oauth_access_token = oauth_fd.readlines()[0]
            bdmy.fetch(oauth_access_token,
                       args.last_n_pages)
        bdmy.generate_standard_data_sets()
        print("Close plot to enter REPL...")
        # same aggregations as the WCKL reference set, for the target group
        resample_engagement_cnt_daily = bdmy.series_engagement_cnt.resample('1D',
                                                                            how='sum').fillna(0)
        resample_unique_engagers_cnt_daily = bdmy.series_unique_engagers_cnt.resample('1D',
                                                                                      how='sum').fillna(0)
        resample_unique_engagers_cum_cnt_daily = bdmy.series_unique_engagers_cum_cnt.resample('1D',
                                                                                              how='max').fillna(method='pad')
        rolling_average_30d_engagement_cnt = pandas.rolling_mean(resample_engagement_cnt_daily,
                                                                 window=30)
        rolling_average_30d_unique_engagers_cnt_daily = pandas.rolling_mean(resample_unique_engagers_cnt_daily,
                                                                            window=30)
        #ax = resample_engagement_cnt_daily.plot(style="bo-",
        #                                        title="BDMY: Engagements (posts, comments, reactions, comment likes)",
        #                                        legend=True,
        #                                        label='Engagements daily agg')
        ax = rolling_average_30d_engagement_cnt.plot(#ax=ax,
                                                     style="r-",
                                                     #linewidth=3.0,
                                                     legend=True,
                                                     label="BDMY: Engagements 30 day moving ave")
        wckl_rolling_average_30d_engagement_cnt.plot(ax=ax,
                                                     style="b-",
                                                     #linewidth=3.0,
                                                     legend=True,
                                                     label="WCKL: Engagements 30 day moving ave")
        #resample_unique_engagers_cnt_daily.plot(ax=ax,
        #                                        style="go-",
        #                                        legend=True,
        #                                        label="Unique engagers daily agg")
        #rolling_average_30d_unique_engagers_cnt_daily.plot(ax=ax,
        #                                                   style="y-",
        #                                                   #linewidth=3.0,
        #                                                   legend=True,
        #                                                   label="BDMY: Unique engagers 30 day moving ave")
        #wckl_rolling_average_30d_unique_engagers_cnt_daily.plot(ax=ax,
        #                                                        style="g-",
        #                                                        #linewidth=3.0,
        #                                                        legend=True,
        #                                                        label="WCKL: Unique engagers 30 day moving ave")
        resample_engagement_ave_daily = bdmy.series_engagement_cnt.resample('1D', how='mean').fillna(0)
        rolling_average_30d_engagement_ave_daily = pandas.rolling_mean(resample_engagement_ave_daily, window=30)
        #rolling_average_30d_engagement_ave_daily.plot(ax=ax,
        #                                              style="c-",
        #                                              #linewidth=3.0,
        #                                              legend=True,
        #                                              label="BDMY: Engagements per-post 30 day moving ave")
        resample_engagement_uq_daily = bdmy.series_engagement_cnt.resample('1D', how=lambda x: x.mean() + x.std()).fillna(0)
        rolling_average_30d_engagement_uq_daily = pandas.rolling_mean(resample_engagement_uq_daily, window=30)
        #rolling_average_30d_engagement_uq_daily.plot(ax=ax,
        #                                             style="b-",
        #                                             linewidth=3.0,
        #                                             legend=True,
        #                                             label="BDMY: Engagements per-post 30 day moving mean+1std")
        #wckl_rolling_average_30d_engagement_ave_daily.plot(ax=ax,
        #                                                   style="m-",
        #                                                   #linewidth=3.0,
        #                                                   legend=True,
        #                                                   label="WCKL: Engagements per-post 30 day moving ave")
        resample_unique_engagers_cum_cnt_daily.plot(ax=ax,
                                                    style="c-",
                                                    linewidth=3.0,
                                                    legend=True,
                                                    label="BDMY: Cumulative unique engagers",
                                                    secondary_y=True)
        wckl_resample_unique_engagers_cum_cnt_daily.plot(ax=ax,
                                                         style="m-",
                                                         linewidth=3.0,
                                                         legend=True,
                                                         label="WCKL: Cumulative unique engagers",
                                                         secondary_y=True)
        ax.set_xlabel("Update date of post")
        #ax.set_ylabel("Number of engagement events")
        ax.set_ylabel("cnt")
        plt.show()
        # a post is "popular" when it beats the mean+1std 30-day threshold
        # for its (midnight-truncated) update day
        popular_posts = []
        for post in bdmy.posts:
            if post.get_all_engagements_count() > rolling_average_30d_engagement_uq_daily[post.updated_date.replace(hour=0, minute=0, second=0)]:
                popular_posts.append(post)
    except:
        traceback.print_exc()
    finally:
        # always drop into the debugger so the data can be explored interactively
        print("Entering REPL. To interact with current dataset, play with the bdmy object.")
        if not args.load_from_file:
            print("Tip: save your data for reuse with the --load-from-file arg, by calling the pickle_posts method on the bdmy object.")
        pdb.set_trace()
class Engagement(object):
    """Thin wrapper around one raw engagement record from the Graph API.

    Dumps the raw record to stdout on construction for debugging.
    """

    def __init__(self, raw_info):
        self._raw_info = raw_info
        print("ENGAGEMENT:")
        pprint.pprint(self._raw_info)
class Reaction(object):
    """One reaction (like/love/haha/...) on a post or comment.

    The class-level ``reaction_types`` set records every distinct reaction
    type observed across all instances.
    """
    reaction_types = set()

    def __init__(self, raw_info, is_like=False):
        self._raw_info = raw_info
        # Plain "likes" arrive without a 'type' field, so tag them explicitly.
        reaction = "LIKE" if is_like else self._raw_info['type']
        self.add_reaction_type(reaction)

    def get_reactor_id(self):
        """Return the id of the user who reacted."""
        return self._raw_info['id']

    def add_reaction_type(self, reaction_type_string):
        """Record this reaction's normalized (uppercase) type on instance and class."""
        normalized = str(reaction_type_string).upper()
        self.reaction_type = normalized
        self.add_seen_reaction_type(normalized)

    @classmethod
    def add_seen_reaction_type(cls, reaction_type_string):
        """Track a reaction type in the class-wide set of seen types."""
        cls.reaction_types.add(str(reaction_type_string).upper())
class Comment(object):
    """A single comment on a post, plus the reactions (likes) attached to it."""
    reactions = None

    def __init__(self, raw_info):
        """
        :param raw_info: raw Graph API comment node; must contain 'id' and,
            when available, 'from' (the commenting user).
        """
        self._raw_info = raw_info
        self.fb_id = self._raw_info["id"]
        self.reactions = []

    def add_reaction(self, reaction):
        """Attach a Reaction (comment like) to this comment."""
        self.reactions.append(reaction)

    def get_commenter_id(self):
        """
        Return the id of the *user* who wrote the comment.

        BUG FIX: the original returned self._raw_info['id'], which is the
        comment's own Graph id, not the commenter's. The comments query in
        Group.fetch requests the 'from' field, whose 'id' is the commenting
        user -- the same way Post resolves poster_id from its 'from' info.
        Falls back to the comment id when 'from' is absent (e.g. deleted
        account), preserving the old behavior for that case.
        """
        sender = self._raw_info.get('from')
        if sender and 'id' in sender:
            return sender['id']
        return self._raw_info['id']

    def get_reactor_ids(self):
        """Deduped ids of the users who reacted to this comment."""
        return frozenset([r.get_reactor_id() for r in self.reactions])
class Post(object):
    """A single group post: raw Graph data plus its comments and reactions."""
    comments = None
    reactions = None

    def __init__(self, raw_info):
        self._base_info = raw_info
        self.fb_id = self._base_info["id"]
        self.updated_date = dateutil.parser.parse(self._base_info["updated_time"])
        self.reactions = []
        self.comments = []
        # Post ids look like "<groupid>_<postid>"; the permalink path wants
        # only the second half.
        self.url_post_resource = "/permalink/{}/".format(self._base_info['id'].split('_')[1])

    def get_poster(self):
        """Id of the user who created the post (poster_id is set by Group.fetch)."""
        return self.poster_id

    def get_all_engager_ids(self):
        """
        Return a deduped frozenset of user ids for everyone who engaged with
        the post: the poster, commenters, post reactors, and comment reactors.
        """
        engagers = set()
        engagers.update(c.get_commenter_id() for c in self.comments)
        for c in self.comments:
            engagers.update(c.get_reactor_ids())
        engagers.update(r.get_reactor_id() for r in self.reactions)
        engagers.add(self.get_poster())
        return frozenset(engagers)

    def get_all_engagements_count(self):
        """Count engagement events: the post itself, every reaction, every
        comment, and every comment reaction."""
        total = 1 + len(self.reactions)  # the post's very existence counts as one
        for comment in self.comments:
            total += 1 + len(comment.reactions)
        return total

    def add_comment(self, comment):
        self.comments.append(comment)

    def add_reaction(self, reaction):
        self.reactions.append(reaction)
class Group(object):
posts = None
def __init__(self, group_id):
logging.info("created Group object for group_id %s", str(group_id))
self.group_id = group_id
self.posts = []
self.oauth_access_token = None
self.graph = None
    def add_post(self, post):
        # Append a Post to the in-memory collection. Order follows the Graph
        # feed as fetched (analysis code later walks reversed(self.posts),
        # so presumably newest-first -- confirm against the feed endpoint).
        self.posts.append(post)
def make_url(self, post_obj=None):
url = "https://www.facebook.com/groups/{}".format(self.group_id)
if post_obj:
url += post_obj.url_post_resource
return url
    def pickle_posts(self, filename):
        """Serialize self.posts to *filename* for later reuse via
        unpickle_posts_from_file (avoids re-fetching from the Graph API)."""
        with open(filename, "wb") as pickle_dst:
            pickle.dump(self.posts, pickle_dst)
    def unpickle_posts_from_file(self, filename):
        """Replace self.posts with the list previously saved by pickle_posts.

        NOTE(review): pickle.load on an untrusted file can execute arbitrary
        code -- only load files this tool wrote itself.
        """
        with open(filename, "rb") as pickle_src:
            self.posts = pickle.load(pickle_src)
    def generate_standard_data_sets(self):
        """Build the pandas series used by the plotting code.

        Produces, indexed by each post's updated_time:
          - series_engagement_cnt: engagement events per post
          - series_unique_engagers_cnt: distinct engagers per post
          - series_unique_engagers_cum_cnt: cumulative distinct engagers over time
        plus engagers_engagement_cnt, a dict of engager id -> number of posts
        they engaged with.

        Assumes self.posts is ordered so that time_index is monotonically
        non-increasing (newest first) -- validated below.
        """
        self.time_index = pandas.to_datetime([p.updated_date for p in self.posts])
        self.series_engagement_cnt = pandas.Series([p.get_all_engagements_count() for p in self.posts],
                                                   index=self.time_index)
        self.series_unique_engagers_cnt = pandas.Series([len(set(p.get_all_engager_ids())) for p in self.posts],
                                                        index=self.time_index)
        logging.info("Validating time index")
        # Walk oldest-to-newest and assert timestamps never decrease;
        # duplicate timestamps are tolerated but logged.
        last_time = None
        for next_time in reversed(self.time_index):
            if last_time:
                assert not last_time > next_time, "Integrity issue with time index: last_time = {}, next_time = {}".format(last_time, next_time)
                if last_time == next_time:
                    logging.warning("Weirdness in time index: last_time = {}, next_time = {}".format(last_time, next_time))
            last_time = next_time
        logging.info("Making time range pairs sequence")
        # Pairs of consecutive timestamps (newer, older), oldest pair first.
        time_range_pairs = list(reversed([(self.time_index[x], self.time_index[x+1]) for x in range(len(self.time_index))[:-1]]))
        # Synthesize one extra pair before the oldest post so the pair count
        # matches the number of posts.
        time_range_pairs.insert(0, (time_range_pairs[0][1], (time_range_pairs[0][1]- datetime.timedelta(days=1))))
        assert len(time_range_pairs) == len(self.time_index)
        unique_engagers_cum_cnt = []
        seen_engagers = set()
        end_time, start_time = time_range_pairs.pop(0)
        logging.info("Making cumulative engagers count over posts")
        logging.debug("Updating time range; initial range is {} -> {}".format(start_time, end_time))
        expect_more_posts = True
        # Walk posts oldest-to-newest, accumulating the set of engagers and
        # snapshotting its size whenever a post closes the current window.
        for post in list(reversed(self.posts)):
            assert expect_more_posts
            logging.debug("post updated_date={}".format(post.updated_date))
            assert post.updated_date >= start_time
            for engager_id in post.get_all_engager_ids(): # this MUST come before the end_time check
                seen_engagers.add(engager_id)
            if post.updated_date == end_time:
                unique_engagers_cum_cnt.append(len(seen_engagers))
                if time_range_pairs:
                    logging.debug("Updating time range; current range is {} -> {}".format(start_time, end_time))
                    end_time, start_time = time_range_pairs.pop(0)
                    logging.debug("Updated time range; new range is {} -> {}".format(start_time, end_time))
                    assert end_time >= start_time
                else:
                    expect_more_posts = False
        assert len(unique_engagers_cum_cnt) == len(self.time_index)
        # Reverse back into newest-first order to line up with time_index.
        self.series_unique_engagers_cum_cnt = pandas.Series(list(reversed(unique_engagers_cum_cnt)), self.time_index)
        # Per-engager engagement counts across all posts.
        self.engagers_engagement_cnt = {}
        for post in self.posts:
            for engager in post.get_all_engager_ids():
                if engager in self.engagers_engagement_cnt:
                    self.engagers_engagement_cnt[engager] += 1
                else:
                    self.engagers_engagement_cnt[engager] = 1
    def graph_get_with_oauth_retry(self, url, page, max_retry_cycles=3):
        """GET *url* from the Graph API, retrying on failure.

        On a failed simple retry that raises an OAuthError, interactively
        prompts the user (via raw_input -- Python 2 only) for a fresh access
        token and rebuilds the GraphAPI client. Returns an empty list after
        max_retry_cycles failed cycles.

        NOTE(review): ``exc.message`` below is Python-2-only; under Python 3
        this attribute does not exist and would itself raise.
        """
        assert max_retry_cycles > 0
        retry_cycle = 0
        while True:
            if retry_cycle >= max_retry_cycles:
                # Give up; last_exc is always bound here because reaching
                # this point requires at least one failed cycle.
                logging.error("Giving up on query {} after {} tries; last exception was {}/{}".format(url,
                                                                                 retry_cycle,
                                                                                 type(last_exc),
                                                                                 last_exc))
                return list()
            retry_cycle += 1
            try:
                return list(self.graph.get(url, page=page))
            except Exception as exc:
                last_exc = exc
                logging.error(exc)
                if "unknown error" not in exc.message:
                    # might be able to recover with a retry or a new token
                    logging.info("Failed with {}/{}: doing simple retry".format(type(exc), exc))
                    try:
                        time.sleep(3)
                        return list(self.graph.get(url, page=page))
                    except facepy.exceptions.OAuthError as exc:
                        logging.error("Retry {} failed; {}/{}".format(retry_cycle, type(exc), exc))
                        logging.info("Update your token; generate a new token by visiting {}".format("https://developers.facebook.com/tools/explorer"))
                        logging.info("Waiting for user to enter new oauth access token...")
                        self.oauth_access_token = raw_input("Enter new oath access token: ")
                        self.oauth_access_token = self.oauth_access_token.strip()
                        self.graph = facepy.GraphAPI(self.oauth_access_token)
def fetch(self, oauth_access_token, max_pages=None):
"""
For testing purposes one may limit max_pages.
"""
self.oauth_access_token = oauth_access_token
self.graph = facepy.GraphAPI(self.oauth_access_token)
data = self.graph_get_with_oauth_retry('/v2.6/{}/feed'.format(self.group_id), page=True)
raw_post_data = []
page_count = 0
print("foo")
for page in data:
if max_pages and page_count >= max_pages:
break
page_count += 1
try:
logging.debug("new page")
if "data" in page:
logging.debug("page has %s posts", len(page['data']))
raw_post_data += [p for p in page['data']]
logging.info("current accumulated posts count: %d, oldest timestamp: %s",
len(raw_post_data),
raw_post_data[-1]["updated_time"])
except:
pprint.pprint(page)
raise
for post in raw_post_data:
try:
post_obj = Post(post)
except:
logging.error("Problem with raw post data: %s", pprint.pformat(post))
raise
self.add_post(post_obj)
try:
logging.info("Fleshing out post {} of {}; {}".format(len(self.posts), len(raw_post_data), self.make_url(post_obj)))
# TODO sort out this horrible boilerplate
# Step 0: get post from
logging.info("Fleshing out post {} of {}; {} -- getting from info".format(len(self.posts), len(raw_post_data), self.make_url(post_obj)))
post_obj.from_info = self.graph_get_with_oauth_retry('/v2.6/{}?fields=from'.format(post_obj.fb_id), page=True)
assert len(post_obj.from_info) == 1, post_obj.from_info
post_obj.poster_id = post_obj.from_info[0]['from']['id']
# Step 1: extract post reactions
logging.info("Fleshing out post {} of {}; {} -- getting reactions".format(len(self.posts), len(raw_post_data), self.make_url(post_obj)))
reactions_pages = list(self.graph_get_with_oauth_retry('/v2.6/{}/reactions'.format(post_obj.fb_id), page=True))
logging.debug("reactions: %d, %s", len(reactions_pages), pprint.pformat(reactions_pages))
reactions = []
try:
if reactions_pages and reactions_pages[-1]:
for reactions_page in reactions_pages:
reactions += reactions_page['data']
if 'paging' in reactions_pages[-1]:
if 'next' in reactions_pages[-1]['paging']:
raise Exception("well that was unexpected")
except:
logging.error("Tripped up on {}".format(pprint.pformat(reactions_pages)))
raise
for reaction_data in reactions:
post_obj.add_reaction(Reaction(reaction_data))
# Step 2: extract post comments, along with their likes
logging.info("Fleshing out post {} of {}; {} -- getting comments".format(len(self.posts), len(raw_post_data), self.make_url(post_obj)))
comments_pages = list(self.graph_get_with_oauth_retry('/v2.6/{}/comments?fields=from,created_time,message,id,likes'.format(post_obj.fb_id), page=True))
logging.debug("comments: %d, %s", len(comments_pages), pprint.pformat(comments_pages))
comments = []
try:
if comments_pages | |
# repo: captain-c00keys/pyramid-stocks
import os
import unittest
from pyramid import testing
from pyramid.tests.test_config import IDummy
from pyramid.tests.test_config import dummy_view
from pyramid.compat import (
im_func,
text_,
)
from pyramid.exceptions import ConfigurationError
from pyramid.exceptions import ConfigurationExecutionError
from pyramid.exceptions import ConfigurationConflictError
class TestViewsConfigurationMixin(unittest.TestCase):
    def _makeOne(self, *arg, **kw):
        """Create a Configurator test fixture with CSRF checks disabled."""
        from pyramid.config import Configurator
        config = Configurator(*arg, **kw)
        # CSRF is irrelevant to these view-registration tests; disabling it
        # keeps add_view-registered views callable without a csrf token.
        config.set_default_csrf_options(require_csrf=False)
        return config
    def _getViewCallable(self, config, ctx_iface=None, exc_iface=None,
                         request_iface=None, name=''):
        """Look up the view callable registered in *config*'s registry.

        Passing exc_iface switches the lookup to the exception-view
        classifier (and uses exc_iface as the context interface); otherwise
        the ordinary view classifier is used with ctx_iface (default: the
        blank Interface). Returns None if nothing is registered.
        """
        from zope.interface import Interface
        from pyramid.interfaces import IRequest
        from pyramid.interfaces import IView
        from pyramid.interfaces import IViewClassifier
        from pyramid.interfaces import IExceptionViewClassifier
        if exc_iface:
            classifier = IExceptionViewClassifier
            ctx_iface = exc_iface
        else:
            classifier = IViewClassifier
            if ctx_iface is None:
                ctx_iface = Interface
        if request_iface is None:
            request_iface = IRequest
        return config.registry.adapters.lookup(
            (classifier, request_iface, ctx_iface), IView, name=name,
            default=None)
    def _registerRenderer(self, config, name='.txt'):
        """Register a minimal renderer factory under *name* and return it.

        The factory records the info object it was constructed with on the
        *class* attribute ``info`` so tests can inspect what pyramid passed;
        rendering always yields b'Hello!'.
        """
        from pyramid.interfaces import IRendererFactory
        class Renderer:
            def __init__(self, info):
                # stash on the class so the test can read Renderer.info
                self.__class__.info = info
            def __call__(self, *arg):
                return b'Hello!'
        config.registry.registerUtility(Renderer, IRendererFactory, name=name)
        return Renderer
    def _makeRequest(self, config):
        """Return a DummyRequest whose registry is wired to *config*'s."""
        request = DummyRequest()
        request.registry = config.registry
        return request
    def _assertNotFound(self, wrapper, *arg):
        """Assert that calling wrapper(*arg) raises HTTPNotFound."""
        from pyramid.httpexceptions import HTTPNotFound
        self.assertRaises(HTTPNotFound, wrapper, *arg)
    def _getRouteRequestIface(self, config, name):
        """Return the IRouteRequest interface registered for route *name*."""
        from pyramid.interfaces import IRouteRequest
        iface = config.registry.getUtility(IRouteRequest, name)
        return iface
def _assertRoute(self, config, name, path, num_predicates=0):
from pyramid.interfaces import IRoutesMapper
mapper = config.registry.getUtility(IRoutesMapper)
routes = mapper.get_routes()
route = routes[0]
self.assertEqual(len(routes), 1)
self.assertEqual(route.name, name)
self.assertEqual(route.path, path)
self.assertEqual(len(routes[0].predicates), num_predicates)
return route
    def test_add_view_view_callable_None_no_renderer(self):
        # add_view with neither a view callable nor a renderer is a
        # configuration error: there would be nothing to invoke.
        config = self._makeOne(autocommit=True)
        self.assertRaises(ConfigurationError, config.add_view)
    def test_add_view_with_request_type_and_route_name(self):
        # Supplying both a request_type and a route_name (the two trailing
        # True positional args -- NOTE(review): confirm positional order
        # against add_view's signature) must raise ConfigurationError.
        config = self._makeOne(autocommit=True)
        view = lambda *arg: 'OK'
        self.assertRaises(ConfigurationError, config.add_view, view, '', None,
                          None, True, True)
def test_add_view_with_request_type(self):
from pyramid.renderers import null_renderer
from zope.interface import directlyProvides
from pyramid.interfaces import IRequest
view = lambda *arg: 'OK'
config = self._makeOne(autocommit=True)
config.add_view(view=view,
request_type='pyramid.interfaces.IRequest',
renderer=null_renderer)
wrapper = self._getViewCallable(config)
request = DummyRequest()
self._assertNotFound(wrapper, None, request)
directlyProvides(request, IRequest)
result = wrapper(None, request)
self.assertEqual(result, 'OK')
def test_add_view_view_callable_None_with_renderer(self):
config = self._makeOne(autocommit=True)
self._registerRenderer(config, name='dummy')
config.add_view(renderer='dummy')
view = self._getViewCallable(config)
self.assertTrue(b'Hello!' in view(None, None).body)
def test_add_view_with_tmpl_renderer_factory_introspector_missing(self):
config = self._makeOne(autocommit=True)
config.introspection = False
config.introspector = None
config.add_view(renderer='dummy.pt')
view = self._getViewCallable(config)
self.assertRaises(ValueError, view, None, None)
def test_add_view_with_tmpl_renderer_factory_no_renderer_factory(self):
config = self._makeOne(autocommit=True)
introspector = DummyIntrospector()
config.introspector = introspector
config.add_view(renderer='dummy.pt')
self.assertFalse(('renderer factories', '.pt') in
introspector.related[-1])
view = self._getViewCallable(config)
self.assertRaises(ValueError, view, None, None)
def test_add_view_with_tmpl_renderer_factory_with_renderer_factory(self):
config = self._makeOne(autocommit=True)
introspector = DummyIntrospector(True)
config.introspector = introspector
def dummy_factory(helper):
return lambda val, system_vals: 'Hello!'
config.add_renderer('.pt', dummy_factory)
config.add_view(renderer='dummy.pt')
self.assertTrue(
('renderer factories', '.pt') in introspector.related[-1])
view = self._getViewCallable(config)
self.assertTrue(b'Hello!' in view(None, None).body)
def test_add_view_wrapped_view_is_decorated(self):
def view(request): # request-only wrapper
""" """
config = self._makeOne(autocommit=True)
config.add_view(view=view)
wrapper = self._getViewCallable(config)
self.assertEqual(wrapper.__module__, view.__module__)
self.assertEqual(wrapper.__name__, view.__name__)
self.assertEqual(wrapper.__doc__, view.__doc__)
self.assertEqual(wrapper.__discriminator__(None, None).resolve()[0],
'view')
def test_add_view_view_callable_dottedname(self):
from pyramid.renderers import null_renderer
config = self._makeOne(autocommit=True)
config.add_view(view='pyramid.tests.test_config.dummy_view',
renderer=null_renderer)
wrapper = self._getViewCallable(config)
self.assertEqual(wrapper(None, None), 'OK')
def test_add_view_with_function_callable(self):
from pyramid.renderers import null_renderer
view = lambda *arg: 'OK'
config = self._makeOne(autocommit=True)
config.add_view(view=view, renderer=null_renderer)
wrapper = self._getViewCallable(config)
result = wrapper(None, None)
self.assertEqual(result, 'OK')
def test_add_view_with_function_callable_requestonly(self):
from pyramid.renderers import null_renderer
def view(request):
return 'OK'
config = self._makeOne(autocommit=True)
config.add_view(view=view, renderer=null_renderer)
wrapper = self._getViewCallable(config)
result = wrapper(None, None)
self.assertEqual(result, 'OK')
def test_add_view_with_name(self):
from pyramid.renderers import null_renderer
view = lambda *arg: 'OK'
config = self._makeOne(autocommit=True)
config.add_view(view=view, name='abc', renderer=null_renderer)
wrapper = self._getViewCallable(config, name='abc')
result = wrapper(None, None)
self.assertEqual(result, 'OK')
def test_add_view_with_name_unicode(self):
from pyramid.renderers import null_renderer
view = lambda *arg: 'OK'
config = self._makeOne(autocommit=True)
name = text_(b'La Pe\xc3\xb1a', 'utf-8')
config.add_view(view=view, name=name, renderer=null_renderer)
wrapper = self._getViewCallable(config, name=name)
result = wrapper(None, None)
self.assertEqual(result, 'OK')
def test_add_view_with_decorator(self):
from pyramid.renderers import null_renderer
def view(request):
""" ABC """
return 'OK'
def view_wrapper(fn):
def inner(context, request):
return fn(context, request)
return inner
config = self._makeOne(autocommit=True)
config.add_view(view=view, decorator=view_wrapper,
renderer=null_renderer)
wrapper = self._getViewCallable(config)
self.assertFalse(wrapper is view)
self.assertEqual(wrapper.__doc__, view.__doc__)
result = wrapper(None, None)
self.assertEqual(result, 'OK')
def test_add_view_with_decorator_tuple(self):
from pyramid.renderers import null_renderer
def view(request):
""" ABC """
return 'OK'
def view_wrapper1(fn):
def inner(context, request):
return 'wrapped1' + fn(context, request)
return inner
def view_wrapper2(fn):
def inner(context, request):
return 'wrapped2' + fn(context, request)
return inner
config = self._makeOne(autocommit=True)
config.add_view(view=view, decorator=(view_wrapper2, view_wrapper1),
renderer=null_renderer)
wrapper = self._getViewCallable(config)
self.assertFalse(wrapper is view)
self.assertEqual(wrapper.__doc__, view.__doc__)
result = wrapper(None, None)
self.assertEqual(result, 'wrapped2wrapped1OK')
def test_add_view_with_http_cache(self):
import datetime
from pyramid.response import Response
response = Response('OK')
def view(request):
""" ABC """
return response
config = self._makeOne(autocommit=True)
config.add_view(view=view, http_cache=(86400, {'public':True}))
wrapper = self._getViewCallable(config)
self.assertFalse(wrapper is view)
self.assertEqual(wrapper.__doc__, view.__doc__)
request = testing.DummyRequest()
when = datetime.datetime.utcnow() + datetime.timedelta(days=1)
result = wrapper(None, request)
self.assertEqual(result, response)
headers = dict(response.headerlist)
self.assertEqual(headers['Cache-Control'], 'max-age=86400, public')
expires = parse_httpdate(headers['Expires'])
assert_similar_datetime(expires, when)
def test_add_view_as_instance(self):
from pyramid.renderers import null_renderer
class AView:
def __call__(self, context, request):
""" """
return 'OK'
view = AView()
config = self._makeOne(autocommit=True)
config.add_view(view=view, renderer=null_renderer)
wrapper = self._getViewCallable(config)
result = wrapper(None, None)
self.assertEqual(result, 'OK')
def test_add_view_as_instancemethod(self):
from pyramid.renderers import null_renderer
class View:
def index(self, context, request):
return 'OK'
view = View()
config=self._makeOne(autocommit=True)
config.add_view(view=view.index, renderer=null_renderer)
wrapper = self._getViewCallable(config)
result = wrapper(None, None)
self.assertEqual(result, 'OK')
def test_add_view_as_instancemethod_requestonly(self):
from pyramid.renderers import null_renderer
class View:
def index(self, request):
return 'OK'
view = View()
config=self._makeOne(autocommit=True)
config.add_view(view=view.index, renderer=null_renderer)
wrapper = self._getViewCallable(config)
result = wrapper(None, None)
self.assertEqual(result, 'OK')
def test_add_view_as_instance_requestonly(self):
from pyramid.renderers import null_renderer
class AView:
def __call__(self, request):
""" """
return 'OK'
view = AView()
config = self._makeOne(autocommit=True)
config.add_view(view=view, renderer=null_renderer)
wrapper = self._getViewCallable(config)
result = wrapper(None, None)
self.assertEqual(result, 'OK')
def test_add_view_as_oldstyle_class(self):
from pyramid.renderers import null_renderer
class view:
def __init__(self, context, request):
self.context = context
self.request = request
def __call__(self):
return 'OK'
config = self._makeOne(autocommit=True)
config.add_view(view=view, renderer=null_renderer)
wrapper = self._getViewCallable(config)
request = self._makeRequest(config)
result = wrapper(None, request)
self.assertEqual(result, 'OK')
self.assertEqual(request.__view__.__class__, view)
def test_add_view_as_oldstyle_class_requestonly(self):
from pyramid.renderers import null_renderer
class view:
def __init__(self, request):
self.request = request
def __call__(self):
return 'OK'
config = self._makeOne(autocommit=True)
config.add_view(view=view, renderer=null_renderer)
wrapper = self._getViewCallable(config)
request = self._makeRequest(config)
result = wrapper(None, request)
self.assertEqual(result, 'OK')
self.assertEqual(request.__view__.__class__, view)
def test_add_view_context_as_class(self):
from pyramid.renderers import null_renderer
from zope.interface import implementedBy
view = lambda *arg: 'OK'
class Foo:
pass
config = self._makeOne(autocommit=True)
config.add_view(context=Foo, view=view, renderer=null_renderer)
foo = implementedBy(Foo)
wrapper = self._getViewCallable(config, foo)
self.assertEqual(wrapper, view)
def test_add_view_context_as_iface(self):
from pyramid.renderers import null_renderer
view = lambda *arg: 'OK'
config = self._makeOne(autocommit=True)
config.add_view(context=IDummy, view=view, renderer=null_renderer)
wrapper = self._getViewCallable(config, IDummy)
self.assertEqual(wrapper, view)
def test_add_view_context_as_dottedname(self):
from pyramid.renderers import null_renderer
view = lambda *arg: 'OK'
config = self._makeOne(autocommit=True)
config.add_view(context='pyramid.tests.test_config.IDummy',
view=view, renderer=null_renderer)
wrapper = self._getViewCallable(config, IDummy)
self.assertEqual(wrapper, view)
def test_add_view_for__as_dottedname(self):
from pyramid.renderers import null_renderer
view = lambda *arg: 'OK'
config = self._makeOne(autocommit=True)
config.add_view(for_='pyramid.tests.test_config.IDummy',
view=view, renderer=null_renderer)
wrapper = self._getViewCallable(config, IDummy)
self.assertEqual(wrapper, view)
def test_add_view_for_as_class(self):
# ``for_`` is older spelling for ``context``
from pyramid.renderers import null_renderer
from zope.interface import implementedBy
view = lambda *arg: 'OK'
class Foo:
pass
config = self._makeOne(autocommit=True)
config.add_view(for_=Foo, view=view, renderer=null_renderer)
foo = implementedBy(Foo)
wrapper = self._getViewCallable(config, foo)
self.assertEqual(wrapper, view)
def test_add_view_for_as_iface(self):
# ``for_`` is older spelling for ``context``
from pyramid.renderers import null_renderer
view = lambda *arg: 'OK'
config = self._makeOne(autocommit=True)
config.add_view(for_=IDummy, view=view, renderer=null_renderer)
wrapper = self._getViewCallable(config, IDummy)
self.assertEqual(wrapper, view)
def test_add_view_context_trumps_for(self):
# ``for_`` is older spelling for ``context``
from pyramid.renderers import null_renderer
view = lambda *arg: 'OK'
config = self._makeOne(autocommit=True)
class Foo:
pass
config.add_view(context=IDummy, for_=Foo, view=view,
renderer=null_renderer)
wrapper = self._getViewCallable(config, IDummy)
self.assertEqual(wrapper, view)
def test_add_view_register_secured_view(self):
from pyramid.renderers import null_renderer
from zope.interface import Interface
from pyramid.interfaces import IRequest
from pyramid.interfaces import ISecuredView
from pyramid.interfaces import IViewClassifier
view = lambda *arg: 'OK'
view.__call_permissive__ = view
config = self._makeOne(autocommit=True)
config.add_view(view=view, renderer=null_renderer)
wrapper = config.registry.adapters.lookup(
(IViewClassifier, IRequest, Interface),
ISecuredView, name='', default=None)
self.assertEqual(wrapper, view)
def test_add_view_exception_register_secured_view(self):
from pyramid.renderers import null_renderer
from zope.interface import implementedBy
from pyramid.interfaces import IRequest
from pyramid.interfaces import IView
from pyramid.interfaces import IExceptionViewClassifier
view = lambda *arg: 'OK'
view.__call_permissive__ = view
config = self._makeOne(autocommit=True)
config.add_view(view=view, context=RuntimeError, renderer=null_renderer)
wrapper = config.registry.adapters.lookup(
(IExceptionViewClassifier, IRequest, implementedBy(RuntimeError)),
IView, name='', default=None)
self.assertEqual(wrapper, view)
def test_add_view_same_phash_overrides_existing_single_view(self):
from pyramid.renderers import null_renderer
from hashlib import md5
from zope.interface import Interface
from pyramid.interfaces import IRequest
from pyramid.interfaces import IView
from pyramid.interfaces import IViewClassifier
from pyramid.interfaces import IMultiView
phash = md5()
phash.update(b'xhr = True')
view = lambda *arg: 'NOT OK'
view.__phash__ = phash.hexdigest()
config = self._makeOne(autocommit=True)
config.registry.registerAdapter(
view, (IViewClassifier, IRequest, Interface), IView, name='')
def newview(context, request):
return 'OK'
config.add_view(view=newview, xhr=True, renderer=null_renderer)
wrapper = self._getViewCallable(config)
self.assertFalse(IMultiView.providedBy(wrapper))
request = DummyRequest()
request.is_xhr = True
self.assertEqual(wrapper(None, request), 'OK')
def test_add_view_exc_same_phash_overrides_existing_single_view(self):
from pyramid.renderers import null_renderer
from hashlib import md5
from zope.interface import implementedBy
from pyramid.interfaces import IRequest
from pyramid.interfaces import IView
from pyramid.interfaces import IExceptionViewClassifier
from pyramid.interfaces import IMultiView
phash = md5()
phash.update(b'xhr = True')
view = lambda *arg: 'NOT OK'
view.__phash__ | |
# See LICENSE.incore for details
import ruamel
from ruamel.yaml import YAML
import riscv_isac.parsers as helpers
import riscv_isac.utils as utils
from riscv_isac.constants import *
from riscv_isac.log import logger
from collections import Counter
import sys
from riscv_isac.utils import yaml
from riscv_isac.cgf_normalize import *
import struct
import pytablewriter
class archState:
    '''
    Architectural state (register files and program counter) of the RISC-V
    device being traced.
    '''
    def __init__ (self, xlen, flen):
        '''
        Class constructor

        :param xlen: max XLEN value of the RISC-V device
        :param flen: max FLEN value of the RISC-V device

        :type xlen: int
        :type flen: int

        The integer (x) and floating point (f) register files are arrays of
        32 zero-padded hexadecimal strings whose width follows xlen/flen
        (8 hex digits for 32-bit registers, 16 otherwise). The program
        counter is a plain int.
        '''
        x_zero = '00000000' if xlen == 32 else '0000000000000000'
        f_zero = '00000000' if flen == 32 else '0000000000000000'
        self.x_rf = [x_zero] * 32
        self.f_rf = [f_zero] * 32
        self.pc = 0
class statistics:
    '''
    Collection of accumulators used to build the data-propagation report.
    '''
    def __init__(self, xlen, flen):
        '''
        Maintains the arrays backing the following statistics:

        - STAT1 : instructions that hit unique coverpoints and update the signature
        - STAT2 : instructions that hit non-unique coverpoints but still update the signature
        - STAT3 : instructions that hit a unique coverpoint but do not update the signature
        - STAT4 : multiple signature updates for the same coverpoint
        - STAT5 : times the signature was overwritten
        '''
        # Each tracker gets its own fresh list (no aliasing between fields).
        for tracker in ('stat1', 'stat2', 'stat3', 'stat4', 'stat5',
                        'code_seq', 'ucode_seq', 'covpt', 'ucovpt',
                        'cov_pt_sig', 'last_meta'):
            setattr(self, tracker, [])
def pretty_print_yaml(yaml):
    '''
    Render a YAML node as round-trip-formatted text.

    :param yaml: the (ruamel) YAML node to render. NOTE: the parameter name
        shadows the module-level ``yaml`` import inside this function.

    :return: the formatted YAML string
    '''
    # round_trip_dump already returns the complete string; the original
    # re-accumulated its splitlines(True) one by one, which was a no-op.
    return ruamel.yaml.round_trip_dump(yaml, indent=5, block_seq_indent=3)
def pretty_print_regfile(regfile):
    '''
    Print a 32-entry register file to stdout, four registers per row,
    followed by a blank separator.

    :param regfile: sequence of 32 register value strings (e.g. archState.x_rf)
    '''
    # The original also initialized an unused accumulator ``res``; removed.
    for index in range(0, 32, 4):
        print('x'+str(index) + ' : ' + regfile[index] + '\t' +\
                'x'+str(index+1) + ' : ' + regfile[index+1] + '\t' + \
                'x'+str(index+2) + ' : ' + regfile[index+2] + '\t' + \
                'x'+str(index+3) + ' : ' + regfile[index+3] + '\t' )
    print('\n\n')
def gen_report(cgf, detailed):
    '''
    Convert a CGF dictionary into a human-readable coverage report. A
    detailed report additionally lists each coverpoint with its hit count.

    :param cgf: an input CGF dictionary
    :param detailed: boolean value indicating a detailed report must be generated.

    :type cgf: dict
    :type detailed: bool

    :return: string holding the final report
    '''
    # The original computed an unused (and mis-named) ``percentage_covered``
    # local per category; it has been removed.
    rpt_str = ''
    for cov_labels, value in cgf.items():
        # 'datasets' is bookkeeping, not a coverage label.
        if cov_labels == 'datasets':
            continue
        rpt_str += cov_labels + ':\n'
        # First pass: overall covered/total tally across all categories.
        total_uncovered = 0
        total_categories = 0
        for categories in value:
            if categories in ['cond', 'config', 'ignore']:
                continue
            for coverage in value[categories].values():
                if coverage == 0:
                    total_uncovered += 1
            total_categories += len(value[categories])
        rpt_str += '    coverage: ' + str(total_categories - total_uncovered) + \
                '/' + str(total_categories) + '\n'
        # Second pass: per-category tallies (and optional coverpoint detail).
        for categories in value:
            if categories in ['cond', 'config', 'ignore']:
                continue
            uncovered = sum(1 for coverage in value[categories].values()
                            if coverage == 0)
            node_level_str = '    ' + categories + ':\n'
            node_level_str += '        coverage: ' + \
                    str(len(value[categories]) - uncovered) + \
                    '/' + str(len(value[categories]))
            rpt_str += node_level_str + '\n'
            if detailed:
                rpt_str += '        detail:\n'
                for coverpoints in value[categories]:
                    rpt_str += '            - ' + str(coverpoints) + ': ' + \
                            str(value[categories][coverpoints]) + '\n'
    return rpt_str
def merge_coverage(files, cgf, detailed, xlen):
    '''
    Merge the coverpoint hit counts of multiple CGF coverage files into the
    supplied *cgf* (analogous to traditional coverage-file merging) and
    return the report for the merged result.

    :param files: an array of input CGF file names which need to be merged.
    :param cgf: a cgf against which coverpoints need to be checked for.
    :param detailed: a boolean value indicating if a detailed report needs to be generated
    :param xlen: XLEN of the trace

    :type files: [str]
    :type cgf: dict
    :type detailed: bool
    :type xlen: int

    :return: a string contain the final report of the merge.
    '''
    for path in files:
        with open(path, "r") as cov_file:
            merged = yaml.load(cov_file)
        for cov_labels, value in merged.items():
            for category in value:
                # Non-coverage bookkeeping nodes are never merged.
                if category in ['cond', 'config', 'ignore']:
                    continue
                node = cgf[cov_labels][category]
                for coverpoint, hits in value[category].items():
                    # Only accumulate coverpoints the reference CGF knows about.
                    if coverpoint in node:
                        node[coverpoint] += hits
    return gen_report(cgf, detailed)
def twos_complement(val, bits):
    '''Interpret *val* (an unsigned *bits*-wide integer) as a signed
    two's-complement value and return the signed result.'''
    sign_bit = 1 << (bits - 1)
    return val - (1 << bits) if val & sign_bit else val
def compute_per_line(instr, mnemonic, commitvalue, cgf, xlen, addr_pairs, sig_addrs):
'''
This function checks if the current instruction under scrutiny matches a
particular coverpoint of interest. If so, it updates the coverpoints and
return the same.
:param instr: an instructionObject of the single instruction currently parsed
:param commitvalue: a tuple containing the register to be updated and the value it should be updated with
:param cgf: a cgf against which coverpoints need to be checked for.
:param xlen: Max xlen of the trace
:param addr_pairs: pairs of start and end addresses for which the coverage needs to be updated
:type instr: :class:`helpers.instructionObject`
:type commitvalue: (str, str)
:type cgf: dict
:type xlen: int
:type addr_pairs: (int, int)
'''
global arch_state
global stats
# assign default values to operands
rs1 = 0
rs2 = 0
rd = 0
rs1_type = 'x'
rs2_type = 'x'
rd_type = 'x'
# create signed/unsigned conversion params
if xlen == 32:
unsgn_sz = '>I'
sgn_sz = '>i'
else:
unsgn_sz = '>Q'
sgn_sz = '>q'
# if instruction is empty then return
if instr is None:
return cgf
# check if instruction lies within the valid region of interest
if addr_pairs:
if any([instr.instr_addr >= saddr and instr.instr_addr < eaddr for saddr,eaddr in addr_pairs]):
enable = True
else:
enable = False
else:
enable=True
# capture the operands and their values from the regfile
if instr.rs1 is not None:
rs1 = instr.rs1[0]
rs1_type = instr.rs1[1]
if instr.rs2 is not None:
rs2 = instr.rs2[0]
rs2_type = instr.rs2[1]
if instr.rd is not None:
rd = instr.rd[0]
is_rd_valid = True
rd_type = instr.rd[1]
else:
is_rd_valid = False
if instr.imm is not None:
imm_val = instr.imm
if instr.shamt is not None:
imm_val = instr.shamt
# special value conversion based on signed/unsigned operations
if instr.instr_name in ['sw','sd','sh','sb','ld','lw','lwu','lh','lhu','lb', 'lbu','bgeu', 'bltu', 'sltiu', 'sltu','c.lw','c.ld','c.lwsp','c.ldsp','c.sw','c.sd','c.swsp','c.sdsp','mulhu','divu','remu','divuw','remuw']:
rs1_val = struct.unpack(unsgn_sz, bytes.fromhex(arch_state.x_rf[rs1]))[0]
elif rs1_type == 'x':
rs1_val = struct.unpack(sgn_sz, bytes.fromhex(arch_state.x_rf[rs1]))[0]
elif rs1_type == 'f':
rs1_val = struct.unpack(sgn_sz, bytes.fromhex(arch_state.f_rf[rs1]))[0]
if instr.instr_name in ['bgeu', 'bltu', 'sltiu', 'sltu', 'sll', 'srl', 'sra','mulhu','mulhsu','divu','remu','divuw','remuw']:
rs2_val = struct.unpack(unsgn_sz, bytes.fromhex(arch_state.x_rf[rs2]))[0]
elif rs2_type == 'x':
rs2_val = struct.unpack(sgn_sz, bytes.fromhex(arch_state.x_rf[rs2]))[0]
elif rs2_type == 'f':
rs2_val = struct.unpack(sgn_sz, bytes.fromhex(arch_state.f_rf[rs2]))[0]
arch_state.pc = instr.instr_addr
# the ea_align variable is used by the eval statements of the
# coverpoints for conditional ops and memory ops
if instr.instr_name in ['jal','bge','bgeu','blt','bltu','beq','bne']:
ea_align = (instr.instr_addr+(imm_val<<1)) % 4
if instr.instr_name == "jalr":
ea_align = (rs1_val + imm_val) % 4
if instr.instr_name in ['sw','sh','sb','lw','lhu','lh','lb','lbu','lwu']:
ea_align = (rs1_val + imm_val) % 4
if instr.instr_name in ['ld','sd']:
ea_align = (rs1_val + imm_val) % 8
if enable :
for cov_labels,value in cgf.items():
if cov_labels != 'datasets':
if instr.instr_name in value['opcode']:
if stats.code_seq:
logger.error('Found a coverpoint without sign Upd ' + str(stats.code_seq))
stats.stat3.append('\n'.join(stats.code_seq))
stats.code_seq = []
stats.covpt = []
stats.ucovpt = []
stats.ucode_seq = []
if value['opcode'][instr.instr_name] == 0:
stats.ucovpt.append('opcode : ' + instr.instr_name)
stats.covpt.append('opcode : ' + instr.instr_name)
value['opcode'][instr.instr_name] += 1
if 'rs1' in value and 'x'+str(rs1) in value['rs1']:
if value['rs1']['x'+str(rs1)] == 0:
stats.ucovpt.append('rs1 : ' + 'x'+str(rs1))
stats.covpt.append('rs1 : ' + 'x'+str(rs1))
value['rs1']['x'+str(rs1)] += 1
if 'rs2' in value and 'x'+str(rs2) in value['rs2']:
if value['rs2']['x'+str(rs2)] == 0:
stats.ucovpt.append('rs2 : ' + 'x'+str(rs2))
stats.covpt.append('rs2 : ' + 'x'+str(rs2))
value['rs2']['x'+str(rs2)] += 1
if 'rd' in value and is_rd_valid and 'x'+str(rd) in value['rd']:
if value['rd']['x'+str(rd)] == 0:
stats.ucovpt.append('rd : ' + 'x'+str(rd))
stats.covpt.append('rd : ' + 'x'+str(rd))
value['rd']['x'+str(rd)] += 1
if 'op_comb' in value and len(value['op_comb']) != 0 :
for coverpoints in value['op_comb']:
if eval(coverpoints):
if cgf[cov_labels]['op_comb'][coverpoints] == 0:
stats.ucovpt.append(str(coverpoints))
stats.covpt.append(str(coverpoints))
cgf[cov_labels]['op_comb'][coverpoints] += 1
if 'val_comb' in value and len(value['val_comb']) | |
True
return False
@staticmethod
def _meta_info():
    # Lazy import keeps the large generated meta module off the normal
    # import path; the key must match the generated _meta_table entry exactly.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
    return meta._meta_table['L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices.VlanUnawareFlexibleXconnectService.VlanUnawareFxcAttachmentCircuits']['meta_info']
class VlanUnawareFxcPseudowireEvpns(object):
    """
    List of EVPN Services.

    .. attribute:: vlan_unaware_fxc_pseudowire_evpn

        EVPN FXC Service Configuration
        **type**\: list of :py:class:`VlanUnawareFxcPseudowireEvpn <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices.VlanUnawareFlexibleXconnectService.VlanUnawareFxcPseudowireEvpns.VlanUnawareFxcPseudowireEvpn>`
    """

    # YANG module identity used when building paths for this node.
    _prefix = 'l2vpn-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Keyed child list; YList tracks parent/name so paths can be derived.
        self.vlan_unaware_fxc_pseudowire_evpn = YList()
        self.vlan_unaware_fxc_pseudowire_evpn.parent = self
        self.vlan_unaware_fxc_pseudowire_evpn.name = 'vlan_unaware_fxc_pseudowire_evpn'

    class VlanUnawareFxcPseudowireEvpn(object):
        """
        EVPN FXC Service Configuration.

        .. attribute:: acid  <key>

            AC ID
            **type**\: int
            **range:** 1..4294967295

        .. attribute:: eviid  <key>

            Ethernet VPN ID
            **type**\: int
            **range:** 1..65534
        """

        _prefix = 'l2vpn-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            # List keys; both must be set before a path can be derived.
            self.acid = None
            self.eviid = None

        @property
        def _common_path(self):
            # Path is relative to the parent container, so parent and both
            # list keys are mandatory.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.acid is None:
                raise YPYModelError('Key property acid is None')
            if self.eviid is None:
                raise YPYModelError('Key property eviid is None')

            return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:vlan-unaware-fxc-pseudowire-evpn[Cisco-IOS-XR-l2vpn-cfg:acid = ' + str(self.acid) + '][Cisco-IOS-XR-l2vpn-cfg:eviid = ' + str(self.eviid) + ']'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            # True when either list key has been assigned.
            if not self.is_config():
                return False
            if self.acid is not None:
                return True
            if self.eviid is not None:
                return True
            return False

        @staticmethod
        def _meta_info():
            # Lazy import of the generated metadata table.
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
            return meta._meta_table['L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices.VlanUnawareFlexibleXconnectService.VlanUnawareFxcPseudowireEvpns.VlanUnawareFxcPseudowireEvpn']['meta_info']

    @property
    def _common_path(self):
        # Container path hangs off the parent service entry.
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')

        return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:vlan-unaware-fxc-pseudowire-evpns'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # True when any child list entry carries data.
        if not self.is_config():
            return False
        if self.vlan_unaware_fxc_pseudowire_evpn is not None:
            for child_ref in self.vlan_unaware_fxc_pseudowire_evpn:
                if child_ref._has_data():
                    return True
        return False

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
        return meta._meta_table['L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices.VlanUnawareFlexibleXconnectService.VlanUnawareFxcPseudowireEvpns']['meta_info']
@property
def _common_path(self):
    # Absolute XPath of this list entry; 'name' is the list key and must be set.
    if self.name is None:
        raise YPYModelError('Key property name is None')

    return '/Cisco-IOS-XR-l2vpn-cfg:l2vpn/Cisco-IOS-XR-l2vpn-cfg:database/Cisco-IOS-XR-l2vpn-cfg:flexible-xconnect-service-table/Cisco-IOS-XR-l2vpn-cfg:vlan-unaware-flexible-xconnect-services/Cisco-IOS-XR-l2vpn-cfg:vlan-unaware-flexible-xconnect-service[Cisco-IOS-XR-l2vpn-cfg:name = ' + str(self.name) + ']'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # This generated model node always represents configuration data.
    return True
def _has_data(self):
if not self.is_config():
return False
if self.name is not None:
return True
if self.vlan_unaware_fxc_attachment_circuits is not None and self.vlan_unaware_fxc_attachment_circuits._has_data():
return True
if self.vlan_unaware_fxc_pseudowire_evpns is not None and self.vlan_unaware_fxc_pseudowire_evpns._has_data():
return True
return False
@staticmethod
def _meta_info():
    # Lazy import of the generated metadata table; key matches this class path.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
    return meta._meta_table['L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices.VlanUnawareFlexibleXconnectService']['meta_info']
@property
def _common_path(self):
    # Fixed absolute XPath for this container (no list keys involved).
    return '/Cisco-IOS-XR-l2vpn-cfg:l2vpn/Cisco-IOS-XR-l2vpn-cfg:database/Cisco-IOS-XR-l2vpn-cfg:flexible-xconnect-service-table/Cisco-IOS-XR-l2vpn-cfg:vlan-unaware-flexible-xconnect-services'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # This generated model node always represents configuration data.
    return True
def _has_data(self):
if not self.is_config():
return False
if self.vlan_unaware_flexible_xconnect_service is not None:
for child_ref in self.vlan_unaware_flexible_xconnect_service:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
    # Lazy import of the generated metadata table; key matches this class path.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
    return meta._meta_table['L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices']['meta_info']
@property
def _common_path(self):
    # Fixed absolute XPath for the flexible-xconnect-service-table container.
    return '/Cisco-IOS-XR-l2vpn-cfg:l2vpn/Cisco-IOS-XR-l2vpn-cfg:database/Cisco-IOS-XR-l2vpn-cfg:flexible-xconnect-service-table'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # This generated model node always represents configuration data.
    return True
def _has_data(self):
if not self.is_config():
return False
if self.vlan_unaware_flexible_xconnect_services is not None and self.vlan_unaware_flexible_xconnect_services._has_data():
return True
return False
@staticmethod
def _meta_info():
    # Lazy import of the generated metadata table; key matches this class path.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
    return meta._meta_table['L2Vpn.Database.FlexibleXconnectServiceTable']['meta_info']
class Redundancy(object):
    """
    Redundancy groups.

    .. attribute:: enable

        Enable redundancy groups
        **type**\: :py:class:`Empty<ydk.types.Empty>`

    .. attribute:: iccp_redundancy_groups

        List of Inter\-Chassis Communication Protocol redundancy groups
        **type**\: :py:class:`IccpRedundancyGroups <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.Redundancy.IccpRedundancyGroups>`
    """

    # YANG module identity used when building paths for this node.
    _prefix = 'l2vpn-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.enable = None
        # Child container; parent link lets it derive its own path.
        self.iccp_redundancy_groups = L2Vpn.Database.Redundancy.IccpRedundancyGroups()
        self.iccp_redundancy_groups.parent = self

    class IccpRedundancyGroups(object):
        """
        List of Inter\-Chassis Communication Protocol redundancy groups.

        .. attribute:: iccp_redundancy_group

            ICCP Redundancy group
            **type**\: list of :py:class:`IccpRedundancyGroup <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.Redundancy.IccpRedundancyGroups.IccpRedundancyGroup>`
        """

        _prefix = 'l2vpn-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            # Keyed child list; YList tracks parent/name for path derivation.
            self.iccp_redundancy_group = YList()
            self.iccp_redundancy_group.parent = self
            self.iccp_redundancy_group.name = 'iccp_redundancy_group'

        class IccpRedundancyGroup(object):
            """
            ICCP Redundancy group.

            .. attribute:: group_id  <key>

                Group ID
                **type**\: int
                **range:** \-2147483648..2147483647

            .. attribute:: iccp_interfaces

                List of interfaces
                **type**\: :py:class:`IccpInterfaces <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.Redundancy.IccpRedundancyGroups.IccpRedundancyGroup.IccpInterfaces>`

            .. attribute:: multi_homing_node_id

                ICCP\-based service multi\-homing node ID
                **type**\: int
                **range:** 0..254
            """

            _prefix = 'l2vpn-cfg'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                # List key; must be set before a path can be derived.
                self.group_id = None
                self.iccp_interfaces = L2Vpn.Database.Redundancy.IccpRedundancyGroups.IccpRedundancyGroup.IccpInterfaces()
                self.iccp_interfaces.parent = self
                self.multi_homing_node_id = None

            class IccpInterfaces(object):
                """
                List of interfaces.

                .. attribute:: iccp_interface

                    Interface name
                    **type**\: list of :py:class:`IccpInterface <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.Redundancy.IccpRedundancyGroups.IccpRedundancyGroup.IccpInterfaces.IccpInterface>`
                """

                _prefix = 'l2vpn-cfg'
                _revision = '2015-11-09'

                def __init__(self):
                    self.parent = None
                    self.iccp_interface = YList()
                    self.iccp_interface.parent = self
                    self.iccp_interface.name = 'iccp_interface'

                class IccpInterface(object):
                    """
                    Interface name.

                    .. attribute:: interface_name  <key>

                        Interface name
                        **type**\: str
                        **pattern:** (([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+\\.\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]\*\\d+))\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]+))\|([a\-zA\-Z0\-9\_\-]\*\\d+)\|([a\-zA\-Z0\-9\_\-]\*\\d+\\.\\d+)\|(mpls)\|(dwdm)

                    .. attribute:: mac_flush_tcn

                        Enable STP\-TCN MAC flushing
                        **type**\: :py:class:`Empty<ydk.types.Empty>`

                    .. attribute:: primary_vlan_range

                        Primary VLAN range, in the form of 1\-3,5 ,8\-11
                        **type**\: str

                    .. attribute:: recovery_delay

                        Failure clear recovery delay
                        **type**\: int
                        **range:** 30..3600
                        **default value**\: 180

                    .. attribute:: secondary_vlan_range

                        Secondary VLAN range, in the form of 1\-3,5 ,8\-11
                        **type**\: str
                    """

                    _prefix = 'l2vpn-cfg'
                    _revision = '2015-11-09'

                    def __init__(self):
                        self.parent = None
                        # List key; must be set before a path can be derived.
                        self.interface_name = None
                        self.mac_flush_tcn = None
                        self.primary_vlan_range = None
                        self.recovery_delay = None
                        self.secondary_vlan_range = None

                    @property
                    def _common_path(self):
                        # Path is relative to the parent container, so parent
                        # and the list key are mandatory.
                        if self.parent is None:
                            raise YPYModelError('parent is not set . Cannot derive path.')
                        if self.interface_name is None:
                            raise YPYModelError('Key property interface_name is None')

                        return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:iccp-interface[Cisco-IOS-XR-l2vpn-cfg:interface-name = ' + str(self.interface_name) + ']'

                    def is_config(self):
                        ''' Returns True if this instance represents config data else returns False '''
                        return True

                    def _has_data(self):
                        # True when any leaf of this entry has been assigned.
                        if not self.is_config():
                            return False
                        if self.interface_name is not None:
                            return True
                        if self.mac_flush_tcn is not None:
                            return True
                        if self.primary_vlan_range is not None:
                            return True
                        if self.recovery_delay is not None:
                            return True
                        if self.secondary_vlan_range is not None:
                            return True
                        return False

                    @staticmethod
                    def _meta_info():
                        # Lazy import of the generated metadata table.
                        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
                        return meta._meta_table['L2Vpn.Database.Redundancy.IccpRedundancyGroups.IccpRedundancyGroup.IccpInterfaces.IccpInterface']['meta_info']

                @property
                def _common_path(self):
                    if self.parent is None:
                        raise YPYModelError('parent is not set . Cannot derive path.')

                    return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:iccp-interfaces'

                def is_config(self):
                    ''' Returns True if this instance represents config data else returns False '''
                    return True

                def _has_data(self):
                    # True when any child list entry carries data.
                    if not self.is_config():
                        return False
                    if self.iccp_interface is not None:
                        for child_ref in self.iccp_interface:
                            if child_ref._has_data():
                                return True
                    return False

                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
                    return meta._meta_table['L2Vpn.Database.Redundancy.IccpRedundancyGroups.IccpRedundancyGroup.IccpInterfaces']['meta_info']

            @property
            def _common_path(self):
                # Absolute XPath of this list entry; 'group_id' is the list key.
                if self.group_id is None:
                    raise YPYModelError('Key property group_id is None')

                return '/Cisco-IOS-XR-l2vpn-cfg:l2vpn/Cisco-IOS-XR-l2vpn-cfg:database/Cisco-IOS-XR-l2vpn-cfg:redundancy/Cisco-IOS-XR-l2vpn-cfg:iccp-redundancy-groups/Cisco-IOS-XR-l2vpn-cfg:iccp-redundancy-group[Cisco-IOS-XR-l2vpn-cfg:group-id = ' + str(self.group_id) + ']'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return True

            def _has_data(self):
                # True when the key, any leaf, or the child container has data.
                if not self.is_config():
                    return False
                if self.group_id is not None:
                    return True
                if self.iccp_interfaces is not None and self.iccp_interfaces._has_data():
                    return True
                if self.multi_homing_node_id is not None:
                    return True
                return False

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
                return meta._meta_table['L2Vpn.Database.Redundancy.IccpRedundancyGroups.IccpRedundancyGroup']['meta_info']

        @property
        def _common_path(self):
            # Fixed absolute XPath for this container.
            return '/Cisco-IOS-XR-l2vpn-cfg:l2vpn/Cisco-IOS-XR-l2vpn-cfg:database/Cisco-IOS-XR-l2vpn-cfg:redundancy/Cisco-IOS-XR-l2vpn-cfg:iccp-redundancy-groups'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            # True when any child list entry carries data.
            if not self.is_config():
                return False
            if self.iccp_redundancy_group is not None:
                for child_ref in self.iccp_redundancy_group:
                    if child_ref._has_data():
                        return True
            return False

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
            return meta._meta_table['L2Vpn.Database.Redundancy.IccpRedundancyGroups']['meta_info']

    @property
    def _common_path(self):
        # Fixed absolute XPath for the redundancy container.
        return '/Cisco-IOS-XR-l2vpn-cfg:l2vpn/Cisco-IOS-XR-l2vpn-cfg:database/Cisco-IOS-XR-l2vpn-cfg:redundancy'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # True when the enable leaf or the child container has data.
        if not self.is_config():
            return False
        if self.enable is not None:
            return True
        if self.iccp_redundancy_groups is not None and self.iccp_redundancy_groups._has_data():
            return True
        return False

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
        return meta._meta_table['L2Vpn.Database.Redundancy']['meta_info']
@property
def _common_path(self):
    # Fixed absolute XPath for the l2vpn database container.
    return '/Cisco-IOS-XR-l2vpn-cfg:l2vpn/Cisco-IOS-XR-l2vpn-cfg:database'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # This generated model node always represents configuration data.
    return True
def _has_data(self):
if not self.is_config():
return False
if self.bridge_domain_groups is not None and self.bridge_domain_groups._has_data():
return True
if self.flexible_xconnect_service_table is not None and self.flexible_xconnect_service_table._has_data():
return True
if self.g8032_rings is not None and self.g8032_rings._has_data():
return True
if self.pseudowire_classes is not None and self.pseudowire_classes._has_data():
return True
if self.redundancy is not None and self.redundancy._has_data():
return True
if self.xconnect_groups is not None and self.xconnect_groups._has_data():
return True
return False
@staticmethod
def _meta_info():
    # Lazy import of the generated metadata table; key matches this class path.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
    return meta._meta_table['L2Vpn.Database']['meta_info']
class Pbb(object):
"""
L2VPN PBB Global
.. attribute:: backbone_source_mac
Backbone Source MAC
**type**\: str
**pattern:** [0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2}){5}
"""
_prefix |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.